python_code | repo_name | file_path
---|---|---
##
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##
| spark-rapids-branch-23.10 | python/rapids/__init__.py |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import signal
import select
import socket
import sys
import traceback
import time
import gc
from errno import EINTR, EAGAIN
from socket import AF_INET, SOCK_STREAM, SOMAXCONN
from signal import SIGHUP, SIGTERM, SIGCHLD, SIG_DFL, SIG_IGN
from pyspark.serializers import read_int, write_int
from pyspark.daemon import worker
from rapids.worker import initialize_gpu_mem
def manager():
# Create a new process group to corral our children
os.setpgid(0, 0)
# Create a listening socket on the AF_INET loopback interface
listen_sock = socket.socket(AF_INET, SOCK_STREAM)
listen_sock.bind(('127.0.0.1', 0))
listen_sock.listen(max(1024, SOMAXCONN))
listen_host, listen_port = listen_sock.getsockname()
# re-open stdin in 'rb' and stdout in 'wb' binary mode
stdin_bin = os.fdopen(sys.stdin.fileno(), 'rb', 4)
stdout_bin = os.fdopen(sys.stdout.fileno(), 'wb', 4)
write_int(listen_port, stdout_bin)
stdout_bin.flush()
def shutdown(code):
signal.signal(SIGTERM, SIG_DFL)
# Send SIGHUP to notify workers of shutdown
os.kill(0, SIGHUP)
sys.exit(code)
def handle_sigterm(*args):
shutdown(1)
signal.signal(SIGTERM, handle_sigterm) # Gracefully exit on SIGTERM
signal.signal(SIGHUP, SIG_IGN) # Don't die on SIGHUP
signal.signal(SIGCHLD, SIG_IGN)
reuse = os.environ.get("SPARK_REUSE_WORKER")
# Initialization complete
try:
while True:
try:
ready_fds = select.select([0, listen_sock], [], [], 1)[0]
except select.error as ex:
if ex.errno == EINTR:
continue
else:
raise
if 0 in ready_fds:
try:
worker_pid = read_int(stdin_bin)
except EOFError:
# Spark told us to exit by closing stdin
shutdown(0)
try:
os.kill(worker_pid, signal.SIGKILL)
except OSError:
pass # process already died
if listen_sock in ready_fds:
try:
sock, _ = listen_sock.accept()
except OSError as e:
if e.errno == EINTR:
continue
raise
# Launch a worker process
try:
pid = os.fork()
except OSError as e:
if e.errno in (EAGAIN, EINTR):
time.sleep(1)
pid = os.fork() # an error here will shut down the daemon
else:
outfile = sock.makefile(mode='wb')
write_int(e.errno, outfile) # Signal that the fork failed
outfile.flush()
outfile.close()
sock.close()
continue
if pid == 0:
# in child process
listen_sock.close()
# The child process should close its standard input so that
# Python native function executions stay intact.
#
# Note that if we simply close the standard input (file descriptor 0),
# that descriptor becomes the lowest available one and will be reused
# the next time any file happens to be opened.
#
# Therefore, here we redirect it to '/dev/null' by duplicating
# another file descriptor for '/dev/null' onto the standard input (0).
# See SPARK-26175.
devnull = open(os.devnull, 'r')
os.dup2(devnull.fileno(), 0)
devnull.close()
try:
# GPU context setup
initialize_gpu_mem()
# Acknowledge that the fork was successful
outfile = sock.makefile(mode="wb")
write_int(os.getpid(), outfile)
outfile.flush()
outfile.close()
authenticated = False
while True:
code = worker(sock, authenticated)
if code == 0:
authenticated = True
if not reuse or code:
# wait for closing
try:
while sock.recv(1024):
pass
except Exception:
pass
break
gc.collect()
except:
traceback.print_exc()
os._exit(1)
else:
os._exit(0)
else:
sock.close()
finally:
shutdown(1)
if __name__ == '__main__':
manager()
| spark-rapids-branch-23.10 | python/rapids/daemon.py |
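The daemon above and Spark talk over a very small protocol: the daemon announces its listening port by writing one integer to stdout with pyspark's write_int, and later reads worker PIDs to kill from stdin with read_int. As a hedged illustration (the helper names below are hypothetical and not part of pyspark), that framing is assumed to be a 4-byte big-endian signed integer, which can be reproduced with the standard struct module:
# Illustrative sketch of the integer framing used by read_int/write_int above
# (assumed to be a 4-byte big-endian signed int, i.e. struct format "!i").
import io
import struct

def demo_write_int(value, stream):
    # Pack the integer and write it to the stream, as write_int(listen_port, stdout_bin) does above.
    stream.write(struct.pack("!i", value))

def demo_read_int(stream):
    # Read exactly four bytes back; a short read means the peer closed the stream.
    data = stream.read(4)
    if len(data) < 4:
        raise EOFError("stream closed before a full integer was read")
    return struct.unpack("!i", data)[0]

buf = io.BytesIO()
demo_write_int(45678, buf)   # e.g. the daemon's listen_port
buf.seek(0)
assert demo_read_int(buf) == 45678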
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import signal
import select
import socket
import sys
import traceback
import time
import gc
from errno import EINTR, EAGAIN
from socket import AF_INET, SOCK_STREAM, SOMAXCONN
from signal import SIGHUP, SIGTERM, SIGCHLD, SIG_DFL, SIG_IGN
from pyspark.serializers import read_int, write_int, UTF8Deserializer
from pyspark.daemon import worker
from rapids.worker import initialize_gpu_mem
utf8_deserializer = UTF8Deserializer()
def manager():
# Create a new process group to corral our children
os.setpgid(0, 0)
# Create a listening socket on the AF_INET loopback interface
listen_sock = socket.socket(AF_INET, SOCK_STREAM)
listen_sock.bind(('127.0.0.1', 0))
listen_sock.listen(max(1024, SOMAXCONN))
listen_host, listen_port = listen_sock.getsockname()
# re-open stdin in 'rb' and stdout in 'wb' binary mode
stdin_bin = os.fdopen(sys.stdin.fileno(), 'rb', 4)
stdout_bin = os.fdopen(sys.stdout.fileno(), 'wb', 4)
write_int(listen_port, stdout_bin)
stdout_bin.flush()
def shutdown(code):
signal.signal(SIGTERM, SIG_DFL)
# Send SIGHUP to notify workers of shutdown
os.kill(0, SIGHUP)
sys.exit(code)
def handle_sigterm(*args):
shutdown(1)
signal.signal(SIGTERM, handle_sigterm) # Gracefully exit on SIGTERM
signal.signal(SIGHUP, SIG_IGN) # Don't die on SIGHUP
signal.signal(SIGCHLD, SIG_IGN)
reuse = os.environ.get("SPARK_REUSE_WORKER")
# Initialization complete
try:
while True:
try:
ready_fds = select.select([0, listen_sock], [], [], 1)[0]
except select.error as ex:
if ex.errno == EINTR:
continue
else:
raise
if 0 in ready_fds:
try:
worker_pid = read_int(stdin_bin)
except EOFError:
# Spark told us to exit by closing stdin
shutdown(0)
try:
os.kill(worker_pid, signal.SIGKILL)
except OSError:
pass # process already died
if listen_sock in ready_fds:
try:
sock, _ = listen_sock.accept()
except OSError as e:
if e.errno == EINTR:
continue
raise
# Launch a worker process
try:
pid = os.fork()
except OSError as e:
if e.errno in (EAGAIN, EINTR):
time.sleep(1)
pid = os.fork() # an error here will shut down the daemon
else:
outfile = sock.makefile(mode='wb')
write_int(e.errno, outfile) # Signal that the fork failed
outfile.flush()
outfile.close()
sock.close()
continue
if pid == 0:
# in child process
listen_sock.close()
# The child process should close its standard input so that
# Python native function executions stay intact.
#
# Note that if we simply close the standard input (file descriptor 0),
# that descriptor becomes the lowest available one and will be reused
# the next time any file happens to be opened.
#
# Therefore, here we redirect it to '/dev/null' by duplicating
# another file descriptor for '/dev/null' onto the standard input (0).
# See SPARK-26175.
devnull = open(os.devnull, 'r')
os.dup2(devnull.fileno(), 0)
devnull.close()
try:
# GPU context setup
initialize_gpu_mem()
infile = sock.makefile(mode="rb")
executor_username = utf8_deserializer.loads(infile)
# Acknowledge that the fork was successful
outfile = sock.makefile(mode="wb")
write_int(os.getpid(), outfile)
outfile.flush()
outfile.close()
authenticated = False
while True:
code = worker(sock, authenticated, executor_username)
if code == 0:
authenticated = True
if not reuse or code:
# wait for closing
try:
while sock.recv(1024):
pass
except Exception:
pass
break
gc.collect()
except:
traceback.print_exc()
os._exit(1)
else:
os._exit(0)
else:
sock.close()
finally:
shutdown(1)
if __name__ == '__main__':
manager()
| spark-rapids-branch-23.10 | python/rapids/daemon_databricks.py |
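The Databricks variant above differs from daemon.py mainly in reading an executor username off the worker socket with UTF8Deserializer before acknowledging the fork. As a hedged sketch (hypothetical helper names, not pyspark's API), that serializer's wire format is assumed to be a 4-byte big-endian length followed by that many UTF-8 bytes:
# Illustrative sketch of UTF8Deserializer-style framing: length prefix, then UTF-8 payload.
import io
import struct

def demo_write_utf8(text, stream):
    payload = text.encode("utf-8")
    stream.write(struct.pack("!i", len(payload)))  # length prefix
    stream.write(payload)                          # UTF-8 bytes

def demo_read_utf8(stream):
    (length,) = struct.unpack("!i", stream.read(4))
    return stream.read(length).decode("utf-8")

buf = io.BytesIO()
demo_write_utf8("some_executor_user", buf)  # e.g. the executor username read above
buf.seek(0)
assert demo_read_utf8(buf) == "some_executor_user"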
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def pytest_addoption(parser):
"""Pytest hook to define command line options for pytest"""
parser.addoption(
"--mortgage_format", action="store", default="parquet", help="format of Mortgage data"
)
parser.addoption(
"--mortgage_path", action="store", default=None, help="path to Mortgage data"
)
parser.addoption(
"--std_input_path", action="store", default=None, help="path to standard input files"
)
parser.addoption(
"--tmp_path", action="store", default=None, help="path to store tmp files"
)
parser.addoption(
"--debug_tmp_path", action='store_true', default=False, help="if true don't delete tmp_path contents for debugging"
)
parser.addoption(
"--runtime_env", action='store', default="Apache", help="the runtime environment for the tests - apache or databricks"
)
parser.addoption(
"--cudf_udf", action='store_true', default=False, help="if true enable cudf_udf test"
)
parser.addoption(
"--test_type", action='store', default="developer",
help="the type of tests that are being run to help check all the correct tests are run - developer, pre-commit, or nightly"
)
parser.addoption(
"--fuzz_test", action='store_true', default=False, help="if true enable fuzz tests"
)
parser.addoption(
"--iceberg", action="store_true", default=False, help="if true enable Iceberg tests"
)
parser.addoption(
"--delta_lake", action="store_true", default=False, help="if true enable Delta Lake tests"
)
parser.addoption(
"--test_oom_injection_mode", action='store', default="random",
help="in what way, if any, should the tests inject OOMs at test time. Valid options are: random, always, or never"
)
parser.addoption(
"--force_parquet_testing_tests", action="store_true", default=False,
help="if true forces parquet-testing tests to fail if input data cannot be found"
)
parser.addoption(
"--large_data_test", action='store_true', default=False, help="if enable tests with large data"
)
| spark-rapids-branch-23.10 | integration_tests/conftest.py |
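The flags registered above are normally consumed through request.config.getoption. The fixture below is a hypothetical sketch of that pattern (the real repository routes these values through its own conftest helpers); it assumes only the --std_input_path flag defined above:
# Hypothetical example of consuming one of the options defined above.
import pytest

@pytest.fixture
def std_input_path(request):
    # Return the command-line value, or skip tests that require it when it is absent.
    path = request.config.getoption("--std_input_path")
    if path is None:
        pytest.skip("--std_input_path was not provided")
    return path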
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from pytest import main
#import cProfile
if __name__ == '__main__':
#cProfile.run('main(sys.argv[1:])', 'test_profile')
# arguments are the same as for pytest https://docs.pytest.org/en/latest/usage.html
# or run pytest -h
sys.exit(main(sys.argv[1:]))
| spark-rapids-branch-23.10 | integration_tests/runtests.py |
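Since the arguments are handed straight to pytest.main, any standard pytest flags work. The invocations below are illustrative only; test selection and working directory depend on the local checkout:
#   python runtests.py -v -k cache_test
#   python runtests.py -x --runtime_env=Apache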
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from conftest import is_at_least_precommit_run
from spark_session import is_databricks_runtime, is_before_spark_330, is_before_spark_350, is_spark_350_or_later
from pyspark.sql.pandas.utils import require_minimum_pyarrow_version, require_minimum_pandas_version
try:
require_minimum_pandas_version()
except Exception as e:
if is_at_least_precommit_run():
raise AssertionError("incorrect pandas version during required testing " + str(e))
pytestmark = pytest.mark.skip(reason=str(e))
try:
require_minimum_pyarrow_version()
except Exception as e:
if is_at_least_precommit_run():
raise AssertionError("incorrect pyarrow version during required testing " + str(e))
pytestmark = pytest.mark.skip(reason=str(e))
from asserts import assert_gpu_and_cpu_are_equal_collect, assert_gpu_fallback_collect
from data_gen import *
from marks import approximate_float, allow_non_gpu, ignore_order
from pyspark.sql import Window
from pyspark.sql.types import *
import pyspark.sql.functions as f
import pandas as pd
import pyarrow
from typing import Iterator, Tuple
arrow_udf_conf = {
'spark.sql.execution.arrow.pyspark.enabled': 'true',
'spark.rapids.sql.exec.WindowInPandasExec': 'true',
'spark.rapids.sql.exec.FlatMapCoGroupsInPandasExec': 'true'
}
data_gens_nested_for_udf = arrow_array_gens + arrow_struct_gens
####################################################################
# NOTE: pytest does not play well with pyspark udfs, because pyspark
# tries to import the dependencies for top level functions and
# pytest messes around with imports. To make this work, all UDFs
# must either be lambdas or totally defined within the test method
# itself.
####################################################################
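# For example, a function wrapped with f.pandas_udf at module import time can trip over
# pytest's import handling, while the same function defined inside the test body (as
# `add` is in test_pandas_math_udf below) is only created when the test runs.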
@pytest.mark.parametrize('data_gen', integral_gens, ids=idfn)
def test_pandas_math_udf(data_gen):
def add(a, b):
return a + b
my_udf = f.pandas_udf(add, returnType=LongType())
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen, num_slices=4).select(
my_udf(f.col('a') - 3, f.col('b'))),
conf=arrow_udf_conf)
@pytest.mark.parametrize('data_gen', integral_gens, ids=idfn)
def test_iterator_math_udf(data_gen):
def iterator_add(to_process: Iterator[Tuple[pd.Series, pd.Series]]) -> Iterator[pd.Series]:
for a, b in to_process:
yield a + b
my_udf = f.pandas_udf(iterator_add, returnType=LongType())
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen, num_slices=4).select(
my_udf(f.col('a'), f.col('b'))),
conf=arrow_udf_conf)
@pytest.mark.parametrize('data_gen', data_gens_nested_for_udf, ids=idfn)
def test_pandas_scalar_udf_nested_type(data_gen):
def nested_size(nested):
return pd.Series([nested.size]).repeat(len(nested))
my_udf = f.pandas_udf(nested_size, returnType=LongType())
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, data_gen, num_slices=4).select(my_udf(f.col('a'))),
conf=arrow_udf_conf)
# ======= Test aggregate in Pandas =======
@approximate_float
@pytest.mark.parametrize('data_gen', integral_gens, ids=idfn)
def test_single_aggregate_udf(data_gen):
@f.pandas_udf('double')
def pandas_sum(to_process: pd.Series) -> float:
return to_process.sum()
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).select(
pandas_sum(f.col('a'))),
conf=arrow_udf_conf)
@approximate_float
@pytest.mark.parametrize('data_gen', arrow_common_gen, ids=idfn)
def test_single_aggregate_udf_more_types(data_gen):
@f.pandas_udf('double')
def group_size_udf(to_process: pd.Series) -> float:
return len(to_process)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).select(
group_size_udf(f.col('a'))),
conf=arrow_udf_conf)
@ignore_order
@pytest.mark.parametrize('data_gen', integral_gens, ids=idfn)
def test_group_aggregate_udf(data_gen):
@f.pandas_udf('long')
def pandas_sum(to_process: pd.Series) -> int:
# Sort the values before computing the sum.
# For details please go to
# https://github.com/NVIDIA/spark-rapids/issues/740#issuecomment-784917512
return to_process.sort_values().sum()
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen)\
.groupBy('a')\
.agg(pandas_sum(f.col('b'))),
conf=arrow_udf_conf)
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', arrow_common_gen, ids=idfn)
def test_group_aggregate_udf_more_types(data_gen):
@f.pandas_udf('long')
def group_size_udf(to_process: pd.Series) -> int:
return len(to_process)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen, 430)\
.groupBy('a')\
.agg(group_size_udf(f.col('b'))),
conf=arrow_udf_conf)
# ======= Test window in Pandas =======
# range frame is not supported yet.
no_part_win = Window\
.rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)
unbounded_win = Window\
.partitionBy('a')\
.rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)
cur_follow_win = Window\
.partitionBy('a')\
.orderBy('b')\
.rowsBetween(Window.currentRow, Window.unboundedFollowing)
pre_cur_win = Window\
.partitionBy('a')\
.orderBy('b')\
.rowsBetween(Window.unboundedPreceding, Window.currentRow)
low_upper_win = Window.partitionBy('a').orderBy('b').rowsBetween(-3, 3)
udf_windows = [no_part_win, unbounded_win, cur_follow_win, pre_cur_win, low_upper_win]
window_ids = ['No_Partition', 'Unbounded', 'Unbounded_Following', 'Unbounded_Preceding',
'Lower_Upper']
@ignore_order
@pytest.mark.parametrize('data_gen', integral_gens, ids=idfn)
@pytest.mark.parametrize('window', udf_windows, ids=window_ids)
def test_window_aggregate_udf(data_gen, window):
@f.pandas_udf('long')
def pandas_sum(to_process: pd.Series) -> int:
# Sort the values before computing the sum.
# For details please go to
# https://github.com/NVIDIA/spark-rapids/issues/740#issuecomment-784917512
return to_process.sort_values().sum()
assert_gpu_and_cpu_are_equal_collect(
lambda spark: binary_op_df(spark, data_gen).select(
pandas_sum(f.col('b')).over(window)),
conf=arrow_udf_conf)
@ignore_order
@pytest.mark.parametrize('data_gen', [byte_gen, short_gen, int_gen], ids=idfn)
@pytest.mark.parametrize('window', udf_windows, ids=window_ids)
def test_window_aggregate_udf_array_from_python(data_gen, window):
@f.pandas_udf(returnType=ArrayType(LongType()))
def pandas_sum(to_process: pd.Series) -> list:
return [to_process.sum()]
# When receiving data of array type from the Python side, split it right away
# in case the following expressions or plans do not support array types yet.
assert_gpu_and_cpu_are_equal_collect(
lambda spark: binary_op_df(spark, data_gen)\
.select(pandas_sum(f.col('b')).over(window).alias('py_array'))\
.select([f.col('py_array').getItem(i) for i in range(0, 1)]),
conf=arrow_udf_conf)
# ======= Test flat map group in Pandas =======
# Separate the tests into before and after DB 9.1, to verify
# the new "zero-conf-conversion" feature introduced in DB 9.1.
@pytest.mark.skipif(not is_databricks_runtime(), reason="zero-conf is supported only from db9.1")
@ignore_order(local=True)
@pytest.mark.parametrize('zero_enabled', [False, True])
@pytest.mark.parametrize('data_gen', [LongGen()], ids=idfn)
def test_group_apply_udf_zero_conf(data_gen, zero_enabled):
def pandas_add(data):
data.sum = data.b + data.a
return data
conf_with_zero = arrow_udf_conf.copy()
conf_with_zero.update({
'spark.databricks.execution.pandasZeroConfConversion.groupbyApply.enabled': zero_enabled
})
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen)\
.groupBy('a')\
.applyInPandas(pandas_add, schema="a long, b long"),
conf=conf_with_zero)
@pytest.mark.skipif(is_databricks_runtime(), reason="This is tested by other tests from db9.1")
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', [LongGen()], ids=idfn)
def test_group_apply_udf(data_gen):
def pandas_add(data):
data.sum = data.b + data.a
return data
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen)\
.groupBy('a')\
.applyInPandas(pandas_add, schema="a long, b long"),
conf=arrow_udf_conf)
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', arrow_common_gen, ids=idfn)
def test_group_apply_udf_more_types(data_gen):
def group_size_udf(key, pdf):
return pd.DataFrame([[len(key), len(pdf), len(pdf.columns)]])
assert_gpu_and_cpu_are_equal_collect(
lambda spark: binary_op_df(spark, data_gen, 5000)\
.groupBy('a')\
.applyInPandas(group_size_udf, schema="c long, d long, e long"),
conf=arrow_udf_conf)
# ======= Test map in Pandas =======
@ignore_order
@pytest.mark.parametrize('data_gen', [LongGen()], ids=idfn)
def test_map_apply_udf(data_gen):
def pandas_filter(iterator):
for data in iterator:
yield data[data.b <= data.a]
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen, num_slices=4)\
.mapInPandas(pandas_filter, schema="a long, b long"),
conf=arrow_udf_conf)
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', data_gens_nested_for_udf, ids=idfn)
def test_pandas_map_udf_nested_type(data_gen):
# UDF output types supported by the plugin: (commonCudfTypes + ARRAY).nested() + STRUCT
# STRUCT represents the whole dataframe in a Map Pandas UDF, so there is no struct column in the UDF output.
# More details are here:
# https://github.com/apache/spark/blob/master/python/pyspark/sql/udf.py#L119
udf_out_schema = 'c_integral long,' \
'c_string string,' \
'c_fp double,' \
'c_bool boolean,' \
'c_date date,' \
'c_time timestamp,' \
'c_array_array array<array<long>>,' \
'c_array_string array<string>'
def col_types_udf(pdf_itr):
for pdf in pdf_itr:
# Return a data frame with columns of supported types, and only one row.
# The values cannot be generated randomly because the UDF must return the same data
# for both CPU and GPU runs.
yield pd.DataFrame({
"c_integral": [len(pdf)],
"c_string": ["size" + str(len(pdf))],
"c_fp": [float(len(pdf))],
"c_bool": [False],
"c_date": [date(2021, 4, 2)],
"c_time": [datetime(2021, 4, 2, tzinfo=timezone.utc)],
"c_array_array": [[[len(pdf)]]],
"c_array_string": [["size" + str(len(pdf))]]
})
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, data_gen, num_slices=4)\
.mapInPandas(col_types_udf, schema=udf_out_schema),
conf=arrow_udf_conf)
def create_df(spark, data_gen, left_length, right_length):
left = binary_op_df(spark, data_gen, length=left_length)
right = binary_op_df(spark, data_gen, length=right_length)
return left, right
@ignore_order
@pytest.mark.parametrize('data_gen', [ShortGen(nullable=False)], ids=idfn)
def test_cogroup_apply_udf(data_gen):
def asof_join(l, r):
return pd.merge_asof(l, r, on='a', by='b')
def do_it(spark):
left, right = create_df(spark, data_gen, 500, 500)
return left.groupby('a').cogroup(
right.groupby('a')).applyInPandas(
asof_join, schema="a int, b int")
assert_gpu_and_cpu_are_equal_collect(do_it, conf=arrow_udf_conf)
@ignore_order
@allow_non_gpu('FlatMapCoGroupsInPandasExec')
def test_cogroup_apply_fallback():
def asof_join(l, r):
return r
def do_it(spark):
left = two_col_df(spark, int_gen, int_gen, length=100)
right = two_col_df(spark, short_gen, int_gen, length=100)
return left.groupby('a').cogroup(
right.groupby('a')).applyInPandas(
asof_join, schema="a int, b int")
assert_gpu_fallback_collect(do_it, 'FlatMapCoGroupsInPandasExec', conf=arrow_udf_conf)
@ignore_order
@pytest.mark.parametrize('data_gen', [LongGen(nullable=False)], ids=idfn)
@pytest.mark.skipif(is_before_spark_330(), reason='mapInArrow is introduced in Pyspark 3.3.0')
def test_map_arrow_apply_udf(data_gen):
def filter_func(iterator):
for batch in iterator:
pdf = batch.to_pandas()
yield pyarrow.RecordBatch.from_pandas(pdf[pdf.b <= pdf.a])
# this test does not involve string or binary types, so there is no need
# to fall back if useLargeVarTypes is enabled
conf = arrow_udf_conf.copy()
conf.update({
'spark.sql.execution.arrow.useLargeVarTypes': True
})
assert_gpu_and_cpu_are_equal_collect(
lambda spark: binary_op_df(spark, data_gen, num_slices=4) \
.mapInArrow(filter_func, schema="a long, b long"),
conf=conf)
@pytest.mark.parametrize('data_type', ['string', 'binary'], ids=idfn)
@allow_non_gpu('PythonMapInArrowExec')
@pytest.mark.skipif(is_before_spark_350(), reason='spark.sql.execution.arrow.useLargeVarTypes is introduced in Pyspark 3.5.0')
def test_map_arrow_large_var_types_fallback(data_type):
def filter_func(iterator):
for batch in iterator:
pdf = batch.to_pandas()
yield pyarrow.RecordBatch.from_pandas(pdf[pdf.b <= pdf.a])
conf = arrow_udf_conf.copy()
conf.update({
'spark.sql.execution.arrow.useLargeVarTypes': True
})
if data_type == "string":
data_gen = StringGen()
elif data_type == "binary":
data_gen = BinaryGen()
assert_gpu_fallback_collect(
lambda spark: binary_op_df(spark, data_gen, num_slices=4) \
.mapInArrow(filter_func, schema=f"a {data_type}, b {data_type}"),
"PythonMapInArrowExec",
conf=conf)
| spark-rapids-branch-23.10 | integration_tests/src/main/python/udf_test.py |
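Outside the test harness, the arrow_udf_conf settings used throughout this file would be applied when building the session. A minimal sketch, assuming a local SparkSession with the RAPIDS Accelerator jar and plugin already configured:
# Minimal sketch (assumes the RAPIDS Accelerator jar and plugin are already configured).
from pyspark.sql import SparkSession

spark = (SparkSession.builder
         .appName("pandas-udf-on-gpu-sketch")
         .config("spark.sql.execution.arrow.pyspark.enabled", "true")
         .config("spark.rapids.sql.exec.WindowInPandasExec", "true")
         .config("spark.rapids.sql.exec.FlatMapCoGroupsInPandasExec", "true")
         .getOrCreate())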
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_gpu_and_cpu_are_equal_collect, assert_gpu_fallback_collect
from spark_session import is_before_spark_320, is_before_spark_330
from data_gen import *
from marks import ignore_order, allow_non_gpu
import pyspark.sql.functions as f
# 4 level nested struct
# each level has a different number of children to avoid a bug in spark < 3.1
nested_struct = StructGen([
['child0', StructGen([
['child0', StructGen([
['child0', StructGen([
['child0', DecimalGen(7, 2)],
['child1', BooleanGen()],
['child2', BooleanGen()],
['child3', BooleanGen()]
])],
['child1', BooleanGen()],
['child2', BooleanGen()]
])],
['child1', BooleanGen()]
])]])
# map generators without ArrayType value, since Union on ArrayType is not supported
map_gens = [simple_string_to_string_map_gen,
MapGen(RepeatSeqGen(IntegerGen(nullable=False), 10), long_gen, max_length=10),
MapGen(BooleanGen(nullable=False), boolean_gen, max_length=2),
MapGen(StringGen(pattern='key_[0-9]', nullable=False), simple_string_to_string_map_gen),
MapGen(
LongGen(nullable=False),
MapGen(
DecimalGen(7, 2, nullable=False),
MapGen(
IntegerGen(nullable=False),
StringGen(pattern='value_[0-9]', nullable=False),
max_length=4),
max_length=7),
max_length=5)]
struct_of_maps = StructGen([['child0', BooleanGen()]] + [
['child%d' % (i + 1), gen] for i, gen in enumerate(map_gens)])
@pytest.mark.parametrize('data_gen', [pytest.param((StructGen([['child0', DecimalGen(7, 2)]]),
StructGen([['child1', IntegerGen()]]))),
# left_struct(child0 = 4 level nested struct, child1 = Int)
# right_struct(child0 = 4 level nested struct, child1 = missing)
(StructGen([['child0', StructGen([['child0', StructGen([['child0', StructGen([['child0',
StructGen([['child0', DecimalGen(7, 2)]])]])]])]])], ['child1', IntegerGen()]], nullable=False),
StructGen([['child0', StructGen([['child0', StructGen([['child0', StructGen([['child0',
StructGen([['child0', DecimalGen(7, 2)]])]])]])]])]], nullable=False)),
# left_struct(child0 = 4 level nested struct, child1=missing)
# right_struct(child0 = missing struct, child1 = Int)
(StructGen([['child0', StructGen([['child0', StructGen([['child0', StructGen([['child0',
StructGen([['child0', DecimalGen(7, 2)]])]])]])]])]], nullable=False),
StructGen([['child1', IntegerGen()]], nullable=False)),
(StructGen([['child0', DecimalGen(7, 2)]], nullable=False),
StructGen([['child1', IntegerGen()]], nullable=False)),
# left_struct(child0 = Map[String, String], child1 = missing map)
# right_struct(child0 = missing map, child1 = Map[Boolean, Boolean])
(StructGen([['child0', simple_string_to_string_map_gen]], nullable=False),
StructGen([['child1', MapGen(BooleanGen(nullable=False), boolean_gen)]], nullable=False))], ids=idfn)
# This tests the union of DFs of structs with different column types, as long as the struct itself
# isn't null. The non-null restriction is a cudf limitation because nested types are not supported as literals
def test_union_struct_missing_children(data_gen):
left_gen, right_gen = data_gen
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, left_gen).unionByName(binary_op_df(
spark, right_gen), True))
@pytest.mark.parametrize('data_gen', all_gen + map_gens + array_gens_sample +
[all_basic_struct_gen,
StructGen([['child0', DecimalGen(7, 2)]]),
nested_struct,
struct_of_maps], ids=idfn)
# This tests the union of two DFs of two cols each. The types of the left col and right col are the same
def test_union(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).union(binary_op_df(spark, data_gen)))
@pytest.mark.parametrize('data_gen', all_gen + map_gens + array_gens_sample +
[all_basic_struct_gen,
StructGen([['child0', DecimalGen(7, 2)]]),
nested_struct,
struct_of_maps], ids=idfn)
# This tests the union of two DFs of two cols each. The types of the left col and right col are the same
def test_unionAll(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).unionAll(binary_op_df(spark, data_gen)))
@pytest.mark.parametrize('data_gen', all_gen + map_gens + array_gens_sample +
[all_basic_struct_gen,
pytest.param(all_basic_struct_gen),
pytest.param(StructGen([[ 'child0', DecimalGen(7, 2)]])),
nested_struct,
StructGen([['child0', StructGen([['child0', StructGen([['child0', StructGen([['child0',
StructGen([['child0', DecimalGen(7, 2)]])]])]])]])], ['child1', IntegerGen()]]),
struct_of_maps], ids=idfn)
# This tests the union of two DFs of structs with missing child column names. The missing child
# column will be replaced by nulls in the output DF. This is a feature added in 3.1+
def test_union_by_missing_col_name(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).withColumnRenamed("a", "x")
.unionByName(binary_op_df(spark, data_gen).withColumnRenamed("a", "y"), True))
# the first number ('1' and '2') is the nesting level
# the second part ('one' and 'two') is the number of fields in the struct
base_one = (ArrayGen(StructGen([["ba", StringGen()]]), 1, 1), ArrayGen(StructGen([["bb", StringGen()]]), 1, 1))
base_two = (ArrayGen(StructGen([["ba", StringGen()], ["bb", StringGen()]]), 1, 1), ArrayGen(StructGen([["bb", StringGen()], ["ba", StringGen()]]), 1, 1))
nest_1_one = (StructGen([('b', base_one[0])]), StructGen([('b', base_one[1])]))
nest_1_two = (StructGen([('b', base_two[0])]), StructGen([('b', base_two[1])]))
nest_2_one = (StructGen([('b', ArrayGen(base_one[0], 1, 1))]), StructGen([('b', ArrayGen(base_one[1],1,1))]))
nest_2_two = (StructGen([('b', ArrayGen(base_two[0], 1, 1))]), StructGen([('b', ArrayGen(base_two[1],1,1))]))
@pytest.mark.parametrize('gen_pair', [base_one, base_two,
nest_1_one, nest_1_two,
nest_2_one, nest_2_two])
@pytest.mark.skipif(is_before_spark_330(), reason="This is supported only in Spark 3.3.0+")
def test_union_by_missing_field_name_in_arrays_structs(gen_pair):
"""
This tests the union of two DFs of arrays of structs with missing field names.
The missing field will be replaced by nulls in the output DF. This is a feature added in 3.3+
This test is for https://github.com/NVIDIA/spark-rapids/issues/3953
Test cases are copies from https://github.com/apache/spark/commit/5241d98800
"""
def assert_union_equal(gen1, gen2):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen1).unionByName(unary_op_df(spark, gen2), True)
)
assert_union_equal(gen_pair[0], gen_pair[1])
assert_union_equal(gen_pair[1], gen_pair[0])
@pytest.mark.parametrize('data_gen', all_gen + map_gens + array_gens_sample +
[all_basic_struct_gen,
StructGen([['child0', DecimalGen(7, 2)]]),
nested_struct,
struct_of_maps], ids=idfn)
def test_union_by_name(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).unionByName(binary_op_df(spark, data_gen)))
@pytest.mark.parametrize('data_gen', [
pytest.param([('basic' + str(i), gen) for i, gen in enumerate(all_basic_gens + decimal_gens + [binary_gen])]),
pytest.param([('struct' + str(i), gen) for i, gen in enumerate(struct_gens_sample)]),
pytest.param([('array' + str(i), gen) for i, gen in enumerate(array_gens_sample + [ArrayGen(BinaryGen(max_length=5), max_length=5)])]),
pytest.param([('map' + str(i), gen) for i, gen in enumerate(map_gens_sample)]),
], ids=idfn)
def test_coalesce_types(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, data_gen).coalesce(2))
@pytest.mark.parametrize('num_parts', [1, 10, 100, 1000, 2000], ids=idfn)
@pytest.mark.parametrize('length', [0, 2048, 4096], ids=idfn)
def test_coalesce_df(num_parts, length):
# This should eventually be expanded beyond just the basic gens
gen_list = [('_c' + str(i), gen) for i, gen in enumerate(all_basic_gens + decimal_gens + [binary_gen])]
assert_gpu_and_cpu_are_equal_collect(
lambda spark : gen_df(spark, gen_list, length=length).coalesce(num_parts))
@pytest.mark.parametrize('data_gen', [
pytest.param([('_c' + str(i), gen) for i, gen in enumerate(all_basic_gens + decimal_gens + [binary_gen])]),
pytest.param([('s', StructGen([['child0', all_basic_struct_gen]]))]),
pytest.param([('a', ArrayGen(string_gen))]),
pytest.param([('m', simple_string_to_string_map_gen)]),
], ids=idfn)
@pytest.mark.parametrize('num_parts', [1, 10, 2345], ids=idfn)
@pytest.mark.parametrize('length', [0, 2048, 4096], ids=idfn)
@ignore_order(local=True) # To avoid extra data shuffle by 'sort on Spark' for this repartition test.
def test_repartition_df(data_gen, num_parts, length):
from pyspark.sql.functions import lit
assert_gpu_and_cpu_are_equal_collect(
# Add a computed column to avoid shuffle being optimized back to a CPU shuffle
lambda spark : gen_df(spark, data_gen, length=length).withColumn('x', lit(1)).repartition(num_parts),
# disable sort before shuffle so round robin works for maps
conf = {'spark.sql.execution.sortBeforeRepartition': 'false'})
@pytest.mark.parametrize('data_gen', [
pytest.param([('_c' + str(i), gen) for i, gen in enumerate(all_basic_gens + decimal_gens)]),
pytest.param([('s', StructGen([['child0', all_basic_struct_gen]]))]),
pytest.param([('_c' + str(i), ArrayGen(gen)) for i, gen in enumerate(all_basic_gens + decimal_gens)]),
], ids=idfn)
@pytest.mark.parametrize('num_parts', [1, 10, 2345], ids=idfn)
@pytest.mark.parametrize('length', [0, 2048, 4096], ids=idfn)
@ignore_order(local=True) # To avoid extra data shuffle by 'sort on Spark' for this repartition test.
def test_repartition_df_for_round_robin(data_gen, num_parts, length):
from pyspark.sql.functions import lit
assert_gpu_and_cpu_are_equal_collect(
# Add a computed column to avoid shuffle being optimized back to a CPU shuffle
lambda spark : gen_df(spark, data_gen, length=length).withColumn('x', lit(1)).repartition(num_parts),
# Enable sort for round robin partition
conf = {'spark.sql.execution.sortBeforeRepartition': 'true'}) # default is true
@allow_non_gpu('ShuffleExchangeExec', 'RoundRobinPartitioning')
@pytest.mark.parametrize('data_gen', [[('a', simple_string_to_string_map_gen)]], ids=idfn)
@ignore_order(local=True) # To avoid extra data shuffle by 'sort on Spark' for this repartition test.
def test_round_robin_sort_fallback(data_gen):
from pyspark.sql.functions import lit
assert_gpu_fallback_collect(
# Add a computed column to avoid shuffle being optimized back to a CPU shuffle like in test_repartition_df
lambda spark : gen_df(spark, data_gen).withColumn('extra', lit(1)).repartition(13),
'ShuffleExchangeExec')
@allow_non_gpu("ProjectExec", "ShuffleExchangeExec")
@ignore_order(local=True) # To avoid extra data shuffle by 'sort on Spark' for this repartition test.
@pytest.mark.parametrize('num_parts', [2, 10, 17, 19, 32], ids=idfn)
@pytest.mark.parametrize('gen', [([('ag', ArrayGen(StructGen([('b1', long_gen)])))], ['ag'])], ids=idfn)
def test_hash_repartition_exact_fallback(gen, num_parts):
data_gen = gen[0]
part_on = gen[1]
assert_gpu_fallback_collect(
lambda spark : gen_df(spark, data_gen, length=1024) \
.repartition(num_parts, *part_on) \
.withColumn('id', f.spark_partition_id()) \
.selectExpr('*'), "ShuffleExchangeExec")
@allow_non_gpu("ProjectExec")
@pytest.mark.parametrize('data_gen', [ArrayGen(StructGen([('b1', long_gen)]))], ids=idfn)
def test_hash_fallback(data_gen):
assert_gpu_fallback_collect(
lambda spark : unary_op_df(spark, data_gen, length=1024) \
.selectExpr('*', 'hash(a) as h'), "ProjectExec")
@ignore_order(local=True) # To avoid extra data shuffle by 'sort on Spark' for this repartition test.
@pytest.mark.parametrize('num_parts', [1, 2, 10, 17, 19, 32], ids=idfn)
@pytest.mark.parametrize('gen', [
([('a', boolean_gen)], ['a']),
([('a', byte_gen)], ['a']),
([('a', short_gen)], ['a']),
([('a', int_gen)], ['a']),
([('a', long_gen)], ['a']),
([('a', float_gen)], ['a']),
([('a', double_gen)], ['a']),
([('a', timestamp_gen)], ['a']),
([('a', date_gen)], ['a']),
([('a', decimal_gen_32bit)], ['a']),
([('a', decimal_gen_64bit)], ['a']),
([('a', decimal_gen_128bit)], ['a']),
([('a', string_gen)], ['a']),
([('a', null_gen)], ['a']),
([('a', StructGen([('c0', boolean_gen), ('c1', StructGen([('c1_0', byte_gen), ('c1_1', string_gen), ('c1_2', boolean_gen)]))]))], ['a']),
([('a', long_gen), ('b', StructGen([('b1', long_gen)]))], ['a']),
([('a', long_gen), ('b', ArrayGen(long_gen, max_length=2))], ['a']),
([('a', byte_gen)], [f.col('a') - 5]),
([('a', long_gen)], [f.col('a') + 15]),
([('a', ArrayGen(long_gen, max_length=2)), ('b', long_gen)], ['a']),
([('a', StructGen([('aa', ArrayGen(long_gen, max_length=2))])), ('b', long_gen)], ['a']),
([('a', byte_gen), ('b', boolean_gen)], ['a', 'b']),
([('a', short_gen), ('b', string_gen)], ['a', 'b']),
([('a', int_gen), ('b', byte_gen)], ['a', 'b']),
([('a', long_gen), ('b', null_gen)], ['a', 'b']),
([('a', byte_gen), ('b', boolean_gen), ('c', short_gen)], ['a', 'b', 'c']),
([('a', float_gen), ('b', double_gen), ('c', short_gen)], ['a', 'b', 'c']),
([('a', timestamp_gen), ('b', date_gen), ('c', int_gen)], ['a', 'b', 'c']),
([('a', short_gen), ('b', string_gen), ('c', int_gen)], ['a', 'b', 'c']),
([('a', decimal_gen_64bit), ('b', decimal_gen_64bit), ('c', decimal_gen_64bit)], ['a', 'b', 'c']),
([('a', decimal_gen_128bit), ('b', decimal_gen_128bit), ('c', decimal_gen_128bit)], ['a', 'b', 'c']),
], ids=idfn)
def test_hash_repartition_exact(gen, num_parts):
data_gen = gen[0]
part_on = gen[1]
assert_gpu_and_cpu_are_equal_collect(
lambda spark : gen_df(spark, data_gen, length=1024)\
.repartition(num_parts, *part_on)\
.withColumn('id', f.spark_partition_id())\
.withColumn('hashed', f.hash(*part_on))\
.selectExpr('*', 'pmod(hashed, {})'.format(num_parts)))
# Test a query that should cause Spark to leverage getShuffleRDD
@ignore_order(local=True)
def test_union_with_filter():
def doit(spark):
dfa = spark.range(1, 100).withColumn("id2", f.col("id"))
dfb = dfa.groupBy("id").agg(f.size(f.collect_set("id2")).alias("idc"))
dfc = dfb.filter(f.col("idc") == 1).select("id")
return dfc.union(dfc)
conf = { "spark.sql.adaptive.enabled": "true" }
assert_gpu_and_cpu_are_equal_collect(doit, conf)
| spark-rapids-branch-23.10 | integration_tests/src/main/python/repart_test.py |
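test_hash_repartition_exact above relies on Spark placing each row at pmod(hash(partition columns), num_parts) when repartitioning by column, and cross-checks that the GPU does the same. A standalone sketch of that invariant, assuming an existing SparkSession named spark:
# Sketch: after df.repartition(n, cols), spark_partition_id() should match pmod(hash(cols), n).
import pyspark.sql.functions as f

def check_exact_hash_partitioning(spark, num_parts=8):
    df = spark.range(0, 1000).withColumnRenamed("id", "a")
    parted = (df.repartition(num_parts, "a")
                .withColumn("pid", f.spark_partition_id())
                .selectExpr("a", "pid", "pmod(hash(a), {}) as expected".format(num_parts)))
    # Every row should land in the partition its hash value predicts.
    assert parted.filter("pid != expected").count() == 0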
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_gpu_and_cpu_are_equal_collect, assert_equal
from data_gen import *
import pyspark.sql.functions as f
from spark_session import with_cpu_session, with_gpu_session, is_before_spark_330
from join_test import create_df
from marks import incompat, allow_non_gpu, ignore_order
import pyspark.mllib.linalg as mllib
import pyspark.ml.linalg as ml
enable_vectorized_confs = [{"spark.sql.inMemoryColumnarStorage.enableVectorizedReader": "true"},
{"spark.sql.inMemoryColumnarStorage.enableVectorizedReader": "false"}]
# Many tests sort the results, so use a sortable decimal generator as many Spark versions
# fail to sort some large decimals properly.
_cache_decimal_gens = [decimal_gen_32bit, decimal_gen_64bit, orderable_decimal_gen_128bit]
_cache_single_array_gens_no_null = [ArrayGen(gen) for gen in all_basic_gens_no_null + _cache_decimal_gens]
decimal_struct_gen= StructGen([['child0', sub_gen] for ind, sub_gen in enumerate(_cache_decimal_gens)])
@pytest.mark.parametrize('enable_vectorized_conf', enable_vectorized_confs, ids=idfn)
@allow_non_gpu('CollectLimitExec')
def test_passing_gpuExpr_as_Expr(enable_vectorized_conf):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, string_gen)
.select(f.col("a")).na.drop()
.groupBy(f.col("a"))
.agg(f.count(f.col("a")).alias("count_a"))
.orderBy(f.col("count_a").desc(), f.col("a"))
.cache()
.limit(50), enable_vectorized_conf)
# creating special cases to just remove -0.0 because of https://github.com/NVIDIA/spark-rapids/issues/84
# Once 3.1.0 is the minimum Spark version we can drop this
double_special_cases = [
DoubleGen.make_from(1, DOUBLE_MAX_EXP, DOUBLE_MAX_FRACTION),
DoubleGen.make_from(0, DOUBLE_MAX_EXP, DOUBLE_MAX_FRACTION),
DoubleGen.make_from(1, DOUBLE_MIN_EXP, DOUBLE_MAX_FRACTION),
DoubleGen.make_from(0, DOUBLE_MIN_EXP, DOUBLE_MAX_FRACTION),
0.0, 1.0, -1.0, float('inf'), float('-inf'), float('nan'),
NEG_DOUBLE_NAN_MAX_VALUE
]
all_gen = [StringGen(), ByteGen(), ShortGen(), IntegerGen(), LongGen(),
pytest.param(FloatGen(special_cases=[FLOAT_MIN, FLOAT_MAX, 0.0, 1.0, -1.0]), marks=[incompat]),
pytest.param(DoubleGen(special_cases=double_special_cases), marks=[incompat]),
BooleanGen(), DateGen(), TimestampGen()] + _cache_decimal_gens
@pytest.mark.parametrize('data_gen', all_gen, ids=idfn)
@pytest.mark.parametrize('enable_vectorized_conf', enable_vectorized_confs, ids=idfn)
@ignore_order
def test_cache_join(data_gen, enable_vectorized_conf):
def do_join(spark):
left, right = create_df(spark, data_gen, 500, 500)
cached = left.join(right, left.a == right.r_a, 'Inner').cache()
cached.count() # populates cache
return cached
assert_gpu_and_cpu_are_equal_collect(do_join, conf=enable_vectorized_conf)
@pytest.mark.parametrize('data_gen', all_gen, ids=idfn)
@pytest.mark.parametrize('enable_vectorized_conf', enable_vectorized_confs, ids=idfn)
# We are OK running everything on CPU until we complete 'https://github.com/NVIDIA/spark-rapids/issues/360'
# because we have an explicit check in our code that disallows InMemoryTableScan from having anything other than
# AttributeReference
@allow_non_gpu(any=True)
@ignore_order
def test_cached_join_filter(data_gen, enable_vectorized_conf):
data = data_gen
def do_join(spark):
left, right = create_df(spark, data, 500, 500)
cached = left.join(right, left.a == right.r_a, 'Inner').cache()
cached.count() #populates the cache
return cached.filter("a is not null")
assert_gpu_and_cpu_are_equal_collect(do_join, conf=enable_vectorized_conf)
@pytest.mark.parametrize('data_gen', all_gen, ids=idfn)
@pytest.mark.parametrize('enable_vectorized_conf', enable_vectorized_confs, ids=idfn)
@ignore_order
def test_cache_expand_exec(data_gen, enable_vectorized_conf):
def op_df(spark, length=2048, seed=0):
cached = gen_df(spark, StructGen([
('a', data_gen),
('b', IntegerGen())], nullable=False), length=length, seed=seed).cache()
cached.count() # populate the cache
return cached.rollup(f.col("a"), f.col("b")).agg(f.col("b"))
assert_gpu_and_cpu_are_equal_collect(op_df, conf=enable_vectorized_conf)
@pytest.mark.parametrize('data_gen', [all_basic_struct_gen, StructGen([['child0', StructGen([['child1', byte_gen]])]]),
ArrayGen(
StructGen([['child0', StringGen()],
['child1',
StructGen([['child0', IntegerGen()]])]])),
decimal_struct_gen] + _cache_single_array_gens_no_null + all_gen, ids=idfn)
@pytest.mark.parametrize('enable_vectorized_conf', enable_vectorized_confs, ids=idfn)
@allow_non_gpu('CollectLimitExec')
def test_cache_partial_load(data_gen, enable_vectorized_conf):
def partial_return(col):
def partial_return_cache(spark):
return two_col_df(spark, data_gen, string_gen).select(f.col("a"), f.col("b")).cache().limit(50).select(col)
return partial_return_cache
assert_gpu_and_cpu_are_equal_collect(partial_return(f.col("a")), conf=enable_vectorized_conf)
assert_gpu_and_cpu_are_equal_collect(partial_return(f.col("b")), conf=enable_vectorized_conf)
@pytest.mark.parametrize('enable_vectorized_conf', enable_vectorized_confs, ids=idfn)
@allow_non_gpu('CollectLimitExec')
@ignore_order
def test_cache_reverse_order(enable_vectorized_conf):
col0 = StructGen([['child0', StructGen([['child1', byte_gen]])]])
col1 = StructGen([['child0', byte_gen]])
def partial_return():
def partial_return_cache(spark):
return two_col_df(spark, col0, col1).select(f.col("a"), f.col("b")).cache().select(f.col("b"), f.col("a"))
return partial_return_cache
assert_gpu_and_cpu_are_equal_collect(partial_return(), conf=enable_vectorized_conf)
@allow_non_gpu('CollectLimitExec')
def test_cache_diff_req_order(spark_tmp_path):
def n_fold(spark):
data_path_cpu = spark_tmp_path + '/PARQUET_DATA/{}/{}'
data = spark.range(100).selectExpr(
"cast(id as double) as col0",
"cast(id - 100 as double) as col1",
"cast(id * 2 as double) as col2",
"rand(100) as col3",
"rand(200) as col4")
num_buckets = 10
with_random = data.selectExpr("*", "cast(rand(0) * {} as int) as BUCKET".format(num_buckets)).cache()
for test_bucket in range(0, num_buckets):
with_random.filter(with_random.BUCKET == test_bucket).drop("BUCKET") \
.write.parquet(data_path_cpu.format("test_data", test_bucket))
with_random.filter(with_random.BUCKET != test_bucket).drop("BUCKET") \
.write.parquet(data_path_cpu.format("train_data", test_bucket))
with_cpu_session(n_fold)
# This test doesn't allow a negative scale for decimals because `df.write.mode('overwrite').parquet(data_path)`
# writes Parquet, which doesn't support negative decimal scales
@pytest.mark.parametrize('data_gen', [StringGen(), ByteGen(), ShortGen(), IntegerGen(), LongGen(),
ArrayGen(
StructGen([['child0', StringGen()],
['child1',
StructGen([['child0', IntegerGen()]])]])),
pytest.param(FloatGen(special_cases=[FLOAT_MIN, FLOAT_MAX, 0.0, 1.0, -1.0]), marks=[incompat]),
pytest.param(DoubleGen(special_cases=double_special_cases), marks=[incompat]),
BooleanGen(), DateGen(), TimestampGen(), decimal_gen_32bit, decimal_gen_64bit,
orderable_decimal_gen_128bit] + _cache_single_array_gens_no_null, ids=idfn)
@pytest.mark.parametrize('ts_write', ['TIMESTAMP_MICROS', 'TIMESTAMP_MILLIS'])
@pytest.mark.parametrize('enable_vectorized', ['true', 'false'], ids=idfn)
@ignore_order
@allow_non_gpu("SortExec", "ShuffleExchangeExec", "RangePartitioning")
def test_cache_columnar(spark_tmp_path, data_gen, enable_vectorized, ts_write):
data_path_gpu = spark_tmp_path + '/PARQUET_DATA'
def read_parquet_cached(data_path):
def write_read_parquet_cached(spark):
df = unary_op_df(spark, data_gen)
df.write.mode('overwrite').parquet(data_path)
cached = spark.read.parquet(data_path).cache()
cached.count()
return cached.select(f.col("a"))
return write_read_parquet_cached
# spark-rapids doesn't support LEGACY read for parquet
conf={'spark.sql.legacy.parquet.datetimeRebaseModeInWrite': 'CORRECTED',
'spark.sql.legacy.parquet.datetimeRebaseModeInRead' : 'CORRECTED',
# set the int96 rebase mode values because it's LEGACY in Databricks, which would preclude this op from running on the GPU
'spark.sql.legacy.parquet.int96RebaseModeInWrite' : 'CORRECTED',
'spark.sql.legacy.parquet.int96RebaseModeInRead' : 'CORRECTED',
'spark.sql.inMemoryColumnarStorage.enableVectorizedReader' : enable_vectorized,
'spark.sql.parquet.outputTimestampType': ts_write}
assert_gpu_and_cpu_are_equal_collect(read_parquet_cached(data_path_gpu), conf)
@pytest.mark.parametrize('data_gen', [all_basic_struct_gen, StructGen([['child0', StructGen([['child1', byte_gen]])]]),
decimal_struct_gen,
ArrayGen(
StructGen([['child0', StringGen()],
['child1',
StructGen([['child0', IntegerGen()]])]]))] + _cache_single_array_gens_no_null + all_gen, ids=idfn)
@pytest.mark.parametrize('enable_vectorized_conf', enable_vectorized_confs, ids=idfn)
def test_cache_cpu_gpu_mixed(data_gen, enable_vectorized_conf):
def func(spark):
df = unary_op_df(spark, data_gen)
df.cache().count()
enabled = spark.conf.get("spark.rapids.sql.enabled")
spark.conf.set("spark.rapids.sql.enabled", not enabled)
return df.selectExpr("a")
assert_gpu_and_cpu_are_equal_collect(func, conf=enable_vectorized_conf)
@pytest.mark.parametrize('enable_vectorized', ['false', 'true'], ids=idfn)
@pytest.mark.parametrize('with_x_session', [with_gpu_session, with_cpu_session])
@allow_non_gpu("ProjectExec", "Alias", "Literal", "DateAddInterval", "MakeInterval", "Cast",
"ExtractIntervalYears", "Year", "Month", "Second", "ExtractIntervalMonths",
"ExtractIntervalSeconds", "SecondWithFraction", "ColumnarToRowExec")
@pytest.mark.parametrize('select_expr', [("NULL as d", "d"),
# In order to compare the results, since pyspark doesn't
# know how to parse interval types, we need to "extract"
# values from the interval. NOTE, "extract" is a misnomer
# because we are actually converting the value to the
# requested time precision, which is not actually extraction,
# i.e. 'extract(years from d)' will actually convert the
# entire interval to years
("make_interval(y,m,w,d,h,min,s) as d", ["cast(extract(years from d) as long)", "extract(months from d)", "extract(seconds from d)"])])
def test_cache_additional_types(enable_vectorized, with_x_session, select_expr):
def with_cache(cache):
select_expr_df, select_expr_project = select_expr
def helper(spark):
# the goal is to just get a DF of CalendarIntervalType, therefore limiting the values
# so when we do get the individual parts of the interval, it doesn't overflow
df = gen_df(spark, StructGen([('m', IntegerGen(min_val=-1000, max_val=1000, nullable=False)),
('y', IntegerGen(min_val=-10000, max_val=10000, nullable=False)),
('w', IntegerGen(min_val=-10000, max_val=10000, nullable=False)),
('h', IntegerGen(min_val=-10000, max_val=10000, nullable=False)),
('min', IntegerGen(min_val=-10000, max_val=10000, nullable=False)),
('d', IntegerGen(min_val=-10000, max_val=10000, nullable=False)),
('s', IntegerGen(min_val=-10000, max_val=10000, nullable=False))],
nullable=False), seed=1)
duration_df = df.selectExpr(select_expr_df)
if (cache):
duration_df.cache()
duration_df.count()  # count() materializes the cache
df_1 = duration_df.selectExpr(select_expr_project)
return df_1.collect()
return helper
cached_result = with_x_session(with_cache(True),
conf={'spark.sql.legacy.parquet.datetimeRebaseModeInWrite': 'CORRECTED',
'spark.sql.legacy.parquet.datetimeRebaseModeInRead': 'CORRECTED',
'spark.sql.inMemoryColumnarStorage.enableVectorizedReader': enable_vectorized})
reg_result = with_x_session(with_cache(False),
conf={'spark.sql.legacy.parquet.datetimeRebaseModeInWrite': 'CORRECTED',
'spark.sql.legacy.parquet.datetimeRebaseModeInRead': 'CORRECTED',
'spark.sql.inMemoryColumnarStorage.enableVectorizedReader': enable_vectorized})
# NOTE: we aren't comparing cpu and gpu results, we are comparing the cached and non-cached results.
assert_equal(reg_result, cached_result)
def generate_data_and_test_func_on_cached_df(with_x_session, func, data_gen, test_conf):
df = lambda spark: unary_op_df(spark, data_gen)
function_to_test_on_df(with_x_session, df, func, test_conf)
def function_to_test_on_df(with_x_session, df_gen, func_on_df, test_conf):
def with_cache(cached):
def helper(spark):
_df = df_gen(spark)
if cached:
_df.cache().count()
return func_on_df(_df)
return helper
reg_result = with_x_session(with_cache(False), test_conf)
cached_result = with_x_session(with_cache(True), test_conf)
assert_equal(reg_result, cached_result)
@pytest.mark.parametrize('data_gen', all_gen, ids=idfn)
@pytest.mark.parametrize('with_x_session', [with_gpu_session, with_cpu_session])
@pytest.mark.parametrize('enable_vectorized_conf', enable_vectorized_confs, ids=idfn)
@pytest.mark.parametrize('batch_size', [{"spark.rapids.sql.batchSizeBytes": "100"}, {}], ids=idfn)
@ignore_order
def test_cache_count(data_gen, with_x_session, enable_vectorized_conf, batch_size):
test_conf = copy_and_update(enable_vectorized_conf, batch_size)
generate_data_and_test_func_on_cached_df(with_x_session, lambda df: df.count(), data_gen, test_conf)
@pytest.mark.parametrize('data_gen', all_gen, ids=idfn)
@pytest.mark.parametrize('with_x_session', [with_cpu_session, with_gpu_session])
@pytest.mark.parametrize('enable_vectorized_conf', enable_vectorized_confs, ids=idfn)
@pytest.mark.parametrize('batch_size', [{"spark.rapids.sql.batchSizeBytes": "100"}, {}], ids=idfn)
@ignore_order
# This tests the cached and uncached values returned by collect on the CPU and GPU.
# When running on the GPU with the DefaultCachedBatchSerializer, Spark adds a ColumnarToRowExec
# to project the results so they can be shown. That exec is not on the GPU, which would make this test
# throw an exception, so we have to add it to the allowed list. As of now there is no way for us to
# limit the scope of allow_non_gpu based on a condition, therefore we must allow it in all cases.
@allow_non_gpu('ColumnarToRowExec')
def test_cache_multi_batch(data_gen, with_x_session, enable_vectorized_conf, batch_size):
test_conf = copy_and_update(enable_vectorized_conf, batch_size)
generate_data_and_test_func_on_cached_df(with_x_session, lambda df: df.collect(), data_gen, test_conf)
@pytest.mark.parametrize('data_gen', all_basic_map_gens + _cache_single_array_gens_no_null, ids=idfn)
@pytest.mark.parametrize('enable_vectorized', enable_vectorized_confs, ids=idfn)
def test_cache_map_and_array(data_gen, enable_vectorized):
def helper(spark):
df = gen_df(spark, StructGen([['a', data_gen]], nullable=False))
df.persist()
return df.selectExpr("a")
assert_gpu_and_cpu_are_equal_collect(helper)
def test_cache_udt():
def fun(spark):
df = spark.sparkContext.parallelize([
(mllib.DenseVector([1, ]), ml.DenseVector([1, ])),
(mllib.SparseVector(1, [0, ], [1, ]), ml.SparseVector(1, [0, ], [1, ]))
]).toDF(["mllib_v", "ml_v"])
df.cache().count()
return df.selectExpr("mllib_v", "ml_v").collect()
cpu_result = with_cpu_session(fun)
gpu_result = with_gpu_session(fun)
# The assert_gpu_and_cpu_are_equal_collect method doesn't handle UDTs, so we just write a single
# assertion here to compare
assert cpu_result == gpu_result, "not equal"
@pytest.mark.skipif(is_before_spark_330(), reason='DayTimeInterval is not supported before Spark3.3.0')
@pytest.mark.parametrize('enable_vectorized_conf', enable_vectorized_confs, ids=idfn)
@ignore_order(local=True)
def test_cache_daytimeinterval(enable_vectorized_conf):
def test_func(spark):
df = two_col_df(spark, DayTimeIntervalGen(), int_gen)
df.cache().count()
return df.selectExpr("b", "a")
assert_gpu_and_cpu_are_equal_collect(test_func, enable_vectorized_conf)
# For AQE, test the computeStats(...) implementation in GpuInMemoryTableScanExec
# NOTE: this test is here because the necessary cache configuration is only
# available when this test file is used
@ignore_order(local=True)
@allow_non_gpu("ShuffleExchangeExec", "ColumnarToRowExec")
@pytest.mark.parametrize("data_gen", integral_gens, ids=idfn)
def test_aqe_cache_join(data_gen):
conf = {'spark.sql.adaptive.enabled': 'true'}
def do_it(spark):
df1 = unary_op_df(spark, data_gen).orderBy('a').cache()
df2 = df1.alias('df2')
return df1.join(df2, df1.a == df2.a, 'Outer')
assert_gpu_and_cpu_are_equal_collect(do_it, conf=conf)
# TODO: remove this test after timezone support is added.
# This tests caching when InMemoryTableScanExec falls back to the CPU, which pushes a
# HostColumnarToGPU node into the query plan and can cause an ArrayIndexOutOfBoundsException.
@ignore_order
@allow_non_gpu("InMemoryTableScanExec", "ProjectExec", "ColumnarToRowExec")
def test_inmem_cache_count():
conf={"spark.sql.session.timeZone": "America/Los_Angeles"}
function_to_test_on_df(with_gpu_session, lambda spark: unary_op_df(spark, int_gen).selectExpr("cast(a as timestamp)"), lambda df: df.count(), test_conf=conf)
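# A note on the fallback in the test above: the plugin's timestamp support is limited to the UTC
# time zone, so setting "America/Los_Angeles" is what pushes the timestamp cast (and with it the
# cached scan) off the GPU.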
@pytest.mark.parametrize('with_x_session', [with_gpu_session, with_cpu_session])
def test_batch_no_cols(with_x_session):
function_to_test_on_df(with_x_session, lambda spark: unary_op_df(spark, int_gen).drop("a"), lambda df: df.count(), test_conf={}) | spark-rapids-branch-23.10 | integration_tests/src/main/python/cache_test.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pyspark.sql.functions as f
import pytest
import string
from asserts import *
from data_gen import *
from marks import *
from delta_lake_write_test import assert_gpu_and_cpu_delta_logs_equivalent, delta_meta_allow, delta_writes_enabled_conf
from pyspark.sql.types import *
from spark_session import is_before_spark_320, is_before_spark_340, is_databricks_runtime, is_databricks122_or_later, spark_version
# Databricks changes the number of files being written, so we cannot compare logs
num_slices_to_test = [10] if is_databricks_runtime() else [1, 10]
delta_merge_enabled_conf = copy_and_update(delta_writes_enabled_conf,
{"spark.rapids.sql.command.MergeIntoCommand": "true",
"spark.rapids.sql.command.MergeIntoCommandEdge": "true"})
delta_write_fallback_allow = "ExecutedCommandExec,DataWritingCommandExec" if is_databricks122_or_later() else "ExecutedCommandExec"
delta_write_fallback_check = "DataWritingCommandExec" if is_databricks122_or_later() else "ExecutedCommandExec"
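# Note on the configs above: MergeIntoCommand is the standard Delta Lake merge command, while
# MergeIntoCommandEdge appears to be the variant used on Databricks runtimes; the fallback
# allow/check strings differ on Databricks 12.2+, where the check targets DataWritingCommandExec
# instead of ExecutedCommandExec.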
def read_delta_path(spark, path):
return spark.read.format("delta").load(path)
def read_delta_path_with_cdf(spark, path):
return spark.read.format("delta") \
.option("readChangeDataFeed", "true").option("startingVersion", 0) \
.load(path).drop("_commit_timestamp")
def schema_to_ddl(spark, schema):
return spark.sparkContext._jvm.org.apache.spark.sql.types.DataType.fromJson(schema.json()).toDDL()
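# schema_to_ddl round-trips a PySpark schema through the JVM DataType parser to get its DDL string,
# e.g. StructType([StructField("a", IntegerType())]) becomes roughly "`a` INT" (illustrative; the
# exact quoting comes from Spark's toDDL).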
def make_df(spark, gen, num_slices):
return three_col_df(spark, gen, SetValuesGen(StringType(), string.ascii_lowercase),
SetValuesGen(StringType(), string.ascii_uppercase), num_slices=num_slices)
def setup_dest_tables(spark, data_path, dest_table_func, use_cdf, partition_columns=None, enable_deletion_vectors=False):
for name in ["CPU", "GPU"]:
path = "{}/{}".format(data_path, name)
dest_df = dest_table_func(spark)
writer = dest_df.write.format("delta")
ddl = schema_to_ddl(spark, dest_df.schema)
table_properties = {}
if use_cdf:
table_properties['delta.enableChangeDataFeed'] = 'true'
if enable_deletion_vectors:
table_properties['delta.enableDeletionVectors'] = 'true'
if len(table_properties) > 0:
# if any table properties are specified then we need to use SQL to define the table
sql_text = "CREATE TABLE delta.`{path}` ({ddl}) USING DELTA".format(path=path, ddl=ddl)
if partition_columns:
sql_text += " PARTITIONED BY ({})".format(",".join(partition_columns))
properties = ', '.join(key + ' = ' + value for key, value in table_properties.items())
sql_text += " TBLPROPERTIES ({})".format(properties)
spark.sql(sql_text)
elif partition_columns:
writer = writer.partitionBy(*partition_columns)
if use_cdf or enable_deletion_vectors:
writer = writer.mode("append")
writer.save(path)
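# setup_dest_tables writes two identical destination tables, <data_path>/CPU and <data_path>/GPU,
# so that the same MERGE can later be run once per backend and the results compared. When CDF or
# deletion vectors are requested, the table is first defined via SQL so TBLPROPERTIES can be set,
# and the generated data is then appended to it.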
def delta_sql_merge_test(spark_tmp_path, spark_tmp_table_factory, use_cdf,
src_table_func, dest_table_func, merge_sql, check_func,
partition_columns=None):
data_path = spark_tmp_path + "/DELTA_DATA"
src_table = spark_tmp_table_factory.get()
def setup_tables(spark):
setup_dest_tables(spark, data_path, dest_table_func, use_cdf, partition_columns)
src_table_func(spark).createOrReplaceTempView(src_table)
def do_merge(spark, path):
dest_table = spark_tmp_table_factory.get()
read_delta_path(spark, path).createOrReplaceTempView(dest_table)
return spark.sql(merge_sql.format(src_table=src_table, dest_table=dest_table)).collect()
with_cpu_session(setup_tables)
check_func(data_path, do_merge)
def assert_delta_sql_merge_collect(spark_tmp_path, spark_tmp_table_factory, use_cdf,
src_table_func, dest_table_func, merge_sql,
compare_logs, partition_columns=None,
conf=delta_merge_enabled_conf):
def read_data(spark, path):
read_func = read_delta_path_with_cdf if use_cdf else read_delta_path
df = read_func(spark, path)
return df.sort(df.columns)
def checker(data_path, do_merge):
cpu_path = data_path + "/CPU"
gpu_path = data_path + "/GPU"
# compare resulting dataframe from the merge operation (some older Spark versions return empty here)
cpu_result = with_cpu_session(lambda spark: do_merge(spark, cpu_path), conf=conf)
gpu_result = with_gpu_session(lambda spark: do_merge(spark, gpu_path), conf=conf)
assert_equal(cpu_result, gpu_result)
# compare merged table data results, read both via CPU to make sure GPU write can be read by CPU
cpu_result = with_cpu_session(lambda spark: read_data(spark, cpu_path).collect(), conf=conf)
gpu_result = with_cpu_session(lambda spark: read_data(spark, gpu_path).collect(), conf=conf)
assert_equal(cpu_result, gpu_result)
if compare_logs:
with_cpu_session(lambda spark: assert_gpu_and_cpu_delta_logs_equivalent(spark, data_path))
delta_sql_merge_test(spark_tmp_path, spark_tmp_table_factory, use_cdf,
src_table_func, dest_table_func, merge_sql, checker, partition_columns)
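# The checker above compares three things between the CPU and GPU runs: the rows returned by the
# MERGE statement itself, the post-merge table contents (both copies are read back on the CPU, so a
# GPU write must remain CPU-readable), and, when compare_logs is set, the Delta transaction logs.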
@allow_non_gpu(delta_write_fallback_allow, *delta_meta_allow)
@delta_lake
@ignore_order
@pytest.mark.parametrize("disable_conf",
[{"spark.rapids.sql.format.delta.write.enabled": "false"},
{"spark.rapids.sql.format.parquet.enabled": "false"},
{"spark.rapids.sql.format.parquet.write.enabled": "false"},
{"spark.rapids.sql.command.MergeIntoCommand": "false"},
delta_writes_enabled_conf # Test disabled by default
], ids=idfn)
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
def test_delta_merge_disabled_fallback(spark_tmp_path, spark_tmp_table_factory, disable_conf):
def checker(data_path, do_merge):
assert_gpu_fallback_write(do_merge, read_delta_path, data_path,
delta_write_fallback_check, conf=disable_conf)
merge_sql = "MERGE INTO {dest_table} USING {src_table} ON {dest_table}.a == {src_table}.a" \
" WHEN NOT MATCHED THEN INSERT *"
delta_sql_merge_test(spark_tmp_path, spark_tmp_table_factory,
use_cdf=False,
src_table_func=lambda spark: unary_op_df(spark, SetValuesGen(IntegerType(), range(100))),
dest_table_func=lambda spark: unary_op_df(spark, int_gen),
merge_sql=merge_sql,
check_func=checker)
@allow_non_gpu("ExecutedCommandExec,BroadcastHashJoinExec,ColumnarToRowExec,BroadcastExchangeExec,DataWritingCommandExec", *delta_meta_allow)
@delta_lake
@ignore_order
@pytest.mark.skipif(is_databricks_runtime() and spark_version() < "3.3.2", reason="NOT MATCHED BY SOURCE added in DBR 12.2")
@pytest.mark.skipif((not is_databricks_runtime()) and is_before_spark_340(), reason="NOT MATCHED BY SOURCE added in Delta Lake 2.4")
def test_delta_merge_not_matched_by_source_fallback(spark_tmp_path, spark_tmp_table_factory):
def checker(data_path, do_merge):
assert_gpu_fallback_write(do_merge, read_delta_path, data_path, "ExecutedCommandExec", conf = delta_merge_enabled_conf)
merge_sql = "MERGE INTO {dest_table} " \
"USING {src_table} " \
"ON {src_table}.a == {dest_table}.a " \
"WHEN MATCHED THEN " \
" UPDATE SET {dest_table}.b = {src_table}.b " \
"WHEN NOT MATCHED THEN " \
" INSERT (a, b) VALUES ({src_table}.a, {src_table}.b) " \
"WHEN NOT MATCHED BY SOURCE AND {dest_table}.b > 0 THEN " \
" UPDATE SET {dest_table}.b = 0"
delta_sql_merge_test(spark_tmp_path, spark_tmp_table_factory,
use_cdf=False,
src_table_func=lambda spark: binary_op_df(spark, SetValuesGen(IntegerType(), range(10))),
dest_table_func=lambda spark: binary_op_df(spark, SetValuesGen(IntegerType(), range(20, 30))),
merge_sql=merge_sql,
check_func=checker)
@allow_non_gpu(*delta_meta_allow)
@delta_lake
@ignore_order
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
@pytest.mark.parametrize("use_cdf", [True, False], ids=idfn)
@pytest.mark.parametrize("partition_columns", [None, ["a"], ["b"], ["a", "b"]], ids=idfn)
@pytest.mark.parametrize("num_slices", num_slices_to_test, ids=idfn)
@pytest.mark.parametrize("disable_conf", [
"spark.rapids.sql.exec.RapidsProcessDeltaMergeJoinExec",
"spark.rapids.sql.expression.Add"], ids=idfn)
def test_delta_merge_partial_fallback_via_conf(spark_tmp_path, spark_tmp_table_factory,
use_cdf, partition_columns, num_slices, disable_conf):
src_range, dest_range = range(20), range(10, 30)
    # Need to eliminate duplicate keys in the source table; otherwise the update semantics are ambiguous
src_table_func = lambda spark: make_df(spark, SetValuesGen(IntegerType(), src_range), num_slices) \
.groupBy("a").agg(f.max("b").alias("b"),f.min("c").alias("c"))
dest_table_func = lambda spark: make_df(spark, SetValuesGen(IntegerType(), dest_range), num_slices)
merge_sql = "MERGE INTO {dest_table} d USING {src_table} s ON d.a == s.a" \
" WHEN MATCHED THEN UPDATE SET d.a = s.a + 4 WHEN NOT MATCHED THEN INSERT *"
# Non-deterministic input for each task means we can only reliably compare record counts when using only one task
compare_logs = num_slices == 1
conf = copy_and_update(delta_merge_enabled_conf, { disable_conf: "false" })
assert_delta_sql_merge_collect(spark_tmp_path, spark_tmp_table_factory, use_cdf,
src_table_func, dest_table_func, merge_sql, compare_logs,
partition_columns, conf=conf)
@allow_non_gpu(*delta_meta_allow)
@delta_lake
@ignore_order
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
@pytest.mark.parametrize("table_ranges", [(range(20), range(10)), # partial insert of source
(range(5), range(5)), # no-op insert
(range(10), range(20, 30)) # full insert of source
], ids=idfn)
@pytest.mark.parametrize("use_cdf", [True, False], ids=idfn)
@pytest.mark.parametrize("partition_columns", [None, ["a"], ["b"], ["a", "b"]], ids=idfn)
@pytest.mark.parametrize("num_slices", num_slices_to_test, ids=idfn)
def test_delta_merge_not_match_insert_only(spark_tmp_path, spark_tmp_table_factory, table_ranges,
use_cdf, partition_columns, num_slices):
src_range, dest_range = table_ranges
src_table_func = lambda spark: make_df(spark, SetValuesGen(IntegerType(), src_range), num_slices)
dest_table_func = lambda spark: make_df(spark, SetValuesGen(IntegerType(), dest_range), num_slices)
merge_sql = "MERGE INTO {dest_table} USING {src_table} ON {dest_table}.a == {src_table}.a" \
" WHEN NOT MATCHED THEN INSERT *"
# Non-deterministic input for each task means we can only reliably compare record counts when using only one task
compare_logs = num_slices == 1
assert_delta_sql_merge_collect(spark_tmp_path, spark_tmp_table_factory, use_cdf,
src_table_func, dest_table_func, merge_sql, compare_logs,
partition_columns)
@allow_non_gpu(*delta_meta_allow)
@delta_lake
@ignore_order
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
@pytest.mark.parametrize("table_ranges", [(range(10), range(20)), # partial delete of target
(range(5), range(5)), # full delete of target
(range(10), range(20, 30)) # no-op delete
], ids=idfn)
@pytest.mark.parametrize("use_cdf", [True, False], ids=idfn)
@pytest.mark.parametrize("partition_columns", [None, ["a"], ["b"], ["a", "b"]], ids=idfn)
@pytest.mark.parametrize("num_slices", num_slices_to_test, ids=idfn)
def test_delta_merge_match_delete_only(spark_tmp_path, spark_tmp_table_factory, table_ranges,
use_cdf, partition_columns, num_slices):
src_range, dest_range = table_ranges
src_table_func = lambda spark: make_df(spark, SetValuesGen(IntegerType(), src_range), num_slices)
dest_table_func = lambda spark: make_df(spark, SetValuesGen(IntegerType(), dest_range), num_slices)
merge_sql = "MERGE INTO {dest_table} USING {src_table} ON {dest_table}.a == {src_table}.a" \
" WHEN MATCHED THEN DELETE"
# Non-deterministic input for each task means we can only reliably compare record counts when using only one task
compare_logs = num_slices == 1
assert_delta_sql_merge_collect(spark_tmp_path, spark_tmp_table_factory, use_cdf,
src_table_func, dest_table_func, merge_sql, compare_logs,
partition_columns)
@allow_non_gpu(*delta_meta_allow)
@delta_lake
@ignore_order
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
@pytest.mark.parametrize("use_cdf", [True, False], ids=idfn)
@pytest.mark.parametrize("num_slices", num_slices_to_test, ids=idfn)
def test_delta_merge_standard_upsert(spark_tmp_path, spark_tmp_table_factory, use_cdf, num_slices):
    # Need to eliminate duplicate keys in the source table; otherwise the update semantics are ambiguous
src_table_func = lambda spark: two_col_df(spark, int_gen, string_gen, num_slices=num_slices).groupBy("a").agg(f.max("b").alias("b"))
dest_table_func = lambda spark: two_col_df(spark, int_gen, string_gen, seed=1, num_slices=num_slices)
merge_sql = "MERGE INTO {dest_table} USING {src_table} ON {dest_table}.a == {src_table}.a" \
" WHEN MATCHED THEN UPDATE SET * WHEN NOT MATCHED THEN INSERT *"
# Non-deterministic input for each task means we can only reliably compare record counts when using only one task
compare_logs = num_slices == 1
assert_delta_sql_merge_collect(spark_tmp_path, spark_tmp_table_factory, use_cdf,
src_table_func, dest_table_func, merge_sql, compare_logs)
@allow_non_gpu(*delta_meta_allow)
@delta_lake
@ignore_order
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
@pytest.mark.parametrize("use_cdf", [True, False], ids=idfn)
@pytest.mark.parametrize("merge_sql", [
"MERGE INTO {dest_table} d USING {src_table} s ON d.a == s.a" \
" WHEN MATCHED AND s.b > 'q' THEN UPDATE SET d.a = s.a / 2, d.b = s.b" \
" WHEN NOT MATCHED THEN INSERT *",
"MERGE INTO {dest_table} d USING {src_table} s ON d.a == s.a" \
" WHEN NOT MATCHED AND s.b > 'q' THEN INSERT *",
"MERGE INTO {dest_table} d USING {src_table} s ON d.a == s.a" \
" WHEN MATCHED AND s.b > 'a' AND s.b < 'g' THEN UPDATE SET d.a = s.a / 2, d.b = s.b" \
" WHEN MATCHED AND s.b > 'g' AND s.b < 'z' THEN UPDATE SET d.a = s.a / 4, d.b = concat('extra', s.b)" \
" WHEN NOT MATCHED AND s.b > 'b' AND s.b < 'f' THEN INSERT *" \
" WHEN NOT MATCHED AND s.b > 'f' AND s.b < 'z' THEN INSERT (b) VALUES ('not here')" ], ids=idfn)
@pytest.mark.parametrize("num_slices", num_slices_to_test, ids=idfn)
def test_delta_merge_upsert_with_condition(spark_tmp_path, spark_tmp_table_factory, use_cdf, merge_sql, num_slices):
    # Need to eliminate duplicate keys in the source table; otherwise the update semantics are ambiguous
src_table_func = lambda spark: two_col_df(spark, int_gen, string_gen, num_slices=num_slices).groupBy("a").agg(f.max("b").alias("b"))
dest_table_func = lambda spark: two_col_df(spark, int_gen, string_gen, seed=1, num_slices=num_slices)
# Non-deterministic input for each task means we can only reliably compare record counts when using only one task
compare_logs = num_slices == 1
assert_delta_sql_merge_collect(spark_tmp_path, spark_tmp_table_factory, use_cdf,
src_table_func, dest_table_func, merge_sql, compare_logs)
@allow_non_gpu(*delta_meta_allow)
@delta_lake
@ignore_order
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
@pytest.mark.parametrize("use_cdf", [True, False], ids=idfn)
@pytest.mark.parametrize("num_slices", num_slices_to_test, ids=idfn)
def test_delta_merge_upsert_with_unmatchable_match_condition(spark_tmp_path, spark_tmp_table_factory, use_cdf, num_slices):
    # Need to eliminate duplicate keys in the source table; otherwise the update semantics are ambiguous
src_table_func = lambda spark: two_col_df(spark, int_gen, string_gen, num_slices=num_slices).groupBy("a").agg(f.max("b").alias("b"))
dest_table_func = lambda spark: two_col_df(spark, SetValuesGen(IntegerType(), range(100)), string_gen, seed=1, num_slices=num_slices)
merge_sql = "MERGE INTO {dest_table} USING {src_table} ON {dest_table}.a == {src_table}.a" \
" WHEN MATCHED AND {dest_table}.a > 100 THEN UPDATE SET *"
# Non-deterministic input for each task means we can only reliably compare record counts when using only one task
compare_logs = num_slices == 1
assert_delta_sql_merge_collect(spark_tmp_path, spark_tmp_table_factory, use_cdf,
src_table_func, dest_table_func, merge_sql, compare_logs)
@allow_non_gpu(*delta_meta_allow)
@delta_lake
@ignore_order
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
@pytest.mark.parametrize("use_cdf", [True, False], ids=idfn)
def test_delta_merge_update_with_aggregation(spark_tmp_path, spark_tmp_table_factory, use_cdf):
    # Need to eliminate duplicate keys in the source table; otherwise the update semantics are ambiguous
src_table_func = lambda spark: spark.range(10).withColumn("x", f.col("id") + 1)\
.select(f.col("id"), (f.col("x") + 1).alias("x"))\
.drop_duplicates(["id"])\
.limit(10)
dest_table_func = lambda spark: spark.range(5).withColumn("x", f.col("id") + 1)
merge_sql = "MERGE INTO {dest_table} USING {src_table} ON {dest_table}.id == {src_table}.id" \
" WHEN MATCHED THEN UPDATE SET {dest_table}.x = {src_table}.x + 2" \
" WHEN NOT MATCHED AND {src_table}.x < 7 THEN INSERT *"
assert_delta_sql_merge_collect(spark_tmp_path, spark_tmp_table_factory, use_cdf,
src_table_func, dest_table_func, merge_sql, compare_logs=False)
@allow_non_gpu(*delta_meta_allow)
@delta_lake
@ignore_order
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
@pytest.mark.xfail(not is_databricks_runtime(), reason="https://github.com/NVIDIA/spark-rapids/issues/7573")
@pytest.mark.parametrize("use_cdf", [True, False], ids=idfn)
@pytest.mark.parametrize("num_slices", num_slices_to_test, ids=idfn)
def test_delta_merge_dataframe_api(spark_tmp_path, use_cdf, num_slices):
from delta.tables import DeltaTable
data_path = spark_tmp_path + "/DELTA_DATA"
dest_table_func = lambda spark: two_col_df(spark, SetValuesGen(IntegerType(), [None] + list(range(100))), string_gen, seed=1, num_slices=num_slices)
with_cpu_session(lambda spark: setup_dest_tables(spark, data_path, dest_table_func, use_cdf))
def do_merge(spark, path):
        # Need to eliminate duplicate keys in the source table; otherwise the update semantics are ambiguous
src_df = two_col_df(spark, int_gen, string_gen, num_slices=num_slices).groupBy("a").agg(f.max("b").alias("b"))
dest_table = DeltaTable.forPath(spark, path)
dest_table.alias("dest").merge(src_df.alias("src"), "dest.a == src.a") \
.whenMatchedUpdateAll() \
.whenNotMatchedInsertAll() \
.execute()
read_func = read_delta_path_with_cdf if use_cdf else read_delta_path
assert_gpu_and_cpu_writes_are_equal_collect(do_merge, read_func, data_path, conf=delta_merge_enabled_conf)
# Non-deterministic input for each task means we can only reliably compare record counts when using only one task
if num_slices == 1:
with_cpu_session(lambda spark: assert_gpu_and_cpu_delta_logs_equivalent(spark, data_path))
| spark-rapids-branch-23.10 | integration_tests/src/main/python/delta_lake_merge_test.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from asserts import assert_gpu_and_cpu_are_equal_collect, assert_gpu_and_cpu_are_equal_sql, assert_gpu_and_cpu_sql_writes_are_equal_collect, assert_gpu_fallback_collect
from conftest import get_non_gpu_allowed
from data_gen import *
from enum import Enum
from marks import *
from pyspark.sql.types import *
from spark_session import is_spark_cdh, with_cpu_session
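# The read tests in this file run with hive_text_enabled_conf (the umbrella Hive text flag plus the
# read flag), while the write tests run with hive_text_write_enabled_conf (the umbrella flag plus
# the write flag); both conf maps are defined just below.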
hive_text_enabled_conf = {"spark.rapids.sql.format.hive.text.enabled": True,
"spark.rapids.sql.format.hive.text.read.enabled": True}
hive_text_write_enabled_conf = {"spark.rapids.sql.format.hive.text.enabled": True,
"spark.rapids.sql.format.hive.text.write.enabled": True}
acq_schema = StructType([
StructField('loan_id', LongType()),
StructField('orig_channel', StringType()),
StructField('seller_name', StringType()),
StructField('orig_interest_rate', DoubleType()),
StructField('orig_upb', IntegerType()),
StructField('orig_loan_term', IntegerType()),
StructField('orig_date', StringType()),
StructField('first_pay_date', StringType()),
StructField('orig_ltv', DoubleType()),
StructField('orig_cltv', DoubleType()),
StructField('num_borrowers', DoubleType()),
StructField('dti', DoubleType()),
StructField('borrower_credit_score', DoubleType()),
StructField('first_home_buyer', StringType()),
StructField('loan_purpose', StringType()),
StructField('property_type', StringType()),
StructField('num_units', IntegerType()),
StructField('occupancy_status', StringType()),
StructField('property_state', StringType()),
StructField('zip', IntegerType()),
StructField('mortgage_insurance_percent', DoubleType()),
StructField('product_type', StringType()),
StructField('coborrow_credit_score', DoubleType()),
StructField('mortgage_insurance_type', DoubleType()),
StructField('relocation_mortgage_indicator', StringType())])
perf_schema = StructType([
StructField('loan_id', LongType()),
StructField('monthly_reporting_period', StringType()),
StructField('servicer', StringType()),
StructField('interest_rate', DoubleType()),
StructField('current_actual_upb', DoubleType()),
StructField('loan_age', DoubleType()),
StructField('remaining_months_to_legal_maturity', DoubleType()),
StructField('adj_remaining_months_to_maturity', DoubleType()),
StructField('maturity_date', StringType()),
StructField('msa', DoubleType()),
StructField('current_loan_delinquency_status', IntegerType()),
StructField('mod_flag', StringType()),
StructField('zero_balance_code', StringType()),
StructField('zero_balance_effective_date', StringType()),
StructField('last_paid_installment_date', StringType()),
StructField('foreclosed_after', StringType()),
StructField('disposition_date', StringType()),
StructField('foreclosure_costs', DoubleType()),
StructField('prop_preservation_and_repair_costs', DoubleType()),
StructField('asset_recovery_costs', DoubleType()),
StructField('misc_holding_expenses', DoubleType()),
StructField('holding_taxes', DoubleType()),
StructField('net_sale_proceeds', DoubleType()),
StructField('credit_enhancement_proceeds', DoubleType()),
StructField('repurchase_make_whole_proceeds', StringType()),
StructField('other_foreclosure_proceeds', DoubleType()),
StructField('non_interest_bearing_upb', DoubleType()),
StructField('principal_forgiveness_upb', StringType()),
StructField('repurchase_make_whole_proceeds_flag', StringType()),
StructField('foreclosure_principal_write_off_amount', StringType()),
StructField('servicing_activity_indicator', StringType())])
timestamp_schema = StructType([
StructField('ts', TimestampType())])
date_schema = StructType([
StructField('date', DateType())])
trucks_schema = StructType([
StructField('make', StringType()),
StructField('model', StringType()),
StructField('year', IntegerType()),
StructField('price', StringType()),
StructField('comment', StringType())])
def make_schema(column_type):
"""
Constructs a table schema with a single column of the specified type
"""
return StructType([StructField('number', column_type)])
def read_hive_text_sql(data_path, schema, spark_tmp_table_factory, options=None):
if options is None:
options = {}
opts = options
if schema is not None:
opts = copy_and_update(options, {'schema': schema})
def read_impl(spark):
tmp_name = spark_tmp_table_factory.get()
return spark.catalog.createTable(tmp_name, source='hive', path=data_path, **opts)
return read_impl
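# read_hive_text_sql returns a function that registers the files at data_path as a Hive table with
# the requested schema and hands back the resulting DataFrame; the tests below pass that function
# straight to the CPU-vs-GPU assertion utilities.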
@pytest.mark.skipif(is_spark_cdh(),
reason="Hive text reads are disabled on CDH, as per "
"https://github.com/NVIDIA/spark-rapids/pull/7628")
@approximate_float
@pytest.mark.parametrize('name,schema,options', [
# Numeric Reads.
('hive-delim-text/simple-boolean-values', make_schema(BooleanType()), {}),
('hive-delim-text/simple-int-values', make_schema(ByteType()), {}),
('hive-delim-text/simple-int-values', make_schema(ShortType()), {}),
('hive-delim-text/simple-int-values', make_schema(IntegerType()), {}),
('hive-delim-text/simple-int-values', make_schema(LongType()), {}),
('hive-delim-text/simple-int-values', make_schema(FloatType()), {}),
('hive-delim-text/simple-int-values', make_schema(DoubleType()), {}),
('hive-delim-text/simple-int-values', make_schema(StringType()), {}),
pytest.param('hive-delim-text/simple-int-values', make_schema(DecimalType(10, 2)), {},
marks=pytest.mark.xfail(condition=is_spark_cdh(),
reason="https://github.com/NVIDIA/spark-rapids/issues/7423")),
pytest.param('hive-delim-text/simple-int-values', make_schema(DecimalType(10, 3)), {},
marks=pytest.mark.xfail(condition=is_spark_cdh(),
reason="https://github.com/NVIDIA/spark-rapids/issues/7423")),
# Floating Point.
('hive-delim-text/simple-float-values', make_schema(FloatType()), {}),
('hive-delim-text/simple-float-values', make_schema(DoubleType()), {}),
pytest.param('hive-delim-text/simple-float-values', make_schema(DecimalType(10, 3)), {},
marks=pytest.mark.xfail(condition=is_spark_cdh(),
reason="https://github.com/NVIDIA/spark-rapids/issues/7423")),
pytest.param('hive-delim-text/simple-float-values', make_schema(DecimalType(38, 10)), {},
marks=pytest.mark.xfail(condition=is_spark_cdh(),
reason="https://github.com/NVIDIA/spark-rapids/issues/7423")),
('hive-delim-text/simple-float-values', make_schema(IntegerType()), {}),
('hive-delim-text/extended-float-values', make_schema(IntegerType()), {}),
('hive-delim-text/extended-float-values', make_schema(FloatType()), {}),
('hive-delim-text/extended-float-values', make_schema(DoubleType()), {}),
pytest.param('hive-delim-text/extended-float-values', make_schema(DecimalType(10, 3)), {},
marks=pytest.mark.xfail(reason="GPU supports more valid values than CPU. "
"https://github.com/NVIDIA/spark-rapids/issues/7246")),
pytest.param('hive-delim-text/extended-float-values', make_schema(DecimalType(38, 10)), {},
marks=pytest.mark.xfail(reason="GPU supports more valid values than CPU. "
"https://github.com/NVIDIA/spark-rapids/issues/7246")),
# Custom datasets
('hive-delim-text/Acquisition_2007Q3', acq_schema, {}),
('hive-delim-text/Performance_2007Q3', perf_schema, {'serialization.null.format': ''}),
('hive-delim-text/Performance_2007Q3', perf_schema, {}),
('hive-delim-text/trucks-1', trucks_schema, {}),
('hive-delim-text/trucks-err', trucks_schema, {}),
# Date/Time
pytest.param('hive-delim-text/timestamp', timestamp_schema, {},
marks=pytest.mark.xfail(condition=is_spark_cdh(),
reason="https://github.com/NVIDIA/spark-rapids/issues/7423")),
pytest.param('hive-delim-text/date', date_schema, {},
marks=pytest.mark.xfail(condition=is_spark_cdh(),
reason="https://github.com/NVIDIA/spark-rapids/issues/7423")),
# Test that lines beginning with comments ('#') aren't skipped.
('hive-delim-text/comments', StructType([StructField("str", StringType()),
StructField("num", IntegerType()),
StructField("another_str", StringType())]), {}),
# Test that carriage returns ('\r'/'^M') are treated similarly to newlines ('\n')
('hive-delim-text/carriage-return', StructType([StructField("str", StringType())]), {}),
('hive-delim-text/carriage-return-err', StructType([StructField("str", StringType())]), {}),
], ids=idfn)
def test_basic_hive_text_read(std_input_path, name, schema, spark_tmp_table_factory, options):
assert_gpu_and_cpu_are_equal_collect(read_hive_text_sql(std_input_path + '/' + name,
schema, spark_tmp_table_factory, options),
conf=hive_text_enabled_conf)
hive_text_supported_gens = [
StringGen('(\\w| |\t|\ud720){0,10}', nullable=False),
StringGen('[aAbB ]{0,10}'),
StringGen('[nN][aA][nN]'),
StringGen('[+-]?[iI][nN][fF]([iI][nN][iI][tT][yY])?'),
byte_gen, short_gen, int_gen, long_gen, boolean_gen, date_gen,
float_gen,
FloatGen(no_nans=False),
double_gen,
DoubleGen(no_nans=False),
TimestampGen(),
]
def create_hive_text_table(spark, column_gen, text_table_name, data_path, fields="my_field"):
"""
Helper method to create a Hive Text table with contents from the specified
column generator.
:param spark: Spark context for the test
:param column_gen: Data generator for the table's column
:param text_table_name: (Temp) Name of the created Hive Text table
:param data_path: Data location for the created Hive Text table
:param fields: The fields composing the table to be created
"""
gen_df(spark, column_gen).repartition(1).createOrReplaceTempView("input_view")
spark.sql("DROP TABLE IF EXISTS " + text_table_name)
spark.sql("CREATE TABLE " + text_table_name + " STORED AS TEXTFILE " +
"LOCATION '" + data_path + "' " +
"AS SELECT " + fields + " FROM input_view")
def read_hive_text_table(spark, text_table_name, fields="my_field"):
"""
Helper method to read the contents of a Hive (Text) table.
:param spark: Spark context for the test
:param text_table_name: Name of the Hive (Text) table to be read
:param fields: The fields to be read from the specified table
"""
return spark.sql("SELECT " + fields + " FROM " + text_table_name)
@pytest.mark.skipif(is_spark_cdh(),
reason="Hive text reads are disabled on CDH, as per "
"https://github.com/NVIDIA/spark-rapids/pull/7628")
@approximate_float
@pytest.mark.parametrize('data_gen', hive_text_supported_gens, ids=idfn)
def test_hive_text_round_trip(spark_tmp_path, data_gen, spark_tmp_table_factory):
gen = StructGen([('my_field', data_gen)], nullable=False)
data_path = spark_tmp_path + '/hive_text_table'
table_name = spark_tmp_table_factory.get()
with_cpu_session(lambda spark: create_hive_text_table(spark, gen, table_name, data_path))
assert_gpu_and_cpu_are_equal_collect(
lambda spark: read_hive_text_table(spark, table_name),
conf=hive_text_enabled_conf)
def create_hive_text_table_partitioned(spark, column_gen, text_table_name, data_path):
gen_df(spark, column_gen).repartition(1).createOrReplaceTempView("input_view")
spark.sql("DROP TABLE IF EXISTS " + text_table_name)
    column_type = column_gen.children[0][1].data_type.simpleString()  # column_gen is StructGen([('my_field', gen)]), so this pulls out the field's type
spark.sql("CREATE TABLE " + text_table_name +
"( my_field " + column_type + ") "
"PARTITIONED BY (dt STRING) "
"STORED AS TEXTFILE "
"LOCATION '" + data_path + "' ")
spark.sql("INSERT OVERWRITE " + text_table_name + " PARTITION( dt='1' ) "
"SELECT my_field FROM input_view")
spark.sql("INSERT OVERWRITE " + text_table_name + " PARTITION( dt='2' ) "
"SELECT my_field FROM input_view")
def read_hive_text_table_partitions(spark, text_table_name, partition):
"""
Helper method to read the contents of a Hive (Text) table.
:param spark: Spark context for the test
:param text_table_name: Name of the Hive (Text) table to be read
:param partition: Partition selection string (e.g. "dt=1")
"""
return spark.sql("SELECT my_field FROM %s WHERE %s" % (text_table_name, partition))
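# The two helpers above write the same generated data into two partitions (dt='1' and dt='2') and
# read back a selected subset of partitions; the tests below use them to check reads against one
# partition or both.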
@pytest.mark.skipif(is_spark_cdh(),
reason="Hive text reads are disabled on CDH, as per "
"https://github.com/NVIDIA/spark-rapids/pull/7628")
@approximate_float
@allow_non_gpu("EqualTo,IsNotNull,Literal") # Accounts for partition predicate: `WHERE dt='1'`
@pytest.mark.parametrize('data_gen', hive_text_supported_gens, ids=idfn)
def test_hive_text_round_trip_partitioned(spark_tmp_path, data_gen, spark_tmp_table_factory):
gen = StructGen([('my_field', data_gen)], nullable=False)
data_path = spark_tmp_path + '/hive_text_table'
table_name = spark_tmp_table_factory.get()
with_cpu_session(lambda spark: create_hive_text_table_partitioned(spark, gen, table_name, data_path))
assert_gpu_and_cpu_are_equal_collect(
lambda spark: read_hive_text_table_partitions(spark, table_name, "dt='1'"),
conf=hive_text_enabled_conf)
@pytest.mark.skipif(is_spark_cdh(),
reason="Hive text reads are disabled on CDH, as per "
"https://github.com/NVIDIA/spark-rapids/pull/7628")
@approximate_float
@allow_non_gpu("EqualTo,IsNotNull,Literal,Or") # Accounts for partition predicate
@pytest.mark.parametrize('data_gen', hive_text_supported_gens, ids=idfn)
def test_hive_text_round_trip_two_partitions(spark_tmp_path, data_gen, spark_tmp_table_factory):
"""
Added to reproduce: https://github.com/NVIDIA/spark-rapids/issues/7383
"""
gen = StructGen([('my_field', data_gen)], nullable=False)
data_path = spark_tmp_path + '/hive_text_table'
table_name = spark_tmp_table_factory.get()
with_cpu_session(lambda spark: create_hive_text_table_partitioned(spark, gen, table_name, data_path))
assert_gpu_and_cpu_are_equal_collect(
lambda spark: read_hive_text_table_partitions(spark, table_name, "dt='1' or dt='2'"),
conf=hive_text_enabled_conf)
hive_text_unsupported_gens = [
ArrayGen(string_gen),
StructGen([('int_field', int_gen), ('string_field', string_gen)]),
MapGen(StringGen(nullable=False), string_gen),
binary_gen,
StructGen([('b', byte_gen), ('i', int_gen), ('arr_of_i', ArrayGen(int_gen))]),
ArrayGen(StructGen([('b', byte_gen), ('i', int_gen), ('arr_of_i', ArrayGen(int_gen))]))
]
@pytest.mark.skipif(is_spark_cdh(),
reason="Hive text reads are disabled on CDH, as per "
"https://github.com/NVIDIA/spark-rapids/pull/7628")
@allow_non_gpu("org.apache.spark.sql.hive.execution.HiveTableScanExec")
@pytest.mark.parametrize('unsupported_gen', hive_text_unsupported_gens, ids=idfn)
def test_hive_text_fallback_for_unsupported_types(spark_tmp_path, unsupported_gen, spark_tmp_table_factory):
supported_gen = int_gen # Generator for 1 supported data type. (IntegerGen chosen arbitrarily.)
gen = StructGen([('my_supported_int_field', supported_gen),
('my_unsupported_field', unsupported_gen), ], nullable=False)
data_path = spark_tmp_path + '/hive_text_table'
table_name = spark_tmp_table_factory.get()
with_cpu_session(lambda spark: create_hive_text_table(spark,
gen,
table_name,
data_path,
"my_supported_int_field, my_unsupported_field"))
assert_gpu_fallback_collect(
lambda spark: read_hive_text_table(spark, table_name, "my_unsupported_field"),
cpu_fallback_class_name=get_non_gpu_allowed()[0],
conf=hive_text_enabled_conf)
    # GpuHiveTableScanExec cannot read just the subset of columns that have supported types.
    # Even if the output projection uses only supported types, the read should fall back to the CPU
    # if the table has even one column of an unsupported type.
assert_gpu_fallback_collect(
lambda spark: read_hive_text_table(spark, table_name, "my_supported_int_field"),
cpu_fallback_class_name=get_non_gpu_allowed()[0],
conf=hive_text_enabled_conf)
@pytest.mark.skipif(is_spark_cdh(),
reason="Hive text reads are disabled on CDH, as per "
"https://github.com/NVIDIA/spark-rapids/pull/7628")
@pytest.mark.parametrize('data_gen', [StringGen()], ids=idfn)
def test_hive_text_default_enabled(spark_tmp_path, data_gen, spark_tmp_table_factory):
gen = StructGen([('my_field', data_gen)], nullable=False)
data_path = spark_tmp_path + '/hive_text_table'
table_name = spark_tmp_table_factory.get()
with_cpu_session(lambda spark: create_hive_text_table(spark, gen, table_name, data_path))
assert_gpu_and_cpu_are_equal_collect(
lambda spark: read_hive_text_table(spark, table_name),
conf={})
@pytest.mark.skipif(is_spark_cdh(),
reason="Hive text reads are disabled on CDH, as per "
"https://github.com/NVIDIA/spark-rapids/pull/7628")
@allow_non_gpu("org.apache.spark.sql.hive.execution.HiveTableScanExec")
@pytest.mark.parametrize('data_gen', [TimestampGen()], ids=idfn)
def test_custom_timestamp_formats_disabled(spark_tmp_path, data_gen, spark_tmp_table_factory):
"""
    This tests that the plugin falls back to CPU execution when a Hive delimited
    text table is set up with a custom timestamp format via the "timestamp.formats"
    property.
    Note that this property can be specified in either table properties
    or SerDe properties.
"""
gen = StructGen([('my_field', data_gen)], nullable=False)
data_path = spark_tmp_path + '/hive_text_table'
table_name = spark_tmp_table_factory.get()
from enum import Enum
class PropertyLocation(Enum):
TBLPROPERTIES = 1,
SERDEPROPERTIES = 2
def create_hive_table_with_custom_timestamp_format(spark, property_location):
gen_df(spark, gen).repartition(1).createOrReplaceTempView("input_view")
spark.sql("DROP TABLE IF EXISTS " + table_name)
spark.sql("CREATE TABLE " + table_name + " (my_field TIMESTAMP) "
"STORED AS TEXTFILE " +
"LOCATION '" + data_path + "' ")
spark.sql("ALTER TABLE " + table_name + " SET " +
("TBLPROPERTIES" if property_location == PropertyLocation.TBLPROPERTIES else "SERDEPROPERTIES") +
"('timestamp.formats'='yyyy-MM-dd HH:mm:ss.SSS')")
spark.sql("INSERT INTO TABLE " + table_name + " SELECT * FROM input_view")
with_cpu_session(lambda spark:
create_hive_table_with_custom_timestamp_format(spark, PropertyLocation.TBLPROPERTIES))
assert_gpu_fallback_collect(
lambda spark: read_hive_text_table(spark, table_name),
cpu_fallback_class_name=get_non_gpu_allowed()[0],
conf=hive_text_enabled_conf)
with_cpu_session(lambda spark:
create_hive_table_with_custom_timestamp_format(spark, PropertyLocation.SERDEPROPERTIES))
assert_gpu_fallback_collect(
lambda spark: read_hive_text_table(spark, table_name),
cpu_fallback_class_name=get_non_gpu_allowed()[0],
conf=hive_text_enabled_conf)
@pytest.mark.skipif(is_spark_cdh(),
reason="Hive text reads are disabled on CDH, as per "
"https://github.com/NVIDIA/spark-rapids/pull/7628")
@pytest.mark.parametrize('codec', ['BZip2Codec', # BZ2 compression, i.e. Splittable.
'DefaultCodec', # DEFLATE, i.e. Gzip, without headers. Unsplittable.
'GzipCodec']) # Gzip proper. Unsplittable.
def test_read_compressed_hive_text(spark_tmp_table_factory, codec):
"""
This tests whether compressed Hive Text tables are readable from spark-rapids.
For GZIP/DEFLATE compressed tables, spark-rapids should not attempt to split the input files.
Bzip2 compressed tables are splittable, and should remain readable.
"""
table_name = spark_tmp_table_factory.get()
def create_table_with_compressed_files(spark):
spark.range(100000)\
.selectExpr("id",
"cast(id as string) id_string")\
.repartition(1).write.format("hive").saveAsTable(table_name)
# Create Hive Text table with compression enabled.
with_cpu_session(create_table_with_compressed_files,
{"hive.exec.compress.output": "true",
"mapreduce.output.fileoutputformat.compress.codec":
"org.apache.hadoop.io.compress.{}".format(codec)})
# Attempt to read from table with very small (2KB) splits.
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.sql("SELECT COUNT(1) FROM {}".format(table_name)),
conf={"spark.sql.files.maxPartitionBytes": "2048b"}
)
# Hive Delimited Text writer tests follow.
TableWriteMode = Enum('TableWriteMode', ['CTAS', 'CreateThenWrite'])
@pytest.mark.skipif(is_spark_cdh(),
reason="Hive text is disabled on CDH, as per "
"https://github.com/NVIDIA/spark-rapids/pull/7628")
@approximate_float
@ignore_order(local=True)
@pytest.mark.parametrize('mode', [TableWriteMode.CTAS, TableWriteMode.CreateThenWrite])
@pytest.mark.parametrize('input_dir,schema,options', [
('hive-delim-text/simple-boolean-values', make_schema(BooleanType()), {}),
('hive-delim-text/simple-int-values', make_schema(ByteType()), {}),
('hive-delim-text/simple-int-values', make_schema(ShortType()), {}),
('hive-delim-text/simple-int-values', make_schema(IntegerType()), {}),
('hive-delim-text/simple-int-values', make_schema(LongType()), {}),
('hive-delim-text/simple-int-values', make_schema(FloatType()), {}),
('hive-delim-text/simple-int-values', make_schema(DoubleType()), {}),
('hive-delim-text/simple-int-values', make_schema(StringType()), {}),
pytest.param('hive-delim-text/simple-int-values', make_schema(DecimalType(10, 2)), {},
marks=pytest.mark.xfail(condition=is_spark_cdh(),
reason="https://github.com/NVIDIA/spark-rapids/issues/7423")),
pytest.param('hive-delim-text/simple-int-values', make_schema(DecimalType(10, 3)), {},
marks=pytest.mark.xfail(condition=is_spark_cdh(),
reason="https://github.com/NVIDIA/spark-rapids/issues/7423")),
# Floating Point.
('hive-delim-text/simple-float-values', make_schema(FloatType()), {}),
pytest.param('hive-delim-text/simple-float-values', make_schema(DecimalType(10, 3)), {},
marks=pytest.mark.xfail(condition=is_spark_cdh(),
reason="https://github.com/NVIDIA/spark-rapids/issues/7423")),
pytest.param('hive-delim-text/simple-float-values', make_schema(DecimalType(38, 10)), {},
marks=pytest.mark.xfail(condition=is_spark_cdh(),
reason="https://github.com/NVIDIA/spark-rapids/issues/7423")),
('hive-delim-text/simple-float-values', make_schema(IntegerType()), {}),
('hive-delim-text/extended-float-values', make_schema(IntegerType()), {}),
('hive-delim-text/extended-float-values', make_schema(FloatType()), {}),
('hive-delim-text/extended-float-values', make_schema(DoubleType()), {}),
pytest.param('hive-delim-text/extended-float-values', make_schema(DecimalType(10, 3)), {},
marks=pytest.mark.xfail(reason="GPU supports more valid values than CPU. "
"https://github.com/NVIDIA/spark-rapids/issues/7246")),
pytest.param('hive-delim-text/extended-float-values', make_schema(DecimalType(38, 10)), {},
marks=pytest.mark.xfail(reason="GPU supports more valid values than CPU. "
"https://github.com/NVIDIA/spark-rapids/issues/7246")),
# Custom datasets
('hive-delim-text/Acquisition_2007Q3', acq_schema, {}),
('hive-delim-text/Performance_2007Q3', perf_schema, {}),
('hive-delim-text/trucks-1', trucks_schema, {}),
('hive-delim-text/trucks-err', trucks_schema, {}),
# Date/Time
pytest.param('hive-delim-text/timestamp', timestamp_schema, {},
marks=pytest.mark.xfail(condition=is_spark_cdh(),
reason="https://github.com/NVIDIA/spark-rapids/issues/7423")),
pytest.param('hive-delim-text/date', date_schema, {},
marks=pytest.mark.xfail(condition=is_spark_cdh(),
reason="https://github.com/NVIDIA/spark-rapids/issues/7423")),
# Test that lines beginning with comments ('#') aren't skipped.
('hive-delim-text/comments', StructType([StructField("str", StringType()),
StructField("num", IntegerType()),
StructField("another_str", StringType())]), {}),
# Test that carriage returns ('\r'/'^M') are treated similarly to newlines ('\n')
('hive-delim-text/carriage-return', StructType([StructField("str", StringType())]), {}),
('hive-delim-text/carriage-return-err', StructType([StructField("str", StringType())]), {}),
], ids=idfn)
def test_basic_hive_text_write(std_input_path, input_dir, schema, spark_tmp_table_factory, mode, options):
# Configure table options, including schema.
if options is None:
options = {}
opts = options
if schema is not None:
opts = copy_and_update(options, {'schema': schema})
# Initialize data path.
data_path = std_input_path + "/" + input_dir
def create_input_table(spark):
input_table_name = spark_tmp_table_factory.get()
spark.catalog.createExternalTable(input_table_name, source='hive', path=data_path, **opts)
return input_table_name
input_table = with_cpu_session(create_input_table)
def write_table_sql(spark, table_name):
if mode == TableWriteMode.CTAS:
return [
"CREATE TABLE {} SELECT * FROM {}".format(table_name, input_table)
]
elif mode == TableWriteMode.CreateThenWrite:
return [
"CREATE TABLE {} LIKE {}".format(table_name, input_table),
"INSERT OVERWRITE TABLE {} "
" SELECT * FROM {} ".format(table_name, input_table)
]
assert_gpu_and_cpu_sql_writes_are_equal_collect(
spark_tmp_table_factory,
write_table_sql,
conf=hive_text_write_enabled_conf)
PartitionWriteMode = Enum('PartitionWriteMode', ['Static', 'Dynamic'])
@pytest.mark.skipif(is_spark_cdh(),
reason="Hive text is disabled on CDH, as per "
"https://github.com/NVIDIA/spark-rapids/pull/7628")
@ignore_order(local=True)
@pytest.mark.parametrize('mode', [PartitionWriteMode.Static, PartitionWriteMode.Dynamic])
def test_partitioned_hive_text_write(mode, spark_tmp_table_factory):
def create_input_table(spark):
tmp_input = spark_tmp_table_factory.get()
spark.sql("CREATE TABLE " + tmp_input +
" (make STRING, model STRING, year INT, type STRING, comment STRING)" +
" STORED AS TEXTFILE")
spark.sql("INSERT INTO TABLE " + tmp_input + " VALUES " +
"('Ford', 'F-150', 2020, 'ICE', 'Popular' ),"
"('GMC', 'Sierra 1500', 1997, 'ICE', 'Older'),"
"('Chevy', 'D-Max', 2015, 'ICE', 'Isuzu?' ),"
"('Tesla', 'CyberTruck', 2025, 'Electric', 'BladeRunner'),"
"('Rivian', 'R1T', 2022, 'Electric', 'Heavy'),"
"('Jeep', 'Gladiator', 2024, 'Hybrid', 'Upcoming')")
return tmp_input
input_table = with_cpu_session(create_input_table)
def write_partitions_sql(spark, output_table):
if mode == PartitionWriteMode.Static:
return [
"CREATE TABLE {} "
" (make STRING, model STRING, year INT, comment STRING)"
" PARTITIONED BY (type STRING) STORED AS TEXTFILE".format(output_table),
"INSERT INTO TABLE {} PARTITION (type='ICE')"
" SELECT make, model, year, comment FROM {} "
" WHERE type='ICE'".format(output_table, input_table),
"INSERT OVERWRITE TABLE {} PARTITION (type='Electric')"
" SELECT make, model, year, comment FROM {} "
" WHERE type='Electric'".format(output_table, input_table),
# Second (over)write to the same "Electric" partition.
"INSERT OVERWRITE TABLE {} PARTITION (type='Electric')"
" SELECT make, model, year, comment FROM {} "
" WHERE type='Electric'".format(output_table, input_table),
"INSERT INTO TABLE " + output_table + " PARTITION (type='Hybrid')" +
" SELECT make, model, year, comment FROM " + input_table +
" WHERE type='Hybrid'",
]
elif mode == PartitionWriteMode.Dynamic:
return [
"CREATE TABLE " + output_table +
" (make STRING, model STRING, year INT, comment STRING)"
" PARTITIONED BY (type STRING) STORED AS TEXTFILE",
"INSERT OVERWRITE TABLE " + output_table +
" SELECT make, model, year, comment, type FROM " + input_table,
# Second (over)write to only the "Electric" partition.
"INSERT OVERWRITE TABLE " + output_table +
" SELECT make, model, year, comment, type FROM " + input_table +
" WHERE type = 'Electric'"
]
else:
raise Exception("Unsupported PartitionWriteMode {}".format(mode))
assert_gpu_and_cpu_sql_writes_are_equal_collect(
spark_tmp_table_factory,
write_partitions_sql,
conf={"hive.exec.dynamic.partition.mode": "nonstrict",
"spark.rapids.sql.format.hive.text.enabled": True,
"spark.rapids.sql.format.hive.text.write.enabled": True}
)
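# "hive.exec.dynamic.partition.mode" is set to "nonstrict" above because the Dynamic mode issues
# INSERT OVERWRITE statements without a static partition spec, which Hive rejects in strict mode.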
| spark-rapids-branch-23.10 | integration_tests/src/main/python/hive_delimited_text_test.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_gpu_and_cpu_are_equal_collect
from data_gen import *
from pyspark.sql.types import *
import pyspark.sql.functions as f
@pytest.mark.parametrize('data_gen', [decimal_gen_128bit], ids=idfn)
def test_project_alias(data_gen):
dec = Decimal('123123123123123123123123123.456')
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).select(
f.col('a').alias('col1'),
f.col('b').alias('col2'),
f.lit(dec)))
| spark-rapids-branch-23.10 | integration_tests/src/main/python/project_lit_alias_test.py |
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from logging import exception
import pytest
from asserts import assert_gpu_and_cpu_are_equal_collect, assert_gpu_and_cpu_error, assert_gpu_fallback_collect, assert_gpu_and_cpu_are_equal_sql
from data_gen import *
from marks import ignore_order, incompat, approximate_float, allow_non_gpu
from pyspark.sql.types import *
from pyspark.sql.types import IntegralType
from spark_session import *
import pyspark.sql.functions as f
import pyspark.sql.utils
from datetime import timedelta
# Only no-overflow gens here because we just focus on verifying the fallback to CPU when ANSI
# mode is enabled. Overflowing values would fail the tests anyway, because the CPU runs raise
# exceptions.
_no_overflow_multiply_gens_for_fallback = [
ByteGen(min_val = 1, max_val = 10, special_cases=[]),
ShortGen(min_val = 1, max_val = 100, special_cases=[]),
IntegerGen(min_val = 1, max_val = 1000, special_cases=[]),
LongGen(min_val = 1, max_val = 3000, special_cases=[])]
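# Worked example of why a * b cannot overflow for these ranges (both columns drawn from the same
# generator): 10 * 10 = 100 <= 127 for bytes, 100 * 100 = 10,000 <= 32,767 for shorts,
# 1,000 * 1,000 = 1,000,000 fits in an int, and 3,000 * 3,000 = 9,000,000 easily fits in a long.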
_no_overflow_multiply_gens = _no_overflow_multiply_gens_for_fallback + [
DecimalGen(10, 0),
DecimalGen(19, 0)]
_decimal_gen_7_7 = DecimalGen(precision=7, scale=7)
_decimal_gen_18_0 = DecimalGen(precision=18, scale=0)
_decimal_gen_18_3 = DecimalGen(precision=18, scale=3)
_decimal_gen_30_2 = DecimalGen(precision=30, scale=2)
_decimal_gen_36_5 = DecimalGen(precision=36, scale=5)
_decimal_gen_36_neg5 = DecimalGen(precision=36, scale=-5)
_decimal_gen_38_0 = DecimalGen(precision=38, scale=0)
_decimal_gen_38_10 = DecimalGen(precision=38, scale=10)
_decimal_gen_38_neg10 = DecimalGen(precision=38, scale=-10)
_arith_data_gens_diff_precision_scale_and_no_neg_scale_no_38_0 = [
decimal_gen_32bit, decimal_gen_64bit, _decimal_gen_18_0, decimal_gen_128bit,
_decimal_gen_30_2, _decimal_gen_36_5, _decimal_gen_38_10
]
_arith_data_gens_diff_precision_scale_and_no_neg_scale = \
_arith_data_gens_diff_precision_scale_and_no_neg_scale_no_38_0 + [_decimal_gen_38_0]
_arith_decimal_gens_no_neg_scale = _arith_data_gens_diff_precision_scale_and_no_neg_scale + [_decimal_gen_7_7]
_arith_decimal_gens = _arith_decimal_gens_no_neg_scale + [
decimal_gen_32bit_neg_scale, _decimal_gen_36_neg5, _decimal_gen_38_neg10
]
_arith_data_gens = numeric_gens + _arith_decimal_gens
_arith_data_gens_no_neg_scale = numeric_gens + _arith_decimal_gens_no_neg_scale
_arith_decimal_gens_no_neg_scale_38_0_overflow = \
_arith_data_gens_diff_precision_scale_and_no_neg_scale_no_38_0 + [
_decimal_gen_7_7,
pytest.param(_decimal_gen_38_0, marks=pytest.mark.skipif(
is_spark_330_or_later(), reason='This case overflows in Spark 3.3.0+'))]
def _get_overflow_df(spark, data, data_type, expr):
return spark.createDataFrame(
SparkContext.getOrCreate().parallelize([data]),
StructType([StructField('a', data_type)])
).selectExpr(expr)
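# Illustrative (hypothetical) use of the helper above: build a one-row DataFrame holding a boundary
# value and apply an expression that overflows it, e.g.
#   _get_overflow_df(spark, [2**63 - 1], LongType(), 'a + 1').collect()
# which is expected to raise when ANSI mode is enabled.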
@pytest.mark.parametrize('data_gen', _arith_data_gens, ids=idfn)
def test_addition(data_gen):
data_type = data_gen.data_type
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).select(
f.col('a') + f.lit(100).cast(data_type),
f.lit(-12).cast(data_type) + f.col('b'),
f.lit(None).cast(data_type) + f.col('a'),
f.col('b') + f.lit(None).cast(data_type),
f.col('a') + f.col('b')))
# If a generator cannot overflow for multiplication, it cannot overflow for addition either
@pytest.mark.parametrize('data_gen', _no_overflow_multiply_gens, ids=idfn)
def test_addition_ansi_no_overflow(data_gen):
data_type = data_gen.data_type
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).select(
f.col('a') + f.lit(100).cast(data_type),
f.lit(-12).cast(data_type) + f.col('b'),
f.lit(None).cast(data_type) + f.col('a'),
f.col('b') + f.lit(None).cast(data_type),
f.col('a') + f.col('b')),
conf=ansi_enabled_conf)
@pytest.mark.parametrize('data_gen', _arith_data_gens, ids=idfn)
def test_subtraction(data_gen):
data_type = data_gen.data_type
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).select(
f.col('a') - f.lit(100).cast(data_type),
f.lit(-12).cast(data_type) - f.col('b'),
f.lit(None).cast(data_type) - f.col('a'),
f.col('b') - f.lit(None).cast(data_type),
f.col('a') - f.col('b')))
@pytest.mark.parametrize('lhs', [byte_gen, short_gen, int_gen, long_gen, DecimalGen(6, 5),
DecimalGen(6, 4), DecimalGen(5, 4), DecimalGen(5, 3), DecimalGen(4, 2), DecimalGen(3, -2),
DecimalGen(16, 7), DecimalGen(19, 0), DecimalGen(30, 10)], ids=idfn)
@pytest.mark.parametrize('rhs', [byte_gen, short_gen, int_gen, long_gen, DecimalGen(6, 3),
DecimalGen(10, -2), DecimalGen(15, 3), DecimalGen(30, 12), DecimalGen(3, -3),
DecimalGen(27, 7), DecimalGen(20, -3)], ids=idfn)
@pytest.mark.parametrize('addOrSub', ['+', '-'])
def test_addition_subtraction_mixed(lhs, rhs, addOrSub):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : two_col_df(spark, lhs, rhs).selectExpr(f"a {addOrSub} b")
)
# If a generator cannot overflow for multiplication, it cannot overflow for subtraction either
@pytest.mark.parametrize('data_gen', _no_overflow_multiply_gens, ids=idfn)
def test_subtraction_ansi_no_overflow(data_gen):
data_type = data_gen.data_type
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).select(
f.col('a') - f.lit(100).cast(data_type),
f.lit(-12).cast(data_type) - f.col('b'),
f.lit(None).cast(data_type) - f.col('a'),
f.col('b') - f.lit(None).cast(data_type),
f.col('a') - f.col('b')),
conf=ansi_enabled_conf)
@pytest.mark.parametrize('data_gen', numeric_gens + [
decimal_gen_32bit_neg_scale, decimal_gen_32bit, _decimal_gen_7_7,
DecimalGen(precision=8, scale=8), decimal_gen_64bit, _decimal_gen_18_3,
_decimal_gen_38_10,
_decimal_gen_38_neg10
], ids=idfn)
def test_multiplication(data_gen):
data_type = data_gen.data_type
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).select(
f.col('a'), f.col('b'),
f.col('a') * f.lit(100).cast(data_type),
f.lit(-12).cast(data_type) * f.col('b'),
f.lit(None).cast(data_type) * f.col('a'),
f.col('b') * f.lit(None).cast(data_type),
f.col('a') * f.col('b')
))
@allow_non_gpu('ProjectExec', 'Alias', 'Multiply', 'Cast')
@pytest.mark.parametrize('data_gen', _no_overflow_multiply_gens_for_fallback, ids=idfn)
def test_multiplication_fallback_when_ansi_enabled(data_gen):
assert_gpu_fallback_collect(
lambda spark : binary_op_df(spark, data_gen).select(
f.col('a') * f.col('b')),
'Multiply',
conf=ansi_enabled_conf)
@pytest.mark.parametrize('data_gen', [float_gen, double_gen, decimal_gen_32bit, DecimalGen(19, 0)], ids=idfn)
def test_multiplication_ansi_enabled(data_gen):
data_type = data_gen.data_type
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).select(
f.col('a') * f.lit(100).cast(data_type),
f.col('a') * f.col('b')),
conf=ansi_enabled_conf)
def test_multiplication_ansi_overflow():
exception_str = 'ArithmeticException'
assert_gpu_and_cpu_error(
lambda spark : unary_op_df(spark, DecimalGen(38, 0)).selectExpr("a * " + "9"*38 + " as ret").collect(),
ansi_enabled_conf,
exception_str)
@pytest.mark.parametrize('lhs', [byte_gen, short_gen, int_gen, long_gen, DecimalGen(6, 5),
DecimalGen(6, 4), DecimalGen(5, 4), DecimalGen(5, 3), DecimalGen(4, 2), DecimalGen(3, -2),
DecimalGen(16, 7), DecimalGen(19, 0), DecimalGen(30, 10)], ids=idfn)
@pytest.mark.parametrize('rhs', [byte_gen, short_gen, int_gen, long_gen, DecimalGen(6, 3),
DecimalGen(10, -2), DecimalGen(15, 3), DecimalGen(30, 12), DecimalGen(3, -3),
DecimalGen(27, 7), DecimalGen(20, -3)], ids=idfn)
def test_multiplication_mixed(lhs, rhs):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : two_col_df(spark, lhs, rhs).select(
f.col('a') * f.col('b')))
@approximate_float  # We should get exactly the right answer for floats, except in some corner cases when casting a decimal to a float.
@pytest.mark.parametrize('lhs', [float_gen, double_gen], ids=idfn)
@pytest.mark.parametrize('rhs', [DecimalGen(6, 3), DecimalGen(10, -2), DecimalGen(15, 3)], ids=idfn)
def test_float_multiplication_mixed(lhs, rhs):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : two_col_df(spark, lhs, rhs).select(
f.col('a') * f.col('b')),
conf={'spark.rapids.sql.castDecimalToFloat.enabled': 'true'})
@pytest.mark.parametrize('data_gen', [double_gen, decimal_gen_32bit_neg_scale, DecimalGen(6, 3),
DecimalGen(5, 5), DecimalGen(6, 0), DecimalGen(7, 4), DecimalGen(15, 0), DecimalGen(18, 0),
DecimalGen(17, 2), DecimalGen(16, 4), DecimalGen(38, 21), DecimalGen(21, 17), DecimalGen(3, -2)], ids=idfn)
def test_division(data_gen):
data_type = data_gen.data_type
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).select(
f.col('a') / f.lit(100).cast(data_type),
f.lit(-12).cast(data_type) / f.col('b'),
f.lit(None).cast(data_type) / f.col('a'),
f.col('b') / f.lit(None).cast(data_type),
f.col('a') / f.col('b')))
@pytest.mark.parametrize('rhs', [byte_gen, short_gen, int_gen, long_gen, DecimalGen(4, 1), DecimalGen(5, 0), DecimalGen(5, 1), DecimalGen(10, 5)], ids=idfn)
@pytest.mark.parametrize('lhs', [byte_gen, short_gen, int_gen, long_gen, DecimalGen(5, 3), DecimalGen(4, 2), DecimalGen(1, -2), DecimalGen(16, 1)], ids=idfn)
def test_division_mixed(lhs, rhs):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : two_col_df(spark, lhs, rhs).select(
f.col('a'), f.col('b'),
f.col('a') / f.col('b')))
# Spark has some problems with certain decimal operations where it can try to generate a type that is invalid (scale > precision), which results in an error
# instead of increasing the precision. So we have a second test that deals with a few of these use cases.
@pytest.mark.parametrize('rhs', [DecimalGen(30, 10), DecimalGen(28, 18)], ids=idfn)
@pytest.mark.parametrize('lhs', [DecimalGen(27, 7), DecimalGen(20, -3)], ids=idfn)
def test_division_mixed_larger_dec(lhs, rhs):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : two_col_df(spark, lhs, rhs).select(
f.col('a'), f.col('b'),
f.col('a') / f.col('b')))
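# For reading the division tests above it helps to have the result-type rule Spark documents for
# decimal division close at hand: scale = max(6, s1 + p2 + 1) and precision = p1 - s1 + s2 + scale.
# With a sufficiently negative scale on one operand the term (p1 - s1 + s2) goes negative and the
# derived precision drops below the derived scale, which is the invalid (scale > precision)
# situation mentioned in the comment above. The helper below is only an illustration of that rule
# and ignores Spark's later precision-capping adjustments; it is not used by any test.
def _approx_spark_div_result_type(p1, s1, p2, s2):
    scale = max(6, s1 + p2 + 1)
    precision = p1 - s1 + s2 + scale
    return precision, scale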
def test_special_decimal_division():
for precision in range(1, 39):
for scale in range(-3, precision + 1):
print("PRECISION " + str(precision) + " SCALE " + str(scale))
data_gen = DecimalGen(precision, scale)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : two_col_df(spark, data_gen, data_gen).select(
f.col('a') / f.col('b')))
@approximate_float # we should get the perfectly correct answer for floats except when casting a decimal to a float in some corner cases.
@pytest.mark.parametrize('rhs', [float_gen, double_gen], ids=idfn)
@pytest.mark.parametrize('lhs', [DecimalGen(5, 3), DecimalGen(4, 2), DecimalGen(1, -2), DecimalGen(16, 1)], ids=idfn)
def test_float_division_mixed(lhs, rhs):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : two_col_df(spark, lhs, rhs).select(
f.col('a') / f.col('b')),
conf={'spark.rapids.sql.castDecimalToFloat.enabled': 'true'})
@pytest.mark.parametrize('data_gen', integral_gens + [
decimal_gen_32bit, decimal_gen_64bit, _decimal_gen_7_7, _decimal_gen_18_3, _decimal_gen_30_2,
_decimal_gen_36_5, _decimal_gen_38_0], ids=idfn)
def test_int_division(data_gen):
string_type = to_cast_string(data_gen.data_type)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).selectExpr(
'a DIV cast(100 as {})'.format(string_type),
'cast(-12 as {}) DIV b'.format(string_type),
'cast(null as {}) DIV a'.format(string_type),
'b DIV cast(null as {})'.format(string_type),
'a DIV b'))
@pytest.mark.parametrize('lhs', [DecimalGen(6, 5), DecimalGen(5, 4), DecimalGen(3, -2), _decimal_gen_30_2], ids=idfn)
@pytest.mark.parametrize('rhs', [DecimalGen(13, 2), DecimalGen(6, 3), _decimal_gen_38_0,
pytest.param(_decimal_gen_36_neg5, marks=pytest.mark.skipif(not is_before_spark_340() or is_databricks113_or_later(), reason='SPARK-41207'))], ids=idfn)
def test_int_division_mixed(lhs, rhs):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : two_col_df(spark, lhs, rhs).selectExpr(
'a DIV b'))
@pytest.mark.parametrize('data_gen', _arith_data_gens, ids=idfn)
def test_mod(data_gen):
data_type = data_gen.data_type
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).select(
f.col('a') % f.lit(100).cast(data_type),
f.lit(-12).cast(data_type) % f.col('b'),
f.lit(None).cast(data_type) % f.col('a'),
f.col('b') % f.lit(None).cast(data_type),
f.col('a') % f.col('b')))
# pmod currently falls back for Decimal(precision=38)
# https://github.com/NVIDIA/spark-rapids/issues/6336
# only testing numeric_gens because of https://github.com/NVIDIA/spark-rapids/issues/7553
_pmod_gens = numeric_gens
test_pmod_fallback_decimal_gens = [ decimal_gen_32bit, decimal_gen_64bit, _decimal_gen_18_0, decimal_gen_128bit,
_decimal_gen_30_2, _decimal_gen_36_5,
DecimalGen(precision=37, scale=0), DecimalGen(precision=37, scale=10),
_decimal_gen_7_7]
@pytest.mark.parametrize('data_gen', _pmod_gens, ids=idfn)
def test_pmod(data_gen):
string_type = to_cast_string(data_gen.data_type)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).selectExpr(
'pmod(a, cast(100 as {}))'.format(string_type),
'pmod(cast(-12 as {}), b)'.format(string_type),
'pmod(cast(null as {}), a)'.format(string_type),
'pmod(b, cast(null as {}))'.format(string_type),
'pmod(a, b)'))
@allow_non_gpu("ProjectExec", "Pmod")
@pytest.mark.parametrize('data_gen', test_pmod_fallback_decimal_gens + [_decimal_gen_38_0, _decimal_gen_38_10], ids=idfn)
def test_pmod_fallback(data_gen):
string_type = to_cast_string(data_gen.data_type)
assert_gpu_fallback_collect(
lambda spark : binary_op_df(spark, data_gen).selectExpr(
'pmod(a, cast(100 as {}))'.format(string_type),
'pmod(cast(-12 as {}), b)'.format(string_type),
'pmod(cast(null as {}), a)'.format(string_type),
'pmod(b, cast(null as {}))'.format(string_type),
'pmod(a, b)'),
"Pmod")
# test pmod(Long.MinValue, -1) = 0 and Long.MinValue % -1 = 0, should not throw
def test_mod_pmod_long_min_value():
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.createDataFrame([(-9223372036854775808,)], ["a"]).selectExpr(
'pmod(a, -1L)',
'a % -1L'),
ansi_enabled_conf)
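# A plain-Python reference for the semantics exercised above: Spark's % follows Java's remainder
# (truncated division, result takes the sign of the dividend), while pmod folds negative remainders
# back into the non-negative range. This is only a reading aid for the expected results, including
# pmod(Long.MinValue, -1) == 0; it is not the implementation used by Spark or the plugin.
def _java_style_rem(a, n):
    # truncated division; the remainder keeps the sign of the dividend
    q = abs(a) // abs(n)
    if (a < 0) != (n < 0):
        q = -q
    return a - q * n

def _reference_pmod(a, n):
    r = _java_style_rem(a, n)
    return (r + n) % n if r < 0 else r

# e.g. _java_style_rem(-7, 3) == -1 while _reference_pmod(-7, 3) == 2, and both
# _java_style_rem(-2**63, -1) and _reference_pmod(-2**63, -1) are 0.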
# pmod currently falls back for Decimal(precision=38)
# https://github.com/NVIDIA/spark-rapids/issues/6336
@pytest.mark.xfail(reason='Decimals type disabled https://github.com/NVIDIA/spark-rapids/issues/7553')
@pytest.mark.parametrize('data_gen', [decimal_gen_32bit, decimal_gen_64bit, _decimal_gen_18_0,
decimal_gen_128bit, _decimal_gen_30_2, _decimal_gen_36_5], ids=idfn)
@pytest.mark.parametrize('overflow_exp', [
'pmod(a, cast(0 as {}))',
'pmod(cast(-12 as {}), cast(0 as {}))',
'a % (cast(0 as {}))',
'cast(-12 as {}) % cast(0 as {})'], ids=idfn)
def test_mod_pmod_by_zero(data_gen, overflow_exp):
string_type = to_cast_string(data_gen.data_type)
if is_before_spark_320():
exception_str = 'java.lang.ArithmeticException: divide by zero'
elif is_before_spark_330():
exception_str = 'SparkArithmeticException: divide by zero'
elif is_before_spark_340() and not is_databricks113_or_later():
exception_str = 'SparkArithmeticException: Division by zero'
else:
exception_str = 'SparkArithmeticException: [DIVIDE_BY_ZERO] Division by zero'
assert_gpu_and_cpu_error(
lambda spark : unary_op_df(spark, data_gen).selectExpr(
overflow_exp.format(string_type, string_type)).collect(),
ansi_enabled_conf,
exception_str)
def test_cast_neg_to_decimal_err():
# -12 cannot be represented as decimal(7,7)
data_gen = _decimal_gen_7_7
if is_before_spark_322():
exception_content = "Decimal(compact,-120000000,20,0}) cannot be represented as Decimal(7, 7)"
elif is_databricks113_or_later() or not is_before_spark_340():
exception_content = "[NUMERIC_VALUE_OUT_OF_RANGE] -12 cannot be represented as Decimal(7, 7)"
else:
exception_content = "Decimal(compact, -120000000, 20, 0) cannot be represented as Decimal(7, 7)"
if is_before_spark_330() and not is_databricks104_or_later():
exception_type = "java.lang.ArithmeticException: "
elif not is_before_spark_340():
exception_type = "pyspark.errors.exceptions.captured.ArithmeticException: "
else:
exception_type = "org.apache.spark.SparkArithmeticException: "
assert_gpu_and_cpu_error(
lambda spark : unary_op_df(spark, data_gen).selectExpr(
'cast(-12 as {})'.format(to_cast_string(data_gen.data_type))).collect(),
ansi_enabled_conf,
exception_type + exception_content)
@pytest.mark.parametrize('data_gen', _pmod_gens, ids=idfn)
def test_mod_pmod_by_zero_not_ansi(data_gen):
string_type = to_cast_string(data_gen.data_type)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr(
'pmod(a, cast(0 as {}))'.format(string_type),
'pmod(cast(-12 as {}), cast(0 as {}))'.format(string_type, string_type)),
{'spark.sql.ansi.enabled': 'false'})
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr(
'a % (cast(0 as {}))'.format(string_type),
'cast(-12 as {}) % cast(0 as {})'.format(string_type, string_type)),
{'spark.sql.ansi.enabled': 'false'})
@pytest.mark.parametrize('lhs', [byte_gen, short_gen, int_gen, long_gen, DecimalGen(6, 5),
DecimalGen(6, 4), DecimalGen(5, 4), DecimalGen(5, 3), DecimalGen(4, 2), DecimalGen(3, -2),
DecimalGen(16, 7), DecimalGen(19, 0), DecimalGen(30, 10)], ids=idfn)
@pytest.mark.parametrize('rhs', [byte_gen, short_gen, int_gen, long_gen, DecimalGen(6, 3),
DecimalGen(10, -2), DecimalGen(15, 3), DecimalGen(30, 12), DecimalGen(3, -3),
DecimalGen(27, 7), DecimalGen(20, -3)], ids=idfn)
def test_mod_mixed(lhs, rhs):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : two_col_df(spark, lhs, rhs).selectExpr(f"a % b"))
# @pytest.mark.skipif(not is_databricks113_or_later() and not is_spark_340_or_later(), reason="https://github.com/NVIDIA/spark-rapids/issues/8330")
@pytest.mark.parametrize('lhs', [DecimalGen(38,0), DecimalGen(37,2), DecimalGen(38,5), DecimalGen(38,-10), DecimalGen(38,7)], ids=idfn)
@pytest.mark.parametrize('rhs', [DecimalGen(27,7), DecimalGen(30,10), DecimalGen(38,1), DecimalGen(36,0), DecimalGen(28,-7)], ids=idfn)
def test_mod_mixed_decimal128(lhs, rhs):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : two_col_df(spark, lhs, rhs).selectExpr("a", "b", f"a % b"))
# Split into 4 tests to permute https://github.com/NVIDIA/spark-rapids/issues/7553 failures
@pytest.mark.parametrize('lhs', [byte_gen, short_gen, int_gen, long_gen], ids=idfn)
@pytest.mark.parametrize('rhs', [byte_gen, short_gen, int_gen, long_gen], ids=idfn)
def test_pmod_mixed_numeric(lhs, rhs):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : two_col_df(spark, lhs, rhs).selectExpr(f"pmod(a, b)"))
@allow_non_gpu("ProjectExec", "Pmod")
@pytest.mark.parametrize('lhs', [DecimalGen(6, 5), DecimalGen(6, 4), DecimalGen(5, 4), DecimalGen(5, 3),
DecimalGen(4, 2), DecimalGen(3, -2), DecimalGen(16, 7), DecimalGen(19, 0), DecimalGen(30, 10)
], ids=idfn)
@pytest.mark.parametrize('rhs', [byte_gen, short_gen, int_gen, long_gen], ids=idfn)
def test_pmod_mixed_decimal_lhs(lhs, rhs):
assert_gpu_fallback_collect(
lambda spark : two_col_df(spark, lhs, rhs).selectExpr(f"pmod(a, b)"),
"Pmod")
@allow_non_gpu("ProjectExec", "Pmod")
@pytest.mark.parametrize('lhs', [byte_gen, short_gen, int_gen, long_gen], ids=idfn)
@pytest.mark.parametrize('rhs', [DecimalGen(6, 3), DecimalGen(10, -2), DecimalGen(15, 3),
DecimalGen(30, 12), DecimalGen(3, -3), DecimalGen(27, 7), DecimalGen(20, -3)
], ids=idfn)
def test_pmod_mixed_decimal_rhs(lhs, rhs):
assert_gpu_fallback_collect(
lambda spark : two_col_df(spark, lhs, rhs).selectExpr(f"pmod(a, b)"),
"Pmod")
@allow_non_gpu("ProjectExec", "Pmod")
@pytest.mark.parametrize('lhs', [DecimalGen(6, 5), DecimalGen(6, 4), DecimalGen(5, 4), DecimalGen(5, 3),
DecimalGen(4, 2), DecimalGen(3, -2), DecimalGen(16, 7), DecimalGen(19, 0), DecimalGen(30, 10)
], ids=idfn)
@pytest.mark.parametrize('rhs', [DecimalGen(6, 3), DecimalGen(10, -2), DecimalGen(15, 3),
DecimalGen(30, 12), DecimalGen(3, -3), DecimalGen(27, 7), DecimalGen(20, -3)
], ids=idfn)
def test_pmod_mixed_decimal(lhs, rhs):
assert_gpu_fallback_collect(
lambda spark : two_col_df(spark, lhs, rhs).selectExpr(f"pmod(a, b)"),
"Pmod")
@pytest.mark.parametrize('data_gen', double_gens, ids=idfn)
def test_signum(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr('signum(a)'))
@pytest.mark.parametrize('data_gen', _arith_data_gens, ids=idfn)
def test_unary_minus(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr('-a'))
@pytest.mark.parametrize('data_gen', _no_overflow_multiply_gens + [float_gen, double_gen] + _arith_decimal_gens, ids=idfn)
def test_unary_minus_ansi_no_overflow(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr('-a'),
conf=ansi_enabled_conf)
@pytest.mark.parametrize('data_type,value', [
(LongType(), LONG_MIN),
(IntegerType(), INT_MIN),
(ShortType(), SHORT_MIN),
(ByteType(), BYTE_MIN)], ids=idfn)
def test_unary_minus_ansi_overflow(data_type, value):
"""
We don't check the error messages because they are different on CPU and GPU.
CPU: {name of the data type} overflow.
GPU: One or more rows overflow for {name of the operation} operation.
"""
assert_gpu_and_cpu_error(
df_fun=lambda spark: _get_overflow_df(spark, [value], data_type, '-a').collect(),
conf=ansi_enabled_conf,
error_message='java.lang.ArithmeticException' if is_before_spark_330() else \
'org.apache.spark.SparkArithmeticException')
# This just ends up being a pass through. There is no good way to force
# a unary positive into a plan, because it gets optimized out, but this
# verifies that we can handle it.
@pytest.mark.parametrize('data_gen', _arith_data_gens, ids=idfn)
def test_unary_positive(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr('+a'))
@pytest.mark.parametrize('data_gen', _arith_data_gens, ids=idfn)
def test_abs(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr('abs(a)'))
# ANSI is ignored for abs prior to 3.2.0, but still okay to test it a little more.
@pytest.mark.parametrize('data_gen', _no_overflow_multiply_gens + [float_gen, double_gen] + _arith_decimal_gens, ids=idfn)
def test_abs_ansi_no_overflow(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr('abs(a)'),
conf=ansi_enabled_conf)
# Only run this test for Spark v3.2.0 and later to verify abs will
# throw exceptions for overflow when ANSI mode is enabled.
@pytest.mark.skipif(is_before_spark_320(), reason='SPARK-33275')
@pytest.mark.parametrize('data_type,value', [
(LongType(), LONG_MIN),
(IntegerType(), INT_MIN),
(ShortType(), SHORT_MIN),
(ByteType(), BYTE_MIN)], ids=idfn)
def test_abs_ansi_overflow(data_type, value):
"""
We don't check the error messages because they are different on CPU and GPU.
CPU: {name of the data type} overflow.
GPU: One or more rows overflow for abs operation.
"""
assert_gpu_and_cpu_error(
df_fun=lambda spark: _get_overflow_df(spark, [value], data_type, 'abs(a)').collect(),
conf=ansi_enabled_conf,
error_message='java.lang.ArithmeticException' if is_before_spark_330() else \
'org.apache.spark.SparkArithmeticException')
@approximate_float
@pytest.mark.parametrize('data_gen', double_gens, ids=idfn)
def test_asin(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr('asin(a)'))
@pytest.mark.parametrize('data_gen', double_gens, ids=idfn)
def test_sqrt(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr('sqrt(a)'))
@approximate_float
@pytest.mark.parametrize('data_gen', double_gens, ids=idfn)
def test_hypot(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).selectExpr(
'hypot(a, b)',
))
@pytest.mark.parametrize('data_gen', double_n_long_gens + _arith_decimal_gens_no_neg_scale + [DecimalGen(30, 15)], ids=idfn)
def test_floor(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr('floor(a)'))
@pytest.mark.skipif(is_before_spark_330(), reason='scale parameter in Floor function is not supported before Spark 3.3.0')
@pytest.mark.parametrize('data_gen', double_n_long_gens + _arith_decimal_gens_no_neg_scale, ids=idfn)
def test_floor_scale_zero(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr('floor(a, 0)'),
conf={'spark.rapids.sql.castFloatToDecimal.enabled':'true'})
@pytest.mark.skipif(is_before_spark_330(), reason='scale parameter in Floor function is not supported before Spark 3.3.0')
@allow_non_gpu('ProjectExec')
@pytest.mark.parametrize('data_gen', double_n_long_gens + _arith_decimal_gens_no_neg_scale_38_0_overflow, ids=idfn)
def test_floor_scale_nonzero(data_gen):
assert_gpu_fallback_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr('floor(a, -1)'), 'RoundFloor')
@pytest.mark.parametrize('data_gen', double_n_long_gens + _arith_decimal_gens_no_neg_scale + [DecimalGen(30, 15)], ids=idfn)
def test_ceil(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr('ceil(a)'))
@pytest.mark.skipif(is_before_spark_330(), reason='scale parameter in Ceil function is not supported before Spark 3.3.0')
@pytest.mark.parametrize('data_gen', double_n_long_gens + _arith_decimal_gens_no_neg_scale, ids=idfn)
def test_ceil_scale_zero(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr('ceil(a, 0)'),
conf={'spark.rapids.sql.castFloatToDecimal.enabled':'true'})
@pytest.mark.parametrize('data_gen', [_decimal_gen_36_neg5, _decimal_gen_38_neg10], ids=idfn)
def test_floor_ceil_overflow(data_gen):
exception_type = "java.lang.ArithmeticException" if is_before_spark_330() and not is_databricks104_or_later() \
else "SparkArithmeticException"
assert_gpu_and_cpu_error(
lambda spark: unary_op_df(spark, data_gen).selectExpr('floor(a)').collect(),
conf={},
error_message=exception_type)
assert_gpu_and_cpu_error(
lambda spark: unary_op_df(spark, data_gen).selectExpr('ceil(a)').collect(),
conf={},
error_message=exception_type)
@pytest.mark.parametrize('data_gen', double_gens, ids=idfn)
def test_rint(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr('rint(a)'))
@pytest.mark.parametrize('data_gen', int_n_long_gens, ids=idfn)
def test_shift_left(data_gen):
string_type = to_cast_string(data_gen.data_type)
assert_gpu_and_cpu_are_equal_collect(
# The version of shiftLeft exposed to dataFrame does not take a column for num bits
lambda spark : two_col_df(spark, data_gen, IntegerGen()).selectExpr(
'shiftleft(a, cast(12 as INT))',
'shiftleft(cast(-12 as {}), b)'.format(string_type),
'shiftleft(cast(null as {}), b)'.format(string_type),
'shiftleft(a, cast(null as INT))',
'shiftleft(a, b)'))
@pytest.mark.parametrize('data_gen', int_n_long_gens, ids=idfn)
def test_shift_right(data_gen):
string_type = to_cast_string(data_gen.data_type)
assert_gpu_and_cpu_are_equal_collect(
# The version of shiftRight exposed to dataFrame does not take a column for num bits
lambda spark : two_col_df(spark, data_gen, IntegerGen()).selectExpr(
'shiftright(a, cast(12 as INT))',
'shiftright(cast(-12 as {}), b)'.format(string_type),
'shiftright(cast(null as {}), b)'.format(string_type),
'shiftright(a, cast(null as INT))',
'shiftright(a, b)'))
@pytest.mark.parametrize('data_gen', int_n_long_gens, ids=idfn)
def test_shift_right_unsigned(data_gen):
string_type = to_cast_string(data_gen.data_type)
assert_gpu_and_cpu_are_equal_collect(
# The version of shiftRightUnsigned exposed to dataFrame does not take a column for num bits
lambda spark : two_col_df(spark, data_gen, IntegerGen()).selectExpr(
'shiftrightunsigned(a, cast(12 as INT))',
'shiftrightunsigned(cast(-12 as {}), b)'.format(string_type),
'shiftrightunsigned(cast(null as {}), b)'.format(string_type),
'shiftrightunsigned(a, cast(null as INT))',
'shiftrightunsigned(a, b)'))
_arith_data_gens_for_round = numeric_gens + _arith_decimal_gens_no_neg_scale_38_0_overflow + [
decimal_gen_32bit_neg_scale,
DecimalGen(precision=15, scale=-8),
DecimalGen(precision=30, scale=-5),
pytest.param(_decimal_gen_36_neg5, marks=pytest.mark.skipif(
is_spark_330_or_later(), reason='This case overflows in Spark 3.3.0+')),
pytest.param(_decimal_gen_38_neg10, marks=pytest.mark.skipif(
is_spark_330_or_later(), reason='This case overflows in Spark 3.3.0+'))
]
@incompat
@approximate_float
@pytest.mark.parametrize('data_gen', _arith_data_gens_for_round, ids=idfn)
def test_decimal_bround(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, data_gen).selectExpr(
'bround(a)',
'bround(1.234, 2)',
'bround(a, -1)',
'bround(a, 1)',
'bround(a, 2)',
'bround(a, 10)'))
@incompat
@approximate_float
@pytest.mark.parametrize('data_gen', _arith_data_gens_for_round, ids=idfn)
def test_decimal_round(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, data_gen).selectExpr(
'round(a)',
'round(1.234, 2)',
'round(a, -1)',
'round(a, 1)',
'round(a, 2)',
'round(a, 10)'))
@incompat
@approximate_float
@pytest.mark.parametrize('data_gen', [int_gen], ids=idfn)
def test_illegal_args_round(data_gen):
def check_analysis_exception(spark, sql_text):
try:
gen_df(spark, [("a", data_gen), ("b", int_gen)], length=10).selectExpr(sql_text)
raise Exception("round/bround should not plan with invalid arguments %s" % sql_text)
except pyspark.sql.utils.AnalysisException as e:
pass
def doit(spark):
check_analysis_exception(spark, "round(1.2345, b)")
check_analysis_exception(spark, "round(a, b)")
check_analysis_exception(spark, "bround(1.2345, b)")
check_analysis_exception(spark, "bround(a, b)")
with_cpu_session(lambda spark: doit(spark))
with_gpu_session(lambda spark: doit(spark))
@incompat
@approximate_float
def test_non_decimal_round_overflow():
gen = StructGen([('byte_c', byte_gen), ('short_c', short_gen),
('int_c', int_gen), ('long_c', long_gen),
('float_c', float_gen), ('double_c', double_gen)], nullable=False)
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, gen).selectExpr(
'round(byte_c, -2)', 'round(byte_c, -3)',
'round(short_c, -4)', 'round(short_c, -5)',
'round(int_c, -9)', 'round(int_c, -10)',
'round(long_c, -19)', 'round(long_c, -20)',
'round(float_c, -38)', 'round(float_c, -39)',
'round(float_c, 38)', 'round(float_c, 39)',
'round(double_c, -308)', 'round(double_c, -309)',
'round(double_c, 308)', 'round(double_c, 309)',
'bround(byte_c, -2)', 'bround(byte_c, -3)',
'bround(short_c, -4)', 'bround(short_c, -5)',
'bround(int_c, -9)', 'bround(int_c, -10)',
'bround(long_c, -19)', 'bround(long_c, -20)',
'bround(float_c, -38)', 'bround(float_c, -39)',
'bround(float_c, 38)', 'bround(float_c, 39)',
'bround(double_c, -308)', 'bround(double_c, -309)',
'bround(double_c, 308)', 'bround(double_c, 309)'))
@approximate_float
@pytest.mark.parametrize('data_gen', double_gens, ids=idfn)
def test_cbrt(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr('cbrt(a)'))
@pytest.mark.parametrize('data_gen', integral_gens, ids=idfn)
def test_bit_and(data_gen):
string_type = to_cast_string(data_gen.data_type)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).selectExpr(
'a & cast(100 as {})'.format(string_type),
'cast(-12 as {}) & b'.format(string_type),
'cast(null as {}) & a'.format(string_type),
'b & cast(null as {})'.format(string_type),
'a & b'))
@pytest.mark.parametrize('data_gen', integral_gens, ids=idfn)
def test_bit_or(data_gen):
string_type = to_cast_string(data_gen.data_type)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).selectExpr(
'a | cast(100 as {})'.format(string_type),
'cast(-12 as {}) | b'.format(string_type),
'cast(null as {}) | a'.format(string_type),
'b | cast(null as {})'.format(string_type),
'a | b'))
@pytest.mark.parametrize('data_gen', integral_gens, ids=idfn)
def test_bit_xor(data_gen):
string_type = to_cast_string(data_gen.data_type)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).selectExpr(
'a ^ cast(100 as {})'.format(string_type),
'cast(-12 as {}) ^ b'.format(string_type),
'cast(null as {}) ^ a'.format(string_type),
'b ^ cast(null as {})'.format(string_type),
'a ^ b'))
@pytest.mark.parametrize('data_gen', integral_gens, ids=idfn)
def test_bit_not(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr('~a'))
@approximate_float
@pytest.mark.parametrize('data_gen', double_gens, ids=idfn)
def test_radians(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr('radians(a)'))
# Spark's degrees will overflow on large values in jdk 8 or below
@approximate_float
@pytest.mark.skipif(get_java_major_version() <= 8, reason="requires jdk 9 or higher")
@pytest.mark.parametrize('data_gen', double_gens, ids=idfn)
def test_degrees(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr('degrees(a)'))
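# The skip above is commonly attributed to a change in Math.toDegrees: older JDKs are described as
# multiplying by 180.0 before dividing by pi, so the intermediate product can overflow to infinity
# even when the final result is representable, while newer JDKs fold the constant first. The sketch
# below illustrates that failure mode in Python; it is an assumption about the JDK internals, not
# code taken from this project or the plugin.
import math

def _to_degrees_multiply_then_divide(angrad):
    # the intermediate (angrad * 180.0) can overflow to inf for huge inputs
    return angrad * 180.0 / math.pi

def _to_degrees_folded_constant(angrad):
    # folding the constant keeps the intermediate ~57x the input instead of 180x
    return angrad * (180.0 / math.pi)

# For example, with x = 2e306 the first form returns inf while the second stays finite (~1.15e308).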
@approximate_float
@pytest.mark.parametrize('data_gen', [float_gen], ids=idfn)
def test_degrees_small(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr('degrees(a)'))
@approximate_float
@pytest.mark.parametrize('data_gen', double_gens, ids=idfn)
def test_cos(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr('cos(a)'))
@approximate_float
@pytest.mark.parametrize('data_gen', double_gens, ids=idfn)
def test_acos(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr('acos(a)'))
@approximate_float
@pytest.mark.parametrize('data_gen', double_gens, ids=idfn)
def test_cosh(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr('cosh(a)'))
@approximate_float
@pytest.mark.parametrize('data_gen', double_gens, ids=idfn)
def test_acosh(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr('acosh(a)'))
# The default approximate tolerance is 1e-6, or 1 in a million;
# in some cases we need to adjust this because the algorithm is different.
@approximate_float(rel=1e-4, abs=1e-12)
# Because Spark will overflow on large exponents, drop to something well below
# where it fails; note this is a binary exponent, not base 10.
@pytest.mark.parametrize('data_gen', [DoubleGen(min_exp=-20, max_exp=20)], ids=idfn)
def test_columnar_acosh_improved(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr('acosh(a)'),
{'spark.rapids.sql.improvedFloatOps.enabled': 'true'})
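# The relaxed tolerance and the capped binary exponent above are easier to read next to the
# textbook identity acosh(x) = ln(x + sqrt(x*x - 1)): squaring x overflows a double long before
# acosh(x) itself would, so the generator stays far below that point. This helper is just the
# identity for reference; it is not claimed to be what Spark or the plugin actually executes.
import math

def _acosh_via_log_identity(x):
    # valid for x >= 1; x * x overflows to inf around x ~ 1.3e154, even though
    # acosh(x) itself would still be a small, representable number there
    return math.log(x + math.sqrt(x * x - 1.0))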
@approximate_float
@pytest.mark.parametrize('data_gen', double_gens, ids=idfn)
def test_sin(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr('sin(a)'))
@approximate_float
@pytest.mark.parametrize('data_gen', double_gens, ids=idfn)
def test_sinh(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr('sinh(a)'))
@approximate_float
@pytest.mark.parametrize('data_gen', double_gens, ids=idfn)
def test_asin(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr('asin(a)'))
@approximate_float
@pytest.mark.parametrize('data_gen', double_gens, ids=idfn)
def test_asinh(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr('asinh(a)'))
# The default approximate tolerance is 1e-6, or 1 in a million;
# in some cases we need to adjust this because the algorithm is different.
@approximate_float(rel=1e-4, abs=1e-12)
# Because Spark will overflow on large exponents, drop to something well below
# where it fails; note this is a binary exponent, not base 10.
@pytest.mark.parametrize('data_gen', [DoubleGen(min_exp=-20, max_exp=20)], ids=idfn)
def test_columnar_asinh_improved(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr('asinh(a)'),
{'spark.rapids.sql.improvedFloatOps.enabled': 'true'})
@approximate_float
@pytest.mark.parametrize('data_gen', double_gens, ids=idfn)
def test_tan(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr('tan(a)'))
@approximate_float
@pytest.mark.parametrize('data_gen', double_gens, ids=idfn)
def test_atan(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr('atan(a)'))
@approximate_float
@pytest.mark.parametrize('data_gen', double_gens, ids=idfn)
def test_atanh(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr('atanh(a)'))
@approximate_float
@pytest.mark.parametrize('data_gen', double_gens, ids=idfn)
def test_tanh(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr('tanh(a)'))
@approximate_float
@pytest.mark.parametrize('data_gen', double_gens, ids=idfn)
def test_cot(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr('cot(a)'))
@approximate_float
@pytest.mark.parametrize('data_gen', double_gens, ids=idfn)
def test_exp(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr('exp(a)'))
@approximate_float
@pytest.mark.parametrize('data_gen', double_gens, ids=idfn)
def test_expm1(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr('expm1(a)'))
@approximate_float
@pytest.mark.parametrize('data_gen', double_gens, ids=idfn)
def test_log(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr('log(a)'))
@approximate_float
@pytest.mark.parametrize('data_gen', double_gens, ids=idfn)
def test_log1p(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr('log1p(a)'))
@approximate_float
@pytest.mark.parametrize('data_gen', double_gens, ids=idfn)
def test_log2(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr('log2(a)'))
@approximate_float
@pytest.mark.parametrize('data_gen', double_gens, ids=idfn)
def test_log10(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr('log10(a)'))
@approximate_float
@pytest.mark.xfail(reason='https://github.com/NVIDIA/spark-rapids/issues/89')
def test_logarithm():
# For the 'b' field include a lot more values that we would expect customers to use as a part of a log
data_gen = [('a', DoubleGen()),('b', DoubleGen().with_special_case(lambda rand: float(rand.randint(-16, 16)), weight=100.0))]
string_type = 'DOUBLE'
assert_gpu_and_cpu_are_equal_collect(
lambda spark : gen_df(spark, data_gen).selectExpr(
'log(a, cast(100 as {}))'.format(string_type),
'log(cast(-12 as {}), b)'.format(string_type),
'log(cast(null as {}), b)'.format(string_type),
'log(a, cast(null as {}))'.format(string_type),
'log(a, b)'))
@approximate_float
def test_scalar_pow():
# For the 'b' field include a lot more values that we would expect customers to use as a part of a pow
data_gen = [('a', DoubleGen()),('b', DoubleGen().with_special_case(lambda rand: float(rand.randint(-16, 16)), weight=100.0))]
string_type = 'DOUBLE'
assert_gpu_and_cpu_are_equal_collect(
lambda spark : gen_df(spark, data_gen).selectExpr(
'pow(a, cast(7 as {}))'.format(string_type),
'pow(cast(-12 as {}), b)'.format(string_type),
'pow(cast(null as {}), a)'.format(string_type),
'pow(b, cast(null as {}))'.format(string_type)))
@approximate_float
@pytest.mark.xfail(reason='https://github.com/NVIDIA/spark-rapids/issues/89')
@pytest.mark.parametrize('data_gen', double_gens, ids=idfn)
def test_columnar_pow(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).selectExpr('pow(a, b)'))
@pytest.mark.parametrize('data_gen', all_basic_gens + _arith_decimal_gens, ids=idfn)
def test_least(data_gen):
num_cols = 20
s1 = gen_scalar(data_gen, force_no_nulls=not isinstance(data_gen, NullGen))
# we want lots of nulls
gen = StructGen([('_c' + str(x), data_gen.copy_special_case(None, weight=100.0))
for x in range(0, num_cols)], nullable=False)
command_args = [f.col('_c' + str(x)) for x in range(0, num_cols)]
command_args.append(s1)
data_type = data_gen.data_type
assert_gpu_and_cpu_are_equal_collect(
lambda spark : gen_df(spark, gen).select(
f.least(*command_args)))
@pytest.mark.parametrize('data_gen', all_basic_gens + _arith_decimal_gens, ids=idfn)
def test_greatest(data_gen):
num_cols = 20
s1 = gen_scalar(data_gen, force_no_nulls=not isinstance(data_gen, NullGen))
# we want lots of nulls
gen = StructGen([('_c' + str(x), data_gen.copy_special_case(None, weight=100.0))
for x in range(0, num_cols)], nullable=False)
command_args = [f.col('_c' + str(x)) for x in range(0, num_cols)]
command_args.append(s1)
data_type = data_gen.data_type
assert_gpu_and_cpu_are_equal_collect(
lambda spark : gen_df(spark, gen).select(
f.greatest(*command_args)))
def _test_div_by_zero(ansi_mode, expr, is_lit=False):
ansi_conf = {'spark.sql.ansi.enabled': ansi_mode == 'ansi'}
data_gen = lambda spark: two_col_df(spark, IntegerGen(), IntegerGen(min_val=0, max_val=0), length=1)
div_by_zero_func = lambda spark: data_gen(spark).selectExpr(expr)
if is_before_spark_320():
err_message = 'java.lang.ArithmeticException: divide by zero'
elif is_before_spark_330():
err_message = 'SparkArithmeticException: divide by zero'
elif is_before_spark_340() and not is_databricks113_or_later():
err_message = 'SparkArithmeticException: Division by zero'
else:
exception_type = 'SparkArithmeticException: ' \
if not is_lit else "pyspark.errors.exceptions.captured.ArithmeticException: "
err_message = exception_type + "[DIVIDE_BY_ZERO] Division by zero"
if ansi_mode == 'ansi':
assert_gpu_and_cpu_error(df_fun=lambda spark: div_by_zero_func(spark).collect(),
conf=ansi_conf,
error_message=err_message)
else:
assert_gpu_and_cpu_are_equal_collect(div_by_zero_func, ansi_conf)
@pytest.mark.parametrize('expr', ['a/0', 'a/b'])
@pytest.mark.parametrize('ansi', [True, False])
def test_div_by_zero(expr, ansi):
_test_div_by_zero(ansi_mode=ansi, expr=expr)
# We want to test literals separately from expressions because Spark 3.4 throws different exceptions
@pytest.mark.parametrize('ansi', [True, False])
def test_div_by_zero_literal(ansi):
_test_div_by_zero(ansi_mode=ansi, expr='1/0', is_lit=True)
def _get_div_overflow_df(spark, expr):
return spark.createDataFrame(
[(LONG_MIN, -1)],
['a', 'b']
).selectExpr(expr)
def _div_overflow_exception_when(expr, ansi_enabled, is_lit=False):
ansi_conf = {'spark.sql.ansi.enabled': ansi_enabled}
err_exp = 'java.lang.ArithmeticException' if is_before_spark_330() else \
'org.apache.spark.SparkArithmeticException' \
if not is_lit or not is_spark_340_or_later() else "pyspark.errors.exceptions.captured.ArithmeticException"
err_mess = ': Overflow in integral divide' \
if is_before_spark_340() and not is_databricks113_or_later() else \
': [ARITHMETIC_OVERFLOW] Overflow in integral divide'
if ansi_enabled:
assert_gpu_and_cpu_error(
df_fun=lambda spark: _get_div_overflow_df(spark, expr).collect(),
conf=ansi_conf,
error_message=err_exp + err_mess)
else:
assert_gpu_and_cpu_are_equal_collect(
func=lambda spark: _get_div_overflow_df(spark, expr),
conf=ansi_conf)
# Only run this test for Spark v3.2.0 and later to verify IntegralDivide will
# throw exceptions for overflow when ANSI mode is enabled.
@pytest.mark.skipif(is_before_spark_320(), reason='https://github.com/apache/spark/pull/32260')
@pytest.mark.parametrize('expr', ['a DIV CAST(-1 AS INT)', 'a DIV b'])
@pytest.mark.parametrize('ansi_enabled', [False, True])
def test_div_overflow_exception_when_ansi(expr, ansi_enabled):
_div_overflow_exception_when(expr, ansi_enabled)
# Only run this test for Spark v3.2.0 and later to verify IntegralDivide will
# throw exceptions for overflow when ANSI mode is enabled.
# We have split this test from test_div_overflow_exception_when_ansi because Spark 3.4
# throws a different exception for literals
@pytest.mark.skipif(is_before_spark_320(), reason='https://github.com/apache/spark/pull/32260')
@pytest.mark.parametrize('expr', ['CAST(-9223372036854775808L as LONG) DIV -1'])
@pytest.mark.parametrize('ansi_enabled', [False, True])
def test_div_overflow_exception_when_ansi_literal(expr, ansi_enabled):
_div_overflow_exception_when(expr, ansi_enabled, is_lit=True)
# Only run this test before Spark v3.2.0 to verify IntegralDivide will NOT
# throw exceptions for overflow even ANSI mode is enabled.
@pytest.mark.skipif(not is_before_spark_320(), reason='https://github.com/apache/spark/pull/32260')
@pytest.mark.parametrize('expr', ['CAST(-9223372036854775808L as LONG) DIV -1', 'a DIV CAST(-1 AS INT)', 'a DIV b'])
@pytest.mark.parametrize('ansi_enabled', ['false', 'true'])
def test_div_overflow_no_exception_when_ansi(expr, ansi_enabled):
assert_gpu_and_cpu_are_equal_collect(
func=lambda spark: _get_div_overflow_df(spark, expr),
conf={'spark.sql.ansi.enabled': ansi_enabled})
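# Why Long.MinValue and -1 is the interesting pair here: in 64-bit two's complement the magnitude of
# Long.MinValue is one larger than Long.MaxValue, so LONG_MIN DIV -1 has no representable result; a
# wrapping divide silently yields LONG_MIN again, which ANSI mode turns into an exception. The helper
# below reproduces the wrap-around with plain Python integers as an illustration of the arithmetic,
# not of Spark's implementation.
def _wrap_to_int64(v):
    # emulate 64-bit two's-complement wrap-around
    v &= (1 << 64) - 1
    return v - (1 << 64) if v >= (1 << 63) else v

# _wrap_to_int64((-2**63) // -1) == -2**63, i.e. the unchecked divide "returns" Long.MinValue,
# while ANSI mode raises ARITHMETIC_OVERFLOW instead.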
_data_type_expr_for_add_overflow = [
([127], ByteType(), 'a + 1Y'),
([-128], ByteType(), '-1Y + a'),
([32767], ShortType(), 'a + 1S'),
([-32768], ShortType(), '-1S + a'),
([2147483647], IntegerType(), 'a + 1'),
([-2147483648], IntegerType(), '-1 + a'),
([9223372036854775807], LongType(), 'a + 1L'),
([-9223372036854775808], LongType(), '-1L + a'),
([3.4028235E38], FloatType(), 'a + a'),
([-3.4028235E38], FloatType(), 'a + a'),
([1.7976931348623157E308], DoubleType(), 'a + a'),
([-1.7976931348623157E308], DoubleType(), 'a + a'),
([Decimal('-' + '9' * 38)], DecimalType(38,0), 'a + -1'),
([Decimal('-' + '9' * 38)], DecimalType(38,0), 'a + a'),
([Decimal('9' * 38)], DecimalType(38,0), 'a + 1'),
    ([Decimal('9' * 38)], DecimalType(38,0), 'a + a')]
@pytest.mark.parametrize('data,tp,expr', _data_type_expr_for_add_overflow, ids=idfn)
def test_add_overflow_with_ansi_enabled(data, tp, expr):
if isinstance(tp, IntegralType):
assert_gpu_and_cpu_error(
lambda spark: _get_overflow_df(spark, data, tp, expr).collect(),
conf=ansi_enabled_conf,
error_message='java.lang.ArithmeticException' if is_before_spark_330() else 'SparkArithmeticException')
elif isinstance(tp, DecimalType):
assert_gpu_and_cpu_error(
lambda spark: _get_overflow_df(spark, data, tp, expr).collect(),
conf=ansi_enabled_conf,
error_message='')
else:
assert_gpu_and_cpu_are_equal_collect(
func=lambda spark: _get_overflow_df(spark, data, tp, expr),
conf=ansi_enabled_conf)
_data_type_expr_for_sub_overflow = [
([-128], ByteType(), 'a - 1Y'),
([-32768], ShortType(), 'a -1S'),
([-2147483648], IntegerType(), 'a - 1'),
([-9223372036854775808], LongType(), 'a - 1L'),
([-3.4028235E38], FloatType(), 'a - cast(1.0 as float)'),
([-1.7976931348623157E308], DoubleType(), 'a - 1.0'),
([Decimal('-' + '9' * 38)], DecimalType(38,0), 'a - 1'),
([Decimal('-' + '9' * 38)], DecimalType(38,0), 'a - (-a)')
]
@pytest.mark.parametrize('data,tp,expr', _data_type_expr_for_sub_overflow, ids=idfn)
def test_subtraction_overflow_with_ansi_enabled(data, tp, expr):
if isinstance(tp, IntegralType):
assert_gpu_and_cpu_error(
lambda spark: _get_overflow_df(spark, data, tp, expr).collect(),
conf=ansi_enabled_conf,
error_message='java.lang.ArithmeticException' if is_before_spark_330() else 'SparkArithmeticException')
elif isinstance(tp, DecimalType):
assert_gpu_and_cpu_error(
lambda spark: _get_overflow_df(spark, data, tp, expr).collect(),
conf=ansi_enabled_conf,
error_message='')
else:
assert_gpu_and_cpu_are_equal_collect(
func=lambda spark: _get_overflow_df(spark, data, tp, expr),
conf=ansi_enabled_conf)
@pytest.mark.skipif(is_before_spark_330(), reason='DayTimeInterval is not supported before Pyspark 3.3.0')
@pytest.mark.parametrize('ansi_enabled', ['false', 'true'])
def test_unary_minus_day_time_interval(ansi_enabled):
DAY_TIME_GEN_NO_OVER_FLOW = DayTimeIntervalGen(min_value=timedelta(days=-2000*365), max_value=timedelta(days=3000*365))
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, DAY_TIME_GEN_NO_OVER_FLOW).selectExpr('-a'),
conf={'spark.sql.ansi.enabled': ansi_enabled})
@pytest.mark.skipif(is_before_spark_330(), reason='DayTimeInterval is not supported before Pyspark 3.3.0')
@pytest.mark.parametrize('ansi_enabled', ['false', 'true'])
def test_unary_minus_ansi_overflow_day_time_interval(ansi_enabled):
"""
We don't check the error messages because they are different on CPU and GPU.
CPU: long overflow.
GPU: One or more rows overflow for minus operation.
"""
assert_gpu_and_cpu_error(
df_fun=lambda spark: _get_overflow_df(spark, [timedelta(microseconds=LONG_MIN)], DayTimeIntervalType(), '-a').collect(),
conf={'spark.sql.ansi.enabled': ansi_enabled},
error_message='SparkArithmeticException')
@pytest.mark.skipif(is_before_spark_330(), reason='DayTimeInterval is not supported before Pyspark 3.3.0')
@pytest.mark.parametrize('ansi_enabled', ['false', 'true'])
def test_abs_ansi_no_overflow_day_time_interval(ansi_enabled):
DAY_TIME_GEN_NO_OVER_FLOW = DayTimeIntervalGen(min_value=timedelta(days=-2000*365), max_value=timedelta(days=3000*365))
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, DAY_TIME_GEN_NO_OVER_FLOW).selectExpr('abs(a)'),
conf={'spark.sql.ansi.enabled': ansi_enabled})
@pytest.mark.skipif(is_before_spark_330(), reason='DayTimeInterval is not supported before Pyspark 3.3.0')
@pytest.mark.parametrize('ansi_enabled', ['false', 'true'])
def test_abs_ansi_overflow_day_time_interval(ansi_enabled):
"""
Check the error message only when ANSI mode is false because they are different on CPU and GPU.
CPU: long overflow.
GPU: One or more rows overflow for abs operation.
"""
assert_gpu_and_cpu_error(
df_fun=lambda spark: _get_overflow_df(spark, [timedelta(microseconds=LONG_MIN)], DayTimeIntervalType(), 'abs(a)').collect(),
conf={'spark.sql.ansi.enabled': ansi_enabled},
error_message='' if ansi_enabled else 'SparkArithmeticException')
@pytest.mark.skipif(is_before_spark_330(), reason='DayTimeInterval is not supported before Pyspark 3.3.0')
@pytest.mark.parametrize('ansi_enabled', ['false', 'true'])
def test_addition_day_time_interval(ansi_enabled):
DAY_TIME_GEN_NO_OVER_FLOW = DayTimeIntervalGen(min_value=timedelta(days=-2000*365), max_value=timedelta(days=3000*365))
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, DAY_TIME_GEN_NO_OVER_FLOW, DAY_TIME_GEN_NO_OVER_FLOW).select(
f.col('a') + f.col('b')),
conf={'spark.sql.ansi.enabled': ansi_enabled})
@pytest.mark.skipif(is_before_spark_330(), reason='DayTimeInterval is not supported before Pyspark 3.3.0')
@pytest.mark.parametrize('ansi_enabled', ['false', 'true'])
def test_add_overflow_with_ansi_enabled_day_time_interval(ansi_enabled):
assert_gpu_and_cpu_error(
df_fun=lambda spark: spark.createDataFrame(
SparkContext.getOrCreate().parallelize([(timedelta(microseconds=LONG_MAX), timedelta(microseconds=10)),]),
StructType([StructField('a', DayTimeIntervalType()), StructField('b', DayTimeIntervalType())])
).selectExpr('a + b').collect(),
conf={'spark.sql.ansi.enabled': ansi_enabled},
error_message='SparkArithmeticException')
@pytest.mark.skipif(is_before_spark_330(), reason='DayTimeInterval is not supported before Pyspark 3.3.0')
@pytest.mark.parametrize('ansi_enabled', ['false', 'true'])
def test_subtraction_day_time_interval(ansi_enabled):
DAY_TIME_GEN_NO_OVER_FLOW = DayTimeIntervalGen(min_value=timedelta(days=-2000*365), max_value=timedelta(days=3000*365))
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, DAY_TIME_GEN_NO_OVER_FLOW, DAY_TIME_GEN_NO_OVER_FLOW).select(
f.col('a') - f.col('b')),
conf={'spark.sql.ansi.enabled': ansi_enabled})
@pytest.mark.skipif(is_before_spark_330(), reason='DayTimeInterval is not supported before Pyspark 3.3.0')
@pytest.mark.parametrize('ansi_enabled', ['false', 'true'])
def test_subtraction_overflow_with_ansi_enabled_day_time_interval(ansi_enabled):
assert_gpu_and_cpu_error(
df_fun=lambda spark: spark.createDataFrame(
SparkContext.getOrCreate().parallelize([(timedelta(microseconds=LONG_MIN), timedelta(microseconds=10)),]),
StructType([StructField('a', DayTimeIntervalType()), StructField('b', DayTimeIntervalType())])
).selectExpr('a - b').collect(),
conf={'spark.sql.ansi.enabled': ansi_enabled},
error_message='SparkArithmeticException')
@pytest.mark.skipif(is_before_spark_330(), reason='DayTimeInterval is not supported before Pyspark 3.3.0')
def test_unary_positive_day_time_interval():
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, DayTimeIntervalGen()).selectExpr('+a'))
@pytest.mark.skipif(is_before_spark_330(), reason='DayTimeInterval is not supported before Pyspark 3.3.0')
@pytest.mark.parametrize('data_gen', _no_overflow_multiply_gens_for_fallback + [DoubleGen(min_exp=-3, max_exp=5, special_cases=[0.0])], ids=idfn)
def test_day_time_interval_multiply_number(data_gen):
gen_list = [('_c1', DayTimeIntervalGen(min_value=timedelta(seconds=-20 * 86400), max_value=timedelta(seconds=20 * 86400))),
('_c2', data_gen)]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, gen_list).selectExpr("_c1 * _c2"))
@pytest.mark.skipif(is_before_spark_330(), reason='DayTimeInterval is not supported before Pyspark 3.3.0')
@pytest.mark.parametrize('data_gen', _no_overflow_multiply_gens_for_fallback + [DoubleGen(min_exp=0, max_exp=5, special_cases=[])], ids=idfn)
def test_day_time_interval_division_number_no_overflow1(data_gen):
gen_list = [('_c1', DayTimeIntervalGen(min_value=timedelta(seconds=-5000 * 365 * 86400), max_value=timedelta(seconds=5000 * 365 * 86400))),
('_c2', data_gen)]
assert_gpu_and_cpu_are_equal_collect(
# avoid dividing by 0
lambda spark: gen_df(spark, gen_list).selectExpr("_c1 / case when _c2 = 0 then cast(1 as {}) else _c2 end".format(to_cast_string(data_gen.data_type))))
@pytest.mark.skipif(is_before_spark_330(), reason='DayTimeInterval is not supported before Pyspark 3.3.0')
@pytest.mark.parametrize('data_gen', _no_overflow_multiply_gens_for_fallback + [DoubleGen(min_exp=-5, max_exp=0, special_cases=[])], ids=idfn)
def test_day_time_interval_division_number_no_overflow2(data_gen):
gen_list = [('_c1', DayTimeIntervalGen(min_value=timedelta(seconds=-20 * 86400), max_value=timedelta(seconds=20 * 86400))),
('_c2', data_gen)]
assert_gpu_and_cpu_are_equal_collect(
# avoid dividing by 0
lambda spark: gen_df(spark, gen_list).selectExpr("_c1 / case when _c2 = 0 then cast(1 as {}) else _c2 end".format(to_cast_string(data_gen.data_type))))
def _get_overflow_df_1col(spark, data_type, value, expr):
return spark.createDataFrame(
SparkContext.getOrCreate().parallelize([value]),
StructType([
StructField('a', data_type)
])
).selectExpr(expr)
def _get_overflow_df_2cols(spark, data_types, values, expr):
return spark.createDataFrame(
SparkContext.getOrCreate().parallelize([values]),
StructType([
StructField('a', data_types[0]),
StructField('b', data_types[1])
])
).selectExpr(expr)
# test interval division overflow, such as interval / 0, Long.MinValue / -1 ...
@pytest.mark.skipif(is_before_spark_330(), reason='DayTimeInterval is not supported before Pyspark 3.3.0')
@pytest.mark.parametrize('data_type,value_pair', [
(LongType(), [MIN_DAY_TIME_INTERVAL, -1]),
(IntegerType(), [timedelta(microseconds=LONG_MIN), -1])
], ids=idfn)
def test_day_time_interval_division_overflow(data_type, value_pair):
exception_message = "SparkArithmeticException: Overflow in integral divide." \
if is_before_spark_340() and not is_databricks113_or_later() else \
"SparkArithmeticException: [ARITHMETIC_OVERFLOW] Overflow in integral divide."
assert_gpu_and_cpu_error(
df_fun=lambda spark: _get_overflow_df_2cols(spark, [DayTimeIntervalType(), data_type], value_pair, 'a / b').collect(),
conf={},
error_message=exception_message)
@pytest.mark.skipif(is_before_spark_330(), reason='DayTimeInterval is not supported before Pyspark 3.3.0')
@pytest.mark.parametrize('data_type,value_pair', [
(FloatType(), [MAX_DAY_TIME_INTERVAL, 0.1]),
(DoubleType(), [MAX_DAY_TIME_INTERVAL, 0.1]),
(FloatType(), [MIN_DAY_TIME_INTERVAL, 0.1]),
(DoubleType(), [MIN_DAY_TIME_INTERVAL, 0.1]),
], ids=idfn)
def test_day_time_interval_division_round_overflow(data_type, value_pair):
assert_gpu_and_cpu_error(
df_fun=lambda spark: _get_overflow_df_2cols(spark, [DayTimeIntervalType(), data_type], value_pair, 'a / b').collect(),
conf={},
error_message='java.lang.ArithmeticException')
@pytest.mark.skipif(is_before_spark_330(), reason='DayTimeInterval is not supported before Pyspark 3.3.0')
@pytest.mark.parametrize('data_type,value_pair', [
(ByteType(), [timedelta(seconds=1), 0]),
(ShortType(), [timedelta(seconds=1), 0]),
(IntegerType(), [timedelta(seconds=1), 0]),
(LongType(), [timedelta(seconds=1), 0]),
(FloatType(), [timedelta(seconds=1), 0.0]),
(FloatType(), [timedelta(seconds=1), -0.0]),
(DoubleType(), [timedelta(seconds=1), 0.0]),
(DoubleType(), [timedelta(seconds=1), -0.0]),
(FloatType(), [timedelta(seconds=0), 0.0]), # 0 / 0 = NaN
(DoubleType(), [timedelta(seconds=0), 0.0]), # 0 / 0 = NaN
], ids=idfn)
def test_day_time_interval_divided_by_zero(data_type, value_pair):
exception_message = "SparkArithmeticException: Division by zero." \
if is_before_spark_340() and not is_databricks113_or_later() else \
"SparkArithmeticException: [INTERVAL_DIVIDED_BY_ZERO] Division by zero"
assert_gpu_and_cpu_error(
df_fun=lambda spark: _get_overflow_df_2cols(spark, [DayTimeIntervalType(), data_type], value_pair, 'a / b').collect(),
conf={},
error_message=exception_message)
@pytest.mark.skipif(is_before_spark_330(), reason='DayTimeInterval is not supported before Pyspark 3.3.0')
@pytest.mark.parametrize('zero_literal', ['0', '0.0f', '-0.0f'], ids=idfn)
def test_day_time_interval_divided_by_zero_scalar(zero_literal):
exception_message = "SparkArithmeticException: Division by zero." \
if is_before_spark_340() and not is_databricks113_or_later() else \
"SparkArithmeticException: [INTERVAL_DIVIDED_BY_ZERO] Division by zero."
assert_gpu_and_cpu_error(
df_fun=lambda spark: _get_overflow_df_1col(spark, DayTimeIntervalType(), [timedelta(seconds=1)], 'a / ' + zero_literal).collect(),
conf={},
error_message=exception_message)
@pytest.mark.skipif(is_before_spark_330(), reason='DayTimeInterval is not supported before Pyspark 3.3.0')
@pytest.mark.parametrize('data_type,value', [
(ByteType(), 0),
(ShortType(), 0),
(IntegerType(), 0),
(LongType(), 0),
(FloatType(), 0.0),
(FloatType(), -0.0),
(DoubleType(), 0.0),
(DoubleType(), -0.0),
], ids=idfn)
def test_day_time_interval_scalar_divided_by_zero(data_type, value):
exception_message = "SparkArithmeticException: Division by zero." \
if is_before_spark_340() and not is_databricks113_or_later() else \
"SparkArithmeticException: [INTERVAL_DIVIDED_BY_ZERO] Division by zero."
assert_gpu_and_cpu_error(
df_fun=lambda spark: _get_overflow_df_1col(spark, data_type, [value], 'INTERVAL 1 SECOND / a').collect(),
conf={},
error_message=exception_message)
@pytest.mark.skipif(is_before_spark_330(), reason='DayTimeInterval is not supported before Pyspark 3.3.0')
@pytest.mark.parametrize('data_type,value_pair', [
(FloatType(), [timedelta(seconds=1), float('NaN')]),
(DoubleType(), [timedelta(seconds=1), float('NaN')]),
], ids=idfn)
def test_day_time_interval_division_nan(data_type, value_pair):
assert_gpu_and_cpu_error(
df_fun=lambda spark: _get_overflow_df_2cols(spark, [DayTimeIntervalType(), data_type], value_pair, 'a / b').collect(),
conf={},
error_message='java.lang.ArithmeticException')
| spark-rapids-branch-23.10 | integration_tests/src/main/python/arithmetic_ops_test.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Tests based on the Parquet dataset available at
# https://github.com/apache/parquet-testing
from asserts import assert_gpu_and_cpu_are_equal_collect, assert_gpu_and_cpu_error
from conftest import get_std_input_path, is_parquet_testing_tests_forced, is_precommit_run
from data_gen import copy_and_update
from pathlib import Path
import pytest
from spark_session import is_before_spark_330, is_spark_350_or_later
import warnings
_rebase_confs = {
"spark.sql.legacy.parquet.datetimeRebaseModeInRead": "CORRECTED",
"spark.sql.legacy.parquet.int96RebaseModeInRead": "CORRECTED"
}
_native_reader_confs = copy_and_update(
_rebase_confs, {"spark.rapids.sql.format.parquet.reader.footer.type": "NATIVE"})
_java_reader_confs = copy_and_update(
_rebase_confs, {"spark.rapids.sql.format.parquet.reader.footer.type": "JAVA"})
# Basenames of Parquet files that are expected to generate an error mapped to the
# error message. Many of these use "Exception" since the error from libcudf does not
# match the error message from Spark, but the important part is that both CPU and GPU
# agree that the file cannot be loaded.
# When the association is a pair rather than a string, it's a way to xfail the test
# by providing the error string and xfail reason.
_error_files = {
"large_string_map.brotli.parquet": "Exception",
"nation.dict-malformed.parquet": ("Exception", "https://github.com/NVIDIA/spark-rapids/issues/8644"),
"non_hadoop_lz4_compressed.parquet": "Exception",
"PARQUET-1481.parquet": "Exception",
}
# Basenames of Parquet files that are expected to fail due to known bugs mapped to the
# xfail reason message.
_xfail_files = {
"byte_array_decimal.parquet": "https://github.com/NVIDIA/spark-rapids/issues/8629",
"fixed_length_byte_array.parquet": "https://github.com/rapidsai/cudf/issues/14104",
"datapage_v2.snappy.parquet": "datapage v2 not supported by cudf",
"delta_binary_packed.parquet": "https://github.com/rapidsai/cudf/issues/13501",
"delta_byte_array.parquet": "https://github.com/rapidsai/cudf/issues/13501",
"delta_encoding_optional_column.parquet": "https://github.com/rapidsai/cudf/issues/13501",
"delta_encoding_required_column.parquet": "https://github.com/rapidsai/cudf/issues/13501",
"delta_length_byte_array.parquet": "https://github.com/rapidsai/cudf/issues/13501",
"hadoop_lz4_compressed.parquet": "cudf does not support Hadoop LZ4 format",
"hadoop_lz4_compressed_larger.parquet": "cudf does not support Hadoop LZ4 format",
"nested_structs.rust.parquet": "PySpark cannot handle year 52951",
"repeated_no_annotation.parquet": "https://github.com/NVIDIA/spark-rapids/issues/8631",
}
if is_before_spark_330():
_xfail_files["rle_boolean_encoding.parquet"] = "Spark CPU cannot decode V2 style RLE before 3.3.x"
# Spark 3.5.0 adds support for lz4_raw compression codec, but we do not support that on GPU yet
if is_spark_350_or_later():
_xfail_files["lz4_raw_compressed.parquet"] = "https://github.com/NVIDIA/spark-rapids/issues/9156"
_xfail_files["lz4_raw_compressed_larger.parquet"] = "https://github.com/NVIDIA/spark-rapids/issues/9156"
else:
_error_files["lz4_raw_compressed.parquet"] = "Exception"
_error_files["lz4_raw_compressed_larger.parquet"] = "Exception"
def locate_parquet_testing_files():
"""
Finds the input files by first checking the standard input path,
falling back to the parquet-testing submodule relative to this
script's location.
:return: list of input files or empty list if no files found
"""
glob_patterns = ("parquet-testing/data/*.parquet", "parquet-testing/bad_data/*.parquet")
places = []
std_path = get_std_input_path()
if std_path: places.append(Path(std_path))
places.append(Path(__file__).parent.joinpath("../../../../thirdparty").resolve())
for p in places:
files = []
for pattern in glob_patterns:
files += p.glob(pattern)
if files:
return files
locations = ", ".join([ p.joinpath(g).as_posix() for p in places for g in glob_patterns])
# TODO: Also fail for nightly tests when nightly scripts have been updated to initialize
# the git submodules when pulling spark-rapids changes.
# https://github.com/NVIDIA/spark-rapids/issues/8677
if is_precommit_run() or is_parquet_testing_tests_forced():
raise AssertionError("Cannot find parquet-testing data in any of: " + locations)
warnings.warn("Skipping parquet-testing tests. Unable to locate data in any of: " + locations)
return []
def gen_testing_params_for_errors():
result = []
for f in locate_parquet_testing_files():
error_obj = _error_files.get(f.name, None)
if error_obj is not None:
result.append((f.as_posix(), error_obj))
return result
def gen_testing_params_for_valid_files():
files = []
for f in locate_parquet_testing_files():
if f.name in _error_files:
continue
path = f.as_posix()
xfail_reason = _xfail_files.get(f.name, None)
if xfail_reason:
files.append(pytest.param(path, marks=pytest.mark.xfail(reason=xfail_reason)))
else:
files.append(path)
return files
@pytest.mark.parametrize("path", gen_testing_params_for_valid_files())
@pytest.mark.parametrize("confs", [_native_reader_confs, _java_reader_confs])
def test_parquet_testing_valid_files(path, confs):
assert_gpu_and_cpu_are_equal_collect(lambda spark: spark.read.parquet(path), conf=confs)
@pytest.mark.parametrize(("path", "errobj"), gen_testing_params_for_errors())
@pytest.mark.parametrize("confs", [_native_reader_confs, _java_reader_confs])
def test_parquet_testing_error_files(path, errobj, confs):
    error_msg = errobj
    if not isinstance(errobj, str):
        error_msg, xfail_reason = errobj
        pytest.xfail(xfail_reason)
assert_gpu_and_cpu_error(
lambda spark: spark.read.parquet(path).collect(),
conf=confs,
error_message=error_msg)
| spark-rapids-branch-23.10 | integration_tests/src/main/python/parquet_testing_test.py |
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
import random
import warnings
# TODO redo _spark stuff using fixtures
#
# Don't import pyspark / _spark directly in conftest at module scope;
# import it as a plugin instead to do a lazy, per-pytest-session initialization
#
pytest_plugins = [
'spark_init_internal'
]
_approximate_float_args = None
def get_float_check():
    if _approximate_float_args is not None:
return lambda lhs,rhs: lhs == pytest.approx(rhs, **_approximate_float_args)
else:
return lambda lhs,rhs: lhs == rhs
_incompat = False
def is_incompat():
return _incompat
_sort_on_spark = False
_sort_locally = False
def should_sort_on_spark():
return _sort_on_spark
def should_sort_locally():
return _sort_locally
_allow_any_non_gpu = False
_non_gpu_allowed = []
def is_allowing_any_non_gpu():
return _allow_any_non_gpu
def get_non_gpu_allowed():
return _non_gpu_allowed
def get_validate_execs_in_gpu_plan():
return _validate_execs_in_gpu_plan
_runtime_env = "apache"
def runtime_env():
return _runtime_env.lower()
def is_apache_runtime():
return runtime_env() == "apache"
def is_databricks_runtime():
return runtime_env() == "databricks"
def is_emr_runtime():
return runtime_env() == "emr"
def is_dataproc_runtime():
return runtime_env() == "dataproc"
_is_nightly_run = False
_is_precommit_run = False
def is_nightly_run():
return _is_nightly_run
def is_precommit_run():
return _is_precommit_run
def is_at_least_precommit_run():
return _is_nightly_run or _is_precommit_run
def skip_unless_nightly_tests(description):
if (_is_nightly_run):
raise AssertionError(description + ' during nightly test run')
else:
pytest.skip(description)
def skip_unless_precommit_tests(description):
if (_is_nightly_run):
raise AssertionError(description + ' during nightly test run')
elif (_is_precommit_run):
raise AssertionError(description + ' during pre-commit test run')
else:
pytest.skip(description)
_is_parquet_testing_tests_forced = False
def is_parquet_testing_tests_forced():
return _is_parquet_testing_tests_forced
_limit = -1
_inject_oom = None
def should_inject_oom():
global _inject_oom
    return _inject_oom is not None
def get_limit():
return _limit
def _get_limit_from_mark(mark):
if mark.args:
return mark.args[0]
else:
return mark.kwargs.get('num_rows', 100000)
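# Illustrative (hypothetical) usage of the limit marker consumed above: a test decorated
# with @pytest.mark.limit(500) or @pytest.mark.limit(num_rows=500) reports a limit of 500,
# while a bare @pytest.mark.limit falls back to the 100000-row default; the value is
# presumably used by the data generators to cap generated row counts.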
_std_input_path = None
def get_std_input_path():
return _std_input_path
def pytest_runtest_setup(item):
global _sort_on_spark
global _sort_locally
global _inject_oom
_inject_oom = item.get_closest_marker('inject_oom')
order = item.get_closest_marker('ignore_order')
if order:
if order.kwargs.get('local', False):
_sort_on_spark = False
_sort_locally = True
else:
_sort_on_spark = True
_sort_locally = False
else:
_sort_on_spark = False
_sort_locally = False
global _incompat
if item.get_closest_marker('incompat'):
_incompat = True
else:
_incompat = False
global _approximate_float_args
app_f = item.get_closest_marker('approximate_float')
if app_f:
_approximate_float_args = app_f.kwargs
else:
_approximate_float_args = None
global _allow_any_non_gpu
global _non_gpu_allowed
_non_gpu_allowed_databricks = []
_allow_any_non_gpu_databricks = False
non_gpu_databricks = item.get_closest_marker('allow_non_gpu_databricks')
non_gpu = item.get_closest_marker('allow_non_gpu')
if non_gpu_databricks:
if is_databricks_runtime():
if non_gpu_databricks.kwargs and non_gpu_databricks.kwargs['any']:
_allow_any_non_gpu_databricks = True
elif non_gpu_databricks.args:
_non_gpu_allowed_databricks = non_gpu_databricks.args
else:
warnings.warn('allow_non_gpu_databricks marker without anything allowed')
if non_gpu:
if non_gpu.kwargs and non_gpu.kwargs['any']:
_allow_any_non_gpu = True
_non_gpu_allowed = []
elif non_gpu.args:
_allow_any_non_gpu = False
_non_gpu_allowed = non_gpu.args
else:
warnings.warn('allow_non_gpu marker without anything allowed')
_allow_any_non_gpu = False
_non_gpu_allowed = []
else:
_allow_any_non_gpu = False
_non_gpu_allowed = []
_allow_any_non_gpu = _allow_any_non_gpu | _allow_any_non_gpu_databricks
if _non_gpu_allowed and _non_gpu_allowed_databricks:
_non_gpu_allowed = _non_gpu_allowed + _non_gpu_allowed_databricks
elif _non_gpu_allowed_databricks:
_non_gpu_allowed = _non_gpu_allowed_databricks
global _validate_execs_in_gpu_plan
validate_execs = item.get_closest_marker('validate_execs_in_gpu_plan')
if validate_execs and validate_execs.args:
_validate_execs_in_gpu_plan = validate_execs.args
else:
_validate_execs_in_gpu_plan = []
global _limit
limit_mrk = item.get_closest_marker('limit')
if limit_mrk:
_limit = _get_limit_from_mark(limit_mrk)
else:
_limit = -1
if item.get_closest_marker('iceberg'):
if not item.config.getoption('iceberg'):
pytest.skip('Iceberg tests not configured to run')
elif is_databricks_runtime():
pytest.skip('Iceberg tests skipped on Databricks')
if item.get_closest_marker('delta_lake'):
if not item.config.getoption('delta_lake'):
pytest.skip('delta lake tests not configured to run')
if item.get_closest_marker('large_data_test'):
if not item.config.getoption('large_data_test'):
pytest.skip('tests for large data not configured to run')
def pytest_configure(config):
global _runtime_env
_runtime_env = config.getoption('runtime_env')
global _std_input_path
_std_input_path = config.getoption("std_input_path")
global _is_nightly_run
global _is_precommit_run
test_type = config.getoption('test_type').lower()
if "nightly" == test_type:
_is_nightly_run = True
elif "pre-commit" == test_type:
_is_precommit_run = True
elif "developer" != test_type:
raise Exception("not supported test type {}".format(test_type))
global _is_parquet_testing_tests_forced
_is_parquet_testing_tests_forced = config.getoption("force_parquet_testing_tests")
# For OOM injection: we expect a seed to be provided by the environment, or default to 1.
# This is done so that every worker started by the pytest-xdist plugin has the same seed.
# Each worker builds its list of tests independently, and pytest expects that starting list
# to match across all workers, so the same seed must be set for all of them, either from
# the environment or as a constant.
oom_random_injection_seed = int(os.getenv("SPARK_RAPIDS_TEST_INJECT_OOM_SEED", 1))
print(f"Starting with OOM injection seed: {oom_random_injection_seed}. "
"Set env variable SPARK_RAPIDS_TEST_INJECT_OOM_SEED to override.")
def pytest_collection_modifyitems(config, items):
r = random.Random(oom_random_injection_seed)
for item in items:
extras = []
order = item.get_closest_marker('ignore_order')
# decide if OOMs should be injected, and when
injection_mode = config.getoption('test_oom_injection_mode').lower()
inject_choice = False
if injection_mode == 'random':
inject_choice = r.randrange(0, 2) == 1
elif injection_mode == 'always':
inject_choice = True
if inject_choice:
extras.append('INJECT_OOM')
item.add_marker('inject_oom', append=True)
if order:
if order.kwargs:
extras.append('IGNORE_ORDER(' + str(order.kwargs) + ')')
else:
extras.append('IGNORE_ORDER')
if item.get_closest_marker('incompat'):
extras.append('INCOMPAT')
app_f = item.get_closest_marker('approximate_float')
if app_f:
if app_f.kwargs:
extras.append('APPROXIMATE_FLOAT(' + str(app_f.kwargs) + ')')
else:
extras.append('APPROXIMATE_FLOAT')
non_gpu = item.get_closest_marker('allow_non_gpu')
if non_gpu:
if non_gpu.kwargs and non_gpu.kwargs['any']:
extras.append('ALLOW_NON_GPU(ANY)')
elif non_gpu.args:
extras.append('ALLOW_NON_GPU(' + ','.join(non_gpu.args) + ')')
limit_mrk = item.get_closest_marker('limit')
if limit_mrk:
extras.append('LIMIT({})'.format(_get_limit_from_mark(limit_mrk)))
if extras:
# This is not ideal because we are reaching into an internal value
item._nodeid = item.nodeid + '[' + ', '.join(extras) + ']'
@pytest.fixture(scope="session")
def std_input_path(request):
path = request.config.getoption("std_input_path")
if path is None:
skip_unless_precommit_tests("std_input_path is not configured")
else:
yield path
def get_worker_id(request):
try:
import xdist
return xdist.plugin.get_xdist_worker_id(request)
except ImportError:
return 'main'
@pytest.fixture
def spark_tmp_path(request):
from spark_init_internal import get_spark_i_know_what_i_am_doing
debug = request.config.getoption('debug_tmp_path')
ret = request.config.getoption('tmp_path')
if ret is None:
ret = '/tmp/pyspark_tests/'
worker_id = get_worker_id(request)
pid = os.getpid()
hostname = os.uname()[1]
ret = f'{ret}/{hostname}-{worker_id}-{pid}-{random.randrange(0, 1<<31)}/'
# Make sure it is there and accessible
sc = get_spark_i_know_what_i_am_doing().sparkContext
config = sc._jsc.hadoopConfiguration()
path = sc._jvm.org.apache.hadoop.fs.Path(ret)
fs = sc._jvm.org.apache.hadoop.fs.FileSystem.get(config)
fs.mkdirs(path)
yield ret
if not debug:
fs.delete(path)
class TmpTableFactory:
def __init__(self, base_id):
self.base_id = base_id
self.running_id = 0
def get(self):
ret = '{}_{}'.format(self.base_id, self.running_id)
self.running_id = self.running_id + 1
return ret
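# For example (hypothetical ids): a TmpTableFactory built with base_id "tmp_table_gw0_123"
# hands out "tmp_table_gw0_123_0", "tmp_table_gw0_123_1", ... on successive get() calls,
# which is what allows the fixture teardown below to drop every table with that prefix.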
@pytest.fixture
def spark_tmp_table_factory(request):
from spark_init_internal import get_spark_i_know_what_i_am_doing
worker_id = get_worker_id(request)
table_id = random.getrandbits(31)
base_id = f'tmp_table_{worker_id}_{table_id}'
yield TmpTableFactory(base_id)
sp = get_spark_i_know_what_i_am_doing()
    tables = sp.sql("SHOW TABLES").collect()
for row in tables:
t_name = row['tableName']
if (t_name.startswith(base_id)):
sp.sql("DROP TABLE IF EXISTS {}".format(t_name))
def _get_jvm_session(spark):
return spark._jsparkSession
def _get_jvm(spark):
return spark.sparkContext._jvm
def spark_jvm():
from spark_init_internal import get_spark_i_know_what_i_am_doing
return _get_jvm(get_spark_i_know_what_i_am_doing())
class MortgageRunner:
def __init__(self, mortgage_format, mortgage_acq_path, mortgage_perf_path):
self.mortgage_format = mortgage_format
self.mortgage_acq_path = mortgage_acq_path
self.mortgage_perf_path = mortgage_perf_path
def do_test_query(self, spark):
from pyspark.sql.dataframe import DataFrame
jvm_session = _get_jvm_session(spark)
jvm = _get_jvm(spark)
acq = self.mortgage_acq_path
perf = self.mortgage_perf_path
run = jvm.com.nvidia.spark.rapids.tests.mortgage.Run
if self.mortgage_format == 'csv':
df = run.csv(jvm_session, perf, acq)
elif self.mortgage_format == 'parquet':
df = run.parquet(jvm_session, perf, acq)
elif self.mortgage_format == 'orc':
df = run.orc(jvm_session, perf, acq)
else:
raise AssertionError('Not Supported Format {}'.format(self.mortgage_format))
return DataFrame(df, spark.getActiveSession())
@pytest.fixture(scope="session")
def mortgage(request):
mortgage_format = request.config.getoption("mortgage_format")
mortgage_path = request.config.getoption("mortgage_path")
if mortgage_path is None:
std_path = request.config.getoption("std_input_path")
if std_path is None:
skip_unless_precommit_tests("Mortgage tests are not configured to run")
else:
yield MortgageRunner('parquet', std_path + '/parquet_acq', std_path + '/parquet_perf')
else:
yield MortgageRunner(mortgage_format, mortgage_path + '/acq', mortgage_path + '/perf')
@pytest.fixture(scope="session")
def enable_cudf_udf(request):
enable_udf_cudf = request.config.getoption("cudf_udf")
if not enable_udf_cudf:
# cudf_udf tests are not required for any test runs
pytest.skip("cudf_udf not configured to run")
@pytest.fixture(scope="session")
def enable_fuzz_test(request):
enable_fuzz_test = request.config.getoption("fuzz_test")
if not enable_fuzz_test:
# fuzz tests are not required for any test runs
pytest.skip("fuzz_test not configured to run")
| spark-rapids-branch-23.10 | integration_tests/src/main/python/conftest.py |
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_gpu_and_cpu_writes_are_equal_collect, assert_gpu_fallback_write
from spark_session import is_before_spark_320, is_spark_321cdh, is_spark_cdh, with_cpu_session, with_gpu_session
from datetime import date, datetime, timezone
from data_gen import *
from marks import *
from pyspark.sql.functions import col, lit
from pyspark.sql.types import *
pytestmark = pytest.mark.nightly_resource_consuming_test
orc_write_basic_gens = [byte_gen, short_gen, int_gen, long_gen, float_gen, double_gen,
string_gen, boolean_gen, DateGen(start=date(1590, 1, 1)),
TimestampGen(start=datetime(1970, 1, 1, tzinfo=timezone.utc)) ] + \
decimal_gens
all_nulls_string_gen = SetValuesGen(StringType(), [None])
empty_or_null_string_gen = SetValuesGen(StringType(), [None, ""])
all_empty_string_gen = SetValuesGen(StringType(), [""])
all_nulls_array_gen = SetValuesGen(ArrayType(StringType()), [None])
all_empty_array_gen = SetValuesGen(ArrayType(StringType()), [[]])
all_array_empty_string_gen = SetValuesGen(ArrayType(StringType()), [["", ""]])
mixed_empty_nulls_array_gen = SetValuesGen(ArrayType(StringType()), [None, [], [None], [""], [None, ""]])
mixed_empty_nulls_map_gen = SetValuesGen(MapType(StringType(), StringType()), [{}, None, {"A": ""}, {"B": None}])
all_nulls_map_gen = SetValuesGen(MapType(StringType(), StringType()), [None])
all_empty_map_gen = SetValuesGen(MapType(StringType(), StringType()), [{}])
orc_write_odd_empty_strings_gens_sample = [all_nulls_string_gen,
empty_or_null_string_gen,
all_empty_string_gen,
all_nulls_array_gen,
all_empty_array_gen,
all_array_empty_string_gen,
mixed_empty_nulls_array_gen,
mixed_empty_nulls_map_gen,
all_nulls_map_gen,
all_empty_map_gen]
orc_write_basic_struct_gen = StructGen([['child'+str(ind), sub_gen] for ind, sub_gen in enumerate(orc_write_basic_gens)])
orc_write_struct_gens_sample = [orc_write_basic_struct_gen,
StructGen([['child0', byte_gen], ['child1', orc_write_basic_struct_gen]]),
StructGen([['child0', ArrayGen(short_gen)], ['child1', double_gen]])]
orc_write_array_gens_sample = [ArrayGen(sub_gen) for sub_gen in orc_write_basic_gens] + [
ArrayGen(ArrayGen(short_gen, max_length=10), max_length=10),
ArrayGen(ArrayGen(string_gen, max_length=10), max_length=10),
ArrayGen(StructGen([['child0', byte_gen], ['child1', string_gen], ['child2', float_gen]]))]
orc_write_basic_map_gens = [simple_string_to_string_map_gen] + [MapGen(f(nullable=False), f()) for f in [
BooleanGen, ByteGen, ShortGen, IntegerGen, LongGen, FloatGen, DoubleGen,
# Using timestamps from 1970 to work around a cudf ORC bug
# https://github.com/NVIDIA/spark-rapids/issues/140.
lambda nullable=True: TimestampGen(start=datetime(1970, 1, 1, tzinfo=timezone.utc), nullable=nullable),
lambda nullable=True: DateGen(start=date(1590, 1, 1), nullable=nullable),
lambda nullable=True: DecimalGen(precision=15, scale=1, nullable=nullable),
lambda nullable=True: DecimalGen(precision=36, scale=5, nullable=nullable)]]
orc_write_gens_list = [orc_write_basic_gens,
orc_write_struct_gens_sample,
orc_write_array_gens_sample,
orc_write_basic_map_gens,
pytest.param([date_gen], marks=pytest.mark.xfail(reason='https://github.com/NVIDIA/spark-rapids/issues/139')),
pytest.param([timestamp_gen], marks=pytest.mark.xfail(reason='https://github.com/NVIDIA/spark-rapids/issues/140'))]
@pytest.mark.parametrize('orc_gens', orc_write_gens_list, ids=idfn)
@pytest.mark.parametrize('orc_impl', ["native", "hive"])
def test_write_round_trip(spark_tmp_path, orc_gens, orc_impl):
gen_list = [('_c' + str(i), gen) for i, gen in enumerate(orc_gens)]
data_path = spark_tmp_path + '/ORC_DATA'
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path: gen_df(spark, gen_list).coalesce(1).write.orc(path),
lambda spark, path: spark.read.orc(path),
data_path,
conf={'spark.sql.orc.impl': orc_impl, 'spark.rapids.sql.format.orc.write.enabled': True})
@pytest.mark.parametrize('orc_gen', orc_write_odd_empty_strings_gens_sample, ids=idfn)
@pytest.mark.parametrize('orc_impl', ["native", "hive"])
def test_write_round_trip_corner(spark_tmp_path, orc_gen, orc_impl):
gen_list = [('_c0', orc_gen)]
data_path = spark_tmp_path + '/ORC_DATA'
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path: gen_df(spark, gen_list, 128000, num_slices=1).write.orc(path),
lambda spark, path: spark.read.orc(path),
data_path,
conf={'spark.sql.orc.impl': orc_impl, 'spark.rapids.sql.format.orc.write.enabled': True})
orc_part_write_gens = [
byte_gen, short_gen, int_gen, long_gen, float_gen, double_gen, boolean_gen,
    # Some file systems have issues with UTF8 strings, so restrict the character set to help the test pass even there
StringGen('(\\w| ){0,50}'),
# Once https://github.com/NVIDIA/spark-rapids/issues/139 is fixed replace this with
# date_gen
DateGen(start=date(1590, 1, 1)),
# Once https://github.com/NVIDIA/spark-rapids/issues/140 is fixed replace this with
# timestamp_gen
TimestampGen(start=datetime(1970, 1, 1, tzinfo=timezone.utc))]
# There are race conditions around when individual files are read in for partitioned data
@ignore_order
@pytest.mark.parametrize('orc_gen', orc_part_write_gens, ids=idfn)
def test_part_write_round_trip(spark_tmp_path, orc_gen):
gen_list = [('a', RepeatSeqGen(orc_gen, 10)),
('b', orc_gen)]
data_path = spark_tmp_path + '/ORC_DATA'
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path: gen_df(spark, gen_list).coalesce(1).write.partitionBy('a').orc(path),
lambda spark, path: spark.read.orc(path),
data_path,
conf = {'spark.rapids.sql.format.orc.write.enabled': True})
@ignore_order(local=True)
@pytest.mark.parametrize('orc_gen', [int_gen], ids=idfn)
@pytest.mark.parametrize('orc_impl', ["native", "hive"])
@pytest.mark.skipif(is_spark_321cdh(), reason="3.2.1 CDH not support partitionOverwriteMode=DYNAMIC")
def test_dynamic_partition_write_round_trip(spark_tmp_path, orc_gen, orc_impl):
gen_list = [('_c0', orc_gen)]
data_path = spark_tmp_path + '/ORC_DATA'
def do_writes(spark, path):
df = gen_df(spark, gen_list).withColumn("my_partition", lit("PART"))
# first write finds no partitions, it skips the dynamic partition
# overwrite code
df.write.mode("overwrite").partitionBy("my_partition").orc(path)
# second write actually triggers dynamic partition overwrite
df.write.mode("overwrite").partitionBy("my_partition").orc(path)
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path: do_writes(spark, path),
lambda spark, path: spark.read.orc(path),
data_path,
conf={
'spark.sql.orc.impl': orc_impl,
'spark.rapids.sql.format.orc.write.enabled': True,
'spark.sql.sources.partitionOverwriteMode': 'DYNAMIC'
})
orc_write_compress_options = ['none', 'uncompressed', 'snappy']
# zstd is available in spark 3.2.0 and later.
if not is_before_spark_320() and not is_spark_cdh():
orc_write_compress_options.append('zstd')
@pytest.mark.parametrize('compress', orc_write_compress_options)
def test_compress_write_round_trip(spark_tmp_path, compress):
data_path = spark_tmp_path + '/ORC_DATA'
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path : binary_op_df(spark, long_gen).coalesce(1).write.orc(path),
lambda spark, path : spark.read.orc(path),
data_path,
conf={'spark.sql.orc.compression.codec': compress, 'spark.rapids.sql.format.orc.write.enabled': True})
@pytest.mark.order(2)
@pytest.mark.parametrize('orc_gens', orc_write_gens_list, ids=idfn)
@pytest.mark.parametrize('orc_impl', ["native", "hive"])
def test_write_save_table(spark_tmp_path, orc_gens, orc_impl, spark_tmp_table_factory):
gen_list = [('_c' + str(i), gen) for i, gen in enumerate(orc_gens)]
data_path = spark_tmp_path + '/ORC_DATA'
all_confs={'spark.sql.sources.useV1SourceList': "orc",
'spark.rapids.sql.format.orc.write.enabled': True,
"spark.sql.orc.impl": orc_impl}
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path: gen_df(spark, gen_list).coalesce(1).write.format("orc").mode('overwrite').option("path", path).saveAsTable(spark_tmp_table_factory.get()),
lambda spark, path: spark.read.orc(path),
data_path,
conf=all_confs)
def write_orc_sql_from(spark, df, data_path, write_to_table):
tmp_view_name = 'tmp_view_{}'.format(random.randint(0, 1000000))
df.createOrReplaceTempView(tmp_view_name)
write_cmd = 'CREATE TABLE `{}` USING ORC location \'{}\' AS SELECT * from `{}`'.format(write_to_table, data_path, tmp_view_name)
spark.sql(write_cmd)
@pytest.mark.order(2)
@pytest.mark.parametrize('orc_gens', orc_write_gens_list, ids=idfn)
@pytest.mark.parametrize('ts_type', ["TIMESTAMP_MICROS", "TIMESTAMP_MILLIS"])
@pytest.mark.parametrize('orc_impl', ["native", "hive"])
def test_write_sql_save_table(spark_tmp_path, orc_gens, ts_type, orc_impl, spark_tmp_table_factory):
gen_list = [('_c' + str(i), gen) for i, gen in enumerate(orc_gens)]
data_path = spark_tmp_path + '/ORC_DATA'
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path: write_orc_sql_from(spark, gen_df(spark, gen_list).coalesce(1), path, spark_tmp_table_factory.get()),
lambda spark, path: spark.read.orc(path),
data_path,
conf={'spark.sql.orc.impl': orc_impl, 'spark.rapids.sql.format.orc.write.enabled': True})
@allow_non_gpu('DataWritingCommandExec,ExecutedCommandExec,WriteFilesExec')
@pytest.mark.parametrize('codec', ['zlib', 'lzo'])
def test_orc_write_compression_fallback(spark_tmp_path, codec, spark_tmp_table_factory):
gen = TimestampGen()
    data_path = spark_tmp_path + '/ORC_DATA'
all_confs={'spark.sql.orc.compression.codec': codec, 'spark.rapids.sql.format.orc.write.enabled': True}
assert_gpu_fallback_write(
lambda spark, path: unary_op_df(spark, gen).coalesce(1).write.format("orc").mode('overwrite').option("path", path).saveAsTable(spark_tmp_table_factory.get()),
lambda spark, path: spark.read.orc(path),
data_path,
'DataWritingCommandExec',
conf=all_confs)
@ignore_order
@allow_non_gpu('DataWritingCommandExec,ExecutedCommandExec,WriteFilesExec')
def test_buckets_write_fallback(spark_tmp_path, spark_tmp_table_factory):
data_path = spark_tmp_path + '/ORC_DATA'
assert_gpu_fallback_write(
lambda spark, path: spark.range(10e4).write.bucketBy(4, "id").sortBy("id").format('orc').mode('overwrite').option("path", path).saveAsTable(spark_tmp_table_factory.get()),
lambda spark, path: spark.read.orc(path),
data_path,
'DataWritingCommandExec',
conf = {'spark.rapids.sql.format.orc.write.enabled': True})
@ignore_order
@allow_non_gpu('DataWritingCommandExec,ExecutedCommandExec,WriteFilesExec')
def test_orc_write_bloom_filter_with_options_cpu_fallback(spark_tmp_path, spark_tmp_table_factory):
data_path = spark_tmp_path + '/ORC_DATA'
assert_gpu_fallback_write(
lambda spark, path: spark.range(10e4).write.mode('overwrite').option("orc.bloom.filter.columns", "id").orc(path),
lambda spark, path: spark.read.orc(path),
data_path,
'DataWritingCommandExec',
conf={'spark.rapids.sql.format.orc.write.enabled': True})
@ignore_order
@allow_non_gpu('DataWritingCommandExec,ExecutedCommandExec,WriteFilesExec')
def test_orc_write_bloom_filter_sql_cpu_fallback(spark_tmp_path, spark_tmp_table_factory):
data_path = spark_tmp_path + '/ORC_DATA'
base_table_name = spark_tmp_table_factory.get()
def sql_write(spark, path):
is_gpu = path.endswith('GPU')
table_name = base_table_name + '_GPU' if is_gpu else base_table_name + '_CPU'
spark.sql('CREATE TABLE `{}` STORED AS ORCFILE location \'{}\' TBLPROPERTIES("orc.bloom.filter.columns"="id") '
'AS SELECT id from range(100)'.format(table_name, path))
assert_gpu_fallback_write(
sql_write,
lambda spark, path: spark.read.orc(path),
data_path,
'DataWritingCommandExec',
conf={'spark.rapids.sql.format.orc.write.enabled': True})
@pytest.mark.parametrize('orc_gens', orc_write_gens_list, ids=idfn)
def test_write_empty_orc_round_trip(spark_tmp_path, orc_gens):
def create_empty_df(spark, path):
gen_list = [('_c' + str(i), gen) for i, gen in enumerate(orc_gens)]
return gen_df(spark, gen_list, length=0).write.orc(path)
data_path = spark_tmp_path + '/ORC_DATA'
assert_gpu_and_cpu_writes_are_equal_collect(
create_empty_df,
lambda spark, path: spark.read.orc(path),
data_path,
conf={'spark.rapids.sql.format.orc.write.enabled': True})
@ignore_order
@pytest.mark.skipif(is_before_spark_320(), reason="is only supported in Spark 320+")
def test_concurrent_writer(spark_tmp_path):
    data_path = spark_tmp_path + '/ORC_DATA'
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path: get_25_partitions_df(spark) # df has 25 partitions for (c1, c2)
.repartition(2)
.write.mode("overwrite").partitionBy('c1', 'c2').orc(path),
lambda spark, path: spark.read.orc(path),
data_path,
copy_and_update(
# 26 > 25, will not fall back to single writer
{"spark.sql.maxConcurrentOutputFileWriters": 26}
))
@ignore_order
@pytest.mark.skipif(is_before_spark_320(), reason="is only supported in Spark 320+")
def test_fallback_to_single_writer_from_concurrent_writer(spark_tmp_path):
    data_path = spark_tmp_path + '/ORC_DATA'
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path: get_25_partitions_df(spark) # df has 25 partitions for (c1, c2)
.repartition(2)
.write.mode("overwrite").partitionBy('c1', 'c2').orc(path),
lambda spark, path: spark.read.orc(path),
data_path,
copy_and_update(
# 10 < 25, will fall back to single writer
{"spark.sql.maxConcurrentOutputFileWriters": 10},
{"spark.rapids.sql.concurrentWriterPartitionFlushSize": 64 * 1024 * 1024}
))
@ignore_order
def test_orc_write_column_name_with_dots(spark_tmp_path):
data_path = spark_tmp_path + "/ORC_DATA"
gens = [
("a.b", StructGen([
("c.d.e", StructGen([
("f.g", int_gen),
("h", string_gen)])),
("i.j", long_gen)])),
("k", boolean_gen)]
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path: gen_df(spark, gens).coalesce(1).write.orc(path),
lambda spark, path: spark.read.orc(path),
data_path)
# test case from:
# https://github.com/apache/spark/blob/v3.4.0/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcQuerySuite.scala#L371
@ignore_order
def test_orc_do_not_lowercase_columns(spark_tmp_path):
data_path = spark_tmp_path + "/ORC_DATA"
assert_gpu_and_cpu_writes_are_equal_collect(
# column is uppercase
lambda spark, path: spark.range(0, 1000).select(col("id").alias("Acol")).write.orc(path),
lambda spark, path: spark.read.orc(path),
data_path)
try:
# reading lowercase causes exception
with_cpu_session(lambda spark: spark.read.orc(data_path + "/CPU").schema["acol"])
assert False
except KeyError as e:
assert "No StructField named acol" in str(e)
try:
# reading lowercase causes exception
with_gpu_session(lambda spark: spark.read.orc(data_path + "/GPU").schema["acol"])
assert False
except KeyError as e:
assert "No StructField named acol" in str(e)
| spark-rapids-branch-23.10 | integration_tests/src/main/python/orc_write_test.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_gpu_and_cpu_are_equal_collect
from data_gen import *
from pyspark.sql.types import *
from spark_session import is_before_spark_330
from marks import *
basic_gens = all_gen + [NullGen()]
# This is a corner case; use @ignore_order and "length = 4" to trigger it.
# If the sample exec can't handle an empty batch, it will trigger an "Input table cannot be empty" error.
@ignore_order
@pytest.mark.parametrize('data_gen', [string_gen], ids=idfn)
def test_sample_produce_empty_batch(data_gen):
assert_gpu_and_cpu_are_equal_collect(
        # length = 4 will generate an empty batch after sampling;
        # set num_slices to a constant because it is not the same across
        # deployment modes such as local, YARN, and Mesos
lambda spark: unary_op_df(spark, data_gen, length= 4, num_slices = 10)
.sample(fraction = 0.9, seed = 1)
)
# The following cases are the normal cases and do not use @ignore_order
nested_gens = array_gens_sample + struct_gens_sample + map_gens_sample
@pytest.mark.parametrize('data_gen', basic_gens + nested_gens, ids=idfn)
def test_sample(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, data_gen, num_slices = 10)
.sample(fraction = 0.9, seed = 1)
)
@pytest.mark.parametrize('data_gen', basic_gens + nested_gens, ids=idfn)
def test_sample_with_replacement(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, data_gen, num_slices = 10).sample(
withReplacement =True, fraction = 0.5, seed = 1)
)
@pytest.mark.skipif(is_before_spark_330(), reason='DayTimeInterval is not supported before Pyspark 3.3.0')
def test_sample_for_interval():
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, DayTimeIntervalGen(), num_slices=10)
.sample(fraction=0.9, seed=1)
)
@pytest.mark.skipif(is_before_spark_330(), reason='DayTimeInterval is not supported before Pyspark 3.3.0')
def test_sample_with_replacement_for_interval():
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, DayTimeIntervalGen(), num_slices=10).sample(
withReplacement=True, fraction=0.5, seed=1)
) | spark-rapids-branch-23.10 | integration_tests/src/main/python/sample_test.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os.path
import pyspark.sql.functions as f
import pytest
import re
import sys
from asserts import *
from data_gen import *
from conftest import is_databricks_runtime
from marks import *
from parquet_write_test import parquet_part_write_gens, parquet_write_gens_list, writer_confs
from pyspark.sql.types import *
from spark_session import is_before_spark_320, is_before_spark_330, is_databricks122_or_later, with_cpu_session
delta_meta_allow = [
"DeserializeToObjectExec",
"ShuffleExchangeExec",
"FileSourceScanExec",
"FilterExec",
"MapPartitionsExec",
"MapElementsExec",
"ObjectHashAggregateExec",
"ProjectExec",
"SerializeFromObjectExec",
"SortExec"
]
delta_writes_enabled_conf = {"spark.rapids.sql.format.delta.write.enabled": "true"}
delta_write_fallback_allow = "ExecutedCommandExec,DataWritingCommandExec" if is_databricks122_or_later() else "ExecutedCommandExec"
delta_write_fallback_check = "DataWritingCommandExec" if is_databricks122_or_later() else "ExecutedCommandExec"
delta_optimized_write_fallback_allow = "ExecutedCommandExec,DataWritingCommandExec,DeltaOptimizedWriterExec" if is_databricks122_or_later() else "ExecutedCommandExec"
def fixup_path(d):
"""Modify the 'path' value to remove random IDs in the pathname"""
parts = d["path"].split("-")
d["path"] = "-".join(parts[0:1]) + ".".join(parts[-1].split(".")[-2:])
def del_keys(key_list, c_val, g_val):
for key in key_list:
c_val.pop(key, None)
g_val.pop(key, None)
def fixup_operation_metrics(opm):
"""Update the specified operationMetrics node to facilitate log comparisons"""
# note that we remove many byte metrics because number of bytes can vary
# between CPU and GPU.
metrics_to_remove = ["executionTimeMs", "numOutputBytes", "rewriteTimeMs", "scanTimeMs",
"numRemovedBytes", "numAddedBytes", "numTargetBytesAdded", "numTargetBytesInserted",
"numTargetBytesUpdated", "numTargetBytesRemoved"]
for k in metrics_to_remove:
opm.pop(k, None)
TMP_TABLE_PATTERN=re.compile(r"tmp_table_\w+")
TMP_TABLE_PATH_PATTERN=re.compile(r"delta.`[^`]*`")
REF_ID_PATTERN=re.compile(r"#[0-9]+")
def fixup_operation_parameters(opp):
"""Update the specified operationParameters node to facilitate log comparisons"""
for key in ("predicate", "matchedPredicates", "notMatchedPredicates"):
pred = opp.get(key)
if pred:
subbed = TMP_TABLE_PATTERN.sub("tmp_table", pred)
subbed = TMP_TABLE_PATH_PATTERN.sub("tmp_table", subbed)
opp[key] = REF_ID_PATTERN.sub("#refid", subbed)
def assert_delta_log_json_equivalent(filename, c_json, g_json):
assert c_json.keys() == g_json.keys(), "Delta log {} has mismatched keys:\nCPU: {}\nGPU: {}".format(filename, c_json, g_json)
for key, c_val in c_json.items():
g_val = g_json[key]
# Strip out the values that are expected to be different
c_tags = c_val.get("tags", {})
g_tags = g_val.get("tags", {})
del_keys(["INSERTION_TIME", "MAX_INSERTION_TIME", "MIN_INSERTION_TIME"], c_tags, g_tags)
if key == "metaData":
assert c_val.keys() == g_val.keys(), "Delta log {} 'metaData' keys mismatch:\nCPU: {}\nGPU: {}".format(filename, c_val, g_val)
del_keys(("createdTime", "id"), c_val, g_val)
elif key == "add":
assert c_val.keys() == g_val.keys(), "Delta log {} 'add' keys mismatch:\nCPU: {}\nGPU: {}".format(filename, c_val, g_val)
del_keys(("modificationTime", "size"), c_val, g_val)
fixup_path(c_val)
fixup_path(g_val)
elif key == "cdc":
assert c_val.keys() == g_val.keys(), "Delta log {} 'cdc' keys mismatch:\nCPU: {}\nGPU: {}".format(filename, c_val, g_val)
del_keys(("size",), c_val, g_val)
fixup_path(c_val)
fixup_path(g_val)
elif key == "commitInfo":
assert c_val.keys() == g_val.keys(), "Delta log {} 'commitInfo' keys mismatch:\nCPU: {}\nGPU: {}".format(filename, c_val, g_val)
del_keys(("timestamp", "txnId"), c_val, g_val)
for v in c_val, g_val:
fixup_operation_metrics(v.get("operationMetrics", {}))
fixup_operation_parameters(v.get("operationParameters", {}))
elif key == "remove":
assert c_val.keys() == g_val.keys(), "Delta log {} 'remove' keys mismatch:\nCPU: {}\nGPU: {}".format(filename, c_val, g_val)
del_keys(("deletionTimestamp", "size"), c_val, g_val)
fixup_path(c_val)
fixup_path(g_val)
assert c_val == g_val, "Delta log {} is different at key '{}':\nCPU: {}\nGPU: {}".format(filename, key, c_val, g_val)
def decode_jsons(json_data):
"""Decode the JSON records in a string"""
jsons = []
idx = 0
decoder = json.JSONDecoder()
while idx < len(json_data):
js, idx = decoder.raw_decode(json_data, idx=idx)
jsons.append(js)
# Skip whitespace between records
while idx < len(json_data) and json_data[idx].isspace():
idx += 1
# reorder to produce a consistent output for comparison
def json_to_sort_key(j):
keys = sorted(j.keys())
stats = sorted([ v.get("stats", "") for v in j.values() ])
paths = sorted([ v.get("path", "") for v in j.values() ])
return ','.join(keys + stats + paths)
jsons.sort(key=json_to_sort_key)
return jsons
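# Hypothetical illustration of decode_jsons: a Delta log file holds one JSON record per
# line, e.g. '{"commitInfo": {...}}\n{"add": {"path": ..., "stats": ...}}'. Each record is
# decoded individually and the list is re-sorted by a key built from the record keys,
# stats, and paths, so CPU- and GPU-written logs compare equal even if the records were
# emitted in a different order.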
def assert_gpu_and_cpu_delta_logs_equivalent(spark, data_path):
cpu_log_data = spark.sparkContext.wholeTextFiles(data_path + "/CPU/_delta_log/*").collect()
gpu_log_data = spark.sparkContext.wholeTextFiles(data_path + "/GPU/_delta_log/*").collect()
assert len(cpu_log_data) == len(gpu_log_data), "Different number of Delta log files:\nCPU: {}\nGPU: {}".format(cpu_log_data, gpu_log_data)
cpu_logs_data = [ (os.path.basename(x), y) for x, y in cpu_log_data if x.endswith(".json") ]
gpu_logs_dict = dict([ (os.path.basename(x), y) for x, y in gpu_log_data if x.endswith(".json") ])
for file, cpu_json_data in cpu_logs_data:
gpu_json_data = gpu_logs_dict.get(file)
assert gpu_json_data, "CPU Delta log file {} is missing from GPU Delta logs".format(file)
cpu_jsons = decode_jsons(cpu_json_data)
gpu_jsons = decode_jsons(gpu_json_data)
assert len(cpu_jsons) == len(gpu_jsons), "Different line counts in {}:\nCPU: {}\nGPU: {}".format(file, cpu_json_data, gpu_json_data)
for cpu_json, gpu_json in zip(cpu_jsons, gpu_jsons):
assert_delta_log_json_equivalent(file, cpu_json, gpu_json)
def get_last_operation_metrics(path):
from delta.tables import DeltaTable
return with_cpu_session(lambda spark: DeltaTable.forPath(spark, path)\
.history(1)\
.selectExpr("operationMetrics")\
.head()[0])
@allow_non_gpu(delta_write_fallback_allow, *delta_meta_allow)
@delta_lake
@ignore_order
@pytest.mark.parametrize("disable_conf",
[{"spark.rapids.sql.format.delta.write.enabled": "false"},
{"spark.rapids.sql.format.parquet.enabled": "false"},
{"spark.rapids.sql.format.parquet.write.enabled": "false"}], ids=idfn)
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
def test_delta_write_disabled_fallback(spark_tmp_path, disable_conf):
data_path = spark_tmp_path + "/DELTA_DATA"
assert_gpu_fallback_write(
lambda spark, path: unary_op_df(spark, int_gen).coalesce(1).write.format("delta").save(path),
lambda spark, path: spark.read.format("delta").load(path),
data_path,
delta_write_fallback_check,
conf=copy_and_update(writer_confs, disable_conf))
@allow_non_gpu(*delta_meta_allow)
@delta_lake
@ignore_order(local=True)
@pytest.mark.parametrize("gens", parquet_write_gens_list, ids=idfn)
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
def test_delta_write_round_trip_unmanaged(spark_tmp_path, gens):
gen_list = [("c" + str(i), gen) for i, gen in enumerate(gens)]
data_path = spark_tmp_path + "/DELTA_DATA"
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path: gen_df(spark, gen_list).coalesce(1).write.format("delta").save(path),
lambda spark, path: spark.read.format("delta").load(path),
data_path,
conf=copy_and_update(writer_confs, delta_writes_enabled_conf))
with_cpu_session(lambda spark: assert_gpu_and_cpu_delta_logs_equivalent(spark, data_path))
@allow_non_gpu(*delta_meta_allow)
@delta_lake
@ignore_order
@pytest.mark.parametrize("gens", parquet_part_write_gens, ids=idfn)
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
def test_delta_part_write_round_trip_unmanaged(spark_tmp_path, gens):
gen_list = [("a", RepeatSeqGen(gens, 10)), ("b", gens)]
data_path = spark_tmp_path + "/DELTA_DATA"
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path: gen_df(spark, gen_list).coalesce(1).write.format("delta")
.partitionBy("a")
.save(path),
lambda spark, path: spark.read.format("delta").load(path),
data_path,
conf=copy_and_update(writer_confs, delta_writes_enabled_conf))
with_cpu_session(lambda spark: assert_gpu_and_cpu_delta_logs_equivalent(spark, data_path))
@allow_non_gpu(*delta_meta_allow)
@delta_lake
@ignore_order
@pytest.mark.parametrize("gens", parquet_part_write_gens, ids=idfn)
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
def test_delta_multi_part_write_round_trip_unmanaged(spark_tmp_path, gens):
gen_list = [("a", RepeatSeqGen(gens, 10)), ("b", gens), ("c", SetValuesGen(StringType(), ["x", "y", "z"]))]
data_path = spark_tmp_path + "/DELTA_DATA"
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path: gen_df(spark, gen_list).coalesce(1).write.format("delta")
.partitionBy("a", "c")
.save(path),
lambda spark, path: spark.read.format("delta").load(path).filter("c='x'"),
data_path,
conf=copy_and_update(writer_confs, delta_writes_enabled_conf))
with_cpu_session(lambda spark: assert_gpu_and_cpu_delta_logs_equivalent(spark, data_path))
def do_update_round_trip_managed(spark_tmp_path, mode):
gen_list = [("x", int_gen), ("y", binary_gen), ("z", string_gen)]
data_path = spark_tmp_path + "/DELTA_DATA"
confs = copy_and_update(writer_confs, delta_writes_enabled_conf)
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path: gen_df(spark, gen_list).coalesce(1).write.format("delta").save(path),
lambda spark, path: spark.read.format("delta").load(path),
data_path,
conf=confs)
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path: gen_df(spark, gen_list).coalesce(1).write.mode(mode).format("delta").save(path),
lambda spark, path: spark.read.format("delta").load(path),
data_path,
conf=confs)
with_cpu_session(lambda spark: assert_gpu_and_cpu_delta_logs_equivalent(spark, data_path))
# Verify time travel still works
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.read.format("delta").option("versionAsOf", "0").load(data_path + "/GPU"),
conf=confs)
@allow_non_gpu(*delta_meta_allow)
@delta_lake
@ignore_order
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
def test_delta_overwrite_round_trip_unmanaged(spark_tmp_path):
do_update_round_trip_managed(spark_tmp_path, "overwrite")
@allow_non_gpu(*delta_meta_allow)
@delta_lake
@ignore_order
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
def test_delta_append_round_trip_unmanaged(spark_tmp_path):
do_update_round_trip_managed(spark_tmp_path, "append")
@allow_non_gpu(*delta_meta_allow)
@delta_lake
@ignore_order
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
@pytest.mark.skipif(is_databricks_runtime() and is_before_spark_330(),
reason="Databricks 10.4 does not properly handle options passed during DataFrame API write")
def test_delta_write_round_trip_cdf_write_opt(spark_tmp_path):
gen_list = [("ints", int_gen)]
data_path = spark_tmp_path + "/DELTA_DATA"
confs = copy_and_update(writer_confs, delta_writes_enabled_conf)
# drop the _commit_timestamp column when comparing since it will always be different
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path: gen_df(spark, gen_list).coalesce(1).write.format("delta")
.option("delta.enableChangeDataFeed", "true")
.save(path),
lambda spark, path: spark.read.format("delta")
.option("readChangeDataFeed", "true")
.option("startingVersion", 0)
.load(path)
.drop("_commit_timestamp"),
data_path,
conf=confs)
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path: gen_df(spark, gen_list).coalesce(1).write.format("delta")
.mode("overwrite")
.save(path),
lambda spark, path: spark.read.format("delta")
.option("readChangeDataFeed", "true")
.option("startingVersion", 0)
.load(path)
.drop("_commit_timestamp"),
data_path,
conf=confs)
with_cpu_session(lambda spark: assert_gpu_and_cpu_delta_logs_equivalent(spark, data_path))
@allow_non_gpu(*delta_meta_allow)
@delta_lake
@ignore_order
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
def test_delta_write_round_trip_cdf_table_prop(spark_tmp_path):
gen_list = [("ints", int_gen)]
data_path = spark_tmp_path + "/DELTA_DATA"
confs = copy_and_update(writer_confs, delta_writes_enabled_conf)
def setup_tables(spark):
for name in ["CPU", "GPU"]:
spark.sql("CREATE TABLE delta.`{}/{}` (ints INT) ".format(data_path, name) +
"USING DELTA TBLPROPERTIES (delta.enableChangeDataFeed = true)")
with_cpu_session(setup_tables)
# drop the _commit_timestamp column when comparing since it will always be different
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path: gen_df(spark, gen_list).coalesce(1).write.format("delta")
.mode("append")
.option("delta.enableChangeDataFeed", "true")
.save(path),
lambda spark, path: spark.read.format("delta")
.option("readChangeDataFeed", "true")
.option("startingVersion", 0)
.load(path)
.drop("_commit_timestamp"),
data_path,
conf=confs)
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path: gen_df(spark, gen_list).coalesce(1).write.format("delta")
.mode("overwrite")
.save(path),
lambda spark, path: spark.read.format("delta")
.option("readChangeDataFeed", "true")
.option("startingVersion", 0)
.load(path)
.drop("_commit_timestamp"),
data_path,
conf=confs)
with_cpu_session(lambda spark: assert_gpu_and_cpu_delta_logs_equivalent(spark, data_path))
@allow_non_gpu(*delta_meta_allow, delta_write_fallback_allow)
@delta_lake
@ignore_order
@pytest.mark.parametrize("ts_write", ["INT96", "TIMESTAMP_MICROS", "TIMESTAMP_MILLIS"], ids=idfn)
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
def test_delta_write_legacy_timestamp_fallback(spark_tmp_path, ts_write):
gen = TimestampGen(start=datetime(1590, 1, 1, tzinfo=timezone.utc))
data_path = spark_tmp_path + "/DELTA_DATA"
all_confs = copy_and_update(delta_writes_enabled_conf, {
"spark.sql.legacy.parquet.datetimeRebaseModeInWrite": "LEGACY",
"spark.sql.legacy.parquet.int96RebaseModeInWrite": "LEGACY",
"spark.sql.legacy.parquet.outputTimestampType": ts_write
})
assert_gpu_fallback_write(
lambda spark, path: unary_op_df(spark, gen).coalesce(1).write.format("delta").save(path),
lambda spark, path: spark.read.format("delta").load(path),
data_path,
delta_write_fallback_check,
conf=all_confs)
@allow_non_gpu(*delta_meta_allow, delta_write_fallback_allow)
@delta_lake
@ignore_order
@pytest.mark.parametrize("write_options", [{"parquet.encryption.footer.key": "k1"},
{"parquet.encryption.column.keys": "k2:a"},
{"parquet.encryption.footer.key": "k1", "parquet.encryption.column.keys": "k2:a"}])
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
def test_delta_write_encryption_option_fallback(spark_tmp_path, write_options):
def write_func(spark, path):
writer = unary_op_df(spark, int_gen).coalesce(1).write.format("delta")
for key, value in write_options.items():
            writer.option(key, value)
writer.save(path)
data_path = spark_tmp_path + "/DELTA_DATA"
assert_gpu_fallback_write(
write_func,
lambda spark, path: spark.read.format("delta").load(path),
data_path,
delta_write_fallback_check,
conf=delta_writes_enabled_conf)
@allow_non_gpu(*delta_meta_allow, delta_write_fallback_allow)
@delta_lake
@ignore_order
@pytest.mark.parametrize("write_options", [{"parquet.encryption.footer.key": "k1"},
{"parquet.encryption.column.keys": "k2:a"},
{"parquet.encryption.footer.key": "k1", "parquet.encryption.column.keys": "k2:a"}])
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
def test_delta_write_encryption_runtimeconfig_fallback(spark_tmp_path, write_options):
data_path = spark_tmp_path + "/DELTA_DATA"
assert_gpu_fallback_write(
lambda spark, path: unary_op_df(spark, int_gen).coalesce(1).write.format("delta").save(path),
lambda spark, path: spark.read.format("delta").load(path),
data_path,
delta_write_fallback_check,
conf=copy_and_update(write_options, delta_writes_enabled_conf))
@allow_non_gpu(*delta_meta_allow, delta_write_fallback_allow)
@delta_lake
@ignore_order
@pytest.mark.parametrize("write_options", [{"parquet.encryption.footer.key": "k1"},
{"parquet.encryption.column.keys": "k2:a"},
{"parquet.encryption.footer.key": "k1", "parquet.encryption.column.keys": "k2:a"}])
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
def test_delta_write_encryption_hadoopconfig_fallback(spark_tmp_path, write_options):
data_path = spark_tmp_path + "/DELTA_DATA"
def setup_hadoop_confs(spark):
for k, v in write_options.items():
spark.sparkContext._jsc.hadoopConfiguration().set(k, v)
def reset_hadoop_confs(spark):
for k in write_options.keys():
spark.sparkContext._jsc.hadoopConfiguration().unset(k)
try:
with_cpu_session(setup_hadoop_confs)
assert_gpu_fallback_write(
lambda spark, path: unary_op_df(spark, int_gen).coalesce(1).write.format("delta").save(path),
lambda spark, path: spark.read.format("delta").load(path),
data_path,
delta_write_fallback_check,
conf=delta_writes_enabled_conf)
finally:
with_cpu_session(reset_hadoop_confs)
@allow_non_gpu(*delta_meta_allow, delta_write_fallback_allow)
@delta_lake
@ignore_order
@pytest.mark.parametrize('codec', ['gzip'])
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
def test_delta_write_compression_fallback(spark_tmp_path, codec):
data_path = spark_tmp_path + "/DELTA_DATA"
confs=copy_and_update(delta_writes_enabled_conf, {"spark.sql.parquet.compression.codec": codec})
assert_gpu_fallback_write(
lambda spark, path: unary_op_df(spark, int_gen).coalesce(1).write.format("delta").save(path),
lambda spark, path: spark.read.format("delta").load(path),
data_path,
delta_write_fallback_check,
conf=confs)
@allow_non_gpu(*delta_meta_allow, delta_write_fallback_allow)
@delta_lake
@ignore_order
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
def test_delta_write_legacy_format_fallback(spark_tmp_path):
data_path = spark_tmp_path + "/DELTA_DATA"
confs=copy_and_update(delta_writes_enabled_conf, {"spark.sql.parquet.writeLegacyFormat": "true"})
assert_gpu_fallback_write(
lambda spark, path: unary_op_df(spark, int_gen).coalesce(1).write.format("delta").save(path),
lambda spark, path: spark.read.format("delta").load(path),
data_path,
delta_write_fallback_check,
conf=confs)
@allow_non_gpu(*delta_meta_allow)
@delta_lake
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
def test_delta_write_append_only(spark_tmp_path):
data_path = spark_tmp_path + "/DELTA_DATA"
gen = int_gen
# setup initial table
with_gpu_session(lambda spark: unary_op_df(spark, gen).coalesce(1).write.format("delta")
.option("delta.appendOnly", "true")
.save(data_path),
conf=delta_writes_enabled_conf)
# verify overwrite fails
assert_spark_exception(
lambda: with_gpu_session(
lambda spark: unary_op_df(spark, gen).write.format("delta").mode("overwrite").save(data_path),
conf=delta_writes_enabled_conf),
"This table is configured to only allow appends")
@allow_non_gpu(*delta_meta_allow)
@delta_lake
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
def test_delta_write_constraint_not_null(spark_tmp_path):
data_path = spark_tmp_path + "/DELTA_DATA"
not_null_gen = StringGen(nullable=False)
null_gen = SetValuesGen(StringType(), [None])
# create table with not null constraint
def setup_table(spark):
spark.sql("CREATE TABLE delta.`{}` (a string NOT NULL) USING DELTA".format(data_path))
with_cpu_session(setup_table)
# verify write of non-null values does not throw
with_gpu_session(lambda spark: unary_op_df(spark, not_null_gen).write.format("delta").mode("append").save(data_path),
conf=delta_writes_enabled_conf)
# verify write of null value throws
assert_spark_exception(
lambda: with_gpu_session(
lambda spark: unary_op_df(spark, null_gen).write.format("delta").mode("append").save(data_path),
conf=delta_writes_enabled_conf),
"NOT NULL constraint violated for column: a")
@allow_non_gpu(*delta_meta_allow)
@delta_lake
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
def test_delta_write_constraint_check(spark_tmp_path):
data_path = spark_tmp_path + "/DELTA_DATA"
# create table with check constraint
def setup_table(spark):
spark.sql("CREATE TABLE delta.`{}` (id long, x long) USING DELTA".format(data_path))
spark.sql("ALTER TABLE delta.`{}` ADD CONSTRAINT customcheck CHECK (id < x)".format(data_path))
with_cpu_session(setup_table)
# verify write of dataframe that passes constraint check does not fail
def gen_good_data(spark):
return spark.range(1024).withColumn("x", f.col("id") + 1)
with_gpu_session(lambda spark: gen_good_data(spark).write.format("delta").mode("append").save(data_path),
conf=delta_writes_enabled_conf)
# verify write of values that violate the constraint throws
def gen_bad_data(spark):
return gen_good_data(spark).union(spark.range(1).withColumn("x", f.col("id")))
assert_spark_exception(
lambda: with_gpu_session(
lambda spark: gen_bad_data(spark).write.format("delta").mode("append").save(data_path),
conf=delta_writes_enabled_conf),
"CHECK constraint customcheck (id < x) violated")
@allow_non_gpu(*delta_meta_allow)
@delta_lake
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
def test_delta_write_constraint_check_fallback(spark_tmp_path):
data_path = spark_tmp_path + "/DELTA_DATA"
# create table with check constraint
def setup_table(spark):
spark.sql("CREATE TABLE delta.`{}` (id long, x long) USING DELTA".format(data_path))
spark.sql("ALTER TABLE delta.`{}` ADD CONSTRAINT mycheck CHECK (id + x < 1000)".format(data_path))
with_cpu_session(setup_table)
# create a conf that will force constraint check to fallback to CPU
add_disable_conf = copy_and_update(delta_writes_enabled_conf, {"spark.rapids.sql.expression.Add": "false"})
# verify write of dataframe that passes constraint check does not fail
def gen_good_data(spark):
return spark.range(100).withColumn("x", f.col("id") + 1)
# TODO: Find a way to capture plan with DeltaInvariantCheckerExec
with_gpu_session(lambda spark: gen_good_data(spark).write.format("delta").mode("append").save(data_path),
conf=add_disable_conf)
# verify write of values that violate the constraint throws
def gen_bad_data(spark):
return spark.range(1000).withColumn("x", f.col("id") + 1)
assert_spark_exception(
lambda: with_gpu_session(
lambda spark: gen_bad_data(spark).write.format("delta").mode("append").save(data_path),
conf=add_disable_conf),
"CHECK constraint mycheck ((id + x) < 1000) violated",)
@allow_non_gpu(*delta_meta_allow)
@delta_lake
@ignore_order
@pytest.mark.parametrize("num_cols", [-1, 0, 1, 2, 3 ], ids=idfn)
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
def test_delta_write_stat_column_limits(num_cols, spark_tmp_path):
data_path = spark_tmp_path + "/DELTA_DATA"
confs = copy_and_update(delta_writes_enabled_conf, {"spark.databricks.io.skipping.stringPrefixLength": 8})
strgen = StringGen() \
.with_special_case((chr(sys.maxunicode) * 7) + "abc") \
.with_special_case((chr(sys.maxunicode) * 8) + "abc") \
.with_special_case((chr(sys.maxunicode) * 16) + "abc") \
.with_special_case(('\U0000FFFD' * 7) + "abc") \
.with_special_case(('\U0000FFFD' * 8) + "abc") \
.with_special_case(('\U0000FFFD' * 16) + "abc")
gens = [("a", StructGen([("x", strgen), ("y", StructGen([("z", strgen)]))])),
("b", binary_gen),
("c", strgen)]
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path: gen_df(spark, gens).coalesce(1).write.format("delta").save(path),
lambda spark, path: spark.read.format("delta").load(path),
data_path,
conf=confs)
with_cpu_session(lambda spark: assert_gpu_and_cpu_delta_logs_equivalent(spark, data_path))
@allow_non_gpu("CreateTableExec", *delta_meta_allow)
@delta_lake
@ignore_order
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
def test_delta_write_generated_columns(spark_tmp_table_factory, spark_tmp_path):
from delta.tables import DeltaTable
def write_data(spark, path):
DeltaTable.create(spark) \
.tableName(spark_tmp_table_factory.get()) \
.location(path) \
.addColumn("id", "LONG", comment="IDs") \
.addColumn("x", "LONG", comment="some other column") \
.addColumn("z", "STRING", comment="a generated column",
generatedAlwaysAs="CONCAT('sum(id,x)=', CAST((id + x) AS STRING))") \
.execute()
df = spark.range(2048).withColumn("x", f.col("id") * 2)
df.write.format("delta").mode("append").save(path)
data_path = spark_tmp_path + "/DELTA_DATA"
assert_gpu_and_cpu_writes_are_equal_collect(
write_data,
lambda spark, path: spark.read.format("delta").load(path),
data_path,
conf=delta_writes_enabled_conf)
with_cpu_session(lambda spark: assert_gpu_and_cpu_delta_logs_equivalent(spark, data_path))
@allow_non_gpu("CreateTableExec", *delta_meta_allow)
@delta_lake
@ignore_order
@pytest.mark.skipif(is_before_spark_320() or not is_databricks_runtime(),
reason="Delta Lake identity columns are currently only supported on Databricks")
def test_delta_write_identity_columns(spark_tmp_path):
data_path = spark_tmp_path + "/DELTA_DATA"
def create_data(spark, path):
spark.sql("CREATE TABLE delta.`{}` (x BIGINT, id BIGINT GENERATED ALWAYS AS IDENTITY) USING DELTA".format(path))
spark.range(2048).selectExpr("id * id AS x").write.format("delta").mode("append").save(path)
assert_gpu_and_cpu_writes_are_equal_collect(
create_data,
lambda spark, path: spark.read.format("delta").load(path),
data_path,
conf=delta_writes_enabled_conf)
with_cpu_session(lambda spark: assert_gpu_and_cpu_delta_logs_equivalent(spark, data_path))
def append_data(spark, path):
spark.range(2048).selectExpr("id + 10 as x").write.format("delta").mode("append").save(path)
assert_gpu_and_cpu_writes_are_equal_collect(
append_data,
lambda spark, path: spark.read.format("delta").load(path),
data_path,
conf=delta_writes_enabled_conf)
with_cpu_session(lambda spark: assert_gpu_and_cpu_delta_logs_equivalent(spark, data_path))
@allow_non_gpu("CreateTableExec", *delta_meta_allow)
@delta_lake
@ignore_order
@pytest.mark.skipif(is_before_spark_320() or not is_databricks_runtime(),
reason="Delta Lake identity columns are currently only supported on Databricks")
def test_delta_write_multiple_identity_columns(spark_tmp_path):
data_path = spark_tmp_path + "/DELTA_DATA"
def create_data(spark, path):
spark.sql("CREATE TABLE delta.`{}` (".format(path) +
"id1 BIGINT GENERATED ALWAYS AS IDENTITY, "
"x BIGINT, "
"id2 BIGINT GENERATED ALWAYS AS IDENTITY ( START WITH 100 ), "
"id3 BIGINT GENERATED ALWAYS AS IDENTITY ( INCREMENT BY 11 ), "
"id4 BIGINT GENERATED ALWAYS AS IDENTITY ( START WITH -200 INCREMENT BY 3 ), "
"id5 BIGINT GENERATED ALWAYS AS IDENTITY ( START WITH 12 INCREMENT BY -3 )"
") USING DELTA")
spark.range(2048).selectExpr("id * id AS x").write.format("delta").mode("append").save(path)
assert_gpu_and_cpu_writes_are_equal_collect(
create_data,
lambda spark, path: spark.read.format("delta").load(path),
data_path,
conf=delta_writes_enabled_conf)
with_cpu_session(lambda spark: assert_gpu_and_cpu_delta_logs_equivalent(spark, data_path))
def append_data(spark, path):
spark.range(2048).selectExpr("id + 10 as x").write.format("delta").mode("append").save(path)
assert_gpu_and_cpu_writes_are_equal_collect(
append_data,
lambda spark, path: spark.read.format("delta").load(path),
data_path,
conf=delta_writes_enabled_conf)
with_cpu_session(lambda spark: assert_gpu_and_cpu_delta_logs_equivalent(spark, data_path))
@allow_non_gpu(*delta_meta_allow, "ExecutedCommandExec")
@delta_lake
@ignore_order
@pytest.mark.parametrize("confkey", ["optimizeWrite"], ids=idfn)
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
@pytest.mark.skipif(is_databricks_runtime(), reason="Optimized write is supported on Databricks")
def test_delta_write_auto_optimize_write_opts_fallback(confkey, spark_tmp_path):
data_path = spark_tmp_path + "/DELTA_DATA"
assert_gpu_fallback_write(
lambda spark, path: unary_op_df(spark, int_gen).coalesce(1).write.format("delta").option(confkey, "true").save(path),
lambda spark, path: spark.read.format("delta").load(path),
data_path,
"ExecutedCommandExec",
conf=delta_writes_enabled_conf)
@allow_non_gpu(*delta_meta_allow, "CreateTableExec", "ExecutedCommandExec")
@delta_lake
@ignore_order
@pytest.mark.parametrize("confkey", [
pytest.param("delta.autoOptimize", marks=pytest.mark.skipif(
is_databricks_runtime(), reason="Optimize write is supported on Databricks")),
pytest.param("delta.autoOptimize.optimizeWrite", marks=pytest.mark.skipif(
is_databricks_runtime(), reason="Optimize write is supported on Databricks"))], ids=idfn)
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
@pytest.mark.skipif(not is_databricks_runtime(), reason="Auto optimize only supported on Databricks")
def test_delta_write_auto_optimize_table_props_fallback(confkey, spark_tmp_path):
data_path = spark_tmp_path + "/DELTA_DATA"
def setup_tables(spark):
spark.sql("CREATE TABLE delta.`{}/CPU` (a INT) USING DELTA TBLPROPERTIES ({} = true)".format(data_path, confkey))
spark.sql("CREATE TABLE delta.`{}/GPU` (a INT) USING DELTA TBLPROPERTIES ({} = true)".format(data_path, confkey))
with_cpu_session(setup_tables)
assert_gpu_fallback_write(
lambda spark, path: unary_op_df(spark, int_gen).coalesce(1).write.format("delta").mode("append").save(path),
lambda spark, path: spark.read.format("delta").load(path),
data_path,
"ExecutedCommandExec",
conf=delta_writes_enabled_conf)
@allow_non_gpu(*delta_meta_allow, "ExecutedCommandExec")
@delta_lake
@ignore_order
@pytest.mark.parametrize("confkey", [
pytest.param("spark.databricks.delta.optimizeWrite.enabled", marks=pytest.mark.skipif(
is_databricks_runtime(), reason="Optimize write is supported on Databricks")),
pytest.param("spark.databricks.delta.properties.defaults.autoOptimize.optimizeWrite", marks=pytest.mark.skipif(
is_databricks_runtime(), reason="Optimize write is supported on Databricks"))], ids=idfn)
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
def test_delta_write_auto_optimize_sql_conf_fallback(confkey, spark_tmp_path):
data_path = spark_tmp_path + "/DELTA_DATA"
confs=copy_and_update(delta_writes_enabled_conf, {confkey: "true"})
assert_gpu_fallback_write(
lambda spark, path: unary_op_df(spark, int_gen).coalesce(1).write.format("delta").save(path),
lambda spark, path: spark.read.format("delta").load(path),
data_path,
"ExecutedCommandExec",
conf=confs)
@allow_non_gpu(*delta_meta_allow)
@delta_lake
@ignore_order
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
def test_delta_write_aqe_join(spark_tmp_path):
data_path = spark_tmp_path + "/DELTA_DATA"
confs=copy_and_update(delta_writes_enabled_conf, {"spark.sql.adaptive.enabled": "true"})
def do_join(spark, path):
df = unary_op_df(spark, int_gen)
df.join(df, ["a"], "inner").write.format("delta").save(path)
assert_gpu_and_cpu_writes_are_equal_collect(
do_join,
lambda spark, path: spark.read.format("delta").load(path),
data_path,
conf=confs)
with_cpu_session(lambda spark: assert_gpu_and_cpu_delta_logs_equivalent(spark, data_path))
@allow_non_gpu(*delta_meta_allow)
@delta_lake
@ignore_order
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
@pytest.mark.skipif(not is_databricks_runtime(), reason="Delta Lake optimized writes are only supported on Databricks")
@pytest.mark.parametrize("enable_conf_key", [
"spark.databricks.delta.optimizeWrite.enabled",
"spark.databricks.delta.properties.defaults.autoOptimize.optimizeWrite"], ids=idfn)
@pytest.mark.parametrize("aqe_enabled", [True, False], ids=idfn)
def test_delta_write_optimized_aqe(spark_tmp_path, enable_conf_key, aqe_enabled):
num_chunks = 20
def do_write(data_path, is_optimize_write):
confs=copy_and_update(delta_writes_enabled_conf, {
enable_conf_key : str(is_optimize_write),
"spark.sql.adaptive.enabled" : str(aqe_enabled)
})
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path: unary_op_df(spark, int_gen)\
.repartition(num_chunks).write.format("delta").save(path),
lambda spark, path: spark.read.format("delta").load(path),
data_path,
conf=confs)
data_path = spark_tmp_path + "/DELTA_DATA1"
do_write(data_path, is_optimize_write=False)
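    # Without optimized write, each of the num_chunks partitions produces its own output file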
opmetrics = get_last_operation_metrics(data_path + "/GPU")
assert int(opmetrics["numFiles"]) == num_chunks
data_path = spark_tmp_path + "/DELTA_DATA2"
do_write(data_path, is_optimize_write=True)
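    # Optimized write coalesces the repartitioned data into a single output file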
opmetrics = get_last_operation_metrics(data_path + "/GPU")
assert int(opmetrics["numFiles"]) == 1
@allow_non_gpu(*delta_meta_allow)
@delta_lake
@ignore_order(local=True)
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
@pytest.mark.skipif(not is_databricks_runtime(), reason="Delta Lake optimized writes are only supported on Databricks")
def test_delta_write_optimized_supported_types(spark_tmp_path):
num_chunks = 20
data_path = spark_tmp_path + "/DELTA_DATA"
confs=copy_and_update(writer_confs, delta_writes_enabled_conf, {
"spark.sql.execution.sortBeforeRepartition": "true",
"spark.databricks.delta.properties.defaults.autoOptimize.optimizeWrite": "true"
})
simple_gens = [ byte_gen, short_gen, int_gen, long_gen, float_gen, double_gen,
string_gen, boolean_gen, date_gen, TimestampGen() ]
genlist = simple_gens + \
[ StructGen([("child" + str(i), gen) for i, gen in enumerate(simple_gens)]) ] + \
[ StructGen([("x", StructGen([("y", int_gen)]))]) ]
gens = [("c" + str(i), gen) for i, gen in enumerate(genlist)]
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path: gen_df(spark, gens) \
.repartition(num_chunks).write.format("delta").save(path),
lambda spark, path: spark.read.format("delta").load(path),
data_path,
conf=confs)
opmetrics = get_last_operation_metrics(data_path + "/GPU")
assert int(opmetrics["numFiles"]) < 20
@allow_non_gpu(*delta_meta_allow)
@delta_lake
@ignore_order(local=True)
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
@pytest.mark.skipif(not is_databricks_runtime(), reason="Delta Lake optimized writes are only supported on Databricks")
def test_delta_write_optimized_supported_types_partitioned(spark_tmp_path):
data_path = spark_tmp_path + "/DELTA_DATA"
confs=copy_and_update(writer_confs, delta_writes_enabled_conf, {
"spark.sql.execution.sortBeforeRepartition": "true",
"spark.databricks.delta.properties.defaults.autoOptimize.optimizeWrite": "true"
})
genlist = [ SetValuesGen(StringType(), ["a", "b", "c"]) ] + \
[ x for sublist in parquet_write_gens_list for x in sublist ]
gens = [("c" + str(i), gen) for i, gen in enumerate(genlist)]
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path: gen_df(spark, gens) \
.write.format("delta").partitionBy("c0").save(path),
lambda spark, path: spark.read.format("delta").load(path),
data_path,
conf=confs)
@allow_non_gpu(delta_optimized_write_fallback_allow, *delta_meta_allow)
@delta_lake
@ignore_order(local=True)
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
@pytest.mark.skipif(not is_databricks_runtime(), reason="Delta Lake optimized writes are only supported on Databricks")
@pytest.mark.parametrize("gen", [
simple_string_to_string_map_gen,
StructGen([("x", ArrayGen(int_gen))]),
ArrayGen(StructGen([("x", long_gen)]))], ids=idfn)
def test_delta_write_optimized_unsupported_sort_fallback(spark_tmp_path, gen):
data_path = spark_tmp_path + "/DELTA_DATA"
confs=copy_and_update(delta_writes_enabled_conf, {
"spark.sql.execution.sortBeforeRepartition": "true",
"spark.databricks.delta.properties.defaults.autoOptimize.optimizeWrite": "true"
})
assert_gpu_fallback_write(
lambda spark, path: unary_op_df(spark, gen).coalesce(1).write.format("delta").save(path),
lambda spark, path: spark.read.format("delta").load(path),
data_path,
delta_write_fallback_check,
conf=confs)
@allow_non_gpu(*delta_meta_allow)
@delta_lake
@ignore_order
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
@pytest.mark.skipif(not is_databricks_runtime(), reason="Delta Lake optimized writes are only supported on Databricks")
def test_delta_write_optimized_table_confs(spark_tmp_path):
data_path = spark_tmp_path + "/DELTA_DATA"
gpu_data_path = data_path + "/GPU"
num_chunks = 20
def do_write(confs):
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path: unary_op_df(spark, int_gen)\
.repartition(num_chunks).write.format("delta").mode("overwrite").save(path),
lambda spark, path: spark.read.format("delta").load(path),
data_path,
conf=confs)
confs=copy_and_update(delta_writes_enabled_conf, {
"spark.databricks.delta.optimizeWrite.enabled" : "true"
})
do_write(confs)
opmetrics = get_last_operation_metrics(gpu_data_path)
assert int(opmetrics["numFiles"]) == 1
# Verify SQL conf takes precedence over table setting
confs=copy_and_update(delta_writes_enabled_conf, {
"spark.databricks.delta.optimizeWrite.enabled" : "false"
})
do_write(confs)
opmetrics = get_last_operation_metrics(gpu_data_path)
assert int(opmetrics["numFiles"]) == num_chunks
# Verify default conf is not honored after table setting
def do_prop_update(spark):
spark.sql("ALTER TABLE delta.`{}`".format(gpu_data_path) +
" SET TBLPROPERTIES (delta.autoOptimize.optimizeWrite = true)")
with_cpu_session(do_prop_update)
confs=copy_and_update(delta_writes_enabled_conf, {
"spark.databricks.delta.properties.defaults.autoOptimize.optimizeWrite" : "false"
})
do_write(confs)
opmetrics = get_last_operation_metrics(gpu_data_path)
assert int(opmetrics["numFiles"]) == 1
@allow_non_gpu(*delta_meta_allow)
@delta_lake
@ignore_order
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
@pytest.mark.skipif(not is_databricks_runtime(), reason="Delta Lake optimized writes are only supported on Databricks")
def test_delta_write_optimized_partitioned(spark_tmp_path):
data_path = spark_tmp_path + "/DELTA_DATA"
gpu_data_path = data_path + "/GPU"
num_chunks = 20
def do_write(confs):
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path: two_col_df(spark, int_gen, SetValuesGen(StringType(), ["x", "y"]))\
.repartition(num_chunks).write.format("delta")\
.mode("overwrite").partitionBy("b").save(path),
lambda spark, path: spark.read.format("delta").load(path),
data_path,
conf=confs)
confs=copy_and_update(delta_writes_enabled_conf, {
"spark.databricks.delta.optimizeWrite.enabled" : "false"
})
do_write(confs)
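    # With optimized write disabled, each of the num_chunks tasks writes a file per partition value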
opmetrics = get_last_operation_metrics(gpu_data_path)
assert int(opmetrics["numFiles"]) == 2 * num_chunks
# Verify SQL conf takes precedence over table setting
confs=copy_and_update(delta_writes_enabled_conf, {
"spark.databricks.delta.optimizeWrite.enabled" : "true"
})
do_write(confs)
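    # With optimized write enabled, output is coalesced to one file per partition value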
opmetrics = get_last_operation_metrics(gpu_data_path)
assert int(opmetrics["numFiles"]) == 2
@allow_non_gpu(*delta_meta_allow)
@delta_lake
@ignore_order
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
def test_delta_write_partial_overwrite_replace_where(spark_tmp_path):
gen_list = [("a", int_gen),
("b", SetValuesGen(StringType(), ["x", "y", "z"])),
("c", string_gen),
("d", SetValuesGen(IntegerType(), [1, 2, 3])),
("e", long_gen)]
data_path = spark_tmp_path + "/DELTA_DATA"
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path: gen_df(spark, gen_list).coalesce(1).write.format("delta")\
.partitionBy("b", "d")\
.save(path),
lambda spark, path: spark.read.format("delta").load(path),
data_path,
conf=copy_and_update(writer_confs, delta_writes_enabled_conf))
with_cpu_session(lambda spark: assert_gpu_and_cpu_delta_logs_equivalent(spark, data_path))
# overwrite with a subset of the original schema
gen_list = [("b", SetValuesGen(StringType(), ["y"])),
("e", long_gen),
("c", string_gen),
("d", SetValuesGen(IntegerType(), [1, 2, 3]))]
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path: gen_df(spark, gen_list).coalesce(1).write.format("delta")\
.mode("overwrite")\
.partitionBy("b", "d")\
.option("replaceWhere", "b = 'y'")\
.save(path),
lambda spark, path: spark.read.format("delta").load(path),
data_path,
conf=copy_and_update(writer_confs, delta_writes_enabled_conf))
with_cpu_session(lambda spark: assert_gpu_and_cpu_delta_logs_equivalent(spark, data_path))
| spark-rapids-branch-23.10 | integration_tests/src/main/python/delta_lake_write_test.py |
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_gpu_and_cpu_are_equal_collect, assert_gpu_and_cpu_error
from data_gen import *
from marks import incompat, approximate_float
from pyspark.sql.types import *
import pyspark.sql.functions as f
def test_mono_id():
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, short_gen, num_slices=8).select(
f.col('a'),
f.monotonically_increasing_id()))
def test_part_id():
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, short_gen, num_slices=8).select(
f.col('a'),
f.spark_partition_id()))
def test_raise_error():
data_gen = ShortGen(nullable=False, min_val=0, max_val=20, special_cases=[])
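    # Values are capped at 20, so the raise_error branch below never fires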
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, data_gen, num_slices=2).select(
f.when(f.col('a') > 30, f.raise_error("unexpected"))))
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.range(0).select(f.raise_error(f.col("id"))))
assert_gpu_and_cpu_error(
lambda spark: unary_op_df(spark, null_gen, length=2, num_slices=1).select(
f.raise_error(f.col('a'))).collect(),
conf={},
error_message="java.lang.RuntimeException")
assert_gpu_and_cpu_error(
lambda spark: unary_op_df(spark, short_gen, length=2, num_slices=1).select(
f.raise_error(f.lit("unexpected"))).collect(),
conf={},
error_message="java.lang.RuntimeException: unexpected")
| spark-rapids-branch-23.10 | integration_tests/src/main/python/misc_expr_test.py |
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A JSON generator built from the context-free grammar at https://www.json.org/json-en.html
import random
from marks import allow_non_gpu, fuzz_test
from typing import List
from data_gen import *
from asserts import assert_gpu_and_cpu_are_equal_collect
from marks import approximate_float
_name_gen = StringGen(pattern="[a-zA-Z]{1,30}", nullable=False)
_name_gen.start(random.Random(0))
def gen_top_schema(depth):
return gen_object_type(depth)
def gen_schema(depth):
"""
Abstract data type of JSON schema
type Schema = Object of Fields
| Array of Schema
| String
| Number
| Bool
type Fields = Field list
type Field = {Name, Schema}
type Name = String
"""
if depth > 1 and random.randint(1, 100) < 90:
return random.choice([gen_object_type, gen_array_type])(depth)
else:
return random.choice([gen_string_type, gen_number_type, gen_bool_type])()
def gen_object_type(depth):
return StructType(gen_fields_type(depth-1))
def gen_array_type(depth):
return ArrayType(gen_schema(depth-1))
def gen_fields_type(depth):
length = random.randint(1, 5)
return [gen_field_type(depth) for _ in range(length)]
def gen_field_type(depth):
return StructField(gen_name(), gen_schema(depth-1))
def gen_string_type():
return StringType()
def gen_number_type():
return random.choice([IntegerType(), FloatType(), DoubleType()])
def gen_bool_type():
return BooleanType()
def gen_name():
return _name_gen.gen()
# This is just a simple prototype of a JSON generator.
# You need to generate a JSON schema before using it.
#
# # Example
# ```python
# schema = gen_top_schema(3)
# with open("./temp.json", 'w') as f:
#     for t in gen_json(schema):
#         f.write(t)
# ```
# to generate a random JSON file.
def gen_json(schema: DataType):
"""
JSON -> ELEMENT
"""
lines = random.randint(0, 10)
for _ in range(lines):
for t in gen_element(schema):
yield t
yield '\n'
def gen_value(schema: DataType):
"""
VALUE -> OBJECT
| ARRAY
| STRING
| NUMBER
| BOOL
"""
if isinstance(schema, StructType):
for t in gen_object(schema):
yield t
elif isinstance(schema, ArrayType):
for t in gen_array(schema.elementType):
yield t
elif isinstance(schema, StringType):
for t in gen_string():
yield t
elif isinstance(schema, BooleanType):
for t in gen_bool():
yield t
elif isinstance(schema, IntegerType):
for t in gen_integer():
yield t
elif isinstance(schema, (FloatType, DoubleType)):
for t in gen_number():
yield t
else:
raise Exception("not supported schema")
def gen_object(schema: StructType):
"""
OBJECT -> '{' WHITESPACE '}'
| '{' MEMBERS '}'
"""
yield "{"
if len(schema) == 0:
for t in gen_whitespace():
yield t
else:
for t in gen_members(schema.fields):
yield t
yield "}"
def gen_members(schema: List[StructField]):
"""
MEMBERS -> MEMBER
| MEMBER ',' MEMBERS
"""
if len(schema) == 1:
for t in gen_member(schema[0]):
yield t
else:
for t in gen_member(schema[0]):
yield t
yield ","
for t in gen_members(schema[1:]):
yield t
def gen_member(schema: StructField):
"""
MEMBER -> WHITESPACE STRING WHITESPACE ':' ELEMENT
"""
for t in gen_whitespace():
yield t
yield '"' + schema.name + '"'
for t in gen_whitespace():
yield t
yield ":"
for t in gen_element(schema.dataType):
yield t
def gen_array(schema: DataType):
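    """
    ARRAY -> '[' WHITESPACE ']'
          | '[' ELEMENTS ']'
    """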
yield '['
for t in random.choices([gen_whitespace(), gen_elements(schema)], [10, 90], k=1)[0]:
yield t
yield ']'
def gen_elements(schema: DataType):
"""
ELEMENTS -> ELEMENT
| ELEMENT ',' ELEMENTS
"""
for t in gen_element(schema):
yield t
if random.randint(1, 100) < 80:
yield ','
for t in gen_elements(schema):
yield t
def gen_element(schema: DataType):
"""
ELEMENT -> WHITESPACE VALUE WHITESPACE
"""
for t in gen_whitespace():
yield t
for t in gen_value(schema):
yield t
for t in gen_whitespace():
yield t
def gen_string():
"""
STRING -> '"' CHARACTERS '"'
"""
yield '"'
for t in gen_characters():
yield t
yield '"'
def gen_characters():
"""
CHARACTERS -> ''
| CHAR CHARACTERS
"""
if random.randint(0,100) < 30:
yield ''
else:
for t in gen_char():
yield t
for t in gen_characters():
yield t
def gen_char():
"""
CHAR -> 0x0020 .. 0x10ffff (exclude 0x0022 and 0x005c)
| '\\' ESCAPE
"""
if random.randint(0, 99) < 80:
unicode = random.randint(0x0020, 0x10ffff)
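        # Redraw while the code point is '"' (0x22) or '\' (0x5c); those characters must be escaped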
while unicode == 0x22 or unicode == 0x5c:
unicode = random.randint(0x0020, 0x10ffff)
yield chr(unicode)
else:
yield '\\'
for t in gen_escape():
yield t
def gen_escape():
"""
ESCAPE -> '"' | '\\' | '/' | 'b' | 'f' | 'n' | 'r' | 't'
| 'u' HEX HEX HEX HEX
"""
if random.randint(0, 8) < 8:
yield random.choice(['"', '\\', '/', 'b', 'f', 'n', 'r', 't'])
else:
yield 'u'
for _ in range(4):
for t in gen_hex():
yield t
def gen_hex():
"""
HEX -> DIGIT
| 'a' .. 'f'
| 'A' .. 'F'
"""
path = random.randint(0, 2)
if path == 0:
for t in gen_digit():
yield t
elif path == 1:
yield chr(random.randint(0x41, 0x46))
else:
yield chr(random.randint(0x61, 0x66))
def gen_number():
"""
NUMBER -> INTEGER FRACTION EXPONENT
"""
for t in gen_integer():
yield t
for t in gen_fraction():
yield t
for t in gen_exponent():
yield t
def gen_integer():
"""
INTEGER -> DIGIT
| ONENINE DIGITS
| '-' DIGIT
| '-' ONENINE DIGITS
"""
if random.randint(1, 100) <= 50:
yield '-'
if random.randint(1, 100) <= 50:
for t in gen_digit():
yield t
else:
for t in gen_onenine():
yield t
for t in gen_digits():
yield t
def gen_digits():
"""
DIGITS -> DIGIT
| DIGIT DIGITS
"""
for t in gen_digit():
yield t
if random.randint(1, 100) < 70:
for t in gen_digits():
yield t
def gen_digit():
"""
DIGIT -> '0'
| ONENINE
"""
if random.randint(0, 9) == 0:
yield '0'
else:
for t in gen_onenine():
yield t
def gen_onenine():
"""
ONENINE -> '1' .. '9'
"""
yield chr(random.randint(0x31, 0x39))
def gen_fraction():
"""
FRACTION -> "" | '.' DIGITS
"""
if random.randint(1, 100) < 50:
yield ""
else:
yield '.'
for t in gen_digits():
yield t
def gen_exponent():
"""
EXPONENT -> ""
| 'E' SIGN DIGITS
| 'e' SIGN DIGITS
"""
if random.randint(1, 100) < 20:
yield ""
else:
yield random.choice(['E', 'e'])
for t in gen_sign():
yield t
for t in gen_digits():
yield t
def gen_sign():
"""
SIGN -> ""
| '+'
| '-'
"""
yield random.choice(["", '+', '-'])
def gen_whitespace():
"""
WHITESPACE -> ''
| 0x0020 WHITESPACE
| 0x000a WHITESPACE (todo)
| 0x000d WHITESPACE (todo)
| 0x0009 WHITESPACE (todo)
"""
if random.randint(0, 4) > 3:
yield chr(random.choice([0x0020]))
for t in gen_whitespace():
yield t
else:
yield ''
def gen_bool():
"""
BOOL -> "true"
| "null"
| "false"
"""
yield random.choice(["true", "null", "false"])
_enable_all_types_conf = {
'spark.rapids.sql.format.json.enabled': 'true',
'spark.rapids.sql.format.json.read.enabled': 'true'}
@approximate_float
@allow_non_gpu('FileSourceScanExec')
@fuzz_test
def test_json_read_fuzz(enable_fuzz_test, spark_tmp_path):
depth = random.randint(1, 5)
schema = gen_top_schema(depth)
data_path = spark_tmp_path + '/JSON_FUZZ_DATA'
schema_path = spark_tmp_path + '/JSON_SCHEMA'
# write the schema for debugging
with open(schema_path, 'w') as f:
f.write("{}".format(schema))
with open(data_path, 'w') as f:
for c in gen_json(schema):
f.write(c)
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.read.schema(schema).json(data_path),
_enable_all_types_conf
)
| spark-rapids-branch-23.10 | integration_tests/src/main/python/json_fuzz_test.py |
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_gpu_and_cpu_are_equal_collect, assert_gpu_fallback_collect
from data_gen import *
from marks import allow_non_gpu, ignore_order
from pyspark.sql.types import *
import pyspark.sql.functions as f
pytestmark = pytest.mark.nightly_resource_consuming_test
explode_gens = all_gen + [binary_gen]
arrays_with_binary = [ArrayGen(BinaryGen(max_length=5))]
maps_with_binary = [MapGen(IntegerGen(nullable=False), BinaryGen(max_length=5))]
def four_op_df(spark, gen, length=2048, seed=0):
return gen_df(spark, StructGen([
('a', gen),
('b', gen),
('c', gen),
('d', gen)], nullable=False), length=length, seed=seed)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# Once 3.1.0 is the minimum supported Spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', explode_gens, ids=idfn)
def test_explode_makearray(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : four_op_df(spark, data_gen).selectExpr('a', 'explode(array(b, c, d))'))
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# Once 3.1.0 is the minimum supported Spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', explode_gens, ids=idfn)
def test_explode_litarray(data_gen):
array_lit = gen_scalar(ArrayGen(data_gen, min_length=3, max_length=3, nullable=False))
assert_gpu_and_cpu_are_equal_collect(
lambda spark : four_op_df(spark, data_gen).select(f.col('a'), f.col('b'), f.col('c'),
f.explode(array_lit)))
# use a small `spark.rapids.sql.batchSizeBytes` to force input batches to split up during explode
conf_to_enforce_split_input = {'spark.rapids.sql.batchSizeBytes': '8192'}
@ignore_order(local=True)
@pytest.mark.order(1) # at the head of xdist worker queue if pytest-order is installed
@pytest.mark.parametrize('data_gen', explode_gens + struct_gens_sample_with_decimal128 +
array_gens_sample + map_gens_sample + arrays_with_binary + maps_with_binary,
ids=idfn)
def test_explode_array_data(data_gen):
data_gen = [int_gen, ArrayGen(data_gen)]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, *data_gen).selectExpr('a', 'explode(b)'),
conf=conf_to_enforce_split_input)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# Once 3.1.0 is the minimum supported Spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('map_gen', map_gens_sample + decimal_128_map_gens + maps_with_binary, ids=idfn)
def test_explode_map_data(map_gen):
data_gen = [int_gen, map_gen]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, *data_gen).selectExpr('a', 'explode(b)'),
conf=conf_to_enforce_split_input)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# Once 3.1.0 is the minimum supported Spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', explode_gens, ids=idfn)
def test_explode_nested_array_data(data_gen):
data_gen = [int_gen, ArrayGen(ArrayGen(data_gen))]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, *data_gen).selectExpr(
'a', 'explode(b) as c').selectExpr('a', 'explode(c)'),
conf=conf_to_enforce_split_input)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# Once 3.1.0 is the minimum supported Spark version we can drop this
@ignore_order(local=True)
@pytest.mark.order(1) # at the head of xdist worker queue if pytest-order is installed
@pytest.mark.parametrize('data_gen', explode_gens + struct_gens_sample_with_decimal128 +
array_gens_sample + arrays_with_binary + map_gens_sample + maps_with_binary,
ids=idfn)
def test_explode_outer_array_data(data_gen):
data_gen = [int_gen, ArrayGen(data_gen)]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, *data_gen).selectExpr('a', 'explode_outer(b)'),
conf=conf_to_enforce_split_input)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# Once 3.1.0 is the minimum supported Spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('map_gen', map_gens_sample + decimal_128_map_gens + maps_with_binary, ids=idfn)
def test_explode_outer_map_data(map_gen):
data_gen = [int_gen, map_gen]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, *data_gen).selectExpr('a', 'explode_outer(b)'),
conf=conf_to_enforce_split_input)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# Once 3.1.0 is the minimum supported Spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', explode_gens, ids=idfn)
def test_explode_outer_nested_array_data(data_gen):
data_gen = [int_gen, ArrayGen(ArrayGen(data_gen))]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, *data_gen).selectExpr(
'a', 'explode_outer(b) as c').selectExpr('a', 'explode_outer(c)'),
conf=conf_to_enforce_split_input)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# Once 3.1.0 is the minimum supported Spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', explode_gens, ids=idfn)
def test_posexplode_makearray(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : four_op_df(spark, data_gen).selectExpr('posexplode(array(b, c, d))', 'a'))
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# Once 3.1.0 is the minimum supported Spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', explode_gens, ids=idfn)
def test_posexplode_litarray(data_gen):
array_lit = gen_scalar(ArrayGen(data_gen, min_length=3, max_length=3, nullable=False))
assert_gpu_and_cpu_are_equal_collect(
lambda spark : four_op_df(spark, data_gen).select(f.col('a'), f.col('b'), f.col('c'),
f.posexplode(array_lit)))
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# Once 3.1.0 is the minimum supported Spark version we can drop this
@ignore_order(local=True)
@pytest.mark.order(1) # at the head of xdist worker queue if pytest-order is installed
@pytest.mark.parametrize('data_gen', explode_gens + struct_gens_sample_with_decimal128 +
array_gens_sample + arrays_with_binary + map_gens_sample + maps_with_binary,
ids=idfn)
def test_posexplode_array_data(data_gen):
data_gen = [int_gen, ArrayGen(data_gen)]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, *data_gen).selectExpr('a', 'posexplode(b)'),
conf=conf_to_enforce_split_input)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# Once 3.1.0 is the minimum supported Spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('map_gen', map_gens_sample + decimal_128_map_gens + maps_with_binary, ids=idfn)
def test_posexplode_map_data(map_gen):
data_gen = [int_gen, map_gen]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, *data_gen).selectExpr('a', 'posexplode(b)'),
conf=conf_to_enforce_split_input)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# Once 3.1.0 is the minimum supported Spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', explode_gens, ids=idfn)
def test_posexplode_nested_array_data(data_gen):
data_gen = [int_gen, ArrayGen(ArrayGen(data_gen))]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, *data_gen).selectExpr(
'a', 'posexplode(b) as (pos, c)').selectExpr('a', 'pos', 'posexplode(c)'),
conf=conf_to_enforce_split_input)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# Once 3.1.0 is the minimum supported Spark version we can drop this
@ignore_order(local=True)
@pytest.mark.order(1) # at the head of xdist worker queue if pytest-order is installed
@pytest.mark.parametrize('data_gen', explode_gens + struct_gens_sample_with_decimal128 +
array_gens_sample + arrays_with_binary + map_gens_sample + maps_with_binary,
ids=idfn)
def test_posexplode_outer_array_data(data_gen):
data_gen = [int_gen, ArrayGen(data_gen)]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, *data_gen).selectExpr('a', 'posexplode_outer(b)'),
conf=conf_to_enforce_split_input)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# Once 3.1.0 is the minimum supported Spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('map_gen', map_gens_sample + decimal_128_map_gens + maps_with_binary, ids=idfn)
def test_posexplode_outer_map_data(map_gen):
data_gen = [int_gen, map_gen]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, *data_gen).selectExpr('a', 'posexplode_outer(b)'),
conf=conf_to_enforce_split_input)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# Once 3.1.0 is the minimum supported Spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', explode_gens, ids=idfn)
def test_posexplode_nested_outer_array_data(data_gen):
data_gen = [int_gen, ArrayGen(ArrayGen(data_gen))]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, *data_gen).selectExpr(
'a', 'posexplode_outer(b) as (pos, c)').selectExpr(
'a', 'pos', 'posexplode_outer(c)'),
conf=conf_to_enforce_split_input)
@allow_non_gpu("GenerateExec", "ShuffleExchangeExec")
@ignore_order(local=True)
def test_generate_outer_fallback():
assert_gpu_fallback_collect(
lambda spark: spark.sql("SELECT array(struct(1, 'a'), struct(2, 'b')) as x")\
.repartition(1).selectExpr("inline_outer(x)"),
"GenerateExec")
| spark-rapids-branch-23.10 | integration_tests/src/main/python/generate_expr_test.py |
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from conftest import is_allowing_any_non_gpu, get_non_gpu_allowed, get_validate_execs_in_gpu_plan, is_databricks_runtime, is_at_least_precommit_run, should_inject_oom
from pyspark.sql import DataFrame
from spark_init_internal import get_spark_i_know_what_i_am_doing, spark_version
def _from_scala_map(scala_map):
ret = {}
# The value we get is a scala map, not a java map, so we need to jump through some hoops
keys = scala_map.keys().iterator()
while keys.hasNext():
key = keys.next()
ret[key] = scala_map.get(key).get()
return ret
_spark = get_spark_i_know_what_i_am_doing()
# Have to reach into a private member to get access to the API we need
_orig_conf = _from_scala_map(_spark.conf._jconf.getAll())
_orig_conf_keys = _orig_conf.keys()
# Default settings that should apply to CPU and GPU sessions.
# These settings can be overridden by specific tests if necessary.
# Many of these are redundant with default settings for the configs but are set here explicitly
# to ensure any cluster settings do not interfere with tests that assume the defaults.
_default_conf = {
'spark.ansi.enabled': 'false',
'spark.rapids.sql.castDecimalToFloat.enabled': 'false',
'spark.rapids.sql.castFloatToDecimal.enabled': 'false',
'spark.rapids.sql.castFloatToIntegralTypes.enabled': 'false',
'spark.rapids.sql.castFloatToString.enabled': 'false',
'spark.rapids.sql.castStringToFloat.enabled': 'false',
'spark.rapids.sql.castStringToTimestamp.enabled': 'false',
'spark.rapids.sql.fast.sample': 'false',
'spark.rapids.sql.hasExtendedYearValues': 'true',
'spark.rapids.sql.hashOptimizeSort.enabled': 'false',
'spark.rapids.sql.improvedFloatOps.enabled': 'false',
'spark.rapids.sql.improvedTimeOps.enabled': 'false',
'spark.rapids.sql.incompatibleDateFormats.enabled': 'false',
'spark.rapids.sql.incompatibleOps.enabled': 'false',
'spark.rapids.sql.mode': 'executeongpu',
'spark.rapids.sql.variableFloatAgg.enabled': 'false',
'spark.sql.legacy.allowNegativeScaleOfDecimal': 'true',
}
def is_tz_utc(spark=_spark):
"""
true if the tz is UTC else false
"""
# Now we have to do some kind of ugly internal java stuff
jvm = spark.sparkContext._jvm
utc = jvm.java.time.ZoneId.of('UTC').normalized()
sys_tz = jvm.java.time.ZoneId.systemDefault().normalized()
return utc == sys_tz
def _set_all_confs(conf):
newconf = _default_conf.copy()
if (should_inject_oom()):
_spark.conf.set("spark.rapids.sql.test.injectRetryOOM", "true")
else:
_spark.conf.set("spark.rapids.sql.test.injectRetryOOM", "false")
newconf.update(conf)
for key, value in newconf.items():
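        # Only update configs whose current session value differs, to avoid redundant sets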
if _spark.conf.get(key, None) != value:
_spark.conf.set(key, value)
def reset_spark_session_conf():
"""Reset all of the configs for a given spark session."""
_set_all_confs(_orig_conf)
#We should clear the cache
_spark.catalog.clearCache()
# Have to reach into a private member to get access to the API we need
current_keys = _from_scala_map(_spark.conf._jconf.getAll()).keys()
for key in current_keys:
if key not in _orig_conf_keys:
_spark.conf.unset(key)
def _check_for_proper_return_values(something):
"""We don't want to return an DataFrame or Dataset from a with_spark_session. You will not get what you expect"""
if (isinstance(something, DataFrame)):
raise RuntimeError("You should never return a DataFrame from a with_*_session, you will not get the results that you expect")
def with_spark_session(func, conf={}):
"""Run func that takes a spark session as input with the given configs set."""
reset_spark_session_conf()
_add_job_description(conf)
_set_all_confs(conf)
ret = func(_spark)
_check_for_proper_return_values(ret)
return ret
def _add_job_description(conf):
is_gpu_job = conf.get('spark.rapids.sql.enabled', False)
job_type = 'GPU' if str(is_gpu_job).lower() == str(True).lower() else 'CPU'
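    # Label the Spark job with the current pytest test id plus the session type, e.g. "<test id>[GPU]"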
job_desc = '{}[{}]'.format(os.environ.get('PYTEST_CURRENT_TEST'), job_type)
_spark.sparkContext.setJobDescription(job_desc)
def with_cpu_session(func, conf={}):
"""Run func that takes a spark session as input with the given configs set on the CPU."""
copy = dict(conf)
copy['spark.rapids.sql.enabled'] = 'false'
return with_spark_session(func, conf=copy)
def with_gpu_session(func, conf={}):
"""
Run func that takes a spark session as input with the given configs set on the GPU.
    Note that this forces you into test mode unless non-GPU operations have been explicitly
    allowed for the test. It is not a requirement, but is simplest for right now.
"""
copy = dict(conf)
copy['spark.rapids.sql.enabled'] = 'true'
if is_allowing_any_non_gpu():
copy['spark.rapids.sql.test.enabled'] = 'false'
else:
copy['spark.rapids.sql.test.enabled'] = 'true'
copy['spark.rapids.sql.test.allowedNonGpu'] = ','.join(get_non_gpu_allowed())
copy['spark.rapids.sql.test.validateExecsInGpuPlan'] = ','.join(get_validate_execs_in_gpu_plan())
return with_spark_session(func, conf=copy)
def is_before_spark_312():
return spark_version() < "3.1.2"
def is_before_spark_313():
return spark_version() < "3.1.3"
def is_before_spark_320():
return spark_version() < "3.2.0"
def is_before_spark_322():
return spark_version() < "3.2.2"
def is_before_spark_330():
return spark_version() < "3.3.0"
def is_before_spark_331():
return spark_version() < "3.3.1"
def is_before_spark_340():
return spark_version() < "3.4.0"
def is_before_spark_350():
return spark_version() < "3.5.0"
def is_spark_320_or_later():
return spark_version() >= "3.2.0"
def is_spark_330_or_later():
return spark_version() >= "3.3.0"
def is_spark_340_or_later():
return spark_version() >= "3.4.0"
def is_spark_350_or_later():
return spark_version() >= "3.5.0"
def is_spark_330():
return spark_version() == "3.3.0"
def is_spark_33X():
return "3.3.0" <= spark_version() < "3.4.0"
def is_spark_321cdh():
return "3.2.1.3.2.717" in spark_version()
def is_spark_330cdh():
return "3.3.0.3.3.718" in spark_version()
def is_spark_cdh():
return is_spark_321cdh() or is_spark_330cdh()
def is_databricks_version_or_later(major, minor):
spark = get_spark_i_know_what_i_am_doing()
version = spark.conf.get("spark.databricks.clusterUsageTags.sparkVersion", "0.0")
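    # The first two dot-separated components of the version tag give the DBR major and minor version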
parts = version.split(".")
if (len(parts) < 2):
raise RuntimeError("Unable to determine Databricks version from version string: " + version)
db_major = int(parts[0])
db_minor = int(parts[1])
return db_minor >= minor if (db_major == major) else db_major >= major
def is_databricks104_or_later():
return is_databricks_version_or_later(10, 4)
def is_databricks113_or_later():
return is_databricks_version_or_later(11, 3)
def is_databricks122_or_later():
return is_databricks_version_or_later(12, 2)
def supports_delta_lake_deletion_vectors():
if is_databricks_runtime():
return is_databricks122_or_later()
else:
return is_spark_340_or_later()
def get_java_major_version():
ver = _spark.sparkContext._jvm.System.getProperty("java.version")
# Allow these formats:
# 1.8.0_72-ea
# 9-ea
# 9
# 11.0.1
if ver.startswith('1.'):
ver = ver[2:]
dot_pos = ver.find('.')
dash_pos = ver.find('-')
if dot_pos != -1:
ver = ver[0:dot_pos]
elif dash_pos != -1:
ver = ver[0:dash_pos]
return int(ver)
def get_jvm_charset():
sc = _spark.sparkContext
return str(sc._jvm.java.nio.charset.Charset.defaultCharset())
def is_jvm_charset_utf8():
return get_jvm_charset() == 'UTF-8'
def is_hive_available():
# precommit and nightly runs are supposed to have Hive,
# so tests should fail if Hive is missing in those environments.
if is_at_least_precommit_run():
return True
return _spark.conf.get("spark.sql.catalogImplementation") == "hive"
| spark-rapids-branch-23.10 | integration_tests/src/main/python/spark_session.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from spark_session import with_cpu_session, with_gpu_session
import pytest
from asserts import assert_gpu_and_cpu_are_equal_collect, assert_gpu_and_cpu_row_counts_equal
from data_gen import *
from marks import *
if os.environ.get('INCLUDE_SPARK_AVRO_JAR', 'false') == 'false':
pytestmark = pytest.mark.skip(reason=str("INCLUDE_SPARK_AVRO_JAR is disabled"))
support_gens = numeric_gens + [string_gen, boolean_gen]
_enable_all_types_conf = {
'spark.rapids.sql.format.avro.enabled': 'true',
'spark.rapids.sql.format.avro.read.enabled': 'true'}
rapids_reader_types = ['PERFILE', 'COALESCING', 'MULTITHREADED']
# 50 files for the coalescing reading case
coalescingPartitionNum = 50
def gen_avro_files(gen_list, out_path):
with_cpu_session(
lambda spark: gen_df(spark,
gen_list).repartition(coalescingPartitionNum).write.format("avro").save(out_path)
)
@pytest.mark.parametrize('v1_enabled_list', ["avro", ""], ids=["v1", "v2"])
@pytest.mark.parametrize('reader_type', rapids_reader_types)
def test_basic_read(spark_tmp_path, v1_enabled_list, reader_type):
gen_list = [('_c' + str(i), gen) for i, gen in enumerate(support_gens)]
data_path = spark_tmp_path + '/AVRO_DATA'
gen_avro_files(gen_list, data_path)
all_confs = copy_and_update(_enable_all_types_conf, {
'spark.rapids.sql.format.avro.reader.type': reader_type,
'spark.sql.sources.useV1SourceList': v1_enabled_list})
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.read.format("avro").load(data_path),
conf=all_confs)
@pytest.mark.parametrize('v1_enabled_list', ["", "avro"], ids=["v1", "v2"])
@pytest.mark.parametrize('reader_type', rapids_reader_types)
def test_avro_simple_partitioned_read(spark_tmp_path, v1_enabled_list, reader_type):
gen_list = [('_c' + str(i), gen) for i, gen in enumerate(support_gens)]
data_path = spark_tmp_path + '/AVRO_DATA'
# generate partitioned files
for v in [0, 1, 2]:
out_path = data_path + '/key={}/key2=2{}'.format(v, v)
gen_avro_files(gen_list, out_path)
all_confs = copy_and_update(_enable_all_types_conf, {
'spark.rapids.sql.format.avro.reader.type': reader_type,
'spark.sql.sources.useV1SourceList': v1_enabled_list})
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.read.format("avro").load(data_path),
conf=all_confs)
@pytest.mark.parametrize('v1_enabled_list', ["", "avro"], ids=["v1", "v2"])
@pytest.mark.parametrize('reader_type', rapids_reader_types)
def test_avro_input_meta(spark_tmp_path, v1_enabled_list, reader_type):
data_path = spark_tmp_path + '/AVRO_DATA'
for v in [0, 1]:
out_path = data_path + '/key={}'.format(v)
with_cpu_session(
lambda spark: unary_op_df(spark, long_gen).write.format("avro").save(out_path))
all_confs = copy_and_update(_enable_all_types_conf, {
'spark.rapids.sql.format.avro.reader.type': reader_type,
'spark.sql.sources.useV1SourceList': v1_enabled_list})
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.read.format("avro").load(data_path)
.filter(f.col('a') > 0)
.selectExpr('a',
'input_file_name()',
'input_file_block_start()',
'input_file_block_length()'),
conf=all_confs)
# This is for https://github.com/NVIDIA/spark-rapids/issues/5312
@pytest.mark.parametrize('v1_enabled_list', ["avro", ""], ids=["v1", "v2"])
def test_coalescing_uniform_sync(spark_tmp_path, v1_enabled_list):
# Generate the data files
data_path = spark_tmp_path + '/AVRO_DATA'
with_cpu_session(
lambda spark: unary_op_df(spark, long_gen).repartition(coalescingPartitionNum)\
.write.format("avro").save(data_path))
# dump the coalesced files
dump_path = spark_tmp_path + '/AVRO_DUMP/'
all_confs = copy_and_update(_enable_all_types_conf, {
'spark.rapids.sql.format.avro.reader.type': 'COALESCING',
'spark.rapids.sql.avro.debug.dumpPrefix': dump_path,
'spark.rapids.sql.avro.debug.dumpAlways': 'true',
'spark.sql.sources.useV1SourceList': v1_enabled_list})
with_gpu_session(
lambda spark: spark.read.format("avro").load(data_path).collect(),
conf=all_confs)
# read the coalesced files by CPU
with_cpu_session(
lambda spark: spark.read.format("avro").load(dump_path).collect())
@ignore_order(local=True)
@pytest.mark.parametrize('v1_enabled_list', ["", "avro"], ids=["v1", "v2"])
@pytest.mark.parametrize('reader_type', rapids_reader_types)
def test_avro_read_with_corrupt_files(spark_tmp_path, reader_type, v1_enabled_list):
first_dpath = spark_tmp_path + '/AVRO_DATA/first'
with_cpu_session(lambda spark : spark.range(1).toDF("a").write.format("avro").save(first_dpath))
second_dpath = spark_tmp_path + '/AVRO_DATA/second'
with_cpu_session(lambda spark : spark.range(1, 2).toDF("a").write.format("avro").save(second_dpath))
third_dpath = spark_tmp_path + '/AVRO_DATA/third'
with_cpu_session(lambda spark : spark.range(2, 3).toDF("a").write.json(third_dpath))
all_confs = copy_and_update(_enable_all_types_conf, {
'spark.sql.files.ignoreCorruptFiles': "true",
'spark.sql.sources.useV1SourceList': v1_enabled_list})
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.read.format("avro").load([first_dpath, second_dpath, third_dpath]),
conf=all_confs)
# 10 rows is a small batch size for multi-batch read test, 2147483647 is the default value
@pytest.mark.parametrize('batch_size_rows', [10, 2147483647])
@pytest.mark.parametrize('v1_enabled_list', ["avro", ""], ids=["v1", "v2"])
@pytest.mark.parametrize('reader_type', rapids_reader_types)
def test_read_count(spark_tmp_path, v1_enabled_list, reader_type, batch_size_rows):
data_path = spark_tmp_path + '/AVRO_DATA'
# the default block size of the avro file is about 64kb, so we need to generate a larger file
# to test multi-batch read. length=30000 will generate 2 blocks in each partition.
with_cpu_session(
lambda spark: gen_df(spark, [('_c0', int_gen)], length=30000)
.repartition(2).write.format("avro").save(data_path)
)
all_confs = copy_and_update(_enable_all_types_conf, {
'spark.rapids.sql.format.avro.reader.type': reader_type,
'spark.sql.sources.useV1SourceList': v1_enabled_list,
'spark.rapids.sql.reader.batchSizeRows': batch_size_rows})
assert_gpu_and_cpu_row_counts_equal(
lambda spark: spark.read.format("avro").load(data_path),
conf=all_confs)
@pytest.mark.parametrize('col_name', ['K0', 'k0', 'K3', 'k3', 'V0', 'v0'], ids=idfn)
@ignore_order
def test_read_case_col_name(spark_tmp_path, col_name):
gen_list =[('k0', LongGen(nullable=False, min_val=0, max_val=0)),
('k1', LongGen(nullable=False, min_val=1, max_val=1)),
('k2', LongGen(nullable=False, min_val=2, max_val=2)),
('k3', LongGen(nullable=False, min_val=3, max_val=3)),
('v0', LongGen()),
('v1', LongGen()),
('v2', LongGen()),
('v3', LongGen())]
gen = StructGen(gen_list, nullable=False)
data_path = spark_tmp_path + '/AVRO_DATA'
with_cpu_session(
lambda spark : gen_df(spark, gen).write.partitionBy('k0', 'k1', 'k2', 'k3').format('avro').save(data_path))
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.read.format('avro').load(data_path).selectExpr(col_name),
conf=_enable_all_types_conf)
| spark-rapids-branch-23.10 | integration_tests/src/main/python/avro_test.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_gpu_and_cpu_are_equal_sql
from data_gen import *
from marks import *
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', all_basic_gens, ids=idfn)
def test_scalar_subquery_basics(data_gen):
# Fix num_slices at 1 to make sure that first/last returns same results under CPU and GPU.
assert_gpu_and_cpu_are_equal_sql(
lambda spark: gen_df(spark, [('a', data_gen)], num_slices=1),
'table',
'''select a, (select last(a) from table)
from table
where a > (select first(a) from table)
''')
@ignore_order(local=True)
@pytest.mark.parametrize('basic_gen', all_basic_gens, ids=idfn)
def test_scalar_subquery_struct(basic_gen):
# single-level struct
gen = [('ss', StructGen([['a', basic_gen], ['b', basic_gen]]))]
assert_gpu_and_cpu_are_equal_sql(
# Fix num_slices at 1 to make sure that first/last returns same results under CPU and GPU.
lambda spark: gen_df(spark, gen, num_slices=1),
'table',
'''select ss, (select last(ss) from table)
from table
where (select first(ss) from table).b > ss.a
''')
# nested struct
gen = [('ss', StructGen([['child', StructGen([['c0', basic_gen]])]]))]
assert_gpu_and_cpu_are_equal_sql(
# Fix num_slices at 1 to make sure that first/last returns same results under CPU and GPU.
lambda spark: gen_df(spark, gen, num_slices=1),
'table',
'''select ss, (select last(ss) from table)
from table
where (select first(ss) from table)['child']['c0'] > ss.child.c0
''')
# struct of array
gen = [('ss', StructGen([['arr', ArrayGen(basic_gen)]]))]
assert_gpu_and_cpu_are_equal_sql(
# Fix num_slices at 1 to make sure that first/last returns same results under CPU and GPU.
lambda spark: gen_df(spark, gen, length=100, num_slices=1),
'table',
'''select sort_array(ss.arr), sort_array((select last(ss) from table)['arr'])
from table
where (select first(ss) from table).arr[0] > ss.arr[1]
''')
@ignore_order(local=True)
@pytest.mark.parametrize('basic_gen', all_basic_gens, ids=idfn)
def test_scalar_subquery_array(basic_gen):
# single-level array
assert_gpu_and_cpu_are_equal_sql(
# Fix num_slices at 1 to make sure that first/last returns same results under CPU and GPU.
lambda spark: gen_df(spark, [('arr', ArrayGen(basic_gen))], num_slices=1),
'table',
'''select sort_array(arr),
sort_array((select last(arr) from table))
from table
where (select first(arr) from table)[0] > arr[0]
''')
# nested array
assert_gpu_and_cpu_are_equal_sql(
# Fix num_slices at 1 to make sure that first/last returns same results under CPU and GPU.
lambda spark: gen_df(spark, [('arr', ArrayGen(ArrayGen(basic_gen)))]
, length=100
, num_slices=1),
'table',
'''select sort_array(arr[10]),
sort_array((select last(arr) from table)[10])
from table
where (select first(arr) from table)[0][1] > arr[0][1]
''')
# array of struct
assert_gpu_and_cpu_are_equal_sql(
# Fix num_slices at 1 to make sure that first/last returns same results under CPU and GPU.
lambda spark: gen_df(spark, [('arr', ArrayGen(StructGen([['a', basic_gen]])))]
, length=100
, num_slices=1),
'table',
'''select arr[10].a, (select last(arr) from table)[10].a
from table
where (select first(arr) from table)[0].a > arr[0].a
''')
@ignore_order(local=True)
def test_scalar_subquery_map():
map_gen = map_string_string_gen[0]
assert_gpu_and_cpu_are_equal_sql(
# Fix num_slices at 1 to make sure that first/last returns same results under CPU and GPU.
lambda spark: gen_df(spark, [('kv', map_gen)], length=100, num_slices=1),
'table',
'''select kv['key_0'],
(select first(kv) from table)['key_1'],
(select last(kv) from table)['key_2']
from table
''')
# array of map
assert_gpu_and_cpu_are_equal_sql(
# Fix num_slices at 1 to make sure that first/last returns same results under CPU and GPU.
lambda spark: gen_df(spark, [('arr', ArrayGen(map_gen))], length=100, num_slices=1),
'table',
'''select arr[0]['key_0'],
(select first(arr) from table)[0]['key_1'],
(select last(arr[0]) from table)['key_2']
from table
''')
# struct of map
assert_gpu_and_cpu_are_equal_sql(
# Fix num_slices at 1 to make sure that first/last returns same results under CPU and GPU.
lambda spark: gen_df(spark, [('ss', StructGen([['kv', map_gen]]))], length=100, num_slices=1),
'table',
'''select ss['kv']['key_0'],
(select first(ss) from table)['kv']['key_1'],
(select last(ss.kv) from table)['key_2']
from table
''')
| spark-rapids-branch-23.10 | integration_tests/src/main/python/subquery_test.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from pyspark.sql.functions import when, col, current_date, current_timestamp
from pyspark.sql.types import *
from asserts import assert_gpu_and_cpu_are_equal_collect, assert_cpu_and_gpu_are_equal_collect_with_capture
from data_gen import *
from marks import ignore_order, allow_non_gpu
from spark_session import with_cpu_session, is_databricks113_or_later
_adaptive_conf = { "spark.sql.adaptive.enabled": "true" }
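# Build two dataframes over the same id range where a large fraction of the keys on each
# side collapse to the single value `mid`, producing a heavily skewed join key distribution.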
def create_skew_df(spark, length):
root = spark.range(0, length)
mid = length / 2
left = root.select(
when(col('id') < mid / 2, mid).
otherwise('id').alias("key1"),
col('id').alias("value1")
)
right = root.select(
when(col('id') < mid, mid).
otherwise('id').alias("key2"),
col('id').alias("value2")
)
return left, right
# This replicates the skew join test from scala tests, and is here to test
# the computeStats(...) implementation in GpuRangeExec
@ignore_order(local=True)
def test_aqe_skew_join():
def do_join(spark):
left, right = create_skew_df(spark, 500)
left.createOrReplaceTempView("skewData1")
right.createOrReplaceTempView("skewData2")
return spark.sql("SELECT * FROM skewData1 join skewData2 ON key1 = key2")
assert_gpu_and_cpu_are_equal_collect(do_join, conf=_adaptive_conf)
# Test the computeStats(...) implementation in GpuDataSourceScanExec
@ignore_order(local=True)
@pytest.mark.parametrize("data_gen", integral_gens, ids=idfn)
def test_aqe_join_parquet(spark_tmp_path, data_gen):
data_path = spark_tmp_path + '/PARQUET_DATA'
with_cpu_session(
lambda spark: unary_op_df(spark, data_gen).orderBy('a').write.parquet(data_path)
)
def do_it(spark):
spark.read.parquet(data_path).createOrReplaceTempView('df1')
spark.read.parquet(data_path).createOrReplaceTempView('df2')
return spark.sql("select count(*) from df1,df2 where df1.a = df2.a")
assert_gpu_and_cpu_are_equal_collect(do_it, conf=_adaptive_conf)
# Test the computeStats(...) implementation in GpuBatchScanExec
@ignore_order(local=True)
@pytest.mark.parametrize("data_gen", integral_gens, ids=idfn)
def test_aqe_join_parquet_batch(spark_tmp_path, data_gen):
# force v2 source for parquet to use BatchScanExec
conf = copy_and_update(_adaptive_conf, {
"spark.sql.sources.useV1SourceList": ""
})
first_data_path = spark_tmp_path + '/PARQUET_DATA/key=0'
with_cpu_session(
lambda spark : unary_op_df(spark, data_gen).write.parquet(first_data_path))
second_data_path = spark_tmp_path + '/PARQUET_DATA/key=1'
with_cpu_session(
lambda spark : unary_op_df(spark, data_gen).write.parquet(second_data_path))
data_path = spark_tmp_path + '/PARQUET_DATA'
def do_it(spark):
spark.read.parquet(data_path).createOrReplaceTempView('df1')
spark.read.parquet(data_path).createOrReplaceTempView('df2')
return spark.sql("select count(*) from df1,df2 where df1.a = df2.a")
assert_gpu_and_cpu_are_equal_collect(do_it, conf=conf)
# Test the map stage submission handling for GpuShuffleExchangeExec
@ignore_order(local=True)
def test_aqe_struct_self_join(spark_tmp_table_factory):
def do_join(spark):
data = [
(("Adam ", "", "Green"), "1", "M", 1000),
(("Bob ", "Middle", "Green"), "2", "M", 2000),
(("Cathy ", "", "Green"), "3", "F", 3000)
]
schema = (StructType()
.add("name", StructType()
.add("firstname", StringType())
.add("middlename", StringType())
.add("lastname", StringType()))
.add("id", StringType())
.add("gender", StringType())
.add("salary", IntegerType()))
df = spark.createDataFrame(spark.sparkContext.parallelize(data), schema)
df_name = spark_tmp_table_factory.get()
df.createOrReplaceTempView(df_name)
resultdf = spark.sql(
"select struct(name, struct(name.firstname, name.lastname) as newname)" +
" as col,name from " + df_name + " union" +
" select struct(name, struct(name.firstname, name.lastname) as newname) as col,name" +
" from " + df_name)
resultdf_name = spark_tmp_table_factory.get()
resultdf.createOrReplaceTempView(resultdf_name)
return spark.sql("select a.* from {} a, {} b where a.name=b.name".format(
resultdf_name, resultdf_name))
assert_gpu_and_cpu_are_equal_collect(do_join, conf=_adaptive_conf)
@allow_non_gpu("ProjectExec")
def test_aqe_broadcast_join_non_columnar_child(spark_tmp_path):
data_path = spark_tmp_path + '/PARQUET_DATA'
def prep(spark):
data = [
(("Adam ", "", "Green"), "1", "M", 1000, "http://widgets.net"),
(("Bob ", "Middle", "Green"), "2", "M", 2000, "http://widgets.org"),
(("Cathy ", "", "Green"), "3", "F", 3000, "http://widgets.net")
]
schema = (StructType()
.add("name", StructType()
.add("firstname", StringType())
.add("middlename", StringType())
.add("lastname", StringType()))
.add("id", StringType())
.add("gender", StringType())
.add("salary", IntegerType())
.add("website", StringType()))
df = spark.createDataFrame(spark.sparkContext.parallelize(data),schema)
df2 = df.withColumn("dt",current_date().alias("dt")).withColumn("ts",current_timestamp().alias("ts"))
df2.write.format("parquet").mode("overwrite").save(data_path)
with_cpu_session(prep)
def do_it(spark):
newdf2 = spark.read.parquet(data_path)
newdf2.createOrReplaceTempView("df2")
return spark.sql(
"""
select
a.name.firstname,
a.name.lastname,
b.full_url
from df2 a join (select id, concat(website,'/path') as full_url from df2) b
on a.id = b.id
"""
)
conf = copy_and_update(_adaptive_conf, { 'spark.rapids.sql.expression.Concat': 'false' })
if is_databricks113_or_later():
assert_cpu_and_gpu_are_equal_collect_with_capture(do_it, exist_classes="GpuShuffleExchangeExec",conf=conf)
else:
assert_cpu_and_gpu_are_equal_collect_with_capture(do_it, exist_classes="GpuBroadcastExchangeExec",conf=conf)
joins = [
'inner',
'cross',
'left semi',
'left anti',
'anti'
]
# Databricks 11.3 added a new EXECUTOR_BROADCAST operator which does an executor-side broadcast.
# The SparkPlan is different because the BroadcastExchange is replaced by a plain Exchange.
# To handle the issue in https://github.com/NVIDIA/spark-rapids/issues/7037, we need to allow
# a CPU ShuffleExchangeExec for a CPU broadcast join to consume it.
db_113_cpu_bnlj_join_allow=["ShuffleExchangeExec"] if is_databricks113_or_later() else []
# see https://github.com/NVIDIA/spark-rapids/issues/7037
# basically this happens when a GPU broadcast exchange is reused from
# one side of a GPU broadcast join to be used on one side of a CPU
# broadcast join. The bug currently manifests in Databricks, but could
# theoretically show up in other Spark distributions
@ignore_order(local=True)
@allow_non_gpu('BroadcastNestedLoopJoinExec', 'Cast', 'DateSub', *db_113_cpu_bnlj_join_allow)
@pytest.mark.parametrize('join', joins, ids=idfn)
def test_aqe_join_reused_exchange_inequality_condition(spark_tmp_path, join):
data_path = spark_tmp_path + '/PARQUET_DATA'
def prep(spark):
data = [
(("Adam ", "", "Green"), "1", "M", 1000),
(("Bob ", "Middle", "Green"), "2", "M", 2000),
(("Cathy ", "", "Green"), "3", "F", 3000)
]
schema = (StructType()
.add("name", StructType()
.add("firstname", StringType())
.add("middlename", StringType())
.add("lastname", StringType()))
.add("id", StringType())
.add("gender", StringType())
.add("salary", IntegerType()))
df = spark.createDataFrame(spark.sparkContext.parallelize(data),schema)
df2 = df.withColumn("dt",current_date().alias("dt")).withColumn("ts",current_timestamp().alias("ts"))
df2.write.format("parquet").mode("overwrite").save(data_path)
with_cpu_session(prep)
def do_it(spark):
newdf2 = spark.read.parquet(data_path)
newdf2.createOrReplaceTempView("df2")
return spark.sql(
"""
select *
from (
select distinct a.salary
from df2 a {join} join (select max(date(ts)) as state_start from df2) b
on date(a.ts) > b.state_start - 2)
where salary in (
select salary from (select a.salary
from df2 a inner join (select max(date(ts)) as state_start from df2) b on date(a.ts) > b.state_start - 2
order by a.salary limit 1))
""".format(join=join)
)
assert_gpu_and_cpu_are_equal_collect(do_it, conf=_adaptive_conf)
| spark-rapids-branch-23.10 | integration_tests/src/main/python/aqe_test.py |
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_gpu_and_cpu_are_equal_collect
from data_gen import *
from marks import allow_non_gpu, approximate_float, incompat
from pyspark.sql.types import *
from spark_session import with_cpu_session
# This is one of the most basic tests where we verify that we can
# move data onto and off of the GPU without messing anything up. With how pyspark
# currently works, all data that comes from data_gen is row formatted, and when we
# do a collect all of that data has to be brought back to the CPU (rows) to be
# returned. So we just need a very simple operation in the middle that can be
# done on the GPU.
def test_row_conversions():
gens = [["a", byte_gen], ["b", short_gen], ["c", int_gen], ["d", long_gen],
["e", float_gen], ["f", double_gen], ["g", string_gen], ["h", boolean_gen],
["i", timestamp_gen], ["j", date_gen], ["k", ArrayGen(byte_gen)],
["l", ArrayGen(string_gen)], ["m", ArrayGen(float_gen)],
["n", ArrayGen(boolean_gen)], ["o", ArrayGen(ArrayGen(short_gen))],
["p", StructGen([["c0", byte_gen], ["c1", ArrayGen(byte_gen)]])],
["q", simple_string_to_string_map_gen],
["r", MapGen(BooleanGen(nullable=False), ArrayGen(boolean_gen), max_length=2)],
["s", null_gen], ["t", decimal_gen_64bit], ["u", decimal_gen_32bit],
["v", decimal_gen_128bit]]
assert_gpu_and_cpu_are_equal_collect(
lambda spark : gen_df(spark, gens).selectExpr("*", "a as a_again"))
def test_row_conversions_fixed_width():
gens = [["a", byte_gen], ["b", short_gen], ["c", int_gen], ["d", long_gen],
["e", float_gen], ["f", double_gen], ["h", boolean_gen],
["i", timestamp_gen], ["j", date_gen], ["k", decimal_gen_64bit],
["l", decimal_gen_32bit]]
assert_gpu_and_cpu_are_equal_collect(
lambda spark : gen_df(spark, gens).selectExpr("*", "a as a_again"))
def test_row_conversions_fixed_width_wide():
gens = [["a{}".format(i), ByteGen(nullable=True)] for i in range(10)] + \
[["b{}".format(i), ShortGen(nullable=True)] for i in range(10)] + \
[["c{}".format(i), IntegerGen(nullable=True)] for i in range(10)] + \
[["d{}".format(i), LongGen(nullable=True)] for i in range(10)] + \
[["e{}".format(i), FloatGen(nullable=True)] for i in range(10)] + \
[["f{}".format(i), DoubleGen(nullable=True)] for i in range(10)] + \
[["h{}".format(i), BooleanGen(nullable=True)] for i in range(10)] + \
[["i{}".format(i), TimestampGen(nullable=True)] for i in range(10)] + \
[["j{}".format(i), DateGen(nullable=True)] for i in range(10)] + \
[["k{}".format(i), DecimalGen(precision=12, scale=2, nullable=True)] for i in range(10)] + \
[["l{}".format(i), DecimalGen(precision=7, scale=3, nullable=True)] for i in range(10)]
def do_it(spark):
df=gen_df(spark, gens, length=1).selectExpr("*", "a0 as a_again")
debug_df(df)
return df
assert_gpu_and_cpu_are_equal_collect(do_it)
# Test handling of transitions when the data is already columnar on the host
# Note that Apache Spark will automatically convert a load of nested types to rows, so
# the nested types will not test a host columnar transition in that case.
# Databricks does support returning nested types as columnar data on the host, and that
# is where we would expect any problems with handling nested types in host columnar form to appear.
@pytest.mark.parametrize('data_gen', [
int_gen,
string_gen,
decimal_gen_64bit,
decimal_gen_128bit,
ArrayGen(string_gen, max_length=10),
ArrayGen(decimal_gen_128bit, max_length=10),
StructGen([('a', string_gen)]) ] + map_string_string_gen, ids=idfn)
@allow_non_gpu('ColumnarToRowExec', 'FileSourceScanExec')
def test_host_columnar_transition(spark_tmp_path, data_gen):
data_path = spark_tmp_path + '/PARQUET_DATA'
with_cpu_session(lambda spark : unary_op_df(spark, data_gen).write.parquet(data_path))
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.read.parquet(data_path).filter("a IS NOT NULL"),
conf={ 'spark.rapids.sql.exec.FileSourceScanExec' : 'false'})
| spark-rapids-branch-23.10 | integration_tests/src/main/python/row_conversion_test.py |
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_gpu_and_cpu_are_equal_collect
from data_gen import LONG_MAX, LONG_MIN
from pyspark.sql.types import *
import pyspark.sql.functions as f
def test_simple_range():
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.range(100))
def test_start_end_range():
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.range(-100, 100))
def test_step_range():
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.range(-100, 100, 7))
def test_neg_step_range():
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.range(100, -100, -7))
def test_partitioned_range():
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.range(1000, numPartitions=2))
def test_large_corner_range():
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.range(LONG_MAX - 100, LONG_MAX, step=3))
def test_small_corner_range():
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.range(LONG_MIN + 100, LONG_MIN, step=-3))
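# start > stop with a positive step yields an empty range; verify the GPU agrees with the CPU.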
def test_wrong_step_corner_range():
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.range(100, -100, 7))
| spark-rapids-branch-23.10 | integration_tests/src/main/python/range_test.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_equal, assert_gpu_and_cpu_writes_are_equal_collect, assert_gpu_fallback_write
from data_gen import *
from delta_lake_write_test import assert_gpu_and_cpu_delta_logs_equivalent, delta_meta_allow, delta_writes_enabled_conf
from delta_lake_merge_test import read_delta_path, read_delta_path_with_cdf, setup_dest_tables
from marks import *
from spark_session import is_before_spark_320, is_databricks_runtime, is_databricks122_or_later, \
supports_delta_lake_deletion_vectors, with_cpu_session, with_gpu_session
delta_update_enabled_conf = copy_and_update(delta_writes_enabled_conf,
{"spark.rapids.sql.command.UpdateCommand": "true",
"spark.rapids.sql.command.UpdateCommandEdge": "true"})
delta_write_fallback_allow = "ExecutedCommandExec,DataWritingCommandExec" if is_databricks122_or_later() else "ExecutedCommandExec"
delta_write_fallback_check = "DataWritingCommandExec" if is_databricks122_or_later() else "ExecutedCommandExec"
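# Common driver for the Delta Lake UPDATE tests: set up CPU and GPU copies of the destination
# table, run the given UPDATE statement against each path, and delegate validation to check_func.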
def delta_sql_update_test(spark_tmp_path, use_cdf, dest_table_func, update_sql,
check_func, partition_columns=None, enable_deletion_vectors=False):
data_path = spark_tmp_path + "/DELTA_DATA"
def setup_tables(spark):
setup_dest_tables(spark, data_path, dest_table_func, use_cdf, partition_columns, enable_deletion_vectors)
def do_update(spark, path):
return spark.sql(update_sql.format(path=path))
with_cpu_session(setup_tables)
check_func(data_path, do_update)
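# Runs the UPDATE on both CPU and GPU and compares the command results, the table contents
# (both read back via the CPU), and, when the file counts are expected to match, the delta logs.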
def assert_delta_sql_update_collect(spark_tmp_path, use_cdf, dest_table_func, update_sql,
partition_columns=None,
enable_deletion_vectors=False,
conf=delta_update_enabled_conf):
def read_data(spark, path):
read_func = read_delta_path_with_cdf if use_cdf else read_delta_path
df = read_func(spark, path)
return df.sort(df.columns)
def checker(data_path, do_update):
cpu_path = data_path + "/CPU"
gpu_path = data_path + "/GPU"
# compare resulting dataframe from the update operation (some older Spark versions return empty here)
cpu_result = with_cpu_session(lambda spark: do_update(spark, cpu_path).collect(), conf=conf)
gpu_result = with_gpu_session(lambda spark: do_update(spark, gpu_path).collect(), conf=conf)
assert_equal(cpu_result, gpu_result)
# compare table data results, read both via CPU to make sure GPU write can be read by CPU
cpu_result = with_cpu_session(lambda spark: read_data(spark, cpu_path).collect(), conf=conf)
gpu_result = with_cpu_session(lambda spark: read_data(spark, gpu_path).collect(), conf=conf)
assert_equal(cpu_result, gpu_result)
        # Databricks is not guaranteed to write the same number of files due to optimized
        # writes when using partitions
if not is_databricks_runtime() or not partition_columns:
with_cpu_session(lambda spark: assert_gpu_and_cpu_delta_logs_equivalent(spark, data_path))
delta_sql_update_test(spark_tmp_path, use_cdf, dest_table_func, update_sql, checker,
partition_columns, enable_deletion_vectors)
@allow_non_gpu(delta_write_fallback_allow, *delta_meta_allow)
@delta_lake
@ignore_order
@pytest.mark.parametrize("disable_conf",
[{"spark.rapids.sql.format.delta.write.enabled": "false"},
{"spark.rapids.sql.format.parquet.write.enabled": "false"},
{"spark.rapids.sql.command.UpdateCommand": "false"},
delta_writes_enabled_conf # Test disabled by default
], ids=idfn)
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
def test_delta_update_disabled_fallback(spark_tmp_path, disable_conf):
data_path = spark_tmp_path + "/DELTA_DATA"
def setup_tables(spark):
setup_dest_tables(spark, data_path,
dest_table_func=lambda spark: unary_op_df(spark, int_gen),
use_cdf=False)
def write_func(spark, path):
update_sql="UPDATE delta.`{}` SET a = 0".format(path)
spark.sql(update_sql)
with_cpu_session(setup_tables)
assert_gpu_fallback_write(write_func, read_delta_path, data_path,
delta_write_fallback_check, disable_conf)
@allow_non_gpu(*delta_meta_allow)
@delta_lake
@ignore_order
@pytest.mark.parametrize("use_cdf", [True, False], ids=idfn)
@pytest.mark.parametrize("partition_columns", [None, ["a"]], ids=idfn)
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
def test_delta_update_entire_table(spark_tmp_path, use_cdf, partition_columns):
def generate_dest_data(spark):
return three_col_df(spark,
SetValuesGen(IntegerType(), range(5)),
SetValuesGen(StringType(), "abcdefg"),
string_gen)
update_sql = "UPDATE delta.`{path}` SET a = 0"
assert_delta_sql_update_collect(spark_tmp_path, use_cdf, generate_dest_data,
update_sql, partition_columns)
@allow_non_gpu(*delta_meta_allow)
@delta_lake
@ignore_order
@pytest.mark.parametrize("use_cdf", [True, False], ids=idfn)
@pytest.mark.parametrize("partition_columns", [["a"], ["a", "b"]], ids=idfn)
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
def test_delta_update_partitions(spark_tmp_path, use_cdf, partition_columns):
def generate_dest_data(spark):
return three_col_df(spark,
SetValuesGen(IntegerType(), range(5)),
SetValuesGen(StringType(), "abcdefg"),
string_gen)
update_sql = "UPDATE delta.`{path}` SET a = 3 WHERE b < 'c'"
assert_delta_sql_update_collect(spark_tmp_path, use_cdf, generate_dest_data,
update_sql, partition_columns)
@allow_non_gpu(*delta_meta_allow)
@delta_lake
@ignore_order
@pytest.mark.parametrize("use_cdf", [True, False], ids=idfn)
@pytest.mark.parametrize("partition_columns", [None, ["a"]], ids=idfn)
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
def test_delta_update_rows(spark_tmp_path, use_cdf, partition_columns):
# Databricks changes the number of files being written, so we cannot compare logs unless there's only one slice
num_slices_to_test = 1 if is_databricks_runtime() else 10
def generate_dest_data(spark):
return three_col_df(spark,
SetValuesGen(IntegerType(), range(5)),
SetValuesGen(StringType(), "abcdefg"),
string_gen, num_slices=num_slices_to_test)
update_sql = "UPDATE delta.`{path}` SET c = b WHERE b >= 'd'"
assert_delta_sql_update_collect(spark_tmp_path, use_cdf, generate_dest_data,
update_sql, partition_columns)
@allow_non_gpu("HashAggregateExec,ColumnarToRowExec,RapidsDeltaWriteExec,GenerateExec", *delta_meta_allow)
@delta_lake
@ignore_order
@pytest.mark.parametrize("use_cdf", [True, False], ids=idfn)
@pytest.mark.parametrize("partition_columns", [None, ["a"]], ids=idfn)
@pytest.mark.parametrize("enable_deletion_vectors", [True, False], ids=idfn)
@pytest.mark.skipif(not supports_delta_lake_deletion_vectors(), reason="Deletion vectors are new in Spark 3.4.0 / DBR 12.2")
def test_delta_update_rows_with_dv(spark_tmp_path, use_cdf, partition_columns, enable_deletion_vectors):
# Databricks changes the number of files being written, so we cannot compare logs unless there's only one slice
num_slices_to_test = 1 if is_databricks_runtime() else 10
def generate_dest_data(spark):
return three_col_df(spark,
SetValuesGen(IntegerType(), range(5)),
SetValuesGen(StringType(), "abcdefg"),
string_gen, num_slices=num_slices_to_test)
update_sql = "UPDATE delta.`{path}` SET c = b WHERE b >= 'd'"
assert_delta_sql_update_collect(spark_tmp_path, use_cdf, generate_dest_data,
update_sql, partition_columns, enable_deletion_vectors)
@allow_non_gpu(*delta_meta_allow)
@delta_lake
@ignore_order
@pytest.mark.parametrize("use_cdf", [True, False], ids=idfn)
@pytest.mark.parametrize("partition_columns", [None, ["a"]], ids=idfn)
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
def test_delta_update_dataframe_api(spark_tmp_path, use_cdf, partition_columns):
from delta.tables import DeltaTable
data_path = spark_tmp_path + "/DELTA_DATA"
# Databricks changes the number of files being written, so we cannot compare logs unless there's only one slice
num_slices_to_test = 1 if is_databricks_runtime() else 10
def generate_dest_data(spark):
return three_col_df(spark,
SetValuesGen(IntegerType(), range(5)),
SetValuesGen(StringType(), "abcdefg"),
string_gen, num_slices=num_slices_to_test)
with_cpu_session(lambda spark: setup_dest_tables(spark, data_path, generate_dest_data, use_cdf, partition_columns))
def do_update(spark, path):
dest_table = DeltaTable.forPath(spark, path)
dest_table.update(condition="b > 'c'", set={"c": f.col("b"), "a": f.lit(1)})
read_func = read_delta_path_with_cdf if use_cdf else read_delta_path
assert_gpu_and_cpu_writes_are_equal_collect(do_update, read_func, data_path,
conf=delta_update_enabled_conf)
    # Databricks is not guaranteed to write the same number of files due to optimized
    # writes when using partitions
if not is_databricks_runtime() or not partition_columns:
with_cpu_session(lambda spark: assert_gpu_and_cpu_delta_logs_equivalent(spark, data_path))
| spark-rapids-branch-23.10 | integration_tests/src/main/python/delta_lake_update_test.py |
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_gpu_and_cpu_are_equal_collect
from data_gen import *
from spark_session import with_cpu_session, is_before_spark_330
from pyspark.sql.types import *
import pyspark.sql.functions as f
@pytest.mark.parametrize('data_gen', eq_gens_with_decimal_gen + struct_gens_sample_with_decimal128_no_list, ids=idfn)
def test_eq(data_gen):
(s1, s2) = gen_scalars(data_gen, 2, force_no_nulls=not isinstance(data_gen, NullGen))
data_type = data_gen.data_type
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).select(
f.col('a') == s1,
s2 == f.col('b'),
f.lit(None).cast(data_type) == f.col('a'),
f.col('b') == f.lit(None).cast(data_type),
f.col('a') == f.col('b')))
@pytest.mark.skipif(is_before_spark_330(), reason='DayTimeInterval is not supported before Pyspark 3.3.0')
def test_eq_for_interval():
def test_func(data_gen):
(s1, s2) = gen_scalars(data_gen, 2, force_no_nulls=not isinstance(data_gen, NullGen))
data_type = data_gen.data_type
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).select(
f.col('a') == s1,
s2 == f.col('b'),
f.lit(None).cast(data_type) == f.col('a'),
f.col('b') == f.lit(None).cast(data_type),
f.col('a') == f.col('b')))
# DayTimeIntervalType not supported inside Structs -- issue #6184
# data_gens = [DayTimeIntervalGen(),
# StructGen([['child0', StructGen([['child2', DayTimeIntervalGen()]])], ['child1', short_gen]])]
data_gens = [DayTimeIntervalGen()]
for data_gen in data_gens:
test_func(data_gen)
@pytest.mark.parametrize('data_gen', eq_gens_with_decimal_gen + struct_gens_sample_with_decimal128_no_list, ids=idfn)
def test_eq_ns(data_gen):
(s1, s2) = gen_scalars(data_gen, 2, force_no_nulls=not isinstance(data_gen, NullGen))
data_type = data_gen.data_type
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).select(
f.col('a').eqNullSafe(s1),
s2.eqNullSafe(f.col('b')),
f.lit(None).cast(data_type).eqNullSafe(f.col('a')),
f.col('b').eqNullSafe(f.lit(None).cast(data_type)),
f.col('a').eqNullSafe(f.col('b'))))
@pytest.mark.skipif(is_before_spark_330(), reason='DayTimeInterval is not supported before Pyspark 3.3.0')
def test_eq_ns_for_interval():
data_gen = DayTimeIntervalGen()
(s1, s2) = gen_scalars(data_gen, 2, force_no_nulls=not isinstance(data_gen, NullGen))
data_type = data_gen.data_type
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).select(
f.col('a').eqNullSafe(s1),
s2.eqNullSafe(f.col('b')),
f.lit(None).cast(data_type).eqNullSafe(f.col('a')),
f.col('b').eqNullSafe(f.lit(None).cast(data_type)),
f.col('a').eqNullSafe(f.col('b'))))
@pytest.mark.parametrize('data_gen', eq_gens_with_decimal_gen + struct_gens_sample_with_decimal128_no_list, ids=idfn)
def test_ne(data_gen):
(s1, s2) = gen_scalars(data_gen, 2, force_no_nulls=not isinstance(data_gen, NullGen))
data_type = data_gen.data_type
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).select(
f.col('a') != s1,
s2 != f.col('b'),
f.lit(None).cast(data_type) != f.col('a'),
f.col('b') != f.lit(None).cast(data_type),
f.col('a') != f.col('b')))
@pytest.mark.skipif(is_before_spark_330(), reason='DayTimeInterval is not supported before Pyspark 3.3.0')
def test_ne_for_interval():
def test_func(data_gen):
(s1, s2) = gen_scalars(data_gen, 2, force_no_nulls=not isinstance(data_gen, NullGen))
data_type = data_gen.data_type
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).select(
f.col('a') != s1,
s2 != f.col('b'),
f.lit(None).cast(data_type) != f.col('a'),
f.col('b') != f.lit(None).cast(data_type),
f.col('a') != f.col('b')))
# DayTimeIntervalType not supported inside Structs -- issue #6184
# data_gens = [DayTimeIntervalGen(),
# StructGen([['child0', StructGen([['child2', DayTimeIntervalGen()]])], ['child1', short_gen]])]
data_gens = [DayTimeIntervalGen()]
for data_gen in data_gens:
test_func(data_gen)
@pytest.mark.parametrize('data_gen', orderable_gens + struct_gens_sample_with_decimal128_no_list, ids=idfn)
def test_lt(data_gen):
(s1, s2) = gen_scalars(data_gen, 2, force_no_nulls=not isinstance(data_gen, NullGen))
data_type = data_gen.data_type
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).select(
f.col('a') < s1,
s2 < f.col('b'),
f.lit(None).cast(data_type) < f.col('a'),
f.col('b') < f.lit(None).cast(data_type),
f.col('a') < f.col('b')))
@pytest.mark.skipif(is_before_spark_330(), reason='DayTimeInterval is not supported before Pyspark 3.3.0')
def test_lt_for_interval():
def test_func(data_gen):
(s1, s2) = gen_scalars(data_gen, 2, force_no_nulls=not isinstance(data_gen, NullGen))
data_type = data_gen.data_type
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).select(
f.col('a') < s1,
s2 < f.col('b'),
f.lit(None).cast(data_type) < f.col('a'),
f.col('b') < f.lit(None).cast(data_type),
f.col('a') < f.col('b')))
# DayTimeIntervalType not supported inside Structs -- issue #6184
# data_gens = [DayTimeIntervalGen(),
# StructGen([['child0', StructGen([['child2', DayTimeIntervalGen()]])], ['child1', short_gen]])]
data_gens = [DayTimeIntervalGen()]
for data_gen in data_gens:
test_func(data_gen)
@pytest.mark.parametrize('data_gen', orderable_gens + struct_gens_sample_with_decimal128_no_list, ids=idfn)
def test_lte(data_gen):
(s1, s2) = gen_scalars(data_gen, 2, force_no_nulls=not isinstance(data_gen, NullGen))
data_type = data_gen.data_type
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).select(
f.col('a') <= s1,
s2 <= f.col('b'),
f.lit(None).cast(data_type) <= f.col('a'),
f.col('b') <= f.lit(None).cast(data_type),
f.col('a') <= f.col('b')))
@pytest.mark.skipif(is_before_spark_330(), reason='DayTimeInterval is not supported before Pyspark 3.3.0')
def test_lte_for_interval():
def test_func(data_gen):
(s1, s2) = gen_scalars(data_gen, 2, force_no_nulls=not isinstance(data_gen, NullGen))
data_type = data_gen.data_type
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).select(
f.col('a') <= s1,
s2 <= f.col('b'),
f.lit(None).cast(data_type) <= f.col('a'),
f.col('b') <= f.lit(None).cast(data_type),
f.col('a') <= f.col('b')))
# DayTimeIntervalType not supported inside Structs -- issue #6184
# data_gens = [DayTimeIntervalGen(),
# StructGen([['child0', StructGen([['child2', DayTimeIntervalGen()]])], ['child1', short_gen]])]
data_gens = [DayTimeIntervalGen()]
for data_gen in data_gens:
test_func(data_gen)
@pytest.mark.parametrize('data_gen', orderable_gens, ids=idfn)
def test_gt(data_gen):
(s1, s2) = gen_scalars(data_gen, 2, force_no_nulls=not isinstance(data_gen, NullGen))
data_type = data_gen.data_type
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).select(
f.col('a') > s1,
s2 > f.col('b'),
f.lit(None).cast(data_type) > f.col('a'),
f.col('b') > f.lit(None).cast(data_type),
f.col('a') > f.col('b')))
@pytest.mark.skipif(is_before_spark_330(), reason='DayTimeInterval is not supported before Pyspark 3.3.0')
def test_gt_interval():
def test_func(data_gen):
(s1, s2) = gen_scalars(data_gen, 2, force_no_nulls=not isinstance(data_gen, NullGen))
data_type = data_gen.data_type
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).select(
f.col('a') > s1,
s2 > f.col('b'),
f.lit(None).cast(data_type) > f.col('a'),
f.col('b') > f.lit(None).cast(data_type),
f.col('a') > f.col('b')))
# DayTimeIntervalType not supported inside Structs -- issue #6184
# data_gens = [DayTimeIntervalGen(),
# StructGen([['child0', StructGen([['child2', DayTimeIntervalGen()]])], ['child1', short_gen]])]
data_gens = [DayTimeIntervalGen()]
for data_gen in data_gens:
test_func(data_gen)
@pytest.mark.parametrize('data_gen', orderable_gens + struct_gens_sample_with_decimal128_no_list, ids=idfn)
def test_gte(data_gen):
(s1, s2) = gen_scalars(data_gen, 2, force_no_nulls=not isinstance(data_gen, NullGen))
data_type = data_gen.data_type
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).select(
f.col('a') >= s1,
s2 >= f.col('b'),
f.lit(None).cast(data_type) >= f.col('a'),
f.col('b') >= f.lit(None).cast(data_type),
f.col('a') >= f.col('b')))
@pytest.mark.skipif(is_before_spark_330(), reason='DayTimeInterval is not supported before Pyspark 3.3.0')
def test_gte_for_interval():
def test_func(data_gen):
(s1, s2) = gen_scalars(data_gen, 2, force_no_nulls=not isinstance(data_gen, NullGen))
data_type = data_gen.data_type
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).select(
f.col('a') >= s1,
s2 >= f.col('b'),
f.lit(None).cast(data_type) >= f.col('a'),
f.col('b') >= f.lit(None).cast(data_type),
f.col('a') >= f.col('b')))
# DayTimeIntervalType not supported inside Structs -- issue #6184
# data_gens = [DayTimeIntervalGen(),
# StructGen([['child0', StructGen([['child2', DayTimeIntervalGen()]])], ['child1', short_gen]])]
data_gens = [DayTimeIntervalGen()]
for data_gen in data_gens:
test_func(data_gen)
@pytest.mark.parametrize('data_gen', eq_gens_with_decimal_gen + [binary_gen] + array_gens_sample + struct_gens_sample + map_gens_sample, ids=idfn)
def test_isnull(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).select(
f.isnull(f.col('a'))))
@pytest.mark.skipif(is_before_spark_330(), reason='DayTimeInterval is not supported before Pyspark 3.3.0')
def test_isnull_for_interval():
data_gen = DayTimeIntervalGen()
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).select(
f.isnull(f.col('a'))))
@pytest.mark.parametrize('data_gen', [FloatGen(), DoubleGen()], ids=idfn)
def test_isnan(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).select(
f.isnan(f.col('a'))))
@pytest.mark.parametrize('data_gen', eq_gens_with_decimal_gen + [binary_gen] + array_gens_sample + struct_gens_sample + map_gens_sample, ids=idfn)
def test_dropna_any(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).dropna())
@pytest.mark.parametrize('data_gen', eq_gens_with_decimal_gen + [binary_gen] + array_gens_sample + struct_gens_sample + map_gens_sample, ids=idfn)
def test_dropna_all(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).dropna(how='all'))
#dropna is really a filter along with a test for null, but lets do an explicit filter test too
@pytest.mark.parametrize('data_gen', eq_gens_with_decimal_gen + array_gens_sample + struct_gens_sample + map_gens_sample, ids=idfn)
def test_filter(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : three_col_df(spark, BooleanGen(), data_gen, data_gen).filter(f.col('a')))
# coalesce batch happens after a filter, but only if something else happens on the GPU after that
@pytest.mark.parametrize('data_gen', eq_gens_with_decimal_gen + array_gens_sample + struct_gens_sample + map_gens_sample, ids=idfn)
def test_filter_with_project(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : two_col_df(spark, BooleanGen(), data_gen).filter(f.col('a')).selectExpr('*', 'a as a2'))
# It takes quite a bit of setup to get a filter that has a column it can filter on, but
# no columns to actually filter. We make that happen here with a sub-query
# and some constants, so that all the filter needs from its input is the number
# of rows.
@pytest.mark.parametrize('op', ['>', '<'])
def test_empty_filter(op, spark_tmp_path):
def do_it(spark):
df = spark.createDataFrame([(14, "Tom"), (23, "Alice"), (16, "Bob")], ["age", "name"])
# we repartition the data to 1 because for some reason Spark can write 4 files for 3 rows.
# In this case that causes a race condition with the last aggregation which can result
# in a null being returned. For some reason this happens a lot on the GPU in local mode
# and not on the CPU in local mode.
df.repartition(1).write.mode("overwrite").parquet(spark_tmp_path)
df = spark.read.parquet(spark_tmp_path)
curDate = df.withColumn("current_date", f.current_date())
curDate.createOrReplaceTempView("empty_filter_test_curDate")
spark.sql("select current_date, ((select last(current_date) from empty_filter_test_curDate) + interval 1 day) as test from empty_filter_test_curDate").createOrReplaceTempView("empty_filter_test2")
return spark.sql(f"select * from empty_filter_test2 where test {op} current_date")
assert_gpu_and_cpu_are_equal_collect(do_it)
def test_nondeterministic_filter():
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, LongGen(), 1).filter(f.rand(0) > 0.5))
@pytest.mark.parametrize('expr', [f.lit(True), f.lit(False), f.lit(None).cast('boolean')], ids=idfn)
def test_filter_with_lit(expr):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, LongGen()).filter(expr))
# Spark supports two different versions of 'IN'; which one is used depends on the spark.sql.optimizer.inSetConversionThreshold conf.
# This is to test entries under that value.
@pytest.mark.parametrize('data_gen', eq_gens_with_decimal_gen, ids=idfn)
def test_in(data_gen):
# nulls are not supported for in on the GPU yet
num_entries = int(with_cpu_session(lambda spark: spark.conf.get('spark.sql.optimizer.inSetConversionThreshold'))) - 1
# we have to make the scalars in a session so negative scales in decimals are supported
scalars = with_cpu_session(lambda spark: list(gen_scalars(data_gen, num_entries, force_no_nulls=not isinstance(data_gen, NullGen))))
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).select(f.col('a').isin(scalars)))
# Spark supports two different versions of 'IN'; which one is used depends on the spark.sql.optimizer.inSetConversionThreshold conf.
# This is to test entries over that value.
@pytest.mark.parametrize('data_gen', eq_gens_with_decimal_gen, ids=idfn)
def test_in_set(data_gen):
# nulls are not supported for in on the GPU yet
num_entries = int(with_cpu_session(lambda spark: spark.conf.get('spark.sql.optimizer.inSetConversionThreshold'))) + 1
# we have to make the scalars in a session so negative scales in decimals are supported
scalars = with_cpu_session(lambda spark: list(gen_scalars(data_gen, num_entries, force_no_nulls=not isinstance(data_gen, NullGen))))
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).select(f.col('a').isin(scalars)))
| spark-rapids-branch-23.10 | integration_tests/src/main/python/cmp_test.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from pyspark.sql import Row
from asserts import assert_gpu_fallback_collect
from marks import allow_non_gpu, delta_lake
from spark_session import with_cpu_session, with_gpu_session, is_databricks_runtime, spark_version, is_spark_320_or_later
_conf = {'spark.rapids.sql.explain': 'ALL'}
@delta_lake
@allow_non_gpu('FileSourceScanExec')
@pytest.mark.skipif(not (is_databricks_runtime() or is_spark_320_or_later()), \
reason="Delta Lake is already configured on Databricks and CI supports Delta Lake OSS with Spark 3.2.x so far")
def test_delta_metadata_query_fallback(spark_tmp_table_factory):
table = spark_tmp_table_factory.get()
def setup_delta_table(spark):
df = spark.createDataFrame([(1, 'a'), (2, 'b'), (3, 'c')], ["id", "data"])
df.write.format("delta").save("/tmp/delta-table/{}".format(table))
with_cpu_session(setup_delta_table)
    # Note that this only tests that reads against a delta log json file fall back to the CPU;
    # it does not test the actual metadata queries that the delta lake plugin generates, so it
    # does not fully test the plugin code.
assert_gpu_fallback_collect(
lambda spark : spark.read.json("/tmp/delta-table/{}/_delta_log/00000000000000000000.json".format(table)),
"FileSourceScanExec", conf = _conf)
@delta_lake
@pytest.mark.skipif(not is_databricks_runtime(), \
reason="This test is specific to Databricks because we only fall back to CPU for merges on Databricks")
@allow_non_gpu(any = True)
def test_delta_merge_query(spark_tmp_table_factory):
table_name_1 = spark_tmp_table_factory.get()
table_name_2 = spark_tmp_table_factory.get()
def setup_delta_table1(spark):
df = spark.createDataFrame([('a', 10), ('b', 20)], ["c0", "c1"])
df.write.format("delta").save("/tmp/delta-table/{}".format(table_name_1))
def setup_delta_table2(spark):
df = spark.createDataFrame([('a', 30), ('c', 30)], ["c0", "c1"])
df.write.format("delta").save("/tmp/delta-table/{}".format(table_name_2))
with_cpu_session(setup_delta_table1)
with_cpu_session(setup_delta_table2)
def merge(spark):
spark.read.format("delta").load("/tmp/delta-table/{}".format(table_name_1)).createOrReplaceTempView("t1")
spark.read.format("delta").load("/tmp/delta-table/{}".format(table_name_2)).createOrReplaceTempView("t2")
return spark.sql("MERGE INTO t1 USING t2 ON t1.c0 = t2.c0 \
WHEN MATCHED THEN UPDATE SET c1 = t1.c1 + t2.c1 \
WHEN NOT MATCHED THEN INSERT (c0, c1) VALUES (t2.c0, t2.c1)").collect()
# run the MERGE on GPU
with_gpu_session(lambda spark : merge(spark), conf = _conf)
# check the results on CPU
result = with_cpu_session(lambda spark: spark.sql("SELECT * FROM t1 ORDER BY c0").collect(), conf=_conf)
assert [Row(c0='a', c1=40), Row(c0='b', c1=20), Row(c0='c', c1=30)] == result | spark-rapids-branch-23.10 | integration_tests/src/main/python/delta_lake_test.py |
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_gpu_and_cpu_are_equal_collect, assert_gpu_and_cpu_are_equal_sql
from data_gen import *
from pyspark.sql.types import *
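# Project struct literals (including nested structs, maps, and arrays) alongside a regular
# column to exercise scalar struct handling on the GPU.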
def test_struct_scalar_project():
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.range(2).selectExpr(
"named_struct('1', 2, '3', 4) as i",
"named_struct('a', 'b', 'c', 'd', 'e', named_struct()) as s",
"named_struct('a', map('foo', 10, 'bar', 11), 'arr', array(1.0, 2.0, 3.0)) as st"
"id"))
@pytest.mark.parametrize('data_gen', [StructGen([["first", boolean_gen], ["second", byte_gen], ["third", float_gen]]),
StructGen([["first", short_gen], ["second", int_gen], ["third", long_gen]]),
StructGen([["first", double_gen], ["second", date_gen], ["third", timestamp_gen]]),
StructGen([["first", string_gen], ["second", ArrayGen(byte_gen)], ["third", simple_string_to_string_map_gen]]),
StructGen([["first", decimal_gen_64bit], ["second", decimal_gen_32bit], ["third", decimal_gen_32bit]]),
StructGen([["first", decimal_gen_128bit], ["second", decimal_gen_128bit], ["third", decimal_gen_128bit]]),
StructGen([["first", binary_gen], ["second", ArrayGen(BinaryGen(max_length=10), max_length=10)], ["third", binary_gen]])], ids=idfn)
def test_struct_get_item(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr(
'a.first',
'a.second',
'a.third'))
@pytest.mark.parametrize('data_gen', all_basic_gens + decimal_gens + [binary_gen,
null_gen] + single_level_array_gens + struct_gens_sample + map_gens_sample, ids=idfn)
def test_make_struct(data_gen):
# Spark has no good way to create a map literal without the map function
# so we are inserting one.
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).selectExpr(
'struct(a, b)',
'named_struct("foo", b, "m", map("a", "b"), "n", null, "bar", 5, "other", named_struct("z", "z"),"end", a)'))
@pytest.mark.parametrize('data_gen', [StructGen([["first", boolean_gen], ["second", byte_gen], ["third", float_gen]]),
StructGen([["first", short_gen], ["second", int_gen], ["third", long_gen]]),
StructGen([["first", long_gen], ["second", long_gen], ["third", long_gen]]),
StructGen([["first", string_gen], ["second", ArrayGen(string_gen)], ["third", ArrayGen(string_gen)]])], ids=idfn)
def test_orderby_struct(data_gen):
assert_gpu_and_cpu_are_equal_sql(
lambda spark : append_unique_int_col_to_df(spark, unary_op_df(spark, data_gen)),
'struct_table',
'select struct_table.a, struct_table.uniq_int from struct_table order by uniq_int')
@pytest.mark.parametrize('data_gen', [StructGen([["first", string_gen], ["second", ArrayGen(string_gen)], ["third", ArrayGen(string_gen)]])], ids=idfn)
def test_orderby_struct_2(data_gen):
assert_gpu_and_cpu_are_equal_sql(
lambda spark : append_unique_int_col_to_df(spark, unary_op_df(spark, data_gen)),
'struct_table',
'select struct_table.a, struct_table.uniq_int from struct_table order by uniq_int')
| spark-rapids-branch-23.10 | integration_tests/src/main/python/struct_test.py |
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from datetime import date, datetime, timedelta, timezone
from decimal import *
import math
from pyspark.context import SparkContext
from pyspark.sql import Row
from pyspark.sql.types import *
import pyspark.sql.functions as f
import random
from spark_session import is_tz_utc, is_before_spark_340
import sre_yield
import struct
from conftest import skip_unless_precommit_tests
import time
import os
from functools import lru_cache
import hashlib
# set time zone to UTC for timestamp test cases to avoid `datetime` out-of-range error:
# refer to: https://github.com/NVIDIA/spark-rapids/issues/7535
os.environ['TZ'] = 'UTC'
time.tzset()
class DataGen:
"""Base class for data generation"""
def __repr__(self):
if not self.nullable:
return self.__class__.__name__[:-3] + '(not_null)'
return self.__class__.__name__[:-3]
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return isinstance(other, self.__class__) and self._cache_repr() == other._cache_repr()
def __ne__(self, other):
return not self.__eq__(other)
def __init__(self, data_type, nullable=True, special_cases =[]):
self.data_type = data_type
self.list_of_special_cases = special_cases
self._special_cases = []
if isinstance(nullable, tuple):
self.nullable = nullable[0]
weight = nullable[1]
else:
self.nullable = nullable
weight = 5.0
if self.nullable:
self.with_special_case(None, weight)
# Special cases can be a value or a tuple of (value, weight). If the
# special_case itself is a tuple as in the case of StructGen, it MUST be added with a
# weight like : ((special_case_tuple_v1, special_case_tuple_v2), weight).
for element in special_cases:
if isinstance(element, tuple):
self.with_special_case(element[0], element[1])
else:
self.with_special_case(element)
def _cache_repr(self):
# repr of DataGens and their children will be used to generate the cache key
# make sure it is unique for different DataGens
notnull = '(not_null)' if not self.nullable else ''
datatype = str(self.data_type)
specialcases = ''
for (weight, case) in self._special_cases:
if (callable(case)):
case = case.__code__.co_code
specialcases += str(case) + ', ' + str(weight) + ', '
specialcases = hashlib.blake2b(specialcases.encode('utf-8'), digest_size=8).hexdigest()
return self.__class__.__name__[:-3] + notnull + ', ' + datatype + ', ' + str(specialcases)
def copy_special_case(self, special_case, weight=1.0):
# it would be good to do a deepcopy, but sre_yield is not happy with that.
c = copy.copy(self)
c._special_cases = copy.deepcopy(self._special_cases)
return c.with_special_case(special_case, weight=weight)
def with_special_case(self, special_case, weight=1.0):
"""
Add in a special case with a given weight. A special case can either be
a function that takes an instance of Random and returns the generated data
or it can be a constant. By default the weight is 1.0, and the default
        number generation's weight is 100.0. The number of rows that are generated in
        the data set should be proportional to its weight / the sum of all weights.
"""
if callable(special_case):
sc = special_case
else:
sc = lambda rand: special_case
self._special_cases.append((weight, sc))
return self
def get_types(self):
return 'DataType: {}, nullable: {}, special_cases: {}'.format(self.data_type,
self.nullable, self.list_of_special_cases)
def start(self, rand):
"""Start data generation using the given rand"""
raise TypeError('Children should implement this method and call _start')
def _start(self, rand, gen_func):
"""Start internally, but use the given gen_func as the base"""
if not self._special_cases:
self._gen_func = gen_func
else:
weighted_choices = [(100.0, lambda rand: gen_func())]
weighted_choices.extend(self._special_cases)
total = float(sum(weight for weight,gen in weighted_choices))
normalized_choices = [(weight/total, gen) for weight,gen in weighted_choices]
def choose_one():
pick = rand.random()
total = 0
for (weight, gen) in normalized_choices:
total += weight
if total >= pick:
return gen(rand)
raise RuntimeError('Random did not pick something we expected')
self._gen_func = choose_one
def gen(self, force_no_nulls=False):
"""generate the next line"""
if not self._gen_func:
raise RuntimeError('start must be called before generating any data')
v = self._gen_func()
if force_no_nulls:
while v is None:
v = self._gen_func()
return v
def contains_ts(self):
"""Checks if this contains a TimestampGen"""
return False
class ConvertGen(DataGen):
"""Provides a way to modify the data before it is returned"""
def __init__(self, child_gen, func, data_type=None, nullable=True):
if data_type is None:
data_type = child_gen.data_type
super().__init__(data_type, nullable=nullable)
self._child_gen = child_gen
self._func = func
def __repr__(self):
return super().__repr__() + '(' + str(self._child_gen) + ')'
def _cache_repr(self):
return super()._cache_repr() + '(' + self._child_gen._cache_repr() + ')'
def start(self, rand):
self._child_gen.start(rand)
def modify():
return self._func(self._child_gen.gen())
self._start(rand, modify)
_MAX_CHOICES = 1 << 64
class StringGen(DataGen):
"""Generate strings that match a pattern"""
def __init__(self, pattern="(.|\n){1,30}", flags=0, charset=sre_yield.CHARSET, nullable=True):
super().__init__(StringType(), nullable=nullable)
self.base_strs = sre_yield.AllStrings(pattern, flags=flags, charset=charset, max_count=_MAX_CHOICES)
# save pattern and charset for cache repr
charsetrepr = '[' + ','.join(charset) + ']' if charset != sre_yield.CHARSET else 'sre_yield.CHARSET'
self.stringrepr = pattern + ',' + str(flags) + ',' + charsetrepr
def _cache_repr(self):
return super()._cache_repr() + '(' + self.stringrepr + ')'
def with_special_pattern(self, pattern, flags=0, charset=sre_yield.CHARSET, weight=1.0):
"""
Like with_special_case but you can provide a regexp pattern
instead of a hard coded string value.
"""
strs = sre_yield.AllStrings(pattern, flags=flags, charset=charset, max_count=_MAX_CHOICES)
length = strs.__len__()
return self.with_special_case(lambda rand : strs[rand.randint(0, length-1)], weight=weight)
def start(self, rand):
strs = self.base_strs
length = strs.__len__()
self._start(rand, lambda : strs[rand.randint(0, length-1)])
BYTE_MIN = -(1 << 7)
BYTE_MAX = (1 << 7) - 1
class ByteGen(DataGen):
"""Generate Bytes"""
def __init__(self, nullable=True, min_val = BYTE_MIN, max_val = BYTE_MAX, special_cases=[]):
super().__init__(ByteType(), nullable=nullable, special_cases=special_cases)
self._min_val = min_val
self._max_val = max_val
def start(self, rand):
self._start(rand, lambda : rand.randint(self._min_val, self._max_val))
def _cache_repr(self):
return super()._cache_repr() + '(' + str(self._min_val) + ',' + str(self._max_val) + ')'
SHORT_MIN = -(1 << 15)
SHORT_MAX = (1 << 15) - 1
class ShortGen(DataGen):
"""Generate Shorts, which some built in corner cases."""
def __init__(self, nullable=True, min_val = SHORT_MIN, max_val = SHORT_MAX,
special_cases = [SHORT_MIN, SHORT_MAX, 0, 1, -1]):
super().__init__(ShortType(), nullable=nullable, special_cases=special_cases)
self._min_val = min_val
self._max_val = max_val
def _cache_repr(self):
return super()._cache_repr() + '(' + str(self._min_val) + ',' + str(self._max_val) + ')'
def start(self, rand):
self._start(rand, lambda : rand.randint(self._min_val, self._max_val))
INT_MIN = -(1 << 31)
INT_MAX = (1 << 31) - 1
class IntegerGen(DataGen):
"""Generate Ints, which some built in corner cases."""
def __init__(self, nullable=True, min_val = INT_MIN, max_val = INT_MAX,
special_cases = [INT_MIN, INT_MAX, 0, 1, -1]):
super().__init__(IntegerType(), nullable=nullable, special_cases=special_cases)
self._min_val = min_val
self._max_val = max_val
def _cache_repr(self):
return super()._cache_repr() + '(' + str(self._min_val) + ',' + str(self._max_val) + ')'
def start(self, rand):
self._start(rand, lambda : rand.randint(self._min_val, self._max_val))
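# A minimal usage sketch of the DataGen protocol (illustration only, not part of the
# framework; assumes a seeded random.Random):
#   rand = random.Random(0)
#   gen = IntegerGen(nullable=True)
#   gen.start(rand)                        # install the generation function
#   vals = [gen.gen() for _ in range(5)]   # draw values; None may appear when nullable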
class DecimalGen(DataGen):
"""Generate Decimals, with some built in corner cases."""
def __init__(self, precision=None, scale=None, nullable=True, special_cases=None, avoid_positive_values=False):
if precision is None:
#Maximum number of decimal digits a Long can represent is 18
precision = 18
scale = 0
DECIMAL_MIN = Decimal('-' + ('9' * precision) + 'e' + str(-scale))
DECIMAL_MAX = Decimal(('9'* precision) + 'e' + str(-scale))
if special_cases is None:
special_cases = [DECIMAL_MIN, Decimal('0')]
if not avoid_positive_values:
special_cases.append(DECIMAL_MAX)
super().__init__(DecimalType(precision, scale), nullable=nullable, special_cases=special_cases)
self.scale = scale
self.precision = precision
self.avoid_positive_values = avoid_positive_values
def __repr__(self):
return super().__repr__() + '(' + str(self.precision) + ',' + str(self.scale) + ')'
def _cache_repr(self):
return super()._cache_repr() + '(' + str(self.precision) + ',' + str(self.scale) + ',' + str(self.avoid_positive_values) + ')'
def start(self, rand):
def random_decimal(rand):
if self.avoid_positive_values:
sign = "-"
else:
sign = rand.choice(["-", ""])
int_part = "".join([rand.choice("0123456789") for _ in range(self.precision)])
result = f"{sign}{int_part}e{str(-self.scale)}"
return Decimal(result)
self._start(rand, lambda : random_decimal(rand))
LONG_MIN = -(1 << 63)
LONG_MAX = (1 << 63) - 1
class LongGen(DataGen):
"""Generate Longs, which some built in corner cases."""
def __init__(self, nullable=True, min_val = LONG_MIN, max_val = LONG_MAX, special_cases = []):
_special_cases = [min_val, max_val, 0, 1, -1] if not special_cases else special_cases
super().__init__(LongType(), nullable=nullable, special_cases=_special_cases)
self._min_val = min_val
self._max_val = max_val
def _cache_repr(self):
return super()._cache_repr() + '(' + str(self._min_val) + ',' + str(self._max_val) + ')'
def start(self, rand):
self._start(rand, lambda : rand.randint(self._min_val, self._max_val))
class UniqueLongGen(DataGen):
"""Generates a sequence of longs with no repeating values except nulls."""
def __init__(self, nullable=False):
super().__init__(LongType(), nullable=nullable)
self._current_val = 0
def next_val(self):
if self._current_val < 0:
self._current_val = -self._current_val + 1
else:
self._current_val = -self._current_val - 1
return self._current_val
def _cache_repr(self):
return super()._cache_repr()
def start(self, rand):
self._current_val = 0
self._start(rand, lambda: self.next_val())
class RepeatSeqGen(DataGen):
"""Generate Repeated seq of `length` random items"""
def __init__(self, child, length):
super().__init__(child.data_type, nullable=False)
self.nullable = child.nullable
self._child = child
self._vals = []
self._length = length
self._index = 0
def __repr__(self):
return super().__repr__() + '(' + str(self._child) + ')'
def _cache_repr(self):
return super()._cache_repr() + '(' + self._child._cache_repr() + ',' + str(self._length) + ')'
def _loop_values(self):
ret = self._vals[self._index]
self._index = (self._index + 1) % self._length
return ret
def start(self, rand):
self._index = 0
self._child.start(rand)
self._start(rand, self._loop_values)
self._vals = [self._child.gen() for _ in range(0, self._length)]
class SetValuesGen(DataGen):
"""A set of values that are randomly selected"""
def __init__(self, data_type, data):
super().__init__(data_type, nullable=False)
self.nullable = any(x is None for x in data)
self._vals = data
def __repr__(self):
return super().__repr__() +'(' + str(self.data_type) + ',' + str(self._vals) + ')'
def _cache_repr(self):
return super()._cache_repr() +'(' + str(self.data_type) + ',' + str(self._vals) + ')'
def start(self, rand):
data = self._vals
length = len(data)
self._start(rand, lambda : data[rand.randrange(0, length)])
FLOAT_MIN = -3.4028235E38
FLOAT_MAX = 3.4028235E38
NEG_FLOAT_NAN_MIN_VALUE = struct.unpack('f', struct.pack('I', 0xffffffff))[0]
NEG_FLOAT_NAN_MAX_VALUE = struct.unpack('f', struct.pack('I', 0xff800001))[0]
POS_FLOAT_NAN_MIN_VALUE = struct.unpack('f', struct.pack('I', 0x7f800001))[0]
POS_FLOAT_NAN_MAX_VALUE = struct.unpack('f', struct.pack('I', 0x7fffffff))[0]
class FloatGen(DataGen):
"""Generate floats, which some built in corner cases."""
def __init__(self, nullable=True,
no_nans=False, special_cases=None):
self._no_nans = no_nans
if special_cases is None:
special_cases = [FLOAT_MIN, FLOAT_MAX, 0.0, -0.0, 1.0, -1.0]
if not no_nans:
special_cases.append(float('inf'))
special_cases.append(float('-inf'))
special_cases.append(float('nan'))
special_cases.append(NEG_FLOAT_NAN_MAX_VALUE)
super().__init__(FloatType(), nullable=nullable, special_cases=special_cases)
def _fixup_nans(self, v):
if self._no_nans and (math.isnan(v) or v == math.inf or v == -math.inf):
v = None if self.nullable else 0.0
return v
def _cache_repr(self):
return super()._cache_repr() + '(' + str(self._no_nans) + ')'
def start(self, rand):
def gen_float():
i = rand.randint(INT_MIN, INT_MAX)
p = struct.pack('i', i)
return self._fixup_nans(struct.unpack('f', p)[0])
self._start(rand, gen_float)
DOUBLE_MIN_EXP = -1022
DOUBLE_MAX_EXP = 1023
DOUBLE_MAX_FRACTION = int('1'*52, 2)
DOUBLE_MIN = -1.7976931348623157E308
DOUBLE_MAX = 1.7976931348623157E308
NEG_DOUBLE_NAN_MIN_VALUE = struct.unpack('d', struct.pack('L', 0xffffffffffffffff))[0]
NEG_DOUBLE_NAN_MAX_VALUE = struct.unpack('d', struct.pack('L', 0xfff0000000000001))[0]
POS_DOUBLE_NAN_MIN_VALUE = struct.unpack('d', struct.pack('L', 0x7ff0000000000001))[0]
POS_DOUBLE_NAN_MAX_VALUE = struct.unpack('d', struct.pack('L', 0x7fffffffffffffff))[0]
class DoubleGen(DataGen):
"""Generate doubles, which some built in corner cases."""
def __init__(self, min_exp=DOUBLE_MIN_EXP, max_exp=DOUBLE_MAX_EXP, no_nans=False,
nullable=True, special_cases = None):
self._min_exp = min_exp
self._max_exp = max_exp
self._no_nans = no_nans
self._use_full_range = (self._min_exp == DOUBLE_MIN_EXP) and (self._max_exp == DOUBLE_MAX_EXP)
if special_cases is None:
special_cases = [
self.make_from(1, self._max_exp, DOUBLE_MAX_FRACTION),
self.make_from(0, self._max_exp, DOUBLE_MAX_FRACTION),
self.make_from(1, self._min_exp, DOUBLE_MAX_FRACTION),
self.make_from(0, self._min_exp, DOUBLE_MAX_FRACTION)
]
if self._min_exp <= 0 and self._max_exp >= 0:
special_cases.append(0.0)
special_cases.append(-0.0)
if self._min_exp <= 3 and self._max_exp >= 3:
special_cases.append(1.0)
special_cases.append(-1.0)
if not no_nans:
special_cases.append(float('inf'))
special_cases.append(float('-inf'))
special_cases.append(float('nan'))
special_cases.append(NEG_DOUBLE_NAN_MAX_VALUE)
super().__init__(DoubleType(), nullable=nullable, special_cases=special_cases)
def _cache_repr(self):
return super()._cache_repr() + '(' + str(self._min_exp) + ',' + str(self._max_exp) + ',' + str(self._no_nans) + ')'
@staticmethod
def make_from(sign, exp, fraction):
sign = sign & 1 # 1 bit
exp = (exp + 1023) & 0x7FF # add bias and 11 bits
fraction = fraction & DOUBLE_MAX_FRACTION
i = (sign << 63) | (exp << 52) | fraction
p = struct.pack('L', i)
ret = struct.unpack('d', p)[0]
return ret
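# For reference (derived from the bit layout above): make_from(0, 0, 0) is 1.0,
# make_from(1, 0, 0) is -1.0, and make_from(0, DOUBLE_MAX_EXP, DOUBLE_MAX_FRACTION)
# is the largest finite double.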
def _fixup_nans(self, v):
if self._no_nans and (math.isnan(v) or v == math.inf or v == -math.inf):
v = None if self.nullable else 0.0
return v
def start(self, rand):
if self._use_full_range:
def gen_double():
i = rand.randint(LONG_MIN, LONG_MAX)
p = struct.pack('l', i)
return self._fixup_nans(struct.unpack('d', p)[0])
self._start(rand, gen_double)
else:
def gen_part_double():
sign = rand.getrandbits(1)
exp = rand.randint(self._min_exp, self._max_exp)
fraction = rand.getrandbits(52)
return self._fixup_nans(self.make_from(sign, exp, fraction))
self._start(rand, gen_part_double)
class BooleanGen(DataGen):
"""Generate Bools (True/False)"""
def __init__(self, nullable=True):
super().__init__(BooleanType(), nullable=nullable)
def start(self, rand):
self._start(rand, lambda : bool(rand.getrandbits(1)))
class StructGen(DataGen):
"""Generate a Struct"""
def __init__(self, children, nullable=True, special_cases=[]):
"""
Initialize the struct with children. The children should be of the form:
[('name', Gen),('name_2', Gen2)]
Where name is the name of the struct field and the Gens are generators of
the type for that entry.
"""
tmp = [StructField(name, child.data_type, nullable=child.nullable) for name, child in children]
super().__init__(StructType(tmp), nullable=nullable, special_cases=special_cases)
self.children = children
def __repr__(self):
return super().__repr__() + '(' + ','.join([str(i) for i in self.children]) + ')'
def _cache_repr(self):
return super()._cache_repr() + '(' + ','.join([name + child._cache_repr() for name, child in self.children]) + ')'
def start(self, rand):
for name, child in self.children:
child.start(rand)
def make_tuple():
data = [child.gen() for name, child in self.children]
return tuple(data)
self._start(rand, make_tuple)
def contains_ts(self):
return any(child[1].contains_ts() for child in self.children)
class DateGen(DataGen):
"""Generate Dates in a given range"""
def __init__(self, start=None, end=None, nullable=True):
super().__init__(DateType(), nullable=nullable)
if start is None:
# Spark supports times starting at
# "0001-01-01 00:00:00.000000"
start = date(1, 1, 1)
elif not isinstance(start, date):
raise RuntimeError('Unsupported type passed in for start {}'.format(start))
if end is None:
# Spark supports time through
# "9999-12-31 23:59:59.999999"
end = date(9999, 12, 31)
elif isinstance(end, timedelta):
end = start + end
elif not isinstance(end, date):
raise RuntimeError('Unsupported type passed in for end {}'.format(end))
self._start_day = self._to_days_since_epoch(start)
self._end_day = self._to_days_since_epoch(end)
self.with_special_case(start)
self.with_special_case(end)
# we want a few around the leap year if possible
step = int((end.year - start.year) / 5.0)
if (step != 0):
years = {self._guess_leap_year(y) for y in range(start.year, end.year, step)}
for y in years:
leap_day = date(y, 2, 29)
if (leap_day > start and leap_day < end):
self.with_special_case(leap_day)
next_day = date(y, 3, 1)
if (next_day > start and next_day < end):
self.with_special_case(next_day)
def _cache_repr(self):
return super()._cache_repr() + '(' + str(self._start_day) + ',' + str(self._end_day) + ')'
@staticmethod
def _guess_leap_year(t):
y = int(math.ceil(t/4.0)) * 4
if ((y % 100) == 0) and ((y % 400) != 0):
y = y + 4
if (y == 10000):
y = y - 4
return y
_epoch = date(1970, 1, 1)
_days = timedelta(days=1)
def _to_days_since_epoch(self, val):
return int((val - self._epoch)/self._days)
def _from_days_since_epoch(self, days):
return self._epoch + timedelta(days=days)
def start(self, rand):
start = self._start_day
end = self._end_day
self._start(rand, lambda : self._from_days_since_epoch(rand.randint(start, end)))
class TimestampGen(DataGen):
"""Generate Timestamps in a given range. All timezones are UTC by default."""
def __init__(self, start=None, end=None, nullable=True):
super().__init__(TimestampType(), nullable=nullable)
if start is None:
# Spark supports times starting at
# "0001-01-01 00:00:00.000000"
# but it has issues if you get really close to that because it tries to do things
# in a different format which causes roundoff, so we have to add a few days,
# just to be sure
start = datetime(1, 1, 3, tzinfo=timezone.utc)
elif not isinstance(start, datetime):
raise RuntimeError('Unsupported type passed in for start {}'.format(start))
if end is None:
# Spark supports time through
# "9999-12-31 23:59:59.999999"
end = datetime(9999, 12, 31, 23, 59, 59, 999999, tzinfo=timezone.utc)
elif isinstance(end, timedelta):
end = start + end
elif not isinstance(end, datetime):
raise RuntimeError('Unsupported type passed in for end {}'.format(end))
self._start_time = self._to_us_since_epoch(start)
self._end_time = self._to_us_since_epoch(end)
if (self._epoch >= start and self._epoch <= end):
self.with_special_case(self._epoch)
def _cache_repr(self):
return super()._cache_repr() + '(' + str(self._start_time) + ',' + str(self._end_time) + ')'
_epoch = datetime(1970, 1, 1, tzinfo=timezone.utc)
_us = timedelta(microseconds=1)
def _to_us_since_epoch(self, val):
return int((val - self._epoch)/self._us)
def _from_us_since_epoch(self, us):
return self._epoch + timedelta(microseconds=us)
def start(self, rand):
start = self._start_time
end = self._end_time
self._start(rand, lambda : self._from_us_since_epoch(rand.randint(start, end)))
def contains_ts(self):
return True
class ArrayGen(DataGen):
"""Generate Arrays of data."""
def __init__(self, child_gen, min_length=0, max_length=20, nullable=True, all_null=False, convert_to_tuple=False):
super().__init__(ArrayType(child_gen.data_type, containsNull=child_gen.nullable), nullable=nullable)
self._min_length = min_length
self._max_length = max_length
self._child_gen = child_gen
self.all_null = all_null
self.convert_to_tuple = convert_to_tuple
def __repr__(self):
return super().__repr__() + '(' + str(self._child_gen) + ')'
def _cache_repr(self):
return super()._cache_repr() + '(' + self._child_gen._cache_repr() + ')'
def start(self, rand):
self._child_gen.start(rand)
def gen_array():
if self.all_null:
return None
length = rand.randint(self._min_length, self._max_length)
result = [self._child_gen.gen() for _ in range(0, length)]
# This is needed for map(array, _) tests because python cannot create
# a dict(list, _), but it can create a dict(tuple, _)
if self.convert_to_tuple:
result = tuple(result)
return result
self._start(rand, gen_array)
def contains_ts(self):
return self._child_gen.contains_ts()
class MapGen(DataGen):
"""Generate a Map"""
def __init__(self, key_gen, value_gen, min_length=0, max_length=20, nullable=True, special_cases=[]):
# keys cannot be nullable
assert not key_gen.nullable
self._min_length = min_length
self._max_length = max_length
self._key_gen = key_gen
self._value_gen = value_gen
super().__init__(MapType(key_gen.data_type, value_gen.data_type, valueContainsNull=value_gen.nullable), nullable=nullable, special_cases=special_cases)
def __repr__(self):
return super().__repr__() + '(' + str(self._key_gen) + ',' + str(self._value_gen) + ')'
def _cache_repr(self):
return super()._cache_repr() + '(' + self._key_gen._cache_repr() + ',' + self._value_gen._cache_repr() + ')'
def start(self, rand):
self._key_gen.start(rand)
self._value_gen.start(rand)
def make_dict():
length = rand.randint(self._min_length, self._max_length)
return {self._key_gen.gen(): self._value_gen.gen() for idx in range(0, length)}
self._start(rand, make_dict)
def contains_ts(self):
return self._key_gen.contains_ts() or self._value_gen.contains_ts()
class NullGen(DataGen):
"""Generate NullType values"""
def __init__(self):
super().__init__(NullType(), nullable=True)
def start(self, rand):
def make_null():
return None
self._start(rand, make_null)
# DayTimeIntervalGen is for Spark 3.3.0+
# DayTimeIntervalType(startField, endField):
# Represents a day-time interval which is made up of a contiguous subset of the following fields:
# SECOND, seconds within minutes and possibly fractions of a second [0..59.999999],
# Note Spark now uses 99 as max second, see issue https://issues.apache.org/jira/browse/SPARK-38324
# If second is start field, its max value is long.max / microseconds in one second
# MINUTE, minutes within hours [0..59],
# If minute is start field, its max value is long.max / microseconds in one minute
# HOUR, hours within days [0..23],
# If hour is start field, its max value is long.max / microseconds in one hour
# DAY, days in the range [0..106751991]. 106751991 is long.max / microseconds in one day
# For more details: https://spark.apache.org/docs/latest/sql-ref-datatypes.html
MIN_DAY_TIME_INTERVAL = timedelta(microseconds=-pow(2, 63))
MAX_DAY_TIME_INTERVAL = timedelta(microseconds=(pow(2, 63) - 1))
class DayTimeIntervalGen(DataGen):
"""Generate DayTimeIntervalType values"""
def __init__(self, min_value=MIN_DAY_TIME_INTERVAL, max_value=MAX_DAY_TIME_INTERVAL, start_field="day", end_field="second",
nullable=True, special_cases=[timedelta(seconds=0)]):
# Note the nanoseconds are truncated for min_value and max_value
self._min_micros = (math.floor(min_value.total_seconds()) * 1000000) + min_value.microseconds
self._max_micros = (math.floor(max_value.total_seconds()) * 1000000) + max_value.microseconds
fields = ["day", "hour", "minute", "second"]
start_index = fields.index(start_field)
end_index = fields.index(end_field)
if start_index > end_index:
raise RuntimeError('Start field {}, end field {}, valid fields are {}, the start field index must '
'be <= the end field index'.format(start_field, end_field, fields))
super().__init__(DayTimeIntervalType(start_index, end_index), nullable=nullable, special_cases=special_cases)
def _gen_random(self, rand):
micros = rand.randint(self._min_micros, self._max_micros)
# issue: Interval types are not truncated to the expected endField when creating a DataFrame via Duration
# https://issues.apache.org/jira/browse/SPARK-38577
# If above issue is fixed, should update this DayTimeIntervalGen.
return timedelta(microseconds=micros)
def _cache_repr(self):
return super()._cache_repr() + '(' + str(self._min_micros) + ',' + str(self._max_micros) + ')'
def start(self, rand):
self._start(rand, lambda: self._gen_random(rand))
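# Illustrative usage (not exercised here): a generator limited to HOUR TO SECOND
# intervals within a single day could look like
#   DayTimeIntervalGen(min_value=timedelta(0),
#                      max_value=timedelta(hours=23, minutes=59, seconds=59),
#                      start_field="hour", end_field="second")
# which maps to DayTimeIntervalType(HOUR, SECOND) on Spark 3.3.0+.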
class BinaryGen(DataGen):
"""Generate BinaryType values"""
def __init__(self, min_length=0, max_length=20, nullable=True):
super().__init__(BinaryType(), nullable=nullable)
self._min_length = min_length
self._max_length = max_length
def _cache_repr(self):
return super()._cache_repr() + '(' + str(self._min_length) + ',' + str(self._max_length) + ')'
def start(self, rand):
def gen_bytes():
length = rand.randint(self._min_length, self._max_length)
return bytes([ rand.randint(0, 255) for _ in range(length) ])
self._start(rand, gen_bytes)
def skip_if_not_utc():
if (not is_tz_utc()):
skip_unless_precommit_tests('The java system time zone is not set to UTC')
# Note: the current (2023/06/06) maximum IT data size is 7282688 bytes, so an LRU cache with maxsize 128
# will lead to at most 7282688 * 128 = 932 MB of additional memory usage in the edge case, which is acceptable.
@lru_cache(maxsize=128, typed=True)
def gen_df_help(data_gen, length, seed):
rand = random.Random(seed)
data_gen.start(rand)
data = [data_gen.gen() for index in range(0, length)]
return data
def gen_df(spark, data_gen, length=2048, seed=0, num_slices=None):
"""Generate a spark dataframe from the given data generators."""
if isinstance(data_gen, list):
src = StructGen(data_gen, nullable=False)
else:
src = data_gen
# we cannot create a data frame from a nullable struct
assert not data_gen.nullable
# Before we get too far we need to verify that we can run with timestamps
if src.contains_ts():
skip_if_not_utc()
data = gen_df_help(src, length, seed)
# We use `numSlices` to create an RDD with the specific number of partitions,
# which is then turned into a dataframe. If not specified, it is `None` (default spark value)
return spark.createDataFrame(
SparkContext.getOrCreate().parallelize(data, numSlices=num_slices),
src.data_type)
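# Illustrative usage, assuming an active SparkSession named `spark`:
#   df = gen_df(spark, [('a', IntegerGen()), ('b', StringGen())], length=512, num_slices=4)
# The two_col_df/three_col_df helpers below wrap this same pattern.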
def _mark_as_lit(data, data_type):
# To support nested types, 'data_type' is required.
assert data_type is not None
if data is None:
return f.lit(data).cast(data_type)
if isinstance(data_type, ArrayType):
assert isinstance(data, list)
# Sadly you cannot create a literal from just an array in pyspark
return f.array([_mark_as_lit(x, data_type.elementType) for x in data])
elif isinstance(data_type, StructType):
assert isinstance(data, tuple) and len(data) == len(data_type.fields)
# Sadly you cannot create a literal from just a dict/tuple in pyspark
children = zip(data, data_type.fields)
return f.struct([_mark_as_lit(x, fd.dataType).alias(fd.name) for x, fd in children])
elif isinstance(data_type, DateType):
# Due to https://bugs.python.org/issue13305 we need to zero pad for years prior to 1000,
# but this works for all of them
dateString = data.strftime("%Y-%m-%d").zfill(10)
return f.lit(dateString).cast(data_type)
elif isinstance(data_type, MapType):
assert isinstance(data, dict)
# Sadly you cannot create a literal from just a dict/tuple in pyspark
col_array = []
for k in data:
col_array.append(_mark_as_lit(k, data_type.keyType))
col_array.append(_mark_as_lit(data[k], data_type.valueType))
return f.create_map(*col_array)
else:
# lit does not take a data type so we might have to cast it
return f.lit(data).cast(data_type)
def _gen_scalars_common(data_gen, count, seed=0):
if isinstance(data_gen, list):
src = StructGen(data_gen, nullable=False)
else:
src = data_gen
# Before we get too far we need to verify that we can run with timestamps
if src.contains_ts():
skip_if_not_utc()
rand = random.Random(seed)
src.start(rand)
return src
def gen_scalars(data_gen, count, seed=0, force_no_nulls=False):
"""Generate scalar values."""
if force_no_nulls:
assert(not isinstance(data_gen, NullGen))
src = _gen_scalars_common(data_gen, count, seed=seed)
data_type = src.data_type
return (_mark_as_lit(src.gen(force_no_nulls=force_no_nulls), data_type) for i in range(0, count))
def gen_scalar(data_gen, seed=0, force_no_nulls=False):
"""Generate a single scalar value."""
v = list(gen_scalars(data_gen, 1, seed=seed, force_no_nulls=force_no_nulls))
return v[0]
def gen_scalar_values(data_gen, count, seed=0, force_no_nulls=False):
"""Generate scalar values."""
src = _gen_scalars_common(data_gen, count, seed=seed)
return (src.gen(force_no_nulls=force_no_nulls) for i in range(0, count))
def gen_scalar_value(data_gen, seed=0, force_no_nulls=False):
"""Generate a single scalar value."""
v = list(gen_scalar_values(data_gen, 1, seed=seed, force_no_nulls=force_no_nulls))
return v[0]
def debug_df(df, path = None, file_format = 'json', num_parts = 1):
"""Print out or save the contents and the schema of a dataframe for debugging."""
if path is not None:
# Save the dataframe and its schema
# The schema can be re-created by using DataType.fromJson and used
# for loading the dataframe
file_name = f"{path}.{file_format}"
schema_file_name = f"{path}.schema.json"
df.coalesce(num_parts).write.format(file_format).save(file_name)
print(f"SAVED df output for debugging at {file_name}")
schema_json = df.schema.json()
schema_file = open(schema_file_name , 'w')
schema_file.write(schema_json)
schema_file.close()
print(f"SAVED df schema for debugging along in the output dir")
else:
print('COLLECTED\n{}'.format(df.collect()))
df.explain()
df.printSchema()
return df
def print_params(data_gen):
print('Test Datagen Params=' + str([(a, b.get_types()) for a, b in data_gen]))
def idfn(val):
"""Provide an API to provide display names for data type generators."""
return str(val)
def meta_idfn(meta):
def tmp(something):
return meta + idfn(something)
return tmp
def three_col_df(spark, a_gen, b_gen, c_gen, length=2048, seed=0, num_slices=None):
gen = StructGen([('a', a_gen),('b', b_gen),('c', c_gen)], nullable=False)
return gen_df(spark, gen, length=length, seed=seed, num_slices=num_slices)
def two_col_df(spark, a_gen, b_gen, length=2048, seed=0, num_slices=None):
gen = StructGen([('a', a_gen),('b', b_gen)], nullable=False)
return gen_df(spark, gen, length=length, seed=seed, num_slices=num_slices)
def binary_op_df(spark, gen, length=2048, seed=0, num_slices=None):
return two_col_df(spark, gen, gen, length=length, seed=seed, num_slices=num_slices)
def unary_op_df(spark, gen, length=2048, seed=0, num_slices=None):
return gen_df(spark, StructGen([('a', gen)], nullable=False),
length=length, seed=seed, num_slices=num_slices)
def to_cast_string(spark_type):
if isinstance(spark_type, ByteType):
return 'BYTE'
elif isinstance(spark_type, ShortType):
return 'SHORT'
elif isinstance(spark_type, IntegerType):
return 'INT'
elif isinstance(spark_type, LongType):
return 'LONG'
elif isinstance(spark_type, FloatType):
return 'FLOAT'
elif isinstance(spark_type, DoubleType):
return 'DOUBLE'
elif isinstance(spark_type, BooleanType):
return 'BOOLEAN'
elif isinstance(spark_type, DateType):
return 'DATE'
elif isinstance(spark_type, TimestampType):
return 'TIMESTAMP'
elif isinstance(spark_type, StringType):
return 'STRING'
elif isinstance(spark_type, DecimalType):
return 'DECIMAL({}, {})'.format(spark_type.precision, spark_type.scale)
elif isinstance(spark_type, ArrayType):
return 'ARRAY<{}>'.format(to_cast_string(spark_type.elementType))
elif isinstance(spark_type, StructType):
children = [fd.name + ':' + to_cast_string(fd.dataType) for fd in spark_type.fields]
return 'STRUCT<{}>'.format(','.join(children))
elif isinstance(spark_type, BinaryType):
return 'BINARY'
else:
raise RuntimeError('CAST TO TYPE {} NOT SUPPORTED YET'.format(spark_type))
def get_null_lit_string(spark_type):
if isinstance(spark_type, NullType):
return 'null'
else:
string_type = to_cast_string(spark_type)
return 'CAST(null as {})'.format(string_type)
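# For reference: to_cast_string(ArrayType(IntegerType())) returns 'ARRAY<INT>' and
# get_null_lit_string(DoubleType()) returns 'CAST(null as DOUBLE)'.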
def _convert_to_sql(spark_type, data):
if isinstance(data, str):
d = "'" + data.replace("\\", "\\\\").replace("\'", "\\\'") + "'"
elif isinstance(data, datetime):
d = "'" + data.strftime('%Y-%m-%d T%H:%M:%S.%f').zfill(26) + "'"
elif isinstance(data, date):
d = "'" + data.strftime('%Y-%m-%d').zfill(10) + "'"
elif isinstance(data, list):
assert isinstance(spark_type, ArrayType)
d = "array({})".format(",".join([_convert_to_sql(spark_type.elementType, x) for x in data]))
elif isinstance(data, tuple):
assert isinstance(spark_type, StructType) and len(data) == len(spark_type.fields)
# Format of each child: 'name',data
children = ["'{}'".format(fd.name) + ',' + _convert_to_sql(fd.dataType, x)
for fd, x in zip(spark_type.fields, data)]
d = "named_struct({})".format(','.join(children))
elif isinstance(data, bytearray) or isinstance(data, bytes):
d = "X'{}'".format(data.hex())
elif data is None:
d = "null"
else:
d = "'{}'".format(str(data))
if isinstance(spark_type, NullType):
return d
else:
return 'CAST({} as {})'.format(d, to_cast_string(spark_type))
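# For reference: _convert_to_sql(IntegerType(), 3) returns "CAST('3' as INT)" and
# _convert_to_sql(DateType(), date(1, 2, 3)) returns "CAST('0001-02-03' as DATE)".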
def gen_scalars_for_sql(data_gen, count, seed=0, force_no_nulls=False):
"""Generate scalar values, but strings that can be used in selectExpr or SQL"""
src = _gen_scalars_common(data_gen, count, seed=seed)
if isinstance(data_gen, NullGen):
assert not force_no_nulls
return ('null' for i in range(0, count))
spark_type = data_gen.data_type
return (_convert_to_sql(spark_type, src.gen(force_no_nulls=force_no_nulls)) for i in range(0, count))
byte_gen = ByteGen()
short_gen = ShortGen()
int_gen = IntegerGen()
long_gen = LongGen()
float_gen = FloatGen()
double_gen = DoubleGen()
string_gen = StringGen()
boolean_gen = BooleanGen()
date_gen = DateGen()
timestamp_gen = TimestampGen()
binary_gen = BinaryGen()
null_gen = NullGen()
numeric_gens = [byte_gen, short_gen, int_gen, long_gen, float_gen, double_gen]
integral_gens = [byte_gen, short_gen, int_gen, long_gen]
# A lot of mathematical expressions only support a double as input.
# Parametrizing even a single-parameter test this way keeps the tests consistent.
double_gens = [double_gen]
double_n_long_gens = [double_gen, long_gen]
int_n_long_gens = [int_gen, long_gen]
decimal_gen_32bit = DecimalGen(precision=7, scale=3)
decimal_gen_32bit_neg_scale = DecimalGen(precision=7, scale=-3)
decimal_gen_64bit = DecimalGen(precision=12, scale=2)
decimal_gen_128bit = DecimalGen(precision=20, scale=2)
decimal_gens = [decimal_gen_32bit, decimal_gen_64bit, decimal_gen_128bit]
# all of the basic gens
all_basic_gens_no_null = [byte_gen, short_gen, int_gen, long_gen, float_gen, double_gen,
string_gen, boolean_gen, date_gen, timestamp_gen]
all_basic_gens = all_basic_gens_no_null + [null_gen]
all_basic_gens_no_nan = [byte_gen, short_gen, int_gen, long_gen, FloatGen(no_nans=True), DoubleGen(no_nans=True),
string_gen, boolean_gen, date_gen, timestamp_gen, null_gen]
# Many Spark versions have issues sorting large decimals,
# see https://issues.apache.org/jira/browse/SPARK-40089.
orderable_decimal_gen_128bit = decimal_gen_128bit
if is_before_spark_340():
orderable_decimal_gen_128bit = DecimalGen(precision=20, scale=2, special_cases=[])
orderable_decimal_gens = [decimal_gen_32bit, decimal_gen_64bit, orderable_decimal_gen_128bit ]
# TODO add in some array generators to this once that is supported for sorting
# a selection of generators that should be orderable (sortable and comparable)
orderable_gens = [byte_gen, short_gen, int_gen, long_gen, float_gen, double_gen,
string_gen, boolean_gen, date_gen, timestamp_gen, null_gen] + orderable_decimal_gens
# TODO add in some array generators to this once that is supported for these operations
# a selection of generators that can be compared for equality
eq_gens = [byte_gen, short_gen, int_gen, long_gen, float_gen, double_gen,
string_gen, boolean_gen, date_gen, timestamp_gen, null_gen]
# Include decimal type while testing equalTo and notEqualTo
eq_gens_with_decimal_gen = eq_gens + decimal_gens
date_gens = [date_gen]
date_n_time_gens = [date_gen, timestamp_gen]
boolean_gens = [boolean_gen]
single_level_array_gens = [ArrayGen(sub_gen) for sub_gen in all_basic_gens + decimal_gens]
single_level_array_gens_no_null = [ArrayGen(sub_gen) for sub_gen in all_basic_gens_no_null + decimal_gens]
single_level_array_gens_no_nan = [ArrayGen(sub_gen) for sub_gen in all_basic_gens_no_nan + decimal_gens]
single_level_array_gens_no_decimal = [ArrayGen(sub_gen) for sub_gen in all_basic_gens]
map_string_string_gen = [MapGen(StringGen(pattern='key_[0-9]', nullable=False), StringGen())]
# Be careful not to make these too large or data generation takes forever
# This is only a few nested array gens, because nesting can be very deep
nested_array_gens_sample = [ArrayGen(ArrayGen(short_gen, max_length=10), max_length=10),
ArrayGen(ArrayGen(string_gen, max_length=10), max_length=10),
ArrayGen(StructGen([['child0', byte_gen], ['child1', string_gen], ['child2', float_gen]]))]
# Some array gens, but not all because of nesting
array_gens_sample = single_level_array_gens + nested_array_gens_sample
# all of the basic types in a single struct
all_basic_struct_gen = StructGen([['child'+str(ind), sub_gen] for ind, sub_gen in enumerate(all_basic_gens)])
all_basic_struct_gen_no_nan = StructGen([['child'+str(ind), sub_gen] for ind, sub_gen in enumerate(all_basic_gens_no_nan)])
struct_array_gen = StructGen([['child'+str(ind), sub_gen] for ind, sub_gen in enumerate(single_level_array_gens)])
struct_array_gen_no_nans = StructGen([['child'+str(ind), sub_gen] for ind, sub_gen in enumerate(single_level_array_gens_no_nan)])
# Some struct gens, but not all because of nesting
nonempty_struct_gens_sample = [all_basic_struct_gen,
StructGen([['child0', byte_gen], ['child1', all_basic_struct_gen]]),
StructGen([['child0', ArrayGen(short_gen)], ['child1', double_gen]])]
nonempty_struct_gens_sample_no_list = [all_basic_struct_gen,
StructGen([['child0', byte_gen], ['child1', all_basic_struct_gen]]),
StructGen([['child0', short_gen], ['child1', double_gen]])]
struct_gens_sample = nonempty_struct_gens_sample + [StructGen([])]
struct_gens_sample_no_list = nonempty_struct_gens_sample_no_list + [StructGen([])]
struct_gen_decimal128 = StructGen(
[['child' + str(ind), sub_gen] for ind, sub_gen in enumerate([decimal_gen_128bit])])
struct_gens_sample_with_decimal128 = struct_gens_sample + [struct_gen_decimal128]
struct_gens_sample_with_decimal128_no_list = struct_gens_sample_no_list + [struct_gen_decimal128]
simple_string_to_string_map_gen = MapGen(StringGen(pattern='key_[0-9]', nullable=False),
StringGen(), max_length=10)
all_basic_map_gens = [MapGen(f(nullable=False), f()) for f in [BooleanGen, ByteGen, ShortGen, IntegerGen, LongGen, FloatGen, DoubleGen, DateGen, TimestampGen]] + [simple_string_to_string_map_gen]
decimal_64_map_gens = [MapGen(key_gen=gen, value_gen=gen, nullable=False) for gen in [DecimalGen(7, 3, nullable=False), DecimalGen(12, 2, nullable=False)]]
decimal_128_map_gens = [MapGen(key_gen=gen, value_gen=gen, nullable=False) for gen in [DecimalGen(20, 2, nullable=False)]]
# Some map gens, but not all because of nesting
map_gens_sample = all_basic_map_gens + [MapGen(StringGen(pattern='key_[0-9]', nullable=False), ArrayGen(string_gen), max_length=10),
MapGen(RepeatSeqGen(IntegerGen(nullable=False), 10), long_gen, max_length=10),
MapGen(StringGen(pattern='key_[0-9]', nullable=False), simple_string_to_string_map_gen),
MapGen(IntegerGen(False), ArrayGen(int_gen, max_length=3), max_length=3),
MapGen(ShortGen(False), StructGen([['child0', byte_gen], ['child1', double_gen]]), max_length=3),
MapGen(ByteGen(False), MapGen(FloatGen(False), date_gen, max_length=3), max_length=3)]
nested_gens_sample = array_gens_sample + struct_gens_sample_with_decimal128 + map_gens_sample + decimal_128_map_gens
ansi_enabled_conf = {'spark.sql.ansi.enabled': 'true'}
legacy_interval_enabled_conf = {'spark.sql.legacy.interval.enabled': 'true'}
def copy_and_update(conf, *more_confs):
local_conf = conf.copy()
for more in more_confs:
local_conf.update(more)
return local_conf
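# Illustrative usage: combine confs without mutating the originals, e.g.
#   conf = copy_and_update(ansi_enabled_conf, legacy_interval_enabled_conf,
#                          {'spark.sql.shuffle.partitions': '4'})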
all_gen = [StringGen(), ByteGen(), ShortGen(), IntegerGen(), LongGen(),
FloatGen(), DoubleGen(), BooleanGen(), DateGen(), TimestampGen(),
decimal_gen_32bit, decimal_gen_64bit, decimal_gen_128bit]
# Pyarrow will raise the error below if the timestamp is out of range for both CPU and GPU,
# so narrow down the time range to avoid exceptions causing test failures.
#
# "pyarrow.lib.ArrowInvalid: Casting from timestamp[us, tz=UTC] to timestamp[ns]
# would result in out of bounds timestamp: 51496791452587000"
#
# This issue has been fixed in pyarrow by the PR https://github.com/apache/arrow/pull/7169
# However it still requires PySpark to specify the new argument "timestamp_as_object".
arrow_common_gen = [byte_gen, short_gen, int_gen, long_gen, float_gen, double_gen,
string_gen, boolean_gen, date_gen,
TimestampGen(start=datetime(1970, 1, 1, tzinfo=timezone.utc),
end=datetime(2262, 1, 1, tzinfo=timezone.utc))]
arrow_array_gens = [ArrayGen(subGen) for subGen in arrow_common_gen] + nested_array_gens_sample
arrow_one_level_struct_gen = StructGen([
['child'+str(i), sub_gen] for i, sub_gen in enumerate(arrow_common_gen)])
arrow_struct_gens = [arrow_one_level_struct_gen,
StructGen([['child0', ArrayGen(short_gen)], ['child1', arrow_one_level_struct_gen]])]
# This function adds a new column named uniq_int where each row
# has a new unique integer value. It just starts at 0 and
# increments by 1 for each row.
# This can be used to add a column to a dataframe if you need to
# sort on a column with unique values.
# This collects the data to the driver, though, so it can be expensive.
def append_unique_int_col_to_df(spark, dataframe):
def append_unique_to_rows(rows):
new = []
for item in range(len(rows)):
row_dict = rows[item].asDict()
row_dict['uniq_int'] = item
new_row = Row(**row_dict)
new.append(new_row)
return new
collected = dataframe.collect()
if (len(collected) > INT_MAX):
raise RuntimeError('Too many rows to add unique integer values starting from 0')
existing_schema = dataframe.schema
new_rows = append_unique_to_rows(collected)
new_schema = StructType(existing_schema.fields + [StructField("uniq_int", IntegerType(), False)])
return spark.createDataFrame(new_rows, new_schema)
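# Illustrative usage, assuming `df` is small enough to collect on the driver:
#   with_unique = append_unique_int_col_to_df(spark, df)
#   with_unique.orderBy('uniq_int')  # gives a total order with no duplicate keys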
disable_parquet_field_id_write = {"spark.sql.parquet.fieldId.write.enabled": "false"} # default is true
enable_parquet_field_id_write = {"spark.sql.parquet.fieldId.write.enabled": "true"}
enable_parquet_field_id_read = {"spark.sql.parquet.fieldId.read.enabled": "true"} # default is false
# generate a df where the c1 and c2 columns together have 25 combinations (5 x 5), each repeated 100 times
def get_25_partitions_df(spark):
schema = StructType([
StructField("c1", IntegerType()),
StructField("c2", IntegerType()),
StructField("c3", IntegerType())])
data = [[i, j, k] for i in range(0, 5) for j in range(0, 5) for k in range(0, 100)]
return spark.createDataFrame(data, schema)
| spark-rapids-branch-23.10 | integration_tests/src/main/python/data_gen.py |
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_gpu_and_cpu_are_equal_collect, assert_gpu_fallback_collect
from data_gen import *
from marks import allow_non_gpu
from pyspark.sql.types import *
import pyspark.sql.functions as f
from spark_session import is_before_spark_340
# Many Spark versions have issues sorting decimals.
# https://issues.apache.org/jira/browse/SPARK-40089
_orderable_not_null_big_decimal_gen = DecimalGen(precision=20, scale=2, nullable=False)
if is_before_spark_340():
_orderable_not_null_big_decimal_gen = DecimalGen(precision=20, scale=2, nullable=False, special_cases=[])
orderable_not_null_gen = [ByteGen(nullable=False), ShortGen(nullable=False), IntegerGen(nullable=False),
LongGen(nullable=False), FloatGen(nullable=False), DoubleGen(nullable=False), BooleanGen(nullable=False),
TimestampGen(nullable=False), DateGen(nullable=False), StringGen(nullable=False),
DecimalGen(precision=7, scale=3, nullable=False), DecimalGen(precision=12, scale=2, nullable=False),
_orderable_not_null_big_decimal_gen]
@allow_non_gpu('SortExec', 'ShuffleExchangeExec', 'RangePartitioning', 'SortOrder')
@pytest.mark.parametrize('data_gen', [StringGen(nullable=False)], ids=idfn)
@pytest.mark.parametrize('order', [f.col('a').cast(BinaryType())], ids=idfn)
def test_sort_binary_fallback(data_gen, order):
assert_gpu_fallback_collect(
lambda spark : unary_op_df(spark, data_gen).orderBy(order),
"SortExec")
@allow_non_gpu('ProjectExec', 'ShuffleExchangeExec', 'RangePartitioning')
@pytest.mark.parametrize('data_gen', [StringGen(nullable=False)], ids=idfn)
def test_sort_nonbinary_carry_binary(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen)
.withColumn("binary_string", f.col("a").cast(BinaryType()))
.orderBy(f.col('a')))
@pytest.mark.parametrize('data_gen', orderable_gens + orderable_not_null_gen, ids=idfn)
@pytest.mark.parametrize('order', [f.col('a').asc(), f.col('a').asc_nulls_last(), f.col('a').desc(), f.col('a').desc_nulls_first()], ids=idfn)
def test_single_orderby(data_gen, order):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).orderBy(order))
@pytest.mark.parametrize('data_gen', single_level_array_gens, ids=idfn)
@pytest.mark.parametrize('order', [f.col('a').asc(), f.col('a').asc_nulls_first(), f.col('a').asc_nulls_last(),
f.col('a').desc(), f.col('a').desc_nulls_first(), f.col('a').desc_nulls_last()], ids=idfn)
def test_single_orderby_on_array(data_gen, order):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).orderBy(order))
@allow_non_gpu('SortExec', 'ShuffleExchangeExec')
@pytest.mark.parametrize('data_gen', [ArrayGen(sub_gen) for sub_gen in single_level_array_gens], ids=idfn)
@pytest.mark.parametrize('order', [f.col('a').asc(), f.col('a').asc_nulls_first(), f.col('a').asc_nulls_last(),
f.col('a').desc(), f.col('a').desc_nulls_first(), f.col('a').desc_nulls_last()], ids=idfn)
def test_single_orderby_fallback_for_multilevel_array(data_gen, order):
assert_gpu_fallback_collect(
lambda spark : unary_op_df(spark, data_gen).orderBy(order),
"SortExec")
@allow_non_gpu('SortExec', 'ShuffleExchangeExec')
@pytest.mark.parametrize('data_gen', [ArrayGen(StructGen([('child1', sub_gen)])) for sub_gen in orderable_gens], ids=idfn)
@pytest.mark.parametrize('order', [f.col('a').asc(), f.col('a').asc_nulls_first(), f.col('a').asc_nulls_last(),
f.col('a').desc(), f.col('a').desc_nulls_first(), f.col('a').desc_nulls_last()], ids=idfn)
def test_single_orderby_fallback_for_array_of_struct(data_gen, order):
assert_gpu_fallback_collect(
lambda spark : unary_op_df(spark, data_gen).orderBy(order),
"SortExec")
@pytest.mark.parametrize('shuffle_parts', [
pytest.param(1),
pytest.param(200)
])
@pytest.mark.parametrize('stable_sort', ['STABLE', 'OUTOFCORE'])
@pytest.mark.parametrize('data_gen', [
pytest.param(all_basic_struct_gen),
pytest.param(StructGen([['child0', decimal_gen_128bit]])),
pytest.param(StructGen([['child0', all_basic_struct_gen]])),
pytest.param(MapGen(StringGen(pattern='key_[0-9]', nullable=False), simple_string_to_string_map_gen),
marks=pytest.mark.xfail(reason="maps are not supported")),
], ids=idfn)
@pytest.mark.parametrize('order', [
pytest.param(f.col('a').asc()),
pytest.param(f.col('a').asc_nulls_first()),
pytest.param(f.col('a').asc_nulls_last(),
marks=pytest.mark.xfail(reason='opposite null order not supported')),
pytest.param(f.col('a').desc()),
pytest.param(f.col('a').desc_nulls_first(),
marks=pytest.mark.xfail(reason='opposite null order not supported')),
pytest.param(f.col('a').desc_nulls_last()),
], ids=idfn)
def test_single_nested_orderby_plain(data_gen, order, shuffle_parts, stable_sort):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).orderBy(order),
conf = {
'spark.sql.shuffle.partitions': shuffle_parts,
'spark.rapids.sql.stableSort.enabled': stable_sort == 'STABLE'
})
# only the default null ordering for each sort direction is supported for nested types
@allow_non_gpu('SortExec', 'ShuffleExchangeExec', 'RangePartitioning', 'SortOrder')
@pytest.mark.parametrize('data_gen', [
pytest.param(all_basic_struct_gen),
pytest.param(StructGen([['child0', all_basic_struct_gen]])),
], ids=idfn)
@pytest.mark.parametrize('order', [
pytest.param(f.col('a').asc_nulls_last()),
pytest.param(f.col('a').desc_nulls_first()),
], ids=idfn)
def test_single_nested_orderby_fallback_for_nullorder(data_gen, order):
assert_gpu_fallback_collect(
lambda spark : unary_op_df(spark, data_gen).orderBy(order),
"SortExec")
# SPARK CPU itself has an issue with negative scale for take ordered and project
orderable_without_neg_decimal = [n for n in (orderable_gens + orderable_not_null_gen) if not (isinstance(n, DecimalGen) and n.scale < 0)]
@pytest.mark.parametrize('data_gen', orderable_without_neg_decimal + single_level_array_gens, ids=idfn)
@pytest.mark.parametrize('order', [f.col('a').asc(), f.col('a').asc_nulls_last(), f.col('a').desc(), f.col('a').desc_nulls_first()], ids=idfn)
def test_single_orderby_with_limit(data_gen, order):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).orderBy(order).limit(100))
@pytest.mark.parametrize('order,data_gen', [
pytest.param(f.col('a').asc(), all_basic_struct_gen),
pytest.param(f.col('a').asc_nulls_first(), all_basic_struct_gen),
pytest.param(f.col('a').desc(), all_basic_struct_gen),
pytest.param(f.col('a').desc_nulls_last(), all_basic_struct_gen)
], ids=idfn)
def test_single_nested_orderby_with_limit(data_gen, order):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).orderBy(order).limit(100),
conf = {
'spark.rapids.allowCpuRangePartitioning': False
})
@allow_non_gpu('TakeOrderedAndProjectExec', 'SortOrder')
@pytest.mark.parametrize('order,data_gen', [
pytest.param(f.col('a').asc_nulls_last(), all_basic_struct_gen),
pytest.param(f.col('a').desc_nulls_first(), all_basic_struct_gen)
], ids=idfn)
def test_single_nested_orderby_with_limit_fallback(data_gen, order):
assert_gpu_fallback_collect(
lambda spark : unary_op_df(spark, data_gen).orderBy(order).limit(100),
"TakeOrderedAndProjectExec",
conf = {
'spark.rapids.allowCpuRangePartitioning': False
})
@pytest.mark.parametrize('data_gen', orderable_gens + orderable_not_null_gen + single_level_array_gens, ids=idfn)
@pytest.mark.parametrize('order', [f.col('a').asc(), f.col('a').asc_nulls_last(), f.col('a').desc(), f.col('a').desc_nulls_first()], ids=idfn)
def test_single_sort_in_part(data_gen, order):
# We set `num_slices` to handle https://github.com/NVIDIA/spark-rapids/issues/2477
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen, num_slices=12).sortWithinPartitions(order))
@pytest.mark.parametrize('data_gen', [
pytest.param(all_basic_struct_gen),
pytest.param(struct_gen_decimal128),
pytest.param(StructGen([['child0', all_basic_struct_gen]])),
pytest.param(StructGen([['child0', struct_gen_decimal128]])),
], ids=idfn)
@pytest.mark.parametrize('order', [
pytest.param(f.col('a').asc()),
pytest.param(f.col('a').asc_nulls_first()),
pytest.param(f.col('a').asc_nulls_last(),
marks=pytest.mark.xfail(reason='opposite null order not supported')),
pytest.param(f.col('a').desc()),
pytest.param(f.col('a').desc_nulls_first(),
marks=pytest.mark.xfail(reason='opposite null order not supported')),
pytest.param(f.col('a').desc_nulls_last()),
], ids=idfn)
@pytest.mark.parametrize('stable_sort', ['STABLE', 'OUTOFCORE'], ids=idfn)
def test_single_nested_sort_in_part(data_gen, order, stable_sort):
sort_conf = {'spark.rapids.sql.stableSort.enabled': stable_sort == 'STABLE'}
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen, num_slices=12).sortWithinPartitions(order),
conf=sort_conf)
orderable_gens_sort = [byte_gen, short_gen, int_gen, long_gen, float_gen, double_gen,
boolean_gen, timestamp_gen, date_gen, string_gen, null_gen, StructGen([('child0', long_gen)])
] + orderable_decimal_gens + single_level_array_gens
@pytest.mark.parametrize('data_gen', orderable_gens_sort, ids=idfn)
def test_multi_orderby(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).orderBy(f.col('a'), f.col('b').desc()))
@pytest.mark.parametrize('data_gen', single_level_array_gens, ids=idfn)
def test_multi_orderby_on_array(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).orderBy(f.col('a'), f.col('b').desc()))
# SPARK CPU itself has an issue with negative scale for take ordered and project
orderable_gens_sort_without_neg_decimal = [n for n in orderable_gens_sort if not (isinstance(n, DecimalGen) and n.scale < 0)]
@pytest.mark.parametrize('data_gen', orderable_gens_sort_without_neg_decimal + single_level_array_gens, ids=idfn)
def test_multi_orderby_with_limit(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).orderBy(f.col('a'), f.col('b').desc()).limit(100))
# We added in a partitioning optimization to take_ordered_and_project
# This should trigger it.
@pytest.mark.parametrize('data_gen', orderable_gens_sort_without_neg_decimal + single_level_array_gens, ids=idfn)
def test_multi_orderby_with_limit_single_part(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).coalesce(1).orderBy(f.col('a'), f.col('b').desc()).limit(100))
# We are not trying all possibilities, just doing a few with numbers so the query works.
@pytest.mark.parametrize('data_gen', [byte_gen, long_gen, float_gen], ids=idfn)
def test_orderby_with_processing(data_gen):
assert_gpu_and_cpu_are_equal_collect(
# avoid ambiguity in the order by statement for floating point by including a as a backup ordering column
lambda spark : unary_op_df(spark, data_gen).orderBy(f.lit(100) - f.col('a'), f.col('a')))
# We are not trying all possibilities, just doing a few with numbers so the query works.
@pytest.mark.parametrize('data_gen', [byte_gen, long_gen, float_gen], ids=idfn)
def test_orderby_with_processing_and_limit(data_gen):
assert_gpu_and_cpu_are_equal_collect(
# avoid ambiguity in the order by statement for floating point by including a as a backup ordering column
lambda spark : unary_op_df(spark, data_gen).orderBy(f.lit(100) - f.col('a'), f.col('a')).limit(100))
# We are not trying all possibilities, just doing a few with numbers so the query works.
@pytest.mark.parametrize('data_gen', [StructGen([('child0', long_gen)])], ids=idfn)
def test_single_nested_orderby_with_processing_and_limit(data_gen):
assert_gpu_and_cpu_are_equal_collect(
# avoid ambiguity in the order by statement for floating point by including a as a backup ordering column
lambda spark : unary_op_df(spark, data_gen)\
.orderBy(f.struct(f.lit(100) - f.col('a.child0')), f.col('a'))\
.limit(100))
# We are not trying all possibilities, just doing a few with numbers so the query works.
@pytest.mark.parametrize('data_gen', [byte_gen, long_gen, float_gen], ids=idfn)
def test_single_orderby_with_skew(data_gen):
# When doing range partitioning the upstream data is sampled to try and get the bounds for cutoffs.
# If the data comes back with skewed partitions then those partitions will be resampled for more data.
# This test tries to trigger that resampling.
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen)\
.selectExpr('a', 'random(1) > 0.5 as b')\
.repartition(f.col('b'))\
.orderBy(f.col('a'))\
.selectExpr('a'))
# We are not trying all possibilities, just doing a few with numbers so the query works.
@pytest.mark.parametrize('data_gen', [all_basic_struct_gen, StructGen([['child0', all_basic_struct_gen]])], ids=idfn)
@pytest.mark.parametrize('stable_sort', ['STABLE', 'OUTOFCORE'], ids=idfn)
def test_single_nested_orderby_with_skew(data_gen, stable_sort):
sort_conf = {'spark.rapids.sql.stableSort.enabled': stable_sort == 'STABLE'}
# When doing range partitioning the upstream data is sampled to try and get the bounds for cutoffs.
# If the data comes back with skewed partitions then those partitions will be resampled for more data.
# This test tries to trigger that resampling.
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen) \
.selectExpr('a', 'random(1) > 0.5 as b') \
.repartition(f.col('b')) \
.orderBy(f.col('a')) \
.selectExpr('a'),
conf=sort_conf)
# This is primarily to test the out of core sort with multiple batches. For this we set the data size to
# be relatively large (1 MiB across all tasks) and the target size to be small (16 KiB). This means we
# should see around 64 batches of data. So this is most meaningful when there are fewer than 64 tasks
# in the cluster, but it should still work even then.
@pytest.mark.parametrize('data_gen', [long_gen, StructGen([('child0', long_gen)]), ArrayGen(byte_gen)], ids=idfn)
@pytest.mark.parametrize('stable_sort', ['STABLE', 'OUTOFCORE'], ids=idfn)
def test_large_orderby(data_gen, stable_sort):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen, length=1024*128)\
.orderBy(f.col('a')),
conf={
'spark.rapids.sql.batchSizeBytes': '16384',
'spark.rapids.sql.stableSort.enabled': stable_sort == 'STABLE'
})
# This is similar to test_large_orderby, but here we want to test some types
# that are not being sorted on, but are going along with it
@pytest.mark.parametrize('data_gen', [
binary_gen,
byte_gen,
string_gen,
float_gen,
date_gen,
timestamp_gen,
decimal_gen_64bit,
decimal_gen_128bit,
StructGen([('child1', byte_gen)]),
simple_string_to_string_map_gen,
ArrayGen(byte_gen, max_length=5)], ids=idfn)
@pytest.mark.order(2)
def test_large_orderby_nested_ridealong(data_gen):
# We use a UniqueLongGen to avoid duplicate keys that can cause ambiguity in the sort
# results, especially on distributed clusters.
assert_gpu_and_cpu_are_equal_collect(
lambda spark : two_col_df(spark, UniqueLongGen(), data_gen, length=1024*127)\
.orderBy(f.col('a').desc()),
conf = {'spark.rapids.sql.batchSizeBytes': '16384'})
@pytest.mark.parametrize('data_gen', [byte_gen,
string_gen,
float_gen,
date_gen,
timestamp_gen,
decimal_gen_64bit,
decimal_gen_128bit,
StructGen([('child1', byte_gen)]),
simple_string_to_string_map_gen,
ArrayGen(byte_gen, max_length=5),
ArrayGen(decimal_gen_128bit, max_length=5)], ids=idfn)
@pytest.mark.order(2)
def test_orderby_nested_ridealong_limit(data_gen):
# We use a UniqueLongGen to avoid duplicate keys that can cause ambiguity in the sort
# results, especially on distributed clusters.
assert_gpu_and_cpu_are_equal_collect(
lambda spark : two_col_df(spark, UniqueLongGen(), data_gen)\
.orderBy(f.col('a').desc()).limit(100))
| spark-rapids-branch-23.10 | integration_tests/src/main/python/sort_test.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_cpu_and_gpu_are_equal_collect_with_capture
from data_gen import *
from marks import approximate_float
from spark_session import with_cpu_session, is_before_spark_330
import pyspark.sql.functions as f
# Each descriptor is a pair of a data generator and a boolean
# indicating whether that data type is supported by the AST
ast_integral_descrs = [
(byte_gen, False), # AST implicitly upcasts to INT32, need AST cast to support
(short_gen, False), # AST implicitly upcasts to INT32, need AST cast to support
(int_gen, True),
(long_gen, True)
]
ast_arithmetic_descrs = ast_integral_descrs + [(float_gen, True), (double_gen, True)]
# cudf AST cannot support comparing floating point until it is expressive enough to handle NaNs
# cudf AST does not support strings yet
ast_comparable_descrs = [
(boolean_gen, True),
(byte_gen, True),
(short_gen, True),
(int_gen, True),
(long_gen, True),
(float_gen, False),
(double_gen, False),
(timestamp_gen, True),
(date_gen, True),
(string_gen, False)
]
ast_boolean_descr = [(boolean_gen, True)]
ast_double_descr = [(double_gen, True)]
def assert_gpu_ast(is_supported, func, conf={}):
exist = "GpuProjectAstExec"
non_exist = "GpuProjectExec"
if not is_supported:
exist = "GpuProjectExec"
non_exist = "GpuProjectAstExec"
ast_conf = copy_and_update(conf, {"spark.rapids.sql.projectAstEnabled": "true"})
assert_cpu_and_gpu_are_equal_collect_with_capture(
func,
exist_classes=exist,
non_exist_classes=non_exist,
conf=ast_conf)
def assert_unary_ast(data_descr, func, conf={}):
(data_gen, is_supported) = data_descr
assert_gpu_ast(is_supported, lambda spark: func(unary_op_df(spark, data_gen)), conf=conf)
def assert_binary_ast(data_descr, func, conf={}):
(data_gen, is_supported) = data_descr
assert_gpu_ast(is_supported, lambda spark: func(binary_op_df(spark, data_gen)), conf=conf)
@pytest.mark.parametrize('data_gen', [boolean_gen, byte_gen, short_gen, int_gen, long_gen, float_gen, double_gen, timestamp_gen, date_gen], ids=idfn)
def test_literal(spark_tmp_path, data_gen):
# Write data to Parquet so Spark generates a plan using just the count of the data.
data_path = spark_tmp_path + '/AST_TEST_DATA'
with_cpu_session(lambda spark: gen_df(spark, [("a", IntegerGen())]).write.parquet(data_path))
scalar = gen_scalar(data_gen, force_no_nulls=True)
assert_gpu_ast(is_supported=True,
func=lambda spark: spark.read.parquet(data_path).select(scalar))
@pytest.mark.parametrize('data_gen', [boolean_gen, byte_gen, short_gen, int_gen, long_gen, float_gen, double_gen, timestamp_gen, date_gen], ids=idfn)
def test_null_literal(spark_tmp_path, data_gen):
# Write data to Parquet so Spark generates a plan using just the count of the data.
data_path = spark_tmp_path + '/AST_TEST_DATA'
with_cpu_session(lambda spark: gen_df(spark, [("a", IntegerGen())]).write.parquet(data_path))
data_type = data_gen.data_type
assert_gpu_ast(is_supported=True,
func=lambda spark: spark.read.parquet(data_path).select(f.lit(None).cast(data_type)))
@pytest.mark.parametrize('data_descr', ast_integral_descrs, ids=idfn)
def test_bitwise_not(data_descr):
assert_unary_ast(data_descr, lambda df: df.selectExpr('~a'))
# This just ends up being a pass through. There is no good way to force
# a unary positive into a plan, because it gets optimized out, but this
# verifies that we can handle it.
@pytest.mark.parametrize('data_descr', [
(byte_gen, True),
(short_gen, True),
(int_gen, True),
(long_gen, True),
(float_gen, True),
(double_gen, True)], ids=idfn)
def test_unary_positive(data_descr):
assert_unary_ast(data_descr, lambda df: df.selectExpr('+a'))
@pytest.mark.skipif(is_before_spark_330(), reason='DayTimeInterval is not supported before Pyspark 3.3.0')
def test_unary_positive_for_daytime_interval():
data_descr = (DayTimeIntervalGen(), True)
assert_unary_ast(data_descr, lambda df: df.selectExpr('+a'))
@pytest.mark.parametrize('data_descr', ast_arithmetic_descrs, ids=idfn)
def test_unary_minus(data_descr):
assert_unary_ast(data_descr, lambda df: df.selectExpr('-a'))
@pytest.mark.parametrize('data_descr', ast_arithmetic_descrs, ids=idfn)
def test_abs(data_descr):
assert_unary_ast(data_descr, lambda df: df.selectExpr('abs(a)'))
@approximate_float
@pytest.mark.parametrize('data_descr', ast_double_descr, ids=idfn)
def test_cbrt(data_descr):
assert_unary_ast(data_descr, lambda df: df.selectExpr('cbrt(a)'))
@pytest.mark.parametrize('data_descr', ast_boolean_descr, ids=idfn)
def test_not(data_descr):
assert_unary_ast(data_descr, lambda df: df.selectExpr('!a'))
@pytest.mark.parametrize('data_descr', ast_double_descr, ids=idfn)
def test_rint(data_descr):
assert_unary_ast(data_descr, lambda df: df.selectExpr('rint(a)'))
@approximate_float
@pytest.mark.parametrize('data_descr', ast_double_descr, ids=idfn)
def test_sqrt(data_descr):
assert_unary_ast(data_descr, lambda df: df.selectExpr('sqrt(a)'))
@approximate_float
@pytest.mark.parametrize('data_descr', ast_double_descr, ids=idfn)
def test_sin(data_descr):
assert_unary_ast(data_descr, lambda df: df.selectExpr('sin(a)'))
@approximate_float
@pytest.mark.parametrize('data_descr', ast_double_descr, ids=idfn)
def test_cos(data_descr):
assert_unary_ast(data_descr, lambda df: df.selectExpr('cos(a)'))
@approximate_float
@pytest.mark.parametrize('data_descr', ast_double_descr, ids=idfn)
def test_tan(data_descr):
assert_unary_ast(data_descr, lambda df: df.selectExpr('tan(a)'))
@approximate_float
@pytest.mark.parametrize('data_descr', ast_double_descr, ids=idfn)
def test_cot(data_descr):
assert_unary_ast(data_descr, lambda df: df.selectExpr('cot(a)'))
@approximate_float
@pytest.mark.parametrize('data_descr', ast_double_descr, ids=idfn)
def test_sinh(data_descr):
assert_unary_ast(data_descr, lambda df: df.selectExpr('sinh(a)'))
@approximate_float
@pytest.mark.parametrize('data_descr', ast_double_descr, ids=idfn)
def test_cosh(data_descr):
assert_unary_ast(data_descr, lambda df: df.selectExpr('cosh(a)'))
@approximate_float
@pytest.mark.parametrize('data_descr', ast_double_descr, ids=idfn)
def test_tanh(data_descr):
assert_unary_ast(data_descr, lambda df: df.selectExpr('tanh(a)'))
@approximate_float
@pytest.mark.parametrize('data_descr', ast_double_descr, ids=idfn)
def test_asin(data_descr):
assert_unary_ast(data_descr, lambda df: df.selectExpr('asin(a)'))
@approximate_float
@pytest.mark.parametrize('data_descr', ast_double_descr, ids=idfn)
def test_acos(data_descr):
assert_unary_ast(data_descr, lambda df: df.selectExpr('acos(a)'))
@approximate_float
@pytest.mark.parametrize('data_descr', ast_double_descr, ids=idfn)
def test_atan(data_descr):
assert_unary_ast(data_descr, lambda df: df.selectExpr('atan(a)'))
# AST is not expressive enough to support the ASINH Spark emulation expression
@approximate_float
@pytest.mark.parametrize('data_descr', [(double_gen, False)], ids=idfn)
def test_asinh(data_descr):
assert_unary_ast(data_descr, lambda df: df.selectExpr('asinh(a)'))
@approximate_float
@pytest.mark.parametrize('data_descr', ast_double_descr, ids=idfn)
def test_acosh(data_descr):
assert_unary_ast(data_descr, lambda df: df.selectExpr('acosh(a)'))
@approximate_float
@pytest.mark.parametrize('data_descr', ast_double_descr, ids=idfn)
def test_atanh(data_descr):
assert_unary_ast(data_descr, lambda df: df.selectExpr('atanh(a)'))
# The default approximate tolerance is 1e-6, or 1 in a million;
# in some cases we need to adjust it because the algorithm used is different.
@approximate_float(rel=1e-4, abs=1e-12)
# Because Spark will overflow on large exponents, drop to something well below
# where it fails. Note this is the binary exponent, not base 10.
@pytest.mark.parametrize('data_descr', [(DoubleGen(min_exp=-20, max_exp=20), True)], ids=idfn)
def test_asinh_improved(data_descr):
assert_unary_ast(data_descr, lambda df: df.selectExpr('asinh(a)'),
conf={'spark.rapids.sql.improvedFloatOps.enabled': 'true'})
# The default approximate tolerance is 1e-6, or 1 in a million;
# in some cases we need to adjust it because the algorithm used is different
# (a small sketch of the underlying identities follows this test).
@approximate_float(rel=1e-4, abs=1e-12)
# Because Spark will overflow on large exponents, drop to something well below
# where it fails. Note this is the binary exponent, not base 10.
@pytest.mark.parametrize('data_descr', [(DoubleGen(min_exp=-20, max_exp=20), True)], ids=idfn)
def test_acosh_improved(data_descr):
assert_unary_ast(data_descr, lambda df: df.selectExpr('acosh(a)'),
conf={'spark.rapids.sql.improvedFloatOps.enabled': 'true'})
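# A minimal, self-contained sketch (not used by any test in this file) of the log-based
# identities the asinh/acosh emulation is assumed to rely on; the relaxed tolerances above
# account for the GPU computing these functions with a slightly different algorithm.
import math

def _asinh_via_log(x):
    # asinh(x) = ln(x + sqrt(x^2 + 1)), valid for all real x
    return math.log(x + math.sqrt(x * x + 1.0))

def _acosh_via_log(x):
    # acosh(x) = ln(x + sqrt(x^2 - 1)), defined for x >= 1
    return math.log(x + math.sqrt(x * x - 1.0))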
@approximate_float
@pytest.mark.parametrize('data_descr', ast_double_descr, ids=idfn)
def test_exp(data_descr):
assert_unary_ast(data_descr, lambda df: df.selectExpr('exp(a)'))
@approximate_float
@pytest.mark.parametrize('data_descr', ast_double_descr, ids=idfn)
def test_expm1(data_descr):
assert_unary_ast(data_descr, lambda df: df.selectExpr('expm1(a)'))
@pytest.mark.parametrize('data_descr', ast_comparable_descrs, ids=idfn)
def test_eq(data_descr):
(s1, s2) = gen_scalars(data_descr[0], 2)
assert_binary_ast(data_descr,
lambda df: df.select(
f.col('a') == s1,
s2 == f.col('b'),
f.col('a') == f.col('b')))
@pytest.mark.parametrize('data_descr', ast_comparable_descrs, ids=idfn)
def test_ne(data_descr):
(s1, s2) = gen_scalars(data_descr[0], 2)
assert_binary_ast(data_descr,
lambda df: df.select(
f.col('a') != s1,
s2 != f.col('b'),
f.col('a') != f.col('b')))
@pytest.mark.parametrize('data_descr', ast_comparable_descrs, ids=idfn)
def test_lt(data_descr):
(s1, s2) = gen_scalars(data_descr[0], 2)
assert_binary_ast(data_descr,
lambda df: df.select(
f.col('a') < s1,
s2 < f.col('b'),
f.col('a') < f.col('b')))
@pytest.mark.parametrize('data_descr', ast_comparable_descrs, ids=idfn)
def test_lte(data_descr):
(s1, s2) = gen_scalars(data_descr[0], 2)
assert_binary_ast(data_descr,
lambda df: df.select(
f.col('a') <= s1,
s2 <= f.col('b'),
f.col('a') <= f.col('b')))
@pytest.mark.parametrize('data_descr', ast_comparable_descrs, ids=idfn)
def test_gt(data_descr):
(s1, s2) = gen_scalars(data_descr[0], 2)
assert_binary_ast(data_descr,
lambda df: df.select(
f.col('a') > s1,
s2 > f.col('b'),
f.col('a') > f.col('b')))
@pytest.mark.parametrize('data_descr', ast_comparable_descrs, ids=idfn)
def test_gte(data_descr):
(s1, s2) = gen_scalars(data_descr[0], 2)
assert_binary_ast(data_descr,
lambda df: df.select(
f.col('a') >= s1,
s2 >= f.col('b'),
f.col('a') >= f.col('b')))
@pytest.mark.parametrize('data_descr', ast_integral_descrs, ids=idfn)
def test_bitwise_and(data_descr):
data_type = data_descr[0].data_type
assert_binary_ast(data_descr,
lambda df: df.select(
f.col('a').bitwiseAND(f.lit(100).cast(data_type)),
f.lit(-12).cast(data_type).bitwiseAND(f.col('b')),
f.col('a').bitwiseAND(f.col('b'))))
@pytest.mark.parametrize('data_descr', ast_integral_descrs, ids=idfn)
def test_bitwise_or(data_descr):
data_type = data_descr[0].data_type
assert_binary_ast(data_descr,
lambda df: df.select(
f.col('a').bitwiseOR(f.lit(100).cast(data_type)),
f.lit(-12).cast(data_type).bitwiseOR(f.col('b')),
f.col('a').bitwiseOR(f.col('b'))))
@pytest.mark.parametrize('data_descr', ast_integral_descrs, ids=idfn)
def test_bitwise_xor(data_descr):
data_type = data_descr[0].data_type
assert_binary_ast(data_descr,
lambda df: df.select(
f.col('a').bitwiseXOR(f.lit(100).cast(data_type)),
f.lit(-12).cast(data_type).bitwiseXOR(f.col('b')),
f.col('a').bitwiseXOR(f.col('b'))))
@pytest.mark.parametrize('data_descr', ast_arithmetic_descrs, ids=idfn)
def test_addition(data_descr):
data_type = data_descr[0].data_type
assert_binary_ast(data_descr,
lambda df: df.select(
f.col('a') + f.lit(100).cast(data_type),
f.lit(-12).cast(data_type) + f.col('b'),
f.col('a') + f.col('b')))
@pytest.mark.parametrize('data_descr', ast_arithmetic_descrs, ids=idfn)
def test_subtraction(data_descr):
data_type = data_descr[0].data_type
assert_binary_ast(data_descr,
lambda df: df.select(
f.col('a') - f.lit(100).cast(data_type),
f.lit(-12).cast(data_type) - f.col('b'),
f.col('a') - f.col('b')))
@pytest.mark.parametrize('data_descr', ast_arithmetic_descrs, ids=idfn)
def test_multiplication(data_descr):
data_type = data_descr[0].data_type
assert_binary_ast(data_descr,
lambda df: df.select(
f.col('a') * f.lit(100).cast(data_type),
f.lit(-12).cast(data_type) * f.col('b'),
f.col('a') * f.col('b')))
@approximate_float
def test_scalar_pow():
# For the 'b' field include a lot more values that we would expect customers to use as a part of a pow
data_gen = [('a', DoubleGen()),('b', DoubleGen().with_special_case(lambda rand: float(rand.randint(-16, 16)), weight=100.0))]
assert_gpu_ast(is_supported=True,
func=lambda spark: gen_df(spark, data_gen).selectExpr(
'pow(a, 7.0)',
'pow(-12.0, b)'))
@approximate_float
@pytest.mark.xfail(reason='https://github.com/NVIDIA/spark-rapids/issues/89')
@pytest.mark.parametrize('data_descr', ast_double_descr, ids=idfn)
def test_columnar_pow(data_descr):
assert_binary_ast(data_descr, lambda df: df.selectExpr('pow(a, b)'))
@pytest.mark.parametrize('data_gen', boolean_gens, ids=idfn)
def test_and(data_gen):
data_type = data_gen.data_type
assert_gpu_ast(is_supported=True,
func=lambda spark: binary_op_df(spark, data_gen).select(
f.col('a') & f.lit(True),
f.lit(False) & f.col('b'),
f.col('a') & f.col('b')))
@pytest.mark.parametrize('data_gen', boolean_gens, ids=idfn)
def test_or(data_gen):
data_type = data_gen.data_type
assert_gpu_ast(is_supported=True,
func=lambda spark: binary_op_df(spark, data_gen).select(
f.col('a') | f.lit(True),
f.lit(False) | f.col('b'),
f.col('a') | f.col('b')))
| spark-rapids-branch-23.10 | integration_tests/src/main/python/ast_test.py |
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_gpu_and_cpu_are_equal_collect, assert_gpu_and_cpu_row_counts_equal
from marks import *
columnarClass = 'com.nvidia.spark.rapids.tests.datasourcev2.parquet.ArrowColumnarDataSourceV2'
def readTable(types, classToUse):
return lambda spark: spark.read\
.option("arrowTypes", types)\
.format(classToUse).load()\
.orderBy("col1")
@validate_execs_in_gpu_plan('HostColumnarToGpu')
def test_read_int():
assert_gpu_and_cpu_are_equal_collect(readTable("int", columnarClass))
@validate_execs_in_gpu_plan('HostColumnarToGpu')
def test_read_strings():
assert_gpu_and_cpu_are_equal_collect(readTable("string", columnarClass))
@validate_execs_in_gpu_plan('HostColumnarToGpu')
def test_read_all_types():
assert_gpu_and_cpu_are_equal_collect(
readTable("int,bool,byte,short,long,string,float,double,date,timestamp", columnarClass),
conf={'spark.rapids.sql.castFloatToString.enabled': 'true'})
@validate_execs_in_gpu_plan('HostColumnarToGpu')
def test_read_all_types_count():
assert_gpu_and_cpu_row_counts_equal(
readTable("int,bool,byte,short,long,string,float,double,date,timestamp", columnarClass),
conf={'spark.rapids.sql.castFloatToString.enabled': 'true'})
@validate_execs_in_gpu_plan('HostColumnarToGpu')
def test_read_arrow_off():
assert_gpu_and_cpu_are_equal_collect(
readTable("int,bool,byte,short,long,string,float,double,date,timestamp", columnarClass),
conf={'spark.rapids.arrowCopyOptimizationEnabled': 'false',
'spark.rapids.sql.castFloatToString.enabled': 'true'})
| spark-rapids-branch-23.10 | integration_tests/src/main/python/datasourcev2_read_test.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_gpu_and_cpu_are_equal_collect
from data_gen import *
from datetime import datetime, timezone
from marks import ignore_order, allow_non_gpu
from pyspark.sql.types import *
import pyspark.sql.functions as f
from pyspark.sql.window import Window
# do it over a day so we have more chance of overlapping values
_restricted_start = datetime(2020, 1, 1, tzinfo=timezone.utc)
_restricted_end = datetime(2020, 1, 2, tzinfo=timezone.utc)
_restricted_ts_gen = TimestampGen(start=_restricted_start, end=_restricted_end)
@pytest.mark.parametrize('data_gen', integral_gens + [string_gen], ids=idfn)
@ignore_order
def test_grouped_tumbling_window(data_gen):
row_gen = StructGen([['ts', _restricted_ts_gen],['data', data_gen]], nullable=False)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : gen_df(spark, row_gen).groupBy(f.window('ts', '5 hour')).agg(f.max("data").alias("max_data")))
# Warning: with sliding windows it is easy to make lots of overlapping windows. This can give the Spark code
# generation some real problems and even crash sometimes when trying to JIT it. This problem only happens on the
# CPU, so be careful. (A small sketch of how windows multiply rows follows the test below.)
@pytest.mark.parametrize('data_gen', integral_gens + [string_gen], ids=idfn)
@ignore_order
def test_grouped_sliding_window(data_gen):
row_gen = StructGen([['ts', _restricted_ts_gen],['data', data_gen]], nullable=False)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : gen_df(spark, row_gen).groupBy(f.window('ts', '5 hour', '1 hour')).agg(f.max("data").alias("max_data")))
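# A minimal sketch (not used by the tests) of why a sliding window multiplies rows: windows
# start at multiples of the slide and cover [start, start + window), so with a 5 hour window
# and a 1 hour slide every timestamp lands in 5 overlapping windows, and the CPU has to
# expand each input row accordingly before aggregating.
def _window_starts_containing(ts_hour, window_hours=5, slide_hours=1):
    # Return the (relative, integer) start hours of every window containing ts_hour.
    starts = []
    k = ts_hour // slide_hours
    while k * slide_hours + window_hours > ts_hour:
        starts.append(k * slide_hours)
        k -= 1
    return sorted(starts)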
@pytest.mark.parametrize('data_gen', integral_gens + [string_gen], ids=idfn)
@ignore_order
def test_grouped_sliding_window_array(data_gen):
row_gen = StructGen([['ts', _restricted_ts_gen],['data', ArrayGen(data_gen)]], nullable=False)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : gen_df(spark, row_gen).groupBy(f.window('ts', '5 hour', '1 hour')).agg(f.max(f.col("data")[3]).alias("max_data")))
@pytest.mark.parametrize('data_gen', integral_gens + [string_gen], ids=idfn)
@ignore_order
def test_tumbling_window(data_gen):
row_gen = StructGen([['ts', _restricted_ts_gen],['data', data_gen]], nullable=False)
w = Window.partitionBy(f.window('ts', '5 hour'))
assert_gpu_and_cpu_are_equal_collect(
lambda spark : gen_df(spark, row_gen).withColumn('rolling_max', f.max("data").over(w)))
@pytest.mark.parametrize('data_gen', integral_gens + [string_gen], ids=idfn)
@ignore_order
def test_sliding_window(data_gen):
row_gen = StructGen([['ts', _restricted_ts_gen],['data', data_gen]], nullable=False)
w = Window.partitionBy(f.window('ts', '5 hour', '1 hour'))
assert_gpu_and_cpu_are_equal_collect(
lambda spark : gen_df(spark, row_gen).withColumn('rolling_max', f.max("data").over(w)))
# This allows us to verify that GpuExpandExec works with all of the various types.
@pytest.mark.parametrize('data_gen', all_basic_gens + decimal_gens + array_gens_sample + map_gens_sample, ids=idfn)
# This includes an expand, and we produce a different order than the CPU does. Sort locally to allow sorting of all types.
@ignore_order(local=True)
def test_just_window(data_gen):
row_gen = StructGen([['ts', timestamp_gen],['data', data_gen]], nullable=False)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : gen_df(spark, row_gen).withColumn('time_bucket', f.window('ts', '5 hour', '1 hour')))
| spark-rapids-branch-23.10 | integration_tests/src/main/python/time_window_test.py |
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_gpu_and_cpu_sql_writes_are_equal_collect, assert_gpu_and_cpu_are_equal_collect, assert_gpu_and_cpu_writes_are_equal_collect, assert_gpu_fallback_write, assert_spark_exception
from datetime import date, datetime, timezone
from data_gen import *
from enum import Enum
from marks import *
from pyspark.sql.types import *
from spark_session import with_cpu_session, with_gpu_session, is_before_spark_330, is_before_spark_320, is_spark_cdh, \
is_databricks_runtime, is_before_spark_340, is_spark_340_or_later, is_databricks122_or_later
import pyspark.sql.functions as f
import pyspark.sql.utils
import random
pytestmark = pytest.mark.nightly_resource_consuming_test
# Test with the original per-file Parquet reader, the multithreaded reader for cloud, and the coalescing file
# reader for non-cloud environments.
original_parquet_file_reader_conf={'spark.rapids.sql.format.parquet.reader.type': 'PERFILE'}
multithreaded_parquet_file_reader_conf={'spark.rapids.sql.format.parquet.reader.type': 'MULTITHREADED'}
coalesce_parquet_file_reader_conf={'spark.rapids.sql.format.parquet.reader.type': 'COALESCING'}
reader_opt_confs = [original_parquet_file_reader_conf, multithreaded_parquet_file_reader_conf,
coalesce_parquet_file_reader_conf]
parquet_decimal_struct_gen= StructGen([['child'+str(ind), sub_gen] for ind, sub_gen in enumerate(decimal_gens)])
writer_confs={'spark.sql.legacy.parquet.datetimeRebaseModeInWrite': 'CORRECTED',
'spark.sql.legacy.parquet.int96RebaseModeInWrite': 'CORRECTED'}
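# A small usage sketch (hypothetical, not referenced by any test): a reader type from
# reader_opt_confs is typically merged with the shared writer_confs via copy_and_update
# (available through the star imports above) before being passed as `conf=` to an assert.
def _example_combined_conf():
    return copy_and_update(original_parquet_file_reader_conf, writer_confs)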
parquet_basic_gen =[byte_gen, short_gen, int_gen, long_gen, float_gen, double_gen,
string_gen, boolean_gen, date_gen, TimestampGen(), binary_gen]
parquet_basic_map_gens = [MapGen(f(nullable=False), f()) for f in [
BooleanGen, ByteGen, ShortGen, IntegerGen, LongGen, FloatGen, DoubleGen, DateGen,
TimestampGen]] + [simple_string_to_string_map_gen,
MapGen(DecimalGen(20, 2, nullable=False), decimal_gen_128bit),
# python is not happy with binary values being keys of a map
MapGen(StringGen("a{1,5}", nullable=False), binary_gen)]
parquet_struct_gen_no_maps = [
StructGen([['child' + str(ind), sub_gen] for ind, sub_gen in enumerate(parquet_basic_gen)]),
StructGen([['child0', StructGen([['child1', byte_gen]])]])
]
parquet_struct_of_map_gen = StructGen([['child0', MapGen(StringGen(nullable=False), StringGen(), max_length=5)], ['child1', IntegerGen()]])
parquet_struct_gen = parquet_struct_gen_no_maps + [parquet_struct_of_map_gen]
parquet_array_gen = [ArrayGen(sub_gen, max_length=10) for sub_gen in parquet_basic_gen + parquet_struct_gen] + [
ArrayGen(ArrayGen(sub_gen, max_length=10), max_length=10) for sub_gen in parquet_basic_gen + parquet_struct_gen_no_maps] + [
ArrayGen(ArrayGen(parquet_struct_of_map_gen, max_length=4), max_length=4)]
parquet_map_gens_sample = parquet_basic_map_gens + [MapGen(StringGen(pattern='key_[0-9]', nullable=False),
ArrayGen(string_gen), max_length=10),
MapGen(RepeatSeqGen(IntegerGen(nullable=False), 10), long_gen,
max_length=10),
MapGen(StringGen(pattern='key_[0-9]', nullable=False),
simple_string_to_string_map_gen)]
parquet_datetime_gen_simple = [DateGen(end=date(3000, 1, 1)),
TimestampGen(end=datetime(3000, 1, 1, tzinfo=timezone.utc))]
parquet_datetime_in_struct_gen = [StructGen([['child' + str(ind), sub_gen] for ind, sub_gen in enumerate(parquet_datetime_gen_simple)]),
StructGen([['child0', StructGen([['child' + str(ind), sub_gen] for ind, sub_gen in enumerate(parquet_datetime_gen_simple)])]])]
parquet_datetime_in_array_gen = [ArrayGen(sub_gen, max_length=10) for sub_gen in parquet_datetime_gen_simple + parquet_datetime_in_struct_gen] + [
ArrayGen(ArrayGen(sub_gen, max_length=10), max_length=10) for sub_gen in parquet_datetime_gen_simple + parquet_datetime_in_struct_gen]
parquet_nested_datetime_gen = parquet_datetime_gen_simple + parquet_datetime_in_struct_gen + parquet_datetime_in_array_gen
parquet_map_gens = parquet_map_gens_sample + [
MapGen(StructGen([['child0', StringGen()], ['child1', StringGen()]], nullable=False), FloatGen()),
MapGen(StructGen([['child0', StringGen(nullable=True)]], nullable=False), StringGen())]
parquet_write_gens_list = [[binary_gen], parquet_basic_gen, decimal_gens] + [ [single_gen] for single_gen in parquet_struct_gen + parquet_array_gen + parquet_map_gens]
parquet_ts_write_options = ['INT96', 'TIMESTAMP_MICROS', 'TIMESTAMP_MILLIS']
@pytest.mark.order(1) # at the head of xdist worker queue if pytest-order is installed
@pytest.mark.parametrize('parquet_gens', parquet_write_gens_list, ids=idfn)
def test_write_round_trip(spark_tmp_path, parquet_gens):
gen_list = [('_c' + str(i), gen) for i, gen in enumerate(parquet_gens)]
data_path = spark_tmp_path + '/PARQUET_DATA'
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path: gen_df(spark, gen_list).coalesce(1).write.parquet(path),
lambda spark, path: spark.read.parquet(path),
data_path,
conf=writer_confs)
all_nulls_string_gen = SetValuesGen(StringType(), [None])
empty_or_null_string_gen = SetValuesGen(StringType(), [None, ""])
all_empty_string_gen = SetValuesGen(StringType(), [""])
all_nulls_array_gen = SetValuesGen(ArrayType(StringType()), [None])
all_empty_array_gen = SetValuesGen(ArrayType(StringType()), [[]])
all_array_empty_string_gen = SetValuesGen(ArrayType(StringType()), [["", ""]])
mixed_empty_nulls_array_gen = SetValuesGen(ArrayType(StringType()), [None, [], [None], [""], [None, ""]])
mixed_empty_nulls_map_gen = SetValuesGen(MapType(StringType(), StringType()), [{}, None, {"A": ""}, {"B": None}])
all_nulls_map_gen = SetValuesGen(MapType(StringType(), StringType()), [None])
all_empty_map_gen = SetValuesGen(MapType(StringType(), StringType()), [{}])
par_write_odd_empty_strings_gens_sample = [all_nulls_string_gen,
empty_or_null_string_gen,
all_empty_string_gen,
all_nulls_array_gen,
all_empty_array_gen,
all_array_empty_string_gen,
mixed_empty_nulls_array_gen,
mixed_empty_nulls_map_gen,
all_nulls_map_gen,
all_empty_map_gen]
@pytest.mark.parametrize('par_gen', par_write_odd_empty_strings_gens_sample, ids=idfn)
def test_write_round_trip_corner(spark_tmp_path, par_gen):
gen_list = [('_c0', par_gen)]
data_path = spark_tmp_path + '/PAR_DATA'
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path: gen_df(spark, gen_list, 128000, num_slices=1).write.parquet(path),
lambda spark, path: spark.read.parquet(path),
data_path)
@pytest.mark.parametrize('parquet_gens', [[
TimestampGen(),
ArrayGen(TimestampGen(), max_length=10),
MapGen(TimestampGen(nullable=False), TimestampGen())]], ids=idfn)
@pytest.mark.parametrize('ts_type', parquet_ts_write_options)
def test_timestamp_write_round_trip(spark_tmp_path, parquet_gens, ts_type):
gen_list = [('_c' + str(i), gen) for i, gen in enumerate(parquet_gens)]
data_path = spark_tmp_path + '/PARQUET_DATA'
all_confs = copy_and_update(writer_confs, {'spark.sql.parquet.outputTimestampType': ts_type})
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path: gen_df(spark, gen_list).coalesce(1).write.parquet(path),
lambda spark, path: spark.read.parquet(path),
data_path,
conf=all_confs)
@pytest.mark.parametrize('ts_type', parquet_ts_write_options)
@pytest.mark.parametrize('ts_rebase', ['CORRECTED'])
@ignore_order
def test_write_ts_millis(spark_tmp_path, ts_type, ts_rebase):
gen = TimestampGen()
data_path = spark_tmp_path + '/PARQUET_DATA'
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path: unary_op_df(spark, gen).write.parquet(path),
lambda spark, path: spark.read.parquet(path),
data_path,
conf={'spark.sql.legacy.parquet.datetimeRebaseModeInWrite': ts_rebase,
'spark.sql.legacy.parquet.int96RebaseModeInWrite': ts_rebase,
'spark.sql.parquet.outputTimestampType': ts_type})
parquet_part_write_gens = [
byte_gen, short_gen, int_gen, long_gen, float_gen, double_gen,
# Some file systems have issues with UTF8 strings so to help the test pass even there
StringGen('(\\w| ){0,50}'),
boolean_gen, date_gen,
TimestampGen()]
# There are race conditions around when individual files are read in for partitioned data
@ignore_order
@pytest.mark.order(1) # at the head of xdist worker queue if pytest-order is installed
@pytest.mark.parametrize('parquet_gen', parquet_part_write_gens, ids=idfn)
def test_part_write_round_trip(spark_tmp_path, parquet_gen):
gen_list = [('a', RepeatSeqGen(parquet_gen, 10)),
('b', parquet_gen)]
data_path = spark_tmp_path + '/PARQUET_DATA'
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path: gen_df(spark, gen_list).coalesce(1).write.partitionBy('a').parquet(path),
lambda spark, path: spark.read.parquet(path),
data_path,
conf=writer_confs)
@pytest.mark.skipif(is_spark_340_or_later() or is_databricks122_or_later(), reason="Spark 340+ and Databricks 12.2+ plan writes through `WriteFilesExec`; covered by test_int96_write_conf_with_write_exec")
@pytest.mark.parametrize('data_gen', [TimestampGen()], ids=idfn)
@pytest.mark.allow_non_gpu("DataWritingCommandExec")
def test_int96_write_conf(spark_tmp_path, data_gen):
data_path = spark_tmp_path + '/PARQUET_DATA'
confs = copy_and_update(writer_confs, {
'spark.sql.parquet.outputTimestampType': 'INT96',
'spark.rapids.sql.format.parquet.writer.int96.enabled': 'false'})
assert_gpu_fallback_write(
lambda spark, path: unary_op_df(spark, data_gen).coalesce(1).write.parquet(path),
lambda spark, path: spark.read.parquet(path),
data_path,
['DataWritingCommandExec'],
confs)
@pytest.mark.skipif(is_before_spark_340() and not is_databricks122_or_later(), reason="`WriteFilesExec` is only supported in Spark 340+")
@pytest.mark.parametrize('data_gen', [TimestampGen()], ids=idfn)
# Note: WriteFilesExec was introduced in Spark 3.4.0.
@pytest.mark.allow_non_gpu("DataWritingCommandExec", "WriteFilesExec")
def test_int96_write_conf_with_write_exec(spark_tmp_path, data_gen):
data_path = spark_tmp_path + '/PARQUET_DATA'
confs = copy_and_update(writer_confs, {
'spark.sql.parquet.outputTimestampType': 'INT96',
'spark.rapids.sql.format.parquet.writer.int96.enabled': 'false'})
assert_gpu_fallback_write(
lambda spark, path: unary_op_df(spark, data_gen).coalesce(1).write.parquet(path),
lambda spark, path: spark.read.parquet(path),
data_path,
['DataWritingCommandExec', 'WriteFilesExec'],
confs)
def test_all_null_int96(spark_tmp_path):
class AllNullTimestampGen(TimestampGen):
def start(self, rand):
self._start(rand, lambda : None)
data_path = spark_tmp_path + '/PARQUET_DATA'
confs = copy_and_update(writer_confs, {'spark.sql.parquet.outputTimestampType': 'INT96'})
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path : unary_op_df(spark, AllNullTimestampGen()).coalesce(1).write.parquet(path),
lambda spark, path : spark.read.parquet(path),
data_path,
conf=confs)
parquet_write_compress_options = ['none', 'uncompressed', 'snappy']
# zstd is available in spark 3.2.0 and later.
if not is_before_spark_320():
parquet_write_compress_options.append('zstd')
@pytest.mark.parametrize('compress', parquet_write_compress_options)
def test_compress_write_round_trip(spark_tmp_path, compress):
data_path = spark_tmp_path + '/PARQUET_DATA'
all_confs = {'spark.sql.parquet.compression.codec': compress}
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path : binary_op_df(spark, long_gen).coalesce(1).write.parquet(path),
lambda spark, path : spark.read.parquet(path),
data_path,
conf=all_confs)
@pytest.mark.order(2)
@pytest.mark.parametrize('parquet_gens', parquet_write_gens_list, ids=idfn)
def test_write_save_table(spark_tmp_path, parquet_gens, spark_tmp_table_factory):
gen_list = [('_c' + str(i), gen) for i, gen in enumerate(parquet_gens)]
data_path = spark_tmp_path + '/PARQUET_DATA'
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path: gen_df(spark, gen_list).coalesce(1).write.format("parquet").mode('overwrite').option("path", path).saveAsTable(spark_tmp_table_factory.get()),
lambda spark, path: spark.read.parquet(path),
data_path,
conf=writer_confs)
def write_parquet_sql_from(spark, df, data_path, write_to_table):
tmp_view_name = 'tmp_view_{}'.format(random.randint(0, 1000000))
df.createOrReplaceTempView(tmp_view_name)
write_cmd = 'CREATE TABLE `{}` USING PARQUET location \'{}\' AS SELECT * from `{}`'.format(write_to_table, data_path, tmp_view_name)
spark.sql(write_cmd)
@pytest.mark.order(2)
@pytest.mark.parametrize('parquet_gens', parquet_write_gens_list, ids=idfn)
def test_write_sql_save_table(spark_tmp_path, parquet_gens, spark_tmp_table_factory):
gen_list = [('_c' + str(i), gen) for i, gen in enumerate(parquet_gens)]
data_path = spark_tmp_path + '/PARQUET_DATA'
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path: write_parquet_sql_from(spark, gen_df(spark, gen_list).coalesce(1), path, spark_tmp_table_factory.get()),
lambda spark, path: spark.read.parquet(path),
data_path,
conf=writer_confs)
def writeParquetUpgradeCatchException(spark, df, data_path, spark_tmp_table_factory, int96_rebase, datetime_rebase, ts_write):
spark.conf.set('spark.sql.parquet.outputTimestampType', ts_write)
spark.conf.set('spark.sql.legacy.parquet.datetimeRebaseModeInWrite', datetime_rebase)
spark.conf.set('spark.sql.legacy.parquet.int96RebaseModeInWrite', int96_rebase) # for spark 310
with pytest.raises(Exception) as e_info:
df.coalesce(1).write.format("parquet").mode('overwrite').option("path", data_path).saveAsTable(spark_tmp_table_factory.get())
assert e_info.match(r".*SparkUpgradeException.*")
@pytest.mark.parametrize('ts_write_data_gen',
[('INT96', TimestampGen()),
('TIMESTAMP_MICROS', TimestampGen(start=datetime(1, 1, 1, tzinfo=timezone.utc), end=datetime(1899, 12, 31, tzinfo=timezone.utc))),
('TIMESTAMP_MILLIS', TimestampGen(start=datetime(1, 1, 1, tzinfo=timezone.utc), end=datetime(1899, 12, 31, tzinfo=timezone.utc)))])
@pytest.mark.parametrize('rebase', ["CORRECTED","EXCEPTION"])
def test_ts_write_fails_datetime_exception(spark_tmp_path, ts_write_data_gen, spark_tmp_table_factory, rebase):
ts_write, gen = ts_write_data_gen
data_path = spark_tmp_path + '/PARQUET_DATA'
int96_rebase = "EXCEPTION" if (ts_write == "INT96") else rebase
date_time_rebase = "EXCEPTION" if (ts_write == "TIMESTAMP_MICROS" or ts_write == "TIMESTAMP_MILLIS") else rebase
with_gpu_session(
lambda spark : writeParquetUpgradeCatchException(spark,
unary_op_df(spark, gen), data_path,
spark_tmp_table_factory,
int96_rebase, date_time_rebase, ts_write))
with_cpu_session(
lambda spark: writeParquetUpgradeCatchException(spark,
unary_op_df(spark, gen), data_path,
spark_tmp_table_factory,
int96_rebase, date_time_rebase, ts_write))
def writeParquetNoOverwriteCatchException(spark, df, data_path, table_name):
with pytest.raises(Exception) as e_info:
df.coalesce(1).write.format("parquet").option("path", data_path).saveAsTable(table_name)
assert e_info.match(r".*already exists.*")
def test_ts_write_twice_fails_exception(spark_tmp_path, spark_tmp_table_factory):
gen = IntegerGen()
data_path = spark_tmp_path + '/PARQUET_DATA'
table_name = spark_tmp_table_factory.get()
with_gpu_session(
lambda spark : unary_op_df(spark, gen).coalesce(1).write.format("parquet").mode('overwrite').option("path", data_path).saveAsTable(table_name))
with_gpu_session(
lambda spark : writeParquetNoOverwriteCatchException(spark, unary_op_df(spark, gen), data_path, table_name))
@allow_non_gpu('DataWritingCommandExec,ExecutedCommandExec,WriteFilesExec')
@pytest.mark.parametrize('ts_write', parquet_ts_write_options)
@pytest.mark.parametrize('ts_rebase', ['LEGACY'])
def test_parquet_write_legacy_fallback(spark_tmp_path, ts_write, ts_rebase, spark_tmp_table_factory):
gen = TimestampGen(start=datetime(1590, 1, 1, tzinfo=timezone.utc))
data_path = spark_tmp_path + '/PARQUET_DATA'
all_confs={'spark.sql.legacy.parquet.datetimeRebaseModeInWrite': ts_rebase,
'spark.sql.legacy.parquet.int96RebaseModeInWrite': ts_rebase,
'spark.sql.parquet.outputTimestampType': ts_write}
assert_gpu_fallback_write(
lambda spark, path: unary_op_df(spark, gen).coalesce(1).write.format("parquet").mode('overwrite').option("path", path).saveAsTable(spark_tmp_table_factory.get()),
lambda spark, path: spark.read.parquet(path),
data_path,
'DataWritingCommandExec',
conf=all_confs)
@allow_non_gpu('DataWritingCommandExec,ExecutedCommandExec,WriteFilesExec')
@pytest.mark.parametrize('write_options', [{"parquet.encryption.footer.key": "k1"},
{"parquet.encryption.column.keys": "k2:a"},
{"parquet.encryption.footer.key": "k1", "parquet.encryption.column.keys": "k2:a"}])
def test_parquet_write_encryption_option_fallback(spark_tmp_path, spark_tmp_table_factory, write_options):
def write_func(spark, path):
writer = unary_op_df(spark, gen).coalesce(1).write
for key in write_options:
writer.option(key , write_options[key])
writer.format("parquet").mode('overwrite').option("path", path).saveAsTable(spark_tmp_table_factory.get())
gen = IntegerGen()
data_path = spark_tmp_path + '/PARQUET_DATA'
assert_gpu_fallback_write(
write_func,
lambda spark, path: spark.read.parquet(path),
data_path,
'DataWritingCommandExec')
@allow_non_gpu('DataWritingCommandExec,ExecutedCommandExec,WriteFilesExec')
@pytest.mark.parametrize("write_options", [{"parquet.encryption.footer.key": "k1"},
{"parquet.encryption.column.keys": "k2:a"},
{"parquet.encryption.footer.key": "k1", "parquet.encryption.column.keys": "k2:a"}])
def test_parquet_write_encryption_runtimeconfig_fallback(spark_tmp_path, write_options):
gen = IntegerGen()
data_path = spark_tmp_path + '/PARQUET_DATA'
assert_gpu_fallback_write(
lambda spark, path: unary_op_df(spark, gen).coalesce(1).write.parquet(path),
lambda spark, path: spark.read.parquet(path),
data_path,
"DataWritingCommandExec",
conf=write_options)
@allow_non_gpu('DataWritingCommandExec,ExecutedCommandExec,WriteFilesExec')
@pytest.mark.parametrize("write_options", [{"parquet.encryption.footer.key": "k1"},
{"parquet.encryption.column.keys": "k2:a"},
{"parquet.encryption.footer.key": "k1", "parquet.encryption.column.keys": "k2:a"}])
def test_parquet_write_encryption_hadoopconfig_fallback(spark_tmp_path, write_options):
gen = IntegerGen()
data_path = spark_tmp_path + '/PARQUET_DATA'
def setup_hadoop_confs(spark):
for k, v in write_options.items():
spark.sparkContext._jsc.hadoopConfiguration().set(k, v)
def reset_hadoop_confs(spark):
for k in write_options.keys():
spark.sparkContext._jsc.hadoopConfiguration().unset(k)
try:
with_cpu_session(setup_hadoop_confs)
assert_gpu_fallback_write(
lambda spark, path: unary_op_df(spark, gen).coalesce(1).write.parquet(path),
lambda spark, path: spark.read.parquet(path),
data_path,
"DataWritingCommandExec")
finally:
with_cpu_session(reset_hadoop_confs)
@allow_non_gpu('DataWritingCommandExec,ExecutedCommandExec,WriteFilesExec')
# Note that other codecs should fall back as well, but testing them requires loading the libraries for them;
# 'lzo', 'brotli', 'lz4', and 'zstd' should all fall back.
@pytest.mark.parametrize('codec', ['gzip'])
def test_parquet_write_compression_fallback(spark_tmp_path, codec, spark_tmp_table_factory):
gen = IntegerGen()
data_path = spark_tmp_path + '/PARQUET_DATA'
all_confs={'spark.sql.parquet.compression.codec': codec}
assert_gpu_fallback_write(
lambda spark, path: unary_op_df(spark, gen).coalesce(1).write.format("parquet").mode('overwrite').option("path", path).saveAsTable(spark_tmp_table_factory.get()),
lambda spark, path: spark.read.parquet(path),
data_path,
'DataWritingCommandExec',
conf=all_confs)
@allow_non_gpu('DataWritingCommandExec,ExecutedCommandExec,WriteFilesExec')
def test_parquet_writeLegacyFormat_fallback(spark_tmp_path, spark_tmp_table_factory):
gen = IntegerGen()
data_path = spark_tmp_path + '/PARQUET_DATA'
all_confs={'spark.sql.parquet.writeLegacyFormat': 'true'}
assert_gpu_fallback_write(
lambda spark, path: unary_op_df(spark, gen).coalesce(1).write.format("parquet").mode('overwrite').option("path", path).saveAsTable(spark_tmp_table_factory.get()),
lambda spark, path: spark.read.parquet(path),
data_path,
'DataWritingCommandExec',
conf=all_confs)
@ignore_order
@allow_non_gpu('DataWritingCommandExec,ExecutedCommandExec,WriteFilesExec')
def test_buckets_write_fallback(spark_tmp_path, spark_tmp_table_factory):
data_path = spark_tmp_path + '/PARQUET_DATA'
assert_gpu_fallback_write(
lambda spark, path: spark.range(10e4).write.bucketBy(4, "id").sortBy("id").format('parquet').mode('overwrite').option("path", path).saveAsTable(spark_tmp_table_factory.get()),
lambda spark, path: spark.read.parquet(path),
data_path,
'DataWritingCommandExec')
@ignore_order
@allow_non_gpu('DataWritingCommandExec,ExecutedCommandExec,WriteFilesExec')
def test_parquet_write_bloom_filter_with_options_cpu_fallback(spark_tmp_path, spark_tmp_table_factory):
data_path = spark_tmp_path + '/PARQUET_DATA'
assert_gpu_fallback_write(
lambda spark, path: spark.range(10e4).write.mode('overwrite')
.option("parquet.bloom.filter.enabled#id", "true")
.parquet(path),
lambda spark, path: spark.read.parquet(path),
data_path,
'DataWritingCommandExec')
@ignore_order
@allow_non_gpu('DataWritingCommandExec,ExecutedCommandExec,WriteFilesExec')
def test_parquet_write_bloom_filter_sql_cpu_fallback(spark_tmp_path, spark_tmp_table_factory):
data_path = spark_tmp_path + '/PARQUET_DATA'
base_table_name = spark_tmp_table_factory.get()
def sql_write(spark, path):
is_gpu = path.endswith('GPU')
table_name = base_table_name + '_GPU' if is_gpu else base_table_name + '_CPU'
spark.sql('CREATE TABLE `{}` STORED AS PARQUET location \'{}\' '
'TBLPROPERTIES("parquet.bloom.filter.enabled#id"="true") '
'AS SELECT id from range(100)'.format(table_name, path))
assert_gpu_fallback_write(
sql_write,
lambda spark, path: spark.read.parquet(path),
data_path,
'DataWritingCommandExec')
# This test checks how the parquet writer behaves if a column has a validity mask without having any nulls.
# There is no straightforward way to do this besides creating a vector with nulls and then dropping the nulls;
# cudf will create a vector with a null mask even though we have just filtered the nulls out.
def test_write_map_nullable(spark_tmp_path):
data_path = spark_tmp_path + '/PARQUET_DATA'
def generate_map_with_empty_validity(spark, path):
gen_data = StructGen([['number', IntegerGen()], ['word', LongGen()]], nullable=False)
gen_df(spark, gen_data)
df = gen_df(spark, gen_data)
df_noNulls = df.filter("number is not null")
df_map = df_noNulls.withColumn("map", f.create_map(["number", "word"])).drop("number").drop("word")
df_map.coalesce(1).write.parquet(path)
assert_gpu_and_cpu_writes_are_equal_collect(
generate_map_with_empty_validity,
lambda spark, path: spark.read.parquet(path),
data_path)
@pytest.mark.parametrize('ts_write_data_gen', [('INT96', TimestampGen()),
('TIMESTAMP_MICROS', TimestampGen(start=datetime(1, 1, 1, tzinfo=timezone.utc), end=datetime(1582, 1, 1, tzinfo=timezone.utc))),
('TIMESTAMP_MILLIS', TimestampGen(start=datetime(1, 1, 1, tzinfo=timezone.utc), end=datetime(1582, 1, 1, tzinfo=timezone.utc)))])
@pytest.mark.parametrize('date_time_rebase_write', ["CORRECTED"])
@pytest.mark.parametrize('date_time_rebase_read', ["EXCEPTION", "CORRECTED"])
@pytest.mark.parametrize('int96_rebase_write', ["CORRECTED"])
@pytest.mark.parametrize('int96_rebase_read', ["EXCEPTION", "CORRECTED"])
def test_timestamp_roundtrip_no_legacy_rebase(spark_tmp_path, ts_write_data_gen,
date_time_rebase_read, date_time_rebase_write,
int96_rebase_read, int96_rebase_write):
ts_write, gen = ts_write_data_gen
data_path = spark_tmp_path + '/PARQUET_DATA'
all_confs = {'spark.sql.parquet.outputTimestampType': ts_write}
all_confs.update({'spark.sql.legacy.parquet.datetimeRebaseModeInWrite': date_time_rebase_write,
'spark.sql.legacy.parquet.int96RebaseModeInWrite': int96_rebase_write})
all_confs.update({'spark.sql.legacy.parquet.datetimeRebaseModeInRead': date_time_rebase_read,
'spark.sql.legacy.parquet.int96RebaseModeInRead': int96_rebase_read})
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path: unary_op_df(spark, gen).coalesce(1).write.parquet(path),
lambda spark, path: spark.read.parquet(path),
data_path,
conf=all_confs)
# This should be merged to `test_timestamp_roundtrip_no_legacy_rebase` above when
# we have rebase for int96 supported.
@pytest.mark.parametrize('ts_write', ['TIMESTAMP_MICROS', 'TIMESTAMP_MILLIS'])
@pytest.mark.parametrize('data_gen', parquet_nested_datetime_gen, ids=idfn)
def test_datetime_roundtrip_with_legacy_rebase(spark_tmp_path, ts_write, data_gen):
data_path = spark_tmp_path + '/PARQUET_DATA'
all_confs = {'spark.sql.parquet.outputTimestampType': ts_write,
'spark.sql.legacy.parquet.datetimeRebaseModeInWrite': 'LEGACY',
'spark.sql.legacy.parquet.datetimeRebaseModeInRead': 'CORRECTED',
                 # set the INT96 rebase mode values because it is LEGACY in Databricks, which would preclude this op from running on the GPU
'spark.sql.legacy.parquet.int96RebaseModeInWrite' : 'CORRECTED',
'spark.sql.legacy.parquet.int96RebaseModeInRead' : 'CORRECTED'}
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path: unary_op_df(spark, data_gen).coalesce(1).write.parquet(path),
lambda spark, path: spark.read.parquet(path),
data_path,
conf=all_confs)
test_non_empty_ctas_non_gpu_execs = ["DataWritingCommandExec", "InsertIntoHiveTable", "WriteFilesExec"] if is_spark_340_or_later() or is_databricks122_or_later() else ["DataWritingCommandExec", "HiveTableScanExec"]
@pytest.mark.allow_non_gpu(*test_non_empty_ctas_non_gpu_execs)
@pytest.mark.parametrize('allow_non_empty', [True, False])
def test_non_empty_ctas(spark_tmp_path, spark_tmp_table_factory, allow_non_empty):
data_path = spark_tmp_path + "/CTAS"
conf = {
"spark.sql.hive.convertCTAS": "true",
"spark.sql.legacy.allowNonEmptyLocationInCTAS": str(allow_non_empty)
}
def test_it(spark):
src_name = spark_tmp_table_factory.get()
spark.sql("CREATE TABLE {}(id string) LOCATION '{}/src1'".format(src_name, data_path))
spark.sql("INSERT INTO TABLE {} SELECT 'A'".format(src_name))
ctas1_name = spark_tmp_table_factory.get()
spark.sql("CREATE TABLE {}(id string) LOCATION '{}/ctas/ctas1'".format(ctas1_name, data_path))
spark.sql("INSERT INTO TABLE {} SELECT 'A'".format(ctas1_name))
try:
ctas_with_existing_name = spark_tmp_table_factory.get()
spark.sql("CREATE TABLE {} LOCATION '{}/ctas' AS SELECT * FROM {}".format(
ctas_with_existing_name, data_path, src_name))
except pyspark.sql.utils.AnalysisException as e:
if allow_non_empty or e.desc.find('non-empty directory') == -1:
raise e
with_gpu_session(test_it, conf)
@pytest.mark.parametrize('parquet_gens', parquet_write_gens_list, ids=idfn)
def test_write_empty_parquet_round_trip(spark_tmp_path, parquet_gens):
def create_empty_df(spark, path):
gen_list = [('_c' + str(i), gen) for i, gen in enumerate(parquet_gens)]
return gen_df(spark, gen_list, length=0).write.parquet(path)
data_path = spark_tmp_path + '/PARQUET_DATA'
assert_gpu_and_cpu_writes_are_equal_collect(
create_empty_df,
lambda spark, path: spark.read.parquet(path),
data_path,
conf=writer_confs)
def get_nested_parquet_meta_data_for_field_id():
schema = StructType([
StructField("c1", IntegerType(), metadata={'parquet.field.id': -1}),
StructField("c2", StructType(
[StructField("c3", IntegerType(), metadata={'parquet.field.id': -3})]),
metadata={'parquet.field.id': -2})
])
data = [(1, (2,)), (11, (22,)), (33, (33,)), ]
return schema, data
@pytest.mark.skipif(is_before_spark_330(), reason='Field ID is not supported before Spark 330')
def test_parquet_write_field_id(spark_tmp_path):
data_path = spark_tmp_path + '/PARQUET_DATA'
schema, data = get_nested_parquet_meta_data_for_field_id()
with_gpu_session(
# default write Parquet IDs
lambda spark: spark.createDataFrame(data, schema).coalesce(1).write.mode("overwrite")
.parquet(data_path), conf=enable_parquet_field_id_write)
# check data, for schema check refer to Scala test case `ParquetFieldIdSuite`
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path: spark.createDataFrame(data, schema).coalesce(1).write
.mode("overwrite").parquet(path),
lambda spark, path: spark.read.parquet(path),
data_path,
conf=enable_parquet_field_id_read)
@pytest.mark.skipif(is_before_spark_330(), reason='Field ID is not supported before Spark 330')
def test_parquet_write_field_id_disabled(spark_tmp_path):
data_path = spark_tmp_path + '/PARQUET_DATA'
schema, data = get_nested_parquet_meta_data_for_field_id()
with_gpu_session(
lambda spark: spark.createDataFrame(data, schema).coalesce(1).write.mode("overwrite")
.parquet(data_path),
conf=disable_parquet_field_id_write) # disable write Parquet IDs
# check data, for schema check refer to Scala test case `ParquetFieldIdSuite`
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path: spark.createDataFrame(data, schema).coalesce(1).write
.mode("overwrite").parquet(path),
lambda spark, path: spark.read.parquet(path),
data_path,
conf=enable_parquet_field_id_read)
@pytest.mark.order(1) # at the head of xdist worker queue if pytest-order is installed
@pytest.mark.skipif(is_before_spark_330(), reason='DayTimeInterval is not supported before Pyspark 3.3.0')
def test_write_daytime_interval(spark_tmp_path):
gen_list = [('_c1', DayTimeIntervalGen())]
data_path = spark_tmp_path + '/PARQUET_DATA'
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path: gen_df(spark, gen_list).coalesce(1).write.parquet(path),
lambda spark, path: spark.read.parquet(path),
data_path,
conf=writer_confs)
@ignore_order
@pytest.mark.skipif(is_before_spark_320(), reason="is only supported in Spark 320+")
def test_concurrent_writer(spark_tmp_path):
data_path = spark_tmp_path + '/PARQUET_DATA'
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path: get_25_partitions_df(spark) # df has 25 partitions for (c1, c2)
.repartition(2)
.write.mode("overwrite").partitionBy('c1', 'c2').parquet(path),
lambda spark, path: spark.read.parquet(path),
data_path,
copy_and_update(
# 26 > 25, will not fall back to single writer
{"spark.sql.maxConcurrentOutputFileWriters": 26}
))
@ignore_order
@pytest.mark.skipif(is_before_spark_320(), reason="is only supported in Spark 320+")
@allow_non_gpu(any=True)
@pytest.mark.parametrize('aqe_enabled', [True, False])
def test_fallback_to_single_writer_from_concurrent_writer(spark_tmp_path, aqe_enabled):
data_path = spark_tmp_path + '/PARQUET_DATA'
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path: get_25_partitions_df(spark) # df has 25 partitions for (c1, c2)
.repartition(2)
.write.mode("overwrite").partitionBy('c1', 'c2').parquet(path),
lambda spark, path: spark.read.parquet(path),
data_path,
copy_and_update(
# 10 < 25, will fall back to single writer
{"spark.sql.maxConcurrentOutputFileWriters": 10},
{"spark.rapids.sql.concurrentWriterPartitionFlushSize": 64 * 1024 * 1024},
{"spark.sql.adaptive.enabled": aqe_enabled},
))
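# A minimal sketch (not used by the tests) of the fallback rule the two tests above exercise:
# the concurrent writer keeps one open file writer per output partition, and once the number
# of writers needed exceeds spark.sql.maxConcurrentOutputFileWriters the task falls back to
# sorting the remaining rows and writing them with the single writer.
def _falls_back_to_single_writer(num_output_partitions, max_concurrent_writers):
    return num_output_partitions > max_concurrent_writers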
@pytest.mark.skipif(True, reason="currently not support write emtpy data: https://github.com/NVIDIA/spark-rapids/issues/6453")
def test_write_empty_data_concurrent_writer(spark_tmp_path):
schema = StructType(
[StructField("c1", StringType()), StructField("c2", IntegerType()), StructField("c3", IntegerType())])
data = [] # empty data
data_path = spark_tmp_path + '/PARQUET_DATA'
with_gpu_session(lambda spark: spark.createDataFrame(spark.sparkContext.parallelize(data), schema)
.write.mode("overwrite").partitionBy('c1', 'c2').parquet(data_path),
# concurrent writer
{"spark.sql.maxConcurrentOutputFileWriters": 10})
with_cpu_session(lambda spark: spark.read.parquet(data_path).collect())
@pytest.mark.skipif(True, reason="currently not support write emtpy data: https://github.com/NVIDIA/spark-rapids/issues/6453")
def test_write_empty_data_single_writer(spark_tmp_path):
schema = StructType(
[StructField("c1", StringType()), StructField("c2", IntegerType()), StructField("c3", IntegerType())])
data = [] # empty data
data_path = spark_tmp_path + '/PARQUET_DATA'
with_gpu_session(lambda spark: spark.createDataFrame(spark.sparkContext.parallelize(data), schema)
.write.mode("overwrite").partitionBy('c1', 'c2').parquet(data_path))
with_cpu_session(lambda spark: spark.read.parquet(data_path).collect())
PartitionWriteMode = Enum('PartitionWriteMode', ['Static', 'Dynamic'])
@pytest.mark.skipif(is_databricks_runtime() or is_spark_cdh(),
reason="On Databricks and CDH, Hive partitioned SQL writes are routed through InsertIntoHiveTable; "
"GpuInsertIntoHiveTable does not support Parquet writes.")
@ignore_order(local=True)
@pytest.mark.parametrize('mode', [PartitionWriteMode.Static, PartitionWriteMode.Dynamic])
def test_partitioned_sql_parquet_write(mode, spark_tmp_table_factory):
def create_input_table(spark):
tmp_input = spark_tmp_table_factory.get()
spark.sql("CREATE TABLE " + tmp_input +
" (make STRING, model STRING, year INT, type STRING, comment STRING)" +
" STORED AS PARQUET")
spark.sql("INSERT INTO TABLE " + tmp_input + " VALUES " +
"('Ford', 'F-150', 2020, 'ICE', 'Popular' ),"
"('GMC', 'Sierra 1500', 1997, 'ICE', 'Older'),"
"('Chevy', 'D-Max', 2015, 'ICE', 'Isuzu?' ),"
"('Tesla', 'CyberTruck', 2025, 'Electric', 'BladeRunner'),"
"('Rivian', 'R1T', 2022, 'Electric', 'Heavy'),"
"('Jeep', 'Gladiator', 2024, 'Hybrid', 'Upcoming')")
return tmp_input
input_table_name = with_cpu_session(create_input_table)
def write_partitions(spark, table_name):
if mode == PartitionWriteMode.Static:
return [
"CREATE TABLE {} (make STRING, model STRING, year INT, comment STRING) "
"PARTITIONED BY (type STRING) STORED AS PARQUET ".format(table_name),
"INSERT INTO TABLE {} PARTITION (type='ICE') "
"SELECT make, model, year, comment FROM {} "
"WHERE type = 'ICE'".format(table_name, input_table_name),
"INSERT OVERWRITE TABLE {} PARTITION (type='Electric') "
"SELECT make, model, year, comment FROM {} "
"WHERE type = 'ICE'".format(table_name, input_table_name),
"INSERT OVERWRITE TABLE {} PARTITION (type='Hybrid') "
"SELECT make, model, year, comment FROM {} "
"WHERE type = 'ICE'".format(table_name, input_table_name)
]
elif mode == PartitionWriteMode.Dynamic:
return [
"CREATE TABLE {} (make STRING, model STRING, year INT, comment STRING) "
"PARTITIONED BY (type STRING) STORED AS PARQUET ".format(table_name),
"INSERT OVERWRITE TABLE {} "
"SELECT * FROM {} ".format(table_name, input_table_name)
]
else:
raise Exception("Unsupported PartitionWriteMode {}".format(mode))
assert_gpu_and_cpu_sql_writes_are_equal_collect(
spark_tmp_table_factory, write_partitions,
conf={"hive.exec.dynamic.partition.mode": "nonstrict"}
)
@ignore_order(local=True)
def test_dynamic_partitioned_parquet_write(spark_tmp_table_factory, spark_tmp_path):
def create_input_table(spark):
tmp_input = spark_tmp_table_factory.get()
spark.sql("CREATE TABLE " + tmp_input +
" (make STRING, model STRING, year INT, type STRING, comment STRING)" +
" STORED AS PARQUET")
spark.sql("INSERT INTO TABLE " + tmp_input + " VALUES " +
"('Ford', 'F-150', 2020, 'ICE', 'Popular' ),"
"('GMC', 'Sierra 1500', 1997, 'ICE', 'Older'),"
"('Chevy', 'D-Max', 2015, 'ICE', 'Isuzu?' ),"
"('Tesla', 'CyberTruck', 2025, 'Electric', 'BladeRunner'),"
"('Rivian', 'R1T', 2022, 'Electric', 'Heavy'),"
"('Jeep', 'Gladiator', 2024, 'Hybrid', 'Upcoming')")
return tmp_input
input_table_name = with_cpu_session(create_input_table)
base_output_path = spark_tmp_path + "/PARQUET_DYN_WRITE"
def write_partitions(spark, table_path):
input_df = spark.sql("SELECT * FROM {}".format(input_table_name))
input_df.write.mode("overwrite").partitionBy("type").parquet(table_path)
# Second write triggers the actual overwrite.
input_df.write.mode("overwrite").partitionBy("type").parquet(table_path)
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path: write_partitions(spark, path),
lambda spark, path: spark.read.parquet(path),
base_output_path,
conf={}
)
def hive_timestamp_value(spark_tmp_table_factory, spark_tmp_path, ts_rebase, func):
conf={'spark.sql.legacy.parquet.datetimeRebaseModeInWrite': ts_rebase,
'spark.sql.legacy.parquet.int96RebaseModeInWrite': ts_rebase}
def create_table(spark, path):
tmp_table = spark_tmp_table_factory.get()
spark.sql(f"CREATE TABLE {tmp_table} STORED AS PARQUET " +
f""" LOCATION '{path}' AS SELECT CAST('2015-01-01 00:00:00' AS TIMESTAMP) as t; """)
def read_table(spark, path):
return spark.read.parquet(path)
data_path = spark_tmp_path + '/PARQUET_DATA'
func(create_table, read_table, data_path, conf)
# Test to avoid regression on a known bug in Spark. For details please visit https://github.com/NVIDIA/spark-rapids/issues/8693
def test_hive_timestamp_value(spark_tmp_table_factory, spark_tmp_path):
def func_test(create_table, read_table, data_path, conf):
assert_gpu_and_cpu_writes_are_equal_collect(create_table, read_table, data_path, conf=conf)
assert_gpu_and_cpu_are_equal_collect(lambda spark: spark.read.parquet(data_path + '/CPU'))
hive_timestamp_value(spark_tmp_table_factory, spark_tmp_path, 'CORRECTED', func_test)
# Test to avoid regression on a known bug in Spark. For details please visit https://github.com/NVIDIA/spark-rapids/issues/8693
@allow_non_gpu('DataWritingCommandExec', 'WriteFilesExec')
def test_hive_timestamp_value_fallback(spark_tmp_table_factory, spark_tmp_path):
def func_test(create_table, read_table, data_path, conf):
assert_gpu_fallback_write(
create_table,
read_table,
data_path,
['DataWritingCommandExec'],
conf)
hive_timestamp_value(spark_tmp_table_factory, spark_tmp_path, 'LEGACY', func_test)
@ignore_order
@pytest.mark.skipif(is_before_spark_340(), reason="`spark.sql.optimizer.plannedWrite.enabled` is only supported in Spark 340+")
# empty string will not set the `planned_write_enabled` option
@pytest.mark.parametrize('planned_write_enabled', ["", "true", "false"])
# The df to be written has 25 partitions.
# 0 will not set the concurrent writers option;
# 100 > 25 will always use the concurrent writer without fallback;
# 20 < 25 will fall back to the single writer from the concurrent writer.
@pytest.mark.parametrize('max_concurrent_writers', [0, 100, 20])
def test_write_with_planned_write_enabled(spark_tmp_path, planned_write_enabled, max_concurrent_writers):
data_path = spark_tmp_path + '/PARQUET_DATA'
conf = {}
if planned_write_enabled != "":
conf = copy_and_update(conf, {"spark.sql.optimizer.plannedWrite.enabled": planned_write_enabled})
if max_concurrent_writers != 0:
conf = copy_and_update(conf, {"spark.sql.maxConcurrentOutputFileWriters": max_concurrent_writers})
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path: get_25_partitions_df(spark) # df has 25 partitions for (c1, c2)
.repartition(2)
.write.mode("overwrite").partitionBy('c1', 'c2').parquet(path),
lambda spark, path: spark.read.parquet(path),
data_path,
conf)
# Test for a known bug (https://github.com/NVIDIA/spark-rapids/issues/8694) to avoid regression.
@ignore_order
@allow_non_gpu("SortExec", "ShuffleExchangeExec")
def test_write_list_struct_single_element(spark_tmp_path):
data_path = spark_tmp_path + '/PARQUET_DATA'
data_gen = ArrayGen(StructGen([('element', long_gen)], nullable=False), max_length=10, nullable=False)
conf = {}
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path: gen_df(spark, data_gen).write.parquet(path),
lambda spark, path: spark.read.parquet(path), data_path, conf)
cpu_path = data_path + '/CPU'
assert_gpu_and_cpu_are_equal_collect(lambda spark: spark.read.parquet(cpu_path), conf)
@ignore_order
def test_parquet_write_column_name_with_dots(spark_tmp_path):
data_path = spark_tmp_path + "/PARQUET_DATA"
gens = [
("a.b", StructGen([
("c.d.e", StructGen([
("f.g", int_gen),
("h", string_gen)])),
("i.j", long_gen)])),
("k", boolean_gen)]
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path: gen_df(spark, gens).coalesce(1).write.parquet(path),
lambda spark, path: spark.read.parquet(path),
data_path)
| spark-rapids-branch-23.10 | integration_tests/src/main/python/parquet_write_test.py |
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_gpu_and_cpu_are_equal_collect, assert_gpu_and_cpu_are_equal_sql, \
assert_gpu_sql_fallback_collect, assert_gpu_fallback_collect, assert_gpu_and_cpu_error, \
assert_cpu_and_gpu_are_equal_collect_with_capture
from conftest import is_databricks_runtime
from data_gen import *
from marks import *
from pyspark.sql.types import *
import pyspark.sql.utils
import pyspark.sql.functions as f
from spark_session import with_cpu_session, with_gpu_session, is_databricks104_or_later, is_before_spark_320
_regexp_conf = { 'spark.rapids.sql.regexp.enabled': 'true' }
def mk_str_gen(pattern):
return StringGen(pattern).with_special_case('').with_special_pattern('.{0,10}')
def test_split_no_limit():
data_gen = mk_str_gen('([ABC]{0,3}_?){0,7}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr(
'split(a, "AB")',
'split(a, "C")',
'split(a, "_")'),
conf=_regexp_conf)
def test_split_negative_limit():
data_gen = mk_str_gen('([ABC]{0,3}_?){0,7}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr(
'split(a, "AB", -1)',
'split(a, "C", -2)',
'split(a, "_", -999)'),
conf=_regexp_conf)
def test_split_zero_limit():
data_gen = mk_str_gen('([ABC]{0,3}_?){0,7}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr(
'split(a, "AB", 0)',
'split(a, "C", 0)',
'split(a, "_", 0)'),
conf=_regexp_conf)
def test_split_one_limit():
data_gen = mk_str_gen('([ABC]{0,3}_?){1,7}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr(
'split(a, "AB", 1)',
'split(a, "C", 1)',
'split(a, "_", 1)'),
conf=_regexp_conf)
def test_split_positive_limit():
data_gen = mk_str_gen('([ABC]{0,3}_?){0,7}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr(
'split(a, "AB", 2)',
'split(a, "C", 3)',
'split(a, "_", 999)'))
@pytest.mark.parametrize('data_gen,delim', [(mk_str_gen('([ABC]{0,3}_?){0,7}'), '_'),
(mk_str_gen('([MNP_]{0,3}\\.?){0,5}'), '.'),
(mk_str_gen('([123]{0,3}\\^?){0,5}'), '^')], ids=idfn)
def test_substring_index(data_gen,delim):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).select(
f.substring_index(f.lit('123'), delim, 1),
f.substring_index(f.col('a'), delim, 1),
f.substring_index(f.col('a'), delim, 3),
f.substring_index(f.col('a'), delim, 0),
f.substring_index(f.col('a'), delim, -1),
f.substring_index(f.col('a'), delim, -4)))
@allow_non_gpu('ProjectExec')
@pytest.mark.parametrize('data_gen', [mk_str_gen('([ABC]{0,3}_?){0,7}')], ids=idfn)
def test_unsupported_fallback_substring_index(data_gen):
delim_gen = StringGen(pattern="_")
num_gen = IntegerGen(min_val=0, max_val=10, special_cases=[])
def assert_gpu_did_fallback(sql_text):
assert_gpu_fallback_collect(lambda spark:
gen_df(spark, [("a", data_gen),
("delim", delim_gen),
("num", num_gen)], length=10).selectExpr(sql_text),
"SubstringIndex")
assert_gpu_did_fallback("SUBSTRING_INDEX(a, '_', num)")
assert_gpu_did_fallback("SUBSTRING_INDEX(a, delim, 0)")
assert_gpu_did_fallback("SUBSTRING_INDEX(a, delim, num)")
assert_gpu_did_fallback("SUBSTRING_INDEX('a_b', '_', num)")
assert_gpu_did_fallback("SUBSTRING_INDEX('a_b', delim, 0)")
assert_gpu_did_fallback("SUBSTRING_INDEX('a_b', delim, num)")
# ONLY LITERAL WIDTH AND PAD ARE SUPPORTED
def test_lpad():
gen = mk_str_gen('.{0,5}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'LPAD("literal", 2, " ")',
'LPAD(a, 2, " ")',
'LPAD(a, NULL, " ")',
'LPAD(a, 5, NULL)',
'LPAD(a, 5, "G")',
'LPAD(a, -1, "G")'))
@allow_non_gpu('ProjectExec')
def test_unsupported_fallback_lpad():
gen = mk_str_gen('.{0,5}')
pad_gen = StringGen(pattern="G")
num_gen = IntegerGen(min_val=0, max_val=10, special_cases=[])
def assert_gpu_did_fallback(sql_string):
assert_gpu_fallback_collect(lambda spark:
gen_df(spark, [("a", gen),
("len", num_gen),
("pad", pad_gen)], length=10).selectExpr(sql_string),
"StringLPad")
assert_gpu_did_fallback('LPAD(a, 2, pad)')
assert_gpu_did_fallback('LPAD(a, len, " ")')
assert_gpu_did_fallback('LPAD(a, len, pad)')
assert_gpu_did_fallback('LPAD("foo", 2, pad)')
assert_gpu_did_fallback('LPAD("foo", len, " ")')
assert_gpu_did_fallback('LPAD("foo", len, pad)')
# ONLY LITERAL WIDTH AND PAD ARE SUPPORTED
def test_rpad():
gen = mk_str_gen('.{0,5}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'RPAD(a, 2, " ")',
'RPAD(a, NULL, " ")',
'RPAD(a, 5, NULL)',
'RPAD(a, 5, "G")',
'RPAD(a, -1, "G")'))
@allow_non_gpu('ProjectExec')
def test_unsupported_fallback_rpad():
gen = mk_str_gen('.{0,5}')
pad_gen = StringGen(pattern="G")
num_gen = IntegerGen(min_val=0, max_val=10, special_cases=[])
def assert_gpu_did_fallback(sql_string):
assert_gpu_fallback_collect(lambda spark:
gen_df(spark, [("a", gen),
("len", num_gen),
("pad", pad_gen)], length=10).selectExpr(sql_string),
"StringRPad")
assert_gpu_did_fallback('RPAD(a, 2, pad)')
assert_gpu_did_fallback('RPAD(a, len, " ")')
assert_gpu_did_fallback('RPAD(a, len, pad)')
assert_gpu_did_fallback('RPAD("foo", 2, pad)')
assert_gpu_did_fallback('RPAD("foo", len, " ")')
assert_gpu_did_fallback('RPAD("foo", len, pad)')
# ONLY LITERAL SEARCH PARAMS ARE SUPPORTED
def test_position():
gen = mk_str_gen('.{0,3}Z_Z.{0,3}A.{0,3}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'POSITION(NULL IN a)',
'POSITION("Z_" IN a)',
'POSITION("" IN a)',
'POSITION("_" IN a)',
'POSITION("A" IN a)'))
def test_locate():
gen = mk_str_gen('.{0,3}Z_Z.{0,3}A.{0,3}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'locate("Z", a, -1)',
'locate("Z", a, 4)',
'locate("abc", "1abcd", 0)',
'locate("abc", "1abcd", 10)',
'locate("A", a, 500)',
'locate("_", a, NULL)'))
@allow_non_gpu('ProjectExec')
def test_unsupported_fallback_locate():
gen = mk_str_gen('.{0,3}Z_Z.{0,3}A.{0,3}')
pos_gen = IntegerGen()
def assert_gpu_did_fallback(sql_text):
assert_gpu_fallback_collect(lambda spark:
gen_df(spark, [("a", gen), ("pos", pos_gen)], length=10).selectExpr(sql_text),
'StringLocate')
assert_gpu_did_fallback('locate(a, a, -1)')
assert_gpu_did_fallback('locate("a", a, pos)')
assert_gpu_did_fallback('locate(a, a, pos)')
assert_gpu_did_fallback('locate(a, "a", pos)')
def test_instr():
gen = mk_str_gen('.{0,3}Z_Z.{0,3}A.{0,3}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'instr("A", "A")',
'instr("a", "A")',
'instr(a, "Z")',
'instr(a, "A")',
'instr(a, "_")',
'instr(a, NULL)',
'instr(NULL, "A")',
'instr(NULL, NULL)'))
@allow_non_gpu('ProjectExec')
def test_unsupported_fallback_instr():
gen = mk_str_gen('.{0,3}Z_Z.{0,3}A.{0,3}')
def assert_gpu_did_fallback(sql_text):
assert_gpu_fallback_collect(lambda spark:
unary_op_df(spark, gen, length=10).selectExpr(sql_text),
'StringInstr')
assert_gpu_did_fallback('instr(a, a)')
assert_gpu_did_fallback('instr("a", a)')
def test_contains():
gen = mk_str_gen('.{0,3}Z?_Z?.{0,3}A?.{0,3}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).select(
f.lit('Z').contains('Z'),
f.lit('foo').contains('Z_'),
f.col('a').contains('Z'),
f.col('a').contains('Z_'),
f.col('a').contains(''),
f.col('a').contains(None)))
@allow_non_gpu('ProjectExec')
def test_unsupported_fallback_contains():
gen = StringGen(pattern='[a-z]')
def assert_gpu_did_fallback(op):
assert_gpu_fallback_collect(lambda spark:
unary_op_df(spark, gen, length=10).select(op),
'Contains')
assert_gpu_did_fallback(f.lit('Z').contains(f.col('a')))
assert_gpu_did_fallback(f.col('a').contains(f.col('a')))
@pytest.mark.parametrize('data_gen', [mk_str_gen('[Ab \ud720]{0,3}A.{0,3}Z[ Ab]{0,3}'), StringGen('')])
def test_trim(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, data_gen).selectExpr(
'TRIM(a)',
'TRIM("Ab" FROM a)',
'TRIM("A\ud720" FROM a)',
'TRIM(BOTH NULL FROM a)',
'TRIM("" FROM a)'))
@pytest.mark.parametrize('data_gen', [mk_str_gen('[Ab \ud720]{0,3}A.{0,3}Z[ Ab]{0,3}'), StringGen('')])
def test_ltrim(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, data_gen).selectExpr(
'LTRIM(a)',
'LTRIM("Ab", a)',
'TRIM(LEADING "A\ud720" FROM a)',
'TRIM(LEADING NULL FROM a)',
'TRIM(LEADING "" FROM a)'))
@pytest.mark.parametrize('data_gen', [mk_str_gen('[Ab \ud720]{0,3}A.{0,3}Z[ Ab]{0,3}'), StringGen('')])
def test_rtrim(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, data_gen).selectExpr(
'RTRIM(a)',
'RTRIM("Ab", a)',
'TRIM(TRAILING "A\ud720" FROM a)',
'TRIM(TRAILING NULL FROM a)',
'TRIM(TRAILING "" FROM a)'))
def test_startswith():
gen = mk_str_gen('[Ab\ud720]{3}A.{0,3}Z[Ab\ud720]{3}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).select(
f.lit('foo').startswith('f'),
f.lit('bar').startswith('1'),
f.col('a').startswith('A'),
f.col('a').startswith(''),
f.col('a').startswith(None),
f.col('a').startswith('A\ud720')))
@allow_non_gpu('ProjectExec')
def test_unsupported_fallback_startswith():
gen = StringGen(pattern='[a-z]')
def assert_gpu_did_fallback(op):
assert_gpu_fallback_collect(lambda spark:
unary_op_df(spark, gen, length=10).select(op),
'StartsWith')
assert_gpu_did_fallback(f.lit("TEST").startswith(f.col("a")))
assert_gpu_did_fallback(f.col("a").startswith(f.col("a")))
def test_endswith():
gen = mk_str_gen('[Ab\ud720]{3}A.{0,3}Z[Ab\ud720]{3}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).select(
                f.lit('foo').endswith('o'),
                f.lit('bar').endswith('1'),
f.col('a').endswith('A'),
f.col('a').endswith(''),
f.col('a').endswith(None),
f.col('a').endswith('A\ud720')))
@allow_non_gpu('ProjectExec')
def test_unsupported_fallback_endswith():
gen = StringGen(pattern='[a-z]')
def assert_gpu_did_fallback(op):
assert_gpu_fallback_collect(lambda spark:
unary_op_df(spark, gen, length=10).select(op),
'EndsWith')
assert_gpu_did_fallback(f.lit("TEST").endswith(f.col("a")))
assert_gpu_did_fallback(f.col("a").endswith(f.col("a")))
def test_concat_ws_basic():
gen = StringGen(nullable=True)
(s1, s2) = gen_scalars(gen, 2, force_no_nulls=True)
assert_gpu_and_cpu_are_equal_collect(
lambda spark: binary_op_df(spark, gen).select(
f.concat_ws("-"),
f.concat_ws("-", f.col('a')),
f.concat_ws(None, f.col('a')),
f.concat_ws("-", f.col('a'), f.col('b')),
f.concat_ws("-", f.col('a'), f.lit('')),
f.concat_ws("*", f.col('a'), f.col('b'), f.col('a')),
f.concat_ws("*", s1, f.col('b')),
f.concat_ws("+", f.col('a'), s2),
f.concat_ws("-", f.lit(None), f.lit(None)),
f.concat_ws("-", f.lit(None).cast('string'), f.col('b')),
f.concat_ws("+", f.col('a'), f.lit(None).cast('string')),
f.concat_ws(None, f.col('a'), f.col('b')),
f.concat_ws("+", f.col('a'), f.lit(''))))
def test_concat_ws_arrays():
gen = ArrayGen(StringGen(nullable=True), nullable=True)
(s1, s2) = gen_scalars(gen, 2, force_no_nulls=True)
assert_gpu_and_cpu_are_equal_collect(
lambda spark: binary_op_df(spark, gen).select(
f.concat_ws("*", f.array(f.lit('2'), f.lit(''), f.lit('3'), f.lit('Z'))),
f.concat_ws("*", s1, s2),
f.concat_ws("-", f.array()),
f.concat_ws("-", f.array(), f.lit('u')),
f.concat_ws(None, f.lit('z'), s1, f.lit('b'), s2, f.array()),
f.concat_ws("+", f.lit('z'), s1, f.lit('b'), s2, f.array()),
f.concat_ws("*", f.col('b'), f.lit('z')),
f.concat_ws("*", f.lit('z'), s1, f.lit('b'), s2, f.array(), f.col('b')),
f.concat_ws("-", f.array(f.lit(None))),
f.concat_ws("-", f.array(f.lit('')))))
def test_concat_ws_nulls_arrays():
gen = ArrayGen(StringGen(nullable=True), nullable=True)
assert_gpu_and_cpu_are_equal_collect(
lambda spark: binary_op_df(spark, gen).select(
f.concat_ws("*", f.lit('z'), f.array(f.lit('2'), f.lit(None), f.lit('Z'))),
f.concat_ws("*", f.array(f.lit(None), f.lit(None))),
f.concat_ws("*", f.array(f.lit(None), f.lit(None)), f.col('b'), f.lit('a'))))
def test_concat_ws_sql_basic():
gen = StringGen(nullable=True)
assert_gpu_and_cpu_are_equal_sql(
lambda spark: binary_op_df(spark, gen),
'concat_ws_table',
'select ' +
'concat_ws("-"), ' +
'concat_ws("-", a), ' +
'concat_ws(null, a), ' +
'concat_ws("-", a, b), ' +
'concat_ws("-", null, null), ' +
'concat_ws("+", \'aaa\', \'bbb\', \'zzz\'), ' +
'concat_ws(null, b, \'aaa\', \'bbb\', \'zzz\'), ' +
'concat_ws("=", b, \'\', \'bbb\', \'zzz\'), ' +
'concat_ws("*", b, a, cast(null as string)) from concat_ws_table')
def test_concat_ws_sql_col_sep():
gen = StringGen(nullable=True)
sep = StringGen('[-,*,+,!]', nullable=True)
assert_gpu_and_cpu_are_equal_sql(
lambda spark: three_col_df(spark, gen, gen, sep),
'concat_ws_table',
'select ' +
'concat_ws(c, a), ' +
'concat_ws(c, a, b), ' +
'concat_ws(c, null, null), ' +
'concat_ws(c, \'aaa\', \'bbb\', \'zzz\'), ' +
'concat_ws(c, b, \'\', \'bbb\', \'zzz\'), ' +
'concat_ws(c, b, a, cast(null as string)) from concat_ws_table')
@pytest.mark.skipif(is_databricks_runtime(),
reason='Databricks optimizes out concat_ws call in this case')
@allow_non_gpu('ProjectExec', 'Alias', 'ConcatWs')
def test_concat_ws_sql_col_sep_only_sep_specified():
gen = StringGen(nullable=True)
sep = StringGen('[-,*,+,!]', nullable=True)
assert_gpu_sql_fallback_collect(
lambda spark: three_col_df(spark, gen, gen, sep),
'ConcatWs',
'concat_ws_table',
'select ' +
'concat_ws(c) from concat_ws_table')
def test_concat_ws_sql_arrays():
gen = ArrayGen(StringGen(nullable=True), nullable=True)
assert_gpu_and_cpu_are_equal_sql(
lambda spark: three_col_df(spark, gen, gen, StringGen(nullable=True)),
'concat_ws_table',
'select ' +
'concat_ws("-", array()), ' +
'concat_ws(null, c, c, array(c)), ' +
'concat_ws("-", array(), c), ' +
'concat_ws("-", a, b), ' +
'concat_ws("-", a, array(null, c), b, array()), ' +
'concat_ws("-", array(null, null)), ' +
'concat_ws("-", a, array(null), b, array()), ' +
'concat_ws("*", array(\'2\', \'\', \'3\', \'Z\', c)) from concat_ws_table')
def test_concat_ws_sql_arrays_col_sep():
gen = ArrayGen(StringGen(nullable=True), nullable=True)
sep = StringGen('[-,*,+,!]', nullable=True)
assert_gpu_and_cpu_are_equal_sql(
lambda spark: three_col_df(spark, gen, StringGen(nullable=True), sep),
'concat_ws_table',
'select ' +
'concat_ws(c, array()) as emptyCon, ' +
'concat_ws(c, b, b, array(b)), ' +
'concat_ws(c, a, array(null, c), b, array()), ' +
'concat_ws(c, array(null, null)), ' +
'concat_ws(c, a, array(null), b, array()), ' +
'concat_ws(c, array(\'2\', \'\', \'3\', \'Z\', b)) from concat_ws_table')
def test_concat_ws_sql_arrays_all_null_col_sep():
gen = ArrayGen(StringGen(nullable=True), nullable=True)
sep = NullGen()
assert_gpu_and_cpu_are_equal_sql(
lambda spark: three_col_df(spark, gen, StringGen(nullable=True), sep),
'concat_ws_table',
'select ' +
'concat_ws(c, array(null, null)), ' +
'concat_ws(c, a, array(null), b, array()), ' +
'concat_ws(c, b, b, array(b)) from concat_ws_table')
def test_substring():
gen = mk_str_gen('.{0,30}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'SUBSTRING(a, 1, 5)',
'SUBSTRING(a, 5, 2147483647)',
'SUBSTRING(a, 5, -2147483648)',
'SUBSTRING(a, 1)',
'SUBSTRING(a, -3)',
'SUBSTRING(a, 3, -2)',
'SUBSTRING(a, 100)',
'SUBSTRING(a, -100)',
'SUBSTRING(a, NULL)',
'SUBSTRING(a, 1, NULL)',
'SUBSTRING(a, -5, 0)',
'SUBSTRING(a, -5, 4)',
'SUBSTRING(a, 10, 0)',
'SUBSTRING(a, -50, 10)',
'SUBSTRING(a, -10, -1)',
'SUBSTRING(a, 0, 10)',
'SUBSTRING(a, 0, 0)'))
def test_substring_column():
str_gen = mk_str_gen('.{0,30}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: three_col_df(spark, str_gen, int_gen, int_gen).selectExpr(
'SUBSTRING(a, b, c)',
'SUBSTRING(a, b, 0)',
'SUBSTRING(a, b, 5)',
'SUBSTRING(a, b, -5)',
'SUBSTRING(a, b, 100)',
'SUBSTRING(a, b, -100)',
'SUBSTRING(a, b, NULL)',
'SUBSTRING(a, 0, c)',
'SUBSTRING(a, 5, c)',
'SUBSTRING(a, -5, c)',
'SUBSTRING(a, 100, c)',
'SUBSTRING(a, -100, c)',
'SUBSTRING(a, NULL, c)',
'SUBSTRING(\'abc\', b, c)',
'SUBSTRING(\'abc\', 1, c)',
'SUBSTRING(\'abc\', 0, c)',
'SUBSTRING(\'abc\', 5, c)',
'SUBSTRING(\'abc\', -1, c)',
'SUBSTRING(\'abc\', -5, c)',
'SUBSTRING(\'abc\', NULL, c)',
'SUBSTRING(\'abc\', b, 10)',
'SUBSTRING(\'abc\', b, -10)',
'SUBSTRING(\'abc\', b, 2)',
'SUBSTRING(\'abc\', b, 0)',
'SUBSTRING(\'abc\', b, NULL)',
'SUBSTRING(\'abc\', b)',
'SUBSTRING(a, b)'))
@pytest.mark.skipif(is_databricks_runtime() and not is_databricks104_or_later(),
reason="https://github.com/NVIDIA/spark-rapids/issues/7463")
def test_ephemeral_substring():
str_gen = mk_str_gen('.{0,30}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: three_col_df(spark, str_gen, int_gen, int_gen)\
.filter("substr(a, 1, 3) > 'mmm'"))
def test_repeat_scalar_and_column():
gen_s = StringGen(nullable=False)
gen_r = IntegerGen(min_val=-100, max_val=100, special_cases=[0], nullable=True)
(s,) = gen_scalars_for_sql(gen_s, 1)
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen_r).selectExpr(
'repeat({}, a)'.format(s),
'repeat({}, null)'.format(s)))
def test_repeat_column_and_scalar():
gen_s = StringGen(nullable=True)
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen_s).selectExpr(
'repeat(a, -10)',
'repeat(a, 0)',
'repeat(a, 10)',
'repeat(a, null)'
))
def test_repeat_null_column_and_scalar():
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.range(100).selectExpr('CAST(NULL as STRING) AS a').selectExpr(
'repeat(a, -10)',
'repeat(a, 0)',
'repeat(a, 10)'
))
def test_repeat_column_and_column():
gen_s = StringGen(nullable=True)
gen_r = IntegerGen(min_val=-100, max_val=100, special_cases=[0], nullable=True)
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, gen_s, gen_r).selectExpr('repeat(a, b)'))
def test_replace():
gen = mk_str_gen('.{0,5}TEST[\ud720 A]{0,5}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'REPLACE("TEST", "TEST", "PROD")',
'REPLACE("NO", "T\ud720", "PROD")',
'REPLACE(a, "TEST", "PROD")',
'REPLACE(a, "T\ud720", "PROD")',
'REPLACE(a, "", "PROD")',
'REPLACE(a, "T", NULL)',
'REPLACE(a, NULL, "PROD")',
'REPLACE(a, "T", "")'))
@allow_non_gpu('ProjectExec')
def test_unsupported_fallback_replace():
gen = mk_str_gen('.{0,5}TEST[\ud720 A]{0,5}')
def assert_gpu_did_fallback(sql_text):
assert_gpu_fallback_collect(lambda spark:
unary_op_df(spark, gen, length=10).selectExpr(sql_text),
'StringReplace')
assert_gpu_did_fallback('REPLACE(a, "TEST", a)')
assert_gpu_did_fallback('REPLACE(a, a, "TEST")')
assert_gpu_did_fallback('REPLACE(a, a, a)')
assert_gpu_did_fallback('REPLACE("TEST", "TEST", a)')
assert_gpu_did_fallback('REPLACE("TEST", a, "TEST")')
assert_gpu_did_fallback('REPLACE("TEST", a, a)')
@incompat
def test_translate():
gen = mk_str_gen('.{0,5}TEST[\ud720 A]{0,5}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'translate(a, "TEST", "PROD")',
'translate(a, "TEST", "P")',
'translate(a, "T\ud720", "PROD")',
'translate(a, "", "PROD")',
'translate(a, NULL, "PROD")',
'translate(a, "TEST", NULL)',
'translate("AaBbCc", "abc", "123")',
'translate("AaBbCc", "abc", "1")'))
@incompat
@allow_non_gpu('ProjectExec')
def test_unsupported_fallback_translate():
gen = mk_str_gen('.{0,5}TEST[\ud720 A]{0,5}')
def assert_gpu_did_fallback(sql_text):
assert_gpu_fallback_collect(lambda spark:
unary_op_df(spark, gen, length=10).selectExpr(sql_text),
'StringTranslate')
assert_gpu_did_fallback('TRANSLATE(a, "TEST", a)')
assert_gpu_did_fallback('TRANSLATE(a, a, "TEST")')
assert_gpu_did_fallback('TRANSLATE(a, a, a)')
assert_gpu_did_fallback('TRANSLATE("TEST", "TEST", a)')
assert_gpu_did_fallback('TRANSLATE("TEST", a, "TEST")')
assert_gpu_did_fallback('TRANSLATE("TEST", a, a)')
@incompat
@pytest.mark.skipif(is_before_spark_320(), reason="Only in Spark 3.2+ does translate() support unicode \
characters with code point >= U+10000. See https://issues.apache.org/jira/browse/SPARK-34094")
def test_translate_large_codepoints():
gen = mk_str_gen('.{0,5}TEST[\ud720 \U0010FFFF A]{0,5}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'translate(a, "T\U0010FFFF", "PROD")'))
def test_length():
gen = mk_str_gen('.{0,5}TEST[\ud720 A]{0,5}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'LENGTH(a)',
'CHAR_LENGTH(a)',
'CHARACTER_LENGTH(a)'))
def test_byte_length():
gen = mk_str_gen('.{0,5}TEST[\ud720 A]{0,5}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'BIT_LENGTH(a)', 'OCTET_LENGTH(a)'))
@incompat
def test_initcap():
    # Because we don't use the same unicode version we need to limit
    # the character set to something more reasonable.
    # Upper and lower case should cover the corner cases; this is mostly to
    # see if there are issues with spaces.
gen = mk_str_gen('([aAbB1357ȺéŸ_@%-]{0,15}[ \r\n\t]{1,2}){1,5}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).select(
f.initcap(f.col('a'))))
@incompat
@pytest.mark.xfail(reason='Spark initcap will not convert ʼn to ʼN')
def test_initcap_special_chars():
gen = mk_str_gen('ʼn([aAbB13ȺéŸ]{0,5}){1,5}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).select(
f.initcap(f.col('a'))))
def test_like_null():
gen = mk_str_gen('.{0,3}a[|b*.$\r\n]{0,2}c.{0,3}')\
.with_special_pattern('.{0,3}oo.{0,3}', weight=100.0)\
.with_special_case('_')\
.with_special_case('\r')\
.with_special_case('\n')\
.with_special_case('%SystemDrive%\\Users\\John')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).select(
f.col('a').like('_')))
def test_like():
gen = mk_str_gen('(\u20ac|\\w){0,3}a[|b*.$\r\n]{0,2}c\\w{0,3}')\
.with_special_pattern('\\w{0,3}oo\\w{0,3}', weight=100.0)\
.with_special_case('_')\
.with_special_case('\r')\
.with_special_case('\n')\
.with_special_case('a{3}bar')\
.with_special_case('12345678')\
.with_special_case('12345678901234')\
.with_special_case('%SystemDrive%\\Users\\John')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).select(
f.lit('_oo_').like('_oo_'),
f.lit('_aa_').like('_oo_'),
f.col('a').like('%o%'), # turned into contains
f.col('a').like('%a%'), # turned into contains
                f.col('a').like(''), # turned into equals
                f.col('a').like('12345678'), # turned into equals
f.col('a').like('\\%SystemDrive\\%\\\\Users%'),
f.col('a').like('_'),
f.col('a').like('_oo_'),
f.col('a').like('_oo%'),
f.col('a').like('%oo_'),
f.col('a').like('_\u201c%'),
f.col('a').like('_a[d]%'),
f.col('a').like('_a(d)%'),
f.col('a').like('_$'),
f.col('a').like('_$%'),
f.col('a').like('_._'),
f.col('a').like('_?|}{_%'),
f.col('a').like('%a{3}%')))
@allow_non_gpu('ProjectExec')
def test_unsupported_fallback_like():
gen = StringGen('[a-z]')
def assert_gpu_did_fallback(sql_text):
assert_gpu_fallback_collect(lambda spark:
unary_op_df(spark, gen, length=10).selectExpr(sql_text),
'Like')
assert_gpu_did_fallback("'lit' like a")
assert_gpu_did_fallback("a like a")
@allow_non_gpu('ProjectExec')
def test_unsupported_fallback_rlike():
    gen = StringGen(r'\/lit\/')
def assert_gpu_did_fallback(sql_text):
assert_gpu_fallback_collect(lambda spark:
unary_op_df(spark, gen, length=10).selectExpr(sql_text),
'RLike')
assert_gpu_did_fallback("'lit' rlike a")
assert_gpu_did_fallback("a rlike a")
def test_like_simple_escape():
gen = mk_str_gen('(\u20ac|\\w){0,3}a[|b*.$\r\n]{0,2}c\\w{0,3}')\
.with_special_pattern('\\w{0,3}oo\\w{0,3}', weight=100.0)\
.with_special_case('_')\
.with_special_case('\r')\
.with_special_case('\n')\
.with_special_case('a{3}bar')\
.with_special_case('12345678')\
.with_special_case('12345678901234')\
.with_special_case('%SystemDrive%\\Users\\John')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'a like "_a^d%" escape "c"',
'a like "a_a" escape "c"',
'a like "a%a" escape "c"',
'a like "c_" escape "c"',
'a like x "6162632325616263" escape "#"',
'a like x "61626325616263" escape "#"'))
def test_like_complex_escape():
gen = mk_str_gen('(\u20ac|\\w){0,3}a[|b*.$\r\n]{0,2}c\\w{0,3}')\
.with_special_pattern('\\w{0,3}oo\\w{0,3}', weight=100.0)\
.with_special_case('_')\
.with_special_case('\r')\
.with_special_case('\n')\
.with_special_case('a{3}bar')\
.with_special_case('12345678')\
.with_special_case('12345678901234')\
.with_special_case('%SystemDrive%\\Users\\John')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'a like x "256f6f5f"',
'a like x "6162632325616263" escape "#"',
'a like x "61626325616263" escape "#"',
'a like ""',
'a like "_oo_"',
'a like "_oo%"',
'a like "%oo_"',
'a like "_\u20AC_"',
'a like "\\%SystemDrive\\%\\\\\\\\Users%"',
'a like "_oo"'),
conf={'spark.sql.parser.escapedStringLiterals': 'true'})
@pytest.mark.parametrize('from_base,pattern',
[
pytest.param(10, r'-?[0-9]{1,18}', id='from_10'),
pytest.param(16, r'-?[0-9a-fA-F]{1,15}', id='from_16')
])
# to_base can be positive or negative; only positive bases are exercised here
@pytest.mark.parametrize('to_base', [10, 16], ids=['to_plus10', 'to_plus16'])
def test_conv_dec_to_from_hex(from_base, to_base, pattern):
    # before Spark 3.2 a leading space makes the string non-numeric and the result is 0
    if not is_before_spark_320():
        pattern = r' ?' + pattern
gen = mk_str_gen(pattern)
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).select('a', f.conv(f.col('a'), from_base, to_base)),
conf={'spark.rapids.sql.expression.Conv': True}
)
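# A minimal illustrative sketch, not part of the original test suite and not collected by
# pytest: it spells out the conv() semantics the test above relies on. The helper name and
# the literal inputs are assumptions made for this example only.
def _example_conv_semantics(spark):
    # conv('15', 10, 16) -> 'F' and conv('ff', 16, 10) -> '255'. On Spark 3.2+ a value with
    # a leading space such as ' 15' is still parsed, while older versions treat it as 0
    # (which is why the generated pattern above only adds the optional space on 3.2+).
    return spark.sql(
        "select conv('15', 10, 16) as dec_to_hex, conv('ff', 16, 10) as hex_to_dec")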
| spark-rapids-branch-23.10 | integration_tests/src/main/python/string_test.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_gpu_and_cpu_are_equal_collect, assert_gpu_fallback_collect
from data_gen import *
from pyspark.sql.types import *
from marks import *
def mk_json_str_gen(pattern):
return StringGen(pattern).with_special_case('').with_special_pattern('.{0,10}')
@pytest.mark.parametrize('json_str_pattern', [r'\{"store": \{"fruit": \[\{"weight":\d,"type":"[a-z]{1,9}"\}\], ' \
r'"bicycle":\{"price":[1-9]\d\.\d\d,"color":"[a-z]{0,4}"\}\},' \
r'"email":"[a-z]{1,5}\@[a-z]{3,10}\.com","owner":"[a-z]{3,8}"\}',
r'\{"a": "[a-z]{1,3}"\}'], ids=idfn)
def test_get_json_object(json_str_pattern):
gen = mk_json_str_gen(json_str_pattern)
scalar_json = '{"store": {"fruit": [{"name": "test"}]}}'
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen, length=10).selectExpr(
'get_json_object(a,"$.a")',
'get_json_object(a, "$.owner")',
'get_json_object(a, "$.store.fruit[0]")',
'get_json_object(\'%s\', "$.store.fruit[0]")' % scalar_json,
),
conf={'spark.sql.parser.escapedStringLiterals': 'true'})
@allow_non_gpu('ProjectExec')
@pytest.mark.parametrize('json_str_pattern', [r'\{"store": \{"fruit": \[\{"weight":\d,"type":"[a-z]{1,9}"\}\], ' \
r'"bicycle":\{"price":[1-9]\d\.\d\d,"color":"[a-z]{0,4}"\}\},' \
r'"email":"[a-z]{1,5}\@[a-z]{3,10}\.com","owner":"[a-z]{3,8}"\}',
r'\{"a": "[a-z]{1,3}"\}'], ids=idfn)
def test_unsupported_fallback_get_json_object(json_str_pattern):
gen = mk_json_str_gen(json_str_pattern)
scalar_json = '{"store": {"fruit": "test"}}'
pattern = StringGen(pattern=r'\$\.[a-z]{1,9}')
def assert_gpu_did_fallback(sql_text):
assert_gpu_fallback_collect(lambda spark:
gen_df(spark, [('a', gen), ('b', pattern)], length=10).selectExpr(sql_text),
'GetJsonObject',
conf={'spark.sql.parser.escapedStringLiterals': 'true'})
assert_gpu_did_fallback('get_json_object(a, b)')
assert_gpu_did_fallback('get_json_object(\'%s\', b)' % scalar_json)
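# A minimal illustrative sketch, not part of the original tests and not collected by pytest:
# it spells out the JSONPath semantics exercised above, where '$.owner' selects a top-level
# field and '$.store.fruit[0]' selects the first element of a nested array. The helper name
# is an assumption for this example only.
def _example_get_json_object(spark):
    # Returns '{"name":"test"}' for the nested array path and 'alice' for the simple field.
    return spark.sql(
        """select get_json_object('{"store": {"fruit": [{"name": "test"}]}}', '$.store.fruit[0]') as fruit,
                  get_json_object('{"owner": "alice"}', '$.owner') as owner""")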
| spark-rapids-branch-23.10 | integration_tests/src/main/python/get_json_test.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_gpu_and_cpu_are_equal_collect, assert_gpu_fallback_collect, assert_gpu_and_cpu_are_equal_sql
from data_gen import *
from marks import allow_non_gpu
def mk_json_str_gen(pattern):
return StringGen(pattern).with_special_case('').with_special_pattern('.{0,10}')
json_str_patterns = [r'\{"store": \{"fruit": \[\{"weight":\d,"type":"[a-z]{1,9}"\}\], ' \
r'"bicycle":\{"price":[1-9]\d\.\d\d,"color":"[a-z]{0,4}"\}\},' \
r'"email":"[a-z]{1,5}\@[a-z]{3,10}\.com","owner":"[a-z]{3,8}"\}',
r'\{"a": "[a-z]{1,3}", "b\$":"[b-z]{1,3}"\}']
@pytest.mark.parametrize('json_str_pattern', json_str_patterns, ids=idfn)
def test_json_tuple(json_str_pattern):
gen = mk_json_str_gen(json_str_pattern)
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen, length=10).selectExpr(
'json_tuple(a, "a", "email", "owner", "b", "b$", "b$$")'),
conf={'spark.sql.parser.escapedStringLiterals': 'true'})
def test_json_tuple_select_non_generator_col():
gen = StringGen(pattern="{\"Zipcode\":\"abc\",\"ZipCodeType\":\"STANDARD\",\"City\":\"PARC PARQUE\",\"State\":\"PR\"}")
assert_gpu_and_cpu_are_equal_sql(
lambda spark : gen_df(spark, [('a', gen)]),
'table',
'select a, json_tuple(a, \"Zipcode\", \"ZipCodeType\", \"City\", \"State\") from table',
conf={'spark.sql.parser.escapedStringLiterals': 'true'})
@allow_non_gpu('GenerateExec', 'JsonTuple')
@pytest.mark.parametrize('json_str_pattern', json_str_patterns, ids=idfn)
def test_json_tuple_with_large_number_of_fields_fallback(json_str_pattern):
gen = mk_json_str_gen(json_str_pattern)
assert_gpu_fallback_collect(
lambda spark: unary_op_df(spark, gen, length=10).selectExpr(
'json_tuple(a, "a", "email", "owner", "bicycle", "b", "aa", "ab", "type", "color", "name", \
"weight", "x", "y", "z", "category", "address", "phone", "mobile", "aaa", "c", \
"date", "time", "second", "d", "abc", "e", "hour", "minute", "when", "what", \
"location", "city", "country", "zip", "code", "region", "state", "street", "block", "loc", \
"height", "h", "author", "title", "price", "isbn", "book", "rating", "score", "popular")'),
"JsonTuple",
conf={'spark.sql.parser.escapedStringLiterals': 'true'})
@allow_non_gpu('GenerateExec', 'JsonTuple')
@pytest.mark.parametrize('json_str_pattern', json_str_patterns, ids=idfn)
def test_json_tuple_with_special_characters_fallback(json_str_pattern):
gen = mk_json_str_gen(json_str_pattern)
special_characters = ['.', '[', ']', '{', '}', '\\\\', '\'', '\\\"']
for special_character in special_characters:
assert_gpu_fallback_collect(
lambda spark: unary_op_df(spark, gen, length=10).selectExpr(
'json_tuple(a, "a", "a' + special_character + '")'),
"JsonTuple",
conf={'spark.sql.parser.escapedStringLiterals': 'true'})
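# A minimal illustrative sketch, not part of the original tests and not collected by pytest:
# json_tuple extracts several top-level fields in one call, returning one column per requested
# key (c0, c1, ...), with null for keys that are missing. The helper name is an assumption
# for this example only.
def _example_json_tuple(spark):
    # Returns a single row with c0='1', c1='2', c2=null
    return spark.sql("""select json_tuple('{"a": "1", "b": "2"}', 'a', 'b', 'missing')""")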
| spark-rapids-branch-23.10 | integration_tests/src/main/python/json_tuple_test.py |
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_gpu_and_cpu_are_equal_collect
from data_gen import *
from spark_session import is_before_spark_340
from marks import allow_non_gpu, approximate_float
@pytest.mark.parametrize('data_gen', all_basic_gens + decimal_gens + array_gens_sample + map_gens_sample + struct_gens_sample, ids=idfn)
def test_simple_limit(data_gen):
assert_gpu_and_cpu_are_equal_collect(
# We need some processing after the limit to avoid a CollectLimitExec
lambda spark : unary_op_df(spark, data_gen, num_slices=1).limit(10).repartition(1),
conf = {'spark.sql.execution.sortBeforeRepartition': 'false'})
def offset_test_wrapper(sql, batch_size):
conf = {'spark.rapids.sql.exec.CollectLimitExec': 'true',
'spark.rapids.sql.batchSizeBytes': batch_size}
# Create dataframe to test CollectLimit
def spark_df(spark):
unary_op_df(spark, int_gen, length=2048, num_slices=1).createOrReplaceTempView("tmp_table")
return spark.sql(sql)
assert_gpu_and_cpu_are_equal_collect(spark_df, conf)
# Create dataframe to test GlobalLimit
def spark_df_repartition(spark):
return spark_df(spark).repartition(1)
assert_gpu_and_cpu_are_equal_collect(spark_df_repartition, conf)
@pytest.mark.parametrize('offset', [1024, 2048, 4096])
@pytest.mark.parametrize('batch_size', ['1000', '1g'])
@pytest.mark.skipif(is_before_spark_340(), reason='offset is introduced from Spark 3.4.0')
def test_non_zero_offset(offset, batch_size):
    # offset is used in test cases that have no limit, i.e. limit = -1
# 1024: offset < df.numRows
# 2048: offset = df.numRows
# 4096: offset > df.numRows
sql = "select * from tmp_table offset {}".format(offset)
offset_test_wrapper(sql, batch_size)
@pytest.mark.parametrize('limit, offset', [(0, 0), (0, 10), (1024, 500), (2048, 456), (3000, 111), (500, 500), (100, 600)])
@pytest.mark.parametrize('batch_size', ['1000', '1g'])
@pytest.mark.skipif(is_before_spark_340(), reason='offset is introduced from Spark 3.4.0')
@allow_non_gpu('ShuffleExchangeExec') # when limit = 0, ShuffleExchangeExec is not replaced.
def test_non_zero_offset_with_limit(limit, offset, batch_size):
    # In the CPU version of Spark, neither limit nor offset can be a negative number.
# Test case description:
# (0, 0): Corner case: both limit and offset are 0
# (0, 10): Corner case: limit = 0, offset > 0
# (1024, 500): offset < limit && limit < df.numRows
# (2048, 456): offset < limit && limit = df.numRows
# (3000, 111): offset < limit && limit > df.numRows
# (500, 500): offset = limit
# (100, 600): offset > limit
sql = "select * from tmp_table limit {} offset {}".format(limit, offset)
offset_test_wrapper(sql, batch_size)
@pytest.mark.parametrize('data_gen', all_gen, ids=idfn)
@pytest.mark.parametrize('limit, offset', [(0, 0), (0, 10), (1024, 500), (2048, 456), (3000, 111), (500, 500), (100, 600)])
@pytest.mark.parametrize('batch_size', ['1000', '1g'])
@pytest.mark.skipif(is_before_spark_340(), reason='offset is introduced from Spark 3.4.0')
@allow_non_gpu('ShuffleExchangeExec') # when limit = 0, ShuffleExchangeExec is not replaced.
@approximate_float
def test_order_by_offset_with_limit(limit, offset, data_gen, batch_size):
    # In the CPU version of Spark, neither limit nor offset can be a negative number.
# Test case description:
# (0, 0): Corner case: both limit and offset are 0
# (0, 10): Corner case: limit = 0, offset > 0
# (1024, 500): offset < limit && limit < df.numRows
# (2048, 456): offset < limit && limit = df.numRows
# (3000, 111): offset < limit && limit > df.numRows
# (500, 500): offset = limit
# (100, 600): offset > limit
def spark_df(spark):
unary_op_df(spark, data_gen).createOrReplaceTempView("tmp_table")
sql = "select * from tmp_table order by a limit {} offset {}".format(limit, offset)
return spark.sql(sql)
assert_gpu_and_cpu_are_equal_collect(spark_df, conf={'spark.rapids.sql.batchSizeBytes': batch_size})
| spark-rapids-branch-23.10 | integration_tests/src/main/python/limit_test.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from data_gen import *
from marks import *
from pyspark.sql.functions import *
from pyspark.sql.types import *
from spark_session import with_cpu_session, with_gpu_session
# mark the tests in this module as ci_1 for the mvn verify sanity check in pre-merge CI
pytestmark = pytest.mark.premerge_ci_1
def create_df(spark, data_gen, left_length, right_length):
left = binary_op_df(spark, data_gen, length=left_length)
right = binary_op_df(spark, data_gen, length=right_length).withColumnRenamed("a", "r_a")\
.withColumnRenamed("b", "r_b")
return left, right
@pytest.mark.parametrize('data_gen', [StringGen()], ids=idfn)
def test_explain_join(spark_tmp_path, data_gen):
data_path1 = spark_tmp_path + '/PARQUET_DATA1'
data_path2 = spark_tmp_path + '/PARQUET_DATA2'
def do_join_explain(spark):
left, right = create_df(spark, data_gen, 500, 500)
left.write.parquet(data_path1)
right.write.parquet(data_path2)
df1 = spark.read.parquet(data_path1)
df2 = spark.read.parquet(data_path2)
df3 = df1.join(df2, df1.a == df2.r_a, "inner")
explain_str = spark.sparkContext._jvm.com.nvidia.spark.rapids.ExplainPlan.explainPotentialGpuPlan(df3._jdf, "ALL")
remove_isnotnull = explain_str.replace("isnotnull", "")
# everything should be on GPU
assert "not" not in remove_isnotnull
with_cpu_session(do_join_explain)
def test_explain_set_config():
conf = {'spark.rapids.sql.hasExtendedYearValues': 'false',
'spark.rapids.sql.castStringToTimestamp.enabled': 'true'}
def do_explain(spark):
df = unary_op_df(spark, StringGen('[0-9]{1,4}-[0-9]{1,2}-[0-9]{1,2}')).select(f.col('a').cast(TimestampType()))
# a bit brittle if these get turned on by default
spark.conf.set('spark.rapids.sql.hasExtendedYearValues', 'false')
spark.conf.set('spark.rapids.sql.castStringToTimestamp.enabled', 'true')
explain_str = spark.sparkContext._jvm.com.nvidia.spark.rapids.ExplainPlan.explainPotentialGpuPlan(df._jdf, "ALL")
print(explain_str)
assert "timestamp) will run on GPU" in explain_str
spark.conf.set('spark.rapids.sql.castStringToTimestamp.enabled', 'false')
explain_str_cast_off = spark.sparkContext._jvm.com.nvidia.spark.rapids.ExplainPlan.explainPotentialGpuPlan(df._jdf, "ALL")
print(explain_str_cast_off)
assert "timestamp) cannot run on GPU" in explain_str_cast_off
with_cpu_session(do_explain)
def test_explain_udf():
slen = udf(lambda s: len(s), IntegerType())
@udf
def to_upper(s):
if s is not None:
return s.upper()
@udf(returnType=IntegerType())
def add_one(x):
if x is not None:
return x + 1
def do_explain(spark):
df = spark.createDataFrame([(1, "John Doe", 21)], ("id", "name", "age"))
df2 = df.select(slen("name").alias("slen(name)"), to_upper("name"), add_one("age"))
explain_str = spark.sparkContext._jvm.com.nvidia.spark.rapids.ExplainPlan.explainPotentialGpuPlan(df2._jdf, "ALL")
# udf shouldn't be on GPU
udf_str_not = 'cannot run on GPU because GPU does not currently support the operator class org.apache.spark.sql.execution.python.BatchEvalPythonExec'
assert udf_str_not in explain_str
not_on_gpu_str = spark.sparkContext._jvm.com.nvidia.spark.rapids.ExplainPlan.explainPotentialGpuPlan(df2._jdf, "NOT")
assert udf_str_not in not_on_gpu_str
assert "will run on GPU" not in not_on_gpu_str
with_cpu_session(do_explain)
@allow_non_gpu(any = True)
def test_explain_bucketed_scan(spark_tmp_table_factory):
"""
Test the physical plan includes the info of enabling bucketed scan.
The code is copied from:
https://github.com/apache/spark/commit/79515e4b6c#diff-03f119698c3637b87c9ce2634c34c14bb0f7efc043ea37a0891c1ab9fbc3ebadR688
"""
def do_explain(spark):
tbl_1 = spark_tmp_table_factory.get()
tbl_2 = spark_tmp_table_factory.get()
spark.createDataFrame([(1, 2), (2, 3)], ("i", "j")).write.bucketBy(8, "i").saveAsTable(tbl_1)
spark.createDataFrame([(2,), (3,)], ("i",)).write.bucketBy(8, "i").saveAsTable(tbl_2)
df1 = spark.table(tbl_1)
df2 = spark.table(tbl_2)
joined_df = df1.join(df2, df1.i == df2.i , "inner")
assert "Bucketed: true" in joined_df._sc._jvm.PythonSQLUtils.explainString(joined_df._jdf.queryExecution(), "simple")
with_gpu_session(do_explain, {"spark.sql.autoBroadcastJoinThreshold": "0"})
@allow_non_gpu(any = True)
def test_explain_bucket_column_not_read(spark_tmp_table_factory):
"""
Test the physical plan includes the info of disabling bucketed scan and the reason.
The code is copied from:
https://github.com/apache/spark/commit/79515e4b6c#diff-03f119698c3637b87c9ce2634c34c14bb0f7efc043ea37a0891c1ab9fbc3ebadR702
"""
def do_explain(spark):
tbl = spark_tmp_table_factory.get()
spark.createDataFrame([(1, 2), (2, 3)], ("i", "j")).write.bucketBy(8, "i").saveAsTable(tbl)
df = spark.table(tbl).select(f.col("j"))
assert "Bucketed: false (bucket column(s) not read)" in df._sc._jvm.PythonSQLUtils.explainString(df._jdf.queryExecution(), "simple")
with_gpu_session(do_explain)
@allow_non_gpu(any = True)
def test_explain_bucket_disabled_by_conf(spark_tmp_table_factory):
"""
Test the physical plan includes the info of disabling bucketed scan and the reason.
The code is copied from:
https://github.com/apache/spark/commit/79515e4b6c#diff-03f119698c3637b87c9ce2634c34c14bb0f7efc043ea37a0891c1ab9fbc3ebadR694
"""
def do_explain(spark):
tbl_1 = spark_tmp_table_factory.get()
tbl_2 = spark_tmp_table_factory.get()
spark.createDataFrame([(1, 2), (2, 3)], ("i", "j")).write.bucketBy(8, "i").saveAsTable(tbl_1)
spark.createDataFrame([(2,), (3,)], ("i",)).write.bucketBy(8, "i").saveAsTable(tbl_2)
df1 = spark.table(tbl_1)
df2 = spark.table(tbl_2)
joined_df = df1.join(df2, df1.i == df2.i , "inner")
assert "Bucketed: false (disabled by configuration)" in joined_df._sc._jvm.PythonSQLUtils.explainString(joined_df._jdf.queryExecution(), "simple")
with_gpu_session(do_explain, {"spark.sql.sources.bucketing.enabled": "false"})
@allow_non_gpu(any=True)
def test_explain_bucket_disabled_by_query_planner(spark_tmp_table_factory):
"""
Test the physical plan includes the info of disabling bucketed scan and the reason.
The code is copied from:
https://github.com/apache/spark/commit/79515e4b6c#diff-03f119698c3637b87c9ce2634c34c14bb0f7efc043ea37a0891c1ab9fbc3ebadR700
    This test will be skipped if the Spark version is before 3.1.0, because the attribute `disableBucketedScan` is not included in `GpuFileSourceScanExec` before 3.1.0.
"""
def do_explain(spark):
tbl = spark_tmp_table_factory.get()
spark.createDataFrame([(1, 2), (2, 3)], ("i", "j")).write.bucketBy(8, "i").saveAsTable(tbl)
df = spark.table(tbl)
assert "Bucketed: false (disabled by query planner)" in df._sc._jvm.PythonSQLUtils.explainString(df._jdf.queryExecution(), "simple")
with_gpu_session(do_explain)
| spark-rapids-branch-23.10 | integration_tests/src/main/python/explain_test.py |
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from _pytest.mark.structures import ParameterSet
from pyspark.sql.functions import broadcast, col
from pyspark.sql.types import *
from asserts import assert_gpu_and_cpu_are_equal_collect, assert_gpu_fallback_collect, assert_cpu_and_gpu_are_equal_collect_with_capture
from conftest import is_databricks_runtime, is_emr_runtime
from data_gen import *
from marks import ignore_order, allow_non_gpu, incompat, validate_execs_in_gpu_plan
from spark_session import with_cpu_session, is_before_spark_330, is_databricks_runtime
pytestmark = [pytest.mark.nightly_resource_consuming_test]
all_join_types = ['Left', 'Right', 'Inner', 'LeftSemi', 'LeftAnti', 'Cross', 'FullOuter']
all_gen = [StringGen(), ByteGen(), ShortGen(), IntegerGen(), LongGen(),
BooleanGen(), DateGen(), TimestampGen(), null_gen,
pytest.param(FloatGen(), marks=[incompat]),
pytest.param(DoubleGen(), marks=[incompat])] + orderable_decimal_gens
all_gen_no_nulls = [StringGen(nullable=False), ByteGen(nullable=False),
ShortGen(nullable=False), IntegerGen(nullable=False), LongGen(nullable=False),
BooleanGen(nullable=False), DateGen(nullable=False), TimestampGen(nullable=False),
pytest.param(FloatGen(nullable=False), marks=[incompat]),
pytest.param(DoubleGen(nullable=False), marks=[incompat])]
basic_struct_gen = StructGen([
['child' + str(ind), sub_gen]
for ind, sub_gen in enumerate([StringGen(), ByteGen(), ShortGen(), IntegerGen(), LongGen(),
BooleanGen(), DateGen(), TimestampGen(), null_gen, decimal_gen_64bit])],
nullable=True)
basic_struct_gen_with_no_null_child = StructGen([
['child' + str(ind), sub_gen]
for ind, sub_gen in enumerate([StringGen(nullable=False), ByteGen(nullable=False),
ShortGen(nullable=False), IntegerGen(nullable=False), LongGen(nullable=False),
BooleanGen(nullable=False), DateGen(nullable=False), TimestampGen(nullable=False)])],
nullable=True)
basic_struct_gen_with_floats = StructGen([['child0', FloatGen()], ['child1', DoubleGen()]], nullable=False)
nested_2d_struct_gens = StructGen([['child0', basic_struct_gen]], nullable=False)
nested_3d_struct_gens = StructGen([['child0', nested_2d_struct_gens]], nullable=False)
struct_gens = [basic_struct_gen, basic_struct_gen_with_no_null_child, nested_2d_struct_gens, nested_3d_struct_gens]
basic_nested_gens = single_level_array_gens + map_string_string_gen + [all_basic_struct_gen, binary_gen]
# data types supported by AST expressions in joins
join_ast_gen = [
boolean_gen, byte_gen, short_gen, int_gen, long_gen, date_gen, timestamp_gen
]
# data types not supported by AST expressions in joins
join_no_ast_gen = [
pytest.param(FloatGen(), marks=[incompat]), pytest.param(DoubleGen(), marks=[incompat]),
string_gen, null_gen, decimal_gen_64bit
]
# Types to use when running joins on small batches. Small batch joins can take a long time
# to run and are mostly redundant with the normal batch size test, so we only run these on a
# set of representative types rather than all types.
join_small_batch_gens = [ StringGen(), IntegerGen(), orderable_decimal_gen_128bit ]
cartesian_join_small_batch_gens = join_small_batch_gens + [basic_struct_gen, ArrayGen(string_gen)]
_sortmerge_join_conf = {'spark.sql.autoBroadcastJoinThreshold': '-1',
'spark.sql.join.preferSortMergeJoin': 'True',
'spark.sql.shuffle.partitions': '2',
}
# For spark to insert a shuffled hash join it has to be enabled with
# "spark.sql.join.preferSortMergeJoin" = "false" and both sides have to
# be larger than a broadcast hash join would want
# "spark.sql.autoBroadcastJoinThreshold", but one side has to be smaller
# than the number of splits * broadcast threshold and also be at least
# 3 times smaller than the other side. So it is not likely to happen
# unless we can give it some help.
_hash_join_conf = {'spark.sql.autoBroadcastJoinThreshold': '160',
'spark.sql.join.preferSortMergeJoin': 'false',
'spark.sql.shuffle.partitions': '2',
}
def create_df(spark, data_gen, left_length, right_length):
left = binary_op_df(spark, data_gen, length=left_length)
right = binary_op_df(spark, data_gen, length=right_length).withColumnRenamed("a", "r_a")\
.withColumnRenamed("b", "r_b")
return left, right
# Create a pair of dataframes with 2 columns each, where one column is a nested type that is
# passed along (but not used as a key) and the other can be used as the join key.
def create_ridealong_df(spark, key_data_gen, data_gen, left_length, right_length):
left = two_col_df(spark, key_data_gen, data_gen, length=left_length).withColumnRenamed("a", "key")
right = two_col_df(spark, key_data_gen, data_gen, length=right_length).withColumnRenamed("a", "r_key")\
.withColumnRenamed("b", "r_b")
return left, right
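# A minimal illustrative sketch, not part of the original tests and not collected by pytest:
# the frames returned by create_ridealong_df have columns (key, b) and (r_key, r_b), so the
# nested 'b'/'r_b' columns ride along through the join while only the key columns are compared.
def _example_ridealong_join(spark):
    left, right = create_ridealong_df(spark, short_gen, ArrayGen(string_gen), 50, 500)
    return left.join(right, left.key == right.r_key, 'Inner')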
# Takes a sequence of list-of-generator and batch size string pairs and returns the
# test parameters, using the batch size setting for each corresponding data generator.
def join_batch_size_test_params(*args):
params = []
for (data_gens, batch_size) in args:
for obj in data_gens:
if isinstance(obj, ParameterSet):
params += [ pytest.param(v, batch_size, marks=obj.marks) for v in obj.values ]
else:
params += [ pytest.param(obj, batch_size) ]
return params
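# A minimal illustrative sketch, not part of the original tests and not collected by pytest:
# join_batch_size_test_params pairs every generator with the batch size of its group while
# preserving any marks carried by pytest.param entries (e.g. incompat). The generator lists
# below are assumptions for the example only.
def _example_join_batch_size_params():
    # Expands to [pytest.param(int_gen, '1g'), pytest.param(long_gen, '1g'),
    #             pytest.param(string_gen, '100')]
    return join_batch_size_test_params(
        ([int_gen, long_gen], '1g'),
        ([string_gen], '100'))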
@ignore_order(local=True)
@pytest.mark.parametrize('join_type', ['Left', 'Inner', 'LeftSemi', 'LeftAnti'], ids=idfn)
@pytest.mark.parametrize("aqe_enabled", ["true", "false"], ids=idfn)
def test_right_broadcast_nested_loop_join_without_condition_empty(join_type, aqe_enabled):
def do_join(spark):
left, right = create_df(spark, long_gen, 50, 0)
return left.join(broadcast(right), how=join_type)
assert_gpu_and_cpu_are_equal_collect(do_join, conf={ "spark.sql.adaptive.enabled": aqe_enabled })
@ignore_order(local=True)
@pytest.mark.parametrize('join_type', ['Left', 'Inner', 'LeftSemi', 'LeftAnti'], ids=idfn)
@pytest.mark.parametrize("aqe_enabled", ["true", "false"], ids=idfn)
def test_left_broadcast_nested_loop_join_without_condition_empty(join_type, aqe_enabled):
def do_join(spark):
left, right = create_df(spark, long_gen, 0, 50)
return left.join(broadcast(right), how=join_type)
assert_gpu_and_cpu_are_equal_collect(do_join, conf={ "spark.sql.adaptive.enabled": aqe_enabled })
@ignore_order(local=True)
@pytest.mark.parametrize('join_type', ['Left', 'Inner', 'LeftSemi', 'LeftAnti'], ids=idfn)
@pytest.mark.parametrize("aqe_enabled", ["true", "false"], ids=idfn)
def test_broadcast_nested_loop_join_without_condition_empty(join_type, aqe_enabled):
def do_join(spark):
left, right = create_df(spark, long_gen, 0, 0)
return left.join(broadcast(right), how=join_type)
assert_gpu_and_cpu_are_equal_collect(do_join, conf={ "spark.sql.adaptive.enabled": aqe_enabled })
@ignore_order(local=True)
@pytest.mark.parametrize('join_type', ['Left', 'Inner', 'LeftSemi', 'LeftAnti'], ids=idfn)
def test_right_broadcast_nested_loop_join_without_condition_empty_small_batch(join_type):
def do_join(spark):
left, right = create_df(spark, long_gen, 50, 0)
return left.join(broadcast(right), how=join_type)
assert_gpu_and_cpu_are_equal_collect(do_join, conf={'spark.sql.adaptive.enabled': 'true'})
@ignore_order(local=True)
@pytest.mark.parametrize('join_type', ['Left', 'Right', 'Inner', 'LeftSemi', 'LeftAnti'], ids=idfn)
def test_empty_broadcast_hash_join(join_type):
def do_join(spark):
left, right = create_df(spark, long_gen, 50, 0)
return left.join(right.hint("broadcast"), left.a == right.r_a, join_type)
assert_gpu_and_cpu_are_equal_collect(do_join, conf={'spark.sql.adaptive.enabled': 'true'})
# local sort because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen,batch_size', join_batch_size_test_params(
(all_gen, '1g'),
(join_small_batch_gens, '1000')), ids=idfn)
@pytest.mark.parametrize('join_type', all_join_types, ids=idfn)
def test_sortmerge_join(data_gen, join_type, batch_size):
def do_join(spark):
left, right = create_df(spark, data_gen, 500, 500)
return left.join(right, left.a == right.r_a, join_type)
conf = copy_and_update(_sortmerge_join_conf, {'spark.rapids.sql.batchSizeBytes': batch_size})
assert_gpu_and_cpu_are_equal_collect(do_join, conf=conf)
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', basic_nested_gens + [decimal_gen_128bit], ids=idfn)
@pytest.mark.parametrize('join_type', all_join_types, ids=idfn)
def test_sortmerge_join_ridealong(data_gen, join_type):
def do_join(spark):
left, right = create_ridealong_df(spark, short_gen, data_gen, 500, 500)
return left.join(right, left.key == right.r_key, join_type)
assert_gpu_and_cpu_are_equal_collect(do_join, conf=_sortmerge_join_conf)
# For floating point values the normalization is done using a higher order function. We could probably work around this
# for now it falls back to the CPU
@allow_non_gpu('SortMergeJoinExec', 'SortExec', 'ArrayTransform', 'LambdaFunction',
'NamedLambdaVariable', 'NormalizeNaNAndZero', 'ShuffleExchangeExec', 'HashPartitioning')
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', single_level_array_gens + [binary_gen], ids=idfn)
@pytest.mark.parametrize('join_type', all_join_types, ids=idfn)
def test_sortmerge_join_wrong_key_fallback(data_gen, join_type):
def do_join(spark):
left, right = create_df(spark, data_gen, 500, 500)
return left.join(right, left.a == right.r_a, join_type)
assert_gpu_fallback_collect(do_join, 'SortMergeJoinExec', conf=_sortmerge_join_conf)
# For spark to insert a shuffled hash join it has to be enabled with
# "spark.sql.join.preferSortMergeJoin" = "false" and both sides have to
# be larger than a broadcast hash join would want
# "spark.sql.autoBroadcastJoinThreshold", but one side has to be smaller
# than the number of splits * broadcast threshold and also be at least
# 3 times smaller than the other side. So it is not likely to happen
# unless we can give it some help. Parameters are setup to try to make
# this happen, if test fails something might have changed related to that.
@validate_execs_in_gpu_plan('GpuShuffledHashJoinExec')
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', basic_nested_gens + [decimal_gen_128bit], ids=idfn)
@pytest.mark.parametrize('join_type', all_join_types, ids=idfn)
@pytest.mark.parametrize('sub_part_enabled', ['false', 'true'], ids=['SubPartition_OFF', 'SubPartition_ON'])
def test_hash_join_ridealong(data_gen, join_type, sub_part_enabled):
def do_join(spark):
left, right = create_ridealong_df(spark, short_gen, data_gen, 50, 500)
return left.join(right, left.key == right.r_key, join_type)
_all_conf = copy_and_update(_hash_join_conf, {
"spark.rapids.sql.test.subPartitioning.enabled": sub_part_enabled
})
assert_gpu_and_cpu_are_equal_collect(do_join, conf=_all_conf)
# local sort because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', all_gen, ids=idfn)
# Not all join types can be translated to a broadcast join, but this tests them to be sure we
# can handle what spark is doing
@pytest.mark.parametrize('join_type', all_join_types, ids=idfn)
def test_broadcast_join_right_table(data_gen, join_type):
def do_join(spark):
left, right = create_df(spark, data_gen, 500, 250)
return left.join(broadcast(right), left.a == right.r_a, join_type)
assert_gpu_and_cpu_are_equal_collect(do_join)
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', basic_nested_gens + [decimal_gen_128bit], ids=idfn)
# Not all join types can be translated to a broadcast join, but this tests them to be sure we
# can handle what spark is doing
@pytest.mark.parametrize('join_type', all_join_types, ids=idfn)
def test_broadcast_join_right_table_ridealong(data_gen, join_type):
def do_join(spark):
left, right = create_ridealong_df(spark, short_gen, data_gen, 500, 500)
return left.join(broadcast(right), left.key == right.r_key, join_type)
assert_gpu_and_cpu_are_equal_collect(do_join)
# local sort because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', all_gen, ids=idfn)
# Not all join types can be translated to a broadcast join, but this tests them to be sure we
# can handle what spark is doing
@pytest.mark.parametrize('join_type', all_join_types, ids=idfn)
def test_broadcast_join_right_table_with_job_group(data_gen, join_type):
with_cpu_session(lambda spark : spark.sparkContext.setJobGroup("testjob1", "test", False))
def do_join(spark):
left, right = create_df(spark, data_gen, 500, 250)
return left.join(broadcast(right), left.a == right.r_a, join_type)
assert_gpu_and_cpu_are_equal_collect(do_join)
# local sort because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.order(1) # at the head of xdist worker queue if pytest-order is installed
@pytest.mark.parametrize('data_gen,batch_size', join_batch_size_test_params(
(all_gen + basic_nested_gens, '1g'),
(join_small_batch_gens + [basic_struct_gen, ArrayGen(string_gen)], '100')), ids=idfn)
def test_cartesian_join(data_gen, batch_size):
def do_join(spark):
left, right = create_df(spark, data_gen, 50, 25)
return left.crossJoin(right)
assert_gpu_and_cpu_are_equal_collect(do_join, conf={'spark.rapids.sql.batchSizeBytes': batch_size})
# local sort because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.order(1) # at the head of xdist worker queue if pytest-order is installed
@pytest.mark.xfail(condition=is_databricks_runtime(),
reason='https://github.com/NVIDIA/spark-rapids/issues/334')
@pytest.mark.parametrize('batch_size', ['100', '1g'], ids=idfn) # set the batch size so we can test multiple stream batches
def test_cartesian_join_special_case_count(batch_size):
def do_join(spark):
left, right = create_df(spark, int_gen, 50, 25)
return left.crossJoin(right).selectExpr('COUNT(*)')
assert_gpu_and_cpu_are_equal_collect(do_join, conf={'spark.rapids.sql.batchSizeBytes': batch_size})
# local sort because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.order(1) # at the head of xdist worker queue if pytest-order is installed
@pytest.mark.xfail(condition=is_databricks_runtime(),
reason='https://github.com/NVIDIA/spark-rapids/issues/334')
@pytest.mark.parametrize('batch_size', ['1000', '1g'], ids=idfn) # set the batch size so we can test multiple stream batches
def test_cartesian_join_special_case_group_by_count(batch_size):
def do_join(spark):
left, right = create_df(spark, int_gen, 50, 25)
return left.crossJoin(right).groupBy('a').count()
assert_gpu_and_cpu_are_equal_collect(do_join, conf={'spark.rapids.sql.batchSizeBytes': batch_size})
# local sort because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.order(1) # at the head of xdist worker queue if pytest-order is installed
@pytest.mark.parametrize('data_gen,batch_size', join_batch_size_test_params(
(all_gen, '1g'),
(join_small_batch_gens, '100')), ids=idfn)
def test_cartesian_join_with_condition(data_gen, batch_size):
def do_join(spark):
left, right = create_df(spark, data_gen, 50, 25)
        # This test is impacted by https://github.com/NVIDIA/spark-rapids/issues/294
        # if the sizes are large enough (500 and 250) for both 0.0 and -0.0 to show up,
        # but those sizes take a long time to verify, so by default we run with smaller
        # numbers that do not expose the error
return left.join(right, left.b >= right.r_b, "cross")
conf = copy_and_update(_sortmerge_join_conf, {'spark.rapids.sql.batchSizeBytes': batch_size})
assert_gpu_and_cpu_are_equal_collect(do_join, conf=conf)
# local sort because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen,batch_size', join_batch_size_test_params(
(all_gen + basic_nested_gens, '1g'),
(join_small_batch_gens, '100')), ids=idfn)
def test_broadcast_nested_loop_join(data_gen, batch_size):
def do_join(spark):
left, right = create_df(spark, data_gen, 50, 25)
return left.crossJoin(broadcast(right))
assert_gpu_and_cpu_are_equal_collect(do_join, conf={'spark.rapids.sql.batchSizeBytes': batch_size})
# local sort because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('batch_size', ['100', '1g'], ids=idfn) # set the batch size so we can test multiple stream batches
def test_broadcast_nested_loop_join_special_case_count(batch_size):
def do_join(spark):
left, right = create_df(spark, int_gen, 50, 25)
return left.crossJoin(broadcast(right)).selectExpr('COUNT(*)')
assert_gpu_and_cpu_are_equal_collect(do_join, conf={'spark.rapids.sql.batchSizeBytes': batch_size})
# local sort because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.xfail(condition=is_databricks_runtime(),
reason='https://github.com/NVIDIA/spark-rapids/issues/334')
@pytest.mark.parametrize('batch_size', ['1000', '1g'], ids=idfn) # set the batch size so we can test multiple stream batches
def test_broadcast_nested_loop_join_special_case_group_by_count(batch_size):
def do_join(spark):
left, right = create_df(spark, int_gen, 50, 25)
return left.crossJoin(broadcast(right)).groupBy('a').count()
assert_gpu_and_cpu_are_equal_collect(do_join, conf={'spark.rapids.sql.batchSizeBytes': batch_size})
# local sort because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen,batch_size', join_batch_size_test_params(
(join_ast_gen, '1g'),
([int_gen], 100)), ids=idfn)
@pytest.mark.parametrize('join_type', ['Left', 'Inner', 'LeftSemi', 'LeftAnti', 'Cross'], ids=idfn)
def test_right_broadcast_nested_loop_join_with_ast_condition(data_gen, join_type, batch_size):
def do_join(spark):
left, right = create_df(spark, data_gen, 50, 25)
# This test is impacted by https://github.com/NVIDIA/spark-rapids/issues/294
# if the sizes are large enough to have both 0.0 and -0.0 show up 500 and 250
# but these take a long time to verify so we run with smaller numbers by default
# that do not expose the error
return left.join(broadcast(right), (left.b >= right.r_b), join_type)
assert_gpu_and_cpu_are_equal_collect(do_join, conf={'spark.rapids.sql.batchSizeBytes': batch_size})
# local sort because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', join_ast_gen, ids=idfn)
def test_left_broadcast_nested_loop_join_with_ast_condition(data_gen):
def do_join(spark):
left, right = create_df(spark, data_gen, 50, 25)
# This test is impacted by https://github.com/NVIDIA/spark-rapids/issues/294
# if the sizes are large enough to have both 0.0 and -0.0 show up 500 and 250
# but these take a long time to verify so we run with smaller numbers by default
# that do not expose the error
return broadcast(left).join(right, (left.b >= right.r_b), 'Right')
assert_gpu_and_cpu_are_equal_collect(do_join)
# local sort because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', [IntegerGen(), LongGen(), pytest.param(FloatGen(), marks=[incompat]), pytest.param(DoubleGen(), marks=[incompat])], ids=idfn)
@pytest.mark.parametrize('join_type', ['Inner', 'Cross'], ids=idfn)
def test_broadcast_nested_loop_join_with_condition_post_filter(data_gen, join_type):
def do_join(spark):
left, right = create_df(spark, data_gen, 50, 25)
# This test is impacted by https://github.com/NVIDIA/spark-rapids/issues/294
# if the sizes are large enough to have both 0.0 and -0.0 show up 500 and 250
# but these take a long time to verify so we run with smaller numbers by default
# that do not expose the error
# AST does not support cast or logarithm yet, so this must be implemented as a post-filter
return left.join(broadcast(right), left.a > f.log(right.r_a), join_type)
assert_gpu_and_cpu_are_equal_collect(do_join)
@allow_non_gpu('BroadcastExchangeExec', 'BroadcastNestedLoopJoinExec', 'Cast', 'GreaterThan', 'Log')
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', [IntegerGen(), LongGen(), pytest.param(FloatGen(), marks=[incompat]), pytest.param(DoubleGen(), marks=[incompat])], ids=idfn)
@pytest.mark.parametrize('join_type', ['Left', 'Right', 'FullOuter', 'LeftSemi', 'LeftAnti'], ids=idfn)
def test_broadcast_nested_loop_join_with_condition_fallback(data_gen, join_type):
def do_join(spark):
left, right = create_df(spark, data_gen, 50, 25)
# AST does not support cast or logarithm yet
return broadcast(left).join(right, left.a > f.log(right.r_a), join_type)
assert_gpu_fallback_collect(do_join, 'BroadcastNestedLoopJoinExec')
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', all_gen, ids=idfn)
@pytest.mark.parametrize('join_type', ['Left', 'LeftSemi', 'LeftAnti'], ids=idfn)
def test_right_broadcast_nested_loop_join_condition_missing(data_gen, join_type):
def do_join(spark):
left, right = create_df(spark, data_gen, 50, 25)
# This test is impacted by https://github.com/NVIDIA/spark-rapids/issues/294
# if the sizes are large enough to have both 0.0 and -0.0 show up 500 and 250
# but these take a long time to verify so we run with smaller numbers by default
# that do not expose the error
# Compute the distinct of the join result to verify the join produces a proper dataframe
# for downstream processing.
return left.join(broadcast(right), how=join_type).distinct()
assert_gpu_and_cpu_are_equal_collect(do_join)
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', all_gen, ids=idfn)
@pytest.mark.parametrize('join_type', ['Right'], ids=idfn)
def test_left_broadcast_nested_loop_join_condition_missing(data_gen, join_type):
def do_join(spark):
left, right = create_df(spark, data_gen, 50, 25)
# This test is impacted by https://github.com/NVIDIA/spark-rapids/issues/294
# if the sizes are large enough to have both 0.0 and -0.0 show up 500 and 250
# but these take a long time to verify so we run with smaller numbers by default
# that do not expose the error
# Compute the distinct of the join result to verify the join produces a proper dataframe
# for downstream processing.
return broadcast(left).join(right, how=join_type).distinct()
assert_gpu_and_cpu_are_equal_collect(do_join)
@pytest.mark.parametrize('data_gen', all_gen + single_level_array_gens + [binary_gen], ids=idfn)
@pytest.mark.parametrize('join_type', ['Left', 'LeftSemi', 'LeftAnti'], ids=idfn)
def test_right_broadcast_nested_loop_join_condition_missing_count(data_gen, join_type):
def do_join(spark):
left, right = create_df(spark, data_gen, 50, 25)
return left.join(broadcast(right), how=join_type).selectExpr('COUNT(*)')
assert_gpu_and_cpu_are_equal_collect(do_join)
@pytest.mark.parametrize('data_gen', all_gen + single_level_array_gens + [binary_gen], ids=idfn)
@pytest.mark.parametrize('join_type', ['Right'], ids=idfn)
def test_left_broadcast_nested_loop_join_condition_missing_count(data_gen, join_type):
def do_join(spark):
left, right = create_df(spark, data_gen, 50, 25)
return broadcast(left).join(right, how=join_type).selectExpr('COUNT(*)')
assert_gpu_and_cpu_are_equal_collect(do_join)
@allow_non_gpu('BroadcastExchangeExec', 'BroadcastNestedLoopJoinExec', 'GreaterThanOrEqual')
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', all_gen, ids=idfn)
@pytest.mark.parametrize('join_type', ['LeftOuter', 'LeftSemi', 'LeftAnti', 'FullOuter'], ids=idfn)
def test_broadcast_nested_loop_join_with_conditionals_build_left_fallback(data_gen, join_type):
def do_join(spark):
left, right = create_df(spark, data_gen, 50, 25)
return broadcast(left).join(right, (left.b >= right.r_b), join_type)
assert_gpu_fallback_collect(do_join, 'BroadcastNestedLoopJoinExec')
@allow_non_gpu('BroadcastExchangeExec', 'BroadcastNestedLoopJoinExec', 'GreaterThanOrEqual')
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', all_gen, ids=idfn)
@pytest.mark.parametrize('join_type', ['RightOuter', 'FullOuter'], ids=idfn)
def test_broadcast_nested_loop_with_conditionals_build_right_fallback(data_gen, join_type):
def do_join(spark):
left, right = create_df(spark, data_gen, 50, 25)
return left.join(broadcast(right), (left.b >= right.r_b), join_type)
assert_gpu_fallback_collect(do_join, 'BroadcastNestedLoopJoinExec')
# local sort because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', all_gen, ids=idfn)
# Not all join types can be translated to a broadcast join, but this tests all of them to be sure
# we can handle whatever Spark decides to do
@pytest.mark.parametrize('join_type', all_join_types, ids=idfn)
# Specify 200 shuffle partitions to test cases where streaming side is empty
# as in https://github.com/NVIDIA/spark-rapids/issues/7516
@pytest.mark.parametrize('shuffle_conf', [{}, {'spark.sql.shuffle.partitions': 200}], ids=idfn)
def test_broadcast_join_left_table(data_gen, join_type, shuffle_conf):
def do_join(spark):
left, right = create_df(spark, data_gen, 250, 500)
return broadcast(left).join(right, left.a == right.r_a, join_type)
assert_gpu_and_cpu_are_equal_collect(do_join, conf=shuffle_conf)
# local sort because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', join_ast_gen, ids=idfn)
@pytest.mark.parametrize('join_type', all_join_types, ids=idfn)
def test_broadcast_join_with_conditionals(data_gen, join_type):
def do_join(spark):
left, right = create_df(spark, data_gen, 500, 250)
return left.join(broadcast(right),
(left.a == right.r_a) & (left.b >= right.r_b), join_type)
assert_gpu_and_cpu_are_equal_collect(do_join)
# local sort because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@allow_non_gpu('BroadcastExchangeExec', 'BroadcastHashJoinExec', 'Cast', 'GreaterThan', 'Log', 'SortMergeJoinExec')
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', [long_gen], ids=idfn)
@pytest.mark.parametrize('join_type', ['Left', 'Right', 'FullOuter', 'LeftSemi', 'LeftAnti'], ids=idfn)
def test_broadcast_join_with_condition_ast_op_fallback(data_gen, join_type):
def do_join(spark):
left, right = create_df(spark, data_gen, 50, 25)
# AST does not support cast or logarithm yet
return left.join(broadcast(right),
(left.a == right.r_a) & (left.b > f.log(right.r_b)), join_type)
exec = 'SortMergeJoinExec' if join_type in ['Right', 'FullOuter'] else 'BroadcastHashJoinExec'
assert_gpu_fallback_collect(do_join, exec)
# local sort because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@allow_non_gpu('BroadcastExchangeExec', 'BroadcastHashJoinExec', 'Cast', 'GreaterThan', 'SortMergeJoinExec')
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', join_no_ast_gen, ids=idfn)
@pytest.mark.parametrize('join_type', ['Left', 'Right', 'FullOuter', 'LeftSemi', 'LeftAnti'], ids=idfn)
def test_broadcast_join_with_condition_ast_type_fallback(data_gen, join_type):
def do_join(spark):
left, right = create_df(spark, data_gen, 50, 25)
# AST does not support cast or logarithm yet
return left.join(broadcast(right),
(left.a == right.r_a) & (left.b > right.r_b), join_type)
exec = 'SortMergeJoinExec' if join_type in ['Right', 'FullOuter'] else 'BroadcastHashJoinExec'
assert_gpu_fallback_collect(do_join, exec)
# local sort because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', join_no_ast_gen, ids=idfn)
@pytest.mark.parametrize('join_type', ['Inner', 'Cross'], ids=idfn)
def test_broadcast_join_with_condition_post_filter(data_gen, join_type):
def do_join(spark):
left, right = create_df(spark, data_gen, 500, 250)
return left.join(broadcast(right),
(left.a == right.r_a) & (left.b > right.r_b), join_type)
assert_gpu_and_cpu_are_equal_collect(do_join)
# local sort because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', join_ast_gen, ids=idfn)
@pytest.mark.parametrize('join_type', ['Left', 'Right', 'Inner', 'FullOuter', 'LeftSemi', 'LeftAnti'], ids=idfn)
def test_sortmerge_join_with_condition_ast(data_gen, join_type):
def do_join(spark):
left, right = create_df(spark, data_gen, 500, 250)
return left.join(right, (left.a == right.r_a) & (left.b >= right.r_b), join_type)
assert_gpu_and_cpu_are_equal_collect(do_join, conf=_sortmerge_join_conf)
# local sort because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@allow_non_gpu('GreaterThan', 'Log', 'ShuffleExchangeExec', 'SortMergeJoinExec')
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', [long_gen], ids=idfn)
@pytest.mark.parametrize('join_type', ['Left', 'Right', 'FullOuter', 'LeftSemi', 'LeftAnti'], ids=idfn)
def test_sortmerge_join_with_condition_ast_op_fallback(data_gen, join_type):
def do_join(spark):
left, right = create_df(spark, data_gen, 500, 250)
# AST does not support cast or logarithm yet
return left.join(right, (left.a == right.r_a) & (left.b > f.log(right.r_b)), join_type)
assert_gpu_fallback_collect(do_join, 'SortMergeJoinExec', conf=_sortmerge_join_conf)
# local sort because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@allow_non_gpu('GreaterThan', 'ShuffleExchangeExec', 'SortMergeJoinExec')
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', join_no_ast_gen, ids=idfn)
@pytest.mark.parametrize('join_type', ['Left', 'Right', 'FullOuter', 'LeftSemi', 'LeftAnti'], ids=idfn)
def test_sortmerge_join_with_condition_ast_type_fallback(data_gen, join_type):
def do_join(spark):
left, right = create_df(spark, data_gen, 500, 250)
return left.join(right, (left.a == right.r_a) & (left.b > right.r_b), join_type)
assert_gpu_fallback_collect(do_join, 'SortMergeJoinExec', conf=_sortmerge_join_conf)
_mixed_df1_with_nulls = [('a', RepeatSeqGen(LongGen(nullable=(True, 20.0)), length= 10)),
('b', IntegerGen()), ('c', LongGen())]
_mixed_df2_with_nulls = [('a', RepeatSeqGen(LongGen(nullable=(True, 20.0)), length= 10)),
('b', StringGen()), ('c', BooleanGen())]
@ignore_order
@pytest.mark.parametrize('join_type', ['Left', 'Right', 'Inner', 'LeftSemi', 'LeftAnti', 'FullOuter', 'Cross'], ids=idfn)
def test_broadcast_join_mixed(join_type):
def do_join(spark):
left = gen_df(spark, _mixed_df1_with_nulls, length=500)
right = gen_df(spark, _mixed_df2_with_nulls, length=500).withColumnRenamed("a", "r_a")\
.withColumnRenamed("b", "r_b").withColumnRenamed("c", "r_c")
return left.join(broadcast(right), left.a.eqNullSafe(right.r_a), join_type)
assert_gpu_and_cpu_are_equal_collect(do_join)
@ignore_order
@allow_non_gpu('DataWritingCommandExec,ExecutedCommandExec,WriteFilesExec')
@pytest.mark.xfail(condition=is_emr_runtime(),
reason='https://github.com/NVIDIA/spark-rapids/issues/821')
@pytest.mark.parametrize('repartition', ["true", "false"], ids=idfn)
def test_join_bucketed_table(repartition, spark_tmp_table_factory):
def do_join(spark):
table_name = spark_tmp_table_factory.get()
data = [("http://fooblog.com/blog-entry-116.html", "https://fooblog.com/blog-entry-116.html"),
("http://fooblog.com/blog-entry-116.html", "http://fooblog.com/blog-entry-116.html")]
resolved = spark.sparkContext.parallelize(data).toDF(['Url','ResolvedUrl'])
feature_data = [("http://fooblog.com/blog-entry-116.html", "21")]
feature = spark.sparkContext.parallelize(feature_data).toDF(['Url','Count'])
feature.write.bucketBy(400, 'Url').sortBy('Url').format('parquet').mode('overwrite')\
.saveAsTable(table_name)
testurls = spark.sql("SELECT Url, Count FROM {}".format(table_name))
if (repartition == "true"):
return testurls.repartition(20).join(resolved, "Url", "inner")
else:
return testurls.join(resolved, "Url", "inner")
assert_gpu_and_cpu_are_equal_collect(do_join, conf={'spark.sql.autoBroadcastJoinThreshold': '-1'})
# Because we disable ShuffleExchangeExec in some cases we need to allow it to not be on the GPU,
# and we do the result sorting in Python to avoid that sort's shuffle also ending up off the GPU
@allow_non_gpu('ShuffleExchangeExec', 'HashPartitioning')
@ignore_order(local=True)
@pytest.mark.parametrize('join_type', ['Left', 'Right', 'Inner', 'LeftSemi', 'LeftAnti'], ids=idfn)
@pytest.mark.parametrize('cache_side', ['cache_left', 'cache_right'], ids=idfn)
@pytest.mark.parametrize('cpu_side', ['cache', 'not_cache'], ids=idfn)
def test_half_cache_join(join_type, cache_side, cpu_side):
left_gen = [('a', SetValuesGen(LongType(), range(500))), ('b', IntegerGen())]
right_gen = [('r_a', SetValuesGen(LongType(), range(500))), ('c', LongGen())]
def do_join(spark):
        # Try to force the shuffle to be split between CPU and GPU for the join,
        # so control whether the shuffle runs on the GPU or the CPU (depending on how the test is
        # configured) when we repartition and cache the data
spark.conf.set('spark.rapids.sql.exec.ShuffleExchangeExec', cpu_side != 'cache')
left = gen_df(spark, left_gen, length=500)
right = gen_df(spark, right_gen, length=500)
if (cache_side == 'cache_left'):
# Try to force the shuffle to be split between CPU and GPU for the join
# by default if the operation after the shuffle is not on the GPU then
# don't do a GPU shuffle, so do something simple after the repartition
# to make sure that the GPU shuffle is used.
left = left.repartition('a').selectExpr('b + 1 as b', 'a').cache()
left.count() # populate the cache
else:
            # cache_right
# Try to force the shuffle to be split between CPU and GPU for the join
# by default if the operation after the shuffle is not on the GPU then
# don't do a GPU shuffle, so do something simple after the repartition
# to make sure that the GPU shuffle is used.
right = right.repartition('r_a').selectExpr('c + 1 as c', 'r_a').cache()
right.count() # populate the cache
        # Now turn it back so the other half of the shuffle will be on the opposite side
spark.conf.set('spark.rapids.sql.exec.ShuffleExchangeExec', cpu_side == 'cache')
return left.join(right, left.a == right.r_a, join_type)
    # Even though Spark does not know the size of an RDD input (and so will not do a broadcast join
    # unless we tell it to), setting the threshold here is just to be safe
assert_gpu_and_cpu_are_equal_collect(do_join, {'spark.sql.autoBroadcastJoinThreshold': '1'})
# local sort because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', struct_gens, ids=idfn)
@pytest.mark.parametrize('join_type', ['Inner', 'Left', 'Right', 'Cross', 'LeftSemi', 'LeftAnti'], ids=idfn)
def test_sortmerge_join_struct_as_key(data_gen, join_type):
def do_join(spark):
left, right = create_df(spark, data_gen, 500, 250)
return left.join(right, left.a == right.r_a, join_type)
assert_gpu_and_cpu_are_equal_collect(do_join, conf=_sortmerge_join_conf)
# local sort because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', struct_gens, ids=idfn)
@pytest.mark.parametrize('join_type', ['Inner', 'Left', 'Right', 'Cross', 'LeftSemi', 'LeftAnti'], ids=idfn)
def test_sortmerge_join_struct_mixed_key(data_gen, join_type):
def do_join(spark):
left = two_col_df(spark, data_gen, int_gen, length=500)
right = two_col_df(spark, data_gen, int_gen, length=500)
return left.join(right, (left.a == right.a) & (left.b == right.b), join_type)
assert_gpu_and_cpu_are_equal_collect(do_join, conf=_sortmerge_join_conf)
# local sort because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', struct_gens, ids=idfn)
@pytest.mark.parametrize('join_type', ['Inner', 'Left', 'Right', 'Cross', 'LeftSemi', 'LeftAnti'], ids=idfn)
def test_sortmerge_join_struct_mixed_key_with_null_filter(data_gen, join_type):
def do_join(spark):
left = two_col_df(spark, data_gen, int_gen, length=500)
right = two_col_df(spark, data_gen, int_gen, length=500)
return left.join(right, (left.a == right.a) & (left.b == right.b), join_type)
# Disable constraintPropagation to test null filter on built table with nullable structures.
conf = {'spark.sql.constraintPropagation.enabled': 'false', **_sortmerge_join_conf}
assert_gpu_and_cpu_are_equal_collect(do_join, conf=conf)
# local sort because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', struct_gens, ids=idfn)
@pytest.mark.parametrize('join_type', ['Inner', 'Left', 'Right', 'Cross', 'LeftSemi', 'LeftAnti'], ids=idfn)
def test_broadcast_join_right_struct_as_key(data_gen, join_type):
def do_join(spark):
left, right = create_df(spark, data_gen, 500, 250)
return left.join(broadcast(right), left.a == right.r_a, join_type)
assert_gpu_and_cpu_are_equal_collect(do_join)
# local sort because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', struct_gens, ids=idfn)
@pytest.mark.parametrize('join_type', ['Inner', 'Left', 'Right', 'Cross', 'LeftSemi', 'LeftAnti'], ids=idfn)
def test_broadcast_join_right_struct_mixed_key(data_gen, join_type):
def do_join(spark):
left = two_col_df(spark, data_gen, int_gen, length=500)
right = two_col_df(spark, data_gen, int_gen, length=250)
return left.join(broadcast(right), (left.a == right.a) & (left.b == right.b), join_type)
assert_gpu_and_cpu_are_equal_collect(do_join)
# local sort because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.xfail(reason='https://github.com/NVIDIA/spark-rapids/issues/2140')
@pytest.mark.parametrize('data_gen', [basic_struct_gen_with_floats], ids=idfn)
@pytest.mark.parametrize('join_type', ['Inner', 'Left', 'Right', 'Cross', 'LeftSemi', 'LeftAnti'], ids=idfn)
def test_sortmerge_join_struct_with_floats_key(data_gen, join_type):
def do_join(spark):
left, right = create_df(spark, data_gen, 500, 250)
return left.join(right, left.a == right.r_a, join_type)
assert_gpu_and_cpu_are_equal_collect(do_join, conf=_sortmerge_join_conf)
@allow_non_gpu('SortMergeJoinExec', 'SortExec', 'NormalizeNaNAndZero', 'CreateNamedStruct',
'GetStructField', 'Literal', 'If', 'IsNull', 'ShuffleExchangeExec', 'HashPartitioning')
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', struct_gens, ids=idfn)
@pytest.mark.parametrize('join_type', ['FullOuter'], ids=idfn)
def test_sortmerge_join_struct_as_key_fallback(data_gen, join_type):
def do_join(spark):
left, right = create_df(spark, data_gen, 500, 500)
return left.join(right, left.a == right.r_a, join_type)
assert_gpu_fallback_collect(do_join, 'SortMergeJoinExec', conf=_sortmerge_join_conf)
# Regression test for https://github.com/NVIDIA/spark-rapids/issues/3775
@ignore_order(local=True)
def test_struct_self_join(spark_tmp_table_factory):
def do_join(spark):
data = [
(("Adam ", "", "Green"), "1", "M", 1000),
(("Bob ", "Middle", "Green"), "2", "M", 2000),
(("Cathy ", "", "Green"), "3", "F", 3000)
]
schema = (StructType()
.add("name", StructType()
.add("firstname", StringType())
.add("middlename", StringType())
.add("lastname", StringType()))
.add("id", StringType())
.add("gender", StringType())
.add("salary", IntegerType()))
df = spark.createDataFrame(spark.sparkContext.parallelize(data), schema)
df_name = spark_tmp_table_factory.get()
df.createOrReplaceTempView(df_name)
resultdf = spark.sql(
"select struct(name, struct(name.firstname, name.lastname) as newname)" +
" as col,name from " + df_name + " union" +
" select struct(name, struct(name.firstname, name.lastname) as newname) as col,name" +
" from " + df_name)
resultdf_name = spark_tmp_table_factory.get()
resultdf.createOrReplaceTempView(resultdf_name)
return spark.sql("select a.* from {} a, {} b where a.name=b.name".format(
resultdf_name, resultdf_name))
assert_gpu_and_cpu_are_equal_collect(do_join)
# ExistenceJoin occurs in the context of existential subqueries (which are normally rewritten to a
# SemiJoin) when there is an additional condition that may qualify left records even though they
# don't have join partner records from the right.
#
# Thus a query is rewritten roughly as a LeftOuter join with an additional Boolean column "exists"
# added, which feeds into a filter "exists OR someOtherPredicate".
# If the condition is something like an AND, the result becomes a subset of a SemiJoin, and
# the optimizer won't use ExistenceJoin. A minimal sketch of this query shape follows the test below.
@ignore_order(local=True)
@pytest.mark.parametrize('numComplementsToExists', [0, 1, 2], ids=(lambda val: f"complements:{val}"))
@pytest.mark.parametrize('aqeEnabled', [
pytest.param(False, id='aqe:off'),
# workaround: somehow AQE retains RDDScanExec preventing parent ShuffleExchangeExec
# from being executed on GPU
# pytest.param(True, marks=pytest.mark.allow_non_gpu('ShuffleExchangeExec'), id='aqe:on')
])
@pytest.mark.parametrize('conditionalJoin', [False, True], ids=['ast:off', 'ast:on'])
@pytest.mark.parametrize('forceBroadcastHashJoin', [False, True], ids=['broadcastHJ:off', 'broadcastHJ:on'])
def test_existence_join(numComplementsToExists, aqeEnabled, conditionalJoin, forceBroadcastHashJoin, spark_tmp_table_factory):
leftTable = spark_tmp_table_factory.get()
rightTable = spark_tmp_table_factory.get()
def do_join(spark):
# create non-overlapping ranges to have a mix of exists=true and exists=false
# left-hand side rows
lhs_upper_bound = 10
lhs_data = list((f"left_{v}", v * 10, v * 100) for v in range(2, lhs_upper_bound))
# duplicate without a match
lhs_data.append(('left_1', 10, 100))
# duplicate with a match
lhs_data.append(('left_2', 20, 200))
lhs_data.append(('left_null', None, None))
df_left = spark.createDataFrame(lhs_data)
df_left.createOrReplaceTempView(leftTable)
rhs_data = list((f"right_{v}", v * 10, v * 100) for v in range(0, 8))
rhs_data.append(('right_null', None, None))
# duplicate every row in the rhs to verify it does not affect
# the number of output rows, which should be equal to the left table row count
rhs_data_with_dupes=[]
for dupe in rhs_data:
rhs_data_with_dupes.extend([dupe, dupe])
df_right = spark.createDataFrame(rhs_data_with_dupes)
df_right.createOrReplaceTempView(rightTable)
cond = "<=" if conditionalJoin else "="
res = spark.sql((
"select * "
"from {} as l "
f"where l._2 >= {10 * (lhs_upper_bound - numComplementsToExists)}"
" or exists (select * from {} as r where r._2 = l._2 and r._3 {} l._3)"
).format(leftTable, rightTable, cond))
return res
existenceJoinRegex = r"ExistenceJoin\(exists#[0-9]+\),"
if conditionalJoin:
existenceJoinRegex = existenceJoinRegex + r" \(.+ <= .+\)"
if forceBroadcastHashJoin:
        # hints don't work with ExistenceJoin, so force a broadcast hash join
        # by raising the threshold above the estimated size of the right-side output
bhjThreshold = "9223372036854775807b"
existenceJoinRegex = r'BroadcastHashJoin .* ' + existenceJoinRegex
else:
bhjThreshold = "-1b"
assert_cpu_and_gpu_are_equal_collect_with_capture(do_join, existenceJoinRegex,
conf={
"spark.sql.adaptive.enabled": aqeEnabled,
"spark.sql.autoBroadcastJoinThreshold": bhjThreshold
})
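# A minimal, illustrative sketch (not used by the tests) of the ExistenceJoin query shape described
# above test_existence_join; the table names and the literal predicate are placeholders only. The
# EXISTS subquery OR-ed with another predicate is what lets left rows without a right-side match
# still qualify, which is why Spark plans it as an ExistenceJoin rather than a plain SemiJoin.
def _existence_join_query_sketch(left_table, right_table):
    return ("select * from {l} as l "
            "where l._2 >= 100 "
            "   or exists (select * from {r} as r where r._2 = l._2)"
            ).format(l=left_table, r=right_table)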
@ignore_order
@pytest.mark.parametrize('aqeEnabled', [True, False], ids=['aqe:on', 'aqe:off'])
def test_existence_join_in_broadcast_nested_loop_join(spark_tmp_table_factory, aqeEnabled):
left_table_name = spark_tmp_table_factory.get()
right_table_name = spark_tmp_table_factory.get()
def do_join(spark):
gen = LongGen(min_val=0, max_val=5)
left_df = binary_op_df(spark, gen)
left_df.createOrReplaceTempView(left_table_name)
right_df = binary_op_df(spark, gen)
right_df.createOrReplaceTempView(right_table_name)
return spark.sql(("select * "
"from {} as l "
"where l.a >= 3 "
" or exists (select * from {} as r where l.b < r.b)"
).format(left_table_name, right_table_name))
capture_regexp = r"GpuBroadcastNestedLoopJoin ExistenceJoin\(exists#[0-9]+\),"
assert_cpu_and_gpu_are_equal_collect_with_capture(do_join, capture_regexp,
conf={"spark.sql.adaptive.enabled": aqeEnabled})
@ignore_order
@pytest.mark.parametrize('aqeEnabled', [True, False], ids=['aqe:on', 'aqe:off'])
def test_degenerate_broadcast_nested_loop_existence_join(spark_tmp_table_factory, aqeEnabled):
left_table_name = spark_tmp_table_factory.get()
right_table_name = spark_tmp_table_factory.get()
def do_join(spark):
gen = LongGen(min_val=0, max_val=5)
left_df = binary_op_df(spark, gen)
left_df.createOrReplaceTempView(left_table_name)
right_df = binary_op_df(spark, gen)
right_df.createOrReplaceTempView(right_table_name)
return spark.sql(("select * "
"from {} as l "
"where l.a >= 3 "
" or exists (select * from {} as r where l.b < l.a)"
).format(left_table_name, right_table_name))
capture_regexp = r"GpuBroadcastNestedLoopJoin ExistenceJoin\(exists#[0-9]+\),"
assert_cpu_and_gpu_are_equal_collect_with_capture(do_join, capture_regexp,
conf={"spark.sql.adaptive.enabled": aqeEnabled})
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', [StringGen(), IntegerGen()], ids=idfn)
@pytest.mark.parametrize("aqe_enabled", [True, False], ids=idfn)
@pytest.mark.parametrize("join_reorder_enabled", [True, False], ids=idfn)
def test_multi_table_hash_join(data_gen, aqe_enabled, join_reorder_enabled):
def do_join(spark):
t1 = binary_op_df(spark, data_gen, length=1000)
t2 = binary_op_df(spark, data_gen, length=800)
t3 = binary_op_df(spark, data_gen, length=300)
t4 = binary_op_df(spark, data_gen, length=50)
return t1.join(t2, t1.a == t2.a, 'Inner') \
.join(t3, t2.a == t3.a, 'Inner') \
.join(t4, t3.a == t4.a, 'Inner')
conf = copy_and_update(_hash_join_conf, {
'spark.sql.adaptive.enabled': aqe_enabled,
'spark.rapids.sql.optimizer.joinReorder.enabled': join_reorder_enabled
})
assert_gpu_and_cpu_are_equal_collect(do_join, conf=conf)
limited_integral_gens = [byte_gen, ShortGen(max_val=BYTE_MAX), IntegerGen(max_val=BYTE_MAX), LongGen(max_val=BYTE_MAX)]
@validate_execs_in_gpu_plan('GpuShuffledHashJoinExec')
@ignore_order(local=True)
@pytest.mark.parametrize('left_gen', limited_integral_gens, ids=idfn)
@pytest.mark.parametrize('right_gen', limited_integral_gens, ids=idfn)
@pytest.mark.parametrize('join_type', all_join_types, ids=idfn)
def test_hash_join_different_key_integral_types(left_gen, right_gen, join_type):
def do_join(spark):
left = unary_op_df(spark, left_gen, length=50)
right = unary_op_df(spark, right_gen, length=500)
return left.join(right, left.a == right.a, join_type)
_all_conf = copy_and_update(_hash_join_conf, {
"spark.rapids.sql.test.subPartitioning.enabled": True
})
assert_gpu_and_cpu_are_equal_collect(do_join, conf=_all_conf)
bloom_filter_confs = {
"spark.sql.autoBroadcastJoinThreshold": "1",
"spark.sql.optimizer.runtime.bloomFilter.applicationSideScanSizeThreshold": 1,
"spark.sql.optimizer.runtime.bloomFilter.creationSideThreshold": "100GB",
"spark.sql.optimizer.runtime.bloomFilter.enabled": "true"
}
def check_bloom_filter_join(confs, expected_classes, is_multi_column):
def do_join(spark):
if is_multi_column:
left = spark.range(100000).withColumn("second_id", col("id") % 5)
right = spark.range(10).withColumn("id2", col("id").cast("string")).withColumn("second_id", col("id") % 5)
return right.filter("cast(id2 as bigint) % 3 = 0").join(left, (left.id == right.id) & (left.second_id == right.second_id), "inner")
else:
left = spark.range(100000)
right = spark.range(10).withColumn("id2", col("id").cast("string"))
return right.filter("cast(id2 as bigint) % 3 = 0").join(left, left.id == right.id, "inner")
all_confs = copy_and_update(bloom_filter_confs, confs)
assert_cpu_and_gpu_are_equal_collect_with_capture(do_join, expected_classes, conf=all_confs)
@ignore_order(local=True)
@pytest.mark.parametrize("batch_size", ['1g', '1000'], ids=idfn)
@pytest.mark.parametrize("is_multi_column", [False, True], ids=idfn)
@pytest.mark.skipif(is_databricks_runtime(), reason="https://github.com/NVIDIA/spark-rapids/issues/8921")
@pytest.mark.skipif(is_before_spark_330(), reason="Bloom filter joins added in Spark 3.3.0")
def test_bloom_filter_join(batch_size, is_multi_column):
conf = {"spark.rapids.sql.batchSizeBytes": batch_size}
check_bloom_filter_join(confs=conf,
expected_classes="GpuBloomFilterMightContain,GpuBloomFilterAggregate",
is_multi_column=is_multi_column)
@allow_non_gpu("FilterExec", "ShuffleExchangeExec")
@ignore_order(local=True)
@pytest.mark.parametrize("is_multi_column", [False, True], ids=idfn)
@pytest.mark.skipif(is_databricks_runtime(), reason="https://github.com/NVIDIA/spark-rapids/issues/8921")
@pytest.mark.skipif(is_before_spark_330(), reason="Bloom filter joins added in Spark 3.3.0")
def test_bloom_filter_join_cpu_probe(is_multi_column):
conf = {"spark.rapids.sql.expression.BloomFilterMightContain": "false"}
check_bloom_filter_join(confs=conf,
expected_classes="BloomFilterMightContain,GpuBloomFilterAggregate",
is_multi_column=is_multi_column)
@allow_non_gpu("ObjectHashAggregateExec", "ShuffleExchangeExec")
@ignore_order(local=True)
@pytest.mark.parametrize("is_multi_column", [False, True], ids=idfn)
@pytest.mark.skipif(is_databricks_runtime(), reason="https://github.com/NVIDIA/spark-rapids/issues/8921")
@pytest.mark.skipif(is_before_spark_330(), reason="Bloom filter joins added in Spark 3.3.0")
def test_bloom_filter_join_cpu_build(is_multi_column):
conf = {"spark.rapids.sql.expression.BloomFilterAggregate": "false"}
check_bloom_filter_join(confs=conf,
expected_classes="GpuBloomFilterMightContain,BloomFilterAggregate",
is_multi_column=is_multi_column)
@allow_non_gpu("ObjectHashAggregateExec", "ProjectExec", "ShuffleExchangeExec")
@ignore_order(local=True)
@pytest.mark.parametrize("agg_replace_mode", ["partial", "final"])
@pytest.mark.parametrize("is_multi_column", [False, True], ids=idfn)
@pytest.mark.skipif(is_databricks_runtime(), reason="https://github.com/NVIDIA/spark-rapids/issues/8921")
@pytest.mark.skipif(is_before_spark_330(), reason="Bloom filter joins added in Spark 3.3.0")
def test_bloom_filter_join_split_cpu_build(agg_replace_mode, is_multi_column):
conf = {"spark.rapids.sql.hashAgg.replaceMode": agg_replace_mode}
check_bloom_filter_join(confs=conf,
expected_classes="GpuBloomFilterMightContain,BloomFilterAggregate,GpuBloomFilterAggregate",
is_multi_column=is_multi_column)
@ignore_order(local=True)
@pytest.mark.skipif(is_databricks_runtime(), reason="https://github.com/NVIDIA/spark-rapids/issues/8921")
@pytest.mark.skipif(is_before_spark_330(), reason="Bloom filter joins added in Spark 3.3.0")
def test_bloom_filter_join_with_merge_some_null_filters(spark_tmp_path):
data_path1 = spark_tmp_path + "/BLOOM_JOIN_DATA1"
data_path2 = spark_tmp_path + "/BLOOM_JOIN_DATA2"
with_cpu_session(lambda spark: spark.range(100000).coalesce(1).write.parquet(data_path1))
with_cpu_session(lambda spark: spark.range(100000).withColumn("id2", col("id").cast("string"))\
.coalesce(1).write.parquet(data_path2))
confs = copy_and_update(bloom_filter_confs,
{"spark.sql.files.maxPartitionBytes": "1000"})
def do_join(spark):
left = spark.read.parquet(data_path1)
right = spark.read.parquet(data_path2)
return right.filter("cast(id2 as bigint) % 3 = 0").join(left, left.id == right.id, "inner")
assert_gpu_and_cpu_are_equal_collect(do_join, confs)
@ignore_order(local=True)
@pytest.mark.skipif(is_databricks_runtime(), reason="https://github.com/NVIDIA/spark-rapids/issues/8921")
@pytest.mark.skipif(is_before_spark_330(), reason="Bloom filter joins added in Spark 3.3.0")
def test_bloom_filter_join_with_merge_all_null_filters(spark_tmp_path):
data_path1 = spark_tmp_path + "/BLOOM_JOIN_DATA1"
data_path2 = spark_tmp_path + "/BLOOM_JOIN_DATA2"
with_cpu_session(lambda spark: spark.range(100000).write.parquet(data_path1))
with_cpu_session(lambda spark: spark.range(100000).withColumn("id2", col("id").cast("string")) \
.write.parquet(data_path2))
def do_join(spark):
left = spark.read.parquet(data_path1)
right = spark.read.parquet(data_path2)
return right.filter("cast(id2 as bigint) % 3 = 4").join(left, left.id == right.id, "inner")
assert_gpu_and_cpu_are_equal_collect(do_join, bloom_filter_confs)
| spark-rapids-branch-23.10 | integration_tests/src/main/python/join_test.py |
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_gpu_and_cpu_are_equal_collect, assert_gpu_fallback_collect, assert_gpu_and_cpu_error
from data_gen import *
from datetime import date, datetime, timezone
from marks import ignore_order, incompat, allow_non_gpu
from pyspark.sql.types import *
from spark_session import with_cpu_session, is_before_spark_330, is_before_spark_350
import pyspark.sql.functions as f
# We only support literal intervals for TimeSub
vals = [(-584, 1563), (1943, 1101), (2693, 2167), (2729, 0), (44, 1534), (2635, 3319),
(1885, -2828), (0, 2463), (932, 2286), (0, 0)]
@pytest.mark.parametrize('data_gen', vals, ids=idfn)
def test_timesub(data_gen):
days, seconds = data_gen
assert_gpu_and_cpu_are_equal_collect(
# We are starting at year 0015 to make sure we don't go before year 0001 while doing TimeSub
lambda spark: unary_op_df(spark, TimestampGen(start=datetime(15, 1, 1, tzinfo=timezone.utc)), seed=1)
.selectExpr("a - (interval {} days {} seconds)".format(days, seconds)))
@pytest.mark.parametrize('data_gen', vals, ids=idfn)
def test_timeadd(data_gen):
days, seconds = data_gen
assert_gpu_and_cpu_are_equal_collect(
# We are starting at year 0005 to make sure we don't go before year 0001
# and beyond year 10000 while doing TimeAdd
lambda spark: unary_op_df(spark, TimestampGen(start=datetime(5, 1, 1, tzinfo=timezone.utc), end=datetime(15, 1, 1, tzinfo=timezone.utc)), seed=1)
.selectExpr("a + (interval {} days {} seconds)".format(days, seconds)))
@pytest.mark.skipif(is_before_spark_330(), reason='DayTimeInterval is not supported before Pyspark 3.3.0')
def test_timeadd_daytime_column():
gen_list = [
# timestamp column max year is 1000
('t', TimestampGen(end=datetime(1000, 1, 1, tzinfo=timezone.utc))),
        # the max interval is 8000 years' worth of days, so the added result will not be out of range
('d', DayTimeIntervalGen(min_value=timedelta(days=0), max_value=timedelta(days=8000 * 365)))]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, gen_list).selectExpr("t + d", "t + INTERVAL '1 02:03:04' DAY TO SECOND"))
@pytest.mark.skipif(is_before_spark_350(), reason='DayTimeInterval overflow check for seconds is not supported before Spark 3.5.0')
def test_interval_seconds_overflow_exception():
assert_gpu_and_cpu_error(
lambda spark : spark.sql(""" select cast("interval '10 01:02:69' day to second" as interval day to second) """).collect(),
conf={},
error_message="IllegalArgumentException")
@pytest.mark.parametrize('data_gen', vals, ids=idfn)
def test_timeadd_from_subquery(data_gen):
def fun(spark):
df = unary_op_df(spark, TimestampGen(start=datetime(5, 1, 1, tzinfo=timezone.utc), end=datetime(15, 1, 1, tzinfo=timezone.utc)), seed=1)
df.createOrReplaceTempView("testTime")
spark.sql("select a, ((select max(a) from testTime) + interval 1 day) as datePlus from testTime").createOrReplaceTempView("testTime2")
return spark.sql("select * from testTime2 where datePlus > current_timestamp")
assert_gpu_and_cpu_are_equal_collect(fun)
@pytest.mark.parametrize('data_gen', vals, ids=idfn)
def test_timesub_from_subquery(data_gen):
def fun(spark):
df = unary_op_df(spark, TimestampGen(start=datetime(5, 1, 1, tzinfo=timezone.utc), end=datetime(15, 1, 1, tzinfo=timezone.utc)), seed=1)
df.createOrReplaceTempView("testTime")
spark.sql("select a, ((select min(a) from testTime) - interval 1 day) as dateMinus from testTime").createOrReplaceTempView("testTime2")
return spark.sql("select * from testTime2 where dateMinus < current_timestamp")
assert_gpu_and_cpu_are_equal_collect(fun)
# Should specify `spark.sql.legacy.interval.enabled` to test `DateAddInterval` after Spark 3.2.0,
# refer to https://issues.apache.org/jira/browse/SPARK-34896
# [SPARK-34896][SQL] Return day-time interval from dates subtraction
# 1. Add the SQL config `spark.sql.legacy.interval.enabled` which will control when Spark SQL should use `CalendarIntervalType` instead of ANSI intervals.
@pytest.mark.parametrize('data_gen', vals, ids=idfn)
def test_dateaddinterval(data_gen):
days, seconds = data_gen
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, DateGen(start=date(200, 1, 1), end=date(800, 1, 1)), seed=1)
.selectExpr('a + (interval {} days {} seconds)'.format(days, seconds),
'a - (interval {} days {} seconds)'.format(days, seconds)),
legacy_interval_enabled_conf)
# test adding only days (not specifying hours, minutes, seconds, milliseconds, or microseconds) in ANSI mode.
@pytest.mark.parametrize('data_gen', vals, ids=idfn)
def test_dateaddinterval_ansi(data_gen):
days, _ = data_gen
# only specify the `days`
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, DateGen(start=date(200, 1, 1), end=date(800, 1, 1)), seed=1)
.selectExpr('a + (interval {} days)'.format(days)),
conf=copy_and_update(ansi_enabled_conf, legacy_interval_enabled_conf))
# Throws if hours, minutes, seconds, milliseconds, or microseconds are added to a date in ANSI mode
def test_dateaddinterval_ansi_exception():
assert_gpu_and_cpu_error(
# specify the `seconds`
lambda spark : unary_op_df(spark, DateGen(start=date(200, 1, 1), end=date(800, 1, 1)), seed=1)
.selectExpr('a + (interval {} days {} seconds)'.format(1, 5)).collect(),
conf=copy_and_update(ansi_enabled_conf, legacy_interval_enabled_conf),
error_message="IllegalArgumentException")
@pytest.mark.parametrize('data_gen', date_gens, ids=idfn)
def test_datediff(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).selectExpr(
'datediff(a, b)',
'datediff(\'2016-03-02\', b)',
'datediff(date(null), b)',
'datediff(a, date(null))',
'datediff(a, \'2016-03-02\')'))
def test_hour():
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, timestamp_gen).selectExpr('hour(a)'))
def test_minute():
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, timestamp_gen).selectExpr('minute(a)'))
def test_second():
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, timestamp_gen).selectExpr('second(a)'))
def test_quarter():
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, date_gen).selectExpr('quarter(a)'))
def test_weekday():
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, date_gen).selectExpr('weekday(a)'))
def test_dayofweek():
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, date_gen).selectExpr('dayofweek(a)'))
def test_last_day():
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, date_gen).selectExpr('last_day(a)'))
# We have to set the upper/lower limit on IntegerGen so date_add doesn't overflow.
# Python uses the proleptic Gregorian calendar, which extends the Gregorian calendar as if it had
# always existed and always will exist in the future. Performing date_sub('0001-01-01', 1) blows up
# because Python doesn't recognize dates before Jan 01, 0001, and the same is true for
# date_add('9999-12-31', 1), because Python doesn't recognize dates after Dec 31, 9999. To get around
# this problem we limit the integer value for days to stay within ~200 years, or ~70000 days, so the
# results stay within the legal limits of a Python date (see the sketch after the generator below).
days_gen = [ByteGen(), ShortGen(), IntegerGen(min_val=-70000, max_val=70000, special_cases=[-70000, 7000,0,1,-1])]
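# A minimal, illustrative sketch (not used by the tests) of the Python date limits described above:
# datetime.date only covers 0001-01-01 through 9999-12-31, so stepping one day past either end
# raises OverflowError, analogous to date_sub('0001-01-01', 1) or date_add('9999-12-31', 1) once the
# results are converted back to Python dates.
def _python_date_limit_sketch():
    from datetime import date, timedelta
    assert date.min == date(1, 1, 1) and date.max == date(9999, 12, 31)
    for out_of_range in (lambda: date.min - timedelta(days=1),   # like date_sub('0001-01-01', 1)
                         lambda: date.max + timedelta(days=1)):  # like date_add('9999-12-31', 1)
        try:
            out_of_range()
        except OverflowError:
            pass  # expected: the result falls outside the supported Python date range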
@pytest.mark.parametrize('data_gen', days_gen, ids=idfn)
def test_dateadd(data_gen):
string_type = to_cast_string(data_gen.data_type)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : two_col_df(spark, DateGen(start=date(200, 1, 1), end=date(800, 1, 1)), data_gen)
.selectExpr('date_add(a, b)',
'date_add(date(\'2016-03-02\'), b)',
'date_add(date(null), b)',
'date_add(a, cast(null as {}))'.format(string_type),
'date_add(a, cast(24 as {}))'.format(string_type)))
@pytest.mark.parametrize('data_gen', days_gen, ids=idfn)
def test_datesub(data_gen):
string_type = to_cast_string(data_gen.data_type)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : two_col_df(spark, DateGen(start=date(200, 1, 1), end=date(800, 1, 1)), data_gen)
.selectExpr('date_sub(a, b)',
'date_sub(date(\'2016-03-02\'), b)',
'date_sub(date(null), b)',
'date_sub(a, cast(null as {}))'.format(string_type),
'date_sub(a, cast(24 as {}))'.format(string_type)))
# In order to get a bigger range of values tested for Integer days for date_sub and date_add
# we are casting the output to unix_timestamp. Even that overflows if the integer value is greater
# than 103819094 or less than -109684887 for date('9999-12-31'), or greater than 107471152 or less
# than -106032829 for date('0001-01-01'), so we have to cap the day values to those upper and
# lower ranges.
to_unix_timestamp_days_gen=[ByteGen(), ShortGen(), IntegerGen(min_val=-106032829, max_val=103819094, special_cases=[-106032829, 103819094,0,1,-1])]
@pytest.mark.parametrize('data_gen', to_unix_timestamp_days_gen, ids=idfn)
@incompat
def test_dateadd_with_date_overflow(data_gen):
string_type = to_cast_string(data_gen.data_type)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : two_col_df(spark, DateGen(),
data_gen).selectExpr('unix_timestamp(date_add(a, b))',
'unix_timestamp(date_add( date(\'2016-03-02\'), b))',
'unix_timestamp(date_add(date(null), b))',
'unix_timestamp(date_add(a, cast(null as {})))'.format(string_type),
'unix_timestamp(date_add(a, cast(24 as {})))'.format(string_type)))
to_unix_timestamp_days_gen=[ByteGen(), ShortGen(), IntegerGen(max_val=106032829, min_val=-103819094, special_cases=[106032829, -103819094,0,1,-1])]
@pytest.mark.parametrize('data_gen', to_unix_timestamp_days_gen, ids=idfn)
@incompat
def test_datesub_with_date_overflow(data_gen):
string_type = to_cast_string(data_gen.data_type)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : two_col_df(spark, DateGen(),
data_gen).selectExpr('unix_timestamp(date_sub(a, b))',
'unix_timestamp(date_sub( date(\'2016-03-02\'), b))',
'unix_timestamp(date_sub(date(null), b))',
'unix_timestamp(date_sub(a, cast(null as {})))'.format(string_type),
'unix_timestamp(date_sub(a, cast(24 as {})))'.format(string_type)))
@pytest.mark.parametrize('data_gen', date_gens, ids=idfn)
def test_year(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).select(f.year(f.col('a'))))
@pytest.mark.parametrize('data_gen', date_gens, ids=idfn)
def test_month(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).select(f.month(f.col('a'))))
@pytest.mark.parametrize('data_gen', date_gens, ids=idfn)
def test_dayofmonth(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).select(f.dayofmonth(f.col('a'))))
@pytest.mark.parametrize('data_gen', date_gens, ids=idfn)
def test_dayofyear(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).select(f.dayofyear(f.col('a'))))
@pytest.mark.parametrize('data_gen', date_n_time_gens, ids=idfn)
def test_unix_timestamp(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).select(f.unix_timestamp(f.col('a'))))
@allow_non_gpu('ProjectExec')
@pytest.mark.parametrize('data_gen', date_n_time_gens, ids=idfn)
def test_unsupported_fallback_unix_timestamp(data_gen):
assert_gpu_fallback_collect(lambda spark: gen_df(
spark, [("a", data_gen), ("b", string_gen)], length=10).selectExpr(
"unix_timestamp(a, b)"),
"UnixTimestamp")
@pytest.mark.parametrize('ansi_enabled', [True, False], ids=['ANSI_ON', 'ANSI_OFF'])
@pytest.mark.parametrize('data_gen', date_n_time_gens, ids=idfn)
def test_to_unix_timestamp(data_gen, ansi_enabled):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr("to_unix_timestamp(a)"),
{'spark.sql.ansi.enabled': ansi_enabled})
@allow_non_gpu('ProjectExec')
@pytest.mark.parametrize('data_gen', date_n_time_gens, ids=idfn)
def test_unsupported_fallback_to_unix_timestamp(data_gen):
assert_gpu_fallback_collect(lambda spark: gen_df(
spark, [("a", data_gen), ("b", string_gen)], length=10).selectExpr(
"to_unix_timestamp(a, b)"),
"ToUnixTimestamp")
@pytest.mark.parametrize('time_zone', ["UTC", "UTC+0", "UTC-0", "GMT", "GMT+0", "GMT-0"], ids=idfn)
@pytest.mark.parametrize('data_gen', [timestamp_gen], ids=idfn)
def test_from_utc_timestamp(data_gen, time_zone):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, data_gen).select(f.from_utc_timestamp(f.col('a'), time_zone)))
@allow_non_gpu('ProjectExec')
@pytest.mark.parametrize('time_zone', ["PST", "MST", "EST", "VST", "NST", "AST"], ids=idfn)
@pytest.mark.parametrize('data_gen', [timestamp_gen], ids=idfn)
def test_from_utc_timestamp_unsupported_timezone_fallback(data_gen, time_zone):
assert_gpu_fallback_collect(
lambda spark: unary_op_df(spark, data_gen).select(f.from_utc_timestamp(f.col('a'), time_zone)),
'FromUTCTimestamp')
@allow_non_gpu('ProjectExec')
@pytest.mark.parametrize('data_gen', [timestamp_gen], ids=idfn)
def test_unsupported_fallback_from_utc_timestamp(data_gen):
time_zone_gen = StringGen(pattern="UTC")
assert_gpu_fallback_collect(
lambda spark: gen_df(spark, [("a", data_gen), ("tzone", time_zone_gen)]).selectExpr(
"from_utc_timestamp(a, tzone)"),
'FromUTCTimestamp')
@allow_non_gpu('ProjectExec')
@pytest.mark.parametrize('data_gen', [long_gen], ids=idfn)
def test_unsupported_fallback_from_unixtime(data_gen):
fmt_gen = StringGen(pattern="[M]")
assert_gpu_fallback_collect(
lambda spark: gen_df(spark, [("a", data_gen), ("fmt", fmt_gen)]).selectExpr(
"from_unixtime(a, fmt)"),
'FromUnixTime')
@pytest.mark.parametrize('invalid,fmt', [
('2021-01/01', 'yyyy-MM-dd'),
('2021/01-01', 'yyyy/MM/dd'),
('2021/01', 'yyyy-MM'),
('2021-01', 'yyyy/MM'),
('01/02/201', 'dd/MM/yyyy'),
('2021-01-01 00:00', 'yyyy-MM-dd HH:mm:ss'),
('01#01', 'MM-dd'),
('01T01', 'MM/dd'),
('29-02', 'dd-MM'), # 1970-02-29 is invalid
('01-01', 'dd/MM'),
('2021-01', 'MM/yyyy'),
('2021-01', 'MM-yyyy'),
('01-02-2022', 'MM/dd/yyyy'),
('99-01-2022', 'MM-dd-yyyy'),
], ids=idfn)
@pytest.mark.parametrize('parser_policy', ["CORRECTED", "EXCEPTION"], ids=idfn)
@pytest.mark.parametrize('operator', ["to_unix_timestamp", "unix_timestamp", "to_timestamp", "to_date"], ids=idfn)
def test_string_to_timestamp_functions_ansi_invalid(invalid, fmt, parser_policy, operator):
sql = "{operator}(a, '{fmt}')".format(fmt=fmt, operator=operator)
parser_policy_dic = {"spark.sql.legacy.timeParserPolicy": "{}".format(parser_policy)}
def fun(spark):
df = spark.createDataFrame([(invalid,)], "a string")
return df.selectExpr(sql).collect()
assert_gpu_and_cpu_error(fun, conf=copy_and_update(parser_policy_dic, ansi_enabled_conf), error_message="Exception")
@pytest.mark.parametrize('parser_policy', ["CORRECTED", "EXCEPTION"], ids=idfn)
# first get expected string via `date_format`
def test_string_to_timestamp_functions_ansi_valid(parser_policy):
expr_format = "{operator}(date_format(a, '{fmt}'), '{fmt}')"
formats = ['yyyy-MM-dd', 'yyyy/MM/dd', 'yyyy-MM', 'yyyy/MM', 'dd/MM/yyyy', 'yyyy-MM-dd HH:mm:ss',
'MM-dd', 'MM/dd', 'dd-MM', 'dd/MM', 'MM/yyyy', 'MM-yyyy', 'MM/dd/yyyy', 'MM-dd-yyyy']
operators = ["to_unix_timestamp", "unix_timestamp", "to_timestamp", "to_date"]
format_operator_pairs = [(fmt, operator) for fmt in formats for operator in operators]
expr_list = [expr_format.format(operator=operator, fmt=fmt) for (fmt, operator) in format_operator_pairs]
parser_policy_dic = {"spark.sql.legacy.timeParserPolicy": "{}".format(parser_policy)}
def fun(spark):
df = spark.createDataFrame([(datetime(1970, 8, 12, tzinfo=timezone.utc),)], "a timestamp")
return df.selectExpr(expr_list)
assert_gpu_and_cpu_are_equal_collect(fun, conf=copy_and_update(parser_policy_dic, ansi_enabled_conf))
@pytest.mark.parametrize('ansi_enabled', [True, False], ids=['ANSI_ON', 'ANSI_OFF'])
@pytest.mark.parametrize('data_gen', date_n_time_gens, ids=idfn)
def test_unix_timestamp_improved(data_gen, ansi_enabled):
conf = {"spark.rapids.sql.improvedTimeOps.enabled": "true",
"spark.sql.legacy.timeParserPolicy": "CORRECTED"}
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).select(f.unix_timestamp(f.col('a'))),
copy_and_update({'spark.sql.ansi.enabled': ansi_enabled}, conf))
@pytest.mark.parametrize('ansi_enabled', [True, False], ids=['ANSI_ON', 'ANSI_OFF'])
@pytest.mark.parametrize('data_gen', date_n_time_gens, ids=idfn)
def test_unix_timestamp(data_gen, ansi_enabled):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).select(f.unix_timestamp(f.col("a"))),
{'spark.sql.ansi.enabled': ansi_enabled})
@pytest.mark.parametrize('ansi_enabled', [True, False], ids=['ANSI_ON', 'ANSI_OFF'])
@pytest.mark.parametrize('data_gen', date_n_time_gens, ids=idfn)
def test_to_unix_timestamp_improved(data_gen, ansi_enabled):
conf = {"spark.rapids.sql.improvedTimeOps.enabled": "true"}
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr("to_unix_timestamp(a)"),
copy_and_update({'spark.sql.ansi.enabled': ansi_enabled}, conf))
str_date_and_format_gen = [pytest.param(StringGen('[0-9]{4}/[01][0-9]'),'yyyy/MM', marks=pytest.mark.xfail(reason="cudf does no checks")),
(StringGen('[0-9]{4}/[01][12]/[0-2][1-8]'),'yyyy/MM/dd'),
(StringGen('[01][12]/[0-2][1-8]'), 'MM/dd'),
(StringGen('[0-2][1-8]/[01][12]'), 'dd/MM'),
(ConvertGen(DateGen(nullable=False), lambda d: d.strftime('%Y/%m').zfill(7), data_type=StringType()), 'yyyy/MM')]
# get invalid date string df
def invalid_date_string_df(spark):
return spark.createDataFrame([['invalid_date_string']], "a string")
@pytest.mark.parametrize('ansi_enabled', [True, False], ids=['ANSI_ON', 'ANSI_OFF'])
@pytest.mark.parametrize('data_gen,date_form', str_date_and_format_gen, ids=idfn)
def test_string_to_unix_timestamp(data_gen, date_form, ansi_enabled):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen, seed=1).selectExpr("to_unix_timestamp(a, '{}')".format(date_form)),
{'spark.sql.ansi.enabled': ansi_enabled})
def test_string_to_unix_timestamp_ansi_exception():
assert_gpu_and_cpu_error(
lambda spark : invalid_date_string_df(spark).selectExpr("to_unix_timestamp(a, '{}')".format('yyyy/MM/dd')).collect(),
error_message="Exception",
conf=ansi_enabled_conf)
@pytest.mark.parametrize('ansi_enabled', [True, False], ids=['ANSI_ON', 'ANSI_OFF'])
@pytest.mark.parametrize('data_gen,date_form', str_date_and_format_gen, ids=idfn)
def test_string_unix_timestamp(data_gen, date_form, ansi_enabled):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen, seed=1).select(f.unix_timestamp(f.col('a'), date_form)),
{'spark.sql.ansi.enabled': ansi_enabled})
def test_string_unix_timestamp_ansi_exception():
assert_gpu_and_cpu_error(
lambda spark : invalid_date_string_df(spark).select(f.unix_timestamp(f.col('a'), 'yyyy/MM/dd')).collect(),
error_message="Exception",
conf=ansi_enabled_conf)
@pytest.mark.parametrize('data_gen', [StringGen('200[0-9]-0[1-9]-[0-2][1-8]')], ids=idfn)
@pytest.mark.parametrize('ansi_enabled', [True, False], ids=['ANSI_ON', 'ANSI_OFF'])
def test_gettimestamp(data_gen, ansi_enabled):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).select(f.to_date(f.col("a"), "yyyy-MM-dd")),
{'spark.sql.ansi.enabled': ansi_enabled})
@pytest.mark.parametrize('data_gen', [StringGen('0[1-9]200[0-9]')], ids=idfn)
def test_gettimestamp_format_MMyyyy(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, data_gen).select(f.to_date(f.col("a"), "MMyyyy")))
def test_gettimestamp_ansi_exception():
assert_gpu_and_cpu_error(
lambda spark : invalid_date_string_df(spark).select(f.to_date(f.col("a"), "yyyy-MM-dd")).collect(),
error_message="Exception",
conf=ansi_enabled_conf)
supported_date_formats = ['yyyy-MM-dd', 'yyyy-MM', 'yyyy/MM/dd', 'yyyy/MM', 'dd/MM/yyyy',
'MM-dd', 'MM/dd', 'dd-MM', 'dd/MM']
@pytest.mark.parametrize('date_format', supported_date_formats, ids=idfn)
@pytest.mark.parametrize('data_gen', date_n_time_gens, ids=idfn)
def test_date_format(data_gen, date_format):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr("date_format(a, '{}')".format(date_format)))
unsupported_date_formats = ['F']
@pytest.mark.parametrize('date_format', unsupported_date_formats, ids=idfn)
@pytest.mark.parametrize('data_gen', date_n_time_gens, ids=idfn)
@allow_non_gpu('ProjectExec')
def test_date_format_f(data_gen, date_format):
assert_gpu_fallback_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr("date_format(a, '{}')".format(date_format)),
'DateFormatClass')
@pytest.mark.parametrize('date_format', unsupported_date_formats, ids=idfn)
@pytest.mark.parametrize('data_gen', date_n_time_gens, ids=idfn)
@allow_non_gpu('ProjectExec')
def test_date_format_f_incompat(data_gen, date_format):
# note that we can't support it even with incompatibleDateFormats enabled
conf = {"spark.rapids.sql.incompatibleDateFormats.enabled": "true"}
assert_gpu_fallback_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr("date_format(a, '{}')".format(date_format)),
'DateFormatClass', conf)
maybe_supported_date_formats = ['dd-MM-yyyy', 'yyyy-MM-dd HH:mm:ss.SSS', 'yyyy-MM-dd HH:mm:ss.SSSSSS']
@pytest.mark.parametrize('date_format', maybe_supported_date_formats, ids=idfn)
@pytest.mark.parametrize('data_gen', date_n_time_gens, ids=idfn)
@allow_non_gpu('ProjectExec')
def test_date_format_maybe(data_gen, date_format):
assert_gpu_fallback_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr("date_format(a, '{}')".format(date_format)),
'DateFormatClass')
@pytest.mark.parametrize('date_format', maybe_supported_date_formats, ids=idfn)
@pytest.mark.parametrize('data_gen', date_n_time_gens, ids=idfn)
def test_date_format_maybe_incompat(data_gen, date_format):
conf = {"spark.rapids.sql.incompatibleDateFormats.enabled": "true"}
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr("date_format(a, '{}')".format(date_format)), conf)
# Reproduce conditions for https://github.com/NVIDIA/spark-rapids/issues/5670
# where we had a failure due to GpuCast canonicalization with timezone.
# In this case the filter was running after the project; the way to trigger that is to add
# input_file_name(), otherwise the filter happens before the project.
@allow_non_gpu('CollectLimitExec,FileSourceScanExec,DeserializeToObjectExec')
@ignore_order()
def test_date_format_mmyyyy_cast_canonicalization(spark_tmp_path):
data_path = spark_tmp_path + '/CSV_DATA'
gen = StringGen(pattern='[0][0-9][1][8-9][1-9][1-9]', nullable=False)
schema = gen.data_type
with_cpu_session(lambda spark : gen_df(spark, gen, length=100).write.csv(data_path))
def do_join_cast(spark):
left = spark.read.csv(data_path)\
.selectExpr("date_format(to_date(_c0, 'MMyyyy'), 'MM/dd/yyyy') as monthly_reporting_period", "substring_index(substring_index(input_file_name(),'/',-1),'.',1) as filename")
right = spark.read.csv(data_path).withColumnRenamed("_c0", "r_c0")\
.selectExpr("date_format(to_date(r_c0, 'MMyyyy'), 'MM/dd/yyyy') as monthly_reporting_period", "substring_index(substring_index(input_file_name(),'/',-1),'.',1) as filename")\
.withColumnRenamed("monthly_reporting_period", "r_monthly_reporting_period")\
.withColumnRenamed("filename", "r_filename")
return left.join(right, left.monthly_reporting_period == right.r_monthly_reporting_period, how='inner')
assert_gpu_and_cpu_are_equal_collect(do_join_cast)
@allow_non_gpu('ProjectExec')
@pytest.mark.parametrize('data_gen', date_n_time_gens, ids=idfn)
def test_unsupported_fallback_date_format(data_gen):
conf = {"spark.rapids.sql.incompatibleDateFormats.enabled": "true"}
assert_gpu_fallback_collect(
lambda spark : gen_df(spark, [("a", data_gen)]).selectExpr(
"date_format(a, a)"),
"DateFormatClass",
conf)
@allow_non_gpu('ProjectExec')
def test_unsupported_fallback_to_date():
date_gen = StringGen(pattern="2023-08-01")
pattern_gen = StringGen(pattern="[M]")
conf = {"spark.rapids.sql.incompatibleDateFormats.enabled": "true"}
assert_gpu_fallback_collect(
lambda spark: gen_df(spark, [("a", date_gen), ("b", pattern_gen)]).selectExpr(
"to_date(a, b)"),
'GetTimestamp',
conf)
# (-62135510400, 253402214400) is the range of seconds that timestamp_seconds can represent,
# taking the influence of time zones into account.
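# For reference (a derivation, assuming the usual proleptic-Gregorian epoch constants):
#   0001-01-01T00:00:00Z is -62135596800 seconds, so -62135510400 is one day after it, and
#   10000-01-01T00:00:00Z is 253402300800 seconds, so 253402214400 is one day before it.
# The one-day slack on each side is what leaves room for time zone offsets.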
ts_float_gen = SetValuesGen(FloatType(), [0.0, -0.0, 1.0, -1.0, 1.234567, -1.234567, 16777215.0, float('inf'), float('-inf'), float('nan')])
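# Note: 16777215.0 is 2**24 - 1, the largest odd integer a 32-bit float can represent exactly,
# presumably chosen to probe float precision right at that boundary.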
seconds_gens = [LongGen(min_val=-62135510400, max_val=253402214400), IntegerGen(), ShortGen(), ByteGen(),
DoubleGen(min_exp=0, max_exp=32), ts_float_gen, DecimalGen(16, 6), DecimalGen(13, 3), DecimalGen(10, 0), DecimalGen(7, -3), DecimalGen(6, 6)]
@pytest.mark.parametrize('data_gen', seconds_gens, ids=idfn)
def test_timestamp_seconds(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr("timestamp_seconds(a)"))
def test_timestamp_seconds_long_overflow():
assert_gpu_and_cpu_error(
lambda spark : unary_op_df(spark, long_gen).selectExpr("timestamp_seconds(a)").collect(),
conf={},
error_message='long overflow')
@pytest.mark.parametrize('data_gen', [DecimalGen(7, 7), DecimalGen(20, 7)], ids=idfn)
def test_timestamp_seconds_rounding_necessary(data_gen):
assert_gpu_and_cpu_error(
lambda spark : unary_op_df(spark, data_gen).selectExpr("timestamp_seconds(a)").collect(),
conf={},
error_message='Rounding necessary')
@pytest.mark.parametrize('data_gen', [DecimalGen(19, 6), DecimalGen(20, 6)], ids=idfn)
def test_timestamp_seconds_decimal_overflow(data_gen):
assert_gpu_and_cpu_error(
lambda spark : unary_op_df(spark, data_gen).selectExpr("timestamp_seconds(a)").collect(),
conf={},
error_message='Overflow')
millis_gens = [LongGen(min_val=-62135510400000, max_val=253402214400000), IntegerGen(), ShortGen(), ByteGen()]
@pytest.mark.parametrize('data_gen', millis_gens, ids=idfn)
def test_timestamp_millis(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr("timestamp_millis(a)"))
def test_timestamp_millis_long_overflow():
assert_gpu_and_cpu_error(
lambda spark : unary_op_df(spark, long_gen).selectExpr("timestamp_millis(a)").collect(),
conf={},
error_message='long overflow')
micros_gens = [LongGen(min_val=-62135510400000000, max_val=253402214400000000), IntegerGen(), ShortGen(), ByteGen()]
@pytest.mark.parametrize('data_gen', micros_gens, ids=idfn)
def test_timestamp_micros(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr("timestamp_micros(a)"))
| spark-rapids-branch-23.10 | integration_tests/src/main/python/date_time_test.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_gpu_and_cpu_are_equal_collect, assert_gpu_and_cpu_are_equal_sql, assert_gpu_and_cpu_error, assert_gpu_fallback_collect, assert_spark_exception
from data_gen import *
from spark_session import is_before_spark_320, is_before_spark_330, is_spark_340_or_later, is_spark_350_or_later, \
is_databricks113_or_later, with_gpu_session
from marks import allow_non_gpu, approximate_float
from pyspark.sql.types import *
from spark_init_internal import spark_version
from datetime import datetime
import math
_decimal_gen_36_5 = DecimalGen(precision=36, scale=5)
def test_cast_empty_string_to_int():
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, StringGen(pattern="")).selectExpr(
'CAST(a as BYTE)',
'CAST(a as SHORT)',
'CAST(a as INTEGER)',
'CAST(a as LONG)'))
# These tests are not intended to be exhaustive. The scala test CastOpSuite should cover
# just about everything for non-nested values. This is intended to check that the
# recursive code for nested type checks, like arrays, is working properly. So we are going to
# pick child types that are simple to cast: upcasting integer values and casting them to strings.
@pytest.mark.parametrize('data_gen,to_type', [
(ArrayGen(byte_gen), ArrayType(IntegerType())),
(ArrayGen(_decimal_gen_36_5), ArrayType(DecimalType(38, 5))),
(ArrayGen(StringGen('[0-9]{1,5}')), ArrayType(IntegerType())),
(ArrayGen(byte_gen), ArrayType(StringType())),
(ArrayGen(byte_gen), ArrayType(DecimalType(6, 2))),
(ArrayGen(ArrayGen(byte_gen)), ArrayType(ArrayType(IntegerType()))),
(ArrayGen(ArrayGen(byte_gen)), ArrayType(ArrayType(StringType()))),
(ArrayGen(ArrayGen(byte_gen)), ArrayType(ArrayType(DecimalType(6, 2)))),
(StructGen([('a', byte_gen)]), StructType([StructField('a', IntegerType())])),
(StructGen([('a', _decimal_gen_36_5)]), StructType([StructField('a', DecimalType(38, 5))])),
(StructGen([('a', byte_gen), ('c', short_gen)]), StructType([StructField('b', IntegerType()), StructField('c', ShortType())])),
(StructGen([('a', ArrayGen(byte_gen)), ('c', short_gen)]), StructType([StructField('a', ArrayType(IntegerType())), StructField('c', LongType())])),
(ArrayGen(StructGen([('a', byte_gen), ('b', byte_gen)])), ArrayType(StringType())),
(MapGen(ByteGen(nullable=False), byte_gen), MapType(StringType(), StringType())),
(MapGen(ByteGen(nullable=False), _decimal_gen_36_5), MapType(StringType(), DecimalType(38, 5))),
(MapGen(ShortGen(nullable=False), ArrayGen(byte_gen)), MapType(IntegerType(), ArrayType(ShortType()))),
(MapGen(ShortGen(nullable=False), ArrayGen(StructGen([('a', byte_gen)]))), MapType(IntegerType(), ArrayType(StructType([StructField('b', ShortType())]))))
], ids=idfn)
def test_cast_nested(data_gen, to_type):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).select(f.col('a').cast(to_type)))
def test_cast_string_date_valid_format():
    # In Spark 3.2.0+ the set of valid formats changed, and we cannot support all of them.
    # This provides values that are valid in all of those formats.
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, StringGen('[0-9]{1,4}-[0-9]{1,2}-[0-9]{1,2}')).select(f.col('a').cast(DateType())),
conf = {'spark.rapids.sql.hasExtendedYearValues': 'false'})
invalid_values_string_to_date = ['200', ' 1970A', '1970 A', '1970T', # do not conform to "yyyy" after trim
                                 '1970 T', ' 1970-01T', '1970-01 A', # do not conform to "yyyy-[M]M" after trim
                                 # do not conform to 'yyyy-[M]M-[d]d', "yyyy-[M]M-[d]d *" or "yyyy-[M]M-[d]d T*" after trim
                                 '1970-01-01A',
                                 '2022-02-29', # nonexistent day (2022 is not a leap year)
                                 '200-1-1', # '200' does not conform to 'yyyy'
                                 '2001-13-1', # nonexistent month
                                 '2001-1-32', # nonexistent day
'not numbers',
'666666666'
]
valid_values_string_to_date = ['2001', ' 2001 ', '1970-01', ' 1970-1 ',
'1970-1-01', ' 1970-10-5 ', ' 2001-10-16 ', # 'yyyy-[M]M-[d]d' after trim
'1970-01-01T', '1970-01-01T-no_impact', # "yyyy-[M]M-[d]d T*" after trim
' 1970-01-01 A', '1970-01-01 B ' # "yyyy-[M]M-[d]d *" after trim
]
values_string_to_date = invalid_values_string_to_date + valid_values_string_to_date
# Spark 320+ and databricks support Ansi mode when casting string to date
# This means an exception will be thrown when casting an invalid string to date on Spark 320+ or databricks.
# Test Spark versions < 3.2.0 and non-databricks, ANSI mode.
@pytest.mark.skipif(not is_before_spark_320(), reason="ansi cast(string as date) throws exception only in 3.2.0+ or db")
def test_cast_string_date_invalid_ansi_before_320():
    data_rows = [(v,) for v in values_string_to_date]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.createDataFrame(data_rows, "a string").select(f.col('a').cast(DateType())),
conf={'spark.rapids.sql.hasExtendedYearValues': 'false',
'spark.sql.ansi.enabled': 'true'}, )
# test Spark versions >= 320 and databricks, ANSI mode, valid values
@pytest.mark.skipif(is_before_spark_320(), reason="Spark versions(< 320) not support Ansi mode when casting string to date")
def test_cast_string_date_valid_ansi():
data_rows = [(v,) for v in valid_values_string_to_date]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.createDataFrame(data_rows, "a string").select(f.col('a').cast(DateType())),
conf={'spark.rapids.sql.hasExtendedYearValues': 'false',
'spark.sql.ansi.enabled': 'true'})
# test Spark versions >= 320, ANSI mode
@pytest.mark.skipif(is_before_spark_320(), reason="ansi cast(string as date) throws exception only in 3.2.0+")
@pytest.mark.parametrize('invalid', invalid_values_string_to_date)
def test_cast_string_date_invalid_ansi(invalid):
assert_gpu_and_cpu_error(
lambda spark: spark.createDataFrame([(invalid,)], "a string").select(f.col('a').cast(DateType())).collect(),
conf={'spark.rapids.sql.hasExtendedYearValues': 'false',
'spark.sql.ansi.enabled': 'true'},
error_message="DateTimeException")
# test try_cast in Spark versions >= 320 and < 340
@pytest.mark.skipif(is_before_spark_320() or is_spark_340_or_later() or is_databricks113_or_later(), reason="try_cast only in Spark 3.2+")
@allow_non_gpu('ProjectExec', 'TryCast')
@pytest.mark.parametrize('invalid', invalid_values_string_to_date)
def test_try_cast_fallback(invalid):
assert_gpu_fallback_collect(
lambda spark: spark.createDataFrame([(invalid,)], "a string").selectExpr("try_cast(a as date)"),
'TryCast',
conf={'spark.rapids.sql.hasExtendedYearValues': False,
'spark.sql.ansi.enabled': True})
# test try_cast in Spark versions >= 340
@pytest.mark.skipif(not (is_spark_340_or_later() or is_databricks113_or_later()), reason="Cast with EvalMode only in Spark 3.4+")
@allow_non_gpu('ProjectExec','Cast')
@pytest.mark.parametrize('invalid', invalid_values_string_to_date)
def test_try_cast_fallback_340(invalid):
assert_gpu_fallback_collect(
lambda spark: spark.createDataFrame([(invalid,)], "a string").selectExpr("try_cast(a as date)"),
'Cast',
conf={'spark.rapids.sql.hasExtendedYearValues': False,
'spark.sql.ansi.enabled': True})
# test all Spark versions, non-ANSI mode; invalid values are converted to NULL
def test_cast_string_date_non_ansi():
    data_rows = [(v,) for v in values_string_to_date]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.createDataFrame(data_rows, "a string").select(f.col('a').cast(DateType())),
conf={'spark.rapids.sql.hasExtendedYearValues': 'false'})
@pytest.mark.parametrize('data_gen', [StringGen('[0-9]{1,4}-[0-9]{1,2}-[0-9]{1,2}'),
StringGen('[0-9]{1,4}-[0-3][0-9]-[0-5][0-9][ |T][0-3][0-9]:[0-6][0-9]:[0-6][0-9]'),
StringGen('[0-9]{1,4}-[0-3][0-9]-[0-5][0-9][ |T][0-3][0-9]:[0-6][0-9]:[0-6][0-9].[0-9]{0,6}Z?')],
ids=idfn)
def test_cast_string_ts_valid_format(data_gen):
    # In Spark 3.2.0+ the set of valid formats changed, and we cannot support all of them.
    # This provides values that are valid in all of those formats.
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).select(f.col('a').cast(TimestampType())),
conf = {'spark.rapids.sql.hasExtendedYearValues': 'false',
'spark.rapids.sql.castStringToTimestamp.enabled': 'true'})
@allow_non_gpu('ProjectExec', 'Cast', 'Alias')
@pytest.mark.skipif(is_before_spark_320(), reason="Only in Spark 3.2.0+ do we have issues with extended years")
def test_cast_string_date_fallback():
assert_gpu_fallback_collect(
# Cast back to String because this goes beyond what python can support for years
lambda spark : unary_op_df(spark, StringGen('([0-9]|-|\\+){4,12}')).select(f.col('a').cast(DateType()).cast(StringType())),
'Cast')
@allow_non_gpu('ProjectExec', 'Cast', 'Alias')
@pytest.mark.skipif(is_before_spark_320(), reason="Only in Spark 3.2.0+ do we have issues with extended years")
def test_cast_string_timestamp_fallback():
assert_gpu_fallback_collect(
# Cast back to String because this goes beyond what python can support for years
lambda spark : unary_op_df(spark, StringGen('([0-9]|-|\\+){4,12}')).select(f.col('a').cast(TimestampType()).cast(StringType())),
'Cast',
conf = {'spark.rapids.sql.castStringToTimestamp.enabled': 'true'})
@approximate_float
@pytest.mark.parametrize('data_gen', [
decimal_gen_32bit, decimal_gen_32bit_neg_scale, DecimalGen(precision=7, scale=7),
decimal_gen_64bit, decimal_gen_128bit, DecimalGen(precision=30, scale=2),
DecimalGen(precision=36, scale=5), DecimalGen(precision=38, scale=0),
DecimalGen(precision=38, scale=10), DecimalGen(precision=36, scale=-5),
DecimalGen(precision=38, scale=-10)], ids=meta_idfn('from:'))
@pytest.mark.parametrize('to_type', [ByteType(), ShortType(), IntegerType(), LongType(), FloatType(), DoubleType(), StringType()], ids=meta_idfn('to:'))
def test_cast_decimal_to(data_gen, to_type):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).select(f.col('a').cast(to_type), f.col('a')),
conf = {'spark.rapids.sql.castDecimalToFloat.enabled': 'true'})
@approximate_float
@pytest.mark.parametrize('data_gen', [
decimal_gen_32bit, decimal_gen_32bit_neg_scale, DecimalGen(precision=7, scale=7),
decimal_gen_64bit, decimal_gen_128bit, DecimalGen(precision=30, scale=2),
DecimalGen(precision=36, scale=5), DecimalGen(precision=38, scale=0),
DecimalGen(precision=38, scale=10), DecimalGen(precision=36, scale=-5),
DecimalGen(precision=38, scale=-10)], ids=meta_idfn('from:'))
@pytest.mark.parametrize('to_type', [FloatType(), DoubleType(), StringType()], ids=meta_idfn('to:'))
def test_ansi_cast_decimal_to(data_gen, to_type):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).select(f.col('a').cast(to_type), f.col('a')),
conf = {'spark.rapids.sql.castDecimalToFloat.enabled': True,
'spark.sql.ansi.enabled': True})
@pytest.mark.parametrize('data_gen', [
DecimalGen(7, 1),
DecimalGen(9, 9),
DecimalGen(15, 2),
DecimalGen(15, 15),
DecimalGen(30, 3),
DecimalGen(5, -3),
DecimalGen(3, 0)], ids=meta_idfn('from:'))
@pytest.mark.parametrize('to_type', [
DecimalType(9, 0),
DecimalType(17, 2),
DecimalType(35, 4),
DecimalType(30, -4),
DecimalType(38, -10),
DecimalType(1, -1)], ids=meta_idfn('to:'))
def test_cast_decimal_to_decimal(data_gen, to_type):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).select(f.col('a').cast(to_type), f.col('a')))
@pytest.mark.parametrize('data_gen', [byte_gen, short_gen, int_gen, long_gen], ids=idfn)
@pytest.mark.parametrize('to_type', [
DecimalType(2, 0),
DecimalType(3, 0),
DecimalType(5, 0),
DecimalType(7, 2),
DecimalType(10, 0),
DecimalType(10, 2),
DecimalType(18, 0),
DecimalType(18, 2)], ids=idfn)
def test_cast_integral_to_decimal(data_gen, to_type):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).select(
f.col('a').cast(to_type)))
def test_cast_byte_to_decimal_overflow():
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, byte_gen).select(
f.col('a').cast(DecimalType(2, -1))))
def test_cast_short_to_decimal_overflow():
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, short_gen).select(
f.col('a').cast(DecimalType(4, -1))))
def test_cast_int_to_decimal_overflow():
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, int_gen).select(
f.col('a').cast(DecimalType(9, -1))))
def test_cast_long_to_decimal_overflow():
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, long_gen).select(
f.col('a').cast(DecimalType(18, -1))))
# casting these types to string should pass
basic_gens_for_cast_to_string = [ByteGen, ShortGen, IntegerGen, LongGen, StringGen, BooleanGen, DateGen, TimestampGen]
basic_array_struct_gens_for_cast_to_string = [f() for f in basic_gens_for_cast_to_string] + [null_gen] + decimal_gens
# We currently do not generate the exact string as Spark for some decimal values of zero
# https://github.com/NVIDIA/spark-rapids/issues/6339
basic_map_gens_for_cast_to_string = [
MapGen(f(nullable=False), f()) for f in basic_gens_for_cast_to_string] + [
MapGen(DecimalGen(nullable=False),
DecimalGen(precision=7, scale=3)),
MapGen(DecimalGen(precision=7, scale=7, nullable=False),
DecimalGen(precision=12, scale=2))]
# GPU does not match CPU when casting these types to string; marked as xfail when testing
not_matched_gens_for_cast_to_string = [FloatGen, DoubleGen]
not_matched_struct_array_gens_for_cast_to_string = [f() for f in not_matched_gens_for_cast_to_string] + [decimal_gen_32bit_neg_scale]
not_matched_map_gens_for_cast_to_string = [MapGen(f(nullable = False), f()) for f in not_matched_gens_for_cast_to_string] + [MapGen(DecimalGen(precision=7, scale=-3, nullable=False), DecimalGen())]
single_level_array_gens_for_cast_to_string = [ArrayGen(sub_gen) for sub_gen in basic_array_struct_gens_for_cast_to_string]
nested_array_gens_for_cast_to_string = [
ArrayGen(ArrayGen(short_gen, max_length=10), max_length=10),
ArrayGen(ArrayGen(null_gen, max_length=10), max_length=10),
ArrayGen(MapGen(ByteGen(nullable=False), DateGen()), max_length=10),
ArrayGen(StructGen([['child0', byte_gen], ['child1', string_gen], ['child2', date_gen]]))
]
all_array_gens_for_cast_to_string = single_level_array_gens_for_cast_to_string + nested_array_gens_for_cast_to_string
def _assert_cast_to_string_equal (data_gen, conf):
"""
    helper function for casting supported types to string
"""
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, data_gen).select(f.col('a').cast("STRING")),
conf
)
@pytest.mark.parametrize('data_gen', all_array_gens_for_cast_to_string, ids=idfn)
@pytest.mark.parametrize('legacy', ['true', 'false'])
@pytest.mark.xfail(condition=is_spark_350_or_later(), reason='https://github.com/NVIDIA/spark-rapids/issues/9065')
def test_cast_array_to_string(data_gen, legacy):
_assert_cast_to_string_equal(
data_gen,
{"spark.sql.legacy.castComplexTypesToString.enabled": legacy})
@pytest.mark.parametrize('data_gen', [ArrayGen(sub) for sub in not_matched_struct_array_gens_for_cast_to_string], ids=idfn)
@pytest.mark.parametrize('legacy', ['true', 'false'])
@pytest.mark.xfail(reason='casting this type to string is not an exact match')
def test_cast_array_with_unmatched_element_to_string(data_gen, legacy):
_assert_cast_to_string_equal(
data_gen,
{"spark.rapids.sql.castFloatToString.enabled" : "true",
"spark.sql.legacy.castComplexTypesToString.enabled": legacy}
)
@pytest.mark.parametrize('data_gen', basic_map_gens_for_cast_to_string, ids=idfn)
@pytest.mark.parametrize('legacy', ['true', 'false'])
@pytest.mark.xfail(condition=is_spark_350_or_later(), reason='https://github.com/NVIDIA/spark-rapids/issues/9065')
def test_cast_map_to_string(data_gen, legacy):
_assert_cast_to_string_equal(
data_gen,
{"spark.sql.legacy.castComplexTypesToString.enabled": legacy})
@pytest.mark.parametrize('data_gen', not_matched_map_gens_for_cast_to_string, ids=idfn)
@pytest.mark.parametrize('legacy', ['true', 'false'])
@pytest.mark.xfail(reason='casting this type to string is not an exact match')
def test_cast_map_with_unmatched_element_to_string(data_gen, legacy):
_assert_cast_to_string_equal(
data_gen,
{"spark.rapids.sql.castFloatToString.enabled" : "true",
"spark.sql.legacy.castComplexTypesToString.enabled": legacy}
)
@pytest.mark.parametrize('data_gen', [StructGen([[str(i), gen] for i, gen in enumerate(basic_array_struct_gens_for_cast_to_string)] + [["map", MapGen(ByteGen(nullable=False), null_gen)]])], ids=idfn)
@pytest.mark.parametrize('legacy', ['true', 'false'])
@pytest.mark.xfail(condition=is_spark_350_or_later(), reason='https://github.com/NVIDIA/spark-rapids/issues/9065')
def test_cast_struct_to_string(data_gen, legacy):
_assert_cast_to_string_equal(
data_gen,
{"spark.sql.legacy.castComplexTypesToString.enabled": legacy}
)
# https://github.com/NVIDIA/spark-rapids/issues/2309
@pytest.mark.parametrize('cast_conf', ['LEGACY', 'SPARK311+'])
def test_one_nested_null_field_legacy_cast(cast_conf):
def was_broken_for_nested_null(spark):
data = [
(('foo',),),
((None,),),
(None,)
]
df = spark.createDataFrame(data)
return df.select(df._1.cast(StringType()))
assert_gpu_and_cpu_are_equal_collect(
was_broken_for_nested_null,
{"spark.sql.legacy.castComplexTypesToString.enabled": 'true' if cast_conf == 'LEGACY' else 'false'}
)
# https://github.com/NVIDIA/spark-rapids/issues/2315
@pytest.mark.parametrize('cast_conf', ['LEGACY', 'SPARK311+'])
def test_two_col_struct_legacy_cast(cast_conf):
def broken_df(spark):
key_data_gen = StructGen([
('a', IntegerGen(min_val=0, max_val=4)),
('b', IntegerGen(min_val=5, max_val=9)),
], nullable=False)
val_data_gen = IntegerGen()
df = two_col_df(spark, key_data_gen, val_data_gen)
return df.select(df.a.cast(StringType())).filter(df.b > 1)
assert_gpu_and_cpu_are_equal_collect(
broken_df,
{"spark.sql.legacy.castComplexTypesToString.enabled": 'true' if cast_conf == 'LEGACY' else 'false'}
)
@pytest.mark.parametrize('data_gen', [StructGen([["first", element_gen]]) for element_gen in not_matched_struct_array_gens_for_cast_to_string], ids=idfn)
@pytest.mark.parametrize('legacy', ['true', 'false'])
@pytest.mark.xfail(reason='casting this type to string is not an exact match')
def test_cast_struct_with_unmatched_element_to_string(data_gen, legacy):
_assert_cast_to_string_equal(
data_gen,
{"spark.rapids.sql.castFloatToString.enabled" : "true",
"spark.sql.legacy.castComplexTypesToString.enabled": legacy}
)
# The bug SPARK-37451 only affects the following versions
def is_neg_dec_scale_bug_version():
return ("3.1.1" <= spark_version() < "3.1.3") or ("3.2.0" <= spark_version() < "3.2.1")
@pytest.mark.skipif(is_neg_dec_scale_bug_version(), reason="RAPIDS doesn't support casting string to decimal for negative scale decimal in this version of Spark because of SPARK-37451")
def test_cast_string_to_negative_scale_decimal():
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, StringGen("[0-9]{9}")).select(
f.col('a').cast(DecimalType(8, -3))))
@pytest.mark.skipif(is_before_spark_330(), reason="ansi cast throws exception only in 3.3.0+")
@pytest.mark.parametrize('type', [DoubleType(), FloatType()], ids=idfn)
@pytest.mark.parametrize('invalid_value', [float("inf"), float("-inf"), float("nan")])
def test_cast_float_to_timestamp_ansi_for_nan_inf(type, invalid_value):
def fun(spark):
data = [invalid_value]
df = spark.createDataFrame(data, type)
return df.select(f.col('value').cast(TimestampType())).collect()
assert_gpu_and_cpu_error(fun, {"spark.sql.ansi.enabled": True}, "SparkDateTimeException")
# if floor(float) > Long.MaxValue or ceil(float) < Long.MinValue, an exception is thrown
@pytest.mark.skipif(is_before_spark_330(), reason="ansi cast throws exception only in 3.3.0+")
@pytest.mark.parametrize('type', [DoubleType(), FloatType()], ids=idfn)
@pytest.mark.parametrize('invalid_value', [float(LONG_MAX) + 100, float(LONG_MIN) - 100])
def test_cast_float_to_timestamp_ansi_overflow(type, invalid_value):
def fun(spark):
data = [invalid_value]
df = spark.createDataFrame(data, type)
return df.select(f.col('value').cast(TimestampType())).collect()
assert_gpu_and_cpu_error(fun, {"spark.sql.ansi.enabled": True}, "ArithmeticException")
@pytest.mark.skipif(is_before_spark_330(), reason='330+ throws exception in ANSI mode')
def test_cast_float_to_timestamp_side_effect():
def getDf(spark):
data = [(True, float(LONG_MAX) + 100), (False, float(1))]
distData = spark.sparkContext.parallelize(data, 1)
return spark.createDataFrame(distData, "c_b boolean, c_f float")
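    # The overflowing value only appears on the row where c_b is true, i.e. in the branch that is
    # not selected, so even in ANSI mode no exception should surface; the conditional must not
    # leak errors from the untaken branch (the "side effect" this test guards against).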
assert_gpu_and_cpu_are_equal_collect(
lambda spark: getDf(spark).selectExpr("if(c_b, cast(0 as timestamp), cast(c_f as timestamp))"),
conf=ansi_enabled_conf)
# non-ANSI mode: NaN/Inf values cast to timestamp yield null
@pytest.mark.parametrize('type', [DoubleType(), FloatType()], ids=idfn)
def test_cast_float_to_timestamp_for_nan_inf(type):
def fun(spark):
data = [(float("inf"),), (float("-inf"),), (float("nan"),)]
schema = StructType([StructField("value", type, True)])
df = spark.createDataFrame(data, schema)
return df.select(f.col('value').cast(TimestampType()))
assert_gpu_and_cpu_are_equal_collect(fun)
# gen for casting long to timestamp; the generated values correspond roughly to years [0000, 9999]
long_gen_to_timestamp = LongGen(max_val=math.floor((9999-1970) * 365 * 86400),
min_val=-math.floor(1970 * 365 * 86400))
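# Using 365-day years (ignoring leap days) slightly under-estimates both bounds, which keeps every
# generated value safely inside the supported timestamp range.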
# the overflow case of `cast(long to timestamp)` is moved to `TimestampSuite`
@pytest.mark.parametrize('ansi_enabled', [True, False], ids=['ANSI_ON', 'ANSI_OFF'])
@pytest.mark.parametrize('gen', [
byte_gen,
short_gen,
int_gen,
long_gen_to_timestamp], ids=idfn)
def test_cast_integral_to_timestamp(gen, ansi_enabled):
    if (is_before_spark_330() and ansi_enabled): # Spark < 3.3.0 does not support this cast in ANSI mode
pytest.skip()
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr("cast(a as timestamp)"),
conf={"spark.sql.ansi.enabled": ansi_enabled})
@pytest.mark.parametrize('ansi_enabled', [True, False], ids=['ANSI_ON', 'ANSI_OFF'])
def test_cast_float_to_timestamp(ansi_enabled):
    if (is_before_spark_330() and ansi_enabled): # Spark < 3.3.0 does not support this cast in ANSI mode
pytest.skip()
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, long_gen_to_timestamp)
.selectExpr("cast(cast(a as float) as timestamp)"),
conf={"spark.sql.ansi.enabled": ansi_enabled})
@pytest.mark.parametrize('ansi_enabled', [True, False], ids=['ANSI_ON', 'ANSI_OFF'])
def test_cast_double_to_timestamp(ansi_enabled):
    if (is_before_spark_330() and ansi_enabled): # Spark < 3.3.0 does not support this cast in ANSI mode
pytest.skip()
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, long_gen_to_timestamp)
.selectExpr("cast(cast(a as double) as timestamp)"),
conf={"spark.sql.ansi.enabled": ansi_enabled})
@pytest.mark.parametrize('invalid_and_type', [
(BYTE_MAX + 1, ByteType()),
(BYTE_MIN - 1, ByteType()),
(SHORT_MAX + 1, ShortType()),
(SHORT_MIN - 1, ShortType()),
(INT_MAX + 1, IntegerType()),
(INT_MIN - 1, IntegerType()),
], ids=idfn)
@pytest.mark.skipif(is_before_spark_330(), reason="Spark 330- does not ansi casting between numeric and timestamp")
def test_cast_timestamp_to_integral_ansi_overflow(invalid_and_type):
(invalid, to_type) = invalid_and_type
assert_gpu_and_cpu_error(
# pass seconds to `datetime.fromtimestamp`
lambda spark: spark.createDataFrame([datetime.fromtimestamp(invalid)], TimestampType())
.select(f.col("value").cast(to_type)).collect(),
conf=ansi_enabled_conf,
error_message="overflow")
@pytest.mark.skipif(is_before_spark_330(), reason="Spark 330- does not ansi casting between numeric and timestamp")
def test_cast_timestamp_to_numeric_ansi_no_overflow():
data = [datetime.fromtimestamp(i) for i in range(BYTE_MIN, BYTE_MAX + 1)]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.createDataFrame(data, TimestampType())
.selectExpr("cast(value as byte)", "cast(value as short)", "cast(value as int)", "cast(value as long)",
"cast(value as float)", "cast(value as double)"),
conf=ansi_enabled_conf)
def test_cast_timestamp_to_numeric_non_ansi():
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, timestamp_gen)
.selectExpr("cast(a as byte)", "cast(a as short)", "cast(a as int)", "cast(a as long)",
"cast(a as float)", "cast(a as double)"))
def test_cast_timestamp_to_string():
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, timestamp_gen)
.selectExpr("cast(a as string)"))
@pytest.mark.skipif(is_before_spark_330(), reason='DayTimeInterval is not supported before Pyspark 3.3.0')
def test_cast_day_time_interval_to_string():
_assert_cast_to_string_equal(DayTimeIntervalGen(start_field='day', end_field='day', special_cases=[MIN_DAY_TIME_INTERVAL, MAX_DAY_TIME_INTERVAL, timedelta(seconds=0)]), {})
_assert_cast_to_string_equal(DayTimeIntervalGen(start_field='day', end_field='hour', special_cases=[MIN_DAY_TIME_INTERVAL, MAX_DAY_TIME_INTERVAL, timedelta(seconds=0)]), {})
_assert_cast_to_string_equal(DayTimeIntervalGen(start_field='day', end_field='minute', special_cases=[MIN_DAY_TIME_INTERVAL, MAX_DAY_TIME_INTERVAL, timedelta(seconds=0)]), {})
_assert_cast_to_string_equal(DayTimeIntervalGen(start_field='day', end_field='second', special_cases=[MIN_DAY_TIME_INTERVAL, MAX_DAY_TIME_INTERVAL, timedelta(seconds=0)]), {})
_assert_cast_to_string_equal(DayTimeIntervalGen(start_field='hour', end_field='hour', special_cases=[MIN_DAY_TIME_INTERVAL, MAX_DAY_TIME_INTERVAL, timedelta(seconds=0)]), {})
_assert_cast_to_string_equal(DayTimeIntervalGen(start_field='hour', end_field='minute', special_cases=[MIN_DAY_TIME_INTERVAL, MAX_DAY_TIME_INTERVAL, timedelta(seconds=0)]), {})
_assert_cast_to_string_equal(DayTimeIntervalGen(start_field='hour', end_field='second', special_cases=[MIN_DAY_TIME_INTERVAL, MAX_DAY_TIME_INTERVAL, timedelta(seconds=0)]), {})
_assert_cast_to_string_equal(DayTimeIntervalGen(start_field='minute', end_field='minute', special_cases=[MIN_DAY_TIME_INTERVAL, MAX_DAY_TIME_INTERVAL, timedelta(seconds=0)]), {})
_assert_cast_to_string_equal(DayTimeIntervalGen(start_field='minute', end_field='second', special_cases=[MIN_DAY_TIME_INTERVAL, MAX_DAY_TIME_INTERVAL, timedelta(seconds=0)]), {})
_assert_cast_to_string_equal(DayTimeIntervalGen(start_field='second', end_field='second', special_cases=[MIN_DAY_TIME_INTERVAL, MAX_DAY_TIME_INTERVAL, timedelta(seconds=0)]), {})
@pytest.mark.skipif(is_before_spark_330(), reason='DayTimeInterval is not supported before Pyspark 3.3.0')
def test_cast_string_to_day_time_interval():
gen = DayTimeIntervalGen(start_field='day', end_field='second', special_cases=[MIN_DAY_TIME_INTERVAL, MAX_DAY_TIME_INTERVAL, timedelta(seconds=0)])
dtType = DayTimeIntervalType(0, 3) # 0 is day; 3 is second
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).select(f.col('a').cast(StringType()).cast(dtType)))
gen = DayTimeIntervalGen(start_field='hour', end_field='second', special_cases=[MIN_DAY_TIME_INTERVAL, MAX_DAY_TIME_INTERVAL, timedelta(seconds=0)])
dtType = DayTimeIntervalType(1, 3) # 1 is hour; 3 is second
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).select(f.col('a').cast(StringType()).cast(dtType)))
@pytest.mark.skipif(is_before_spark_330(), reason='DayTimeInterval is not supported before Pyspark 3.3.0')
@pytest.mark.parametrize('invalid_string', [
"INTERVAL 'xxx' DAY TO SECOND", # invalid format
"-999999999 04:00:54.775808000" # exceeds min value, min value is "-106751991 04:00:54.775808000"
])
def test_cast_string_to_day_time_interval_exception(invalid_string):
dtType = DayTimeIntervalType(0, 3)
def fun(spark):
data=[invalid_string]
df = spark.createDataFrame(data, StringType())
return df.select(f.col('value').cast(dtType)).collect()
assert_gpu_and_cpu_error(fun, {}, "java.lang.IllegalArgumentException")
@pytest.mark.skipif(is_before_spark_330(), reason='casting between interval and integral is not supported before Pyspark 3.3.0')
def test_cast_day_time_interval_to_integral_no_overflow():
second_dt_gen = DayTimeIntervalGen(start_field='second', end_field='second', min_value=timedelta(seconds=-128), max_value=timedelta(seconds=127), nullable=False)
gen = StructGen([('a', DayTimeIntervalGen(start_field='day', end_field='day', min_value=timedelta(seconds=-128 * 86400), max_value=timedelta(seconds=127 * 86400))),
('b', DayTimeIntervalGen(start_field='hour', end_field='hour', min_value=timedelta(seconds=-128 * 3600), max_value=timedelta(seconds=127 * 3600))),
('c', DayTimeIntervalGen(start_field='minute', end_field='minute', min_value=timedelta(seconds=-128 * 60), max_value=timedelta(seconds=127 * 60))),
('d', second_dt_gen),
('c_array', ArrayGen(second_dt_gen)),
('c_struct', StructGen([("a", second_dt_gen), ("b", second_dt_gen)])),
('c_map', MapGen(second_dt_gen, second_dt_gen))
], nullable=False)
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, gen).select(f.col('a').cast(ByteType()), f.col('a').cast(ShortType()), f.col('a').cast(IntegerType()), f.col('a').cast(LongType()),
f.col('b').cast(ByteType()), f.col('b').cast(ShortType()), f.col('b').cast(IntegerType()), f.col('b').cast(LongType()),
f.col('c').cast(ByteType()), f.col('c').cast(ShortType()), f.col('c').cast(IntegerType()), f.col('c').cast(LongType()),
f.col('d').cast(ByteType()), f.col('d').cast(ShortType()), f.col('d').cast(IntegerType()), f.col('d').cast(LongType()),
f.col('c_array').cast(ArrayType(ByteType())),
f.col('c_struct').cast(StructType([StructField('a', ShortType()), StructField('b', ShortType())])),
f.col('c_map').cast(MapType(IntegerType(), IntegerType()))
))
integral_gens_no_overflow = [
LongGen(min_val=math.ceil(LONG_MIN / 86400 / 1000000), max_val=math.floor(LONG_MAX / 86400 / 1000000), special_cases=[0, 1, -1]),
IntegerGen(min_val=math.ceil(INT_MIN / 86400 / 1000000), max_val=math.floor(INT_MAX / 86400 / 1000000), special_cases=[0, 1, -1]),
ShortGen(),
ByteGen(),
# StructGen([("a", ShortGen()), ("b", ByteGen())])
]
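# The bounds above divide each type's min/max by 86400 * 1000000 (microseconds per day), presumably
# so that the value expressed as days, once converted to microseconds, cannot overflow when cast to
# an INTERVAL DAY.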
@pytest.mark.skipif(is_before_spark_330(), reason='casting between interval and integral is not supported before Pyspark 3.3.0')
def test_cast_integral_to_day_time_interval_no_overflow():
    bounded_int_gen = IntegerGen(min_val=math.ceil(INT_MIN / 86400 / 1000000), max_val=math.floor(INT_MAX / 86400 / 1000000), special_cases=[0, 1, -1])
    bounded_long_gen = LongGen(min_val=math.ceil(LONG_MIN / 86400 / 1000000), max_val=math.floor(LONG_MAX / 86400 / 1000000), special_cases=[0, 1, -1], nullable=False)
    gen = StructGen([("a", bounded_int_gen),
                     ("b", bounded_long_gen),
                     ("c", ShortGen()),
                     ("d", ByteGen()),
                     ("c_struct", StructGen([("a", bounded_int_gen), ("b", bounded_long_gen)], nullable=False)),
                     ('c_array', ArrayGen(bounded_long_gen)),
                     ('c_map', MapGen(bounded_long_gen, bounded_int_gen))], nullable=False)
# day_time_field: 0 is day, 1 is hour, 2 is minute, 3 is second
day_type = DayTimeIntervalType(0, 0)
hour_type = DayTimeIntervalType(1, 1)
minute_type = DayTimeIntervalType(2, 2)
second_type = DayTimeIntervalType(3, 3)
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, gen).select(
f.col('a').cast(day_type), f.col('a').cast(hour_type), f.col('a').cast(minute_type), f.col('a').cast(second_type),
f.col('b').cast(day_type), f.col('b').cast(hour_type), f.col('b').cast(minute_type), f.col('b').cast(second_type),
f.col('c').cast(day_type), f.col('c').cast(hour_type), f.col('c').cast(minute_type), f.col('c').cast(second_type),
f.col('d').cast(day_type), f.col('d').cast(hour_type), f.col('d').cast(minute_type), f.col('d').cast(second_type),
f.col('c_struct').cast(StructType([StructField('a', day_type), StructField('b', hour_type)])),
f.col('c_array').cast(ArrayType(hour_type)),
f.col('c_map').cast(MapType(minute_type, second_type)),
))
cast_day_time_to_integral_overflow_pairs = [
(INT_MIN - 1, IntegerType()),
(INT_MAX + 1, IntegerType()),
(SHORT_MIN - 1, ShortType()),
(SHORT_MAX + 1, ShortType()),
(BYTE_MIN - 1, ByteType()),
(BYTE_MAX + 1, ByteType())
]
@pytest.mark.skipif(is_before_spark_330(), reason='casting between interval and integral is not supported before Pyspark 3.3.0')
@pytest.mark.parametrize('large_second, integral_type', cast_day_time_to_integral_overflow_pairs)
def test_cast_day_time_interval_to_integral_overflow(large_second, integral_type):
def getDf(spark):
return spark.createDataFrame([timedelta(seconds=large_second)], DayTimeIntervalType(DayTimeIntervalType.SECOND, DayTimeIntervalType.SECOND))
assert_gpu_and_cpu_error(
lambda spark: getDf(spark).select(f.col('value').cast(integral_type)).collect(),
conf={},
error_message="overflow")
day_time_interval_max_day = math.floor(LONG_MAX / (86400 * 1000000))
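# LONG_MAX here plays the role of the interval's microsecond count and 86400 * 1000000 is the number
# of microseconds per day, so this evaluates to 106751991, the largest whole day count an
# INTERVAL DAY can hold.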
large_days_overflow_pairs = [
(-day_time_interval_max_day - 1, LongType()),
(+day_time_interval_max_day + 1, LongType()),
(-day_time_interval_max_day - 1, IntegerType()),
(+day_time_interval_max_day + 1, IntegerType())
]
@pytest.mark.skipif(is_before_spark_330(), reason='casting between interval and integral is not supported before Pyspark 3.3.0')
@pytest.mark.parametrize('large_day,integral_type', large_days_overflow_pairs)
def test_cast_integral_to_day_time_interval_overflow(large_day, integral_type):
def getDf(spark):
return spark.createDataFrame([large_day], integral_type)
assert_gpu_and_cpu_error(
lambda spark: getDf(spark).select(f.col('value').cast(DayTimeIntervalType(DayTimeIntervalType.DAY, DayTimeIntervalType.DAY))).collect(),
conf={},
error_message="overflow")
@pytest.mark.skipif(is_before_spark_330(), reason='casting between interval and integral is not supported before Pyspark 3.3.0')
def test_cast_integral_to_day_time_side_effect():
def getDf(spark):
# INT_MAX > 106751991 (max value of interval day)
return spark.createDataFrame([(True, INT_MAX, LONG_MAX), (False, 0, 0)], "c_b boolean, c_i int, c_l long").repartition(1)
assert_gpu_and_cpu_are_equal_collect(
lambda spark: getDf(spark).selectExpr("if(c_b, interval 0 day, cast(c_i as interval day))", "if(c_b, interval 0 second, cast(c_l as interval second))"))
@pytest.mark.skipif(is_before_spark_330(), reason='casting between interval and integral is not supported before Pyspark 3.3.0')
def test_cast_day_time_to_integral_side_effect():
def getDf(spark):
# 106751991 > Byte.MaxValue
return spark.createDataFrame([(True, MAX_DAY_TIME_INTERVAL), (False, (timedelta(microseconds=0)))], "c_b boolean, c_dt interval day to second").repartition(1)
assert_gpu_and_cpu_are_equal_collect(lambda spark: getDf(spark).selectExpr("if(c_b, 0, cast(c_dt as byte))", "if(c_b, 0, cast(c_dt as short))", "if(c_b, 0, cast(c_dt as int))"))
def test_cast_binary_to_string():
assert_gpu_and_cpu_are_equal_collect(lambda spark: unary_op_df(spark, binary_gen).selectExpr("a", "CAST(a AS STRING) as str"))
def test_cast_int_to_string_not_UTC():
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, int_gen, 100).selectExpr("a", "CAST(a AS STRING) as str"),
{"spark.sql.session.timeZone": "+08"})
not_utc_fallback_test_params = [(timestamp_gen, 'STRING'), (timestamp_gen, 'DATE'),
(date_gen, 'TIMESTAMP'),
(SetValuesGen(StringType(), ['2023-03-20 10:38:50', '2023-03-20 10:39:02']), 'TIMESTAMP')]
@allow_non_gpu('ProjectExec')
@pytest.mark.parametrize('from_gen, to_type', not_utc_fallback_test_params)
def test_cast_fallback_not_UTC(from_gen, to_type):
assert_gpu_fallback_collect(
lambda spark: unary_op_df(spark, from_gen, 100).selectExpr("CAST(a AS {}) as casted".format(to_type)),
"Cast",
{"spark.sql.session.timeZone": "+08",
"spark.rapids.sql.castStringToTimestamp.enabled": "true"})
| spark-rapids-branch-23.10 | integration_tests/src/main/python/cast_test.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
from asserts import assert_gpu_and_cpu_are_equal_collect, run_with_cpu_and_gpu, assert_equal
from data_gen import *
from marks import *
from pyspark.sql.types import IntegerType
from spark_session import with_cpu_session, is_before_spark_320
from conftest import spark_jvm
# Use only a few distinct values to avoid generating too many partition folders.
part1_gen = SetValuesGen(IntegerType(), [-10, -1, 0, 1, 10])
part2_gen = SetValuesGen(LongType(), [-100, 0, 100])
file_formats = ['parquet', 'orc', 'csv',
pytest.param('json', marks=pytest.mark.xfail(reason='https://github.com/NVIDIA/spark-rapids/issues/7446'))]
if os.environ.get('INCLUDE_SPARK_AVRO_JAR', 'false') == 'true':
file_formats = file_formats + ['avro']
_enable_read_confs = {
'spark.rapids.sql.format.avro.enabled': 'true',
'spark.rapids.sql.format.avro.read.enabled': 'true',
'spark.rapids.sql.format.json.enabled': 'true',
'spark.rapids.sql.format.json.read.enabled': 'true',
}
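# The Avro and JSON GPU readers are off by default in the plugin, so these confs are layered onto
# each test's settings (via copy_and_update below) to make those formats exercisable on the GPU.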
def do_prune_partition_column_when_project(spark_tmp_path, prune_part_enabled, file_format,
gpu_project_enabled=True):
data_path = spark_tmp_path + '/PARTED_DATA/'
with_cpu_session(
lambda spark: three_col_df(spark, int_gen, part1_gen, part2_gen).write \
.partitionBy('b', 'c').format(file_format).save(data_path))
all_confs = copy_and_update(_enable_read_confs, {
'spark.rapids.sql.exec.ProjectExec': gpu_project_enabled,
'spark.sql.sources.useV1SourceList': file_format,
'spark.rapids.sql.fileScanPrunePartition.enabled': prune_part_enabled})
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.read.format(file_format).schema('a int, b int, c long') \
.load(data_path).select('a', 'c'),
conf=all_confs)
@pytest.mark.parametrize('prune_part_enabled', [False, True])
@pytest.mark.parametrize('file_format', file_formats)
def test_prune_partition_column_when_project(spark_tmp_path, prune_part_enabled, file_format):
do_prune_partition_column_when_project(spark_tmp_path, prune_part_enabled, file_format)
@allow_non_gpu('ProjectExec')
@pytest.mark.parametrize('prune_part_enabled', [False, True])
@pytest.mark.parametrize('file_format', file_formats)
def test_prune_partition_column_when_fallback_project(spark_tmp_path, prune_part_enabled,
file_format):
do_prune_partition_column_when_project(spark_tmp_path, prune_part_enabled, file_format,
gpu_project_enabled=False)
def do_prune_partition_column_when_filter_project(spark_tmp_path, prune_part_enabled, file_format,
filter_col, gpu_project_enabled=True,
gpu_filter_enabled=True):
data_path = spark_tmp_path + '/PARTED_DATA/'
with_cpu_session(
lambda spark: three_col_df(spark, int_gen, part1_gen, part2_gen).write \
.partitionBy('b', 'c').format(file_format).save(data_path))
all_confs = copy_and_update(_enable_read_confs, {
'spark.rapids.sql.exec.ProjectExec': gpu_project_enabled,
'spark.rapids.sql.exec.FilterExec': gpu_filter_enabled,
'spark.sql.sources.useV1SourceList': file_format,
'spark.rapids.sql.fileScanPrunePartition.enabled': prune_part_enabled})
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.read.format(file_format).schema('a int, b int, c long').load(data_path) \
.filter('{} > 0'.format(filter_col)) \
.select('a', 'c'),
conf=all_confs)
@pytest.mark.parametrize('prune_part_enabled', [False, True])
@pytest.mark.parametrize('file_format', file_formats)
@pytest.mark.parametrize('filter_col', ['a', 'b', 'c'])
def test_prune_partition_column_when_filter_project(spark_tmp_path, prune_part_enabled, filter_col,
file_format):
do_prune_partition_column_when_filter_project(spark_tmp_path, prune_part_enabled, file_format,
filter_col)
@allow_non_gpu('ProjectExec', 'FilterExec')
@pytest.mark.parametrize('prune_part_enabled', [False, True])
@pytest.mark.parametrize('file_format', file_formats)
@pytest.mark.parametrize('filter_col', ['a', 'b', 'c'])
def test_prune_partition_column_when_fallback_filter_and_project(spark_tmp_path, prune_part_enabled,
filter_col, file_format):
do_prune_partition_column_when_filter_project(spark_tmp_path, prune_part_enabled, file_format,
filter_col, gpu_project_enabled=False,
gpu_filter_enabled=False)
@allow_non_gpu('FilterExec')
@pytest.mark.parametrize('prune_part_enabled', [False, True])
@pytest.mark.parametrize('file_format', file_formats)
@pytest.mark.parametrize('filter_col', ['a', 'b', 'c'])
def test_prune_partition_column_when_fallback_filter_project(spark_tmp_path, prune_part_enabled,
filter_col, file_format):
do_prune_partition_column_when_filter_project(spark_tmp_path, prune_part_enabled, file_format,
filter_col, gpu_filter_enabled=False)
@allow_non_gpu('ProjectExec')
@pytest.mark.parametrize('prune_part_enabled', [False, True])
@pytest.mark.parametrize('file_format', file_formats)
@pytest.mark.parametrize('filter_col', ['a', 'b', 'c'])
def test_prune_partition_column_when_filter_fallback_project(spark_tmp_path, prune_part_enabled,
filter_col, file_format):
do_prune_partition_column_when_filter_project(spark_tmp_path, prune_part_enabled, file_format,
filter_col, gpu_project_enabled=False)
# This method creates two tables and saves them to partitioned Parquet/ORC files. The files are then
# read back using the read function that is passed in.
def create_contacts_table_and_read(is_partitioned, format, data_path, expected_schemata, func, conf, table_name):
full_name_type = StructGen([('first', StringGen()), ('middle', StringGen()), ('last', StringGen())])
name_type = StructGen([('first', StringGen()), ('last', StringGen())])
contacts_data_gen = StructGen([
('id', IntegerGen()),
('name', full_name_type),
('address', StringGen()),
('friends', ArrayGen(full_name_type, max_length=10, nullable=False))], nullable=False)
brief_contacts_data_gen = StructGen([
('id', IntegerGen()),
('name', name_type),
('address', StringGen())], nullable=False)
# We are adding the field 'p' twice just like it is being done in Spark tests
# https://github.com/apache/spark/blob/85e252e8503534009f4fb5ea005d44c9eda31447/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/SchemaPruningSuite.scala#L193
def contact_gen_df(spark, data_gen, partition):
gen = gen_df(spark, data_gen)
if is_partitioned:
return gen.withColumn('p', f.lit(partition))
else:
return gen
with_cpu_session(lambda spark: contact_gen_df(spark, contacts_data_gen, 1).write.format(format).save(data_path + f"/{table_name}/p=1"))
with_cpu_session(lambda spark: contact_gen_df(spark, brief_contacts_data_gen, 2).write.format(format).save(data_path + f"/{table_name}/p=2"))
# Schema to read in.
read_schema = contacts_data_gen.data_type.add("p", IntegerType(), True) if is_partitioned else contacts_data_gen.data_type
(from_cpu, cpu_df), (from_gpu, gpu_df) = run_with_cpu_and_gpu(
func(read_schema),
'COLLECT_WITH_DATAFRAME',
conf=conf)
jvm = spark_jvm()
jvm.org.apache.spark.sql.rapids.ExecutionPlanCaptureCallback.assertSchemataMatch(cpu_df._jdf, gpu_df._jdf, expected_schemata)
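    # assertSchemataMatch (above) checks on the JVM side that the schemas the CPU and GPU plans
    # actually read match expected_schemata -- i.e. that schema pruning really happened -- while
    # assert_equal below compares the returned rows.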
assert_equal(from_cpu, from_gpu)
# https://github.com/NVIDIA/spark-rapids/issues/8712
# https://github.com/NVIDIA/spark-rapids/issues/8713
# https://github.com/NVIDIA/spark-rapids/issues/8714
@pytest.mark.parametrize('query,expected_schemata', [("select friends.middle, friends from {} where p=1", "struct<friends:array<struct<first:string,middle:string,last:string>>>"),
pytest.param("select name.middle, address from {} where p=2", "struct<name:struct<middle:string>,address:string>", marks=pytest.mark.skip(reason='https://github.com/NVIDIA/spark-rapids/issues/8788')),
("select name.first from {} where name.first = 'Jane'", "struct<name:struct<first:string>>")])
@pytest.mark.parametrize('is_partitioned', [True, False])
@pytest.mark.parametrize('format', ["parquet", "orc"])
def test_select_complex_field(format, spark_tmp_path, query, expected_schemata, is_partitioned, spark_tmp_table_factory):
table_name = spark_tmp_table_factory.get()
data_path = spark_tmp_path + "/DATA"
def read_temp_view(schema):
def do_it(spark):
spark.read.format(format).schema(schema).load(data_path + f"/{table_name}").createOrReplaceTempView(table_name)
return spark.sql(query.format(table_name))
return do_it
conf={"spark.sql.parquet.enableVectorizedReader": "true"}
create_contacts_table_and_read(is_partitioned, format, data_path, expected_schemata, read_temp_view, conf, table_name)
# https://github.com/NVIDIA/spark-rapids/issues/8715
@pytest.mark.parametrize('query, expected_schemata', [("friend.First", "struct<friends:array<struct<first:string>>>"),
("friend.MIDDLE", "struct<friends:array<struct<middle:string>>>")])
@pytest.mark.skipif(is_before_spark_320(), reason='https://issues.apache.org/jira/browse/SPARK-34638')
@pytest.mark.parametrize('is_partitioned', [True, False])
@pytest.mark.parametrize('format', ["parquet", "orc"])
def test_nested_column_prune_on_generator_output(format, spark_tmp_path, query, expected_schemata, is_partitioned, spark_tmp_table_factory):
table_name = spark_tmp_table_factory.get()
data_path = spark_tmp_path + "/DATA"
def read_temp_view(schema):
def do_it(spark):
spark.read.format(format).schema(schema).load(data_path + f"/{table_name}").createOrReplaceTempView(table_name)
return spark.table(table_name).select(f.explode(f.col("friends")).alias("friend")).select(query)
return do_it
conf = {"spark.sql.caseSensitive": "false",
"spark.sql.parquet.enableVectorizedReader": "true"}
    create_contacts_table_and_read(is_partitioned, format, data_path, expected_schemata, read_temp_view, conf, table_name)
 | spark-rapids-branch-23.10 | integration_tests/src/main/python/prune_partition_column_test.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_equal, assert_gpu_and_cpu_writes_are_equal_collect, assert_gpu_fallback_write
from data_gen import *
from delta_lake_write_test import assert_gpu_and_cpu_delta_logs_equivalent, delta_meta_allow, delta_writes_enabled_conf
from delta_lake_merge_test import read_delta_path, read_delta_path_with_cdf, setup_dest_tables
from marks import *
from spark_session import is_before_spark_320, is_databricks_runtime, supports_delta_lake_deletion_vectors, \
with_cpu_session, with_gpu_session
delta_delete_enabled_conf = copy_and_update(delta_writes_enabled_conf,
{"spark.rapids.sql.command.DeleteCommand": "true",
"spark.rapids.sql.command.DeleteCommandEdge": "true"})
def delta_sql_delete_test(spark_tmp_path, use_cdf, dest_table_func, delete_sql,
check_func, partition_columns=None):
data_path = spark_tmp_path + "/DELTA_DATA"
def setup_tables(spark):
setup_dest_tables(spark, data_path, dest_table_func, use_cdf, partition_columns)
def do_delete(spark, path):
return spark.sql(delete_sql.format(path=path))
with_cpu_session(setup_tables)
check_func(data_path, do_delete)
def assert_delta_sql_delete_collect(spark_tmp_path, use_cdf, dest_table_func, delete_sql,
partition_columns=None,
conf=delta_delete_enabled_conf,
skip_sql_result_check=False):
def read_data(spark, path):
read_func = read_delta_path_with_cdf if use_cdf else read_delta_path
df = read_func(spark, path)
return df.sort(df.columns)
def checker(data_path, do_delete):
cpu_path = data_path + "/CPU"
gpu_path = data_path + "/GPU"
if not skip_sql_result_check:
# compare resulting dataframe from the delete operation (some older Spark versions return empty here)
cpu_result = with_cpu_session(lambda spark: do_delete(spark, cpu_path).collect(), conf=conf)
gpu_result = with_gpu_session(lambda spark: do_delete(spark, gpu_path).collect(), conf=conf)
assert_equal(cpu_result, gpu_result)
# compare table data results, read both via CPU to make sure GPU write can be read by CPU
cpu_result = with_cpu_session(lambda spark: read_data(spark, cpu_path).collect(), conf=conf)
gpu_result = with_cpu_session(lambda spark: read_data(spark, gpu_path).collect(), conf=conf)
assert_equal(cpu_result, gpu_result)
with_cpu_session(lambda spark: assert_gpu_and_cpu_delta_logs_equivalent(spark, data_path))
delta_sql_delete_test(spark_tmp_path, use_cdf, dest_table_func, delete_sql, checker,
partition_columns)
@allow_non_gpu("ExecutedCommandExec", *delta_meta_allow)
@delta_lake
@ignore_order
@pytest.mark.parametrize("disable_conf",
[{"spark.rapids.sql.format.delta.write.enabled": "false"},
{"spark.rapids.sql.format.parquet.enabled": "false"},
{"spark.rapids.sql.format.parquet.write.enabled": "false"},
{"spark.rapids.sql.command.DeleteCommand": "false"},
                          delta_writes_enabled_conf # the delete command is disabled by default
], ids=idfn)
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
def test_delta_delete_disabled_fallback(spark_tmp_path, disable_conf):
data_path = spark_tmp_path + "/DELTA_DATA"
def setup_tables(spark):
setup_dest_tables(spark, data_path,
dest_table_func=lambda spark: unary_op_df(spark, int_gen),
use_cdf=False)
def write_func(spark, path):
delete_sql="DELETE FROM delta.`{}`".format(path)
spark.sql(delete_sql)
with_cpu_session(setup_tables)
assert_gpu_fallback_write(write_func, read_delta_path, data_path,
"ExecutedCommandExec", disable_conf)
@allow_non_gpu("ExecutedCommandExec", *delta_meta_allow)
@delta_lake
@ignore_order
@pytest.mark.parametrize("use_cdf", [True, False], ids=idfn)
@pytest.mark.skipif(not supports_delta_lake_deletion_vectors(), \
reason="Deletion vectors new in Delta Lake 2.4 / Apache Spark 3.4")
def test_delta_deletion_vector_fallback(spark_tmp_path, use_cdf):
data_path = spark_tmp_path + "/DELTA_DATA"
def setup_tables(spark):
setup_dest_tables(spark, data_path,
dest_table_func=lambda spark: unary_op_df(spark, int_gen),
use_cdf=use_cdf, enable_deletion_vectors=True)
def write_func(spark, path):
delete_sql="DELETE FROM delta.`{}`".format(path)
spark.sql(delete_sql)
with_cpu_session(setup_tables)
disable_conf = copy_and_update(delta_delete_enabled_conf,
{"spark.databricks.delta.delete.deletionVectors.persistent": "true"})
assert_gpu_fallback_write(write_func, read_delta_path, data_path,
"ExecutedCommandExec", disable_conf)
@allow_non_gpu(*delta_meta_allow)
@delta_lake
@ignore_order
@pytest.mark.parametrize("use_cdf", [True, False], ids=idfn)
@pytest.mark.parametrize("partition_columns", [None, ["a"]], ids=idfn)
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
def test_delta_delete_entire_table(spark_tmp_path, use_cdf, partition_columns):
def generate_dest_data(spark):
return three_col_df(spark,
SetValuesGen(IntegerType(), range(5)),
SetValuesGen(StringType(), "abcdefg"),
string_gen)
delete_sql = "DELETE FROM delta.`{path}`"
    # Databricks recently changed how num_affected_rows is computed on deletes of entire files;
    # the RAPIDS Accelerator has yet to be updated accordingly.
# https://github.com/NVIDIA/spark-rapids/issues/8123
skip_sql_result = is_databricks_runtime()
assert_delta_sql_delete_collect(spark_tmp_path, use_cdf, generate_dest_data,
delete_sql, partition_columns,
skip_sql_result_check=skip_sql_result)
@allow_non_gpu(*delta_meta_allow)
@delta_lake
@ignore_order
@pytest.mark.parametrize("use_cdf", [True, False], ids=idfn)
@pytest.mark.parametrize("partition_columns", [["a"], ["a", "b"]], ids=idfn)
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
def test_delta_delete_partitions(spark_tmp_path, use_cdf, partition_columns):
def generate_dest_data(spark):
return three_col_df(spark,
SetValuesGen(IntegerType(), range(5)),
SetValuesGen(StringType(), "abcdefg"),
string_gen)
delete_sql = "DELETE FROM delta.`{path}` WHERE a = 3"
# Databricks recently changed how the num_affected_rows is computed
    # on deletes of entire files, and the RAPIDS Accelerator has not yet been updated to match.
# https://github.com/NVIDIA/spark-rapids/issues/8123
skip_sql_result = is_databricks_runtime()
assert_delta_sql_delete_collect(spark_tmp_path, use_cdf, generate_dest_data,
delete_sql, partition_columns,
skip_sql_result_check=skip_sql_result)
@allow_non_gpu(*delta_meta_allow)
@delta_lake
@ignore_order
@pytest.mark.parametrize("use_cdf", [True, False], ids=idfn)
@pytest.mark.parametrize("partition_columns", [None, ["a"]], ids=idfn)
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
def test_delta_delete_rows(spark_tmp_path, use_cdf, partition_columns):
# Databricks changes the number of files being written, so we cannot compare logs unless there's only one slice
num_slices_to_test = 1 if is_databricks_runtime() else 10
def generate_dest_data(spark):
return three_col_df(spark,
SetValuesGen(IntegerType(), range(5)),
SetValuesGen(StringType(), "abcdefg"),
string_gen, num_slices=num_slices_to_test)
delete_sql = "DELETE FROM delta.`{path}` WHERE b < 'd'"
assert_delta_sql_delete_collect(spark_tmp_path, use_cdf, generate_dest_data,
delete_sql, partition_columns)
@allow_non_gpu(*delta_meta_allow)
@delta_lake
@ignore_order
@pytest.mark.parametrize("use_cdf", [True, False], ids=idfn)
@pytest.mark.parametrize("partition_columns", [None, ["a"]], ids=idfn)
@pytest.mark.skipif(is_before_spark_320(), reason="Delta Lake writes are not supported before Spark 3.2.x")
def test_delta_delete_dataframe_api(spark_tmp_path, use_cdf, partition_columns):
from delta.tables import DeltaTable
data_path = spark_tmp_path + "/DELTA_DATA"
# Databricks changes the number of files being written, so we cannot compare logs unless there's only one slice
num_slices_to_test = 1 if is_databricks_runtime() else 10
def generate_dest_data(spark):
return three_col_df(spark,
SetValuesGen(IntegerType(), range(5)),
SetValuesGen(StringType(), "abcdefg"),
string_gen, num_slices=num_slices_to_test)
with_cpu_session(lambda spark: setup_dest_tables(spark, data_path, generate_dest_data, use_cdf, partition_columns))
def do_delete(spark, path):
dest_table = DeltaTable.forPath(spark, path)
dest_table.delete("b > 'c'")
read_func = read_delta_path_with_cdf if use_cdf else read_delta_path
assert_gpu_and_cpu_writes_are_equal_collect(do_delete, read_func, data_path,
conf=delta_delete_enabled_conf)
with_cpu_session(lambda spark: assert_gpu_and_cpu_delta_logs_equivalent(spark, data_path))
| spark-rapids-branch-23.10 | integration_tests/src/main/python/delta_lake_delete_test.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_gpu_and_cpu_are_equal_collect, assert_gpu_fallback_collect
from data_gen import *
from marks import allow_non_gpu, ignore_order
from spark_session import is_before_spark_320
# Spark 3.1.x does not normalize -0.0 and 0.0 before hashing, but the GPU version does,
# so float/double inputs are only included here on Spark 3.2.0+ (earlier versions fall back).
_xxhash_gens = [
null_gen,
boolean_gen,
byte_gen,
short_gen,
int_gen,
long_gen,
date_gen,
timestamp_gen,
decimal_gen_32bit,
decimal_gen_64bit,
decimal_gen_128bit]
if not is_before_spark_320():
_xxhash_gens += [float_gen, double_gen]
_struct_of_xxhash_gens = StructGen([(f"c{i}", g) for i, g in enumerate(_xxhash_gens)])
_xxhash_fallback_gens = single_level_array_gens + nested_array_gens_sample + [
all_basic_struct_gen,
struct_array_gen_no_nans,
_struct_of_xxhash_gens]
if is_before_spark_320():
_xxhash_fallback_gens += [float_gen, double_gen]
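# Illustrative note (not itself a test): on Spark 3.1.x the CPU hashes -0.0 and 0.0 to
# different values while the GPU would normalize them, so float/double inputs are kept in
# the fallback list here and test_xxhash64_fallback below expects them to stay on the CPU
# as a ProjectExec.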
@ignore_order(local=True)
@pytest.mark.parametrize("gen", _xxhash_gens, ids=idfn)
def test_xxhash64_single_column(gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, gen).selectExpr("a", "xxhash64(a)"))
@ignore_order(local=True)
def test_xxhash64_multi_column():
gen = StructGen(_struct_of_xxhash_gens.children, nullable=False)
col_list = ",".join(gen.data_type.fieldNames())
assert_gpu_and_cpu_are_equal_collect(
lambda spark : gen_df(spark, gen).selectExpr("c0", f"xxhash64({col_list})"))
@allow_non_gpu("ProjectExec")
@ignore_order(local=True)
@pytest.mark.parametrize("gen", _xxhash_fallback_gens, ids=idfn)
def test_xxhash64_fallback(gen):
assert_gpu_fallback_collect(
lambda spark : unary_op_df(spark, gen).selectExpr("a", "xxhash64(a)"),
"ProjectExec")
| spark-rapids-branch-23.10 | integration_tests/src/main/python/hashing_test.py |
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyspark.sql.types import *
from pyspark import SparkConf, SparkContext, SQLContext
import pyspark.sql.functions as f
import datetime
from argparse import ArgumentParser
from decimal import Decimal
from asserts import assert_gpu_and_cpu_are_equal_collect
from qa_nightly_sql import *
import pytest
from spark_session import with_cpu_session, is_jvm_charset_utf8
from marks import approximate_float, ignore_order, incompat, qarun
from data_gen import copy_and_update
def num_stringDf(spark):
print("### CREATE DATAFRAME 1 ####")
schema = StructType([StructField("strF", StringType()),
StructField("byteF", ByteType()),
StructField("shortF", ShortType()),
StructField("intF", IntegerType()),
StructField("longF", LongType()),
StructField("floatF", FloatType()),
StructField("doubleF", DoubleType()),
StructField("decimalF", DoubleType()),
StructField("booleanF", BooleanType()),
StructField("timestampF", TimestampType()),
StructField("dateF", DateType())])
dt = datetime.date(1990, 1, 1)
print(dt)
tm = datetime.datetime(2020,2,1,12,1,1)
data = [("FIRST", None, 500, 1200, 10, 10.001, 10.0003, 1.01, True, tm, dt),
("sold out", 20, 600, None, 20, 20.12, 2.000013, 2.01, True, tm, dt),
("take out", 20, 600, None, 20, 20.12, 2.000013, 2.01, True, tm, dt),
("Yuan", 20, 600, 2200, None, 20.12, 2.000013, 2.01, False, tm, dt),
("Alex", 30, 700, 3200, 30, None, 3.000013, 2.01, True, None, dt),
("Satish", 30, 700, 3200, 30, 30.12, None, 3.01, False, tm, dt),
("Gary", 40, 800, 4200, 40, 40.12, 4.000013, None, False, tm, dt),
("NVIDIA", 40, 800, 4200, -40, 40.12, 4.00013, 4.01, None, tm, dt),
("Mellanox", 40, 800, 4200, -20, -20.12, 4.00013, 4.01, False,None, dt),
(None, 30, 500, -3200, -20, 2.012, 4.000013, -4.01, False, tm, None),
("NVIDIASPARKTEAM", 0, 500, -3200, -20, 2.012, 4.000013, -4.01, False, tm, dt),
("NVIDIASPARKTEAM", 20, 0, -3200, -20, 2.012, 4.000013, -4.01, False, tm, dt),
("NVIDIASPARKTEAM", 0, 50, 0, -20, 2.012, 4.000013, -4.01, False, tm, dt),
(None, 0, 500, -3200, 0, 0.0, 0.0, -4.01, False, tm, dt),
("phuoc", 30, 500, 3200, -20, 20.12, 4.000013, 4.01, False, tm, dt)]
df = spark.createDataFrame(data,schema=schema)
df.createOrReplaceTempView("test_table")
# create dataframe for join & union operation testing
def num_stringDf_two(spark):
print("### CREATE DATAFRAME TWO ####")
schema = StructType([StructField("strF", StringType()),
StructField("byteF", ByteType()),
StructField("shortF", ShortType()),
StructField("intF", IntegerType()),
StructField("longF", LongType()),
StructField("floatF", FloatType()),
StructField("doubleF", DoubleType()),
StructField("decimalF", DoubleType()),
StructField("booleanF", BooleanType()),
StructField("timestampF", TimestampType()),
StructField("dateF", DateType())])
dt = datetime.date(2000, 1, 1)
print(dt)
tm = datetime.datetime(2022,12,1,12,1,1)
data = [("AL", 10, 500, 1200, 10, 10.001, 10.0003, 1.01, True, tm, dt),
("Jhon", 20, 600, 2200, 20, 20.12, 2.000013, 2.01, True, tm, dt),
("Alex", 30, 700, 3200, 30, 30.12, 3.000013, 3.01, True, tm, dt),
("Satish", 30, 700, 3200, 30, 30.12, 3.000013, 3.01, False, tm, dt),
("Kary", 40, 800, 4200, 40, 40.12, 4.000013, 4.01, False, tm, dt),
(None, 40, 800, 4200, -40, 40.12, 4.00013, 4.01, False, tm, dt),
(None, 40, 800, 4200, -20, -20.12, 4.00013, 4.01, False, tm, dt),
(None, 30, 500, -3200, -20, 2.012, 4.000013, -4.01, False, tm, dt),
("phuoc", 30, 500, 3200, -20, 20.12, 4.000013, 4.01, False, tm, dt)]
df = spark.createDataFrame(data, schema=schema)
df.createOrReplaceTempView("test_table1")
def num_stringDf_first_last(spark, field_name):
print("### CREATE DATAFRAME 1 ####")
schema = StructType([StructField("strF", StringType()),
StructField("byteF", ByteType()),
StructField("shortF", ShortType()),
StructField("intF", IntegerType()),
StructField("longF", LongType()),
StructField("floatF", FloatType()),
StructField("doubleF", DoubleType()),
StructField("decimalF", DoubleType()),
StructField("booleanF", BooleanType()),
StructField("timestampF", TimestampType()),
StructField("dateF", DateType())])
dt = datetime.date(1990, 1, 1)
print(dt)
tm = datetime.datetime(2020,2,1,12,1,1)
data = [("FIRST", None, 500, 1200, 10, 10.001, 10.0003, 1.01, True, tm, dt),
("sold out", 20, 600, None, 20, 20.12, 2.000013, 2.01, True, tm, dt),
("take out", 20, 600, None, 20, 20.12, 2.000013, 2.01, True, tm, dt),
("Yuan", 20, 600, 2200, None, 20.12, 2.000013, 2.01, False, tm, dt),
("Alex", 30, 700, 3200, 30, None, 3.000013, 2.01, True, None, dt),
("Satish", 30, 700, 3200, 30, 30.12, None, 3.01, False, tm, dt),
("Gary", 40, 800, 4200, 40, 40.12, 4.000013, None, False, tm, dt),
("NVIDIA", 40, 800, 4200, -40, 40.12, 4.00013, 4.01, None, tm, dt),
("Mellanox", 40, 800, 4200, -20, -20.12, 4.00013, 4.01, False,None, dt),
(None, 30, 500, -3200, -20, 2.012, 4.000013, -4.01, False, tm, None),
("NVIDIASPARKTEAM", 0, 500, -3200, -20, 2.012, 4.000013, -4.01, False, tm, dt),
("NVIDIASPARKTEAM", 20, 0, -3200, -20, 2.012, 4.000013, -4.01, False, tm, dt),
("NVIDIASPARKTEAM", 0, 50, 0, -20, 2.012, 4.000013, -4.01, False, tm, dt),
(None, 0, 500, -3200, 0, 0.0, 0.0, -4.01, False, tm, dt),
("phuoc", 30, 500, 3200, -20, 20.12, 4.000013, 4.01, False, tm, dt)]
# First/Last have a lot of odd issues with getting these tests to pass
    # They are non-deterministic unless you have a single partition that is sorted,
    # which is why we coalesce to a single partition and sort within the partition.
    # Also, for sort aggregations (used when variable-width types like strings are in the output),
    # Spark will re-sort the data based on the grouping key. Spark sort appears to
# have no guarantee about being a stable sort. In practice I have found that
# sorting the data desc with nulls last matches with what spark is doing, but
# there is no real guarantee that it will continue to work, so if the first/last
# tests fail on strings this might be the cause of it.
df = spark.createDataFrame(data,schema=schema)\
.coalesce(1)\
.sortWithinPartitions(f.col(field_name).desc_nulls_last())
df.createOrReplaceTempView("test_table")
def idfn(val):
return val[1]
_qa_conf = {
'spark.rapids.sql.variableFloatAgg.enabled': 'true',
'spark.rapids.sql.castStringToFloat.enabled': 'true',
'spark.rapids.sql.castFloatToIntegralTypes.enabled': 'true',
'spark.rapids.sql.castFloatToString.enabled': 'true',
'spark.rapids.sql.regexp.enabled': 'true'
}
_first_last_qa_conf = copy_and_update(_qa_conf, {
# some of the first/last tests need a single partition to work reliably when run on a large cluster.
'spark.sql.shuffle.partitions': '1'})
@approximate_float
@incompat
@qarun
@pytest.mark.parametrize('sql_query_line', SELECT_SQL, ids=idfn)
def test_select(sql_query_line, pytestconfig):
sql_query = sql_query_line[0]
if sql_query:
print(sql_query)
with_cpu_session(num_stringDf)
assert_gpu_and_cpu_are_equal_collect(lambda spark: spark.sql(sql_query), conf=_qa_conf)
@ignore_order
@approximate_float
@incompat
@qarun
@pytest.mark.parametrize('sql_query_line', SELECT_NEEDS_SORT_SQL, ids=idfn)
def test_needs_sort_select(sql_query_line, pytestconfig):
sql_query = sql_query_line[0]
if sql_query:
print(sql_query)
with_cpu_session(num_stringDf)
assert_gpu_and_cpu_are_equal_collect(lambda spark: spark.sql(sql_query), conf=_qa_conf)
@approximate_float
@incompat
@ignore_order(local=True)
@qarun
@pytest.mark.parametrize('sql_query_line', SELECT_JOIN_SQL, ids=idfn)
def test_select_join(sql_query_line, pytestconfig):
sql_query = sql_query_line[0]
if sql_query:
print(sql_query)
def init_tables(spark):
num_stringDf(spark)
if ("UNION" in sql_query) or ("JOIN" in sql_query):
num_stringDf_two(spark)
with_cpu_session(init_tables)
assert_gpu_and_cpu_are_equal_collect(lambda spark: spark.sql(sql_query), conf=_qa_conf)
@approximate_float
@incompat
@ignore_order(local=True)
@qarun
@pytest.mark.parametrize('sql_query_line', SELECT_PRE_ORDER_SQL, ids=idfn)
def test_select_first_last(sql_query_line, pytestconfig):
sql_query = sql_query_line[0]
if sql_query:
print(sql_query)
with_cpu_session(lambda spark: num_stringDf_first_last(spark, sql_query_line[2]))
assert_gpu_and_cpu_are_equal_collect(lambda spark: spark.sql(sql_query), conf=_first_last_qa_conf)
@approximate_float(abs=1e-6)
@incompat
@ignore_order(local=True)
@qarun
@pytest.mark.parametrize('sql_query_line', SELECT_FLOAT_SQL, ids=idfn)
def test_select_float_order_local(sql_query_line, pytestconfig):
sql_query = sql_query_line[0]
if sql_query:
print(sql_query)
with_cpu_session(num_stringDf)
assert_gpu_and_cpu_are_equal_collect(lambda spark: spark.sql(sql_query), conf=_qa_conf)
@approximate_float(abs=1e-6)
@incompat
@ignore_order(local=True)
@qarun
@pytest.mark.parametrize('sql_query_line', SELECT_REGEXP_SQL, ids=idfn)
@pytest.mark.skipif(not is_jvm_charset_utf8(), reason="Regular expressions require UTF-8")
def test_select_regexp(sql_query_line, pytestconfig):
sql_query = sql_query_line[0]
if sql_query:
print(sql_query)
with_cpu_session(num_stringDf)
assert_gpu_and_cpu_are_equal_collect(lambda spark: spark.sql(sql_query), conf=_qa_conf)
| spark-rapids-branch-23.10 | integration_tests/src/main/python/qa_nightly_select_test.py |
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_gpu_and_cpu_are_equal_collect
from data_gen import *
from spark_session import is_before_spark_320, is_jvm_charset_utf8
from pyspark.sql.types import *
import pyspark.sql.functions as f
def mk_str_gen(pattern):
return StringGen(pattern).with_special_case('').with_special_pattern('.{0,10}')
all_gens = all_gen + [NullGen(), binary_gen]
all_nested_gens = array_gens_sample + [ArrayGen(BinaryGen(max_length=10), max_length=10)] + struct_gens_sample + map_gens_sample
all_nested_gens_nonempty_struct = array_gens_sample + nonempty_struct_gens_sample
# Create dedicated data gens of nested type for 'if' tests here with two exclusions:
# 1) Excludes the nested 'NullGen' because it seems to be impossible to convert the
# 'NullType' to a SQL type string. But the top level NullGen is handled specially
# in 'gen_scalars_for_sql'.
# 2) Excludes the empty struct gen 'Struct()' because it leads to an error as below
# in both cpu and gpu runs.
# E: java.lang.AssertionError: assertion failed: each serializer expression should contain\
# at least one `BoundReference`
if_array_gens_sample = [ArrayGen(sub_gen) for sub_gen in all_gen] + nested_array_gens_sample
if_struct_gen = StructGen([['child'+str(ind), sub_gen] for ind, sub_gen in enumerate(all_gen)])
if_struct_gens_sample = [if_struct_gen,
StructGen([['child0', byte_gen], ['child1', if_struct_gen]]),
StructGen([['child0', ArrayGen(short_gen)], ['child1', double_gen]])]
if_nested_gens = if_array_gens_sample + if_struct_gens_sample
@pytest.mark.parametrize('data_gen', all_gens + if_nested_gens, ids=idfn)
def test_if_else(data_gen):
(s1, s2) = gen_scalars_for_sql(data_gen, 2, force_no_nulls=not isinstance(data_gen, NullGen))
null_lit = get_null_lit_string(data_gen.data_type)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : three_col_df(spark, boolean_gen, data_gen, data_gen).selectExpr(
'IF(TRUE, b, c)',
'IF(TRUE, {}, {})'.format(s1, null_lit),
'IF(FALSE, {}, {})'.format(s1, null_lit),
'IF(a, b, c)',
'IF(a, {}, c)'.format(s1),
'IF(a, b, {})'.format(s2),
'IF(a, {}, {})'.format(s1, s2),
'IF(a, b, {})'.format(null_lit),
'IF(a, {}, c)'.format(null_lit)))
# Map scalars are not really supported by Spark from Python without jumping through a lot of hoops,
# so for now we are going to skip them
@pytest.mark.parametrize('data_gen', map_gens_sample, ids=idfn)
def test_if_else_map(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : three_col_df(spark, boolean_gen, data_gen, data_gen).selectExpr(
'IF(TRUE, b, c)',
'IF(a, b, c)'))
@pytest.mark.order(1) # at the head of xdist worker queue if pytest-order is installed
@pytest.mark.parametrize('data_gen', all_gens + all_nested_gens, ids=idfn)
def test_case_when(data_gen):
num_cmps = 20
s1 = gen_scalar(data_gen, force_no_nulls=not isinstance(data_gen, NullGen))
# we want lots of false
bool_gen = BooleanGen().with_special_case(False, weight=1000.0)
gen_cols = [('_b' + str(x), bool_gen) for x in range(0, num_cmps)]
gen_cols = gen_cols + [('_c' + str(x), data_gen) for x in range(0, num_cmps)]
gen = StructGen(gen_cols, nullable=False)
command = f.when(f.col('_b0'), f.col('_c0'))
for x in range(1, num_cmps):
command = command.when(f.col('_b'+ str(x)), f.col('_c' + str(x)))
command = command.otherwise(s1)
data_type = data_gen.data_type
# `command` covers the case of (column, scalar) for values, so the following 3 ones
# are for
# (scalar, scalar) -> the default `otherwise` is a scalar.
# (column, column)
# (scalar, column)
# in sequence.
assert_gpu_and_cpu_are_equal_collect(
lambda spark : gen_df(spark, gen).select(command,
f.when(f.col('_b0'), s1),
f.when(f.col('_b0'), f.col('_c0')).otherwise(f.col('_c1')),
f.when(f.col('_b0'), s1).otherwise(f.col('_c0')),
f.when(f.col('_b0'), s1).when(f.lit(False), f.col('_c0')),
f.when(f.col('_b0'), s1).when(f.lit(True), f.col('_c0')),
f.when(f.col('_b0'), f.lit(None).cast(data_type)).otherwise(f.col('_c0')),
f.when(f.lit(False), f.col('_c0'))))
@pytest.mark.parametrize('data_gen', [float_gen, double_gen], ids=idfn)
def test_nanvl(data_gen):
s1 = gen_scalar(data_gen, force_no_nulls=not isinstance(data_gen, NullGen))
data_type = data_gen.data_type
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).select(
f.nanvl(f.col('a'), f.col('b')),
f.nanvl(f.col('a'), s1.cast(data_type)),
f.nanvl(f.lit(None).cast(data_type), f.col('b')),
f.nanvl(f.lit(float('nan')).cast(data_type), f.col('b'))))
@pytest.mark.parametrize('data_gen', all_basic_gens + decimal_gens, ids=idfn)
def test_nvl(data_gen):
(s1, s2) = gen_scalars_for_sql(data_gen, 2, force_no_nulls=not isinstance(data_gen, NullGen))
null_lit = get_null_lit_string(data_gen.data_type)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).selectExpr(
'nvl(a, b)',
'nvl(a, {})'.format(s2),
'nvl({}, b)'.format(s1),
'nvl({}, b)'.format(null_lit),
'nvl(a, {})'.format(null_lit)))
# nvl is translated into a 2-param version of coalesce
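# (If you want to see that outside of these tests, something like
#    spark.range(1).selectExpr("nvl(id, 0)").explain(True)
#  should show the expression rewritten to coalesce(id, 0) in the optimized plan.)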
# Exclude the empty struct gen 'Struct()' because it leads to an error as below
# in both cpu and gpu runs.
# E: java.lang.AssertionError: assertion failed: each serializer expression should contain\
# at least one `BoundReference`
@pytest.mark.parametrize('data_gen', all_gens + all_nested_gens_nonempty_struct + map_gens_sample, ids=idfn)
def test_coalesce(data_gen):
num_cols = 20
s1 = gen_scalar(data_gen, force_no_nulls=not isinstance(data_gen, NullGen))
# we want lots of nulls
gen = StructGen([('_c' + str(x), data_gen.copy_special_case(None, weight=1000.0))
for x in range(0, num_cols)], nullable=False)
command_args = [f.col('_c' + str(x)) for x in range(0, num_cols)]
command_args.append(s1)
data_type = data_gen.data_type
assert_gpu_and_cpu_are_equal_collect(
lambda spark : gen_df(spark, gen).select(
f.coalesce(*command_args)))
def test_coalesce_constant_output():
# Coalesce can allow a constant value as output. Technically Spark should mark this
# as foldable and turn it into a constant, but it does not, so make sure our code
# can deal with it. (This means something like + will get two constant scalar values)
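    # Here coalesce(5, id) always evaluates to the literal 5, so the add effectively sees
    # two scalar inputs (4 and 5) and must still produce 9 for every one of the 99 rows.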
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.range(1, 100).selectExpr("4 + coalesce(5, id) as nine"))
@pytest.mark.parametrize('data_gen', all_basic_gens + decimal_gens, ids=idfn)
def test_nvl2(data_gen):
(s1, s2) = gen_scalars_for_sql(data_gen, 2, force_no_nulls=not isinstance(data_gen, NullGen))
null_lit = get_null_lit_string(data_gen.data_type)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : three_col_df(spark, data_gen, data_gen, data_gen).selectExpr(
'nvl2(a, b, c)',
'nvl2(a, b, {})'.format(s2),
'nvl2({}, b, c)'.format(s1),
'nvl2({}, b, c)'.format(null_lit),
'nvl2(a, {}, c)'.format(null_lit)))
@pytest.mark.parametrize('data_gen', eq_gens_with_decimal_gen, ids=idfn)
def test_nullif(data_gen):
(s1, s2) = gen_scalars_for_sql(data_gen, 2, force_no_nulls=not isinstance(data_gen, NullGen))
null_lit = get_null_lit_string(data_gen.data_type)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).selectExpr(
'nullif(a, b)',
'nullif(a, {})'.format(s2),
'nullif({}, b)'.format(s1),
'nullif({}, b)'.format(null_lit),
'nullif(a, {})'.format(null_lit)))
@pytest.mark.parametrize('data_gen', eq_gens_with_decimal_gen, ids=idfn)
def test_ifnull(data_gen):
(s1, s2) = gen_scalars_for_sql(data_gen, 2, force_no_nulls=not isinstance(data_gen, NullGen))
null_lit = get_null_lit_string(data_gen.data_type)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).selectExpr(
'ifnull(a, b)',
'ifnull(a, {})'.format(s2),
'ifnull({}, b)'.format(s1),
'ifnull({}, b)'.format(null_lit),
'ifnull(a, {})'.format(null_lit)))
@pytest.mark.parametrize('data_gen', [IntegerGen().with_special_case(2147483647)], ids=idfn)
def test_conditional_with_side_effects_col_col(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr(
'IF(a < 2147483647, a + 1, a)'),
conf = ansi_enabled_conf)
@pytest.mark.parametrize('data_gen', [IntegerGen().with_special_case(2147483647)], ids=idfn)
def test_conditional_with_side_effects_col_scalar(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr(
'IF(a < 2147483647, a + 1, 2147483647)',
'IF(a >= 2147483646, 2147483647, a + 1)'),
conf = ansi_enabled_conf)
@pytest.mark.parametrize('data_gen', [mk_str_gen('[0-9]{1,20}')], ids=idfn)
@pytest.mark.skipif(not is_jvm_charset_utf8(), reason="regular expressions require UTF-8")
def test_conditional_with_side_effects_cast(data_gen):
test_conf=copy_and_update(
ansi_enabled_conf, {'spark.rapids.sql.regexp.enabled': True})
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr(
'IF(a RLIKE "^[0-9]{1,5}\\z", CAST(a AS INT), 0)'),
conf = test_conf)
@pytest.mark.parametrize('data_gen', [mk_str_gen('[0-9]{1,9}')], ids=idfn)
@pytest.mark.skipif(not is_jvm_charset_utf8(), reason="regular expressions require UTF-8")
def test_conditional_with_side_effects_case_when(data_gen):
test_conf=copy_and_update(
ansi_enabled_conf, {'spark.rapids.sql.regexp.enabled': True})
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr(
'CASE \
WHEN a RLIKE "^[0-9]{1,3}\\z" THEN CAST(a AS INT) \
WHEN a RLIKE "^[0-9]{4,6}\\z" THEN CAST(a AS INT) + 123 \
ELSE -1 END'),
conf = test_conf)
@pytest.mark.parametrize('data_gen', [mk_str_gen('[a-z]{0,3}')], ids=idfn)
def test_conditional_with_side_effects_sequence(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr(
'CASE \
WHEN length(a) > 0 THEN sequence(1, length(a), 1) \
ELSE null END'),
conf = ansi_enabled_conf)
@pytest.mark.skipif(is_before_spark_320(), reason='Earlier versions of Spark cannot cast sequence to string')
@pytest.mark.parametrize('data_gen', [mk_str_gen('[a-z]{0,3}')], ids=idfn)
def test_conditional_with_side_effects_sequence_cast(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr(
'CASE \
WHEN length(a) > 0 THEN CAST(sequence(1, length(a), 1) AS STRING) \
ELSE null END'),
conf = ansi_enabled_conf)
@pytest.mark.parametrize('data_gen', [ArrayGen(mk_str_gen('[a-z]{0,3}'))], ids=idfn)
@pytest.mark.parametrize('ansi_enabled', ['true', 'false'])
def test_conditional_with_side_effects_element_at(data_gen, ansi_enabled):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr(
'CASE WHEN size(a) > 1 THEN element_at(a, 2) ELSE null END'),
conf = {'spark.sql.ansi.enabled': ansi_enabled})
@pytest.mark.parametrize('data_gen', [ArrayGen(mk_str_gen('[a-z]{0,3}'))], ids=idfn)
@pytest.mark.parametrize('ansi_enabled', ['true', 'false'])
def test_conditional_with_side_effects_array_index(data_gen, ansi_enabled):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr(
'CASE WHEN size(a) > 1 THEN a[1] ELSE null END'),
conf = {'spark.sql.ansi.enabled': ansi_enabled})
@pytest.mark.parametrize('map_gen',
[MapGen(StringGen(pattern='key_[0-9]', nullable=False),
mk_str_gen('[a-z]{0,3}'), max_length=6)])
@pytest.mark.parametrize('data_gen', [StringGen(pattern='neverempty_[0-9]', nullable=False)])
@pytest.mark.parametrize('ansi_enabled', ['true', 'false'])
def test_conditional_with_side_effects_map_key_not_found(map_gen, data_gen, ansi_enabled):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, map_gen, data_gen).selectExpr(
'CASE WHEN length(b) = 0 THEN a["not_found"] ELSE null END'),
conf = {'spark.sql.ansi.enabled': ansi_enabled})
@pytest.mark.parametrize('data_gen', [ShortGen().with_special_case(SHORT_MIN)], ids=idfn)
@pytest.mark.parametrize('ansi_enabled', ['true', 'false'])
def test_conditional_with_side_effects_abs(data_gen, ansi_enabled):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr(
'CASE WHEN a > -32768 THEN abs(a) ELSE null END'),
conf = {'spark.sql.ansi.enabled': ansi_enabled})
@pytest.mark.parametrize('data_gen', [ShortGen().with_special_case(SHORT_MIN)], ids=idfn)
@pytest.mark.parametrize('ansi_enabled', ['true', 'false'])
def test_conditional_with_side_effects_unary_minus(data_gen, ansi_enabled):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr(
'CASE WHEN a > -32768 THEN -a ELSE null END'),
conf = {'spark.sql.ansi.enabled': ansi_enabled})
| spark-rapids-branch-23.10 | integration_tests/src/main/python/conditionals_test.py |
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import pytest
from asserts import assert_gpu_and_cpu_are_equal_collect, assert_gpu_and_cpu_are_equal_sql, assert_gpu_fallback_collect, assert_gpu_sql_fallback_collect
from data_gen import *
from marks import *
from pyspark.sql.types import *
from pyspark.sql.types import NumericType
from pyspark.sql.window import Window
import pyspark.sql.functions as f
from spark_session import is_before_spark_320, is_databricks113_or_later
import warnings
_grpkey_longs_with_no_nulls = [
('a', RepeatSeqGen(LongGen(nullable=False), length=20)),
('b', IntegerGen()),
('c', IntegerGen())]
_grpkey_longs_with_nulls = [
('a', RepeatSeqGen(LongGen(nullable=(True, 10.0)), length=20)),
('b', IntegerGen()),
('c', IntegerGen())]
_grpkey_longs_with_dates = [
('a', RepeatSeqGen(LongGen(), length=2048)),
('b', DateGen(nullable=False, start=date(year=2020, month=1, day=1), end=date(year=2020, month=12, day=31))),
('c', IntegerGen())]
_grpkey_longs_with_nullable_dates = [
('a', RepeatSeqGen(LongGen(nullable=False), length=20)),
('b', DateGen(nullable=(True, 5.0), start=date(year=2020, month=1, day=1), end=date(year=2020, month=12, day=31))),
('c', IntegerGen())]
_grpkey_longs_with_timestamps = [
('a', RepeatSeqGen(LongGen(), length=2048)),
('b', TimestampGen(nullable=False)),
('c', IntegerGen())]
_grpkey_longs_with_nullable_timestamps = [
('a', RepeatSeqGen(LongGen(nullable=False), length=20)),
('b', TimestampGen(nullable=(True, 5.0))),
('c', IntegerGen())]
_grpkey_longs_with_decimals = [
('a', RepeatSeqGen(LongGen(nullable=False), length=20)),
('b', DecimalGen(precision=18, scale=3, nullable=False)),
('c', DecimalGen(precision=18, scale=3))]
_grpkey_longs_with_nullable_decimals = [
('a', RepeatSeqGen(LongGen(nullable=(True, 10.0)), length=20)),
('b', DecimalGen(precision=18, scale=10, nullable=True)),
('c', DecimalGen(precision=18, scale=10, nullable=True))]
_grpkey_longs_with_nullable_larger_decimals = [
('a', RepeatSeqGen(LongGen(nullable=(True, 10.0)), length=20)),
('b', DecimalGen(precision=23, scale=10, nullable=True)),
('c', DecimalGen(precision=23, scale=10, nullable=True))]
_grpkey_longs_with_nullable_largest_decimals = [
('a', RepeatSeqGen(LongGen(nullable=(True, 10.0)), length=20)),
('b', DecimalGen(precision=38, scale=2, nullable=True)),
('c', DecimalGen(precision=38, scale=2, nullable=True))]
_grpkey_longs_with_nullable_floats = [
('a', RepeatSeqGen(LongGen(nullable=(True, 10.0)), length=20)),
('b', FloatGen(nullable=True)),
('c', IntegerGen(nullable=True))]
_grpkey_longs_with_nullable_doubles = [
('a', RepeatSeqGen(LongGen(nullable=(True, 10.0)), length=20)),
('b', DoubleGen(nullable=True)),
('c', IntegerGen(nullable=True))]
_grpkey_decimals_with_nulls = [
('a', RepeatSeqGen(LongGen(nullable=(True, 10.0)), length=20)),
('b', IntegerGen()),
('c', DecimalGen(precision=8, scale=3, nullable=True))]
_grpkey_byte_with_nulls = [
('a', RepeatSeqGen(int_gen, length=20)),
    # restrict the generated values via min_val/max_val so the window range calculations do not overflow
('b', ByteGen(nullable=True, min_val=-98, max_val=98, special_cases=[])),
('c', IntegerGen())]
_grpkey_short_with_nulls = [
('a', RepeatSeqGen(int_gen, length=20)),
    # restrict the generated values via min_val/max_val so the window range calculations do not overflow
('b', ShortGen(nullable=True, min_val=-32700, max_val=32700, special_cases=[])),
('c', IntegerGen())]
_grpkey_int_with_nulls = [
('a', RepeatSeqGen(int_gen, length=20)),
    # restrict the generated values via min_val/max_val so the window range calculations do not overflow
('b', IntegerGen(nullable=True, min_val=-2147483000, max_val=2147483000, special_cases=[])),
('c', IntegerGen())]
_grpkey_long_with_nulls = [
('a', RepeatSeqGen(int_gen, length=20)),
    # restrict the generated values via min_val/max_val so the window range calculations do not overflow
('b', LongGen(nullable=True, min_val=-9223372036854775000, max_val=9223372036854775000, special_cases=[])),
('c', IntegerGen())]
_grpkey_date_with_nulls = [
('a', RepeatSeqGen(int_gen, length=20)),
('b', DateGen(nullable=(True, 5.0), start=date(year=2020, month=1, day=1), end=date(year=2020, month=12, day=31))),
('c', IntegerGen())]
_grpkey_byte_with_nulls_with_overflow = [
('a', IntegerGen()),
('b', ByteGen(nullable=True))]
_grpkey_short_with_nulls_with_overflow = [
('a', IntegerGen()),
('b', ShortGen(nullable=True))]
_grpkey_int_with_nulls_with_overflow = [
('a', IntegerGen()),
('b', IntegerGen(nullable=True))]
_grpkey_long_with_nulls_with_overflow = [
('a', IntegerGen()),
('b', LongGen(nullable=True))]
part_and_order_gens = [long_gen, DoubleGen(no_nans=True, special_cases=[]),
string_gen, boolean_gen, timestamp_gen, DecimalGen(precision=18, scale=1),
DecimalGen(precision=38, scale=1)]
running_part_and_order_gens = [long_gen, DoubleGen(no_nans=True, special_cases=[]),
string_gen, byte_gen, timestamp_gen, DecimalGen(precision=18, scale=1),
DecimalGen(precision=38, scale=1)]
lead_lag_data_gens = [long_gen, DoubleGen(no_nans=True, special_cases=[]),
boolean_gen, timestamp_gen, string_gen, DecimalGen(precision=18, scale=3),
DecimalGen(precision=38, scale=4),
StructGen(children=[
['child_int', IntegerGen()],
['child_time', DateGen()],
['child_string', StringGen()]
])]
_float_conf = {'spark.rapids.sql.variableFloatAgg.enabled': 'true',
'spark.rapids.sql.castStringToFloat.enabled': 'true'
}
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', [SetValuesGen(t, [math.nan, None]) for t in [FloatType(), DoubleType()]], ids=idfn)
def test_float_window_min_max_all_nans(data_gen):
w = Window().partitionBy('a')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, byte_gen, data_gen)
.withColumn("min_b", f.min('a').over(w))
.withColumn("max_b", f.max('a').over(w))
)
@ignore_order
@pytest.mark.parametrize('data_gen', [decimal_gen_128bit], ids=idfn)
def test_decimal128_count_window(data_gen):
assert_gpu_and_cpu_are_equal_sql(
lambda spark: three_col_df(spark, byte_gen, UniqueLongGen(), data_gen),
'window_agg_table',
'select '
' count(c) over '
' (partition by a order by b asc '
' rows between 2 preceding and 10 following) as count_c_asc '
'from window_agg_table')
@ignore_order
@pytest.mark.parametrize('data_gen', [decimal_gen_128bit], ids=idfn)
def test_decimal128_count_window_no_part(data_gen):
assert_gpu_and_cpu_are_equal_sql(
lambda spark: two_col_df(spark, UniqueLongGen(), data_gen),
'window_agg_table',
'select '
' count(b) over '
' (order by a asc '
' rows between 2 preceding and 10 following) as count_b_asc '
'from window_agg_table')
@ignore_order
@pytest.mark.parametrize('data_gen', decimal_gens, ids=idfn)
def test_decimal_sum_window(data_gen):
assert_gpu_and_cpu_are_equal_sql(
lambda spark: three_col_df(spark, byte_gen, UniqueLongGen(), data_gen),
'window_agg_table',
'select '
' sum(c) over '
' (partition by a order by b asc '
' rows between 2 preceding and 10 following) as sum_c_asc '
'from window_agg_table')
@ignore_order
@pytest.mark.parametrize('data_gen', decimal_gens, ids=idfn)
def test_decimal_sum_window_no_part(data_gen):
assert_gpu_and_cpu_are_equal_sql(
lambda spark: two_col_df(spark, UniqueLongGen(), data_gen),
'window_agg_table',
'select '
' sum(b) over '
' (order by a asc '
' rows between 2 preceding and 10 following) as sum_b_asc '
'from window_agg_table')
@ignore_order
@pytest.mark.parametrize('data_gen', decimal_gens, ids=idfn)
def test_decimal_running_sum_window(data_gen):
assert_gpu_and_cpu_are_equal_sql(
lambda spark: three_col_df(spark, byte_gen, UniqueLongGen(), data_gen),
'window_agg_table',
'select '
' sum(c) over '
' (partition by a order by b asc '
' rows between UNBOUNDED PRECEDING AND CURRENT ROW) as sum_c_asc '
'from window_agg_table',
conf = {'spark.rapids.sql.batchSizeBytes': '100'})
@ignore_order
@pytest.mark.parametrize('data_gen', decimal_gens, ids=idfn)
def test_decimal_running_sum_window_no_part(data_gen):
assert_gpu_and_cpu_are_equal_sql(
lambda spark: two_col_df(spark, UniqueLongGen(), data_gen),
'window_agg_table',
'select '
' sum(b) over '
' (order by a asc '
' rows between UNBOUNDED PRECEDING AND CURRENT ROW) as sum_b_asc '
'from window_agg_table',
conf = {'spark.rapids.sql.batchSizeBytes': '100'})
@pytest.mark.xfail(reason="[UNSUPPORTED] Ranges over order by byte column overflow "
"(https://github.com/NVIDIA/spark-rapids/pull/2020#issuecomment-838127070)")
@ignore_order
@pytest.mark.parametrize('data_gen', [_grpkey_byte_with_nulls_with_overflow], ids=idfn)
def test_window_aggs_for_ranges_numeric_byte_overflow(data_gen):
assert_gpu_and_cpu_are_equal_sql(
lambda spark: gen_df(spark, data_gen, length=2048),
"window_agg_table",
'select '
' sum(b) over '
' (partition by a order by b asc '
        ' range between 127 preceding and 127 following) as sum_c_asc '
'from window_agg_table',
conf={'spark.rapids.sql.window.range.byte.enabled': True})
@pytest.mark.xfail(reason="[UNSUPPORTED] Ranges over order by short column overflow "
"(https://github.com/NVIDIA/spark-rapids/pull/2020#issuecomment-838127070)")
@ignore_order
@pytest.mark.parametrize('data_gen', [_grpkey_short_with_nulls_with_overflow], ids=idfn)
def test_window_aggs_for_ranges_numeric_short_overflow(data_gen):
assert_gpu_and_cpu_are_equal_sql(
lambda spark: gen_df(spark, data_gen, length=2048),
"window_agg_table",
'select '
' sum(b) over '
' (partition by a order by b asc '
        ' range between 32767 preceding and 32767 following) as sum_c_asc '
'from window_agg_table',
conf={'spark.rapids.sql.window.range.short.enabled': True})
@pytest.mark.xfail(reason="[UNSUPPORTED] Ranges over order by int column overflow "
"(https://github.com/NVIDIA/spark-rapids/pull/2020#issuecomment-838127070)")
@ignore_order
@pytest.mark.parametrize('data_gen', [_grpkey_int_with_nulls_with_overflow], ids=idfn)
def test_window_aggs_for_ranges_numeric_int_overflow(data_gen):
assert_gpu_and_cpu_are_equal_sql(
lambda spark: gen_df(spark, data_gen, length=2048),
"window_agg_table",
'select '
' sum(b) over '
' (partition by a order by b asc '
        ' range between 2147483647 preceding and 2147483647 following) as sum_c_asc '
'from window_agg_table')
@pytest.mark.xfail(reason="[UNSUPPORTED] Ranges over order by long column overflow "
"(https://github.com/NVIDIA/spark-rapids/pull/2020#issuecomment-838127070)")
@ignore_order
@pytest.mark.parametrize('data_gen', [_grpkey_long_with_nulls_with_overflow], ids=idfn)
def test_window_aggs_for_ranges_numeric_long_overflow(data_gen):
assert_gpu_and_cpu_are_equal_sql(
lambda spark: gen_df(spark, data_gen, length=2048),
"window_agg_table",
'select '
' sum(b) over '
' (partition by a order by b asc '
        ' range between 9223372036854775807 preceding and 9223372036854775807 following) as sum_c_asc '
'from window_agg_table')
# In a distributed setup the order of the partitions returned might be different, so we must ignore the order
# but small batch sizes can make sort very slow, so do the final order by locally
@ignore_order(local=True)
@pytest.mark.parametrize('batch_size', ['1000', '1g'], ids=idfn) # set the batch size so we can test multiple stream batches
@pytest.mark.parametrize('data_gen', [
_grpkey_byte_with_nulls,
_grpkey_short_with_nulls,
_grpkey_int_with_nulls,
_grpkey_long_with_nulls,
_grpkey_date_with_nulls,
], ids=idfn)
def test_window_aggs_for_range_numeric_date(data_gen, batch_size):
conf = {'spark.rapids.sql.batchSizeBytes': batch_size,
'spark.rapids.sql.window.range.byte.enabled': True,
'spark.rapids.sql.window.range.short.enabled': True}
assert_gpu_and_cpu_are_equal_sql(
lambda spark: gen_df(spark, data_gen, length=2048),
'window_agg_table',
'select '
' sum(c) over '
' (partition by a order by b asc '
' range between 1 preceding and 3 following) as sum_c_asc, '
' avg(c) over '
' (partition by a order by b asc '
' range between 1 preceding and 3 following) as avg_b_asc, '
' max(c) over '
' (partition by a order by b asc '
' range between 1 preceding and 3 following) as max_b_desc, '
' min(c) over '
' (partition by a order by b asc '
' range between 1 preceding and 3 following) as min_b_asc, '
' count(1) over '
' (partition by a order by b asc '
' range between CURRENT ROW and UNBOUNDED following) as count_1_asc, '
' count(c) over '
' (partition by a order by b asc '
' range between CURRENT ROW and UNBOUNDED following) as count_b_asc, '
' avg(c) over '
' (partition by a order by b asc '
' range between UNBOUNDED preceding and CURRENT ROW) as avg_b_unbounded, '
' sum(c) over '
' (partition by a order by b asc '
' range between UNBOUNDED preceding and CURRENT ROW) as sum_b_unbounded, '
' max(c) over '
' (partition by a order by b asc '
' range between UNBOUNDED preceding and UNBOUNDED following) as max_b_unbounded '
'from window_agg_table ',
conf = conf)
# In a distributed setup the order of the partitions returned might be different, so we must ignore the order
# but small batch sizes can make sort very slow, so do the final order by locally
@ignore_order(local=True)
@pytest.mark.parametrize('batch_size', ['1000', '1g'], ids=idfn) # set the batch size so we can test multiple stream batches
@pytest.mark.parametrize('data_gen', [_grpkey_longs_with_no_nulls,
_grpkey_longs_with_nulls,
_grpkey_longs_with_dates,
_grpkey_longs_with_nullable_dates,
_grpkey_longs_with_decimals,
_grpkey_longs_with_nullable_decimals,
_grpkey_longs_with_nullable_larger_decimals,
_grpkey_decimals_with_nulls], ids=idfn)
def test_window_aggs_for_rows(data_gen, batch_size):
conf = {'spark.rapids.sql.batchSizeBytes': batch_size,
'spark.rapids.sql.castFloatToDecimal.enabled': True}
assert_gpu_and_cpu_are_equal_sql(
lambda spark : gen_df(spark, data_gen, length=2048),
"window_agg_table",
'select '
' sum(c) over '
' (partition by a order by b,c asc rows between 1 preceding and 1 following) as sum_c_asc, '
' max(c) over '
' (partition by a order by b desc, c desc rows between 2 preceding and 1 following) as max_c_desc, '
' min(c) over '
' (partition by a order by b,c rows between 2 preceding and current row) as min_c_asc, '
' count(1) over '
' (partition by a order by b,c rows between UNBOUNDED preceding and UNBOUNDED following) as count_1, '
' count(c) over '
' (partition by a order by b,c rows between UNBOUNDED preceding and UNBOUNDED following) as count_c, '
' avg(c) over '
' (partition by a order by b,c rows between UNBOUNDED preceding and UNBOUNDED following) as avg_c, '
' rank() over '
' (partition by a order by b,c rows between UNBOUNDED preceding and CURRENT ROW) as rank_val, '
' dense_rank() over '
' (partition by a order by b,c rows between UNBOUNDED preceding and CURRENT ROW) as dense_rank_val, '
' percent_rank() over '
' (partition by a order by b,c rows between UNBOUNDED preceding and CURRENT ROW) as percent_rank_val, '
' row_number() over '
' (partition by a order by b,c rows between UNBOUNDED preceding and CURRENT ROW) as row_num '
'from window_agg_table ',
conf = conf)
@ignore_order(local=True)
@pytest.mark.parametrize('batch_size', ['1000', '1g'], ids=idfn)
@pytest.mark.parametrize('data_gen', [
[('grp', RepeatSeqGen(int_gen, length=20)), # Grouping column.
('ord', UniqueLongGen(nullable=True)), # Order-by column (after cast to STRING).
('agg', IntegerGen())] # Aggregation column.
], ids=idfn)
def test_range_windows_with_string_order_by_column(data_gen, batch_size):
"""
Tests that RANGE window functions can be used with STRING order-by columns.
"""
assert_gpu_and_cpu_are_equal_sql(
lambda spark: gen_df(spark, data_gen, length=2048),
'window_agg_table',
'SELECT '
' ROW_NUMBER() OVER '
' (PARTITION BY grp ORDER BY CAST(ord AS STRING) ASC ) as row_num_asc, '
' RANK() OVER '
' (PARTITION BY grp ORDER BY CAST(ord AS STRING) DESC ) as rank_desc, '
' DENSE_RANK() OVER '
' (PARTITION BY grp ORDER BY CAST(ord AS STRING) ASC ) as dense_rank_asc, '
' COUNT(1) OVER '
' (PARTITION BY grp ORDER BY CAST(ord AS STRING) ASC ) as count_1_asc_default, '
' COUNT(agg) OVER '
' (PARTITION BY grp ORDER BY CAST(ord AS STRING) DESC ) as count_desc_default, '
' SUM(agg) OVER '
' (PARTITION BY grp ORDER BY CAST(ord AS STRING) ASC '
' RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) as sum_asc_UNB_to_CURRENT, '
' MIN(agg) OVER '
' (PARTITION BY grp ORDER BY CAST(ord AS STRING) DESC '
' RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) as min_desc_UNB_to_CURRENT, '
' MAX(agg) OVER '
' (PARTITION BY grp ORDER BY CAST(ord AS STRING) ASC '
' RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) as max_asc_CURRENT_to_UNB, '
' COUNT(1) OVER '
' (PARTITION BY grp ORDER BY CAST(ord AS STRING) DESC '
' RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) as count_1_desc_CURRENT_to_UNB, '
' COUNT(1) OVER '
' (PARTITION BY grp ORDER BY CAST(ord AS STRING) ASC '
' RANGE BETWEEN CURRENT ROW AND CURRENT ROW) as count_1_asc_CURRENT_to_CURRENT, '
' COUNT(1) OVER '
' (PARTITION BY grp ORDER BY CAST(ord AS STRING) ASC '
' RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) as count_1_asc_UNB_to_UNB, '
' COUNT(1) OVER '
' (PARTITION BY grp ORDER BY CAST(ord AS STRING) DESC '
' RANGE BETWEEN CURRENT ROW AND CURRENT ROW) as count_1_desc_CURRENT_to_CURRENT '
' FROM window_agg_table ',
conf={'spark.rapids.sql.batchSizeBytes': batch_size})
# This is for aggregations that work with a running window optimization. They don't need to be batched
# specially, but it only works if all of the aggregations can support this.
# the order returned should be consistent because the data ends up in a single task (no partitioning)
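# A "running" frame is one that only grows with the current row, e.g.
#   sum(b) over (order by a rows between UNBOUNDED PRECEDING AND CURRENT ROW)
# A bounded sliding frame such as "rows between 2 preceding and 3 following" would not be
# handled by the GpuRunningWindowExec fast path that these tests validate.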
@pytest.mark.parametrize('batch_size', ['1000', '1g'], ids=idfn) # set the batch size so we can test multiple stream batches
@pytest.mark.parametrize('b_gen', all_basic_gens + [decimal_gen_32bit, decimal_gen_128bit], ids=meta_idfn('data:'))
def test_window_running_no_part(b_gen, batch_size):
conf = {'spark.rapids.sql.batchSizeBytes': batch_size,
'spark.rapids.sql.castFloatToDecimal.enabled': True}
query_parts = ['row_number() over (order by a rows between UNBOUNDED PRECEDING AND CURRENT ROW) as row_num',
'rank() over (order by a rows between UNBOUNDED PRECEDING AND CURRENT ROW) as rank_val',
'dense_rank() over (order by a rows between UNBOUNDED PRECEDING AND CURRENT ROW) as dense_rank_val',
'count(b) over (order by a rows between UNBOUNDED PRECEDING AND CURRENT ROW) as count_col',
'min(b) over (order by a rows between UNBOUNDED PRECEDING AND CURRENT ROW) as min_col',
'max(b) over (order by a rows between UNBOUNDED PRECEDING AND CURRENT ROW) as max_col']
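    # Only append a running sum for exact numeric types; float/double sums can flip between
    # Inf and finite values depending on accumulation order (see test_running_float_sum_no_part).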
if isinstance(b_gen.data_type, NumericType) and not isinstance(b_gen, FloatGen) and not isinstance(b_gen, DoubleGen):
query_parts.append('sum(b) over (order by a rows between UNBOUNDED PRECEDING AND CURRENT ROW) as sum_col')
assert_gpu_and_cpu_are_equal_sql(
lambda spark : two_col_df(spark, UniqueLongGen(), b_gen, length=1024 * 14),
"window_agg_table",
'select ' +
', '.join(query_parts) +
' from window_agg_table ',
validate_execs_in_gpu_plan = ['GpuRunningWindowExec'],
conf = conf)
# Test that we can do a running window sum on floats and doubles. This becomes problematic because we do the agg in parallel
# which means that the result can switch back and forth from Inf to not Inf depending on the order of aggregations.
# We test this by limiting the range of the values in the sum to never hit Inf, and by using abs so we don't have
# positive and negative values that interfere with each other.
# the order returned should be consistent because the data ends up in a single task (no partitioning)
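# A minimal illustration of the ordering problem with plain Python floats (not part of the test):
#   (1e308 + 1e308) + (-1e308)  evaluates to inf, because the first partial sum overflows,
#   while 1e308 + (1e308 + (-1e308)) stays at 1e308.
# Bounding the inputs and taking abs() keeps every partial sum finite regardless of ordering.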
@approximate_float
@pytest.mark.parametrize('batch_size', ['1000', '1g'], ids=idfn) # set the batch size so we can test multiple stream batches
def test_running_float_sum_no_part(batch_size):
conf = {'spark.rapids.sql.batchSizeBytes': batch_size,
'spark.rapids.sql.variableFloatAgg.enabled': True,
'spark.rapids.sql.castFloatToDecimal.enabled': True}
query_parts = ['a',
'sum(cast(b as double)) over (order by a rows between UNBOUNDED PRECEDING AND CURRENT ROW) as shrt_dbl_sum',
'sum(abs(dbl)) over (order by a rows between UNBOUNDED PRECEDING AND CURRENT ROW) as dbl_sum',
'sum(cast(b as float)) over (order by a rows between UNBOUNDED PRECEDING AND CURRENT ROW) as shrt_flt_sum',
'sum(abs(flt)) over (order by a rows between UNBOUNDED PRECEDING AND CURRENT ROW) as flt_sum']
gen = StructGen([('a', UniqueLongGen()),('b', short_gen),('flt', float_gen),('dbl', double_gen)], nullable=False)
assert_gpu_and_cpu_are_equal_sql(
lambda spark : gen_df(spark, gen, length=1024 * 14),
"window_agg_table",
'select ' +
', '.join(query_parts) +
' from window_agg_table ',
validate_execs_in_gpu_plan = ['GpuRunningWindowExec'],
conf = conf)
# Rank aggregations are running window aggregations but they care about the ordering. In most tests we don't
# allow duplicate ordering, because that makes the results ambiguous. If two rows end up being switched even
# if the order-by column is the same then we can get different results for say a running sum. Here we are going
# to allow for duplication in the ordering, because there will be no other columns. This means that if you switch
# rows it does not matter because the only time rows are switched is when the rows are exactly the same.
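# For example, two rows that tie on the order-by value could feed a running sum in either
# order and give different intermediate results, but rank() and dense_rank() assign the same
# value to both tied rows, so swapping them is harmless here.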
@pytest.mark.parametrize('data_gen',
all_basic_gens + [decimal_gen_32bit, orderable_decimal_gen_128bit],
ids=meta_idfn('data:'))
def test_window_running_rank_no_part(data_gen):
# Keep the batch size small. We have tested these with operators with exact inputs already, this is mostly
# testing the fixup operation.
conf = {'spark.rapids.sql.batchSizeBytes': 1000}
query_parts = ['a',
'rank() over (order by a rows between UNBOUNDED PRECEDING AND CURRENT ROW) as rank_val',
'dense_rank() over (order by a rows between UNBOUNDED PRECEDING AND CURRENT ROW) as dense_rank_val']
# When generating the ordering try really hard to have duplicate values
assert_gpu_and_cpu_are_equal_sql(
lambda spark : unary_op_df(spark, RepeatSeqGen(data_gen, length=500), length=1024 * 14),
"window_agg_table",
'select ' +
', '.join(query_parts) +
' from window_agg_table ',
validate_execs_in_gpu_plan = ['GpuRunningWindowExec'],
conf = conf)
# Rank aggregations are running window aggregations but they care about the ordering. In most tests we don't
# allow duplicate ordering, because that makes the results ambiguous. If two rows end up being switched even
# if the order-by column is the same then we can get different results for say a running sum. Here we are going
# to allow for duplication in the ordering, because there will be no other columns. This means that if you switch
# rows it does not matter because the only time rows are switched is when the rows are exactly the same.
# In a distributed setup the order of the partitions returned might be different, so we must ignore the order
# but small batch sizes can make sort very slow, so do the final order by locally
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', all_basic_gens + [decimal_gen_32bit], ids=idfn)
def test_window_running_rank(data_gen):
# Keep the batch size small. We have tested these with operators with exact inputs already, this is mostly
# testing the fixup operation.
conf = {'spark.rapids.sql.batchSizeBytes': 1000}
query_parts = ['b', 'a',
'rank() over (partition by b order by a rows between UNBOUNDED PRECEDING AND CURRENT ROW) as rank_val',
'dense_rank() over (partition by b order by a rows between UNBOUNDED PRECEDING AND CURRENT ROW) as dense_rank_val']
# When generating the ordering try really hard to have duplicate values
assert_gpu_and_cpu_are_equal_sql(
lambda spark : two_col_df(spark, RepeatSeqGen(data_gen, length=500), RepeatSeqGen(data_gen, length=100), length=1024 * 14),
"window_agg_table",
'select ' +
', '.join(query_parts) +
' from window_agg_table ',
validate_execs_in_gpu_plan = ['GpuRunningWindowExec'],
conf = conf)
# This is for aggregations that work with a running window optimization. They don't need to be batched
# specially, but it only works if all of the aggregations can support this.
# In a distributed setup the order of the partitions returned might be different, so we must ignore the order
# but small batch sizes can make sort very slow, so do the final order by locally
@ignore_order(local=True)
@pytest.mark.parametrize('batch_size', ['1000', '1g'], ids=idfn) # set the batch size so we can test multiple stream batches
@pytest.mark.parametrize('b_gen, c_gen', [(long_gen, x) for x in running_part_and_order_gens] +
[(x, long_gen) for x in all_basic_gens + [decimal_gen_32bit]], ids=idfn)
def test_window_running(b_gen, c_gen, batch_size):
conf = {'spark.rapids.sql.batchSizeBytes': batch_size,
'spark.rapids.sql.variableFloatAgg.enabled': True,
'spark.rapids.sql.castFloatToDecimal.enabled': True}
query_parts = ['b', 'a', 'row_number() over (partition by b order by a rows between UNBOUNDED PRECEDING AND CURRENT ROW) as row_num',
'rank() over (partition by b order by a rows between UNBOUNDED PRECEDING AND CURRENT ROW) as rank_val',
'dense_rank() over (partition by b order by a rows between UNBOUNDED PRECEDING AND CURRENT ROW) as dense_rank_val',
'count(c) over (partition by b order by a rows between UNBOUNDED PRECEDING AND CURRENT ROW) as count_col',
'min(c) over (partition by b order by a rows between UNBOUNDED PRECEDING AND CURRENT ROW) as min_col',
'max(c) over (partition by b order by a rows between UNBOUNDED PRECEDING AND CURRENT ROW) as max_col']
# Decimal precision can grow too large. Float and Double can get odd results for Inf/-Inf because of ordering
if isinstance(c_gen.data_type, NumericType) and (not isinstance(c_gen, FloatGen)) and (not isinstance(c_gen, DoubleGen)) and (not isinstance(c_gen, DecimalGen)):
query_parts.append('sum(c) over (partition by b order by a rows between UNBOUNDED PRECEDING AND CURRENT ROW) as sum_col')
assert_gpu_and_cpu_are_equal_sql(
lambda spark : three_col_df(spark, UniqueLongGen(), RepeatSeqGen(b_gen, length=100), c_gen, length=1024 * 14),
"window_agg_table",
'select ' +
', '.join(query_parts) +
' from window_agg_table ',
validate_execs_in_gpu_plan = ['GpuRunningWindowExec'],
conf = conf)
# Test that we can do a running window sum on floats, doubles, and decimals. This becomes problematic because we do the agg in parallel
# which means that the result can switch back and forth from Inf to not Inf depending on the order of aggregations.
# We test this by limiting the range of the values in the sum to never hit Inf, and by using abs so we don't have
# positive and negative values that interfere with each other.
# decimal is problematic if the precision is so high it falls back to the CPU.
# In a distributed setup the order of the partitions returned might be different, so we must ignore the order
# but small batch sizes can make sort very slow, so do the final order by locally
@ignore_order(local=True)
@pytest.mark.parametrize('batch_size', ['1000', '1g'], ids=idfn) # set the batch size so we can test multiple stream batches
def test_window_running_float_decimal_sum(batch_size):
conf = {'spark.rapids.sql.batchSizeBytes': batch_size,
'spark.rapids.sql.variableFloatAgg.enabled': True,
'spark.rapids.sql.castFloatToDecimal.enabled': True}
query_parts = ['b', 'a',
'sum(cast(c as double)) over (partition by b order by a rows between UNBOUNDED PRECEDING AND CURRENT ROW) as dbl_sum',
'sum(abs(dbl)) over (partition by b order by a rows between UNBOUNDED PRECEDING AND CURRENT ROW) as dbl_sum',
'sum(cast(c as float)) over (partition by b order by a rows between UNBOUNDED PRECEDING AND CURRENT ROW) as flt_sum',
'sum(abs(flt)) over (partition by b order by a rows between UNBOUNDED PRECEDING AND CURRENT ROW) as flt_sum',
'sum(cast(c as Decimal(6,1))) over (partition by b order by a rows between UNBOUNDED PRECEDING AND CURRENT ROW) as dec_sum']
gen = StructGen([('a', UniqueLongGen()),('b', RepeatSeqGen(int_gen, length=1000)),('c', short_gen),('flt', float_gen),('dbl', double_gen)], nullable=False)
assert_gpu_and_cpu_are_equal_sql(
lambda spark : gen_df(spark, gen, length=1024 * 14),
"window_agg_table",
'select ' +
', '.join(query_parts) +
' from window_agg_table ',
validate_execs_in_gpu_plan = ['GpuRunningWindowExec'],
conf = conf)
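# Illustrative sketch (not part of the original test suite, helper name is hypothetical):
# why parallel partial sums of 32-bit floats are order dependent near the overflow
# boundary, which is what the running-window float sum test above guards against.
def _demo_float_sum_order_dependence():
    import numpy as np  # numpy is assumed to be available; used purely for illustration
    big = np.float32(3.0e38)
    small = np.float32(-3.0e38)
    # (big + big) overflows to inf and stays inf after adding the negative value,
    # while re-ordering the same three addends keeps the running sum finite.
    assert np.isinf(np.float32(big + big) + small)
    assert np.isfinite(np.float32(big + small) + big)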
# In a distributed setup the order of the partitions returned might be different, so we must ignore the order
# but small batch sizes can make sort very slow, so do the final order by locally
@ignore_order(local=True)
@approximate_float
@pytest.mark.parametrize('batch_size', ['1000', '1g'], ids=idfn) # set the batch size so we can test multiple stream batches
@pytest.mark.parametrize('c_gen', lead_lag_data_gens, ids=idfn)
@pytest.mark.parametrize('a_b_gen', part_and_order_gens, ids=meta_idfn('partAndOrderBy:'))
def test_multi_types_window_aggs_for_rows_lead_lag(a_b_gen, c_gen, batch_size):
conf = {'spark.rapids.sql.batchSizeBytes': batch_size}
data_gen = [
('a', RepeatSeqGen(a_b_gen, length=20)),
('b', a_b_gen),
('c', c_gen)]
# By default for many operations a range of unbounded to unbounded is used
# This will not work until https://github.com/NVIDIA/spark-rapids/issues/216
# is fixed.
# Ordering needs to include c because with nulls and especially on booleans
# it is possible to get a different ordering when it is ambiguous.
base_window_spec = Window.partitionBy('a').orderBy('b', 'c')
inclusive_window_spec = base_window_spec.rowsBetween(-10, 100)
def do_it(spark):
df = gen_df(spark, data_gen, length=2048) \
.withColumn('inc_count_1', f.count('*').over(inclusive_window_spec)) \
.withColumn('inc_count_c', f.count('c').over(inclusive_window_spec)) \
.withColumn('lead_5_c', f.lead('c', 5).over(base_window_spec)) \
.withColumn('lag_1_c', f.lag('c', 1).over(base_window_spec)) \
.withColumn('row_num', f.row_number().over(base_window_spec))
if isinstance(c_gen, StructGen):
"""
The MIN()/MAX() aggregations amount to a RANGE query. These are not
currently supported on STRUCT columns.
Also, LEAD()/LAG() defaults cannot currently be specified for STRUCT
columns. `[ 10, 3.14159, "foobar" ]` isn't recognized as a valid STRUCT scalar.
"""
return df.withColumn('lead_def_c', f.lead('c', 2, None).over(base_window_spec)) \
.withColumn('lag_def_c', f.lag('c', 4, None).over(base_window_spec))
else:
default_val = gen_scalar_value(c_gen, force_no_nulls=False)
return df.withColumn('inc_max_c', f.max('c').over(inclusive_window_spec)) \
.withColumn('inc_min_c', f.min('c').over(inclusive_window_spec)) \
.withColumn('lead_def_c', f.lead('c', 2, default_val).over(base_window_spec)) \
.withColumn('lag_def_c', f.lag('c', 4, default_val).over(base_window_spec))
assert_gpu_and_cpu_are_equal_collect(do_it, conf=conf)
struct_with_arrays = StructGen(children=[
['child_int', int_gen],
['child_time', date_gen],
['child_string', string_gen],
['child_array', ArrayGen(int_gen, max_length=10)]])
lead_lag_struct_with_arrays_gen = [struct_with_arrays,
ArrayGen(struct_with_arrays, max_length=10),
StructGen(children=[['child_struct', struct_with_arrays]])]
@ignore_order(local=True)
@approximate_float
@pytest.mark.parametrize('struct_gen', lead_lag_struct_with_arrays_gen, ids=idfn)
@pytest.mark.parametrize('a_b_gen', part_and_order_gens, ids=meta_idfn('partAndOrderBy:'))
def test_lead_lag_for_structs_with_arrays(a_b_gen, struct_gen):
data_gen = [
('a', RepeatSeqGen(a_b_gen, length=20)),
('b', IntegerGen(nullable=False, special_cases=[])),
('c', struct_gen)]
# By default for many operations a range of unbounded to unbounded is used
# This will not work until https://github.com/NVIDIA/spark-rapids/issues/216
# is fixed.
# Ordering needs to include c because with nulls and especially on booleans
# it is possible to get a different ordering when it is ambiguous.
base_window_spec = Window.partitionBy('a').orderBy('b')
def do_it(spark):
return gen_df(spark, data_gen, length=2048) \
.withColumn('lead_5_c', f.lead('c', 5).over(base_window_spec)) \
.withColumn('lag_1_c', f.lag('c', 1).over(base_window_spec))
assert_gpu_and_cpu_are_equal_collect(do_it)
lead_lag_array_data_gens =\
[ArrayGen(sub_gen, max_length=10) for sub_gen in lead_lag_data_gens] + \
[ArrayGen(ArrayGen(sub_gen, max_length=10), max_length=10) for sub_gen in lead_lag_data_gens] + \
[ArrayGen(ArrayGen(ArrayGen(sub_gen, max_length=10), max_length=10), max_length=10) \
for sub_gen in lead_lag_data_gens]
@ignore_order(local=True)
@pytest.mark.parametrize('d_gen', lead_lag_array_data_gens, ids=meta_idfn('agg:'))
@pytest.mark.parametrize('c_gen', [UniqueLongGen()], ids=meta_idfn('orderBy:'))
@pytest.mark.parametrize('b_gen', [long_gen], ids=meta_idfn('orderBy:'))
@pytest.mark.parametrize('a_gen', [long_gen], ids=meta_idfn('partBy:'))
def test_window_aggs_for_rows_lead_lag_on_arrays(a_gen, b_gen, c_gen, d_gen):
data_gen = [
('a', RepeatSeqGen(a_gen, length=20)),
('b', b_gen),
('c', c_gen),
('d', d_gen),
('d_default', d_gen)]
assert_gpu_and_cpu_are_equal_sql(
lambda spark: gen_df(spark, data_gen, length=2048),
"window_agg_table",
'''
SELECT
LEAD(d, 5) OVER (PARTITION by a ORDER BY b,c) lead_d_5,
LEAD(d, 2, d_default) OVER (PARTITION by a ORDER BY b,c) lead_d_2_default,
LAG(d, 5) OVER (PARTITION by a ORDER BY b,c) lag_d_5,
LAG(d, 2, d_default) OVER (PARTITION by a ORDER BY b,c) lag_d_2_default
FROM window_agg_table
''')
# lead and lag don't currently work for string columns, so redo the tests, but just for strings
# without lead and lag
# In a distributed setup the order of the partitions returned might be different, so we must ignore the order
# but small batch sizes can make sort very slow, so do the final order by locally
@ignore_order(local=True)
@approximate_float
@pytest.mark.parametrize('c_gen', [string_gen], ids=idfn)
@pytest.mark.parametrize('a_b_gen', part_and_order_gens, ids=meta_idfn('partAndOrderBy:'))
def test_multi_types_window_aggs_for_rows(a_b_gen, c_gen):
data_gen = [
('a', RepeatSeqGen(a_b_gen, length=20)),
('b', a_b_gen),
('c', c_gen)]
# By default for many operations a range of unbounded to unbounded is used
# This will not work until https://github.com/NVIDIA/spark-rapids/issues/216
# is fixed.
# Ordering needs to include c because with nulls and especially on booleans
# it is possible to get a different ordering when it is ambiguous
baseWindowSpec = Window.partitionBy('a').orderBy('b', 'c')
inclusiveWindowSpec = baseWindowSpec.rowsBetween(-10, 100)
def do_it(spark):
return gen_df(spark, data_gen, length=2048) \
.withColumn('inc_count_1', f.count('*').over(inclusiveWindowSpec)) \
.withColumn('inc_count_c', f.count('c').over(inclusiveWindowSpec)) \
.withColumn('inc_max_c', f.max('c').over(inclusiveWindowSpec)) \
.withColumn('inc_min_c', f.min('c').over(inclusiveWindowSpec)) \
.withColumn('rank_val', f.rank().over(baseWindowSpec)) \
.withColumn('dense_rank_val', f.dense_rank().over(baseWindowSpec)) \
.withColumn('percent_rank_val', f.percent_rank().over(baseWindowSpec)) \
.withColumn('row_num', f.row_number().over(baseWindowSpec))
assert_gpu_and_cpu_are_equal_collect(do_it)
def test_percent_rank_no_part_multiple_batches():
data_gen = [('a', long_gen)]
# The goal of this is to have multiple batches so we can verify that the code
# is working properly, but not so large that it takes forever to run.
baseWindowSpec = Window.orderBy('a')
def do_it(spark):
return gen_df(spark, data_gen, length=8000) \
.withColumn('percent_rank_val', f.percent_rank().over(baseWindowSpec))
assert_gpu_and_cpu_are_equal_collect(do_it, conf = {'spark.rapids.sql.batchSizeBytes': '100'})
def test_percent_rank_single_part_multiple_batches():
data_gen = [('a', long_gen)]
# The goal of this is to have multiple batches so we can verify that the code
# is working properly, but not so large that it takes forever to run.
baseWindowSpec = Window.partitionBy('b').orderBy('a')
def do_it(spark):
return gen_df(spark, data_gen, length=8000) \
.withColumn('b', f.lit(1)) \
.withColumn('percent_rank_val', f.percent_rank().over(baseWindowSpec))
assert_gpu_and_cpu_are_equal_collect(do_it, conf = {'spark.rapids.sql.batchSizeBytes': '100'})
@pytest.mark.skipif(is_before_spark_320(), reason="Only in Spark 3.2.0 is IGNORE NULLS supported for lead and lag by Spark")
@allow_non_gpu('WindowExec', 'Alias', 'WindowExpression', 'Lead', 'Literal', 'WindowSpecDefinition', 'SpecifiedWindowFrame')
@ignore_order(local=True)
@pytest.mark.parametrize('d_gen', all_basic_gens, ids=meta_idfn('agg:'))
@pytest.mark.parametrize('c_gen', [UniqueLongGen()], ids=meta_idfn('orderBy:'))
@pytest.mark.parametrize('b_gen', [long_gen], ids=meta_idfn('orderBy:'))
@pytest.mark.parametrize('a_gen', [long_gen], ids=meta_idfn('partBy:'))
def test_window_aggs_lead_ignore_nulls_fallback(a_gen, b_gen, c_gen, d_gen):
data_gen = [
('a', RepeatSeqGen(a_gen, length=20)),
('b', b_gen),
('c', c_gen),
('d', d_gen)]
assert_gpu_sql_fallback_collect(
lambda spark: gen_df(spark, data_gen),
'Lead',
"window_agg_table",
'''
SELECT
LEAD(d, 5) IGNORE NULLS OVER (PARTITION by a ORDER BY b,c) lead_d_5
FROM window_agg_table
''')
@pytest.mark.skipif(is_before_spark_320(), reason="Only in Spark 3.2.0 is IGNORE NULLS supported for lead and lag by Spark")
@allow_non_gpu('WindowExec', 'Alias', 'WindowExpression', 'Lag', 'Literal', 'WindowSpecDefinition', 'SpecifiedWindowFrame')
@ignore_order(local=True)
@pytest.mark.parametrize('d_gen', all_basic_gens, ids=meta_idfn('agg:'))
@pytest.mark.parametrize('c_gen', [UniqueLongGen()], ids=meta_idfn('orderBy:'))
@pytest.mark.parametrize('b_gen', [long_gen], ids=meta_idfn('orderBy:'))
@pytest.mark.parametrize('a_gen', [long_gen], ids=meta_idfn('partBy:'))
def test_window_aggs_lag_ignore_nulls_fallback(a_gen, b_gen, c_gen, d_gen):
data_gen = [
('a', RepeatSeqGen(a_gen, length=20)),
('b', b_gen),
('c', c_gen),
('d', d_gen)]
assert_gpu_sql_fallback_collect(
lambda spark: gen_df(spark, data_gen),
'Lag',
"window_agg_table",
'''
SELECT
LAG(d, 5) IGNORE NULLS OVER (PARTITION by a ORDER BY b,c) lag_d_5
FROM window_agg_table
''')
# Test for RANGE queries, with timestamp order-by expressions.
# In a distributed setup the order of the partitions returned might be different, so we must ignore the order
# but small batch sizes can make sort very slow, so do the final order by locally
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', [_grpkey_longs_with_timestamps,
pytest.param(_grpkey_longs_with_nullable_timestamps)],
ids=idfn)
def test_window_aggs_for_ranges_timestamps(data_gen):
assert_gpu_and_cpu_are_equal_sql(
lambda spark: gen_df(spark, data_gen, length=2048),
"window_agg_table",
'select '
' sum(c) over '
' (partition by a order by b asc '
' range between interval 1 DAY 5 HOUR 3 MINUTE 2 SECOND 1 MILLISECOND 5 MICROSECOND preceding '
' and interval 1 DAY 5 HOUR 3 MINUTE 2 SECOND 1 MILLISECOND 5 MICROSECOND following) as sum_c_asc, '
' avg(c) over '
' (partition by a order by b asc '
' range between interval 1 DAY 5 HOUR 3 MINUTE 2 SECOND 1 MILLISECOND 5 MICROSECOND preceding '
' and interval 1 DAY 5 HOUR 3 MINUTE 2 SECOND 1 MILLISECOND 5 MICROSECOND following) as avg_c_asc, '
' max(c) over '
' (partition by a order by b desc '
' range between interval 2 DAY 5 HOUR 3 MINUTE 2 SECOND 1 MILLISECOND 5 MICROSECOND preceding '
' and interval 1 DAY 5 HOUR 3 MINUTE 2 SECOND 1 MILLISECOND 5 MICROSECOND following) as max_c_desc, '
' min(c) over '
' (partition by a order by b asc '
' range between interval 2 DAY 5 HOUR 3 MINUTE 2 SECOND 1 MILLISECOND 5 MICROSECOND preceding '
' and current row) as min_c_asc, '
' count(1) over '
' (partition by a order by b asc '
' range between CURRENT ROW and UNBOUNDED following) as count_1_asc, '
' count(c) over '
' (partition by a order by b asc '
' range between CURRENT ROW and UNBOUNDED following) as count_c_asc, '
' avg(c) over '
' (partition by a order by b asc '
' range between UNBOUNDED preceding and CURRENT ROW) as avg_c_unbounded, '
' sum(c) over '
' (partition by a order by b asc '
' range between UNBOUNDED preceding and CURRENT ROW) as sum_c_unbounded, '
' max(c) over '
' (partition by a order by b asc '
' range between UNBOUNDED preceding and UNBOUNDED following) as max_c_unbounded '
'from window_agg_table',
conf = {'spark.rapids.sql.castFloatToDecimal.enabled': True})
# In a distributed setup the order of the partitions returned might be different, so we must ignore the order
# but small batch sizes can make sort very slow, so do the final order by locally
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', [
_grpkey_longs_with_nullable_decimals,
_grpkey_longs_with_nullable_larger_decimals,
pytest.param(_grpkey_longs_with_nullable_largest_decimals,
marks=pytest.mark.xfail(
condition=is_databricks113_or_later(),
reason='https://github.com/NVIDIA/spark-rapids/issues/7429')),
_grpkey_longs_with_nullable_floats,
_grpkey_longs_with_nullable_doubles
], ids=idfn)
def test_window_aggregations_for_decimal_and_float_ranges(data_gen):
"""
Tests for range window aggregations, with DECIMAL/FLOATING POINT order by columns.
The table schema used:
a: Group By column
b: Order By column (decimals, floats, doubles)
c: Aggregation column (decimals or ints)
Since this test is for the order-by column type, and not for each specific windowing aggregation,
we use COUNT(1) throughout the test, for different window widths and ordering.
Some other aggregation functions are thrown in for variety.
"""
assert_gpu_and_cpu_are_equal_sql(
lambda spark: gen_df(spark, data_gen, length=2048),
"window_agg_table",
'SELECT '
' COUNT(1) OVER (PARTITION BY a ORDER BY b ASC RANGE BETWEEN 10.2345 PRECEDING AND 6.7890 FOLLOWING), '
' COUNT(1) OVER (PARTITION BY a ORDER BY b ASC), '
' COUNT(1) OVER (PARTITION BY a ORDER BY b ASC RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW), '
' COUNT(1) OVER (PARTITION BY a ORDER BY b ASC RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING), '
' COUNT(1) OVER (PARTITION BY a ORDER BY b DESC RANGE BETWEEN 10.2345 PRECEDING AND 6.7890 FOLLOWING), '
' COUNT(1) OVER (PARTITION BY a ORDER BY b DESC), '
' COUNT(1) OVER (PARTITION BY a ORDER BY b DESC RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW), '
' COUNT(1) OVER (PARTITION BY a ORDER BY b DESC RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING),'
' COUNT(c) OVER (PARTITION BY a ORDER BY b RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW), '
' SUM(c) OVER (PARTITION BY a ORDER BY b RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW), '
' MIN(c) OVER (PARTITION BY a ORDER BY b RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW), '
' MAX(c) OVER (PARTITION BY a ORDER BY b RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW), '
' RANK() OVER (PARTITION BY a ORDER BY b) '
'FROM window_agg_table',
conf={})
# In a distributed setup the order of the partitions returned might be different, so we must ignore the order
# but small batch sizes can make sort very slow, so do the final order by locally
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', [
pytest.param(_grpkey_longs_with_nullable_largest_decimals,
marks=pytest.mark.xfail(
condition=is_databricks113_or_later(),
reason='https://github.com/NVIDIA/spark-rapids/issues/7429'))
], ids=idfn)
def test_window_aggregations_for_big_decimal_ranges(data_gen):
"""
Tests for range window aggregations, with DECIMAL order by columns.
The table schema used:
a: Group By column
b: Order By column (decimal)
c: Aggregation column (incidentally, also decimal)
Since this test is for the order-by column type, and not for each specific windowing aggregation,
we use COUNT(1) throughout the test, for different window widths and ordering.
Some other aggregation functions are thrown in for variety.
"""
assert_gpu_and_cpu_are_equal_sql(
lambda spark: gen_df(spark, data_gen, length=2048),
"window_agg_table",
'SELECT '
' COUNT(1) OVER (PARTITION BY a ORDER BY b ASC '
' RANGE BETWEEN 12345678901234567890123456789012345.12 PRECEDING '
' AND 11111111112222222222333333333344444.12 FOLLOWING) '
'FROM window_agg_table',
conf={})
_gen_data_for_collect_list = [
('a', RepeatSeqGen(LongGen(), length=20)),
('b', UniqueLongGen()),
('c_bool', BooleanGen()),
('c_short', ShortGen()),
('c_int', IntegerGen()),
('c_long', LongGen()),
('c_date', DateGen()),
('c_ts', TimestampGen()),
('c_byte', ByteGen()),
('c_string', StringGen()),
('c_float', FloatGen()),
('c_double', DoubleGen()),
('c_decimal_32', DecimalGen(precision=8, scale=3)),
('c_decimal_64', decimal_gen_64bit),
('c_decimal_128', decimal_gen_128bit),
('c_struct', StructGen(children=[
['child_int', IntegerGen()],
['child_time', DateGen()],
['child_string', StringGen()],
['child_decimal_32', DecimalGen(precision=8, scale=3)],
['child_decimal_64', decimal_gen_64bit],
['child_decimal_128', decimal_gen_128bit]])),
('c_array', ArrayGen(int_gen)),
('c_map', simple_string_to_string_map_gen)]
# SortExec does not support array type, so sort the result locally.
@ignore_order(local=True)
def test_window_aggs_for_rows_collect_list():
assert_gpu_and_cpu_are_equal_sql(
lambda spark : gen_df(spark, _gen_data_for_collect_list),
"window_collect_table",
'''
select
collect_list(c_bool) over
(partition by a order by b,c_int rows between CURRENT ROW and UNBOUNDED FOLLOWING) as collect_bool,
collect_list(c_short) over
(partition by a order by b,c_int rows between CURRENT ROW and UNBOUNDED FOLLOWING) as collect_short,
collect_list(c_int) over
(partition by a order by b,c_int rows between CURRENT ROW and UNBOUNDED FOLLOWING) as collect_int,
collect_list(c_long) over
(partition by a order by b,c_int rows between CURRENT ROW and UNBOUNDED FOLLOWING) as collect_long,
collect_list(c_date) over
(partition by a order by b,c_int rows between CURRENT ROW and UNBOUNDED FOLLOWING) as collect_date,
collect_list(c_ts) over
(partition by a order by b,c_int rows between CURRENT ROW and UNBOUNDED FOLLOWING) as collect_ts,
collect_list(c_byte) over
(partition by a order by b,c_int rows between CURRENT ROW and UNBOUNDED FOLLOWING) as collect_byte,
collect_list(c_string) over
(partition by a order by b,c_int rows between CURRENT ROW and UNBOUNDED FOLLOWING) as collect_string,
collect_list(c_float) over
(partition by a order by b,c_int rows between CURRENT ROW and UNBOUNDED FOLLOWING) as collect_float,
collect_list(c_double) over
(partition by a order by b,c_int rows between CURRENT ROW and UNBOUNDED FOLLOWING) as collect_double,
collect_list(c_decimal_32) over
(partition by a order by b,c_int rows between CURRENT ROW and UNBOUNDED FOLLOWING) as collect_decimal_32,
collect_list(c_decimal_64) over
(partition by a order by b,c_int rows between CURRENT ROW and UNBOUNDED FOLLOWING) as collect_decimal_64,
collect_list(c_decimal_128) over
(partition by a order by b,c_int rows between CURRENT ROW and UNBOUNDED FOLLOWING) as collect_decimal_128,
collect_list(c_struct) over
(partition by a order by b,c_int rows between CURRENT ROW and UNBOUNDED FOLLOWING) as collect_struct,
collect_list(c_array) over
(partition by a order by b,c_int rows between CURRENT ROW and UNBOUNDED FOLLOWING) as collect_array,
collect_list(c_map) over
(partition by a order by b,c_int rows between CURRENT ROW and UNBOUNDED FOLLOWING) as collect_map
from window_collect_table
''')
# SortExec does not support array type, so sort the result locally.
@ignore_order(local=True)
# This test is aimed more at Databricks and its running window optimization than at ours,
# which is why we do not yet validate that a GpuRunningWindowExec was inserted.
def test_running_window_function_exec_for_all_aggs():
assert_gpu_and_cpu_are_equal_sql(
lambda spark : gen_df(spark, _gen_data_for_collect_list),
"window_collect_table",
'''
select
sum(c_int) over
(partition by a order by b,c_int rows between UNBOUNDED PRECEDING AND CURRENT ROW) as sum_int,
min(c_long) over
(partition by a order by b,c_int rows between UNBOUNDED PRECEDING AND CURRENT ROW) as min_long,
max(c_date) over
(partition by a order by b,c_int rows between UNBOUNDED PRECEDING AND CURRENT ROW) as max_date,
count(1) over
(partition by a order by b,c_int rows between UNBOUNDED PRECEDING AND CURRENT ROW) as count_1,
count(*) over
(partition by a order by b,c_int rows between UNBOUNDED PRECEDING AND CURRENT ROW) as count_star,
row_number() over
(partition by a order by b,c_int) as row_num,
rank() over
(partition by a order by b,c_int) as rank_val,
dense_rank() over
(partition by a order by b,c_int) as dense_rank_val,
collect_list(c_float) over
(partition by a order by b,c_int rows between UNBOUNDED PRECEDING AND CURRENT ROW) as collect_float,
collect_list(c_decimal_32) over
(partition by a order by b,c_int rows between UNBOUNDED PRECEDING AND CURRENT ROW) as collect_decimal_32,
collect_list(c_decimal_64) over
(partition by a order by b,c_int rows between UNBOUNDED PRECEDING AND CURRENT ROW) as collect_decimal_64,
collect_list(c_decimal_128) over
(partition by a order by b,c_int rows between UNBOUNDED PRECEDING AND CURRENT ROW) as collect_decimal_128,
collect_list(c_struct) over
(partition by a order by b,c_int rows between UNBOUNDED PRECEDING AND CURRENT ROW) as collect_struct
from window_collect_table
''')
# Test the Databricks WindowExec, which combines a WindowExec with a ProjectExec and provides the output
# fields. We need to handle this with an extra GpuProjectExec, and we need the input expressions to compute
# the window-function-of-another-window-function case.
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', integral_gens, ids=idfn)
def test_join_sum_window_of_window(data_gen):
def do_it(spark):
agg_table = gen_df(spark, StructGen([('a_1', UniqueLongGen()), ('c', data_gen)], nullable=False))
part_table = gen_df(spark, StructGen([('a_2', UniqueLongGen()), ('b', byte_gen)], nullable=False))
agg_table.createOrReplaceTempView("agg")
part_table.createOrReplaceTempView("part")
# Note that if we include `c` in the select clause here (the output projection), the bug described
# in https://github.com/NVIDIA/spark-rapids/issues/6531 does not manifest
return spark.sql("""
select
b,
sum(c) as sum_c,
sum(c)/sum(sum(c)) over (partition by b) as ratio_sum,
(b + c)/sum(sum(c)) over (partition by b) as ratio_bc
from agg, part
where a_1 = a_2
group by b, c
order by b, ratio_sum, ratio_bc""")
assert_gpu_and_cpu_are_equal_collect(do_it)
# Generates some repeated values to test the deduplication of GpuCollectSet.
# And GpuCollectSet does not yet support struct type.
_gen_data_for_collect_set = [
('a', RepeatSeqGen(LongGen(), length=20)),
('b', UniqueLongGen()),
('c_bool', RepeatSeqGen(BooleanGen(), length=15)),
('c_int', RepeatSeqGen(IntegerGen(), length=15)),
('c_long', RepeatSeqGen(LongGen(), length=15)),
('c_short', RepeatSeqGen(ShortGen(), length=15)),
('c_date', RepeatSeqGen(DateGen(), length=15)),
('c_timestamp', RepeatSeqGen(TimestampGen(), length=15)),
('c_byte', RepeatSeqGen(ByteGen(), length=15)),
('c_string', RepeatSeqGen(StringGen(), length=15)),
('c_float', RepeatSeqGen(FloatGen(), length=15)),
('c_double', RepeatSeqGen(DoubleGen(), length=15)),
('c_decimal_32', RepeatSeqGen(DecimalGen(precision=8, scale=3), length=15)),
('c_decimal_64', RepeatSeqGen(decimal_gen_64bit, length=15)),
('c_decimal_128', RepeatSeqGen(decimal_gen_128bit, length=15)),
# case to verify the NAN_UNEQUAL strategy
('c_fp_nan', RepeatSeqGen(FloatGen().with_special_case(math.nan, 200.0), length=5)),
]
_gen_data_for_collect_set_nested = [
('a', RepeatSeqGen(LongGen(), length=20)),
('b', UniqueLongGen()),
('c_int', RepeatSeqGen(IntegerGen(), length=15)),
('c_struct_array_1', RepeatSeqGen(struct_array_gen, length=15)),
('c_struct_array_2', RepeatSeqGen(StructGen([
['c0', struct_array_gen], ['c1', int_gen]]), length=14)),
('c_array_struct', RepeatSeqGen(ArrayGen(all_basic_struct_gen), length=15)),
('c_array_array_bool', RepeatSeqGen(ArrayGen(ArrayGen(BooleanGen())), length=15)),
('c_array_array_int', RepeatSeqGen(ArrayGen(ArrayGen(IntegerGen())), length=15)),
('c_array_array_long', RepeatSeqGen(ArrayGen(ArrayGen(LongGen())), length=15)),
('c_array_array_short', RepeatSeqGen(ArrayGen(ArrayGen(ShortGen())), length=15)),
('c_array_array_date', RepeatSeqGen(ArrayGen(ArrayGen(DateGen())), length=15)),
('c_array_array_timestamp', RepeatSeqGen(ArrayGen(ArrayGen(TimestampGen())), length=15)),
('c_array_array_byte', RepeatSeqGen(ArrayGen(ArrayGen(ByteGen())), length=15)),
('c_array_array_string', RepeatSeqGen(ArrayGen(ArrayGen(StringGen())), length=15)),
('c_array_array_float', RepeatSeqGen(ArrayGen(ArrayGen(FloatGen())), length=15)),
('c_array_array_double', RepeatSeqGen(ArrayGen(ArrayGen(DoubleGen())), length=15)),
('c_array_array_decimal_32', RepeatSeqGen(ArrayGen(ArrayGen(DecimalGen(precision=8, scale=3))), length=15)),
('c_array_array_decimal_64', RepeatSeqGen(ArrayGen(ArrayGen(decimal_gen_64bit)), length=15)),
('c_array_array_decimal_128', RepeatSeqGen(ArrayGen(ArrayGen(decimal_gen_128bit)), length=15)),
]
# SortExec does not support array type, so sort the result locally.
@ignore_order(local=True)
def test_window_aggs_for_rows_collect_set():
assert_gpu_and_cpu_are_equal_sql(
lambda spark: gen_df(spark, _gen_data_for_collect_set),
"window_collect_table",
'''
select a, b,
sort_array(cc_bool),
sort_array(cc_int),
sort_array(cc_long),
sort_array(cc_short),
sort_array(cc_date),
sort_array(cc_ts),
sort_array(cc_byte),
sort_array(cc_str),
sort_array(cc_float),
sort_array(cc_double),
sort_array(cc_decimal_32),
sort_array(cc_decimal_64),
sort_array(cc_decimal_128),
sort_array(cc_fp_nan)
from (
select a, b,
collect_set(c_bool) over
(partition by a order by b,c_int rows between CURRENT ROW and UNBOUNDED FOLLOWING) as cc_bool,
collect_set(c_int) over
(partition by a order by b,c_int rows between CURRENT ROW and UNBOUNDED FOLLOWING) as cc_int,
collect_set(c_long) over
(partition by a order by b,c_int rows between CURRENT ROW and UNBOUNDED FOLLOWING) as cc_long,
collect_set(c_short) over
(partition by a order by b,c_int rows between CURRENT ROW and UNBOUNDED FOLLOWING) as cc_short,
collect_set(c_date) over
(partition by a order by b,c_int rows between CURRENT ROW and UNBOUNDED FOLLOWING) as cc_date,
collect_set(c_timestamp) over
(partition by a order by b,c_int rows between CURRENT ROW and UNBOUNDED FOLLOWING) as cc_ts,
collect_set(c_byte) over
(partition by a order by b,c_int rows between CURRENT ROW and UNBOUNDED FOLLOWING) as cc_byte,
collect_set(c_string) over
(partition by a order by b,c_int rows between CURRENT ROW and UNBOUNDED FOLLOWING) as cc_str,
collect_set(c_float) over
(partition by a order by b,c_int rows between CURRENT ROW and UNBOUNDED FOLLOWING) as cc_float,
collect_set(c_double) over
(partition by a order by b,c_int rows between CURRENT ROW and UNBOUNDED FOLLOWING) as cc_double,
collect_set(c_decimal_32) over
(partition by a order by b,c_int rows between CURRENT ROW and UNBOUNDED FOLLOWING) as cc_decimal_32,
collect_set(c_decimal_64) over
(partition by a order by b,c_int rows between CURRENT ROW and UNBOUNDED FOLLOWING) as cc_decimal_64,
collect_set(c_decimal_128) over
(partition by a order by b,c_int rows between CURRENT ROW and UNBOUNDED FOLLOWING) as cc_decimal_128,
collect_set(c_fp_nan) over
(partition by a order by b,c_int rows between CURRENT ROW and UNBOUNDED FOLLOWING) as cc_fp_nan
from window_collect_table
) t
''')
# Note, using sort_array() on the CPU, because sort_array() does not yet
# support sorting certain nested/arbitrary types on the GPU
# See https://github.com/NVIDIA/spark-rapids/issues/3715
# and https://github.com/rapidsai/cudf/issues/11222
@ignore_order(local=True)
@allow_non_gpu("ProjectExec", "SortArray")
def test_window_aggs_for_rows_collect_set_nested_array():
conf = copy_and_update(_float_conf, {
"spark.rapids.sql.castFloatToString.enabled": "true",
"spark.rapids.sql.expression.SortArray": "false"
})
def do_it(spark):
df = gen_df(spark, _gen_data_for_collect_set_nested, length=512)
df.createOrReplaceTempView("window_collect_table")
df = spark.sql(
"""select a, b,
collect_set(c_struct_array_1) over
(partition by a order by b,c_int rows between CURRENT ROW and UNBOUNDED FOLLOWING) as cc_struct_array_1,
collect_set(c_struct_array_2) over
(partition by a order by b,c_int rows between CURRENT ROW and UNBOUNDED FOLLOWING) as cc_struct_array_2,
collect_set(c_array_struct) over
(partition by a order by b,c_int rows between CURRENT ROW and UNBOUNDED FOLLOWING) as cc_array_struct,
collect_set(c_array_array_bool) over
(partition by a order by b,c_int rows between CURRENT ROW and UNBOUNDED FOLLOWING) as cc_array_array_bool,
collect_set(c_array_array_int) over
(partition by a order by b,c_int rows between CURRENT ROW and UNBOUNDED FOLLOWING) as cc_array_array_int,
collect_set(c_array_array_long) over
(partition by a order by b,c_int rows between CURRENT ROW and UNBOUNDED FOLLOWING) as cc_array_array_long,
collect_set(c_array_array_short) over
(partition by a order by b,c_int rows between CURRENT ROW and UNBOUNDED FOLLOWING) as cc_array_array_short,
collect_set(c_array_array_date) over
(partition by a order by b,c_int rows between CURRENT ROW and UNBOUNDED FOLLOWING) as cc_array_array_date,
collect_set(c_array_array_timestamp) over
(partition by a order by b,c_int rows between CURRENT ROW and UNBOUNDED FOLLOWING) as cc_array_array_ts,
collect_set(c_array_array_byte) over
(partition by a order by b,c_int rows between CURRENT ROW and UNBOUNDED FOLLOWING) as cc_array_array_byte,
collect_set(c_array_array_string) over
(partition by a order by b,c_int rows between CURRENT ROW and UNBOUNDED FOLLOWING) as cc_array_array_str,
collect_set(c_array_array_float) over
(partition by a order by b,c_int rows between CURRENT ROW and UNBOUNDED FOLLOWING) as cc_array_array_float,
collect_set(c_array_array_double) over
(partition by a order by b,c_int rows between CURRENT ROW and UNBOUNDED FOLLOWING) as cc_array_array_double,
collect_set(c_array_array_decimal_32) over
(partition by a order by b,c_int rows between CURRENT ROW and UNBOUNDED FOLLOWING) as cc_array_array_decimal_32,
collect_set(c_array_array_decimal_64) over
(partition by a order by b,c_int rows between CURRENT ROW and UNBOUNDED FOLLOWING) as cc_array_array_decimal_64,
collect_set(c_array_array_decimal_128) over
(partition by a order by b,c_int rows between CURRENT ROW and UNBOUNDED FOLLOWING) as cc_array_array_decimal_128
from window_collect_table
""")
# pull out the rdd and schema and create a new dataframe to run SortArray
# to handle Databricks 10.4+ optimization that moves SortArray from ProjectExec
# to ObjectHashAggregateExec
df = spark.createDataFrame(df.rdd, schema=df.schema)
df.createOrReplaceTempView("window_collect_table_2")
return spark.sql("""select a, b,
sort_array(cc_struct_array_1),
sort_array(cc_struct_array_2),
sort_array(cc_array_struct),
sort_array(cc_array_array_bool),
sort_array(cc_array_array_int),
sort_array(cc_array_array_long),
sort_array(cc_array_array_short),
sort_array(cc_array_array_date),
sort_array(cc_array_array_ts),
sort_array(cc_array_array_byte),
sort_array(cc_array_array_str),
sort_array(cc_array_array_float),
sort_array(cc_array_array_double),
sort_array(cc_array_array_decimal_32),
sort_array(cc_array_array_decimal_64),
sort_array(cc_array_array_decimal_128)
from window_collect_table_2
""")
assert_gpu_and_cpu_are_equal_collect(do_it, conf=conf)
# In a distributed setup the order of the partitions returned might be different, so we must ignore the order
# but small batch sizes can make sort very slow, so do the final order by locally
@ignore_order(local=True)
# Arrays and struct of struct (more than single level nesting) are not supported
@pytest.mark.parametrize('part_gen', [ArrayGen(long_gen), StructGen([["a", StructGen([["a1", long_gen]])]])], ids=meta_idfn('partBy:'))
# For arrays the sort and hash partition are also not supported
@allow_non_gpu('WindowExec', 'Alias', 'WindowExpression', 'AggregateExpression', 'Count', 'WindowSpecDefinition', 'SpecifiedWindowFrame', 'Literal', 'SortExec', 'SortOrder', 'ShuffleExchangeExec', 'HashPartitioning')
def test_nested_part_fallback(part_gen):
data_gen = [
('a', RepeatSeqGen(part_gen, length=20)),
('b', UniqueLongGen()),
('c', int_gen)]
window_spec = Window.partitionBy('a').orderBy('b').rowsBetween(-5, 5)
def do_it(spark):
return gen_df(spark, data_gen, length=2048) \
.withColumn('rn', f.count('c').over(window_spec))
assert_gpu_fallback_collect(do_it, 'WindowExec')
@ignore_order(local=True)
# single-level structs (no nested structs) are now supported by the plugin
@pytest.mark.parametrize('part_gen', [StructGen([["a", long_gen]])], ids=meta_idfn('partBy:'))
def test_nested_part_struct(part_gen):
data_gen = [
('a', RepeatSeqGen(part_gen, length=20)),
('b', UniqueLongGen()),
('c', int_gen)]
window_spec = Window.partitionBy('a').orderBy('b').rowsBetween(-5, 5)
def do_it(spark):
return gen_df(spark, data_gen, length=2048) \
.withColumn('rn', f.count('c').over(window_spec))
assert_gpu_and_cpu_are_equal_collect(do_it)
# In a distributed setup the order of the partitions returned might be different, so we must ignore the order
# but small batch sizes can make sort very slow, so do the final order by locally
@ignore_order(local=True)
@pytest.mark.parametrize('ride_along', all_basic_gens + decimal_gens + array_gens_sample + struct_gens_sample + map_gens_sample, ids=idfn)
def test_window_ride_along(ride_along):
assert_gpu_and_cpu_are_equal_sql(
lambda spark : gen_df(spark, [('a', UniqueLongGen()), ('b', ride_along)]),
"window_agg_table",
'select *,'
' row_number() over (order by a) as row_num '
'from window_agg_table ')
@approximate_float
@ignore_order
@pytest.mark.parametrize('preceding', [Window.unboundedPreceding, -4], ids=idfn)
@pytest.mark.parametrize('following', [Window.unboundedFollowing, 3], ids=idfn)
def test_window_range_stddev(preceding, following):
window_spec_agg = Window.partitionBy("_1").orderBy("_2").rangeBetween(preceding, following)
def do_it(spark):
# rangeBetween uses the actual value of the column on which we are doing the aggregation,
# which is why we generate values between LONG_MIN_VALUE - min(preceding) and LONG_MAX_VALUE - max(following);
# otherwise the frame bound computation would overflow (see the small sketch after this test)
gen = LongGen(min_val=-(1 << 63) + 4, max_val=(1 << 63) - 4)
data_gen = [('_1', RepeatSeqGen(gen, length=20)), ('_2', gen)]
df = gen_df(spark, data_gen)
return df.withColumn("standard_dev", f.stddev("_2").over(window_spec_agg)) \
.selectExpr("standard_dev")
assert_gpu_and_cpu_are_equal_collect(do_it, conf={ 'spark.rapids.sql.window.range.long.enabled': 'true'})
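# Illustrative sketch (not part of the original test suite, helper name is hypothetical):
# with rangeBetween the frame bound for each row is `order-by value + offset`, so the
# generated longs above are clamped 4 units inside the 64-bit range to keep
# `value - 4` and `value + 3` from overflowing a signed long.
def _demo_range_frame_bounds(value, preceding=-4, following=3):
    long_min, long_max = -(1 << 63), (1 << 63) - 1
    lower, upper = value + preceding, value + following
    assert long_min <= lower and upper <= long_max  # holds for the clamped generator range
    return lower, upper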
@approximate_float
@ignore_order
@pytest.mark.parametrize('preceding', [Window.unboundedPreceding, -4], ids=idfn)
@pytest.mark.parametrize('following', [Window.unboundedFollowing, 3], ids=idfn)
def test_window_rows_stddev(preceding, following):
window_spec_agg = Window.partitionBy("_1").orderBy("_2").rowsBetween(preceding, following)
def do_it(spark):
data_gen = [('_1', RepeatSeqGen(IntegerGen(), length=20)), ('_2', DoubleGen())]
df = gen_df(spark, data_gen)
return df.withColumn("standard_dev", f.stddev("_2").over(window_spec_agg)) \
.selectExpr("standard_dev")
assert_gpu_and_cpu_are_equal_collect(do_it)
@ignore_order
def test_unbounded_to_unbounded_window():
# This is specifically to test a bug that caused overflow issues when calculating
# the range for some row based queries. The bug applied to more than just
# unbounded to unbounded, but this is the simplest to write
assert_gpu_and_cpu_are_equal_collect(lambda spark : spark.range(1024).selectExpr(
'SUM(id) OVER ()',
'COUNT(1) OVER ()'))
_nested_gens = array_gens_sample + struct_gens_sample + map_gens_sample + [binary_gen]
exprs_for_nth_first_last = \
'first(a) OVER (PARTITION BY b ORDER BY c RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW), ' \
'first(a) OVER (PARTITION BY b ORDER BY c RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING), ' \
'first(a) OVER (PARTITION BY b ORDER BY c ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW), ' \
'first(a) OVER (PARTITION BY b ORDER BY c ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING), ' \
'last (a) OVER (PARTITION BY b ORDER BY c RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW), ' \
'last (a) OVER (PARTITION BY b ORDER BY c RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING), ' \
'last (a) OVER (PARTITION BY b ORDER BY c ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW), ' \
'last (a) OVER (PARTITION BY b ORDER BY c ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING), ' \
'NTH_VALUE(a, 1) OVER (PARTITION BY b ORDER BY c RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW), ' \
'NTH_VALUE(a, 2) OVER (PARTITION BY b ORDER BY c RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING), ' \
'NTH_VALUE(a, 3) OVER (PARTITION BY b ORDER BY c ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW), ' \
'NTH_VALUE(a, 3) OVER (PARTITION BY b ORDER BY c ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING), '\
'first(a, true) OVER (PARTITION BY b ORDER BY c), ' \
'last (a, true) OVER (PARTITION BY b ORDER BY c), ' \
'last (a, true) OVER (PARTITION BY b ORDER BY c) '
exprs_for_nth_first_last_ignore_nulls = \
'NTH_VALUE(a, 1) IGNORE NULLS OVER (PARTITION BY b ORDER BY c RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING), ' \
'first(a) IGNORE NULLS OVER (PARTITION BY b ORDER BY c ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING), ' \
'last(a) IGNORE NULLS OVER (PARTITION BY b ORDER BY c) '
@pytest.mark.parametrize('data_gen', all_basic_gens_no_null + decimal_gens + _nested_gens, ids=idfn)
def test_window_first_last_nth(data_gen):
assert_gpu_and_cpu_are_equal_sql(
# Coalesce is to make sure that first and last, which are non-deterministic, become deterministic
lambda spark: three_col_df(spark, data_gen, string_gen, int_gen, num_slices=1).coalesce(1),
"window_agg_table",
'SELECT a, b, c, ' + exprs_for_nth_first_last +
'FROM window_agg_table')
@pytest.mark.skipif(is_before_spark_320(), reason='IGNORE NULLS clause is not supported for FIRST(), LAST() and NTH_VALUE in Spark 3.1.x')
@pytest.mark.parametrize('data_gen', all_basic_gens_no_null + decimal_gens + _nested_gens, ids=idfn)
def test_window_first_last_nth_ignore_nulls(data_gen):
assert_gpu_and_cpu_are_equal_sql(
# Coalesce is to make sure that first and last, which are non-deterministic, become deterministic
lambda spark: three_col_df(spark, data_gen, string_gen, int_gen, num_slices=1).coalesce(1),
"window_agg_table",
'SELECT a, b, c, ' + exprs_for_nth_first_last_ignore_nulls +
'FROM window_agg_table')
@ignore_order(local=True)
def test_to_date_with_window_functions():
"""
This test ensures that date expressions participating alongside window aggregations
are initialized correctly. (See: https://github.com/NVIDIA/spark-rapids/issues/5984)
For certain vendor-specific Spark versions, the date expression might be evaluated
directly in the WindowExec, instead of being projected upstream. For instance,
the query in this test might produce this plan on CPU:
```
Window [cast(gettimestamp(cast(date_1#1 as string), yyyy-MM-dd, TimestampType, Some(Etc/UTC), false) as date)...]
+- Sort [id#0L ASC NULLS FIRST, date_2#2 ASC NULLS FIRST], false, 0
+- Exchange hashpartitioning(id#0L, 200), ENSURE_REQUIREMENTS, [id=#136]
+- *(1) Project [date_1#1, id#0L, date_2#2]
```
This might trip up the GPU plan, by incompletely initializing `GpuGetTimeStamp` for `date_1` thus:
```
+- GpuProject [cast(gpugettimestamp(cast(date_1#1 as string), yyyy-MM-dd, null, null, None) as date) AS my_date#6]
```
The correct initialization should have yielded:
```
+- GpuProject [cast(gpugettimestamp(cast(date_1#1 as string), yyyy-MM-dd, yyyy-MM-dd, %Y-%m-%d, None) as date)]
```
"""
assert_gpu_and_cpu_are_equal_sql(
df_fun=lambda spark: gen_df(spark, [('id', RepeatSeqGen(int_gen, 20)),
('date_1', DateGen()),
('date_2', DateGen())]),
table_name="window_input",
sql="""
SELECT TO_DATE( CAST(date_1 AS STRING), 'yyyy-MM-dd' ) AS my_date,
SUM(1) OVER(PARTITION BY id ORDER BY date_2) AS my_sum
FROM window_input
"""
)
def test_lru_cache_datagen():
# log cache info at the end of integration tests, not related to window functions
info = gen_df_help.cache_info()
warnings.warn("Cache info: {}".format(info))
gen_df_help.cache_clear()
| spark-rapids-branch-23.10 | integration_tests/src/main/python/window_function_test.py |
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
allow_non_gpu_databricks = pytest.mark.allow_non_gpu_databricks
allow_non_gpu = pytest.mark.allow_non_gpu
validate_execs_in_gpu_plan = pytest.mark.validate_execs_in_gpu_plan
approximate_float = pytest.mark.approximate_float
ignore_order = pytest.mark.ignore_order
incompat = pytest.mark.incompat
inject_oom = pytest.mark.inject_oom
limit = pytest.mark.limit
qarun = pytest.mark.qarun
cudf_udf = pytest.mark.cudf_udf
shuffle_test = pytest.mark.shuffle_test
nightly_gpu_mem_consuming_case = pytest.mark.nightly_gpu_mem_consuming_case
nightly_host_mem_consuming_case = pytest.mark.nightly_host_mem_consuming_case
fuzz_test = pytest.mark.fuzz_test
iceberg = pytest.mark.iceberg
delta_lake = pytest.mark.delta_lake
large_data_test = pytest.mark.large_data_test | spark-rapids-branch-23.10 | integration_tests/src/main/python/marks.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_gpu_and_cpu_are_equal_collect, assert_equal
from data_gen import *
import pyspark.sql.functions as f
from marks import ignore_order
@pytest.mark.parametrize('data_gen', all_gen, ids=idfn)
# Many Spark versions have issues sorting large decimals,
# see https://issues.apache.org/jira/browse/SPARK-40089.
@ignore_order(local=True)
def test_expand_exec(data_gen):
def op_df(spark, length=2048, seed=0):
return gen_df(spark, StructGen([
('a', data_gen),
('b', IntegerGen())], nullable=False), length=length, seed=seed).rollup(f.col("a"), f.col("b")).agg(f.col("b"))
assert_gpu_and_cpu_are_equal_collect(op_df)
| spark-rapids-branch-23.10 | integration_tests/src/main/python/expand_exec_test.py |
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import re
import stat
import sys
logging.basicConfig(
format="%(asctime)s %(levelname)-8s %(message)s",
level=logging.INFO,
datefmt="%Y-%m-%d %H:%M:%S",
)
_CONF_ENV_PREFIX = 'PYSP_TEST_'
_EXECUTOR_ENV_PREFIX = 'spark_executorEnv_'
def env_for_conf(spark_conf_name):
# escape underscores
escaped_conf = spark_conf_name.replace('_', r'__')
return _CONF_ENV_PREFIX + escaped_conf.replace('.', '_')
def conf_for_env(env_name):
conf_key = env_name[len(_CONF_ENV_PREFIX):]
if conf_key.startswith(_EXECUTOR_ENV_PREFIX):
res = _EXECUTOR_ENV_PREFIX.replace('_', '.') + conf_key[len(_EXECUTOR_ENV_PREFIX):]
else:
# replace standalone underscores
res1 = re.sub(r'(?<!_)_(?!_)', '.', conf_key)
# unescape: remove duplicate underscores
res = res1.replace('__', '_')
return res
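# Illustrative sketch (not part of the original file, helper name is hypothetical):
# round-tripping Spark conf keys through the PYSP_TEST_ environment-variable encoding
# defined above. Dots map to single underscores, literal underscores are escaped by
# doubling, and executor-env keys keep their suffix untouched.
def _demo_conf_env_round_trip():
    for conf_key in ('spark.eventLog.enabled', 'spark.executorEnv.PYTHONPATH'):
        env_key = env_for_conf(conf_key)          # e.g. 'PYSP_TEST_spark_eventLog_enabled'
        assert conf_for_env(env_key) == conf_key  # decoding restores the original key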
_DRIVER_ENV = env_for_conf('spark.driver.extraJavaOptions')
_SPARK_JARS = env_for_conf("spark.jars")
_SPARK_JARS_PACKAGES = env_for_conf("spark.jars.packages")
spark_jars_env = {
_SPARK_JARS,
_SPARK_JARS_PACKAGES
}
def findspark_init():
import findspark
findspark.init()
logging.info("Checking if add_jars/packages to findspark required")
spark_jars = os.getenv(_SPARK_JARS)
spark_jars_packages = os.getenv(_SPARK_JARS_PACKAGES)
if spark_jars is not None:
logging.info(f"Adding to findspark jars: {spark_jars}")
findspark.add_jars(spark_jars)
if spark_jars_packages is not None:
logging.info(f"Adding to findspark packages: {spark_jars_packages}")
findspark.add_packages(spark_jars_packages)
def running_with_xdist(session, is_worker):
try:
import xdist
return xdist.is_xdist_worker(session) if is_worker\
else xdist.is_xdist_master(session)
except ImportError:
return False
def pyspark_ready():
try:
import pyspark
return True
except ImportError:
return False
def global_init():
logging.info("Executing global initialization tasks before test launches")
create_tmp_hive()
def create_tmp_hive():
path = os.environ.get('PYSP_TEST_spark_hadoop_hive_exec_scratchdir', '/tmp/hive')
mode = stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO
logging.info(f"Creating directory {path} with permissions {oct(mode)}")
try:
os.makedirs(path, mode, exist_ok=True)
os.chmod(path, mode)
except Exception as e:
logging.warn(f"Failed to setup the hive scratch dir {path}. Error {e}")
def pytest_sessionstart(session):
# initializations that must happen globally once before tests start
# when using xdist this runs in the coordinator, otherwise in the pytest process itself
if not running_with_xdist(session, is_worker=True):
global_init()
if running_with_xdist(session, is_worker = True):
logging.info("Initializing findspark because running with xdist worker")
findspark_init()
elif running_with_xdist(session, is_worker = False):
logging.info("Skipping findspark init because on xdist master")
return
elif not pyspark_ready():
logging.info("Initializing findspark because pyspark unimportable on a standalone Pytest instance")
findspark_init()
import pyspark
# Force the RapidsPlugin to be enabled, so it blows up if the classpath is not set properly
# DO NOT SET ANY OTHER CONFIGS HERE!!!
# due to bugs in pyspark/pytest it looks like any configs set here
# can be reset in the middle of a test if specific operations are done (some types of cast etc)
_sb = pyspark.sql.SparkSession.builder
_sb.config('spark.plugins', 'com.nvidia.spark.SQLPlugin') \
.config("spark.sql.adaptive.enabled", "false") \
.config('spark.sql.queryExecutionListeners', 'org.apache.spark.sql.rapids.ExecutionPlanCaptureCallback')
for key, value in os.environ.items():
if key.startswith(_CONF_ENV_PREFIX) and key != _DRIVER_ENV and key not in spark_jars_env:
_sb.config(conf_for_env(key), value)
driver_opts = os.environ.get(_DRIVER_ENV, "")
if ('PYTEST_XDIST_WORKER' in os.environ):
wid = os.environ['PYTEST_XDIST_WORKER']
_handle_derby_dir(_sb, driver_opts, wid)
_handle_event_log_dir(_sb, wid)
_handle_ivy_cache_dir(_sb, wid)
else:
_sb.config('spark.driver.extraJavaOptions', driver_opts)
_handle_event_log_dir(_sb, 'gw0')
# enableHiveSupport() is needed for parquet bucket tests
_s = _sb.enableHiveSupport() \
.appName('rapids spark plugin integration tests (python)').getOrCreate()
#TODO catch the ClassNotFound error that happens if the classpath is not set up properly and
# make it a better error message
_s.sparkContext.setLogLevel("WARN")
global _spark
_spark = _s
def _handle_derby_dir(sb, driver_opts, wid):
d = "./derby_{}".format(wid)
if not os.path.exists(d):
os.makedirs(d)
sb.config('spark.driver.extraJavaOptions', driver_opts + ' -Dderby.system.home={}'.format(d))
def _handle_event_log_dir(sb, wid):
if os.environ.get('SPARK_EVENTLOG_ENABLED', str(True)).lower() in [
str(False).lower(), 'off', '0'
]:
logging.info('Automatic configuration for spark event log disabled')
return
import pyspark
spark_conf = pyspark.SparkConf()
master_url = os.environ.get(env_for_conf('spark.master'),
spark_conf.get("spark.master", 'local'))
event_log_config = os.environ.get(env_for_conf('spark.eventLog.enabled'),
spark_conf.get('spark.eventLog.enabled', str(False).lower()))
event_log_codec = os.environ.get(env_for_conf('spark.eventLog.compression.codec'), 'zstd')
if not master_url.startswith('local') or event_log_config != str(False).lower():
logging.info("SPARK_EVENTLOG_ENABLED is ignored for non-local Spark master and when "
"it's pre-configured by the user")
return
d = "./eventlog_{}".format(wid)
if not os.path.exists(d):
# Set 'exist_ok' as True to avoid raising 'FileExistsError' as the folder might be created
# by other tests when they are executed in parallel
os.makedirs(d, exist_ok=True)
logging.info('Spark event logs will appear under {}. Set the environment variable '
'SPARK_EVENTLOG_ENABLED=false if you want to disable it'.format(d))
sb\
.config('spark.eventLog.dir', "file://{}".format(os.path.abspath(d))) \
.config('spark.eventLog.compress', True) \
.config('spark.eventLog.enabled', True) \
.config('spark.eventLog.compression.codec', event_log_codec)
def _handle_ivy_cache_dir(sb, wid):
if os.environ.get('SPARK_IVY_CACHE_ENABLED', str(True)).lower() in [
str(False).lower(), 'off', '0'
]:
logging.info('Automatic configuration for spark ivy cache dir disabled')
return
sb.config('spark.jars.ivy', '/tmp/.ivy2_{}'.format(wid))
def get_spark_i_know_what_i_am_doing():
"""
Get the current SparkSession.
This should almost never be called directly. Instead you should call
with_spark_session, with_cpu_session, or with_gpu_session from spark_session.
This is to guarantee that the session and its config are set up in a repeatable way.
"""
return _spark
def spark_version():
return _spark.version
| spark-rapids-branch-23.10 | integration_tests/src/main/python/spark_init_internal.py |
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from conftest import is_at_least_precommit_run
from pyspark.sql.pandas.utils import require_minimum_pyarrow_version, require_minimum_pandas_version
try:
require_minimum_pandas_version()
except Exception as e:
if is_at_least_precommit_run():
raise AssertionError("incorrect pandas version during required testing " + str(e))
pytestmark = pytest.mark.skip(reason=str(e))
try:
require_minimum_pyarrow_version()
except Exception as e:
if is_at_least_precommit_run():
raise AssertionError("incorrect pyarrow version during required testing " + str(e))
pytestmark = pytest.mark.skip(reason=str(e))
import pandas as pd
import time
from typing import Iterator
from pyspark.sql import Window
from pyspark.sql.functions import pandas_udf, PandasUDFType
from spark_session import with_cpu_session, with_gpu_session
from marks import cudf_udf
_conf = {
'spark.rapids.sql.exec.AggregateInPandasExec': 'true',
'spark.rapids.sql.exec.FlatMapCoGroupsInPandasExec': 'true',
'spark.rapids.sql.exec.WindowInPandasExec': 'true',
'spark.rapids.sql.python.gpu.enabled': 'true'
}
small_data = [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)]
large_data = list(map(lambda i: (i, i/1.0), range(1, 512))) * 2
def _create_df(spark, data=large_data):
return spark.createDataFrame(data, ("id", "v"))
# since this test requires running different functions on CPU and GPU (cudf is needed on the GPU),
# it defines its own assert function
def _assert_cpu_gpu(cpu_func, gpu_func, cpu_conf={}, gpu_conf={}, is_sort=False):
print('### CPU RUN ###')
cpu_start = time.time()
cpu_ret = with_cpu_session(cpu_func, conf=cpu_conf)
cpu_end = time.time()
print('### GPU RUN ###')
gpu_start = time.time()
gpu_ret = with_gpu_session(gpu_func, conf=gpu_conf)
gpu_end = time.time()
print('### WRITE: GPU TOOK {} CPU TOOK {} ###'.format(
gpu_end - gpu_start, cpu_end - cpu_start))
if is_sort:
assert cpu_ret.sort() == gpu_ret.sort()
else:
assert cpu_ret == gpu_ret
# ======= Test Scalar =======
@cudf_udf
@pytest.mark.parametrize('data', [small_data, large_data], ids=['small data', 'large data'])
def test_with_column(enable_cudf_udf, data):
@pandas_udf('int')
def _plus_one_cpu_func(v: pd.Series) -> pd.Series:
return v + 1
@pandas_udf('int')
def _plus_one_gpu_func(v: pd.Series) -> pd.Series:
import cudf
gpu_series = cudf.Series(v)
gpu_series = gpu_series + 1
return gpu_series.to_pandas()
def cpu_run(spark):
df = _create_df(spark, data)
return df.withColumn("v1", _plus_one_cpu_func(df.v)).collect()
def gpu_run(spark):
df = _create_df(spark, data)
return df.withColumn("v1", _plus_one_gpu_func(df.v)).collect()
_assert_cpu_gpu(cpu_run, gpu_run, gpu_conf=_conf)
@cudf_udf
def test_sql(enable_cudf_udf):
@pandas_udf('int')
def _plus_one_cpu_func(v: pd.Series) -> pd.Series:
return v + 1
@pandas_udf('int')
def _plus_one_gpu_func(v: pd.Series) -> pd.Series:
import cudf
gpu_series = cudf.Series(v)
gpu_series = gpu_series + 1
return gpu_series.to_pandas()
def cpu_run(spark):
_ = spark.udf.register("add_one_cpu", _plus_one_cpu_func)
_create_df(spark).createOrReplaceTempView("test_table_cpu")
return spark.sql("SELECT add_one_cpu(id) FROM test_table_cpu").collect()
def gpu_run(spark):
_ = spark.udf.register("add_one_gpu", _plus_one_gpu_func)
_create_df(spark).createOrReplaceTempView("test_table_gpu")
return spark.sql("SELECT add_one_gpu(id) FROM test_table_gpu").collect()
_assert_cpu_gpu(cpu_run, gpu_run, gpu_conf=_conf)
# ======= Test Scalar Iterator =======
@cudf_udf
def test_select(enable_cudf_udf):
@pandas_udf("long")
def _plus_one_cpu_iter_func(iterator: Iterator[pd.Series]) -> Iterator[pd.Series]:
for s in iterator:
yield s + 1
@pandas_udf("long")
def _plus_one_gpu_iter_func(iterator: Iterator[pd.Series]) -> Iterator[pd.Series]:
import cudf
for s in iterator:
gpu_series = cudf.Series(s)
gpu_series = gpu_series + 1
yield gpu_series.to_pandas()
def cpu_run(spark):
df = _create_df(spark)
return df.select(_plus_one_cpu_iter_func(df.v)).collect()
def gpu_run(spark):
df = _create_df(spark)
return df.select(_plus_one_gpu_iter_func(df.v)).collect()
_assert_cpu_gpu(cpu_run, gpu_run, gpu_conf=_conf)
# ======= Test Flat Map In Pandas =======
@cudf_udf
def test_map_in_pandas(enable_cudf_udf):
def cpu_run(spark):
def _filter_cpu_func(iterator):
for pdf in iterator:
yield pdf[pdf.id == 1]
df = _create_df(spark)
return df.mapInPandas(_filter_cpu_func, df.schema).collect()
def gpu_run(spark):
def _filter_gpu_func(iterator):
import cudf
for pdf in iterator:
gdf = cudf.from_pandas(pdf)
yield gdf[gdf.id == 1].to_pandas()
df = _create_df(spark)
return df.mapInPandas(_filter_gpu_func, df.schema).collect()
_assert_cpu_gpu(cpu_run, gpu_run, gpu_conf=_conf)
# ======= Test Grouped Map In Pandas =======
# The udf type (GROUPED_MAP) must be specified explicitly; otherwise Spark raises:
# "Invalid udf: the udf argument must be a pandas_udf of type GROUPED_MAP"
@cudf_udf
def test_group_apply(enable_cudf_udf):
@pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP)
def _normalize_cpu_func(df):
v = df.v
return df.assign(v=(v - v.mean()) / v.std())
@pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP)
def _normalize_gpu_func(df):
import cudf
gdf = cudf.from_pandas(df)
v = gdf.v
return gdf.assign(v=(v - v.mean()) / v.std()).to_pandas()
def cpu_run(spark):
df = _create_df(spark)
return df.groupby("id").apply(_normalize_cpu_func).collect()
def gpu_run(spark):
df = _create_df(spark)
return df.groupby("id").apply(_normalize_gpu_func).collect()
_assert_cpu_gpu(cpu_run, gpu_run, gpu_conf=_conf, is_sort=True)
@cudf_udf
def test_group_apply_in_pandas(enable_cudf_udf):
def cpu_run(spark):
def _normalize_cpu_in_pandas_func(df):
v = df.v
return df.assign(v=(v - v.mean()) / v.std())
df = _create_df(spark)
return df.groupby("id").applyInPandas(_normalize_cpu_in_pandas_func, df.schema).collect()
def gpu_run(spark):
def _normalize_gpu_in_pandas_func(df):
import cudf
gdf = cudf.from_pandas(df)
v = gdf.v
return gdf.assign(v=(v - v.mean()) / v.std()).to_pandas()
df = _create_df(spark)
return df.groupby("id").applyInPandas(_normalize_gpu_in_pandas_func, df.schema).collect()
_assert_cpu_gpu(cpu_run, gpu_run, gpu_conf=_conf, is_sort=True)
# ======= Test Aggregate In Pandas =======
@cudf_udf
def test_group_agg(enable_cudf_udf):
@pandas_udf("int")
def _sum_cpu_func(v: pd.Series) -> int:
return v.sum()
@pandas_udf("integer")
def _sum_gpu_func(v: pd.Series) -> int:
import cudf
gpu_series = cudf.Series(v)
return gpu_series.sum()
def cpu_run(spark):
df = _create_df(spark)
return df.groupby("id").agg(_sum_cpu_func(df.v)).collect()
def gpu_run(spark):
df = _create_df(spark)
return df.groupby("id").agg(_sum_gpu_func(df.v)).collect()
_assert_cpu_gpu(cpu_run, gpu_run, gpu_conf=_conf, is_sort=True)
@cudf_udf
def test_sql_group(enable_cudf_udf):
@pandas_udf("int")
def _sum_cpu_func(v: pd.Series) -> int:
return v.sum()
@pandas_udf("integer")
def _sum_gpu_func(v: pd.Series) -> int:
import cudf
gpu_series = cudf.Series(v)
return gpu_series.sum()
def cpu_run(spark):
_ = spark.udf.register("sum_cpu_udf", _sum_cpu_func)
q = "SELECT sum_cpu_udf(v1) FROM VALUES (3, 0), (2, 0), (1, 1) tbl(v1, v2) GROUP BY v2"
return spark.sql(q).collect()
def gpu_run(spark):
_ = spark.udf.register("sum_gpu_udf", _sum_gpu_func)
q = "SELECT sum_gpu_udf(v1) FROM VALUES (3, 0), (2, 0), (1, 1) tbl(v1, v2) GROUP BY v2"
return spark.sql(q).collect()
_assert_cpu_gpu(cpu_run, gpu_run, gpu_conf=_conf, is_sort=True)
# ======= Test Window In Pandas =======
@cudf_udf
def test_window(enable_cudf_udf):
@pandas_udf("int")
def _sum_cpu_func(v: pd.Series) -> int:
return v.sum()
@pandas_udf("integer")
def _sum_gpu_func(v: pd.Series) -> int:
import cudf
gpu_series = cudf.Series(v)
return gpu_series.sum()
def cpu_run(spark):
df = _create_df(spark)
w = Window.partitionBy('id').rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)
return df.withColumn('sum_v', _sum_cpu_func('v').over(w)).collect()
def gpu_run(spark):
df = _create_df(spark)
w = Window.partitionBy('id').rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)
return df.withColumn('sum_v', _sum_gpu_func('v').over(w)).collect()
_assert_cpu_gpu(cpu_run, gpu_run, gpu_conf=_conf, is_sort=True)
# ======= Test CoGroup Map In Pandas =======
@cudf_udf
def test_cogroup(enable_cudf_udf):
def cpu_run(spark):
def _cpu_join_func(l, r):
return pd.merge(l, r, on="time")
df1 = spark.createDataFrame(
[(20000101, 1, 1.0), (20000101, 2, 2.0), (20000102, 1, 3.0), (20000102, 2, 4.0)],
("time", "id", "v1"))
df2 = spark.createDataFrame(
[(20000101, 1, "x"), (20000101, 2, "y")],
("time", "id", "v2"))
return df1.groupby("id").cogroup(df2.groupby("id")).applyInPandas(_cpu_join_func,
schema="time int, id_x int, id_y int, v1 double, v2 string").collect()
def gpu_run(spark):
def _gpu_join_func(l, r):
import cudf
gl = cudf.from_pandas(l)
gr = cudf.from_pandas(r)
return gl.merge(gr, on="time").to_pandas()
df1 = spark.createDataFrame(
[(20000101, 1, 1.0), (20000101, 2, 2.0), (20000102, 1, 3.0), (20000102, 2, 4.0)],
("time", "id", "v1"))
df2 = spark.createDataFrame(
[(20000101, 1, "x"), (20000101, 2, "y")],
("time", "id", "v2"))
return df1.groupby("id").cogroup(df2.groupby("id")).applyInPandas(_gpu_join_func,
schema="time int, id_x int, id_y int, v1 double, v2 string").collect()
_assert_cpu_gpu(cpu_run, gpu_run, gpu_conf=_conf, is_sort=True)
| spark-rapids-branch-23.10 | integration_tests/src/main/python/udf_cudf_test.py |
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_gpu_and_cpu_are_equal_collect, assert_gpu_and_cpu_error, \
assert_gpu_fallback_collect, assert_cpu_and_gpu_are_equal_collect_with_capture
from data_gen import *
from conftest import is_databricks_runtime
from marks import allow_non_gpu, ignore_order
from spark_session import is_before_spark_330, is_databricks104_or_later, is_databricks113_or_later, is_spark_33X, is_spark_340_or_later
from pyspark.sql.functions import create_map, col, lit, row_number
from pyspark.sql.types import *
from pyspark.sql.types import IntegralType
from pyspark.sql.window import Window
basic_struct_gen = StructGen([
['child' + str(ind), sub_gen]
for ind, sub_gen in enumerate([StringGen(), ByteGen(), ShortGen(), IntegerGen(), LongGen(),
BooleanGen(), DateGen(), TimestampGen(), null_gen] + decimal_gens)],
nullable=False)
maps_with_binary_value = [MapGen(IntegerGen(nullable=False), BinaryGen(max_length=5))]
# Once https://github.com/NVIDIA/spark-rapids/issues/8985 is fixed, these should be added to the
# map_keys, map_values, and map_entries tests.
maps_with_binary_key = [MapGen(BinaryGen(nullable=False), BinaryGen(max_length=5))]
maps_with_array_key = [
MapGen(ArrayGen(IntegerGen(), nullable=False, max_length=5, convert_to_tuple=True),
IntegerGen())]
maps_with_struct_key = [
MapGen(StructGen([['child0', IntegerGen()],
['child1', IntegerGen()]], nullable=False),
IntegerGen())]
supported_key_map_gens = \
map_gens_sample + \
maps_with_binary_value + \
decimal_64_map_gens + \
decimal_128_map_gens
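# Key types below are not yet supported on the GPU for get_map_value and are expected to fall
# back to the CPU (see test_get_map_value_fallback_keys).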
not_supported_get_map_value_keys_map_gens = \
maps_with_binary_key + \
maps_with_array_key + \
maps_with_struct_key
@pytest.mark.parametrize('data_gen', supported_key_map_gens, ids=idfn)
def test_map_keys(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, data_gen).selectExpr(
            # Technically the order of the keys could change and still be correct,
            # but it works this way for now, so let's see if we can maintain it.
            # That is just as well, because we cannot yet support sorting all of the types that
            # could appear here, and checking equality would need special-case code.
'map_keys(a)'))
@pytest.mark.parametrize('data_gen', supported_key_map_gens, ids=idfn)
def test_map_values(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, data_gen).selectExpr(
            # Technically the order of the values could change and still be correct,
            # but it works this way for now, so let's see if we can maintain it.
            # That is just as well, because we cannot yet support sorting all of the types that
            # could appear here, and checking equality would need special-case code.
'map_values(a)'))
@pytest.mark.parametrize('data_gen', supported_key_map_gens, ids=idfn)
def test_map_entries(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, data_gen).selectExpr(
            # Technically the order of the values could change and still be correct,
            # but it works this way for now, so let's see if we can maintain it.
            # That is just as well, because we cannot yet support sorting all of the types that
            # could appear here, and checking equality would need special-case code.
'map_entries(a)'))
def get_map_value_gens(precision=18, scale=0):
def simple_struct_value_gen():
return StructGen([["child", IntegerGen()]])
def nested_struct_value_gen():
return StructGen([["child", simple_struct_value_gen()]])
def nested_map_value_gen():
return MapGen(StringGen(pattern='key_[0-9]', nullable=False), IntegerGen(), max_length=6)
def array_value_gen():
return ArrayGen(IntegerGen(), max_length=6)
def decimal_value_gen():
return DecimalGen(precision, scale)
return [ByteGen, ShortGen, IntegerGen, LongGen, FloatGen, DoubleGen,
StringGen, DateGen, TimestampGen, decimal_value_gen, BinaryGen,
simple_struct_value_gen, nested_struct_value_gen, nested_map_value_gen, array_value_gen]
@pytest.mark.parametrize('data_gen',
[MapGen(StringGen(pattern='key_[0-9]', nullable=False), value(), max_length=6)
for value in get_map_value_gens()],
ids=idfn)
def test_get_map_value_string_keys(data_gen):
index_gen = StringGen()
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, [("a", data_gen), ("ix", index_gen)]).selectExpr(
'a[ix]',
'a["key_0"]',
'a["key_1"]',
'a[null]',
'a["key_9"]',
'a["NOT_FOUND"]',
'a["key_5"]'))
numeric_key_gens = [
key(nullable=False) if key in [FloatGen, DoubleGen, DecimalGen]
else key(nullable=False, min_val=0, max_val=100)
for key in [ByteGen, ShortGen, IntegerGen, LongGen, FloatGen, DoubleGen, DecimalGen]]
numeric_key_map_gens = [MapGen(key, value(), max_length=6)
for key in numeric_key_gens for value in get_map_value_gens()]
@pytest.mark.parametrize('data_gen', numeric_key_map_gens, ids=idfn)
def test_get_map_value_numeric_keys(data_gen):
key_gen = data_gen._key_gen
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, [("a", data_gen), ("ix", key_gen)]).selectExpr(
'a[ix]',
'a[0]',
'a[1]',
'a[null]',
'a[-9]',
'a[999]'))
@pytest.mark.parametrize('data_gen', supported_key_map_gens, ids=idfn)
def test_get_map_value_supported_keys(data_gen):
key_gen = data_gen._key_gen
    # The first expression, a[ix], is not guaranteed to hit a key.
    # The second expression uses map_keys, so it hits the first key, or returns null
    # when the map generated for `a` is empty.
assert_cpu_and_gpu_are_equal_collect_with_capture(
lambda spark: gen_df(spark, [("a", data_gen), ("ix", key_gen)]) \
.selectExpr('a[ix]', 'a[map_keys(a)[0]]'),
exist_classes="GpuGetMapValue,GpuMapKeys")
@allow_non_gpu("ProjectExec")
@pytest.mark.parametrize('data_gen', not_supported_get_map_value_keys_map_gens, ids=idfn)
def test_get_map_value_fallback_keys(data_gen):
key_gen = data_gen._key_gen
assert_gpu_fallback_collect(
lambda spark: gen_df(spark, [("a", data_gen), ("ix", key_gen)]) \
.selectExpr('a[ix]'),
cpu_fallback_class_name="GetMapValue")
@pytest.mark.parametrize('key_gen', numeric_key_gens, ids=idfn)
def test_basic_scalar_map_get_map_value(key_gen):
def query_map_scalar(spark):
return unary_op_df(spark, key_gen).selectExpr('map(0, "zero", 1, "one")[a]')
assert_cpu_and_gpu_are_equal_collect_with_capture(
query_map_scalar,
# check that GpuGetMapValue wasn't optimized out
exist_classes="GpuGetMapValue",
conf = {"spark.rapids.sql.explain": "NONE",
# this is set to True so we don't fall back due to float/double -> int
# casting (because the keys of the scalar map are integers)
"spark.rapids.sql.castFloatToIntegralTypes.enabled": True})
@allow_non_gpu('WindowLocalExec')
@pytest.mark.parametrize('data_gen', supported_key_map_gens, ids=idfn)
def test_map_scalars_supported_key_types(data_gen):
key_gen = data_gen._key_gen
def query_map_scalar(spark):
key_df = gen_df(spark, [("key", key_gen)], length=100).orderBy(col("key"))\
.select(col("key"),
row_number().over(Window().orderBy(col('key')))\
.alias("row_num"))
key_df.select(col("key").alias("key_at_ix"))\
.where(col("row_num") == 5)\
.repartition(100)\
.createOrReplaceTempView("single_key_tbl")
key_df.select(col("key").alias("key_at_ix_next")) \
.where(col("row_num") == 6) \
.repartition(100) \
.createOrReplaceTempView("single_key_tbl_next")
        # There will be a single row in single_key_tbl (the row where row_num == 5).
        # We repartition that table to create several empty partitions to exercise them,
        # and also because the window operation previously put everything into a single partition.
        # Because there is only a single key, first(ignore_nulls = true) will be deterministic.
return spark.sql(
"select key_at_ix, " +
" (select first(map(key_at_ix, 'value'), true) " +
" from single_key_tbl)[key_at_ix], " +
            # this one intentionally uses `key_at_ix` to guarantee we won't match the key
" (select first(map(key_at_ix_next, 'value'), true) " +
" from single_key_tbl_next)[key_at_ix] " +
"from single_key_tbl")
assert_cpu_and_gpu_are_equal_collect_with_capture(
query_map_scalar,
# check that GpuGetMapValue wasn't optimized out
exist_classes="GpuGetMapValue",
conf = {"spark.rapids.sql.explain": "NONE"})
@pytest.mark.parametrize('data_gen',
[MapGen(DateGen(nullable=False), value(), max_length=6)
for value in get_map_value_gens()], ids=idfn)
def test_get_map_value_date_keys(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, data_gen).selectExpr(
'a[date "1997"]',
'a[date "2022-01-01"]',
'a[null]'))
@pytest.mark.parametrize('data_gen',
[MapGen(TimestampGen(nullable=False), value(), max_length=6)
for value in get_map_value_gens()], ids=idfn)
def test_get_map_value_timestamp_keys(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, data_gen).selectExpr(
'a[timestamp "1997"]',
'a[timestamp "2022-01-01"]',
'a[null]'))
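# For id == 0 the map expression would produce a duplicate key (id and id DIV 2 are both 0),
# which the EXCEPTION dedup policy rejects, so this checks that the branch not taken by `if`
# is not evaluated eagerly.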
def test_map_side_effects():
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.range(10).selectExpr(
'id',
'if(id == 0, null, map(id, id, id DIV 2, id)) as m'),
conf={'spark.sql.mapKeyDedupPolicy': 'EXCEPTION'})
@pytest.mark.parametrize('key_gen', [StringGen(nullable=False), IntegerGen(nullable=False), basic_struct_gen], ids=idfn)
@pytest.mark.parametrize('value_gen', [StringGen(nullable=True), IntegerGen(nullable=True), basic_struct_gen], ids=idfn)
def test_single_entry_map(key_gen, value_gen):
data_gen = [('a', key_gen), ('b', value_gen)]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, data_gen).selectExpr(
'map("literal_key", b) as map1',
'map(a, b) as map2'))
def test_map_expr_no_pairs():
data_gen = [('a', StringGen(nullable=False)), ('b', StringGen(nullable=False))]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, data_gen).selectExpr(
'map() as m1'))
def test_map_expr_multiple_pairs():
# we don't hit duplicate keys in this test due to the high cardinality of the generated strings
data_gen = [('a', StringGen(nullable=False)), ('b', StringGen(nullable=False))]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, data_gen).selectExpr(
'map("key1", b, "key2", a) as m1',
'map(a, b, b, a) as m2'))
def test_map_expr_expr_keys_dupe_last_win():
data_gen = [('a', StringGen(nullable=False)), ('b', StringGen(nullable=False))]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, data_gen).selectExpr(
'map(a, b, a, b) as m2'),
conf={'spark.sql.mapKeyDedupPolicy':'LAST_WIN'})
def test_map_expr_expr_keys_dupe_exception():
data_gen = [('a', StringGen(nullable=False)), ('b', StringGen(nullable=False))]
assert_gpu_and_cpu_error(
lambda spark: gen_df(spark, data_gen).selectExpr(
'map(a, b, a, b) as m2').collect(),
conf={'spark.sql.mapKeyDedupPolicy':'EXCEPTION'},
error_message = "Duplicate map key")
def test_map_keys_null_exception():
assert_gpu_and_cpu_error(
lambda spark: spark.sql(
"select map(x, -1) from (select explode(array(1,null)) as x)").collect(),
conf = {},
error_message = "Cannot use null as map key")
def test_map_expr_literal_keys_dupe_last_win():
data_gen = [('a', StringGen(nullable=False)), ('b', StringGen(nullable=False))]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, data_gen).selectExpr(
'map("key1", b, "key1", a) as m1'),
conf={'spark.sql.mapKeyDedupPolicy':'LAST_WIN'})
def test_map_expr_literal_keys_dupe_exception():
data_gen = [('a', StringGen(nullable=False)), ('b', StringGen(nullable=False))]
assert_gpu_and_cpu_error(
lambda spark: gen_df(spark, data_gen).selectExpr(
'map("key1", b, "key1", a) as m1').collect(),
conf={'spark.sql.mapKeyDedupPolicy':'EXCEPTION'},
error_message = "Duplicate map key")
def test_map_expr_multi_non_literal_keys():
data_gen = [('a', StringGen(nullable=False)), ('b', StringGen(nullable=False))]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, data_gen).selectExpr(
'map(a, b, b, a) as m1'))
def test_map_scalar_project():
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.range(2).selectExpr(
"map(1, 2, 3, 4) as i",
"map('a', 'b', 'c', 'd') as s",
"map('a', named_struct('foo', 10, 'bar', 'bar')) as st"
"id"))
def test_str_to_map_expr_fixed_pattern_input():
# Test pattern "key1:val1,key2:val2".
# In order to prevent duplicate keys, the first key starts with a number [0-9] and the second
# key start with a letter [a-zA-Z].
data_gen = [('a', StringGen(pattern='[0-9].{0,10}:.{0,10},[a-zA-Z].{0,10}:.{0,10}',
nullable=True))]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, data_gen).selectExpr(
'str_to_map(a) as m1',
'str_to_map(a, ",") as m2',
'str_to_map(a, ",", ":") as m3'))
def test_str_to_map_expr_fixed_delimiters():
data_gen = [('a', StringGen(pattern='[0-9a-zA-Z:,]{0,100}', nullable=True)
.with_special_pattern('[abc]:.{0,20},[abc]:.{0,20}', weight=100))]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, data_gen).selectExpr(
'str_to_map(a) as m1',
'str_to_map(a, ",") as m2',
'str_to_map(a, ",", ":") as m3'
), conf={'spark.sql.mapKeyDedupPolicy': 'LAST_WIN'})
def test_str_to_map_expr_random_delimiters():
data_gen = [('a', StringGen(pattern='[0-9a-z:,]{0,100}', nullable=True)
.with_special_pattern('[abc]:.{0,20},[abc]:.{0,20}', weight=100))]
delim_gen = StringGen(pattern='[0-9a-z :,]', nullable=False)
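    # Keep drawing scalar delimiters until the pair delimiter and the key/value delimiter
    # differ, since equal delimiters would make the split ambiguous.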
(pair_delim, keyval_delim) = ('', '')
while pair_delim == keyval_delim:
(pair_delim, keyval_delim) = gen_scalars_for_sql(delim_gen, 2, force_no_nulls=True)
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, data_gen).selectExpr(
'str_to_map(a) as m1',
'str_to_map(a, {}) as m2'.format(pair_delim),
'str_to_map(a, {}, {}) as m3'.format(pair_delim, keyval_delim)
), conf={'spark.sql.mapKeyDedupPolicy': 'LAST_WIN'})
def test_str_to_map_expr_input_no_delimiter():
# Test input strings that contain either one delimiter or do not contain delimiters at all.
data_gen = [('a', StringGen(pattern='[0-9:,]{0,100}', nullable=True)
.with_special_pattern('[abc]:.{0,20},[abc]:.{0,20}', weight=100))]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, data_gen).selectExpr(
'str_to_map(a, "A", ":") as m1', # input doesn't contain pair delimiter
'str_to_map(a, ",", "A") as m2', # input doesn't contain key-value delimiter
'str_to_map(a, "A", "A") as m3' # input doesn't contain any delimiter
), conf={'spark.sql.mapKeyDedupPolicy': 'LAST_WIN'})
def test_str_to_map_expr_with_regex_and_non_regex_delimiters():
data_gen = [('a', StringGen(pattern='(([bf]:{0,5}){1,7},{0,5}[0-9]{1,10}){0,10}',
nullable=True))]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, data_gen).selectExpr(
'str_to_map(a, "[,]") as m1',
'str_to_map(a, "[,]{1,5}") as m2',
'str_to_map(a, "[,b]") as m3',
'str_to_map(a, ",", "[:]") as m4',
'str_to_map(a, ",", "[:f]") as m5',
'str_to_map(a, ",", "[:]{1,10}") as m6'
), conf={'spark.sql.mapKeyDedupPolicy': 'LAST_WIN'})
def test_str_to_map_expr_with_all_regex_delimiters():
data_gen = [('a', StringGen(pattern='(([bf]:{0,5}){1,7},{0,5}[0-9]{1,10}){0,10}',
nullable=True))]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, data_gen).selectExpr(
'str_to_map(a, "[,]") as m1',
'str_to_map(a, "[,]", "[:]") as m2',
'str_to_map(a, "[,b]", "[:f]") as m3',
'str_to_map(a, "[,]", "[:]{1,10}") as m4',
'str_to_map(a, "[,]{1,10}", "[:]{1,10}") as m5'
), conf={'spark.sql.mapKeyDedupPolicy': 'LAST_WIN'})
@pytest.mark.skipif(not is_before_spark_330(),
reason="Only in Spark 3.1.1+ (< 3.3.0) + ANSI mode, map key throws on no such element")
@pytest.mark.parametrize('data_gen', [simple_string_to_string_map_gen], ids=idfn)
def test_simple_get_map_value_ansi_fail(data_gen):
message = "org.apache.spark.SparkNoSuchElementException" if is_databricks104_or_later() else "java.util.NoSuchElementException"
assert_gpu_and_cpu_error(
lambda spark: unary_op_df(spark, data_gen).selectExpr(
'a["NOT_FOUND"]').collect(),
conf=ansi_enabled_conf,
error_message=message)
@pytest.mark.skipif(is_before_spark_340() and not is_databricks113_or_later(),
reason="Only in Spark 3.4+ with ANSI mode, map key returns null on no such element")
@pytest.mark.parametrize('data_gen', [simple_string_to_string_map_gen], ids=idfn)
def test_simple_get_map_value_ansi_null(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, data_gen).selectExpr(
'a["NOT_FOUND"]'),
conf=ansi_enabled_conf)
@pytest.mark.skipif(not is_spark_33X() or is_databricks_runtime(),
reason="Only in Spark 3.3.X + ANSI mode + Strict Index, map key throws on no such element")
@pytest.mark.parametrize('strict_index', ['true', 'false'])
@pytest.mark.parametrize('data_gen', [simple_string_to_string_map_gen], ids=idfn)
def test_simple_get_map_value_with_strict_index(strict_index, data_gen):
message = "org.apache.spark.SparkNoSuchElementException"
test_conf = copy_and_update(ansi_enabled_conf, {'spark.sql.ansi.strictIndexOperator': strict_index})
if strict_index == 'true':
assert_gpu_and_cpu_error(
lambda spark: unary_op_df(spark, data_gen).selectExpr(
'a["NOT_FOUND"]').collect(),
conf=test_conf,
error_message=message)
else:
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, data_gen).selectExpr(
'a["NOT_FOUND"]'),
conf=test_conf)
@pytest.mark.parametrize('data_gen',
[MapGen(StringGen(pattern='key_[0-9]', nullable=False), value(), max_length=6)
for value in get_map_value_gens()],
ids=idfn)
def test_element_at_map_string_keys(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, data_gen).selectExpr(
'element_at(a, "key_0")',
'element_at(a, "key_1")',
'element_at(a, "null")',
'element_at(a, "key_9")',
'element_at(a, "NOT_FOUND")',
'element_at(a, "key_5")'),
conf={'spark.sql.ansi.enabled': False})
@pytest.mark.parametrize('data_gen', numeric_key_map_gens, ids=idfn)
def test_element_at_map_numeric_keys(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, data_gen).selectExpr(
'element_at(a, 0)',
'element_at(a, 1)',
'element_at(a, null)',
'element_at(a, -9)',
'element_at(a, 999)'),
conf={'spark.sql.ansi.enabled': False})
@pytest.mark.parametrize('data_gen',
[MapGen(DecimalGen(precision=35, scale=2, nullable=False), value(), max_length=6)
for value in get_map_value_gens(precision=37, scale=0)],
ids=idfn)
def test_get_map_value_element_at_map_dec_col_keys(data_gen):
keys = DecimalGen(precision=35, scale=2)
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, data_gen, keys).selectExpr(
'element_at(a, b)', 'a[b]'),
conf={'spark.sql.ansi.enabled': False})
@pytest.mark.parametrize('data_gen',
[MapGen(StringGen(pattern='key', nullable=False),
IntegerGen(nullable=False), max_length=1, min_length=1, nullable=False)],
ids=idfn)
@pytest.mark.parametrize('ansi', [True, False], ids=idfn)
def test_get_map_value_element_at_map_string_col_keys_ansi(data_gen, ansi):
keys = StringGen(pattern='key', nullable=False)
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, data_gen, keys).selectExpr(
'element_at(a, b)', 'a[b]'),
conf={'spark.sql.ansi.enabled': ansi})
@pytest.mark.parametrize('data_gen',
[MapGen(StringGen(pattern='key_[0-9]', nullable=False), value(), max_length=6)
for value in get_map_value_gens(precision=37, scale=0)],
ids=idfn)
def test_get_map_value_element_at_map_string_col_keys(data_gen):
keys = StringGen(pattern='key_[0-9]')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, data_gen, keys).selectExpr(
'element_at(a, b)', 'a[b]'),
conf={'spark.sql.ansi.enabled': False})
@pytest.mark.parametrize('data_gen', [simple_string_to_string_map_gen], ids=idfn)
@pytest.mark.skipif(is_spark_340_or_later() or is_databricks113_or_later(),
reason="Since Spark3.4 and DB11.3, null will always be returned on invalid access to map")
def test_element_at_map_string_col_keys_ansi_fail(data_gen):
keys = StringGen(pattern='NOT_FOUND')
message = "org.apache.spark.SparkNoSuchElementException" if (not is_before_spark_330() or is_databricks104_or_later()) else "java.util.NoSuchElementException"
# For 3.3.X strictIndexOperator should not affect element_at
test_conf = copy_and_update(ansi_enabled_conf, {'spark.sql.ansi.strictIndexOperator': 'false'})
assert_gpu_and_cpu_error(
lambda spark: two_col_df(spark, data_gen, keys).selectExpr(
'element_at(a, b)').collect(),
conf=test_conf,
error_message=message)
@pytest.mark.skipif(is_before_spark_340() and not is_databricks113_or_later(),
reason="Only in Spark 3.4 + with ANSI mode, map key returns null on no such element")
@pytest.mark.parametrize('data_gen', [simple_string_to_string_map_gen], ids=idfn)
def test_element_at_map_string_col_keys_ansi_null(data_gen):
keys = StringGen(pattern='NOT_FOUND')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, data_gen, keys).selectExpr(
'element_at(a, b)'),
conf=ansi_enabled_conf)
@pytest.mark.parametrize('data_gen', [simple_string_to_string_map_gen], ids=idfn)
@pytest.mark.skipif(is_spark_340_or_later() or is_databricks113_or_later(),
reason="Since Spark3.4 and DB11.3, null will always be returned on invalid access to map")
def test_get_map_value_string_col_keys_ansi_fail(data_gen):
keys = StringGen(pattern='NOT_FOUND')
message = "org.apache.spark.SparkNoSuchElementException" if (not is_before_spark_330() or is_databricks104_or_later()) else "java.util.NoSuchElementException"
assert_gpu_and_cpu_error(
lambda spark: two_col_df(spark, data_gen, keys).selectExpr(
'a[b]').collect(),
conf=ansi_enabled_conf,
error_message=message)
@pytest.mark.skipif(is_before_spark_340() and not is_databricks113_or_later(),
reason="Only in Spark 3.4 + ANSI mode, map key returns null on no such element")
@pytest.mark.parametrize('data_gen', [simple_string_to_string_map_gen], ids=idfn)
def test_get_map_value_string_col_keys_ansi_null(data_gen):
keys = StringGen(pattern='NOT_FOUND')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, data_gen, keys).selectExpr(
'a[b]'),
conf=ansi_enabled_conf)
@pytest.mark.parametrize('data_gen',
[MapGen(DateGen(nullable=False), value(), max_length=6)
for value in get_map_value_gens()], ids=idfn)
def test_element_at_map_date_keys(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, data_gen).selectExpr(
'element_at(a, date "1997")',
'element_at(a, date "2022-01-01")',
'element_at(a, null)'),
conf={'spark.sql.ansi.enabled': False})
@pytest.mark.parametrize('data_gen',
[MapGen(TimestampGen(nullable=False), value(), max_length=6)
for value in get_map_value_gens()],
ids=idfn)
def test_element_at_map_timestamp_keys(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, data_gen).selectExpr(
'element_at(a, timestamp "1997")',
'element_at(a, timestamp "2022-01-01")',
'element_at(a, null)'),
conf={'spark.sql.ansi.enabled': False})
@pytest.mark.parametrize('data_gen', [simple_string_to_string_map_gen], ids=idfn)
@pytest.mark.skipif(is_spark_340_or_later() or is_databricks113_or_later(),
reason="Since Spark3.4 and DB11.3, null will always be returned on invalid access to map")
def test_map_element_at_ansi_fail(data_gen):
message = "org.apache.spark.SparkNoSuchElementException" if (not is_before_spark_330() or is_databricks104_or_later()) else "java.util.NoSuchElementException"
# For 3.3.0+ strictIndexOperator should not affect element_at
test_conf = copy_and_update(ansi_enabled_conf, {'spark.sql.ansi.strictIndexOperator': 'false'})
assert_gpu_and_cpu_error(
lambda spark: unary_op_df(spark, data_gen).selectExpr(
'element_at(a, "NOT_FOUND")').collect(),
conf=test_conf,
error_message=message)
@pytest.mark.skipif(is_before_spark_340() and not is_databricks113_or_later(),
reason="Only in Spark 3.4 + ANSI mode, map key returns null on no such element")
@pytest.mark.parametrize('data_gen', [simple_string_to_string_map_gen], ids=idfn)
def test_map_element_at_ansi_null(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, data_gen).selectExpr(
'element_at(a, "NOT_FOUND")'),
conf=ansi_enabled_conf)
@pytest.mark.parametrize('data_gen', map_gens_sample, ids=idfn)
def test_transform_values(data_gen):
def do_it(spark):
columns = ['a', 'b',
'transform_values(a, (key, value) -> value) as ident',
'transform_values(a, (key, value) -> null) as n',
'transform_values(a, (key, value) -> 1) as one',
'transform_values(a, (key, value) -> key) as indexed',
'transform_values(a, (key, value) -> b) as b_val']
value_type = data_gen.data_type.valueType
# decimal types can grow too large so we are avoiding those here for now
if isinstance(value_type, IntegralType):
columns.extend([
'transform_values(a, (key, value) -> value + 1) as add',
'transform_values(a, (key, value) -> value + value) as mul',
'transform_values(a, (key, value) -> value + b) as all_add'])
if isinstance(value_type, StringType):
columns.extend(['transform_values(a, (key, value) -> concat(value, "-test")) as con'])
if isinstance(value_type, ArrayType):
columns.extend([
'transform_values(a, '
' (key, value) -> transform(value, sub_entry -> 1)) as sub_one',
'transform_values(a, '
' (key, value) -> transform(value, (sub_entry, sub_index) -> sub_index)) as sub_index',
'transform_values(a, '
' (key, value) -> transform(value, (sub_entry, sub_index) -> sub_index + b)) as add_indexes'])
if isinstance(value_type, MapType):
columns.extend([
'transform_values(a, (key, value) -> transform_values(value, (sub_key, sub_value) -> 1)) as sub_one'])
return two_col_df(spark, data_gen, byte_gen).selectExpr(columns)
assert_gpu_and_cpu_are_equal_collect(do_it)
@pytest.mark.parametrize('data_gen', map_gens_sample + decimal_128_map_gens + decimal_64_map_gens, ids=idfn)
def test_transform_keys(data_gen):
# The processing here is very limited, because we need to be sure we do not create duplicate keys.
# This can happen because of integer overflow, round off errors in floating point, etc. So for now
# we really are only looking at a very basic transformation.
def do_it(spark):
columns = ['a', 'transform_keys(a, (key, value) -> key) as ident']
key_type = data_gen.data_type.keyType
if isinstance(key_type, StringType):
columns.extend(['transform_keys(a, (key, value) -> concat(key, "-test")) as con'])
return unary_op_df(spark, data_gen).selectExpr(columns)
assert_gpu_and_cpu_are_equal_collect(do_it)
@pytest.mark.parametrize('data_gen', [simple_string_to_string_map_gen], ids=idfn)
def test_transform_keys_null_fail(data_gen):
assert_gpu_and_cpu_error(
lambda spark: unary_op_df(spark, data_gen).selectExpr(
'transform_keys(a, (key, value) -> CAST(null as INT))').collect(),
conf={},
error_message='Cannot use null as map key')
@pytest.mark.parametrize('data_gen', [simple_string_to_string_map_gen], ids=idfn)
def test_transform_keys_duplicate_fail(data_gen):
assert_gpu_and_cpu_error(
lambda spark: unary_op_df(spark, data_gen).selectExpr(
'transform_keys(a, (key, value) -> 1)').collect(),
conf={},
error_message='Duplicate map key')
@pytest.mark.parametrize('data_gen', [simple_string_to_string_map_gen], ids=idfn)
def test_transform_keys_last_win(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, data_gen).selectExpr('transform_keys(a, (key, value) -> 1)'),
conf={'spark.sql.mapKeyDedupPolicy': 'LAST_WIN'})
@pytest.mark.parametrize('data_gen', [MapGen(IntegerGen(nullable=False), long_gen)], ids=idfn)
def test_transform_keys_last_win2(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, data_gen).selectExpr('transform_keys(a, (key, value) -> key % 2)'),
conf={'spark.sql.mapKeyDedupPolicy': 'LAST_WIN'})
# We include several types of processing for foldable functions because the output
# can be of different types.
@pytest.mark.parametrize('query', [
'map_from_arrays(sequence(1, 5), sequence(1, 5)) as m_a',
'map("a", "a", "b", "c") as m',
'map(1, sequence(1, 5)) as m'], ids=idfn)
def test_sql_map_scalars(query):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.sql('SELECT {}'.format(query)))
@pytest.mark.parametrize('data_gen', map_gens_sample, ids=idfn)
def test_map_filter(data_gen):
columns = ['map_filter(a, (key, value) -> isnotnull(value) )',
'map_filter(a, (key, value) -> isnull(value) )',
'map_filter(a, (key, value) -> isnull(key) or isnotnull(value) )',
'map_filter(a, (key, value) -> isnotnull(key) and isnull(value) )']
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, data_gen).selectExpr(columns))
| spark-rapids-branch-23.10 | integration_tests/src/main/python/map_test.py |
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_gpu_and_cpu_are_equal_collect, assert_gpu_and_cpu_error, assert_gpu_and_cpu_row_counts_equal, assert_gpu_fallback_write, \
assert_cpu_and_gpu_are_equal_collect_with_capture, assert_gpu_fallback_collect
from conftest import get_non_gpu_allowed
from datetime import datetime, timezone
from data_gen import *
from marks import *
from pyspark.sql.types import *
from spark_session import with_cpu_session, is_before_spark_330, is_spark_340_or_later, is_before_spark_340
_acq_schema = StructType([
StructField('loan_id', LongType()),
StructField('orig_channel', StringType()),
StructField('seller_name', StringType()),
StructField('orig_interest_rate', DoubleType()),
StructField('orig_upb', IntegerType()),
StructField('orig_loan_term', IntegerType()),
StructField('orig_date', StringType()),
StructField('first_pay_date', StringType()),
StructField('orig_ltv', DoubleType()),
StructField('orig_cltv', DoubleType()),
StructField('num_borrowers', DoubleType()),
StructField('dti', DoubleType()),
StructField('borrower_credit_score', DoubleType()),
StructField('first_home_buyer', StringType()),
StructField('loan_purpose', StringType()),
StructField('property_type', StringType()),
StructField('num_units', IntegerType()),
StructField('occupancy_status', StringType()),
StructField('property_state', StringType()),
StructField('zip', IntegerType()),
StructField('mortgage_insurance_percent', DoubleType()),
StructField('product_type', StringType()),
StructField('coborrow_credit_score', DoubleType()),
StructField('mortgage_insurance_type', DoubleType()),
StructField('relocation_mortgage_indicator', StringType())])
_perf_schema = StructType([
StructField('loan_id', LongType()),
StructField('monthly_reporting_period', StringType()),
StructField('servicer', StringType()),
StructField('interest_rate', DoubleType()),
StructField('current_actual_upb', DoubleType()),
StructField('loan_age', DoubleType()),
StructField('remaining_months_to_legal_maturity', DoubleType()),
StructField('adj_remaining_months_to_maturity', DoubleType()),
StructField('maturity_date', StringType()),
StructField('msa', DoubleType()),
StructField('current_loan_delinquency_status', IntegerType()),
StructField('mod_flag', StringType()),
StructField('zero_balance_code', StringType()),
StructField('zero_balance_effective_date', StringType()),
StructField('last_paid_installment_date', StringType()),
StructField('foreclosed_after', StringType()),
StructField('disposition_date', StringType()),
StructField('foreclosure_costs', DoubleType()),
StructField('prop_preservation_and_repair_costs', DoubleType()),
StructField('asset_recovery_costs', DoubleType()),
StructField('misc_holding_expenses', DoubleType()),
StructField('holding_taxes', DoubleType()),
StructField('net_sale_proceeds', DoubleType()),
StructField('credit_enhancement_proceeds', DoubleType()),
StructField('repurchase_make_whole_proceeds', StringType()),
StructField('other_foreclosure_proceeds', DoubleType()),
StructField('non_interest_bearing_upb', DoubleType()),
StructField('principal_forgiveness_upb', StringType()),
StructField('repurchase_make_whole_proceeds_flag', StringType()),
StructField('foreclosure_principal_write_off_amount', StringType()),
StructField('servicing_activity_indicator', StringType())])
_date_schema = StructType([
StructField('date', DateType())])
_ts_schema = StructType([
StructField('ts', TimestampType())])
_bad_str_schema = StructType([
StructField('string', StringType())])
_good_str_schema = StructType([
StructField('Something', StringType())])
_three_str_schema = StructType([
StructField('a', StringType()),
StructField('b', StringType()),
StructField('c', StringType())])
_trucks_schema = StructType([
StructField('make', StringType()),
StructField('model', StringType()),
StructField('year', IntegerType()),
StructField('price', StringType()),
StructField('comment', StringType())])
_bool_schema = StructType([
StructField('boolean', BooleanType())])
_byte_schema = StructType([
StructField('number', ByteType())])
_short_schema = StructType([
StructField('number', ShortType())])
_int_schema = StructType([
StructField('number', IntegerType())])
_long_schema = StructType([
StructField('number', LongType())])
_float_schema = StructType([
StructField('number', FloatType())])
_double_schema = StructType([
StructField('number', DoubleType())])
_decimal_10_2_schema = StructType([
StructField('number', DecimalType(10, 2))])
_decimal_10_3_schema = StructType([
StructField('number', DecimalType(10, 3))])
_number_as_string_schema = StructType([
StructField('number', StringType())])
_empty_byte_schema = StructType([
StructField('ignored_a', StringType()),
StructField('number', ByteType()),
StructField('ignored_b', StringType())])
_empty_short_schema = StructType([
StructField('ignored_a', StringType()),
StructField('number', ShortType()),
StructField('ignored_b', StringType())])
_empty_int_schema = StructType([
StructField('ignored_a', StringType()),
StructField('number', IntegerType()),
StructField('ignored_b', StringType())])
_empty_long_schema = StructType([
StructField('ignored_a', StringType()),
StructField('number', LongType()),
StructField('ignored_b', StringType())])
_empty_float_schema = StructType([
StructField('ignored_a', StringType()),
StructField('number', FloatType()),
StructField('ignored_b', StringType())])
_empty_double_schema = StructType([
StructField('ignored_a', StringType()),
StructField('number', DoubleType()),
StructField('ignored_b', StringType())])
_enable_all_types_conf = {'spark.rapids.sql.csv.read.float.enabled': 'true',
'spark.rapids.sql.csv.read.double.enabled': 'true',
'spark.rapids.sql.csv.read.decimal.enabled': 'true',
'spark.sql.legacy.timeParserPolicy': 'CORRECTED'}
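# Two read helpers are used throughout: read_csv_df goes through the DataFrame reader API, while
# read_csv_sql registers a CSV-backed catalog table so the read goes through the SQL/catalog path.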
def read_csv_df(data_path, schema, spark_tmp_table_factory_ignored, options = {}):
def read_impl(spark):
reader = spark.read
        if schema is not None:
reader = reader.schema(schema)
for key, value in options.items():
reader = reader.option(key, value)
return debug_df(reader.csv(data_path))
return read_impl
def read_csv_sql(data_path, schema, spark_tmp_table_factory, options = {}):
opts = options
    if schema is not None:
opts = copy_and_update(options, {'schema': schema})
def read_impl(spark):
tmp_name = spark_tmp_table_factory.get()
return spark.catalog.createTable(tmp_name, source='csv', path=data_path, **opts)
return read_impl
@approximate_float
@pytest.mark.parametrize('name,schema,options', [
('Acquisition_2007Q3.txt', _acq_schema, {'sep': '|'}),
('Performance_2007Q3.txt_0', _perf_schema, {'sep': '|'}),
('ts.csv', _date_schema, {}),
('date.csv', _date_schema, {}),
('ts.csv', _ts_schema, {}),
('str.csv', _ts_schema, {}),
('str.csv', _bad_str_schema, {'header': 'true'}),
('str.csv', _good_str_schema, {'header': 'true'}),
('no-comments.csv', _three_str_schema, {}),
('empty.csv', _three_str_schema, {}),
('just_comments.csv', _three_str_schema, {'comment': '#'}),
('trucks.csv', _trucks_schema, {'header': 'true'}),
('trucks.tsv', _trucks_schema, {'sep': '\t', 'header': 'true'}),
('trucks-different.csv', _trucks_schema, {'sep': '|', 'header': 'true', 'quote': "'"}),
('trucks-blank-names.csv', _trucks_schema, {'header': 'true'}),
('trucks-windows.csv', _trucks_schema, {'header': 'true'}),
('trucks-empty-values.csv', _trucks_schema, {'header': 'true'}),
('trucks-extra-columns.csv', _trucks_schema, {'header': 'true'}),
pytest.param('trucks-comments.csv', _trucks_schema, {'header': 'true', 'comment': '~'}, marks=pytest.mark.xfail(reason='https://github.com/NVIDIA/spark-rapids/issues/2066')),
('trucks-more-comments.csv', _trucks_schema, {'header': 'true', 'comment': '#'}),
pytest.param('trucks-missing-quotes.csv', _trucks_schema, {'header': 'true'}, marks=pytest.mark.xfail(reason='https://github.com/NVIDIA/spark-rapids/issues/130')),
pytest.param('trucks-null.csv', _trucks_schema, {'header': 'true', 'nullValue': 'null'}, marks=pytest.mark.xfail(reason='https://github.com/NVIDIA/spark-rapids/issues/2068')),
pytest.param('trucks-null.csv', _trucks_schema, {'header': 'true'}, marks=pytest.mark.xfail(reason='https://github.com/NVIDIA/spark-rapids/issues/1986')),
pytest.param('simple_int_values.csv', _byte_schema, {'header': 'true'}),
pytest.param('simple_int_values.csv', _short_schema, {'header': 'true'}),
pytest.param('simple_int_values.csv', _int_schema, {'header': 'true'}),
pytest.param('simple_int_values.csv', _long_schema, {'header': 'true'}),
('simple_int_values.csv', _float_schema, {'header': 'true'}),
('simple_int_values.csv', _double_schema, {'header': 'true'}),
('simple_int_values.csv', _decimal_10_2_schema, {'header': 'true'}),
('decimals.csv', _decimal_10_2_schema, {'header': 'true'}),
('decimals.csv', _decimal_10_3_schema, {'header': 'true'}),
pytest.param('empty_int_values.csv', _empty_byte_schema, {'header': 'true'}),
pytest.param('empty_int_values.csv', _empty_short_schema, {'header': 'true'}),
pytest.param('empty_int_values.csv', _empty_int_schema, {'header': 'true'}),
pytest.param('empty_int_values.csv', _empty_long_schema, {'header': 'true'}),
pytest.param('empty_int_values.csv', _empty_float_schema, {'header': 'true'}),
pytest.param('empty_int_values.csv', _empty_double_schema, {'header': 'true'}),
pytest.param('nan_and_inf.csv', _float_schema, {'header': 'true'}, marks=pytest.mark.xfail(reason='https://github.com/NVIDIA/spark-rapids/issues/125')),
pytest.param('floats_invalid.csv', _float_schema, {'header': 'true'}),
pytest.param('floats_invalid.csv', _double_schema, {'header': 'true'}),
pytest.param('simple_float_values.csv', _byte_schema, {'header': 'true'}),
pytest.param('simple_float_values.csv', _short_schema, {'header': 'true'}),
pytest.param('simple_float_values.csv', _int_schema, {'header': 'true'}),
pytest.param('simple_float_values.csv', _long_schema, {'header': 'true'}),
pytest.param('simple_float_values.csv', _float_schema, {'header': 'true'}),
pytest.param('simple_float_values.csv', _double_schema, {'header': 'true'}),
pytest.param('simple_float_values.csv', _decimal_10_2_schema, {'header': 'true'}),
pytest.param('simple_float_values.csv', _decimal_10_3_schema, {'header': 'true'}),
pytest.param('simple_boolean_values.csv', _bool_schema, {'header': 'true'}),
pytest.param('ints_with_whitespace.csv', _number_as_string_schema, {'header': 'true'}, marks=pytest.mark.xfail(reason='https://github.com/NVIDIA/spark-rapids/issues/2069')),
pytest.param('ints_with_whitespace.csv', _byte_schema, {'header': 'true'}, marks=pytest.mark.xfail(reason='https://github.com/NVIDIA/spark-rapids/issues/130'))
], ids=idfn)
@pytest.mark.parametrize('read_func', [read_csv_df, read_csv_sql])
@pytest.mark.parametrize('v1_enabled_list', ["", "csv"])
@pytest.mark.parametrize('ansi_enabled', ["true", "false"])
def test_basic_csv_read(std_input_path, name, schema, options, read_func, v1_enabled_list, ansi_enabled, spark_tmp_table_factory):
updated_conf=copy_and_update(_enable_all_types_conf, {
'spark.sql.sources.useV1SourceList': v1_enabled_list,
'spark.sql.ansi.enabled': ansi_enabled
})
assert_gpu_and_cpu_are_equal_collect(read_func(std_input_path + '/' + name, schema, spark_tmp_table_factory, options),
conf=updated_conf)
@pytest.mark.parametrize('name,schema,options', [
pytest.param('small_float_values.csv', _float_schema, {'header': 'true'}),
pytest.param('small_float_values.csv', _double_schema, {'header': 'true'}),
], ids=idfn)
@pytest.mark.parametrize('read_func', [read_csv_df, read_csv_sql])
@pytest.mark.parametrize('v1_enabled_list', ["", "csv"])
@pytest.mark.parametrize('ansi_enabled', ["true", "false"])
def test_csv_read_small_floats(std_input_path, name, schema, options, read_func, v1_enabled_list, ansi_enabled, spark_tmp_table_factory):
updated_conf=copy_and_update(_enable_all_types_conf, {
'spark.sql.sources.useV1SourceList': v1_enabled_list,
'spark.sql.ansi.enabled': ansi_enabled
})
assert_gpu_and_cpu_are_equal_collect(read_func(std_input_path + '/' + name, schema, spark_tmp_table_factory, options),
conf=updated_conf)
csv_supported_gens = [
    # Spark does not escape '\r' or '\n' even though it uses them to mark the end of a record.
    # Handling them would require multiLine reads to work correctly, so we avoid these chars.
StringGen('(\\w| |\t|\ud720){0,10}', nullable=False),
StringGen('[aAbB ]{0,10}'),
StringGen('[nN][aA][nN]'),
StringGen('[+-]?[iI][nN][fF]([iI][nN][iI][tT][yY])?'),
byte_gen, short_gen, int_gen, long_gen, boolean_gen, date_gen,
DoubleGen(no_nans=False),
pytest.param(double_gen),
pytest.param(FloatGen(no_nans=False)),
pytest.param(float_gen),
TimestampGen()]
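# Illustrative sketch only (not used by the tests above; the helper name is hypothetical):
# if values containing '\r' or '\n' ever needed to be covered, the read side would have to opt
# into multiLine parsing, roughly as below, and the written data would need compatible quoting.
def _read_csv_multiline_sketch(spark, schema, data_path):
    # multiLine lets a single quoted CSV record span several physical lines
    return spark.read \
        .schema(schema) \
        .option('multiLine', 'true') \
        .csv(data_path)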
@approximate_float
@pytest.mark.parametrize('data_gen', csv_supported_gens, ids=idfn)
@pytest.mark.parametrize('v1_enabled_list', ["", "csv"])
def test_round_trip(spark_tmp_path, data_gen, v1_enabled_list):
gen = StructGen([('a', data_gen)], nullable=False)
data_path = spark_tmp_path + '/CSV_DATA'
schema = gen.data_type
updated_conf = copy_and_update(_enable_all_types_conf, {'spark.sql.sources.useV1SourceList': v1_enabled_list})
with_cpu_session(
lambda spark : gen_df(spark, gen).write.csv(data_path))
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.read.schema(schema).csv(data_path),
conf=updated_conf)
@allow_non_gpu('org.apache.spark.sql.execution.LeafExecNode')
@pytest.mark.parametrize('read_func', [read_csv_df, read_csv_sql])
@pytest.mark.parametrize('disable_conf', ['spark.rapids.sql.format.csv.enabled', 'spark.rapids.sql.format.csv.read.enabled'])
def test_csv_fallback(spark_tmp_path, read_func, disable_conf, spark_tmp_table_factory):
data_gens =[
StringGen('(\\w| |\t|\ud720){0,10}', nullable=False),
byte_gen, short_gen, int_gen, long_gen, boolean_gen, date_gen]
gen_list = [('_c' + str(i), gen) for i, gen in enumerate(data_gens)]
gen = StructGen(gen_list, nullable=False)
data_path = spark_tmp_path + '/CSV_DATA'
schema = gen.data_type
updated_conf = copy_and_update(_enable_all_types_conf, {disable_conf: 'false'})
reader = read_func(data_path, schema, spark_tmp_table_factory)
with_cpu_session(
lambda spark : gen_df(spark, gen).write.csv(data_path))
assert_gpu_fallback_collect(
lambda spark : reader(spark).select(f.col('*'), f.col('_c2') + f.col('_c3')),
# TODO add support for lists
cpu_fallback_class_name=get_non_gpu_allowed()[0],
conf=updated_conf)
csv_supported_date_formats = ['yyyy-MM-dd', 'yyyy/MM/dd', 'yyyy-MM', 'yyyy/MM',
'MM-yyyy', 'MM/yyyy', 'MM-dd-yyyy', 'MM/dd/yyyy', 'dd-MM-yyyy', 'dd/MM/yyyy']
@pytest.mark.parametrize('date_format', csv_supported_date_formats, ids=idfn)
@pytest.mark.parametrize('v1_enabled_list', ["", "csv"])
@pytest.mark.parametrize('ansi_enabled', ["true", "false"])
@pytest.mark.parametrize('time_parser_policy', [
pytest.param('LEGACY', marks=pytest.mark.allow_non_gpu('BatchScanExec,FileSourceScanExec')),
'CORRECTED',
'EXCEPTION'
])
def test_date_formats_round_trip(spark_tmp_path, date_format, v1_enabled_list, ansi_enabled, time_parser_policy):
gen = StructGen([('a', DateGen())], nullable=False)
data_path = spark_tmp_path + '/CSV_DATA'
schema = gen.data_type
updated_conf = copy_and_update(_enable_all_types_conf,
{'spark.sql.sources.useV1SourceList': v1_enabled_list,
'spark.sql.ansi.enabled': ansi_enabled,
'spark.sql.legacy.timeParserPolicy': time_parser_policy})
with_cpu_session(
lambda spark : gen_df(spark, gen).write\
.option('dateFormat', date_format)\
.csv(data_path))
if time_parser_policy == 'LEGACY':
expected_class = 'FileSourceScanExec'
if v1_enabled_list == '':
expected_class = 'BatchScanExec'
assert_gpu_fallback_collect(
lambda spark : spark.read \
.schema(schema) \
.option('dateFormat', date_format) \
.csv(data_path),
expected_class,
conf=updated_conf)
else:
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.read\
.schema(schema)\
.option('dateFormat', date_format)\
.csv(data_path),
conf=updated_conf)
@pytest.mark.parametrize('filename', ["date.csv"])
@pytest.mark.parametrize('v1_enabled_list', ["", "csv"])
@pytest.mark.parametrize('ansi_enabled', ["true", "false"])
@pytest.mark.parametrize('time_parser_policy', [
pytest.param('LEGACY', marks=pytest.mark.allow_non_gpu('BatchScanExec,FileSourceScanExec')),
'CORRECTED',
'EXCEPTION'
])
def test_read_valid_and_invalid_dates(std_input_path, filename, v1_enabled_list, ansi_enabled, time_parser_policy):
data_path = std_input_path + '/' + filename
updated_conf = copy_and_update(_enable_all_types_conf,
{'spark.sql.sources.useV1SourceList': v1_enabled_list,
'spark.sql.ansi.enabled': ansi_enabled,
'spark.sql.legacy.timeParserPolicy': time_parser_policy})
if time_parser_policy == 'EXCEPTION':
assert_gpu_and_cpu_error(
lambda spark : spark.read \
.schema(_date_schema) \
.csv(data_path)
.collect(),
conf=updated_conf,
error_message='DateTimeException')
else:
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.read \
.schema(_date_schema) \
.csv(data_path),
conf=updated_conf)
csv_supported_ts_parts = ['', # Just the date
"'T'HH:mm:ss.SSSXXX",
"'T'HH:mm:ss[.SSS][XXX]",
"'T'HH:mm:ss.SSS",
"'T'HH:mm:ss[.SSS]",
"'T'HH:mm:ss",
"'T'HH:mm[:ss]",
"'T'HH:mm"]
@pytest.mark.parametrize('ts_part', csv_supported_ts_parts)
@pytest.mark.parametrize('date_format', csv_supported_date_formats)
@pytest.mark.parametrize('v1_enabled_list', ["", "csv"])
def test_ts_formats_round_trip(spark_tmp_path, date_format, ts_part, v1_enabled_list):
full_format = date_format + ts_part
data_gen = TimestampGen()
gen = StructGen([('a', data_gen)], nullable=False)
data_path = spark_tmp_path + '/CSV_DATA'
schema = gen.data_type
with_cpu_session(
lambda spark : gen_df(spark, gen).write\
.option('timestampFormat', full_format)\
.csv(data_path))
updated_conf = copy_and_update(_enable_all_types_conf,
{'spark.sql.sources.useV1SourceList': v1_enabled_list})
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.read\
.schema(schema)\
.option('timestampFormat', full_format)\
.csv(data_path),
conf=updated_conf)
@pytest.mark.parametrize('v1_enabled_list', ["", "csv"])
def test_input_meta(spark_tmp_path, v1_enabled_list):
gen = StructGen([('a', long_gen), ('b', long_gen)], nullable=False)
first_data_path = spark_tmp_path + '/CSV_DATA/key=0'
with_cpu_session(
lambda spark : gen_df(spark, gen).write.csv(first_data_path))
second_data_path = spark_tmp_path + '/CSV_DATA/key=1'
with_cpu_session(
lambda spark : gen_df(spark, gen).write.csv(second_data_path))
data_path = spark_tmp_path + '/CSV_DATA'
updated_conf = copy_and_update(_enable_all_types_conf, {'spark.sql.sources.useV1SourceList': v1_enabled_list})
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.read.schema(gen.data_type)\
.csv(data_path)\
.filter(f.col('a') > 0)\
.selectExpr('a',
'input_file_name()',
'input_file_block_start()',
'input_file_block_length()'),
conf=updated_conf)
@allow_non_gpu('ProjectExec', 'Alias', 'InputFileName', 'InputFileBlockStart', 'InputFileBlockLength',
'FilterExec', 'And', 'IsNotNull', 'GreaterThan', 'Literal',
'FileSourceScanExec',
'BatchScanExec', 'CsvScan')
@pytest.mark.parametrize('v1_enabled_list', ["", "csv"])
@pytest.mark.parametrize('disable_conf', ['spark.rapids.sql.format.csv.enabled', 'spark.rapids.sql.format.csv.read.enabled'])
def test_input_meta_fallback(spark_tmp_path, v1_enabled_list, disable_conf):
gen = StructGen([('a', long_gen), ('b', long_gen)], nullable=False)
first_data_path = spark_tmp_path + '/CSV_DATA/key=0'
with_cpu_session(
lambda spark : gen_df(spark, gen).write.csv(first_data_path))
second_data_path = spark_tmp_path + '/CSV_DATA/key=1'
with_cpu_session(
lambda spark : gen_df(spark, gen).write.csv(second_data_path))
data_path = spark_tmp_path + '/CSV_DATA'
updated_conf = copy_and_update(_enable_all_types_conf, {
'spark.sql.sources.useV1SourceList': v1_enabled_list,
disable_conf: 'false'})
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.read.schema(gen.data_type)\
.csv(data_path)\
.filter(f.col('a') > 0)\
.selectExpr('a',
'input_file_name()',
'input_file_block_start()',
'input_file_block_length()'),
conf=updated_conf)
@allow_non_gpu('DataWritingCommandExec,ExecutedCommandExec,WriteFilesExec')
def test_csv_save_as_table_fallback(spark_tmp_path, spark_tmp_table_factory):
gen = TimestampGen()
data_path = spark_tmp_path + '/CSV_DATA'
assert_gpu_fallback_write(
lambda spark, path: unary_op_df(spark, gen).coalesce(1).write.format("csv").mode('overwrite').option("path", path).saveAsTable(spark_tmp_table_factory.get()),
lambda spark, path: spark.read.csv(path),
data_path,
'DataWritingCommandExec')
@pytest.mark.skipif(is_before_spark_330(), reason='Hidden file metadata columns are a new feature of Spark 330')
@allow_non_gpu(any = True)
@pytest.mark.parametrize('metadata_column', ["file_path", "file_name", "file_size", "file_modification_time"])
def test_csv_scan_with_hidden_metadata_fallback(spark_tmp_path, metadata_column):
data_path = spark_tmp_path + "/hidden_metadata.csv"
with_cpu_session(lambda spark : spark.range(10) \
.selectExpr("id") \
.write \
.mode("overwrite") \
.csv(data_path))
def do_csv_scan(spark):
df = spark.read.csv(data_path).selectExpr("_c0", "_metadata.{}".format(metadata_column))
return df
assert_cpu_and_gpu_are_equal_collect_with_capture(
do_csv_scan,
exist_classes= "FileSourceScanExec",
non_exist_classes= "GpuBatchScanExec")
@pytest.mark.skipif(is_before_spark_330(), reason='Reading day-time interval type is supported from Spark3.3.0')
@pytest.mark.parametrize('v1_enabled_list', ["", "csv"])
def test_round_trip_for_interval(spark_tmp_path, v1_enabled_list):
csv_interval_gens = [
DayTimeIntervalGen(start_field="day", end_field="day"),
DayTimeIntervalGen(start_field="day", end_field="hour"),
DayTimeIntervalGen(start_field="day", end_field="minute"),
DayTimeIntervalGen(start_field="day", end_field="second"),
DayTimeIntervalGen(start_field="hour", end_field="hour"),
DayTimeIntervalGen(start_field="hour", end_field="minute"),
DayTimeIntervalGen(start_field="hour", end_field="second"),
DayTimeIntervalGen(start_field="minute", end_field="minute"),
DayTimeIntervalGen(start_field="minute", end_field="second"),
DayTimeIntervalGen(start_field="second", end_field="second"),
]
gen = StructGen([('_c' + str(i), csv_interval_gens[i]) for i in range(0, len(csv_interval_gens))], nullable=False)
data_path = spark_tmp_path + '/CSV_DATA'
schema = gen.data_type
updated_conf = copy_and_update(_enable_all_types_conf, {'spark.sql.sources.useV1SourceList': v1_enabled_list})
with_cpu_session(
lambda spark: gen_df(spark, gen).write.csv(data_path))
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.read.schema(schema).csv(data_path),
conf=updated_conf)
@allow_non_gpu(any = True)
def test_csv_read_case_insensitivity(spark_tmp_path):
gen_list = [('one', int_gen), ('tWo', byte_gen), ('THREE', boolean_gen)]
data_path = spark_tmp_path + '/CSV_DATA'
with_cpu_session(lambda spark: gen_df(spark, gen_list).write.option('header', True).csv(data_path))
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.read.option('header', True).csv(data_path).select('one', 'two', 'three'),
{'spark.sql.caseSensitive': 'false'}
)
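# Only row counts are compared here; presumably that is why the count-only plan pieces
# (CollectLimitExec, DeserializeToObjectExec) are allowed to stay off the GPU.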
@allow_non_gpu('FileSourceScanExec', 'CollectLimitExec', 'DeserializeToObjectExec')
def test_csv_read_count(spark_tmp_path):
data_gens = [byte_gen, short_gen, int_gen, long_gen, boolean_gen, date_gen]
gen_list = [('_c' + str(i), gen) for i, gen in enumerate(data_gens)]
data_path = spark_tmp_path + '/CSV_DATA'
with_cpu_session(lambda spark: gen_df(spark, gen_list).write.csv(data_path))
assert_gpu_and_cpu_row_counts_equal(lambda spark: spark.read.csv(data_path))
@allow_non_gpu('FileSourceScanExec', 'CollectLimitExec', 'DeserializeToObjectExec')
@pytest.mark.skipif(is_before_spark_340(), reason='`preferDate` is only supported in Spark 340+')
def test_csv_prefer_date_with_infer_schema(spark_tmp_path):
# start date ""0001-01-02" required due to: https://github.com/NVIDIA/spark-rapids/issues/5606
data_gens = [byte_gen, short_gen, int_gen, long_gen, boolean_gen, timestamp_gen, DateGen(start=date(1, 1, 2))]
gen_list = [('_c' + str(i), gen) for i, gen in enumerate(data_gens)]
data_path = spark_tmp_path + '/CSV_DATA'
with_cpu_session(lambda spark: gen_df(spark, gen_list).write.csv(data_path))
assert_gpu_and_cpu_are_equal_collect(lambda spark: spark.read.option("inferSchema", "true").csv(data_path))
assert_gpu_and_cpu_are_equal_collect(lambda spark: spark.read.option("inferSchema", "true").option("preferDate", "false").csv(data_path))
@allow_non_gpu('FileSourceScanExec')
@pytest.mark.skipif(is_before_spark_340(), reason='enableDateTimeParsingFallback is supported from Spark3.4.0')
@pytest.mark.parametrize('filename,schema',[("date.csv", _date_schema), ("date.csv", _ts_schema,),
("ts.csv", _ts_schema)])
def test_csv_datetime_parsing_fallback_cpu_fallback(std_input_path, filename, schema):
data_path = std_input_path + "/" + filename
assert_gpu_fallback_collect(
lambda spark : spark.read.schema(schema).option('enableDateTimeParsingFallback', "true").csv(data_path),
'FileSourceScanExec',
conf=_enable_all_types_conf)
@pytest.mark.skipif(is_before_spark_340(), reason='enableDateTimeParsingFallback is supported from Spark 3.4.0')
@pytest.mark.parametrize('filename,schema', [("simple_int_values.csv", _int_schema), ("str.csv", _good_str_schema)])
def test_csv_datetime_parsing_fallback_no_datetime(std_input_path, filename, schema):
data_path = std_input_path + "/" + filename
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.read.schema(schema).option('enableDateTimeParsingFallback', "true").csv(data_path),
conf=_enable_all_types_conf)
@pytest.mark.parametrize('read_func', [read_csv_df, read_csv_sql])
@pytest.mark.parametrize('v1_enabled_list', ["", "csv"])
@pytest.mark.parametrize('col_name', ['K0', 'k0', 'K3', 'k3', 'V0', 'v0'], ids=idfn)
@ignore_order
def test_read_case_col_name(spark_tmp_path, spark_tmp_table_factory, read_func, v1_enabled_list, col_name):
all_confs = {'spark.sql.sources.useV1SourceList': v1_enabled_list}
gen_list =[('k0', LongGen(nullable=False, min_val=0, max_val=0)),
('k1', LongGen(nullable=False, min_val=1, max_val=1)),
('k2', LongGen(nullable=False, min_val=2, max_val=2)),
('k3', LongGen(nullable=False, min_val=3, max_val=3)),
('v0', LongGen()),
('v1', LongGen()),
('v2', LongGen()),
('v3', LongGen())]
gen = StructGen(gen_list, nullable=False)
data_path = spark_tmp_path + '/CSV_DATA'
reader = read_func(data_path, gen.data_type, spark_tmp_table_factory)
with_cpu_session(
lambda spark : gen_df(spark, gen).write.partitionBy('k0', 'k1', 'k2', 'k3').csv(data_path))
assert_gpu_and_cpu_are_equal_collect(
lambda spark : reader(spark).selectExpr(col_name),
conf=all_confs)
| spark-rapids-branch-23.10 | integration_tests/src/main/python/csv_test.py |
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_gpu_and_cpu_are_equal_iterator
from marks import approximate_float, incompat, ignore_order, allow_non_gpu, limit
@incompat
@approximate_float
@limit
@ignore_order
@allow_non_gpu(any=True)
def test_mortgage(mortgage):
assert_gpu_and_cpu_are_equal_iterator(
lambda spark : mortgage.do_test_query(spark))
| spark-rapids-branch-23.10 | integration_tests/src/main/python/mortgage_test.py |
# Copyright (c) 2020-2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from spark_session import is_before_spark_320
from asserts import assert_gpu_and_cpu_are_equal_sql
from data_gen import *
from pyspark.sql.types import *
from marks import *
_grouping_set_gen = [
('a', StringGen()),
('b', StringGen())]
_grouping_set_sqls = [
'SELECT a, b, count(1) FROM testData GROUP BY a, GROUPING SETS(ROLLUP(a, b))',
'SELECT a, b, count(1) FROM testData GROUP BY a, GROUPING SETS(GROUPING SETS((a, b), (a), ()))',
'SELECT a, b, count(1) FROM testData '
'GROUP BY a, GROUPING SETS((a, b), GROUPING SETS(ROLLUP(a, b)))',
'SELECT a, b, count(1) FROM testData '
'GROUP BY a, GROUPING SETS((a, b, a, b), (a, b, a), (a, b))',
'SELECT a, b, count(1) FROM testData GROUP BY a, '
'GROUPING SETS(GROUPING SETS((a, b, a, b), (a, b, a), (a, b)))',
'SELECT a, b, count(1) FROM testData GROUP BY a, GROUPING SETS(ROLLUP(a, b), CUBE(a, b))',
'SELECT a, b, count(1) FROM testData '
'GROUP BY a, GROUPING SETS(GROUPING SETS((a, b), (a), ()), '
'GROUPING SETS((a, b), (a), (b), ()))',
'SELECT a, b, count(1) FROM testData '
'GROUP BY a, GROUPING SETS((a, b), (a), (), (a, b), (a), (b), ())',
]
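# Illustrative sketch (not part of the original tests): ROLLUP(a, b) is shorthand for
# the explicit grouping sets (a, b), (a), (), which is why several of the SQL strings
# above exercise equivalent groupings through different syntax. The hypothetical
# helper below just expands a rollup column list into those sets.
def _expand_rollup(cols):
    # ROLLUP(c1, ..., cn) -> (c1, ..., cn), (c1, ..., cn-1), ..., ()
    return [tuple(cols[:i]) for i in range(len(cols), -1, -1)]
assert _expand_rollup(['a', 'b']) == [('a', 'b'), ('a',), ()]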
# Test the nested syntax of grouping sets, rollup and cube
@ignore_order
@pytest.mark.parametrize('data_gen', [_grouping_set_gen], ids=idfn)
@pytest.mark.parametrize('sql', _grouping_set_sqls, ids=idfn)
@pytest.mark.skipif(is_before_spark_320(),
                    reason='Nested grouping sets are not supported before Spark 3.2.0')
def test_nested_grouping_sets_rollup_cube(data_gen, sql):
assert_gpu_and_cpu_are_equal_sql(
lambda spark: gen_df(spark, data_gen, length=2048),
"testData",
sql)
| spark-rapids-branch-23.10 | integration_tests/src/main/python/grouping_sets_test.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_gpu_and_cpu_are_equal_collect, assert_gpu_and_cpu_error
from data_gen import *
from pyspark.sql.types import *
from string_test import mk_str_gen
import pyspark.sql.functions as f
import pyspark.sql.utils
from spark_session import with_cpu_session, with_gpu_session
nested_gens = [ArrayGen(LongGen()), ArrayGen(decimal_gen_128bit),
StructGen([("a", LongGen()), ("b", decimal_gen_128bit)]),
MapGen(StringGen(pattern='key_[0-9]', nullable=False), StringGen()),
ArrayGen(BinaryGen(max_length=5)),
MapGen(IntegerGen(nullable=False), BinaryGen(max_length=5))]
# Additional tests for non-nullable arrays because of https://github.com/rapidsai/cudf/pull/8181
non_nested_array_gens = [ArrayGen(sub_gen, nullable=nullable)
for nullable in [True, False]
for sub_gen in all_gen + [null_gen]]
@pytest.mark.parametrize('data_gen', non_nested_array_gens + nested_array_gens_sample, ids=idfn)
def test_concat_list(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: three_col_df(spark, data_gen, data_gen, data_gen).selectExpr(
'concat()',
'concat(a)',
'concat(a, b)',
'concat(a, b, c)')
)
@pytest.mark.parametrize('dg', non_nested_array_gens, ids=idfn)
def test_concat_double_list_with_lit(dg):
data_gen = ArrayGen(dg, max_length=2)
array_lit = gen_scalar(data_gen)
array_lit2 = gen_scalar(data_gen)
assert_gpu_and_cpu_are_equal_collect(
lambda spark: binary_op_df(spark, data_gen).select(
f.concat(f.col('a'),
f.col('b'),
f.lit(array_lit).cast(data_gen.data_type))))
assert_gpu_and_cpu_are_equal_collect(
lambda spark: binary_op_df(spark, data_gen).select(
f.concat(f.lit(array_lit).cast(data_gen.data_type),
f.col('a'),
f.lit(array_lit2).cast(data_gen.data_type))))
assert_gpu_and_cpu_are_equal_collect(
lambda spark: binary_op_df(spark, data_gen).select(
f.concat(f.lit(array_lit).cast(data_gen.data_type),
f.lit(array_lit2).cast(data_gen.data_type))))
@pytest.mark.parametrize('data_gen', non_nested_array_gens, ids=idfn)
def test_concat_list_with_lit(data_gen):
lit_col1 = f.lit(gen_scalar(data_gen)).cast(data_gen.data_type)
lit_col2 = f.lit(gen_scalar(data_gen)).cast(data_gen.data_type)
assert_gpu_and_cpu_are_equal_collect(
lambda spark: binary_op_df(spark, data_gen).select(
f.concat(f.col('a'), f.col('b'), lit_col1),
f.concat(lit_col1, f.col('a'), lit_col2),
f.concat(lit_col1, lit_col2)))
def test_concat_string():
gen = mk_str_gen('.{0,5}')
(s1, s2) = gen_scalars(gen, 2, force_no_nulls=True)
assert_gpu_and_cpu_are_equal_collect(
lambda spark: binary_op_df(spark, gen).select(
f.concat(),
f.concat(f.col('a')),
f.concat(s1),
f.concat(f.col('a'), f.col('b')),
f.concat(f.col('a'), f.col('b'), f.col('a')),
f.concat(s1, f.col('b')),
f.concat(f.col('a'), s2),
f.concat(f.lit(None).cast('string'), f.col('b')),
f.concat(f.col('a'), f.lit(None).cast('string')),
f.concat(f.lit(''), f.col('b')),
f.concat(f.col('a'), f.lit(''))))
@pytest.mark.parametrize('data_gen', map_gens_sample + decimal_64_map_gens + decimal_128_map_gens, ids=idfn)
def test_map_concat(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: three_col_df(spark, data_gen, data_gen, data_gen
).selectExpr('map_concat()',
'map_concat(a)',
'map_concat(b, c)',
'map_concat(a, b, c)'),
{"spark.sql.mapKeyDedupPolicy": "LAST_WIN"}
)
@pytest.mark.parametrize('data_gen', map_gens_sample + decimal_64_map_gens + decimal_128_map_gens, ids=idfn)
def test_map_concat_with_lit(data_gen):
lit_col1 = f.lit(gen_scalar(data_gen)).cast(data_gen.data_type)
lit_col2 = f.lit(gen_scalar(data_gen)).cast(data_gen.data_type)
assert_gpu_and_cpu_are_equal_collect(
lambda spark: binary_op_df(spark, data_gen).select(
f.map_concat(f.col('a'), f.col('b'), lit_col1),
f.map_concat(lit_col1, f.col('a'), lit_col2),
f.map_concat(lit_col1, lit_col2)),
{"spark.sql.mapKeyDedupPolicy": "LAST_WIN"}
)
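# Illustrative sketch (not part of the original suite): why the two tests above set
# spark.sql.mapKeyDedupPolicy to LAST_WIN. map_concat can produce duplicate keys when
# the same column appears more than once; the default EXCEPTION policy fails in that
# case, while LAST_WIN keeps the value from the right-most map. The hypothetical
# helper below demonstrates this on a tiny in-memory dataframe.
def _demo_map_concat_last_win(spark):
    spark.conf.set('spark.sql.mapKeyDedupPolicy', 'LAST_WIN')
    df = spark.createDataFrame([({'k': 1}, {'k': 2})], ['m1', 'm2'])
    # The duplicate key 'k' resolves to 2, the value from m2 (the right-most map).
    return df.selectExpr('map_concat(m1, m2) as merged').collect()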
@pytest.mark.parametrize('data_gen', all_gen + nested_gens, ids=idfn)
@pytest.mark.parametrize('size_of_null', ['true', 'false'], ids=idfn)
def test_size_of_array(data_gen, size_of_null):
gen = ArrayGen(data_gen)
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr('size(a)'),
conf={'spark.sql.legacy.sizeOfNull': size_of_null})
@pytest.mark.parametrize('data_gen', map_gens_sample, ids=idfn)
@pytest.mark.parametrize('size_of_null', ['true', 'false'], ids=idfn)
def test_size_of_map(data_gen, size_of_null):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, data_gen).selectExpr('size(a)'),
conf={'spark.sql.legacy.sizeOfNull': size_of_null})
@pytest.mark.parametrize('data_gen', array_gens_sample + [string_gen], ids=idfn)
def test_reverse(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, data_gen).selectExpr('reverse(a)'))
_sort_array_gens = non_nested_array_gens + [
ArrayGen(all_basic_struct_gen, max_length=6),
ArrayGen(StructGen([['b', byte_gen], ['s', StructGen([['c', byte_gen], ['d', byte_gen]])]]), max_length=10)
]
@pytest.mark.parametrize('data_gen', _sort_array_gens, ids=idfn)
def test_sort_array(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, data_gen).select(
f.sort_array(f.col('a'), True),
f.sort_array(f.col('a'), False)))
@pytest.mark.parametrize('data_gen', _sort_array_gens, ids=idfn)
def test_sort_array_lit(data_gen):
array_lit = gen_scalar(data_gen)
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, data_gen, length=10).select(
f.sort_array(f.lit(array_lit), True),
f.sort_array(f.lit(array_lit), False)))
@pytest.mark.parametrize('data_gen', [ArrayGen(IntegerGen())], ids=idfn)
def test_illegal_args_sort_array(data_gen):
def check_analysis_exception(spark, sql_text):
try:
gen_df(spark, [("a", data_gen), ("b", boolean_gen)], length=10).selectExpr(sql_text)
raise Exception("sort_array should not plan with invalid arguments %s" % sql_text)
except pyspark.sql.utils.AnalysisException as e:
pass
def doit(spark):
check_analysis_exception(spark, "sort_array(a, b)")
check_analysis_exception(spark, "sort_array(array(), b)")
with_cpu_session(lambda spark: doit(spark))
with_gpu_session(lambda spark: doit(spark))
def test_sort_array_normalize_nans():
"""
When the average length of array is > 100,
and there are `-Nan`s in the data, the sorting order
of `Nan` is inconsistent in cuDF (https://github.com/rapidsai/cudf/issues/11630).
`GpuSortArray` fixes the inconsistency by normalizing `Nan`s.
"""
bytes1 = struct.pack('L', 0x7ff83cec2c05b870)
bytes2 = struct.pack('L', 0xfff5101d3f1cd31b)
bytes3 = struct.pack('L', 0x7c22453f18c407a8)
nan1 = struct.unpack('d', bytes1)[0]
nan2 = struct.unpack('d', bytes2)[0]
other = struct.unpack('d', bytes3)[0]
data1 = [([nan2] + [other for _ in range(256)] + [nan1],)]
# array of struct
data2 = [([(nan2, nan1)] + [(other, nan2) for _ in range(256)] + [(nan1, nan2)],)]
for data in [data1, data2]:
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.createDataFrame(data).selectExpr('sort_array(_1, true)', 'sort_array(_1, false)')
)
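# Illustrative sketch (not part of the original suite): both NaN bit patterns used
# above decode to NaN, one of them with the sign bit set (a "negative" NaN).
# One way to picture the normalization done by GpuSortArray is mapping every NaN to
# the canonical quiet NaN, so the ordering no longer depends on sign or payload bits.
# 'Q' is used below because it is always 8 bytes, unlike the platform-dependent 'L'.
def _normalize_nan(value):
    # NaN is the only value that is not equal to itself.
    return float('nan') if value != value else value
_neg_nan = struct.unpack('d', struct.pack('Q', 0xfff5101d3f1cd31b))[0]
_pos_nan = struct.unpack('d', struct.pack('Q', 0x7ff83cec2c05b870))[0]
# After normalization both NaNs serialize to a single canonical bit pattern.
assert struct.pack('d', _normalize_nan(_neg_nan)) == struct.pack('d', _normalize_nan(_pos_nan))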
# For the functionality tests, the sequence length in each row should be limited
# to avoid the exception below:
# "Too long sequence: 2147483745. Should be <= 2147483632"
# In addition, the input data must follow one of the rules below (a pure-Python
# sketch of this check appears after the generator list that follows):
# (step > 0 && start <= stop)
# or (step < 0 && start >= stop)
# or (step == 0 && start == stop)
sequence_normal_integral_gens = [
# (step > 0 && start <= stop)
(ByteGen(min_val=-10, max_val=20, special_cases=[]),
ByteGen(min_val=20, max_val=50, special_cases=[]),
ByteGen(min_val=1, max_val=5, special_cases=[])),
(ShortGen(min_val=-10, max_val=20, special_cases=[]),
ShortGen(min_val=20, max_val=50, special_cases=[]),
ShortGen(min_val=1, max_val=5, special_cases=[])),
(IntegerGen(min_val=-10, max_val=20, special_cases=[]),
IntegerGen(min_val=20, max_val=50, special_cases=[]),
IntegerGen(min_val=1, max_val=5, special_cases=[])),
(LongGen(min_val=-10, max_val=20, special_cases=[None]),
LongGen(min_val=20, max_val=50, special_cases=[None]),
LongGen(min_val=1, max_val=5, special_cases=[None])),
# (step < 0 && start >= stop)
(ByteGen(min_val=20, max_val=50, special_cases=[]),
ByteGen(min_val=-10, max_val=20, special_cases=[]),
ByteGen(min_val=-5, max_val=-1, special_cases=[])),
(ShortGen(min_val=20, max_val=50, special_cases=[]),
ShortGen(min_val=-10, max_val=20, special_cases=[]),
ShortGen(min_val=-5, max_val=-1, special_cases=[])),
(IntegerGen(min_val=20, max_val=50, special_cases=[]),
IntegerGen(min_val=-10, max_val=20, special_cases=[]),
IntegerGen(min_val=-5, max_val=-1, special_cases=[])),
(LongGen(min_val=20, max_val=50, special_cases=[None]),
LongGen(min_val=-10, max_val=20, special_cases=[None]),
LongGen(min_val=-5, max_val=-1, special_cases=[None])),
# (step == 0 && start == stop)
(ByteGen(min_val=20, max_val=20, special_cases=[]),
ByteGen(min_val=20, max_val=20, special_cases=[]),
ByteGen(min_val=0, max_val=0, special_cases=[])),
(ShortGen(min_val=20, max_val=20, special_cases=[]),
ShortGen(min_val=20, max_val=20, special_cases=[]),
ShortGen(min_val=0, max_val=0, special_cases=[])),
(IntegerGen(min_val=20, max_val=20, special_cases=[]),
IntegerGen(min_val=20, max_val=20, special_cases=[]),
IntegerGen(min_val=0, max_val=0, special_cases=[])),
(LongGen(min_val=20, max_val=20, special_cases=[None]),
LongGen(min_val=20, max_val=20, special_cases=[None]),
LongGen(min_val=0, max_val=0, special_cases=[None])),
]
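# Illustrative sketch (not part of the original suite) of the boundary rules listed in
# the comment above: a (start, stop, step) triple is a legal sequence specification
# exactly when the step moves start towards (or keeps it at) stop.
def _is_legal_sequence(start, stop, step):
    return (step > 0 and start <= stop) or \
           (step < 0 and start >= stop) or \
           (step == 0 and start == stop)
assert _is_legal_sequence(-10, 20, 1)      # ascending
assert _is_legal_sequence(50, 20, -5)      # descending
assert _is_legal_sequence(20, 20, 0)       # single-element sequence
assert not _is_legal_sequence(20, -10, 1)  # step > 0 but start > stop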
sequence_normal_no_step_integral_gens = [(gens[0], gens[1]) for
gens in sequence_normal_integral_gens]
@pytest.mark.parametrize('start_gen,stop_gen', sequence_normal_no_step_integral_gens, ids=idfn)
def test_sequence_without_step(start_gen, stop_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, start_gen, stop_gen).selectExpr(
"sequence(a, b)",
"sequence(a, 20)",
"sequence(20, b)"))
@pytest.mark.parametrize('start_gen,stop_gen,step_gen', sequence_normal_integral_gens, ids=idfn)
def test_sequence_with_step(start_gen, stop_gen, step_gen):
# Get a step scalar from the 'step_gen' which follows the rules.
step_gen.start(random.Random(0))
step_lit = step_gen.gen()
assert_gpu_and_cpu_are_equal_collect(
lambda spark: three_col_df(spark, start_gen, stop_gen, step_gen).selectExpr(
"sequence(a, b, c)",
"sequence(a, b, {})".format(step_lit),
"sequence(a, 20, c)",
"sequence(a, 20, {})".format(step_lit),
"sequence(20, b, c)",
"sequence(20, 20, c)",
"sequence(20, b, {})".format(step_lit)))
# Illegal sequence boundaries:
# step > 0, but start > stop
# step < 0, but start < stop
# step == 0, but start != stop
#
# All integral types share the same check implementation, so each case
# is not run against every integral type in these tests.
sequence_illegal_boundaries_integral_gens = [
# step > 0, but start > stop
(ShortGen(min_val=20, max_val=50, special_cases=[]),
ShortGen(min_val=-10, max_val=19, special_cases=[]),
ShortGen(min_val=1, max_val=5, special_cases=[])),
(LongGen(min_val=20, max_val=50, special_cases=[None]),
LongGen(min_val=-10, max_val=19, special_cases=[None]),
LongGen(min_val=1, max_val=5, special_cases=[None])),
# step < 0, but start < stop
(ByteGen(min_val=-10, max_val=19, special_cases=[]),
ByteGen(min_val=20, max_val=50, special_cases=[]),
ByteGen(min_val=-5, max_val=-1, special_cases=[])),
(IntegerGen(min_val=-10, max_val=19, special_cases=[]),
IntegerGen(min_val=20, max_val=50, special_cases=[]),
IntegerGen(min_val=-5, max_val=-1, special_cases=[])),
# step == 0, but start != stop
(IntegerGen(min_val=-10, max_val=19, special_cases=[]),
IntegerGen(min_val=20, max_val=50, special_cases=[]),
IntegerGen(min_val=0, max_val=0, special_cases=[]))
]
@pytest.mark.parametrize('start_gen,stop_gen,step_gen', sequence_illegal_boundaries_integral_gens, ids=idfn)
def test_sequence_illegal_boundaries(start_gen, stop_gen, step_gen):
assert_gpu_and_cpu_error(
lambda spark:three_col_df(spark, start_gen, stop_gen, step_gen).selectExpr(
"sequence(a, b, c)").collect(),
conf = {}, error_message = "Illegal sequence boundaries")
# Exceed the max length of a sequence
# "Too long sequence: xxxxxxxxxx. Should be <= 2147483632"
sequence_too_long_length_gens = [
IntegerGen(min_val=2147483633, max_val=2147483633, special_cases=[]),
LongGen(min_val=2147483635, max_val=2147483635, special_cases=[None])
]
@pytest.mark.parametrize('stop_gen', sequence_too_long_length_gens, ids=idfn)
def test_sequence_too_long_sequence(stop_gen):
assert_gpu_and_cpu_error(
        # To avoid OOM, reduce the row count to 1; that is enough to verify this case.
lambda spark:unary_op_df(spark, stop_gen, 1).selectExpr(
"sequence(0, a)").collect(),
conf = {}, error_message = "Too long sequence")
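# Illustrative note (not part of the original suite): the 2147483632 limit in the
# error message appears to be Integer.MAX_VALUE - 15 (Spark's maximum rounded array
# length), so the stop values above, with start 0 and the default step of 1, push the
# sequence length past that limit.
assert (2 ** 31 - 1) - 15 == 2147483632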
def get_sequence_cases_mixed_df(spark, length=2048):
    # Generate sequence data that mixes all three validity rules in a single dataset:
# (step > num.zero && start <= stop) ||
# (step < num.zero && start >= stop) ||
# (step == num.zero && start == stop)
data_gen = IntegerGen(nullable=False, min_val=-10, max_val=10, special_cases=[])
def get_sequence_data(gen, len):
gen.start(random.Random(0))
list = []
for index in range(len):
start = gen.gen()
stop = gen.gen()
step = gen.gen()
# decide the direction of step
if start < stop:
step = abs(step) + 1
elif start == stop:
step = 0
else:
step = -(abs(step) + 1)
list.append(tuple([start, stop, step]))
# add special case
list.append(tuple([2, 2, 0]))
return list
mixed_schema = StructType([
StructField('a', data_gen.data_type),
StructField('b', data_gen.data_type),
StructField('c', data_gen.data_type)])
return spark.createDataFrame(
SparkContext.getOrCreate().parallelize(get_sequence_data(data_gen, length)),
mixed_schema)
# test for 3 cases mixed in a single dataset
def test_sequence_with_step_mixed_cases():
assert_gpu_and_cpu_are_equal_collect(
lambda spark: get_sequence_cases_mixed_df(spark)
.selectExpr("sequence(a, b, c)"))
| spark-rapids-branch-23.10 | integration_tests/src/main/python/collection_ops_test.py |
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_gpu_and_cpu_are_equal_collect, assert_gpu_and_cpu_error
from data_gen import *
from spark_session import is_before_spark_330
from marks import incompat, approximate_float
from pyspark.sql.types import *
import pyspark.sql.functions as f
@pytest.mark.parametrize('ansi_enabled', ['true', 'false'])
@pytest.mark.parametrize('data_gen', boolean_gens, ids=idfn)
def test_and(data_gen, ansi_enabled):
ansi_conf = {'spark.sql.ansi.enabled': ansi_enabled}
data_type = data_gen.data_type
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).select(
f.col('a') & f.lit(True),
f.lit(False) & f.col('b'),
f.lit(None).cast(data_type) & f.col('a'),
f.col('b') & f.lit(None).cast(data_type),
f.col('a') & f.col('b')),
conf=ansi_conf)
@pytest.mark.parametrize('ansi_enabled', ['true', 'false'])
@pytest.mark.parametrize('data_gen', boolean_gens, ids=idfn)
def test_or(data_gen, ansi_enabled):
data_type = data_gen.data_type
ansi_conf = {'spark.sql.ansi.enabled': ansi_enabled}
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).select(
f.col('a') | f.lit(True),
f.lit(False) | f.col('b'),
f.lit(None).cast(data_type) | f.col('a'),
f.col('b') | f.lit(None).cast(data_type),
f.col('a') | f.col('b')),
conf=ansi_conf)
@pytest.mark.parametrize('data_gen', boolean_gens, ids=idfn)
def test_not(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr('!a'))
# AND/OR on the CPU in Spark always evaluates the LHS, but only evaluates the RHS
# when the result cannot be determined from the LHS alone.
# The test below checks that the GPU short-circuits the predicates the same way,
# without throwing an exception in ANSI mode; a small pure-Python sketch of the
# short-circuit decision comes first.
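# Illustrative sketch (not part of the original suite): when the RHS has to be
# evaluated under SQL three-valued logic. In the dataframe built inside the test
# below, column 'a' is always False and column 'b' is always True, so 'a AND rhs'
# and 'b OR rhs' are decided by the LHS alone and must not raise even if the RHS
# would overflow in ANSI mode.
def _rhs_must_be_evaluated(lhs_value, op):
    # lhs_value is True, False, or None (SQL NULL); op is 'AND' or 'OR'.
    if op == 'AND':
        return lhs_value is not False  # FALSE AND x is FALSE without looking at x
    return lhs_value is not True       # TRUE OR x is TRUE without looking at x
assert not _rhs_must_be_evaluated(False, 'AND')
assert _rhs_must_be_evaluated(None, 'AND')   # NULL AND x still depends on x
assert not _rhs_must_be_evaluated(True, 'OR')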
@pytest.mark.parametrize('logic_op', ['AND', 'OR'])
@pytest.mark.parametrize('ansi_enabled', ['true', 'false'])
@pytest.mark.parametrize('int_arg', [INT_MAX, 0])
@pytest.mark.parametrize('lhs_arg', ['NULL', 'a', 'b'])
def test_logical_with_side_effect(ansi_enabled, lhs_arg, int_arg, logic_op):
def do_it(spark, lhs_bool_arg, arith_arg, op):
schema = StructType([
StructField("a", BooleanType()),
StructField("b", BooleanType()),
StructField("c", IntegerType())])
return spark.createDataFrame(
[(False, True, arith_arg), (False, True, 1), (False, True, -5)],
schema=schema
).selectExpr('{} {} (c + 2) > 0'.format(lhs_bool_arg, op))
ansi_conf = {'spark.sql.ansi.enabled': ansi_enabled}
bypass_map = {'AND': 'a', 'OR': 'b'}
expect_error = int_arg == INT_MAX and (lhs_arg == 'NULL' or bypass_map[logic_op] != lhs_arg)
if ansi_enabled == 'true' and expect_error:
assert_gpu_and_cpu_error(
df_fun=lambda spark: do_it(spark, lhs_arg, int_arg, logic_op).collect(),
conf=ansi_conf,
error_message="java.lang.ArithmeticException" if is_before_spark_330() else "SparkArithmeticException")
else:
assert_gpu_and_cpu_are_equal_collect(
func=lambda spark: do_it(spark, lhs_arg, int_arg, logic_op),
            conf=ansi_conf)
| spark-rapids-branch-23.10 | integration_tests/src/main/python/logic_test.py |
# Copyright (c) 2020-2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from conftest import is_databricks_runtime
from spark_session import with_spark_session
import pytest
SELECT_SQL = [
# (" FUNCTIONAL CHECKING", "FUNCTIONAL CHECKING"),
# (" MATH functions", "MATH functions"),
("SELECT abs(intF*(-1)) FROM test_table", "abs(intF*(-1))"),
("SELECT abs(floatF*(-1)) FROM test_table", "abs(floatF*(-1))"),
("SELECT abs(doubleF*(-1)) FROM test_table", "abs(doubleF*(-1))"),
("SELECT acosh(intF) FROM test_table", "acosh(intF)"),
("SELECT acosh(floatF) FROM test_table", "acosh(floatF)"),
("SELECT acosh(doubleF) FROM test_table", "acosh(doubleF)"),
("SELECT asin(intF) FROM test_table", "asin(intF)"),
("SELECT asin(floatF) FROM test_table", "asin(floatF)"),
("SELECT asin(doubleF) FROM test_table", "asin(doubleF)"),
("SELECT asinh(intF*(-1)) FROM test_table", "asinh(intF*(-1))"),
("SELECT asinh(doubleF) FROM test_table", "asinh(doubleF)"),
("SELECT asinh(floatF) FROM test_table", "asinh(floatF)"),
("SELECT atan(byteF) FROM test_table", "atan(byteF)"),
("SELECT atan(shortF) FROM test_table", "atan(shortF)"),
("SELECT atan(intF) FROM test_table", "atan(intF)"),
("SELECT atan(longF) FROM test_table", "atan(longF)"),
("SELECT atan(floatF) FROM test_table", "atan(floatF)"),
("SELECT atan(doubleF) FROM test_table", "atan(doubleF)"),
("SELECT atanh(intF) FROM test_table", "atanh(intF)"),
("SELECT atanh(byteF) FROM test_table", "atanh(byteF)"),
("SELECT atanh(shortF) FROM test_table", "atanh(shortF)"),
("SELECT atanh(longF) FROM test_table", "atanh(longF)"),
("SELECT atanh(floatF) FROM test_table", "atanh(floatF)"),
("SELECT atanh(doubleF) FROM test_table", "atanh(doubleF)"),
("SELECT cbrt(intF) FROM test_table", "cbrt(intF)"),
("SELECT cbrt(byteF) FROM test_table", "cbrt(byteF)"),
("SELECT cbrt(shortF) FROM test_table", "cbrt(shortF)"),
("SELECT cbrt(longF) FROM test_table", "cbrt(longF)"),
("SELECT cbrt(floatF) FROM test_table", "cbrt(floatF)"),
("SELECT cbrt(doubleF) FROM test_table", "cbrt(doubleF)"),
("SELECT ceil(intF) FROM test_table ", "ceil(intF)"),
("SELECT ceil(byteF) FROM test_table ", "ceil(byteF)"),
("SELECT ceil(shortF) FROM test_table ", "ceil(shortF)"),
("SELECT ceil(longF) FROM test_table ", "ceil(longF)"),
("SELECT ceil(floatF) FROM test_table ", "ceil(floatF)"),
("SELECT ceil(doubleF) FROM test_table ", "ceil(doubleF)"),
("SELECT cos(intF) FROM test_table", "cos(intF)"),
("SELECT cos(byteF) FROM test_table", "cos(byteF)"),
("SELECT cos(shortF) FROM test_table", "cos(shortF)"),
("SELECT cos(longF) FROM test_table", "cos(longF)"),
("SELECT cos(floatF) FROM test_table", "cos(floatF)"),
("SELECT cos(doubleF) FROM test_table", "cos(doubleF)"),
("SELECT cot(byteF) FROM test_table", "cot(byteF)"),
("SELECT cot(shortF) FROM test_table", "cot(shortF)"),
("SELECT cot(intF) FROM test_table", "cot(intF)"),
("SELECT cot(longF) FROM test_table", "cot(longF)"),
("SELECT cot(floatF) FROM test_table", "cot(floatF)"),
("SELECT cot(doubleF) FROM test_table", "cot(doubleF)"),
("SELECT cot(byteF) FROM test_table", "cot(byteF)"),
("SELECT cot(shortF) FROM test_table", "cot(shortF)"),
("SELECT cot(intF) FROM test_table", "cot(intF)"),
("SELECT cot(longF) FROM test_table", "cot(longF)"),
("SELECT cot(floatF) FROM test_table", "cot(floatF)"),
("SELECT cot(doubleF) FROM test_table", "cot(doubleF)"),
("SELECT e()*doubleF FROM test_table", "e()*doubleF"),
("SELECT exp(intF) FROM test_table", "exp(intF)"),
("SELECT exp(byteF) FROM test_table", "exp(byteF)"),
("SELECT exp(shortF) FROM test_table", "exp(shortF)"),
("SELECT exp(longF) FROM test_table", "exp(longF)"),
("SELECT exp(floatF) FROM test_table", "exp(floatF)"),
("SELECT exp(doubleF) FROM test_table", "exp(doubleF)"),
("SELECT expm1(intF) FROM test_table", "expm1(intF)"),
("SELECT expm1(byteF) FROM test_table", "expm1(byteF)"),
("SELECT expm1(shortF) FROM test_table", "expm1(shortF)"),
("SELECT expm1(longF) FROM test_table", "expm1(longF)"),
("SELECT expm1(floatF) FROM test_table", "expm1(floatF)"),
("SELECT expm1(doubleF) FROM test_table", "expm1(doubleF)"),
("SELECT floor(intF) FROM test_table", "floor(intF)"),
("SELECT floor(byteF) FROM test_table", "floor(byteF)"),
("SELECT floor(shortF) FROM test_table", "floor(shortF)"),
("SELECT floor(longF) FROM test_table", "floor(longF)"),
("SELECT floor(floatF) FROM test_table", "floor(floatF)"),
("SELECT floor(doubleF) FROM test_table", "floor(doubleF)"),
("SELECT log2(intF) FROM test_table", "log2(intF)"),
("SELECT log2(shortF) FROM test_table", "log2(shortF)"),
("SELECT log2(byteF) FROM test_table", "log2(byteF)"),
("SELECT log2(longF) FROM test_table", "log2(longF)"),
("SELECT log2(floatF) FROM test_table", "log2(floatF)"),
("SELECT log2(doubleF) FROM test_table", "log2(doubleF)"),
("SELECT log10(intF) FROM test_table", "log10(intF)"),
("SELECT log10(byteF) FROM test_table", "log10(byteF)"),
("SELECT log10(shortF) FROM test_table", "log10(shortF)"),
("SELECT log10(longF) FROM test_table", "log10(longF)"),
("SELECT log10(floatF) FROM test_table", "log10(floatF)"),
("SELECT log10(doubleF) FROM test_table", "log10(doubleF)"),
("SELECT log1p(intF) FROM test_table", "log1p(intF)"),
("SELECT log1p(byteF) FROM test_table", "log1p(byteF)"),
("SELECT log1p(shortF) FROM test_table", "log1p(shortF)"),
("SELECT log1p(longF) FROM test_table", "log1p(longF)"),
("SELECT log1p(floatF) FROM test_table", "log1p(floatF)"),
("SELECT log1p(doubleF) FROM test_table", "log1p(doubleF)"),
("SELECT log(intF, intF) FROM test_table", "log(intF, intF)"),
("SELECT log(byteF, byteF) FROM test_table", "log(byteF, byteF)"),
("SELECT log(shortF, shortF) FROM test_table", "log(shortF, shortF)"),
("SELECT log(longF, longF) FROM test_table", "log(longF, longF)"),
("SELECT log(floatF, floatF) FROM test_table", "log(floatF, floatF)"),
("SELECT log(doubleF, doubleF) FROM test_table", "log(doubleF, doubleF)"),
("SELECT MOD(intF,10) FROM test_table", "MOD(intF,10)"),
("SELECT MOD(byteF,10) FROM test_table", "MOD(byteF,10)"),
("SELECT MOD(shortF,10) FROM test_table", "MOD(shortF,10)"),
("SELECT MOD(longF,10) FROM test_table", "MOD(longF,10)"),
("SELECT MOD(floatF,10) FROM test_table", "MOD(floatF,10)"),
("SELECT MOD(doubleF,10) FROM test_table", "MOD(doubleF,10)"),
("SELECT pi()*intF FROM test_table", "pi()*intF"),
("SELECT pi()*floatF*10 FROM test_table", "pi()*floatF*10"),
("SELECT pow(intF,2) FROM test_table", "pow(intF,2)"),
("SELECT pow(byteF,2) FROM test_table", "pow(byteF,2)"),
("SELECT pow(shortF,2) FROM test_table", "pow(shortF,2)"),
("SELECT pow(longF,2) FROM test_table", "pow(longF,2)"),
("SELECT pow(floatF,2) FROM test_table", "pow(floatF,2)"),
("SELECT pow(doubleF,2) FROM test_table", "pow(doubleF,2)"),
("SELECT rint(intF) FROM test_table", "rint(intF)"),
("SELECT rint(byteF) FROM test_table", "rint(byteF)"),
("SELECT rint(shortF) FROM test_table", "rint(shortF)"),
("SELECT rint(longF) FROM test_table", "rint(longF)"),
("SELECT rint(floatF) FROM test_table", "rint(floatF)"),
("SELECT rint(doubleF) FROM test_table", "rint(doubleF)"),
("SELECT signum(intF) FROM test_table", "signum(intF)"),
("SELECT signum(byteF) FROM test_table", "signum(byteF)"),
("SELECT signum(shortF) FROM test_table", "signum(shortF)"),
("SELECT signum(longF) FROM test_table", "signum(longF)"),
("SELECT signum(floatF) FROM test_table", "signum(floatF)"),
("SELECT signum(doubleF) FROM test_table", "signum(doubleF)"),
("SELECT sin(intF) FROM test_table", "sin(intF)"),
("SELECT sin(byteF) FROM test_table", "sin(byteF)"),
("SELECT sin(shortF) FROM test_table", "sin(shortF)"),
("SELECT sin(longF) FROM test_table", "sin(longF)"),
("SELECT sin(floatF) FROM test_table", "sin(floatF)"),
("SELECT sin(doubleF) FROM test_table", "sin(doubleF)"),
("SELECT tan(intF) FROM test_table", "tan(intF)"),
("SELECT tan(byteF) FROM test_table", "tan(byteF)"),
("SELECT tan(shortF) FROM test_table", "tan(shortF)"),
("SELECT tan(longF) FROM test_table", "tan(longF)"),
("SELECT tan(floatF) FROM test_table", "tan(floatF)"),
("SELECT tan(doubleF) FROM test_table", "tan(doubleF)"),
("SELECT intF+intF FROM test_table", "intF+intF"),
("SELECT byteF+byteF FROM test_table", "byteF+byteF"),
("SELECT shortF+shortF FROM test_table", "shortF+shortF"),
("SELECT longF+longF FROM test_table", "longF+longF"),
("SELECT floatF+floatF FROM test_table", "floatF+floatF"),
("SELECT doubleF+doubleF FROM test_table", "doubleF+doubleF"),
("SELECT byteF-doubleF FROM test_table", "byteF-doubleF"),
("SELECT shortF-byteF FROM test_table", "shortF-byteF"),
("SELECT intF-byteF FROM test_table", "intF-byteF"),
("SELECT longF-byteF FROM test_table", "longF-byteF"),
("SELECT floatF-intF FROM test_table", "floatF-intF"),
("SELECT doubleF-floatF FROM test_table", "doubleF-floatF"),
("SELECT intF*intF FROM test_table", "intF*intF"),
("SELECT byteF*byteF FROM test_table", "byteF*byteF"),
("SELECT shortF*shortF FROM test_table", "shortF*shortF"),
("SELECT longF*longF FROM test_table", "longF*longF"),
("SELECT floatF*floatF FROM test_table", "floatF*floatF"),
("SELECT doubleF*doubleF FROM test_table", "doubleF*doubleF"),
("SELECT intF/intF FROM test_table", "intF/intF"),
("SELECT byteF/floatF FROM test_table", "byteF/floatF"),
("SELECT shortF/doubleF FROM test_table", "shortF/doubleF"),
("SELECT longF/intF FROM test_table", "longF/intF"),
("SELECT floatF/floatF FROM test_table", "floatF/floatF"),
("SELECT doubleF/doubleF FROM test_table", "doubleF/doubleF"),
("SELECT intF%floatF FROM test_table", "intF%floatF"),
("SELECT byteF%10 FROM test_table", "byteF%10"),
("SELECT shortF%10 FROM test_table", "shortF%10"),
("SELECT longF%floatF FROM test_table", "longF%floatF"),
("SELECT floatF%10 FROM test_table", "floatF%10"),
("SELECT doubleF%10 FROM test_table", "doubleF%10"),
# ("STRING", "STRING"),
("SELECT Lower(strF) FROM test_table", "Lower(strF)"),
("SELECT Upper(strF) FROM test_table", "Upper(strF)"),
("SELECT Substring(strF, 1, 3) FROM test_table", "Substring(strF, 1, 3)"),
("SELECT Substring(strF, -1, 5) FROM test_table", "Substring(strF, -1, 5)"),
("SELECT * FROM test_table WHERE strF LIKE 'Yuan' ", "* WHERE strF LIKE 'Yuan'"),
("SELECT * FROM test_table WHERE strF LIKE '%Yuan%' ", "* WHERE strF LIKE '%Yuan%'"),
("SELECT * FROM test_table WHERE strF LIKE 'Y%'", "* WHERE strF LIKE 'Y%'"),
("SELECT * FROM test_table WHERE strF LIKE '%an' ", "* WHERE strF LIKE '%an'"),
("SELECT REPLACE(strF, 'Yuan', 'Eric') FROM test_table", "REPLACE(strF, 'Yuan', 'Eric')"),
#("SELECT REGEXP_REPLACE(strF, 'Y*', 'Eric') FROM test_table", "REGEXP_REPLACE(strF, 'Y*', 'Eric')"),
("SELECT CONCAT(strF, strF) FROM test_table", "CONCAT(strF, strF)"),
# (" DATETIME", "DATETIME"),
("SELECT dayofmonth(timestampF) from test_table", "dayofmonth(timestampF)"),
("SELECT hour(timestampF) from test_table", "hour(timestampF)"),
("SELECT minute(timestampF) from test_table", "minute(timestampF)"),
("SELECT second(timestampF) from test_table", "second(timestampF)"),
("SELECT year(timestampF) from test_table", "year(timestampF)"),
("SELECT month(timestampF) from test_table", "month(timestampF)"),
# (" CAST", "CAST"),
("SELECT cast(intF as tinyint) from test_table", "cast(intF as tinyint)"),
("SELECT cast(intF as smallint) from test_table", "cast(intF as smallint)"),
("SELECT cast(intF as bigint) from test_table", "cast(intF as bigint)"),
("SELECT cast(intF as long) from test_table", "cast(intF as long)"),
("SELECT cast(intF as float) from test_table", "cast(intF as float)"),
("SELECT cast(intF as double) from test_table", "cast(intF as double)"),
("SELECT cast(byteF as tinyint) from test_table", "cast(byteF as tinyint)"),
("SELECT cast(byteF as smallint) from test_table", "cast(byteF as smallint)"),
("SELECT cast(byteF as Bigint) from test_table", "cast(byteF as Bigint)"),
("SELECT cast(byteF as long) from test_table", "cast(byteF as long)"),
("SELECT cast(byteF as float) from test_table", "cast(byteF as float)"),
("SELECT cast(byteF as double) from test_table", "cast(byteF as double)"),
("SELECT cast(shortF as tinyint) from test_table", "cast(shortF as tinyint)"),
("SELECT cast(shortF as smallint) from test_table", "cast(shortF as smallint)"),
("SELECT cast(shortF as Bigint) from test_table", "cast(shortF as Bigint)"),
("SELECT cast(shortF as long) from test_table", "cast(shortF as long)"),
("SELECT cast(shortF as float) from test_table", "cast(shortF as float)"),
("SELECT cast(shortF as double) from test_table", "cast(shortF as double)"),
("SELECT cast(longF as tinyint) from test_table", "cast(longF as tinyint)"),
("SELECT cast(longF as smallint) from test_table", "cast(longF as smallint)"),
("SELECT cast(longF as Bigint) from test_table", "cast(longF as Bigint)"),
("SELECT cast(longF as long) from test_table", "cast(longF as long)"),
("SELECT cast(longF as float) from test_table", "cast(longF as float)"),
("SELECT cast(longF as double) from test_table", "cast(longF as double)"),
("SELECT cast(floatF as tinyint) from test_table", "cast(floatF as tinyint)"),
("SELECT cast(floatF as smallint) from test_table", "cast(floatF as smallint)"),
("SELECT cast(floatF as Bigint) from test_table", "cast(floatF as Bigint)"),
("SELECT cast(floatF as long) from test_table", "cast(floatF as long)"),
("SELECT cast(floatF as float) from test_table", "cast(floatF as float)"),
("SELECT cast(floatF as double) from test_table", "cast(floatF as double)"),
("SELECT cast(doubleF as tinyint) from test_table", "cast(doubleF as tinyint)"),
("SELECT cast(doubleF as smallint) from test_table", "cast(doubleF as smallint)"),
("SELECT cast(doubleF as Bigint) from test_table", "cast(doubleF as Bigint)"),
("SELECT cast(doubleF as long) from test_table", "cast(doubleF as long)"),
("SELECT cast(doubleF as float) from test_table", "cast(doubleF as float)"),
("SELECT cast(doubleF as double) from test_table", "cast(doubleF as double)"),
# (" COMPARISON", "COMPARISON"),
("SELECT * FROM test_table WHERE intF = 0", "* WHERE intF = 0"),
("SELECT * FROM test_table WHERE byteF = 0", "* WHERE byteF = 0"),
("SELECT * FROM test_table WHERE shortF = 0", "* WHERE shortF = 0"),
("SELECT * FROM test_table WHERE longF = 0", "* WHERE longF = 0"),
("SELECT * FROM test_table WHERE floatF = 0", "* WHERE floatF = 0"),
("SELECT * FROM test_table WHERE doubleF = 0", "* WHERE doubleF = 0"),
("SELECT * FROM test_table WHERE booleanF = true", "* WHERE booleanF = true"),
("SELECT * FROM test_table WHERE strF = 'Alex'", "* WHERE strF = 'Alex'"),
("SELECT * FROM test_table WHERE dateF = '1990-1-1'", "* WHERE dateF = '1990-1-1'"),
("SELECT * FROM test_table WHERE dateF = '2020-05-01 12:01:01' ", "* WHERE dateF = '2020-05-01 12:01:01'"),
("SELECT * FROM test_table WHERE intF == 0", "* WHERE intF == 0"),
("SELECT * FROM test_table WHERE byteF == 0", "* WHERE byteF == 0"),
("SELECT * FROM test_table WHERE shortF == 0", "* WHERE shortF == 0"),
("SELECT * FROM test_table WHERE longF == 0", "* WHERE longF == 0"),
("SELECT * FROM test_table WHERE floatF == 0", "* WHERE floatF == 0"),
("SELECT * FROM test_table WHERE doubleF == 0", "* WHERE doubleF == 0"),
("SELECT * FROM test_table WHERE booleanF == true", "* WHERE booleanF == true"),
("SELECT * FROM test_table WHERE strF == 'Alex'", "* WHERE strF == 'Alex'"),
("SELECT * FROM test_table WHERE dateF == '1990-1-1'", "* WHERE dateF == '1990-1-1'"),
("SELECT * FROM test_table WHERE dateF == '2020-05-01 12:01:01' ", "* WHERE dateF == '2020-05-01 12:01:01'"),
("SELECT * FROM test_table WHERE intF > 0", "* WHERE intF > 0"),
("SELECT * FROM test_table WHERE byteF > 0", "* WHERE byteF > 0"),
("SELECT * FROM test_table WHERE shortF > 0", "* WHERE shortF > 0"),
("SELECT * FROM test_table WHERE longF > 0", "* WHERE longF > 0"),
("SELECT * FROM test_table WHERE floatF > 0", "* WHERE floatF > 0"),
("SELECT * FROM test_table WHERE doubleF > 0", "* WHERE doubleF > 0"),
("SELECT * FROM test_table WHERE booleanF > false ", "* WHERE booleanF > false"),
("SELECT * FROM test_table WHERE strF > 'Yuan'", "* WHERE strF > 'Yuan'"),
("SELECT * FROM test_table WHERE dateF > '1990-1-1' ", "* WHERE dateF > '1990-1-1'"),
("SELECT * FROM test_table WHERE dateF > '2020-05-01 12:01:01'", "* WHERE dateF > '2020-05-01 12:01:01'"),
("SELECT * FROM test_table WHERE intF < 0", "* WHERE intF < 0"),
("SELECT * FROM test_table WHERE byteF < 0", "* WHERE byteF < 0"),
("SELECT * FROM test_table WHERE shortF < 0", "* WHERE shortF < 0"),
("SELECT * FROM test_table WHERE longF < 0", "* WHERE longF < 0"),
("SELECT * FROM test_table WHERE floatF < 0", "* WHERE floatF < 0"),
("SELECT * FROM test_table WHERE doubleF < 0", "* WHERE doubleF < 0"),
("SELECT * FROM test_table WHERE booleanF < true", "* WHERE booleanF < true"),
("SELECT * FROM test_table WHERE strF < 'Yuan'", "* WHERE strF < 'Yuan'"),
("SELECT * FROM test_table WHERE dateF < '1994-01-01' ", "* WHERE dateF < '1994-01-01'"),
("SELECT * FROM test_table WHERE dateF < '2020-05-01 12:01:01' ", "* WHERE dateF < '2020-05-01 12:01:01'"),
("SELECT * FROM test_table WHERE intF >= 0", "* WHERE intF >= 0"),
("SELECT * FROM test_table WHERE byteF >= 0", "* WHERE byteF >= 0"),
("SELECT * FROM test_table WHERE shortF >= 0", "* WHERE shortF >= 0"),
("SELECT * FROM test_table WHERE longF >= 0", "* WHERE longF >= 0"),
("SELECT * FROM test_table WHERE floatF >= 0", "* WHERE floatF >= 0"),
("SELECT * FROM test_table WHERE doubleF >= 0", "* WHERE doubleF >= 0"),
("SELECT * FROM test_table WHERE booleanF >= false ", "* WHERE booleanF >= false"),
("SELECT * FROM test_table WHERE strF >= 'Yuan'", "* WHERE strF >= 'Yuan'"),
("SELECT * FROM test_table WHERE dateF >= '1994-01-01' ", "* WHERE dateF >= '1994-01-01'"),
("SELECT * FROM test_table WHERE dateF >= '2020-05-01 12:01:01' ", "* WHERE dateF >= '2020-05-01 12:01:01'"),
("SELECT * FROM test_table WHERE intF <= 0", "* WHERE intF <= 0"),
("SELECT * FROM test_table WHERE byteF <= 0", "* WHERE byteF <= 0"),
("SELECT * FROM test_table WHERE shortF <= 0", "* WHERE shortF <= 0"),
("SELECT * FROM test_table WHERE longF <= 0", "* WHERE longF <= 0"),
("SELECT * FROM test_table WHERE floatF <= 0", "* WHERE floatF <= 0"),
("SELECT * FROM test_table WHERE doubleF <= 0", "* WHERE doubleF <= 0"),
("SELECT * FROM test_table WHERE booleanF <= true ", "* WHERE booleanF <= true"),
("SELECT * FROM test_table WHERE strF <= 'Yuan'", "* WHERE strF <= 'Yuan'"),
("SELECT * FROM test_table WHERE dateF <= '1994-01-01' ", "* WHERE dateF <= '1994-01-01'"),
("SELECT * FROM test_table WHERE dateF <= '2020-05-01 12:01:01' ", "* WHERE dateF <= '2020-05-01 12:01:01'"),
("SELECT * FROM test_table WHERE intF <> 0", "* WHERE intF <> 0"),
("SELECT * FROM test_table WHERE byteF <> 0", "* WHERE byteF <> 0"),
("SELECT * FROM test_table WHERE shortF <> 0", "* WHERE shortF <> 0"),
("SELECT * FROM test_table WHERE longF <> 0", "* WHERE longF <> 0"),
("SELECT * FROM test_table WHERE floatF <> 0", "* WHERE floatF <> 0"),
("SELECT * FROM test_table WHERE doubleF <> 0", "* WHERE doubleF <> 0"),
("SELECT * FROM test_table WHERE booleanF <> true ", "* WHERE booleanF <> true"),
("SELECT * FROM test_table WHERE strF <> 'Yuan'", "* WHERE strF <> 'Yuan'"),
("SELECT * FROM test_table WHERE dateF <> '1994-01-01' ", "* WHERE dateF <> '1994-01-01'"),
("SELECT * FROM test_table WHERE dateF <> '2020-05-01 12:01:01' ", "* WHERE dateF <> '2020-05-01 12:01:01'"),
("SELECT * FROM test_table WHERE intF != 0", "* WHERE intF != 0"),
("SELECT * FROM test_table WHERE byteF != 0", "* WHERE byteF != 0"),
("SELECT * FROM test_table WHERE shortF != 0", "* WHERE shortF != 0"),
("SELECT * FROM test_table WHERE longF != 0", "* WHERE longF != 0"),
("SELECT * FROM test_table WHERE floatF != 0", "* WHERE floatF != 0"),
("SELECT * FROM test_table WHERE doubleF != 0", "* WHERE doubleF != 0"),
("SELECT * FROM test_table WHERE booleanF != true ", "* WHERE booleanF != true"),
("SELECT * FROM test_table WHERE strF != 'Yuan'", "* WHERE strF != 'Yuan'"),
("SELECT * FROM test_table WHERE dateF != '1994-01-01' ", "* WHERE dateF != '1994-01-01'"),
("SELECT * FROM test_table WHERE dateF != '2020-05-01 12:01:01' ", "* WHERE dateF != '2020-05-01 12:01:01'"),
("SELECT * FROM test_table WHERE strF= 'Yuan' AND intF=10", "* WHERE strF= 'Yuan' AND intF=10"),
("SELECT * FROM test_table WHERE strF= 'Yuan' OR intF=10", "* WHERE strF= 'Yuan' OR intF=10"),
("SELECT * FROM test_table WHERE NOT strF= 'Alex' ", "* WHERE NOT strF= 'Alex'"),
("SELECT * FROM test_table WHERE byteF IN (10, 100)", "* WHERE byteF IN (10, 100)"),
("SELECT * FROM test_table WHERE shortF IN (10, 100)", "* WHERE shortF IN (10, 100)"),
("SELECT * FROM test_table WHERE longF IN (10, 100)", "* WHERE longF IN (10.0, 100.0)"),
("SELECT * FROM test_table WHERE intF IN (10, 100)", "* WHERE intF IN (10, 100)"),
("SELECT * FROM test_table WHERE floatF IN (10.0, 100.0)", "* WHERE floatF IN (10.0, 100.0)"),
("SELECT * FROM test_table WHERE doubleF IN (10.0, 100.0)", "* WHERE doubleF IN (10.0, 100.0)"),
# (" BITWISE", "BITWISE"),
("SELECT intF & 3 FROM test_table", "intF & 3"),
("SELECT intF & intF FROM test_table", "intF & intF"),
("SELECT intF | 5 FROM test_table", "intF | 5"),
("SELECT intF | intF FROM test_table", "intF | intF"),
("SELECT ~intF FROM test_table", "~intF"),
("SELECT intF^intF FROM test_table", "intF^intF"),
("SELECT shiftleft(intF, 1) FROM test_table", "shiftleft(intF, 1)"),
("SELECT shiftright(intF, 1) FROM test_table", "shiftright(intF, 1)"),
("SELECT shiftrightunsigned(intF, 1) FROM test_table", "shiftrightunsigned(intF, 1)"),
("SELECT shortF & 3 FROM test_table", "shortF & 3"),
("SELECT shortF & shortF FROM test_table", "shortF & shortF"),
("SELECT shortF | shortF FROM test_table", "shortF | shortF"),
("SELECT ~shortF FROM test_table", "~shortF"),
("SELECT shortF^shortF FROM test_table", "shortF^shortF"),
("SELECT shiftleft(shortF, 1) FROM test_table", "shiftleft(shortF, 1)"),
("SELECT shiftright(shortF, 1) FROM test_table", "shiftright(shortF, 1)"),
("SELECT shiftrightunsigned(shortF, 1) FROM test_table", "shiftrightunsigned(shortF, 1)"),
("SELECT byteF & 3 FROM test_table", "byteF & 3"),
("SELECT byteF & byteF FROM test_table", "byteF & byteF"),
("SELECT byteF | 5 FROM test_table", "byteF | 5"),
("SELECT byteF | byteF FROM test_table", "byteF | byteF"),
("SELECT ~byteF FROM test_table", "~byteF"),
("SELECT byteF^byteF FROM test_table", "byteF^byteF"),
("SELECT shiftleft(byteF, 1) FROM test_table", "shiftleft(byteF, 1)"),
("SELECT shiftright(byteF, 1) FROM test_table", "shiftright(byteF, 1)"),
("SELECT shiftrightunsigned(byteF, 1) FROM test_table", "shiftrightunsigned(byteF, 1)"),
("SELECT longF & 3 FROM test_table", "longF & 3"),
("SELECT longF & longF FROM test_table", "longF & longF"),
("SELECT longF | 5 FROM test_table", "longF | 5"),
("SELECT ~longF FROM test_table", "~longF"),
("SELECT longF^longF FROM test_table", "longF^longF"),
("SELECT shiftleft(longF, 1) FROM test_table", "shiftleft(longF, 1)"),
("SELECT shiftright(longF, 1) FROM test_table", "shiftright(longF, 1)"),
("SELECT shiftrightunsigned(longF, 1) FROM test_table", "shiftrightunsigned(longF, 1)"),
# (" OTHER", "OTHER"),
# ("MULTIPLE CASES ", "MULTIPLE CASES"),
("SELECT CASE WHEN byteF > 5 THEN 5*byteF WHEN intF > 5 THEN 5*shortF ELSE doubleF/2 END FROM test_table", "CASE WHEN byteF > 5 THEN 5*byteF WHEN intF > 5 THEN 5*shortF ELSE doubleF/2 END"),
# ("IF", "IF"),
("SELECT intF, IF (intF > 100, 10, 5) FROM test_table", "intF, IF (intF > 100, 10, 5)"),
("SELECT byteF, IF (byteF > 100, 10, 5) FROM test_table", "byteF, IF (byteF > 100, 10, 5)"),
("SELECT shortF, IF (shortF > 100, 10, 5) FROM test_table", "shortF, IF (shortF > 100, 10, 5)"),
("SELECT longF, IF (longF > 100, 10, 5) FROM test_table", "longF, IF (longF > 100, 10, 5)"),
("SELECT floatF, IF (floatF > 100.0, 10, 5) as if_float FROM test_table", "floatF, IF (floatF > 100.0, 10, 5)"),
("SELECT doubleF, IF (longF > 100.0, 10, 5) FROM test_table", "doubleF, IF (longF > 100.0, 10, 5)"),
("SELECT booleanF, IF (booleanF = True, False, True) FROM test_table", "booleanF, IF (booleanF = True, False, True)"),
("SELECT strF, IF (strF = 'Alex', 10, 5) FROM test_table", "strF, IF (strF = 'Alex', 10, 5)"),
("SELECT dateF, IF (dateF > '1994-01-01', 1990, 1980) FROM test_table", "dateF, IF (dateF > '1994-01-01', 1990, 1980)"),
("SELECT dateF, IF (dateF > '2020-05-01 12:01:015', 1990, 1980) FROM test_table", "dateF, IF (dateF > '2020-05-01 12:01:015', 1990, 1980)"),
# ("ISNAN", "ISNAN"),
("SELECT ISNAN(intF/byteF) FROM test_table", "ISNAN(intF/byteF)"),
("SELECT ISNAN(byteF/byteF) FROM test_table", "ISNAN(byteF/byteF)"),
("SELECT ISNAN(shortF/byteF) FROM test_table", "ISNAN(shortF/byteF)"),
("SELECT ISNAN(longF/byteF) FROM test_table", "ISNAN(longF/byteF)"),
("SELECT ISNAN(floatF/byteF) FROM test_table", "ISNAN(floatF/byteF)"),
("SELECT ISNAN(doubleF/byteF) FROM test_table", "ISNAN(doubleF/byteF)"),
("SELECT ISNAN(strF/strF) FROM test_table", "ISNAN(strF/strF)"),
("SELECT NANVL(byteF/byteF, 0) FROM test_table", "NANVL(byteF/byteF, 0)"),
("SELECT NANVL(shortF/byteF, 0) FROM test_table", "NANVL(shortF/byteF, 0)"),
("SELECT NANVL(intF/byteF, 0) FROM test_table", "NANVL(intF/byteF, 0)"),
("SELECT NANVL(longF/byteF, 0) FROM test_table", "NANVL(longF/byteF, 0)"),
("SELECT NANVL(floatF/byteF, 0) FROM test_table", "NANVL(floatF/byteF, 0)"),
("SELECT NANVL(doubleF/byteF, 0) FROM test_table", "NANVL(doubleF/byteF, 0)"),
("SELECT NANVL(strF/byteF, 0) FROM test_table", "NANVL(strF/byteF, 0)"),
("SELECT IFNULL(byteF, 'nobody') as if_null FROM test_table", "IFNULL(byteF, 'nobody')"),
("SELECT IFNULL(shortF, 'nobody') as if_null FROM test_table", "IFNULL(shortF, 'nobody')"),
("SELECT IFNULL(intF, 'nobody') as if_null FROM test_table", "IFNULL(intF, 'nobody')"),
("SELECT IFNULL(longF, 'nobody') as if_null FROM test_table", "IFNULL(longF, 'nobody')"),
("SELECT IFNULL(doubleF, 'nobody') as if_null FROM test_table", "IFNULL(doubleF, 'nobody')"),
("SELECT IFNULL(booleanF, True) as if_null FROM test_table", "IFNULL(booleanF, True)"),
("SELECT IFNULL(strF, 'nobody') as if_null FROM test_table", "IFNULL(strF, 'nobody')"),
("SELECT IFNULL(dateF, 'nobody') as if_null FROM test_table", "IFNULL(dateF, 'nobody')"),
("SELECT IFNULL(timestampF, 'nobody') as if_null FROM test_table", "IFNULL(timestampF, 'nobody')"),
("SELECT ISNULL(byteF) FROM test_table", "ISNULL(byteF)"),
("SELECT ISNULL(shortF) FROM test_table", "ISNULL(shortF)"),
("SELECT ISNULL(intF) FROM test_table", "ISNULL(intF)"),
("SELECT ISNULL(longF) FROM test_table", "ISNULL(longF)"),
("SELECT ISNULL(floatF ) FROM test_table", "ISNULL(floatF )"),
("SELECT ISNULL(doubleF) FROM test_table", "ISNULL(doubleF)"),
("SELECT ISNULL(booleanF) FROM test_table", "ISNULL(booleanF)"),
("SELECT ISNULL(strF) FROM test_table", "ISNULL(strF)"),
("SELECT ISNULL(dateF) FROM test_table", "ISNULL(dateF)"),
("SELECT ISNULL(timestampF) FROM test_table", "ISNULL(timestampF)"),
("SELECT ISNOTNULL(byteF) FROM test_table", "ISNOTNULL(byteF)"),
("SELECT ISNOTNULL(shortF) FROM test_table", "ISNOTNULL(shortF)"),
("SELECT ISNOTNULL(intF) FROM test_table", "ISNOTNULL(intF)"),
("SELECT ISNOTNULL(longF) FROM test_table", "ISNOTNULL(longF)"),
("SELECT ISNOTNULL(floatF) FROM test_table", "ISNOTNULL(floatF)"),
("SELECT ISNOTNULL(doubleF) FROM test_table", "ISNOTNULL(doubleF)"),
("SELECT ISNOTNULL(booleanF) FROM test_table", "ISNOTNULL(booleanF)"),
("SELECT ISNOTNULL(strF) FROM test_table", "ISNOTNULL(strF)"),
("SELECT ISNOTNULL(dateF) FROM test_table", "ISNOTNULL(dateF)"),
("SELECT ISNOTNULL(timestampF) FROM test_table", "ISNOTNULL(timestampF)"),
("SELECT NULLIF(intF, 0) as null_if FROM test_table", "NULLIF(intF, 0)"),
("SELECT NULLIF(byteF, 0) as null_if FROM test_table", "NULLIF(byteF, 0)"),
("SELECT NULLIF(shortF,0) as null_if FROM test_table", "NULLIF(shortF,0)"),
("SELECT NULLIF(intF, 0) as null_if FROM test_table", "NULLIF(intF, 0)"),
("SELECT NULLIF(longF, 0) as null_if FROM test_table", "NULLIF(longF, 0)"),
("SELECT NULLIF(floatF,0) as null_if FROM test_table", "NULLIF(floatF,0)"),
("SELECT NULLIF(doubleF, 0) as null_if FROM test_table", "NULLIF(doubleF, 0)"),
("SELECT NULLIF(booleanF, True) as null_if FROM test_table", "NULLIF(booleanF, True)"),
("SELECT NULLIF(dateF, '1990-1-1') as null_if FROM test_table", "NULLIF(dateF, '1990-1-1')"),
("SELECT NULLIF(dateF, '2022-12-01 12:01:01') as null_if FROM test_table", "NULLIF(dateF, '2022-12-01 12:01:01')"),
("SELECT NVL(byteF, 0) as nvl_value FROM test_table", "NVL(byteF, 0)"),
("SELECT NVL(shortF, 0) as nvl_value FROM test_table", "NVL(shortF, 0)"),
("SELECT NVL(intF, 0) as nvl_value FROM test_table", "NVL(intF, 0)"),
("SELECT NVL(longF, 0) as nvl_value FROM test_table", "NVL(longF, 0)"),
("SELECT NVL(floatF, 0) as nvl_value FROM test_table", "NVL(floatF, 0)"),
("SELECT NVL(doubleF, 0) as nvl_value FROM test_table", "NVL(doubleF, 0)"),
("SELECT NVL(booleanF, TRUE) as nvl_value FROM test_table", "NVL(booleanF, TRUE)"),
("SELECT NVL(strF, 0) as nvl_value FROM test_table", "NVL(strF, 0)"),
("SELECT NVL(dateF, '1990-1-1') as nvl_value FROM test_table", "NVL(dateF, '1990-1-1')"),
("SELECT NVL(timestampF, '2022-12-01 12:01:01') as nvl_value FROM test_table", "NVL(timestampF, '2022-12-01 12:01:01')"),
("SELECT NVL2(byteF, 'not null value','null value') as nvl2_value FROM test_table", "NVL2(byteF, 'not null value','null value')"),
("SELECT NVL2(shortF, 'not null value','null value' ) as nvl2_value FROM test_table", "NVL2(shortF, 'not null value','null value' )"),
("SELECT NVL2(intF, 'not null value','null value' ) as nvl2_value FROM test_table", "NVL2(intF, 'not null value','null value' )"),
("SELECT NVL2(longF, 'not null value','null value' ) as nvl2_value FROM test_table", "NVL2(longF, 'not null value','null value' )"),
("SELECT NVL2(floatF, 'not null value','null value' ) as nvl2_value FROM test_table", "NVL2(floatF, 'not null value','null value' )"),
("SELECT NVL2(doubleF, 'not null value','null value' ) as nvl2_value FROM test_table", "NVL2(doubleF, 'not null value','null value' )"),
("SELECT NVL2(booleanF, 'not null value','null value') as nvl2_value FROM test_table", "NVL2(booleanF, 'not null value','null value')"),
("SELECT NVL2(strF, 'not null value','null value' ) as nvl2_value FROM test_table", "NVL2(strF, 'not null value','null value' )"),
("SELECT NVL2(dateF, 'not null value','null value' ) as nvl2_value FROM test_table", "NVL2(dateF, 'not null value','null value' )"),
("SELECT NVL2(timestampF, 'not null value','null value') as nvl2_value FROM test_table", "NVL2(timestampF, 'not null value','null value')"),
# (" SQL FILTER", "SQL FILTER"),
("SELECT byteF FROM test_table WHERE byteF = 20", "byteF WHERE byteF = 20"),
("SELECT shortF FROM test_table WHERE shortF = 20", "shortF WHERE shortF = 20"),
("SELECT intF FROM test_table WHERE intF = 20", "intF WHERE intF = 20"),
("SELECT longF FROM test_table WHERE longF = 20", "longF WHERE longF = 20"),
("SELECT floatF FROM test_table WHERE floatF >20", "floatF WHERE floatF >20"),
("SELECT doubleF FROM test_table WHERE doubleF > 20", "doubleF WHERE doubleF > 20"),
("SELECT booleanF FROM test_table WHERE booleanF = True", "booleanF WHERE booleanF = True"),
("SELECT dateF FROM test_table WHERE dateF = '1990-1-1' ", "dateF WHERE dateF = '1990-1-1'"),
("SELECT timestampF FROM test_table WHERE timestampF > '2020-02-01 12:01:01' ", "timestampF WHERE timestampF > '2020-02-01 12:01:01'"),
("SELECT byteF FROM test_table WHERE byteF BETWEEN 10 AND 30", "byteF WHERE byteF BETWEEN 10 AND 30"),
("SELECT shortF FROM test_table WHERE shortF BETWEEN 10 AND 30", "shortF WHERE shortF BETWEEN 10 AND 30"),
("SELECT intF FROM test_table WHERE intF BETWEEN 10 AND 30", "intF WHERE intF BETWEEN 10 AND 30"),
("SELECT longF FROM test_table WHERE longF BETWEEN 10 AND 30", "longF WHERE longF BETWEEN 10 AND 30"),
("SELECT floatF FROM test_table WHERE floatF BETWEEN 10 AND 30", "floatF WHERE floatF BETWEEN 10 AND 30"),
("SELECT doubleF FROM test_table WHERE doubleF BETWEEN 10 AND 30", "doubleF WHERE doubleF BETWEEN 10 AND 30"),
("SELECT dateF FROM test_table WHERE dateF BETWEEN '1995-01-01' AND '1990-02-01' ", "dateF WHERE dateF BETWEEN '1995-01-01' AND '1990-02-01'"),
("SELECT timestampF FROM test_table WHERE timestampF BETWEEN '2020-02-01 12:01:01' AND '2022-12-01 12:01:01' ", "timestampF WHERE timestampF BETWEEN '2020-02-01 12:01:01' AND '2022-12-01 12:01:01'"),
# ("coalesce", "coalesce"),
("SELECT byteF, COALESCE(byteF,'N/A') FROM test_table", "byteF, COALESCE(byteF,'N/A')"),
("SELECT shortF, COALESCE(shortF,'N/A') FROM test_table", "shortF, COALESCE(shortF,'N/A')"),
("SELECT intF, COALESCE(intF,'N/A') FROM test_table", "intF, COALESCE(intF,'N/A')"),
("SELECT longF, COALESCE(longF,'N/A') FROM test_table", "longF, COALESCE(longF,'N/A')"),
("SELECT doubleF, COALESCE(doubleF,'N/A') FROM test_table", "doubleF, COALESCE(doubleF,'N/A')"),
("SELECT booleanF, COALESCE(booleanF, TRUE) FROM test_table", "booleanF, COALESCE(booleanF, TRUE)"),
("SELECT strF, COALESCE(strF,'N/A') FROM test_table", "strF, COALESCE(strF,'N/A')"),
# ("orderby/sort", "orderby/sort"),
("SELECT byteF FROM test_table ORDER BY byteF, doubleF", "byteF ORDER BY byteF, doubleF"),
("SELECT shortF FROM test_table ORDER BY shortF", "shortF ORDER BY shortF"),
("SELECT intF FROM test_table ORDER BY intF", "intF ORDER BY intF"),
("SELECT longF FROM test_table ORDER BY longF", "longF ORDER BY longF"),
("SELECT floatF FROM test_table ORDER BY floatF", "floatF ORDER BY floatF"),
("SELECT doubleF FROM test_table ORDER BY doubleF", "doubleF ORDER BY doubleF"),
("SELECT booleanF FROM test_table ORDER BY booleanF", "booleanF ORDER BY booleanF"),
("SELECT strF FROM test_table ORDER BY strF", "strF ORDER BY strF"),
("SELECT dateF FROM test_table ORDER BY dateF", "dateF ORDER BY dateF"),
("SELECT timestampF FROM test_table ORDER BY timestampF", "timestampF ORDER BY timestampF"),
# (" FUNCTIONAL TEST END", "FUNCTIONAL TEST END"),
# (" RANDOM TEST CHECKING", "RANDOM TEST CHECKING"),
("SELECT shortF FROM test_table", "shortF"),
("SELECT * FROM test_table", "*"),
("SELECT strF, intF, byteF, shortF, longF, booleanF, floatF FROM test_table", "strF, intF, byteF, shortF, longF, booleanF, floatF"),
("SELECT 100 FROM test_table", "100"),
("SELECT intF, 2000 FROM test_table", "intF, 2000"),
("SELECT strF IS NULL FROM test_table", "strF IS NULL"),
("SELECT intF FROM test_table WHERE strF IS NULL", "intF WHERE strF IS NULL"),
("SELECT CASE WHEN strF IS NULL THEN 'TRUE' ELSE 'FALSE' END FROM test_table", "CASE WHEN strF IS NULL THEN 'TRUE' ELSE 'FALSE' END"),
("SELECT Case WHEN intF=1000 THEN 'TRUE' ELSE 'FALSE' END FROM test_table", "Case WHEN intF=1000 THEN 'TRUE' ELSE 'FALSE' END"),
("SELECT intF IS NULL FROM test_table", "intF IS NULL"),
("SELECT intF IS NOT NULL FROM test_table", "intF IS NOT NULL"),
("SELECT * FROM test_table WHERE strF is NULL", "* WHERE strF is NULL"),
("SELECT * FROM test_table WHERE strF IS NOT NULL", "* WHERE strF IS NOT NULL"),
("SELECT * FROM test_table WHERE strF = 'Phuoc' OR strF = 'Yuan'", "* WHERE strF = 'Phuoc' OR strF = 'Yuan'"),
("SELECT * FROM test_table WHERE intF > 1000", "* WHERE intF > 1000"),
("SELECT strF, intF, shortF FROM test_table WHERE intF>1000 AND shortF>200", "strF, intF, shortF WHERE intF>1000 AND shortF>200"),
("SELECT * FROM test_table WHERE intF > 1000 AND intF = 2000", "* WHERE intF > 1000 AND intF = 2000"),
("SELECT strF, intF, longF, intF+longF FROM test_table", "strF, intF, longF, intF+longF"),
("SELECT intF+2000 FROM test_table", "intF+2000"),
("SELECT CAST(intF as long) FROM test_table", "CAST(intF as long)"),
("SELECT strF AS user_name FROM test_table", "strF AS user_name"),
("SELECT COALESCE(strF,'N/A') strF FROM test_table", "COALESCE(strF,'N/A') strF"),
("SELECT SUM(shortF) FROM test_table", "SUM(shortF)"),
("SELECT MIN(intF) FROM test_table", "MIN(intF)"),
("SELECT MAX(intF) FROM test_table", "MAX(intF)"),
("SELECT AVG(intF) FROM test_table", "AVG(intF)"),
("SELECT intF, shortF FROM test_table ORDER by intF, shortF", "intF, shortF ORDER by intF, shortF"),
("SELECT dateF, COALESCE(dateF,'N/A') FROM test_table", "dateF, COALESCE(dateF,'N/A')"),
("SELECT timestampF, COALESCE(timestampF,'N/A') FROM test_table", "timestampF, COALESCE(timestampF,'N/A')"),
("SELECT COUNT(1) FROM test_table", "COUNT(1)"),
("SELECT COUNT('*') FROM test_table", "COUNT('*')"),
("SELECT COUNT(strF) FROM test_table", "COUNT(strF)"),
("SELECT COUNT(strF) FROM test_table WHERE intF=3000", "COUNT(strF) WHERE intF=3000"),
("SELECT CASE WHEN byteF > 5 THEN 5*byteF END FROM test_table", "CASE WHEN byteF > 5 THEN 5*byteF END"),
("SELECT CASE WHEN shortF > 5 THEN 5+shortF END FROM test_table", "CASE WHEN shortF > 5 THEN 5+shortF END"),
("SELECT CASE WHEN intF > 5 THEN 5 END FROM test_table", "CASE WHEN intF > 5 THEN 5 END"),
("SELECT CASE WHEN longF > 5 THEN 5 END FROM test_table", "CASE WHEN longF > 5 THEN 5 END"),
("SELECT CASE WHEN booleanF = True THEN 'TRUE' END FROM test_table", "CASE WHEN booleanF = True THEN 'TRUE' END"),
("SELECT (CASE WHEN floatF > float(5.0) THEN float(5.0) END) as castwhen FROM test_table", "CASE WHEN floatF > 5.0 THEN 5.0 END"),
("SELECT (CASE WHEN doubleF > double(5.0) THEN double(5.0) END) as castwhen FROM test_table", "CASE WHEN doubleF > 5.0 THEN 5.0 END"),
("SELECT CASE WHEN dateF >= '1994-01-01' THEN 'good day' END FROM test_table", "CASE WHEN dateF >= '1994-01-01' THEN 'good day' END"),
("SELECT CASE WHEN timestampF > '2020-05-01 12:01:015' THEN 'good time' END FROM test_table", "CASE WHEN timestampF > '2020-05-01 12:01:015' THEN 'good time' END"),
("SELECT CASE WHEN byteF > 5 THEN 5*byteF ELSE byteF END FROM test_table", "CASE WHEN byteF > 5 THEN 5*byteF ELSE byteF END"),
("SELECT CASE WHEN shortF > 5 THEN 5+shortF ELSE shortF END FROM test_table", "CASE WHEN shortF > 5 THEN 5+shortF ELSE shortF END"),
("SELECT CASE WHEN intF > 5 THEN 5 ELSE 0 END FROM test_table", "CASE WHEN intF > 5 THEN 5 ELSE 0 END"),
("SELECT CASE WHEN longF > 5 THEN 5 ELSE 5*longF END FROM test_table", "CASE WHEN longF > 5 THEN 5 ELSE 5*longF END"),
("SELECT CASE WHEN booleanF = True THEN 'TRUE' ELSE 'false' END FROM test_table", "CASE WHEN booleanF = True THEN 'TRUE' ELSE 'false' END"),
("SELECT (CASE WHEN floatF > float(5.0) THEN float(5.0) ELSE float(6.0) END) as casewhen FROM test_table", "CASE WHEN floatF > 5.0 THEN 5.0 ELSE 6.0 END"),
("SELECT CASE WHEN doubleF > 5 THEN 5 ELSE doubleF/2 END FROM test_table", "CASE WHEN doubleF > 5.0 THEN 5.0 ELSE doubleF/2 END"),
("SELECT CASE WHEN dateF >= '1994-01-01' THEN 'good day' ELSE 'bad day' END FROM test_table", "CASE WHEN dateF >= '1994-01-01' THEN 'good day' ELSE 'bad day' END"),
("SELECT CASE WHEN timestampF > '2020-05-01 12:01:015' THEN 'good time' ELSE 'bad time' END FROM test_table", "CASE WHEN timestampF > '2020-05-01 12:01:015' THEN 'good time' ELSE 'bad time' END"),
]
SELECT_NEEDS_SORT_SQL = [
# (" AGG functions", "AGG functions"),
("SELECT AVG(intF) FROM test_table", "AVG(intF)"),
("SELECT AVG(byteF) FROM test_table", "AVG(byteF)"),
("SELECT AVG(shortF) FROM test_table", "AVG(shortF)"),
("SELECT AVG(longF) FROM test_table", "AVG(longF)"),
("SELECT AVG(floatF) FROM test_table", "AVG(floatF)"),
("SELECT AVG(doubleF) FROM test_table", "AVG(doubleF)"),
("SELECT AVG(byteF) FROM test_table GROUP BY intF", "AVG(byteF) GROUP BY intF"),
("SELECT AVG(shortF) FROM test_table GROUP BY intF", "AVG(shortF) GROUP BY intF"),
("SELECT AVG(intF) FROM test_table GROUP BY byteF", "AVG(intF) GROUP BY byteF"),
("SELECT AVG(longF) FROM test_table GROUP BY byteF", "AVG(longF) GROUP BY byteF"),
("SELECT AVG(floatF) FROM test_table GROUP BY byteF", "AVG(floatF) GROUP BY byteF"),
("SELECT AVG(doubleF) FROM test_table GROUP BY byteF", "AVG(doubleF) GROUP BY byteF"),
("SELECT AVG(intF) FROM test_table GROUP BY byteF, doubleF", "AVG(intF) GROUP BY byteF, doubleF"),
("SELECT AVG(floatF) FROM test_table GROUP BY byteF, shortF, intF ", "AVG(floatF) GROUP BY byteF, shortF, intF"),
("SELECT SUM(byteF) FROM test_table", "SUM(byteF)"),
("SELECT SUM(shortF) FROM test_table", "SUM(shortF)"),
("SELECT SUM(intF) FROM test_table", "SUM(intF)"),
("SELECT SUM(longF) FROM test_table", "SUM(longF)"),
("SELECT SUM(floatF) FROM test_table", "SUM(floatF)"),
("SELECT SUM(doubleF) FROM test_table", "SUM(doubleF)"),
("SELECT SUM(byteF) FROM test_table GROUP BY intF", "SUM(byteF) GROUP BY intF"),
("SELECT SUM(shortF) FROM test_table GROUP BY intF", "SUM(shortF) GROUP BY intF"),
("SELECT SUM(intF) FROM test_table GROUP BY shortF", "SUM(intF) GROUP BY shortF"),
("SELECT SUM(longF) FROM test_table GROUP BY shortF", "SUM(longF) GROUP BY shortF"),
("SELECT SUM(floatF) FROM test_table GROUP BY byteF", "SUM(floatF) GROUP BY byteF"),
("SELECT SUM(doubleF) FROM test_table GROUP BY intF", "SUM(doubleF) GROUP BY intF"),
("SELECT SUM(byteF) FROM test_table GROUP BY intF, shortF", "SUM(byteF) GROUP BY intF, shortF"),
("SELECT SUM(shortF) FROM test_table GROUP BY intF, floatF", "SUM(shortF) GROUP BY intF, floatF"),
("SELECT COUNT(byteF) FROM test_table", "COUNT(byteF)"),
("SELECT COUNT(shortF) FROM test_table", "COUNT(shortF)"),
("SELECT COUNT(intF) FROM test_table", "COUNT(intF)"),
("SELECT COUNT(longF) FROM test_table", "COUNT(longF)"),
("SELECT COUNT(floatF) FROM test_table", "COUNT(floatF)"),
("SELECT COUNT(doubleF) FROM test_table", "COUNT(doubleF)"),
("SELECT COUNT(booleanF) FROM test_table", "COUNT(booleanF)"),
("SELECT COUNT(strF) FROM test_table", "COUNT(strF)"),
("SELECT COUNT(dateF) FROM test_table", "COUNT(dateF)"),
("SELECT COUNT(timestampF) FROM test_table", "COUNT(timestampF)"),
("SELECT COUNT(byteF) FROM test_table GROUP BY intF", "COUNT(byteF) GROUP BY intF"),
("SELECT COUNT(shortF) FROM test_table GROUP BY intF", "COUNT(shortF) GROUP BY intF"),
("SELECT COUNT(intF) FROM test_table GROUP BY byteF", "COUNT(intF) GROUP BY byteF"),
("SELECT COUNT(longF) FROM test_table GROUP BY byteF", "COUNT(longF) GROUP BY byteF"),
("SELECT COUNT(floatF) FROM test_table GROUP BY intF", "COUNT(floatF) GROUP BY intF"),
("SELECT COUNT(doubleF) FROM test_table GROUP BY intF", "COUNT(doubleF) GROUP BY intF"),
("SELECT COUNT(byteF) FROM test_table GROUP BY intF, shortF", "COUNT(byteF) GROUP BY intF, shortF"),
("SELECT COUNT(shortF) FROM test_table GROUP BY intF, byteF", "COUNT(shortF) GROUP BY intF, byteF"),
("SELECT COUNT(intF) FROM test_table GROUP BY byteF, shortF", "COUNT(intF) GROUP BY byteF, shortF"),
("SELECT MIN(byteF) FROM test_table", "MIN(byteF)"),
("SELECT MIN(shortF) FROM test_table", "MIN(shortF)"),
("SELECT MIN(intF) FROM test_table", "MIN(intF)"),
("SELECT MIN(longF) FROM test_table", "MIN(longF)"),
("SELECT MIN(floatF) FROM test_table", "MIN(floatF)"),
("SELECT MIN(doubleF) FROM test_table", "MIN(doubleF)"),
("SELECT MIN(booleanF) FROM test_table", "MIN(booleanF)"),
("SELECT MIN(dateF) FROM test_table", "MIN(dateF)"),
("SELECT MIN(timestampF) FROM test_table", "MIN(timestampF)"),
("SELECT MIN(byteF) FROM test_table GROUP BY intF", "MIN(byteF) GROUP BY intF"),
("SELECT MIN(shortF) FROM test_table GROUP BY intF", "MIN(shortF) GROUP BY intF"),
("SELECT MIN(intF) FROM test_table GROUP BY intF", "MIN(intF) GROUP BY intF"),
("SELECT MIN(longF) FROM test_table GROUP BY intF", "MIN(longF) GROUP BY intF"),
("SELECT MIN(floatF) FROM test_table GROUP BY intF", "MIN(floatF) GROUP BY intF"),
("SELECT MIN(doubleF) FROM test_table GROUP BY intF", "MIN(doubleF) GROUP BY intF"),
("SELECT MIN(booleanF) FROM test_table GROUP BY intF", "MIN(booleanF) GROUP BY intF"),
("SELECT MIN(dateF) FROM test_table GROUP BY intF", "MIN(dateF) GROUP BY intF"),
("SELECT MIN(timestampF) FROM test_table GROUP BY intF", "MIN(timestampF) GROUP BY intF"),
("SELECT MIN(byteF) FROM test_table GROUP BY intF, shortF", "MIN(byteF) GROUP BY intF, shortF"),
("SELECT MIN(shortF) FROM test_table GROUP BY intF, byteF", "MIN(shortF) GROUP BY intF, byteF"),
("SELECT MAX(intF) FROM test_table", "MAX(intF)"),
("SELECT MAX(byteF) FROM test_table", "MAX(byteF)"),
("SELECT MAX(shortF) FROM test_table", "MAX(shortF)"),
("SELECT MAX(longF) FROM test_table", "MAX(longF)"),
("SELECT MAX(floatF) FROM test_table", "MAX(floatF)"),
("SELECT MAX(doubleF) FROM test_table", "MAX(doubleF)"),
("SELECT MAX(booleanF) FROM test_table", "MAX(booleanF)"),
("SELECT MAX(dateF) FROM test_table", "MAX(dateF)"),
("SELECT MAX(timestampF) FROM test_table", "MAX(timestampF)"),
("SELECT MAX(byteF) FROM test_table GROUP BY intF", "MAX(byteF) GROUP BY intF"),
("SELECT MAX(shortF) FROM test_table GROUP BY intF", "MAX(shortF) GROUP BY intF"),
("SELECT MAX(intF) FROM test_table GROUP BY byteF", "MAX(intF) GROUP BY byteF"),
("SELECT MAX(longF) FROM test_table GROUP BY intF", "MAX(longF) GROUP BY intF"),
("SELECT MAX(floatF) FROM test_table GROUP BY intF", "MAX(floatF) GROUP BY intF"),
("SELECT MAX(doubleF) FROM test_table GROUP BY intF", "MAX(doubleF) GROUP BY intF"),
("SELECT MAX(booleanF) FROM test_table GROUP BY intF", "MAX(booleanF) GROUP BY intF"),
("SELECT MAX(dateF) FROM test_table GROUP BY intF", "MAX(dateF) GROUP BY intF"),
("SELECT MAX(timestampF) FROM test_table GROUP BY intF", "MAX(timestampF) GROUP BY intF"),
("SELECT MAX(byteF) FROM test_table GROUP BY intF, shortF", "MAX(byteF) GROUP BY intF, shortF"),
("SELECT MAX(shortF) FROM test_table GROUP BY intF, byteF", "MAX(shortF) GROUP BY intF, byteF"),
("SELECT DISTINCT(byteF) FROM test_table", "DISTINCT(byteF)"),
("SELECT DISTINCT(shortF) FROM test_table", "DISTINCT(shortF)"),
("SELECT DISTINCT(intF) FROM test_table", "DISTINCT(intF)"),
("SELECT DISTINCT(longF) FROM test_table", "DISTINCT(longF)"),
("SELECT DISTINCT(floatF) FROM test_table", "DISTINCT(floatF)"),
("SELECT DISTINCT(doubleF) FROM test_table", "DISTINCT(doubleF)"),
("SELECT DISTINCT(booleanF) FROM test_table", "DISTINCT(booleanF)"),
("SELECT DISTINCT(strF) FROM test_table", "DISTINCT(strF)"),
("SELECT DISTINCT(dateF) FROM test_table", "DISTINCT(dateF)"),
("SELECT DISTINCT(timestampF) FROM test_table", "DISTINCT(timestampF)"),
("SELECT COUNT(DISTINCT(byteF)) FROM test_table GROUP BY intF", "COUNT(DISTINCT(byteF)) GROUP BY intF"),
("SELECT COUNT(DISTINCT(shortF)) FROM test_table GROUP BY intF", "COUNT(DISTINCT(shortF)) GROUP BY intF"),
("SELECT COUNT(DISTINCT(intF)) FROM test_table GROUP BY byteF", "COUNT(DISTINCT(intF)) GROUP BY byteF"),
("SELECT COUNT(DISTINCT(longF)) FROM test_table GROUP BY intF", "COUNT(DISTINCT(longF)) GROUP BY intF"),
("SELECT COUNT(DISTINCT(floatF)) FROM test_table GROUP BY intF", "COUNT(DISTINCT(floatF)) GROUP BY intF"),
("SELECT COUNT(DISTINCT(doubleF)) FROM test_table GROUP BY intF", "COUNT(DISTINCT(doubleF)) GROUP BY intF"),
("SELECT COUNT(DISTINCT(booleanF)) FROM test_table GROUP BY intF", "COUNT(DISTINCT(booleanF)) GROUP BY intF"),
("SELECT COUNT(DISTINCT(strF)) FROM test_table GROUP BY intF", "COUNT(DISTINCT(strF)) GROUP BY intF"),
("SELECT COUNT(DISTINCT(dateF)) FROM test_table GROUP BY intF", "COUNT(DISTINCT(dateF)) GROUP BY intF"),
("SELECT COUNT(DISTINCT(timestampF)) FROM test_table GROUP BY intF", "COUNT(DISTINCT(timestampF)) GROUP BY intF"),
("SELECT COUNT(DISTINCT(intF)), COUNT(DISTINCT(byteF)), COUNT(DISTINCT(shortF)), COUNT(DISTINCT(longF)) FROM test_table", "COUNT(DISTINCT(intF)), COUNT(DISTINCT(byteF)), COUNT(DISTINCT(shortF)), COUNT(DISTINCT(longF))"),
("SELECT COUNT(DISTINCT(intF)), COUNT(DISTINCT(floatF)), COUNT(DISTINCT(doubleF)), COUNT(DISTINCT(booleanF)) FROM test_table", "COUNT(DISTINCT(intF)), COUNT(DISTINCT(floatF)), COUNT(DISTINCT(doubleF)), COUNT(DISTINCT(booleanF))"),
("SELECT COUNT(DISTINCT(dateF)), COUNT(DISTINCT(strF)), COUNt(DISTINCT(timestampF)) FROM test_table", "COUNT(DISTINCT(dateF)), COUNT(DISTINCT(strF)), COUNt(DISTINCT(timestampF))"),
("SELECT COUNT(DISTINCT(intF)), COUNT(DISTINCT(byteF)), COUNT(DISTINCT(shortF)) FROM test_table GROUP BY longF ", "COUNT(DISTINCT(intF)), COUNT(DISTINCT(byteF)), COUNT(DISTINCT(shortF)) GROUP BY longF"),
("SELECT COUNT(DISTINCT(intF)), COUNt(DISTINCT(byteF)) FROM test_table GROUP BY longF, floatF", "COUNT(DISTINCT(intF)), COUNt(DISTINCT(byteF)) GROUP BY longF, floatF"),
("SELECT strF, intF, SUM(shortF) FROM test_table GROUP BY strF, intF", "strF, intF, SUM(shortF) GROUP BY strF, intF"),
("SELECT COUNT(byteF) as count, AVG(intF) as avg, (SUM(intF) + MAX(shortF * 3)) as summax FROM test_table GROUP BY intF", "COUNT(byteF), AVG(intF), SUM(intF) + MAX(shortF * 3) GROUP BY intF"),
("SELECT COUNT(byteF) as count, (AVG(intF) * 5.0) as avg, (SUM(intF) + MAX(shortF * 3)) as summax FROM test_table GROUP BY intF*3", "COUNT(byteF), AVG(intF) * 5.0, SUM(intF) + MAX(shortF * 3) GROUP BY intF*3"),
("SELECT COUNT(*) as count, (AVG(intF) * 5.0) as avg, (SUM(intF) + MAX(shortF * 3)) as summax FROM test_table GROUP BY intF*3", "COUNT(*), AVG(intF) * 5.0, SUM(intF) + MAX(shortF * 3) GROUP BY intF*3"),
# ("SELECT SUM(intF) OVER (PARTITION BY byteF ORDER BY shortF) as sum_total FROM test_table", "SUM(intF) OVER (PARTITION BY byteF ORDER BY shortF) as sum_total"),
pytest.param(("SELECT ROW_NUMBER() OVER (PARTITION BY byteF ORDER BY byteF) row_num, byteF FROM test_table", "ROW_NUMBER() OVER (PARTITION BY byteF ORDER BY byteF) row_num, byteF"), marks=pytest.mark.xfail(is_databricks_runtime(), reason='https://github.com/NVIDIA/spark-rapids/issues/203')),
pytest.param(("SELECT ROW_NUMBER() OVER (PARTITION BY shortF ORDER BY shortF) row_num, shortF FROM test_table", "ROW_NUMBER() OVER (PARTITION BY shortF ORDER BY shortF) row_num, shortF"), marks=pytest.mark.xfail(is_databricks_runtime(), reason='https://github.com/NVIDIA/spark-rapids/issues/203')),
pytest.param(("SELECT ROW_NUMBER() OVER (PARTITION BY intF ORDER BY intF) row_num, intF FROM test_table", "ROW_NUMBER() OVER (PARTITION BY intF ORDER BY intF) row_num, intF"), marks=pytest.mark.xfail(is_databricks_runtime(), reason='https://github.com/NVIDIA/spark-rapids/issues/203')),
pytest.param(("SELECT ROW_NUMBER() OVER (PARTITION BY longF ORDER BY longF) row_num, longF FROM test_table", "ROW_NUMBER() OVER (PARTITION BY longF ORDER BY longF) row_num, longF"), marks=pytest.mark.xfail(is_databricks_runtime(), reason='https://github.com/NVIDIA/spark-rapids/issues/203')),
pytest.param(("SELECT ROW_NUMBER() OVER (PARTITION BY floatF ORDER BY floatF) row_num, floatF FROM test_table", "ROW_NUMBER() OVER (PARTITION BY floatF ORDER BY floatF) row_num, floatF"), marks=pytest.mark.xfail(is_databricks_runtime(), reason='https://github.com/NVIDIA/spark-rapids/issues/203')),
pytest.param(("SELECT ROW_NUMBER() OVER (PARTITION BY booleanF ORDER BY booleanF) row_num, booleanF FROM test_table", "ROW_NUMBER() OVER (PARTITION BY booleanF ORDER BY booleanF) row_num, booleanF"), marks=pytest.mark.xfail(is_databricks_runtime(), reason='https://github.com/NVIDIA/spark-rapids/issues/203')),
pytest.param(("SELECT ROW_NUMBER() OVER (PARTITION BY strF ORDER BY strF) row_num, strF FROM test_table", "ROW_NUMBER() OVER (PARTITION BY strF ORDER BY strF) row_num, strF"), marks=pytest.mark.xfail(is_databricks_runtime(), reason='https://github.com/NVIDIA/spark-rapids/issues/203')),
pytest.param(("SELECT ROW_NUMBER() OVER (PARTITION BY dateF ORDER BY dateF) row_num, dateF FROM test_table", "ROW_NUMBER() OVER (PARTITION BY dateF ORDER BY dateF) row_num, dateF"), marks=pytest.mark.xfail(is_databricks_runtime(), reason='https://github.com/NVIDIA/spark-rapids/issues/203')),
pytest.param(("SELECT ROW_NUMBER() OVER (PARTITION BY timestampF ORDER BY timestampF) row_num, timestampF FROM test_table", "ROW_NUMBER() OVER (PARTITION BY timestampF ORDER BY timestampF) row_num, timestampF"), marks=pytest.mark.xfail(is_databricks_runtime(), reason='https://github.com/NVIDIA/spark-rapids/issues/203')),
# ("window/row/range (need change)", "window/row/range (need change)"),
#("SELECT byteF, SUM(byteF) OVER (PARTITION BY byteF ORDER BY byteF RANGE BETWEEN 20 PRECEDING AND 10 FOLLOWING ) as sum_total FROM test_table", "byteF, SUM(byteF) OVER (PARTITION BY byteF ORDER BY byteF RANGE BETWEEN 20 PRECEDING AND 10 FOLLOWING ) as sum_total"),
#("SELECT SUM(intF) OVER (PARTITION BY byteF ORDER BY byteF RANGE BETWEEN 20 PRECEDING AND 10 FOLLOWING ) as sum_total FROM test_table", "SUM(intF) OVER (PARTITION BY byteF ORDER BY byteF RANGE BETWEEN 20 PRECEDING AND 10 FOLLOWING ) as sum_total"),
("SELECT SUM(byteF) OVER (PARTITION BY byteF ORDER BY shortF ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING ) as sum_total FROM test_table", "SUM(byteF) OVER (PARTITION BY byteF ORDER BY shortF ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING ) as sum_total"),
#("SELECT SUM(byteF) OVER (PARTITION BY byteF ORDER BY shortF RANGE BETWEEN 20 PRECEDING AND 5 FOLLOWING ) as sum_total FROM test_table", "SUM(byteF) OVER (PARTITION BY byteF ORDER BY shortF RANGE BETWEEN 20 PRECEDING AND 5 FOLLOWING ) as sum_total"),
("SELECT SUM(byteF) OVER (PARTITION BY byteF ORDER BY longF ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING ) as sum_total FROM test_table", "SUM(byteF) OVER (PARTITION BY byteF ORDER BY longF ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING ) as sum_total"),
#("SELECT SUM(byteF) OVER (PARTITION BY byteF ORDER BY longF RANGE BETWEEN 20 PRECEDING AND 5 FOLLOWING ) as sum_total FROM test_table", "SUM(byteF) OVER (PARTITION BY byteF ORDER BY longF RANGE BETWEEN 20 PRECEDING AND 5 FOLLOWING ) as sum_total"),
("SELECT SUM(byteF) OVER (PARTITION BY byteF ORDER BY floatF ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING ) as sum_total FROM test_table", "SUM(byteF) OVER (PARTITION BY byteF ORDER BY floatF ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING ) as sum_total"),
#("SELECT SUM(byteF) OVER (PARTITION BY byteF ORDER BY floatF RANGE BETWEEN 20 PRECEDING AND 5 FOLLOWING ) as sum_total FROM test_table", "SUM(byteF) OVER (PARTITION BY byteF ORDER BY floatF RANGE BETWEEN 20 PRECEDING AND 5 FOLLOWING ) as sum_total"),
("SELECT SUM(byteF) OVER (PARTITION BY byteF ORDER BY doubleF ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING ) as sum_total FROM test_table", "SUM(byteF) OVER (PARTITION BY byteF ORDER BY doubleF ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING ) as sum_total"),
#("SELECT SUM(byteF) OVER (PARTITION BY byteF ORDER BY doubleF RANGE BETWEEN 20 PRECEDING AND 50 FOLLOWING ) as sum_total FROM test_table", "SUM(byteF) OVER (PARTITION BY byteF ORDER BY doubleF RANGE BETWEEN 20 PRECEDING AND 50 FOLLOWING ) as sum_total"),
("SELECT SUM(byteF) OVER (PARTITION BY byteF ORDER BY booleanF ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING ) as sum_total FROM test_table", "SUM(byteF) OVER (PARTITION BY byteF ORDER BY booleanF ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING ) as sum_total"),
("SELECT SUM(byteF) OVER (PARTITION BY byteF ORDER BY booleanF ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING ) as sum_total FROM test_table", "SUM(byteF) OVER (PARTITION BY byteF ORDER BY booleanF ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING ) as sum_total"),
("SELECT SUM(byteF) OVER (PARTITION BY byteF ORDER BY dateF ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING ) as sum_total FROM test_table", "SUM(byteF) OVER (PARTITION BY byteF ORDER BY dateF ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING ) as sum_total"),
("SELECT SUM(byteF) OVER (PARTITION BY byteF ORDER BY timestampF ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING ) as sum_total FROM test_table", "SUM(byteF) OVER (PARTITION BY byteF ORDER BY timestampF ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING ) as sum_total"),
("SELECT SUM(byteF) OVER (PARTITION BY byteF ORDER BY CAST(dateF AS TIMESTAMP) RANGE BETWEEN INTERVAL 1 DAYS PRECEDING AND INTERVAL 1 DAYS FOLLOWING ) as sum_total FROM test_table", "SUM(byteF) OVER (PARTITION BY byteF ORDER BY CAST(dateF AS TIMESTAMP) RANGE BETWEEN INTERVAL 1 DAYS PRECEDING AND INTERVAL 1 DAYS FOLLOWING ) as sum_total"),
# ("not supported, filed bug", "not supported, filed bug"),
# ("SELECT SUM(byteF) OVER (PARTITION BY byteF ORDER BY timestampF RANGE BETWEEN 1 PRECEDING AND 1 FOLLOWING ) as sum_total FROM test_table", "SUM(byteF) OVER (PARTITION BY byteF ORDER BY timestampF RANGE BETWEEN 1 PRECEDING AND 1 FOLLOWING ) as sum_total"),
("SELECT A.shortF, B.intF from test_table A, test_table B where A.intF=B.intF ORDER BY A.intF", "A.shortF, B.intF A, test_table B where A.intF=B.intF ORDER BY A.intF"),
("SELECT A.shortF, B.intF from test_table A, test_table B where A.intF=B.intF", "A.shortF, B.intF A, test_table B where A.intF=B.intF"),
]
SELECT_JOIN_SQL = [
# (" JOIN", "JOIN"),
("SELECT A.byteF as AbyteF, B.byteF as BbyteF from test_table A, test_table B where A.byteF=B.byteF ", "A.byteF, B.byteF A, test_table B where A.byteF=B.byteF"),
("SELECT A.shortF as AshortF, B.shortF as BbyteF from test_table A, test_table B where A.shortF=B.shortF", "A.shortF, B.shortF A, test_table B where A.shortF=B.shortF"),
("SELECT A.intF as AintF, B.intF as BintF from test_table A, test_table B where A.intF=B.intF ", "A.intF, B.intF A, test_table B where A.intF=B.intF"),
("SELECT A.longF as AlongF, B.longF as BlongF from test_table A, test_table B where A.longF=B.longF", "A.longF, B.longF A, test_table B where A.longF=B.longF"),
("SELECT A.floatF as AfloatF, B.floatF as BfloatF from test_table A, test_table B where A.floatF=B.floatF ", "A.floatF, B.floatF A, test_table B where A.floatF=B.floatF"),
("SELECT A.doubleF as AdoubleF, B.doubleF as BdoubleF from test_table A, test_table B where A.doubleF=B.doubleF", "A.doubleF, B.doubleF A, test_table B where A.doubleF=B.doubleF"),
("SELECT A.booleanF as AbooleanF, B.booleanF as BbooleanF from test_table A, test_table B where A.booleanF=B.booleanF ", "A.booleanF, B.booleanF A, test_table B where A.booleanF=B.booleanF"),
("SELECT A.strF as AstrF, B.strF as BstrF from test_table A, test_table B where A.strF=B.strF", "A.strF, B.strF A, test_table B where A.strF=B.strF"),
("SELECT A.dateF as AdateF, B.dateF as BdateF from test_table A, test_table B where A.dateF=B.dateF ", "A.dateF, B.dateF A, test_table B where A.dateF=B.dateF"),
("SELECT A.timestampF as AtimestampF, B.timestampF as BtimestampF from test_table A, test_table B where A.timestampF=B.timestampF", "A.timestampF, B.timestampF A, test_table B where A.timestampF=B.timestampF"),
("SELECT test_table.byteF as byteF, test_table1.byteF as byteF1 from test_table INNER JOIN test_table1 ON test_table.byteF=test_table1.byteF", "test_table.byteF, test_table1.byteF INNER JOIN test_table1 ON test_table.byteF=test_table1.byteF"),
("SELECT test_table.shortF as shortF, test_table1.shortF as shortF1 from test_table INNER JOIN test_table1 ON test_table.shortF=test_table1.shortF", "test_table.shortF, test_table1.shortF INNER JOIN test_table1 ON test_table.shortF=test_table1.shortF"),
("SELECT test_table.intF as intF, test_table1.intF as intF1 from test_table INNER JOIN test_table1 ON test_table.intF=test_table1.intF", "test_table.intF, test_table1.intF INNER JOIN test_table1 ON test_table.intF=test_table1.intF"),
("SELECT test_table.longF as longF, test_table1.longF as longF1 from test_table INNER JOIN test_table1 ON test_table.longF=test_table1.longF", "test_table.longF, test_table1.longF INNER JOIN test_table1 ON test_table.longF=test_table1.longF"),
("SELECT test_table.floatF as floatF, test_table1.floatF as floatF1 from test_table INNER JOIN test_table1 ON test_table.floatF=test_table1.floatF", "test_table.floatF, test_table1.floatF INNER JOIN test_table1 ON test_table.floatF=test_table1.floatF"),
("SELECT test_table.doubleF as doubleF, test_table1.doubleF as doubleF1 from test_table INNER JOIN test_table1 ON test_table.doubleF=test_table1.doubleF", "test_table.doubleF, test_table1.doubleF INNER JOIN test_table1 ON test_table.doubleF=test_table1.doubleF"),
("SELECT test_table.booleanF as booleanF, test_table1.booleanF as booleanF1 from test_table INNER JOIN test_table1 ON test_table.booleanF=test_table1.booleanF", "test_table.booleanF, test_table1.booleanF INNER JOIN test_table1 ON test_table.booleanF=test_table1.booleanF"),
("SELECT test_table.strF as strF, test_table1.strF as strF1 from test_table INNER JOIN test_table1 ON test_table.strF=test_table1.strF", "test_table.strF, test_table1.strF INNER JOIN test_table1 ON test_table.strF=test_table1.strF"),
("SELECT test_table.dateF as dateF, test_table1.dateF as dateF1 from test_table INNER JOIN test_table1 ON test_table.dateF=test_table1.dateF", "test_table.dateF, test_table1.dateF INNER JOIN test_table1 ON test_table.dateF=test_table1.dateF"),
("SELECT test_table.timestampF as timestampF, test_table1.timestampF as timestampF1 from test_table INNER JOIN test_table1 ON test_table.timestampF=test_table1.timestampF", "test_table.timestampF, test_table1.timestampF INNER JOIN test_table1 ON test_table.timestampF=test_table1.timestampF"),
("SELECT test_table.byteF as byteF, test_table1.byteF as byteF1 from test_table LEFT JOIN test_table1 ON test_table.byteF=test_table1.byteF", "test_table.byteF, test_table1.byteF LEFT JOIN test_table1 ON test_table.byteF=test_table1.byteF"),
("SELECT test_table.shortF as shortF, test_table1.shortF as shortF1 from test_table LEFT JOIN test_table1 ON test_table.shortF=test_table1.shortF", "test_table.shortF, test_table1.shortF LEFT JOIN test_table1 ON test_table.shortF=test_table1.shortF"),
("SELECT test_table.intF as intF, test_table1.intF as intF1 from test_table LEFT JOIN test_table1 ON test_table.intF=test_table1.intF", "test_table.intF, test_table1.intF LEFT JOIN test_table1 ON test_table.intF=test_table1.intF"),
("SELECT test_table.longF as longF, test_table1.longF as longF1 from test_table LEFT JOIN test_table1 ON test_table.longF=test_table1.longF", "test_table.longF, test_table1.longF LEFT JOIN test_table1 ON test_table.longF=test_table1.longF"),
("SELECT test_table.floatF as floatF, test_table1.floatF as floatF1 from test_table LEFT JOIN test_table1 ON test_table.floatF=test_table1.floatF", "test_table.floatF, test_table1.floatF LEFT JOIN test_table1 ON test_table.floatF=test_table1.floatF"),
("SELECT test_table.doubleF as doubleF, test_table1.doubleF as doubleF1 from test_table LEFT JOIN test_table1 ON test_table.doubleF=test_table1.doubleF", "test_table.doubleF, test_table1.doubleF LEFT JOIN test_table1 ON test_table.doubleF=test_table1.doubleF"),
("SELECT test_table.booleanF as booleanF, test_table1.booleanF as booleanF1 from test_table LEFT JOIN test_table1 ON test_table.booleanF=test_table1.booleanF", "test_table.booleanF, test_table1.booleanF LEFT JOIN test_table1 ON test_table.booleanF=test_table1.booleanF"),
("SELECT test_table.strF as strF, test_table1.strF as strF1 from test_table LEFT JOIN test_table1 ON test_table.strF=test_table1.strF", "test_table.strF, test_table1.strF LEFT JOIN test_table1 ON test_table.strF=test_table1.strF"),
("SELECT test_table.dateF as dateF, test_table1.dateF as dateF1 from test_table LEFT JOIN test_table1 ON test_table.dateF=test_table1.dateF", "test_table.dateF, test_table1.dateF LEFT JOIN test_table1 ON test_table.dateF=test_table1.dateF"),
("SELECT test_table.timestampF as timestampF, test_table1.timestampF as timestampF1 from test_table LEFT JOIN test_table1 ON test_table.timestampF=test_table1.timestampF", "test_table.timestampF, test_table1.timestampF LEFT JOIN test_table1 ON test_table.timestampF=test_table1.timestampF"),
("SELECT test_table.byteF as byteF, test_table1.byteF as byteF1 from test_table RIGHT JOIN test_table1 ON test_table.byteF=test_table1.byteF", "test_table.byteF, test_table1.byteF RIGHT JOIN test_table1 ON test_table.byteF=test_table1.byteF"),
("SELECT test_table.shortF as shortF, test_table1.shortF as shortF1 from test_table RIGHT JOIN test_table1 ON test_table.shortF=test_table1.shortF", "test_table.shortF, test_table1.shortF RIGHT JOIN test_table1 ON test_table.shortF=test_table1.shortF"),
("SELECT test_table.intF as intF, test_table1.intF as intF1 from test_table RIGHT JOIN test_table1 ON test_table.intF=test_table1.intF", "test_table.intF, test_table1.intF RIGHT JOIN test_table1 ON test_table.intF=test_table1.intF"),
("SELECT test_table.longF as longF, test_table1.longF as longF1 from test_table RIGHT JOIN test_table1 ON test_table.longF=test_table1.longF", "test_table.longF, test_table1.longF RIGHT JOIN test_table1 ON test_table.longF=test_table1.longF"),
("SELECT test_table.floatF as floatF, test_table1.floatF as floatF1 from test_table RIGHT JOIN test_table1 ON test_table.floatF=test_table1.floatF", "test_table.floatF, test_table1.floatF RIGHT JOIN test_table1 ON test_table.floatF=test_table1.floatF"),
("SELECT test_table.doubleF as doubleF, test_table1.doubleF as doubleF1 from test_table RIGHT JOIN test_table1 ON test_table.doubleF=test_table1.doubleF", "test_table.doubleF, test_table1.doubleF RIGHT JOIN test_table1 ON test_table.doubleF=test_table1.doubleF"),
("SELECT test_table.booleanF as booleanF, test_table1.booleanF as booleanF1 from test_table RIGHT JOIN test_table1 ON test_table.booleanF=test_table1.booleanF", "test_table.booleanF, test_table1.booleanF RIGHT JOIN test_table1 ON test_table.booleanF=test_table1.booleanF"),
("SELECT test_table.strF as strF, test_table1.strF as strF1 from test_table RIGHT JOIN test_table1 ON test_table.strF=test_table1.strF", "test_table.strF, test_table1.strF RIGHT JOIN test_table1 ON test_table.strF=test_table1.strF"),
("SELECT test_table.dateF as dateF, test_table1.dateF as dateF1 from test_table RIGHT JOIN test_table1 ON test_table.dateF=test_table1.dateF", "test_table.dateF, test_table1.dateF RIGHT JOIN test_table1 ON test_table.dateF=test_table1.dateF"),
("SELECT test_table.timestampF as timestampF, test_table1.timestampF as timestampF1 from test_table RIGHT JOIN test_table1 ON test_table.timestampF=test_table1.timestampF", "test_table.timestampF, test_table1.timestampF RIGHT JOIN test_table1 ON test_table.timestampF=test_table1.timestampF"),
("SELECT test_table.byteF as byteF, test_table1.byteF as byteF1 from test_table FULL JOIN test_table1 ON test_table.byteF=test_table1.byteF", "test_table.byteF, test_table1.byteF FULL JOIN test_table1 ON test_table.byteF=test_table1.byteF"),
("SELECT test_table.shortF as shortF, test_table1.shortF as shortF1 from test_table FULL JOIN test_table1 ON test_table.shortF=test_table1.shortF", "test_table.shortF, test_table1.shortF FULL JOIN test_table1 ON test_table.shortF=test_table1.shortF"),
("SELECT test_table.intF as intF, test_table1.intF as intF1 from test_table FULL JOIN test_table1 ON test_table.intF=test_table1.intF", "test_table.intF, test_table1.intF FULL JOIN test_table1 ON test_table.intF=test_table1.intF"),
("SELECT test_table.longF as longF, test_table1.longF as longF1 from test_table FULL JOIN test_table1 ON test_table.longF=test_table1.longF", "test_table.longF, test_table1.longF FULL JOIN test_table1 ON test_table.longF=test_table1.longF"),
("SELECT test_table.floatF as floatF, test_table1.floatF as floatF1 from test_table FULL JOIN test_table1 ON test_table.floatF=test_table1.floatF", "test_table.floatF, test_table1.floatF FULL JOIN test_table1 ON test_table.floatF=test_table1.floatF"),
("SELECT test_table.doubleF as doubleF, test_table1.doubleF as doubleF1 from test_table FULL JOIN test_table1 ON test_table.doubleF=test_table1.doubleF", "test_table.doubleF, test_table1.doubleF FULL JOIN test_table1 ON test_table.doubleF=test_table1.doubleF"),
("SELECT test_table.booleanF as booleanF, test_table1.booleanF as booleanF1 from test_table FULL JOIN test_table1 ON test_table.booleanF=test_table1.booleanF", "test_table.booleanF, test_table1.booleanF FULL JOIN test_table1 ON test_table.booleanF=test_table1.booleanF"),
("SELECT test_table.strF as strF, test_table1.strF as strF1 from test_table FULL JOIN test_table1 ON test_table.strF=test_table1.strF", "test_table.strF, test_table1.strF FULL JOIN test_table1 ON test_table.strF=test_table1.strF"),
("SELECT test_table.dateF as dateF, test_table1.dateF as dateF1 from test_table FULL JOIN test_table1 ON test_table.dateF=test_table1.dateF", "test_table.dateF, test_table1.dateF FULL JOIN test_table1 ON test_table.dateF=test_table1.dateF"),
("SELECT test_table.timestampF as timestampF, test_table1.timestampF as timestampF1 from test_table FULL JOIN test_table1 ON test_table.timestampF=test_table1.timestampF", "test_table.timestampF, test_table1.timestampF FULL JOIN test_table1 ON test_table.timestampF=test_table1.timestampF")
]
SELECT_PRE_ORDER_SQL = [
("SELECT FIRST(byteF) as res FROM test_table GROUP BY intF", "FIRST(byteF) GROUP BY intF", "byteF"),
("SELECT FIRST(shortF) as res FROM test_table GROUP BY intF", "FIRST(shortF) GROUP BY intF", "shortF"),
("SELECT FIRST(intF) as res FROM test_table GROUP BY byteF", "FIRST(intF) GROUP BY byteF", "intF"),
("SELECT FIRST(longF) as res FROM test_table GROUP BY intF", "FIRST(longF) GROUP BY intF", "longF"),
("SELECT FIRST(floatF) as res FROM test_table GROUP BY intF", "FIRST(floatF) GROUP BY intF", "floatF"),
("SELECT FIRST(doubleF) as res FROM test_table GROUP BY intF", "FIRST(doubleF) GROUP BY intF", "doubleF"),
("SELECT FIRST(booleanF) as res FROM test_table GROUP BY intF", "FIRST(booleanF) GROUP BY intF", "booleanF"),
("SELECT FIRST(dateF) as res FROM test_table GROUP BY intF", "FIRST(dateF) GROUP BY intF", "dateF"),
("SELECT FIRST(timestampF) as res FROM test_table GROUP BY intF", "FIRST(timestampF) GROUP BY intF", "timestampF"),
("SELECT FIRST(byteF) as res FROM test_table GROUP BY intF, shortF", "FIRST(byteF) GROUP BY intF, shortF", "byteF"),
("SELECT FIRST(shortF) as res FROM test_table GROUP BY intF, byteF", "FIRST(shortF) GROUP BY intF, byteF", "shortF"),
("SELECT LAST(byteF) as res FROM test_table GROUP BY intF", "LAST(byteF) GROUP BY intF", "byteF"),
("SELECT LAST(shortF) as res FROM test_table GROUP BY intF", "LAST(shortF) GROUP BY intF", "shortF"),
("SELECT LAST(intF) as res FROM test_table GROUP BY byteF", "LAST(intF) GROUP BY byteF", "intF"),
("SELECT LAST(longF) as res FROM test_table GROUP BY intF", "LAST(longF) GROUP BY intF", "longF"),
("SELECT LAST(floatF) as res FROM test_table GROUP BY intF", "LAST(floatF) GROUP BY intF", "floatF"),
("SELECT LAST(doubleF) as res FROM test_table GROUP BY intF", "LAST(doubleF) GROUP BY intF", "doubleF"),
("SELECT LAST(booleanF) as res FROM test_table GROUP BY intF", "LAST(booleanF) GROUP BY intF", "booleanF"),
("SELECT LAST(dateF) as res FROM test_table GROUP BY intF", "LAST(dateF) GROUP BY intF", "dateF"),
("SELECT LAST(timestampF) as res FROM test_table GROUP BY intF", "LAST(timestampF) GROUP BY intF", "timestampF"),
("SELECT byteF, SUM(byteF) OVER (PARTITION BY shortF ORDER BY intF ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING ) as res FROM test_table", "byteF, SUM(byteF) OVER (PARTITION BY shortF ORDER BY intF ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING ) as res", "byteF"),
("SELECT SUM(intF) OVER (PARTITION BY byteF ORDER BY byteF ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING ) as res FROM test_table", "SUM(intF) OVER (PARTITION BY byteF ORDER BY byteF ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING ) as res", "intF"),
# Aggregations with variable-width outputs, like strings, are done using a sort aggregation on the CPU.
# There are a number of issues related to this that make it hard for the GPU to match. If either of
# these queries fails, it is likely related to sorting in Spark, and there may not be much we can
# do to fix it. (A small illustrative helper follows this list.)
("SELECT LAST(strF) as res FROM test_table GROUP BY intF", "LAST(strF) GROUP BY intF", "strF"),
("SELECT FIRST(strF) as res FROM test_table GROUP BY intF", "FIRST(strF) GROUP BY intF", "strF"),
]
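# A minimal, hypothetical sketch (not referenced by the SQL lists above) of a deterministic
# alternative to the order-dependent FIRST(strF): MIN(strF) per group does not depend on row
# ordering at all, which is why FIRST/LAST need the pre-order handling used by these tests.
# It assumes a DataFrame with the same 'intF' and 'strF' columns as test_table and is never
# called by the tests.
def _min_str_per_group(df):
    from pyspark.sql import functions as F
    # MIN over strings is order-independent, unlike FIRST/LAST which depend on row order.
    return df.groupBy('intF').agg(F.min('strF').alias('res'))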
'''
("SELECT LAST(byteF) FROM test_table", "LAST(byteF)"),
("SELECT LAST(shortF) FROM test_table", "LAST(shortF)"),
("SELECT LAST(intF) FROM test_table", "LAST(intF)"),
("SELECT LAST(longF) FROM test_table", "LAST(longF)"),
("SELECT LAST(floatF) FROM test_table", "LAST(floatF)"),
("SELECT LAST(doubleF) FROM test_table", "LAST(doubleF)"),
("SELECT LAST(booleanF) FROM test_table", "LAST(booleanF)"),
("SELECT LAST(strF) FROM test_table", "LAST(strF)"),
("SELECT LAST(dateF) FROM test_table", "LAST(dateF)"),
("SELECT LAST(timestampF) FROM test_table", "LAST(timestampF)"),
("SELECT FIRST(byteF) FROM test_table", "FIRST(byteF)"),
("SELECT FIRST(shortF) FROM test_table", "FIRST(shortF)"),
("SELECT FIRST(intF) FROM test_table", "FIRST(intF)"),
("SELECT FIRST(longF) FROM test_table", "FIRST(longF)"),
("SELECT FIRST(floatF) FROM test_table", "FIRST(floatF)"),
("SELECT FIRST(doubleF) FROM test_table", "FIRST(doubleF)"),
("SELECT FIRST(booleanF) FROM test_table", "FIRST(booleanF)"),
("SELECT FIRST(strF) FROM test_table", "FIRST(strF)"),
("SELECT FIRST(dateF) FROM test_table", "FIRST(dateF)"),
("SELECT FIRST(timestampF) FROM test_table", "FIRST(timestampF)"),
'''
SELECT_FLOAT_SQL = [
("SELECT IFNULL(floatF, 0) as if_null FROM test_table", "IFNULL(floatF, 0)"),
("SELECT floatF, COALESCE(floatF, 0) FROM test_table", "floatF, COALESCE(floatF,0)"),
]
SELECT_REGEXP_SQL = [
("SELECT REGEXP_REPLACE(strF, 'Yu', 'Eric') FROM test_table", "REGEXP_REPLACE(strF, 'Yu', 'Eric')"),
] | spark-rapids-branch-23.10 | integration_tests/src/main/python/qa_nightly_sql.py |
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import locale
import pytest
from asserts import assert_gpu_fallback_collect
from data_gen import *
from marks import *
from pyspark.sql.types import *
from spark_session import is_jvm_charset_utf8
if is_jvm_charset_utf8():
    pytestmark = pytest.mark.skip(reason="Current locale uses UTF-8, fallback will not occur")
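# Note (assumption, not verified here): these fallback cases only run when the JVM default
# charset is not UTF-8, for example when Spark is launched with something like
#   --conf spark.driver.extraJavaOptions=-Dfile.encoding=ISO-8859-1
#   --conf spark.executor.extraJavaOptions=-Dfile.encoding=ISO-8859-1
# Otherwise the module-level skip above applies.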
_regexp_conf = { 'spark.rapids.sql.regexp.enabled': 'true' }
def mk_str_gen(pattern):
return StringGen(pattern).with_special_case('').with_special_pattern('.{0,10}')
@allow_non_gpu('ProjectExec', 'RLike')
def test_rlike_no_unicode_fallback():
gen = mk_str_gen('[abcd]{1,3}')
assert_gpu_fallback_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'a rlike "ab+"'),
'RLike',
conf=_regexp_conf)
@allow_non_gpu('ProjectExec', 'RegExpReplace')
def test_re_replace_no_unicode_fallback():
gen = mk_str_gen('.{0,5}TEST[\ud720 A]{0,5}')
assert_gpu_fallback_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'REGEXP_REPLACE(a, "[A-Z]+", "PROD")'),
'RegExpReplace',
conf=_regexp_conf)
@allow_non_gpu('ProjectExec', 'StringSplit')
def test_split_re_no_unicode_fallback():
data_gen = mk_str_gen('([bf]o{0,2}:){1,7}') \
.with_special_case('boo:and:foo')
assert_gpu_fallback_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr(
'split(a, "[o]", 2)'),
'StringSplit',
conf=_regexp_conf)
| spark-rapids-branch-23.10 | integration_tests/src/main/python/regexp_no_unicode_test.py |
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_gpu_fallback_collect
from data_gen import *
from marks import *
from pyspark.sql.types import *
from spark_session import is_hive_available, is_spark_330_or_later, with_cpu_session
@ignore_order
@allow_non_gpu('DataWritingCommandExec,ExecutedCommandExec,WriteFilesExec')
@pytest.mark.skipif(not (is_hive_available() and is_spark_330_or_later()), reason="Must have Hive on Spark 3.3+")
@pytest.mark.parametrize('fileFormat', ['parquet', 'orc'])
def test_write_hive_bucketed_table_fallback(spark_tmp_table_factory, fileFormat):
"""
fallback because GPU does not support Hive hash partition
"""
table = spark_tmp_table_factory.get()
def create_hive_table(spark):
spark.sql("""create table {0} (a bigint, b bigint, c bigint)
stored as {1}
clustered by (b) into 3 buckets""".format(table, fileFormat))
return None
conf = {"hive.enforce.bucketing": "true",
"hive.exec.dynamic.partition": "true",
"hive.exec.dynamic.partition.mode": "nonstrict"}
with_cpu_session(create_hive_table, conf = conf)
assert_gpu_fallback_collect(
lambda spark: spark.sql("insert into {} values (1, 2, 3)".format(table)),
'DataWritingCommandExec',
conf = conf)
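# A hypothetical contrast to the bucketed table above (not exercised by any test here): without
# the CLUSTERED BY clause there is no Hive hash partitioning involved, so a plain insert into a
# table like this would not be expected to fall back for that reason.
def _create_unbucketed_hive_table(spark, table, fileFormat):
    # Same columns and storage format as the bucketed table, minus the bucketing clause.
    spark.sql("create table {0} (a bigint, b bigint, c bigint) stored as {1}".format(
        table, fileFormat))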
| spark-rapids-branch-23.10 | integration_tests/src/main/python/datasourcev2_write_test.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_gpu_and_cpu_are_equal_sql
from data_gen import *
from spark_session import with_spark_session, is_spark_350_or_later
from conftest import skip_unless_precommit_tests
def drop_udf(spark, udfname):
spark.sql("DROP TEMPORARY FUNCTION IF EXISTS {}".format(udfname))
def skip_if_no_hive(spark):
if spark.conf.get("spark.sql.catalogImplementation") != "hive":
skip_unless_precommit_tests('The Spark session does not have Hive support')
def load_hive_udf(spark, udfname, udfclass):
drop_udf(spark, udfname)
    # If the UDF fails to load, this throws AnalysisException; check that the UDF class is on the
    # class path. (A small registration sanity-check sketch follows this function.)
spark.sql("CREATE TEMPORARY FUNCTION {} AS '{}'".format(udfname, udfclass))
@pytest.mark.xfail(condition=is_spark_350_or_later(),
reason='https://github.com/NVIDIA/spark-rapids/issues/9064')
def test_hive_empty_simple_udf():
with_spark_session(skip_if_no_hive)
data_gens = [["i", int_gen], ["s", string_gen]]
def evalfn(spark):
load_hive_udf(spark, "emptysimple", "com.nvidia.spark.rapids.tests.udf.hive.EmptyHiveSimpleUDF")
return gen_df(spark, data_gens)
assert_gpu_and_cpu_are_equal_sql(
evalfn,
"hive_simple_udf_test_table",
"SELECT i, emptysimple(s, 'const_string') FROM hive_simple_udf_test_table",
conf={'spark.rapids.sql.rowBasedUDF.enabled': 'true'})
def test_hive_empty_generic_udf():
with_spark_session(skip_if_no_hive)
def evalfn(spark):
load_hive_udf(spark, "emptygeneric", "com.nvidia.spark.rapids.tests.udf.hive.EmptyHiveGenericUDF")
return gen_df(spark, [["s", string_gen]])
assert_gpu_and_cpu_are_equal_sql(
evalfn,
"hive_generic_udf_test_table",
"SELECT emptygeneric(s) FROM hive_generic_udf_test_table",
conf={'spark.rapids.sql.rowBasedUDF.enabled': 'true'})
| spark-rapids-branch-23.10 | integration_tests/src/main/python/row-based_udf_test.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pyspark.sql.functions as f
import pytest
from asserts import assert_gpu_and_cpu_are_equal_collect, assert_gpu_and_cpu_error, assert_gpu_and_cpu_row_counts_equal, assert_gpu_fallback_collect
from data_gen import *
from conftest import is_databricks_runtime
from marks import approximate_float, allow_non_gpu, ignore_order
from spark_session import with_cpu_session, with_gpu_session, is_before_spark_330
json_supported_gens = [
    # Spark does not escape '\r' or '\n' even though it uses them to mark the end of a record.
    # Handling them would require multiLine reads to work correctly, so we avoid these chars.
    # (A small multiLine illustration follows this list.)
StringGen('(\\w| |\t|\ud720){0,10}', nullable=False),
StringGen('[aAbB ]{0,10}'),
StringGen('[nN][aA][nN]'),
StringGen('[+-]?[iI][nN][fF]([iI][nN][iI][tT][yY])?'),
byte_gen, short_gen, int_gen, long_gen, boolean_gen,
pytest.param(double_gen),
pytest.param(FloatGen(no_nans=False)),
pytest.param(float_gen),
DoubleGen(no_nans=False)
]
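# A hypothetical helper (not exercised by these tests) illustrating the multiLine reads mentioned
# in the comment above, which would be needed if records could contain embedded '\r' or '\n':
def _read_json_multiline(spark, schema, data_path):
    # multiLine tells the JSON reader that a single record may span multiple lines.
    return spark.read.schema(schema).option('multiLine', 'true').json(data_path)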
_enable_all_types_conf = {
'spark.rapids.sql.format.json.enabled': 'true',
'spark.rapids.sql.format.json.read.enabled': 'true',
'spark.rapids.sql.json.read.float.enabled': 'true',
'spark.rapids.sql.json.read.double.enabled': 'true',
'spark.rapids.sql.json.read.decimal.enabled': 'true'
}
_bool_schema = StructType([
StructField('number', BooleanType())])
_byte_schema = StructType([
StructField('number', ByteType())])
_short_schema = StructType([
StructField('number', ShortType())])
_int_schema = StructType([
StructField('number', IntegerType())])
_long_schema = StructType([
StructField('number', LongType())])
_float_schema = StructType([
StructField('number', FloatType())])
_double_schema = StructType([
StructField('number', DoubleType())])
_decimal_10_2_schema = StructType([
StructField('number', DecimalType(10, 2))])
_decimal_10_3_schema = StructType([
StructField('number', DecimalType(10, 3))])
_date_schema = StructType([
StructField('number', DateType())])
_timestamp_schema = StructType([
StructField('number', TimestampType())])
_string_schema = StructType([
StructField('a', StringType())])
def read_json_df(data_path, schema, spark_tmp_table_factory_ignored, options = {}):
def read_impl(spark):
reader = spark.read
        if schema is not None:
reader = reader.schema(schema)
for key, value in options.items():
reader = reader.option(key, value)
return debug_df(reader.json(data_path))
return read_impl
def read_json_sql(data_path, schema, spark_tmp_table_factory, options = {}):
opts = options
    if schema is not None:
opts = copy_and_update(options, {'schema': schema})
def read_impl(spark):
tmp_name = spark_tmp_table_factory.get()
return spark.catalog.createTable(tmp_name, source='json', path=data_path, **opts)
return read_impl
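# Note: read_json_df exercises the DataFrameReader path while read_json_sql registers a JSON
# table through the catalog, so the parametrized tests below cover both read code paths.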
@approximate_float
@pytest.mark.parametrize('data_gen', [
StringGen('(\\w| |\t|\ud720){0,10}', nullable=False),
StringGen('[aAbB ]{0,10}'),
byte_gen, short_gen, int_gen, long_gen, boolean_gen,], ids=idfn)
@pytest.mark.parametrize('v1_enabled_list', ["", "json"])
@allow_non_gpu('FileSourceScanExec')
def test_json_infer_schema_round_trip(spark_tmp_path, data_gen, v1_enabled_list):
gen = StructGen([('a', data_gen)], nullable=False)
data_path = spark_tmp_path + '/JSON_DATA'
updated_conf = copy_and_update(_enable_all_types_conf, {'spark.sql.sources.useV1SourceList': v1_enabled_list})
with_cpu_session(
lambda spark : gen_df(spark, gen).write.json(data_path))
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.read.json(data_path),
conf=updated_conf)
@approximate_float
@pytest.mark.parametrize('data_gen', json_supported_gens, ids=idfn)
@pytest.mark.parametrize('v1_enabled_list', ["", "json"])
def test_json_round_trip(spark_tmp_path, data_gen, v1_enabled_list):
gen = StructGen([('a', data_gen)], nullable=False)
data_path = spark_tmp_path + '/JSON_DATA'
schema = gen.data_type
updated_conf = copy_and_update(_enable_all_types_conf, {'spark.sql.sources.useV1SourceList': v1_enabled_list})
with_cpu_session(
lambda spark : gen_df(spark, gen).write.json(data_path))
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.read.schema(schema).json(data_path),
conf=updated_conf)
@pytest.mark.parametrize('v1_enabled_list', ["", "json"])
def test_json_input_meta(spark_tmp_path, v1_enabled_list):
gen = StructGen([('a', long_gen), ('b', long_gen), ('c', long_gen)], nullable=False)
first_data_path = spark_tmp_path + '/JSON_DATA/key=0'
with_cpu_session(
lambda spark : gen_df(spark, gen).write.json(first_data_path))
second_data_path = spark_tmp_path + '/JSON_DATA/key=1'
with_cpu_session(
lambda spark : gen_df(spark, gen).write.json(second_data_path))
data_path = spark_tmp_path + '/JSON_DATA'
updated_conf = copy_and_update(_enable_all_types_conf, {'spark.sql.sources.useV1SourceList': v1_enabled_list})
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.read.schema(gen.data_type)
.json(data_path)
.filter(f.col('b') > 0)
.selectExpr('b',
'input_file_name()',
'input_file_block_start()',
'input_file_block_length()'),
conf=updated_conf)
json_supported_date_formats = ['yyyy-MM-dd', 'yyyy/MM/dd', 'yyyy-MM', 'yyyy/MM',
'MM-yyyy', 'MM/yyyy', 'MM-dd-yyyy', 'MM/dd/yyyy', 'dd-MM-yyyy', 'dd/MM/yyyy']
@pytest.mark.parametrize('date_format', json_supported_date_formats, ids=idfn)
@pytest.mark.parametrize('v1_enabled_list', ["", "json"])
def test_json_date_formats_round_trip(spark_tmp_path, date_format, v1_enabled_list):
gen = StructGen([('a', DateGen())], nullable=False)
data_path = spark_tmp_path + '/JSON_DATA'
schema = gen.data_type
updated_conf = copy_and_update(_enable_all_types_conf, {'spark.sql.sources.useV1SourceList': v1_enabled_list})
with_cpu_session(
lambda spark : gen_df(spark, gen).write\
.option('dateFormat', date_format)\
.json(data_path))
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.read\
.schema(schema)\
.option('dateFormat', date_format)\
.json(data_path),
conf=updated_conf)
json_supported_ts_parts = ['', # Just the date
"'T'HH:mm:ss.SSSXXX",
"'T'HH:mm:ss[.SSS][XXX]",
"'T'HH:mm:ss.SSS",
"'T'HH:mm:ss[.SSS]",
"'T'HH:mm:ss",
"'T'HH:mm[:ss]",
"'T'HH:mm"]
@pytest.mark.parametrize('ts_part', json_supported_ts_parts)
@pytest.mark.parametrize('date_format', json_supported_date_formats)
@pytest.mark.parametrize('v1_enabled_list', ["", "json"])
def test_json_ts_formats_round_trip(spark_tmp_path, date_format, ts_part, v1_enabled_list):
full_format = date_format + ts_part
data_gen = TimestampGen()
gen = StructGen([('a', data_gen)], nullable=False)
data_path = spark_tmp_path + '/JSON_DATA'
schema = gen.data_type
with_cpu_session(
lambda spark : gen_df(spark, gen).write\
.option('timestampFormat', full_format)\
.json(data_path))
updated_conf = copy_and_update(_enable_all_types_conf, {'spark.sql.sources.useV1SourceList': v1_enabled_list})
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.read\
.schema(schema)\
.option('timestampFormat', full_format)\
.json(data_path),
conf=updated_conf)
@approximate_float
@pytest.mark.parametrize('filename', [
'boolean.json',
pytest.param('boolean_invalid.json', marks=pytest.mark.xfail(reason='https://github.com/NVIDIA/spark-rapids/issues/4779')),
'ints.json',
pytest.param('ints_invalid.json', marks=pytest.mark.xfail(reason='https://github.com/NVIDIA/spark-rapids/issues/4793')),
'nan_and_inf.json',
pytest.param('nan_and_inf_strings.json', marks=pytest.mark.skipif(is_before_spark_330(), reason='https://issues.apache.org/jira/browse/SPARK-38060 fixed in Spark 3.3.0')),
'nan_and_inf_invalid.json',
'floats.json',
'floats_leading_zeros.json',
'floats_invalid.json',
pytest.param('floats_edge_cases.json', marks=pytest.mark.xfail(reason='https://github.com/NVIDIA/spark-rapids/issues/4647')),
'decimals.json',
'dates.json',
'dates_invalid.json',
])
@pytest.mark.parametrize('schema', [_bool_schema, _byte_schema, _short_schema, _int_schema, _long_schema, \
_float_schema, _double_schema, _decimal_10_2_schema, _decimal_10_3_schema, \
_date_schema])
@pytest.mark.parametrize('read_func', [read_json_df, read_json_sql])
@pytest.mark.parametrize('allow_non_numeric_numbers', ["true", "false"])
@pytest.mark.parametrize('allow_numeric_leading_zeros', ["true"])
@pytest.mark.parametrize('ansi_enabled', ["true", "false"])
def test_basic_json_read(std_input_path, filename, schema, read_func, allow_non_numeric_numbers, allow_numeric_leading_zeros, ansi_enabled, spark_tmp_table_factory):
updated_conf = copy_and_update(_enable_all_types_conf,
{'spark.sql.ansi.enabled': ansi_enabled,
'spark.sql.legacy.timeParserPolicy': 'CORRECTED'})
assert_gpu_and_cpu_are_equal_collect(
read_func(std_input_path + '/' + filename,
schema,
spark_tmp_table_factory,
{ "allowNonNumericNumbers": allow_non_numeric_numbers,
"allowNumericLeadingZeros": allow_numeric_leading_zeros}),
conf=updated_conf)
@approximate_float
@pytest.mark.parametrize('filename', [
'dates.json',
])
@pytest.mark.parametrize('schema', [_date_schema])
@pytest.mark.parametrize('read_func', [read_json_df, read_json_sql])
@pytest.mark.parametrize('ansi_enabled', ["true", "false"])
@pytest.mark.parametrize('time_parser_policy', [
pytest.param('LEGACY', marks=pytest.mark.allow_non_gpu('FileSourceScanExec')),
'CORRECTED',
'EXCEPTION'
])
def test_json_read_valid_dates(std_input_path, filename, schema, read_func, ansi_enabled, time_parser_policy, spark_tmp_table_factory):
updated_conf = copy_and_update(_enable_all_types_conf,
{'spark.sql.ansi.enabled': ansi_enabled,
'spark.sql.legacy.timeParserPolicy': time_parser_policy})
f = read_func(std_input_path + '/' + filename, schema, spark_tmp_table_factory, {})
if time_parser_policy == 'LEGACY' and ansi_enabled == 'true':
assert_gpu_fallback_collect(
f,
'FileSourceScanExec',
conf=updated_conf)
else:
assert_gpu_and_cpu_are_equal_collect(f, conf=updated_conf)
@approximate_float
@pytest.mark.parametrize('filename', [
'dates_invalid.json',
])
@pytest.mark.parametrize('schema', [_date_schema])
@pytest.mark.parametrize('read_func', [read_json_df, read_json_sql])
@pytest.mark.parametrize('ansi_enabled', ["true", "false"])
@pytest.mark.parametrize('time_parser_policy', [
pytest.param('LEGACY', marks=pytest.mark.allow_non_gpu('FileSourceScanExec')),
'CORRECTED',
'EXCEPTION'
])
def test_json_read_invalid_dates(std_input_path, filename, schema, read_func, ansi_enabled, time_parser_policy, spark_tmp_table_factory):
updated_conf = copy_and_update(_enable_all_types_conf,
{'spark.sql.ansi.enabled': ansi_enabled,
'spark.sql.legacy.timeParserPolicy': time_parser_policy })
f = read_func(std_input_path + '/' + filename, schema, spark_tmp_table_factory, {})
if time_parser_policy == 'EXCEPTION':
assert_gpu_and_cpu_error(
df_fun=lambda spark: f(spark).collect(),
conf=updated_conf,
error_message='DateTimeException')
elif time_parser_policy == 'LEGACY' and ansi_enabled == 'true':
assert_gpu_fallback_collect(
f,
'FileSourceScanExec',
conf=updated_conf)
else:
assert_gpu_and_cpu_are_equal_collect(f, conf=updated_conf)
@approximate_float
@pytest.mark.parametrize('filename', [
'timestamps.json',
])
@pytest.mark.parametrize('schema', [_timestamp_schema])
@pytest.mark.parametrize('read_func', [read_json_df, read_json_sql])
@pytest.mark.parametrize('ansi_enabled', ["true", "false"])
@pytest.mark.parametrize('time_parser_policy', [
pytest.param('LEGACY', marks=pytest.mark.allow_non_gpu('FileSourceScanExec')),
'CORRECTED',
'EXCEPTION'
])
def test_json_read_valid_timestamps(std_input_path, filename, schema, read_func, ansi_enabled, time_parser_policy, \
spark_tmp_table_factory):
updated_conf = copy_and_update(_enable_all_types_conf,
{'spark.sql.ansi.enabled': ansi_enabled,
'spark.sql.legacy.timeParserPolicy': time_parser_policy})
f = read_func(std_input_path + '/' + filename, schema, spark_tmp_table_factory, {})
assert_gpu_and_cpu_are_equal_collect(f, conf=updated_conf)
@pytest.mark.parametrize('schema', [_string_schema])
@pytest.mark.parametrize('read_func', [read_json_df, read_json_sql])
@pytest.mark.parametrize('allow_unquoted_chars', ["true"])
@pytest.mark.parametrize('filename', ['unquotedChars.json'])
def test_json_unquotedCharacters(std_input_path, filename, schema, read_func, allow_unquoted_chars, spark_tmp_table_factory):
assert_gpu_and_cpu_are_equal_collect(
read_func(std_input_path + '/' + filename,
schema,
spark_tmp_table_factory,
{"allowUnquotedControlChars": allow_unquoted_chars}),
conf=_enable_all_types_conf)
@ignore_order
@pytest.mark.parametrize('v1_enabled_list', ["", "json"])
@pytest.mark.skipif(is_databricks_runtime(), reason="Databricks does not support ignoreCorruptFiles")
def test_json_read_with_corrupt_files(spark_tmp_path, v1_enabled_list):
first_data_path = spark_tmp_path + '/JSON_DATA/first'
with_cpu_session(lambda spark : spark.range(1).toDF("a").write.json(first_data_path))
second_data_path = spark_tmp_path + '/JSON_DATA/second'
with_cpu_session(lambda spark : spark.range(1, 2).toDF("a").write.orc(second_data_path))
third_data_path = spark_tmp_path + '/JSON_DATA/third'
with_cpu_session(lambda spark : spark.range(2, 3).toDF("a").write.json(third_data_path))
all_confs = copy_and_update(_enable_all_types_conf,
{'spark.sql.files.ignoreCorruptFiles': "true",
'spark.sql.sources.useV1SourceList': v1_enabled_list})
schema = StructType([StructField("a", IntegerType())])
    # When ignoreCorruptFiles is enabled, the GPU read should not throw an exception even though
    # the second path holds ORC data rather than JSON. The CPU can read the three files successfully
    # without ignoreCorruptFiles, so here we only check that the GPU read does not throw.
with_gpu_session(
lambda spark : spark.read.schema(schema)
.json([first_data_path, second_data_path, third_data_path])
.collect(),
conf=all_confs)
@pytest.mark.parametrize('v1_enabled_list', ["", "json"])
def test_json_read_count(spark_tmp_path, v1_enabled_list):
gen_list = [byte_gen, short_gen, int_gen, long_gen, boolean_gen]
gen = StructGen([('_c' + str(i), gen) for i, gen in enumerate(gen_list)], nullable=False)
data_path = spark_tmp_path + '/JSON_DATA'
schema = gen.data_type
updated_conf = copy_and_update(_enable_all_types_conf, {'spark.sql.sources.useV1SourceList': v1_enabled_list})
with_cpu_session(
lambda spark : gen_df(spark, gen).write.json(data_path))
assert_gpu_and_cpu_row_counts_equal(
lambda spark : spark.read.schema(schema).json(data_path),
conf=updated_conf)
def test_from_json_map():
    # This test works around some inconsistencies in how the keys are parsed for maps:
    # on the GPU the keys are dense, but on the CPU they are sparse
json_string_gen = StringGen(r'{"a": "[0-9]{0,5}"(, "b": "[A-Z]{0,5}")?}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, json_string_gen) \
.select(f.from_json(f.col('a'), 'MAP<STRING,STRING>')),
conf={"spark.rapids.sql.expression.JsonToStructs": True})
@allow_non_gpu('ProjectExec', 'JsonToStructs')
def test_from_json_map_fallback():
    # This test works around some inconsistencies in how the keys are parsed for maps:
    # on the GPU the keys are dense, but on the CPU they are sparse
json_string_gen = StringGen(r'{"a": \d\d}')
assert_gpu_fallback_collect(
lambda spark : unary_op_df(spark, json_string_gen) \
.select(f.from_json(f.col('a'), 'MAP<STRING,INT>')),
'JsonToStructs',
conf={"spark.rapids.sql.expression.JsonToStructs": True})
@pytest.mark.xfail(reason='https://github.com/NVIDIA/spark-rapids/issues/8558')
@pytest.mark.parametrize('schema', ['struct<a:string>',
'struct<d:string>',
'struct<a:string,b:string>',
'struct<c:int,a:string>',
'struct<a:string,a:string>',
])
def test_from_json_struct(schema):
json_string_gen = StringGen(r'{"a": "[0-9]{0,5}", "b": "[A-Z]{0,5}", "c": 1\d\d\d}').with_special_pattern('', weight=50)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, json_string_gen) \
.select(f.from_json('a', schema)),
conf={"spark.rapids.sql.expression.JsonToStructs": True})
@pytest.mark.xfail(reason='https://github.com/NVIDIA/spark-rapids/issues/8558')
@pytest.mark.parametrize('schema', ['struct<teacher:string>',
'struct<student:struct<name:string,age:int>>',
'struct<teacher:string,student:struct<name:string,age:int>>'])
def test_from_json_struct_of_struct(schema):
json_string_gen = StringGen(r'{"teacher": "[A-Z]{1}[a-z]{2,5}",' \
r'"student": {"name": "[A-Z]{1}[a-z]{2,5}", "age": 1\d}}').with_special_pattern('', weight=50)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, json_string_gen) \
.select(f.from_json('a', schema)),
conf={"spark.rapids.sql.expression.JsonToStructs": True})
@pytest.mark.xfail(reason='https://github.com/NVIDIA/spark-rapids/issues/8558')
@pytest.mark.parametrize('schema', ['struct<teacher:string>',
'struct<student:array<struct<name:string,class:string>>>',
'struct<teacher:string,student:array<struct<name:string,class:string>>>'])
def test_from_json_struct_of_list(schema):
json_string_gen = StringGen(r'{"teacher": "[A-Z]{1}[a-z]{2,5}",' \
r'"student": \[{"name": "[A-Z]{1}[a-z]{2,5}", "class": "junior"},' \
r'{"name": "[A-Z]{1}[a-z]{2,5}", "class": "freshman"}\]}').with_special_pattern('', weight=50)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, json_string_gen) \
.select(f.from_json('a', schema)),
conf={"spark.rapids.sql.expression.JsonToStructs": True})
@pytest.mark.xfail(reason='https://github.com/NVIDIA/spark-rapids/issues/8558')
@pytest.mark.parametrize('schema', ['struct<a:string>', 'struct<a:string,b:int>'])
def test_from_json_struct_all_empty_string_input(schema):
json_string_gen = StringGen('')
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, json_string_gen) \
.select(f.from_json('a', schema)),
conf={"spark.rapids.sql.expression.JsonToStructs": True})
@allow_non_gpu('FileSourceScanExec')
@pytest.mark.skipif(is_before_spark_340(), reason='enableDateTimeParsingFallback is supported from Spark3.4.0')
@pytest.mark.parametrize('filename,schema', [("dates.json", _date_schema),("dates.json", _timestamp_schema),
("timestamps.json", _timestamp_schema)])
def test_json_datetime_parsing_fallback_cpu_fallback(std_input_path, filename, schema):
data_path = std_input_path + "/" + filename
assert_gpu_fallback_collect(
lambda spark : spark.read.schema(schema).option('enableDateTimeParsingFallback', "true").json(data_path),
'FileSourceScanExec',
conf=_enable_all_types_conf)
@pytest.mark.skipif(is_before_spark_340(), reason='enableDateTimeParsingFallback is supported from Spark3.4.0')
@pytest.mark.parametrize('filename,schema', [("ints.json", _int_schema)])
def test_json_datetime_parsing_fallback_no_datetime(std_input_path, filename, schema):
data_path = std_input_path + "/" + filename
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.read.schema(schema).option('enableDateTimeParsingFallback', "true").json(data_path),
conf=_enable_all_types_conf)
@pytest.mark.skip(reason=str("https://github.com/NVIDIA/spark-rapids/issues/8403"))
@pytest.mark.parametrize('v1_enabled_list', ["", "json"])
@pytest.mark.parametrize('col_name', ['K0', 'k0', 'K3', 'k3', 'V0', 'v0'], ids=idfn)
@ignore_order
def test_read_case_col_name(spark_tmp_path, v1_enabled_list, col_name):
all_confs = {'spark.sql.sources.useV1SourceList': v1_enabled_list,
'spark.rapids.sql.format.json.read.enabled': True,
'spark.rapids.sql.format.json.enabled': True}
gen_list =[('k0', LongGen(nullable=False, min_val=0, max_val=0)),
('k1', LongGen(nullable=False, min_val=1, max_val=1)),
('k2', LongGen(nullable=False, min_val=2, max_val=2)),
('k3', LongGen(nullable=False, min_val=3, max_val=3)),
('v0', LongGen()),
('v1', LongGen()),
('v2', LongGen()),
('v3', LongGen())]
gen = StructGen(gen_list, nullable=False)
data_path = spark_tmp_path + '/JSON_DATA'
with_cpu_session(
lambda spark : gen_df(spark, gen).write.partitionBy('k0', 'k1', 'k2', 'k3').json(data_path))
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.read.schema(gen.data_type).json(data_path).selectExpr(col_name),
conf=all_confs)
| spark-rapids-branch-23.10 | integration_tests/src/main/python/json_test.py |
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_gpu_and_cpu_are_equal_collect, assert_gpu_and_cpu_are_equal_sql, assert_gpu_and_cpu_error, assert_gpu_fallback_collect
from data_gen import *
from conftest import is_databricks_runtime
from marks import incompat
from spark_session import is_before_spark_313, is_before_spark_330, is_databricks113_or_later, is_spark_330_or_later, is_databricks104_or_later, is_spark_33X, is_spark_340_or_later, is_spark_330, is_spark_330cdh
from pyspark.sql.types import *
from pyspark.sql.types import IntegralType
from pyspark.sql.functions import array_contains, col, element_at, lit, array
# max_val is a little larger than the default max size (20) of ArrayGen
# so we can get out-of-bounds indices.
array_neg_index_gen = IntegerGen(min_val=-25, max_val=-1, special_cases=[None])
array_out_index_gen = IntegerGen(min_val=25, max_val=100, special_cases=[None])
array_zero_index_gen = IntegerGen(min_val=0, max_val=0, special_cases=[])
array_no_zero_index_gen = IntegerGen(min_val=1, max_val=25,
special_cases=[(-25, 100), (-20, 100), (-10, 100), (-4, 100), (-3, 100), (-2, 100), (-1, 100), (None, 100)])
array_all_null_gen = ArrayGen(int_gen, all_null=True)
array_item_test_gens = array_gens_sample + [array_all_null_gen,
ArrayGen(MapGen(StringGen(pattern='key_[0-9]', nullable=False), StringGen(), max_length=10), max_length=10),
ArrayGen(BinaryGen(max_length=10), max_length=10)]
# Need these for set-based operations
# See https://issues.apache.org/jira/browse/SPARK-39845
_non_neg_zero_float_special_cases = [
FLOAT_MIN,
FLOAT_MAX,
-1.0,
1.0,
0.0,
float('inf'),
float('-inf'),
float('nan'),
NEG_FLOAT_NAN_MAX_VALUE
]
_non_neg_zero_double_special_cases = [
DoubleGen.make_from(1, DOUBLE_MAX_EXP, DOUBLE_MAX_FRACTION),
DoubleGen.make_from(0, DOUBLE_MAX_EXP, DOUBLE_MAX_FRACTION),
DoubleGen.make_from(1, DOUBLE_MIN_EXP, DOUBLE_MAX_FRACTION),
DoubleGen.make_from(0, DOUBLE_MIN_EXP, DOUBLE_MAX_FRACTION),
-1.0,
1.0,
0.0,
float('inf'),
float('-inf'),
float('nan'),
NEG_DOUBLE_NAN_MAX_VALUE
]
no_neg_zero_all_basic_gens = [byte_gen, short_gen, int_gen, long_gen,
                              # -0.0 cannot be used because -0.0 == 0.0 in cudf for distinct
                              # operations, but NaNs and the other default special cases do work
                              # (see the small equality sketch after this list)
FloatGen(special_cases=_non_neg_zero_float_special_cases),
DoubleGen(special_cases=_non_neg_zero_double_special_cases),
string_gen, boolean_gen, date_gen, timestamp_gen]
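# Illustrative sketch (added for clarity, not part of the original generator setup): the reason
# -0.0 is excluded above is plain IEEE-754 equality, which Python exhibits the same way the cudf
# distinct comparison described in the comment does. These checks run harmlessly at import time.
assert -0.0 == 0.0             # negative and positive zero compare equal...
assert str(-0.0) != str(0.0)   # ...even though they are distinct values when printed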
no_neg_zero_all_basic_gens_no_nulls = [StringGen(nullable=False), ByteGen(nullable=False),
ShortGen(nullable=False), IntegerGen(nullable=False), LongGen(nullable=False),
BooleanGen(nullable=False), DateGen(nullable=False), TimestampGen(nullable=False),
FloatGen(special_cases=_non_neg_zero_float_special_cases, nullable=False),
DoubleGen(special_cases=_non_neg_zero_double_special_cases, nullable=False)]
decimal_gens_no_nulls = [DecimalGen(precision=7, scale=3, nullable=False),
DecimalGen(precision=12, scale=2, nullable=False),
DecimalGen(precision=20, scale=2, nullable=False)]
no_neg_zero_all_basic_gens_no_nans = [byte_gen, short_gen, int_gen, long_gen,
                                      # -0.0 cannot be used because -0.0 == 0.0 in cudf for distinct
FloatGen(special_cases=[], no_nans=True),
DoubleGen(special_cases=[], no_nans=True),
string_gen, boolean_gen, date_gen, timestamp_gen]
byte_array_index_gen = ByteGen(min_val=-25, max_val=25, special_cases=[None])
short_array_index_gen = ShortGen(min_val=-25, max_val=25, special_cases=[None])
int_array_index_gen = IntegerGen(min_val=-25, max_val=25, special_cases=[None])
# include special case indexes that should still be valid indexes after the long is truncated.
# This is because Spark will truncate the long to an int, and we want to catch it if that ever changes.
# -4294967286 is 0xFFFFFFFF0000000A, but Python does not translate it the same way as Scala does,
# so I had to write it out manually (a small truncation sketch follows the long generator below).
long_array_index_gen = LongGen(min_val=-25, max_val=25, special_cases=[0x1111111100000000, -4294967286])
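# Illustrative sketch (added for clarity, not part of the original generators): what 32-bit
# truncation of a 64-bit index looks like in plain Python. The helper name below is hypothetical;
# the masking simply keeps the low 32 bits of the two's-complement representation.
def _to_int32_sketch(v):
    # keep the low 32 bits, then reinterpret them as a signed 32-bit value
    low = v & 0xFFFFFFFF
    return low - 0x100000000 if low >= 0x80000000 else low
assert _to_int32_sketch(-4294967286) == 10        # 0xFFFFFFFF0000000A keeps only 0x0000000A
assert _to_int32_sketch(0x1111111100000000) == 0  # the other special case truncates to zero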
array_index_gens = [byte_array_index_gen, short_array_index_gen, int_array_index_gen, long_array_index_gen]
@pytest.mark.parametrize('data_gen', array_item_test_gens, ids=idfn)
@pytest.mark.parametrize('index_gen', array_index_gens, ids=idfn)
def test_array_item(data_gen, index_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, data_gen, index_gen).selectExpr('a[b]'))
@pytest.mark.parametrize('data_gen', array_item_test_gens, ids=idfn)
def test_array_item_lit_ordinal(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, data_gen).selectExpr(
'a[CAST(0 as BYTE)]',
'a[CAST(1 as SHORT)]',
'a[null]',
'a[3]',
'a[CAST(50 as LONG)]',
'a[-1]',
'a[2147483648]',
'a[-2147483648]'))
# No need to test this for multiple data types for array. Only one is enough
@pytest.mark.skipif(not is_spark_33X() or is_databricks_runtime(), reason="'strictIndexOperator' is introduced from Spark 3.3.0 and removed in Spark 3.4.0 and DB11.3")
@pytest.mark.parametrize('strict_index_enabled', [True, False])
@pytest.mark.parametrize('index', [-2, 100, array_neg_index_gen, array_out_index_gen], ids=idfn)
def test_array_item_with_strict_index(strict_index_enabled, index):
message = "SparkArrayIndexOutOfBoundsException"
if isinstance(index, int):
test_df = lambda spark: unary_op_df(spark, ArrayGen(int_gen)).select(col('a')[index])
else:
test_df = lambda spark: two_col_df(spark, ArrayGen(int_gen), index).selectExpr('a[b]')
test_conf = copy_and_update(ansi_enabled_conf, {'spark.sql.ansi.strictIndexOperator': strict_index_enabled})
if strict_index_enabled:
assert_gpu_and_cpu_error(
lambda spark: test_df(spark).collect(),
conf=test_conf,
error_message=message)
else:
assert_gpu_and_cpu_are_equal_collect(
test_df,
conf=test_conf)
# No need to test this for multiple data types for array. Only one is enough, but with two kinds of invalid index.
@pytest.mark.parametrize('index', [-2, 100, array_neg_index_gen, array_out_index_gen], ids=idfn)
def test_array_item_ansi_fail_invalid_index(index):
message = "SparkArrayIndexOutOfBoundsException" if (is_databricks104_or_later() or is_spark_330_or_later()) else "java.lang.ArrayIndexOutOfBoundsException"
if isinstance(index, int):
test_func = lambda spark: unary_op_df(spark, ArrayGen(int_gen)).select(col('a')[index]).collect()
else:
test_func = lambda spark: two_col_df(spark, ArrayGen(int_gen), index).selectExpr('a[b]').collect()
assert_gpu_and_cpu_error(
test_func,
conf=ansi_enabled_conf,
error_message=message)
def test_array_item_ansi_not_fail_all_null_data():
assert_gpu_and_cpu_are_equal_collect(
lambda spark: three_col_df(spark, array_all_null_gen, array_neg_index_gen, array_out_index_gen).selectExpr(
'a[100]',
'a[-2]',
'a[b]',
'a[c]'),
conf=ansi_enabled_conf)
@pytest.mark.parametrize('data_gen', all_basic_gens + [
decimal_gen_32bit, decimal_gen_64bit, decimal_gen_128bit, binary_gen,
StructGen([['child0', StructGen([['child01', IntegerGen()]])], ['child1', string_gen], ['child2', float_gen]], nullable=False),
StructGen([['child0', byte_gen], ['child1', string_gen], ['child2', float_gen]], nullable=False)], ids=idfn)
def test_make_array(data_gen):
(s1, s2) = gen_scalars_for_sql(data_gen, 2, force_no_nulls=not isinstance(data_gen, NullGen))
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).selectExpr(
'array(null)',
'array(a, b)',
'array(b, a, null, {}, {})'.format(s1, s2),
'array(array(b, a, null, {}, {}), array(a), array(null))'.format(s1, s2)))
@pytest.mark.parametrize('data_gen', single_level_array_gens, ids=idfn)
def test_orderby_array_unique(data_gen):
assert_gpu_and_cpu_are_equal_sql(
lambda spark : append_unique_int_col_to_df(spark, unary_op_df(spark, data_gen)),
'array_table',
'select array_table.a, array_table.uniq_int from array_table order by uniq_int')
@pytest.mark.parametrize('data_gen', [ArrayGen(ArrayGen(short_gen, max_length=10), max_length=10),
ArrayGen(ArrayGen(string_gen, max_length=10), max_length=10)], ids=idfn)
def test_orderby_array_of_arrays(data_gen):
assert_gpu_and_cpu_are_equal_sql(
lambda spark : append_unique_int_col_to_df(spark, unary_op_df(spark, data_gen)),
'array_table',
'select array_table.a, array_table.uniq_int from array_table order by uniq_int')
@pytest.mark.parametrize('data_gen', [ArrayGen(StructGen([['child0', byte_gen],
['child1', string_gen],
['child2', float_gen]]))], ids=idfn)
def test_orderby_array_of_structs(data_gen):
assert_gpu_and_cpu_are_equal_sql(
lambda spark : append_unique_int_col_to_df(spark, unary_op_df(spark, data_gen)),
'array_table',
'select array_table.a, array_table.uniq_int from array_table order by uniq_int')
@pytest.mark.parametrize('data_gen', [byte_gen, short_gen, int_gen, long_gen,
float_gen, double_gen,
string_gen, boolean_gen, date_gen, timestamp_gen], ids=idfn)
def test_array_contains(data_gen):
arr_gen = ArrayGen(data_gen)
literal = gen_scalar(data_gen, force_no_nulls=True)
def get_input(spark):
return two_col_df(spark, arr_gen, data_gen)
assert_gpu_and_cpu_are_equal_collect(lambda spark: get_input(spark).select(
array_contains(array(lit(None)), col('b')),
array_contains(array(), col('b')),
array_contains(array(lit(literal), lit(literal)), col('b')),
array_contains(col('a'), literal.cast(data_gen.data_type)),
array_contains(col('a'), col('b')),
array_contains(col('a'), col('a')[5])))
@pytest.mark.parametrize('data_gen',
[FloatGen(special_cases=[(float('nan'), 20)]),
DoubleGen(special_cases=[(float('nan'), 20)])], ids=idfn)
def test_array_contains_for_nans(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, ArrayGen(data_gen), data_gen).select(
array_contains(col('a'), col('b')),
array_contains(col('a'), lit(float('nan')).cast(data_gen.data_type))))
@pytest.mark.parametrize('data_gen', array_item_test_gens, ids=idfn)
def test_array_element_at(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, data_gen, array_no_zero_index_gen).selectExpr(
'element_at(a, cast(NULL as int))',
'element_at(a, 1)',
'element_at(a, 30)',
'element_at(a, -1)',
'element_at(a, -30)',
'element_at(a, b)'))
# No need to test multiple data types for list data. Only one is enough.
@pytest.mark.parametrize('index', [100, array_out_index_gen], ids=idfn)
def test_array_element_at_ansi_fail_invalid_index(index):
message = "ArrayIndexOutOfBoundsException" if is_before_spark_330() else "SparkArrayIndexOutOfBoundsException"
if isinstance(index, int):
test_func = lambda spark: unary_op_df(spark, ArrayGen(int_gen)).select(
element_at(col('a'), index)).collect()
else:
test_func = lambda spark: two_col_df(spark, ArrayGen(int_gen), index).selectExpr(
'element_at(a, b)').collect()
# For 3.3.0+ strictIndexOperator should not affect element_at
# `strictIndexOperator` has been removed in Spark3.4+ and Databricks11.3+
test_conf = ansi_enabled_conf if (is_spark_340_or_later() or is_databricks113_or_later()) else \
copy_and_update(ansi_enabled_conf, {'spark.sql.ansi.strictIndexOperator': 'false'})
assert_gpu_and_cpu_error(
test_func,
conf=test_conf,
error_message=message)
def test_array_element_at_ansi_not_fail_all_null_data():
    # No exception when the index is zero but all the array rows are null
assert_gpu_and_cpu_are_equal_collect(
lambda spark: three_col_df(spark, array_all_null_gen, array_zero_index_gen, array_out_index_gen).selectExpr(
'element_at(a, 0)',
'element_at(a, b)',
'element_at(a, c)'),
conf=ansi_enabled_conf)
@pytest.mark.parametrize('index', [0, array_zero_index_gen], ids=idfn)
@pytest.mark.parametrize('ansi_enabled', [False, True], ids=idfn)
def test_array_element_at_zero_index_fail(index, ansi_enabled):
if is_spark_340_or_later():
message = "org.apache.spark.SparkRuntimeException: [INVALID_INDEX_OF_ZERO] The index 0 is invalid"
elif is_databricks113_or_later():
message = "org.apache.spark.SparkRuntimeException: [ELEMENT_AT_BY_INDEX_ZERO] The index 0 is invalid"
else:
message = "SQL array indices start at 1"
if isinstance(index, int):
test_func = lambda spark: unary_op_df(spark, ArrayGen(int_gen)).select(
element_at(col('a'), index)).collect()
else:
test_func = lambda spark: two_col_df(spark, ArrayGen(int_gen), index).selectExpr(
'element_at(a, b)').collect()
assert_gpu_and_cpu_error(
test_func,
conf={'spark.sql.ansi.enabled':ansi_enabled},
error_message=message)
@pytest.mark.parametrize('data_gen', array_gens_sample, ids=idfn)
def test_array_transform(data_gen):
def do_it(spark):
columns = ['a', 'b',
'transform(a, item -> item) as ident',
'transform(a, item -> null) as n',
'transform(a, item -> 1) as one',
'transform(a, (item, index) -> index) as indexed',
'transform(a, item -> b) as b_val',
'transform(a, (item, index) -> index - b) as math_on_index']
element_type = data_gen.data_type.elementType
# decimal types can grow too large so we are avoiding those here for now
if isinstance(element_type, IntegralType):
columns.extend(['transform(a, item -> item + 1) as add',
'transform(a, item -> item + item) as mul',
'transform(a, (item, index) -> item + index + b) as all_add'])
if isinstance(element_type, StringType):
columns.extend(['transform(a, entry -> concat(entry, "-test")) as con'])
if isinstance(element_type, ArrayType):
columns.extend(['transform(a, entry -> transform(entry, sub_entry -> 1)) as sub_one',
'transform(a, (entry, index) -> transform(entry, (sub_entry, sub_index) -> index)) as index_as_sub_entry',
'transform(a, (entry, index) -> transform(entry, (sub_entry, sub_index) -> index + sub_index)) as index_add_sub_index',
'transform(a, (entry, index) -> transform(entry, (sub_entry, sub_index) -> index + sub_index + b)) as add_indexes_and_value'])
return two_col_df(spark, data_gen, byte_gen).selectExpr(columns)
assert_gpu_and_cpu_are_equal_collect(do_it)
# TODO add back in string_gen when https://github.com/rapidsai/cudf/issues/9156 is fixed
array_min_max_gens = [byte_gen, short_gen, int_gen, long_gen, float_gen, double_gen,
string_gen, boolean_gen, date_gen, timestamp_gen, null_gen] + decimal_gens
@pytest.mark.parametrize('data_gen', array_min_max_gens, ids=idfn)
def test_array_min_max(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, ArrayGen(data_gen)).selectExpr(
'array_min(a)', 'array_max(a)'))
@pytest.mark.parametrize('data_gen', [ArrayGen(SetValuesGen(datatype, [math.nan, None])) for datatype in [FloatType(), DoubleType()]], ids=idfn)
def test_array_min_max_all_nans(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr(
'array_min(a)', 'array_max(a)'))
@pytest.mark.parametrize('data_gen', [ArrayGen(int_gen, all_null=True)], ids=idfn)
def test_array_min_max_all_nulls(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr(
'array_min(a)', 'array_max(a)'))
@pytest.mark.parametrize('data_gen', decimal_gens, ids=idfn)
def test_array_concat_decimal(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : debug_df(unary_op_df(spark, ArrayGen(data_gen)).selectExpr(
'concat(a, a)')))
@pytest.mark.parametrize('data_gen', orderable_gens + nested_gens_sample, ids=idfn)
def test_array_repeat_with_count_column(data_gen):
cnt_gen = IntegerGen(min_val=-5, max_val=5, special_cases=[])
cnt_not_null_gen = IntegerGen(min_val=-5, max_val=5, special_cases=[], nullable=False)
gen = StructGen(
[('elem', data_gen), ('cnt', cnt_gen), ('cnt_nn', cnt_not_null_gen)], nullable=False)
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, gen).selectExpr(
'array_repeat(elem, cnt)',
'array_repeat(elem, cnt_nn)',
'array_repeat("abc", cnt)'))
@pytest.mark.parametrize('data_gen', orderable_gens + nested_gens_sample, ids=idfn)
def test_array_repeat_with_count_scalar(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, data_gen).selectExpr(
'array_repeat(a, 3)',
'array_repeat(a, 1)',
'array_repeat(a, 0)',
'array_repeat(a, -2)',
'array_repeat("abc", 2)',
'array_repeat("abc", 0)',
'array_repeat("abc", -1)'))
# We add in several types of processing for foldable functions because the output
# can be different types.
@pytest.mark.parametrize('query', [
'sequence(1, 5) as s',
'array(1, 2, 3) as a',
'array(sequence(1, 5), sequence(2, 7)) as a_a',
'array(map(1, "a", 2, "b")) as a_m',
'array(map_from_arrays(sequence(1, 2), array("1", "2"))) as a_m',
'array(struct(1 as a, 2 as b), struct(3 as a, 4 as b)) as a_s',
'array(struct(1 as a, sequence(1, 5) as b), struct(3 as a, sequence(2, 7) as b)) as a_s_a',
'array(array(struct(1 as a, 2 as b), struct(3 as a, 4 as b))) as a_a_s'], ids=idfn)
def test_sql_array_scalars(query):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.sql('SELECT {}'.format(query)))
@pytest.mark.parametrize('data_gen', all_basic_gens + nested_gens_sample, ids=idfn)
def test_get_array_struct_fields(data_gen):
array_struct_gen = ArrayGen(
StructGen([['child0', data_gen], ['child1', int_gen]]),
max_length=6)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, array_struct_gen).selectExpr('a.child0'))
@pytest.mark.parametrize('data_gen', [ArrayGen(string_gen), ArrayGen(int_gen)])
@pytest.mark.parametrize('threeVL', [
pytest.param(False, id='3VL:off'),
pytest.param(True, id='3VL:on'),
])
def test_array_exists(data_gen, threeVL):
def do_it(spark):
columns = ['a']
element_type = data_gen.data_type.elementType
if isinstance(element_type, IntegralType):
columns.extend([
'exists(a, item -> item % 2 = 0) as exists_even',
'exists(a, item -> item < 0) as exists_negative',
'exists(a, item -> item >= 0) as exists_non_negative'
])
if isinstance(element_type, StringType):
columns.extend(['exists(a, entry -> length(entry) > 5) as exists_longer_than_5'])
return unary_op_df(spark, data_gen).selectExpr(columns)
assert_gpu_and_cpu_are_equal_collect(do_it, conf= {
'spark.sql.legacy.followThreeValuedLogicInArrayExists' : threeVL,
})
array_zips_gen = array_gens_sample + [ArrayGen(map_string_string_gen[0], max_length=5),
ArrayGen(BinaryGen(max_length=5), max_length=5)]
@pytest.mark.parametrize('data_gen', array_zips_gen, ids=idfn)
def test_arrays_zip(data_gen):
gen = StructGen(
[('a', data_gen), ('b', data_gen), ('c', data_gen), ('d', data_gen)], nullable=False)
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, gen).selectExpr(
'arrays_zip(a, b, c, d)',
'arrays_zip(a, b, c)',
'arrays_zip(a, b, array())',
'arrays_zip(a)')
)
def test_arrays_zip_corner_cases():
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, ArrayGen(int_gen), length=100).selectExpr(
'arrays_zip()',
'arrays_zip(null)',
'arrays_zip(null, null)',
'arrays_zip(null, a)',
'arrays_zip(a, array())',
'arrays_zip(a, array(), array(1, 2))',
'arrays_zip(a, array(1, 2, 4, 3), array(5))')
)
def test_array_max_q1():
def q1(spark):
return spark.sql('SELECT ARRAY_MAX(TRANSFORM(ARRAY_REPEAT(STRUCT(1, 2), 0), s -> s.col2))')
assert_gpu_and_cpu_are_equal_collect(q1)
@incompat
@pytest.mark.parametrize('data_gen', no_neg_zero_all_basic_gens + decimal_gens, ids=idfn)
@pytest.mark.skipif(is_before_spark_313() or is_spark_330() or is_spark_330cdh(), reason="NaN equality is only handled in Spark 3.1.3+ and SPARK-39976 issue with null and ArrayIntersect in Spark 3.3.0")
def test_array_intersect(data_gen):
gen = StructGen(
[('a', ArrayGen(data_gen, nullable=True)),
('b', ArrayGen(data_gen, nullable=True))],
nullable=False)
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, gen).selectExpr(
'sort_array(array_intersect(a, b))',
'sort_array(array_intersect(b, a))',
'sort_array(array_intersect(a, array()))',
'sort_array(array_intersect(array(), b))',
'sort_array(array_intersect(a, a))',
'sort_array(array_intersect(array(1), array(1, 2, 3)))',
'sort_array(array_intersect(array(), array(1, 2, 3)))')
)
@incompat
@pytest.mark.parametrize('data_gen', no_neg_zero_all_basic_gens_no_nulls + decimal_gens_no_nulls, ids=idfn)
@pytest.mark.skipif(not is_spark_330() and not is_spark_330cdh(), reason="SPARK-39976 issue with null and ArrayIntersect in Spark 3.3.0")
def test_array_intersect_spark330(data_gen):
gen = StructGen(
[('a', ArrayGen(data_gen, nullable=True)),
('b', ArrayGen(data_gen, nullable=True))],
nullable=False)
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, gen).selectExpr(
'sort_array(array_intersect(a, b))',
'sort_array(array_intersect(b, a))',
'sort_array(array_intersect(a, array()))',
'sort_array(array_intersect(array(), b))',
'sort_array(array_intersect(a, a))',
'sort_array(array_intersect(array(1), array(1, 2, 3)))',
'sort_array(array_intersect(array(), array(1, 2, 3)))')
)
@incompat
@pytest.mark.parametrize('data_gen', no_neg_zero_all_basic_gens_no_nans + decimal_gens, ids=idfn)
@pytest.mark.skipif(not is_before_spark_313(), reason="NaN equality is only handled in Spark 3.1.3+")
def test_array_intersect_before_spark313(data_gen):
gen = StructGen(
[('a', ArrayGen(data_gen, nullable=True)),
('b', ArrayGen(data_gen, nullable=True))],
nullable=False)
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, gen).selectExpr(
'sort_array(array_intersect(a, b))',
'sort_array(array_intersect(b, a))',
'sort_array(array_intersect(a, array()))',
'sort_array(array_intersect(array(), b))',
'sort_array(array_intersect(a, a))',
'sort_array(array_intersect(array(1), array(1, 2, 3)))',
'sort_array(array_intersect(array(), array(1, 2, 3)))')
)
@incompat
@pytest.mark.parametrize('data_gen', no_neg_zero_all_basic_gens + decimal_gens, ids=idfn)
@pytest.mark.skipif(is_before_spark_313(), reason="NaN equality is only handled in Spark 3.1.3+")
def test_array_union(data_gen):
gen = StructGen(
[('a', ArrayGen(data_gen, nullable=True)),
('b', ArrayGen(data_gen, nullable=True))],
nullable=False)
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, gen).selectExpr(
'sort_array(array_union(a, b))',
'sort_array(array_union(b, a))',
'sort_array(array_union(a, array()))',
'sort_array(array_union(array(), b))',
'sort_array(array_union(a, a))',
'sort_array(array_union(array(1), array(1, 2, 3)))',
'sort_array(array_union(array(), array(1, 2, 3)))')
)
@incompat
@pytest.mark.parametrize('data_gen', no_neg_zero_all_basic_gens_no_nans + decimal_gens, ids=idfn)
@pytest.mark.skipif(not is_before_spark_313(), reason="NaN equality is only handled in Spark 3.1.3+")
def test_array_union_before_spark313(data_gen):
gen = StructGen(
[('a', ArrayGen(data_gen, nullable=True)),
('b', ArrayGen(data_gen, nullable=True))],
nullable=False)
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, gen).selectExpr(
'sort_array(array_union(a, b))',
'sort_array(array_union(b, a))',
'sort_array(array_union(a, array()))',
'sort_array(array_union(array(), b))',
'sort_array(array_union(a, a))',
'sort_array(array_union(array(1), array(1, 2, 3)))',
'sort_array(array_union(array(), array(1, 2, 3)))')
)
@incompat
@pytest.mark.parametrize('data_gen', no_neg_zero_all_basic_gens + decimal_gens, ids=idfn)
@pytest.mark.skipif(is_before_spark_313(), reason="NaN equality is only handled in Spark 3.1.3+")
def test_array_except(data_gen):
gen = StructGen(
[('a', ArrayGen(data_gen, nullable=True)),
('b', ArrayGen(data_gen, nullable=True))],
nullable=False)
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, gen).selectExpr(
'sort_array(array_except(a, b))',
'sort_array(array_except(b, a))',
'sort_array(array_except(a, array()))',
'sort_array(array_except(array(), b))',
'sort_array(array_except(a, a))',
'sort_array(array_except(array(1, 2, 3), array(1, 2, 3)))',
'sort_array(array_except(array(1), array(1, 2, 3)))')
)
@incompat
@pytest.mark.parametrize('data_gen', no_neg_zero_all_basic_gens_no_nans + decimal_gens, ids=idfn)
@pytest.mark.skipif(not is_before_spark_313(), reason="NaN equality is only handled in Spark 3.1.3+")
def test_array_except_before_spark313(data_gen):
gen = StructGen(
[('a', ArrayGen(data_gen, nullable=True)),
('b', ArrayGen(data_gen, nullable=True))],
nullable=False)
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, gen).selectExpr(
'sort_array(array_except(a, b))',
'sort_array(array_except(b, a))',
'sort_array(array_except(a, array()))',
'sort_array(array_except(array(), b))',
'sort_array(array_except(a, a))',
'sort_array(array_except(array(1, 2, 3), array(1, 2, 3)))',
'sort_array(array_except(array(1), array(1, 2, 3)))')
)
@incompat
@pytest.mark.parametrize('data_gen', no_neg_zero_all_basic_gens + decimal_gens, ids=idfn)
@pytest.mark.skipif(is_before_spark_313(), reason="NaN equality is only handled in Spark 3.1.3+")
def test_arrays_overlap(data_gen):
gen = StructGen(
[('a', ArrayGen(data_gen, nullable=True)),
('b', ArrayGen(data_gen, nullable=True))],
nullable=False)
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, gen).selectExpr(
'arrays_overlap(a, b)',
'arrays_overlap(b, a)',
'arrays_overlap(a, array())',
'arrays_overlap(array(), b)',
'arrays_overlap(a, a)',
'arrays_overlap(array(1), array(1, 2))',
'arrays_overlap(array(3, 4), array(1, 2))',
'arrays_overlap(array(), array(1, 2))')
)
@incompat
@pytest.mark.parametrize('data_gen', no_neg_zero_all_basic_gens_no_nans + decimal_gens, ids=idfn)
@pytest.mark.skipif(not is_before_spark_313(), reason="NaN equality is only handled in Spark 3.1.3+")
def test_arrays_overlap_before_spark313(data_gen):
gen = StructGen(
[('a', ArrayGen(data_gen, nullable=True)),
('b', ArrayGen(data_gen, nullable=True))],
nullable=False)
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, gen).selectExpr(
'arrays_overlap(a, b)',
'arrays_overlap(b, a)',
'arrays_overlap(a, array())',
'arrays_overlap(array(), b)',
'arrays_overlap(a, a)',
'arrays_overlap(array(1), array(1, 2))',
'arrays_overlap(array(3, 4), array(1, 2))',
'arrays_overlap(array(), array(1, 2))')
)
@pytest.mark.parametrize('data_gen', [ByteGen(special_cases=[-10, 0, 10]), ShortGen(special_cases=[-10, 0, 10]),
IntegerGen(special_cases=[-10, 0, 10]), LongGen(special_cases=[-10, 0, 10])], ids=idfn)
def test_array_remove_scalar(data_gen):
gen = StructGen(
[('a', ArrayGen(data_gen, nullable=True))],
nullable=False)
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, gen).selectExpr(
'array_remove(a, -10)',
'array_remove(a, 0)',
'array_remove(a, 10)')
)
@pytest.mark.parametrize('data_gen', [ByteGen(special_cases=[5]), ShortGen(special_cases=[5]),
IntegerGen(special_cases=[5]), LongGen(special_cases=[5]),
FloatGen(special_cases=_non_neg_zero_float_special_cases + [-0.0]),
DoubleGen(special_cases=_non_neg_zero_double_special_cases + [-0.0]),
StringGen(pattern='[0-9]{1,5}'), boolean_gen, date_gen, timestamp_gen] + decimal_gens, ids=idfn)
def test_array_remove(data_gen):
gen = StructGen(
[('a', ArrayGen(data_gen, nullable=True)),
('b', data_gen)],
nullable=False)
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, gen).selectExpr(
'array_remove(a, b)',
'array_remove(a, null)')
)
@pytest.mark.parametrize('data_gen', [ArrayGen(sub_gen) for sub_gen in array_gens_sample], ids=idfn)
def test_flatten_array(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, data_gen).selectExpr('flatten(a)')
)
| spark-rapids-branch-23.10 | integration_tests/src/main/python/array_test.py |
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_gpu_and_cpu_are_equal_collect, assert_gpu_and_cpu_error
from data_gen import *
from pyspark.sql.types import *
from spark_session import with_cpu_session
from orc_test import reader_opt_confs
def create_orc(data_gen_list, data_path):
# generate ORC dataframe, and dump it to local file 'data_path'
with_cpu_session(
lambda spark: gen_df(spark, data_gen_list).write.orc(data_path)
)
@pytest.mark.parametrize('reader_confs', reader_opt_confs, ids=idfn)
@pytest.mark.parametrize('v1_enabled_list', ["", "orc"])
@pytest.mark.parametrize('to_type', ['boolean', 'tinyint', 'smallint', 'int', 'bigint'])
def test_casting_among_integer_types(spark_tmp_path, reader_confs, v1_enabled_list, to_type):
# cast integral types to another integral types
int_gens = [boolean_gen] + integral_gens
gen_list = [('c' + str(i), gen) for i, gen in enumerate(int_gens)]
data_path = spark_tmp_path + '/ORC_DATA'
create_orc(gen_list, data_path)
    # generate a schema string like "c0 to_type, c1 to_type, ..., c4 to_type" (see the worked example after this test)
schema_str = " {}, ".join([x[0] for x in gen_list]) + " {}"
schema_str = schema_str.format(*([to_type] * len(gen_list)))
all_confs = copy_and_update(reader_confs,
{'spark.sql.sources.useV1SourceList': v1_enabled_list})
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.read.schema(schema_str).orc(data_path),
conf=all_confs)
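# Worked example (an illustrative sketch, not used by the test above; the _example_* names are
# hypothetical): how the join/format idiom above expands for a small column list and target type.
_example_cols = ['c0', 'c1', 'c2']
_example_schema = (" {}, ".join(_example_cols) + " {}").format(*(['int'] * len(_example_cols)))
assert _example_schema == "c0 int, c1 int, c2 int"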
@pytest.mark.parametrize('to_type', ['float', 'double', 'string', 'timestamp'])
def test_casting_from_integer(spark_tmp_path, to_type):
orc_path = spark_tmp_path + '/orc_cast_integer'
    # The Python 'datetime' module only supports years below 10000, so we cap the Long type max
    # at 1e11. If the long value is out of this range, pytest will throw an exception
    # (see the arithmetic sketch after this test).
data_gen = [('boolean_col', boolean_gen), ('tinyint_col', byte_gen),
('smallint_col', ShortGen(min_val=BYTE_MAX + 1)),
('int_col', IntegerGen(min_val=SHORT_MAX + 1)),
('bigint_col', LongGen(min_val=INT_MAX + 1, max_val=int(1e11))),
('negint_col', IntegerGen(max_val=-1))]
create_orc(data_gen, orc_path)
schema_str = "boolean_col {}, tinyint_col {}, smallint_col {}, int_col {}, bigint_col {}, negint_col {}"
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.read.schema(
schema_str.format(*([to_type] * len(data_gen)))).orc(orc_path)
)
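# Arithmetic sketch (illustrative only, spelling out the comment in the test above): 1e11 seconds
# since the epoch still lands on a year the Python 'datetime' module can represent.
assert int(1e11) < (10000 - 1970) * 365 * 24 * 3600  # 1e11 < ~2.53e11 seconds (~year 10000)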
@pytest.mark.parametrize('overflow_long_gen', [LongGen(min_val=int(1e16)),
LongGen(max_val=int(-1e16))])
@pytest.mark.parametrize('to_type', ['timestamp'])
def test_casting_from_overflow_long(spark_tmp_path, overflow_long_gen,to_type):
    # Timestamp (microseconds) is actually an int64. When casting a long (int64) to timestamp,
    # we need to multiply by 1e6 (or 1e3), which may overflow. This test checks
    # whether the resulting 'ArithmeticException' is caught (see the arithmetic sketch after this test).
orc_path = spark_tmp_path + '/orc_cast_overflow_long'
create_orc([('long_column', overflow_long_gen)], orc_path)
schema_str = "long_column {}".format(to_type)
assert_gpu_and_cpu_error(
df_fun=lambda spark: spark.read.schema(schema_str).orc(orc_path).collect(),
conf={},
error_message="ArithmeticException"
)
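# Arithmetic sketch (illustrative only): why the generators above overflow. Converting seconds to
# microseconds multiplies by 1e6, which pushes a value around 1e16 past the signed 64-bit limit.
assert int(1e16) * 1_000_000 > 2**63 - 1  # 1e22 microseconds cannot fit in an INT64 timestamp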
# When casting float/double to double/float, we need to compare the GPU values with the CPU values
# in an approximate way.
@pytest.mark.approximate_float
@pytest.mark.parametrize('to_type', ['float', 'double', 'boolean', 'tinyint', 'smallint', 'int', 'bigint'])
def test_casting_from_float_and_double(spark_tmp_path, to_type):
orc_path = spark_tmp_path + '/orc_casting_from_float_and_double'
data_gen = [('float_column', float_gen), ('double_column', double_gen)]
create_orc(data_gen, orc_path)
schema_str = "float_column {}, double_column {}".format(to_type, to_type)
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.read.schema(schema_str).orc(orc_path)
)
@pytest.mark.parametrize('data_gen', [DoubleGen(max_exp=32, special_cases=None),
DoubleGen(max_exp=32, special_cases=[8.88e9, 9.99e10, 1.314e11])])
def test_casting_from_double_to_timestamp(spark_tmp_path, data_gen):
    # ORC assumes the original double values are in seconds, so we need to convert them to
    # timestamps (INT64 in microseconds).
    #
    # The 'datetime' module in Python requires 0 <= year < 10000, and the UTC timestamp starts from 1970/1/1.
    # That is, the minimum valid negative number is -1970 * 365 * 24 * 3600 = -62125920000 (about -6e10),
    # so we set max_exp = 32 in DoubleGen to keep the generated values safely inside that range.
    #
    # The maximum valid positive number is (10000 - 1970) * 365 * 24 * 3600 = 253234080000 (about 2.5e11),
    # so we add some special cases from roughly 2^33 up to 2^37 (8e9 ~ 1e11).
    #
    # In DoubleGen, special_cases=None will generate some NaN and INF corner cases.
    # (A short arithmetic sketch of these bounds follows this test.)
orc_path = spark_tmp_path + '/orc_casting_from_double_to_timestamp'
with_cpu_session(
lambda spark: unary_op_df(spark, data_gen).write.orc(orc_path)
)
    # the name of the unique column is 'a'; cast it to timestamp type
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.read.schema("a timestamp").orc(orc_path)
)
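# Arithmetic sketch (illustrative only; the underscore names are hypothetical) restating the bounds
# from the comment in the test above: the rough limits on seconds-since-epoch that still map to
# years the 'datetime' module accepts.
_min_valid_negative = -1970 * 365 * 24 * 3600            # about -6.2e10 seconds
_max_valid_positive = (10000 - 1970) * 365 * 24 * 3600   # about 2.5e11 seconds
assert 2**35 < -_min_valid_negative < 2**36
assert 2**37 < _max_valid_positive < 2**38
assert 2**32 < -_min_valid_negative  # the DoubleGen cap of max_exp=32 sits well below this magnitude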
def test_casting_from_overflow_double_to_timestamp(spark_tmp_path):
orc_path = spark_tmp_path + '/orc_casting_from_overflow_double_to_timestamp'
with_cpu_session(
lambda spark: unary_op_df(spark, DoubleGen(min_exp=38)).write.orc(orc_path)
)
assert_gpu_and_cpu_error(
df_fun=lambda spark: spark.read.schema("a timestamp").orc(orc_path).collect(),
conf={},
error_message="ArithmeticException"
)
@pytest.mark.parametrize('data_gen', [DecimalGen(precision=9, scale=2),
DecimalGen(precision=18, scale=4),
DecimalGen(precision=38, scale=6)])
@pytest.mark.parametrize('read_type', ["DECIMAL(9,2)",
"DECIMAL(18,4)",
"DECIMAL(38,6)"])
@pytest.mark.parametrize('ansi_mode', ["false", "true"])
def test_casting_decimal_to_decimal(spark_tmp_path, data_gen, read_type, ansi_mode):
"""
Tests that ORC files with decimal columns written with one set of
precision and scale are readable with different precision/scale.
"""
orc_path = spark_tmp_path + '/orc_casting_from_decimal_to_decimal'
with_cpu_session(
lambda spark: unary_op_df(spark, data_gen).write.orc(orc_path)
)
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.read.schema("a " + read_type).orc(orc_path),
conf={'spark.sql.ansi.enabled': ansi_mode}
)
| spark-rapids-branch-23.10 | integration_tests/src/main/python/orc_cast_test.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_cpu_and_gpu_are_equal_collect_with_capture, assert_gpu_and_cpu_are_equal_collect
from conftest import spark_tmp_table_factory
from data_gen import *
from marks import ignore_order, allow_non_gpu
from spark_session import is_before_spark_320, with_cpu_session, is_before_spark_312, is_databricks_runtime, is_databricks113_or_later
def create_dim_table(table_name, table_format, length=500):
def fn(spark):
df = gen_df(spark, [
('key', IntegerGen(nullable=False, min_val=0, max_val=9, special_cases=[])),
('skey', IntegerGen(nullable=False, min_val=0, max_val=4, special_cases=[])),
('ex_key', IntegerGen(nullable=False, min_val=0, max_val=3, special_cases=[])),
('value', int_gen),
('filter', RepeatSeqGen(
IntegerGen(min_val=0, max_val=length, special_cases=[]), length=length // 20))
], length)
df.cache()
df.write.format(table_format) \
.mode("overwrite") \
.saveAsTable(table_name)
return df.select('filter').first()[0]
return with_cpu_session(fn)
def create_fact_table(table_name, table_format, length=2000):
def fn(spark):
df = gen_df(spark, [
('key', IntegerGen(nullable=False, min_val=0, max_val=9, special_cases=[])),
('skey', IntegerGen(nullable=False, min_val=0, max_val=4, special_cases=[])),
# ex_key is not a partition column
('ex_key', IntegerGen(nullable=False, min_val=0, max_val=3, special_cases=[])),
('value', int_gen)], length)
df.write.format(table_format) \
.mode("overwrite") \
.partitionBy('key', 'skey') \
.saveAsTable(table_name)
with_cpu_session(fn)
_dpp_conf = [('spark.sql.optimizer.dynamicPartitionPruning.enabled', 'true')]
_exchange_reuse_conf = _dpp_conf + [
('spark.sql.optimizer.dynamicPartitionPruning.reuseBroadcastOnly', 'true'),
('spark.sql.exchange.reuse', 'true')
]
_bypass_conf = _dpp_conf + [
('spark.sql.optimizer.dynamicPartitionPruning.reuseBroadcastOnly', 'true'),
('spark.sql.exchange.reuse', 'false')
]
_no_exchange_reuse_conf = _dpp_conf + [
('spark.sql.optimizer.dynamicPartitionPruning.reuseBroadcastOnly', 'false'),
('spark.sql.exchange.reuse', 'false')
]
_dpp_fallback_conf = _dpp_conf + [
('spark.sql.optimizer.dynamicPartitionPruning.reuseBroadcastOnly', 'false'),
('spark.sql.exchange.reuse', 'false'),
('spark.sql.optimizer.dynamicPartitionPruning.useStats', 'false'),
('spark.sql.optimizer.dynamicPartitionPruning.fallbackFilterRatio', '0'),
]
_statements = [
'''
SELECT fact.key, sum(fact.value)
FROM {0} fact
JOIN {1} dim
ON fact.key = dim.key
WHERE dim.filter = {2} AND fact.value > 0
GROUP BY fact.key
''',
'''
SELECT f.key, sum(f.value)
FROM (SELECT *, struct(key, skey) AS keys FROM {0} fact) f
JOIN (SELECT *, struct(key, skey) AS keys FROM {1} dim) d
ON f.keys = d.keys
WHERE d.filter = {2}
GROUP BY f.key
''',
'''
SELECT fact.key, fact.skey, sum(fact.value)
FROM {0} fact
JOIN {1} dim
ON fact.key = dim.key AND fact.skey = dim.skey
WHERE dim.filter = {2}
GROUP BY fact.key, fact.skey
''',
'''
SELECT fact.key, fact.skey, fact.ex_key, sum(fact.value)
FROM {0} fact
JOIN {1} dim
ON fact.key = dim.key AND fact.skey = dim.skey AND fact.ex_key = dim.ex_key
WHERE dim.filter = {2}
GROUP BY fact.key, fact.skey, fact.ex_key
''',
# This query checks the pattern of reused broadcast subquery: ReusedSubquery(SubqueryBroadcast(...))
# https://github.com/NVIDIA/spark-rapids/issues/4625
"""
SELECT key, max(value)
FROM (
SELECT fact.key as key, fact.value as value
FROM {0} fact
JOIN {1} dim
ON fact.key = dim.key
WHERE dim.filter = {2}
UNION ALL
SELECT fact.key as key, fact.value as value
FROM {0} fact
JOIN {1} dim
ON fact.key = dim.key
WHERE dim.filter = {2}
)
GROUP BY key
""",
'''
WITH fact_table AS (
SELECT fact.key as key, sum(fact.value) as value
FROM {0} fact
WHERE fact.value > 0
GROUP BY fact.key
ORDER BY fact.key
),
dim_table AS (
SELECT dim.key as key, dim.value as value, dim.filter as filter
FROM {1} dim
WHERE ex_key = 3
ORDER BY dim.key
)
SELECT key, max(value)
FROM (
SELECT f.key as key, f.value as value
FROM fact_table f
JOIN dim_table d
ON f.key = d.key
WHERE d.filter = {2}
UNION ALL
SELECT f.key as key, f.value as value
FROM fact_table f
JOIN dim_table d
ON f.key = d.key
WHERE d.filter = {2}
)
GROUP BY key
'''
]
# When BroadcastExchangeExec is available on the filtering side and it can be reused:
# DynamicPruningExpression(InSubqueryExec(value, GpuSubqueryBroadcastExec)))
@ignore_order
@pytest.mark.parametrize('store_format', ['parquet', 'orc'], ids=idfn)
@pytest.mark.parametrize('s_index', list(range(len(_statements))), ids=idfn)
@pytest.mark.parametrize('aqe_enabled', [
'false',
pytest.param('true', marks=pytest.mark.skipif(is_before_spark_320() and not is_databricks_runtime(),
reason='Only in Spark 3.2.0+ AQE and DPP can be both enabled'))
], ids=idfn)
def test_dpp_reuse_broadcast_exchange(spark_tmp_table_factory, store_format, s_index, aqe_enabled):
fact_table, dim_table = spark_tmp_table_factory.get(), spark_tmp_table_factory.get()
create_fact_table(fact_table, store_format, length=10000)
filter_val = create_dim_table(dim_table, store_format, length=2000)
statement = _statements[s_index].format(fact_table, dim_table, filter_val)
if is_databricks113_or_later() and aqe_enabled == 'true':
# SubqueryBroadcastExec is unoptimized in Databricks 11.3 with EXECUTOR_BROADCAST
# See https://github.com/NVIDIA/spark-rapids/issues/7425
exist_classes='DynamicPruningExpression,SubqueryBroadcastExec,ReusedExchangeExec'
else:
exist_classes='DynamicPruningExpression,GpuSubqueryBroadcastExec,ReusedExchangeExec'
assert_cpu_and_gpu_are_equal_collect_with_capture(
lambda spark: spark.sql(statement),
# The existence of GpuSubqueryBroadcastExec indicates the reuse works on the GPU
exist_classes,
conf=dict(_exchange_reuse_conf + [('spark.sql.adaptive.enabled', aqe_enabled)]))
# The SubqueryBroadcast can work on the GPU even if the scan that holds it falls back to the CPU.
@ignore_order
@pytest.mark.allow_non_gpu('FileSourceScanExec')
def test_dpp_reuse_broadcast_exchange_cpu_scan(spark_tmp_table_factory):
fact_table, dim_table = spark_tmp_table_factory.get(), spark_tmp_table_factory.get()
create_fact_table(fact_table, 'parquet', length=10000)
filter_val = create_dim_table(dim_table, 'parquet', length=2000)
statement = _statements[0].format(fact_table, dim_table, filter_val)
assert_cpu_and_gpu_are_equal_collect_with_capture(
lambda spark: spark.sql(statement),
# The existence of GpuSubqueryBroadcastExec indicates the reuse works on the GPU
exist_classes='FileSourceScanExec,GpuSubqueryBroadcastExec,ReusedExchangeExec',
conf=dict(_exchange_reuse_conf + [
('spark.sql.adaptive.enabled', 'false'),
('spark.rapids.sql.format.parquet.read.enabled', 'false')]))
# When BroadcastExchange is not available and non-broadcast DPPs are forbidden, Spark will bypass it:
# DynamicPruningExpression(Literal.TrueLiteral)
@ignore_order
@pytest.mark.parametrize('store_format', ['parquet', 'orc'], ids=idfn)
@pytest.mark.parametrize('s_index', list(range(len(_statements))), ids=idfn)
@pytest.mark.parametrize('aqe_enabled', [
'false',
pytest.param('true', marks=pytest.mark.skipif(is_before_spark_320() and not is_databricks_runtime(),
reason='Only in Spark 3.2.0+ AQE and DPP can be both enabled'))
], ids=idfn)
def test_dpp_bypass(spark_tmp_table_factory, store_format, s_index, aqe_enabled):
fact_table, dim_table = spark_tmp_table_factory.get(), spark_tmp_table_factory.get()
create_fact_table(fact_table, store_format)
filter_val = create_dim_table(dim_table, store_format)
statement = _statements[s_index].format(fact_table, dim_table, filter_val)
assert_cpu_and_gpu_are_equal_collect_with_capture(
lambda spark: spark.sql(statement),
# Bypass with a true literal, if we can not reuse broadcast exchange.
exist_classes='DynamicPruningExpression',
non_exist_classes='SubqueryExec,SubqueryBroadcastExec',
conf=dict(_bypass_conf + [('spark.sql.adaptive.enabled', aqe_enabled)]))
# When BroadcastExchange is not available, but it is still worthwhile to run DPP,
# then Spark will plan an extra Aggregate to collect filtering values:
# DynamicPruningExpression(InSubqueryExec(value, SubqueryExec(Aggregate(...))))
@ignore_order
@pytest.mark.parametrize('store_format', ['parquet', 'orc'], ids=idfn)
@pytest.mark.parametrize('s_index', list(range(len(_statements))), ids=idfn)
@pytest.mark.parametrize('aqe_enabled', [
'false',
pytest.param('true', marks=pytest.mark.skipif(is_before_spark_320() and not is_databricks_runtime(),
reason='Only in Spark 3.2.0+ AQE and DPP can be both enabled'))
], ids=idfn)
def test_dpp_via_aggregate_subquery(spark_tmp_table_factory, store_format, s_index, aqe_enabled):
fact_table, dim_table = spark_tmp_table_factory.get(), spark_tmp_table_factory.get()
create_fact_table(fact_table, store_format)
filter_val = create_dim_table(dim_table, store_format)
statement = _statements[s_index].format(fact_table, dim_table, filter_val)
assert_cpu_and_gpu_are_equal_collect_with_capture(
lambda spark: spark.sql(statement),
# SubqueryExec appears if we plan extra subquery for DPP
exist_classes='DynamicPruningExpression,SubqueryExec',
conf=dict(_no_exchange_reuse_conf + [('spark.sql.adaptive.enabled', aqe_enabled)]))
# When BroadcastExchange is not available, Spark will skip DPP if there is no potential benefit
@ignore_order
@pytest.mark.parametrize('store_format', ['parquet', 'orc'], ids=idfn)
@pytest.mark.parametrize('s_index', list(range(len(_statements))), ids=idfn)
@pytest.mark.parametrize('aqe_enabled', [
'false',
pytest.param('true', marks=pytest.mark.skipif(is_before_spark_320() and not is_databricks_runtime(),
reason='Only in Spark 3.2.0+ AQE and DPP can be both enabled'))
], ids=idfn)
def test_dpp_skip(spark_tmp_table_factory, store_format, s_index, aqe_enabled):
fact_table, dim_table = spark_tmp_table_factory.get(), spark_tmp_table_factory.get()
create_fact_table(fact_table, store_format)
filter_val = create_dim_table(dim_table, store_format)
statement = _statements[s_index].format(fact_table, dim_table, filter_val)
assert_cpu_and_gpu_are_equal_collect_with_capture(
lambda spark: spark.sql(statement),
        # DPP is expected to be skipped entirely, so DynamicPruningExpression should not appear in the plan
non_exist_classes='DynamicPruningExpression',
conf=dict(_dpp_fallback_conf + [('spark.sql.adaptive.enabled', aqe_enabled)]))
# GPU verification on https://issues.apache.org/jira/browse/SPARK-34436
@ignore_order
@allow_non_gpu('FilterExec')
@pytest.mark.parametrize('store_format', ['parquet', 'orc'], ids=idfn)
@pytest.mark.parametrize('aqe_enabled', [
'false',
pytest.param('true', marks=pytest.mark.skipif(is_before_spark_320() and not is_databricks_runtime(),
reason='Only in Spark 3.2.0+ AQE and DPP can be both enabled'))
], ids=idfn)
@pytest.mark.skipif(is_before_spark_312(), reason="DPP over LikeAny/LikeAll filter not enabled until Spark 3.1.2")
def test_dpp_like_any(spark_tmp_table_factory, store_format, aqe_enabled):
fact_table, dim_table = spark_tmp_table_factory.get(), spark_tmp_table_factory.get()
create_fact_table(fact_table, store_format)
def create_dim_table_for_like(spark):
df = gen_df(spark, [
('key', IntegerGen(nullable=False, min_val=0, max_val=9, special_cases=[])),
('filter', StringGen(pattern='[0-9]{2,10}')),
], 100)
df.write.format(store_format).mode("overwrite").saveAsTable(dim_table)
with_cpu_session(create_dim_table_for_like)
statement = """
SELECT f.key, f.skey, f.value
FROM {0} f JOIN {1} s
ON f.key = s.key
WHERE s.filter LIKE ANY ('%00%', '%01%', '%10%', '%11%')
""".format(fact_table, dim_table)
if is_databricks113_or_later() and aqe_enabled == 'true':
exist_classes='DynamicPruningExpression,SubqueryBroadcastExec,ReusedExchangeExec'
else:
exist_classes='DynamicPruningExpression,GpuSubqueryBroadcastExec,ReusedExchangeExec'
assert_cpu_and_gpu_are_equal_collect_with_capture(
lambda spark: spark.sql(statement),
exist_classes,
conf=dict(_exchange_reuse_conf + [('spark.sql.adaptive.enabled', aqe_enabled)]))
# Test handling DPP expressions from a HashedRelation that rearranges columns
@pytest.mark.parametrize('aqe_enabled', [
'false',
pytest.param('true', marks=pytest.mark.skipif(is_before_spark_320() and not is_databricks_runtime(),
reason='Only in Spark 3.2.0+ AQE and DPP can be both enabled'))
], ids=idfn)
def test_dpp_from_swizzled_hash_keys(spark_tmp_table_factory, aqe_enabled):
dim_table = spark_tmp_table_factory.get()
fact_table = spark_tmp_table_factory.get()
def setup_tables(spark):
spark.sql("CREATE TABLE {}(id string) PARTITIONED BY (dt date, hr string, mins string) STORED AS PARQUET".format(dim_table))
spark.sql("INSERT INTO {}(id,dt,hr,mins) values ('somevalue', date('2022-01-01'), '11', '59')".format(dim_table))
spark.sql("CREATE TABLE {}(id string)".format(fact_table) +
" PARTITIONED BY (dt date, hr string, mins string) STORED AS PARQUET")
spark.sql("INSERT INTO {}(id,dt,hr,mins)".format(fact_table) +
" SELECT 'somevalue', to_date('2022-01-01'), '11', '59'")
with_cpu_session(setup_tables, conf={
"hive.exec.dynamic.partition" : "true",
"hive.exec.dynamic.partition.mode" : "nonstrict"
})
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.sql("SELECT COUNT(*) AS cnt FROM {} f".format(fact_table) +
" LEFT JOIN (SELECT *, " +
" date_format(concat(string(dt),' ',hr,':',mins,':','00'),'yyyy-MM-dd HH:mm:ss.SSS') AS ts" +
" from {}) tmp".format(dim_table) +
" ON f.hr = tmp.hr AND f.dt = tmp.dt WHERE tmp.ts < CURRENT_TIMESTAMP"),
conf=dict(_dpp_conf + [('spark.sql.adaptive.enabled', aqe_enabled),
("spark.rapids.sql.castStringToTimestamp.enabled", "true"),
("spark.rapids.sql.hasExtendedYearValues", "false")]))
# Test handling DPP subquery that could broadcast EmptyRelation rather than a GPU serialized batch
@pytest.mark.parametrize('aqe_enabled', [
'false',
pytest.param('true', marks=pytest.mark.skipif(is_before_spark_320() and not is_databricks_runtime(),
reason='Only in Spark 3.2.0+ AQE and DPP can be both enabled'))
], ids=idfn)
def test_dpp_empty_relation(spark_tmp_table_factory, aqe_enabled):
dim_table = spark_tmp_table_factory.get()
fact_table = spark_tmp_table_factory.get()
def setup_tables(spark):
spark.sql("CREATE TABLE {}(id string) PARTITIONED BY (dt date, hr string, mins string) STORED AS PARQUET".format(dim_table))
spark.sql("INSERT INTO {}(id,dt,hr,mins) values ('somevalue', date('2022-01-01'), '11', '59')".format(dim_table))
spark.sql("CREATE TABLE {}(id string)".format(fact_table) +
" PARTITIONED BY (dt date, hr string, mins string) STORED AS PARQUET")
spark.sql("INSERT INTO {}(id,dt,hr,mins)".format(fact_table) +
" SELECT 'somevalue', to_date('2022-01-01'), '11', '59'")
with_cpu_session(setup_tables, conf={
"hive.exec.dynamic.partition" : "true",
"hive.exec.dynamic.partition.mode" : "nonstrict"
})
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.sql("SELECT COUNT(*) AS cnt FROM {} f".format(fact_table) +
" LEFT JOIN (SELECT * from {}) tmp".format(dim_table) +
" ON f.hr = tmp.hr AND f.dt = tmp.dt WHERE tmp.mins > 60"),
conf=dict(_dpp_conf + [('spark.sql.adaptive.enabled', aqe_enabled)]))
| spark-rapids-branch-23.10 | integration_tests/src/main/python/dpp_test.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_gpu_and_cpu_are_equal_collect, assert_gpu_and_cpu_row_counts_equal, assert_gpu_fallback_collect, assert_spark_exception
from data_gen import *
from marks import allow_non_gpu, iceberg, ignore_order
from spark_session import is_before_spark_320, is_databricks_runtime, with_cpu_session, with_gpu_session
iceberg_map_gens = [MapGen(f(nullable=False), f()) for f in [
BooleanGen, ByteGen, ShortGen, IntegerGen, LongGen, FloatGen, DoubleGen, DateGen, TimestampGen ]] + \
[simple_string_to_string_map_gen,
MapGen(StringGen(pattern='key_[0-9]', nullable=False), ArrayGen(string_gen), max_length=10),
MapGen(RepeatSeqGen(IntegerGen(nullable=False), 10), long_gen, max_length=10),
MapGen(StringGen(pattern='key_[0-9]', nullable=False), simple_string_to_string_map_gen)]
iceberg_gens_list = [
[byte_gen, short_gen, int_gen, long_gen, float_gen, double_gen,
string_gen, boolean_gen, date_gen, timestamp_gen, binary_gen, ArrayGen(binary_gen),
ArrayGen(byte_gen), ArrayGen(long_gen), ArrayGen(string_gen), ArrayGen(date_gen),
ArrayGen(timestamp_gen), ArrayGen(decimal_gen_64bit), ArrayGen(ArrayGen(byte_gen)),
StructGen([['child0', ArrayGen(byte_gen)], ['child1', byte_gen], ['child2', float_gen], ['child3', decimal_gen_64bit]]),
ArrayGen(StructGen([['child0', string_gen], ['child1', double_gen], ['child2', int_gen]]))
] + iceberg_map_gens + decimal_gens ]
rapids_reader_types = ['PERFILE', 'MULTITHREADED', 'COALESCING']
@allow_non_gpu("BatchScanExec")
@iceberg
@ignore_order(local=True) # Iceberg plans with a thread pool and is not deterministic in file ordering
def test_iceberg_fallback_not_unsafe_row(spark_tmp_table_factory):
table = spark_tmp_table_factory.get()
def setup_iceberg_table(spark):
spark.sql("CREATE TABLE {} (id BIGINT, data STRING) USING ICEBERG".format(table))
spark.sql("INSERT INTO {} VALUES (1, 'a'), (2, 'b'), (3, 'c')".format(table))
with_cpu_session(setup_iceberg_table)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.sql("SELECT COUNT(DISTINCT id) from {}".format(table)),
conf={"spark.rapids.sql.format.iceberg.enabled": "false"}
)
@iceberg
@ignore_order(local=True)
@pytest.mark.skipif(is_before_spark_320() or is_databricks_runtime(),
reason="AQE+DPP not supported until Spark 3.2.0+ and AQE+DPP not supported on Databricks")
@pytest.mark.parametrize('reader_type', rapids_reader_types)
def test_iceberg_aqe_dpp(spark_tmp_table_factory, reader_type):
table = spark_tmp_table_factory.get()
tmpview = spark_tmp_table_factory.get()
def setup_iceberg_table(spark):
df = two_col_df(spark, int_gen, int_gen)
df.createOrReplaceTempView(tmpview)
spark.sql("CREATE TABLE {} (a INT, b INT) USING ICEBERG PARTITIONED BY (a)".format(table))
spark.sql("INSERT INTO {} SELECT * FROM {}".format(table, tmpview))
with_cpu_session(setup_iceberg_table)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.sql("SELECT * from {} as X JOIN {} as Y ON X.a = Y.a WHERE Y.a > 0".format(table, table)),
conf={"spark.sql.adaptive.enabled": "true",
"spark.rapids.sql.format.parquet.reader.type": reader_type,
"spark.sql.optimizer.dynamicPartitionPruning.enabled": "true"})
@iceberg
@ignore_order(local=True) # Iceberg plans with a thread pool and is not deterministic in file ordering
@pytest.mark.parametrize("data_gens", iceberg_gens_list, ids=idfn)
@pytest.mark.parametrize('reader_type', rapids_reader_types)
def test_iceberg_parquet_read_round_trip_select_one(spark_tmp_table_factory, data_gens, reader_type):
gen_list = [('_c' + str(i), gen) for i, gen in enumerate(data_gens)]
table = spark_tmp_table_factory.get()
tmpview = spark_tmp_table_factory.get()
def setup_iceberg_table(spark):
df = gen_df(spark, gen_list)
df.createOrReplaceTempView(tmpview)
spark.sql("CREATE TABLE {} USING ICEBERG AS SELECT * FROM {}".format(table, tmpview))
with_cpu_session(setup_iceberg_table)
# explicitly only select 1 column to make sure we test that path in the schema parsing code
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.sql("SELECT _c0 FROM {}".format(table)),
conf={'spark.rapids.sql.format.parquet.reader.type': reader_type})
@iceberg
@ignore_order(local=True) # Iceberg plans with a thread pool and is not deterministic in file ordering
@pytest.mark.parametrize("data_gens", iceberg_gens_list, ids=idfn)
@pytest.mark.parametrize('reader_type', rapids_reader_types)
def test_iceberg_parquet_read_round_trip(spark_tmp_table_factory, data_gens, reader_type):
gen_list = [('_c' + str(i), gen) for i, gen in enumerate(data_gens)]
table = spark_tmp_table_factory.get()
tmpview = spark_tmp_table_factory.get()
def setup_iceberg_table(spark):
df = gen_df(spark, gen_list)
df.createOrReplaceTempView(tmpview)
spark.sql("CREATE TABLE {} USING ICEBERG AS SELECT * FROM {}".format(table, tmpview))
with_cpu_session(setup_iceberg_table)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.sql("SELECT * FROM {}".format(table)),
conf={'spark.rapids.sql.format.parquet.reader.type': reader_type})
@iceberg
@pytest.mark.parametrize("data_gens", [[long_gen]], ids=idfn)
@pytest.mark.parametrize("iceberg_format", ["orc", "avro"], ids=idfn)
@pytest.mark.parametrize('reader_type', rapids_reader_types)
def test_iceberg_unsupported_formats(spark_tmp_table_factory, data_gens, iceberg_format, reader_type):
gen_list = [('_c' + str(i), gen) for i, gen in enumerate(data_gens)]
table = spark_tmp_table_factory.get()
tmpview = spark_tmp_table_factory.get()
def setup_iceberg_table(spark):
df = gen_df(spark, gen_list)
df.createOrReplaceTempView(tmpview)
spark.sql("CREATE TABLE {} USING ICEBERG ".format(table) + \
"TBLPROPERTIES('write.format.default' = '{}') ".format(iceberg_format) + \
"AS SELECT * FROM {}".format(tmpview))
with_cpu_session(setup_iceberg_table)
assert_spark_exception(
lambda : with_gpu_session(
lambda spark : spark.sql("SELECT * FROM {}".format(table)).collect(),
conf={'spark.rapids.sql.format.parquet.reader.type': reader_type}),
"UnsupportedOperationException")
@iceberg
@allow_non_gpu("BatchScanExec", "ColumnarToRowExec")
@ignore_order(local=True) # Iceberg plans with a thread pool and is not deterministic in file ordering
@pytest.mark.parametrize("disable_conf", ["spark.rapids.sql.format.iceberg.enabled",
"spark.rapids.sql.format.iceberg.read.enabled"], ids=idfn)
def test_iceberg_read_fallback(spark_tmp_table_factory, disable_conf):
table = spark_tmp_table_factory.get()
def setup_iceberg_table(spark):
spark.sql("CREATE TABLE {} (id BIGINT, data STRING) USING ICEBERG".format(table))
spark.sql("INSERT INTO {} VALUES (1, 'a'), (2, 'b'), (3, 'c')".format(table))
with_cpu_session(setup_iceberg_table)
assert_gpu_fallback_collect(
lambda spark : spark.sql("SELECT * FROM {}".format(table)),
"BatchScanExec",
conf = {disable_conf : "false"})
@iceberg
@ignore_order(local=True) # Iceberg plans with a thread pool and is not deterministic in file ordering
# Compression codec to test and whether the codec is supported by cudf
# Note that compression codecs brotli and lzo need extra jars
# https://github.com/NVIDIA/spark-rapids/issues/143
@pytest.mark.parametrize("codec_info", [
("uncompressed", None),
("snappy", None),
("gzip", None),
pytest.param(("lz4", "Unsupported compression type"),
marks=pytest.mark.skipif(is_before_spark_320(),
reason="Hadoop with Spark 3.1.x does not support lz4 by default")),
("zstd", None)], ids=idfn)
@pytest.mark.parametrize('reader_type', rapids_reader_types)
def test_iceberg_read_parquet_compression_codec(spark_tmp_table_factory, codec_info, reader_type):
codec, error_msg = codec_info
table = spark_tmp_table_factory.get()
tmpview = spark_tmp_table_factory.get()
def setup_iceberg_table(spark):
df = binary_op_df(spark, long_gen)
df.createOrReplaceTempView(tmpview)
spark.sql("CREATE TABLE {} (id BIGINT, data BIGINT) USING ICEBERG ".format(table) + \
"TBLPROPERTIES('write.parquet.compression-codec' = '{}')".format(codec))
spark.sql("INSERT INTO {} SELECT * FROM {}".format(table, tmpview))
with_cpu_session(setup_iceberg_table)
query = "SELECT * FROM {}".format(table)
read_conf = {'spark.rapids.sql.format.parquet.reader.type': reader_type}
if error_msg:
assert_spark_exception(
lambda : with_gpu_session(lambda spark : spark.sql(query).collect(), conf=read_conf),
error_msg)
else:
assert_gpu_and_cpu_are_equal_collect(lambda spark : spark.sql(query), conf=read_conf)
@iceberg
@ignore_order(local=True) # Iceberg plans with a thread pool and is not deterministic in file ordering
@pytest.mark.parametrize("key_gen", [int_gen, long_gen, string_gen, boolean_gen, date_gen, timestamp_gen, decimal_gen_64bit], ids=idfn)
@pytest.mark.parametrize('reader_type', rapids_reader_types)
def test_iceberg_read_partition_key(spark_tmp_table_factory, key_gen, reader_type):
table = spark_tmp_table_factory.get()
tmpview = spark_tmp_table_factory.get()
def setup_iceberg_table(spark):
df = two_col_df(spark, key_gen, long_gen).orderBy("a")
df.createOrReplaceTempView(tmpview)
spark.sql("CREATE TABLE {} USING ICEBERG PARTITIONED BY (a) ".format(table) + \
"AS SELECT * FROM {}".format(tmpview))
with_cpu_session(setup_iceberg_table)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.sql("SELECT a FROM {}".format(table)),
conf={'spark.rapids.sql.format.parquet.reader.type': reader_type})
@iceberg
@ignore_order(local=True) # Iceberg plans with a thread pool and is not deterministic in file ordering
@pytest.mark.parametrize('reader_type', rapids_reader_types)
def test_iceberg_input_meta(spark_tmp_table_factory, reader_type):
table = spark_tmp_table_factory.get()
tmpview = spark_tmp_table_factory.get()
def setup_iceberg_table(spark):
df = binary_op_df(spark, long_gen).orderBy("a")
df.createOrReplaceTempView(tmpview)
spark.sql("CREATE TABLE {} USING ICEBERG PARTITIONED BY (a) ".format(table) + \
"AS SELECT * FROM {}".format(tmpview))
with_cpu_session(setup_iceberg_table)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.sql(
"SELECT a, input_file_name(), input_file_block_start(), input_file_block_length() " + \
"FROM {}".format(table)),
conf={'spark.rapids.sql.format.parquet.reader.type': reader_type})
@iceberg
@ignore_order(local=True) # Iceberg plans with a thread pool and is not deterministic in file ordering
@pytest.mark.parametrize('reader_type', rapids_reader_types)
def test_iceberg_disorder_read_schema(spark_tmp_table_factory, reader_type):
table = spark_tmp_table_factory.get()
tmpview = spark_tmp_table_factory.get()
def setup_iceberg_table(spark):
df = three_col_df(spark, long_gen, string_gen, float_gen)
df.createOrReplaceTempView(tmpview)
spark.sql("CREATE TABLE {} USING ICEBERG ".format(table) + \
"AS SELECT * FROM {}".format(tmpview))
with_cpu_session(setup_iceberg_table)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.sql("SELECT b,c,a FROM {}".format(table)),
conf={'spark.rapids.sql.format.parquet.reader.type': reader_type})
@iceberg
@ignore_order(local=True) # Iceberg plans with a thread pool and is not deterministic in file ordering
def test_iceberg_read_appended_table(spark_tmp_table_factory):
table = spark_tmp_table_factory.get()
tmpview = spark_tmp_table_factory.get()
def setup_iceberg_table(spark):
df = binary_op_df(spark, long_gen)
df.createOrReplaceTempView(tmpview)
spark.sql("CREATE TABLE {} USING ICEBERG ".format(table) + \
"AS SELECT * FROM {}".format(tmpview))
df = binary_op_df(spark, long_gen, seed=1)
df.createOrReplaceTempView(tmpview)
spark.sql("INSERT INTO {} ".format(table) + \
"SELECT * FROM {}".format(tmpview))
with_cpu_session(setup_iceberg_table)
assert_gpu_and_cpu_are_equal_collect(lambda spark : spark.sql("SELECT * FROM {}".format(table)))
@iceberg
# Some metadata files have types that are not supported on the GPU yet (e.g.: BinaryType)
@allow_non_gpu("BatchScanExec", "ProjectExec")
@ignore_order(local=True) # Iceberg plans with a thread pool and is not deterministic in file ordering
def test_iceberg_read_metadata_fallback(spark_tmp_table_factory):
table = spark_tmp_table_factory.get()
tmpview = spark_tmp_table_factory.get()
def setup_iceberg_table(spark):
df = binary_op_df(spark, long_gen)
df.createOrReplaceTempView(tmpview)
spark.sql("CREATE TABLE {} USING ICEBERG ".format(table) + \
"AS SELECT * FROM {}".format(tmpview))
df = binary_op_df(spark, long_gen, seed=1)
df.createOrReplaceTempView(tmpview)
spark.sql("INSERT INTO {} ".format(table) + \
"SELECT * FROM {}".format(tmpview))
with_cpu_session(setup_iceberg_table)
for subtable in ["all_data_files", "all_manifests", "files", "history",
"manifests", "partitions", "snapshots"]:
# SQL does not have syntax to read table metadata
assert_gpu_fallback_collect(
lambda spark : spark.read.format("iceberg").load("default.{}.{}".format(table, subtable)),
"BatchScanExec")
@iceberg
# Some metadata files have types that are not supported on the GPU yet (e.g.: BinaryType)
@allow_non_gpu("BatchScanExec", "ProjectExec")
def test_iceberg_read_metadata_count(spark_tmp_table_factory):
table = spark_tmp_table_factory.get()
tmpview = spark_tmp_table_factory.get()
def setup_iceberg_table(spark):
df = binary_op_df(spark, long_gen)
df.createOrReplaceTempView(tmpview)
spark.sql("CREATE TABLE {} USING ICEBERG ".format(table) + \
"AS SELECT * FROM {}".format(tmpview))
df = binary_op_df(spark, long_gen, seed=1)
df.createOrReplaceTempView(tmpview)
spark.sql("INSERT INTO {} ".format(table) + \
"SELECT * FROM {}".format(tmpview))
with_cpu_session(setup_iceberg_table)
for subtable in ["all_data_files", "all_manifests", "files", "history",
"manifests", "partitions", "snapshots"]:
# SQL does not have syntax to read table metadata
assert_gpu_and_cpu_row_counts_equal(
lambda spark : spark.read.format("iceberg").load("default.{}.{}".format(table, subtable)))
@iceberg
@ignore_order(local=True) # Iceberg plans with a thread pool and is not deterministic in file ordering
@pytest.mark.skipif(is_before_spark_320(), reason="Spark 3.1.x has a catalog bug precluding scope prefix in table names")
@pytest.mark.parametrize('reader_type', rapids_reader_types)
def test_iceberg_read_timetravel(spark_tmp_table_factory, reader_type):
table = spark_tmp_table_factory.get()
tmpview = spark_tmp_table_factory.get()
def setup_snapshots(spark):
df = binary_op_df(spark, long_gen)
df.createOrReplaceTempView(tmpview)
spark.sql("CREATE TABLE {} USING ICEBERG ".format(table) + \
"AS SELECT * FROM {}".format(tmpview))
df = binary_op_df(spark, long_gen, seed=1)
df.createOrReplaceTempView(tmpview)
spark.sql("INSERT INTO {} ".format(table) + \
"SELECT * FROM {}".format(tmpview))
return spark.sql("SELECT snapshot_id FROM default.{}.snapshots ".format(table) + \
"ORDER BY committed_at").head()[0]
first_snapshot_id = with_cpu_session(setup_snapshots)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.read.option("snapshot-id", first_snapshot_id) \
.format("iceberg").load("default.{}".format(table)),
conf={'spark.rapids.sql.format.parquet.reader.type': reader_type})
@iceberg
@ignore_order(local=True) # Iceberg plans with a thread pool and is not deterministic in file ordering
@pytest.mark.skipif(is_before_spark_320(), reason="Spark 3.1.x has a catalog bug precluding scope prefix in table names")
@pytest.mark.parametrize('reader_type', rapids_reader_types)
def test_iceberg_incremental_read(spark_tmp_table_factory, reader_type):
table = spark_tmp_table_factory.get()
tmpview = spark_tmp_table_factory.get()
def setup_snapshots(spark):
df = binary_op_df(spark, long_gen)
df.createOrReplaceTempView(tmpview)
spark.sql("CREATE TABLE {} USING ICEBERG ".format(table) + \
"AS SELECT * FROM {}".format(tmpview))
df = binary_op_df(spark, long_gen, seed=1)
df.createOrReplaceTempView(tmpview)
spark.sql("INSERT INTO {} ".format(table) + \
"SELECT * FROM {}".format(tmpview))
df = binary_op_df(spark, long_gen, seed=2)
df.createOrReplaceTempView(tmpview)
spark.sql("INSERT INTO {} ".format(table) + \
"SELECT * FROM {}".format(tmpview))
return spark.sql("SELECT snapshot_id FROM default.{}.snapshots ".format(table) + \
"ORDER BY committed_at").collect()
snapshots = with_cpu_session(setup_snapshots)
start_snapshot, end_snapshot = [ row[0] for row in snapshots[:2] ]
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.read \
.option("start-snapshot-id", start_snapshot) \
.option("end-snapshot-id", end_snapshot) \
.format("iceberg").load("default.{}".format(table)),
conf={'spark.rapids.sql.format.parquet.reader.type': reader_type})
@iceberg
@ignore_order(local=True) # Iceberg plans with a thread pool and is not deterministic in file ordering
@pytest.mark.parametrize('reader_type', rapids_reader_types)
def test_iceberg_reorder_columns(spark_tmp_table_factory, reader_type):
table = spark_tmp_table_factory.get()
tmpview = spark_tmp_table_factory.get()
def setup_iceberg_table(spark):
df = binary_op_df(spark, long_gen)
df.createOrReplaceTempView(tmpview)
spark.sql("CREATE TABLE {} USING ICEBERG ".format(table) + \
"AS SELECT * FROM {}".format(tmpview))
spark.sql("ALTER TABLE {} ALTER COLUMN b FIRST".format(table))
df = binary_op_df(spark, long_gen, seed=1)
df.createOrReplaceTempView(tmpview)
spark.sql("INSERT INTO {} ".format(table) + \
"SELECT * FROM {}".format(tmpview))
with_cpu_session(setup_iceberg_table)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.sql("SELECT * FROM {}".format(table)),
conf={'spark.rapids.sql.format.parquet.reader.type': reader_type})
@iceberg
@ignore_order(local=True) # Iceberg plans with a thread pool and is not deterministic in file ordering
@pytest.mark.parametrize('reader_type', rapids_reader_types)
def test_iceberg_rename_column(spark_tmp_table_factory, reader_type):
table = spark_tmp_table_factory.get()
tmpview = spark_tmp_table_factory.get()
def setup_iceberg_table(spark):
df = binary_op_df(spark, long_gen)
df.createOrReplaceTempView(tmpview)
spark.sql("CREATE TABLE {} USING ICEBERG ".format(table) + \
"AS SELECT * FROM {}".format(tmpview))
spark.sql("ALTER TABLE {} RENAME COLUMN a TO c".format(table))
df = binary_op_df(spark, long_gen, seed=1)
df.createOrReplaceTempView(tmpview)
spark.sql("INSERT INTO {} ".format(table) + \
"SELECT * FROM {}".format(tmpview))
with_cpu_session(setup_iceberg_table)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.sql("SELECT * FROM {}".format(table)),
conf={'spark.rapids.sql.format.parquet.reader.type': reader_type})
@iceberg
@ignore_order(local=True) # Iceberg plans with a thread pool and is not deterministic in file ordering
@pytest.mark.parametrize('reader_type', rapids_reader_types)
def test_iceberg_column_names_swapped(spark_tmp_table_factory, reader_type):
table = spark_tmp_table_factory.get()
tmpview = spark_tmp_table_factory.get()
def setup_iceberg_table(spark):
df = binary_op_df(spark, long_gen)
df.createOrReplaceTempView(tmpview)
spark.sql("CREATE TABLE {} USING ICEBERG ".format(table) + \
"AS SELECT * FROM {}".format(tmpview))
spark.sql("ALTER TABLE {} RENAME COLUMN a TO c".format(table))
spark.sql("ALTER TABLE {} RENAME COLUMN b TO a".format(table))
spark.sql("ALTER TABLE {} RENAME COLUMN c TO b".format(table))
df = binary_op_df(spark, long_gen, seed=1)
df.createOrReplaceTempView(tmpview)
spark.sql("INSERT INTO {} ".format(table) + \
"SELECT * FROM {}".format(tmpview))
with_cpu_session(setup_iceberg_table)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.sql("SELECT * FROM {}".format(table)),
conf={'spark.rapids.sql.format.parquet.reader.type': reader_type})
@iceberg
@ignore_order(local=True) # Iceberg plans with a thread pool and is not deterministic in file ordering
@pytest.mark.parametrize('reader_type', rapids_reader_types)
def test_iceberg_alter_column_type(spark_tmp_table_factory, reader_type):
table = spark_tmp_table_factory.get()
tmpview = spark_tmp_table_factory.get()
def setup_iceberg_table(spark):
df = three_col_df(spark, int_gen, float_gen, DecimalGen(precision=7, scale=3))
df.createOrReplaceTempView(tmpview)
spark.sql("CREATE TABLE {} USING ICEBERG ".format(table) + \
"AS SELECT * FROM {}".format(tmpview))
spark.sql("ALTER TABLE {} ALTER COLUMN a TYPE BIGINT".format(table))
spark.sql("ALTER TABLE {} ALTER COLUMN b TYPE DOUBLE".format(table))
spark.sql("ALTER TABLE {} ALTER COLUMN c TYPE DECIMAL(17, 3)".format(table))
df = three_col_df(spark, long_gen, double_gen, DecimalGen(precision=17, scale=3))
df.createOrReplaceTempView(tmpview)
spark.sql("INSERT INTO {} ".format(table) + \
"SELECT * FROM {}".format(tmpview))
with_cpu_session(setup_iceberg_table)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.sql("SELECT * FROM {}".format(table)),
conf={'spark.rapids.sql.format.parquet.reader.type': reader_type})
@iceberg
@ignore_order(local=True) # Iceberg plans with a thread pool and is not deterministic in file ordering
@pytest.mark.parametrize('reader_type', rapids_reader_types)
def test_iceberg_add_column(spark_tmp_table_factory, reader_type):
table = spark_tmp_table_factory.get()
tmpview = spark_tmp_table_factory.get()
def setup_iceberg_table(spark):
df = binary_op_df(spark, long_gen)
df.createOrReplaceTempView(tmpview)
spark.sql("CREATE TABLE {} USING ICEBERG ".format(table) + \
"AS SELECT * FROM {}".format(tmpview))
spark.sql("ALTER TABLE {} ADD COLUMNS (c DOUBLE)".format(table))
df = three_col_df(spark, long_gen, long_gen, double_gen)
df.createOrReplaceTempView(tmpview)
spark.sql("INSERT INTO {} ".format(table) + \
"SELECT * FROM {}".format(tmpview))
with_cpu_session(setup_iceberg_table)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.sql("SELECT * FROM {}".format(table)),
conf={'spark.rapids.sql.format.parquet.reader.type': reader_type})
@iceberg
@ignore_order(local=True) # Iceberg plans with a thread pool and is not deterministic in file ordering
@pytest.mark.parametrize('reader_type', rapids_reader_types)
def test_iceberg_remove_column(spark_tmp_table_factory, reader_type):
table = spark_tmp_table_factory.get()
tmpview = spark_tmp_table_factory.get()
def setup_iceberg_table(spark):
df = binary_op_df(spark, long_gen)
df.createOrReplaceTempView(tmpview)
spark.sql("CREATE TABLE {} USING ICEBERG ".format(table) + \
"AS SELECT * FROM {}".format(tmpview))
spark.sql("ALTER TABLE {} DROP COLUMN a".format(table))
df = unary_op_df(spark, long_gen)
df.createOrReplaceTempView(tmpview)
spark.sql("INSERT INTO {} ".format(table) + \
"SELECT * FROM {}".format(tmpview))
with_cpu_session(setup_iceberg_table)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.sql("SELECT * FROM {}".format(table)),
conf={'spark.rapids.sql.format.parquet.reader.type': reader_type})
@iceberg
@ignore_order(local=True) # Iceberg plans with a thread pool and is not deterministic in file ordering
@pytest.mark.parametrize('reader_type', rapids_reader_types)
def test_iceberg_add_partition_field(spark_tmp_table_factory, reader_type):
table = spark_tmp_table_factory.get()
tmpview = spark_tmp_table_factory.get()
def setup_iceberg_table(spark):
df = binary_op_df(spark, int_gen)
df.createOrReplaceTempView(tmpview)
spark.sql("CREATE TABLE {} USING ICEBERG ".format(table) + \
"AS SELECT * FROM {}".format(tmpview))
spark.sql("ALTER TABLE {} ADD PARTITION FIELD b".format(table))
df = binary_op_df(spark, int_gen)
df.createOrReplaceTempView(tmpview)
spark.sql("INSERT INTO {} ".format(table) + \
"SELECT * FROM {} ORDER BY b".format(tmpview))
with_cpu_session(setup_iceberg_table)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.sql("SELECT * FROM {}".format(table)),
conf={'spark.rapids.sql.format.parquet.reader.type': reader_type})
@iceberg
@ignore_order(local=True) # Iceberg plans with a thread pool and is not deterministic in file ordering
@pytest.mark.parametrize('reader_type', rapids_reader_types)
def test_iceberg_drop_partition_field(spark_tmp_table_factory, reader_type):
table = spark_tmp_table_factory.get()
tmpview = spark_tmp_table_factory.get()
def setup_iceberg_table(spark):
df = binary_op_df(spark, int_gen)
df.createOrReplaceTempView(tmpview)
spark.sql("CREATE TABLE {} (a INT, b INT) USING ICEBERG PARTITIONED BY (b)".format(table))
spark.sql("INSERT INTO {} SELECT * FROM {} ORDER BY b".format(table, tmpview))
spark.sql("ALTER TABLE {} DROP PARTITION FIELD b".format(table))
df = binary_op_df(spark, int_gen)
df.createOrReplaceTempView(tmpview)
spark.sql("INSERT INTO {} ".format(table) + \
"SELECT * FROM {}".format(tmpview))
with_cpu_session(setup_iceberg_table)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.sql("SELECT * FROM {}".format(table)),
conf={'spark.rapids.sql.format.parquet.reader.type': reader_type})
@iceberg
@ignore_order(local=True) # Iceberg plans with a thread pool and is not deterministic in file ordering
@pytest.mark.parametrize('reader_type', rapids_reader_types)
def test_iceberg_v1_delete(spark_tmp_table_factory, reader_type):
table = spark_tmp_table_factory.get()
tmpview = spark_tmp_table_factory.get()
def setup_iceberg_table(spark):
df = binary_op_df(spark, long_gen)
df.createOrReplaceTempView(tmpview)
spark.sql("CREATE TABLE {} USING ICEBERG ".format(table) + \
"AS SELECT * FROM {}".format(tmpview))
spark.sql("DELETE FROM {} WHERE a < 0".format(table))
with_cpu_session(setup_iceberg_table)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.sql("SELECT * FROM {}".format(table)),
conf={'spark.rapids.sql.format.parquet.reader.type': reader_type})
@iceberg
@pytest.mark.skipif(is_before_spark_320(), reason="merge-on-read not supported on Spark 3.1.x")
@pytest.mark.parametrize('reader_type', rapids_reader_types)
def test_iceberg_v2_delete_unsupported(spark_tmp_table_factory, reader_type):
table = spark_tmp_table_factory.get()
tmpview = spark_tmp_table_factory.get()
def setup_iceberg_table(spark):
df = binary_op_df(spark, long_gen)
df.createOrReplaceTempView(tmpview)
spark.sql("CREATE TABLE {} USING ICEBERG ".format(table) + \
"TBLPROPERTIES('format-version' = 2, 'write.delete.mode' = 'merge-on-read') " + \
"AS SELECT * FROM {}".format(tmpview))
spark.sql("DELETE FROM {} WHERE a < 0".format(table))
with_cpu_session(setup_iceberg_table)
assert_spark_exception(
lambda : with_gpu_session(
lambda spark : spark.sql("SELECT * FROM {}".format(table)).collect(),
conf={'spark.rapids.sql.format.parquet.reader.type': reader_type}),
"UnsupportedOperationException: Delete filter is not supported")
@iceberg
@ignore_order(local=True) # Iceberg plans with a thread pool and is not deterministic in file ordering
@pytest.mark.parametrize('reader_type', rapids_reader_types)
def test_iceberg_parquet_read_with_input_file(spark_tmp_table_factory, reader_type):
table = spark_tmp_table_factory.get()
tmpview = spark_tmp_table_factory.get()
def setup_iceberg_table(spark):
df = binary_op_df(spark, long_gen)
df.createOrReplaceTempView(tmpview)
spark.sql("CREATE TABLE {} USING ICEBERG AS SELECT * FROM {}".format(table, tmpview))
with_cpu_session(setup_iceberg_table)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.sql("SELECT *, input_file_name() FROM {}".format(table)),
conf={'spark.rapids.sql.format.parquet.reader.type': reader_type})
| spark-rapids-branch-23.10 | integration_tests/src/main/python/iceberg_test.py |
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from conftest import is_incompat, should_sort_on_spark, should_sort_locally, get_float_check, get_limit, spark_jvm
from datetime import date, datetime, timedelta
from decimal import Decimal
import math
from pyspark.sql import Row
import pytest
from spark_session import with_cpu_session, with_gpu_session
import time
import types as pytypes
import data_gen
import difflib
import sys
def _assert_equal(cpu, gpu, float_check, path):
t = type(cpu)
if (t is Row):
assert len(cpu) == len(gpu), "CPU and GPU row have different lengths at {} CPU: {} GPU: {}".format(path, len(cpu), len(gpu))
if hasattr(cpu, "__fields__") and hasattr(gpu, "__fields__"):
assert cpu.__fields__ == gpu.__fields__, "CPU and GPU row have different fields at {} CPU: {} GPU: {}".format(path, cpu.__fields__, gpu.__fields__)
for field in cpu.__fields__:
_assert_equal(cpu[field], gpu[field], float_check, path + [field])
else:
for index in range(len(cpu)):
_assert_equal(cpu[index], gpu[index], float_check, path + [index])
elif (t is list):
assert len(cpu) == len(gpu), "CPU and GPU list have different lengths at {} CPU: {} GPU: {}".format(path, len(cpu), len(gpu))
for index in range(len(cpu)):
_assert_equal(cpu[index], gpu[index], float_check, path + [index])
elif (t is tuple):
assert len(cpu) == len(gpu), "CPU and GPU list have different lengths at {} CPU: {} GPU: {}".format(path, len(cpu), len(gpu))
for index in range(len(cpu)):
_assert_equal(cpu[index], gpu[index], float_check, path + [index])
elif (t is pytypes.GeneratorType):
index = 0
# generator has no zip :( so we have to do this the hard way
done = False
while not done:
sub_cpu = None
sub_gpu = None
try:
sub_cpu = next(cpu)
except StopIteration:
done = True
try:
sub_gpu = next(gpu)
except StopIteration:
done = True
if done:
assert sub_cpu == sub_gpu and sub_cpu == None, "CPU and GPU generators have different lengths at {}".format(path)
else:
_assert_equal(sub_cpu, sub_gpu, float_check, path + [index])
index = index + 1
elif (t is dict):
# The order of key/values is not guaranteed in python dicts, nor are they guaranteed by Spark
# so sort the items to do our best with ignoring the order of dicts
        cpu_items = sorted(cpu.items(), key=_RowCmp)
        gpu_items = sorted(gpu.items(), key=_RowCmp)
_assert_equal(cpu_items, gpu_items, float_check, path + ["map"])
elif (t is int):
assert cpu == gpu, "GPU and CPU int values are different at {}".format(path)
elif (t is float):
if (math.isnan(cpu)):
assert math.isnan(gpu), "GPU and CPU float values are different at {}".format(path)
else:
assert float_check(cpu, gpu), "GPU and CPU float values are different {}".format(path)
elif isinstance(cpu, str):
assert cpu == gpu, "GPU and CPU string values are different at {}".format(path)
elif isinstance(cpu, datetime):
assert cpu == gpu, "GPU and CPU timestamp values are different at {}".format(path)
elif isinstance(cpu, date):
assert cpu == gpu, "GPU and CPU date values are different at {}".format(path)
elif isinstance(cpu, bool):
assert cpu == gpu, "GPU and CPU boolean values are different at {}".format(path)
elif isinstance(cpu, Decimal):
assert cpu == gpu, "GPU and CPU decimal values are different at {}".format(path)
elif isinstance(cpu, bytearray):
assert cpu == gpu, "GPU and CPU bytearray values are different at {}".format(path)
elif isinstance(cpu, timedelta):
# Used by interval type DayTimeInterval for Pyspark 3.3.0+
assert cpu == gpu, "GPU and CPU timedelta values are different at {}".format(path)
elif (cpu == None):
assert cpu == gpu, "GPU and CPU are not both null at {}".format(path)
else:
assert False, "Found unexpected type {} at {}".format(t, path)
def assert_equal(cpu, gpu):
"""Verify that the result from the CPU and the GPU are equal"""
try:
_assert_equal(cpu, gpu, float_check=get_float_check(), path=[])
except:
sys.stdout.writelines(difflib.unified_diff(
a=[f"{x}\n" for x in cpu],
b=[f"{x}\n" for x in gpu],
fromfile='CPU OUTPUT',
tofile='GPU OUTPUT'))
raise
def _has_incompat_conf(conf):
return ('spark.rapids.sql.incompatibleOps.enabled' in conf and
conf['spark.rapids.sql.incompatibleOps.enabled'].lower() == 'true')
class _RowCmp(object):
"""Allows for sorting Rows in a consistent way"""
def __init__(self, wrapped):
if isinstance(wrapped, Row) or isinstance(wrapped, list) or isinstance(wrapped, tuple):
self.wrapped = [_RowCmp(c) for c in wrapped]
elif isinstance(wrapped, dict):
def sort_dict(e):
return _RowCmp(e)
tmp = [(k, v) for k, v in wrapped.items()]
tmp.sort(key=sort_dict)
self.wrapped = [_RowCmp(c) for c in tmp]
else:
self.wrapped = wrapped
if isinstance(wrapped, float):
self.is_nan = math.isnan(wrapped)
else:
self.is_nan = False
def cmp(self, other):
try:
#None comes before anything else
#NaN comes next
if (self.wrapped is None and other.wrapped is None):
return 0
elif (self.wrapped is None):
return -1
elif (other.wrapped is None):
return 1
elif self.is_nan and other.is_nan:
return 0
elif self.is_nan:
return -1
elif other.is_nan:
return 1
elif self.wrapped == other.wrapped:
return 0
elif self.wrapped < other.wrapped:
return -1
else:
return 1
except TypeError as te:
print("ERROR TRYING TO COMPARE {} to {} {}".format(self.wrapped, other.wrapped, te))
raise te
def __lt__(self, other):
return self.cmp(other) < 0
def __gt__(self, other):
return self.cmp(other) > 0
def __eq__(self, other):
return self.cmp(other) == 0
def __le__(self, other):
return self.cmp(other) <= 0
def __ge__(self, other):
return self.cmp(other) >= 0
def __ne__(self, other):
return self.cmp(other) != 0
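# Illustrative sketch (not used by the test suite): how _RowCmp is typically applied to
# sort collected rows locally so CPU and GPU results can be compared independent of the
# order Spark returned them in. The rows below are hypothetical sample data.
def _example_local_sort_with_row_cmp():
    example_rows = [Row(a=None, b=2.0), Row(a=1, b=float('nan')), Row(a=1, b=1.0)]
    # Per _RowCmp.cmp, None sorts before NaN, which sorts before ordinary values
    return sorted(example_rows, key=_RowCmp)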
def _prep_func_for_compare(func, mode):
sort_locally = should_sort_locally()
if should_sort_on_spark():
def with_sorted(spark):
df = func(spark)
return df.sort([f"`{x}`" for x in df.columns])
sorted_func = with_sorted
else:
sorted_func = func
limit_val = get_limit()
if limit_val > 0:
def with_limit(spark):
df = sorted_func(spark)
return df.limit(limit_val)
limit_func = with_limit
else:
limit_func = sorted_func
if mode == 'COLLECT':
bring_back = lambda spark: limit_func(spark).collect()
collect_type = 'COLLECT'
elif mode == 'COUNT':
bring_back = lambda spark: limit_func(spark).count()
collect_type = 'COUNT'
elif mode == 'COLLECT_WITH_DATAFRAME':
def bring_back(spark):
df = limit_func(spark)
return (df.collect(), df)
collect_type = 'COLLECT'
return (bring_back, collect_type)
else:
bring_back = lambda spark: limit_func(spark).toLocalIterator()
collect_type = 'ITERATOR'
if sort_locally:
raise RuntimeError('Local Sort is only supported on a collect')
return (bring_back, collect_type)
def _prep_incompat_conf(conf):
if is_incompat():
conf = dict(conf) # Make a copy before we change anything
conf['spark.rapids.sql.incompatibleOps.enabled'] = 'true'
elif _has_incompat_conf(conf):
raise AssertionError("incompat must be enabled by the incompat fixture")
return conf
def _assert_gpu_and_cpu_writes_are_equal(
write_func,
read_func,
base_path,
mode,
conf={}):
conf = _prep_incompat_conf(conf)
print('### CPU RUN ###')
cpu_start = time.time()
cpu_path = base_path + '/CPU'
with_cpu_session(lambda spark : write_func(spark, cpu_path), conf=conf)
cpu_end = time.time()
print('### GPU RUN ###')
gpu_start = time.time()
gpu_path = base_path + '/GPU'
with_gpu_session(lambda spark : write_func(spark, gpu_path), conf=conf)
gpu_end = time.time()
print('### WRITE: GPU TOOK {} CPU TOOK {} ###'.format(
gpu_end - gpu_start, cpu_end - cpu_start))
(cpu_bring_back, cpu_collect_type) = _prep_func_for_compare(
lambda spark: read_func(spark, cpu_path), mode)
(gpu_bring_back, gpu_collect_type) = _prep_func_for_compare(
lambda spark: read_func(spark, gpu_path), mode)
from_cpu = with_cpu_session(cpu_bring_back, conf=conf)
from_gpu = with_cpu_session(gpu_bring_back, conf=conf)
if should_sort_locally():
from_cpu.sort(key=_RowCmp)
from_gpu.sort(key=_RowCmp)
assert_equal(from_cpu, from_gpu)
def assert_gpu_and_cpu_writes_are_equal_collect(write_func, read_func, base_path, conf={}):
"""
Assert when running write_func on both the CPU and the GPU and reading using read_func
on the CPU that the results are equal.
In this case the data is collected back to the driver and compared here, so be
careful about the amount of data returned.
"""
_assert_gpu_and_cpu_writes_are_equal(write_func, read_func, base_path, 'COLLECT', conf=conf)
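# Minimal usage sketch (hypothetical data and path; the standard spark_tmp_path fixture is
# assumed): write_func receives (spark, path) and performs the write, read_func receives
# (spark, path) and returns a dataframe; both reads happen on the CPU before comparing.
def _example_writes_are_equal_collect(spark_tmp_path):
    assert_gpu_and_cpu_writes_are_equal_collect(
        lambda spark, path: spark.range(100).selectExpr("id", "id % 5 AS k").write.parquet(path),
        lambda spark, path: spark.read.parquet(path),
        spark_tmp_path + '/EXAMPLE_WRITE_DATA')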
def assert_gpu_and_cpu_writes_are_equal_iterator(write_func, read_func, base_path, conf={}):
"""
Assert when running write_func on both the CPU and the GPU and reading using read_func
on the CPU that the results are equal.
In this case the data is pulled back to the driver in chunks and compared here
so any amount of data can work, just be careful about how long it might take.
"""
_assert_gpu_and_cpu_writes_are_equal(write_func, read_func, base_path, 'ITERATOR', conf=conf)
def assert_gpu_and_cpu_sql_writes_are_equal_collect(table_name_factory, write_sql_func, conf={}):
"""
Assert when running SQL text from write_sql_func on both the CPU and the GPU and reading
both resulting tables on the CPU that the results are equal.
In this case the data is collected back to the driver and compared here, so be
careful about the amount of data returned.
"""
conf = _prep_incompat_conf(conf)
print('### CPU RUN ###')
cpu_table = table_name_factory.get()
cpu_start = time.time()
def do_write(spark, table_name):
sql_texts = write_sql_func(spark, table_name)
sql_text_list = [sql_texts] if isinstance(sql_texts, str) else sql_texts
for sql_text in sql_text_list:
spark.sql(sql_text)
return None
with_cpu_session(lambda spark : do_write(spark, cpu_table), conf=conf)
cpu_end = time.time()
print('### GPU RUN ###')
gpu_start = time.time()
gpu_table = table_name_factory.get()
with_gpu_session(lambda spark : do_write(spark, gpu_table), conf=conf)
gpu_end = time.time()
print('### WRITE: GPU TOOK {} CPU TOOK {} ###'.format(
gpu_end - gpu_start, cpu_end - cpu_start))
mode = "COLLECT"
(cpu_bring_back, cpu_collect_type) = _prep_func_for_compare(
lambda spark: spark.sql("SELECT * FROM {}".format(cpu_table)), mode)
(gpu_bring_back, gpu_collect_type) = _prep_func_for_compare(
lambda spark: spark.sql("SELECT * FROM {}".format(gpu_table)), mode)
from_cpu = with_cpu_session(cpu_bring_back, conf=conf)
from_gpu = with_cpu_session(gpu_bring_back, conf=conf)
if should_sort_locally():
from_cpu.sort(key=_RowCmp)
from_gpu.sort(key=_RowCmp)
assert_equal(from_cpu, from_gpu)
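# Sketch of the expected write_sql_func shape (illustrative SQL, standard
# spark_tmp_table_factory fixture assumed): it is given (spark, table_name) and returns one
# SQL string or a list of SQL strings that perform the write.
def _example_sql_writes_are_equal_collect(spark_tmp_table_factory):
    def write_sql(spark, table_name):
        return [
            "CREATE TABLE {} (id BIGINT, k BIGINT) USING PARQUET".format(table_name),
            "INSERT INTO {} SELECT id, id % 5 FROM range(100)".format(table_name)]
    assert_gpu_and_cpu_sql_writes_are_equal_collect(spark_tmp_table_factory, write_sql)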
def assert_gpu_fallback_write(write_func,
        read_func,
        base_path,
        cpu_fallback_class_name_list,
        conf={}):
    # Accept either a single fallback class name or a list of class names
    if isinstance(cpu_fallback_class_name_list, str):
        cpu_fallback_class_name_list = [cpu_fallback_class_name_list]
conf = _prep_incompat_conf(conf)
print('### CPU RUN ###')
cpu_start = time.time()
cpu_path = base_path + '/CPU'
with_cpu_session(lambda spark : write_func(spark, cpu_path), conf=conf)
cpu_end = time.time()
print('### GPU RUN ###')
jvm = spark_jvm()
jvm.org.apache.spark.sql.rapids.ExecutionPlanCaptureCallback.startCapture()
gpu_start = time.time()
gpu_path = base_path + '/GPU'
with_gpu_session(lambda spark : write_func(spark, gpu_path), conf=conf)
gpu_end = time.time()
jvm.org.apache.spark.sql.rapids.ExecutionPlanCaptureCallback.assertCapturedAndGpuFellBack(cpu_fallback_class_name_list, 10000)
print('### WRITE: GPU TOOK {} CPU TOOK {} ###'.format(
gpu_end - gpu_start, cpu_end - cpu_start))
(cpu_bring_back, cpu_collect_type) = _prep_func_for_compare(
lambda spark: read_func(spark, cpu_path), 'COLLECT')
(gpu_bring_back, gpu_collect_type) = _prep_func_for_compare(
lambda spark: read_func(spark, gpu_path), 'COLLECT')
from_cpu = with_cpu_session(cpu_bring_back, conf=conf)
from_gpu = with_cpu_session(gpu_bring_back, conf=conf)
if should_sort_locally():
from_cpu.sort(key=_RowCmp)
from_gpu.sort(key=_RowCmp)
assert_equal(from_cpu, from_gpu)
def assert_cpu_and_gpu_are_equal_collect_with_capture(func,
exist_classes='',
non_exist_classes='',
conf={}):
(bring_back, collect_type) = _prep_func_for_compare(func, 'COLLECT_WITH_DATAFRAME')
conf = _prep_incompat_conf(conf)
print('### CPU RUN ###')
cpu_start = time.time()
from_cpu, cpu_df = with_cpu_session(bring_back, conf=conf)
cpu_end = time.time()
print('### GPU RUN ###')
gpu_start = time.time()
from_gpu, gpu_df = with_gpu_session(bring_back, conf=conf)
gpu_end = time.time()
jvm = spark_jvm()
if exist_classes:
for clz in exist_classes.split(','):
jvm.org.apache.spark.sql.rapids.ExecutionPlanCaptureCallback.assertContains(gpu_df._jdf, clz)
if non_exist_classes:
for clz in non_exist_classes.split(','):
jvm.org.apache.spark.sql.rapids.ExecutionPlanCaptureCallback.assertNotContain(gpu_df._jdf, clz)
print('### {}: GPU TOOK {} CPU TOOK {} ###'.format(collect_type,
gpu_end - gpu_start, cpu_end - cpu_start))
if should_sort_locally():
from_cpu.sort(key=_RowCmp)
from_gpu.sort(key=_RowCmp)
assert_equal(from_cpu, from_gpu)
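# Illustrative sketch: comparing results while also checking the captured GPU plan. The
# exec class names below are examples of the comma-separated form this helper expects and
# are not tied to any particular test.
def _example_collect_with_capture():
    assert_cpu_and_gpu_are_equal_collect_with_capture(
        lambda spark: spark.range(1000).selectExpr("id % 10 AS k").groupBy("k").count(),
        exist_classes='GpuHashAggregateExec',
        non_exist_classes='HashAggregateExec')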
def assert_cpu_and_gpu_are_equal_sql_with_capture(df_fun,
sql,
table_name,
exist_classes='',
non_exist_classes='',
conf=None,
debug=False):
if conf is None:
conf = {}
def do_it_all(spark):
df = df_fun(spark)
df.createOrReplaceTempView(table_name)
if debug:
return data_gen.debug_df(spark.sql(sql))
else:
return spark.sql(sql)
assert_cpu_and_gpu_are_equal_collect_with_capture(do_it_all, exist_classes, non_exist_classes, conf)
def assert_gpu_fallback_collect(func,
cpu_fallback_class_name,
conf={}):
(bring_back, collect_type) = _prep_func_for_compare(func, 'COLLECT_WITH_DATAFRAME')
conf = _prep_incompat_conf(conf)
print('### CPU RUN ###')
cpu_start = time.time()
from_cpu, cpu_df = with_cpu_session(bring_back, conf=conf)
cpu_end = time.time()
print('### GPU RUN ###')
gpu_start = time.time()
from_gpu, gpu_df = with_gpu_session(bring_back, conf=conf)
gpu_end = time.time()
jvm = spark_jvm()
jvm.org.apache.spark.sql.rapids.ExecutionPlanCaptureCallback.assertDidFallBack(gpu_df._jdf, cpu_fallback_class_name)
print('### {}: GPU TOOK {} CPU TOOK {} ###'.format(collect_type,
gpu_end - gpu_start, cpu_end - cpu_start))
if should_sort_locally():
from_cpu.sort(key=_RowCmp)
from_gpu.sort(key=_RowCmp)
assert_equal(from_cpu, from_gpu)
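# Illustrative sketch (hypothetical conf): disabling a single exec on the GPU should force
# the named CPU operator back into the plan while the collected results still match.
def _example_fallback_collect():
    assert_gpu_fallback_collect(
        lambda spark: spark.range(100).selectExpr("id", "id * 2 AS doubled"),
        'ProjectExec',
        conf={'spark.rapids.sql.exec.ProjectExec': 'false'})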
def assert_gpu_sql_fallback_collect(df_fun, cpu_fallback_class_name, table_name, sql, conf=None, debug=False):
if conf is None:
conf = {}
def do_it_all(spark):
df = df_fun(spark)
df.createOrReplaceTempView(table_name)
if debug:
return data_gen.debug_df(spark.sql(sql))
else:
return spark.sql(sql)
assert_gpu_fallback_collect(do_it_all, cpu_fallback_class_name, conf)
def _assert_gpu_and_cpu_are_equal(func,
mode,
conf={},
is_cpu_first=True):
(bring_back, collect_type) = _prep_func_for_compare(func, mode)
conf = _prep_incompat_conf(conf)
def run_on_cpu():
print('### CPU RUN ###')
global cpu_start
cpu_start = time.time()
global from_cpu
from_cpu = with_cpu_session(bring_back, conf=conf)
global cpu_end
cpu_end = time.time()
def run_on_gpu():
print('### GPU RUN ###')
global gpu_start
gpu_start = time.time()
global from_gpu
from_gpu = with_gpu_session(bring_back, conf=conf)
global gpu_end
gpu_end = time.time()
if is_cpu_first:
run_on_cpu()
run_on_gpu()
else:
run_on_gpu()
run_on_cpu()
print('### {}: GPU TOOK {} CPU TOOK {} ###'.format(collect_type,
gpu_end - gpu_start, cpu_end - cpu_start))
if should_sort_locally():
from_cpu.sort(key=_RowCmp)
from_gpu.sort(key=_RowCmp)
assert_equal(from_cpu, from_gpu)
def run_with_cpu(func,
mode,
conf={}):
(bring_back, collect_type) = _prep_func_for_compare(func, mode)
conf = _prep_incompat_conf(conf)
print("run_with_cpu")
def run_on_cpu():
print('### CPU RUN ###')
global cpu_start
cpu_start = time.time()
global from_cpu
from_cpu = with_cpu_session(bring_back, conf=conf)
global cpu_end
cpu_end = time.time()
run_on_cpu()
print('### {}: CPU TOOK {} ###'.format(collect_type,
cpu_end - cpu_start))
if should_sort_locally():
from_cpu.sort(key=_RowCmp)
return from_cpu
def run_with_cpu_and_gpu(func,
mode,
conf={}):
(bring_back, collect_type) = _prep_func_for_compare(func, mode)
conf = _prep_incompat_conf(conf)
def run_on_cpu():
print('### CPU RUN ###')
global cpu_start
cpu_start = time.time()
global from_cpu
from_cpu = with_cpu_session(bring_back, conf=conf)
global cpu_end
cpu_end = time.time()
def run_on_gpu():
print('### GPU RUN ###')
global gpu_start
gpu_start = time.time()
global from_gpu
from_gpu = with_gpu_session(bring_back, conf=conf)
global gpu_end
gpu_end = time.time()
run_on_cpu()
run_on_gpu()
print('### {}: GPU TOOK {} CPU TOOK {} ###'.format(collect_type,
gpu_end - gpu_start, cpu_end - cpu_start))
if should_sort_locally():
from_cpu.sort(key=_RowCmp)
from_gpu.sort(key=_RowCmp)
return (from_cpu, from_gpu)
def assert_gpu_and_cpu_are_equal_collect(func, conf={}, is_cpu_first=True):
"""
Assert when running func on both the CPU and the GPU that the results are equal.
In this case the data is collected back to the driver and compared here, so be
careful about the amount of data returned.
"""
_assert_gpu_and_cpu_are_equal(func, 'COLLECT', conf=conf, is_cpu_first=is_cpu_first)
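# Minimal usage sketch (hypothetical path; spark_tmp_path fixture assumed): write a small
# dataset once on the CPU, then assert that reading it back collects to the same rows on
# CPU and GPU.
def _example_equal_collect(spark_tmp_path):
    data_path = spark_tmp_path + '/EXAMPLE_ROUND_TRIP'
    with_cpu_session(lambda spark: spark.range(100).selectExpr("id", "id % 7 AS k").write.parquet(data_path))
    assert_gpu_and_cpu_are_equal_collect(lambda spark: spark.read.parquet(data_path))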
def assert_gpu_and_cpu_are_equal_iterator(func, conf={}, is_cpu_first=True):
"""
Assert when running func on both the CPU and the GPU that the results are equal.
In this case the data is pulled back to the driver in chunks and compared here
so any amount of data can work, just be careful about how long it might take.
"""
_assert_gpu_and_cpu_are_equal(func, 'ITERATOR', conf=conf, is_cpu_first=is_cpu_first)
def assert_gpu_and_cpu_row_counts_equal(func, conf={}, is_cpu_first=True):
"""
Assert that the row counts from running the func are the same on both the CPU and GPU.
This function runs count() to only get the number of rows and compares that count
between the CPU and GPU. It does NOT compare any underlying data.
"""
_assert_gpu_and_cpu_are_equal(func, 'COUNT', conf=conf, is_cpu_first=is_cpu_first)
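# Brief sketch: only the row counts are compared, so this variant suits queries whose
# per-row output is large or not worth materializing. The path below is hypothetical.
def _example_row_counts_equal(spark_tmp_path):
    data_path = spark_tmp_path + '/EXAMPLE_COUNT_DATA'
    with_cpu_session(lambda spark: spark.range(1000).write.parquet(data_path))
    assert_gpu_and_cpu_row_counts_equal(lambda spark: spark.read.parquet(data_path))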
def assert_gpu_and_cpu_are_equal_sql(df_fun, table_name, sql, conf=None, debug=False, is_cpu_first=True, validate_execs_in_gpu_plan=[]):
"""
Assert that the specified SQL query produces equal results on CPU and GPU.
:param df_fun: a function that will create the dataframe
:param table_name: Name of table to be created with the dataframe
:param sql: SQL query to be run on the specified table
:param conf: Any user-specified confs. Empty by default.
:param debug: Boolean to indicate if the SQL output should be printed
:param is_cpu_first: Boolean to indicate if the CPU should be run first or not
:param validate_execs_in_gpu_plan: String list of expressions to be validated in the GPU plan.
:return: Assertion failure, if results from CPU and GPU do not match.
"""
if conf is None:
conf = {}
def do_it_all(spark):
df = df_fun(spark)
df.createOrReplaceTempView(table_name)
# we hold off on setting the validate execs until after creating the temp view
spark.conf.set('spark.rapids.sql.test.validateExecsInGpuPlan', ','.join(validate_execs_in_gpu_plan))
if debug:
return data_gen.debug_df(spark.sql(sql))
else:
return spark.sql(sql)
assert_gpu_and_cpu_are_equal_collect(do_it_all, conf, is_cpu_first=is_cpu_first)
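# Minimal sketch of the SQL variant: the dataframe from df_fun is registered under
# table_name and the query text runs against it on both CPU and GPU. Names are illustrative.
def _example_equal_sql():
    assert_gpu_and_cpu_are_equal_sql(
        lambda spark: spark.range(100).selectExpr("id", "id % 3 AS k"),
        "example_tbl",
        "SELECT k, COUNT(*) AS cnt FROM example_tbl GROUP BY k")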
def assert_spark_exception(func, error_message):
"""
Assert that a specific Java exception is thrown
:param func: a function to be verified
    :param error_message: a string such as the one produced by java.lang.Exception.toString
:return: Assertion failure if no exception matching error_message has occurred.
"""
with pytest.raises(Exception) as excinfo:
func()
actual_error = excinfo.exconly()
assert error_message in actual_error, f"Expected error '{error_message}' did not appear in '{actual_error}'"
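# Sketch of the calling pattern: wrap the action in a zero-argument lambda so the exception
# is raised inside this helper, and match on a substring of the exception text. The SQL
# below simply uses raise_error to produce a predictable message.
def _example_spark_exception():
    assert_spark_exception(
        lambda: with_cpu_session(lambda spark: spark.sql("SELECT raise_error('boom')").collect()),
        "boom")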
def assert_gpu_and_cpu_error(df_fun, conf, error_message):
"""
Assert that GPU and CPU execution results in a specific Java exception thrown
:param df_fun: a function to be verified
:param conf: Spark config
    :param error_message: a string such as the one produced by java.lang.Exception.toString
    :return: Assertion failure if either the GPU or CPU run does not generate the expected
        error message
"""
assert_spark_exception(lambda: with_cpu_session(df_fun, conf), error_message)
assert_spark_exception(lambda: with_gpu_session(df_fun, conf), error_message)
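# Illustrative sketch: both the CPU and GPU runs are expected to fail with matching error
# text, e.g. an ANSI-mode overflow. The conf and message fragment are examples only.
def _example_gpu_and_cpu_error():
    assert_gpu_and_cpu_error(
        lambda spark: spark.sql("SELECT 9223372036854775807L + 1L").collect(),
        conf={'spark.sql.ansi.enabled': 'true'},
        error_message="ArithmeticException")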
def with_cpu_sql(df_fun, table_name, sql, conf=None, debug=False, is_cpu_first=True):
if conf is None:
conf = {}
def do_it_all(spark):
df = df_fun(spark)
df.createOrReplaceTempView(table_name)
if debug:
return data_gen.debug_df(spark.sql(sql))
else:
return spark.sql(sql)
assert_gpu_and_cpu_are_equal_collect(do_it_all, conf, is_cpu_first=is_cpu_first)
| spark-rapids-branch-23.10 | integration_tests/src/main/python/asserts.py |
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
from asserts import assert_cpu_and_gpu_are_equal_collect_with_capture, assert_cpu_and_gpu_are_equal_sql_with_capture, assert_gpu_and_cpu_are_equal_collect, assert_gpu_and_cpu_row_counts_equal, \
assert_gpu_fallback_collect, assert_gpu_and_cpu_are_equal_sql, assert_gpu_and_cpu_error, assert_spark_exception
from data_gen import *
from marks import *
import pyarrow as pa
import pyarrow.parquet as pa_pq
from pyspark.sql.types import *
from pyspark.sql.functions import *
from spark_init_internal import spark_version
from spark_session import with_cpu_session, with_gpu_session, is_before_spark_320, is_before_spark_330, is_spark_321cdh
from conftest import is_databricks_runtime, is_dataproc_runtime
def read_parquet_df(data_path):
return lambda spark : spark.read.parquet(data_path)
def read_parquet_sql(data_path):
return lambda spark : spark.sql('select * from parquet.`{}`'.format(data_path))
rebase_write_corrected_conf = {
'spark.sql.legacy.parquet.datetimeRebaseModeInWrite': 'CORRECTED',
'spark.sql.legacy.parquet.int96RebaseModeInWrite': 'CORRECTED'
}
rebase_write_legacy_conf = {
'spark.sql.legacy.parquet.datetimeRebaseModeInWrite': 'LEGACY',
'spark.sql.legacy.parquet.int96RebaseModeInWrite': 'LEGACY'
}
# Like the standard map_gens_sample but with timestamps limited
parquet_map_gens = [MapGen(f(nullable=False), f()) for f in [
BooleanGen, ByteGen, ShortGen, IntegerGen, LongGen, FloatGen, DoubleGen, DateGen,
lambda nullable=True: TimestampGen(start=datetime(1900, 1, 1, tzinfo=timezone.utc), nullable=nullable)]] +\
[simple_string_to_string_map_gen,
MapGen(StringGen(pattern='key_[0-9]', nullable=False), ArrayGen(string_gen), max_length=10),
MapGen(RepeatSeqGen(IntegerGen(nullable=False), 10), long_gen, max_length=10),
MapGen(StringGen(pattern='key_[0-9]', nullable=False), simple_string_to_string_map_gen)]
parquet_gens_list = [[byte_gen, short_gen, int_gen, long_gen, float_gen, double_gen,
string_gen, boolean_gen, date_gen,
TimestampGen(start=datetime(1900, 1, 1, tzinfo=timezone.utc)), ArrayGen(byte_gen),
ArrayGen(long_gen), ArrayGen(string_gen), ArrayGen(date_gen),
ArrayGen(TimestampGen(start=datetime(1900, 1, 1, tzinfo=timezone.utc))),
ArrayGen(decimal_gen_64bit),
ArrayGen(ArrayGen(byte_gen)),
StructGen([['child0', ArrayGen(byte_gen)], ['child1', byte_gen], ['child2', float_gen], ['child3', decimal_gen_64bit]]),
ArrayGen(StructGen([['child0', string_gen], ['child1', double_gen], ['child2', int_gen]]))] +
parquet_map_gens + decimal_gens,
pytest.param([timestamp_gen], marks=pytest.mark.xfail(reason='https://github.com/NVIDIA/spark-rapids/issues/132'))]
# test with original parquet file reader, the multi-file parallel reader for cloud, and coalesce file reader for
# non-cloud
original_parquet_file_reader_conf = {'spark.rapids.sql.format.parquet.reader.type': 'PERFILE'}
multithreaded_parquet_file_reader_conf = {'spark.rapids.sql.format.parquet.reader.type': 'MULTITHREADED',
'spark.rapids.sql.reader.multithreaded.combine.sizeBytes': '0',
'spark.rapids.sql.reader.multithreaded.read.keepOrder': True}
coalesce_parquet_file_reader_conf = {'spark.rapids.sql.format.parquet.reader.type': 'COALESCING'}
coalesce_parquet_file_reader_multithread_filter_chunked_conf = {'spark.rapids.sql.format.parquet.reader.type': 'COALESCING',
'spark.rapids.sql.coalescing.reader.numFilterParallel': '2',
'spark.rapids.sql.reader.chunked': True}
coalesce_parquet_file_reader_multithread_filter_conf = {'spark.rapids.sql.format.parquet.reader.type': 'COALESCING',
'spark.rapids.sql.coalescing.reader.numFilterParallel': '2',
'spark.rapids.sql.reader.chunked': False}
native_parquet_file_reader_conf = {'spark.rapids.sql.format.parquet.reader.type': 'PERFILE',
'spark.rapids.sql.format.parquet.reader.footer.type': 'NATIVE'}
native_multithreaded_parquet_file_reader_conf = {'spark.rapids.sql.format.parquet.reader.type': 'MULTITHREADED',
'spark.rapids.sql.format.parquet.reader.footer.type': 'NATIVE',
'spark.rapids.sql.reader.multithreaded.combine.sizeBytes': '0',
'spark.rapids.sql.reader.multithreaded.read.keepOrder': True}
native_coalesce_parquet_file_reader_conf = {'spark.rapids.sql.format.parquet.reader.type': 'COALESCING',
'spark.rapids.sql.format.parquet.reader.footer.type': 'NATIVE'}
native_coalesce_parquet_file_reader_chunked_conf = {'spark.rapids.sql.format.parquet.reader.type': 'COALESCING',
'spark.rapids.sql.format.parquet.reader.footer.type': 'NATIVE',
'spark.rapids.sql.reader.chunked': True}
combining_multithreaded_parquet_file_reader_conf_ordered = {'spark.rapids.sql.format.parquet.reader.type': 'MULTITHREADED',
'spark.rapids.sql.reader.multithreaded.combine.sizeBytes': '64m',
'spark.rapids.sql.reader.multithreaded.read.keepOrder': True}
combining_multithreaded_parquet_file_reader_conf_unordered = pytest.param({'spark.rapids.sql.format.parquet.reader.type': 'MULTITHREADED',
'spark.rapids.sql.reader.multithreaded.combine.sizeBytes': '64m',
'spark.rapids.sql.reader.multithreaded.read.keepOrder': False}, marks=pytest.mark.ignore_order(local=True))
combining_multithreaded_parquet_file_reader_deprecated_conf_ordered = {
'spark.rapids.sql.format.parquet.reader.type': 'MULTITHREADED',
'spark.rapids.sql.format.parquet.multithreaded.combine.sizeBytes': '64m',
'spark.rapids.sql.format.parquet.multithreaded.read.keepOrder': True}
# For now the native configs are not compatible with spark.sql.parquet.writeLegacyFormat written files
# for nested types
reader_opt_confs_native = [native_parquet_file_reader_conf, native_multithreaded_parquet_file_reader_conf,
native_coalesce_parquet_file_reader_conf,
coalesce_parquet_file_reader_multithread_filter_chunked_conf,
native_coalesce_parquet_file_reader_chunked_conf]
reader_opt_confs_no_native = [original_parquet_file_reader_conf, multithreaded_parquet_file_reader_conf,
coalesce_parquet_file_reader_conf, coalesce_parquet_file_reader_multithread_filter_conf,
combining_multithreaded_parquet_file_reader_conf_ordered,
combining_multithreaded_parquet_file_reader_deprecated_conf_ordered]
reader_opt_confs = reader_opt_confs_native + reader_opt_confs_no_native
@pytest.mark.parametrize('parquet_gens', [[byte_gen, short_gen, int_gen, long_gen]], ids=idfn)
@pytest.mark.parametrize('read_func', [read_parquet_df])
@pytest.mark.parametrize('reader_confs', [coalesce_parquet_file_reader_multithread_filter_conf,
coalesce_parquet_file_reader_multithread_filter_chunked_conf])
@pytest.mark.parametrize('v1_enabled_list', ["", "parquet"])
def test_parquet_read_coalescing_multiple_files(spark_tmp_path, parquet_gens, read_func, reader_confs, v1_enabled_list):
gen_list = [('_c' + str(i), gen) for i, gen in enumerate(parquet_gens)]
data_path = spark_tmp_path + '/PARQUET_DATA'
with_cpu_session(
# high number of slices so that a single task reads more than 1 file
lambda spark : gen_df(spark, gen_list, num_slices=30).write.parquet(data_path),
conf=rebase_write_corrected_conf)
all_confs = copy_and_update(reader_confs, {
'spark.sql.sources.useV1SourceList': v1_enabled_list,
        # set the int96 rebase mode values because it's LEGACY in Databricks, which would preclude this op from running on GPU
'spark.sql.legacy.parquet.int96RebaseModeInRead' : 'CORRECTED',
'spark.sql.legacy.parquet.datetimeRebaseModeInRead': 'CORRECTED'})
    # once https://github.com/NVIDIA/spark-rapids/issues/1126 is fixed we can remove the spark.sql.legacy.parquet.datetimeRebaseModeInRead config, which is a workaround
    # for nested timestamp/date support
assert_gpu_and_cpu_are_equal_collect(read_func(data_path),
conf=all_confs)
@pytest.mark.parametrize('v1_enabled_list', ["", "parquet"])
def test_parquet_read_avoid_coalesce_incompatible_files(spark_tmp_path, v1_enabled_list):
data_path = spark_tmp_path + '/PARQUET_DATA'
def setup_table(spark):
df1 = spark.createDataFrame([(("a", "b"),)], "x: struct<y: string, z: string>")
df1.write.parquet(data_path + "/data1")
df2 = spark.createDataFrame([(("a",),)], "x: struct<z: string>")
df2.write.parquet(data_path + "/data2")
with_cpu_session(setup_table, conf=rebase_write_corrected_conf)
# Configure confs to read as a single task
all_confs = copy_and_update(coalesce_parquet_file_reader_multithread_filter_conf, {
"spark.sql.sources.useV1SourceList": v1_enabled_list,
"spark.sql.files.minPartitionNum": "1"})
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.read\
.schema("x STRUCT<y: string, z: string>")\
.option("recursiveFileLookup", "true").parquet(data_path),
conf=all_confs)
@pytest.mark.parametrize('parquet_gens', parquet_gens_list, ids=idfn)
@pytest.mark.parametrize('read_func', [read_parquet_df, read_parquet_sql])
@pytest.mark.parametrize('reader_confs', reader_opt_confs)
@pytest.mark.parametrize('v1_enabled_list', ["", "parquet"])
def test_parquet_read_round_trip(spark_tmp_path, parquet_gens, read_func, reader_confs, v1_enabled_list):
gen_list = [('_c' + str(i), gen) for i, gen in enumerate(parquet_gens)]
data_path = spark_tmp_path + '/PARQUET_DATA'
with_cpu_session(
lambda spark : gen_df(spark, gen_list).write.parquet(data_path),
conf=rebase_write_corrected_conf)
all_confs = copy_and_update(reader_confs, {
'spark.sql.sources.useV1SourceList': v1_enabled_list,
        # set the int96 rebase mode values because it's LEGACY in Databricks, which would preclude this op from running on GPU
'spark.sql.legacy.parquet.int96RebaseModeInRead' : 'CORRECTED',
'spark.sql.legacy.parquet.datetimeRebaseModeInRead': 'CORRECTED'})
    # once https://github.com/NVIDIA/spark-rapids/issues/1126 is fixed we can remove the spark.sql.legacy.parquet.datetimeRebaseModeInRead config, which is a workaround
    # for nested timestamp/date support
assert_gpu_and_cpu_are_equal_collect(read_func(data_path),
conf=all_confs)
@allow_non_gpu('FileSourceScanExec')
@pytest.mark.parametrize('read_func', [read_parquet_df, read_parquet_sql])
@pytest.mark.parametrize('disable_conf', ['spark.rapids.sql.format.parquet.enabled', 'spark.rapids.sql.format.parquet.read.enabled'])
def test_parquet_fallback(spark_tmp_path, read_func, disable_conf):
data_gens = [string_gen,
byte_gen, short_gen, int_gen, long_gen, boolean_gen] + decimal_gens
gen_list = [('_c' + str(i), gen) for i, gen in enumerate(data_gens)]
gen = StructGen(gen_list, nullable=False)
data_path = spark_tmp_path + '/PARQUET_DATA'
reader = read_func(data_path)
with_cpu_session(
lambda spark : gen_df(spark, gen).write.parquet(data_path))
assert_gpu_fallback_collect(
lambda spark : reader(spark).select(f.col('*'), f.col('_c2') + f.col('_c3')),
'FileSourceScanExec',
conf={disable_conf: 'false',
"spark.sql.sources.useV1SourceList": "parquet"})
@pytest.mark.parametrize('read_func', [read_parquet_df, read_parquet_sql])
@pytest.mark.parametrize('binary_as_string', [True, False])
@pytest.mark.parametrize('reader_confs', reader_opt_confs)
def test_parquet_read_round_trip_binary(std_input_path, read_func, binary_as_string, reader_confs):
data_path = std_input_path + '/binary_as_string.parquet'
all_confs = copy_and_update(reader_confs, {
'spark.sql.parquet.binaryAsString': binary_as_string,
        # set the int96 rebase mode values because it's LEGACY in Databricks, which would preclude this op from running on GPU
'spark.sql.legacy.parquet.int96RebaseModeInRead' : 'CORRECTED',
'spark.sql.legacy.parquet.datetimeRebaseModeInRead': 'CORRECTED'})
    # once https://github.com/NVIDIA/spark-rapids/issues/1126 is fixed we can remove the spark.sql.legacy.parquet.datetimeRebaseModeInRead config, which is a workaround
    # for nested timestamp/date support
assert_gpu_and_cpu_are_equal_collect(read_func(data_path),
conf=all_confs)
@pytest.mark.parametrize('read_func', [read_parquet_df, read_parquet_sql])
@pytest.mark.parametrize('binary_as_string', [True, False])
@pytest.mark.parametrize('data_gen', [binary_gen,
ArrayGen(binary_gen),
StructGen([('a_1', binary_gen), ('a_2', string_gen)]),
StructGen([('a_1', ArrayGen(binary_gen))]),
MapGen(ByteGen(nullable=False), binary_gen)], ids=idfn)
def test_binary_df_read(spark_tmp_path, binary_as_string, read_func, data_gen):
data_path = spark_tmp_path + '/PARQUET_DATA'
with_cpu_session(lambda spark: unary_op_df(spark, data_gen).write.parquet(data_path))
all_confs = {
'spark.sql.parquet.binaryAsString': binary_as_string,
        # set the int96 rebase mode values because it's LEGACY in Databricks, which would preclude this op from running on GPU
'spark.sql.legacy.parquet.int96RebaseModeInRead': 'CORRECTED',
'spark.sql.legacy.parquet.datetimeRebaseModeInRead': 'CORRECTED'}
assert_gpu_and_cpu_are_equal_collect(read_func(data_path), conf=all_confs)
@pytest.mark.parametrize('v1_enabled_list', ["", "parquet"])
def test_parquet_read_forced_binary_schema(std_input_path, v1_enabled_list):
data_path = std_input_path + '/binary_as_string.parquet'
all_confs = copy_and_update(reader_opt_confs[0], {
'spark.sql.sources.useV1SourceList': v1_enabled_list,
        # set the int96 rebase mode values because it's LEGACY in Databricks, which would preclude this op from running on GPU
'spark.sql.legacy.parquet.int96RebaseModeInRead' : 'CORRECTED',
'spark.sql.legacy.parquet.datetimeRebaseModeInRead': 'CORRECTED'})
    # once https://github.com/NVIDIA/spark-rapids/issues/1126 is fixed we can remove the spark.sql.legacy.parquet.datetimeRebaseModeInRead config, which is a workaround
    # for nested timestamp/date support
# This forces a Binary Column to a String Column and a String Column to a Binary Column.
schema = StructType([StructField("a", LongType()), StructField("b", StringType()), StructField("c", BinaryType())])
assert_gpu_and_cpu_are_equal_collect(lambda spark : spark.read.schema(schema).parquet(data_path),
conf=all_confs)
@pytest.mark.parametrize('read_func', [read_parquet_df, read_parquet_sql])
@pytest.mark.parametrize('reader_confs', reader_opt_confs)
@pytest.mark.parametrize('v1_enabled_list', ["", "parquet"])
def test_parquet_read_round_trip_binary_as_string(std_input_path, read_func, reader_confs, v1_enabled_list):
data_path = std_input_path + '/binary_as_string.parquet'
all_confs = copy_and_update(reader_confs, {
'spark.sql.sources.useV1SourceList': v1_enabled_list,
'spark.sql.parquet.binaryAsString': 'true',
        # set the int96 rebase mode values because it's LEGACY in Databricks, which would preclude this op from running on GPU
'spark.sql.legacy.parquet.int96RebaseModeInRead' : 'CORRECTED',
'spark.sql.legacy.parquet.datetimeRebaseModeInRead': 'CORRECTED'})
    # once https://github.com/NVIDIA/spark-rapids/issues/1126 is fixed we can remove the spark.sql.legacy.parquet.datetimeRebaseModeInRead config, which is a workaround
    # for nested timestamp/date support
assert_gpu_and_cpu_are_equal_collect(read_func(data_path),
conf=all_confs)
parquet_compress_options = ['none', 'uncompressed', 'snappy', 'gzip']
# zstd is available in spark 3.2.0 and later.
if not is_before_spark_320():
parquet_compress_options.append('zstd')
# The following codecs need extra jars: 'lzo', 'lz4', 'brotli', 'zstd'
# https://github.com/NVIDIA/spark-rapids/issues/143
@pytest.mark.parametrize('compress', parquet_compress_options)
@pytest.mark.parametrize('reader_confs', reader_opt_confs)
@pytest.mark.parametrize('v1_enabled_list', ["", "parquet"])
def test_parquet_compress_read_round_trip(spark_tmp_path, compress, v1_enabled_list, reader_confs):
data_path = spark_tmp_path + '/PARQUET_DATA'
with_cpu_session(
lambda spark : binary_op_df(spark, long_gen).write.parquet(data_path),
conf={'spark.sql.parquet.compression.codec': compress})
all_confs = copy_and_update(reader_confs, {'spark.sql.sources.useV1SourceList': v1_enabled_list})
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.read.parquet(data_path),
conf=all_confs)
parquet_pred_push_gens = [
byte_gen, short_gen, int_gen, long_gen, float_gen, double_gen, boolean_gen,
string_gen, date_gen,
# Once https://github.com/NVIDIA/spark-rapids/issues/132 is fixed replace this with
# timestamp_gen
TimestampGen(start=datetime(1900, 1, 1, tzinfo=timezone.utc))] + decimal_gens
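# The predicate push down test below writes the data ordered by 'a', presumably so that each row
# group gets tight min/max statistics and the pushed-down filter can actually prune row groups.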
@pytest.mark.parametrize('parquet_gen', parquet_pred_push_gens, ids=idfn)
@pytest.mark.parametrize('read_func', [read_parquet_df, read_parquet_sql])
@pytest.mark.parametrize('reader_confs', reader_opt_confs)
@pytest.mark.parametrize('v1_enabled_list', ["", "parquet"])
def test_parquet_pred_push_round_trip(spark_tmp_path, parquet_gen, read_func, v1_enabled_list, reader_confs):
data_path = spark_tmp_path + '/PARQUET_DATA'
gen_list = [('a', RepeatSeqGen(parquet_gen, 100)), ('b', parquet_gen)]
s0 = gen_scalar(parquet_gen, force_no_nulls=True)
with_cpu_session(
lambda spark : gen_df(spark, gen_list).orderBy('a').write.parquet(data_path),
conf=rebase_write_corrected_conf)
rf = read_func(data_path)
all_confs = copy_and_update(reader_confs, {'spark.sql.sources.useV1SourceList': v1_enabled_list})
assert_gpu_and_cpu_are_equal_collect(
lambda spark: rf(spark).select(f.col('a') >= s0),
conf=all_confs)
parquet_ts_write_options = ['INT96', 'TIMESTAMP_MICROS', 'TIMESTAMP_MILLIS']
# Once https://github.com/NVIDIA/spark-rapids/issues/1126 is fixed, delete this test and merge it
# into test_ts_read_round_trip; nested timestamps and dates are not supported right now.
@pytest.mark.parametrize('gen', [ArrayGen(TimestampGen(start=datetime(1900, 1, 1, tzinfo=timezone.utc))),
ArrayGen(ArrayGen(TimestampGen(start=datetime(1900, 1, 1, tzinfo=timezone.utc))))], ids=idfn)
@pytest.mark.parametrize('ts_write', parquet_ts_write_options)
@pytest.mark.parametrize('ts_rebase', ['CORRECTED', 'LEGACY'])
@pytest.mark.parametrize('reader_confs', reader_opt_confs)
@pytest.mark.parametrize('v1_enabled_list', ["", "parquet"])
@pytest.mark.xfail(reason='https://github.com/NVIDIA/spark-rapids/issues/1126')
def test_parquet_ts_read_round_trip_nested(gen, spark_tmp_path, ts_write, ts_rebase, v1_enabled_list, reader_confs):
data_path = spark_tmp_path + '/PARQUET_DATA'
with_cpu_session(
lambda spark : unary_op_df(spark, gen).write.parquet(data_path),
conf={'spark.sql.legacy.parquet.datetimeRebaseModeInWrite': ts_rebase,
'spark.sql.legacy.parquet.int96RebaseModeInWrite': ts_rebase,
'spark.sql.parquet.outputTimestampType': ts_write})
all_confs = copy_and_update(reader_confs, {'spark.sql.sources.useV1SourceList': v1_enabled_list})
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.read.parquet(data_path),
conf=all_confs)
# Once https://github.com/NVIDIA/spark-rapids/issues/132 is fixed replace this with
# timestamp_gen
@pytest.mark.parametrize('gen', [TimestampGen(start=datetime(1900, 1, 1, tzinfo=timezone.utc))], ids=idfn)
@pytest.mark.parametrize('ts_write', parquet_ts_write_options)
@pytest.mark.parametrize('ts_rebase', ['CORRECTED', 'LEGACY'])
@pytest.mark.parametrize('reader_confs', reader_opt_confs)
@pytest.mark.parametrize('v1_enabled_list', ["", "parquet"])
def test_ts_read_round_trip(gen, spark_tmp_path, ts_write, ts_rebase, v1_enabled_list, reader_confs):
data_path = spark_tmp_path + '/PARQUET_DATA'
with_cpu_session(
lambda spark : unary_op_df(spark, gen).write.parquet(data_path),
conf={'spark.sql.legacy.parquet.datetimeRebaseModeInWrite': ts_rebase,
'spark.sql.legacy.parquet.int96RebaseModeInWrite': ts_rebase,
'spark.sql.parquet.outputTimestampType': ts_write})
all_confs = copy_and_update(reader_confs, {'spark.sql.sources.useV1SourceList': v1_enabled_list})
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.read.parquet(data_path),
conf=all_confs)
def readParquetCatchException(spark, data_path):
with pytest.raises(Exception) as e_info:
df = spark.read.parquet(data_path).collect()
assert e_info.match(r".*SparkUpgradeException.*")
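# Helper for the LEGACY rebase test below: reading data that was written with the LEGACY datetime
# rebase mode is expected to surface a SparkUpgradeException.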
# Once https://github.com/NVIDIA/spark-rapids/issues/1126 is fixed nested timestamps and dates should be added in
# Once https://github.com/NVIDIA/spark-rapids/issues/132 is fixed replace this with
# timestamp_gen
@pytest.mark.parametrize('gen', [TimestampGen(start=datetime(1590, 1, 1, tzinfo=timezone.utc))], ids=idfn)
@pytest.mark.parametrize('ts_write', parquet_ts_write_options)
@pytest.mark.parametrize('ts_rebase', ['LEGACY'])
@pytest.mark.parametrize('reader_confs', reader_opt_confs)
@pytest.mark.parametrize('v1_enabled_list', ["", "parquet"])
def test_ts_read_fails_datetime_legacy(gen, spark_tmp_path, ts_write, ts_rebase, v1_enabled_list, reader_confs):
data_path = spark_tmp_path + '/PARQUET_DATA'
with_cpu_session(
lambda spark : unary_op_df(spark, gen).write.parquet(data_path),
conf={'spark.sql.legacy.parquet.datetimeRebaseModeInWrite': ts_rebase,
'spark.sql.legacy.parquet.int96RebaseModeInWrite': ts_rebase,
'spark.sql.parquet.outputTimestampType': ts_write})
all_confs = copy_and_update(reader_confs, {'spark.sql.sources.useV1SourceList': v1_enabled_list})
with_gpu_session(
lambda spark : readParquetCatchException(spark, data_path),
conf=all_confs)
@pytest.mark.parametrize('parquet_gens', [[byte_gen, short_gen, decimal_gen_32bit], decimal_gens,
[ArrayGen(decimal_gen_32bit, max_length=10)],
[StructGen([['child0', decimal_gen_32bit]])]], ids=idfn)
@pytest.mark.parametrize('read_func', [read_parquet_df, read_parquet_sql])
@pytest.mark.parametrize('reader_confs', reader_opt_confs)
@pytest.mark.parametrize('v1_enabled_list', ["", "parquet"])
def test_parquet_decimal_read_legacy(spark_tmp_path, parquet_gens, read_func, reader_confs, v1_enabled_list):
gen_list = [('_c' + str(i), gen) for i, gen in enumerate(parquet_gens)]
data_path = spark_tmp_path + '/PARQUET_DATA'
with_cpu_session(
lambda spark : gen_df(spark, gen_list).write.parquet(data_path),
conf={'spark.sql.parquet.writeLegacyFormat': 'true'})
all_confs = copy_and_update(reader_confs, {'spark.sql.sources.useV1SourceList': v1_enabled_list})
assert_gpu_and_cpu_are_equal_collect(read_func(data_path), conf=all_confs)
parquet_gens_legacy_list = [[byte_gen, short_gen, int_gen, long_gen, float_gen, double_gen,
string_gen, boolean_gen, DateGen(start=date(1590, 1, 1)),
TimestampGen(start=datetime(1900, 1, 1, tzinfo=timezone.utc))] + decimal_gens,
pytest.param([timestamp_gen], marks=pytest.mark.xfail(reason='https://github.com/NVIDIA/spark-rapids/issues/133')),
pytest.param([date_gen], marks=pytest.mark.xfail(reason='https://github.com/NVIDIA/spark-rapids/issues/133'))]
@pytest.mark.parametrize('parquet_gens', parquet_gens_legacy_list, ids=idfn)
@pytest.mark.parametrize('reader_confs', reader_opt_confs)
@pytest.mark.parametrize('v1_enabled_list', ["", "parquet"])
def test_parquet_read_round_trip_legacy(spark_tmp_path, parquet_gens, v1_enabled_list, reader_confs):
gen_list = [('_c' + str(i), gen) for i, gen in enumerate(parquet_gens)]
data_path = spark_tmp_path + '/PARQUET_DATA'
with_cpu_session(
lambda spark : gen_df(spark, gen_list).write.parquet(data_path),
conf=rebase_write_legacy_conf)
all_confs = copy_and_update(reader_confs, {'spark.sql.sources.useV1SourceList': v1_enabled_list})
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.read.parquet(data_path),
conf=all_confs)
@pytest.mark.parametrize('reader_confs', reader_opt_confs)
@pytest.mark.parametrize('v1_enabled_list', ["", "parquet"])
@pytest.mark.parametrize('batch_size', [100, INT_MAX])
def test_parquet_simple_partitioned_read(spark_tmp_path, v1_enabled_list, reader_confs, batch_size):
# Once https://github.com/NVIDIA/spark-rapids/issues/133 and https://github.com/NVIDIA/spark-rapids/issues/132 are fixed
# we should go with a more standard set of generators
parquet_gens = [byte_gen, short_gen, int_gen, long_gen, float_gen, double_gen,
string_gen, boolean_gen, DateGen(start=date(1590, 1, 1)),
TimestampGen(start=datetime(1900, 1, 1, tzinfo=timezone.utc))] + decimal_gens
gen_list = [('_c' + str(i), gen) for i, gen in enumerate(parquet_gens)]
first_data_path = spark_tmp_path + '/PARQUET_DATA/key=0/key2=20'
with_cpu_session(
lambda spark : gen_df(spark, gen_list).write.parquet(first_data_path),
conf=rebase_write_legacy_conf)
second_data_path = spark_tmp_path + '/PARQUET_DATA/key=1/key2=21'
with_cpu_session(
lambda spark : gen_df(spark, gen_list).write.parquet(second_data_path),
conf=rebase_write_corrected_conf)
third_data_path = spark_tmp_path + '/PARQUET_DATA/key=2/key2=22'
with_cpu_session(
lambda spark : gen_df(spark, gen_list).write.parquet(third_data_path),
conf=rebase_write_corrected_conf)
data_path = spark_tmp_path + '/PARQUET_DATA'
all_confs = copy_and_update(reader_confs,
{'spark.sql.sources.useV1SourceList': v1_enabled_list,
'spark.rapids.sql.batchSizeBytes': batch_size})
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.read.parquet(data_path),
conf=all_confs)
# In this test we read the data, but only select the key the data was partitioned by
@pytest.mark.parametrize('reader_confs', reader_opt_confs)
@pytest.mark.parametrize('v1_enabled_list', ["", "parquet"])
def test_parquet_partitioned_read_just_partitions(spark_tmp_path, v1_enabled_list, reader_confs):
parquet_gens = [byte_gen]
gen_list = [('_c' + str(i), gen) for i, gen in enumerate(parquet_gens)]
first_data_path = spark_tmp_path + '/PARQUET_DATA/key=0'
with_cpu_session(
lambda spark : gen_df(spark, gen_list).write.parquet(first_data_path),
conf=rebase_write_legacy_conf)
second_data_path = spark_tmp_path + '/PARQUET_DATA/key=1'
with_cpu_session(
lambda spark : gen_df(spark, gen_list).write.parquet(second_data_path),
conf=rebase_write_corrected_conf)
data_path = spark_tmp_path + '/PARQUET_DATA'
all_confs = copy_and_update(reader_confs, {'spark.sql.sources.useV1SourceList': v1_enabled_list})
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.read.parquet(data_path).select("key"),
conf=all_confs)
reader_opt_confs_with_unordered = reader_opt_confs + [combining_multithreaded_parquet_file_reader_conf_unordered]
@pytest.mark.parametrize('reader_confs', reader_opt_confs_with_unordered)
@pytest.mark.parametrize('v1_enabled_list', ["", "parquet"])
def test_parquet_read_schema_missing_cols(spark_tmp_path, v1_enabled_list, reader_confs):
# Once https://github.com/NVIDIA/spark-rapids/issues/133 and https://github.com/NVIDIA/spark-rapids/issues/132 are fixed
# we should go with a more standard set of generators
parquet_gens = [byte_gen, short_gen, int_gen, long_gen]
first_gen_list = [('_c' + str(i), gen) for i, gen in enumerate(parquet_gens)]
first_data_path = spark_tmp_path + '/PARQUET_DATA/key=0'
with_cpu_session(
lambda spark : gen_df(spark, first_gen_list, 10).write.parquet(first_data_path))
    # generate with one fewer column
second_parquet_gens = [byte_gen, short_gen, int_gen]
second_gen_list = [('_c' + str(i), gen) for i, gen in enumerate(second_parquet_gens)]
second_data_path = spark_tmp_path + '/PARQUET_DATA/key=1'
with_cpu_session(
lambda spark : gen_df(spark, second_gen_list, 10).write.parquet(second_data_path))
# third with same as first
third_gen_list = [('_c' + str(i), gen) for i, gen in enumerate(parquet_gens)]
third_data_path = spark_tmp_path + '/PARQUET_DATA/key=2'
with_cpu_session(
lambda spark : gen_df(spark, third_gen_list, 10).write.parquet(third_data_path))
# fourth with same as second
fourth_gen_list = [('_c' + str(i), gen) for i, gen in enumerate(second_parquet_gens)]
fourth_data_path = spark_tmp_path + '/PARQUET_DATA/key=3'
with_cpu_session(
lambda spark : gen_df(spark, fourth_gen_list, 10).write.parquet(fourth_data_path))
data_path = spark_tmp_path + '/PARQUET_DATA'
all_confs = copy_and_update(reader_confs, {
'spark.sql.sources.useV1SourceList': v1_enabled_list,
'spark.sql.files.maxPartitionBytes': '1g',
'spark.sql.files.minPartitionNum': '1'})
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.read.parquet(data_path),
conf=all_confs)
# To test https://github.com/NVIDIA/spark-rapids/pull/7405. Without the fix in that PR this test
# throws an exception about not being able to allocate a negative amount. To make this problem happen, we
# read a bunch of empty parquet blocks by filtering on only values in the first and last of 1000 files.
@pytest.mark.parametrize('reader_confs', [combining_multithreaded_parquet_file_reader_conf_ordered])
@pytest.mark.parametrize('v1_enabled_list', ["", "parquet"])
def test_parquet_read_buffer_allocation_empty_blocks(spark_tmp_path, v1_enabled_list, reader_confs):
data_path = spark_tmp_path + '/PARQUET_DATA/'
with_cpu_session(
lambda spark : spark.range(0, 1000, 1, 1000).write.parquet(data_path))
# we want all the files to be read by a single Spark task
all_confs = copy_and_update(reader_confs, {
'spark.sql.sources.useV1SourceList': v1_enabled_list,
'spark.sql.files.maxPartitionBytes': '2g',
'spark.sql.files.minPartitionNum': '1',
'spark.sql.openCostInBytes': '1'})
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.read.parquet(data_path).filter("id < 2 or id > 990"),
conf=all_confs)
@pytest.mark.parametrize('reader_confs', reader_opt_confs)
@pytest.mark.parametrize('v1_enabled_list', ["", "parquet"])
@pytest.mark.skipif(is_databricks_runtime(), reason="https://github.com/NVIDIA/spark-rapids/issues/7733")
def test_parquet_read_ignore_missing(spark_tmp_path, v1_enabled_list, reader_confs):
data_path = spark_tmp_path + '/PARQUET_DATA/'
data_path_tmp = spark_tmp_path + '/PARQUET_DATA_TMP/'
    # we need to create the files and get the dataframe, but remove a file before we
    # actually read the file contents. Here we save the data into a second directory
    # so that after the CPU run removes the file, the data can be put back to run
    # on the GPU.
def setup_data(spark):
        spark.range(0, 1000, 1, 2).write.parquet(data_path)
sc = spark.sparkContext
config = sc._jsc.hadoopConfiguration()
src_path = sc._jvm.org.apache.hadoop.fs.Path(data_path)
dst_path = sc._jvm.org.apache.hadoop.fs.Path(data_path_tmp)
fs = sc._jvm.org.apache.hadoop.fs.FileSystem.get(config)
sc._jvm.org.apache.hadoop.fs.FileUtil.copy(fs, src_path, fs, dst_path, False, config)
with_cpu_session(lambda spark : setup_data(spark))
file_deleted = ""
def read_and_remove(spark):
sc = spark.sparkContext
config = sc._jsc.hadoopConfiguration()
path = sc._jvm.org.apache.hadoop.fs.Path(data_path_tmp)
src_path = sc._jvm.org.apache.hadoop.fs.Path(data_path)
dst_path = sc._jvm.org.apache.hadoop.fs.Path(data_path_tmp)
fs = sc._jvm.org.apache.hadoop.fs.FileSystem.get(config)
fs.delete(src_path)
sc._jvm.org.apache.hadoop.fs.FileUtil.copy(fs, dst_path, fs, src_path, False, config)
        # a query using input_file_name doesn't use the combining reader, so get the input file names
        # in a separate dataframe that we ultimately don't return
df = spark.read.parquet(data_path)
df_with_file_names = df.withColumn("input_file", input_file_name())
distinct_file_names = df_with_file_names.select("input_file").distinct().sort("input_file")
num_files = distinct_file_names.count()
assert(num_files == 2)
files_to_read=[]
for i in range(0, 2):
files_to_read.insert(i, distinct_file_names.collect()[i][0])
df_to_test = spark.read.parquet(files_to_read[0], files_to_read[1])
        # we do our best to try to remove the file Spark will read first, but it's not
        # guaranteed
file_to_delete = files_to_read[1]
path_to_delete = sc._jvm.org.apache.hadoop.fs.Path(file_to_delete)
fs.delete(path_to_delete)
df_with_file_names_after = df.withColumn("input_file", input_file_name())
distinct_file_names_after = df_with_file_names_after.select("input_file").distinct()
num_files_after_delete = distinct_file_names_after.count()
assert(num_files_after_delete == 1)
return df_to_test
# we want all the files to be read by a single Spark task
all_confs = copy_and_update(reader_confs, {
'spark.sql.files.ignoreMissingFiles': 'true',
'spark.sql.sources.useV1SourceList': v1_enabled_list,
'spark.sql.files.maxPartitionBytes': '2g',
'spark.sql.files.minPartitionNum': '1',
'spark.sql.openCostInBytes': '1'})
assert_gpu_and_cpu_row_counts_equal(
lambda spark : read_and_remove(spark),
conf=all_confs)
@pytest.mark.parametrize('reader_confs', reader_opt_confs)
@pytest.mark.parametrize('v1_enabled_list', ["", "parquet"])
def test_parquet_read_merge_schema(spark_tmp_path, v1_enabled_list, reader_confs):
# Once https://github.com/NVIDIA/spark-rapids/issues/133 and https://github.com/NVIDIA/spark-rapids/issues/132 are fixed
# we should go with a more standard set of generators
parquet_gens = [byte_gen, short_gen, int_gen, long_gen, float_gen, double_gen,
string_gen, boolean_gen, DateGen(start=date(1590, 1, 1)),
TimestampGen(start=datetime(1900, 1, 1, tzinfo=timezone.utc))] + decimal_gens
first_gen_list = [('_c' + str(i), gen) for i, gen in enumerate(parquet_gens)]
first_data_path = spark_tmp_path + '/PARQUET_DATA/key=0'
with_cpu_session(
lambda spark : gen_df(spark, first_gen_list).write.parquet(first_data_path),
conf=rebase_write_legacy_conf)
second_gen_list = [(('_c' if i % 2 == 0 else '_b') + str(i), gen) for i, gen in enumerate(parquet_gens)]
second_data_path = spark_tmp_path + '/PARQUET_DATA/key=1'
with_cpu_session(
lambda spark : gen_df(spark, second_gen_list).write.parquet(second_data_path),
conf=rebase_write_corrected_conf)
data_path = spark_tmp_path + '/PARQUET_DATA'
all_confs = copy_and_update(reader_confs, {'spark.sql.sources.useV1SourceList': v1_enabled_list})
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.read.option('mergeSchema', 'true').parquet(data_path),
conf=all_confs)
@pytest.mark.parametrize('reader_confs', reader_opt_confs)
@pytest.mark.parametrize('v1_enabled_list', ["", "parquet"])
def test_parquet_read_merge_schema_from_conf(spark_tmp_path, v1_enabled_list, reader_confs):
# Once https://github.com/NVIDIA/spark-rapids/issues/133 and https://github.com/NVIDIA/spark-rapids/issues/132 are fixed
# we should go with a more standard set of generators
parquet_gens = [byte_gen, short_gen, int_gen, long_gen, float_gen, double_gen,
string_gen, boolean_gen, DateGen(start=date(1590, 1, 1)),
TimestampGen(start=datetime(1900, 1, 1, tzinfo=timezone.utc))] + decimal_gens
first_gen_list = [('_c' + str(i), gen) for i, gen in enumerate(parquet_gens)]
first_data_path = spark_tmp_path + '/PARQUET_DATA/key=0'
with_cpu_session(
lambda spark : gen_df(spark, first_gen_list).write.parquet(first_data_path),
conf=rebase_write_legacy_conf)
second_gen_list = [(('_c' if i % 2 == 0 else '_b') + str(i), gen) for i, gen in enumerate(parquet_gens)]
second_data_path = spark_tmp_path + '/PARQUET_DATA/key=1'
with_cpu_session(
lambda spark : gen_df(spark, second_gen_list).write.parquet(second_data_path),
conf=rebase_write_corrected_conf)
all_confs = copy_and_update(reader_confs, {
'spark.sql.sources.useV1SourceList': v1_enabled_list,
'spark.sql.parquet.mergeSchema': 'true'})
data_path = spark_tmp_path + '/PARQUET_DATA'
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.read.parquet(data_path),
conf=all_confs)
@pytest.mark.parametrize('v1_enabled_list', ["", "parquet"])
@pytest.mark.parametrize('reader_confs', reader_opt_confs, ids=idfn)
def test_read_parquet_with_empty_clipped_schema(spark_tmp_path, v1_enabled_list, reader_confs):
data_path = spark_tmp_path + '/PARQUET_DATA'
with_cpu_session(
lambda spark: gen_df(spark, [('a', int_gen)], length=100).write.parquet(data_path))
schema = StructType([StructField('b', IntegerType()), StructField('c', StringType())])
all_confs = copy_and_update(reader_confs, {'spark.sql.sources.useV1SourceList': v1_enabled_list})
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.read.schema(schema).parquet(data_path), conf=all_confs)
@pytest.mark.parametrize('reader_confs', reader_opt_confs)
@pytest.mark.parametrize('v1_enabled_list', ["", "parquet"])
def test_parquet_input_meta(spark_tmp_path, v1_enabled_list, reader_confs):
first_data_path = spark_tmp_path + '/PARQUET_DATA/key=0'
with_cpu_session(
lambda spark : unary_op_df(spark, long_gen).write.parquet(first_data_path))
second_data_path = spark_tmp_path + '/PARQUET_DATA/key=1'
with_cpu_session(
lambda spark : unary_op_df(spark, long_gen).write.parquet(second_data_path))
data_path = spark_tmp_path + '/PARQUET_DATA'
all_confs = copy_and_update(reader_confs, {'spark.sql.sources.useV1SourceList': v1_enabled_list,
'spark.rapids.sql.format.parquet.multithreaded.read.keepOrder': 'true'})
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.read.parquet(data_path)\
.filter(f.col('a') > 0)\
.selectExpr('a',
'input_file_name()',
'input_file_block_start()',
'input_file_block_length()'),
conf=all_confs)
@allow_non_gpu('ProjectExec', 'Alias', 'InputFileName', 'InputFileBlockStart', 'InputFileBlockLength',
'FilterExec', 'And', 'IsNotNull', 'GreaterThan', 'Literal',
'FileSourceScanExec', 'ColumnarToRowExec',
'BatchScanExec', 'ParquetScan')
@pytest.mark.parametrize('reader_confs', reader_opt_confs)
@pytest.mark.parametrize('disable_conf', ['spark.rapids.sql.format.parquet.enabled', 'spark.rapids.sql.format.parquet.read.enabled'])
@pytest.mark.parametrize('v1_enabled_list', ["", "parquet"])
def test_parquet_input_meta_fallback(spark_tmp_path, v1_enabled_list, reader_confs, disable_conf):
first_data_path = spark_tmp_path + '/PARQUET_DATA/key=0'
with_cpu_session(
lambda spark : unary_op_df(spark, long_gen).write.parquet(first_data_path))
second_data_path = spark_tmp_path + '/PARQUET_DATA/key=1'
with_cpu_session(
lambda spark : unary_op_df(spark, long_gen).write.parquet(second_data_path))
data_path = spark_tmp_path + '/PARQUET_DATA'
all_confs = copy_and_update(reader_confs, {
'spark.sql.sources.useV1SourceList': v1_enabled_list,
disable_conf: 'false'})
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.read.parquet(data_path)\
.filter(f.col('a') > 0)\
.selectExpr('a',
'input_file_name()',
'input_file_block_start()',
'input_file_block_length()'),
conf=all_confs)
def createBucketedTableAndJoin(spark, tbl_1, tbl_2):
spark.range(10e4).write.bucketBy(4, "id").sortBy("id").mode('overwrite').saveAsTable(tbl_1)
spark.range(10e6).write.bucketBy(4, "id").sortBy("id").mode('overwrite').saveAsTable(tbl_2)
bucketed_4_10e4 = spark.table(tbl_1)
bucketed_4_10e6 = spark.table(tbl_2)
return bucketed_4_10e4.join(bucketed_4_10e6, "id")
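# Both tables are bucketed and sorted on the join key, so Spark can (in principle) plan the join
# without a shuffle exchange; the test below only checks result correctness though.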
@ignore_order
@allow_non_gpu('DataWritingCommandExec,ExecutedCommandExec,WriteFilesExec')
@pytest.mark.parametrize('reader_confs', reader_opt_confs)
@pytest.mark.parametrize('v1_enabled_list', ["", "parquet"])
# this test would be better if we could ensure exchanges didn't exist - i.e. that the buckets were actually used
def test_buckets(spark_tmp_path, v1_enabled_list, reader_confs, spark_tmp_table_factory):
all_confs = copy_and_update(reader_confs, {
'spark.sql.sources.useV1SourceList': v1_enabled_list,
'spark.sql.autoBroadcastJoinThreshold': '-1'})
def do_it(spark):
return createBucketedTableAndJoin(spark, spark_tmp_table_factory.get(),
spark_tmp_table_factory.get())
assert_gpu_and_cpu_are_equal_collect(do_it, conf=all_confs)
@pytest.mark.parametrize('v1_enabled_list', ["", "parquet"])
def test_small_file_memory(spark_tmp_path, v1_enabled_list):
    # stress the memory usage by creating a lot of small files.
    # The more files we combine, the more the offsets will differ, which causes
    # the footer size to change.
    # Without the addition of extraMemory in GpuParquetScan this would cause reallocations
    # of the host memory buffers.
cols = [string_gen] * 4
gen_list = [('_c' + str(i), gen ) for i, gen in enumerate(cols)]
first_data_path = spark_tmp_path + '/PARQUET_DATA'
with_cpu_session(
lambda spark : gen_df(spark, gen_list).repartition(2000).write.parquet(first_data_path),
conf=rebase_write_corrected_conf)
data_path = spark_tmp_path + '/PARQUET_DATA'
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.read.parquet(data_path),
conf={'spark.rapids.sql.format.parquet.reader.type': 'COALESCING',
'spark.sql.sources.useV1SourceList': v1_enabled_list,
'spark.sql.files.maxPartitionBytes': "1g"})
_nested_pruning_schemas = [
([["a", StructGen([["c_1", StringGen()], ["c_2", LongGen()], ["c_3", ShortGen()]])]],
[["a", StructGen([["c_1", StringGen()]])]]),
([["a", StructGen([["c_1", StringGen()], ["c_2", LongGen()], ["c_3", ShortGen()]])]],
[["a", StructGen([["c_2", LongGen()]])]]),
([["a", StructGen([["c_1", StringGen()], ["c_2", LongGen()], ["c_3", ShortGen()]])]],
[["a", StructGen([["c_3", ShortGen()]])]]),
([["a", StructGen([["c_1", StringGen()], ["c_2", LongGen()], ["c_3", ShortGen()]])]],
[["a", StructGen([["c_1", StringGen()], ["c_3", ShortGen()]])]]),
([["a", StructGen([["c_1", StringGen()], ["c_2", LongGen()], ["c_3", ShortGen()]])]],
[["a", StructGen([["c_3", ShortGen()], ["c_2", LongGen()], ["c_1", StringGen()]])]]),
([["ar", ArrayGen(StructGen([["str_1", StringGen()],["str_2", StringGen()]]))]],
[["ar", ArrayGen(StructGen([["str_2", StringGen()]]))]]),
([["struct", StructGen([["c_1", StringGen()], ["case_insensitive", LongGen()], ["c_3", ShortGen()]])]],
[["STRUCT", StructGen([["case_INSENsitive", LongGen()]])]]),
([["struct", StructGen([["c_1", StringGen()], ["case_insensitive", LongGen()], ["c_3", ShortGen()]])]],
[["struct", StructGen([["CASE_INSENSITIVE", LongGen()]])]]),
([["struct", StructGen([["c_1", StringGen()], ["case_insensitive", LongGen()], ["c_3", ShortGen()]])]],
[["stRUct", StructGen([["CASE_INSENSITIVE", LongGen()]])]]),
]
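# Each pair above is (schema used to write the data, schema used to read it back); the read
# schemas select a subset of the nested fields, sometimes with different casing, to exercise
# nested schema pruning and case-insensitive matching.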
# TODO CHECK FOR DECIMAL??
@pytest.mark.parametrize('data_gen,read_schema', _nested_pruning_schemas, ids=idfn)
@pytest.mark.parametrize('reader_confs', reader_opt_confs)
@pytest.mark.parametrize('v1_enabled_list', ["", "parquet"])
@pytest.mark.parametrize('nested_enabled', ["true", "false"])
def test_nested_pruning_and_case_insensitive(spark_tmp_path, data_gen, read_schema, reader_confs, v1_enabled_list, nested_enabled):
data_path = spark_tmp_path + '/PARQUET_DATA'
with_cpu_session(
lambda spark : gen_df(spark, data_gen).write.parquet(data_path),
conf=rebase_write_corrected_conf)
all_confs = copy_and_update(reader_confs, {
'spark.sql.sources.useV1SourceList': v1_enabled_list,
'spark.sql.optimizer.nestedSchemaPruning.enabled': nested_enabled,
'spark.sql.legacy.parquet.datetimeRebaseModeInRead': 'CORRECTED'})
# This is a hack to get the type in a slightly less verbose way
rs = StructGen(read_schema, nullable=False).data_type
assert_gpu_and_cpu_are_equal_collect(lambda spark : spark.read.schema(rs).parquet(data_path),
conf=all_confs)
def test_spark_32639(std_input_path):
data_path = "%s/SPARK-32639/000.snappy.parquet" % (std_input_path)
schema_str = 'value MAP<STRUCT<first:STRING, middle:STRING, last:STRING>, STRING>'
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.read.schema(schema_str).parquet(data_path),
conf=original_parquet_file_reader_conf)
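# The test above reads a pre-generated parquet file whose map keys are structs (the SPARK-32639
# case) to make sure the GPU reader handles it the same way Spark does.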
@pytest.mark.skipif(not is_before_spark_320(), reason='Spark 3.1.x does not need special handling')
@pytest.mark.skipif(is_dataproc_runtime(), reason='https://github.com/NVIDIA/spark-rapids/issues/8074')
def test_parquet_read_nano_as_longs_31x(std_input_path):
data_path = "%s/timestamp-nanos.parquet" % (std_input_path)
# we correctly return timestamp_micros when running against Spark 3.1.x
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.read.parquet(data_path))
@pytest.mark.skipif(is_before_spark_320(), reason='Spark 3.1.x supports reading timestamps in nanos')
def test_parquet_read_nano_as_longs_false(std_input_path):
data_path = "%s/timestamp-nanos.parquet" % (std_input_path)
conf = copy_and_update(original_parquet_file_reader_conf, {
'spark.sql.legacy.parquet.nanosAsLong': False })
def read_timestamp_nano_parquet(spark):
spark.read.parquet(data_path).collect()
assert_gpu_and_cpu_error(
read_timestamp_nano_parquet,
conf,
error_message="Illegal Parquet type: INT64 (TIMESTAMP(NANOS,true))")
@pytest.mark.skipif(is_before_spark_320(), reason='Spark 3.1.x supports reading timestamps in nanos')
def test_parquet_read_nano_as_longs_not_configured(std_input_path):
data_path = "%s/timestamp-nanos.parquet" % (std_input_path)
def read_timestamp_nano_parquet(spark):
spark.read.parquet(data_path).collect()
assert_gpu_and_cpu_error(
read_timestamp_nano_parquet,
conf=original_parquet_file_reader_conf,
error_message="Illegal Parquet type: INT64 (TIMESTAMP(NANOS,true))")
@pytest.mark.skipif(is_before_spark_320(), reason='Spark 3.1.x supports reading timestamps in nanos')
@pytest.mark.skipif(spark_version() >= '3.2.0' and spark_version() < '3.2.4', reason='New config added in 3.2.4')
@pytest.mark.skipif(spark_version() >= '3.3.0' and spark_version() < '3.3.2', reason='New config added in 3.3.2')
@pytest.mark.skipif(is_databricks_runtime() and spark_version() == '3.3.2', reason='Config not in DB 12.2')
@allow_non_gpu('FileSourceScanExec, ColumnarToRowExec')
def test_parquet_read_nano_as_longs_true(std_input_path):
data_path = "%s/timestamp-nanos.parquet" % (std_input_path)
conf = copy_and_update(original_parquet_file_reader_conf, {
'spark.sql.legacy.parquet.nanosAsLong': True })
assert_gpu_fallback_collect(
lambda spark: spark.read.parquet(data_path),
'FileSourceScanExec',
conf=conf)
def test_many_column_project():
def _create_wide_data_frame(spark, num_cols):
schema_dict = {}
for i in range(num_cols):
schema_dict[f"c{i}"] = i
return spark.createDataFrame([Row(**r) for r in [schema_dict]])\
.withColumn('out', f.col('c1') * 100)
assert_gpu_and_cpu_are_equal_collect(
func=lambda spark: _create_wide_data_frame(spark, 1000),
is_cpu_first=False)
def setup_parquet_file_with_column_names(spark, table_name):
drop_query = "DROP TABLE IF EXISTS {}".format(table_name)
create_query = "CREATE TABLE `{}` (`a` INT, `b` ARRAY<INT>, `c` STRUCT<`c_1`: INT, `c_2`: STRING>) USING parquet"\
.format(table_name)
insert_query = "INSERT INTO {} VALUES(13, array(2020), named_struct('c_1', 1, 'c_2', 'hello'))".format(table_name)
    spark.sql(drop_query).collect()
    spark.sql(create_query).collect()
    spark.sql(insert_query).collect()
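# The table above mixes a primitive, an array, and a struct column so that the test below can
# select the columns (and nested struct fields) in every order and compare against the CPU.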
@pytest.mark.parametrize('reader_confs', reader_opt_confs, ids=idfn)
@pytest.mark.parametrize('v1_enabled_list', ["", "parquet"])
def test_disorder_read_schema(spark_tmp_table_factory, reader_confs, v1_enabled_list):
all_confs = copy_and_update(reader_confs, {'spark.sql.sources.useV1SourceList': v1_enabled_list})
table_name = spark_tmp_table_factory.get()
with_cpu_session(lambda spark : setup_parquet_file_with_column_names(spark, table_name))
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.sql("SELECT a,b FROM {}".format(table_name)),
all_confs)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.sql("SELECT c,a FROM {}".format(table_name)),
all_confs)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.sql("SELECT c,b FROM {}".format(table_name)),
all_confs)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.sql("SELECT a,c,b FROM {}".format(table_name)),
all_confs)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.sql("SELECT a,b,c FROM {}".format(table_name)),
all_confs)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.sql("SELECT b,c,a FROM {}".format(table_name)),
all_confs)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.sql("SELECT b,c,a FROM {}".format(table_name)),
all_confs)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.sql("SELECT c,a,b FROM {}".format(table_name)),
all_confs)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.sql("SELECT c,b,a FROM {}".format(table_name)),
all_confs)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.sql("SELECT c.c_2,c.c_1,b,a FROM {}".format(table_name)),
all_confs)
# SPARK-34859 put in a fix for handling column indexes with vectorized parquet.
# This is a version of those same tests to verify that we are parsing
# the data correctly.
# These tests really only matter for Spark 3.2.0 and above, but they should run
# on any version, although they might not test the exact same thing.
# Based off of ParquetColumnIndexSuite.
# Timestamp generation was modified because the original tests were written
# assuming that casting a long to a timestamp treats the long as milliseconds,
# but it is treated as seconds, which resulted in dates/timestamps past what Python can handle.
# We also modified decimal generation to be at most DECIMAL64 until we can support
# DECIMAL128
filters = ["_1 = 500",
"_1 = 500 or _1 = 1500",
"_1 = 500 or _1 = 501 or _1 = 1500",
"_1 = 500 or _1 = 501 or _1 = 1000 or _1 = 1500",
"_1 >= 500 and _1 < 1000",
"(_1 >= 500 and _1 < 1000) or (_1 >= 1500 and _1 < 1600)"]
@pytest.mark.parametrize('reader_confs', reader_opt_confs, ids=idfn)
@pytest.mark.parametrize('enable_dictionary', ["true", "false"], ids=idfn)
@pytest.mark.parametrize('v1_enabled_list', ["", "parquet"])
def test_parquet_reading_from_unaligned_pages_basic_filters(spark_tmp_path, reader_confs, enable_dictionary, v1_enabled_list):
all_confs = copy_and_update(reader_confs, {'spark.sql.sources.useV1SourceList': v1_enabled_list})
data_path = spark_tmp_path + '/PARQUET_UNALIGNED_DATA'
with_cpu_session(lambda spark : spark.range(0, 2000)\
.selectExpr("id as _1", "concat(id, ':', repeat('o', id DIV 100)) as _2")\
.coalesce(1)\
.write\
.option("parquet.page.size", "4096")
.option("parquet.enable.dictionary", enable_dictionary)
.parquet(data_path))
for filter_str in filters:
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.read.parquet(data_path).filter(filter_str),
all_confs)
@pytest.mark.parametrize('reader_confs', reader_opt_confs, ids=idfn)
@pytest.mark.parametrize('enable_dictionary', ["true", "false"], ids=idfn)
@pytest.mark.parametrize('v1_enabled_list', ["", "parquet"])
def test_parquet_reading_from_unaligned_pages_all_types(spark_tmp_path, reader_confs, enable_dictionary, v1_enabled_list):
all_confs = copy_and_update(reader_confs, {'spark.sql.sources.useV1SourceList': v1_enabled_list})
data_path = spark_tmp_path + '/PARQUET_UNALIGNED_DATA'
with_cpu_session(lambda spark : spark.range(0, 2000)\
.selectExpr("id as _1",
"cast(id as short) as _3",
"cast(id as int) as _4",
"cast(id as float) as _5",
"cast(id as double) as _6",
# DECIMAL128 IS NOT SUPPORTED YET "cast(id as decimal(20,0)) as _7",
"cast(id as decimal(10,0)) as _7",
"cast(id as decimal(30,0)) as _8",
"cast(cast(1618161925 + (id * 60 * 60 * 24) as timestamp) as date) as _9",
"cast(1618161925 + id as timestamp) as _10")\
.coalesce(1)\
.write\
.option("parquet.page.size", "4096")
.option("parquet.enable.dictionary", enable_dictionary)
.parquet(data_path))
for filter_str in filters:
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.read.parquet(data_path).filter(filter_str),
all_confs)
@pytest.mark.parametrize('reader_confs', reader_opt_confs, ids=idfn)
@pytest.mark.parametrize('enable_dictionary', ["true", "false"], ids=idfn)
@pytest.mark.parametrize('v1_enabled_list', ["", "parquet"])
def test_parquet_reading_from_unaligned_pages_all_types_dict_optimized(spark_tmp_path, reader_confs, enable_dictionary, v1_enabled_list):
all_confs = copy_and_update(reader_confs, {'spark.sql.sources.useV1SourceList': v1_enabled_list})
data_path = spark_tmp_path + '/PARQUET_UNALIGNED_DATA'
with_cpu_session(lambda spark : spark.range(0, 2000)\
.selectExpr("id as _1",
"cast(id % 10 as byte) as _2",
"cast(id % 10 as short) as _3",
"cast(id % 10 as int) as _4",
"cast(id % 10 as float) as _5",
"cast(id % 10 as double) as _6",
# DECIMAL128 IS NOT SUPPORTED YET "cast(id % 10 as decimal(20,0)) as _7",
"cast(id % 10 as decimal(10,0)) as _7",
"cast(id % 10 as decimal(20,0)) as _8",
"cast(id % 2 as boolean) as _9",
"cast(cast(1618161925 + ((id % 10) * 60 * 60 * 24) as timestamp) as date) as _10",
"cast(1618161925 + (id % 10) as timestamp) as _11")\
.coalesce(1)\
.write\
.option("parquet.page.size", "4096")
.option("parquet.enable.dictionary", enable_dictionary)
.parquet(data_path))
for filter_str in filters:
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.read.parquet(data_path).filter(filter_str),
all_confs)
@pytest.mark.parametrize('reader_confs', reader_opt_confs, ids=idfn)
@pytest.mark.parametrize('enable_dictionary', ["true", "false"], ids=idfn)
@pytest.mark.parametrize('v1_enabled_list', ["", "parquet"])
def test_parquet_reading_from_unaligned_pages_basic_filters_with_nulls(spark_tmp_path, reader_confs, enable_dictionary, v1_enabled_list):
    # insert 50 null values in [400, 450) to verify that they are skipped while processing the row
    # range [500, 1000) against the second page of col_2 [400, 800)
all_confs = copy_and_update(reader_confs, {'spark.sql.sources.useV1SourceList': v1_enabled_list})
data_path = spark_tmp_path + '/PARQUET_UNALIGNED_DATA'
with_cpu_session(lambda spark : spark.range(0, 2000)\
.selectExpr("id as _1", "IF(id >= 400 AND id < 450, null, concat(id, ':', repeat('o', id DIV 100))) as _2")\
.coalesce(1)\
.write\
.option("parquet.page.size", "4096")
.option("parquet.enable.dictionary", enable_dictionary)
.parquet(data_path))
for filter_str in filters:
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.read.parquet(data_path).filter(filter_str),
all_confs)
conf_for_parquet_aggregate_pushdown = {
"spark.sql.parquet.aggregatePushdown": "true",
"spark.sql.sources.useV1SourceList": ""
}
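# Parquet aggregate pushdown is only applied on the V2 data source path, which is presumably why
# useV1SourceList is cleared in the conf above.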
@pytest.mark.skipif(is_before_spark_330(), reason='Aggregate push down on Parquet is a new feature of Spark 330')
def test_parquet_scan_without_aggregation_pushdown_not_fallback(spark_tmp_path):
"""
    No aggregation will be pushed down in this test, so we should not fall back to the CPU
"""
data_path = spark_tmp_path + "/pushdown.parquet"
def do_parquet_scan(spark):
spark.range(10).selectExpr("id", "id % 3 as p").write.partitionBy("p").mode("overwrite").parquet(data_path)
df = spark.read.parquet(data_path).selectExpr("Max(p)")
return df
assert_gpu_and_cpu_are_equal_collect(
do_parquet_scan,
conf_for_parquet_aggregate_pushdown
)
@pytest.mark.skipif(is_before_spark_330(), reason='Aggregate push down on Parquet is a new feature of Spark 330')
@allow_non_gpu(any = True)
def test_parquet_scan_with_aggregation_pushdown_fallback(spark_tmp_path):
"""
    The aggregation will be pushed down in this test, so we should fall back to the CPU
"""
data_path = spark_tmp_path + "/pushdown.parquet"
def do_parquet_scan(spark):
spark.range(10).selectExpr("id", "id % 3 as p").write.partitionBy("p").mode("overwrite").parquet(data_path)
df = spark.read.parquet(data_path).selectExpr("count(p)")
return df
assert_cpu_and_gpu_are_equal_collect_with_capture(
do_parquet_scan,
exist_classes= "BatchScanExec",
non_exist_classes= "GpuBatchScanExec",
conf = conf_for_parquet_aggregate_pushdown)
@pytest.mark.skipif(is_before_spark_330(), reason='Hidden file metadata columns are a new feature of Spark 330')
@allow_non_gpu(any = True)
@pytest.mark.parametrize('metadata_column', ["file_path", "file_name", "file_size", "file_modification_time"])
def test_parquet_scan_with_hidden_metadata_fallback(spark_tmp_path, metadata_column):
data_path = spark_tmp_path + "/hidden_metadata.parquet"
with_cpu_session(lambda spark : spark.range(10) \
.selectExpr("id", "id % 3 as p") \
.write \
.partitionBy("p") \
.mode("overwrite") \
.parquet(data_path))
def do_parquet_scan(spark):
df = spark.read.parquet(data_path).selectExpr("id", "_metadata.{}".format(metadata_column))
return df
assert_cpu_and_gpu_are_equal_collect_with_capture(
do_parquet_scan,
exist_classes= "FileSourceScanExec",
non_exist_classes= "GpuBatchScanExec")
@ignore_order
@pytest.mark.parametrize('v1_enabled_list', ["", "parquet"])
@pytest.mark.parametrize('reader_confs', reader_opt_confs, ids=idfn)
@pytest.mark.skipif(is_databricks_runtime(), reason="Databricks does not support ignoreCorruptFiles")
def test_parquet_read_with_corrupt_files(spark_tmp_path, reader_confs, v1_enabled_list):
first_data_path = spark_tmp_path + '/PARQUET_DATA/first'
with_cpu_session(lambda spark : spark.range(1).toDF("a").write.parquet(first_data_path))
second_data_path = spark_tmp_path + '/PARQUET_DATA/second'
with_cpu_session(lambda spark : spark.range(1, 2).toDF("a").write.parquet(second_data_path))
third_data_path = spark_tmp_path + '/PARQUET_DATA/third'
with_cpu_session(lambda spark : spark.range(2, 3).toDF("a").write.json(third_data_path))
all_confs = copy_and_update(reader_confs,
{'spark.sql.files.ignoreCorruptFiles': "true",
'spark.sql.sources.useV1SourceList': v1_enabled_list})
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.read.parquet(first_data_path, second_data_path, third_data_path),
conf=all_confs)
def with_id(i):
return {'parquet.field.id': i}
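# Attaching {'parquet.field.id': <id>} as column metadata is how Spark records parquet field IDs,
# e.g. StructField("a", IntegerType(), metadata=with_id(1)) as used throughout the tests below.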
# Field ID test cases were re-written from:
# https://github.com/apache/spark/blob/v3.3.0-rc3/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFieldIdIOSuite.scala
@pytest.mark.skipif(is_before_spark_330(), reason='Field ID is not supported before Spark 330')
@pytest.mark.parametrize('footer_read', ["JAVA", "NATIVE", "AUTO"], ids=idfn)
def test_parquet_read_field_id_using_correctly(spark_tmp_path, footer_read):
data_path = spark_tmp_path + '/PARQUET_DATA'
write_schema = StructType([StructField("random", IntegerType(), metadata=with_id(1)),
StructField("name", StringType(), metadata=with_id(0))])
write_data = [(100, 'text'), (200, 'more')]
# write parquet with field IDs
with_cpu_session(lambda spark: spark.createDataFrame(write_data, write_schema).repartition(1)
.write.mode("overwrite").parquet(data_path),
conf=enable_parquet_field_id_write)
    # use field IDs to specify the columns to read, then map the column names:
    # map column `name` to `a`, map column `random` to `b`
read_schema = StructType([
StructField("a", StringType(), True, metadata=with_id(0)),
StructField("b", IntegerType(), True, metadata=with_id(1)),
])
conf = copy_and_update(enable_parquet_field_id_read,
{"spark.rapids.sql.format.parquet.reader.footer.type": footer_read})
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.read.schema(read_schema).parquet(data_path),
conf=conf)
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.read.schema(read_schema).parquet(data_path).where("b < 50"),
conf=conf)
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.read.schema(read_schema).parquet(data_path).where("a >= 'oh'"),
conf=conf)
read_schema_mixed = StructType([
StructField("name", StringType(), True),
StructField("b", IntegerType(), True, metadata=with_id(1)),
])
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.read.schema(read_schema_mixed).parquet(data_path),
conf=conf)
read_schema_mixed_half_matched = StructType([
StructField("unmatched", StringType(), True),
StructField("b", IntegerType(), True, metadata=with_id(1)),
])
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.read.schema(read_schema_mixed_half_matched).parquet(data_path),
conf=conf)
# not specify schema
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.read.parquet(data_path).where("name >= 'oh'"),
conf=conf)
@pytest.mark.skipif(is_before_spark_330(), reason='Field ID is not supported before Spark 330')
@pytest.mark.parametrize('footer_read', ["JAVA", "NATIVE", "AUTO"], ids=idfn)
def test_parquet_read_field_id_absence(spark_tmp_path, footer_read):
data_path = spark_tmp_path + '/PARQUET_DATA'
write_schema = StructType([StructField("a", IntegerType(), metadata=with_id(3)),
StructField("randomName", StringType())])
write_data = [(100, 'text'), (200, 'more')]
# write parquet with field IDs
with_cpu_session(lambda spark: spark.createDataFrame(write_data, write_schema).repartition(1)
.write.mode("overwrite").parquet(data_path),
conf=enable_parquet_field_id_write)
conf = copy_and_update(enable_parquet_field_id_read,
{"spark.rapids.sql.format.parquet.reader.footer.type": footer_read})
    # 3 different cases for the 3 columns to read:
    # - a: ID 1 is not found; even though there is a column named `a`, still return null
    # - b: ID 2 is not found, return null
    # - c: ID 3 is found, read it
read_schema = StructType([
StructField("a", IntegerType(), True, metadata=with_id(1)),
StructField("b", StringType(), True, metadata=with_id(2)),
StructField("c", IntegerType(), True, metadata=with_id(3)),
])
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.read.schema(read_schema).parquet(data_path),
conf=conf)
@pytest.mark.skipif(is_before_spark_330(), reason='Field ID is not supported before Spark 330')
@pytest.mark.parametrize('footer_read', ["JAVA", "NATIVE", "AUTO"], ids=idfn)
def test_parquet_read_multiple_field_id_matches(spark_tmp_path, footer_read):
data_path = spark_tmp_path + '/PARQUET_DATA'
write_schema = StructType([
StructField("a", IntegerType(), True, metadata=with_id(1)), # duplicated field ID
StructField("rand1", StringType(), True, metadata=with_id(2)),
StructField("rand2", StringType(), True, metadata=with_id(1)), # duplicated field ID
])
write_data = [(100, 'text', 'txt'), (200, 'more', 'mr')]
# write parquet with field IDs
with_cpu_session(lambda spark: spark.createDataFrame(write_data, write_schema).repartition(1)
.write.mode("overwrite").parquet(data_path),
conf=enable_parquet_field_id_write)
conf = copy_and_update(enable_parquet_field_id_read,
{"spark.rapids.sql.format.parquet.reader.footer.type": footer_read})
read_schema = StructType([StructField("a", IntegerType(), True, metadata=with_id(1))])
    # Both CPU and GPU invoke `ParquetReadSupport.clipParquetSchema`, which throws an exception
assert_gpu_and_cpu_error(
lambda spark: spark.read.schema(read_schema).parquet(data_path).collect(),
conf=conf,
error_message="Found duplicate field(s)")
@pytest.mark.skipif(is_before_spark_330(), reason='Field ID is not supported before Spark 330')
@pytest.mark.parametrize('footer_read', ["JAVA", "NATIVE", "AUTO"], ids=idfn)
def test_parquet_read_without_field_id(spark_tmp_path, footer_read):
data_path = spark_tmp_path + '/PARQUET_DATA'
# Parquet without field ID
write_schema = StructType([
StructField("a", IntegerType(), True),
StructField("rand1", StringType(), True),
StructField("rand2", StringType(), True),
])
write_data = [(100, 'text', 'txt'), (200, 'more', 'mr')]
    # write parquet (the schema has no field IDs, so none are written even though field ID writing is enabled)
with_cpu_session(lambda spark: spark.createDataFrame(write_data, write_schema).repartition(1)
.write.mode("overwrite").parquet(data_path),
conf=enable_parquet_field_id_write)
conf = copy_and_update(enable_parquet_field_id_read,
{"spark.rapids.sql.format.parquet.reader.footer.type": footer_read})
read_schema = StructType([StructField("a", IntegerType(), True, metadata=with_id(1))])
    # Spark read schema expects field Ids, but the Parquet file schema doesn't contain any field Ids.
    # If `spark.sql.parquet.fieldId.read.ignoreMissing` is false (the default value), an exception is thrown
assert_gpu_and_cpu_error(
lambda spark: spark.read.schema(read_schema).parquet(data_path).collect(),
conf=conf,
error_message="Parquet file schema doesn't contain any field Ids")
    # Spark read schema expects field Ids, but the Parquet file schema doesn't contain any field Ids.
    # If `spark.sql.parquet.fieldId.read.ignoreMissing` is true,
    # return a column of all nulls for the unmatched field IDs
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.read.schema(read_schema).parquet(data_path),
conf=copy_and_update(conf,
{"spark.sql.parquet.fieldId.read.ignoreMissing": "true"}))
# test global config: field_id_write_enable=false, field_id_read_enable=true
# test global config: field_id_write_enable=true, field_id_read_enable=true
@pytest.mark.skipif(is_before_spark_330(), reason='Field ID is not supported before Spark 330')
@pytest.mark.parametrize('footer_read', ["JAVA", "NATIVE", "AUTO"], ids=idfn)
def test_parquet_read_field_id_global_flags(spark_tmp_path, footer_read):
data_path = spark_tmp_path + '/PARQUET_DATA'
write_schema = StructType([
StructField("a", IntegerType(), True, metadata=with_id(1)),
StructField("rand1", StringType(), True, metadata=with_id(2)),
StructField("rand2", StringType(), True, metadata=with_id(3)),
])
read_schema = StructType([
StructField("some", IntegerType(), True, metadata=with_id(1)),
StructField("other", StringType(), True, metadata=with_id(2)),
StructField("name", StringType(), True, metadata=with_id(3)),
])
write_data = [(100, "text", "txt"), (200, "more", "mr")]
    # Do not write field IDs into the Parquet file even though `write_schema` contains them,
    # then try to read by field IDs.
with_cpu_session(lambda spark: spark.createDataFrame(write_data, write_schema).repartition(1)
.write.mode("overwrite").parquet(data_path),
conf=disable_parquet_field_id_write)
conf = copy_and_update(enable_parquet_field_id_read,
{"spark.rapids.sql.format.parquet.reader.footer.type": footer_read})
assert_gpu_and_cpu_error(
lambda spark: spark.read.schema(read_schema).parquet(data_path).collect(),
conf=conf,
error_message="Parquet file schema doesn't contain any field Ids")
# write field IDs into Parquet
# read by field IDs
with_cpu_session(lambda spark: spark.createDataFrame(write_data, write_schema).repartition(1)
.write.mode("overwrite").parquet(data_path),
conf=enable_parquet_field_id_write)
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.read.schema(read_schema).parquet(data_path),
conf=conf)
@pytest.mark.skipif(is_before_spark_330(), reason='DayTimeInterval is not supported before Pyspark 3.3.0')
def test_parquet_read_daytime_interval_cpu_file(spark_tmp_path):
data_path = spark_tmp_path + '/PARQUET_DATA'
gen_list = [('_c1', DayTimeIntervalGen())]
# write DayTimeInterval with CPU
    with_cpu_session(lambda spark: gen_df(spark, gen_list).coalesce(1).write.mode("overwrite").parquet(data_path))
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.read.parquet(data_path))
@pytest.mark.skipif(is_before_spark_330(), reason='DayTimeInterval is not supported before Pyspark 3.3.0')
def test_parquet_read_daytime_interval_gpu_file(spark_tmp_path):
data_path = spark_tmp_path + '/PARQUET_DATA'
gen_list = [('_c1', DayTimeIntervalGen())]
# write DayTimeInterval with GPU
    with_gpu_session(lambda spark: gen_df(spark, gen_list).coalesce(1).write.mode("overwrite").parquet(data_path))
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.read.parquet(data_path))
@pytest.mark.skipif(is_before_spark_330(), reason='DayTimeInterval is not supported before Pyspark 3.3.0')
def test_parquet_push_down_on_interval_type(spark_tmp_path):
gen_list = [('_c1', DayTimeIntervalGen())]
data_path = spark_tmp_path + '/PARQUET_DATA'
with_cpu_session(lambda spark: gen_df(spark, gen_list).coalesce(1).write.parquet(data_path))
assert_gpu_and_cpu_are_equal_sql(
lambda spark: spark.read.parquet(data_path),
"testData",
"select * from testData where _c1 > interval '10 0:0:0' day to second")
def test_parquet_read_case_insensitivity(spark_tmp_path):
gen_list = [('one', int_gen), ('tWo', byte_gen), ('THREE', boolean_gen)]
data_path = spark_tmp_path + '/PARQUET_DATA'
with_cpu_session(lambda spark: gen_df(spark, gen_list).write.parquet(data_path))
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.read.parquet(data_path).select('one', 'two', 'three'),
{'spark.sql.caseSensitive': 'false'}
)
# test read INT32 as INT8/INT16/Date
@pytest.mark.parametrize('reader_confs', reader_opt_confs)
@pytest.mark.parametrize('v1_enabled_list', ["", "parquet"])
def test_parquet_int32_downcast(spark_tmp_path, reader_confs, v1_enabled_list):
data_path = spark_tmp_path + '/PARQUET_DATA'
write_schema = [("d", date_gen), ('s', short_gen), ('b', byte_gen)]
with_cpu_session(
lambda spark: gen_df(spark, write_schema).selectExpr(
"cast(d as Int) as d",
"cast(s as Int) as s",
"cast(b as Int) as b").write.parquet(data_path))
read_schema = StructType([StructField("d", DateType()),
StructField("s", ShortType()),
StructField("b", ByteType())])
conf = copy_and_update(reader_confs,
{'spark.sql.sources.useV1SourceList': v1_enabled_list})
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.read.schema(read_schema).parquet(data_path),
conf=conf)
@pytest.mark.parametrize('reader_confs', reader_opt_confs)
@pytest.mark.parametrize('v1_enabled_list', ["", "parquet"])
@pytest.mark.parametrize("types", [("byte", "short"), ("byte", "int"), ("short", "int")], ids=idfn)
def test_parquet_read_int_upcast(spark_tmp_path, reader_confs, v1_enabled_list, types):
data_path = spark_tmp_path + "/PARQUET_DATA"
store_type, load_type = types
with_cpu_session(lambda spark: spark.range(10) \
.selectExpr(f"cast(id as {store_type})") \
.write.parquet(data_path))
conf = copy_and_update(reader_confs,
{'spark.sql.sources.useV1SourceList': v1_enabled_list})
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.read.schema(f"id {load_type}").parquet(data_path),
conf=conf)
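# Note: DataFrameReader.schema accepts either a StructType or a DDL-formatted string,
# which is why the test above can simply pass f"id {load_type}". A hedged equivalence
# sketch follows (types only, no I/O; the names are illustrative and unused by the tests):
from pyspark.sql.types import StructType, StructField, IntegerType
_example_ddl_schema = "id int"
_example_struct_schema = StructType([StructField("id", IntegerType(), True)])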
@pytest.mark.parametrize('reader_confs', reader_opt_confs)
@pytest.mark.parametrize('v1_enabled_list', ["", "parquet"])
def test_parquet_nested_column_missing(spark_tmp_path, reader_confs, v1_enabled_list):
data_path = spark_tmp_path + '/PARQUET_DATA'
write_schema = [("a", string_gen), ("b", int_gen), ("c", StructGen([("ca", long_gen)]))]
with_cpu_session(
lambda spark: gen_df(spark, write_schema).write.parquet(data_path))
read_schema = StructType([StructField("a", StringType()),
StructField("b", IntegerType()),
StructField("c", StructType([
StructField("ca", LongType()),
StructField("cb", StringType())]))])
conf = copy_and_update(reader_confs,
{'spark.sql.sources.useV1SourceList': v1_enabled_list})
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.read.schema(read_schema).parquet(data_path),
conf=conf)
def test_parquet_check_schema_compatibility(spark_tmp_path):
data_path = spark_tmp_path + '/PARQUET_DATA'
gen_list = [('int', int_gen), ('long', long_gen), ('dec32', decimal_gen_32bit)]
with_cpu_session(lambda spark: gen_df(spark, gen_list).coalesce(1).write.parquet(data_path))
read_int_as_long = StructType(
[StructField('long', LongType()), StructField('int', LongType())])
assert_gpu_and_cpu_error(
lambda spark: spark.read.schema(read_int_as_long).parquet(data_path).collect(),
conf={},
error_message='Parquet column cannot be converted')
read_dec32_as_dec64 = StructType(
[StructField('int', IntegerType()), StructField('dec32', DecimalType(15, 10))])
assert_gpu_and_cpu_error(
lambda spark: spark.read.schema(read_dec32_as_dec64).parquet(data_path).collect(),
conf={},
error_message='Parquet column cannot be converted')
# For nested types, the GPU throws an incompatibility exception with a different message than the CPU.
def test_parquet_check_schema_compatibility_nested_types(spark_tmp_path):
data_path = spark_tmp_path + '/PARQUET_DATA'
gen_list = [('array_long', ArrayGen(long_gen)),
('array_array_int', ArrayGen(ArrayGen(int_gen))),
('struct_float', StructGen([('f', float_gen), ('d', double_gen)])),
('struct_array_int', StructGen([('a', ArrayGen(int_gen))])),
('map', map_string_string_gen[0])]
with_cpu_session(lambda spark: gen_df(spark, gen_list).coalesce(1).write.parquet(data_path))
read_array_long_as_int = StructType([StructField('array_long', ArrayType(IntegerType()))])
assert_spark_exception(
lambda: with_gpu_session(
lambda spark: spark.read.schema(read_array_long_as_int).parquet(data_path).collect()),
error_message='Parquet column cannot be converted')
read_arr_arr_int_as_long = StructType(
[StructField('array_array_int', ArrayType(ArrayType(LongType())))])
assert_spark_exception(
lambda: with_gpu_session(
lambda spark: spark.read.schema(read_arr_arr_int_as_long).parquet(data_path).collect()),
error_message='Parquet column cannot be converted')
read_struct_flt_as_dbl = StructType([StructField(
'struct_float', StructType([StructField('f', DoubleType())]))])
assert_spark_exception(
lambda: with_gpu_session(
lambda spark: spark.read.schema(read_struct_flt_as_dbl).parquet(data_path).collect()),
error_message='Parquet column cannot be converted')
read_struct_arr_int_as_long = StructType([StructField(
'struct_array_int', StructType([StructField('a', ArrayType(LongType()))]))])
assert_spark_exception(
lambda: with_gpu_session(
lambda spark: spark.read.schema(read_struct_arr_int_as_long).parquet(data_path).collect()),
error_message='Parquet column cannot be converted')
read_map_str_str_as_str_int = StructType([StructField(
'map', MapType(StringType(), IntegerType()))])
assert_spark_exception(
lambda: with_gpu_session(
lambda spark: spark.read.schema(read_map_str_str_as_str_int).parquet(data_path).collect()),
error_message='Parquet column cannot be converted')
@pytest.mark.skipif(is_before_spark_320() or is_spark_321cdh(), reason='Encryption is not supported before Spark 3.2.0 or Parquet < 1.12')
@pytest.mark.skipif(os.environ.get('INCLUDE_PARQUET_HADOOP_TEST_JAR', 'false') == 'false', reason='INCLUDE_PARQUET_HADOOP_TEST_JAR is disabled')
@pytest.mark.parametrize('v1_enabled_list', ["", "parquet"])
@pytest.mark.parametrize('reader_confs', reader_opt_confs)
def test_parquet_read_encryption(spark_tmp_path, reader_confs, v1_enabled_list):
data_path = spark_tmp_path + '/PARQUET_DATA'
gen_list = [('one', int_gen), ('two', byte_gen), ('THREE', boolean_gen)]
encryption_confs = {
'parquet.encryption.kms.client.class': 'org.apache.parquet.crypto.keytools.mocks.InMemoryKMS',
'parquet.encryption.key.list': 'keyA:AAECAwQFBgcICQoLDA0ODw== , keyB:AAECAAECAAECAAECAAECAA==',
'parquet.crypto.factory.class': 'org.apache.parquet.crypto.keytools.PropertiesDrivenCryptoFactory'
}
conf = copy_and_update(reader_confs, encryption_confs)
with_cpu_session(
lambda spark : gen_df(spark, gen_list).write.
option("parquet.encryption.column.keys" , "keyA:one").
option("parquet.encryption.footer.key" , "keyB").
parquet(data_path), conf=encryption_confs)
# test with missing encryption conf reading encrypted file
assert_spark_exception(
lambda: with_gpu_session(
lambda spark: spark.read.parquet(data_path).collect()),
error_message='Could not read footer for file')
assert_spark_exception(
lambda: with_gpu_session(
lambda spark: spark.read.parquet(data_path).collect(), conf=conf),
error_message='The GPU does not support reading encrypted Parquet files')
def test_parquet_read_count(spark_tmp_path):
parquet_gens = [int_gen, string_gen, double_gen]
gen_list = [('_c' + str(i), gen) for i, gen in enumerate(parquet_gens)]
data_path = spark_tmp_path + '/PARQUET_DATA'
with_cpu_session(lambda spark: gen_df(spark, gen_list).write.parquet(data_path))
assert_gpu_and_cpu_row_counts_equal(lambda spark: spark.read.parquet(data_path))
    # assert that the Spark plan of the equivalent SQL query reads no columns (empty read schema)
assert_cpu_and_gpu_are_equal_sql_with_capture(
lambda spark: spark.read.parquet(data_path), "SELECT COUNT(*) FROM tab", "tab",
exist_classes=r'GpuFileGpuScan parquet .* ReadSchema: struct<>')
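# A hedged, standalone sketch (outside the assert helpers used above) of how one might
# check that a count-only query prunes every column: the physical plan of the equivalent
# SQL query should report an empty ReadSchema. The session, path, and helper name below
# are hypothetical placeholders, not part of this test suite.
def _example_count_only_reads_no_columns(spark, path):
    spark.read.parquet(path).createOrReplaceTempView("tab")
    plan = spark.sql("SELECT COUNT(*) FROM tab")._jdf.queryExecution().executedPlan().toString()
    return "ReadSchema: struct<>" in plan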
@pytest.mark.parametrize('read_func', [read_parquet_df, read_parquet_sql])
@pytest.mark.parametrize('v1_enabled_list', ["", "parquet"])
@pytest.mark.parametrize('reader_confs', reader_opt_confs, ids=idfn)
@pytest.mark.parametrize('col_name', ['K0', 'k0', 'K3', 'k3', 'V0', 'v0'], ids=idfn)
@ignore_order
def test_read_case_col_name(spark_tmp_path, read_func, v1_enabled_list, reader_confs, col_name):
all_confs = copy_and_update(reader_confs, {
'spark.sql.sources.useV1SourceList': v1_enabled_list})
    gen_list = [('k0', LongGen(nullable=False, min_val=0, max_val=0)),
('k1', LongGen(nullable=False, min_val=1, max_val=1)),
('k2', LongGen(nullable=False, min_val=2, max_val=2)),
('k3', LongGen(nullable=False, min_val=3, max_val=3)),
('v0', LongGen()),
('v1', LongGen()),
('v2', LongGen()),
('v3', LongGen())]
gen = StructGen(gen_list, nullable=False)
data_path = spark_tmp_path + '/PAR_DATA'
reader = read_func(data_path)
with_cpu_session(
lambda spark : gen_df(spark, gen).write.partitionBy('k0', 'k1', 'k2', 'k3').parquet(data_path))
assert_gpu_and_cpu_are_equal_collect(
lambda spark : reader(spark).selectExpr(col_name),
conf=all_confs)
@pytest.mark.parametrize("reader_confs", reader_opt_confs, ids=idfn)
@ignore_order
def test_parquet_column_name_with_dots(spark_tmp_path, reader_confs):
data_path = spark_tmp_path + "/PARQUET_DATA"
reader = read_parquet_df(data_path)
all_confs = reader_confs
gens = [
("a.b", StructGen([
("c.d.e", StructGen([
("f.g", int_gen),
("h", string_gen)])),
("i.j", long_gen)])),
("k", boolean_gen)]
with_cpu_session(lambda spark: gen_df(spark, gens).write.parquet(data_path))
assert_gpu_and_cpu_are_equal_collect(lambda spark: reader(spark), conf=all_confs)
assert_gpu_and_cpu_are_equal_collect(lambda spark: reader(spark).selectExpr("`a.b`"), conf=all_confs)
assert_gpu_and_cpu_are_equal_collect(lambda spark: reader(spark).selectExpr("`a.b`.`c.d.e`.`f.g`"),
conf=all_confs)
| spark-rapids-branch-23.10 | integration_tests/src/main/python/parquet_test.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_gpu_and_cpu_sql_writes_are_equal_collect, assert_gpu_fallback_collect, \
assert_gpu_and_cpu_are_equal_collect, assert_equal, run_with_cpu_and_gpu
from conftest import spark_jvm
from data_gen import *
from datetime import date, datetime, timezone
from marks import *
from spark_session import is_hive_available, is_spark_33X, is_spark_340_or_later, with_cpu_session, \
is_databricks122_or_later
# Using timestamps from 1970 to work around a cudf ORC bug
# https://github.com/NVIDIA/spark-rapids/issues/140.
# Using a limited upper end for timestamps to avoid INT96 overflow on Parquet.
def _restricted_timestamp(nullable=True):
return TimestampGen(start=datetime(1970, 1, 1, tzinfo=timezone.utc),
end=datetime(2262, 4, 11, tzinfo=timezone.utc),
nullable=nullable)
_basic_gens = [byte_gen, short_gen, int_gen, long_gen, float_gen, double_gen,
string_gen, boolean_gen, DateGen(start=date(1590, 1, 1)),
_restricted_timestamp()
] + decimal_gens
_basic_struct_gen = StructGen([['child'+str(ind), sub_gen] for ind, sub_gen in enumerate(_basic_gens)])
_struct_gens = [_basic_struct_gen,
StructGen([['child0', byte_gen], ['child1', _basic_struct_gen]]),
StructGen([['child0', ArrayGen(short_gen)], ['child1', double_gen]])]
_array_gens = [ArrayGen(sub_gen) for sub_gen in _basic_gens] + [
ArrayGen(ArrayGen(short_gen, max_length=10), max_length=10),
ArrayGen(ArrayGen(string_gen, max_length=10), max_length=10),
ArrayGen(StructGen([['child0', byte_gen], ['child1', string_gen], ['child2', float_gen]]))]
_map_gens = [simple_string_to_string_map_gen] + [MapGen(f(nullable=False), f()) for f in [
BooleanGen, ByteGen, ShortGen, IntegerGen, LongGen, FloatGen, DoubleGen,
lambda nullable=True: _restricted_timestamp(nullable=nullable),
lambda nullable=True: DateGen(start=date(1590, 1, 1), nullable=nullable),
lambda nullable=True: DecimalGen(precision=15, scale=1, nullable=nullable),
lambda nullable=True: DecimalGen(precision=36, scale=5, nullable=nullable)]]
_write_gens = [_basic_gens, _struct_gens, _array_gens, _map_gens]
# There appears to be a race when computing tasks for writing; the order can differ even on the CPU.
@ignore_order(local=True)
@pytest.mark.skipif(not is_hive_available(), reason="Hive is missing")
@pytest.mark.parametrize("gens", _write_gens, ids=idfn)
@pytest.mark.parametrize("storage", ["PARQUET", "nativeorc", "hiveorc"])
def test_optimized_hive_ctas_basic(gens, storage, spark_tmp_table_factory):
data_table = spark_tmp_table_factory.get()
gen_list = [('c' + str(i), gen) for i, gen in enumerate(gens)]
with_cpu_session(lambda spark: gen_df(spark, gen_list).createOrReplaceTempView(data_table))
def do_write(spark, table_name):
store_name = storage
if storage.endswith("orc"):
store_name = "ORC"
return "CREATE TABLE {} STORED AS {} AS SELECT * FROM {}".format(
table_name, store_name, data_table)
conf = {
"spark.sql.legacy.parquet.datetimeRebaseModeInWrite": "CORRECTED",
"spark.sql.legacy.parquet.int96RebaseModeInWrite": "CORRECTED"
}
if storage == "nativeorc":
conf["spark.sql.orc.impl"] = "native"
elif storage == "hiveorc":
conf["spark.sql.orc.impl"] = "hive"
assert_gpu_and_cpu_sql_writes_are_equal_collect(spark_tmp_table_factory, do_write, conf=conf)
@allow_non_gpu('DataWritingCommandExec,ExecutedCommandExec,WriteFilesExec')
@pytest.mark.skipif(not is_hive_available(), reason="Hive is missing")
@pytest.mark.parametrize("gens", [_basic_gens], ids=idfn)
@pytest.mark.parametrize("storage_with_confs", [
("PARQUET", {"spark.sql.legacy.parquet.datetimeRebaseModeInWrite": "LEGACY",
"spark.sql.legacy.parquet.int96RebaseModeInWrite": "LEGACY"}),
("PARQUET", {"parquet.encryption.footer.key": "k1",
"parquet.encryption.column.keys": "k2:a"}),
("PARQUET", {"spark.sql.parquet.compression.codec": "gzip"}),
("PARQUET", {"spark.sql.parquet.writeLegacyFormat": "true"}),
("ORC", {"spark.sql.orc.compression.codec": "zlib"})], ids=idfn)
def test_optimized_hive_ctas_configs_fallback(gens, storage_with_confs, spark_tmp_table_factory):
data_table = spark_tmp_table_factory.get()
gen_list = [('c' + str(i), gen) for i, gen in enumerate(gens)]
with_cpu_session(lambda spark: gen_df(spark, gen_list).createOrReplaceTempView(data_table))
storage, confs = storage_with_confs
fallback_class = "ExecutedCommandExec" if is_spark_340_or_later() or is_databricks122_or_later() else "DataWritingCommandExec"
assert_gpu_fallback_collect(
lambda spark: spark.sql("CREATE TABLE {} STORED AS {} AS SELECT * FROM {}".format(
spark_tmp_table_factory.get(), storage, data_table)),
fallback_class, conf=confs)
@allow_non_gpu('DataWritingCommandExec,ExecutedCommandExec,WriteFilesExec')
@pytest.mark.skipif(not is_hive_available(), reason="Hive is missing")
@pytest.mark.parametrize("gens", [_basic_gens], ids=idfn)
@pytest.mark.parametrize("storage_with_opts", [
("PARQUET", {"parquet.encryption.footer.key": "k1",
"parquet.encryption.column.keys": "k2:a"}),
("ORC", {"orc.compress": "zlib"})], ids=idfn)
def test_optimized_hive_ctas_options_fallback(gens, storage_with_opts, spark_tmp_table_factory):
data_table = spark_tmp_table_factory.get()
gen_list = [('c' + str(i), gen) for i, gen in enumerate(gens)]
with_cpu_session(lambda spark: gen_df(spark, gen_list).createOrReplaceTempView(data_table))
storage, opts = storage_with_opts
opts_string = ", ".join(["'{}'='{}'".format(k, v) for k, v in opts.items()])
fallback_class = "ExecutedCommandExec" if is_spark_340_or_later() or is_databricks122_or_later() else "DataWritingCommandExec"
assert_gpu_fallback_collect(
lambda spark: spark.sql("CREATE TABLE {} OPTIONS ({}) STORED AS {} AS SELECT * FROM {}".format(
spark_tmp_table_factory.get(), opts_string, storage, data_table)),
fallback_class)
@allow_non_gpu('DataWritingCommandExec,ExecutedCommandExec,WriteFilesExec')
@pytest.mark.skipif(not (is_hive_available() and is_spark_33X() and not is_databricks122_or_later()),
reason="Requires Hive and Spark 3.3.X to write bucketed Hive tables")
@pytest.mark.parametrize("gens", [_basic_gens], ids=idfn)
@pytest.mark.parametrize("storage", ["PARQUET", "ORC"], ids=idfn)
def test_optimized_hive_bucketed_fallback_33X(gens, storage, spark_tmp_table_factory):
in_table = spark_tmp_table_factory.get()
with_cpu_session(lambda spark: three_col_df(spark, int_gen, int_gen, int_gen).createOrReplaceTempView(in_table))
assert_gpu_fallback_collect(
lambda spark: spark.sql(
"""CREATE TABLE {} STORED AS {}
CLUSTERED BY (b) INTO 3 BUCKETS
AS SELECT * FROM {}""".format(spark_tmp_table_factory.get(), storage, in_table)),
"DataWritingCommandExec")
# Since Spark 3.4.0, the internal "SortExec" is pulled out of the FileFormatWriter
# by default, making it visible in the planning stage.
@allow_non_gpu("DataWritingCommandExec", "SortExec", "WriteFilesExec")
@pytest.mark.skipif(not (is_hive_available() and (is_spark_340_or_later() or is_databricks122_or_later())),
reason="Requires Hive and Spark 3.4+ to write bucketed Hive tables with SortExec pulled out")
@pytest.mark.parametrize("gens", [_basic_gens], ids=idfn)
@pytest.mark.parametrize("storage", ["PARQUET", "ORC"], ids=idfn)
@pytest.mark.parametrize("planned_write", [True, False], ids=idfn)
def test_optimized_hive_bucketed_fallback(gens, storage, planned_write, spark_tmp_table_factory):
in_table = spark_tmp_table_factory.get()
with_cpu_session(lambda spark: three_col_df(spark, int_gen, int_gen, int_gen).createOrReplaceTempView(in_table))
assert_gpu_fallback_collect(
lambda spark: spark.sql(
"""CREATE TABLE {} STORED AS {}
CLUSTERED BY (b) INTO 3 BUCKETS
AS SELECT * FROM {}""".format(spark_tmp_table_factory.get(), storage, in_table)),
"ExecutedCommandExec",
{"spark.sql.optimizer.plannedWrite.enabled": planned_write})
def test_hive_copy_ints_to_long(spark_tmp_table_factory):
do_hive_copy(spark_tmp_table_factory, int_gen, "INT", "BIGINT")
def test_hive_copy_longs_to_float(spark_tmp_table_factory):
do_hive_copy(spark_tmp_table_factory, long_gen, "BIGINT", "FLOAT")
def do_hive_copy(spark_tmp_table_factory, gen, type1, type2):
t1 = spark_tmp_table_factory.get()
with_cpu_session(lambda spark: unary_op_df(spark, gen).createOrReplaceTempView(t1))
def do_test(spark):
t2 = spark_tmp_table_factory.get()
t3 = spark_tmp_table_factory.get()
spark.sql("""CREATE TABLE {} (c0 {}) USING PARQUET""".format(t2, type1))
spark.sql("""INSERT INTO {} SELECT a FROM {}""".format(t2, t1))
spark.sql("""CREATE TABLE {} (c0 {}) USING PARQUET""".format(t3, type2))
# Copy data between two tables, causing ansi_cast() expressions to be inserted into the plan.
return spark.sql("""INSERT INTO {} SELECT c0 FROM {}""".format(t3, t2))
(from_cpu, cpu_df), (from_gpu, gpu_df) = run_with_cpu_and_gpu(
do_test, 'COLLECT_WITH_DATAFRAME',
conf={
'spark.sql.ansi.enabled': 'true',
'spark.sql.storeAssignmentPolicy': 'ANSI'})
jvm = spark_jvm()
jvm.org.apache.spark.sql.rapids.ExecutionPlanCaptureCallback.assertContainsAnsiCast(cpu_df._jdf)
jvm.org.apache.spark.sql.rapids.ExecutionPlanCaptureCallback.assertContainsAnsiCast(gpu_df._jdf)
assert_equal(from_cpu, from_gpu) | spark-rapids-branch-23.10 | integration_tests/src/main/python/hive_write_test.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from asserts import assert_gpu_and_cpu_are_equal_collect
from data_gen import *
from datetime import datetime, timezone
from marks import ignore_order
import pytest
from spark_session import is_databricks_runtime, is_databricks113_or_later
_formats = ("parquet", "orc")
_confs = {
"spark.sql.legacy.parquet.datetimeRebaseModeInRead": "CORRECTED",
"spark.sql.legacy.parquet.int96RebaseModeInRead": "CORRECTED",
"spark.sql.legacy.parquet.datetimeRebaseModeInWrite": "CORRECTED",
"spark.sql.legacy.parquet.int96RebaseModeInWrite": "CORRECTED",
}
# List of additional column data generators to use when adding columns
_additional_gens = [
boolean_gen,
byte_gen,
short_gen,
int_gen,
long_gen,
float_gen,
double_gen,
string_gen,
date_gen,
TimestampGen(start=datetime(1677, 9, 22, tzinfo=timezone.utc), end=datetime(2262, 4, 11, tzinfo=timezone.utc)),
    # RAPIDS Accelerator does not support MapFromArrays yet
    # https://github.com/NVIDIA/spark-rapids/issues/8696
    # simple_string_to_string_map_gen,
ArrayGen(date_gen),
struct_gen_decimal128,
StructGen([("c0", ArrayGen(long_gen)), ("c1", boolean_gen)]),
]
def get_additional_columns():
"""Returns a list of column_name, data_generator pairs to use when adding columns"""
return [ (f"new_{i}", g) for i, g in enumerate(_additional_gens) ]
def get_ddl(col_gen_pairs):
"""Given a list of column_name, data_generator paris, returns the corresponding DDL string"""
return ', '.join([f"{c} {g.data_type.simpleString()}" for c, g in col_gen_pairs])
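# For illustration only: a hedged sketch of how the helpers above would be combined into
# the ALTER TABLE statement issued in the test below. The default table name is a
# hypothetical placeholder; the real test uses spark_tmp_table_factory instead.
def _example_add_columns_sql(table_name="example_table"):
    new_cols = get_additional_columns()
    return "ALTER TABLE {} ADD COLUMNS ({})".format(table_name, get_ddl(new_cols))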
@ignore_order(local=True)
@pytest.mark.parametrize("format", _formats)
def test_column_add_after_partition(spark_tmp_table_factory, format):
# Databricks 10.4 appears to be missing https://issues.apache.org/jira/browse/SPARK-39417
# so avoid generating nulls for numeric partitions
before_gens = [("a", LongGen(min_val=-1, max_val=1,
nullable=not is_databricks_runtime() or is_databricks113_or_later())),
("b", SetValuesGen(StringType(), ["x", "y", "z"])),
("c", long_gen)]
new_cols_gens = get_additional_columns()
new_ddl = get_ddl(new_cols_gens)
after_gens = before_gens + new_cols_gens
def testf(spark):
table_name = spark_tmp_table_factory.get()
df = gen_df(spark, before_gens)
df.write\
.format(format)\
.partitionBy("a", "b")\
.saveAsTable(table_name)
spark.sql(f"ALTER TABLE {table_name} ADD COLUMNS ({new_ddl})")
df = gen_df(spark, after_gens)
df.write\
.format(format)\
.mode("append")\
.partitionBy("a", "b")\
.saveAsTable(table_name)
return spark.sql(f"SELECT * FROM {table_name}")
assert_gpu_and_cpu_are_equal_collect(testf, conf=_confs)
| spark-rapids-branch-23.10 | integration_tests/src/main/python/schema_evolution_test.py |
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_cpu_and_gpu_are_equal_sql_with_capture, assert_gpu_and_cpu_are_equal_collect, assert_gpu_and_cpu_row_counts_equal, assert_gpu_fallback_collect, \
assert_cpu_and_gpu_are_equal_collect_with_capture, assert_gpu_and_cpu_writes_are_equal_collect, assert_gpu_and_cpu_are_equal_sql
from data_gen import *
from marks import *
from pyspark.sql.types import *
from spark_session import with_cpu_session, is_before_spark_320, is_before_spark_330, is_spark_cdh, is_spark_340_or_later
from parquet_test import _nested_pruning_schemas
from conftest import is_databricks_runtime
pytestmark = pytest.mark.nightly_resource_consuming_test
def read_orc_df(data_path):
return lambda spark : spark.read.orc(data_path)
def read_orc_sql(data_path):
return lambda spark : spark.sql('select * from orc.`{}`'.format(data_path))
# Using timestamps from 1590 to work around a cudf ORC bug
# https://github.com/NVIDIA/spark-rapids/issues/131.
# Once the bug is fixed we should remove this and use timestamp_gen.
def get_orc_timestamp_gen(nullable=True):
return TimestampGen(start=datetime(1590, 1, 1, tzinfo=timezone.utc), nullable=nullable)
orc_timestamp_gen = get_orc_timestamp_gen()
# Test with the original per-file ORC reader, the multithreaded (multi-file parallel) reader
# used for cloud reads, and the coalescing reader.
original_orc_file_reader_conf = {'spark.rapids.sql.format.orc.reader.type': 'PERFILE'}
multithreaded_orc_file_reader_conf = {'spark.rapids.sql.format.orc.reader.type': 'MULTITHREADED',
'spark.rapids.sql.reader.multithreaded.combine.sizeBytes': '0',
'spark.rapids.sql.reader.multithreaded.read.keepOrder': True}
multithreaded_orc_file_reader_combine_ordered_conf = {
'spark.rapids.sql.format.orc.reader.type': 'MULTITHREADED',
'spark.rapids.sql.reader.multithreaded.combine.sizeBytes': '64m',
'spark.rapids.sql.reader.multithreaded.read.keepOrder': True}
multithreaded_orc_file_reader_combine_unordered_conf = {
'spark.rapids.sql.format.orc.reader.type': 'MULTITHREADED',
'spark.rapids.sql.reader.multithreaded.combine.sizeBytes': '64m',
'spark.rapids.sql.reader.multithreaded.read.keepOrder': False}
coalescing_orc_file_reader_conf = {'spark.rapids.sql.format.orc.reader.type': 'COALESCING'}
reader_opt_confs_common = [original_orc_file_reader_conf, multithreaded_orc_file_reader_conf,
coalescing_orc_file_reader_conf,
multithreaded_orc_file_reader_combine_ordered_conf]
reader_opt_confs = reader_opt_confs_common + [
pytest.param(multithreaded_orc_file_reader_combine_unordered_conf, marks=pytest.mark.ignore_order(local=True))]
# The count result cannot be sorted, so a local sort cannot be used.
reader_opt_confs_for_count = reader_opt_confs_common + [multithreaded_orc_file_reader_combine_unordered_conf]
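# The reader configurations above are merged with per-test settings via copy_and_update
# (imported from data_gen). A hedged sketch of the assumed behavior is a simple
# non-mutating dict merge; the helper below is illustrative only and unused by the tests.
def _example_copy_and_update(base_conf, updates):
    merged = dict(base_conf)  # copy so the shared conf dicts above stay untouched
    merged.update(updates)
    return merged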
@pytest.mark.parametrize('name', ['timestamp-date-test.orc'])
@pytest.mark.parametrize('read_func', [read_orc_df, read_orc_sql])
@pytest.mark.parametrize('v1_enabled_list', ["", "orc"])
@pytest.mark.parametrize('orc_impl', ["native", "hive"])
@pytest.mark.parametrize('reader_confs', reader_opt_confs, ids=idfn)
def test_basic_read(std_input_path, name, read_func, v1_enabled_list, orc_impl, reader_confs):
all_confs = copy_and_update(reader_confs, {
'spark.sql.sources.useV1SourceList': v1_enabled_list,
'spark.sql.orc.impl': orc_impl})
assert_gpu_and_cpu_are_equal_collect(
read_func(std_input_path + '/' + name),
conf=all_confs)
# ORC does not support a negative scale for decimals, so the decimal generators used here
# must avoid negative scales. Otherwise the exception below is thrown.
# ...
#E Caused by: java.lang.IllegalArgumentException: Missing integer at
# 'struct<`_c0`:decimal(7,^-3),`_c1`:decimal(7,3),`_c2`:decimal(7,7),`_c3`:decimal(12,2)>'
#E at org.apache.orc.TypeDescription.parseInt(TypeDescription.java:244)
#E at org.apache.orc.TypeDescription.parseType(TypeDescription.java:362)
# ...
orc_basic_gens = [byte_gen, short_gen, int_gen, long_gen, float_gen, double_gen,
string_gen, boolean_gen, DateGen(start=date(1590, 1, 1)),
orc_timestamp_gen] + decimal_gens
orc_basic_struct_gen = StructGen([['child'+str(ind), sub_gen] for ind, sub_gen in enumerate(orc_basic_gens)])
# Some array gens, but not all because of nesting
orc_array_gens_sample = [ArrayGen(sub_gen) for sub_gen in orc_basic_gens] + [
ArrayGen(ArrayGen(short_gen, max_length=10), max_length=10),
ArrayGen(ArrayGen(string_gen, max_length=10), max_length=10),
ArrayGen(ArrayGen(decimal_gen_64bit, max_length=10), max_length=10),
ArrayGen(StructGen([['child0', byte_gen], ['child1', string_gen], ['child2', float_gen]]))]
# Some struct gens, but not all because of nesting.
# No empty struct gen because it leads to an error as below.
# '''
# E pyspark.sql.utils.AnalysisException:
# E Datasource does not support writing empty or nested empty schemas.
# E Please make sure the data schema has at least one or more column(s).
# '''
orc_struct_gens_sample = [orc_basic_struct_gen,
StructGen([['child0', byte_gen], ['child1', orc_basic_struct_gen]]),
StructGen([['child0', ArrayGen(short_gen)], ['child1', double_gen]])]
orc_basic_map_gens = [simple_string_to_string_map_gen] + [MapGen(f(nullable=False), f()) for f in [
BooleanGen, ByteGen, ShortGen, IntegerGen, LongGen, FloatGen, DoubleGen,
lambda nullable=True: get_orc_timestamp_gen(nullable),
lambda nullable=True: DateGen(start=date(1590, 1, 1), nullable=nullable),
lambda nullable=True: DecimalGen(precision=15, scale=1, nullable=nullable),
lambda nullable=True: DecimalGen(precision=36, scale=5, nullable=nullable)]]
# Some map gens, but not all because of nesting
orc_map_gens_sample = orc_basic_map_gens + [
MapGen(StringGen(pattern='key_[0-9]', nullable=False), ArrayGen(string_gen), max_length=10),
MapGen(StringGen(pattern='key_[0-9]', nullable=False), ArrayGen(decimal_gen_128bit), max_length=10),
MapGen(StringGen(pattern='key_[0-9]', nullable=False),
ArrayGen(StructGen([["c0", decimal_gen_64bit], ["c1", decimal_gen_128bit]])), max_length=10),
MapGen(RepeatSeqGen(IntegerGen(nullable=False), 10), long_gen, max_length=10),
MapGen(StringGen(pattern='key_[0-9]', nullable=False), simple_string_to_string_map_gen),
MapGen(StructGen([['child0', byte_gen], ['child1', long_gen]], nullable=False),
StructGen([['child0', byte_gen], ['child1', long_gen]]))]
orc_gens_list = [orc_basic_gens,
orc_array_gens_sample,
orc_struct_gens_sample,
orc_map_gens_sample,
pytest.param([date_gen], marks=pytest.mark.xfail(reason='https://github.com/NVIDIA/spark-rapids/issues/131')),
pytest.param([timestamp_gen], marks=pytest.mark.xfail(reason='https://github.com/NVIDIA/spark-rapids/issues/131'))]
flattened_orc_gens = orc_basic_gens + orc_array_gens_sample + orc_struct_gens_sample
@allow_non_gpu('FileSourceScanExec')
@pytest.mark.parametrize('read_func', [read_orc_df, read_orc_sql])
@pytest.mark.parametrize('disable_conf', ['spark.rapids.sql.format.orc.enabled', 'spark.rapids.sql.format.orc.read.enabled'])
def test_orc_fallback(spark_tmp_path, read_func, disable_conf):
data_gens =[string_gen,
byte_gen, short_gen, int_gen, long_gen, boolean_gen]
gen_list = [('_c' + str(i), gen) for i, gen in enumerate(data_gens)]
gen = StructGen(gen_list, nullable=False)
data_path = spark_tmp_path + '/ORC_DATA'
reader = read_func(data_path)
with_cpu_session(
lambda spark : gen_df(spark, gen).write.orc(data_path))
assert_gpu_fallback_collect(
lambda spark : reader(spark).select(f.col('*'), f.col('_c2') + f.col('_c3')),
'FileSourceScanExec',
conf={disable_conf: 'false',
"spark.sql.sources.useV1SourceList": "orc"})
@pytest.mark.order(2)
@pytest.mark.parametrize('orc_gens', orc_gens_list, ids=idfn)
@pytest.mark.parametrize('read_func', [read_orc_df, read_orc_sql])
@pytest.mark.parametrize('reader_confs', reader_opt_confs, ids=idfn)
@pytest.mark.parametrize('v1_enabled_list', ["", "orc"])
def test_read_round_trip(spark_tmp_path, orc_gens, read_func, reader_confs, v1_enabled_list):
gen_list = [('_c' + str(i), gen) for i, gen in enumerate(orc_gens)]
data_path = spark_tmp_path + '/ORC_DATA'
with_cpu_session(
lambda spark : gen_df(spark, gen_list).write.orc(data_path))
all_confs = copy_and_update(reader_confs, {'spark.sql.sources.useV1SourceList': v1_enabled_list})
assert_gpu_and_cpu_are_equal_collect(
read_func(data_path),
conf=all_confs)
orc_pred_push_gens = [
byte_gen, short_gen, int_gen, long_gen, float_gen, double_gen, boolean_gen,
string_gen,
# Once https://github.com/NVIDIA/spark-rapids/issues/139 is fixed replace this with
# date_gen
DateGen(start=date(1590, 1, 1)),
# Once https://github.com/NVIDIA/spark-rapids/issues/140 is fixed replace this with
# timestamp_gen
orc_timestamp_gen]
@pytest.mark.order(2)
@pytest.mark.parametrize('orc_gen', orc_pred_push_gens, ids=idfn)
@pytest.mark.parametrize('read_func', [read_orc_df, read_orc_sql])
@pytest.mark.parametrize('v1_enabled_list', ["", "orc"])
@pytest.mark.parametrize('reader_confs', reader_opt_confs, ids=idfn)
def test_pred_push_round_trip(spark_tmp_path, orc_gen, read_func, v1_enabled_list, reader_confs):
data_path = spark_tmp_path + '/ORC_DATA'
# Append two struct columns to verify nested predicate pushdown.
gen_list = [('a', RepeatSeqGen(orc_gen, 100)), ('b', orc_gen),
('s1', StructGen([['sa', orc_gen]])),
('s2', StructGen([['sa', StructGen([['ssa', orc_gen]])]]))]
s0 = gen_scalar(orc_gen, force_no_nulls=True)
with_cpu_session(
lambda spark : gen_df(spark, gen_list).orderBy('a').write.orc(data_path))
all_confs = copy_and_update(reader_confs, {'spark.sql.sources.useV1SourceList': v1_enabled_list})
rf = read_func(data_path)
assert_gpu_and_cpu_are_equal_collect(
lambda spark: rf(spark).select(f.col('a') >= s0, f.col('s1.sa') >= s0, f.col('s2.sa.ssa') >= s0),
conf=all_confs)
orc_compress_options = ['none', 'uncompressed', 'snappy', 'zlib']
# zstd is available in spark 3.2.0 and later.
if not is_before_spark_320() and not is_spark_cdh():
orc_compress_options.append('zstd')
# The following compression option needs extra jars: 'lzo'
# https://github.com/NVIDIA/spark-rapids/issues/143
# Test the different compression combinations
@ignore_order
@pytest.mark.parametrize('v1_enabled_list', ["", "orc"])
@pytest.mark.parametrize('reader_confs', reader_opt_confs, ids=idfn)
def test_mixed_compress_read(spark_tmp_path, v1_enabled_list, reader_confs):
    data_paths = []
    for compress in orc_compress_options:
        data_path = spark_tmp_path + '/ORC_DATA' + compress
        with_cpu_session(
                lambda spark : binary_op_df(spark, long_gen).write.orc(data_path),
                conf={'spark.sql.orc.compression.codec': compress})
        data_paths.append(data_path)
    all_confs = copy_and_update(reader_confs, {'spark.sql.sources.useV1SourceList': v1_enabled_list})
    assert_gpu_and_cpu_are_equal_collect(
            lambda spark : spark.read.orc(data_paths),
            conf=all_confs)
@pytest.mark.parametrize('compress', orc_compress_options)
@pytest.mark.parametrize('v1_enabled_list', ["", "orc"])
@pytest.mark.parametrize('reader_confs', reader_opt_confs, ids=idfn)
def test_compress_read_round_trip(spark_tmp_path, compress, v1_enabled_list, reader_confs):
data_path = spark_tmp_path + '/ORC_DATA'
with_cpu_session(
lambda spark : binary_op_df(spark, long_gen).write.orc(data_path),
conf={'spark.sql.orc.compression.codec': compress})
all_confs = copy_and_update(reader_confs, {'spark.sql.sources.useV1SourceList': v1_enabled_list})
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.read.orc(data_path),
conf=all_confs)
@pytest.mark.parametrize('v1_enabled_list', ["", "orc"])
@pytest.mark.parametrize('reader_confs', reader_opt_confs, ids=idfn)
def test_simple_partitioned_read(spark_tmp_path, v1_enabled_list, reader_confs):
# Once https://github.com/NVIDIA/spark-rapids/issues/131 is fixed
# we should go with a more standard set of generators
orc_gens = [byte_gen, short_gen, int_gen, long_gen, float_gen, double_gen,
string_gen, boolean_gen, DateGen(start=date(1590, 1, 1)),
orc_timestamp_gen]
gen_list = [('_c' + str(i), gen) for i, gen in enumerate(orc_gens)]
first_data_path = spark_tmp_path + '/ORC_DATA/key=0/key2=20'
with_cpu_session(
lambda spark : gen_df(spark, gen_list).write.orc(first_data_path))
second_data_path = spark_tmp_path + '/ORC_DATA/key=1/key2=21'
with_cpu_session(
lambda spark : gen_df(spark, gen_list).write.orc(second_data_path))
third_data_path = spark_tmp_path + '/ORC_DATA/key=2/key2=22'
with_cpu_session(
lambda spark : gen_df(spark, gen_list).write.orc(third_data_path))
data_path = spark_tmp_path + '/ORC_DATA'
all_confs = copy_and_update(reader_confs, {'spark.sql.sources.useV1SourceList': v1_enabled_list})
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.read.orc(data_path),
conf=all_confs)
# Set up an external table that renames the columns
def setup_external_table_with_forced_positions(spark, table_name, data_path):
rename_cols_query = "CREATE EXTERNAL TABLE `{}` (`col10` INT, `_c1` STRING, `col30` DOUBLE) STORED AS orc LOCATION '{}'".format(table_name, data_path)
    spark.sql(rename_cols_query).collect()
@pytest.mark.skipif(is_before_spark_320(), reason='ORC forced positional evolution support is added in Spark-3.2')
@pytest.mark.parametrize('reader_confs', reader_opt_confs, ids=idfn)
@pytest.mark.parametrize('forced_position', ["true", "false"])
@pytest.mark.parametrize('orc_impl', ["native", "hive"])
def test_orc_forced_position(spark_tmp_path, spark_tmp_table_factory, reader_confs, forced_position, orc_impl):
orc_gens = [int_gen, string_gen, double_gen]
gen_list = [('_c' + str(i), gen) for i, gen in enumerate(orc_gens)]
data_path = spark_tmp_path + 'ORC_DATA'
with_cpu_session(lambda spark : gen_df(spark, gen_list).write.orc(data_path))
table_name = spark_tmp_table_factory.get()
with_cpu_session(lambda spark : setup_external_table_with_forced_positions(spark, table_name, data_path))
all_confs = copy_and_update(reader_confs, {
'orc.force.positional.evolution': forced_position,
'spark.sql.orc.impl': orc_impl})
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.sql("SELECT * FROM {}".format(table_name)),
conf=all_confs)
# In this test we read the data but select only the key the data was partitioned by
@pytest.mark.parametrize('v1_enabled_list', ["", "orc"])
@pytest.mark.parametrize('reader_confs', reader_opt_confs, ids=idfn)
def test_partitioned_read_just_partitions(spark_tmp_path, v1_enabled_list, reader_confs):
orc_gens = [byte_gen]
gen_list = [('_c' + str(i), gen) for i, gen in enumerate(orc_gens)]
first_data_path = spark_tmp_path + '/ORC_DATA/key=0'
with_cpu_session(
lambda spark : gen_df(spark, gen_list).write.orc(first_data_path))
second_data_path = spark_tmp_path + '/ORC_DATA/key=1'
with_cpu_session(
lambda spark : gen_df(spark, gen_list).write.orc(second_data_path))
data_path = spark_tmp_path + '/ORC_DATA'
all_confs = copy_and_update(reader_confs, {'spark.sql.sources.useV1SourceList': v1_enabled_list})
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.read.orc(data_path).select("key"),
conf=all_confs)
@pytest.mark.parametrize('v1_enabled_list', ["", "orc"])
@pytest.mark.parametrize('reader_confs', reader_opt_confs, ids=idfn)
def test_merge_schema_read(spark_tmp_path, v1_enabled_list, reader_confs):
# Once https://github.com/NVIDIA/spark-rapids/issues/131 is fixed
# we should go with a more standard set of generators
orc_gens = [byte_gen, short_gen, int_gen, long_gen, float_gen, double_gen,
string_gen, boolean_gen, DateGen(start=date(1590, 1, 1)),
orc_timestamp_gen]
first_gen_list = [('_c' + str(i), gen) for i, gen in enumerate(orc_gens)]
first_data_path = spark_tmp_path + '/ORC_DATA/key=0'
with_cpu_session(
lambda spark : gen_df(spark, first_gen_list).write.orc(first_data_path))
second_gen_list = [(('_c' if i % 2 == 0 else '_b') + str(i), gen) for i, gen in enumerate(orc_gens)]
second_data_path = spark_tmp_path + '/ORC_DATA/key=1'
with_cpu_session(
lambda spark : gen_df(spark, second_gen_list).write.orc(second_data_path))
data_path = spark_tmp_path + '/ORC_DATA'
all_confs = copy_and_update(reader_confs, {'spark.sql.sources.useV1SourceList': v1_enabled_list})
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.read.option('mergeSchema', 'true').orc(data_path),
conf=all_confs)
@pytest.mark.parametrize('v1_enabled_list', ["", "orc"])
@pytest.mark.parametrize('reader_confs', reader_opt_confs, ids=idfn)
def test_read_orc_with_empty_clipped_schema(spark_tmp_path, v1_enabled_list, reader_confs):
data_path = spark_tmp_path + '/ORC_DATA'
with_cpu_session(
lambda spark: gen_df(spark, [('a', int_gen)], length=100).write.orc(data_path))
schema = StructType([StructField('b', IntegerType()), StructField('c', StringType())])
all_confs = copy_and_update(reader_confs, {'spark.sql.sources.useV1SourceList': v1_enabled_list})
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.read.schema(schema).orc(data_path), conf=all_confs)
@pytest.mark.parametrize('v1_enabled_list', ["", "orc"])
@pytest.mark.parametrize('reader_confs', reader_opt_confs, ids=idfn)
def test_orc_read_multiple_schema(spark_tmp_path, v1_enabled_list, reader_confs):
first_gen_list = [('a', int_gen), ('b', int_gen)]
first_data_path = spark_tmp_path + '/ORC_DATA/key=0'
with_cpu_session(
lambda spark: gen_df(spark, first_gen_list, num_slices=10).write.orc(first_data_path))
second_gen_list = [('c', int_gen), ('b', int_gen), ('a', int_gen)]
second_data_path = spark_tmp_path + '/ORC_DATA/key=1'
with_cpu_session(
lambda spark: gen_df(spark, second_gen_list, num_slices=10).write.orc(second_data_path))
data_path = spark_tmp_path + '/ORC_DATA'
read_schema = StructType([StructField("b", IntegerType()),
StructField("a", IntegerType()),
StructField("c", IntegerType())])
all_confs = copy_and_update(reader_confs, {'spark.sql.sources.useV1SourceList': v1_enabled_list})
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.read.schema(read_schema).orc(data_path),
conf=all_confs)
@pytest.mark.parametrize('v1_enabled_list', ["", "orc"])
def test_orc_read_avoid_coalesce_incompatible_files(spark_tmp_path, v1_enabled_list):
data_path = spark_tmp_path + '/ORC_DATA'
def setup_table(spark):
df1 = spark.createDataFrame([(("a", "b"),)], "x: struct<y: string, z: string>")
df1.write.orc(data_path + "/data1")
df2 = spark.createDataFrame([(("a",),)], "x: struct<z: string>")
df2.write.orc(data_path + "/data2")
with_cpu_session(setup_table)
# Configure confs to read as a single task
all_confs = copy_and_update(coalescing_orc_file_reader_conf, {
"spark.sql.sources.useV1SourceList": v1_enabled_list,
"spark.sql.files.minPartitionNum": "1"})
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.read \
.schema("x STRUCT<y: string, z: string>") \
.option("recursiveFileLookup", "true").orc(data_path),
conf=all_confs)
@pytest.mark.parametrize('v1_enabled_list', ["", "orc"])
@pytest.mark.parametrize('reader_confs', reader_opt_confs, ids=idfn)
def test_input_meta(spark_tmp_path, v1_enabled_list, reader_confs):
first_data_path = spark_tmp_path + '/ORC_DATA/key=0'
with_cpu_session(
lambda spark : unary_op_df(spark, long_gen).write.orc(first_data_path))
second_data_path = spark_tmp_path + '/ORC_DATA/key=1'
with_cpu_session(
lambda spark : unary_op_df(spark, long_gen).write.orc(second_data_path))
data_path = spark_tmp_path + '/ORC_DATA'
all_confs = copy_and_update(reader_confs, {'spark.sql.sources.useV1SourceList': v1_enabled_list})
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.read.orc(data_path)\
.filter(f.col('a') > 0)\
.selectExpr('a',
'input_file_name()',
'input_file_block_start()',
'input_file_block_length()'),
conf=all_confs)
@allow_non_gpu('ProjectExec', 'Alias', 'InputFileName', 'InputFileBlockStart', 'InputFileBlockLength',
'FilterExec', 'And', 'IsNotNull', 'GreaterThan', 'Literal',
'FileSourceScanExec', 'ColumnarToRowExec',
'BatchScanExec', 'OrcScan')
@pytest.mark.parametrize('v1_enabled_list', ["", "orc"])
@pytest.mark.parametrize('disable_conf', ['spark.rapids.sql.format.orc.enabled', 'spark.rapids.sql.format.orc.read.enabled'])
@pytest.mark.parametrize('reader_confs', reader_opt_confs, ids=idfn)
def test_input_meta_fallback(spark_tmp_path, v1_enabled_list, reader_confs, disable_conf):
first_data_path = spark_tmp_path + '/ORC_DATA/key=0'
with_cpu_session(
lambda spark : unary_op_df(spark, long_gen).write.orc(first_data_path))
second_data_path = spark_tmp_path + '/ORC_DATA/key=1'
with_cpu_session(
lambda spark : unary_op_df(spark, long_gen).write.orc(second_data_path))
data_path = spark_tmp_path + '/ORC_DATA'
all_confs = copy_and_update(reader_confs, {
'spark.sql.sources.useV1SourceList': v1_enabled_list,
disable_conf: 'false'})
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.read.orc(data_path)\
.filter(f.col('a') > 0)\
.selectExpr('a',
'input_file_name()',
'input_file_block_start()',
'input_file_block_length()'),
conf=all_confs)
def setup_orc_file_no_column_names(spark, table_name, location=None):
drop_query = "DROP TABLE IF EXISTS {}".format(table_name)
create_query = "CREATE TABLE `{}` (`_col1` INT, `_col2` STRING, `_col3` INT) USING orc".format(table_name)
if location:
create_query += f" LOCATION '{location}'"
insert_query = "INSERT INTO {} VALUES(13, '155', 2020)".format(table_name)
    spark.sql(drop_query).collect()
    spark.sql(create_query).collect()
    spark.sql(insert_query).collect()
@pytest.mark.parametrize('reader_confs', reader_opt_confs, ids=idfn)
def test_missing_column_names(spark_tmp_table_factory, reader_confs):
table_name = spark_tmp_table_factory.get()
with_cpu_session(lambda spark : setup_orc_file_no_column_names(spark, table_name))
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.sql("SELECT _col3,_col2 FROM {}".format(table_name)),
reader_confs)
@pytest.mark.parametrize('reader_confs', reader_opt_confs_for_count, ids=idfn)
def test_missing_column_names_count(spark_tmp_table_factory, reader_confs):
table_name = spark_tmp_table_factory.get()
with_cpu_session(lambda spark : setup_orc_file_no_column_names(spark, table_name))
assert_gpu_and_cpu_row_counts_equal(
lambda spark : spark.sql("SELECT * FROM {}".format(table_name)),
reader_confs)
# ORC detects the absence of column names by inspecting the column names themselves,
# so it is possible for some names to match the placeholder pattern while others do not
def setup_orc_file_partial_no_column_names(spark, table_name, location=None):
drop_query = "DROP TABLE IF EXISTS {}".format(table_name)
create_query = "CREATE TABLE `{}` (`_col1` INT, `arr` ARRAY<STRING>, `str` STRUCT<a: INT>) USING orc".format(table_name)
if location:
create_query += f" LOCATION '{location}'"
insert_query = "INSERT INTO {} VALUES(13, array('155'), named_struct('a', 2020))".format(table_name)
    spark.sql(drop_query).collect()
    spark.sql(create_query).collect()
    spark.sql(insert_query).collect()
@pytest.mark.parametrize('reader_confs', reader_opt_confs, ids=idfn)
def test_partial_missing_column_names(spark_tmp_table_factory, reader_confs):
table_name = spark_tmp_table_factory.get()
with_cpu_session(lambda spark : setup_orc_file_partial_no_column_names(spark, table_name))
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.sql("SELECT str,arr FROM {}".format(table_name)),
reader_confs)
@pytest.mark.parametrize('reader_confs', reader_opt_confs_for_count, ids=idfn)
def test_partial_missing_column_names_count(spark_tmp_table_factory, reader_confs):
table_name = spark_tmp_table_factory.get()
with_cpu_session(lambda spark : setup_orc_file_partial_no_column_names(spark, table_name))
assert_gpu_and_cpu_row_counts_equal(
lambda spark : spark.sql("SELECT * FROM {}".format(table_name)),
reader_confs)
@pytest.mark.parametrize('reader_confs', reader_opt_confs, ids=idfn)
def test_missing_column_names_with_schema(spark_tmp_table_factory, spark_tmp_path, reader_confs):
table_name = spark_tmp_table_factory.get()
table_location = spark_tmp_path + "/ORC_DATA"
with_cpu_session(lambda spark : setup_orc_file_no_column_names(spark, table_name, table_location))
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.read.schema("a int, b string, c int").orc(table_location),
reader_confs)
@pytest.mark.parametrize('reader_confs', reader_opt_confs_for_count, ids=idfn)
def test_missing_column_names_count_with_schema(spark_tmp_table_factory, spark_tmp_path, reader_confs):
table_name = spark_tmp_table_factory.get()
table_location = spark_tmp_path + "/ORC_DATA"
with_cpu_session(lambda spark : setup_orc_file_no_column_names(spark, table_name, table_location))
assert_gpu_and_cpu_row_counts_equal(
lambda spark : spark.read.schema("a int, b string, c int").orc(table_location),
reader_confs)
def setup_orc_file_with_column_names(spark, table_name):
drop_query = "DROP TABLE IF EXISTS {}".format(table_name)
create_query = "CREATE TABLE `{}` (`c_1` INT, `c_2` STRING, `c_3` ARRAY<INT>) USING orc".format(table_name)
insert_query = "INSERT INTO {} VALUES(13, '155', array(2020))".format(table_name)
    spark.sql(drop_query).collect()
    spark.sql(create_query).collect()
    spark.sql(insert_query).collect()
@pytest.mark.parametrize('reader_confs', reader_opt_confs, ids=idfn)
def test_disorder_read_schema(spark_tmp_table_factory, reader_confs):
table_name = spark_tmp_table_factory.get()
with_cpu_session(lambda spark : setup_orc_file_with_column_names(spark, table_name))
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.sql("SELECT c_2,c_1 FROM {}".format(table_name)),
reader_confs)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.sql("SELECT c_3,c_1 FROM {}".format(table_name)),
reader_confs)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.sql("SELECT c_3,c_2 FROM {}".format(table_name)),
reader_confs)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.sql("SELECT c_1,c_3,c_2 FROM {}".format(table_name)),
reader_confs)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.sql("SELECT c_1,c_2,c_3 FROM {}".format(table_name)),
reader_confs)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.sql("SELECT c_2,c_1,c_3 FROM {}".format(table_name)),
reader_confs)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.sql("SELECT c_2,c_3,c_1 FROM {}".format(table_name)),
reader_confs)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.sql("SELECT c_3,c_1,c_2 FROM {}".format(table_name)),
reader_confs)
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.sql("SELECT c_3,c_2,c_1 FROM {}".format(table_name)),
reader_confs)
@pytest.mark.parametrize('reader_confs', reader_opt_confs, ids=idfn)
def test_missing_column_names_filter(spark_tmp_table_factory, reader_confs):
table_name = spark_tmp_table_factory.get()
with_cpu_session(lambda spark : setup_orc_file_no_column_names(spark, table_name))
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.sql("SELECT _col3,_col2 FROM {} WHERE _col2 = '155'".format(table_name)),
reader_confs)
@pytest.mark.parametrize('data_gen,read_schema', _nested_pruning_schemas, ids=idfn)
@pytest.mark.parametrize('reader_confs', reader_opt_confs, ids=idfn)
@pytest.mark.parametrize('v1_enabled_list', ["", "orc"])
@pytest.mark.parametrize('nested_enabled', ["true", "false"])
def test_read_nested_pruning(spark_tmp_path, data_gen, read_schema, reader_confs, v1_enabled_list, nested_enabled):
data_path = spark_tmp_path + '/ORC_DATA'
with_cpu_session(
lambda spark : gen_df(spark, data_gen).write.orc(data_path))
all_confs = copy_and_update(reader_confs, {
'spark.sql.sources.useV1SourceList': v1_enabled_list,
'spark.sql.optimizer.nestedSchemaPruning.enabled': nested_enabled})
# This is a hack to get the type in a slightly less verbose way
rs = StructGen(read_schema, nullable=False).data_type
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.read.schema(rs).orc(data_path),
conf=all_confs)
# This covers the corner case of reading only a struct column that has no nulls.
# In that case no streams in a stripe connect to this column (its ROW_INDEX streams
# have been pruned by the plugin), and cuDF used to throw an exception for it.
# The tracking issue, https://github.com/rapidsai/cudf/issues/8878, has since been
# fixed, but the test is kept so this corner case stays covered.
def test_read_struct_without_stream(spark_tmp_path):
data_gen = StructGen([['c_byte', ByteGen(nullable=False)]], nullable=False)
data_path = spark_tmp_path + '/ORC_DATA'
with_cpu_session(
lambda spark : unary_op_df(spark, data_gen, 10).write.orc(data_path))
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.read.orc(data_path))
@pytest.mark.parametrize('orc_gen', flattened_orc_gens, ids=idfn)
@pytest.mark.parametrize('reader_confs', reader_opt_confs, ids=idfn)
@pytest.mark.parametrize('v1_enabled_list', ["", "orc"])
@pytest.mark.parametrize('case_sensitive', ["false", "true"])
def test_read_with_more_columns(spark_tmp_path, orc_gen, reader_confs, v1_enabled_list, case_sensitive):
struct_gen = StructGen([('nested_col', orc_gen)])
# Map is not supported yet.
gen_list = [("top_pri", orc_gen),
("top_st", struct_gen),
("top_ar", ArrayGen(struct_gen, max_length=10))]
data_path = spark_tmp_path + '/ORC_DATA'
with_cpu_session(
lambda spark : gen_df(spark, gen_list).write.orc(data_path))
all_confs = reader_confs.copy()
all_confs.update({'spark.sql.sources.useV1SourceList': v1_enabled_list,
'spark.sql.caseSensitive': case_sensitive})
# This is a hack to get the type in a slightly less verbose way
extra_struct_gen = StructGen([('nested_col', orc_gen), ("nested_non_existing", orc_gen)])
extra_gen_list = [("top_pri", orc_gen),
("top_non_existing_mid", orc_gen),
("TOP_AR", ArrayGen(extra_struct_gen, max_length=10)),
("top_ST", extra_struct_gen),
("top_non_existing_end", orc_gen)]
rs = StructGen(extra_gen_list, nullable=False).data_type
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.read.schema(rs).orc(data_path),
conf=all_confs)
@pytest.mark.skipif(is_before_spark_330(), reason='Hidden file metadata columns are a new feature of Spark 330')
@allow_non_gpu(any = True)
@pytest.mark.parametrize('metadata_column', ["file_path", "file_name", "file_size", "file_modification_time"])
def test_orc_scan_with_hidden_metadata_fallback(spark_tmp_path, metadata_column):
data_path = spark_tmp_path + "/hidden_metadata.orc"
with_cpu_session(lambda spark : spark.range(10) \
.selectExpr("id", "id % 3 as p") \
.write \
.partitionBy("p") \
.mode("overwrite") \
.orc(data_path))
def do_orc_scan(spark):
df = spark.read.orc(data_path).selectExpr("id", "_metadata.{}".format(metadata_column))
return df
assert_cpu_and_gpu_are_equal_collect_with_capture(
do_orc_scan,
exist_classes= "FileSourceScanExec",
non_exist_classes= "GpuBatchScanExec")
@ignore_order
@pytest.mark.parametrize('v1_enabled_list', ["", "orc"])
@pytest.mark.parametrize('reader_confs', reader_opt_confs, ids=idfn)
@pytest.mark.skipif(is_databricks_runtime(), reason="Databricks does not support ignoreCorruptFiles")
def test_orc_read_with_corrupt_files(spark_tmp_path, reader_confs, v1_enabled_list):
first_data_path = spark_tmp_path + '/ORC_DATA/first'
with_cpu_session(lambda spark : spark.range(1).toDF("a").write.orc(first_data_path))
second_data_path = spark_tmp_path + '/ORC_DATA/second'
with_cpu_session(lambda spark : spark.range(1, 2).toDF("a").write.orc(second_data_path))
third_data_path = spark_tmp_path + '/ORC_DATA/third'
with_cpu_session(lambda spark : spark.range(2, 3).toDF("a").write.json(third_data_path))
all_confs = copy_and_update(reader_confs,
{'spark.sql.files.ignoreCorruptFiles': "true",
'spark.sql.sources.useV1SourceList': v1_enabled_list})
assert_gpu_and_cpu_are_equal_collect(
lambda spark : spark.read.orc([first_data_path, second_data_path, third_data_path]),
conf=all_confs)
# Spark(330,_) allows aggregate pushdown on ORC by enabling spark.sql.orc.aggregatePushdown.
# Note that Min/Max don't push down the partition column; only Count does.
# The following tests check that the GPU falls back to the CPU when aggregates are pushed down on ORC.
#
# When the Spark configuration is enabled we check the following:
# +-----------+------------------+--------------+
# | Aggregate | Partition Column | FallBack CPU |
# +-----------+------------------+--------------+
# | COUNT | Y | Y |
# | MIN | Y | N |
# | MAX | Y | N |
# | COUNT | N | Y |
# | MIN | N | Y |
# | MAX | N | Y |
_aggregate_orc_list_col_partition = ['COUNT']
_aggregate_orc_list_no_col_partition = ['MAX', 'MIN']
_aggregate_orc_list = _aggregate_orc_list_col_partition + _aggregate_orc_list_no_col_partition
_orc_aggregate_pushdown_enabled_conf = {'spark.rapids.sql.format.orc.write.enabled': 'true',
'spark.sql.orc.aggregatePushdown': 'true',
"spark.sql.sources.useV1SourceList": ""}
def _do_orc_scan_with_agg(spark, path, agg):
spark.range(10).selectExpr("id", "id % 3 as p").write.mode("overwrite").orc(path)
return spark.read.orc(path).selectExpr('{}(p)'.format(agg))
def _do_orc_scan_with_agg_on_partitioned_column(spark, path, agg):
spark.range(10).selectExpr("id", "id % 3 as p").write.partitionBy("p").mode("overwrite").orc(path)
return spark.read.orc(path).selectExpr('{}(p)'.format(agg))
@pytest.mark.skipif(is_before_spark_330(), reason='Aggregate push down on ORC is a new feature of Spark 330')
@pytest.mark.parametrize('aggregate', _aggregate_orc_list)
@allow_non_gpu(any = True)
def test_orc_scan_with_aggregate_pushdown(spark_tmp_path, aggregate):
"""
Spark(330,_) allows aggregate pushdown on ORC by enabling spark.sql.orc.aggregatePushdown.
When the Spark configuration is enabled we check the following:
+-----------+--------------+
| Aggregate | FallBack CPU |
+-----------+--------------+
| COUNT | Y |
| MIN | Y |
| MAX | Y |
"""
data_path = spark_tmp_path + '/ORC_DATA/pushdown_00.orc'
# fallback to CPU
assert_cpu_and_gpu_are_equal_collect_with_capture(
lambda spark: _do_orc_scan_with_agg(spark, data_path, aggregate),
exist_classes="BatchScanExec",
non_exist_classes="GpuBatchScanExec",
conf=_orc_aggregate_pushdown_enabled_conf)
@pytest.mark.skipif(is_before_spark_330(), reason='Aggregate push down on ORC is a new feature of Spark 330')
@pytest.mark.parametrize('aggregate', _aggregate_orc_list_col_partition)
@allow_non_gpu(any = True)
def test_orc_scan_with_aggregate_pushdown_on_col_partition(spark_tmp_path, aggregate):
"""
Spark(330,_) allows aggregate pushdown on ORC by enabling spark.sql.orc.aggregatePushdown.
Note that Min/Max don't push down the partition column; only Count does.
This test checks that the GPU falls back to the CPU when aggregates are pushed down on ORC.
When the Spark configuration is enabled we check the following:
+-----------+------------------+--------------+
| Aggregate | Partition Column | FallBack CPU |
+-----------+------------------+--------------+
| COUNT | Y | Y |
"""
data_path = spark_tmp_path + '/ORC_DATA/pushdown_01.orc'
# fallback to CPU only if aggregate is COUNT
assert_cpu_and_gpu_are_equal_collect_with_capture(
lambda spark: _do_orc_scan_with_agg_on_partitioned_column(spark, data_path, aggregate),
exist_classes="BatchScanExec",
non_exist_classes="GpuBatchScanExec",
conf=_orc_aggregate_pushdown_enabled_conf)
@pytest.mark.skipif(is_before_spark_330(), reason='Aggregate push down on ORC is a new feature of Spark 330')
@pytest.mark.parametrize('aggregate', _aggregate_orc_list_no_col_partition)
def test_orc_scan_with_aggregate_no_pushdown_on_col_partition(spark_tmp_path, aggregate):
"""
Spark(330,_) allows aggregate pushdown on ORC by enabling spark.sql.orc.aggregatePushdown.
Note that Min/Max don't push down the partition column.
When the Spark configuration is enabled we check the following:
+-----------+------------------+--------------+
| Aggregate | Partition Column | FallBack CPU |
+-----------+------------------+--------------+
| MIN | Y | N |
| MAX | Y | N |
"""
data_path = spark_tmp_path + '/ORC_DATA/pushdown_02.orc'
# should not fall back to the CPU
assert_gpu_and_cpu_are_equal_collect(
lambda spark: _do_orc_scan_with_agg_on_partitioned_column(spark, data_path, aggregate),
conf=_orc_aggregate_pushdown_enabled_conf)
def test_orc_read_count(spark_tmp_path):
data_path = spark_tmp_path + '/ORC_DATA'
orc_gens = [int_gen, string_gen, double_gen]
gen_list = [('_c' + str(i), gen) for i, gen in enumerate(orc_gens)]
with_cpu_session(lambda spark: gen_df(spark, gen_list).write.orc(data_path))
assert_gpu_and_cpu_row_counts_equal(lambda spark: spark.read.orc(data_path))
# Assert that the Spark plan of the equivalent SQL query contains no columns in the read schema
assert_cpu_and_gpu_are_equal_sql_with_capture(
lambda spark: spark.read.orc(data_path), "SELECT COUNT(*) FROM tab", "tab",
exist_classes=r'GpuFileGpuScan orc .* ReadSchema: struct<>')
# The test_orc_varchar file was created with the Hive CLI like this:
# CREATE TABLE test_orc_varchar(id int, name varchar(20)) STORED AS ORC LOCATION '...';
# INSERT INTO test_orc_varchar values(1, 'abc');
def test_orc_read_varchar_as_string(std_input_path):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.read.schema("id bigint, name string").orc(std_input_path + "/test_orc_varchar.orc"))
@pytest.mark.parametrize('gens', orc_gens_list, ids=idfn)
@pytest.mark.parametrize('keep_order', [True, pytest.param(False, marks=pytest.mark.ignore_order(local=True))])
def test_read_round_trip_for_multithreaded_combining(spark_tmp_path, gens, keep_order):
gen_list = [('_c' + str(i), gen) for i, gen in enumerate(gens)]
data_path = spark_tmp_path + '/ORC_DATA'
# 50 partitions to generate enough small files
with_cpu_session(
lambda spark: gen_df(spark, gen_list).repartition(50).write.orc(data_path))
all_confs = {'spark.rapids.sql.format.orc.reader.type': 'MULTITHREADED',
'spark.rapids.sql.reader.multithreaded.combine.sizeBytes': '64m',
'spark.rapids.sql.reader.multithreaded.read.keepOrder': keep_order}
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.read.orc(data_path), conf=all_confs)
@pytest.mark.parametrize('keep_order', [True, pytest.param(False, marks=pytest.mark.ignore_order(local=True))])
def test_simple_partitioned_read_for_multithreaded_combining(spark_tmp_path, keep_order):
orc_gens = [byte_gen, short_gen, int_gen, long_gen, float_gen, double_gen,
string_gen, boolean_gen, DateGen(start=date(1590, 1, 1)),
orc_timestamp_gen]
gen_list = [('_c' + str(i), gen) for i, gen in enumerate(orc_gens)]
first_data_path = spark_tmp_path + '/ORC_DATA/key=0/key2=20'
with_cpu_session(
lambda spark: gen_df(spark, gen_list).repartition(50).write.orc(first_data_path))
second_data_path = spark_tmp_path + '/ORC_DATA/key=1/key2=21'
with_cpu_session(
lambda spark: gen_df(spark, gen_list).repartition(50).write.orc(second_data_path))
third_data_path = spark_tmp_path + '/ORC_DATA/key=2/key2=22'
with_cpu_session(
lambda spark: gen_df(spark, gen_list).repartition(50).write.orc(third_data_path))
data_path = spark_tmp_path + '/ORC_DATA'
all_confs = {'spark.rapids.sql.format.orc.reader.type': 'MULTITHREADED',
'spark.rapids.sql.reader.multithreaded.combine.sizeBytes': '64m',
'spark.rapids.sql.reader.multithreaded.read.keepOrder': keep_order}
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.read.orc(data_path), conf=all_confs)
@pytest.mark.skipif(is_spark_340_or_later(), reason="https://github.com/NVIDIA/spark-rapids/issues/8324")
@pytest.mark.parametrize('data_file', ['fixed-length-char-column-from-hive.orc'])
@pytest.mark.parametrize('reader', [read_orc_df, read_orc_sql])
def test_read_hive_fixed_length_char(std_input_path, data_file, reader):
"""
Test that a file containing CHAR data is readable as STRING.
"""
assert_gpu_and_cpu_are_equal_collect(
reader(std_input_path + '/' + data_file),
conf={})
@allow_non_gpu("ProjectExec")
@pytest.mark.skipif(is_before_spark_340(), reason="https://github.com/NVIDIA/spark-rapids/issues/8324")
@pytest.mark.parametrize('data_file', ['fixed-length-char-column-from-hive.orc'])
@pytest.mark.parametrize('reader', [read_orc_df, read_orc_sql])
def test_project_fallback_when_reading_hive_fixed_length_char(std_input_path, data_file, reader):
"""
Test that a file containing CHAR data is readable as STRING.
Note: This test can be removed when
https://github.com/NVIDIA/spark-rapids/issues/8324 is resolved.
"""
assert_gpu_fallback_collect(
reader(std_input_path + '/' + data_file),
cpu_fallback_class_name="ProjectExec",
conf={})
@pytest.mark.parametrize('read_func', [read_orc_df, read_orc_sql])
@pytest.mark.parametrize('v1_enabled_list', ["", "orc"])
@pytest.mark.parametrize('orc_impl', ["native", "hive"])
@pytest.mark.parametrize('reader_confs', reader_opt_confs, ids=idfn)
@pytest.mark.parametrize('col_name', ['K0', 'k0', 'K3', 'k3', 'V0', 'v0'], ids=idfn)
@ignore_order
def test_read_case_col_name(spark_tmp_path, read_func, v1_enabled_list, orc_impl, reader_confs, col_name):
all_confs = copy_and_update(reader_confs, {
'spark.sql.sources.useV1SourceList': v1_enabled_list,
'spark.sql.orc.impl': orc_impl})
gen_list =[('k0', LongGen(nullable=False, min_val=0, max_val=0)),
('k1', LongGen(nullable=False, min_val=1, max_val=1)),
('k2', LongGen(nullable=False, min_val=2, max_val=2)),
('k3', LongGen(nullable=False, min_val=3, max_val=3)),
('v0', LongGen()),
('v1', LongGen()),
('v2', LongGen()),
('v3', LongGen())]
gen = StructGen(gen_list, nullable=False)
data_path = spark_tmp_path + '/ORC_DATA'
reader = read_func(data_path)
with_cpu_session(
lambda spark : gen_df(spark, gen).write.partitionBy('k0', 'k1', 'k2', 'k3').orc(data_path))
assert_gpu_and_cpu_are_equal_collect(
lambda spark : reader(spark).selectExpr(col_name),
conf=all_confs)
@pytest.mark.parametrize("reader_confs", reader_opt_confs, ids=idfn)
@ignore_order
def test_orc_column_name_with_dots(spark_tmp_path, reader_confs):
data_path = spark_tmp_path + "/ORC_DATA"
reader = read_orc_df(data_path)
all_confs = reader_confs
gens = [
("a.b", StructGen([
("c.d.e", StructGen([
("f.g", int_gen),
("h", string_gen)])),
("i.j", long_gen)])),
("k", boolean_gen)]
with_cpu_session(lambda spark: gen_df(spark, gens).write.orc(data_path))
assert_gpu_and_cpu_are_equal_collect(lambda spark: reader(spark), conf=all_confs)
assert_gpu_and_cpu_are_equal_collect(lambda spark: reader(spark).selectExpr("`a.b`"), conf=all_confs)
assert_gpu_and_cpu_are_equal_collect(lambda spark: reader(spark).selectExpr("`a.b`.`c.d.e`.`f.g`"),
conf=all_confs)
# This corresponds to a Spark ORC test in https://github.com/apache/spark/blob/v3.4.0/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcQuerySuite.scala#L173
@pytest.mark.parametrize("reader_confs", reader_opt_confs, ids=idfn)
@ignore_order
def test_orc_with_null_column(spark_tmp_path, reader_confs):
data_path = spark_tmp_path + "/ORC_DATA"
all_confs = reader_confs
def gen_null_df(spark):
return spark.createDataFrame(
[(None, None, None, None, None)],
"c1 int, c2 long, c3 float, c4 double, c5 boolean")
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path: gen_null_df(spark).write.orc(path),
lambda spark, path: spark.read.orc(path),
data_path,
conf=all_confs)
gpu_file_path = data_path + "/GPU"
reader = read_orc_df(gpu_file_path)
assert_gpu_and_cpu_are_equal_collect(lambda spark: reader(spark), conf=all_confs)
@ignore_order
@large_data_test
@pytest.mark.parametrize("reader_confs", reader_opt_confs, ids=idfn)
def test_orc_with_null_column_with_1m_rows(spark_tmp_path, reader_confs):
data_path = spark_tmp_path + "/ORC_DATA"
all_confs = reader_confs
data = [(i, None, None, None, None) for i in range(1000000)]
def gen_null_df(spark):
return spark.createDataFrame(
data,
"c1 int, c2 long, c3 float, c4 double, c5 boolean")
assert_gpu_and_cpu_writes_are_equal_collect(
lambda spark, path: gen_null_df(spark).write.orc(path),
lambda spark, path: spark.read.orc(path),
data_path,
conf=all_confs)
gpu_file_path = data_path + "/GPU"
sqls = ["SELECT * FROM my_large_table",
"SELECT * FROM my_large_table WHERE c2 = 5",
"SELECT COUNT(*) FROM my_large_table WHERE c3 IS NOT NULL",
"SELECT * FROM my_large_table WHERE c4 IS NULL",
"SELECT * FROM my_large_table WHERE c5 IS NULL",
]
for sql in sqls:
assert_gpu_and_cpu_are_equal_sql(
lambda spark: spark.read.orc(gpu_file_path),
"my_large_table",
sql)
#
# V_0_11.orc is generated by the following code:
# @Test
# public void testGenerateOrc() throws IOException {
# TypeDescription schema = TypeDescription.fromString("struct<x:bigint>");
# Configuration config = new Configuration();
# Path path = new Path("/tmp/V_0_11.orc");
#
# long[] inp = new long[]{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 4, 5, 6,
# 7, 8, 9, 10, 1, 1, 1, 1, 1, 1, 10, 9, 7, 6, 5, 4, 3, 2, 1, 1, 1, 1, 1,
# 2, 5, 1, 3, 7, 1, 9, 2, 6, 3, 7, 1, 9, 2, 6, 3, 7, 1, 9, 2, 6, 3, 7, 1,
# 9, 2, 6, 3, 7, 1, 9, 2, 6, 2000, 2, 1, 1, 1, 1, 1, 3, 7, 1, 9, 2, 6, 1,
# 1, 1, 1, 1};
# List<Long> input = Lists.newArrayList(Longs.asList(inp));
# Writer writer = OrcFile.createWriter(path,
# OrcFile.writerOptions(config)
# .setSchema(schema)
# .compress(CompressionKind.SNAPPY)
# .version(OrcFile.Version.V_0_11) #############: use V_0_11 version ################
# .bufferSize(10000)
# .encodingStrategy(OrcFile.EncodingStrategy.SPEED));
# VectorizedRowBatch batch = schema.createRowBatch();
# for (Long l : input) {
# appendLong(batch, l);
# }
# writer.addRowBatch(batch);
# writer.close();
# }
#
# Changing the version generates the V_0_12.orc file
#
@ignore_order
def test_orc_version_V_0_11_and_V_0_12(std_input_path):
assert_gpu_and_cpu_are_equal_sql(
lambda spark: spark.read.orc(std_input_path + "/V_0_11.orc"),
"v11_table",
"select * from v11_table")
assert_gpu_and_cpu_are_equal_sql(
lambda spark: spark.read.orc(std_input_path + "/V_0_12.orc"),
"v12_table",
"select * from v12_table")
| spark-rapids-branch-23.10 | integration_tests/src/main/python/orc_test.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_gpu_and_cpu_are_equal_collect, assert_gpu_fallback_collect, \
assert_gpu_and_cpu_error, \
assert_gpu_sql_fallback_collect
from data_gen import *
from marks import *
from pyspark.sql.types import *
from spark_session import is_before_spark_320, is_before_spark_350, is_jvm_charset_utf8
if not is_jvm_charset_utf8():
pytestmark = [pytest.mark.regexp, pytest.mark.skip(reason=str("Current locale doesn't support UTF-8, regexp support is disabled"))]
else:
pytestmark = pytest.mark.regexp
_regexp_conf = { 'spark.rapids.sql.regexp.enabled': True }
def mk_str_gen(pattern):
return StringGen(pattern).with_special_case('').with_special_pattern('.{0,10}')
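# For example, mk_str_gen('[abcd]{1,3}') produces strings matching that pattern, plus the
# empty string and (via the '.{0,10}' special pattern) short arbitrary strings, so each
# column mixes a few edge cases in with the regular data.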
def test_split_re_negative_limit():
data_gen = mk_str_gen('([bf]o{0,2}:){1,7}') \
.with_special_case('boo:and:foo')
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr(
'split(a, "[:]", -1)',
'split(a, "[o:]", -1)',
'split(a, "[^:]", -1)',
'split(a, "[^o]", -1)',
'split(a, "[o]{1,2}", -1)',
'split(a, "[bf]", -1)',
'split(a, "b[o]+", -1)',
'split(a, "b[o]*", -1)',
'split(a, "b[o]?", -1)',
'split(a, "[o]", -2)'),
conf=_regexp_conf)
def test_split_re_zero_limit():
data_gen = mk_str_gen('([bf]o{0,2}:){1,7}') \
.with_special_case('boo:and:foo')
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr(
'split(a, "[:]", 0)',
'split(a, "[o:]", 0)',
'split(a, "[^:]", 0)',
'split(a, "[^o]", 0)',
'split(a, "[o]{1,2}", 0)',
'split(a, "[bf]", 0)',
'split(a, "f[o]+", 0)',
'split(a, "f[o]*", 0)',
'split(a, "f[o]?", 0)',
'split(a, "[o]", 0)'),
conf=_regexp_conf)
def test_split_re_one_limit():
data_gen = mk_str_gen('([bf]o{0,2}:){1,7}') \
.with_special_case('boo:and:foo')
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr(
'split(a, "[:]", 1)',
'split(a, "[o:]", 1)',
'split(a, "[^:]", 1)',
'split(a, "[^o]", 1)',
'split(a, "[o]{1,2}", 1)',
'split(a, "[bf]", 1)',
'split(a, "b[o]+", 1)',
'split(a, "b[o]*", 1)',
'split(a, "b[o]?", 1)',
'split(a, "[o]", 1)'),
conf=_regexp_conf)
def test_split_re_positive_limit():
data_gen = mk_str_gen('([bf]o{0,2}:){1,7}') \
.with_special_case('boo:and:foo')
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr(
'split(a, "[:]", 2)',
'split(a, "[o:]", 5)',
'split(a, "[^:]", 2)',
'split(a, "[^o]", 55)',
'split(a, "[o]{1,2}", 999)',
'split(a, "[bf]", 2)',
'split(a, "f[o]+", 2)',
'split(a, "f[o]*", 9)',
'split(a, "f[o]?", 5)',
'split(a, "[o]", 5)'),
conf=_regexp_conf)
def test_split_re_no_limit():
data_gen = mk_str_gen('([bf]o{0,2}:){1,7}') \
.with_special_case('boo:and:foo')
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr(
'split(a, "[:]")',
'split(a, "[o:]")',
'split(a, "[^:]")',
'split(a, "[^o]")',
'split(a, "[o]{1,2}")',
'split(a, "[bf]")',
'split(a, "[o]")',
'split(a, "^(boo|foo):$")',
'split(a, "(bo+|fo{2}):$")',
'split(a, "[bf]$:")',
'split(a, "b[o]+")',
'split(a, "b[o]*")',
'split(a, "b[o]?")',
'split(a, "b^")',
'split(a, "^[o]")'),
conf=_regexp_conf)
def test_split_optimized_no_re():
data_gen = mk_str_gen('([bf]o{0,2}[.?+\\^$|{}]{1,2}){1,7}') \
.with_special_case('boo.and.foo') \
.with_special_case('boo?and?foo') \
.with_special_case('boo+and+foo') \
.with_special_case('boo^and^foo') \
.with_special_case('boo$and$foo') \
.with_special_case('boo|and|foo') \
.with_special_case('boo{and}foo') \
.with_special_case('boo$|and$|foo')
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr(
'split(a, "\\\\.")',
'split(a, "\\\\?")',
'split(a, "\\\\+")',
'split(a, "\\\\^")',
'split(a, "\\\\$")',
'split(a, "\\\\|")',
'split(a, "\\\\{")',
'split(a, "\\\\}")',
'split(a, "\\\\%")',
'split(a, "\\\\;")',
'split(a, "\\\\/")',
'split(a, "\\\\$\\\\|")'),
conf=_regexp_conf)
def test_split_optimized_no_re_combined():
data_gen = mk_str_gen('([bf]o{0,2}[AZ.?+\\^$|{}]{1,2}){1,7}') \
.with_special_case('booA.ZandA.Zfoo') \
.with_special_case('booA?ZandA?Zfoo') \
.with_special_case('booA+ZandA+Zfoo') \
.with_special_case('booA^ZandA^Zfoo') \
.with_special_case('booA$ZandA$Zfoo') \
.with_special_case('booA|ZandA|Zfoo') \
.with_special_case('boo{Zand}Zfoo')
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr(
'split(a, "A\\\\.Z")',
'split(a, "A\\\\?Z")',
'split(a, "A\\\\+Z")',
'split(a, "A\\\\^Z")',
'split(a, "A\\\\$Z")',
'split(a, "A\\\\|Z")',
'split(a, "\\\\{Z")',
'split(a, "\\\\}Z")'),
conf=_regexp_conf)
# See https://github.com/NVIDIA/spark-rapids/issues/6958 for the issue with zero-width matches
@allow_non_gpu('ProjectExec', 'StringSplit')
def test_split_unsupported_fallback():
data_gen = mk_str_gen('([bf]o{0,2}:){1,7}') \
.with_special_case('boo:and:foo')
assert_gpu_sql_fallback_collect(
lambda spark : unary_op_df(spark, data_gen),
'StringSplit',
'string_split_table',
'select ' +
'split(a, "o*"),' +
'split(a, "o?") from string_split_table')
def test_split_regexp_disabled_no_fallback():
conf = { 'spark.rapids.sql.regexp.enabled': 'false' }
data_gen = mk_str_gen('([bf]o{0,2}[.?+\\^$|&_]{1,2}){1,7}') \
.with_special_case('boo.and.foo') \
.with_special_case('boo?and?foo') \
.with_special_case('boo+and+foo') \
.with_special_case('boo^and^foo') \
.with_special_case('boo$and$foo') \
.with_special_case('boo|and|foo') \
.with_special_case('boo&and&foo') \
.with_special_case('boo_and_foo')
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr(
'split(a, "\\\\.")',
'split(a, "\\\\?")',
'split(a, "\\\\+")',
'split(a, "\\\\^")',
'split(a, "\\\\$")',
'split(a, "\\\\|")',
'split(a, "&")',
'split(a, "_")',
), conf
)
@allow_non_gpu('ProjectExec', 'StringSplit')
def test_split_regexp_disabled_fallback():
conf = { 'spark.rapids.sql.regexp.enabled': 'false' }
data_gen = mk_str_gen('([bf]o{0,2}:){1,7}') \
.with_special_case('boo:and:foo')
assert_gpu_sql_fallback_collect(
lambda spark : unary_op_df(spark, data_gen),
'StringSplit',
'string_split_table',
'select ' +
'split(a, "[:]", 2), ' +
'split(a, "[o:]", 5), ' +
'split(a, "[^:]", 2), ' +
'split(a, "[^o]", 55), ' +
'split(a, "[o]{1,2}", 999), ' +
'split(a, "[bf]", 2), ' +
'split(a, "[o]", 5) from string_split_table',
conf)
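# A note on escaping in the tests below: the regexp patterns are double-escaped because
# they pass through two layers of string parsing. For example, the Python raw string
# r'split(a, "[\\.]", 2)' reaches Spark as the SQL literal "[\\.]", and the SQL parser
# unescapes that to the regex [\.] before it is compiled.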
def test_split_escaped_chars_in_character_class():
data_gen = mk_str_gen(r'([0-9][\\\.\[\]\^\-\+]){1,4}')
assert_gpu_and_cpu_are_equal_collect(
# note that regexp patterns are double-escaped to support
# passing from Python to Java
lambda spark : unary_op_df(spark, data_gen).selectExpr(
r'split(a, "[\\.]", 2)',
r'split(a, "[\\[]", 2)',
r'split(a, "[\\]]", 2)',
r'split(a, "[\\^]", 2)',
r'split(a, "[\\-]", 2)',
r'split(a, "[\\+]", 2)',
r'split(a, "[\\\\]", 2)',
))
def test_re_replace():
gen = mk_str_gen('.{0,5}TEST[\ud720 A]{0,5}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'REGEXP_REPLACE(a, "TEST", "PROD")',
'REGEXP_REPLACE(a, "^TEST", "PROD")',
'REGEXP_REPLACE(a, "^TEST\\z", "PROD")',
'REGEXP_REPLACE(a, "TEST\\z", "PROD")',
'REGEXP_REPLACE(a, "\\zTEST", "PROD")',
'REGEXP_REPLACE(a, "TEST\\z", "PROD")',
'REGEXP_REPLACE(a, "\\^TEST\\z", "PROD")',
'REGEXP_REPLACE(a, "\\^TEST\\z", "PROD")',
'REGEXP_REPLACE(a, "TEST", "")',
'REGEXP_REPLACE(a, "TEST", "%^[]\ud720")',
'REGEXP_REPLACE(a, "TEST", NULL)'),
conf=_regexp_conf)
# We have shims to support empty strings for zero-repetition patterns
# See https://github.com/NVIDIA/spark-rapids/issues/5456
def test_re_replace_repetition():
gen = mk_str_gen('.{0,5}TEST[\ud720 A]{0,5}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'REGEXP_REPLACE(a, "[E]+", "PROD")',
'REGEXP_REPLACE(a, "[A]+", "PROD")',
'REGEXP_REPLACE(a, "A{0,}", "PROD")',
'REGEXP_REPLACE(a, "T?E?", "PROD")',
'REGEXP_REPLACE(a, "A*", "PROD")',
'REGEXP_REPLACE(a, "A{0,5}", "PROD")',
'REGEXP_REPLACE(a, "(A*)", "PROD")',
'REGEXP_REPLACE(a, "(((A*)))", "PROD")',
'REGEXP_REPLACE(a, "((A*)E?)", "PROD")',
'REGEXP_REPLACE(a, "[A-Z]?", "PROD")'
),
conf=_regexp_conf)
@allow_non_gpu('ProjectExec', 'RegExpReplace')
def test_re_replace_issue_5492():
# https://github.com/NVIDIA/spark-rapids/issues/5492
gen = mk_str_gen('.{0,5}TEST[\ud720 A]{0,5}')
assert_gpu_fallback_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'REGEXP_REPLACE(a, "[^\\\\sa-zA-Z0-9]", "x")'),
'RegExpReplace',
conf=_regexp_conf)
def test_re_replace_escaped_chars():
# https://github.com/NVIDIA/spark-rapids/issues/7892
gen = mk_str_gen('.{0,5}TEST[\n\r\t\f\a\b\u001b]{0,5}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'REGEXP_REPLACE(a, "\\\\t", " ")',
'REGEXP_REPLACE(a, "\\\\n", " ")',
'REGEXP_REPLACE(a, "TEST\\\\n", "PROD")',
'REGEXP_REPLACE(a, "TEST\\\\r", "PROD")',
'REGEXP_REPLACE(a, "TEST\\\\f", "PROD")',
'REGEXP_REPLACE(a, "TEST\\\\a", "PROD")',
'REGEXP_REPLACE(a, "TEST\\\\b", "PROD")',
'REGEXP_REPLACE(a, "TEST\\\\e", "PROD")',
'REGEXP_REPLACE(a, "TEST[\\\\r\\\\n]", "PROD")'),
conf=_regexp_conf)
def test_re_replace_backrefs():
gen = mk_str_gen('.{0,5}TEST[\ud720 A]{0,5}TEST')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'REGEXP_REPLACE(a, "(TEST)", "$1")',
'REGEXP_REPLACE(a, "(TEST)", "[$0]")',
'REGEXP_REPLACE(a, "(TEST)", "[\\1]")',
'REGEXP_REPLACE(a, "(T)[a-z]+(T)", "[$2][$1][$0]")',
'REGEXP_REPLACE(a, "([0-9]+)(T)[a-z]+(T)", "[$3][$2][$1]")',
'REGEXP_REPLACE(a, "(.)([0-9]+TEST)", "$0 $1 $2")',
'REGEXP_REPLACE(a, "(TESTT)", "\\0 \\1")' # no match
),
conf=_regexp_conf)
def test_re_replace_anchors():
gen = mk_str_gen('.{0,2}TEST[\ud720 A]{0,5}TEST[\r\n\u0085\u2028\u2029]?') \
.with_special_case("TEST") \
.with_special_case("TEST\n") \
.with_special_case("TEST\r\n") \
.with_special_case("TEST\r")
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'REGEXP_REPLACE(a, "TEST$", "")',
'REGEXP_REPLACE(a, "TEST$", "PROD")',
'REGEXP_REPLACE(a, "\ud720[A-Z]+$", "PROD")',
'REGEXP_REPLACE(a, "(\ud720[A-Z]+)$", "PROD")',
'REGEXP_REPLACE(a, "(TEST)$", "$1")',
'REGEXP_REPLACE(a, "^(TEST)$", "$1")',
'REGEXP_REPLACE(a, "\\\\ATEST\\\\Z", "PROD")',
'REGEXP_REPLACE(a, "\\\\ATEST$", "PROD")',
'REGEXP_REPLACE(a, "^TEST\\\\Z", "PROD")',
'REGEXP_REPLACE(a, "TEST\\\\Z", "PROD")',
'REGEXP_REPLACE(a, "^TEST$", "PROD")',
),
conf=_regexp_conf)
# For GPU runs, cuDF will check the range and throw an exception if the index is out of range
def test_re_replace_backrefs_idx_out_of_bounds():
gen = mk_str_gen('.{0,5}TEST[\ud720 A]{0,5}')
assert_gpu_and_cpu_error(lambda spark: unary_op_df(spark, gen).selectExpr(
'REGEXP_REPLACE(a, "(T)(E)(S)(T)", "[$5]")').collect(),
conf=_regexp_conf,
error_message='')
def test_re_replace_backrefs_escaped():
gen = mk_str_gen('.{0,5}TEST[\ud720 A]{0,5}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'REGEXP_REPLACE(a, "(TEST)", "[\\\\$0]")',
'REGEXP_REPLACE(a, "(TEST)", "[\\\\$1]")'),
conf=_regexp_conf)
def test_re_replace_escaped():
gen = mk_str_gen('.{0,5}TEST[\ud720 A]{0,5}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'REGEXP_REPLACE(a, "[A-Z]+", "\\\\A\\A\\\\t\\\\r\\\\n\\t\\r\\n")'),
conf=_regexp_conf)
def test_re_replace_null():
gen = mk_str_gen('[\u0000 ]{0,2}TE[\u0000 ]{0,2}ST[\u0000 ]{0,2}')\
.with_special_case("\u0000")\
.with_special_case("\u0000\u0000")
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'REGEXP_REPLACE(a, "\u0000", "")',
'REGEXP_REPLACE(a, "\000", "")',
'REGEXP_REPLACE(a, "\00", "")',
'REGEXP_REPLACE(a, "\x00", "")',
'REGEXP_REPLACE(a, "\0", "")',
'REGEXP_REPLACE(a, "\u0000", "NULL")',
'REGEXP_REPLACE(a, "\000", "NULL")',
'REGEXP_REPLACE(a, "\00", "NULL")',
'REGEXP_REPLACE(a, "\x00", "NULL")',
'REGEXP_REPLACE(a, "\0", "NULL")',
'REGEXP_REPLACE(a, "TE\u0000ST", "PROD")',
'REGEXP_REPLACE(a, "TE\u0000\u0000ST", "PROD")',
'REGEXP_REPLACE(a, "[\x00TEST]", "PROD")',
'REGEXP_REPLACE(a, "[TE\00ST]", "PROD")',
'REGEXP_REPLACE(a, "[\u0000-z]", "PROD")'),
conf=_regexp_conf)
def test_regexp_replace():
gen = mk_str_gen('[abcd]{0,3}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'regexp_replace(a, "a", "A")',
'regexp_replace(a, "[^xyz]", "A")',
'regexp_replace(a, "([^x])|([^y])", "A")',
'regexp_replace(a, "(?:aa)+", "A")',
'regexp_replace(a, "a|b|c", "A")'),
conf=_regexp_conf)
@pytest.mark.skipif(is_before_spark_320(), reason='regexp is synonym for RLike starting in Spark 3.2.0')
def test_regexp():
gen = mk_str_gen('[abcd]{1,3}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'regexp(a, "a{2}")',
'regexp(a, "a{1,3}")',
'regexp(a, "a{1,}")',
'regexp(a, "a[bc]d")'),
conf=_regexp_conf)
@pytest.mark.skipif(is_before_spark_320(), reason='regexp_like is synonym for RLike starting in Spark 3.2.0')
def test_regexp_like():
gen = mk_str_gen('[abcd]{1,3}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'regexp_like(a, "a{2}")',
'regexp_like(a, "a{1,3}")',
'regexp_like(a, "a{1,}")',
'regexp_like(a, "a[bc]d")'),
conf=_regexp_conf)
def test_regexp_replace_character_set_negated():
gen = mk_str_gen('[abcd]{0,3}[\r\n]{0,2}[abcd]{0,3}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'regexp_replace(a, "([^a])|([^b])", "1")',
'regexp_replace(a, "[^a]", "1")',
'regexp_replace(a, "([^a]|[\r\n])", "1")',
'regexp_replace(a, "[^a\r\n]", "1")',
'regexp_replace(a, "[^a\r]", "1")',
'regexp_replace(a, "[^a\n]", "1")',
'regexp_replace(a, "[^\r\n]", "1")',
'regexp_replace(a, "[^\r]", "1")',
'regexp_replace(a, "[^\n]", "1")'),
conf=_regexp_conf)
def test_regexp_extract():
gen = mk_str_gen('[abcd]{1,3}[0-9]{1,3}/?[abcd]{1,3}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'regexp_extract(a, "([0-9]+)", 1)',
'regexp_extract(a, "([0-9])([abcd]+)", 1)',
'regexp_extract(a, "([0-9])([abcd]+)", 2)',
'regexp_extract(a, "^([a-d]*)([0-9]*)([a-d]*)$", 1)',
'regexp_extract(a, "^([a-d]*)([0-9]*)([a-d]*)$", 2)',
'regexp_extract(a, "^([a-d]*)([0-9]*)([a-d]*)$", 3)',
'regexp_extract(a, "^([a-d]*)([0-9]*)\\\\/([a-d]*)", 3)',
'regexp_extract(a, "^([a-d]*)([0-9]*)\\\\/([a-d]*)$", 3)',
'regexp_extract(a, "^([a-d]*)([0-9]*)(\\\\/[a-d]*)", 3)',
'regexp_extract(a, "^([a-d]*)([0-9]*)(\\\\/[a-d]*)$", 3)'),
conf=_regexp_conf)
def test_regexp_extract_no_match():
gen = mk_str_gen('[abcd]{1,3}[0-9]{1,3}[abcd]{1,3}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'regexp_extract(a, "^([0-9]+)([a-z]+)([0-9]+)$", 0)',
'regexp_extract(a, "^([0-9]+)([a-z]+)([0-9]+)$", 1)',
'regexp_extract(a, "^([0-9]+)([a-z]+)([0-9]+)$", 2)',
'regexp_extract(a, "^([0-9]+)([a-z]+)([0-9]+)$", 3)'),
conf=_regexp_conf)
# If we determine that the index is out of range, we fall back to the CPU and let
# Spark take care of the error handling
@allow_non_gpu('ProjectExec', 'RegExpExtract')
def test_regexp_extract_idx_negative():
message = "The specified group index cannot be less than zero" if is_before_spark_350() else \
"[INVALID_PARAMETER_VALUE.REGEX_GROUP_INDEX] The value of parameter(s) `idx` in `regexp_extract` is invalid"
gen = mk_str_gen('[abcd]{1,3}[0-9]{1,3}[abcd]{1,3}')
assert_gpu_and_cpu_error(
lambda spark: unary_op_df(spark, gen).selectExpr(
'regexp_extract(a, "^([a-d]*)([0-9]*)([a-d]*)$", -1)').collect(),
error_message = message,
conf=_regexp_conf)
# If we determine that the index is out of range, we fall back to the CPU and let
# Spark take care of the error handling
@allow_non_gpu('ProjectExec', 'RegExpExtract')
def test_regexp_extract_idx_out_of_bounds():
message = "Regex group count is 3, but the specified group index is 4" if is_before_spark_350() else \
"[INVALID_PARAMETER_VALUE.REGEX_GROUP_INDEX] The value of parameter(s) `idx` in `regexp_extract` is invalid: Expects group index between 0 and 3, but got 4."
gen = mk_str_gen('[abcd]{1,3}[0-9]{1,3}[abcd]{1,3}')
assert_gpu_and_cpu_error(
lambda spark: unary_op_df(spark, gen).selectExpr(
'regexp_extract(a, "^([a-d]*)([0-9]*)([a-d]*)$", 4)').collect(),
error_message = message,
conf=_regexp_conf)
def test_regexp_extract_multiline():
gen = mk_str_gen('[abcd]{2}[\r\n]{0,2}[0-9]{2}[\r\n]{0,2}[abcd]{2}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'regexp_extract(a, "^([a-d]*)([\r\n]*)", 2)'),
conf=_regexp_conf)
def test_regexp_extract_multiline_negated_character_class():
gen = mk_str_gen('[abcd]{2}[\r\n]{0,2}[0-9]{2}[\r\n]{0,2}[abcd]{2}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'regexp_extract(a, "^([a-d]*)([^a-z]*)([a-d]*)\\z", 2)'),
conf=_regexp_conf)
def test_regexp_extract_idx_0():
gen = mk_str_gen('[abcd]{1,3}[0-9]{1,3}[abcd]{1,3}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'regexp_extract(a, "([0-9]+)[abcd]([abcd]+)", 0)',
'regexp_extract(a, "^([a-d]*)([0-9]*)([a-d]*)\\z", 0)',
'regexp_extract(a, "^([a-d]*)[0-9]*([a-d]*)\\z", 0)'),
conf=_regexp_conf)
def test_word_boundaries():
gen = StringGen('([abc]{1,3}[\r\n\t \f]{0,2}[123]){1,5}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'rlike(a, "\\\\b")',
'rlike(a, "\\\\B")',
'rlike(a, "\\\\b\\\\B")',
'regexp_extract(a, "([a-d]+)\\\\b([e-h]+)", 1)',
'regexp_extract(a, "([a-d]+)\\\\B", 1)',
'regexp_replace(a, "\\\\b", "#")',
'regexp_replace(a, "\\\\B", "#")',
),
conf=_regexp_conf)
def test_character_classes():
gen = mk_str_gen('[abcd]{1,3}[0-9]{1,3}[abcd]{1,3}[ \n\t\r]{0,2}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'rlike(a, "[abcd]")',
'rlike(a, "[^\n\r]")',
'rlike(a, "[\n-\\]")',
'rlike(a, "[+--]")',
'regexp_extract(a, "[123]", 0)',
'regexp_replace(a, "[\\\\0101-\\\\0132]", "@")',
'regexp_replace(a, "[\\\\x41-\\\\x5a]", "@")',
),
conf=_regexp_conf)
def test_regexp_choice():
gen = mk_str_gen('[abcd]{1,3}[0-9]{1,3}[abcd]{1,3}[ \n\t\r]{0,2}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'rlike(a, "[abcd]|[123]")',
'rlike(a, "[^\n\r]|abcd")',
'rlike(a, "abd1a$|^ab2a")',
'rlike(a, "[a-c]*|[\n]")',
'rlike(a, "[a-c]+|[\n]")',
'regexp_extract(a, "(abc1a$|^ab2ab|a3abc)", 1)',
'regexp_extract(a, "(abc1a$|ab2ab$)", 1)',
'regexp_extract(a, "(ab+|^ab)", 1)',
'regexp_extract(a, "(ab*|^ab)", 1)',
'regexp_replace(a, "[abcd]$|^abc", "@")',
'regexp_replace(a, "[ab]$|[cd]$", "@")',
'regexp_replace(a, "[ab]+|^cd1", "@")'
),
conf=_regexp_conf)
def test_regexp_hexadecimal_digits():
gen = mk_str_gen(
'[abcd]\\\\x00\\\\x7f\\\\x80\\\\xff\\\\x{10ffff}\\\\x{00eeee}[\\\\xa0-\\\\xb0][abcd]')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'rlike(a, "\\\\x7f")',
'rlike(a, "\\\\x80")',
'rlike(a, "[\\\\xa0-\\\\xf0]")',
'rlike(a, "\\\\x{00eeee}")',
'regexp_extract(a, "([a-d]+)\\\\xa0([a-d]+)", 1)',
'regexp_extract(a, "([a-d]+)[\\\\xa0\nabcd]([a-d]+)", 1)',
'regexp_replace(a, "\\\\xff", "@")',
'regexp_replace(a, "[\\\\xa0-\\\\xb0]", "@")',
'regexp_replace(a, "\\\\x{10ffff}", "@")',
),
conf=_regexp_conf)
def test_regexp_whitespace():
gen = mk_str_gen('\u001e[abcd]\t\n{1,3} [0-9]\n {1,3}\x0b\t[abcd]\r\f[0-9]{0,10}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'rlike(a, "\\\\s")',
'rlike(a, "\\\\s{3}")',
'rlike(a, "[abcd]+\\\\s+[0-9]+")',
'rlike(a, "\\\\S{3}")',
'rlike(a, "[abcd]+\\\\s+\\\\S{2,3}")',
'regexp_extract(a, "([a-d]+)(\\\\s[0-9]+)([a-d]+)", 2)',
'regexp_extract(a, "([a-d]+)(\\\\S+)([0-9]+)", 2)',
'regexp_extract(a, "([a-d]+)(\\\\S+)([0-9]+)", 3)',
'regexp_replace(a, "(\\\\s+)", "@")',
'regexp_replace(a, "(\\\\S+)", "#")',
),
conf=_regexp_conf)
def test_regexp_horizontal_vertical_whitespace():
gen = mk_str_gen(
'''\xA0\u1680\u180e[abcd]\t\n{1,3} [0-9]\n {1,3}\x0b\t[abcd]\r\f[0-9]{0,10}
[\u2001-\u200a]{1,3}\u202f\u205f\u3000\x85\u2028\u2029
''')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'rlike(a, "\\\\h{2}")',
'rlike(a, "\\\\v{3}")',
'rlike(a, "[abcd]+\\\\h+[0-9]+")',
'rlike(a, "[abcd]+\\\\v+[0-9]+")',
'rlike(a, "\\\\H")',
'rlike(a, "\\\\V")',
'rlike(a, "[abcd]+\\\\h+\\\\V{2,3}")',
'regexp_extract(a, "([a-d]+)([0-9]+\\\\v)([a-d]+)", 2)',
'regexp_extract(a, "([a-d]+)(\\\\H+)([0-9]+)", 2)',
'regexp_extract(a, "([a-d]+)(\\\\V+)([0-9]+)", 3)',
'regexp_replace(a, "(\\\\v+)", "@")',
'regexp_replace(a, "(\\\\H+)", "#")',
),
conf=_regexp_conf)
def test_regexp_linebreak():
gen = mk_str_gen(
'[abc]{1,3}\u000D\u000A[def]{1,3}[\u000A\u000B\u000C\u000D\u0085\u2028\u2029]{0,5}[123]')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'rlike(a, "\\\\R")',
'regexp_extract(a, "([a-d]+)(\\\\R)([a-d]+)", 1)',
'regexp_replace(a, "\\\\R", "")',
),
conf=_regexp_conf)
def test_regexp_octal_digits():
gen = mk_str_gen('[abcd]\u0000\u0041\u007f\u0080\u00ff[\\\\xa0-\\\\xb0][abcd]')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'rlike(a, "\\\\0177")',
'rlike(a, "\\\\0200")',
'rlike(a, "\\\\0101")',
'rlike(a, "[\\\\0240-\\\\0377]")',
'regexp_extract(a, "([a-d]+)\\\\0240([a-d]+)", 1)',
'regexp_extract(a, "([a-d]+)[\\\\0141-\\\\0172]([a-d]+)", 0)',
'regexp_replace(a, "\\\\0377", "")',
'regexp_replace(a, "\\\\0260", "")',
),
conf=_regexp_conf)
def test_regexp_replace_digit():
gen = mk_str_gen('[a-z]{0,2}[0-9]{0,2}') \
.with_special_case('䤫畍킱곂⬡❽ࢅ獰蛫青') \
.with_special_case('a\n2\r\n3')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'regexp_replace(a, "\\\\d", "x")',
'regexp_replace(a, "\\\\D", "x")',
'regexp_replace(a, "[0-9]", "x")',
'regexp_replace(a, "[^0-9]", "x")',
'regexp_replace(a, "[\\\\d]", "x")',
'regexp_replace(a, "[a\\\\d]{0,2}", "x")',
),
conf=_regexp_conf)
def test_regexp_replace_word():
gen = mk_str_gen('[a-z]{0,2}[_]{0,1}[0-9]{0,2}') \
.with_special_case('䤫畍킱곂⬡❽ࢅ獰蛫青') \
.with_special_case('a\n2\r\n3')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'regexp_replace(a, "\\\\w", "x")',
'regexp_replace(a, "\\\\W", "x")',
'regexp_replace(a, "[a-zA-Z_0-9]", "x")',
'regexp_replace(a, "[^a-zA-Z_0-9]", "x")',
),
conf=_regexp_conf)
def test_predefined_character_classes():
gen = mk_str_gen('[a-zA-Z]{0,2}[\r\n!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~]{0,2}[0-9]{0,2}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'regexp_replace(a, "\\\\p{Lower}", "x")',
'regexp_replace(a, "\\\\p{Upper}", "x")',
'regexp_replace(a, "\\\\p{ASCII}", "x")',
'regexp_replace(a, "\\\\p{Alpha}", "x")',
'regexp_replace(a, "\\\\p{Digit}", "x")',
'regexp_replace(a, "\\\\p{Alnum}", "x")',
'regexp_replace(a, "\\\\p{Punct}", "x")',
'regexp_replace(a, "\\\\p{Graph}", "x")',
'regexp_replace(a, "\\\\p{Print}", "x")',
'regexp_replace(a, "\\\\p{Blank}", "x")',
'regexp_replace(a, "\\\\p{Cntrl}", "x")',
'regexp_replace(a, "\\\\p{XDigit}", "x")',
'regexp_replace(a, "\\\\p{Space}", "x")',
'regexp_replace(a, "\\\\P{Lower}", "x")',
'regexp_replace(a, "\\\\P{Upper}", "x")',
'regexp_replace(a, "\\\\P{ASCII}", "x")',
'regexp_replace(a, "\\\\P{Alpha}", "x")',
'regexp_replace(a, "\\\\P{Digit}", "x")',
'regexp_replace(a, "\\\\P{Alnum}", "x")',
'regexp_replace(a, "\\\\P{Punct}", "x")',
'regexp_replace(a, "\\\\P{Graph}", "x")',
'regexp_replace(a, "\\\\P{Print}", "x")',
'regexp_replace(a, "\\\\P{Blank}", "x")',
'regexp_replace(a, "\\\\P{Cntrl}", "x")',
'regexp_replace(a, "\\\\P{XDigit}", "x")',
'regexp_replace(a, "\\\\P{Space}", "x")',
),
conf=_regexp_conf)
def test_rlike():
gen = mk_str_gen('[abcd]{1,3}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'a rlike "a{2}"',
'a rlike "a{1,3}"',
'a rlike "a{1,}"',
'a rlike "a[bc]d"',
'a rlike "a[bc]d"',
'a rlike "^[a-d]*$"'),
conf=_regexp_conf)
def test_rlike_embedded_null():
gen = mk_str_gen('[abcd]{1,3}')\
.with_special_case('\u0000aaa')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'a rlike "a{2}"',
'a rlike "a{1,3}"',
'a rlike "a{1,}"',
'a rlike "a[bc]d"'),
conf=_regexp_conf)
def test_rlike_null_pattern():
gen = mk_str_gen('[abcd]{1,3}')
# Spark optimizes out `RLIKE NULL` in this test
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'a rlike NULL'))
@allow_non_gpu('ProjectExec', 'RLike')
def test_rlike_fallback_empty_group():
gen = mk_str_gen('[abcd]{1,3}')
assert_gpu_fallback_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'a rlike "a()?"'),
'RLike',
conf=_regexp_conf)
def test_rlike_escape():
gen = mk_str_gen('[ab]{0,2};?[\\-\\+]{0,2}/?')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'a rlike "a[\\\\-]"',
'a rlike "a\\\\;[\\\\-]"',
'a rlike "a[\\\\-]\\\\/"',
'a rlike "b\\\\;[\\\\-]\\\\/"'),
conf=_regexp_conf)
def test_rlike_multi_line():
gen = mk_str_gen('[abc]\n[def]')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'a rlike "^a"',
'a rlike "^d"',
'a rlike "c\\z"',
'a rlike "e\\z"'),
conf=_regexp_conf)
def test_rlike_missing_escape():
gen = mk_str_gen('a[\\-\\+]')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'a rlike "a[-]"',
'a rlike "a[+-]"',
'a rlike "a[a-b-]"'),
conf=_regexp_conf)
@allow_non_gpu('ProjectExec', 'RLike')
def test_rlike_fallback_possessive_quantifier():
gen = mk_str_gen('(\u20ac|\\w){0,3}a[|b*.$\r\n]{0,2}c\\w{0,3}')
assert_gpu_fallback_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'a rlike "a*+"'),
'RLike',
conf=_regexp_conf)
def test_regexp_extract_all_idx_zero():
gen = mk_str_gen('[abcd]{0,3}[0-9]{0,3}-[0-9]{0,3}[abcd]{1,3}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'regexp_extract_all(a, "([a-d]+).*([0-9])", 0)',
'regexp_extract_all(a, "(a)(b)", 0)',
'regexp_extract_all(a, "([a-z0-9]([abcd]))", 0)',
'regexp_extract_all(a, "(\\\\d+)-(\\\\d+)", 0)',
),
conf=_regexp_conf)
def test_regexp_extract_all_idx_positive():
gen = mk_str_gen('[abcd]{0,3}[0-9]{0,3}-[0-9]{0,3}[abcd]{1,3}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'regexp_extract_all(a, "([a-d]+).*([0-9])", 1)',
'regexp_extract_all(a, "(a)(b)", 2)',
'regexp_extract_all(a, "([a-z0-9]((([abcd](\\\\d?)))))", 3)',
'regexp_extract_all(a, "(\\\\d+)-(\\\\d+)", 2)',
),
conf=_regexp_conf)
@allow_non_gpu('ProjectExec', 'RegExpExtractAll')
def test_regexp_extract_all_idx_negative():
message = "The specified group index cannot be less than zero" if is_before_spark_350() else \
"[INVALID_PARAMETER_VALUE.REGEX_GROUP_INDEX] The value of parameter(s) `idx` in `regexp_extract_all` is invalid"
gen = mk_str_gen('[abcd]{0,3}')
assert_gpu_and_cpu_error(
lambda spark: unary_op_df(spark, gen).selectExpr(
'regexp_extract_all(a, "(a)", -1)'
).collect(),
error_message=message,
conf=_regexp_conf)
@allow_non_gpu('ProjectExec', 'RegExpExtractAll')
def test_regexp_extract_all_idx_out_of_bounds():
message = "Regex group count is 2, but the specified group index is 3" if is_before_spark_350() else \
"[INVALID_PARAMETER_VALUE.REGEX_GROUP_INDEX] The value of parameter(s) `idx` in `regexp_extract_all` is invalid: Expects group index between 0 and 2, but got 3."
gen = mk_str_gen('[a-d]{1,2}.{0,1}[0-9]{1,2}')
assert_gpu_and_cpu_error(
lambda spark: unary_op_df(spark, gen).selectExpr(
'regexp_extract_all(a, "([a-d]+).*([0-9])", 3)'
).collect(),
error_message=message,
conf=_regexp_conf)
def test_rlike_unicode_support():
gen = mk_str_gen('a[\ud720\ud800\ud900]')\
.with_special_case('a䤫畍킱곂⬡❽ࢅ獰蛫青')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'a rlike "a*"',
'a rlike "a\ud720"',
'a rlike "a\ud720.+$"'),
conf=_regexp_conf)
def test_regexp_replace_unicode_support():
gen = mk_str_gen('TEST[85\ud720\ud800\ud900]')\
.with_special_case('TEST䤫畍킱곂⬡❽ࢅ獰蛫青')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'REGEXP_REPLACE(a, "TEST\ud720", "PROD")',
'REGEXP_REPLACE(a, "TEST\\\\b", "PROD")',
'REGEXP_REPLACE(a, "TEST\\\\B", "PROD")',
'REGEXP_REPLACE(a, "TEST䤫", "PROD")',
'REGEXP_REPLACE(a, "TEST[䤫]", "PROD")',
'REGEXP_REPLACE(a, "TEST.*\\\\d", "PROD")',
'REGEXP_REPLACE(a, "TEST[85]*$", "PROD")',
'REGEXP_REPLACE(a, "TEST.+$", "PROD")',
'REGEXP_REPLACE("TEST䤫", "TEST.+$", "PROD")',
),
conf=_regexp_conf)
@allow_non_gpu('ProjectExec', 'RegExpReplace')
def test_regexp_replace_fallback_configured_off():
gen = mk_str_gen('[abcdef]{0,2}')
conf = { 'spark.rapids.sql.regexp.enabled': 'false' }
assert_gpu_fallback_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'REGEXP_REPLACE(a, "[a-z]+", "PROD")',
'REGEXP_REPLACE(a, "aa", "PROD")',
),
cpu_fallback_class_name='RegExpReplace',
conf=conf
)
@allow_non_gpu('ProjectExec')
def test_unsupported_fallback_regexp_extract():
gen = mk_str_gen('[abcdef]{0,2}')
regex_gen = StringGen(r'\[a-z\]\+')
num_gen = IntegerGen(min_val=0, max_val=0, special_cases=[])
def assert_gpu_did_fallback(sql_text):
assert_gpu_fallback_collect(lambda spark:
gen_df(spark, [
("a", gen),
("reg_ex", regex_gen),
("num", num_gen)], length=10).selectExpr(sql_text),
'RegExpExtract')
assert_gpu_did_fallback('REGEXP_EXTRACT(a, "[a-z]+", num)')
assert_gpu_did_fallback('REGEXP_EXTRACT(a, reg_ex, 0)')
assert_gpu_did_fallback('REGEXP_EXTRACT(a, reg_ex, num)')
assert_gpu_did_fallback('REGEXP_EXTRACT("PROD", "[a-z]+", num)')
assert_gpu_did_fallback('REGEXP_EXTRACT("PROD", reg_ex, 0)')
assert_gpu_did_fallback('REGEXP_EXTRACT("PROD", reg_ex, num)')
@allow_non_gpu('ProjectExec')
def test_unsupported_fallback_regexp_extract_all():
gen = mk_str_gen('[abcdef]{0,2}')
regex_gen = StringGen(r'\[a-z\]\+')
num_gen = IntegerGen(min_val=0, max_val=0, special_cases=[])
def assert_gpu_did_fallback(sql_text):
assert_gpu_fallback_collect(lambda spark:
gen_df(spark, [
("a", gen),
("reg_ex", regex_gen),
("num", num_gen)], length=10).selectExpr(sql_text),
'RegExpExtractAll')
assert_gpu_did_fallback('REGEXP_EXTRACT_ALL(a, "[a-z]+", num)')
assert_gpu_did_fallback('REGEXP_EXTRACT_ALL(a, reg_ex, 0)')
assert_gpu_did_fallback('REGEXP_EXTRACT_ALL(a, reg_ex, num)')
assert_gpu_did_fallback('REGEXP_EXTRACT_ALL("PROD", "[a-z]+", num)')
assert_gpu_did_fallback('REGEXP_EXTRACT_ALL("PROD", reg_ex, 0)')
assert_gpu_did_fallback('REGEXP_EXTRACT_ALL("PROD", reg_ex, num)')
@allow_non_gpu('ProjectExec', 'RegExpReplace')
def test_unsupported_fallback_regexp_replace():
gen = mk_str_gen('[abcdef]{0,2}')
regex_gen = StringGen(r'\[a-z\]\+')
def assert_gpu_did_fallback(sql_text):
assert_gpu_fallback_collect(lambda spark:
gen_df(spark, [
("a", gen),
("reg_ex", regex_gen)], length=10).selectExpr(sql_text),
'RegExpReplace')
assert_gpu_did_fallback('REGEXP_REPLACE(a, "[a-z]+", a)')
assert_gpu_did_fallback('REGEXP_REPLACE(a, reg_ex, "PROD")')
assert_gpu_did_fallback('REGEXP_REPLACE(a, reg_ex, a)')
assert_gpu_did_fallback('REGEXP_REPLACE("PROD", "[a-z]+", a)')
assert_gpu_did_fallback('REGEXP_REPLACE("PROD", reg_ex, "PROD")')
assert_gpu_did_fallback('REGEXP_REPLACE("PROD", reg_ex, a)')
@pytest.mark.parametrize("regexp_enabled", ['true', 'false'])
def test_regexp_replace_simple(regexp_enabled):
gen = mk_str_gen('[abcdef]{0,2}')
conf = { 'spark.rapids.sql.regexp.enabled': regexp_enabled }
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'REGEXP_REPLACE(a, "aa", "PROD")',
'REGEXP_REPLACE(a, "ab", "PROD")',
'REGEXP_REPLACE(a, "ae", "PROD")',
'REGEXP_REPLACE(a, "bc", "PROD")',
'REGEXP_REPLACE(a, "fa", "PROD")'
),
conf=conf
)
@pytest.mark.parametrize("regexp_enabled", ['true', 'false'])
def test_regexp_replace_multi_optimization(regexp_enabled):
gen = mk_str_gen('[abcdef]{0,2}')
conf = { 'spark.rapids.sql.regexp.enabled': regexp_enabled }
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'REGEXP_REPLACE(a, "aa|bb", "PROD")',
'REGEXP_REPLACE(a, "(aa)|(bb)", "PROD")',
'REGEXP_REPLACE(a, "aa|bb|cc", "PROD")',
'REGEXP_REPLACE(a, "(aa)|(bb)|(cc)", "PROD")',
'REGEXP_REPLACE(a, "aa|bb|cc|dd", "PROD")',
'REGEXP_REPLACE(a, "(aa|bb)|(cc|dd)", "PROD")',
'REGEXP_REPLACE(a, "aa|bb|cc|dd|ee", "PROD")',
'REGEXP_REPLACE(a, "aa|bb|cc|dd|ee|ff", "PROD")'
),
conf=conf
)
def test_regexp_split_unicode_support():
data_gen = mk_str_gen('([bf]o{0,2}青){1,7}') \
.with_special_case('boo青and青foo')
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr(
'split(a, "[青]", -1)',
'split(a, "[o青]", -1)',
'split(a, "[^青]", -1)',
'split(a, "[^o]", -1)',
'split(a, "[o]{1,2}", -1)',
'split(a, "[bf]", -1)',
'split(a, "[o]", -2)'),
conf=_regexp_conf)
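# The two tests below exercise spark.rapids.sql.regexp.maxStateMemoryBytes. Roughly
# speaking, the plugin estimates the GPU memory needed for the regexp state of a batch
# and falls back to the CPU when that estimate exceeds the configured budget; with the
# batch size capped at one row, a 10-byte budget forces the fallback while 12 bytes is
# just enough to stay on the GPU.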
@allow_non_gpu('ProjectExec', 'RLike')
def test_regexp_memory_fallback():
gen = StringGen('test')
assert_gpu_fallback_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'a rlike "a{6}"',
'a rlike "a{6,}"',
'a rlike "(?:ab){0,3}"',
'a rlike "(?:12345)?"',
'a rlike "(?:12345)+"',
'a rlike "(?:123456)*"',
'a rlike "a{1,6}"',
'a rlike "abcdef"',
'a rlike "(1)(2)(3)"',
'a rlike "1|2|3|4|5|6"'
),
cpu_fallback_class_name='RLike',
conf={
'spark.rapids.sql.regexp.enabled': True,
'spark.rapids.sql.regexp.maxStateMemoryBytes': '10',
'spark.rapids.sql.batchSizeBytes': '20' # 1 row in the batch
}
)
def test_regexp_memory_ok():
gen = StringGen('test')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'a rlike "a{6}"',
'a rlike "a{6,}"',
'a rlike "(?:ab){0,3}"',
'a rlike "(?:12345)?"',
'a rlike "(?:12345)+"',
'a rlike "(?:123456)*"',
'a rlike "a{1,6}"',
'a rlike "abcdef"',
'a rlike "(1)(2)(3)"',
'a rlike "1|2|3|4|5|6"'
),
conf={
'spark.rapids.sql.regexp.enabled': True,
'spark.rapids.sql.regexp.maxStateMemoryBytes': '12',
'spark.rapids.sql.batchSizeBytes': '20' # 1 row in the batch
}
)
def test_re_replace_all():
"""
regression test for https://github.com/NVIDIA/spark-rapids/issues/8323
"""
gen = mk_str_gen('[a-z]{0,2}\n{0,2}[a-z]{0,2}\n{0,2}')
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, gen).selectExpr(
'REGEXP_REPLACE(a, ".*$", "PROD", 1)'),
conf=_regexp_conf)
| spark-rapids-branch-23.10 | integration_tests/src/main/python/regexp_test.py |
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from pyspark.sql.types import *
from asserts import assert_gpu_fallback_collect
from data_gen import *
from marks import ignore_order
# Copied from sort_test, with explainOnly mode added
_explain_mode_conf = {'spark.rapids.sql.mode': 'explainOnly',
'spark.sql.join.preferSortMergeJoin': 'True',
'spark.sql.shuffle.partitions': '2',
}
def create_df(spark, data_gen, left_length, right_length):
left = binary_op_df(spark, data_gen, length=left_length)
right = binary_op_df(spark, data_gen, length=right_length).withColumnRenamed("a", "r_a")\
.withColumnRenamed("b", "r_b")
return left, right
# Just run with one join type since we are not testing the join itself
all_join_types = ['Left']
# Use a subset of types just to test explain-only mode
all_gen = [StringGen(), ByteGen()]
# Here we use assert_gpu_fallback_collect to make sure queries in explain-only mode actually run on the CPU
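# In explainOnly mode the plugin only reports what could have run on the GPU; the query
# itself still executes on the CPU, which is why the CPU SortMergeJoinExec is expected
# to appear in the executed plan.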
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', all_gen, ids=idfn)
@pytest.mark.parametrize('join_type', all_join_types, ids=idfn)
def test_explain_only_sortmerge_join(data_gen, join_type):
def do_join(spark):
left, right = create_df(spark, data_gen, 500, 500)
return left.join(right, left.a == right.r_a, join_type)
assert_gpu_fallback_collect(do_join, 'SortMergeJoinExec', conf=_explain_mode_conf)
| spark-rapids-branch-23.10 | integration_tests/src/main/python/explain_mode_test.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_gpu_and_cpu_writes_are_equal_collect, with_cpu_session, with_gpu_session
from data_gen import copy_and_update
from delta_lake_write_test import delta_meta_allow
from marks import allow_non_gpu, delta_lake
from pyspark.sql.functions import *
from spark_session import is_databricks104_or_later
_conf = {'spark.rapids.sql.explain': 'ALL',
'spark.databricks.delta.autoCompact.minNumFiles': 3} # Num files before compaction.
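# minNumFiles is (roughly) the number of small files a table or partition must accumulate
# after a write before Delta's auto compaction kicks in, so 3 keeps the threshold low
# enough for these small test writes to trigger an OPTIMIZE.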
def write_to_delta(num_rows=30, is_partitioned=False, num_writes=3):
"""
Returns a bound function that writes to a Delta table.
"""
def write(spark, table_path):
input_data = spark.range(num_rows)
input_data = input_data.withColumn("part", expr("id % 3")) if is_partitioned \
else input_data.repartition(1)
writer = input_data.write.format("delta").mode("append")
for _ in range(num_writes):
writer.save(table_path)
return write
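# Example usage (illustrative only; the table path below is a placeholder):
#   writer = write_to_delta(num_rows=30, is_partitioned=True, num_writes=3)
#   with_gpu_session(lambda spark: writer(spark, "/tmp/some_delta_table"), conf=_conf)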
@delta_lake
@allow_non_gpu(*delta_meta_allow)
@pytest.mark.skipif(not is_databricks104_or_later(),
reason="Auto compaction of Delta Lake tables is only supported "
"on Databricks 10.4+")
@pytest.mark.parametrize("auto_compact_conf",
["spark.databricks.delta.autoCompact.enabled",
"spark.databricks.delta.properties.defaults.autoOptimize.autoCompact"])
def test_auto_compact_basic(spark_tmp_path, auto_compact_conf):
"""
This test checks whether the results of auto compaction on an un-partitioned table
match when written via CPU and GPU.
It also checks that the snapshot metrics (number of files added/removed, etc.)
match.
"""
from delta.tables import DeltaTable
data_path = spark_tmp_path + "/AUTO_COMPACT_TEST_DATA"
def read_data(spark, table_path):
return spark.read.format("delta").load(table_path)
assert_gpu_and_cpu_writes_are_equal_collect(
write_func=write_to_delta(is_partitioned=False),
read_func=read_data,
base_path=data_path,
conf=_conf)
def read_metadata(spark, table_path):
input_table = DeltaTable.forPath(spark, table_path)
table_history = input_table.history()
return table_history.select(
"version",
"operation",
expr("operationMetrics[\"numFiles\"]").alias("numFiles"),
expr("operationMetrics[\"numRemovedFiles\"]").alias("numRemoved"),
expr("operationMetrics[\"numAddedFiles\"]").alias("numAdded")
)
conf_enable_auto_compact = copy_and_update(_conf, {auto_compact_conf: "true"})
assert_gpu_and_cpu_writes_are_equal_collect(
write_func=lambda spark, table_path: None, # Already written.
read_func=read_metadata,
base_path=data_path,
conf=conf_enable_auto_compact)
@delta_lake
@allow_non_gpu(*delta_meta_allow)
@pytest.mark.skipif(not is_databricks104_or_later(),
reason="Auto compaction of Delta Lake tables is only supported "
"on Databricks 10.4+")
@pytest.mark.parametrize("auto_compact_conf",
["spark.databricks.delta.autoCompact.enabled",
"spark.databricks.delta.properties.defaults.autoOptimize.autoCompact"])
def test_auto_compact_partitioned(spark_tmp_path, auto_compact_conf):
"""
This test checks whether the results of auto compaction on a partitioned table
match when written via CPU and GPU.
Note: The behaviour of compaction itself differs from Databricks, in that
the plugin enforces the `minNumFiles` restriction uniformly across all partitions.
Databricks' Delta implementation appears not to.
"""
from delta.tables import DeltaTable
data_path = spark_tmp_path + "/AUTO_COMPACT_TEST_DATA_PARTITIONED"
def read_data(spark, table_path):
return spark.read.format("delta").load(table_path).orderBy("id", "part")
assert_gpu_and_cpu_writes_are_equal_collect(
write_func=write_to_delta(is_partitioned=True),
read_func=read_data,
base_path=data_path,
conf=_conf)
def read_metadata(spark, table_path):
"""
The snapshots might not look alike in the partitioned case.
Ensure that auto compaction has occurred, even if the results are not identical.
"""
input_table = DeltaTable.forPath(spark, table_path)
table_history = input_table.history()
return table_history.select(
"version",
"operation",
expr("operationMetrics[\"numFiles\"] > 0").alias("numFiles_gt_0"),
expr("operationMetrics[\"numRemovedFiles\"] > 0").alias("numRemoved_gt_0"),
expr("operationMetrics[\"numAddedFiles\"] > 0").alias("numAdded_gt_0")
)
conf_enable_auto_compact = copy_and_update(_conf, {auto_compact_conf: "true"})
assert_gpu_and_cpu_writes_are_equal_collect(
write_func=lambda spark, table_path: None, # Already written.
read_func=read_metadata,
base_path=data_path,
conf=conf_enable_auto_compact)
@delta_lake
@allow_non_gpu(*delta_meta_allow)
@pytest.mark.skipif(not is_databricks104_or_later(),
reason="Auto compaction of Delta Lake tables is only supported "
"on Databricks 10.4+")
@pytest.mark.parametrize("auto_compact_conf",
["spark.databricks.delta.autoCompact.enabled",
"spark.databricks.delta.properties.defaults.autoOptimize.autoCompact"])
def test_auto_compact_disabled(spark_tmp_path, auto_compact_conf):
"""
This test verifies that auto-compaction does not run if disabled.
"""
from delta.tables import DeltaTable
data_path = spark_tmp_path + "/AUTO_COMPACT_TEST_CHECK_DISABLED"
disable_auto_compaction = copy_and_update(_conf, {auto_compact_conf: 'false'})
writer = write_to_delta(num_writes=10)
with_gpu_session(func=lambda spark: writer(spark, data_path),
conf=disable_auto_compaction)
# 10 writes should correspond to 10 commits.
# (i.e. there should be no OPTIMIZE commits.)
def verify_table_history(spark):
input_table = DeltaTable.forPath(spark, data_path)
table_history = input_table.history()
assert table_history.select("version", "operation").count() == 10, \
"Expected 10 versions, 1 for each WRITE."
assert table_history.select("version")\
.where("operation = 'OPTIMIZE'")\
.count() == 0,\
"Expected 0 OPTIMIZE operations."
with_cpu_session(verify_table_history, {})
@delta_lake
@allow_non_gpu(*delta_meta_allow)
@pytest.mark.skipif(not is_databricks104_or_later(),
reason="Auto compaction of Delta Lake tables is only supported "
"on Databricks 10.4+")
def test_auto_compact_min_num_files(spark_tmp_path):
"""
This test verifies that auto-compaction honours the minNumFiles setting.
"""
from delta.tables import DeltaTable
data_path = spark_tmp_path + "/AUTO_COMPACT_TEST_MIN_FILES"
enable_auto_compaction_on_5 = {
'spark.databricks.delta.autoCompact.enabled': 'true', # Enable auto compaction.
'spark.databricks.delta.autoCompact.minNumFiles': 5 # Num files before compaction.
}
# Minimum number of input files == 5.
# If 4 files are written, there should be no OPTIMIZE.
writer = write_to_delta(num_writes=4)
with_gpu_session(func=lambda spark: writer(spark, data_path),
conf=enable_auto_compaction_on_5)
def verify_table_history_before_limit(spark):
input_table = DeltaTable.forPath(spark, data_path)
table_history = input_table.history()
assert table_history.select("version", "operation").count() == 4, \
"Expected 4 versions, 1 for each WRITE."
assert table_history.select("version") \
.where("operation = 'OPTIMIZE'") \
.count() == 0, \
"Expected 0 OPTIMIZE operations."
with_cpu_session(verify_table_history_before_limit, {})
# On the 5th file write, auto-OPTIMIZE should kick in.
with_gpu_session(func=lambda spark: write_to_delta(num_writes=1)(spark, data_path),
conf=enable_auto_compaction_on_5)
def verify_table_history_after_limit(spark):
input_table = DeltaTable.forPath(spark, data_path)
table_history = input_table.history()
assert table_history.select("version", "operation").count() == 6, \
"Expected 6 versions, i.e. 5 WRITEs + 1 OPTIMIZE."
assert table_history.select("version") \
.where("operation = 'OPTIMIZE'") \
.count() == 1, \
"Expected 1 OPTIMIZE operations."
with_cpu_session(verify_table_history_after_limit, {})
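# Illustrative sketch (not used by the tests above): the history checks performed inline in
# the tests can also be written as a small reusable helper. The helper name below is
# hypothetical; it simply mirrors the DeltaTable.history() pattern already used above.
def _example_count_optimize_commits(spark, table_path):
    """Return the number of OPTIMIZE commits recorded in a Delta table's history."""
    from delta.tables import DeltaTable
    table_history = DeltaTable.forPath(spark, table_path).history()
    return table_history.select("version") \
        .where("operation = 'OPTIMIZE'") \
        .count()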
| spark-rapids-branch-23.10 | integration_tests/src/main/python/delta_lake_auto_compact_test.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_cpu_and_gpu_are_equal_collect_with_capture, assert_gpu_and_cpu_are_equal_collect
from data_gen import *
from marks import allow_non_gpu, ignore_order, delta_lake
from spark_session import is_databricks_runtime, with_cpu_session, with_gpu_session, is_databricks104_or_later, is_databricks113_or_later
from dpp_test import _exchange_reuse_conf
# Almost all of this is the metadata query.
# The important part is to not have InterleaveBits or HilbertLongIndex and PartitionerExpr,
# but there is no good way to check for that, so I filed https://github.com/NVIDIA/spark-rapids/issues/6875
# Until then we allow anything to be on the CPU.
@allow_non_gpu(any=True)
@delta_lake
@ignore_order(local=True)
def test_delta_zorder(spark_tmp_table_factory):
table = spark_tmp_table_factory.get()
def optimize_table(spark):
        # We need to drop the table and rerun each time because in some
        # versions delta keeps track of whether the table has already been optimized
        # and will not re-run the optimization if it has been
df = two_col_df(spark, long_gen, string_gen, length=4096)
spark.sql("DROP TABLE IF EXISTS {}".format(table)).show()
spark.sql("CREATE TABLE {} (a BIGINT, b STRING) USING DELTA".format(table)).show()
df.write.insertInto(table)
# The optimize returns stats and metadata about the operation, which is different
# from one run to another, so we cannot just compare them...
spark.sql("OPTIMIZE {} ZORDER BY a, b".format(table)).show()
return spark.sql("select * from {} where a = 1".format(table))
assert_gpu_and_cpu_are_equal_collect(optimize_table,
conf={"spark.rapids.sql.castFloatToIntegralTypes.enabled": True,
"spark.rapids.sql.castFloatToString.enabled": True,
"spark.rapids.sql.explain": "ALL"})
_statements = [
# join on z-ordered column
'''
SELECT fact.ex_key, sum(fact.value)
FROM {0} fact
JOIN {1} dim
ON fact.ex_key = dim.ex_key
WHERE dim.filter = {2}
GROUP BY fact.ex_key
''',
# join on 2 z-ordered columns
'''
SELECT fact.ex_key, fact.ex_skey, sum(fact.value)
FROM {0} fact
JOIN {1} dim
ON fact.ex_key = dim.ex_key AND fact.ex_skey = dim.ex_skey
WHERE dim.filter = {2}
GROUP BY fact.ex_key, fact.ex_skey
''',
# join on 1 partitioned and 1 z-ordered column
'''
SELECT fact.key, fact.ex_key, sum(fact.value)
FROM {0} fact
JOIN {1} dim
ON fact.key = dim.key AND fact.ex_key = dim.ex_key
WHERE dim.filter = {2}
GROUP BY fact.key, fact.ex_key
''',
# join on 2 partitioned and 1 z-ordered columns
'''
SELECT fact.key, fact.skey, fact.ex_key, sum(fact.value)
FROM {0} fact
JOIN {1} dim
ON fact.key = dim.key AND fact.skey = dim.skey AND fact.ex_key = dim.ex_key
WHERE dim.filter = {2}
GROUP BY fact.key, fact.skey, fact.ex_key
''',
# reused subquery, join on z-ordered column
'''
SELECT ex_key, max(value)
FROM (
SELECT fact.ex_key as ex_key, fact.value as value
FROM {0} fact
JOIN {1} dim
ON fact.ex_key = dim.ex_key
WHERE dim.filter = {2}
UNION ALL
SELECT fact.ex_key as ex_key, fact.value as value
FROM {0} fact
JOIN {1} dim
ON fact.ex_key = dim.ex_key
WHERE dim.filter = {2}
)
GROUP BY ex_key
'''
]
# This tests Dynamic File Pruning, a feature in Databricks that is similar to Dynamic Partition Pruning
# except that it adds the DynamicPruningExpression for columns that are not partition columns but are still
# optimized. In this case the DynamicPruningExpression should be added to the DataFilters in the scan.
# This test is very similar to `test_dpp_reuse_broadcast_exchange` but it tests joining using a Z-ordered
# column
@delta_lake
@ignore_order(local=True)
@pytest.mark.skipif(not is_databricks104_or_later(), reason="Dynamic File Pruning is only supported in Databricks 10.4+")
@pytest.mark.parametrize('s_index', list(range(len(_statements))), ids=idfn)
@pytest.mark.parametrize('aqe_enabled', ['false', 'true'])
def test_delta_dfp_reuse_broadcast_exchange(spark_tmp_table_factory, s_index, aqe_enabled):
fact_table, dim_table = spark_tmp_table_factory.get(), spark_tmp_table_factory.get()
def build_and_optimize_tables(spark):
        # Note that ex_key is a high-cardinality column, which makes it a good candidate
        # for Z-ordering, which in turn means it can be used in Dynamic File Pruning in joins
df = gen_df(spark, [
('key', IntegerGen(nullable=False, min_val=0, max_val=9, special_cases=[])),
('skey', IntegerGen(nullable=False, min_val=0, max_val=4, special_cases=[])),
('ex_key', IntegerGen(nullable=False, min_val=0, max_val=10000, special_cases=[])),
('ex_skey', IntegerGen(nullable=False, min_val=0, max_val=1000, special_cases=[])),
('value', int_gen),
], 10000)
df.write.format("delta") \
.mode("overwrite") \
.partitionBy("key", "skey") \
.saveAsTable(fact_table)
spark.sql("OPTIMIZE {} ZORDER BY (ex_key, ex_skey)".format(fact_table)).show()
df = gen_df(spark, [
('key', IntegerGen(nullable=False, min_val=0, max_val=9, special_cases=[])),
('skey', IntegerGen(nullable=False, min_val=0, max_val=4, special_cases=[])),
('ex_key', IntegerGen(nullable=False, min_val=0, max_val=10000, special_cases=[])),
('ex_skey', IntegerGen(nullable=False, min_val=0, max_val=1000, special_cases=[])),
('value', int_gen),
('filter', RepeatSeqGen(
IntegerGen(min_val=0, max_val=2000, special_cases=[]), length=2000 // 20))
], 2000)
df.write.format("delta") \
.mode("overwrite") \
.saveAsTable(dim_table)
return df.select('filter').first()[0]
filter_val = with_cpu_session(build_and_optimize_tables)
statement = _statements[s_index].format(fact_table, dim_table, filter_val)
if is_databricks113_or_later() and aqe_enabled == 'true':
# SubqueryBroadcastExec is unoptimized in Databricks 11.3 with EXECUTOR_BROADCAST
# See https://github.com/NVIDIA/spark-rapids/issues/7425
exist_classes='DynamicPruningExpression,SubqueryBroadcastExec,ReusedExchangeExec'
else:
exist_classes='DynamicPruningExpression,GpuSubqueryBroadcastExec,ReusedExchangeExec'
assert_cpu_and_gpu_are_equal_collect_with_capture(
lambda spark: spark.sql(statement),
# The existence of GpuSubqueryBroadcastExec indicates the reuse works on the GPU
exist_classes,
# Ensure Dynamic File Pruning kicks in by setting thresholds to 0
conf=dict(_exchange_reuse_conf + [
('spark.databricks.optimizer.dynamicFilePruning', 'true'),
('spark.databricks.optimizer.deltaTableSizeThreshold', '0'),
('spark.databricks.optimizer.deltaTableFilesThreshold', '0'),
('spark.sql.adaptive.enabled', aqe_enabled)]))
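# Illustrative sketch (an assumption, not part of the test above): one manual way to check
# whether Dynamic File Pruning produced a DynamicPruningExpression is to inspect the formatted
# plan text. The real test relies on assert_cpu_and_gpu_are_equal_collect_with_capture instead;
# the helper name below is hypothetical.
def _example_plan_mentions_dynamic_pruning(spark, statement):
    # EXPLAIN FORMATTED returns a single-row DataFrame whose only column holds the plan text.
    plan_text = spark.sql("EXPLAIN FORMATTED " + statement).collect()[0][0]
    return "DynamicPruningExpression" in plan_text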
| spark-rapids-branch-23.10 | integration_tests/src/main/python/delta_zorder_test.py |
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_gpu_and_cpu_are_equal_collect, assert_gpu_and_cpu_row_counts_equal,\
assert_gpu_and_cpu_are_equal_sql,\
assert_gpu_fallback_collect, assert_cpu_and_gpu_are_equal_sql_with_capture,\
assert_cpu_and_gpu_are_equal_collect_with_capture, run_with_cpu, run_with_cpu_and_gpu
from conftest import is_databricks_runtime
from data_gen import *
from functools import reduce
from pyspark.sql.types import *
from marks import *
import pyspark.sql.functions as f
from spark_session import is_databricks104_or_later, with_cpu_session, is_before_spark_330
pytestmark = pytest.mark.nightly_resource_consuming_test
_float_conf = {'spark.rapids.sql.variableFloatAgg.enabled': 'true',
'spark.rapids.sql.castStringToFloat.enabled': 'true'
}
_float_smallbatch_conf = copy_and_update(_float_conf,
{'spark.rapids.sql.batchSizeBytes' : '250'})
_float_conf_partial = copy_and_update(_float_conf,
{'spark.rapids.sql.hashAgg.replaceMode': 'partial'})
_float_conf_final = copy_and_update(_float_conf,
{'spark.rapids.sql.hashAgg.replaceMode': 'final'})
# The input lists or schemas that are used by StructGen.
# grouping longs with nulls
_longs_with_nulls = [('a', LongGen()), ('b', IntegerGen()), ('c', LongGen())]
# grouping longs with no nulls
_longs_with_no_nulls = [
('a', LongGen(nullable=False)),
('b', IntegerGen(nullable=False)),
('c', LongGen(nullable=False))]
# grouping longs with nulls present
_grpkey_longs_with_nulls = [
('a', RepeatSeqGen(LongGen(nullable=(True, 10.0)), length= 20)),
('b', IntegerGen()),
('c', LongGen())]
# grouping doubles with nulls present
_grpkey_dbls_with_nulls = [
('a', RepeatSeqGen(DoubleGen(nullable=(True, 10.0), special_cases=[]), length= 20)),
('b', IntegerGen()),
('c', LongGen())]
# grouping floats with nulls present
_grpkey_floats_with_nulls = [
('a', RepeatSeqGen(FloatGen(nullable=(True, 10.0), special_cases=[]), length= 20)),
('b', IntegerGen()),
('c', LongGen())]
# grouping strings with nulls present
_grpkey_strings_with_nulls = [
('a', RepeatSeqGen(StringGen(pattern='[0-9]{0,30}'), length= 20)),
('b', IntegerGen()),
('c', LongGen())]
# grouping strings with nulls present, and null value
_grpkey_strings_with_extra_nulls = [
('a', RepeatSeqGen(StringGen(pattern='[0-9]{0,30}'), length= 20)),
('b', IntegerGen()),
('c', NullGen())]
# grouping single-level structs
_grpkey_structs_with_non_nested_children = [
('a', RepeatSeqGen(StructGen([
['aa', IntegerGen()],
['ab', StringGen(pattern='[0-9]{0,30}')],
['ac', DecimalGen()]]), length=20)),
('b', IntegerGen()),
('c', NullGen())]
# grouping multiple-level structs
_grpkey_nested_structs = [
('a', RepeatSeqGen(StructGen([
['aa', IntegerGen()],
['ab', StringGen(pattern='[0-9]{0,30}')],
['ac', StructGen([['aca', LongGen()],
['acb', BooleanGen()],
['acc', StructGen([['acca', StringGen()]])]])]]),
length=20)),
('b', IntegerGen()),
('c', NullGen())]
# grouping multiple-level structs with arrays in children
_grpkey_nested_structs_with_array_child = [
('a', RepeatSeqGen(StructGen([
['aa', IntegerGen()],
['ab', ArrayGen(IntegerGen())],
['ac', ArrayGen(StructGen([['aca', LongGen()]]))]]),
length=20)),
('b', IntegerGen()),
('c', NullGen())]
# grouping NullType
_grpkey_nulls = [
('a', NullGen()),
('b', IntegerGen()),
('c', LongGen())]
# grouping floats with other columns containing nans and nulls
_grpkey_floats_with_nulls_and_nans = [
('a', RepeatSeqGen(FloatGen(nullable=(True, 10.0)), length= 20)),
('b', FloatGen(nullable=(True, 10.0), special_cases=[(float('nan'), 10.0)])),
('c', LongGen())]
# grouping single-level lists
# StringGen for the value being aggregated will force CUDF to do a sort based aggregation internally.
_grpkey_list_with_non_nested_children = [[('a', RepeatSeqGen(ArrayGen(data_gen), length=3)),
('b', IntegerGen())] for data_gen in all_basic_gens + decimal_gens] + \
[[('a', RepeatSeqGen(ArrayGen(data_gen), length=3)),
('b', StringGen())] for data_gen in all_basic_gens + decimal_gens]
# grouping multiple-level structs with arrays
_grpkey_nested_structs_with_array_basic_child = [[
('a', RepeatSeqGen(StructGen([
['aa', IntegerGen()],
['ab', ArrayGen(IntegerGen())]]),
length=20)),
('b', IntegerGen()),
('c', NullGen())]]
_nan_zero_float_special_cases = [
(float('nan'), 5.0),
(NEG_FLOAT_NAN_MIN_VALUE, 5.0),
(NEG_FLOAT_NAN_MAX_VALUE, 5.0),
(POS_FLOAT_NAN_MIN_VALUE, 5.0),
(POS_FLOAT_NAN_MAX_VALUE, 5.0),
(float('0.0'), 5.0),
(float('-0.0'), 5.0),
]
_grpkey_floats_with_nan_zero_grouping_keys = [
('a', RepeatSeqGen(FloatGen(nullable=(True, 10.0), special_cases=_nan_zero_float_special_cases), length=50)),
('b', IntegerGen(nullable=(True, 10.0))),
('c', LongGen())]
_nan_zero_double_special_cases = [
(float('nan'), 5.0),
(NEG_DOUBLE_NAN_MIN_VALUE, 5.0),
(NEG_DOUBLE_NAN_MAX_VALUE, 5.0),
(POS_DOUBLE_NAN_MIN_VALUE, 5.0),
(POS_DOUBLE_NAN_MAX_VALUE, 5.0),
(float('0.0'), 5.0),
(float('-0.0'), 5.0),
]
_grpkey_doubles_with_nan_zero_grouping_keys = [
('a', RepeatSeqGen(DoubleGen(nullable=(True, 10.0), special_cases=_nan_zero_double_special_cases), length=50)),
('b', FloatGen(nullable=(True, 10.0))),
('c', LongGen())]
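# Note (added for clarity): Spark normalizes floating point grouping keys with
# NormalizeNaNAndZero, so every NaN bit pattern lands in a single group and -0.0 is grouped
# together with 0.0. The special cases above are meant to exercise that the GPU produces the
# same grouping as the CPU for those values.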
# Schema for xfail cases
struct_gens_xfail = [
_grpkey_floats_with_nulls_and_nans
]
# List of schemas with no NaNs
_init_list_no_nans = [
_longs_with_nulls,
_longs_with_no_nulls,
_grpkey_longs_with_nulls,
_grpkey_dbls_with_nulls,
_grpkey_floats_with_nulls,
_grpkey_strings_with_nulls,
_grpkey_nulls,
_grpkey_strings_with_extra_nulls]
# List of schemas with NaNs included
_init_list_with_nans_and_no_nans = [
_longs_with_nulls,
_longs_with_no_nulls,
_grpkey_longs_with_nulls,
_grpkey_dbls_with_nulls,
_grpkey_floats_with_nulls,
_grpkey_strings_with_nulls,
_grpkey_floats_with_nulls_and_nans]
# grouping decimals with nulls
_decimals_with_nulls = [('a', DecimalGen()), ('b', DecimalGen()), ('c', DecimalGen())]
# grouping decimals with no nulls
_decimals_with_no_nulls = [
('a', DecimalGen(nullable=False)),
('b', DecimalGen(nullable=False)),
('c', DecimalGen(nullable=False))]
_init_list_with_nans_and_no_nans_with_decimals = _init_list_with_nans_and_no_nans + [
_decimals_with_nulls, _decimals_with_no_nulls]
# Used to test ANSI-mode fallback
_no_overflow_ansi_gens = [
ByteGen(min_val = 1, max_val = 10, special_cases=[]),
ShortGen(min_val = 1, max_val = 100, special_cases=[]),
IntegerGen(min_val = 1, max_val = 1000, special_cases=[]),
LongGen(min_val = 1, max_val = 3000, special_cases=[])]
_decimal_gen_36_5 = DecimalGen(precision=36, scale=5)
_decimal_gen_36_neg5 = DecimalGen(precision=36, scale=-5)
_decimal_gen_38_10 = DecimalGen(precision=38, scale=10)
def get_params(init_list, marked_params=[]):
"""
    A method to build the test inputs along with their passed-in markers, to allow testing
    specific params with their relevant markers. Right now it is used to parametrize _confs with
    allow_non_gpu, which allows some operators to run on the CPU.
    However, this can be used with any list of params passed to the test.
:arg init_list list of param values to be tested
:arg marked_params A list of tuples of (params, list of pytest markers)
Look at params_markers_for_confs as an example.
"""
list = init_list.copy()
for index in range(0, len(list)):
for test_case, marks in marked_params:
if list[index] == test_case:
list[index] = pytest.param(list[index], marks=marks)
return list
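# For example (illustrative only), given two confs where only the second needs extra markers:
#   get_params([_float_conf, _float_conf_partial],
#              [(_float_conf_partial, [_excluded_operators_marker])])
# returns [_float_conf, pytest.param(_float_conf_partial, marks=[_excluded_operators_marker])],
# which can be passed directly to @pytest.mark.parametrize, as is done with _confs below.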
# Run these tests in 4 modes, all on the GPU
_confs = [_float_conf, _float_smallbatch_conf, _float_conf_final, _float_conf_partial]
# Pytest marker for the list of operators allowed to run on the CPU,
# especially useful in partial-only and final-only modes.
# However, this ends up allowing close to everything to be off the GPU,
# so I am not sure how useful it really is.
_excluded_operators_marker = pytest.mark.allow_non_gpu(
'HashAggregateExec', 'AggregateExpression', 'UnscaledValue', 'MakeDecimal',
'AttributeReference', 'Alias', 'Sum', 'Count', 'Max', 'Min', 'Average', 'Cast',
'StddevPop', 'StddevSamp', 'VariancePop', 'VarianceSamp',
'NormalizeNaNAndZero', 'GreaterThan', 'Literal', 'If',
'EqualTo', 'First', 'SortAggregateExec', 'Coalesce', 'IsNull', 'EqualNullSafe',
'PivotFirst', 'GetArrayItem', 'ShuffleExchangeExec', 'HashPartitioning')
params_markers_for_confs = [
(_float_conf_partial, [_excluded_operators_marker]),
(_float_conf_final, [_excluded_operators_marker]),
(_float_conf, [_excluded_operators_marker])
]
_grpkey_small_decimals = [
('a', RepeatSeqGen(DecimalGen(precision=7, scale=3, nullable=(True, 10.0)), length=50)),
('b', DecimalGen(precision=5, scale=2)),
('c', DecimalGen(precision=8, scale=3))]
_grpkey_big_decimals = [
('a', RepeatSeqGen(DecimalGen(precision=32, scale=10, nullable=(True, 10.0)), length=50)),
('b', DecimalGen(precision=20, scale=2)),
('c', DecimalGen(precision=36, scale=5))]
_grpkey_short_mid_decimals = [
('a', RepeatSeqGen(short_gen, length=50)),
('b', decimal_gen_64bit),
('c', decimal_gen_64bit)]
_grpkey_short_big_decimals = [
('a', RepeatSeqGen(short_gen, length=50)),
('b', decimal_gen_128bit),
('c', decimal_gen_128bit)]
# NOTE: on older versions of Spark, decimal 38 causes the CPU to crash
# instead of detecting overflows. We have versions of this for both
# 36 and 38 so we get some coverage for old versions and full
# coverage for newer versions.
_grpkey_short_very_big_decimals = [
('a', RepeatSeqGen(short_gen, length=50)),
('b', _decimal_gen_36_5),
('c', _decimal_gen_36_5)]
_grpkey_short_very_big_neg_scale_decimals = [
('a', RepeatSeqGen(short_gen, length=50)),
('b', _decimal_gen_36_neg5),
('c', _decimal_gen_36_neg5)]
# Only use negative values to avoid the potential to hover around an overflow
# as values are added and subtracted during the sum. Non-deterministic ordering
# of values from shuffle cannot guarantee overflow calculation is predictable
# when the sum can move in both directions as new partitions are aggregated.
_decimal_gen_sum_38_0 = DecimalGen(precision=38, scale=0, avoid_positive_values=True)
_decimal_gen_sum_38_neg10 = DecimalGen(precision=38, scale=-10, avoid_positive_values=True)
_grpkey_short_sum_full_decimals = [
('a', RepeatSeqGen(short_gen, length=50)),
('b', _decimal_gen_sum_38_0),
('c', _decimal_gen_sum_38_0)]
_grpkey_short_sum_full_neg_scale_decimals = [
('a', RepeatSeqGen(short_gen, length=50)),
('b', _decimal_gen_sum_38_neg10),
('c', _decimal_gen_sum_38_neg10)]
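# Worked example (added for clarity): with values of mixed sign at precision 38, a running sum
# such as 9e37 + 9e37 + (-9e37) overflows or not depending on the order in which the shuffle
# delivers the rows, so the CPU and GPU could legitimately disagree on whether an overflow is
# reported. Restricting the generators above to non-positive values keeps the running sum
# monotonic, so any overflow happens regardless of ordering.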
_init_list_no_nans_with_decimal = _init_list_no_nans + [
_grpkey_small_decimals]
_init_list_no_nans_with_decimalbig = _init_list_no_nans + [
_grpkey_small_decimals, _grpkey_big_decimals, _grpkey_short_mid_decimals,
_grpkey_short_big_decimals, _grpkey_short_very_big_decimals,
_grpkey_short_very_big_neg_scale_decimals]
_init_list_with_nans_and_no_nans_with_decimalbig = _init_list_with_nans_and_no_nans + [
_grpkey_small_decimals, _grpkey_big_decimals, _grpkey_short_mid_decimals,
_grpkey_short_big_decimals, _grpkey_short_very_big_decimals,
_grpkey_short_very_big_neg_scale_decimals]
# Any smaller precision takes way too long to process on the CPU
# or results in using too much memory on the GPU
@nightly_gpu_mem_consuming_case
@pytest.mark.parametrize('precision', [38, 37, 36, 35, 34, 33, 32, 31], ids=idfn)
def test_hash_reduction_decimal_overflow_sum(precision):
constant = '9' * precision
count = pow(10, 38 - precision)
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.range(count)\
.selectExpr("CAST('{}' as Decimal({}, 0)) as a".format(constant, precision))\
.selectExpr("SUM(a)"),
        # This is set to 128m because of a number of other bugs that compound and cause us to
        # run out of memory in some setups. These should not happen in production, because
        # we really are just doing a bad job at multiplying to get this result, so
        # some optimizations are conspiring against us.
conf = {'spark.rapids.sql.batchSizeBytes': '128m'})
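# Worked example (added for clarity): for precision = 36 the query above sums
#   count = 10 ** (38 - 36) = 100 rows of Decimal('9' * 36) = 10**36 - 1,
# i.e. 100 * (10**36 - 1) = 10**38 - 100, which still fits in the Decimal(38, 0) result type
# Spark uses for the SUM, so the CPU and GPU results can be compared right at the edge of
# overflow.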
@pytest.mark.parametrize('data_gen', [_longs_with_nulls], ids=idfn)
@pytest.mark.parametrize('override_split_until_size', [None, 1], ids=idfn)
@pytest.mark.parametrize('override_batch_size_bytes', [None, 1], ids=idfn)
def test_hash_grpby_sum_count_action(data_gen, override_split_until_size, override_batch_size_bytes):
conf = {
'spark.rapids.sql.test.overrides.splitUntilSize': override_split_until_size
}
if override_batch_size_bytes is not None:
conf["spark.rapids.sql.batchSizeBytes"] = override_batch_size_bytes
assert_gpu_and_cpu_row_counts_equal(
lambda spark: gen_df(spark, data_gen, length=100).groupby('a').agg(f.sum('b')),
conf = conf
)
@allow_non_gpu("SortAggregateExec", "SortExec", "ShuffleExchangeExec")
@ignore_order
@pytest.mark.parametrize('data_gen', _grpkey_nested_structs_with_array_basic_child + _grpkey_list_with_non_nested_children, ids=idfn)
def test_hash_grpby_list_min_max(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, data_gen, length=100).coalesce(1).groupby('a').agg(f.min('b'), f.max('b'))
)
@pytest.mark.parametrize('data_gen', [_longs_with_nulls], ids=idfn)
def test_hash_reduction_sum_count_action(data_gen):
assert_gpu_and_cpu_row_counts_equal(
lambda spark: gen_df(spark, data_gen, length=100).agg(f.sum('b'))
)
# Make sure that we can do computation in the group by columns
@ignore_order
def test_computation_in_grpby_columns():
conf = {'spark.rapids.sql.batchSizeBytes' : '250'}
data_gen = [
('a', RepeatSeqGen(StringGen('a{1,20}'), length=50)),
('b', short_gen)]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, data_gen).groupby(f.substring(f.col('a'), 2, 10)).agg(f.sum('b')),
conf = conf)
@shuffle_test
@approximate_float
@ignore_order
@incompat
@pytest.mark.parametrize('data_gen', _init_list_no_nans_with_decimalbig, ids=idfn)
@pytest.mark.parametrize('conf', get_params(_confs, params_markers_for_confs), ids=idfn)
def test_hash_grpby_sum(data_gen, conf):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, data_gen, length=100).groupby('a').agg(f.sum('b')),
conf = conf)
@shuffle_test
@approximate_float
@ignore_order
@incompat
@pytest.mark.parametrize('data_gen', [_grpkey_short_sum_full_decimals, _grpkey_short_sum_full_neg_scale_decimals], ids=idfn)
@pytest.mark.parametrize('conf', get_params(_confs, params_markers_for_confs), ids=idfn)
def test_hash_grpby_sum_full_decimal(data_gen, conf):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, data_gen, length=100).groupby('a').agg(f.sum('b')),
conf = conf)
@approximate_float
@ignore_order
@incompat
@pytest.mark.parametrize('data_gen', numeric_gens + decimal_gens + [DecimalGen(precision=36, scale=5)], ids=idfn)
@pytest.mark.parametrize('conf', get_params(_confs, params_markers_for_confs), ids=idfn)
def test_hash_reduction_sum(data_gen, conf):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, data_gen, length=100).selectExpr("SUM(a)"),
conf = conf)
@approximate_float
@ignore_order
@incompat
@pytest.mark.parametrize('data_gen', numeric_gens + decimal_gens + [
DecimalGen(precision=38, scale=0), DecimalGen(precision=38, scale=-10)], ids=idfn)
@pytest.mark.parametrize('conf', get_params(_confs, params_markers_for_confs), ids=idfn)
def test_hash_reduction_sum_full_decimal(data_gen, conf):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, data_gen, length=100).selectExpr("SUM(a)"),
conf = conf)
@approximate_float
@ignore_order
@incompat
@pytest.mark.parametrize('data_gen', _init_list_with_nans_and_no_nans + [_grpkey_short_mid_decimals,
_grpkey_short_big_decimals, _grpkey_short_very_big_decimals, _grpkey_short_sum_full_decimals], ids=idfn)
@pytest.mark.parametrize('conf', get_params(_confs, params_markers_for_confs), ids=idfn)
def test_hash_grpby_avg(data_gen, conf):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, data_gen, length=200).groupby('a').agg(f.avg('b')),
conf=conf
)
# tracks https://github.com/NVIDIA/spark-rapids/issues/154
@approximate_float
@ignore_order
@incompat
@pytest.mark.allow_non_gpu(
'HashAggregateExec', 'AggregateExpression',
'AttributeReference', 'Alias', 'Sum', 'Count', 'Max', 'Min', 'Average', 'Cast',
'NormalizeNaNAndZero', 'GreaterThan', 'Literal', 'If',
'EqualTo', 'First', 'SortAggregateExec')
@pytest.mark.parametrize('data_gen', [
StructGen(children=[('a', int_gen), ('b', int_gen)],nullable=False,
special_cases=[((None, None), 400.0), ((None, -1542301795), 100.0)])], ids=idfn)
@pytest.mark.xfail(condition=is_databricks104_or_later(), reason='https://github.com/NVIDIA/spark-rapids/issues/4963')
def test_hash_avg_nulls_partial_only(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, data_gen, length=2).agg(f.avg('b')),
conf=_float_conf_partial
)
@approximate_float
@ignore_order
@incompat
@pytest.mark.parametrize('data_gen', _init_list_no_nans_with_decimalbig, ids=idfn)
def test_intersectAll(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : gen_df(spark, data_gen, length=100).intersectAll(gen_df(spark, data_gen, length=100)))
@approximate_float
@ignore_order
@incompat
@pytest.mark.parametrize('data_gen', _init_list_no_nans_with_decimalbig, ids=idfn)
def test_exceptAll(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : gen_df(spark, data_gen, length=100).exceptAll(gen_df(spark, data_gen, length=100).filter('a != b')))
# Spark fails to sort some decimal values due to overflow when calculating the sorting prefix.
# See https://issues.apache.org/jira/browse/SPARK-40129
# Since pivot orders by value, avoid generating these extreme values for this test.
_pivot_gen_128bit = DecimalGen(precision=20, scale=2, special_cases=[])
_pivot_big_decimals = [
('a', RepeatSeqGen(DecimalGen(precision=32, scale=10, nullable=(True, 10.0)), length=50)),
('b', _pivot_gen_128bit),
('c', DecimalGen(precision=36, scale=5))]
_pivot_short_big_decimals = [
('a', RepeatSeqGen(short_gen, length=50)),
('b', _pivot_gen_128bit),
('c', decimal_gen_128bit)]
_pivot_gens_with_decimals = _init_list_with_nans_and_no_nans + [
_grpkey_small_decimals, _pivot_big_decimals, _grpkey_short_mid_decimals,
_pivot_short_big_decimals, _grpkey_short_very_big_decimals,
_grpkey_short_very_big_neg_scale_decimals]
@approximate_float
@ignore_order(local=True)
@incompat
@pytest.mark.parametrize('data_gen', _pivot_gens_with_decimals, ids=idfn)
@pytest.mark.parametrize('conf', get_params(_confs, params_markers_for_confs), ids=idfn)
def test_hash_grpby_pivot(data_gen, conf):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, data_gen, length=100)
.groupby('a')
.pivot('b')
.agg(f.sum('c')),
conf = conf)
@approximate_float
@ignore_order(local=True)
@incompat
@pytest.mark.parametrize('data_gen', _init_list_no_nans, ids=idfn)
@pytest.mark.parametrize('conf', get_params(_confs, params_markers_for_confs), ids=idfn)
def test_hash_grpby_pivot_without_nans(data_gen, conf):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, data_gen, length=100)
.groupby('a')
.pivot('b')
.agg(f.sum('c')),
conf=conf)
@approximate_float
@ignore_order(local=True)
@incompat
@pytest.mark.parametrize('data_gen', _init_list_with_nans_and_no_nans, ids=idfn)
@pytest.mark.parametrize('conf', get_params(_confs, params_markers_for_confs), ids=idfn)
def test_hash_multiple_grpby_pivot(data_gen, conf):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, data_gen, length=100)
.groupby('a','b')
.pivot('b')
.agg(f.sum('c'), f.max('c')),
conf=conf)
@approximate_float
@ignore_order(local=True)
@incompat
@pytest.mark.parametrize('data_gen', _init_list_no_nans, ids=idfn)
@pytest.mark.parametrize('conf', get_params(_confs, params_markers_for_confs), ids=idfn)
def test_hash_reduction_pivot_without_nans(data_gen, conf):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, data_gen, length=100)
.groupby()
.pivot('b')
.agg(f.sum('c')),
conf=conf)
@approximate_float
@ignore_order(local=True)
@incompat
@pytest.mark.parametrize('data_gen', _init_list_with_nans_and_no_nans, ids=idfn)
@pytest.mark.parametrize('conf', get_params(_confs, params_markers_for_confs), ids=idfn)
def test_hash_reduction_pivot_with_nans(data_gen, conf):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, data_gen, length=100)
.groupby()
.pivot('b')
.agg(f.sum('c')),
conf=conf)
@approximate_float
@ignore_order(local=True)
@allow_non_gpu('HashAggregateExec', 'PivotFirst', 'AggregateExpression', 'Alias', 'GetArrayItem',
'Literal', 'ShuffleExchangeExec', 'HashPartitioning', 'NormalizeNaNAndZero')
@incompat
@pytest.mark.parametrize('data_gen', [_grpkey_floats_with_nulls_and_nans], ids=idfn)
def test_hash_pivot_groupby_duplicates_fallback(data_gen):
# PivotFirst will not work on the GPU when pivot_values has duplicates
assert_gpu_fallback_collect(
lambda spark: gen_df(spark, data_gen, length=100)
.groupby('a')
.pivot('b', ['10.0', '10.0'])
.agg(f.sum('c')),
"PivotFirst",
conf=_float_conf)
_repeat_agg_column_for_collect_op = [
RepeatSeqGen(BooleanGen(), length=15),
RepeatSeqGen(IntegerGen(), length=15),
RepeatSeqGen(LongGen(), length=15),
RepeatSeqGen(ShortGen(), length=15),
RepeatSeqGen(DateGen(), length=15),
RepeatSeqGen(TimestampGen(), length=15),
RepeatSeqGen(ByteGen(), length=15),
RepeatSeqGen(StringGen(), length=15),
RepeatSeqGen(FloatGen(), length=15),
RepeatSeqGen(DoubleGen(), length=15),
RepeatSeqGen(DecimalGen(precision=8, scale=3), length=15),
# case to verify the NAN_UNEQUAL strategy
RepeatSeqGen(FloatGen().with_special_case(math.nan, 200.0), length=5),
]
_full_repeat_agg_column_for_collect_op = [
RepeatSeqGen(_decimal_gen_38_10, length=15)
]
_gen_data_for_collect_op = [[
('a', RepeatSeqGen(LongGen(), length=20)),
('b', value_gen),
('c', UniqueLongGen())] for value_gen in _repeat_agg_column_for_collect_op]
_full_gen_data_for_collect_op = _gen_data_for_collect_op + [[
('a', RepeatSeqGen(LongGen(), length=20)),
('b', value_gen),
('c', UniqueLongGen())] for value_gen in _full_repeat_agg_column_for_collect_op]
_repeat_agg_column_for_collect_list_op = [
RepeatSeqGen(ArrayGen(int_gen), length=15),
RepeatSeqGen(all_basic_struct_gen, length=15),
RepeatSeqGen(StructGen([['c0', all_basic_struct_gen]]), length=15),
RepeatSeqGen(simple_string_to_string_map_gen, length=15)]
_gen_data_for_collect_list_op = _full_gen_data_for_collect_op + [[
('a', RepeatSeqGen(LongGen(), length=20)),
('b', value_gen)] for value_gen in _repeat_agg_column_for_collect_list_op]
_repeat_agg_column_for_collect_set_op = [
RepeatSeqGen(all_basic_struct_gen, length=15),
RepeatSeqGen(StructGen([
['c0', all_basic_struct_gen], ['c1', int_gen]]), length=15)]
# data generators for collect_set on nested Struct[Array] types
_repeat_agg_column_for_collect_set_op_nested = [
RepeatSeqGen(struct_array_gen, length=15),
RepeatSeqGen(StructGen([
['c0', struct_array_gen], ['c1', int_gen]]), length=15),
RepeatSeqGen(ArrayGen(all_basic_struct_gen), length=15)]
_array_of_array_gen = [RepeatSeqGen(ArrayGen(sub_gen), length=15) for sub_gen in single_level_array_gens]
_gen_data_for_collect_set_op = [[
('a', RepeatSeqGen(LongGen(), length=20)),
('b', value_gen)] for value_gen in _repeat_agg_column_for_collect_set_op]
_gen_data_for_collect_set_op_nested = [[
('a', RepeatSeqGen(LongGen(), length=20)),
('b', value_gen)] for value_gen in _repeat_agg_column_for_collect_set_op_nested + _array_of_array_gen]
_all_basic_gens_with_all_nans_cases = all_basic_gens + [SetValuesGen(t, [math.nan, None]) for t in [FloatType(), DoubleType()]]
# very simple test for just a count on decimal 128 values until we can support more with them
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', [decimal_gen_128bit], ids=idfn)
def test_decimal128_count_reduction(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, data_gen).selectExpr('count(a)'))
# very simple test for just a count on decimal 128 values until we can support more with them
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', [decimal_gen_128bit], ids=idfn)
def test_decimal128_count_group_by(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, byte_gen, data_gen)
.groupby('a')
.agg(f.count('b')))
# very simple test for just a min/max on decimal 128 values until we can support more with them
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', [decimal_gen_128bit], ids=idfn)
def test_decimal128_min_max_reduction(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, data_gen).selectExpr('min(a)', 'max(a)'))
# very simple test for just a min/max on decimal 128 values until we can support more with them
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', [decimal_gen_128bit], ids=idfn)
def test_decimal128_min_max_group_by(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, byte_gen, data_gen)
.groupby('a')
.agg(f.min('b'), f.max('b')))
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', _all_basic_gens_with_all_nans_cases, ids=idfn)
def test_min_max_group_by(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, byte_gen, data_gen)
.groupby('a')
.agg(f.min('b'), f.max('b')))
# to avoid ordering issues with collect_list we do it all in a single task
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', _gen_data_for_collect_list_op, ids=idfn)
@pytest.mark.parametrize('use_obj_hash_agg', [True, False], ids=idfn)
def test_hash_groupby_collect_list(data_gen, use_obj_hash_agg):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, data_gen, length=100).coalesce(1)
.groupby('a')
.agg(f.collect_list('b')),
conf={'spark.sql.execution.useObjectHashAggregateExec': str(use_obj_hash_agg).lower(),
'spark.sql.shuffle.partitions': '1'})
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', _full_gen_data_for_collect_op, ids=idfn)
def test_hash_groupby_collect_set(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, data_gen, length=100)
.groupby('a')
.agg(f.sort_array(f.collect_set('b')), f.count('b')))
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', _gen_data_for_collect_set_op, ids=idfn)
def test_hash_groupby_collect_set_on_nested_type(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, data_gen, length=100)
.groupby('a')
.agg(f.sort_array(f.collect_set('b'))))
# Note, using sort_array() on the CPU, because sort_array() does not yet
# support sorting certain nested/arbitrary types on the GPU
# See https://github.com/NVIDIA/spark-rapids/issues/3715
# and https://github.com/rapidsai/cudf/issues/11222
@ignore_order(local=True)
@allow_non_gpu("ProjectExec", "SortArray")
@pytest.mark.parametrize('data_gen', _gen_data_for_collect_set_op_nested, ids=idfn)
def test_hash_groupby_collect_set_on_nested_array_type(data_gen):
conf = copy_and_update(_float_conf, {
"spark.rapids.sql.castFloatToString.enabled": "true",
"spark.rapids.sql.expression.SortArray": "false"
})
def do_it(spark):
df = gen_df(spark, data_gen, length=100)\
.groupby('a')\
.agg(f.collect_set('b').alias("collect_set"))
# pull out the rdd and schema and create a new dataframe to run SortArray
# to handle Spark 3.3.0+ optimization that moves SortArray from ProjectExec
# to ObjectHashAggregateExec
return spark.createDataFrame(df.rdd, schema=df.schema)\
.selectExpr("sort_array(collect_set)")
assert_gpu_and_cpu_are_equal_collect(do_it, conf=conf)
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', _full_gen_data_for_collect_op, ids=idfn)
def test_hash_reduction_collect_set(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, data_gen, length=100)
.agg(f.sort_array(f.collect_set('b')), f.count('b')))
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', _gen_data_for_collect_set_op, ids=idfn)
def test_hash_reduction_collect_set_on_nested_type(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, data_gen, length=100)
.agg(f.sort_array(f.collect_set('b'))))
# Note, using sort_array() on the CPU, because sort_array() does not yet
# support sorting certain nested/arbitrary types on the GPU
# See https://github.com/NVIDIA/spark-rapids/issues/3715
# and https://github.com/rapidsai/cudf/issues/11222
@ignore_order(local=True)
@allow_non_gpu("ProjectExec", "SortArray")
@pytest.mark.parametrize('data_gen', _gen_data_for_collect_set_op_nested, ids=idfn)
def test_hash_reduction_collect_set_on_nested_array_type(data_gen):
conf = copy_and_update(_float_conf, {
"spark.rapids.sql.castFloatToString.enabled": "true",
"spark.rapids.sql.expression.SortArray": "false"
})
def do_it(spark):
df = gen_df(spark, data_gen, length=100)\
.agg(f.collect_set('b').alias("collect_set"))
# pull out the rdd and schema and create a new dataframe to run SortArray
# to handle Spark 3.3.0+ optimization that moves SortArray from ProjectExec
# to ObjectHashAggregateExec
return spark.createDataFrame(df.rdd, schema=df.schema)\
.selectExpr("sort_array(collect_set)")
assert_gpu_and_cpu_are_equal_collect(do_it, conf=conf)
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', _full_gen_data_for_collect_op, ids=idfn)
def test_hash_groupby_collect_with_single_distinct(data_gen):
# test collect_ops with other distinct aggregations
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, data_gen, length=100)
.groupby('a')
.agg(f.sort_array(f.collect_list('b')),
f.sort_array(f.collect_set('b')),
f.countDistinct('c'),
f.count('c')))
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', _gen_data_for_collect_op, ids=idfn)
def test_hash_groupby_single_distinct_collect(data_gen):
# test distinct collect
sql = """select a,
sort_array(collect_list(distinct b)),
sort_array(collect_set(distinct b))
from tbl group by a"""
assert_gpu_and_cpu_are_equal_sql(
df_fun=lambda spark: gen_df(spark, data_gen, length=100),
table_name="tbl", sql=sql)
# test distinct collect with nonDistinct aggregations
sql = """select a,
sort_array(collect_list(distinct b)),
sort_array(collect_set(b)),
count(distinct b),
count(c)
from tbl group by a"""
assert_gpu_and_cpu_are_equal_sql(
df_fun=lambda spark: gen_df(spark, data_gen, length=100),
table_name="tbl", sql=sql)
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', _gen_data_for_collect_op, ids=idfn)
def test_hash_groupby_collect_with_multi_distinct(data_gen):
def spark_fn(spark_session):
return gen_df(spark_session, data_gen, length=100).groupby('a').agg(
f.sort_array(f.collect_list('b')),
f.sort_array(f.collect_set('b')),
f.countDistinct('b'),
f.countDistinct('c'))
assert_gpu_and_cpu_are_equal_collect(spark_fn)
_replace_modes_non_distinct = [
# Spark: GPU(Final) -> CPU(Partial)
# Databricks runtime: GPU(Complete)
'final|complete',
# Spark: CPU(Final) -> GPU(Partial)
# Databricks runtime: CPU(Complete)
'partial',
]
@ignore_order(local=True)
@allow_non_gpu('ObjectHashAggregateExec', 'SortAggregateExec',
'ShuffleExchangeExec', 'HashPartitioning', 'SortExec',
'SortArray', 'Alias', 'Literal', 'Count', 'CollectList', 'CollectSet',
'AggregateExpression', 'ProjectExec')
@pytest.mark.parametrize('data_gen', _full_gen_data_for_collect_op, ids=idfn)
@pytest.mark.parametrize('replace_mode', _replace_modes_non_distinct, ids=idfn)
@pytest.mark.parametrize('aqe_enabled', ['false', 'true'], ids=idfn)
@pytest.mark.parametrize('use_obj_hash_agg', ['false', 'true'], ids=idfn)
def test_hash_groupby_collect_partial_replace_fallback(data_gen,
replace_mode,
aqe_enabled,
use_obj_hash_agg):
conf = {'spark.rapids.sql.hashAgg.replaceMode': replace_mode,
'spark.sql.adaptive.enabled': aqe_enabled,
'spark.sql.execution.useObjectHashAggregateExec': use_obj_hash_agg}
cpu_clz, gpu_clz = ['CollectList', 'CollectSet'], ['GpuCollectList', 'GpuCollectSet']
exist_clz, non_exist_clz = [], []
    # For aggregations without distinct, the Databricks runtime removes the partial Aggregate
    # stage (the map-side combine), leaving only a single AggregateExec. So, we need to
    # set the expected exist_classes according to the runtime.
if is_databricks_runtime():
if replace_mode == 'partial':
exist_clz, non_exist_clz = cpu_clz, gpu_clz
else:
exist_clz, non_exist_clz = gpu_clz, cpu_clz
else:
exist_clz = cpu_clz + gpu_clz
assert_cpu_and_gpu_are_equal_collect_with_capture(
lambda spark: gen_df(spark, data_gen, length=100)
.groupby('a')
.agg(f.sort_array(f.collect_list('b')), f.sort_array(f.collect_set('b'))),
exist_classes=','.join(exist_clz),
non_exist_classes=','.join(non_exist_clz),
conf=conf)
_replace_modes_single_distinct = [
# Spark: CPU -> CPU -> GPU(PartialMerge) -> GPU(Partial)
# Databricks runtime: CPU(Final and Complete) -> GPU(PartialMerge)
'partial|partialMerge',
# Spark: GPU(Final) -> GPU(PartialMerge&Partial) -> CPU(PartialMerge) -> CPU(Partial)
# Databricks runtime: GPU(Final&Complete) -> CPU(PartialMerge)
'final|partialMerge&partial|final&complete',
]
@ignore_order(local=True)
@allow_non_gpu('ObjectHashAggregateExec', 'SortAggregateExec',
'ShuffleExchangeExec', 'HashPartitioning', 'SortExec',
'SortArray', 'Alias', 'Literal', 'Count', 'CollectList', 'CollectSet',
'AggregateExpression', 'ProjectExec')
@pytest.mark.parametrize('data_gen', _full_gen_data_for_collect_op, ids=idfn)
@pytest.mark.parametrize('replace_mode', _replace_modes_single_distinct, ids=idfn)
@pytest.mark.parametrize('aqe_enabled', ['false', 'true'], ids=idfn)
@pytest.mark.parametrize('use_obj_hash_agg', ['false', 'true'], ids=idfn)
@pytest.mark.xfail(condition=is_databricks104_or_later(), reason='https://github.com/NVIDIA/spark-rapids/issues/4963')
def test_hash_groupby_collect_partial_replace_with_distinct_fallback(data_gen,
replace_mode,
aqe_enabled,
use_obj_hash_agg):
conf = {'spark.rapids.sql.hashAgg.replaceMode': replace_mode,
'spark.sql.adaptive.enabled': aqe_enabled,
'spark.sql.execution.useObjectHashAggregateExec': use_obj_hash_agg}
# test with single Distinct
assert_cpu_and_gpu_are_equal_collect_with_capture(
lambda spark: gen_df(spark, data_gen, length=100)
.groupby('a')
.agg(f.sort_array(f.collect_list('b')),
f.sort_array(f.collect_set('b')),
f.countDistinct('c')),
exist_classes='CollectList,CollectSet,GpuCollectList,GpuCollectSet',
conf=conf)
# test with Distinct Collect
assert_cpu_and_gpu_are_equal_sql_with_capture(
lambda spark: gen_df(spark, data_gen, length=100),
table_name='table',
exist_classes='CollectSet,GpuCollectSet,Count,GpuCount',
sql="""
select a,
sort_array(collect_list(distinct c)),
sort_array(collect_set(b)),
count(c)
from table
group by a""",
conf=conf)
@ignore_order(local=True)
@allow_non_gpu('ObjectHashAggregateExec', 'ShuffleExchangeExec',
'HashAggregateExec', 'HashPartitioning',
'ApproximatePercentile', 'Alias', 'Literal', 'AggregateExpression')
def test_hash_groupby_typed_imperative_agg_without_gpu_implementation_fallback():
assert_cpu_and_gpu_are_equal_sql_with_capture(
lambda spark: gen_df(spark, [('k', RepeatSeqGen(LongGen(), length=20)),
('v', UniqueLongGen())], length=100),
exist_classes='ApproximatePercentile,ObjectHashAggregateExec',
non_exist_classes='GpuApproximatePercentile,GpuObjectHashAggregateExec',
table_name='table',
sql="""select k,
approx_percentile(v, array(0.25, 0.5, 0.75)) from table group by k""")
@approximate_float
@ignore_order
@incompat
@pytest.mark.parametrize('data_gen', _init_list_no_nans, ids=idfn)
@pytest.mark.parametrize('conf', get_params(_confs, params_markers_for_confs), ids=idfn)
def test_hash_multiple_mode_query(data_gen, conf):
print_params(data_gen)
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, data_gen, length=100)
.groupby('a')
.agg(f.count('a'),
f.avg('b'),
f.avg('a'),
f.countDistinct('b'),
f.sum('a'),
f.min('a'),
f.max('a'),
f.sumDistinct('b'),
f.countDistinct('c')
), conf=conf)
@approximate_float
@ignore_order
@incompat
@pytest.mark.parametrize('data_gen', _init_list_no_nans, ids=idfn)
@pytest.mark.parametrize('conf', get_params(_confs, params_markers_for_confs),
ids=idfn)
def test_hash_multiple_mode_query_avg_distincts(data_gen, conf):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, data_gen, length=100)
.selectExpr('avg(distinct a)', 'avg(distinct b)','avg(distinct c)'),
conf=conf)
@approximate_float
@ignore_order
@incompat
@pytest.mark.parametrize('data_gen', _init_list_no_nans, ids=idfn)
@pytest.mark.parametrize('conf', get_params(_confs, params_markers_for_confs), ids=idfn)
def test_hash_query_multiple_distincts_with_non_distinct(data_gen, conf):
local_conf = copy_and_update(conf, {'spark.sql.legacy.allowParameterlessCount': 'true'})
assert_gpu_and_cpu_are_equal_sql(
lambda spark : gen_df(spark, data_gen, length=100),
"hash_agg_table",
'select avg(a),' +
'avg(distinct b),' +
'avg(distinct c),' +
'sum(distinct a),' +
'count(distinct b),' +
'count(a),' +
'count(),' +
'sum(a),' +
'min(a),'+
'max(a) from hash_agg_table group by a',
conf=local_conf)
@approximate_float
@ignore_order
@incompat
@pytest.mark.parametrize('data_gen', _init_list_no_nans, ids=idfn)
@pytest.mark.parametrize('conf', get_params(_confs, params_markers_for_confs), ids=idfn)
def test_hash_query_max_with_multiple_distincts(data_gen, conf):
local_conf = copy_and_update(conf, {'spark.sql.legacy.allowParameterlessCount': 'true'})
assert_gpu_and_cpu_are_equal_sql(
lambda spark : gen_df(spark, data_gen, length=100),
"hash_agg_table",
'select max(c),' +
'sum(distinct a),' +
'count(),' +
'count(distinct b) from hash_agg_table group by a',
conf=local_conf)
@ignore_order
@pytest.mark.parametrize('data_gen', _init_list_no_nans, ids=idfn)
@pytest.mark.parametrize('conf', get_params(_confs, params_markers_for_confs), ids=idfn)
def test_hash_count_with_filter(data_gen, conf):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, data_gen, length=100)
.selectExpr('count(a) filter (where c > 50)'),
conf=conf)
@approximate_float
@ignore_order
@incompat
@pytest.mark.parametrize('data_gen', _init_list_no_nans + [_grpkey_short_mid_decimals, _grpkey_short_big_decimals], ids=idfn)
@pytest.mark.parametrize('conf', get_params(_confs, params_markers_for_confs), ids=idfn)
def test_hash_multiple_filters(data_gen, conf):
assert_gpu_and_cpu_are_equal_sql(
lambda spark : gen_df(spark, data_gen, length=100),
"hash_agg_table",
'select count(a) filter (where c > 50),' +
'count(b) filter (where c > 100),' +
'avg(b) filter (where b > 20),' +
'min(a), max(b) filter (where c > 250) from hash_agg_table group by a',
conf)
@approximate_float
@ignore_order
@pytest.mark.parametrize('data_gen', [_grpkey_floats_with_nan_zero_grouping_keys,
_grpkey_doubles_with_nan_zero_grouping_keys], ids=idfn)
def test_hash_agg_with_nan_keys(data_gen):
local_conf = copy_and_update(_float_conf, {'spark.sql.legacy.allowParameterlessCount': 'true'})
assert_gpu_and_cpu_are_equal_sql(
lambda spark : gen_df(spark, data_gen, length=1024),
"hash_agg_table",
'select a, '
'count(*) as count_stars, '
'count() as count_parameterless, '
'count(b) as count_bees, '
'sum(b) as sum_of_bees, '
'max(c) as max_seas, '
'min(c) as min_seas, '
'count(distinct c) as count_distinct_cees, '
'avg(c) as average_seas '
'from hash_agg_table group by a',
local_conf)
@ignore_order
@pytest.mark.parametrize('data_gen', [_grpkey_structs_with_non_nested_children,
_grpkey_nested_structs], ids=idfn)
def test_hash_agg_with_struct_keys(data_gen):
local_conf = copy_and_update(_float_conf, {'spark.sql.legacy.allowParameterlessCount': 'true'})
assert_gpu_and_cpu_are_equal_sql(
lambda spark : gen_df(spark, data_gen, length=1024),
"hash_agg_table",
'select a, '
'count(*) as count_stars, '
'count() as count_parameterless, '
'count(b) as count_bees, '
'sum(b) as sum_of_bees, '
'max(c) as max_seas, '
'min(c) as min_seas, '
'count(distinct c) as count_distinct_cees, '
'avg(c) as average_seas '
'from hash_agg_table group by a',
conf=local_conf)
@ignore_order(local=True)
@allow_non_gpu('HashAggregateExec', 'Avg', 'Count', 'Max', 'Min', 'Sum', 'Average',
'Cast', 'Literal', 'Alias', 'AggregateExpression',
'ShuffleExchangeExec', 'HashPartitioning')
@pytest.mark.parametrize('data_gen', [_grpkey_nested_structs_with_array_child], ids=idfn)
def test_hash_agg_with_struct_of_array_fallback(data_gen):
local_conf = copy_and_update(_float_conf, {'spark.sql.legacy.allowParameterlessCount': 'true'})
assert_cpu_and_gpu_are_equal_sql_with_capture(
lambda spark : gen_df(spark, data_gen, length=100),
'select a, '
'count(*) as count_stars, '
'count() as count_parameterless, '
'count(b) as count_bees, '
'sum(b) as sum_of_bees, '
'max(c) as max_seas, '
'min(c) as min_seas, '
'avg(c) as average_seas '
'from hash_agg_table group by a',
"hash_agg_table",
exist_classes='HashAggregateExec',
non_exist_classes='GpuHashAggregateExec',
conf=local_conf)
@approximate_float
@ignore_order
@pytest.mark.parametrize('data_gen', [ _grpkey_doubles_with_nan_zero_grouping_keys], ids=idfn)
def test_count_distinct_with_nan_floats(data_gen):
assert_gpu_and_cpu_are_equal_sql(
lambda spark : gen_df(spark, data_gen, length=1024),
"hash_agg_table",
'select a, count(distinct b) as count_distinct_bees from hash_agg_table group by a',
_float_conf)
# TODO: Literal tests
# REDUCTIONS
_nested_gens = array_gens_sample + struct_gens_sample + map_gens_sample + [binary_gen]
@pytest.mark.parametrize('data_gen', decimal_gens, ids=idfn)
def test_first_last_reductions_decimal_types(data_gen):
assert_gpu_and_cpu_are_equal_collect(
# Coalesce and sort are to make sure that first and last, which are non-deterministic
# become deterministic
lambda spark: unary_op_df(spark, data_gen).coalesce(1).selectExpr(
'first(a)', 'last(a)', 'first(a, true)', 'last(a, true)'))
@pytest.mark.parametrize('data_gen', _nested_gens, ids=idfn)
def test_first_last_reductions_nested_types(data_gen):
assert_gpu_and_cpu_are_equal_collect(
# Coalesce and sort are to make sure that first and last, which are non-deterministic
# become deterministic
lambda spark: unary_op_df(spark, data_gen).coalesce(1).selectExpr(
'first(a)', 'last(a)', 'first(a, true)', 'last(a, true)'))
@pytest.mark.parametrize('data_gen', _all_basic_gens_with_all_nans_cases, ids=idfn)
def test_generic_reductions(data_gen):
local_conf = copy_and_update(_float_conf, {'spark.sql.legacy.allowParameterlessCount': 'true'})
assert_gpu_and_cpu_are_equal_collect(
# Coalesce and sort are to make sure that first and last, which are non-deterministic
# become deterministic
lambda spark : unary_op_df(spark, data_gen) \
.coalesce(1).selectExpr(
'min(a)',
'max(a)',
'first(a)',
'last(a)',
'count(a)',
'count()',
'count(1)'),
conf=local_conf)
@pytest.mark.parametrize('data_gen', all_gen + _nested_gens, ids=idfn)
def test_count(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen) \
.selectExpr(
'count(a)',
'count()',
'count()',
'count(1)'),
conf = {'spark.sql.legacy.allowParameterlessCount': 'true'})
@pytest.mark.parametrize('data_gen', all_basic_gens, ids=idfn)
def test_distinct_count_reductions(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).selectExpr(
'count(DISTINCT a)'))
@pytest.mark.parametrize('data_gen', [float_gen, double_gen], ids=idfn)
def test_distinct_float_count_reductions(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : binary_op_df(spark, data_gen).selectExpr(
'count(DISTINCT a)'))
@approximate_float
@pytest.mark.parametrize('data_gen', numeric_gens + [decimal_gen_64bit, decimal_gen_128bit], ids=idfn)
def test_arithmetic_reductions(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : unary_op_df(spark, data_gen).selectExpr(
'sum(a)',
'avg(a)'),
conf = _float_conf)
@pytest.mark.parametrize('data_gen',
all_basic_gens + decimal_gens + _nested_gens,
ids=idfn)
def test_collect_list_reductions(data_gen):
assert_gpu_and_cpu_are_equal_collect(
# coalescing because collect_list is not deterministic
lambda spark: unary_op_df(spark, data_gen).coalesce(1).selectExpr('collect_list(a)'),
conf=_float_conf)
_no_neg_zero_all_basic_gens = [byte_gen, short_gen, int_gen, long_gen,
        # -0.0 cannot work because cudf treats -0.0 == 0.0 for distinct, and
        # Spark fixed the ordering of 0.0 and -0.0 in Spark 3.1
FloatGen(special_cases=[FLOAT_MIN, FLOAT_MAX, 0.0, 1.0, -1.0]), DoubleGen(special_cases=[]),
string_gen, boolean_gen, date_gen, timestamp_gen]
_struct_only_nested_gens = [all_basic_struct_gen,
StructGen([['child0', byte_gen], ['child1', all_basic_struct_gen]]),
StructGen([])]
@pytest.mark.parametrize('data_gen',
_no_neg_zero_all_basic_gens + decimal_gens + _struct_only_nested_gens,
ids=idfn)
def test_collect_set_reductions(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: unary_op_df(spark, data_gen).selectExpr('sort_array(collect_set(a))'),
conf=_float_conf)
def test_collect_empty():
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.sql("select collect_list(null)"))
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.sql("select collect_set(null)"))
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', all_gen + _nested_gens, ids=idfn)
def test_groupby_first_last(data_gen):
gen_fn = [('a', RepeatSeqGen(LongGen(), length=20)), ('b', data_gen)]
agg_fn = lambda df: df.groupBy('a').agg(
f.first('b'), f.last('b'), f.first('b', True), f.last('b', True))
assert_gpu_and_cpu_are_equal_collect(
# First and last are not deterministic when they are run in a real distributed setup.
        # We set parallelism to 1 to prevent nondeterministic results due to the distributed setup.
lambda spark: agg_fn(gen_df(spark, gen_fn, num_slices=1)))
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', all_gen + _struct_only_nested_gens, ids=idfn)
def test_sorted_groupby_first_last(data_gen):
gen_fn = [('a', RepeatSeqGen(LongGen(), length=20)), ('b', data_gen)]
# sort by more than the group by columns to be sure that first/last don't remove the ordering
agg_fn = lambda df: df.orderBy('a', 'b').groupBy('a').agg(
f.first('b'), f.last('b'), f.first('b', True), f.last('b', True))
assert_gpu_and_cpu_are_equal_collect(
# First and last are not deterministic when they are run in a real distributed setup.
# We set parallelism and partitions to 1 to prevent nondeterministic results because
# of distributed setups.
lambda spark: agg_fn(gen_df(spark, gen_fn, num_slices=1)),
conf = {'spark.sql.shuffle.partitions': '1'})
# Spark has a sorting bug with decimals, see https://issues.apache.org/jira/browse/SPARK-40129.
# Have pytest do the sorting rather than Spark as a workaround.
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', all_gen, ids=idfn)
@pytest.mark.parametrize('count_func', [f.count, f.countDistinct])
def test_agg_count(data_gen, count_func):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : gen_df(spark, [('a', data_gen), ('b', data_gen)],
length=1024).groupBy('a').agg(count_func("b")))
# Spark has a sorting bug with decimals, see https://issues.apache.org/jira/browse/SPARK-40129.
# Have pytest do the sorting rather than Spark as a workaround.
@ignore_order(local=True)
@allow_non_gpu('HashAggregateExec', 'Alias', 'AggregateExpression', 'Cast',
'HashPartitioning', 'ShuffleExchangeExec', 'Count')
@pytest.mark.parametrize('data_gen',
[ArrayGen(StructGen([['child0', byte_gen], ['child1', string_gen], ['child2', float_gen]]))
, binary_gen], ids=idfn)
@pytest.mark.parametrize('count_func', [f.count, f.countDistinct])
def test_groupby_list_types_fallback(data_gen, count_func):
assert_gpu_fallback_collect(
lambda spark : gen_df(spark, [('a', data_gen), ('b', data_gen)],
length=1024).groupBy('a').agg(count_func("b")),
"HashAggregateExec")
def subquery_create_temp_views(spark, expr):
t1 = "select * from values (1,2) as t1(a,b)"
t2 = "select * from values (3,4) as t2(c,d)"
spark.sql(t1).createOrReplaceTempView("t1")
spark.sql(t2).createOrReplaceTempView("t2")
return spark.sql(expr)
# Adding these tests as they were added in SPARK-31620, and were shown to break in
# SPARK-32031, but our GPU hash aggregate does not seem to exhibit the same failure.
# The tests are being added more as a sanity check.
# Adaptive is being turned on and off so we invoke re-optimization at the logical plan level.
@pytest.mark.parametrize('adaptive', ["true", "false"])
@pytest.mark.parametrize('expr', [
"select sum(if(c > (select a from t1), d, 0)) as csum from t2",
"select c, sum(if(c > (select a from t1), d, 0)) as csum from t2 group by c",
"select avg(distinct(d)), sum(distinct(if(c > (select a from t1), d, 0))) as csum " +
"from t2 group by c",
"select sum(distinct(if(c > (select sum(distinct(a)) from t1), d, 0))) as csum " +
"from t2 group by c"
])
def test_subquery_in_agg(adaptive, expr):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: subquery_create_temp_views(spark, expr),
conf = {"spark.sql.adaptive.enabled" : adaptive})
# TODO support multi-level structs https://github.com/NVIDIA/spark-rapids/issues/2438
def assert_single_level_struct(df):
first_level_dt = df.schema['a'].dataType
second_level_dt = first_level_dt['aa'].dataType
assert isinstance(first_level_dt, StructType)
assert isinstance(second_level_dt, IntegerType)
# Work around the value deduplication bug across rows and struct columns
# (see https://github.com/apache/spark/pull/31778) by injecting
# extra literal columns
#
def workaround_dedupe_by_value(df, num_cols):
col_id_rng = range(0, num_cols)
return reduce(lambda df, i: df.withColumn(f"fake_col_{i}", f.lit(i)), col_id_rng, df)
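# For example (illustrative only), workaround_dedupe_by_value(some_agg_df, 3) appends the
# literal columns fake_col_0, fake_col_1 and fake_col_2 to the aggregated DataFrame.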
@allow_non_gpu(any = True)
@pytest.mark.parametrize('key_data_gen', [
StructGen([
('aa', IntegerGen(min_val=0, max_val=9)),
], nullable=False),
StructGen([
('aa', IntegerGen(min_val=0, max_val=4)),
('ab', IntegerGen(min_val=5, max_val=9)),
], nullable=False),
], ids=idfn)
@ignore_order(local=True)
def test_struct_groupby_count(key_data_gen):
def group_by_count(spark):
df = two_col_df(spark, key_data_gen, IntegerGen())
assert_single_level_struct(df)
return workaround_dedupe_by_value(df.groupBy(df.a).count(), 3)
assert_gpu_and_cpu_are_equal_collect(group_by_count)
@pytest.mark.parametrize('cast_struct_tostring', ['LEGACY', 'SPARK311+'])
@pytest.mark.parametrize('key_data_gen', [
StructGen([
('aa', IntegerGen(min_val=0, max_val=9)),
], nullable=False),
StructGen([
('aa', IntegerGen(min_val=0, max_val=4)),
('ab', IntegerGen(min_val=5, max_val=9)),
], nullable=False)
], ids=idfn)
@ignore_order(local=True)
def test_struct_cast_groupby_count(cast_struct_tostring, key_data_gen):
def _group_by_struct_or_cast(spark):
df = two_col_df(spark, key_data_gen, IntegerGen())
assert_single_level_struct(df)
return df.groupBy(df.a.cast(StringType())).count()
assert_gpu_and_cpu_are_equal_collect(_group_by_struct_or_cast, {
'spark.sql.legacy.castComplexTypesToString.enabled': cast_struct_tostring == 'LEGACY'
})
@allow_non_gpu(any = True)
@pytest.mark.parametrize('key_data_gen', [
StructGen([
('a', StructGen([
('aa', IntegerGen(min_val=0, max_val=9))
]))], nullable=False),
StructGen([
('a', StructGen([
('aa', IntegerGen(min_val=0, max_val=4)),
('ab', IntegerGen(min_val=5, max_val=9)),
]))], nullable=False),
], ids=idfn)
@ignore_order(local=True)
def test_struct_count_distinct(key_data_gen):
def _count_distinct_by_struct(spark):
df = gen_df(spark, key_data_gen)
assert_single_level_struct(df)
return df.agg(f.countDistinct(df.a))
assert_gpu_and_cpu_are_equal_collect(_count_distinct_by_struct)
@pytest.mark.parametrize('cast_struct_tostring', ['LEGACY', 'SPARK311+'])
@pytest.mark.parametrize('key_data_gen', [
StructGen([
('a', StructGen([
('aa', IntegerGen(min_val=0, max_val=9))
]))], nullable=False),
StructGen([
('a', StructGen([
('aa', IntegerGen(min_val=0, max_val=4)),
('ab', IntegerGen(min_val=5, max_val=9)),
]))], nullable=False),
], ids=idfn)
@ignore_order(local=True)
def test_struct_count_distinct_cast(cast_struct_tostring, key_data_gen):
def _count_distinct_by_struct(spark):
df = gen_df(spark, key_data_gen)
assert_single_level_struct(df)
return df.agg(f.countDistinct(df.a.cast(StringType())))
assert_gpu_and_cpu_are_equal_collect(_count_distinct_by_struct, {
'spark.sql.legacy.castComplexTypesToString.enabled': cast_struct_tostring == 'LEGACY'
})
@ignore_order(local=True)
def test_reduction_nested_struct():
def do_it(spark):
df = unary_op_df(spark, StructGen([('aa', StructGen([('aaa', IntegerGen(min_val=0, max_val=4))]))]))
return df.agg(f.sum(df.a.aa.aaa))
assert_gpu_and_cpu_are_equal_collect(do_it)
@ignore_order(local=True)
def test_reduction_nested_array():
def do_it(spark):
df = unary_op_df(spark, ArrayGen(StructGen([('aa', IntegerGen(min_val=0, max_val=4))])))
return df.agg(f.sum(df.a[1].aa))
assert_gpu_and_cpu_are_equal_collect(do_it)
# The map here is a child, not a top-level column, because we only support GetMapValue on String-to-String maps.
@ignore_order(local=True)
def test_reduction_nested_map():
def do_it(spark):
df = unary_op_df(spark, ArrayGen(MapGen(StringGen('a{1,5}', nullable=False), StringGen('[ab]{1,5}'))))
return df.agg(f.min(df.a[1]["a"]))
assert_gpu_and_cpu_are_equal_collect(do_it)
@ignore_order(local=True)
def test_agg_nested_struct():
def do_it(spark):
df = two_col_df(spark, StringGen('k{1,5}'), StructGen([('aa', StructGen([('aaa', IntegerGen(min_val=0, max_val=4))]))]))
return df.groupBy('a').agg(f.sum(df.b.aa.aaa))
assert_gpu_and_cpu_are_equal_collect(do_it)
@ignore_order(local=True)
def test_agg_nested_array():
def do_it(spark):
df = two_col_df(spark, StringGen('k{1,5}'), ArrayGen(StructGen([('aa', IntegerGen(min_val=0, max_val=4))])))
return df.groupBy('a').agg(f.sum(df.b[1].aa))
assert_gpu_and_cpu_are_equal_collect(do_it)
# The map here is a child, not a top-level column, because we only support GetMapValue on String-to-String maps.
@ignore_order(local=True)
def test_agg_nested_map():
def do_it(spark):
df = two_col_df(spark, StringGen('k{1,5}'), ArrayGen(MapGen(StringGen('a{1,5}', nullable=False), StringGen('[ab]{1,5}'))))
return df.groupBy('a').agg(f.min(df.b[1]["a"]))
assert_gpu_and_cpu_are_equal_collect(do_it)
@incompat
@pytest.mark.parametrize('aqe_enabled', ['false', 'true'], ids=idfn)
def test_hash_groupby_approx_percentile_reduction(aqe_enabled):
conf = {'spark.sql.adaptive.enabled': aqe_enabled}
compare_percentile_approx(
lambda spark: gen_df(spark, [('v', DoubleGen())], length=100),
[0.05, 0.25, 0.5, 0.75, 0.95], conf, reduction = True)
@incompat
@pytest.mark.parametrize('aqe_enabled', ['false', 'true'], ids=idfn)
def test_hash_groupby_approx_percentile_reduction_single_row(aqe_enabled):
conf = {'spark.sql.adaptive.enabled': aqe_enabled}
compare_percentile_approx(
lambda spark: gen_df(spark, [('v', DoubleGen())], length=1),
[0.05, 0.25, 0.5, 0.75, 0.95], conf, reduction = True)
@incompat
@pytest.mark.parametrize('aqe_enabled', ['false', 'true'], ids=idfn)
def test_hash_groupby_approx_percentile_reduction_no_rows(aqe_enabled):
conf = {'spark.sql.adaptive.enabled': aqe_enabled}
compare_percentile_approx(
lambda spark: gen_df(spark, [('v', DoubleGen())], length=0),
[0.05, 0.25, 0.5, 0.75, 0.95], conf, reduction = True)
@incompat
@pytest.mark.parametrize('aqe_enabled', ['false', 'true'], ids=idfn)
def test_hash_groupby_approx_percentile_byte(aqe_enabled):
conf = {'spark.sql.adaptive.enabled': aqe_enabled}
compare_percentile_approx(
lambda spark: gen_df(spark, [('k', StringGen(nullable=False)),
('v', ByteGen())], length=100),
[0.05, 0.25, 0.5, 0.75, 0.95], conf)
@incompat
@pytest.mark.parametrize('aqe_enabled', ['false', 'true'], ids=idfn)
def test_hash_groupby_approx_percentile_byte_scalar(aqe_enabled):
conf = {'spark.sql.adaptive.enabled': aqe_enabled}
compare_percentile_approx(
lambda spark: gen_df(spark, [('k', StringGen(nullable=False)),
('v', ByteGen())], length=100),
0.5, conf)
@incompat
@pytest.mark.parametrize('aqe_enabled', ['false', 'true'], ids=idfn)
def test_hash_groupby_approx_percentile_long_repeated_keys(aqe_enabled):
conf = {'spark.sql.adaptive.enabled': aqe_enabled}
compare_percentile_approx(
lambda spark: gen_df(spark, [('k', RepeatSeqGen(LongGen(), length=20)),
('v', UniqueLongGen())], length=100),
[0.05, 0.25, 0.5, 0.75, 0.95], conf)
@incompat
@pytest.mark.parametrize('aqe_enabled', ['false', 'true'], ids=idfn)
def test_hash_groupby_approx_percentile_long(aqe_enabled):
conf = {'spark.sql.adaptive.enabled': aqe_enabled}
compare_percentile_approx(
lambda spark: gen_df(spark, [('k', StringGen(nullable=False)),
('v', UniqueLongGen())], length=100),
[0.05, 0.25, 0.5, 0.75, 0.95], conf)
@incompat
@pytest.mark.parametrize('aqe_enabled', ['false', 'true'], ids=idfn)
def test_hash_groupby_approx_percentile_long_single(aqe_enabled):
conf = {'spark.sql.adaptive.enabled': aqe_enabled}
compare_percentile_approx(
lambda spark: gen_df(spark, [('k', StringGen(nullable=False)),
('v', UniqueLongGen())], length=100),
0.5, conf)
@incompat
@pytest.mark.parametrize('aqe_enabled', ['false', 'true'], ids=idfn)
def test_hash_groupby_approx_percentile_double(aqe_enabled):
conf = {'spark.sql.adaptive.enabled': aqe_enabled}
compare_percentile_approx(
lambda spark: gen_df(spark, [('k', StringGen(nullable=False)),
('v', DoubleGen())], length=100),
[0.05, 0.25, 0.5, 0.75, 0.95], conf)
@incompat
@pytest.mark.parametrize('aqe_enabled', ['false', 'true'], ids=idfn)
def test_hash_groupby_approx_percentile_double_single(aqe_enabled):
conf = {'spark.sql.adaptive.enabled': aqe_enabled}
compare_percentile_approx(
lambda spark: gen_df(spark, [('k', StringGen(nullable=False)),
('v', DoubleGen())], length=100),
0.05, conf)
@incompat
@pytest.mark.parametrize('aqe_enabled', ['false', 'true'], ids=idfn)
@ignore_order(local=True)
@allow_non_gpu('TakeOrderedAndProjectExec', 'Alias', 'Cast', 'ObjectHashAggregateExec', 'AggregateExpression',
'ApproximatePercentile', 'Literal', 'ShuffleExchangeExec', 'HashPartitioning', 'CollectLimitExec')
def test_hash_groupby_approx_percentile_partial_fallback_to_cpu(aqe_enabled):
conf = {
'spark.rapids.sql.hashAgg.replaceMode': 'partial',
'spark.sql.adaptive.enabled': aqe_enabled
}
def approx_percentile_query(spark):
df = gen_df(spark, [('k', StringGen(nullable=False)),
('v', DoubleGen())], length=100)
df.createOrReplaceTempView("t")
return spark.sql("select k, approx_percentile(v, array(0.1, 0.2)) from t group by k")
assert_gpu_fallback_collect(lambda spark: approx_percentile_query(spark), 'ApproximatePercentile', conf)
@incompat
@ignore_order(local=True)
def test_hash_groupby_approx_percentile_decimal32():
compare_percentile_approx(
lambda spark: gen_df(spark, [('k', RepeatSeqGen(ByteGen(nullable=False), length=2)),
('v', DecimalGen(6, 2))]),
[0.05, 0.25, 0.5, 0.75, 0.95])
@incompat
@ignore_order(local=True)
def test_hash_groupby_approx_percentile_decimal32_single():
compare_percentile_approx(
lambda spark: gen_df(spark, [('k', RepeatSeqGen(ByteGen(nullable=False), length=2)),
('v', DecimalGen(6, 2))]),
0.05)
@incompat
@ignore_order(local=True)
def test_hash_groupby_approx_percentile_decimal64():
compare_percentile_approx(
lambda spark: gen_df(spark, [('k', RepeatSeqGen(ByteGen(nullable=False), length=2)),
('v', DecimalGen(10, 9))]),
[0.05, 0.25, 0.5, 0.75, 0.95])
@incompat
@ignore_order(local=True)
def test_hash_groupby_approx_percentile_decimal64_single():
compare_percentile_approx(
lambda spark: gen_df(spark, [('k', RepeatSeqGen(ByteGen(nullable=False), length=2)),
('v', DecimalGen(10, 9))]),
0.05)
@incompat
@ignore_order(local=True)
def test_hash_groupby_approx_percentile_decimal128():
compare_percentile_approx(
lambda spark: gen_df(spark, [('k', RepeatSeqGen(ByteGen(nullable=False), length=2)),
('v', DecimalGen(19, 18))]),
[0.05, 0.25, 0.5, 0.75, 0.95])
@incompat
@ignore_order(local=True)
def test_hash_groupby_approx_percentile_decimal128_single():
compare_percentile_approx(
lambda spark: gen_df(spark, [('k', RepeatSeqGen(ByteGen(nullable=False), length=2)),
('v', DecimalGen(19, 18))]),
0.05)
# The percentile approx tests differ from other tests because we do not expect the CPU and GPU to produce the same
# results due to the different algorithms being used. Instead we compute an exact percentile on the CPU and then
# compute approximate percentiles on CPU and GPU and assert that the GPU numbers are accurate within some percentage
# of the CPU numbers
def compare_percentile_approx(df_fun, percentiles, conf = {}, reduction = False):
# create SQL statements for exact and approx percentiles
p_exact_sql = create_percentile_sql("percentile", percentiles, reduction)
p_approx_sql = create_percentile_sql("approx_percentile", percentiles, reduction)
def run_exact(spark):
df = df_fun(spark)
df.createOrReplaceTempView("t")
return spark.sql(p_exact_sql)
def run_approx(spark):
df = df_fun(spark)
df.createOrReplaceTempView("t")
return spark.sql(p_approx_sql)
# run exact percentile on CPU
exact = run_with_cpu(run_exact, 'COLLECT', conf)
# run approx_percentile on CPU and GPU
approx_cpu, approx_gpu = run_with_cpu_and_gpu(run_approx, 'COLLECT', conf)
assert len(exact) == len(approx_cpu)
assert len(exact) == len(approx_gpu)
for i in range(len(exact)):
cpu_exact_result = exact[i]
cpu_approx_result = approx_cpu[i]
gpu_approx_result = approx_gpu[i]
# assert that keys match
if not reduction:
assert cpu_exact_result['k'] == cpu_approx_result['k']
assert cpu_exact_result['k'] == gpu_approx_result['k']
# extract the percentile result column
exact_percentile = cpu_exact_result['the_percentile']
cpu_approx_percentile = cpu_approx_result['the_percentile']
gpu_approx_percentile = gpu_approx_result['the_percentile']
if exact_percentile is None:
assert cpu_approx_percentile is None
assert gpu_approx_percentile is None
else:
assert cpu_approx_percentile is not None
assert gpu_approx_percentile is not None
if isinstance(exact_percentile, list):
for j in range(len(exact_percentile)):
assert cpu_approx_percentile[j] is not None
assert gpu_approx_percentile[j] is not None
gpu_delta = abs(float(gpu_approx_percentile[j]) - float(exact_percentile[j]))
cpu_delta = abs(float(cpu_approx_percentile[j]) - float(exact_percentile[j]))
if gpu_delta > cpu_delta:
# GPU is less accurate so make sure we are within some tolerance
if gpu_delta == 0:
assert abs(gpu_delta / cpu_delta) - 1 < 0.001
else:
assert abs(cpu_delta / gpu_delta) - 1 < 0.001
else:
gpu_delta = abs(float(gpu_approx_percentile) - float(exact_percentile))
cpu_delta = abs(float(cpu_approx_percentile) - float(exact_percentile))
if gpu_delta > cpu_delta:
# GPU is less accurate so make sure we are within some tolerance
if gpu_delta == 0:
assert abs(gpu_delta / cpu_delta) - 1 < 0.001
else:
assert abs(cpu_delta / gpu_delta) - 1 < 0.001
def create_percentile_sql(func_name, percentiles, reduction):
if reduction:
if isinstance(percentiles, list):
return """select {}(v, array({})) as the_percentile from t""".format(
func_name, ",".join(str(i) for i in percentiles))
else:
return """select {}(v, {}) as the_percentile from t""".format(
func_name, percentiles)
else:
if isinstance(percentiles, list):
return """select k, {}(v, array({})) as the_percentile from t group by k order by k""".format(
func_name, ",".join(str(i) for i in percentiles))
else:
return """select k, {}(v, {}) as the_percentile from t group by k order by k""".format(
func_name, percentiles)
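# A sketch of the SQL this helper produces (illustrative only):
#   create_percentile_sql("approx_percentile", [0.5, 0.9], reduction=False) returns
#   "select k, approx_percentile(v, array(0.5,0.9)) as the_percentile from t group by k order by k"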
@ignore_order
@pytest.mark.parametrize('data_gen', [_grpkey_strings_with_extra_nulls], ids=idfn)
@pytest.mark.parametrize('conf', get_params(_confs, params_markers_for_confs), ids=idfn)
def test_hash_grpby_avg_nulls(data_gen, conf):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, data_gen, length=100).groupby('a')
.agg(f.avg('c')),
conf=conf
)
@ignore_order
@allow_non_gpu('HashAggregateExec', 'Alias', 'AggregateExpression', 'Cast',
'HashPartitioning', 'ShuffleExchangeExec', 'Average')
@pytest.mark.parametrize('data_gen', [_grpkey_strings_with_extra_nulls], ids=idfn)
@pytest.mark.parametrize('conf', get_params(_confs, params_markers_for_confs), ids=idfn)
def test_hash_grpby_avg_nulls_ansi(data_gen, conf):
local_conf = copy_and_update(conf, {'spark.sql.ansi.enabled': 'true'})
assert_gpu_fallback_collect(
lambda spark: gen_df(spark, data_gen, length=100).groupby('a')
.agg(f.avg('c')),
'Average',
conf=local_conf
)
@ignore_order
@pytest.mark.parametrize('data_gen', [_grpkey_strings_with_extra_nulls], ids=idfn)
@pytest.mark.parametrize('conf', get_params(_confs, params_markers_for_confs), ids=idfn)
def test_hash_reduction_avg_nulls(data_gen, conf):
assert_gpu_and_cpu_are_equal_collect(
lambda spark: gen_df(spark, data_gen, length=100)
.agg(f.avg('c')),
conf=conf
)
@ignore_order
@allow_non_gpu('HashAggregateExec', 'Alias', 'AggregateExpression', 'Cast',
'HashPartitioning', 'ShuffleExchangeExec', 'Average')
@pytest.mark.parametrize('data_gen', [_grpkey_strings_with_extra_nulls], ids=idfn)
@pytest.mark.parametrize('conf', get_params(_confs, params_markers_for_confs), ids=idfn)
def test_hash_reduction_avg_nulls_ansi(data_gen, conf):
local_conf = copy_and_update(conf, {'spark.sql.ansi.enabled': 'true'})
assert_gpu_fallback_collect(
lambda spark: gen_df(spark, data_gen, length=100)
.agg(f.avg('c')),
'Average',
conf=local_conf
)
@ignore_order(local=True)
@allow_non_gpu('HashAggregateExec', 'Alias', 'AggregateExpression', 'Cast',
'HashPartitioning', 'ShuffleExchangeExec', 'Sum')
@pytest.mark.parametrize('data_gen', _no_overflow_ansi_gens, ids=idfn)
def test_sum_fallback_when_ansi_enabled(data_gen):
def do_it(spark):
df = gen_df(spark, [('a', data_gen), ('b', data_gen)], length=100)
return df.groupBy('a').agg(f.sum("b"))
assert_gpu_fallback_collect(do_it, 'Sum',
conf={'spark.sql.ansi.enabled': 'true'})
@ignore_order(local=True)
@allow_non_gpu('HashAggregateExec', 'Alias', 'AggregateExpression', 'Cast',
'HashPartitioning', 'ShuffleExchangeExec', 'Average')
@pytest.mark.parametrize('data_gen', _no_overflow_ansi_gens, ids=idfn)
def test_avg_fallback_when_ansi_enabled(data_gen):
def do_it(spark):
df = gen_df(spark, [('a', data_gen), ('b', data_gen)], length=100)
return df.groupBy('a').agg(f.avg("b"))
assert_gpu_fallback_collect(do_it, 'Average',
conf={'spark.sql.ansi.enabled': 'true'})
@ignore_order(local=True)
@allow_non_gpu('HashAggregateExec', 'Alias', 'AggregateExpression',
'HashPartitioning', 'ShuffleExchangeExec', 'Count', 'Literal')
@pytest.mark.parametrize('data_gen', _no_overflow_ansi_gens, ids=idfn)
def test_count_fallback_when_ansi_enabled(data_gen):
def do_it(spark):
df = gen_df(spark, [('a', data_gen), ('b', data_gen)], length=100)
return df.groupBy('a').agg(f.count("b"), f.count("*"))
assert_gpu_fallback_collect(do_it, 'Count',
conf={'spark.sql.ansi.enabled': 'true'})
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', _no_overflow_ansi_gens, ids=idfn)
def test_no_fallback_when_ansi_enabled(data_gen):
def do_it(spark):
df = gen_df(spark, [('a', data_gen), ('b', data_gen)], length=100)
# coalescing because first/last are not deterministic
df = df.coalesce(1).orderBy("a", "b")
return df.groupBy('a').agg(f.first("b"), f.last("b"), f.min("b"), f.max("b"))
assert_gpu_and_cpu_are_equal_collect(do_it,
conf={'spark.sql.ansi.enabled': 'true'})
# Tests for standard deviation and variance aggregations.
@ignore_order(local=True)
@approximate_float
@incompat
@pytest.mark.parametrize('data_gen', _init_list_with_nans_and_no_nans_with_decimals, ids=idfn)
@pytest.mark.parametrize('conf', get_params(_confs, params_markers_for_confs), ids=idfn)
def test_std_variance(data_gen, conf):
local_conf = copy_and_update(conf, {
'spark.rapids.sql.castDecimalToFloat.enabled': 'true'})
assert_gpu_and_cpu_are_equal_sql(
lambda spark : gen_df(spark, data_gen, length=1000),
"data_table",
'select ' +
'stddev(b),' +
'stddev_pop(b),' +
'stddev_samp(b),' +
'variance(b),' +
'var_pop(b),' +
'var_samp(b)' +
' from data_table group by a',
conf=local_conf)
assert_gpu_and_cpu_are_equal_sql(
lambda spark : gen_df(spark, data_gen, length=1000),
"data_table",
'select ' +
'stddev(b),' +
'stddev_samp(b)'
' from data_table',
conf=local_conf)
@ignore_order(local=True)
@approximate_float
@incompat
@pytest.mark.parametrize('data_gen', [_grpkey_strings_with_extra_nulls], ids=idfn)
@pytest.mark.parametrize('conf', get_params(_confs, params_markers_for_confs), ids=idfn)
@pytest.mark.parametrize('ansi_enabled', ['true', 'false'])
def test_std_variance_nulls(data_gen, conf, ansi_enabled):
local_conf = copy_and_update(conf, {'spark.sql.ansi.enabled': ansi_enabled})
assert_gpu_and_cpu_are_equal_sql(
lambda spark : gen_df(spark, data_gen, length=1000),
"data_table",
'select ' +
'stddev(c),' +
'stddev_pop(c),' +
'stddev_samp(c),' +
'variance(c),' +
'var_pop(c),' +
'var_samp(c)' +
' from data_table group by a',
conf=local_conf)
assert_gpu_and_cpu_are_equal_sql(
lambda spark : gen_df(spark, data_gen, length=1000),
"data_table",
'select ' +
'stddev(c),' +
'stddev_samp(c)'
' from data_table',
conf=local_conf)
@ignore_order(local=True)
@approximate_float
@allow_non_gpu('NormalizeNaNAndZero',
'HashAggregateExec', 'SortAggregateExec',
'Cast',
'ShuffleExchangeExec', 'HashPartitioning', 'SortExec',
'StddevPop', 'StddevSamp', 'VariancePop', 'VarianceSamp',
'SortArray', 'Alias', 'Literal', 'Count',
'AggregateExpression', 'ProjectExec')
@pytest.mark.parametrize('data_gen', _init_list_with_nans_and_no_nans, ids=idfn)
@pytest.mark.parametrize('conf', get_params(_confs, params_markers_for_confs), ids=idfn)
@pytest.mark.parametrize('replace_mode', _replace_modes_non_distinct, ids=idfn)
@pytest.mark.parametrize('aqe_enabled', ['false', 'true'], ids=idfn)
@pytest.mark.xfail(condition=is_databricks104_or_later(), reason='https://github.com/NVIDIA/spark-rapids/issues/4963')
def test_std_variance_partial_replace_fallback(data_gen,
conf,
replace_mode,
aqe_enabled):
local_conf = copy_and_update(conf, {'spark.rapids.sql.hashAgg.replaceMode': replace_mode,
'spark.sql.adaptive.enabled': aqe_enabled})
exist_clz = ['StddevPop', 'StddevSamp', 'VariancePop', 'VarianceSamp',
'GpuStddevPop', 'GpuStddevSamp', 'GpuVariancePop', 'GpuVarianceSamp']
non_exist_clz = []
assert_cpu_and_gpu_are_equal_collect_with_capture(
lambda spark: gen_df(spark, data_gen, length=1000)
.groupby('a')
.agg(
f.stddev('b'),
f.stddev_pop('b'),
f.stddev_samp('b'),
f.variance('b'),
f.var_pop('b'),
f.var_samp('b')
),
exist_classes=','.join(exist_clz),
non_exist_classes=','.join(non_exist_clz),
conf=local_conf)
exist_clz = ['StddevSamp',
'GpuStddevSamp']
assert_cpu_and_gpu_are_equal_collect_with_capture(
lambda spark: gen_df(spark, data_gen, length=1000)
.agg(
f.stddev('b'),
f.stddev_samp('b')
),
exist_classes=','.join(exist_clz),
non_exist_classes=','.join(non_exist_clz),
conf=local_conf)
#
# Test min/max aggregations on simple type (integer) keys and nested type values.
#
gens_for_max_min = [byte_gen, short_gen, int_gen, long_gen,
float_gen, double_gen,
string_gen, boolean_gen,
date_gen, timestamp_gen,
DecimalGen(precision=12, scale=2),
DecimalGen(precision=36, scale=5),
null_gen] + array_gens_sample + struct_gens_sample
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', gens_for_max_min, ids=idfn)
def test_min_max_in_groupby_and_reduction(data_gen):
df_gen = [('a', data_gen), ('b', RepeatSeqGen(IntegerGen(), length=20))]
# test max
assert_gpu_and_cpu_are_equal_sql(
lambda spark : gen_df(spark, df_gen),
"hash_agg_table",
'select b, max(a) from hash_agg_table group by b',
_float_conf)
assert_gpu_and_cpu_are_equal_sql(
lambda spark : gen_df(spark, df_gen),
"hash_agg_table",
'select max(a) from hash_agg_table',
_float_conf)
# test min
assert_gpu_and_cpu_are_equal_sql(
lambda spark : gen_df(spark, df_gen, length=1024),
"hash_agg_table",
'select b, min(a) from hash_agg_table group by b',
_float_conf)
assert_gpu_and_cpu_are_equal_sql(
lambda spark : gen_df(spark, df_gen, length=1024),
"hash_agg_table",
'select min(a) from hash_agg_table',
_float_conf)
# Some Spark implementations will optimize this aggregation as a
# complete aggregation (i.e.: only one aggregation node in the plan)
@ignore_order(local=True)
def test_hash_aggregate_complete_with_grouping_expressions():
assert_gpu_and_cpu_are_equal_sql(
lambda spark : spark.range(10).withColumn("id2", f.col("id")),
"hash_agg_complete_table",
"select id, avg(id) from hash_agg_complete_table group by id, id2 + 1")
@ignore_order(local=True)
@pytest.mark.parametrize('cast_key_to', ["byte", "short", "int",
"long", "string", "DECIMAL(38,5)"], ids=idfn)
def test_hash_agg_force_pre_sort(cast_key_to):
def do_it(spark):
gen = StructGen([("key", UniqueLongGen()), ("value", long_gen)], nullable=False)
df = gen_df(spark, gen)
return df.selectExpr("CAST((key div 10) as " + cast_key_to + ") as key", "value").groupBy("key").sum("value")
assert_gpu_and_cpu_are_equal_collect(do_it,
conf={'spark.rapids.sql.agg.forceSinglePassPartialSort': True,
'spark.rapids.sql.agg.singlePassPartialSortEnabled': True})
| spark-rapids-branch-23.10 | integration_tests/src/main/python/hash_aggregate_test.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fnmatch
import os
import re
import shutil
import subprocess
import zipfile
# Note: like build/shimplify.py, this script runs as a Jython task inside the Maven/Ant build;
# `self` (the Ant task), `project`, and `attributes` used below are provided implicitly by Ant.
def shell_exec(shell_cmd):
    ret_code = subprocess.call(shell_cmd)
    if ret_code != 0:
        self.fail("failed to execute %s" % shell_cmd)
artifacts = attributes.get('artifact_csv').split(',')
buildver_list = re.sub(r'\s+', '', project.getProperty('included_buildvers'),
flags=re.UNICODE).split(',')
project_basedir = project.getProperty('spark.rapids.source.basedir')
project_version = project.getProperty('project.version')
scala_version = project.getProperty('scala.binary.version')
project_build_dir = project.getProperty('project.build.directory')
deps_dir = os.sep.join([project_build_dir, 'deps'])
top_dist_jar_dir = os.sep.join([project_build_dir, 'parallel-world'])
urm_url = project.getProperty('env.URM_URL')
jenkins_settings = os.sep.join([project_basedir, 'jenkins', 'settings.xml'])
repo_local = project.getProperty('maven.repo.local')
for bv in buildver_list:
classifier = 'spark' + bv
for art in artifacts:
build_dir = os.sep.join([project_basedir, art, 'target', classifier])
art_id = '-'.join(['rapids-4-spark', art + '_' + scala_version])
art_jar = '-'.join([art_id, project_version, classifier]) + '.jar'
art_jar_path = os.sep.join([build_dir, art_jar])
if os.path.isfile(art_jar_path):
shutil.copy(art_jar_path, deps_dir)
else:
mvn_home = project.getProperty('maven.home')
mvn_cmd = [
os.sep.join([mvn_home, 'bin', 'mvn']),
                # TODO: the 'dest' property is removed in maven-dependency-plugin 3.x, so switch
                # to the 'copy' goal; however, 'copy' does not support overriding the local repo
                # via a property. An issue is needed to sort this out better.
'org.apache.maven.plugins:maven-dependency-plugin:2.10:get',
'-B',
'='.join(['-Ddest', deps_dir]),
'='.join(['-DgroupId','com.nvidia']),
'='.join(['-DartifactId', art_id]),
'='.join(['-Dversion', project_version]),
'='.join(['-Dpackaging', 'jar']),
'='.join(['-Dclassifier', classifier]),
'='.join(['-Dtransitive', 'false'])
]
if urm_url:
mvn_cmd.extend(['-s', jenkins_settings])
if repo_local:
mvn_cmd.append('='.join(['-Dmaven.repo.local', repo_local]))
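            # At this point mvn_cmd is roughly equivalent to (values here are illustrative):
            #   mvn org.apache.maven.plugins:maven-dependency-plugin:2.10:get -B \
            #       -Ddest=<deps_dir> -DgroupId=com.nvidia -DartifactId=rapids-4-spark-<art>_2.12 \
            #       -Dversion=<project.version> -Dpackaging=jar -Dclassifier=spark<buildver> \
            #       -Dtransitive=false [-s jenkins/settings.xml] [-Dmaven.repo.local=<repo>]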
shell_exec(mvn_cmd)
dist_dir = os.sep.join([project_basedir, 'dist'])
with open(os.sep.join([dist_dir, 'unshimmed-common-from-spark311.txt']), 'r') as f:
from_spark311 = f.read().splitlines()
with open(os.sep.join([dist_dir, 'unshimmed-from-each-spark3xx.txt']), 'r') as f:
from_each = f.read().splitlines()
with zipfile.ZipFile(os.sep.join([deps_dir, art_jar]), 'r') as zip_handle:
if project.getProperty('should.build.conventional.jar'):
zip_handle.extractall(path=top_dist_jar_dir)
else:
zip_handle.extractall(path=os.sep.join([top_dist_jar_dir, classifier]))
# IMPORTANT unconditional extract from first to the top
if bv == buildver_list[0] and art == 'sql-plugin-api':
zip_handle.extractall(path=top_dist_jar_dir)
# TODO deprecate
namelist = zip_handle.namelist()
matching_members = []
glob_list = from_spark311 + from_each if bv == buildver_list[0] else from_each
for pat in glob_list:
new_matches = fnmatch.filter(namelist, pat)
matching_members += new_matches
zip_handle.extractall(path=top_dist_jar_dir, members=matching_members)
| spark-rapids-branch-23.10 | dist/build/package-parallel-worlds.py |
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
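# Expected stdin format (a sketch inferred from the parsing below): tab-separated lines such as
#   TRACK<TAB>0x7f3c0c000b60<TAB>MALLOC<TAB>...
#   TRACK<TAB>0x7f3c0c000b60<TAB>FREE<TAB>...
# Typical usage is to pipe a host memory trace log into this script, e.g.
#   python find_leak.py < trace.log   (trace.log is a placeholder name)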
def do_it():
outstanding = {}
line_num = 0
for l in sys.stdin:
line_num = line_num + 1
line = l.strip()
if line.startswith("TRACK"):
parts = line.split("\t")
if (len(parts) < 3):
print("PROBLEM ON LINE %s %s"%(line_num, line))
else:
address = parts[1]
func = parts[2]
if (func == "MALLOC"):
if address in outstanding:
print("DOUBLE ALLOC: %s"%(address))
else:
outstanding[address] = line
elif (func == "FREE"):
if address in outstanding:
del outstanding[address]
else:
print("FREE WITHOUT ALLOC: %s"%(address))
else:
print("UNEXPECTED LINE %s"%(line))
print("LEAKS: %s"%(len(outstanding)))
for address in outstanding:
print(outstanding[address])
if __name__ == '__main__':
do_it()
| spark-rapids-branch-23.10 | dev/host_memory_leaks/find_leak.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Implementation of <shimplify> Ant task in Python2.7 for Jython
Simplifies the prior version-range directory system
The if=false version is run by default during the generate-sources phase. If the user wants to
run shimplify to perform file modifications, either for converting to the new system or for adding
a new shim, it is recommended (albeit not required) to do it in a dedicated run after running
`mvn install`.
```bash
mvn clean install -DskipTests
mvn generate-sources
```
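For example, a sketch of adding a new shim by cloning an existing one, using the switches
documented below (the buildver values 331 and 332 are placeholders, and depending on the pom
defaults additional properties such as shimplify.shims may also be required):
```bash
mvn generate-sources -Dshimplify=true \
    -Dshimplify.add.base=331 -Dshimplify.add.shim=332
```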
Switches:
shimplify - property passed to task attribute `if` whether to modify files
shimplify.add.base - old buildver to base the new one provided by shimplify.add.shim
shimplify.add.shim - add new shim/buildver based on the one provided by shimplify.add.base
shimplify.dirs - comma-separated list of dirs to modify, supersedes shimplify.shims
shimplify.move - property to allow moving files to canonical location, otherwise update
without moving
shimplify.overwrite - property to allow shimplify executing file changes repeatedly,
error out otherwise
shimplify.shims - comma-separated list of shims to simplify instead of all, superseded by
shimplify.dirs
shimplify.remove.shim - drop support for shim/buildver, its exclusive files are removed
shimplify.trace - property to enable trace logging
If the task attribute "if" evaluates to false shimplify does not alter files on the filesystem.
The task merely suggests consolidating shim directories if it finds shims that consist of multiple
directories not shared with other shims. The prior design expects to find only one such
directory per shim.
If the task attribute "if" evaluates to true shimplify is allowed to make changes on the filesystem.
It is expected that this is only done under a local git repo for an easy undo (otherwise making
manual backups is recommended). Undo typically involves three steps
First undo all potential renames
git restore --staged sql-plugin tests
Then undo all non-staged changes, including those produced by the command above.
git restore sql-plugin tests
Optionally review and remove empty directories
git clean -f -d [--dry-run]
Each shim Scala/Java file receives a comment describing all Spark builds it
belongs to. Lines are sorted by the Spark `buildver` lexicographically.
Each line is assumed to be a JSON to keep it extensible.
/*** spark-rapids-shim-json-lines
{"spark": "312"}
{"spark": "323"}
spark-rapids-shim-json-lines ***/
The canonical location of a source file shared by multiple shims is
src/main/<top_buildver_in_the_comment>
You can find all shim files for a particular shim, e.g. 312, easily by executing:
git grep '{"spark": "312"}' '*.java' '*.scala'
"""
import errno
import json
import logging
import os
import re
import subprocess
def __project():
"""
Wraps access to the implicitly provided Ant project reference[1] to reduce undefined-name
linting warnings
TODO Pass only Python types if possible
[1] https://ant.apache.org/manual/api/org/apache/tools/ant/Project.html
"""
return project
def __task():
"""
Wraps access to the implicitly provided Ant task attributes map to reduce
undefined-name linting warnings
"""
return self
def __fail(message):
"Fails this task with the error message"
__task().fail(message)
def __attributes():
"""
Wraps access to the implicitly provided Ant task attributes map to reduce
undefined-name linting warnings
TODO Pass only Python types if possible
"""
return attributes
def __ant_proj_prop(name):
"""Returns an Ant project property value as a Python string"""
prop_val = __project().getProperty(name)
return None if prop_val is None else str(prop_val)
def __ant_attr(name):
"""Returns this Ant task attribute value as a Python string"""
attr_val = __attributes().get(name)
return None if attr_val is None else str(attr_val)
def __is_enabled_property(prop_name):
"""Returns True if the required project property is set to true"""
assert prop_name is not None, "Invalid property: None"
prop_val = __ant_proj_prop(prop_name)
return str(True).lower() == prop_val
def __is_enabled_attr(attr):
"""Returns True if the required project property is set to true"""
assert attr is not None, "Invalid attribute: None"
attr_val = __ant_attr(attr)
return attr_val is not None and __is_enabled_property(attr_val)
def __csv_ant_prop_as_arr(name):
"""Splits a CSV value for a property into a list"""
prop_val = __ant_proj_prop(name)
return __csv_as_arr(prop_val)
def __csv_as_arr(str_val):
"""Splits a string CSV value into a list, returns [] if undefined or empty"""
if str_val in (None, ''):
return []
else:
return str_val.translate(None, ' ' + os.linesep).split(',')
__should_add_comment = __is_enabled_attr('if')
# should we move files?
__should_move_files = __is_enabled_property('shimplify.move')
# enable log tracing?
__should_trace = __is_enabled_property('shimplify.trace')
__add_shim_buildver = __ant_proj_prop('shimplify.add.shim')
__add_shim_base = __ant_proj_prop('shimplify.add.base')
__remove_shim_buildver = __ant_proj_prop('shimplify.remove.shim')
# allowed to overwrite the existing comment?
__should_overwrite = (__is_enabled_property('shimplify.overwrite')
or __add_shim_buildver is not None
or __remove_shim_buildver is not None)
__shim_comment_tag = 'spark-rapids-shim-json-lines'
__opening_shim_tag = '/*** ' + __shim_comment_tag
__closing_shim_tag = __shim_comment_tag + ' ***/'
__shims_arr = sorted(__csv_ant_prop_as_arr('shimplify.shims'))
__dirs_to_derive_shims = sorted(__csv_ant_prop_as_arr('shimplify.dirs'))
__all_shims_arr = sorted(__csv_ant_prop_as_arr('all.buildvers'))
__log = logging.getLogger('shimplify')
__log.setLevel(logging.DEBUG if __should_trace else logging.INFO)
__ch = logging.StreamHandler()
__ch.setFormatter(logging.Formatter('%(name)s - %(levelname)s - %(message)s'))
__log.addHandler(__ch)
__shim_dir_pattern = re.compile(r'spark\d{3}')
__shim_comment_pattern = re.compile(re.escape(__opening_shim_tag) +
r'\n(.*)\n' +
re.escape(__closing_shim_tag), re.DOTALL)
def __upsert_shim_json(filename, bv_list):
with open(filename, 'r') as file:
contents = file.readlines()
__delete_prior_comment_if_allowed(contents, __shim_comment_tag, filename)
shim_comment = [__opening_shim_tag]
for build_ver in bv_list:
shim_comment.append(json.dumps({'spark': build_ver}))
shim_comment.append(__closing_shim_tag)
shim_comment = map(lambda x: x + '\n', shim_comment)
__log.debug("Inserting comment %s to %s", shim_comment, filename)
package_line = next(i for i in range(len(contents)) if str(contents[i]).startswith('package'))
__log.debug("filename %s package_line_number=%d", filename, package_line)
for i in range(len(shim_comment)):
contents.insert(package_line + i, shim_comment[i])
with open(filename, 'w') as file:
file.writelines(contents)
def __delete_prior_comment_if_allowed(contents, tag, filename):
opening_shim_comment_line = None
closing_shim_comment_line = None
try:
opening_shim_comment_line = next(i for i in range(len(contents))
if tag in str(contents[i]))
shim_comment_and_below = range(opening_shim_comment_line + 1, len(contents))
closing_shim_comment_line = next(i for i in shim_comment_and_below
if tag in str(contents[i]))
except StopIteration as si_exc:
if (opening_shim_comment_line is not None) and (closing_shim_comment_line is None):
__fail("%s: no closing comment for %s:%d"
% (si_exc, filename, opening_shim_comment_line))
if opening_shim_comment_line is None:
# no work
return
if not __should_overwrite:
__fail("found shim comment from prior execution at %s:%d, use -Dshimplify.overwrite=true "
"to overwrite" % (filename, opening_shim_comment_line))
assert (opening_shim_comment_line is not None) and (closing_shim_comment_line is not None)
__log.debug("removing comments %s:%d:%d", filename, opening_shim_comment_line,
closing_shim_comment_line)
del contents[opening_shim_comment_line:(closing_shim_comment_line + 1)]
def __git_rename_or_copy(shim_file, owner_shim, from_shim=None):
__log.debug("git rename %s to the canonical dir of %s", shim_file, owner_shim)
parent_pom_dir = __ant_proj_prop('spark.rapids.source.basedir')
# sql-plugin/src/main/320+-nondb/scala/org/apache/spark/...
rel_path = os.path.relpath(shim_file, parent_pom_dir)
__log.debug("spark-rapids root dir: %s", parent_pom_dir)
__log.debug("shim file path relative to root dir: %s", rel_path)
path_parts = rel_path.split(os.sep)
owner_path_comp = "spark%s" % owner_shim
from_path_comp = None
if from_shim is not None:
# may have to update package path
from_path_comp = "spark%s" % from_shim
path_parts = [p.replace(from_path_comp, owner_path_comp) for p in path_parts]
# to enable both builds at the same time the original location should change too
# <module>/src/test/331 => <module>/src/test/spark331
path_parts[3] = owner_path_comp
new_shim_file = os.sep.join([parent_pom_dir] + path_parts)
if shim_file == new_shim_file:
__log.info("%s is already at the right location, skipping git rename", shim_file)
else:
new_shim_dir = os.path.dirname(new_shim_file)
__log.debug("creating new shim path %s", new_shim_dir)
__makedirs(new_shim_dir)
if from_path_comp is None:
shell_cmd = ['git', 'mv', shim_file, new_shim_file]
__shell_exec(shell_cmd)
else:
with open(shim_file, 'r') as src_shim_fh:
with open(new_shim_file, 'w') as dst_shim_fh:
content = src_shim_fh.read()
dst_content = content.replace(from_path_comp, owner_path_comp)
dst_shim_fh.write(dst_content)
git_add_cmd = ['git', 'add', new_shim_file]
__shell_exec(git_add_cmd)
return new_shim_file
def __shell_exec(shell_cmd):
ret_code = subprocess.call(shell_cmd)
if ret_code != 0:
__fail("failed to execute %s" % shell_cmd)
def __makedirs(new_dir):
try:
__log.debug("__makedirs %s", new_dir)
os.makedirs(new_dir)
except OSError as ose:
if not (ose.errno == errno.EEXIST and os.path.isdir(new_dir)):
raise
def task_impl():
"""Ant task entry point """
__log.info('# Starting Jython Task Shimplify #')
config_format = """# config:
# shimplify (if)=%s
# shimplify.add.base=%s
# shimplify.add.shim=%s
# shimplify.dirs=%s
# shimplify.move=%s
# shimplify.overwrite=%s
# shimplify.shims=%s
# shimplify.trace=%s"""
__log.info(config_format,
__should_add_comment,
__add_shim_base,
__add_shim_buildver,
__dirs_to_derive_shims,
__should_move_files,
__should_overwrite,
__shims_arr,
__should_trace)
__log.info("review changes and `git restore` if necessary")
buildvers_from_dirs = []
dirs2bv = {}
for prop_pattern in ["spark%s.sources", "spark%s.test.sources"]:
per_pattern_dir_map = __build_dirs_to_buildvers_map(prop_pattern)
__log.debug("Map dirs2bv = %s", per_pattern_dir_map)
__warn_shims_with_multiple_dedicated_dirs(per_pattern_dir_map)
dirs2bv.update(per_pattern_dir_map)
# restrict set of dirs to shimplify.dirs?
for dir, buildvers in dirs2bv.items():
for dir_substr in __dirs_to_derive_shims:
if dir_substr in dir:
buildvers_from_dirs += buildvers
buildvers_from_dirs_sorted_deduped = sorted(set(buildvers_from_dirs))
if len(buildvers_from_dirs_sorted_deduped) > 0:
__log.info("shimplify.dirs = %s, overriding shims from dirs: %s", __dirs_to_derive_shims,
buildvers_from_dirs_sorted_deduped)
__shims_arr[:] = buildvers_from_dirs_sorted_deduped
if __should_add_comment:
        __log.info('Shim layout is being updated! Review and git commit (or git restore to undo) '
                   'the changes made with -Dshimplify=true. New symlinks will be generated in a '
                   'regular build with the default -Dshimplify=false')
__shimplify_layout()
else:
__log.info('Shim layout is not updated! If desired invoke '
'`mvn generate-sources -Dshimplify=true` to manipulate shims')
__generate_symlinks()
def __generate_symlinks():
"""
link
<module>/src/<main|test>/spark<buildver>/scala/<package_path>/SomeClass.scala
    <module>/target/spark<buildver>/generated/src/<main|test>/scala/<package_path>/SomeClass.scala
"""
buildver = __ant_proj_prop('buildver')
for src_type in ['main', 'test']:
__log.info("# generating symlinks for shim %s %s files", buildver, src_type)
__traverse_source_tree_of_all_shims(
src_type,
lambda src_type, path, build_ver_arr: __generate_symlink_to_file(buildver,
src_type,
path,
build_ver_arr))
def __traverse_source_tree_of_all_shims(src_type, func):
"""Walks src/<src_type>/sparkXYZ"""
base_dir = str(__project().getBaseDir())
src_root = os.path.join(base_dir, 'src', src_type)
for dir, subdirs, files in os.walk(src_root, topdown=True):
if dir == src_root:
subdirs[:] = [d for d in subdirs if re.match(__shim_dir_pattern, d)]
for f in files:
shim_file_path = os.path.join(dir, f)
__log.debug("processing shim comment at %s", shim_file_path)
with open(shim_file_path, 'r') as shim_file:
shim_file_txt = shim_file.read()
shim_match = __shim_comment_pattern.search(shim_file_txt)
assert shim_match is not None and shim_match.groups(), \
"no shim comment located in %s, " \
"orphan shim files should be deleted" % shim_file_path
shim_arr = shim_match.group(1).split(os.linesep)
                    assert len(shim_arr) > 0, "invalid empty shim comment, "\
                        "orphan shim files should be deleted"
build_ver_arr = map(lambda s: str(json.loads(s).get('spark')), shim_arr)
__log.debug("extracted shims %s", build_ver_arr)
assert build_ver_arr == sorted(build_ver_arr),\
"%s shim list is not properly sorted" % shim_file_path
func(src_type, shim_file_path, build_ver_arr)
def __generate_symlink_to_file(buildver, src_type, shim_file_path, build_ver_arr):
if buildver in build_ver_arr:
base_dir = str(__project().getBaseDir())
src_root = os.path.join(base_dir, 'src', src_type)
target_root = os.path.join(base_dir, 'target', "spark%s" % buildver, 'generated', 'src',
src_type)
first_build_ver = build_ver_arr[0]
__log.debug("top shim comment %s", first_build_ver)
shim_file_rel_path = os.path.relpath(shim_file_path, src_root)
expected_prefix = "spark%s%s" % (first_build_ver, os.sep)
assert shim_file_rel_path.startswith(expected_prefix), "Unexpected: %s is not prefixed " \
"by %s" % (shim_file_rel_path, expected_prefix)
shim_file_rel_path_parts = shim_file_rel_path.split(os.sep)
# drop spark3XY from spark3XY/scala/com/nvidia
target_file_parts = shim_file_rel_path_parts[1:]
target_rel_path = os.sep.join(target_file_parts)
target_shim_file_path = os.path.join(target_root, target_rel_path)
__log.debug("creating symlink %s -> %s", target_shim_file_path, shim_file_path)
__makedirs(os.path.dirname(target_shim_file_path))
if __should_overwrite:
__remove_file(target_shim_file_path)
__symlink(shim_file_path, target_shim_file_path)
def __symlink(src, target):
try:
os.symlink(src, target)
except OSError as ose:
if ose.errno != errno.EEXIST:
raise
def __remove_file(target_shim_file_path):
try:
os.remove(target_shim_file_path)
except OSError as ose:
        # ignore non-existing files
if ose.errno != errno.ENOENT:
raise
def __shimplify_layout():
__log.info("executing __shimplify_layout")
assert ((__add_shim_buildver is None) and (__add_shim_base is None) or
(__add_shim_buildver is not None) and (__add_shim_base is not None)),\
"shimplify.add.shim cannot be specified without shimplify.add.base and vice versa"
assert __add_shim_base is None or __add_shim_base in __shims_arr,\
"shimplify.add.base is not in %s" % __shims_arr
assert __add_shim_buildver is None or __remove_shim_buildver is None,\
"Adding and deleting a shim in a single invocation is not supported!"
# map file -> [shims it's part of]
files2bv = {}
# if the user allows to overwrite / reorganize shimplified shims,
# commonly while adding or removing shims we must include new shim locations
if __should_overwrite:
for src_type in ['main', 'test']:
__traverse_source_tree_of_all_shims(
src_type,
lambda unused_src_type, shim_file_path, build_ver_arr:
__update_files2bv(files2bv, shim_file_path, build_ver_arr))
# adding a new shim?
if __add_shim_buildver is not None:
__add_new_shim_to_file_map(files2bv)
if __remove_shim_buildver is not None:
__remove_shim_from_file_map(files2bv)
for shim_file, bv_list in files2bv.items():
if len(bv_list) == 0:
if __should_move_files:
__log.info("Removing orphaned file %s", shim_file)
__shell_exec(['git', 'rm', shim_file])
else:
__log.info("Detected an orphaned shim file %s, possibly while removing a shim."
" git rm it manually or rerun with -Dshimplify.move=true",
shim_file)
else:
sorted_build_vers = sorted(bv_list)
__log.debug("calling upsert_shim_json on shim_file %s bv_list=%s", shim_file,
sorted_build_vers)
owner_shim = sorted_build_vers[0]
if owner_shim in __shims_arr:
__upsert_shim_json(shim_file, sorted_build_vers)
if __should_move_files:
__git_rename_or_copy(shim_file, owner_shim)
def __update_files2bv(files2bv, path, buildver_arr):
assert path not in files2bv.keys(), "new path %s %s should be "\
"encountered only once, current map\n%s" % (path, buildver_arr, files2bv)
__log.debug("Adding %s %s to files to shim map", path, buildver_arr)
files2bv[path] = buildver_arr
def __add_new_shim_to_file_map(files2bv):
if __add_shim_buildver not in __all_shims_arr:
__log.warning("Update pom.xml to add %s to all.buildvers", __add_shim_buildver)
if __add_shim_buildver not in __shims_arr:
# TODO should we just bail and ask the user to add to all.buildvers manually first?
__shims_arr.append(__add_shim_buildver)
# copy keys to be able to modify the original dictionary while iterating
for shim_file in set(files2bv.keys()):
bv_list = files2bv[shim_file]
if __add_shim_base in bv_list:
# adding a lookalike
# case 1) dedicated per-shim files with a spark${buildver} in the package path,
# which implies a substring like /spark332/ occurs at least twice in the path:
# CLONE the file with modifications
# case 2) otherwise simply add the new buildver to the files2bv[shimfile] mapping
#
if shim_file.count("%sspark%s%s" % (os.sep, __add_shim_base, os.sep)) > 1:
assert len(bv_list) == 1, "Per-shim file %s is expected to belong to a single "\
"shim, actual shims: %s" % (shim_file, bv_list)
new_shim_file = __git_rename_or_copy(shim_file, __add_shim_buildver,
from_shim=__add_shim_base)
# schedule new file for comment update
__log.info("Adding a per-shim file %s for %s", new_shim_file,
__add_shim_buildver)
files2bv[new_shim_file] = [__add_shim_buildver]
else:
# TODO figure out why __add_shim_buildver is unicode class, not a simple str
# and if we care
__log.info("Appending %s to %s for %s", __add_shim_buildver, bv_list, shim_file)
bv_list.append(__add_shim_buildver)
def __remove_shim_from_file_map(files2bv):
__log.info("Removing %s shim, pom.xml should be updated manually.", __remove_shim_buildver)
# copy keys to be able to modify the original dictionary while iterating
for shim_file in set(files2bv.keys()):
bv_list = files2bv[shim_file]
try:
bv_list.remove(__remove_shim_buildver)
except ValueError as ve:
# __remove_shim_buildver is not in the list
__log.debug("%s: file %s does not belong to shim %s, skipping it", ve, shim_file,
__remove_shim_buildver)
pass
def __warn_shims_with_multiple_dedicated_dirs(dirs2bv):
# each shim has at least one dedicated dir, report shims
# with multiple dedicated dirs because they can be merged
single_shim_dirs = {dir: shims[0] for dir, shims in dirs2bv.items() if len(shims) == 1}
__log.debug("shims with exclusive dirs %s", single_shim_dirs)
multi_dir_shims = {}
for dir, single_shim in single_shim_dirs.items():
if single_shim in multi_dir_shims.keys():
multi_dir_shims[single_shim].append(dir)
else:
multi_dir_shims[single_shim] = [dir]
for shim, dirs in multi_dir_shims.items():
if len(dirs) > 1:
__log.warning("Consider consolidating %s, it spans multiple dedicated directories %s",
shim, dirs)
def __build_dirs_to_buildvers_map(prop_pattern):
dirs2bv = {}
for build_ver in __all_shims_arr:
__log.debug("updating dirs2bv for %s", build_ver)
shim_dirs = __csv_ant_prop_as_arr(prop_pattern % build_ver)
for dir in shim_dirs:
if dir not in dirs2bv.keys():
dirs2bv[dir] = [build_ver]
else:
dirs2bv[dir] += [build_ver]
__log.debug("Map build_ver -> shim_dirs %s" % dirs2bv)
return dirs2bv
task_impl()
| spark-rapids-branch-23.10 | build/shimplify.py |
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import json
import os
import requests
import sys
class ClusterUtils(object):
@staticmethod
def generate_create_templ(sshKey, cluster_name, runtime, idle_timeout,
num_workers, driver_node_type, worker_node_type, cloud_provider, init_scripts, aws_zone,
printLoc=sys.stdout):
timeStr = str(int(time.time()))
uniq_name = cluster_name + "-" + timeStr
templ = {}
templ['cluster_name'] = uniq_name
print("cluster name is going to be %s" % uniq_name, file=printLoc)
templ['spark_version'] = runtime
if (cloud_provider == 'aws'):
templ['aws_attributes'] = {
"zone_id": aws_zone,
"first_on_demand": 1,
"availability": "SPOT_WITH_FALLBACK",
"spot_bid_price_percent": 100,
"ebs_volume_count": 0
}
templ['autotermination_minutes'] = idle_timeout
templ['enable_elastic_disk'] = 'false'
templ['enable_local_disk_encryption'] = 'false'
templ['node_type_id'] = worker_node_type
templ['driver_node_type_id'] = driver_node_type
templ['ssh_public_keys'] = [ sshKey ]
templ['num_workers'] = num_workers
if (init_scripts != ''):
templ['init_scripts']=[]
path_list = init_scripts.split(',')
for path in path_list:
templ['init_scripts'].append(
{
'workspace' : {
'destination' : path
}
}
)
return templ
@staticmethod
def create_cluster(workspace, jsonCreateTempl, token, printLoc=sys.stdout):
resp = requests.post(workspace + "/api/2.0/clusters/create", headers={'Authorization': 'Bearer %s' % token}, json=jsonCreateTempl)
print("create response is %s" % resp.text, file=printLoc)
clusterid = resp.json()['cluster_id']
print("cluster id is %s" % clusterid, file=printLoc)
return clusterid
@staticmethod
def wait_for_cluster_start(workspace, clusterid, token, retries=20, printLoc=sys.stdout):
p = 0
waiting = True
jsonout = None
while waiting:
time.sleep(30)
jsonout = ClusterUtils.cluster_state(workspace, clusterid, token, printLoc=printLoc)
current_state = jsonout['state']
print(clusterid + " state:" + current_state, file=printLoc)
if current_state in ['RUNNING']:
break
            # Once the cluster hits a terminal/error state, or after 60 polls (~30 minutes),
            # give up as soon as at least `retries` checks have been made.
            if current_state in ['INTERNAL_ERROR', 'SKIPPED', 'TERMINATED'] or p >= 60:
                if p >= retries:
                    print("Waited %d times already, stopping" % p, file=printLoc)
                    sys.exit(4)
            p = p + 1
print("Done starting cluster", file=printLoc)
return jsonout
@staticmethod
def is_cluster_running(jsonout):
current_state = jsonout['state']
if current_state in ['RUNNING', 'RESIZING']:
return True
else:
return False
@staticmethod
def terminate_cluster(workspace, clusterid, token, printLoc=sys.stdout):
jsonout = ClusterUtils.cluster_state(workspace, clusterid, token, printLoc=printLoc)
        if not ClusterUtils.is_cluster_running(jsonout):
print("Cluster is not running", file=printLoc)
sys.exit(1)
print("Stopping cluster: " + clusterid, file=printLoc)
resp = requests.post(workspace + "/api/2.0/clusters/delete", headers={'Authorization': 'Bearer %s' % token}, json={'cluster_id': clusterid})
print("stop response is %s" % resp.text, file=printLoc)
print("Done stopping cluster", file=printLoc)
@staticmethod
def delete_cluster(workspace, clusterid, token, printLoc=sys.stdout):
print("Deleting cluster: " + clusterid, file=printLoc)
resp = requests.post(workspace + "/api/2.0/clusters/permanent-delete", headers={'Authorization': 'Bearer %s' % token}, json={'cluster_id': clusterid})
print("delete response is %s" % resp.text, file=printLoc)
print("Done deleting cluster", file=printLoc)
@staticmethod
def start_existing_cluster(workspace, clusterid, token, printLoc=sys.stdout):
print("Starting cluster: " + clusterid, file=printLoc)
resp = requests.post(workspace + "/api/2.0/clusters/start", headers={'Authorization': 'Bearer %s' % token}, json={'cluster_id': clusterid})
print("start response is %s" % resp.text, file=printLoc)
@staticmethod
def cluster_state(workspace, clusterid, token, printLoc=sys.stdout):
clusterresp = requests.get(workspace + "/api/2.0/clusters/get?cluster_id=%s" % clusterid, headers={'Authorization': 'Bearer %s' % token})
clusterjson = clusterresp.text
print("cluster response is %s" % clusterjson, file=printLoc)
jsonout = json.loads(clusterjson)
return jsonout
@staticmethod
def get_master_addr_from_json(jsonout):
master_addr = None
if ClusterUtils.is_cluster_running(jsonout):
driver = jsonout['driver']
master_addr = driver["public_dns"]
return master_addr
@staticmethod
def cluster_list(workspace, token, printLoc=sys.stdout):
clusterresp = requests.get(workspace + "/api/2.0/clusters/list", headers={'Authorization': 'Bearer %s' % token})
clusterjson = clusterresp.text
print("cluster list is %s" % clusterjson, file=printLoc)
jsonout = json.loads(clusterjson)
return jsonout
@staticmethod
def cluster_get_master_addr(workspace, clusterid, token, printLoc=sys.stdout):
jsonout = ClusterUtils.cluster_state(workspace, clusterid, token, printLoc=printLoc)
addr = ClusterUtils.get_master_addr_from_json(jsonout)
print("master addr is %s" % addr, file=printLoc)
return addr
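# A minimal usage sketch (the runtime, node types, and zone below are illustrative values,
# not the defaults used by CI):
#   templ = ClusterUtils.generate_create_templ(ssh_key, 'ci-cluster', '11.3.x-gpu-ml-scala2.12',
#                                              120, 1, 'g4dn.xlarge', 'g4dn.xlarge', 'aws', '',
#                                              'us-west-2a')
#   cluster_id = ClusterUtils.create_cluster(workspace, templ, token)
#   ClusterUtils.wait_for_cluster_start(workspace, cluster_id, token)
#   master_addr = ClusterUtils.cluster_get_master_addr(workspace, cluster_id, token)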
| spark-rapids-branch-23.10 | jenkins/databricks/clusterutils.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parse input parameters."""
import getopt
import sys
workspace = 'https://dbc-9ff9942e-a9c4.cloud.databricks.com'
token = ''
private_key_file = "~/.ssh/id_rsa"
local_script = 'build.sh'
script_dest = '/home/ubuntu/build.sh'
source_tgz = 'spark-rapids-ci.tgz'
tgz_dest = '/home/ubuntu/spark-rapids-ci.tgz'
base_spark_pom_version = '3.2.1'
base_spark_version_to_install_databricks_jars = base_spark_pom_version
clusterid = ''
# `mvn_opt` can take comma-separated Maven options, e.g., -Pfoo=1,-Dbar=2,...
mvn_opt = ''
jar_path = ''
# `spark_conf` can take multiple comma-separated Spark configurations, e.g., spark.foo=1,spark.bar=2,...
spark_conf = ''
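# This module is consumed simply by importing it: the option parsing below runs at
# import time, and callers such as run-build.py and run-tests.py read the results
# as module attributes, e.g. params.workspace, params.clusterid and params.script_args.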
def usage():
"""Define usage."""
print('Usage: ' + sys.argv[0] +
          ' -w <workspace>'
' -t <token>'
' -c <clusterid>'
' -p <privatekeyfile>'
' -l <localscript>'
' -d <scriptdestination>'
' -z <sparktgz>'
' -v <basesparkpomversion>'
' -b <mvnoptions>'
' -j <jarpath>'
' -n <skipstartingcluster>'
' -f <sparkconf>'
' -i <sparkinstallver>')
try:
opts, script_args = getopt.getopt(sys.argv[1:], 'hw:t:c:p:l:d:z:m:v:b:j:f:i:',
['workspace=',
'token=',
'clusterid=',
'private=',
'localscript=',
'dest=',
'sparktgz=',
'basesparkpomversion=',
'mvnoptions=',
                                       'jarpath=',
                                       'sparkconf=',
'sparkinstallver='])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
usage()
sys.exit(1)
elif opt in ('-w', '--workspace'):
workspace = arg
elif opt in ('-t', '--token'):
token = arg
elif opt in ('-c', '--clusterid'):
clusterid = arg
elif opt in ('-p', '--private'):
private_key_file = arg
elif opt in ('-l', '--localscript'):
local_script = arg
elif opt in ('-d', '--dest'):
script_dest = arg
elif opt in ('-z', '--sparktgz'):
source_tgz = arg
elif opt in ('-v', '--basesparkpomversion'):
base_spark_pom_version = arg
elif opt in ('-b', '--mvnoptions'):
mvn_opt = arg
elif opt in ('-j', '--jarpath'):
jar_path = arg
elif opt in ('-f', '--sparkconf'):
spark_conf = arg
elif opt in ('-i', '--sparkinstallver'):
base_spark_version_to_install_databricks_jars = arg
print('-w is ' + workspace)
print('-c is ' + clusterid)
print('-p is ' + private_key_file)
print('-l is ' + local_script)
print('-d is ' + script_dest)
print('script_args is ' + ' '.join(script_args))
print('-z is ' + source_tgz)
print('-v is ' + base_spark_pom_version)
print('-j is ' + jar_path)
print('-f is ' + spark_conf)
print('-i is ' + base_spark_version_to_install_databricks_jars)
| spark-rapids-branch-23.10 | jenkins/databricks/params.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import glob
import sys
class Artifact:
def __init__(self, group_id, artifact_id, filename):
self.group_id = group_id
self.artifact_id = artifact_id
self.filename = filename
def __repr__(self):
return f'{self.group_id} {self.artifact_id} {self.filename}'
def define_deps(spark_version, scala_version):
hadoop_version = "3.2"
hive_version = "2.3"
spark_prefix = '----workspace'
mvn_prefix = '--maven-trees'
if spark_version.startswith('3.2'):
spark_prefix = '----workspace_spark_3_2'
elif spark_version.startswith('3.3'):
spark_prefix = '----ws_3_3'
mvn_prefix = '--mvn'
spark_suffix = f'hive-{hive_version}__hadoop-{hadoop_version}_{scala_version}'
if spark_version.startswith('3.2'):
prefix_ws_sp_mvn_hadoop = f'{spark_prefix}{mvn_prefix}--hive-{hive_version}__hadoop-{hadoop_version}'
else:
prefix_ws_sp_mvn_hadoop = f'{spark_prefix}{mvn_prefix}--hadoop3'
deps = [
# Spark
Artifact('org.apache.spark', f'spark-network-common_{scala_version}',
f'{spark_prefix}--common--network-common--network-common-{spark_suffix}_deploy.jar'),
Artifact('org.apache.spark', f'spark-network-shuffle_{scala_version}',
f'{spark_prefix}--common--network-shuffle--network-shuffle-{spark_suffix}_deploy.jar'),
Artifact('org.apache.spark', f'spark-unsafe_{scala_version}',
f'{spark_prefix}--common--unsafe--unsafe-{spark_suffix}_deploy.jar'),
Artifact('org.apache.spark', f'spark-launcher_{scala_version}',
f'{spark_prefix}--launcher--launcher-{spark_suffix}_deploy.jar'),
Artifact('org.apache.spark', f'spark-sql_{scala_version}',
f'{spark_prefix}--sql--core--core-{spark_suffix}_deploy.jar'),
Artifact('org.apache.spark', f'spark-catalyst_{scala_version}',
f'{spark_prefix}--sql--catalyst--catalyst-{spark_suffix}_deploy.jar'),
Artifact('org.apache.spark', f'spark-annotation_{scala_version}',
f'{spark_prefix}--common--tags--tags-{spark_suffix}_deploy.jar'),
Artifact('org.apache.spark', f'spark-core_{scala_version}',
f'{spark_prefix}--core--core-{spark_suffix}_deploy.jar'),
# Spark Hive Patches
Artifact('org.apache.spark', f'spark-hive_{scala_version}',
f'{spark_prefix}--sql--hive--hive-{spark_suffix}_*.jar'),
Artifact('org.apache.hive', 'hive-exec',
f'{spark_prefix}--patched-hive-with-glue--hive-exec*.jar'),
Artifact('org.apache.hive', 'hive-metastore-client-patched',
f'{spark_prefix}--patched-hive-with-glue--hive-*-patch-{spark_suffix}_deploy.jar'),
# Hive
Artifact('org.apache.hive', 'hive-serde',
f'{prefix_ws_sp_mvn_hadoop}--org.apache.hive--hive-serde--org.apache.hive__hive-serde__*.jar'),
Artifact('org.apache.hive', 'hive-storage-api',
f'{prefix_ws_sp_mvn_hadoop}--org.apache.hive--hive-storage-api--org.apache.hive__hive-storage-api__*.jar'),
# Parquet
Artifact('org.apache.parquet', 'parquet-hadoop',
f'{prefix_ws_sp_mvn_hadoop}--org.apache.parquet--parquet-hadoop--org.apache.parquet__parquet-hadoop__*-databricks*.jar'),
Artifact('org.apache.parquet', 'parquet-common',
f'{prefix_ws_sp_mvn_hadoop}--org.apache.parquet--parquet-common--org.apache.parquet__parquet-common__*-databricks*.jar'),
Artifact('org.apache.parquet', 'parquet-column',
f'{prefix_ws_sp_mvn_hadoop}--org.apache.parquet--parquet-column--org.apache.parquet__parquet-column__*-databricks*.jar'),
Artifact('org.apache.parquet', 'parquet-format',
f'{prefix_ws_sp_mvn_hadoop}--org.apache.parquet--parquet-format-structures--org.apache.parquet__parquet-format-structures__*-databricks*.jar'),
# Orc
Artifact('org.apache.orc', 'orc-core',
f'{prefix_ws_sp_mvn_hadoop}--org.apache.orc--orc-core--org.apache.orc__orc-core__*.jar'),
Artifact('org.apache.orc', 'orc-shims',
f'{prefix_ws_sp_mvn_hadoop}--org.apache.orc--orc-shims--org.apache.orc__orc-shims__*.jar'),
Artifact('org.apache.orc', 'orc-mapreduce',
f'{prefix_ws_sp_mvn_hadoop}--org.apache.orc--orc-mapreduce--org.apache.orc__orc-mapreduce__*.jar'),
# Arrow
Artifact('org.apache.arrow', 'arrow-format',
f'{prefix_ws_sp_mvn_hadoop}--org.apache.arrow--arrow-format--org.apache.arrow__arrow-format__*.jar'),
Artifact('org.apache.arrow', 'arrow-memory',
f'{prefix_ws_sp_mvn_hadoop}--org.apache.arrow--arrow-memory-core--org.apache.arrow__arrow-memory-core__*.jar'),
Artifact('org.apache.arrow', 'arrow-vector',
f'{prefix_ws_sp_mvn_hadoop}--org.apache.arrow--arrow-vector--org.apache.arrow__arrow-vector__*.jar'),
Artifact('com.google.protobuf', 'protobuf-java',
f'{prefix_ws_sp_mvn_hadoop}--com.google.protobuf--protobuf-java--com.google.protobuf__protobuf-java__*.jar'),
Artifact('com.esotericsoftware.kryo', f'kryo-shaded-db',
f'{prefix_ws_sp_mvn_hadoop}--com.esotericsoftware--kryo-shaded--com.esotericsoftware__kryo-shaded__*.jar'),
Artifact('org.apache.commons', 'commons-io',
f'{prefix_ws_sp_mvn_hadoop}--commons-io--commons-io--commons-io__commons-io__*.jar'),
Artifact('org.apache.commons', 'commons-lang3',
f'{prefix_ws_sp_mvn_hadoop}--org.apache.commons--commons-lang3--org.apache.commons__commons-lang3__*.jar'),
Artifact('org.json4s', f'json4s-ast_{scala_version}',
f'{prefix_ws_sp_mvn_hadoop}--org.json4s--json4s-ast_{scala_version}--org.json4s__json4s-ast_{scala_version}__*.jar'),
Artifact('org.json4s', f'json4s-core_{scala_version}',
f'{prefix_ws_sp_mvn_hadoop}--org.json4s--json4s-core_{scala_version}--org.json4s__json4s-core_{scala_version}__*.jar'),
Artifact('org.json4s', f'json4s-jackson_{scala_version}',
f'{prefix_ws_sp_mvn_hadoop}--org.json4s--json4s-jackson_{scala_version}--org.json4s__json4s-jackson_{scala_version}__*.jar'),
Artifact('org.javaassist', 'javaassist',
f'{prefix_ws_sp_mvn_hadoop}--org.javassist--javassist--org.javassist__javassist__*.jar'),
Artifact('com.fasterxml.jackson.core', 'jackson-core',
f'{prefix_ws_sp_mvn_hadoop}--com.fasterxml.jackson.core--jackson-databind--com.fasterxml.jackson.core__jackson-databind__*.jar'),
Artifact('com.fasterxml.jackson.core', 'jackson-annotations',
f'{prefix_ws_sp_mvn_hadoop}--com.fasterxml.jackson.core--jackson-annotations--com.fasterxml.jackson.core__jackson-annotations__*.jar'),
Artifact('org.apache.spark', f'spark-avro_{scala_version}',
f'{spark_prefix}--vendor--avro--avro-*.jar'),
Artifact('org.apache.avro', 'avro-mapred',
f'{prefix_ws_sp_mvn_hadoop}--org.apache.avro--avro-mapred--org.apache.avro__avro-mapred__*.jar'),
Artifact('org.apache.avro', 'avro',
f'{prefix_ws_sp_mvn_hadoop}--org.apache.avro--avro--org.apache.avro__avro__*.jar'),
]
# log4j-core
if spark_version.startswith('3.3'):
        deps += [Artifact('org.apache.logging.log4j', 'log4j-core',
                          f'{prefix_ws_sp_mvn_hadoop}--org.apache.logging.log4j--log4j-core--org.apache.logging.log4j__log4j-core__*.jar')]
return deps
def install_deps(deps, spark_version_to_install_databricks_jars, m2_dir, jar_dir, file):
pom_xml_header = """<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.nvidia</groupId>
<artifactId>rapids-4-spark-databricks-deps-installer</artifactId>
<description>bulk databricks deps installer</description>
<version>${SPARK_PLUGIN_JAR_VERSION}</version>
<packaging>pom</packaging>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-install-plugin</artifactId>
<version>2.4</version>
<executions>
"""
print(pom_xml_header, file=file)
i = 0
for artifact in deps:
print(f'Generating an execution for {artifact}', file=sys.stderr)
group_id = artifact.group_id
artifact_id = artifact.artifact_id
filename = artifact.filename
files = glob.glob(f'{jar_dir}/{filename}')
if len(files) == 0:
raise Exception("No jar found that matches pattern {}".format(filename))
elif len(files) > 1:
raise Exception("Ambiguous filename pattern {} matches multiple files: {}".format(filename, files))
jar = files[0]
key = str(i)
i = i + 1
pom_xml_dep = f"""
<execution>
<id>install-db-jar-{key}</id>
<phase>initialize</phase>
<goals><goal>install-file</goal></goals>
<configuration>
<localRepositoryPath>{m2_dir}</localRepositoryPath>
<file>{jar}</file>
<groupId>{group_id}</groupId>
<artifactId>{artifact_id}</artifactId>
<version>{spark_version_to_install_databricks_jars}</version>
<packaging>jar</packaging>
</configuration>
</execution>"""
print(pom_xml_dep, file=file)
pom_xml_footer = """
</executions>
</plugin>
</plugins>
</build>
</project>
"""
print(pom_xml_footer, file=file)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('spark_version')
parser.add_argument('spark_version_to_install_databricks_jars')
parser.add_argument('scala_version')
parser.add_argument('m2_dir')
parser.add_argument('jar_dir')
parser.add_argument('pom_filename')
args = parser.parse_args()
deps = define_deps(args.spark_version, args.scala_version)
with open(args.pom_filename, "w") as f:
install_deps(deps, args.spark_version_to_install_databricks_jars, args.m2_dir, args.jar_dir, f)
| spark-rapids-branch-23.10 | jenkins/databricks/install_deps.py |
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clusterutils import ClusterUtils
import getopt
import sys
# This script creates and starts a Databricks cluster and waits for it to be running.
#
# The name parameter is meant to be a unique name used when creating the cluster. Note we
# append the epoch time to the end of it to help prevent collisions.
#
# Returns the cluster id on stdout; all other logs go to stderr.
#
# The user is responsible for removing the cluster on failure or when done with it.
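# A minimal invocation sketch (all values below are hypothetical placeholders; the
# runtime and node types happen to match the defaults defined in main()):
#   python create.py -w https://<workspace-host> -t <token> -k "$(cat ~/.ssh/id_rsa.pub)" \
#       -n my-ci-cluster -i 240 -r 7.0.x-gpu-ml-scala2.12 -o g4dn.xlarge -d g4dn.xlarge \
#       -e 1 -s aws -z us-west-2c > clusterid.txt
# Because only the cluster id goes to stdout, redirecting stdout is enough for a
# calling script to capture it.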
def main():
workspace = 'https://dbc-9ff9942e-a9c4.cloud.databricks.com'
token = ''
sshkey = ''
cluster_name = 'CI-GPU-databricks-23.10.0-SNAPSHOT'
idletime = 240
runtime = '7.0.x-gpu-ml-scala2.12'
num_workers = 1
worker_type = 'g4dn.xlarge'
driver_type = 'g4dn.xlarge'
cloud_provider = 'aws'
# comma separated init scripts in Databricks workspace, e.g. /foo,/bar,...
init_scripts = ''
aws_zone='us-west-2c'
try:
opts, args = getopt.getopt(sys.argv[1:], 'hw:t:k:n:i:r:o:d:e:s:f:z:',
['workspace=', 'token=', 'sshkey=', 'clustername=', 'idletime=',
'runtime=', 'workertype=', 'drivertype=', 'numworkers=', 'cloudprovider=', 'initscripts=', 'awszone='])
except getopt.GetoptError:
print(
'create.py -w <workspace> -t <token> -k <sshkey> -n <clustername> -i <idletime> -r <runtime> -o <workernodetype> -d <drivernodetype> -e <numworkers> -s <cloudprovider> -f <initscripts> -z <awszone>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print(
'create.py -w <workspace> -t <token> -k <sshkey> -n <clustername> -i <idletime> -r <runtime> -o <workernodetype> -d <drivernodetype> -e <numworkers> -s <cloudprovider> -f <initscripts> -z <awszone>')
sys.exit()
elif opt in ('-w', '--workspace'):
workspace = arg
elif opt in ('-t', '--token'):
token = arg
elif opt in ('-k', '--sshkey'):
sshkey = arg
elif opt in ('-n', '--clustername'):
cluster_name = arg
elif opt in ('-i', '--idletime'):
idletime = arg
elif opt in ('-r', '--runtime'):
runtime = arg
elif opt in ('-o', '--workertype'):
worker_type = arg
elif opt in ('-d', '--drivertype'):
driver_type = arg
elif opt in ('-e', '--numworkers'):
num_workers = arg
elif opt in ('-s', '--cloudprovider'):
cloud_provider = arg
elif opt in ('-f', '--initscripts'):
init_scripts = arg
elif opt in ('-z', '--awszone'):
aws_zone = arg
print('-w is ' + workspace, file=sys.stderr)
print('-k is ' + sshkey, file=sys.stderr)
print('-n is ' + cluster_name, file=sys.stderr)
print('-i is ' + str(idletime), file=sys.stderr)
print('-r is ' + runtime, file=sys.stderr)
print('-o is ' + worker_type, file=sys.stderr)
print('-d is ' + driver_type, file=sys.stderr)
print('-e is ' + str(num_workers), file=sys.stderr)
print('-s is ' + cloud_provider, file=sys.stderr)
print('-f is ' + init_scripts, file=sys.stderr)
print('-z is ' + aws_zone, file=sys.stderr)
if not sshkey:
print("You must specify an sshkey!", file=sys.stderr)
sys.exit(2)
if not token:
print("You must specify an token!", file=sys.stderr)
sys.exit(2)
templ = ClusterUtils.generate_create_templ(sshkey, cluster_name, runtime, idletime,
num_workers, driver_type, worker_type, cloud_provider, init_scripts, aws_zone, printLoc=sys.stderr)
clusterid = ClusterUtils.create_cluster(workspace, templ, token, printLoc=sys.stderr)
ClusterUtils.wait_for_cluster_start(workspace, clusterid, token, printLoc=sys.stderr)
# only print the clusterid to stdout so a calling script can get it easily
print(clusterid, file=sys.stdout)
if __name__ == '__main__':
main()
| spark-rapids-branch-23.10 | jenkins/databricks/create.py |
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clusterutils import ClusterUtils
import getopt
import sys
# Shut down or delete a Databricks cluster.
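# Example invocations (hypothetical values):
#   python shutdown.py -s <workspace-url> -t <token> -c <clusterid>      # terminate (stop) only
#   python shutdown.py -s <workspace-url> -t <token> -c <clusterid> -d   # permanently delete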
def main():
workspace = 'https://dbc-9ff9942e-a9c4.cloud.databricks.com'
token = ''
clusterid = '0617-140138-umiak14'
delete = False
try:
opts, args = getopt.getopt(sys.argv[1:], 'hs:t:c:d',
['workspace=', 'token=', 'clusterid=', 'delete'])
except getopt.GetoptError:
print(
'shutdown.py -s <workspace> -t <token> -c <clusterid>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print(
'shutdown.py -s <workspace> -t <token> -c <clusterid>')
sys.exit()
elif opt in ('-s', '--workspace'):
workspace = arg
elif opt in ('-t', '--token'):
token = arg
elif opt in ('-c', '--clusterid'):
clusterid = arg
elif opt in ('-d', '--delete'):
delete = True
print('-s is ' + workspace)
print('-c is ' + clusterid)
if not clusterid:
print("You must specify clusterid!")
sys.exit(1)
if not token:
print("You must specify token!")
sys.exit(1)
if delete:
ClusterUtils.delete_cluster(workspace, clusterid, token)
else:
ClusterUtils.terminate_cluster(workspace, clusterid, token)
if __name__ == '__main__':
main()
| spark-rapids-branch-23.10 | jenkins/databricks/shutdown.py |
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import requests
import sys
import getopt
import time
import os
import subprocess
from clusterutils import ClusterUtils
import params
def main():
master_addr = ClusterUtils.cluster_get_master_addr(params.workspace, params.clusterid, params.token)
if master_addr is None:
print("Error, didn't get master address")
sys.exit(1)
print("Master node address is: %s" % master_addr)
print("Copying script")
rsync_command = "rsync -I -Pave \"ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -p 2200 -i %s\" %s ubuntu@%s:%s" % (params.private_key_file, params.local_script, master_addr, params.script_dest)
print("rsync command: %s" % rsync_command)
subprocess.check_call(rsync_command, shell = True)
print("Copying source")
rsync_command = "rsync -I -Pave \"ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -p 2200 -i %s\" %s ubuntu@%s:%s" % (params.private_key_file, params.source_tgz, master_addr, params.tgz_dest)
print("rsync command: %s" % rsync_command)
subprocess.check_call(rsync_command, shell = True)
ssh_command = "ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ubuntu@%s -p 2200 -i %s " \
"'SPARKSRCTGZ=%s BASE_SPARK_VERSION=%s BASE_SPARK_VERSION_TO_INSTALL_DATABRICKS_JARS=%s MVN_OPT=%s \
bash %s %s 2>&1 | tee buildout; if [ `echo ${PIPESTATUS[0]}` -ne 0 ]; then false; else true; fi'" % \
(master_addr, params.private_key_file, params.tgz_dest, params.base_spark_pom_version, params.base_spark_version_to_install_databricks_jars, params.mvn_opt, params.script_dest, ' '.join(params.script_args))
print("ssh command: %s" % ssh_command)
subprocess.check_call(ssh_command, shell = True)
print("Copying built tarball back")
rsync_command = "rsync -I -Pave \"ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -p 2200 -i %s\" ubuntu@%s:/home/ubuntu/spark-rapids-built.tgz ./" % (params.private_key_file, master_addr)
print("rsync command to get built tarball: %s" % rsync_command)
subprocess.check_call(rsync_command, shell = True)
if __name__ == '__main__':
main()
| spark-rapids-branch-23.10 | jenkins/databricks/run-build.py |
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Upload & run test script on Databricks cluster."""
import subprocess
import sys
from clusterutils import ClusterUtils
import params
def main():
"""Define main function."""
master_addr = ClusterUtils.cluster_get_master_addr(params.workspace, params.clusterid, params.token)
if master_addr is None:
print("Error, didn't get master address")
sys.exit(1)
print("Master node address is: %s" % master_addr)
print("Copying script")
rsync_command = "rsync -I -Pave \"ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -p 2200 -i %s\"" \
" %s ubuntu@%s:%s" % (params.private_key_file, params.local_script, master_addr, params.script_dest)
print("rsync command: %s" % rsync_command)
subprocess.check_call(rsync_command, shell=True)
ssh_command = "ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ubuntu@%s -p 2200 -i %s " \
"'LOCAL_JAR_PATH=%s SPARK_CONF=%s BASE_SPARK_VERSION=%s bash %s %s 2>&1 | tee testout; " \
"if [ ${PIPESTATUS[0]} -ne 0 ]; then false; else true; fi'" % \
(master_addr, params.private_key_file, params.jar_path, params.spark_conf, params.base_spark_pom_version,
params.script_dest, ' '.join(params.script_args))
print("ssh command: %s" % ssh_command)
try:
subprocess.check_call(ssh_command, shell=True)
finally:
print("Copying test report tarball back")
report_path_prefix = params.jar_path if params.jar_path else "/home/ubuntu/spark-rapids"
rsync_command = "rsync -I -Pave \"ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -p 2200 -i %s\"" \
" ubuntu@%s:%s/integration_tests/target/run_dir*/TEST-pytest-*.xml ./" % \
(params.private_key_file, master_addr, report_path_prefix)
print("rsync command: %s" % rsync_command)
subprocess.check_call(rsync_command, shell = True)
if __name__ == '__main__':
main()
| spark-rapids-branch-23.10 | jenkins/databricks/run-tests.py |
#!/usr/bin/python3
import setuptools
with open('README.md', 'r') as file:
long_description = file.read()
setuptools.setup(
name = 'python-jetson',
version = '0.0.0',
author = 'Thierry Reding',
author_email = '[email protected]',
description = 'NVIDIA Jetson utilities',
long_description = long_description,
long_description_content_type = 'text/markdown',
    url = 'https://github.com/NVIDIA/python-jetson',
packages = setuptools.find_packages(),
classifiers = [
'Programming Language :: Python :: 3',
        'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
scripts = [
'bin/jetson-control',
'bin/jetson-demux',
],
package_dir = { '': '.' },
package_data = {
'jetson': [ ],
}
)
| python-jetson-master | setup.py |
#!/usr/bin/python3
import enum, string, sys
from pyftdi.ftdi import Ftdi
from pyftdi.i2c import I2cController
from pyftdi.usbtools import UsbTools, UsbToolsError
class cbus_func(enum.IntEnum):
CBUS_TXDEN = 0
CBUS_PWREN = 1
CBUS_RXLED = 2
CBUS_TXLED = 3
CBUS_TXRXLED = 4
CBUS_SLEEP = 5
CBUS_CLK48 = 6
CBUS_CLK24 = 7
CBUS_CLK12 = 8
CBUS_CLK6 = 9
CBUS_IOMODE = 10
CBUS_BB_WR = 11
CBUS_BB_RD = 12
CBUS_BB = 13
'''
Determines whether or not a byte can be printed. Note that this excludes
whitespace that doesn't expand to a single character, such as tab, newline
and carriage-return.
'''
def is_printable(byte):
if chr(byte) in string.printable:
if byte >= 32:
return True
return False
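# For example, is_printable(ord('A')) is True, while is_printable(ord('\t')) is
# False: tab is in string.printable but its byte value is below 32.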
'''
Prints a hexdump of the given array of bytes to a given file-like object. By
default, an ASCII representation of the hexdump will be printed next to the
hexadecimal values. This can be switched off by passing False for the
use_ascii keyword argument.
'''
def hexdump(data, use_ascii = True, file = sys.stdout):
if use_ascii:
dump = ' | '
offset = 0
for byte in data:
print(' %02x' % byte, end = '', file = file)
if use_ascii:
if is_printable(byte):
dump += chr(byte)
else:
dump += '.'
offset += 1
if offset % 16 == 0:
if use_ascii:
print(dump, end = '', file = file)
dump = ' | '
print('', file = file)
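# Example (hypothetical buffer): hexdump(bytes(range(0x30, 0x50))) prints two
# 16-byte rows of hex values followed by their ASCII rendering ('0123456789:;<=>?'
# and '@ABCDEFGHIJKLMNO'); pass use_ascii=False to print only the hex columns.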
'''
Base class for exceptions in this module.
'''
class Error(Exception):
pass
'''
Represents an FTDI device that can be controlled by this utility. A device
consists of an EEPROM that can be read and written as well as a set of
virtual buttons for which press and release events can be simulated.
'''
class Device():
'''
Represents a button backed by an I2C register. A specific bit in a given
register is used to control the status of the button.
'''
class I2cButton():
def __init__(self, port, name, config, output, bit):
self.name = name
self.port = port
self.config = config
self.output = output
self.mask = 1 << bit
def press(self):
value = self.port.read_from(self.config, 1)
value[0] &= ~self.mask
self.port.write_to(self.config, value)
value = self.port.read_from(self.output, 1)
value[0] &= ~self.mask
self.port.write_to(self.output, value)
def release(self):
value = self.port.read_from(self.config, 1)
value[0] |= self.mask
self.port.write_to(self.config, value)
value = self.port.read_from(self.output, 1)
value[0] |= self.mask
self.port.write_to(self.output, value)
'''
Represents a GPIO controller found in FTDI chips. These usually have 8 or
16 pins. The implementation currently assumes 8 pins.
'''
class GpioController():
'''
Represents a single pin in the GPIO controller.
'''
class Pin():
def __init__(self, gpio, pin):
self.gpio = gpio
self.pin = pin
def set(self, value):
self.gpio.set_output(self.pin, value)
def __init__(self, ftdi):
self.ftdi = ftdi
self.direction = 0xf3
value = self._read()
value &= 0xf0
value |= 0x03
self._write(value)
def _read(self):
command = bytes([ Ftdi.GET_BITS_LOW, Ftdi.SEND_IMMEDIATE ])
self.ftdi.write_data(command)
data = self.ftdi.read_data_bytes(1, 4)
return data[0]
def _write(self, value):
command = bytes([ Ftdi.SET_BITS_LOW, value, self.direction ])
self.ftdi.write_data(command)
def set_output(self, pin, value):
mask = 1 << (pin % 8)
self.direction |= mask
data = self._read()
if value:
data &= ~mask
else:
data |= mask
self._write(data)
class CBUSGpioController:
class Pin:
def __init__(self, gpio, pin):
self.gpio = gpio
self.pin = pin
gpio.direction_output(pin)
def set(self, value):
self.gpio.set(self.pin, value)
def __init__(self, ftdi):
self.ftdi = ftdi
# default all pins to input
self.pins = 0x00
def direction_input(self, pin):
mask = 1 << pin
self.pins &= ~(mask << 4)
def direction_output(self, pin):
mask = 1 << pin
self.pins |= mask << 4
def set(self, pin, value):
mask = 1 << pin
# configure pin direction to output
if not self.pins & (mask << 4):
print('pin %u is not configured as an output' % pin)
return
if not value:
self.pins &= ~mask
else:
self.pins |= mask
self.ftdi.set_bitmode(self.pins, Ftdi.BITMODE_CBUS)
'''
Represents a button backed by a GPIO pin.
'''
class GpioButton():
def __init__(self, gpio, pin, name):
self.gpio = gpio.Pin(gpio, pin)
self.name = name
def press(self):
self.gpio.set(1)
def release(self):
self.gpio.set(0)
'''
Represents the EEPROM found on an FTDI chip. This is typically 128 bytes,
but can be larger or smaller depending on the specific chip.
'''
class Eeprom():
class Descriptor():
def __init__(self, size):
self.vendor_id = None
self.product_id = None
self.release = None
self.manufacturer = None
self.product = None
self.serial = None
self.cbus = []
self.size = size
def write(self):
data = bytearray(self.size)
data[0x00] = 0x88
data[0x01] = 0x88
if self.vendor_id:
data[0x02] = (self.vendor_id >> 0) & 0xff
data[0x03] = (self.vendor_id >> 8) & 0xff
if self.product_id:
data[0x04] = (self.product_id >> 0) & 0xff
data[0x05] = (self.product_id >> 8) & 0xff
if self.release:
data[0x06] = (self.release >> 0) & 0xff
data[0x07] = (self.release >> 8) & 0xff
data[0x08] = 0x80
data[0x09] = 500 >> 1
data[0x0a] = 0x08
data[0x0b] = 0x00
data[0x0c] = 0x00
data[0x0d] = 0x00
offset = 0x1a
if self.manufacturer:
data[0x0e] = 0x80 | offset
data[0x0f] = len(self.manufacturer) * 2 + 2
data[offset + 0] = len(self.manufacturer) * 2 + 2
data[offset + 1] = 0x03
offset += 2
for byte in self.manufacturer.encode('ASCII'):
data[offset + 0] = byte
data[offset + 1] = 0x00
offset += 2
if self.product:
data[0x10] = 0x80 | offset
data[0x11] = len(self.product) * 2 + 2
data[offset + 0] = len(self.product) * 2 + 2
data[offset + 1] = 0x03
offset += 2
for byte in self.product.encode('ASCII'):
data[offset + 0] = byte
data[offset + 1] = 0x00
offset += 2
if self.serial:
data[0x12] = 0x80 | offset
data[0x13] = len(self.serial) * 2 + 2
data[offset + 0] = len(self.serial) * 2 + 2
data[offset + 1] = 0x03
offset += 2
for byte in self.serial.encode('ASCII'):
data[offset + 0] = byte
data[offset + 1] = 0x00
offset += 2
data[offset + 0] = 0x02
data[offset + 1] = 0x03
offset += 2
checksum = Device.Eeprom.checksum(data)
data[-1] = (checksum >> 8) & 0xff
data[-2] = (checksum >> 0) & 0xff
return data
def __init__(self, ftdi):
self.ftdi = ftdi
# XXX parameterize based on chip
self.size = 128
self.valid = False
self.data = []
@staticmethod
def parse_string(data, offset, length):
length = data[offset]
type = data[offset + 1]
start = offset + 2
end = start + length - 2
return data[start:end:2].decode('ASCII')
@staticmethod
def parse(data):
desc = Device.Eeprom.Descriptor(len(data))
desc.vendor_id = (data[0x03] << 8) | data[0x02]
desc.product_id = (data[0x05] << 8) | data[0x04]
desc.release = (data[0x07] << 8) | data[0x06]
offset = data[0x0e] & 0x7f
length = data[0x0f] / 2
desc.manufacturer = Device.Eeprom.parse_string(data, offset, length)
offset = data[0x10] & 0x7f
length = data[0x11] / 2
desc.product = Device.Eeprom.parse_string(data, offset, length)
offset = data[0x12] & 0x7f
length = data[0x13] / 2
desc.cbus.append(cbus_func(data[0x14] & 0xf))
desc.cbus.append(cbus_func(data[0x14] >> 4))
desc.cbus.append(cbus_func(data[0x15] & 0xf))
desc.cbus.append(cbus_func(data[0x15] >> 4))
desc.cbus.append(cbus_func(data[0x16] & 0xf))
desc.serial = Device.Eeprom.parse_string(data, offset, length)
return desc
@staticmethod
def checksum(data):
checksum = 0xaaaa
i = 0
while i < len(data) - 2:
checksum ^= (data[i + 1] << 8) | data[i]
checksum &= 0xffff
checksum = (checksum << 1) | (checksum >> 15)
checksum &= 0xffff
i += 2
return checksum
def read(self):
self.data = bytearray()
self.erased = True
ep = self.ftdi.usb_dev
offset = 0
while offset < self.size / 2:
data = ep.ctrl_transfer(Ftdi.REQ_IN, Ftdi.SIO_REQ_READ_EEPROM, 0,
offset, 2, self.ftdi._usb_read_timeout)
if not data:
break
if data[0] != 0xff or data[1] != 0xff:
self.erased = False
self.data.extend(data)
offset += 1
checksum = Device.Eeprom.checksum(self.data)
verify = (data[1] << 8) | data[0]
if checksum != verify:
if not self.erased:
print('checksum error: expected %04x, got %04x' %
(checksum, verify))
self.valid = False
else:
self.valid = True
return self.data
def save(self, data, output):
desc = Device.Eeprom.parse(data)
output.write(desc.write())
def write(self, data):
desc = Device.Eeprom.parse(data)
ep = self.ftdi.usb_dev
offset = 0
checksum = Device.Eeprom.checksum(data)
verify = (data[-1] << 8) | data[-2]
if checksum != verify:
print('checksum error: expected %04x, got %04x' % (checksum, verify))
sys.exit(1)
while offset < self.size / 2:
value = (data[offset * 2 + 1] << 8) | data[offset * 2 + 0]
ep.ctrl_transfer(Ftdi.REQ_OUT, Ftdi.SIO_REQ_WRITE_EEPROM, value,
offset, 2, self.ftdi.usb_write_timeout)
offset += 1
def erase(self):
ep = self.ftdi.usb_dev
ep.ctrl_transfer(Ftdi.REQ_OUT, Ftdi.SIO_REQ_ERASE_EEPROM, 0, 0, 0,
self.ftdi.usb_write_timeout)
def show(self, file = sys.stdout):
desc = Device.Eeprom.parse(self.data)
print('Vendor: %04x' % desc.vendor_id, file = file)
print('Product: %04x' % desc.product_id, file = file)
print('Release: %04x' % desc.release, file = file)
print('Manufacturer:', desc.manufacturer, file = file)
print('Product:', desc.product, file = file)
print('Serial:', desc.serial, file = file)
print('CBUS:')
print(' 0:', desc.cbus[0])
print(' 1:', desc.cbus[1])
print(' 2:', desc.cbus[2])
print(' 3:', desc.cbus[3])
print(' 4:', desc.cbus[4])
def dump(self, use_ascii = True, file = sys.stdout):
hexdump(self.data, use_ascii = use_ascii, file = file)
class PowerRail():
def __init__(self, port, name, config, input, bit):
self.port = port
self.name = name
self.config = config
self.input = input
self.bit = bit
def status(self):
value = self.port.read_from(self.config, 1)
            value[0] |= 0xc0
self.port.write_to(self.config, value)
value = self.port.read_from(self.input, 1)
if value[0] & 1 << self.bit:
return False
else:
return True
def __init__(self, ftdi, device, interface):
self.ftdi = ftdi
self.device = device
self.interface = interface
self.buttons = []
self.rails = []
# XXX this is destructive, the device cannot be normally used after this
def unlock(self):
self.ftdi.open_from_device(self.device, self.interface)
self.ftdi.poll_modem_status()
self.ftdi.set_latency_timer(0x77)
class PM342(Device):
def __init__(self, ftdi, device, interface):
super().__init__(ftdi, device, interface)
self.ftdi.open_mpsse_from_device(device, interface)
self.gpio = Device.GpioController(self.ftdi)
self.eeprom = Device.Eeprom(self.ftdi)
self.i2c = I2cController()
self.i2c.set_retry_count(1)
self.i2c.configure(device)
port = self.i2c.get_port(0x74)
self.power = Device.I2cButton(port, "power", 0x7, 0x3, 4)
self.buttons.append(self.power)
self.reset = Device.I2cButton(port, "reset", 0x6, 0x2, 3)
self.buttons.append(self.reset)
self.recovery = Device.I2cButton(port, "recovery", 0x7, 0x3, 3)
self.buttons.append(self.recovery)
self.force = Device.GpioButton(self.gpio, 6, "force-off")
self.buttons.append(self.force)
self.core = Device.PowerRail(port, "core", 0x7, 0x1, 6)
self.rails.append(self.core)
self.cpu = Device.PowerRail(port, "cpu", 0x7, 0x1, 7)
self.rails.append(self.cpu)
class NanoDebug(Device):
def __init__(self, ftdi, device, interface):
super().__init__(ftdi, device, interface)
self.ftdi.open_bitbang_from_device(device, interface, direction = 0x0,
latency = 16)
self.gpio = Device.CBUSGpioController(self.ftdi)
self.eeprom = Device.Eeprom(self.ftdi)
self.reset = Device.GpioButton(self.gpio, 0, 'reset')
self.buttons.append(self.reset)
self.recovery = Device.GpioButton(self.gpio, 1, 'recovery')
self.buttons.append(self.recovery)
self.power = Device.GpioButton(self.gpio, 3, 'power')
self.buttons.append(self.power)
def open(url):
ftdi = Ftdi()
desc, interface = ftdi.get_identifiers(url)
dev = UsbTools.get_device(desc)
if desc.vid == 0x0403 and desc.pid == 0x6011:
return PM342(ftdi, dev, interface)
if desc.vid == 0x0403 and desc.pid == 0x6015:
return NanoDebug(ftdi, dev, interface)
raise Exception('Unsupported device %04x:%04x' % (desc.vid, desc.pid))
class Product():
def __init__(self, descriptor):
self.vid = descriptor.vid
self.pid = descriptor.pid
self.bus = descriptor.bus
self.address = descriptor.address
self.serial = descriptor.sn
self.index = descriptor.index
self.description = descriptor.description
def find():
supported = {
'pm342': ( 0x0403, 0x6011 ),
'nano': ( 0x0403, 0x6015 )
}
vps = supported.values()
products = []
for descriptor, interfaces in Ftdi.find_all(vps):
product = Product(descriptor)
products.append(product)
return products
# vim: set et sw=4 sts=4 ts=4:
| python-jetson-master | jetson/ftdi.py |
#!/usr/bin/python3
| python-jetson-master | jetson/__init__.py |
#!/usr/bin/python3
import binascii, fcntl, os, pty, struct, termios, tty
import selectors, time
tags = {
0xe5: 'RCE',
0xe2: 'BPMP',
0xe3: 'SCE',
0xe0: 'SPE',
0xe4: 'TZ',
0xe1: 'CCPLEX',
}
'''
Represents a TCU stream that is identified by its tag. A pseudo terminal is
created for each stream and a terminal emulator can connect to the pseudo
terminal slave to send and receive data on that stream.
'''
class Stream:
'''
Initializes the stream given its name and tag. Opens a pseudo terminal
and stores the path to the slave so that it can be reported to the user
and passed to a terminal emulator.
'''
def __init__(self, name, tag):
self.name = name
self.tag = tag
self.master, self.slave = pty.openpty()
self.path = os.ttyname(self.slave)
'''
Returns the file descriptor of the PTY master. This is required in order
for objects of this class to behave file-like.
'''
def fileno(self):
return self.master
'''
Writes a string of characters to the pseudo terminal of this stream.
'''
def write(self, data):
return os.write(self.master, data)
'''
Reads a string of characters from the pseudo terminal of this stream.
'''
def read(self, count):
return os.read(self.master, count)
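# Usage sketch (hypothetical): s = Stream('CCPLEX', 0xe1) allocates a PTY pair;
# s.path names the slave device (e.g. /dev/pts/3) that a terminal emulator can
# attach to, while the demuxer moves bytes through the master via s.read()/s.write().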
'''
Small helper class to deal with TTYs.
'''
class TTY:
'''
Creates a TTY object for the TTY specified by the given path.
'''
def __init__(self, path, baudrate = 115200):
self.fd = os.open(path, os.O_RDWR | os.O_NOCTTY | os.O_NONBLOCK)
'''
    Returns the file descriptor of the TTY. This is required in order for
    objects of this class to behave like file objects.
'''
def fileno(self):
return self.fd
'''
Sets the baud rate of the TTY.
'''
def set_baud_rate(self, baud_rate):
attrs = termios.tcgetattr(self.fd)
if baud_rate == 115200:
attr = termios.B115200
else:
raise Exception('Unsupported baud rate: %u' % baud_rate)
attrs[4] = attr
attrs[5] = attr
termios.tcsetattr(self.fd, termios.TCSANOW, attrs)
'''
Enables raw mode on the TTY.
'''
def set_raw(self):
tty.setraw(self.fd)
'''
Establishes an exclusive (write) lock for the TTY.
'''
def lock(self):
args = struct.pack('hhllhhl', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0)
fcntl.fcntl(self.fd, fcntl.F_SETLK, args)
'''
Write a string of characters to the TTY.
'''
def write(self, data):
return os.write(self.fd, data)
'''
Read a string of characters from the TTY.
'''
def read(self, count):
return os.read(self.fd, count)
'''
Implements a demuxer for the Tegra Combined UART.
'''
class Demux:
'''
Creates a demuxer for the Tegra Combined UART found on the given TTY.
'''
def __init__(self, path):
self.tty = TTY(path)
self.streams = { }
self.escape = False
self.output = None
self.input = None
self.tty.set_baud_rate(115200)
self.tty.set_raw()
self.tty.lock()
for tag, name in tags.items():
stream = Stream(name, tag)
self.streams[tag] = stream
'''
Returns a dict object containing the TCU streams.
'''
def get_streams(self):
streams = {}
for stream in self.streams.values():
streams[stream.name] = stream.path
return streams
'''
Requests a reset of the TCU streams.
'''
def reset(self):
data = bytes([0xff, 0xfd])
self.tty.write(data)
'''
Processes output events from the TTY.
'''
def process_output(self, fileobj, mask):
if mask & selectors.EVENT_READ:
data = self.tty.read(4096)
for byte in data:
if self.escape:
if byte in self.streams:
stream = self.streams[byte]
if stream != self.output:
self.output = stream
else:
if byte == 0xfd:
print('TODO: implement reset')
else:
print('unhandled command: %02x' % byte)
self.escape = False
continue
if byte == 0xff:
self.escape = True
continue
if not self.output:
print('ERROR: data received but no stream is active: %02x' % byte)
continue
self.output.write(bytes([byte]))
'''
Processes input events for data coming from the pseudo terminal slaves.
'''
def process_input(self, fileobj, mask):
if mask & selectors.EVENT_READ:
if self.input != fileobj:
data = bytes([0xff, fileobj.tag])
self.tty.write(data)
self.input = fileobj
data = fileobj.read(4096)
try:
self.tty.write(data)
except:
time.sleep(1)
self.tty.write(data)
'''
Sets up I/O multiplexing for the TTY and all pseudo terminal slaves with
the given selector for use in an application's event loop.
'''
def select(self, selector):
for stream in self.streams.values():
selector.register(stream, selectors.EVENT_READ, self.process_input)
selector.register(self.tty, selectors.EVENT_READ, self.process_output)
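# A minimal event-loop sketch using Demux (the device path below is a hypothetical
# example; use the TTY your TCU is attached to):
#   import selectors
#   from jetson.tcu import Demux
#   demux = Demux('/dev/ttyUSB0')
#   print(demux.get_streams())           # stream name -> PTY slave path
#   sel = selectors.DefaultSelector()
#   demux.select(sel)
#   while True:
#       for key, mask in sel.select():
#           key.data(key.fileobj, mask)  # dispatches to process_input/process_output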
| python-jetson-master | jetson/tcu.py |
import logging
import pandas
from rdkit import Chem
from rdkit.Chem import QED, Descriptors, Lipinski
from cuchem.wf.generative import MolBART
from tests.utils import _create_context
from cuchem.decorator import LipinskiRuleOfFiveDecorator
import dask.dataframe as dd
import multiprocessing
logger = logging.getLogger(__name__)
MAX_LOGP = 3
MAX_MOL_WT = 300
MAX_H_DONORS = 6
MAX_H_ACCEPTORS = 6
MAX_ROTATABLE_BONDS = 3
MAX_QED = 3
def score_molecule(smiles):
lipinski_score = 0
qed = LipinskiRuleOfFiveDecorator.MAX_QED + 1
try:
m = Chem.MolFromSmiles(smiles)
logp = Descriptors.MolLogP(m)
lipinski_score += 1 if logp < LipinskiRuleOfFiveDecorator.MAX_LOGP else 0
wt = Descriptors.MolWt(m)
lipinski_score += 1 if wt < LipinskiRuleOfFiveDecorator.MAX_MOL_WT else 0
hdonor = Lipinski.NumHDonors(m)
lipinski_score += 1 if hdonor < LipinskiRuleOfFiveDecorator.MAX_H_DONORS else 0
hacceptor = Lipinski.NumHAcceptors(m)
lipinski_score += 1 if hacceptor < LipinskiRuleOfFiveDecorator.MAX_H_DONORS else 0
rotatable_bond = Lipinski.NumRotatableBonds(m)
lipinski_score += 1 if rotatable_bond < LipinskiRuleOfFiveDecorator.MAX_ROTATABLE_BONDS else 0
qed = QED.qed(m)
except Exception as ex:
lipinski_score = 0
logger.exception(ex)
return lipinski_score, qed
def generate():
wf = MolBART()
num_to_add = 21
def _generate(data):
smiles = data['canonical_smiles']
lipinski_score, qed = score_molecule(smiles)
num_to_generate = 40
lipinski_scores = []
qed_scores = []
valid_list = []
try:
if lipinski_score >= 3 and qed <= LipinskiRuleOfFiveDecorator.MAX_QED:
generated_list = wf.find_similars_smiles_list(smiles,
num_requested=num_to_generate,
radius=0.0001)
for new_smiles in generated_list:
lipinski_score, qed = score_molecule(new_smiles)
if lipinski_score >= 3 and qed <= LipinskiRuleOfFiveDecorator.MAX_QED:
valid_list.append(new_smiles)
lipinski_scores.append(lipinski_score)
qed_scores.append(qed)
if len(valid_list) >= num_to_add:
break
except Exception as ex:
pass
valid_list += [''] * ((num_to_add) - len(valid_list))
lipinski_scores += [0] * (num_to_add - len(lipinski_scores))
qed_scores += [0] * (num_to_add - len(qed_scores))
return valid_list + lipinski_scores + qed_scores
data = pandas.read_csv('/workspace/tests/data/benchmark_approved_drugs.csv')
prop_meta = dict(zip([i for i in range(num_to_add)],
[pandas.Series([], dtype='object') for i in range(num_to_add)]))
prop_meta.update(dict(zip([num_to_add + i for i in range(num_to_add)],
[pandas.Series([], dtype='int8') for i in range(num_to_add)])))
prop_meta.update(dict(zip([(2 * num_to_add) + i for i in range(num_to_add)],
[pandas.Series([], dtype='float64') for i in range(num_to_add)])))
meta_df = pandas.DataFrame(prop_meta)
_create_context()
ddf = dd.from_pandas(data, npartitions=4 * multiprocessing.cpu_count())
ddf = ddf.map_partitions(
lambda dframe: dframe.apply(_generate, result_type='expand', axis=1),
meta=meta_df)
ddf = ddf.compute(scheduler='processes')
ddf.to_csv("/workspace/similar_mols.csv")
if __name__ == '__main__':
generate()
| cheminformatics-master | misc/generate_mols.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from tritonclient.utils import *
import tritonclient.grpc as grpcclient
import tritonclient.http as httpclient
import numpy as np
model_name = "molbart"
smiles = 'CN1C=NC2=C1C(=O)N(C(=O)N2C)C'
with httpclient.InferenceServerClient("localhost:8000") as client:
input0_data = np.array([smiles]).astype(np.object)
inputs = [
httpclient.InferInput("INPUT0", input0_data.shape,
np_to_triton_dtype(input0_data.dtype)),
]
inputs[0].set_data_from_numpy(input0_data)
outputs = [
httpclient.InferRequestedOutput("OUTPUT0"),
]
response = client.infer(model_name,
inputs,
request_id=str(1),
outputs=outputs)
result = response.get_response()
print("INPUT0 ({}) + = OUTPUT0 ({})".format(
input0_data, response.as_numpy("OUTPUT0"))) | cheminformatics-master | misc/triton/molbart/client.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import logging
import os
import pickle
import shutil
import sys
from functools import partial, singledispatch
from pathlib import Path
from subprocess import run, Popen
from typing import List
import numpy as np
import pandas as pd
import torch
import torch.nn
from rdkit import Chem
from rdkit.Chem import Draw, PandasTools
import triton_python_backend_utils as pb_utils
CDDD_DEFAULT_MODEL_LOC = '/models/cddd'
@singledispatch
def add_jitter(embedding, radius, cnt):
return NotImplemented
@add_jitter.register(np.ndarray)
def _(embedding, radius, cnt):
noise = np.random.normal(0, radius, (cnt,) + embedding.shape)
return noise + embedding
@add_jitter.register(torch.Tensor)
def _(embedding, radius, cnt):
permuted_emb = embedding.permute(1, 0, 2)
noise = torch.normal(0, radius, (cnt,) + permuted_emb.shape[1:]).to(embedding.device)
return noise + permuted_emb
class TritonPythonModel:
def initialize(self, args):
# You must parse model_config. JSON string is not parsed here
self.model_config = model_config = json.loads(args['model_config'])
# Get OUTPUT0 configuration
output0_config = pb_utils.get_output_config_by_name(
model_config, "OUTPUT0")
# Convert Triton types to numpy types
self.output0_dtype = pb_utils.triton_string_to_numpy(
output0_config['data_type'])
max_seq_len = 64
self.download_cddd_models()
tokenizer_path = '/models/molbart/mol_opt_tokeniser.pickle'
model_chk_path = '/models/molbart/az_molbart_pretrain.ckpt'
if torch.cuda.is_available():
self.device = 'cuda'
else:
self.device = 'cpu'
self.tokenizer = self.load_tokenizer(tokenizer_path)
self.bart_model = self.load_model(model_chk_path, self.tokenizer, max_seq_len)
self.bart_model.to('cuda')
def execute(self, requests):
output0_dtype = self.output0_dtype
responses = []
for request in requests:
in_0 = pb_utils.get_input_tensor_by_name(request, "INPUT0")
input_smiles = in_0.as_numpy()[0].decode()
print('processing', input_smiles)
generated_smiles, neighboring_embeddings, pad_mask = \
self.find_similars_smiles_list(input_smiles,
num_requested=10,
force_unique=True)
out_0 = np.array(generated_smiles).astype(np.object)
out_tensor_0 = pb_utils.Tensor("OUTPUT0",
out_0.astype(output0_dtype))
# pb_utils.InferenceResponse(
# output_tensors=..., TritonError("An error occured"))
inference_response = pb_utils.InferenceResponse(
output_tensors=[out_tensor_0])
responses.append(inference_response)
return responses
def finalize(self):
"""`finalize` is called only once when the model is being unloaded.
Implementing `finalize` function is OPTIONAL. This function allows
the model to perform any necessary clean ups before exit.
"""
print('Cleaning up...')
def load_tokenizer(self, tokenizer_path):
"""Load pickled tokenizer
Params:
tokenizer_path: str, path to pickled tokenizer
Returns:
MolEncTokenizer tokenizer object
"""
tokenizer_path = Path(tokenizer_path)
with open(tokenizer_path, 'rb') as fh:
tokenizer = pickle.load(fh)
return tokenizer
def load_model(self, model_checkpoint_path, tokenizer, max_seq_len):
"""Load saved model checkpoint
Params:
model_checkpoint_path: str, path to saved model checkpoint
tokenizer: MolEncTokenizer tokenizer object
max_seq_len: int, maximum sequence length
Returns:
MolBART trained model
"""
from molbart.models import BARTModel
from molbart.decode import DecodeSampler
sampler = DecodeSampler(tokenizer, max_seq_len)
pad_token_idx = tokenizer.vocab[tokenizer.pad_token]
bart_model = BARTModel.load_from_checkpoint(model_checkpoint_path,
decode_sampler=sampler,
pad_token_idx=pad_token_idx)
bart_model.sampler.device = "cuda"
return bart_model.cuda()
def smiles2embedding(self, bart_model, smiles, tokenizer, pad_length=None):
"""Calculate embedding and padding mask for smiles with optional extra padding
Params
smiles: string, input SMILES molecule
tokenizer: MolEncTokeniser tokenizer object
pad_length: optional extra
Returns
embedding array and boolean mask
"""
assert isinstance(smiles, str)
if pad_length:
assert pad_length >= len(smiles) + 2
tokens = tokenizer.tokenise([smiles], pad=True)
# Append to tokens and mask if appropriate
if pad_length:
for i in range(len(tokens['original_tokens'])):
n_pad = pad_length - len(tokens['original_tokens'][i])
tokens['original_tokens'][i] += [tokenizer.pad_token] * n_pad
tokens['pad_masks'][i] += [1] * n_pad
token_ids = torch.tensor(tokenizer.convert_tokens_to_ids(tokens['original_tokens'])).cuda().T
pad_mask = torch.tensor(tokens['pad_masks']).bool().cuda().T
encode_input = {"encoder_input": token_ids, "encoder_pad_mask": pad_mask}
embedding = bart_model.encode(encode_input)
torch.cuda.empty_cache()
return embedding, pad_mask
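    # Example call from within this class (caffeine SMILES; assumes the model and
    # tokenizer were loaded in initialize()):
    #   embedding, pad_mask = self.smiles2embedding(self.bart_model,
    #                                               'CN1C=NC2=C1C(=O)N(C(=O)N2C)C',
    #                                               self.tokenizer)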
def inverse_transform(self, embeddings, k=1, mem_pad_mask=None):
smiles_interp_list = []
batch_size = 1 # TODO: parallelize this loop as a batch
for memory in embeddings:
decode_fn = partial(self.bart_model._decode_fn,
mem_pad_mask=mem_pad_mask,
memory=memory)
mol_strs, log_lhs = self.bart_model.sampler.beam_decode(decode_fn,
batch_size=batch_size,
k=k)
mol_strs = sum(mol_strs, []) # flatten list
for smiles in mol_strs:
mol = Chem.MolFromSmiles(smiles)
if (mol is not None) and (smiles not in smiles_interp_list):
smiles_interp_list.append(smiles)
break
return smiles_interp_list
def find_similars_smiles_list(self,
smiles:str,
num_requested:int=10,
radius=0.0001,
force_unique=False):
embedding, pad_mask = self.smiles2embedding(self.bart_model,
smiles,
self.tokenizer)
neighboring_embeddings = self.addjitter(embedding, radius, cnt=num_requested)
generated_mols = self.inverse_transform(neighboring_embeddings, k=1, mem_pad_mask=pad_mask.bool().cuda())
generated_mols = [smiles] + generated_mols
return generated_mols, neighboring_embeddings, pad_mask
    def download_cddd_models(self, target_dir=CDDD_DEFAULT_MODEL_LOC):
"""
Downloads CDDD model
"""
if os.path.exists(os.path.join(target_dir, 'default_model', 'hparams.json')):
print('Directory already exists. To re-download please delete', target_dir)
return os.path.join(target_dir, 'default_model')
else:
shutil.rmtree(os.path.join(target_dir, 'default_model'), ignore_errors=True)
download_script = '/opt/cddd/download_default_model.sh'
if not os.path.exists(download_script):
download_script = '/tmp/download_default_model.sh'
run(['bash', '-c',
'wget --quiet -O %s https://raw.githubusercontent.com/jrwnter/cddd/master/download_default_model.sh && chmod +x %s' % (download_script, download_script)])
run(['bash', '-c',
'mkdir -p %s && cd %s; %s' % (target_dir, target_dir, download_script)],
check=True)
return os.path.join(target_dir, 'default_model')
def addjitter(self,
embedding,
radius,
cnt=1):
return add_jitter(embedding, radius, cnt)
def _compute_radius(self, scaled_radius):
if scaled_radius:
return float(scaled_radius * self.min_jitter_radius)
else:
return self.min_jitter_radius
def compute_unique_smiles(self,
interp_df,
embeddings,
embedding_funct,
scaled_radius=0.5):
"""
Identify duplicate SMILES and distorts the embedding. The input df
must have columns 'SMILES' and 'Generated' at 0th and 1st position.
'Generated' colunm must contain boolean to classify SMILES into input
SMILES(False) and generated SMILES(True).
This function does not make any assumptions about order of embeddings.
Instead it simply orders the df by SMILES to identify the duplicates.
"""
distance = self._compute_radius(scaled_radius)
for i in range(5):
smiles = interp_df['SMILES'].sort_values()
duplicates = set()
for idx in range(0, smiles.shape[0] - 1):
if smiles.iat[idx] == smiles.iat[idx + 1]:
duplicates.add(smiles.index[idx])
duplicates.add(smiles.index[idx + 1])
if len(duplicates) > 0:
for dup_idx in duplicates:
if interp_df.iat[dup_idx, 1]:
# add jitter to generated molecules only
embeddings[dup_idx] = self.addjitter(
embeddings[dup_idx], distance, 1)
smiles = embedding_funct(embeddings)
else:
break
# Ensure all generated molecules are valid.
for i in range(5):
PandasTools.AddMoleculeColumnToFrame(interp_df,'SMILES')
invalid_mol_df = interp_df[interp_df['ROMol'].isnull()]
if not invalid_mol_df.empty:
invalid_index = invalid_mol_df.index.to_list()
for idx in invalid_index:
embeddings[idx] = self.addjitter(embeddings[idx],
distance,
cnt=1)
smiles = embedding_funct(embeddings)
else:
break
# Cleanup
if 'ROMol' in interp_df.columns:
interp_df = interp_df.drop('ROMol', axis=1)
return interp_df | cheminformatics-master | misc/triton/molbart/model.py |
#!/usr/bin/env python
#
# Copyright 2019-2020 NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Script to check for NVIDIA Copyright headers in source files and
optionally add the copyright header to them.
"""
import argparse
import io
import os
import re
import stat
from subprocess import check_output
import sys
cpp_exts = {".hpp", ".cpp", ".cu", ".cuh", ".cc", ".h", ".c"}
cpp_copyright = r"""/*
* Copyright 2019-2020 NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
"""
other_copyright = r"""#
# Copyright 2019-2020 NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
def get_tracked_files(repo_root):
"""
Get list of files tracked by git.
Args:
repo_root - Root folder for git
"""
os.chdir(repo_root)
branch = check_output(["git", "rev-parse", "--abbrev-ref", "HEAD"]).decode('utf-8')
branch = branch.replace('\n', '')
filelist = check_output(["git", "ls-tree", "-r", branch, "--name-only"])
tracked_files = list()
for tracked_file in filelist.splitlines():
tracked_files.append(os.path.join(repo_root, tracked_file.decode('utf-8')))
return tracked_files
def filter_files(files, regex):
"""
Filter a list of file names by removing entries that match
the regular expression passed in.
Args:
files - List of file names
regex - Regular expression for filtering files
"""
filtered_files = []
for fname in files:
if (not os.path.isfile(fname)):
continue
elif (not re.search(regex, fname)):
filtered_files.append(fname)
return filtered_files
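# A small usage sketch (hypothetical paths): filter_files(files, r"LICENSE|README")
# keeps ordinary source files that exist on disk and drops entries such as
# '<repo>/LICENSE' or '<repo>/README.md'.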
def add_copyright(f):
"""
Add copyright header to the file passed in.
Args:
f - Path to file
"""
extension = os.path.splitext(f)[1]
original_permissions = os.stat(f).st_mode
temp_f = f + ".copyright"
with open(temp_f, "w") as myfile:
copyright_text = cpp_copyright if extension in cpp_exts else other_copyright
with open(f, "r") as orig:
line = orig.readline()
if (line and '#!' in line):
# If file is a script and starts
# with #!, then keep that line
# as the first line and insert header
# after that.
myfile.write(line)
myfile.write("\n")
myfile.write(copyright_text)
line = orig.readline()
else:
myfile.write(copyright_text)
while line:
myfile.write(line)
line = orig.readline()
os.rename(temp_f, f)
    # Restore the original file permissions
os.chmod(f, stat.S_IMODE(original_permissions))
def copyright_present(f):
"""
Check if file already has copyright header.
Args:
f - Path to file
"""
with io.open(f, "r", encoding="utf-8") as fh:
return re.search('Copyright (20[0-9][0-9]-)?20[0-9][0-9] NVIDIA CORPORATION', fh.read())
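# For example, copyright_present matches headers containing either a year range or a
# single year, e.g. "Copyright 2019-2020 NVIDIA CORPORATION" or
# "Copyright 2020 NVIDIA CORPORATION", but not third-party copyright notices.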
def parse_args():
"""
Parse command line arguments.
"""
parser = argparse.ArgumentParser(
description="Add NVIDIA copyright headers to source files")
parser.add_argument('--add-header',
help='Add copyright header to the files which do not have it',
action='store_true')
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
# Get list of files to check.
script_dir = os.path.dirname(os.path.realpath(__file__))
root_dir = os.path.abspath(os.path.dirname(os.path.dirname(script_dir)))
files = get_tracked_files(root_dir)
files = filter_files(files, r"LICENSE|README|VERSION|data\/|docs\/")
    # Get the list of files missing copyright headers.
missing_header_files = [f for f in files if not copyright_present(f)]
# Check/Add headers if need be.
if (args.add_header):
[add_copyright(f) for f in missing_header_files]
else:
if (len(missing_header_files) > 0):
print("List of files missing copyright headers - ")
print("\n".join(missing_header_files))
sys.exit(1)
| cheminformatics-master | ci/checks/check_copyright.py |
#!/usr/bin/env python3
#
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import atexit
import logging
import warnings
import argparse
from datetime import datetime
from dask.distributed import Client, LocalCluster
from cuchemcommon.context import Context
from cuchemcommon.data.helper.chembldata import ChEmblData
from cuchemcommon.data.cluster_wf import FINGER_PRINT_FILES
from cuchemcommon.fingerprint import MorganFingerprint, Embeddings
from cuchemcommon.utils.logger import initialize_logfile, log_results
from cuchem.utils.dask import initialize_cluster
warnings.filterwarnings('ignore', 'Expected ')
warnings.simplefilter('ignore')
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('cuchem.cheminformatics')
formatter = logging.Formatter(
'%(asctime)s %(name)s [%(levelname)s]: %(message)s')
client = None
cluster = None
@atexit.register
def closing():
if cluster:
cluster.close()
if client:
client.close()
class Launcher(object):
"""
    Application launcher. This class can execute the workflows headless (for
    benchmarking and testing) or with a UI.
"""
def __init__(self):
parser = argparse.ArgumentParser(
description='Nvidia Cheminformatics',
usage='''
start <command> [<args>]
The following commands are supported:
    cache    : Create the fingerprint cache
    analyze  : Run clustering analysis (interactive visualization or benchmark)
    service  : Start in service mode
    grpc     : Start the gRPC service
To run the analysis:
    ./start analyze
To create the cache:
    ./start cache
To start the service:
    ./start service
''')
parser.add_argument('command', help='Subcommand to run')
args = parser.parse_args(sys.argv[1:2])
if not hasattr(self, args.command):
print('Unrecognized command')
parser.print_help()
exit(1)
getattr(self, args.command)()
def cache(self):
"""
Create Cache
"""
context = Context()
data_dir = context.get_config('data_mount_path', default='/data')
cache_dir = os.path.join(data_dir, 'cache')
parser = argparse.ArgumentParser(description='Create cache')
parser.add_argument('-ct', '--cache_type',
dest='cache_type',
type=str,
default='MorganFingerprint',
choices=['MorganFingerprint','Embeddings'],
help='Type of data preprocessing (MorganFingerprint or Embeddings)')
parser.add_argument('-c', '--cache_directory',
dest='cache_directory',
type=str,
default=cache_dir,
help='Location to create fingerprint cache')
parser.add_argument('--batch_size',
dest='batch_size',
type=int,
default=100000,
help='Chunksize.')
parser.add_argument('--n_cpu',
dest='n_cpu',
type=int,
default=12,
help='Number of CPU workers to use')
parser.add_argument('-d', '--debug',
dest='debug',
action='store_true',
default=False,
help='Show debug message')
parser.add_argument('-m', '--n_mol',
dest='n_mol',
type=int,
default=-1,
help='Number of molecules for analysis. Use negative numbers for using the whole dataset.')
args = parser.parse_args(sys.argv[2:])
if args.debug:
logger.setLevel(logging.DEBUG)
cluster = LocalCluster(dashboard_address=':9001',
n_workers=args.n_cpu,
threads_per_worker=4)
client = Client(cluster)
with client:
task_start_time = datetime.now()
if not os.path.exists(args.cache_directory):
logger.info('Creating folder %s...' % args.cache_directory)
os.makedirs(args.cache_directory)
            if args.cache_type == 'MorganFingerprint':
                preprocess_type = MorganFingerprint
            elif args.cache_type == 'Embeddings':
                preprocess_type = Embeddings
            chem_data = ChEmblData(fp_type=preprocess_type)
chem_data.save_fingerprints(
os.path.join(args.cache_directory, FINGER_PRINT_FILES), num_recs = args.n_mol,
batch_size=args.batch_size)
logger.info('Fingerprint generated in (hh:mm:ss.ms) {}'.format(
datetime.now() - task_start_time))
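    # Example invocation (a sketch; flags drawn from the parser above, using the
    # repo's ./start wrapper):
    #   ./start cache --cache_type MorganFingerprint --n_cpu 8 -m 100000
    # writes fingerprint chunks under <data_mount_path>/cache by default.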
def service(self):
"""
Start services
"""
parser = argparse.ArgumentParser(description='Service')
parser.add_argument('-d', '--debug',
dest='debug',
action='store_true',
default=False,
help='Show debug message')
args = parser.parse_args(sys.argv[2:])
if args.debug:
logger.setLevel(logging.DEBUG)
from waitress import serve
from api import app
Context()
# port = context.get_config('plotly_port', 6000)
port = 8081
serve(app, host='0.0.0.0', port=port)
def grpc(self):
"""
        Start the gRPC service
"""
parser = argparse.ArgumentParser(description='Service')
parser.add_argument('-p', '--port',
dest='port',
type=int,
default=50051,
help='GRPC server Port')
parser.add_argument('-d', '--debug',
dest='debug',
action='store_true',
default=False,
help='Show debug message')
args = parser.parse_args(sys.argv[2:])
if args.debug:
logger.setLevel(logging.DEBUG)
sys.path.insert(0, "generated")
import grpc
import similaritysampler_pb2_grpc
from concurrent import futures
from cuchem.cheminformatics.grpc import SimilaritySampler
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
similaritysampler_pb2_grpc.add_SimilaritySamplerServicer_to_server(SimilaritySampler(), server)
server.add_insecure_port(f'[::]:{args.port}')
server.start()
server.wait_for_termination()
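    # A minimal client-side sketch (hypothetical; the stub name is assumed from the
    # generated similaritysampler_pb2_grpc module used above):
    #   channel = grpc.insecure_channel('localhost:50051')
    #   stub = similaritysampler_pb2_grpc.SimilaritySamplerStub(channel)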
def analyze(self):
"""
Start analysis
"""
parser = argparse.ArgumentParser(description='Analyze')
parser.add_argument('--cpu',
dest='cpu',
action='store_true',
default=False,
help='Use CPU')
parser.add_argument('-b', '--benchmark',
dest='benchmark',
action='store_true',
default=False,
help='Execute for benchmark')
parser.add_argument('-p', '--pca_comps',
dest='pca_comps',
type=int,
default=64,
help='Number of PCA components')
parser.add_argument('-n', '--num_clusters',
dest='num_clusters',
type=int,
default=7,
                            help='Number of clusters')
parser.add_argument('-c', '--cache_directory',
dest='cache_directory',
type=str,
default=None,
help='Location to pick fingerprint from')
parser.add_argument('-m', '--n_mol',
dest='n_mol',
type=int,
default=10000,
help='Number of molecules for analysis. Use negative numbers for using the whole dataset.')
parser.add_argument('--batch_size',
dest='batch_size',
type=int,
default=100000,
help='Chunksize.')
parser.add_argument('-o', '--output_dir',
dest='output_dir',
default=".",
type=str,
help='Output directory for benchmark results')
parser.add_argument('--n_gpu',
dest='n_gpu',
type=int,
default=-1,
help='Number of GPUs to use')
parser.add_argument('--n_cpu',
dest='n_cpu',
type=int,
default=12,
help='Number of CPU workers to use')
parser.add_argument('-d', '--debug',
dest='debug',
action='store_true',
default=False,
help='Show debug message')
args = parser.parse_args(sys.argv[2:])
if args.debug:
logger.setLevel(logging.DEBUG)
benchmark_file = initialize_logfile()
client = initialize_cluster(not args.cpu,
n_cpu=args.n_cpu,
n_gpu=args.n_gpu)
# Set the context
context = Context()
context.dask_client = client
context.is_benchmark = args.benchmark
context.benchmark_file = benchmark_file
context.cache_directory = args.cache_directory
context.n_molecule = args.n_mol
context.batch_size = args.batch_size
if args.cpu:
context.compute_type = 'cpu'
else:
logger.info('Number of workers %d.', len(client.scheduler_info()['workers'].keys()))
start_time = datetime.now()
task_start_time = datetime.now()
n_molecules = args.n_mol
if not args.cpu:
from cuchem.wf.cluster.gpukmeansumap import GpuKmeansUmapHybrid
workflow = GpuKmeansUmapHybrid(n_molecules=n_molecules,
pca_comps=args.pca_comps,
n_clusters=args.num_clusters)
else:
from cuchem.wf.cluster.cpukmeansumap import CpuKmeansUmap
workflow = CpuKmeansUmap(n_molecules=n_molecules,
pca_comps=args.pca_comps,
n_clusters=args.num_clusters)
mol_df = workflow.cluster()
if args.benchmark:
workflow.compute_qa_matric()
if not args.cpu:
mol_df = mol_df.compute()
n_workers = args.n_gpu
else:
n_workers = args.n_cpu
n_molecules = workflow.n_molecules
runtime = datetime.now() - task_start_time
logger.info('Runtime workflow (hh:mm:ss.ms) {}'.format(runtime))
log_results(task_start_time, context.compute_type, 'workflow',
runtime, n_molecules, n_workers, metric_name='',
metric_value='', benchmark_file=benchmark_file)
runtime = datetime.now() - start_time
logger.info('Runtime Total (hh:mm:ss.ms) {}'.format(runtime))
log_results(task_start_time, context.compute_type, 'total',
runtime, n_molecules, n_workers, metric_name='',
metric_value='', benchmark_file=benchmark_file)
else:
port = context.get_config('plotly_port', 5000)
logger.info("Starting interactive visualization...")
from cuchem.interactive.chemvisualize import ChemVisualization
v = ChemVisualization(workflow)
            logger.info('Navigate to https://localhost:%s' % port)
v.start('0.0.0.0', port=port)
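    # Example invocations (a sketch; flags drawn from the parser above):
    #   ./start analyze -b --n_gpu 2 -m 50000    # headless benchmark run
    #   ./start analyze                          # interactive visualization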
def main():
Launcher()
if __name__ == '__main__':
main()
| cheminformatics-master | cuchem/startdash.py |
#!/opt/conda/envs/rapids/bin/python3
#
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import sys
import os
import pandas as pd
import numpy as np
from math import isnan
from scipy.stats import rankdata as rankdata_cpu
from scipy.stats import spearmanr as spearmanr_cpu
from sklearn.metrics import silhouette_score
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.DataManip.Metric import GetTanimotoDistMat
import cupy
import cudf
from cuml.metrics import pairwise_distances
# Define paths
_this_directory = os.path.dirname(os.path.realpath(__file__))
_parent_directory = os.path.dirname(_this_directory)
sys.path.insert(0, _parent_directory) # TODO is there a better way to add nvidia directory to the path
from cuchem.utils.metrics import batched_silhouette_scores, rankdata, get_kth_unique_value, \
corr_pairwise, spearmanr
from cuchem.utils.distance import tanimoto_calculate
_data_dir = os.path.join(_this_directory, 'data')
benchmark_approved_drugs_path = os.path.join(_data_dir, 'benchmark_approved_drugs.csv')
fingerprint_approved_drugs_path = os.path.join(_data_dir, 'fingerprints_approved_drugs.csv')
pca_approved_drugs_path = os.path.join(_data_dir, 'pca_approved_drugs.csv')
# Test parameters
run_tanimoto_params = [(benchmark_approved_drugs_path, 'canonical_smiles')]
run_silhouette_score_params = [(pca_approved_drugs_path, 'clusters')]
run_rankdata_params = [(10, 10, 5, 0), (10, 10, 5, 1), (10, 20, 10, 0), (10, 20, 10, 1)]
run_corr_pairwise = [(10, 10, 5, 0), (10, 10, 5, 2), (10, 20, 10, 0), (10, 20, 10, 5)]
run_get_kth_unique_value_params = [(10, 10, 5, 2, 0), (10, 10, 5, 2, 1),
(10, 20, 10, 5, 0), (10, 20, 10, 5, 1), (10, 20, 10, 100, 1)]
run_spearman_rho_params = [(pca_approved_drugs_path, fingerprint_approved_drugs_path, 'clusters', 2, 100)]
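# Each tuple above maps positionally onto the corresponding pytest.mark.parametrize
# signature below, e.g. run_rankdata_params entries are (n_rows, n_cols, max_int, axis).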
# Accessory functions
def _random_nans(data1, data2, num_nans):
"""Randomly add NaNs in identical positions to two numpy arrays"""
n_rows, n_cols = data1.shape
row_array = np.random.choice(np.arange(0, n_rows), num_nans)
col_array = np.random.choice(np.arange(0, n_cols), num_nans)
data1[row_array, col_array] = np.NaN
data2[row_array, col_array] = np.NaN
return data1, data2
def _rowwise_numpy_corr(data1, data2, func):
"""Pariwise correlation function on CPU"""
corr_array = []
for d1, d2 in zip(data1, data2):
mask = np.invert(np.isnan(d1) | np.isnan(d2))
val = func(d1[mask], d2[mask])
if hasattr(val, 'correlation'):
val = val.correlation
if hasattr(val, '__len__'):
val = val[1, 0]
corr_array.append(val)
return np.array(corr_array)
def _get_kth_unique_array_cpu(data, k, axis):
"""Return kth unique values for a sorted array along row/column"""
data = data.T if axis == 0 else data
kth_values = []
for vector in data:
pos = 0
prev_val = np.NaN
for val in vector:
if not isnan(val):
if val != prev_val:
prev_val = val
pos += 1
if pos == k:
break
kth_values.append(prev_val)
return np.array(kth_values)
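# Worked example (hypothetical input): for the sorted row [1, 1, 2, 3, 3] with k=2
# and axis=1, _get_kth_unique_array_cpu yields 2, the second distinct value in that row.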
# The unit tests
@pytest.mark.parametrize('benchmark_data_csv, column_name', run_tanimoto_params)
def test_run_tanimoto(benchmark_data_csv, column_name):
"""Validate tanimoto distance calculation"""
# Load data and calculate Morgan Fingerprints
smiles_data = pd.read_csv(benchmark_data_csv)[column_name]
mol_data = [Chem.MolFromSmiles(x) for x in smiles_data]
morganfp_data = [AllChem.GetMorganFingerprintAsBitVect(x, 2) for x in mol_data]
# RDKit Tanimoto distance on CPU is the baseline
tanimoto_dist_rdkit = GetTanimotoDistMat(morganfp_data)
# Compare to GPU version
idx = np.tril_indices(len(morganfp_data), k=-1)
fparray = [x.ToBitString() for x in morganfp_data]
fparray = [np.array(list(x)).astype(np.int) for x in fparray]
tanimoto_dist_gpu = tanimoto_calculate(cupy.array(fparray), calc_distance=True)
tanimoto_dist_gpu = cupy.asnumpy(tanimoto_dist_gpu)[idx]
assert np.allclose(cupy.asnumpy(tanimoto_dist_gpu), tanimoto_dist_rdkit)
@pytest.mark.parametrize('pca_approved_csv, cluster_column', run_silhouette_score_params)
def test_run_silhouette_score(pca_approved_csv, cluster_column):
"""Validate the silhouette score"""
pca_data = pd.read_csv(pca_approved_csv).set_index('molregno')
clusters = pca_data[cluster_column]
pca_data.drop(cluster_column, axis=1, inplace=True)
score_cpu = silhouette_score(pca_data, clusters)
# TODO copy pca_data or ensure it doesn't modify original
n_data = pca_data.shape[0]
score_gpu1 = batched_silhouette_scores(pca_data, clusters, batch_size=n_data)
score_gpu2 = batched_silhouette_scores(cudf.DataFrame(
pca_data), cudf.Series(clusters), batch_size=n_data)
assert np.allclose(score_cpu, score_gpu1) & np.allclose(score_cpu, score_gpu2)
@pytest.mark.parametrize('n_rows, n_cols, max_int, axis', run_rankdata_params)
def test_run_rankdata(n_rows, n_cols, max_int, axis):
"""Test the GPU ranking function relative to the CPU baseline"""
# TODO Add tests for ranking with NaNs once it's fixed in cuDF
# Use integers to ensure there will be ties
data = np.random.randint(0, max_int, (n_rows, n_cols)).astype(np.float)
rank_cpu = rankdata_cpu(data, axis=axis)
rank_gpu = rankdata(cupy.asarray(data), axis=axis)
assert np.allclose(rank_cpu, cupy.asnumpy(rank_gpu))
if n_rows == n_cols:
data2 = data * data.T
rank_cpu2 = rankdata_cpu(data2, axis=axis)
rank_gpu2 = rankdata(cupy.asarray(data2), axis=axis, is_symmetric=True)
assert np.allclose(rank_cpu2, cupy.asnumpy(rank_gpu2))
@pytest.mark.parametrize('n_rows, n_cols, max_int, num_nans', run_corr_pairwise)
def test_run_corr_pairwise(n_rows, n_cols, max_int, num_nans):
"""Test the pairwise covariance matrix calculation and the Pearson correlation coefficient"""
data1c = np.random.randint(0, max_int, (n_rows, n_cols)).astype(np.float)
data2c = np.random.randint(0, max_int, (n_rows, n_cols)).astype(np.float)
if num_nans > 0:
data1c, data2c = _random_nans(data1c, data2c, num_nans)
data1g = cupy.array(data1c)
data2g = cupy.array(data2c)
# Covariance matrix
cov_cpu = _rowwise_numpy_corr(data1c, data2c, np.cov)
cov_gpu = corr_pairwise(data1g, data2g, False).squeeze()
assert np.allclose(cov_cpu, cupy.asnumpy(cov_gpu), equal_nan=True)
# Pearson correlation
corcoef_cpu = _rowwise_numpy_corr(data1c, data2c, np.corrcoef)
corcoef_gpu = corr_pairwise(data1g, data2g, True).squeeze()
assert np.allclose(corcoef_cpu, cupy.asnumpy(corcoef_gpu), equal_nan=True)
@pytest.mark.parametrize('n_rows, n_cols, max_int, top_k, axis', run_get_kth_unique_value_params)
def test_run_get_kth_unique_value(n_rows, n_cols, max_int, top_k, axis):
"""Test the GPU function to get the kth unique value relative to the CPU baseline"""
data = np.random.randint(0, max_int, (n_rows, n_cols)).astype(np.float)
data = rankdata_cpu(data, axis=axis)
data.sort(axis=axis)
# Test without NaNs
kth_values_cpu = _get_kth_unique_array_cpu(data, top_k, axis)
kth_values_gpu = get_kth_unique_value(cupy.array(data), top_k, axis=axis).squeeze()
assert np.allclose(kth_values_cpu, cupy.asnumpy(kth_values_gpu), equal_nan=True)
# And with NaNs
np.fill_diagonal(data, np.NaN)
data[2, :] = np.NaN
kth_values_cpu = _get_kth_unique_array_cpu(data, top_k, axis)
kth_values_gpu = get_kth_unique_value(cupy.array(data), top_k, axis=axis).squeeze()
assert np.allclose(kth_values_cpu, cupy.asnumpy(kth_values_gpu), equal_nan=True)
@pytest.mark.parametrize(
'pca_approved_drugs_csv, fingerprint_approved_drugs_csv, cluster_column, n_dims_eucl_data, top_k',
run_spearman_rho_params)
def test_run_spearman_rho(pca_approved_drugs_csv, fingerprint_approved_drugs_csv, cluster_column, n_dims_eucl_data,
top_k):
"""Validate the spearman rho scoring"""
# Load PCA data to use as Euclidean distances
pca_data = pd.read_csv(pca_approved_drugs_csv).set_index('molregno').drop(cluster_column, axis=1)
float_data = pca_data[pca_data.columns[:n_dims_eucl_data]]
euclidean_dist = pairwise_distances(cupy.array(float_data))
# Load fingerprints and calculate tanimoto distance
fp_data = pd.read_csv(fingerprint_approved_drugs_csv).set_index('molregno')
tanimoto_dist = tanimoto_calculate(cupy.array(fp_data), calc_distance=True)
# Check all data compared to the CPU version
all_data_gpu = spearmanr(tanimoto_dist, euclidean_dist)
euclidean_dist_cpu = cupy.asnumpy(euclidean_dist)
tanimoto_dist_cpu = cupy.asnumpy(tanimoto_dist)
all_data_cpu = _rowwise_numpy_corr(tanimoto_dist_cpu, euclidean_dist_cpu, spearmanr_cpu)
    assert cupy.allclose(cupy.array(all_data_cpu), all_data_gpu, atol=0.005, equal_nan=True)
# Check using top k calculation compared to the CPU version
top_k_data_gpu = spearmanr(tanimoto_dist, euclidean_dist, top_k=top_k, axis=1)
cupy.fill_diagonal(tanimoto_dist, cupy.NaN)
kth_lim = get_kth_unique_value(tanimoto_dist, top_k, axis=1)
mask = tanimoto_dist > kth_lim
tanimoto_dist[mask] = cupy.NaN
euclidean_dist[mask] = cupy.NaN
euclidean_dist_cpu = cupy.asnumpy(euclidean_dist)
tanimoto_dist_cpu = cupy.asnumpy(tanimoto_dist)
top_k_data_cpu = _rowwise_numpy_corr(tanimoto_dist_cpu, euclidean_dist_cpu, spearmanr_cpu)
    assert cupy.allclose(cupy.array(top_k_data_cpu), top_k_data_gpu, atol=0.005, equal_nan=True)
| cheminformatics-master | cuchem/tests/test_metrics.py |
import cudf
import logging
from tests.utils import _fetch_chembl_test_dataset, _create_context
# from cuchem.wf.cluster.cpukmeansumap import CpuKmeansUmap
from cuchem.wf.cluster.gpukmeansumap import GpuKmeansUmap, GpuKmeansUmapHybrid
from cuchem.wf.cluster.gpurandomprojection import GpuWorkflowRandomProjection
from cuchemcommon.data.helper.chembldata import ChEmblData
logger = logging.getLogger(__name__)
# def test_cpukmeansumap():
# """
# Verify fetching data from chemblDB when the input is a pandas df.
# """
# context = _create_context()
# n_molecules, dao, mol_df = _fetch_chembl_test_dataset(n_molecules=10000)
# logger.info(context.batch_size)
# wf = CpuKmeansUmap(n_molecules=n_molecules,
# dao=dao, n_pca=64)
# embedding = wf.cluster(df_molecular_embedding=mol_df)
# logger.info(embedding.head())
def test_random_proj():
"""
    Verify the GPU random-projection clustering workflow on the ChEMBL test dataset.
"""
_create_context()
n_molecules, dao, mol_df = _fetch_chembl_test_dataset()
wf = GpuWorkflowRandomProjection(n_molecules=n_molecules,
dao=dao)
wf.cluster(df_mol_embedding=mol_df)
def test_gpukmeansumap_dask():
"""
    Verify GpuKmeansUmap clustering when the input is a dask dataframe.
"""
_create_context()
n_molecules, dao, mol_df = _fetch_chembl_test_dataset()
wf = GpuKmeansUmap(n_molecules=n_molecules,
dao=dao, pca_comps=64)
wf.cluster(df_mol_embedding=mol_df)
def test_gpukmeansumap_cudf():
"""
    Verify GpuKmeansUmap clustering when the input is a cudf dataframe.
"""
_create_context()
n_molecules, dao, mol_df = _fetch_chembl_test_dataset()
wf = GpuKmeansUmap(n_molecules=n_molecules,
dao=dao, pca_comps=64)
mol_df = mol_df.compute()
wf.cluster(df_mol_embedding=mol_df)
def test_add_molecule_GpuKmeansUmap():
"""
    Verify adding molecules (existing and new) to a GpuKmeansUmap clustering.
"""
_create_context()
n_molecules, dao, mol_df = _fetch_chembl_test_dataset()
if hasattr(mol_df, 'compute'):
mol_df = mol_df.compute()
mol_df = cudf.from_pandas(mol_df)
n_molecules = mol_df.shape[0]
    # The test molecules should include both already-clustered and new molecules
test_mol = mol_df[n_molecules - 20:]
mols_tobe_added = test_mol['id'].to_array().tolist()
chData = ChEmblData()
    logger.info('Fetching ChEMBL ids for %s', mols_tobe_added)
mols_tobe_added = [str(row[0]) for row in chData.fetch_chemblId_by_molregno(mols_tobe_added)]
logger.info('ChEMBL ids to be added %s', mols_tobe_added)
# Molecules to be used for clustering
mol_df = mol_df[:n_molecules - 10]
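    # Of the 20 test molecules, the first 10 are in the clustered subset and the
    # last 10 were held out, so the first add_molecules call should report
    # exactly 10 missing.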
wf = GpuKmeansUmap(n_molecules=n_molecules, dao=dao, pca_comps=64)
wf.cluster(df_mol_embedding=mol_df)
missing_mols, molregnos, df_embedding = wf.add_molecules(mols_tobe_added)
assert len(missing_mols) == 10, 'Expected 10 missing molecules found %d' % len(missing_mols)
# TODO: Once the issue with add_molecule in multi-gpu env. is fixed, the
# number of missing_molregno found should be 0
missing_mols, molregnos, df_embedding = wf.add_molecules(mols_tobe_added)
assert len(missing_mols) == 0, 'Expected no missing molecules found %d' % len(missing_mols)
# assert len(missing_mols) == 10, 'Expected 10 missing molecules found %d' % len(missing_mols)
def test_add_molecule_hybrid_wf():
"""
    Verify adding molecules (existing and new) to a GpuKmeansUmapHybrid clustering.
"""
_create_context()
n_molecules, dao, mol_df = _fetch_chembl_test_dataset()
if hasattr(mol_df, 'compute'):
mol_df = mol_df.compute()
mol_df = cudf.from_pandas(mol_df)
n_molecules = mol_df.shape[0]
    # The test molecules should include both already-clustered and new molecules
test_mol = mol_df[n_molecules - 20:]
mols_tobe_added = test_mol['id'].to_array().tolist()
chData = ChEmblData()
    logger.info('Fetching ChEMBL ids for %s', mols_tobe_added)
mols_tobe_added = [str(row[0]) for row in chData.fetch_chemblId_by_molregno(mols_tobe_added)]
logger.info('ChEMBL ids to be added %s', mols_tobe_added)
# Molecules to be used for clustering
mol_df = mol_df[:n_molecules - 10]
wf = GpuKmeansUmapHybrid(n_molecules=n_molecules, dao=dao, pca_comps=64)
wf.cluster(df_mol_embedding=mol_df)
missing_mols, molregnos, df_embedding = wf.add_molecules(mols_tobe_added)
assert len(missing_mols) == 10, 'Expected 10 missing molecules found %d' % len(missing_mols)
# TODO: Once the issue with add_molecule in multi-gpu env. is fixed, the
# number of missing_molregno found should be 0
missing_mols, molregnos, df_embedding = wf.add_molecules(mols_tobe_added)
assert len(missing_mols) == 0, 'Expected no missing molecules found %d' % len(missing_mols)
# assert len(missing_mols) == 10, 'Expected 10 missing molecules found %d' % len(missing_mols)
| cheminformatics-master | cuchem/tests/test_cluster_wf.py |