repo_name
stringclasses 10
values | file_path
stringlengths 29
222
| content
stringlengths 24
926k
| extension
stringclasses 5
values |
---|---|---|---|
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/sub-group/sub-group-6.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <iostream>
// Wrap a raw USM pointer as a decorated multi_ptr in the global address
// space, the form required by sub-group block load()/store().
template <typename T>
auto get_multi_ptr(T *raw_ptr) {
  return sycl::address_space_cast<sycl::access::address_space::global_space,
                                  sycl::access::decorated::yes>(raw_ptr);
}
// Copies data2 into data using two 8-element sub-group block loads/stores
// per work-item (16 ints per work-item total), and reports kernel time.
int main() {
  sycl::queue q{sycl::gpu_selector_v,
                sycl::property::queue::enable_profiling{}};
  std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>()
            << std::endl;
  // Snippet begin
  constexpr int N = 1024 * 1024;
  int *data = sycl::malloc_shared<int>(N, q);
  int *data2 = sycl::malloc_shared<int>(N, q);
  memset(data2, 0xFF, sizeof(int) * N); // every int becomes 0xFFFFFFFF (-1)
  auto e = q.submit([&](auto &h) {
    h.parallel_for(
        sycl::nd_range(sycl::range{N / 16}, sycl::range{32}),
        [=](sycl::nd_item<1> it) [[intel::reqd_sub_group_size(16)]] {
          auto sg = it.get_sub_group();
          sycl::vec<int, 8> x;
          // First int owned by this sub-group: 16 ints per work-item,
          // sub-groups laid out contiguously within the work-group.
          int base = (it.get_group(0) * 32 +
                      sg.get_group_id()[0] * sg.get_local_range()[0]) *
                     16;
          x = sg.load<8>(get_multi_ptr(&(data2[base + 0])));
          sg.store<8>(get_multi_ptr(&(data[base + 0])), x);
          x = sg.load<8>(get_multi_ptr(&(data2[base + 128])));
          sg.store<8>(get_multi_ptr(&(data[base + 128])), x);
        });
  });
  // Snippet end
  q.wait();
  std::cout << "Kernel time = "
            << (e.template get_profiling_info<
                    sycl::info::event_profiling::command_end>() -
                e.template get_profiling_info<
                    sycl::info::event_profiling::command_start>())
            << " ns" << std::endl;
  // Release USM allocations (leaked in the original sample).
  sycl::free(data, q);
  sycl::free(data2, q);
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/sub-group/sub-group-sizes.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <iostream>
// Query the selected GPU device and print every sub-group size it supports.
int main(void) {
  sycl::queue q{sycl::gpu_selector_v};
  std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>()
            << std::endl;
  // Snippet begin
  std::cout << "Sub-group Sizes: ";
  const auto supported_sizes =
      q.get_device().get_info<sycl::info::device::sub_group_sizes>();
  for (const auto &sg_size : supported_sizes) {
    std::cout << sg_size << " ";
  }
  std::cout << std::endl;
  // Snippet end
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/sub-group/sub-group-3.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <iostream>
// Each work-item writes 16 ints strided by the sub-group size, so the
// sub-group's accesses within one loop iteration are contiguous.
int main() {
  sycl::queue q{sycl::gpu_selector_v,
                sycl::property::queue::enable_profiling{}};
  std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>()
            << std::endl;
  // Snippet begin
  constexpr int N = 1024 * 1024;
  int *data = sycl::malloc_shared<int>(N, q);
  auto e = q.submit([&](auto &h) {
    h.parallel_for(sycl::nd_range(sycl::range{N / 16}, sycl::range{32}),
                   [=](sycl::nd_item<1> it) {
                     int i = it.get_global_linear_id();
                     auto sg = it.get_sub_group();
                     int sgSize = sg.get_local_range()[0];
                     // Remap the linear id so neighboring work-items in a
                     // sub-group touch neighboring addresses per iteration.
                     i = (i / sgSize) * sgSize * 16 + (i % sgSize);
                     for (int j = 0; j < sgSize * 16; j += sgSize) {
                       data[i + j] = -1;
                     }
                   });
  });
  // Snippet end
  q.wait();
  std::cout << "Kernel time = "
            << (e.template get_profiling_info<
                    sycl::info::event_profiling::command_end>() -
                e.template get_profiling_info<
                    sycl::info::event_profiling::command_start>())
            << " ns" << std::endl;
  sycl::free(data, q); // release USM (leaked in the original)
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/sub-group/sub-group-1.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <iostream>
// Launches one 32-item work-group with a required sub-group size of 32 and
// prints each item's global/group/sub-group identifiers via sycl::stream.
int main() {
  sycl::queue q{sycl::gpu_selector_v};
  std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>()
            << "\n";
  // Snippet begin
  q.submit([&](auto &h) {
    sycl::stream out(65536, 256, h);
    h.parallel_for(
        sycl::nd_range(sycl::range{32}, sycl::range{32}),
        [=](sycl::nd_item<1> it) [[intel::reqd_sub_group_size(32)]] {
          // Identifiers of this work-item within the ND-range and its
          // sub-group.
          const int groupId = it.get_group(0);
          const int globalId = it.get_global_linear_id();
          auto sg = it.get_sub_group();
          const int sgSize = sg.get_local_range()[0];
          const int sgGroupId = sg.get_group_id()[0];
          const int sgId = sg.get_local_id()[0];
          out << "globalId = " << sycl::setw(2) << globalId
              << " groupId = " << groupId
              << " sgGroupId = " << sgGroupId << " sgId = " << sgId
              << " sgSize = " << sycl::setw(2) << sgSize
              << sycl::endl;
        });
  });
  // Snippet end
  q.wait();
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/sub-group/sub-group-2.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <iostream>
// Baseline layout: each work-item writes 16 consecutive ints, so adjacent
// work-items in a sub-group access addresses 16 ints apart.
int main() {
  sycl::queue q{sycl::gpu_selector_v,
                sycl::property::queue::enable_profiling{}};
  std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>()
            << std::endl;
  // Snippet begin
  constexpr int N = 1024 * 1024;
  int *data = sycl::malloc_shared<int>(N, q);
  auto e = q.submit([&](auto &h) {
    h.parallel_for(sycl::nd_range(sycl::range{N / 16}, sycl::range{32}),
                   [=](sycl::nd_item<1> it) {
                     int i = it.get_global_linear_id();
                     i = i * 16; // start of this item's private 16-int chunk
                     for (int j = i; j < (i + 16); j++) {
                       data[j] = -1;
                     }
                   });
  });
  q.wait();
  // Snippet end
  std::cout << "Kernel time = "
            << (e.template get_profiling_info<
                    sycl::info::event_profiling::command_end>() -
                e.template get_profiling_info<
                    sycl::info::event_profiling::command_start>())
            << " ns" << std::endl;
  sycl::free(data, q); // release USM (leaked in the original)
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/sub-group/sub-group-7.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <iostream>
// Wrap a raw USM pointer as a decorated multi_ptr in the global address
// space, the form required by the sub-group load()/store() calls below.
template <typename T>
auto get_multi_ptr(T *raw_ptr) {
  auto multi_ptr =
      sycl::address_space_cast<
          sycl::access::address_space::global_space,
          sycl::access::decorated::yes>(raw_ptr);
  return multi_ptr;
}
// Copies data2 into data using four 4-element sub-group block loads/stores
// per work-item (16 ints per work-item total), and reports kernel time.
int main() {
  sycl::queue q{sycl::gpu_selector_v,
                sycl::property::queue::enable_profiling{}};
  std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>()
            << std::endl;
  // Snippet begin
  constexpr int N = 1024 * 1024;
  int *data = sycl::malloc_shared<int>(N, q);
  int *data2 = sycl::malloc_shared<int>(N, q);
  memset(data2, 0, sizeof(int) * N);
  auto e = q.submit([&](auto &h) {
    h.parallel_for(
        sycl::nd_range(sycl::range{N / 16}, sycl::range{32}),
        [=](sycl::nd_item<1> it) [[intel::reqd_sub_group_size(16)]] {
          auto sg = it.get_sub_group();
          sycl::vec<int, 4> x;
          // First int owned by this sub-group (16 ints per work-item).
          int base = (it.get_group(0) * 32 +
                      sg.get_group_id()[0] * sg.get_local_range()[0]) *
                     16;
          auto load_ptr0 = get_multi_ptr(&(data2[base + 0 * 64]));
          x = sg.load<4>(load_ptr0);
          auto store_ptr0 = get_multi_ptr(&(data[base + 0 * 64]));
          sg.store<4>(store_ptr0, x);
          auto load_ptr1 = get_multi_ptr(&(data2[base + 1 * 64]));
          x = sg.load<4>(load_ptr1);
          auto store_ptr1 = get_multi_ptr(&(data[base + 1 * 64]));
          sg.store<4>(store_ptr1, x);
          auto load_ptr2 = get_multi_ptr(&(data2[base + 2 * 64]));
          x = sg.load<4>(load_ptr2);
          auto store_ptr2 = get_multi_ptr(&(data[base + 2 * 64]));
          sg.store<4>(store_ptr2, x);
          auto load_ptr3 = get_multi_ptr(&(data2[base + 3 * 64]));
          x = sg.load<4>(load_ptr3);
          auto store_ptr3 = get_multi_ptr(&(data[base + 3 * 64]));
          sg.store<4>(store_ptr3, x);
        });
  });
  // Snippet end
  q.wait();
  std::cout << "Kernel time = "
            << (e.template get_profiling_info<
                    sycl::info::event_profiling::command_end>() -
                e.template get_profiling_info<
                    sycl::info::event_profiling::command_start>())
            << " ns" << std::endl;
  // Release USM allocations (leaked in the original sample).
  sycl::free(data, q);
  sycl::free(data2, q);
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/sub-group/transpose.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <iomanip>
#include <iostream>
#include <vector>
#include <CL/sycl.hpp>
constexpr size_t N = 16;
typedef unsigned int uint;
// Transposes an N x N matrix of uints in place; each work-group (one
// 16-wide sub-group) handles a 16x16 block using sub-group loads, shuffles
// and stores.
int main() {
  sycl::queue q{sycl::gpu_selector_v,
                sycl::property::queue::enable_profiling{}};
  std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>()
            << std::endl;
  // Fill with 0..N*N-1 so the printed transpose is easy to verify by eye.
  std::vector<uint> matrix(N * N);
  for (uint i = 0; i < N * N; ++i) {
    matrix[i] = i;
  }
  std::cout << "Matrix: " << std::endl;
  for (uint i = 0; i < N; i++) {
    for (uint j = 0; j < N; j++) {
      std::cout << std::setw(3) << matrix[i * N + j] << " ";
    }
    std::cout << std::endl;
  }
  { // Buffer scope: `m` writes back into `matrix` on destruction.
    // Snippet begin
    constexpr size_t BLOCK_SIZE = 16;
    sycl::buffer<uint, 2> m(matrix.data(), sycl::range<2>(N, N));
    auto e = q.submit([&](auto &h) {
      sycl::accessor marr(m, h);
      // NOTE(review): barr1/barr2 are never referenced in the kernel below —
      // they look like removal candidates; confirm before deleting.
      sycl::local_accessor<uint, 2> barr1(
          sycl::range<2>(BLOCK_SIZE, BLOCK_SIZE), h);
      sycl::local_accessor<uint, 2> barr2(
          sycl::range<2>(BLOCK_SIZE, BLOCK_SIZE), h);
      h.parallel_for(
          sycl::nd_range<2>(sycl::range<2>(N / BLOCK_SIZE, N),
                            sycl::range<2>(1, BLOCK_SIZE)),
          [=](sycl::nd_item<2> it) [[intel::reqd_sub_group_size(16)]] {
            int gi = it.get_group(0);
            int gj = it.get_group(1);
            auto sg = it.get_sub_group();
            uint sgId = sg.get_local_id()[0];
            // bcol[k] receives one element per row k of the block via
            // sub-group block loads; work-item sgId ends up holding a column.
            uint bcol[BLOCK_SIZE];
            int ai = BLOCK_SIZE * gi; // block's first row
            int aj = BLOCK_SIZE * gj; // block's first column
            for (uint k = 0; k < BLOCK_SIZE; k++) {
              bcol[k] = sg.load(marr.get_pointer() + (ai + k) * N + aj);
            }
            // Exchange columns for rows using sub-group shuffles.
            // NOTE(review): sg.shuffle is a group operation invoked here
            // under the divergent `if (sgId == n)` — verify this meets the
            // implementation's convergence requirements.
            uint tcol[BLOCK_SIZE];
            for (uint n = 0; n < BLOCK_SIZE; n++) {
              if (sgId == n) {
                for (uint k = 0; k < BLOCK_SIZE; k++) {
                  tcol[k] = sg.shuffle(bcol[n], k);
                }
              }
            }
            // Write the transposed block back with sub-group block stores.
            for (uint k = 0; k < BLOCK_SIZE; k++) {
              sg.store(marr.get_pointer() + (ai + k) * N + aj, tcol[k]);
            }
          });
    });
    // Snippet end
    q.wait();
    size_t kernel_time = (e.template get_profiling_info<
                              sycl::info::event_profiling::command_end>() -
                          e.template get_profiling_info<
                              sycl::info::event_profiling::command_start>());
    std::cout << std::endl
              << "Kernel Execution Time: " << kernel_time * 1e-6 << " msec"
              << std::endl;
  }
  std::cout << std::endl << "Transposed Matrix: " << std::endl;
  for (uint i = 0; i < N; i++) {
    for (uint j = 0; j < N; j++) {
      std::cout << std::setw(3) << matrix[i * N + j] << " ";
    }
    std::cout << std::endl;
  }
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/sub-group/sub-group-8.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <iostream>
int main() {
sycl::queue q{sycl::gpu_selector_v,
sycl::property::queue::enable_profiling{}};
std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>()
<< std::endl;
// Snippet begin
constexpr int N = 1024 * 1024;
int *data = sycl::malloc_shared<int>(N, q);
int *data2 = sycl::malloc_shared<int>(N, q);
memset(data2, 0xFF, sizeof(int) * N);
auto e = q.submit([&](auto &h) {
h.parallel_for(sycl::nd_range(sycl::range{N / 16}, sycl::range{32}),
[=](sycl::nd_item<1> it) {
int i = it.get_global_linear_id();
auto sg = it.get_sub_group();
int sgSize = sg.get_local_range()[0];
i = (i / sgSize) * sgSize * 16 + (i % sgSize) * 4;
for (int j = 0; j < 4; j++) {
sycl::vec<int, 4> x;
sycl::vec<int, 4> *q =
(sycl::vec<int, 4> *)(&(data2[i + j * sgSize * 4]));
x = *q;
sycl::vec<int, 4> *r =
(sycl::vec<int, 4> *)(&(data[i + j * sgSize * 4]));
*r = x;
}
});
});
// Snippet end
q.wait();
std::cout << "Kernel time = "
<< (e.template get_profiling_info<
sycl::info::event_profiling::command_end>() -
e.template get_profiling_info<
sycl::info::event_profiling::command_start>())
<< " ns" << std::endl;
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/sub-group/sub-group-5.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <iostream>
// Copies data2 into data one int at a time, strided by the sub-group size
// so the sub-group's accesses within an iteration are contiguous.
int main() {
  sycl::queue q{sycl::gpu_selector_v,
                sycl::property::queue::enable_profiling{}};
  std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>()
            << std::endl;
  // Snippet begin
  constexpr int N = 1024 * 1024;
  int *data = sycl::malloc_shared<int>(N, q);
  int *data2 = sycl::malloc_shared<int>(N, q);
  memset(data2, 0xFF, sizeof(int) * N); // every int becomes 0xFFFFFFFF (-1)
  auto e = q.submit([&](auto &h) {
    h.parallel_for(sycl::nd_range(sycl::range{N / 16}, sycl::range{32}),
                   [=](sycl::nd_item<1> it) {
                     int i = it.get_global_linear_id();
                     auto sg = it.get_sub_group();
                     int sgSize = sg.get_local_range()[0];
                     // Remap so neighboring work-items touch neighboring
                     // addresses on every iteration.
                     i = (i / sgSize) * sgSize * 16 + (i % sgSize);
                     for (int j = 0; j < sgSize * 16; j += sgSize) {
                       data[i + j] = data2[i + j];
                     }
                   });
  });
  // Snippet end
  q.wait();
  std::cout << "Kernel time = "
            << (e.template get_profiling_info<
                    sycl::info::event_profiling::command_end>() -
                e.template get_profiling_info<
                    sycl::info::event_profiling::command_start>())
            << " ns" << std::endl;
  // Release USM allocations (leaked in the original sample).
  sycl::free(data, q);
  sycl::free(data2, q);
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/sub-group/sg-max-size.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <algorithm>
#include <chrono>
#include <cmath>
#include <iostream>
#include <random>
#include <vector>
#include <CL/sycl.hpp>
constexpr int N = 7;
// DPC++ asynchronous exception handler
// Rethrows each queued asynchronous SYCL exception to classify it; any
// std::exception prints "Failure" and terminates the process.
static auto exception_handler = [](sycl::exception_list eList) {
  for (std::exception_ptr const &e : eList) {
    try {
      std::rethrow_exception(e);
    } catch (std::exception const &e) {
      std::cout << "Failure" << std::endl;
      std::terminate();
    }
  }
};
// Prints device limits, then launches a 7-work-item kernel with a required
// sub-group size of 16 so the reported actual sub-group size can differ
// from the maximum.
int main() {
  sycl::queue q{sycl::gpu_selector_v, exception_handler,
                sycl::property::queue::enable_profiling{}};
  std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>()
            << "\n";
  std::cout << "Max Compute Units: "
            << q.get_device().get_info<sycl::info::device::max_compute_units>()
            << std::endl;
  std::cout << "Max Work Item Size: "
            << q.get_device()
                   .get_info<sycl::info::device::max_work_item_sizes<3>>()[0]
            << " "
            << q.get_device()
                   .get_info<sycl::info::device::max_work_item_sizes<3>>()[1]
            << " "
            << q.get_device()
                   .get_info<sycl::info::device::max_work_item_sizes<3>>()[2]
            << std::endl;
  std::cout
      << "Max Work Group Size: "
      << q.get_device().get_info<sycl::info::device::max_work_group_size>()
      << std::endl;
  // Label typo fixed ("Preffered" -> "Preferred").
  std::cout << "Preferred Vector Width Float: "
            << q.get_device()
                   .get_info<sycl::info::device::preferred_vector_width_float>()
            << std::endl;
  std::cout << "Native Vector Width Float: "
            << q.get_device()
                   .get_info<sycl::info::device::native_vector_width_float>()
            << std::endl;
  std::cout << "Local Memory Size: "
            << q.get_device().get_info<sycl::info::device::local_mem_size>()
            << std::endl;
  // N + N + 2 elements keeps data[i + sgSize] in bounds for all i < N.
  int *data = sycl::malloc_shared<int>(N + N + 2, q);
  for (int i = 0; i < N + N + 2; i++) {
    data[i] = i;
  }
  // Snippet begin
  auto e = q.submit([&](auto &h) {
    sycl::stream out(65536, 128, h);
    h.parallel_for(sycl::nd_range<1>(7, 7),
                   [=](sycl::nd_item<1> it) [[intel::reqd_sub_group_size(16)]] {
                     int i = it.get_global_linear_id();
                     auto sg = it.get_sub_group();
                     int sgSize = sg.get_local_range()[0];
                     int sgMaxSize = sg.get_max_local_range()[0];
                     int sId = sg.get_local_id()[0];
                     int j = data[i];
                     int k = data[i + sgSize];
                     out << "globalId = " << i << " sgMaxSize = " << sgMaxSize
                         << " sgSize = " << sgSize << " sId = " << sId
                         << " j = " << j << " k = " << k << sycl::endl;
                   });
  });
  q.wait();
  // Snippet end
  std::cout << "Kernel time = "
            << (e.template get_profiling_info<
                    sycl::info::event_profiling::command_end>() -
                e.template get_profiling_info<
                    sycl::info::event_profiling::command_start>())
            << " ns" << std::endl;
  sycl::free(data, q); // release USM (leaked in the original)
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/atomics/global_atomics_ref.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <iostream>
// Reduces N ones into a single int with a relaxed, device-scope global
// atomic_ref and prints the sum and kernel time.
int main() {
  constexpr int N = 256 * 256;
  constexpr int M = 512;
  int total = 0;
  int *a = static_cast<int *>(malloc(sizeof(int) * N));
  for (int i = 0; i < N; i++)
    a[i] = 1;
  sycl::queue q({sycl::property::queue::enable_profiling()});
  { // Buffer scope: device work completes before `a` is freed below.
    sycl::buffer<int> buf(&total, 1);
    sycl::buffer<int> bufa(a, N);
    auto e = q.submit([&](sycl::handler &h) {
      sycl::accessor acc(buf, h);
      sycl::accessor acc_a(bufa, h, sycl::read_only);
      h.parallel_for(sycl::nd_range<1>(N, M), [=](auto it) {
        auto i = it.get_global_id();
        sycl::atomic_ref<int, sycl::memory_order_relaxed,
                         sycl::memory_scope_device,
                         sycl::access::address_space::global_space>
            atomic_op(acc[0]);
        atomic_op += acc_a[i];
      });
    });
    sycl::host_accessor h_a(buf);
    std::cout << "Reduction Sum : " << h_a[0] << "\n";
    std::cout
        << "Kernel Execution Time of Global Atomics Ref: "
        << e.get_profiling_info<sycl::info::event_profiling::command_end>() -
               e.get_profiling_info<sycl::info::event_profiling::command_start>()
        << "\n";
  }
  free(a); // host allocation was leaked in the original
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/atomics/test_atomic.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include "align.hpp"
#include <CL/sycl.hpp>
#include <array>
#include <chrono>
#include <iostream>
// Aligned vector types shared by all benchmark kernels below.
template <typename T> using VectorAllocator = AlignedAllocator<T>;
template <typename T> using AlignedVector = std::vector<T, VectorAllocator<T>>;
// Number of elements in every test vector (32K).
constexpr size_t array_size = (1 << 15);
// Fill `a` with the squares of its indices, converted to T.
template <typename T> void InitializeArray(AlignedVector<T> &a) {
  const size_t count = a.size();
  for (size_t idx = 0; idx < count; idx++)
    a[idx] = (T)idx * (T)idx;
}
// Zero every element of `a`.
template <typename T> void Initialize(AlignedVector<T> &a) {
  const size_t count = a.size();
  for (size_t idx = 0; idx < count; idx++)
    a[idx] = 0;
}
// Snippet1 Begin
//
// Benchmark: every work-item atomically adds b[i] into a[0] (int), `iter`
// kernel submissions. Returns elapsed steady_clock ticks.
int VectorInt(sycl::queue &q, int iter) {
  VectorAllocator<int> alloc;
  AlignedVector<int> a(array_size, alloc);
  AlignedVector<int> b(array_size, alloc);
  InitializeArray<int>(a);
  InitializeArray<int>(b);
  sycl::range num_items{a.size()};
  sycl::buffer a_buf(a);
  sycl::buffer b_buf(b);
  auto start = std::chrono::steady_clock::now();
  for (int i = 0; i < iter; i++) {
    q.submit([&](sycl::handler &h) {
      // Input accessors
      sycl::accessor a_acc(a_buf, h, sycl::read_write);
      // BUG FIX: the read-only accessor must view b_buf, not a_buf
      // (b_buf was created but never used).
      sycl::accessor b_acc(b_buf, h, sycl::read_only);
      h.parallel_for(num_items, [=](auto i) {
        auto v = sycl::atomic_ref<int, sycl::memory_order::relaxed,
                                  sycl::memory_scope::device,
                                  sycl::access::address_space::global_space>(
            a_acc[0]);
        v += b_acc[i];
      });
    });
  }
  q.wait();
  auto end = std::chrono::steady_clock::now();
  std::cout << "Vector int completed on device - took " << (end - start).count()
            << " u-secs\n";
  return ((end - start).count());
}
// Snippet1 End
//
// Snippet2 Begin
//
// Benchmark: every work-item atomically adds b[i] into a[0] (float), `iter`
// kernel submissions. Returns elapsed steady_clock ticks.
int VectorFloat(sycl::queue &q, int iter) {
  VectorAllocator<float> alloc;
  AlignedVector<float> a(array_size, alloc);
  AlignedVector<float> b(array_size, alloc);
  InitializeArray<float>(a);
  InitializeArray<float>(b);
  sycl::range num_items{a.size()};
  sycl::buffer a_buf(a);
  sycl::buffer b_buf(b);
  auto start = std::chrono::steady_clock::now();
  for (int i = 0; i < iter; i++) {
    q.submit([&](sycl::handler &h) {
      // Input accessors
      sycl::accessor a_acc(a_buf, h, sycl::read_write);
      // BUG FIX: the read-only accessor must view b_buf, not a_buf
      // (b_buf was created but never used).
      sycl::accessor b_acc(b_buf, h, sycl::read_only);
      h.parallel_for(num_items, [=](auto i) {
        auto v = sycl::atomic_ref<float, sycl::memory_order::relaxed,
                                  sycl::memory_scope::device,
                                  sycl::access::address_space::global_space>(
            a_acc[0]);
        v += b_acc[i];
      });
    });
  }
  q.wait();
  auto end = std::chrono::steady_clock::now();
  std::cout << "Vector float completed on device - took "
            << (end - start).count() << " u-secs\n";
  return ((end - start).count());
}
// Snippet2 End
//
// Snippet3 Begin
//
// Benchmark: every work-item atomically adds b[i] into a[0] (double), `iter`
// kernel submissions. Returns elapsed steady_clock ticks.
int VectorDouble(sycl::queue &q, int iter) {
  VectorAllocator<double> alloc;
  AlignedVector<double> a(array_size, alloc);
  AlignedVector<double> b(array_size, alloc);
  InitializeArray<double>(a);
  InitializeArray<double>(b);
  sycl::range num_items{a.size()};
  sycl::buffer a_buf(a);
  sycl::buffer b_buf(b);
  auto start = std::chrono::steady_clock::now();
  for (int i = 0; i < iter; i++) {
    q.submit([&](sycl::handler &h) {
      // Input accessors
      sycl::accessor a_acc(a_buf, h, sycl::read_write);
      // BUG FIX: the read-only accessor must view b_buf, not a_buf
      // (b_buf was created but never used).
      sycl::accessor b_acc(b_buf, h, sycl::read_only);
      h.parallel_for(num_items, [=](auto i) {
        auto v = sycl::atomic_ref<double, sycl::memory_order::relaxed,
                                  sycl::memory_scope::device,
                                  sycl::access::address_space::global_space>(
            a_acc[0]);
        v += b_acc[i];
      });
    });
  }
  q.wait();
  auto end = std::chrono::steady_clock::now();
  std::cout << "Vector Double completed on device - took "
            << (end - start).count() << " u-secs\n";
  return ((end - start).count());
}
// Snippet3 End
// Runs the int/float/double atomic-add benchmarks on the GPU device.
int main() {
  sycl::queue q(sycl::gpu_selector_v);
  VectorAllocator<int> alloc;
  AlignedVector<int> a(array_size, alloc);
  AlignedVector<int> b(array_size, alloc);
  InitializeArray<int>(a);
  InitializeArray<int>(b);
  std::cout << "Running on device: "
            << q.get_device().get_info<sycl::info::device::name>() << "\n";
  std::cout << "Vector size: " << a.size() << "\n";
  // VectorInt is invoked twice — presumably the first call warms up the
  // device/JIT before the timed comparisons; TODO confirm intent.
  VectorInt(q, 10);
  VectorInt(q, 10);
  VectorFloat(q, 10);
  VectorDouble(q, 10);
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/atomics/atomics.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <chrono>
#include <iostream>
#include <string>
#include <unistd.h>
#include <vector>
// Summation of 10M 'one' values
constexpr size_t N = 1024 * 32;
// Number of repetitions
constexpr int repetitions = 16;
// expected value of sum
int sum_expected = N;
// Asynchronous SYCL exception handler: rethrows each queued exception;
// any std::exception terminates the process (message only in DEBUG builds).
static auto exception_handler = [](sycl::exception_list eList) {
  for (std::exception_ptr const &e : eList) {
    try {
      std::rethrow_exception(e);
    } catch (std::exception const &e) {
#if DEBUG
      std::cout << "Failure" << std::endl;
#endif
      std::terminate();
    }
  }
};
// Minimal stopwatch: remembers its construction instant and reports the
// seconds elapsed since then.
class Timer {
public:
  Timer() : start_(std::chrono::steady_clock::now()) {}
  // Seconds (fractional) since this Timer was constructed.
  double Elapsed() {
    const auto current = std::chrono::steady_clock::now();
    return std::chrono::duration_cast<Duration>(current - start_).count();
  }
private:
  using Duration = std::chrono::duration<double>;
  std::chrono::steady_clock::time_point start_;
};
// Serial int-sum baseline. `flush` mirrors the device reductions' signature
// and is not read here. Prints the averaged time and returns the sum.
int ComputeSerialInt(std::vector<int> &data, std::vector<int> &flush,
                     int iter) {
  const size_t data_size = data.size();
  Timer timer;
  // Initialized so iter <= 0 cannot read an indeterminate value (was
  // uninitialized in the original).
  int sum = 0;
  // ComputeSerial main begin
  for (int it = 0; it < iter; it++) {
    sum = 0;
    for (size_t i = 0; i < data_size; ++i) {
      sum += data[i];
    }
  }
  // ComputeSerial main end
  double elapsed = timer.Elapsed() / iter;
  if (sum == sum_expected)
    std::cout << "SUCCESS: Time ComputeSerialInt = " << elapsed << "s"
              << " sum = " << sum << "\n";
  else
    std::cout << "ERROR: ComputeSerialInt Expected " << sum_expected
              << " but got " << sum << "\n";
  return sum;
} // end ComputeSerial
// Serial float-sum baseline. `flush` mirrors the device reductions'
// signature and is not read here. Prints the averaged time, returns the sum.
int ComputeSerialFloat(std::vector<float> &data, std::vector<float> &flush,
                       int iter) {
  const size_t data_size = data.size();
  Timer timer;
  // Initialized so iter <= 0 cannot read an indeterminate value (was
  // uninitialized in the original).
  float sum = 0.0f;
  // ComputeSerial main begin
  for (int it = 0; it < iter; it++) {
    sum = 0.0;
    for (size_t i = 0; i < data_size; ++i) {
      sum += data[i];
    }
  }
  // ComputeSerial main end
  double elapsed = timer.Elapsed() / iter;
  if (sum == sum_expected)
    std::cout << "SUCCESS: Time ComputeSerialFloat = " << elapsed << "s"
              << " sum = " << sum << "\n";
  else
    std::cout << "ERROR: ComputeSerialFloat Expected " << sum_expected
              << " but got " << sum << "\n";
  return sum;
} // end ComputeSerial
// Device int reduction via relaxed global atomics. Each timed iteration
// first resets the accumulator and overwrites `flush` (to disturb caches),
// then runs the atomic-add kernel. Returns the final sum.
int reductionInt(sycl::queue &q, std::vector<int> &data,
                 std::vector<int> &flush, int iter) {
  const size_t data_size = data.size();
  const size_t flush_size = flush.size();
  int sum = 0;
  const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
  sycl::buffer<int> buf(data.data(), data_size, props);
  sycl::buffer<int> flush_buf(flush.data(), flush_size, props);
  sycl::buffer<int> sum_buf(&sum, 1, props);
  // initialize data on the device
  q.submit([&](auto &h) {
    sycl::accessor buf_acc(buf, h, sycl::write_only, sycl::no_init);
    h.parallel_for(data_size, [=](auto index) { buf_acc[index] = 1; });
  });
  q.wait();
  double elapsed = 0;
  for (int i = 0; i < iter; i++) {
    // reset the accumulator (removed an unused local from this kernel)
    q.submit([&](auto &h) {
      sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init);
      h.parallel_for(1, [=](auto index) { sum_acc[0] = 0; });
    });
    // flush the cache
    q.submit([&](auto &h) {
      sycl::accessor flush_acc(flush_buf, h, sycl::write_only, sycl::no_init);
      h.parallel_for(flush_size, [=](auto index) { flush_acc[index] = 1; });
    });
    Timer timer;
    // reductionInt main begin
    q.submit([&](auto &h) {
      sycl::accessor buf_acc(buf, h, sycl::read_only);
      sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init);
      h.parallel_for(data_size, [=](auto index) {
        size_t glob_id = index[0];
        auto v = sycl::atomic_ref<int, sycl::memory_order::relaxed,
                                  sycl::memory_scope::device,
                                  sycl::access::address_space::global_space>(
            sum_acc[0]);
        v.fetch_add(buf_acc[glob_id]);
      });
    });
    // reductionInt main end
    q.wait();
    sycl::host_accessor h_acc(sum_buf);
    sum = h_acc[0];
    elapsed += timer.Elapsed();
  }
  elapsed = elapsed / iter;
  if (sum == sum_expected)
    std::cout << "SUCCESS: Time ReductionInt = " << elapsed << "s"
              << " sum = " << sum << "\n";
  else
    std::cout << "ERROR: ReductionInt Expected " << sum_expected << " but got "
              << sum << "\n";
  return sum;
} // end reduction1
// Device float reduction via relaxed global atomics. Each timed iteration
// first resets the accumulator and overwrites `flush` (to disturb caches),
// then runs the atomic-add kernel. Returns the final sum.
int reductionFloat(sycl::queue &q, std::vector<float> &data,
                   std::vector<float> &flush, int iter) {
  const size_t data_size = data.size();
  const size_t flush_size = flush.size();
  float sum = 0.0;
  const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
  sycl::buffer<float> buf(data.data(), data_size, props);
  sycl::buffer<float> flush_buf(flush.data(), flush_size, props);
  sycl::buffer<float> sum_buf(&sum, 1, props);
  // initialize data on the device
  q.submit([&](auto &h) {
    sycl::accessor buf_acc(buf, h, sycl::write_only, sycl::no_init);
    h.parallel_for(data_size, [=](auto index) { buf_acc[index] = 1; });
  });
  q.wait();
  double elapsed = 0;
  for (int i = 0; i < iter; i++) {
    // reset the accumulator (removed an unused local from this kernel)
    q.submit([&](auto &h) {
      sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init);
      h.parallel_for(1, [=](auto index) { sum_acc[0] = 0; });
    });
    // flush the cache
    q.submit([&](auto &h) {
      sycl::accessor flush_acc(flush_buf, h, sycl::write_only, sycl::no_init);
      h.parallel_for(flush_size, [=](auto index) { flush_acc[index] = 1; });
    });
    Timer timer;
    // reductionFloat main begin
    q.submit([&](auto &h) {
      sycl::accessor buf_acc(buf, h, sycl::read_only);
      sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init);
      h.parallel_for(data_size, [=](auto index) {
        size_t glob_id = index[0];
        auto v = sycl::atomic_ref<float, sycl::memory_order::relaxed,
                                  sycl::memory_scope::device,
                                  sycl::access::address_space::global_space>(
            sum_acc[0]);
        v.fetch_add(buf_acc[glob_id]);
      });
    });
    // reductionFloat main end
    q.wait();
    sycl::host_accessor h_acc(sum_buf);
    sum = h_acc[0];
    elapsed += timer.Elapsed();
  }
  elapsed = elapsed / iter;
  if (sum == sum_expected)
    std::cout << "SUCCESS: Time ReductionFLoat = " << elapsed << "s"
              << " sum = " << sum << "\n";
  else
    std::cout << "ERROR: ReductionFloat Expected " << sum_expected
              << " but got " << sum << "\n";
  return sum;
} // end reduction2
// Runs serial and device-atomic reductions for int and float inputs.
int main(int argc, char *argv[]) {
  sycl::queue q{sycl::default_selector_v, exception_handler};
  std::cout << q.get_device().get_info<sycl::info::device::name>() << "\n";
  {
    // Integer reduction: serial baseline, then device atomics.
    std::vector<int> values(N, 1);
    std::vector<int> scratch(N, 1);
    ComputeSerialInt(values, scratch, 16);
    reductionInt(q, values, scratch, 16);
  }
  {
    // Float reduction: serial baseline, then device atomics.
    std::vector<float> values(N, 1.0f);
    std::vector<float> scratch(N, 1.0f);
    ComputeSerialFloat(values, scratch, 16);
    reductionFloat(q, values, scratch, 16);
  }
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/atomics/local_atomics_ref.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <iostream>
// Two-level reduction: each work-group accumulates into a local-memory
// counter with local atomics, then work-item 0 adds the group's partial
// sum into a single global counter.
int main() {
  constexpr int N = 256 * 256;
  constexpr int M = 512;
  constexpr int NUM_WG = N / M;
  int total = 0;
  int *a = static_cast<int *>(malloc(sizeof(int) * N));
  for (int i = 0; i < N; i++)
    a[i] = 1;
  sycl::queue q({sycl::property::queue::enable_profiling()});
  { // Buffer scope: device work completes before `a` is freed below.
    sycl::buffer<int> global(&total, 1);
    sycl::buffer<int> bufa(a, N);
    auto e1 = q.submit([&](sycl::handler &h) {
      sycl::accessor b(global, h);
      sycl::accessor acc_a(bufa, h, sycl::read_only);
      auto acc = sycl::local_accessor<int, 1>(NUM_WG, h);
      h.parallel_for(sycl::nd_range<1>(N, M), [=](auto it) {
        auto i = it.get_global_id(0);
        auto group_id = it.get_group(0);
        sycl::atomic_ref<int, sycl::memory_order_relaxed,
                         sycl::memory_scope_device,
                         sycl::access::address_space::local_space>
            atomic_op(acc[group_id]);
        sycl::atomic_ref<int, sycl::memory_order_relaxed,
                         sycl::memory_scope_device,
                         sycl::access::address_space::global_space>
            atomic_op_global(b[0]);
        // BUG FIX: local memory contents are undefined at kernel start;
        // zero this group's slot before accumulating into it.
        if (it.get_local_id(0) == 0)
          acc[group_id] = 0;
        it.barrier(sycl::access::fence_space::local_space);
        atomic_op += acc_a[i];
        it.barrier(sycl::access::fence_space::local_space);
        if (it.get_local_id(0) == 0)
          atomic_op_global += acc[group_id];
      });
    });
    sycl::host_accessor h_global(global);
    std::cout << "Reduction Sum : " << h_global[0] << "\n";
    int total_time =
        (e1.get_profiling_info<sycl::info::event_profiling::command_end>() -
         e1.get_profiling_info<sycl::info::event_profiling::command_start>());
    std::cout << "Kernel Execution Time of Local Atomics : " << total_time
              << "\n";
  }
  free(a); // host allocation was leaked in the original
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/atomics/align.hpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#ifndef __ALIGN
#define __ALIGN 1
#include <CL/sycl.hpp>
#include <array>
#include <chrono>
#include <iostream>
// Supported allocation alignments, expressed in bytes.
enum class Alignment : size_t {
  Normal = sizeof(void *), // default (malloc-style) alignment
  SSE = 16,                // 16-byte vector alignment
  AVX = 32,                // 32-byte vector alignment
  PAGE = 4096,             // page alignment
};
namespace detail {
// Allocate `size` bytes aligned to `align`; returns nullptr on failure.
// Declaration only — the definition lives in another translation unit.
void *allocate_aligned_memory(size_t align, size_t size);
// Release memory obtained from allocate_aligned_memory.
void deallocate_aligned_memory(void *ptr) noexcept;
} // namespace detail
// STL-compatible allocator yielding memory aligned to `Align`
// (page-aligned by default). Primary template declared here.
template <typename T, Alignment Align = Alignment::PAGE> class AlignedAllocator;
// void specialization: provides only the typedefs the pre-C++17 allocator
// machinery (rebind etc.) requires; it cannot allocate.
template <Alignment Align> class AlignedAllocator<void, Align> {
public:
  typedef void *pointer;
  typedef const void *const_pointer;
  typedef void value_type;
  template <class U> struct rebind {
    typedef AlignedAllocator<U, Align> other;
  };
};
// STL-compatible allocator that hands out memory aligned to `Align`.
// The alignment is part of the type, so allocators with different
// alignments are distinct types.
template <typename T, Alignment Align> class AlignedAllocator {
public:
  typedef T value_type;
  typedef T *pointer;
  typedef const T *const_pointer;
  typedef T &reference;
  typedef const T &const_reference;
  typedef size_t size_type;
  typedef ptrdiff_t difference_type;
  typedef std::true_type propagate_on_container_move_assignment;
  template <class U> struct rebind {
    typedef AlignedAllocator<U, Align> other;
  };
public:
  AlignedAllocator() noexcept {}
  // Converting copy: allocators of any element type with the same
  // alignment are interchangeable.
  template <class U>
  AlignedAllocator(const AlignedAllocator<U, Align> &) noexcept {}
  size_type max_size() const noexcept {
    return (size_type(~0) - size_type(Align)) / sizeof(T);
  }
  pointer address(reference x) const noexcept { return std::addressof(x); }
  const_pointer address(const_reference x) const noexcept {
    return std::addressof(x);
  }
  // Allocate storage for n objects of T; throws std::bad_alloc on failure.
  pointer allocate(size_type n,
                   typename AlignedAllocator<void, Align>::const_pointer = 0) {
    const size_type alignment = static_cast<size_type>(Align);
    void *ptr = detail::allocate_aligned_memory(alignment, n * sizeof(T));
    if (ptr == nullptr) {
      throw std::bad_alloc();
    }
    return reinterpret_cast<pointer>(ptr);
  }
  void deallocate(pointer p, size_type) noexcept {
    return detail::deallocate_aligned_memory(p);
  }
  // Placement-construct / destroy, per pre-C++17 allocator requirements.
  template <class U, class... Args> void construct(U *p, Args &&...args) {
    ::new (reinterpret_cast<void *>(p)) U(std::forward<Args>(args)...);
  }
  void destroy(pointer p) { p->~T(); }
};
// Specialization for const T: both pointer aliases are const-qualified and
// only the const address() overload is provided. Allocation behavior is
// otherwise identical to the primary template.
template <typename T, Alignment Align> class AlignedAllocator<const T, Align> {
public:
  using value_type = T;
  using pointer = const T *;
  using const_pointer = const T *;
  using reference = const T &;
  using const_reference = const T &;
  using size_type = size_t;
  using difference_type = ptrdiff_t;
  using propagate_on_container_move_assignment = std::true_type;
  template <class U> struct rebind {
    using other = AlignedAllocator<U, Align>;
  };

public:
  AlignedAllocator() noexcept {}
  template <class U>
  AlignedAllocator(const AlignedAllocator<U, Align> &) noexcept {}
  // Largest element count whose byte size cannot overflow size_type.
  size_type max_size() const noexcept {
    return (size_type(~0) - size_type(Align)) / sizeof(T);
  }
  const_pointer address(const_reference x) const noexcept {
    return std::addressof(x);
  }
  // Obtain aligned raw storage for n objects of T; throws on failure.
  pointer allocate(size_type n,
                   typename AlignedAllocator<void, Align>::const_pointer = 0) {
    const size_type alignment = static_cast<size_type>(Align);
    void *raw = detail::allocate_aligned_memory(alignment, n * sizeof(T));
    if (raw == nullptr) {
      throw std::bad_alloc();
    }
    return reinterpret_cast<pointer>(raw);
  }
  void deallocate(pointer p, size_type) noexcept {
    return detail::deallocate_aligned_memory(p);
  }
  template <class U, class... Args> void construct(U *p, Args &&...args) {
    ::new (reinterpret_cast<void *>(p)) U(std::forward<Args>(args)...);
  }
  void destroy(pointer p) { p->~T(); }
};
// Two AlignedAllocator instances compare equal iff they use the same
// alignment: storage from one can then be released through the other.
template <typename T, Alignment TAlign, typename U, Alignment UAlign>
inline bool operator==(const AlignedAllocator<T, TAlign> &,
                       const AlignedAllocator<U, UAlign> &) noexcept {
  return TAlign == UAlign;
}
template <typename T, Alignment TAlign, typename U, Alignment UAlign>
inline bool operator!=(const AlignedAllocator<T, TAlign> &,
                       const AlignedAllocator<U, UAlign> &) noexcept {
  return TAlign != UAlign;
}
// Out-of-line definitions for the helpers declared in namespace detail above.
// FIX: marked `inline` — this is a header, and non-inline definitions here
// violate the one-definition rule as soon as two translation units include it.
namespace detail {
/// Allocate `size` bytes aligned to `align` bytes.
/// `align` must be a power of two and at least sizeof(void*), per the
/// posix_memalign contract. Returns nullptr when size == 0 or on failure.
inline void *allocate_aligned_memory(size_t align, size_t size) {
  assert(align >= sizeof(void *));
  if (size == 0) {
    return nullptr;
  }
  void *ptr = nullptr;
  int rc = posix_memalign(&ptr, align, size);
  if (rc != 0) {
    return nullptr;
  }
  return ptr;
}
/// Release memory obtained from allocate_aligned_memory (free-compatible;
/// accepts nullptr).
inline void deallocate_aligned_memory(void *ptr) noexcept { free(ptr); }
} // namespace detail
#endif
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/registers/histogram256-int.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <iostream>
#include <random>
#include <vector>
// 256-bin byte histogram over 16M packed 64-bit words, with per-work-item
// private bins merged into a global atomic histogram.
int main() {
  constexpr int N = 4096 * 4096;
  std::vector<unsigned long> input(N);
  srand(2009);
  // Pack eight random byte values into every 64-bit input word.
  for (int i = 0; i < N; ++i) {
    input[i] = (long)rand() % 256;
    input[i] |= ((long)rand() % 256) << 8;
    input[i] |= ((long)rand() % 256) << 16;
    input[i] |= ((long)rand() % 256) << 24;
    input[i] |= ((long)rand() % 256) << 32;
    input[i] |= ((long)rand() % 256) << 40;
    input[i] |= ((long)rand() % 256) << 48;
    input[i] |= ((long)rand() % 256) << 56;
  }
  sycl::queue q{sycl::gpu_selector_v,
                sycl::property::queue::enable_profiling{}};
  std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>()
            << "\n";
  // Snippet begin
  constexpr int BLOCK_SIZE = 256;
  constexpr int NUM_BINS = 256;
  std::vector<unsigned long> hist(NUM_BINS, 0);
  sycl::buffer<unsigned long, 1> mbuf(input.data(), N);
  sycl::buffer<unsigned long, 1> hbuf(hist.data(), NUM_BINS);
  auto e = q.submit([&](auto &h) {
    sycl::accessor macc(mbuf, h, sycl::read_only);
    auto hacc = hbuf.get_access<sycl::access::mode::atomic>(h);
    h.parallel_for(
        sycl::nd_range(sycl::range{N / BLOCK_SIZE}, sycl::range{64}),
        [=](sycl::nd_item<1> it) [[intel::reqd_sub_group_size(16)]] {
          int group = it.get_group()[0];
          int gSize = it.get_local_range()[0];
          auto sg = it.get_sub_group();
          int sgSize = sg.get_local_range()[0];
          int sgGroup = sg.get_group_id()[0];
          unsigned int
              histogram[NUM_BINS]; // histogram bins take too much storage to be
                                   // promoted to registers
          for (int k = 0; k < NUM_BINS; k++) {
            histogram[k] = 0;
          }
          // Each work-item privately histograms BLOCK_SIZE input words.
          for (int k = 0; k < BLOCK_SIZE; k++) {
            unsigned long x =
                sg.load(macc.get_pointer() + group * gSize * BLOCK_SIZE +
                        sgGroup * sgSize * BLOCK_SIZE + sgSize * k);
#pragma unroll
            for (int i = 0; i < 8; i++) {
              // BUGFIX: mask with 0xFF so the full 8-bit byte value selects
              // the bin. The previous 0x1F mask (copied from the 32-bin
              // variant) left bins 32..255 permanently empty.
              unsigned int c = x & 0xFFU;
              histogram[c] += 1;
              x = x >> 8;
            }
          }
          // Merge the private histogram into the global one atomically.
          for (int k = 0; k < NUM_BINS; k++) {
            hacc[k].fetch_add(histogram[k]);
          }
        });
  });
  // Snippet end
  q.wait();
  size_t kernel_ns = (e.template get_profiling_info<
                          sycl::info::event_profiling::command_end>() -
                      e.template get_profiling_info<
                          sycl::info::event_profiling::command_start>());
  std::cout << "Kernel Execution Time Average: total = " << kernel_ns * 1e-6
            << " msec" << std::endl;
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/registers/block-load-store.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <iostream>
// Wrap a raw pointer into a decorated global-address-space SYCL multi_ptr,
// the pointer form required by sub-group block load/store operations.
template <typename T>
auto get_multi_ptr(T *raw_ptr) {
  return sycl::address_space_cast<sycl::access::address_space::global_space,
                                  sycl::access::decorated::yes>(raw_ptr);
}
// Demonstrates sub-group block load/store: the whole sub-group moves a
// contiguous run of ints in one operation, then reports the kernel time.
int main() {
  // Profiling must be enabled on the queue to query kernel timestamps.
  sycl::queue q{sycl::gpu_selector_v,
                sycl::property::queue::enable_profiling{}};
  std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>()
            << std::endl;
  // Snippet begin
  constexpr int N = 1024 * 1024;
  int *data = sycl::malloc_shared<int>(N, q);
  int *data2 = sycl::malloc_shared<int>(N, q);
  // Fill the source so the copy is observable (every int becomes -1).
  memset(data2, 0xFF, sizeof(int) * N);
  auto e = q.submit([&](auto &h) {
    h.parallel_for(
        sycl::nd_range(sycl::range{N}, sycl::range{32}),
        [=](sycl::nd_item<1> it) [[intel::reqd_sub_group_size(16)]] {
          auto sg = it.get_sub_group();
          // Start of this sub-group's contiguous chunk: work-group offset
          // plus this sub-group's offset inside the work-group.
          int base = (it.get_group(0) * 32 +
                      sg.get_group_id()[0] * sg.get_local_range()[0]);
          // One block load + one block store per sub-group.
          auto load_ptr = get_multi_ptr(&(data2[base + 0]));
          int x = sg.load(load_ptr);
          auto store_ptr = get_multi_ptr(&(data[base + 0]));
          sg.store(store_ptr, x);
        });
  });
  // Snippet end
  q.wait();
  std::cout << "Kernel time = "
            << (e.template get_profiling_info<
                    sycl::info::event_profiling::command_end>() -
                e.template get_profiling_info<
                    sycl::info::event_profiling::command_start>())
            << " ns" << std::endl;
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/registers/histogram256-int-shared-private.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <iostream>
#include <random>
#include <vector>
// 256-bin histogram where the bins are sharded across the 16 sub-group
// lanes: each work-item privately keeps only NUM_BINS/16 bins, trading
// broadcasts for a much smaller per-work-item footprint.
int main() {
  constexpr int N = 4096 * 4096;
  std::vector<unsigned long> input(N);
  srand(2009);
  // Pack eight random byte values into every 64-bit input word.
  for (int i = 0; i < N; ++i) {
    input[i] = (long)rand() % 256;
    input[i] |= ((long)rand() % 256) << 8;
    input[i] |= ((long)rand() % 256) << 16;
    input[i] |= ((long)rand() % 256) << 24;
    input[i] |= ((long)rand() % 256) << 32;
    input[i] |= ((long)rand() % 256) << 40;
    input[i] |= ((long)rand() % 256) << 48;
    input[i] |= ((long)rand() % 256) << 56;
  }
  sycl::queue q{sycl::gpu_selector_v,
                sycl::property::queue::enable_profiling{}};
  std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>()
            << "\n";
  // Snippet begin
  constexpr int BLOCK_SIZE = 256;
  constexpr int NUM_BINS = 256;
  std::vector<unsigned long> hist(NUM_BINS, 0);
  sycl::buffer<unsigned long, 1> mbuf(input.data(), N);
  sycl::buffer<unsigned long, 1> hbuf(hist.data(), NUM_BINS);
  auto e = q.submit([&](auto &h) {
    sycl::accessor macc(mbuf, h, sycl::read_only);
    auto hacc = hbuf.get_access<sycl::access::mode::atomic>(h);
    h.parallel_for(
        sycl::nd_range(sycl::range{N / BLOCK_SIZE}, sycl::range{64}),
        [=](sycl::nd_item<1> it) [[intel::reqd_sub_group_size(16)]] {
          int group = it.get_group()[0];
          int gSize = it.get_local_range()[0];
          auto sg = it.get_sub_group();
          int sgSize = sg.get_local_range()[0];
          int sgGroup = sg.get_group_id()[0];
          // Each lane owns the 16 bins whose low 4 bits equal its lane id.
          unsigned int
              histogram[NUM_BINS / 16]; // histogram bins take too much storage
                                        // to be promoted to registers
          for (int k = 0; k < NUM_BINS / 16; k++) {
            histogram[k] = 0;
          }
          for (int k = 0; k < BLOCK_SIZE; k++) {
            unsigned long x =
                sg.load(macc.get_pointer() + group * gSize * BLOCK_SIZE +
                        sgGroup * sgSize * BLOCK_SIZE + sgSize * k);
// subgroup size is 16
#pragma unroll
            for (int j = 0; j < 16; j++) {
              // Broadcast lane j's word so every lane can test each byte
              // against the bins it owns.
              unsigned long y = sycl::group_broadcast(sg, x, j);
#pragma unroll
              for (int i = 0; i < 8; i++) {
                unsigned int c = y & 0xFF;
                // (c & 0xF) is the workitem in which the bin resides
                // (c >> 4) is the bin index
                if (sg.get_local_id()[0] == (c & 0xF)) {
                  histogram[c >> 4] += 1;
                }
                y = y >> 8;
              }
            }
          }
          // Scatter the sharded private bins back to the global histogram.
          for (int k = 0; k < NUM_BINS / 16; k++) {
            hacc[16 * k + sg.get_local_id()[0]].fetch_add(histogram[k]);
          }
        });
  });
  // Snippet end
  q.wait();
  size_t kernel_ns = (e.template get_profiling_info<
                          sycl::info::event_profiling::command_end>() -
                      e.template get_profiling_info<
                          sycl::info::event_profiling::command_start>());
  std::cout << "Kernel Execution Time Average: total = " << kernel_ns * 1e-6
            << " msec" << std::endl;
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/registers/non-block-load-store.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <iostream>
int main() {
sycl::queue q{sycl::gpu_selector_v,
sycl::property::queue::enable_profiling{}};
std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>()
<< std::endl;
// Snippet begin
constexpr int N = 1024 * 1024;
int *data = sycl::malloc_shared<int>(N, q);
int *data2 = sycl::malloc_shared<int>(N, q);
memset(data2, 0xFF, sizeof(int) * N);
auto e = q.submit([&](auto &h) {
h.parallel_for(sycl::nd_range(sycl::range{N}, sycl::range{32}),
[=](sycl::nd_item<1> it) {
int i = it.get_global_linear_id();
data[i] = data2[i];
});
});
// Snippet end
q.wait();
std::cout << "Kernel time = "
<< (e.template get_profiling_info<
sycl::info::event_profiling::command_end>() -
e.template get_profiling_info<
sycl::info::event_profiling::command_start>())
<< " ns" << std::endl;
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/registers/histogram32-int.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <iostream>
#include <random>
#include <vector>
// 32-bin histogram with `unsigned int` private bins: small enough that the
// compiler can keep them in registers (contrast with the `long` variant).
int main() {
  constexpr int N = 4096 * 4096;
  std::vector<unsigned long> input(N);
  srand(2009);
  // Pack eight random byte values into every 64-bit input word.
  for (int i = 0; i < N; ++i) {
    input[i] = (long)rand() % 256;
    input[i] |= ((long)rand() % 256) << 8;
    input[i] |= ((long)rand() % 256) << 16;
    input[i] |= ((long)rand() % 256) << 24;
    input[i] |= ((long)rand() % 256) << 32;
    input[i] |= ((long)rand() % 256) << 40;
    input[i] |= ((long)rand() % 256) << 48;
    input[i] |= ((long)rand() % 256) << 56;
  }
  sycl::queue q{sycl::gpu_selector_v,
                sycl::property::queue::enable_profiling{}};
  std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>()
            << "\n";
  // Snippet begin
  constexpr int BLOCK_SIZE = 256;
  constexpr int NUM_BINS = 32;
  std::vector<unsigned long> hist(NUM_BINS, 0);
  sycl::buffer<unsigned long, 1> mbuf(input.data(), N);
  sycl::buffer<unsigned long, 1> hbuf(hist.data(), NUM_BINS);
  auto e = q.submit([&](auto &h) {
    sycl::accessor macc(mbuf, h, sycl::read_only);
    // Atomic accessor: the global bins are updated by every work-item.
    auto hacc = hbuf.get_access<sycl::access::mode::atomic>(h);
    h.parallel_for(
        sycl::nd_range(sycl::range{N / BLOCK_SIZE}, sycl::range{64}),
        [=](sycl::nd_item<1> it) [[intel::reqd_sub_group_size(16)]] {
          int group = it.get_group()[0];
          int gSize = it.get_local_range()[0];
          auto sg = it.get_sub_group();
          int sgSize = sg.get_local_range()[0];
          int sgGroup = sg.get_group_id()[0];
          unsigned int histogram[NUM_BINS]; // histogram bins take less storage
                                            // with smaller data type
          for (int k = 0; k < NUM_BINS; k++) {
            histogram[k] = 0;
          }
          // Each work-item privately histograms BLOCK_SIZE input words.
          for (int k = 0; k < BLOCK_SIZE; k++) {
            unsigned long x =
                sg.load(macc.get_pointer() + group * gSize * BLOCK_SIZE +
                        sgGroup * sgSize * BLOCK_SIZE + sgSize * k);
#pragma unroll
            for (int i = 0; i < 8; i++) {
              // Low 5 bits of each byte select one of the 32 bins.
              unsigned int c = x & 0x1FU;
              histogram[c] += 1;
              x = x >> 8;
            }
          }
          // Merge the private histogram into the global one atomically.
          for (int k = 0; k < NUM_BINS; k++) {
            hacc[k].fetch_add(histogram[k]);
          }
        });
  });
  // Snippet end
  q.wait();
  size_t kernel_ns = (e.template get_profiling_info<
                          sycl::info::event_profiling::command_end>() -
                      e.template get_profiling_info<
                          sycl::info::event_profiling::command_start>());
  std::cout << "Kernel Execution Time Average: total = " << kernel_ns * 1e-6
            << " msec" << std::endl;
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/registers/histogram32-long.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <iostream>
#include <random>
#include <vector>
// 32-bin histogram with `unsigned long` private bins — the wider element
// type doubles the private-array footprint versus the `int` variant.
int main() {
  constexpr size_t N = 4096 * 4096;
  std::vector<unsigned long> input(N);
  srand(2009);
  // Pack eight random byte values into every 64-bit input word.
  for (size_t i = 0; i < N; ++i) {
    input[i] = (long)rand() % 256;
    input[i] |= ((long)rand() % 256) << 8;
    input[i] |= ((long)rand() % 256) << 16;
    input[i] |= ((long)rand() % 256) << 24;
    input[i] |= ((long)rand() % 256) << 32;
    input[i] |= ((long)rand() % 256) << 40;
    input[i] |= ((long)rand() % 256) << 48;
    input[i] |= ((long)rand() % 256) << 56;
  }
  sycl::queue q{sycl::gpu_selector_v,
                sycl::property::queue::enable_profiling{}};
  std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>()
            << "\n";
  // Snippet begin
  constexpr int BLOCK_SIZE = 256;
  constexpr int NUM_BINS = 32;
  std::vector<unsigned long> hist(NUM_BINS, 0);
  sycl::buffer<unsigned long, 1> mbuf(input.data(), N);
  sycl::buffer<unsigned long, 1> hbuf(hist.data(), NUM_BINS);
  auto e = q.submit([&](auto &h) {
    sycl::accessor macc(mbuf, h, sycl::read_only);
    // Atomic accessor: the global bins are updated by every work-item.
    auto hacc = hbuf.get_access<sycl::access::mode::atomic>(h);
    h.parallel_for(
        sycl::nd_range(sycl::range{N / BLOCK_SIZE}, sycl::range{64}),
        [=](sycl::nd_item<1> it) [[intel::reqd_sub_group_size(16)]] {
          int group = it.get_group()[0];
          int gSize = it.get_local_range()[0];
          auto sg = it.get_sub_group();
          int sgSize = sg.get_local_range()[0];
          int sgGroup = sg.get_group_id()[0];
          unsigned long
              histogram[NUM_BINS]; // histogram bins take too much storage to be
                                   // promoted to registers
          for (int k = 0; k < NUM_BINS; k++) {
            histogram[k] = 0;
          }
          // Each work-item privately histograms BLOCK_SIZE input words.
          for (int k = 0; k < BLOCK_SIZE; k++) {
            unsigned long x =
                sg.load(macc.get_pointer() + group * gSize * BLOCK_SIZE +
                        sgGroup * sgSize * BLOCK_SIZE + sgSize * k);
#pragma unroll
            for (int i = 0; i < 8; i++) {
              // Low 5 bits of each byte select one of the 32 bins.
              unsigned int c = x & 0x1FU;
              histogram[c] += 1;
              x = x >> 8;
            }
          }
          // Merge the private histogram into the global one atomically.
          for (int k = 0; k < NUM_BINS; k++) {
            hacc[k].fetch_add(histogram[k]);
          }
        });
  });
  // Snippet end
  q.wait();
  size_t kernel_ns = (e.template get_profiling_info<
                          sycl::info::event_profiling::command_end>() -
                      e.template get_profiling_info<
                          sycl::info::event_profiling::command_start>());
  std::cout << "Kernel Execution Time Average: total = " << kernel_ns * 1e-6
            << " msec" << std::endl;
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/registers/histogram32-int-volatile.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <iostream>
#include <random>
#include <vector>
// Variant of the 32-bin histogram whose private bins are `volatile`,
// deliberately preventing register promotion so the cost of spilling the
// bins to memory can be measured against the non-volatile variant.
int main() {
  constexpr int N = 4096 * 4096;
  std::vector<unsigned long> input(N);
  srand(2009);
  // Pack eight random byte values into every 64-bit input word.
  for (int i = 0; i < N; ++i) {
    input[i] = (long)rand() % 256;
    input[i] |= ((long)rand() % 256) << 8;
    input[i] |= ((long)rand() % 256) << 16;
    input[i] |= ((long)rand() % 256) << 24;
    input[i] |= ((long)rand() % 256) << 32;
    input[i] |= ((long)rand() % 256) << 40;
    input[i] |= ((long)rand() % 256) << 48;
    input[i] |= ((long)rand() % 256) << 56;
  }
  sycl::queue q{sycl::gpu_selector_v,
                sycl::property::queue::enable_profiling{}};
  std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>()
            << "\n";
  // Snippet begin
  constexpr int BLOCK_SIZE = 256;
  constexpr int NUM_BINS = 32;
  std::vector<unsigned long> hist(NUM_BINS, 0);
  sycl::buffer<unsigned long, 1> mbuf(input.data(), N);
  sycl::buffer<unsigned long, 1> hbuf(hist.data(), NUM_BINS);
  auto e = q.submit([&](auto &h) {
    sycl::accessor macc(mbuf, h, sycl::read_only);
    auto hacc = hbuf.get_access<sycl::access::mode::atomic>(h);
    h.parallel_for(sycl::nd_range(sycl::range{N / BLOCK_SIZE}, sycl::range{64}),
                   [=](sycl::nd_item<1> it) [[intel::reqd_sub_group_size(16)]] {
                     int group = it.get_group()[0];
                     int gSize = it.get_local_range()[0];
                     auto sg = it.get_sub_group();
                     int sgSize = sg.get_local_range()[0];
                     int sgGroup = sg.get_group_id()[0];
                     volatile unsigned int
                         histogram[NUM_BINS]; // volatile variables will not
                                              // be assigned to any registers
                     for (int k = 0; k < NUM_BINS; k++) {
                       histogram[k] = 0;
                     }
                     // Each work-item privately histograms BLOCK_SIZE words.
                     for (int k = 0; k < BLOCK_SIZE; k++) {
                       unsigned long x = sg.load(
                           macc.get_pointer() + group * gSize * BLOCK_SIZE +
                           sgGroup * sgSize * BLOCK_SIZE + sgSize * k);
#pragma unroll
                       for (int i = 0; i < 8; i++) {
                         // Low 5 bits of each byte select one of 32 bins.
                         unsigned int c = x & 0x1FU;
                         histogram[c] += 1;
                         x = x >> 8;
                       }
                     }
                     // Merge private bins into the global atomic histogram.
                     for (int k = 0; k < NUM_BINS; k++) {
                       hacc[k].fetch_add(histogram[k]);
                     }
                   });
  });
  // Snippet end
  q.wait();
  size_t kernel_ns = (e.template get_profiling_info<
                          sycl::info::event_profiling::command_end>() -
                      e.template get_profiling_info<
                          sycl::info::event_profiling::command_start>());
  std::cout << "Kernel Execution Time Average: total = " << kernel_ns * 1e-6
            << " msec" << std::endl;
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/MPI/01_omp_mpich/omp_mpich.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
#include <mpi.h>
#include <stdlib.h>
#include <stdio.h>
#include <cmath>
#include <omp.h>
#define N 1000000
#define STEPS 1000
#define ABS(x) (x) > 0 ? (x) : -(x)
int main (int argc, char **argv )
{
int mpi_aware = 0;
if ( argc > 1 ) {
mpi_aware = 1;
printf("MPI device aware path enabled\n");
} // argc check
MPI_Init(NULL, NULL);
int rank,nranks;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &nranks);
int next_rank = ( rank + 1 ) % nranks;
int prev_rank = rank == 0 ? nranks-1 : rank-1;
printf("rank=%d next=%d prev=%d\n",rank,next_rank,prev_rank);
#pragma omp target
;
double buf1[N],buf2[N];
double *curr,*next,*tmp;
for ( int i = 0; i < N; i++ ) {
buf1[i] = 0;
buf2[i] = 0;
}
MPI_Request psrq;
double start = omp_get_wtime();
#pragma omp target data map(buf1,buf2)
{
#pragma omp target data use_device_addr(buf1,buf2) if(mpi_aware)
{
curr = buf1;
next = buf2;
}
printf("curr=%p next=%p\n",curr,next);
for ( int step = 0; step < STEPS; step++ ) {
if ( rank == 0 && step % 100 == 0 ) printf("step: %d\n",step);
#pragma omp target teams distribute parallel for
for ( int i = 0; i < N; i++ ) curr[i]++;
if ( nranks > 1 ) {
#pragma omp target update from(curr[0:N]) if(!mpi_aware)
MPI_Request srq;
MPI_Isend(curr,N,MPI_DOUBLE,next_rank,0,MPI_COMM_WORLD,&srq);
// we need to make sure that the MPI_Isend of the previous
// iteration finished before doing the MPI_Recv of this
// iteration
if ( step > 0 ) MPI_Wait(&psrq,MPI_STATUS_IGNORE);
psrq = srq;
MPI_Recv(next,N,MPI_DOUBLE,prev_rank,0,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
#pragma omp target update to(next[0:N]) if(!mpi_aware)
} // nranks
tmp = curr;
curr = next;
next = tmp;
}
}
MPI_Barrier(MPI_COMM_WORLD);
double end = omp_get_wtime();
printf("rank %d total_time=%g\n",rank, end-start);
for ( int i = 0; i < N; i++ ) {
if ( buf1[i] != STEPS ) {
printf("Error in %d = %f\n",i,buf1[i]);
break;
}
}
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/22_mkl_dispatch/dgemm_example_03.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
// Snippet begin
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#include "mkl.h"
#include "mkl_omp_offload.h"
#define min(x,y) (((x) < (y)) ? (x) : (y))
#define epsilon 0.0000001f
// Returns true when x and y agree to within the epsilon tolerance.
bool compare(double x, double y)
{
    const double diff = fabs(x - y);
    return diff <= epsilon;
}
// Runs two independent DGEMMs concurrently on the device via
// `omp dispatch nowait`, then validates both against naive host loops.
int main()
{
double *A1, *B1, *C1, *C1_fl;
double *A2, *B2, *C2, *C2_fl;
int m, n, k, i, j, q;
double alpha, beta;
double sum;
int fail;
double t_start, t_end;
m = 2000, k = 200, n = 1000;
alpha = 1.0; beta = 0.0;
printf (" Allocating memory for matrices aligned on 64-byte boundary for better \n"
" performance \n\n");
A1 = (double *)mkl_malloc (m*k*sizeof( double ), 64 );
B1 = (double *)mkl_malloc (k*n*sizeof( double ), 64 );
C1 = (double *)mkl_malloc (m*n*sizeof( double ), 64 );
C1_fl = (double *)mkl_malloc (m*n*sizeof( double ), 64 );
A2 = (double *)mkl_malloc (m*k*sizeof( double ), 64 );
B2 = (double *)mkl_malloc (k*n*sizeof( double ), 64 );
C2 = (double *)mkl_malloc (m*n*sizeof( double ), 64 );
C2_fl = (double *)mkl_malloc (m*n*sizeof( double ), 64 );
if (A1 == NULL || B1 == NULL || C1 == NULL || C1_fl == NULL ||
A2 == NULL || B2 == NULL || C2 == NULL || C2_fl == NULL) {
printf( "\n ERROR: Can't allocate memory for matrices. Aborting... \n\n");
return 1;
}
printf (" Intializing matrix data \n\n");
// Deterministic fill so device and host results can be compared exactly.
for (i = 0; i < (m*k); i++) {
A1[i] = A2[i] = (double)(i+1);
}
for (i = 0; i < (k*n); i++) {
B1[i] = B2[i] = (double)(-i-1);
}
for (i = 0; i < (m*n); i++) {
C1[i] = C2[i] = 0.0;
C1_fl[i] = C2_fl[i] = 0.0;
}
printf (" \nComputing matrix product using Intel MKL cblas_dgemm function \n");
t_start = omp_get_wtime();
// Map inputs once; both dispatched GEMMs reuse the same device data region.
#pragma omp target data \
map(to: A1[0:m*k], B1[0:k*n], A2[0:m*k], B2[0:k*n]) \
map(tofrom: C1[0:m*n], C2[0:m*n])
{
// nowait lets the two device GEMMs overlap; taskwait joins them both.
#pragma omp dispatch nowait
cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
m, n, k, alpha, A1, k, B1, n, beta, C1, n);
#pragma omp dispatch nowait
cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
m, n, k, alpha, A2, k, B2, n, beta, C2, n);
#pragma omp taskwait
}
t_end = omp_get_wtime();
printf ("\n Top left corner of matrix C1: \n");
for (i=0; i<min(m,6); i++) {
for (j=0; j<min(n,6); j++) {
printf ("%12.5G", C1[j+i*n]);
}
printf ("\n");
}
printf ("\n Top left corner of matrix C2: \n");
for (i=0; i<min(m,6); i++) {
for (j=0; j<min(n,6); j++) {
printf ("%12.5G", C2[j+i*n]);
}
printf ("\n");
}
printf (" \nComputing matrix product using for-loops \n");
// Reference results computed with naive triple loops on the host.
for (i = 0; i < m; i++) {
for (j = 0; j < n; j++) {
sum = 0.0;
for (q = 0; q < k; q++)
sum += A1[k*i+q] * B1[n*q+j];
C1_fl[n*i+j] = sum;
}
}
for (i = 0; i < m; i++) {
for (j = 0; j < n; j++) {
sum = 0.0;
for (q = 0; q < k; q++)
sum += A2[k*i+q] * B2[n*q+j];
C2_fl[n*i+j] = sum;
}
}
printf ("\n Top left corner of matrix C1: \n");
for (i=0; i<min(m,6); i++) {
for (j=0; j<min(n,6); j++) {
printf ("%12.5G", C1_fl[j+i*n]);
}
printf ("\n");
}
printf ("\n Top left corner of matrix C2: \n");
for (i=0; i<min(m,6); i++) {
for (j=0; j<min(n,6); j++) {
printf ("%12.5G", C2_fl[j+i*n]);
}
printf ("\n");
}
printf ("\n Computations completed. Verifying... \n\n");
// Element-wise comparison against the host reference within epsilon.
fail = 0;
for (i = 0; i < (m*n); i++) {
if (! compare(C1[i], C1_fl[i]) || ! compare(C2[i], C2_fl[i])) {
fail = 1;
break;
}
}
if (fail) {
printf (" **** FAIL **** \n");
}
else {
printf(" time = %lf seconds\n", t_end - t_start);
printf (" **** PASS **** \n");
}
mkl_free(A1);
mkl_free(B1);
mkl_free(C1);
mkl_free(C1_fl);
mkl_free(A2);
mkl_free(B2);
mkl_free(C2);
mkl_free(C2_fl);
return 0;
}
// Snippet end
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/22_mkl_dispatch/dgemm_batch_example_02.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
/*******************************************************************************
* Copyright 2019-2021 Intel Corporation.
*
* This software and the related documents are Intel copyrighted materials, and
* your use of them is governed by the express license under which they were
* provided to you (License). Unless the License provides otherwise, you may not
* use, modify, copy, publish, distribute, disclose or transmit this software or
* the related documents without Intel's prior written permission.
*
* This software and the related documents are provided as is, with no express
* or implied warranties, other than those that are expressly stated in the
* License.
*******************************************************************************/
// Snippet begin
#include <stdio.h>
#include <omp.h>
#include "mkl.h"
#include "mkl_omp_offload.h"
#include "common.h"
#define GROUP_COUNT 3
int dnum = 0;
int main() {
CBLAS_LAYOUT layout = (rand_int(0,1) == 0) ? CblasColMajor : CblasRowMajor;
CBLAS_TRANSPOSE *transA, *transB;
MKL_INT *m, *n, *k, *lda, *ldb, *ldc;
double *alpha, *beta;
MKL_INT *group_size, *sizea_array, *sizeb_array, *sizec_array, total_batch_size = 0, sizea, sizeb, sizec;
double **a_array, **b_array, **c_array, **c_ref_array;
double **a_array_dev, **b_array_dev, **c_array_dev;
transA = (CBLAS_TRANSPOSE *)mkl_malloc(GROUP_COUNT * sizeof(CBLAS_TRANSPOSE), 64);
transB = (CBLAS_TRANSPOSE *)mkl_malloc(GROUP_COUNT * sizeof(CBLAS_TRANSPOSE), 64);
m = (MKL_INT *)mkl_malloc(GROUP_COUNT * sizeof(MKL_INT), 64);
n = (MKL_INT *)mkl_malloc(GROUP_COUNT * sizeof(MKL_INT), 64);
k = (MKL_INT *)mkl_malloc(GROUP_COUNT * sizeof(MKL_INT), 64);
lda = (MKL_INT *)mkl_malloc(GROUP_COUNT * sizeof(MKL_INT), 64);
ldb = (MKL_INT *)mkl_malloc(GROUP_COUNT * sizeof(MKL_INT), 64);
ldc = (MKL_INT *)mkl_malloc(GROUP_COUNT * sizeof(MKL_INT), 64);
group_size = (MKL_INT *)mkl_malloc(GROUP_COUNT * sizeof(MKL_INT), 64);
alpha = (double *)mkl_malloc(GROUP_COUNT * sizeof(double), 64);
beta = (double *)mkl_malloc(GROUP_COUNT * sizeof(double), 64);
if ((m == NULL) || (n == NULL) || (k == NULL) || (lda == NULL) || (ldb == NULL) || (ldc == NULL) ||
(group_size == NULL) || (alpha == NULL) || (beta == NULL)) {
printf("Cannot allocate input arrays\n");
return 1;
}
MKL_INT i, j, p, idx;
for (i = 0; i < GROUP_COUNT; i++) {
transA[i] = (rand_int(0,1) == 0) ? CblasNoTrans : CblasTrans;
transB[i] = (rand_int(0,1) == 0) ? CblasNoTrans : CblasTrans;
alpha[i] = rand_double_scalar();
beta[i] = rand_double_scalar();
m[i] = rand_int(1, 20);
n[i] = rand_int(1, 20);
k[i] = rand_int(1, 20);
lda[i] = MAX(m[i], k[i]);
ldb[i] = MAX(k[i], n[i]);
ldc[i] = MAX(m[i], n[i]);
group_size[i] = rand_int(1, 10);
total_batch_size += group_size[i];
#ifdef MKL_ILP64
printf("Group %lld: layout = %s, transA = %s, transB = %s, m = %lld, n = %lld, k = %lld, lda = %lld, ldb = %lld, ldc = %lld, alpha = %lf, beta = %lf, group_size = %lld\n",
i, (layout == CblasColMajor) ? "Column Major" : "Row Major",
(transA[i] == CblasNoTrans) ? "Non Transpose" : "Transpose",
(transB[i] == CblasNoTrans) ? "Non Transpose" : "Transpose",
m[i], n[i], k[i], lda[i], ldb[i], ldc[i], alpha[i], beta[i], group_size[i]);
#else
printf("Group %d: layout = %s, transA = %s, transB = %s, m = %d, n = %d, k = %d, lda = %d, ldb = %d, ldc = %d, alpha = %lf, beta = %lf, group_size = %d\n",
i, (layout == CblasColMajor) ? "Column Major" : "Row Major",
(transA[i] == CblasNoTrans) ? "Non Transpose" : "Transpose",
(transB[i] == CblasNoTrans) ? "Non Transpose" : "Transpose",
m[i], n[i], k[i], lda[i], ldb[i], ldc[i], alpha[i], beta[i], group_size[i]);
#endif
}
sizea_array = (MKL_INT *)mkl_malloc(sizeof(MKL_INT) * total_batch_size, 64);
sizeb_array = (MKL_INT *)mkl_malloc(sizeof(MKL_INT) * total_batch_size, 64);
sizec_array = (MKL_INT *)mkl_malloc(sizeof(MKL_INT) * total_batch_size, 64);
a_array = (double **)mkl_malloc(sizeof(double *) * total_batch_size, 64);
b_array = (double **)mkl_malloc(sizeof(double *) * total_batch_size, 64);
c_array = (double **)mkl_malloc(sizeof(double *) * total_batch_size, 64);
a_array_dev = (double **)mkl_malloc(sizeof(double *) * total_batch_size, 64);
b_array_dev = (double **)mkl_malloc(sizeof(double *) * total_batch_size, 64);
c_array_dev = (double **)mkl_malloc(sizeof(double *) * total_batch_size, 64);
c_ref_array = (double **)mkl_malloc(sizeof(double *) * total_batch_size, 64);
if ((sizea_array == NULL) || (sizeb_array == NULL) || (sizec_array == NULL) || (a_array == NULL) ||
(b_array == NULL) || (c_array == NULL) || (a_array_dev == NULL) || (b_array_dev == NULL) ||
(c_array_dev == NULL) || (c_ref_array == NULL)) {
printf("Cannot allocate matrices and size arrays\n");
return 1;
}
idx = 0;
for (i = 0; i < GROUP_COUNT; i++) {
sizea = (((layout == CblasRowMajor) && (transA[i] == CblasTrans)) ||
((layout == CblasColMajor) && (transA[i] == CblasNoTrans))) ? lda[i] * k[i] : m[i] * lda[i];
sizeb = (((layout == CblasRowMajor) && (transB[i] == CblasTrans)) ||
((layout == CblasColMajor) && (transB[i] == CblasNoTrans))) ? ldb[i] * n[i] : k[i] * ldb[i];
sizec = (layout == CblasColMajor) ? ldc[i] * n[i] : ldc[i] * m[i];
for (j = 0; j < group_size[i]; j++) {
a_array[idx] = (double *)mkl_malloc(sizeof(double) * sizea, 64);
a_array_dev[idx] = a_array[idx];
sizea_array[idx] = sizea;
if (a_array[idx] == NULL) {
printf("cannot allocate a matrices\n");
return 1;
}
b_array[idx] = (double *)mkl_malloc(sizeof(double) * sizeb, 64);
b_array_dev[idx] = b_array[idx];
sizeb_array[idx] = sizeb;
if (b_array[idx] == NULL) {
printf("cannot allocate b matrices\n");
return 1;
}
c_array[idx] = (double *)mkl_malloc(sizeof(double) * sizec, 64);
c_array_dev[idx] = c_array[idx];
sizec_array[idx] = sizec;
if (c_array[idx] == NULL) {
printf("cannot allocate c matrices\n");
return 1;
}
c_ref_array[idx] = (double *)mkl_malloc(sizeof(double) * sizec, 64);
if (c_ref_array[idx] == NULL) {
printf("cannot allocate c_ref matrices\n");
return 1;
}
init_double_array(sizea, a_array[idx], 1);
init_double_array(sizeb, b_array[idx], 1);
init_double_array(sizec, c_array[idx], 1);
for (p = 0; p < sizec_array[idx]; p++) c_ref_array[idx][p] = c_array[idx][p];
idx++;
}
}
// run gemm_batch on host, use standard oneMKL interface
cblas_dgemm_batch(layout, transA, transB, m, n, k, alpha, (const double **) a_array, lda,
(const double **) b_array, ldb, beta, c_ref_array, ldc, GROUP_COUNT, group_size);
double *a, *b, *c;
for (i = 0; i < total_batch_size; i++) {
a = a_array[i];
b = b_array[i];
c = c_array[i];
#pragma omp target enter data map(to:a[0:sizea_array[i]],b[0:sizeb_array[i]],c[0:sizec_array[i]])
#pragma omp target data use_device_ptr(a,b,c)
{
a_array_dev[i] = a;
b_array_dev[i] = b;
c_array_dev[i] = c;
}
}
#pragma omp target data map(to:a_array_dev[0:total_batch_size], \
b_array_dev[0:total_batch_size], \
c_array_dev[0:total_batch_size]) device(dnum)
{
#pragma omp dispatch
cblas_dgemm_batch(layout, transA, transB, m, n, k, alpha, (const double **) a_array_dev, lda, (const double **) b_array_dev, ldb, beta, c_array_dev, ldc, GROUP_COUNT, group_size);
}
for (i = 0; i < total_batch_size; i++) {
a = a_array[i];
b = b_array[i];
c = c_array[i];
#pragma omp target exit data map(from:a[0:sizea_array[i]],b[0:sizeb_array[i]],c[0:sizec_array[i]])
}
double computed, reference, diff;
MKL_INT l;
idx = 0;
for (p = 0; p < GROUP_COUNT; p++) {
for (l = 0; l < group_size[p]; l++) {
for (i = 0; i < m[p]; i++) {
for (j = 0; j < n[p]; j++) {
if (layout == CblasColMajor) {
computed = c_array[idx][i + j * ldc[p]];
reference = c_ref_array[idx][i + j * ldc[p]];
}
else {
computed = c_array[idx][j + i * ldc[p]];
reference = c_ref_array[idx][j + i * ldc[p]];
}
diff = computed - reference;
diff = (diff > 0) ? diff : -diff;
if (diff > 0.0001) {
#ifdef MKL_ILP64
printf("Error in matrix %lld (group = %lld, matrix index in group = %lld) at index [%lld][%lld], computed = %lf, reference = %lf, difference = %lf\n", idx, p, l, i, j, computed, reference, diff);
#else
printf("Error in matrix %d at index [%d][%d], computed = %lf, reference = %lf, difference = %lf\n", idx, i, j, computed, reference, diff);
#endif
free_double_matrices(a_array, total_batch_size);
free_double_matrices(b_array, total_batch_size);
free_double_matrices(c_array, total_batch_size);
free_double_matrices(c_ref_array, total_batch_size);
mkl_free(a_array);
mkl_free(b_array);
mkl_free(c_array);
mkl_free(c_ref_array);
mkl_free(a_array_dev);
mkl_free(b_array_dev);
mkl_free(c_array_dev);
mkl_free(sizea_array);
mkl_free(sizeb_array);
mkl_free(sizec_array);
mkl_free(transA); mkl_free(transB);
mkl_free(m); mkl_free(n); mkl_free(k);
mkl_free(lda); mkl_free(ldb); mkl_free(ldc); mkl_free(group_size);
mkl_free(alpha); mkl_free(beta);
return 1;
}
}
}
idx++;
}
}
printf("Validation PASSED\n");
free_double_matrices(a_array, total_batch_size);
free_double_matrices(b_array, total_batch_size);
free_double_matrices(c_array, total_batch_size);
free_double_matrices(c_ref_array, total_batch_size);
mkl_free(a_array);
mkl_free(b_array);
mkl_free(c_array);
mkl_free(c_ref_array);
mkl_free(a_array_dev);
mkl_free(b_array_dev);
mkl_free(c_array_dev);
mkl_free(sizea_array);
mkl_free(sizeb_array);
mkl_free(sizec_array);
mkl_free(transA); mkl_free(transB);
mkl_free(m); mkl_free(n); mkl_free(k);
mkl_free(lda); mkl_free(ldb); mkl_free(ldc); mkl_free(group_size);
mkl_free(alpha); mkl_free(beta);
return 0;
}
// Snippet end
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/22_mkl_dispatch/dgemm_example_01.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
// Snippet begin
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#include "mkl.h"
#include "mkl_omp_offload.h"
#define min(x,y) (((x) < (y)) ? (x) : (y))
#define epsilon 0.0000001f
// Tolerance-based equality: true when x and y differ by at most `epsilon`.
bool compare(double x, double y)
{
    const double diff = (x > y) ? (x - y) : (y - x);
    return diff <= epsilon;
}
// Runs two independent DGEMMs (C1 = A1*B1 and C2 = A2*B2) on the GPU via the
// oneMKL OpenMP `dispatch` interface, then recomputes both products with
// naive host loops and verifies the results element-by-element.
// Returns 0 on success (PASS), 1 on allocation failure.
int main()
{
    double *A1, *B1, *C1, *C1_fl;   // first GEMM; C1_fl is the host reference
    double *A2, *B2, *C2, *C2_fl;   // second GEMM; C2_fl is the host reference
    int m, n, k, i, j, q;
    double alpha, beta;
    double sum;
    int fail;
    double t_start, t_end;
    m = 2000, k = 200, n = 1000;
    alpha = 1.0; beta = 0.0;        // beta = 0 makes C = alpha*A*B exactly
    printf (" Allocating memory for matrices aligned on 64-byte boundary for better \n"
            " performance \n\n");
    A1 = (double *)mkl_malloc (m*k*sizeof( double ), 64 );
    B1 = (double *)mkl_malloc (k*n*sizeof( double ), 64 );
    C1 = (double *)mkl_malloc (m*n*sizeof( double ), 64 );
    C1_fl = (double *)mkl_malloc (m*n*sizeof( double ), 64 );
    A2 = (double *)mkl_malloc (m*k*sizeof( double ), 64 );
    B2 = (double *)mkl_malloc (k*n*sizeof( double ), 64 );
    C2 = (double *)mkl_malloc (m*n*sizeof( double ), 64 );
    C2_fl = (double *)mkl_malloc (m*n*sizeof( double ), 64 );
    if (A1 == NULL || B1 == NULL || C1 == NULL || C1_fl == NULL ||
        A2 == NULL || B2 == NULL || C2 == NULL || C2_fl == NULL) {
        printf( "\n ERROR: Can't allocate memory for matrices. Aborting... \n\n");
        return 1;
    }
    printf (" Intializing matrix data \n\n");
    // Both GEMM problems use identical deterministic input data.
    for (i = 0; i < (m*k); i++) {
        A1[i] = A2[i] = (double)(i+1);
    }
    for (i = 0; i < (k*n); i++) {
        B1[i] = B2[i] = (double)(-i-1);
    }
    for (i = 0; i < (m*n); i++) {
        C1[i] = C2[i] = 0.0;
        C1_fl[i] = C2_fl[i] = 0.0;
    }
    printf (" \nComputing matrix product using Intel MKL cblas_dgemm function \n");
    t_start = omp_get_wtime();
    // Map all operands to the device once; the two dispatch regions then run
    // the MKL GPU variant of cblas_dgemm back-to-back (sequentially).
    #pragma omp target data                                 \
        map(to: A1[0:m*k], B1[0:k*n], A2[0:m*k], B2[0:k*n]) \
        map(tofrom: C1[0:m*n], C2[0:m*n])
    {
        #pragma omp dispatch
        cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
                    m, n, k, alpha, A1, k, B1, n, beta, C1, n);
        #pragma omp dispatch
        cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
                    m, n, k, alpha, A2, k, B2, n, beta, C2, n);
    }
    t_end = omp_get_wtime();
    printf ("\n Top left corner of matrix C1: \n");
    for (i=0; i<min(m,6); i++) {
      for (j=0; j<min(n,6); j++) {
        printf ("%12.5G", C1[j+i*n]);
      }
      printf ("\n");
    }
    printf ("\n Top left corner of matrix C2: \n");
    for (i=0; i<min(m,6); i++) {
      for (j=0; j<min(n,6); j++) {
        printf ("%12.5G", C2[j+i*n]);
      }
      printf ("\n");
    }
    printf (" \nComputing matrix product using for-loops \n");
    // Host reference: classic triple-nested-loop matrix multiply.
    for (i = 0; i < m; i++) {
        for (j = 0; j < n; j++) {
            sum = 0.0;
            for (q = 0; q < k; q++)
                sum += A1[k*i+q] * B1[n*q+j];
            C1_fl[n*i+j] = sum;
        }
    }
    for (i = 0; i < m; i++) {
        for (j = 0; j < n; j++) {
            sum = 0.0;
            for (q = 0; q < k; q++)
                sum += A2[k*i+q] * B2[n*q+j];
            C2_fl[n*i+j] = sum;
        }
    }
    printf ("\n Top left corner of matrix C1: \n");
    for (i=0; i<min(m,6); i++) {
      for (j=0; j<min(n,6); j++) {
        printf ("%12.5G", C1_fl[j+i*n]);
      }
      printf ("\n");
    }
    printf ("\n Top left corner of matrix C2: \n");
    for (i=0; i<min(m,6); i++) {
      for (j=0; j<min(n,6); j++) {
        printf ("%12.5G", C2_fl[j+i*n]);
      }
      printf ("\n");
    }
    printf ("\n Computations completed. Verifying... \n\n");
    // Element-wise comparison against the host reference within epsilon.
    fail = 0;
    for (i = 0; i < (m*n); i++) {
        if (! compare(C1[i], C1_fl[i]) || ! compare(C2[i], C2_fl[i])) {
            fail = 1;
            break;
        }
    }
    if (fail) {
        printf (" **** FAIL **** \n");
    }
    else {
        printf(" time = %lf seconds\n", t_end - t_start);
        printf (" **** PASS **** \n");
    }
    mkl_free(A1);
    mkl_free(B1);
    mkl_free(C1);
    mkl_free(C1_fl);
    mkl_free(A2);
    mkl_free(B2);
    mkl_free(C2);
    mkl_free(C2_fl);
    return 0;
}
// Snippet end
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/22_mkl_dispatch/dgemm_example_02.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
// Snippet begin
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#include "mkl.h"
#include "mkl_omp_offload.h"
#define min(x,y) (((x) < (y)) ? (x) : (y))
#define epsilon 0.0000001f
// Returns true when x and y are equal to within the tolerance `epsilon`.
bool compare(double x, double y)
{
    double d = x - y;
    if (d < 0)
        d = -d;
    return d <= epsilon;
}
// Same two-DGEMM offload benchmark as dgemm_example_01, but the two
// `dispatch` regions are issued from two different host threads so the two
// GPU GEMMs can be submitted concurrently. Results are validated against
// naive host loops. Returns 0 on success, 1 on allocation failure.
int main()
{
    double *A1, *B1, *C1, *C1_fl;   // first GEMM; C1_fl is the host reference
    double *A2, *B2, *C2, *C2_fl;   // second GEMM; C2_fl is the host reference
    int m, n, k, i, j, q;
    double alpha, beta;
    double sum;
    int fail;
    double t_start, t_end;
    m = 2000, k = 200, n = 1000;
    alpha = 1.0; beta = 0.0;        // beta = 0 makes C = alpha*A*B exactly
    printf (" Allocating memory for matrices aligned on 64-byte boundary for better \n"
            " performance \n\n");
    A1 = (double *)mkl_malloc (m*k*sizeof( double ), 64 );
    B1 = (double *)mkl_malloc (k*n*sizeof( double ), 64 );
    C1 = (double *)mkl_malloc (m*n*sizeof( double ), 64 );
    C1_fl = (double *)mkl_malloc (m*n*sizeof( double ), 64 );
    A2 = (double *)mkl_malloc (m*k*sizeof( double ), 64 );
    B2 = (double *)mkl_malloc (k*n*sizeof( double ), 64 );
    C2 = (double *)mkl_malloc (m*n*sizeof( double ), 64 );
    C2_fl = (double *)mkl_malloc (m*n*sizeof( double ), 64 );
    if (A1 == NULL || B1 == NULL || C1 == NULL || C1_fl == NULL ||
        A2 == NULL || B2 == NULL || C2 == NULL || C2_fl == NULL) {
        printf( "\n ERROR: Can't allocate memory for matrices. Aborting... \n\n");
        return 1;
    }
    printf (" Intializing matrix data \n\n");
    // Both GEMM problems use identical deterministic input data.
    for (i = 0; i < (m*k); i++) {
        A1[i] = A2[i] = (double)(i+1);
    }
    for (i = 0; i < (k*n); i++) {
        B1[i] = B2[i] = (double)(-i-1);
    }
    for (i = 0; i < (m*n); i++) {
        C1[i] = C2[i] = 0.0;
        C1_fl[i] = C2_fl[i] = 0.0;
    }
    printf (" \nComputing matrix product using Intel MKL cblas_dgemm function \n");
    t_start = omp_get_wtime();
    #pragma omp target data                                 \
        map(to: A1[0:m*k], B1[0:k*n], A2[0:m*k], B2[0:k*n]) \
        map(tofrom: C1[0:m*n], C2[0:m*n])
    {
        // Two host threads each dispatch one GEMM, allowing concurrent
        // submission of the two offloaded kernels.
        #pragma omp parallel num_threads(2)
        {
           int id = omp_get_thread_num();
           if (id == 0) {
               #pragma omp dispatch
               cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
                           m, n, k, alpha, A1, k, B1, n, beta, C1, n);
           }
           else if (id == 1) {
               #pragma omp dispatch
               cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
                           m, n, k, alpha, A2, k, B2, n, beta, C2, n);
           }
        }
    }
    t_end = omp_get_wtime();
    printf ("\n Top left corner of matrix C1: \n");
    for (i=0; i<min(m,6); i++) {
      for (j=0; j<min(n,6); j++) {
        printf ("%12.5G", C1[j+i*n]);
      }
      printf ("\n");
    }
    printf ("\n Top left corner of matrix C2: \n");
    for (i=0; i<min(m,6); i++) {
      for (j=0; j<min(n,6); j++) {
        printf ("%12.5G", C2[j+i*n]);
      }
      printf ("\n");
    }
    printf (" \nComputing matrix product using for-loops \n");
    // Host reference: classic triple-nested-loop matrix multiply.
    for (i = 0; i < m; i++) {
        for (j = 0; j < n; j++) {
            sum = 0.0;
            for (q = 0; q < k; q++)
                sum += A1[k*i+q] * B1[n*q+j];
            C1_fl[n*i+j] = sum;
        }
    }
    for (i = 0; i < m; i++) {
        for (j = 0; j < n; j++) {
            sum = 0.0;
            for (q = 0; q < k; q++)
                sum += A2[k*i+q] * B2[n*q+j];
            C2_fl[n*i+j] = sum;
        }
    }
    printf ("\n Top left corner of matrix C1: \n");
    for (i=0; i<min(m,6); i++) {
      for (j=0; j<min(n,6); j++) {
        printf ("%12.5G", C1_fl[j+i*n]);
      }
      printf ("\n");
    }
    printf ("\n Top left corner of matrix C2: \n");
    for (i=0; i<min(m,6); i++) {
      for (j=0; j<min(n,6); j++) {
        printf ("%12.5G", C2_fl[j+i*n]);
      }
      printf ("\n");
    }
    printf ("\n Computations completed. Verifying... \n\n");
    // Element-wise comparison against the host reference within epsilon.
    fail = 0;
    for (i = 0; i < (m*n); i++) {
        if (! compare(C1[i], C1_fl[i]) || ! compare(C2[i], C2_fl[i])) {
            fail = 1;
            break;
        }
    }
    if (fail) {
        printf (" **** FAIL **** \n");
    }
    else {
        printf(" time = %lf seconds\n", t_end - t_start);
        printf (" **** PASS **** \n");
    }
    mkl_free(A1);
    mkl_free(B1);
    mkl_free(C1);
    mkl_free(C1_fl);
    mkl_free(A2);
    mkl_free(B2);
    mkl_free(C2);
    mkl_free(C2_fl);
    return 0;
}
// Snippet end
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/22_mkl_dispatch/dgemm_dispatch_c.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
// Snippet begin
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#include "mkl.h"
#include "mkl_omp_offload.h"
#define min(x,y) (((x) < (y)) ? (x) : (y))
#define EPSILON 0.0001
// Computes C = alpha*A*B + beta*C on the GPU via the oneMKL OpenMP
// `target variant dispatch` interface and validates the result against a
// naive host triple loop. Returns 0 on PASS, nonzero on failure.
// Fix: C_fl was allocated with mkl_malloc but never released (leak).
int main()
{
    double *A, *B, *C, *C_fl;
    int64_t m, n, k;
    double alpha, beta;
    double sum;
    int64_t i, j, q;
    int fail;
    printf ("\n This example computes real matrix C=alpha*A*B+beta*C using \n"
            " Intel oneMKL function dgemm, where A, B, and C are matrices and \n"
            " alpha and beta are double precision scalars\n\n");
    m = 2000, k = 200, n = 1000;
    printf (" Initializing data for matrix multiplication C=A*B for matrix \n"
            " A(%li x %li) and matrix B(%li x %li)\n\n", m, k, k, n);
    alpha = 1.0; beta = 0.0;       // beta = 0 makes C = alpha*A*B exactly
    printf (" Allocating memory for matrices aligned on 64-byte boundary for better \n"
            " performance \n\n");
    A = (double *)mkl_malloc( m * k * sizeof( double ), 64 );
    B = (double *)mkl_malloc( k * n * sizeof( double ), 64 );
    C = (double *)mkl_malloc( m * n * sizeof( double ), 64 );
    C_fl = (double *)mkl_malloc( m*n*sizeof( double ), 64 );
    if (A == NULL || B == NULL || C == NULL || C_fl == NULL) {
        printf( "\n ERROR: Cannot allocate memory for matrices. Exiting... \n\n");
        return 1;
    }
    printf (" Intializing matrices \n\n");
    // Deterministic inputs: A counts up, B counts down (negative).
    for (i = 0; i < (m*k); i++) {
        A[i] = (double)(i+1);
    }
    for (i = 0; i < (k*n); i++) {
        B[i] = (double)(-i-1);
    }
    for (i = 0; i < (m*n); i++) {
        C[i] = 0.0;
        C_fl[i] = 0.0;
    }
    printf (" Computing matrix product using Intel oneMKL dgemm function via CBLAS interface \n\n");
    // Map operands to the device, then dispatch the GPU variant of dgemm
    // with the device addresses of A, B, C.
    #pragma omp target data map(to: A[0:m*k], B[0:k*n]) map(tofrom: C[0:m*n])
    {
        #pragma omp target variant dispatch use_device_ptr(A, B, C)
        cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
                    m, n, k, alpha, A, k, B, n, beta, C, n);
    }
    printf ("\n Top left corner of matrix C: \n");
    for (i=0; i<min(m,6); i++) {
      for (j=0; j<min(n,6); j++) {
        printf ("%12.5G", C[j+i*n]);
      }
      printf ("\n");
    }
    printf (" Computing matrix product using for-loops \n");
    // Host reference: classic triple-nested-loop matrix multiply.
    for (i = 0; i < m; i++) {
        for (j = 0; j < n; j++) {
            sum = 0.0;
            for (q = 0; q < k; q++) {
                sum += A[k*i+q] * B[n*q+j];
            }
            C_fl[n*i+j] = alpha * sum + beta * C_fl[n*i+j];
        }
    }
    printf ("\n Top left corner of matrix C_fl: \n");
    for (i=0; i<min(m,6); i++) {
      for (j=0; j<min(n,6); j++) {
        printf ("%12.5G", C_fl[j+i*n]);
      }
      printf ("\n");
    }
    printf (" Computations completed. Verifying... \n\n");
    // Absolute-difference check against the host reference.
    fail = 0;
    for (i = 0; i < (m*n); i++) {
        if (fabs(C[i] - C_fl[i]) > EPSILON) {
            fail = 1;
            break;
        }
    }
    if (fail)
        printf ("\n **** FAIL **** \n");
    else
        printf ("\n **** PASS **** \n");
    printf ("\n Deallocating memory \n\n");
    mkl_free(A);
    mkl_free(B);
    mkl_free(C);
    mkl_free(C_fl);   // was leaked in the original
    return fail;
}
// Snippet end
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/22_mkl_dispatch/dgemm_batch_example_01.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
// Snippet begin
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#include "mkl.h"
#include "mkl_omp_offload.h"
#define min(x,y) (((x) < (y)) ? (x) : (y))
#define epsilon 0.0000001f
// True when |x - y| <= epsilon, i.e. x and y are equal within tolerance.
bool compare(double x, double y)
{
    const double delta = x - y;
    return (delta <= epsilon) && (-delta <= epsilon);
}
// Runs two DGEMMs as a single grouped cblas_dgemm_batch call offloaded via
// `#pragma omp dispatch`, using device pointers gathered into pointer
// arrays. Results are validated against naive host loops.
// Returns 0 on success, 1 on allocation failure.
int main()
{
    double *A1, *B1, *C1, *C1_fl;   // first GEMM; C1_fl is the host reference
    double *A2, *B2, *C2, *C2_fl;   // second GEMM; C2_fl is the host reference
    int m, n, k, i, j, q;
    double alpha, beta;
    double sum;
    int fail;
    double t_start, t_end;
    m = 2000, k = 200, n = 1000;
    alpha = 1.0; beta = 0.0;        // beta = 0 makes C = alpha*A*B exactly
    printf (" Allocating memory for matrices aligned on 64-byte boundary for better \n"
            " performance \n\n");
    A1 = (double *)mkl_malloc (m*k*sizeof( double ), 64 );
    B1 = (double *)mkl_malloc (k*n*sizeof( double ), 64 );
    C1 = (double *)mkl_malloc (m*n*sizeof( double ), 64 );
    C1_fl = (double *)mkl_malloc (m*n*sizeof( double ), 64 );
    A2 = (double *)mkl_malloc (m*k*sizeof( double ), 64 );
    B2 = (double *)mkl_malloc (k*n*sizeof( double ), 64 );
    C2 = (double *)mkl_malloc (m*n*sizeof( double ), 64 );
    C2_fl = (double *)mkl_malloc (m*n*sizeof( double ), 64 );
    if (A1 == NULL || B1 == NULL || C1 == NULL || C1_fl == NULL ||
        A2 == NULL || B2 == NULL || C2 == NULL || C2_fl == NULL) {
        printf( "\n ERROR: Can't allocate memory for matrices. Aborting... \n\n");
        return 1;
    }
    printf (" Intializing matrix data \n\n");
    // Both GEMM problems use identical deterministic input data.
    for (i = 0; i < (m*k); i++) {
        A1[i] = A2[i] = (double)(i+1);
    }
    for (i = 0; i < (k*n); i++) {
        B1[i] = B2[i] = (double)(-i-1);
    }
    for (i = 0; i < (m*n); i++) {
        C1[i] = C2[i] = 0.0;
        C1_fl[i] = C2_fl[i] = 0.0;
    }
    printf (" \nComputing matrix product using Intel MKL cblas_dgemm_batch function \n");
    // One group of two same-shape GEMMs; all per-group parameter arrays
    // therefore have a single entry.
    #define GRP_COUNT 1    // 1 group
    MKL_INT group_count = GRP_COUNT;
    MKL_INT group_sizes[GRP_COUNT] = {2};    // 2 matrix multiplications
    CBLAS_TRANSPOSE transa_array[GRP_COUNT] = {CblasNoTrans};
    CBLAS_TRANSPOSE transb_array[GRP_COUNT] = {CblasNoTrans};
    MKL_INT m_array[GRP_COUNT] = {m};
    MKL_INT n_array[GRP_COUNT] = {n};
    MKL_INT k_array[GRP_COUNT] = {k};
    MKL_INT lda_array[GRP_COUNT] = {k};
    MKL_INT ldb_array[GRP_COUNT] = {n};
    MKL_INT ldc_array[GRP_COUNT] = {n};
    double alpha_array[GRP_COUNT] = {alpha};
    double beta_array[GRP_COUNT] = {beta};
    // Number of matrix multiplications = 2
    double **a_array, **b_array, **c_array;
    a_array = (double **)mkl_calloc(2, sizeof( double* ), 64);
    b_array = (double **)mkl_calloc(2, sizeof( double* ), 64);
    c_array = (double **)mkl_calloc(2, sizeof( double* ), 64);
    t_start = omp_get_wtime();
    // Call cblas_dgemm_batch
    // Map the matrices to the device, then capture their *device* addresses
    // into the pointer arrays that cblas_dgemm_batch consumes.
    #pragma omp target enter data                  \
        map(to: A1[0:m*k], B1[0:k*n], C1[0:m*n]) \
        map(to: A2[0:m*k], B2[0:k*n], C2[0:m*n])
    #pragma omp target data use_device_ptr(A1, B1, C1, A2, B2, C2)
    {
        a_array[0] = A1, a_array[1] = A2;
        b_array[0] = B1, b_array[1] = B2;
        c_array[0] = C1, c_array[1] = C2;
    }
    #pragma omp target data                             \
        map(to:a_array[0:2], b_array[0:2], c_array[0:2])
    {
        #pragma omp dispatch
        cblas_dgemm_batch (
            CblasRowMajor,
            transa_array,
            transb_array,
            m_array,
            n_array,
            k_array,
            alpha_array,
            (const double **)a_array,
            lda_array,
            (const double **)b_array,
            ldb_array,
            beta_array,
            c_array,
            ldc_array,
            group_count,
            group_sizes);
    }  // end target data map
    // Copy the results back and release the device copies.
    #pragma omp target exit data      \
        map(from: C1[0:m*n], C2[0:m*n])
    t_end = omp_get_wtime();
    printf ("\n Top left corner of matrix C1: \n");
    for (i=0; i<min(m,6); i++) {
      for (j=0; j<min(n,6); j++) {
        printf ("%12.5G", C1[j+i*n]);
      }
      printf ("\n");
    }
    printf ("\n Top left corner of matrix C2: \n");
    for (i=0; i<min(m,6); i++) {
      for (j=0; j<min(n,6); j++) {
        printf ("%12.5G", C2[j+i*n]);
      }
      printf ("\n");
    }
    printf (" \nComputing matrix product using for-loops \n");
    // Host reference: classic triple-nested-loop matrix multiply.
    for (i = 0; i < m; i++) {
        for (j = 0; j < n; j++) {
            sum = 0.0;
            for (q = 0; q < k; q++)
                sum += A1[k*i+q] * B1[n*q+j];
            C1_fl[n*i+j] = sum;
        }
    }
    for (i = 0; i < m; i++) {
        for (j = 0; j < n; j++) {
            sum = 0.0;
            for (q = 0; q < k; q++)
                sum += A2[k*i+q] * B2[n*q+j];
            C2_fl[n*i+j] = sum;
        }
    }
    printf ("\n Top left corner of matrix C1: \n");
    for (i=0; i<min(m,6); i++) {
      for (j=0; j<min(n,6); j++) {
        printf ("%12.5G", C1_fl[j+i*n]);
      }
      printf ("\n");
    }
    printf ("\n Top left corner of matrix C2: \n");
    for (i=0; i<min(m,6); i++) {
      for (j=0; j<min(n,6); j++) {
        printf ("%12.5G", C2_fl[j+i*n]);
      }
      printf ("\n");
    }
    printf ("\n Computations completed. Verifying... \n\n");
    // Element-wise comparison against the host reference within epsilon.
    fail = 0;
    for (i = 0; i < (m*n); i++) {
        if (! compare(C1[i], C1_fl[i]) || ! compare(C2[i], C2_fl[i])) {
            fail = 1;
            break;
        }
    }
    if (fail) {
        printf (" **** FAIL **** \n");
    }
    else {
        printf(" time = %lf seconds\n", t_end - t_start);
        printf (" **** PASS **** \n");
    }
    mkl_free(A1);
    mkl_free(B1);
    mkl_free(C1);
    mkl_free(C1_fl);
    mkl_free(A2);
    mkl_free(B2);
    mkl_free(C2);
    mkl_free(C2_fl);
    return 0;
}
// Snippet end
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/22_mkl_dispatch/dgemm_target_variant_dispatch_c.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
// Snippet begin
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#include "mkl.h"
#include "mkl_omp_offload.h"
#define min(x,y) (((x) < (y)) ? (x) : (y))
#define EPSILON 0.0001
// Computes C = alpha*A*B + beta*C on the GPU via the oneMKL OpenMP
// `target variant dispatch` interface (structured-block form) and validates
// the result against a naive host triple loop. Returns 0 on PASS, nonzero
// on failure. Fix: C_fl was allocated but never freed (leak).
int main()
{
    double *A, *B, *C, *C_fl;
    int64_t m, n, k;
    double alpha, beta;
    double sum;
    int64_t i, j, q;
    int fail;
    printf ("\n This example computes real matrix C=alpha*A*B+beta*C using \n"
            " Intel oneMKL function dgemm, where A, B, and C are matrices and \n"
            " alpha and beta are double precision scalars\n\n");
    m = 2000, k = 200, n = 1000;
    printf (" Initializing data for matrix multiplication C=A*B for matrix \n"
            " A(%li x %li) and matrix B(%li x %li)\n\n", m, k, k, n);
    alpha = 1.0; beta = 0.0;       // beta = 0 makes C = alpha*A*B exactly
    printf (" Allocating memory for matrices aligned on 64-byte boundary for better \n"
            " performance \n\n");
    A = (double *)mkl_malloc( m * k * sizeof( double ), 64 );
    B = (double *)mkl_malloc( k * n * sizeof( double ), 64 );
    C = (double *)mkl_malloc( m * n * sizeof( double ), 64 );
    C_fl = (double *)mkl_malloc( m*n*sizeof( double ), 64 );
    if (A == NULL || B == NULL || C == NULL || C_fl == NULL) {
        printf( "\n ERROR: Cannot allocate memory for matrices. Exiting... \n\n");
        return 1;
    }
    printf (" Intializing matrices \n\n");
    // Deterministic inputs: A counts up, B counts down (negative).
    for (i = 0; i < (m*k); i++) {
        A[i] = (double)(i+1);
    }
    for (i = 0; i < (k*n); i++) {
        B[i] = (double)(-i-1);
    }
    for (i = 0; i < (m*n); i++) {
        C[i] = 0.0;
        C_fl[i] = 0.0;
    }
    printf (" Computing matrix product using Intel oneMKL dgemm function via CBLAS interface \n\n");
    // Map operands to the device, then dispatch the GPU variant of dgemm
    // with the device addresses of A, B, C.
    #pragma omp target data map(to: A[0:m*k], B[0:k*n]) map(tofrom: C[0:m*n])
    {
        #pragma omp target variant dispatch use_device_ptr(A, B, C)
        {
           cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
                       m, n, k, alpha, A, k, B, n, beta, C, n);
        }
    }
    printf ("\n Top left corner of matrix C: \n");
    for (i=0; i<min(m,6); i++) {
      for (j=0; j<min(n,6); j++) {
        printf ("%12.5G", C[j+i*n]);
      }
      printf ("\n");
    }
    printf (" Computing matrix product using for-loops \n");
    // Host reference: classic triple-nested-loop matrix multiply.
    for (i = 0; i < m; i++) {
        for (j = 0; j < n; j++) {
            sum = 0.0;
            for (q = 0; q < k; q++) {
                sum += A[k*i+q] * B[n*q+j];
            }
            C_fl[n*i+j] = alpha * sum + beta * C_fl[n*i+j];
        }
    }
    printf ("\n Top left corner of matrix C_fl: \n");
    for (i=0; i<min(m,6); i++) {
      for (j=0; j<min(n,6); j++) {
        printf ("%12.5G", C_fl[j+i*n]);
      }
      printf ("\n");
    }
    printf (" Computations completed. Verifying... \n\n");
    // Absolute-difference check against the host reference.
    fail = 0;
    for (i = 0; i < (m*n); i++) {
        if (fabs(C[i] - C_fl[i]) > EPSILON) {
            fail = 1;
            break;
        }
    }
    if (fail)
        printf ("\n **** FAIL **** \n");
    else
        printf ("\n **** PASS **** \n");
    printf ("\n Deallocating memory \n\n");
    mkl_free(A);
    mkl_free(B);
    mkl_free(C);
    mkl_free(C_fl);   // was leaked in the original
    return fail;
}
// Snippet end
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/22_mkl_dispatch/common/common.h | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
#ifndef _OPENMP_COMMON_H_
#define _OPENMP_COMMON_H_
#define MAX(a,b) ((a) > (b)) ? (a) : (b)
// Pseudo-random double uniformly distributed in [-0.5, 0.5].
static inline double rand_double_scalar() {
    double u = (double) rand () / (double) RAND_MAX;
    return u - 0.5;
}
// Pseudo-random float uniformly distributed in [-0.5, 0.5].
static inline float rand_single_scalar() {
    float u = (float) rand () / (float) RAND_MAX;
    return u - 0.5f;
}
// Pseudo-random double-precision complex value; real part is drawn first,
// then the imaginary part, each uniform in [-0.5, 0.5].
static inline MKL_Complex16 rand_double_complex_scalar() {
    MKL_Complex16 z;
    z.real = ((double) rand () / (double) RAND_MAX) - 0.5;
    z.imag = ((double) rand () / (double) RAND_MAX) - 0.5;
    return z;
}
// Pseudo-random single-precision complex value; real part is drawn first,
// then the imaginary part, each uniform in [-0.5, 0.5].
static inline MKL_Complex8 rand_single_complex_scalar() {
    MKL_Complex8 z;
    z.real = ((float) rand () / (float) RAND_MAX) - 0.5f;
    z.imag = ((float) rand () / (float) RAND_MAX) - 0.5f;
    return z;
}
static inline void free_double_matrices(double **array, MKL_INT size) {
for (MKL_INT i = 0; i < size; i++) {
mkl_free(array[i]);
}
}
static inline void free_single_matrices(float **array, MKL_INT size) {
for (MKL_INT i = 0; i < size; i++) {
mkl_free(array[i]);
}
}
// mkl_free() every matrix pointer in `array` (the pointer array itself is
// not released here).
static inline void free_double_complex_matrices(MKL_Complex16 **array, MKL_INT size) {
    MKL_INT idx = 0;
    while (idx < size) {
        mkl_free(array[idx]);
        ++idx;
    }
}
// mkl_free() every matrix pointer in `array` (the pointer array itself is
// not released here).
static inline void free_single_complex_matrices(MKL_Complex8 **array, MKL_INT size) {
    MKL_INT idx = 0;
    while (idx < size) {
        mkl_free(array[idx]);
        ++idx;
    }
}
// Pseudo-random integer uniformly distributed in [min, max] (inclusive).
static inline MKL_INT rand_int(MKL_INT min, MKL_INT max) {
    return min + (rand() % (max - min + 1));
}
// Fill the first n entries of `array`: random values in [-0.5, 0.5] when
// do_rand is nonzero, otherwise the deterministic sequence 1, 2, ..., n.
static inline void init_double_array(MKL_INT n, double *array, MKL_INT do_rand) {
    if (do_rand) {
        for (MKL_INT idx = 0; idx < n; idx++)
            array[idx] = rand() / (double) RAND_MAX - .5;
    }
    else {
        for (MKL_INT idx = 0; idx < n; idx++)
            array[idx] = (double) (idx + 1);
    }
}
// Fill the first n entries of `array`: random values in [-0.5, 0.5] when
// do_rand is nonzero, otherwise the deterministic sequence 1, 2, ..., n.
static inline void init_single_array(MKL_INT n, float *array, MKL_INT do_rand) {
    if (do_rand) {
        for (MKL_INT idx = 0; idx < n; idx++)
            array[idx] = (float) rand() / (float) RAND_MAX - .5f;
    }
    else {
        for (MKL_INT idx = 0; idx < n; idx++)
            array[idx] = (float) (idx + 1);
    }
}
// Fill the first n entries of `array` with complex values. Random mode
// draws real then imaginary (two rand() calls per element, same order as
// before); deterministic mode sets both parts to i + 1.
static inline void init_double_complex_array(MKL_INT n, MKL_Complex16 *array, MKL_INT do_rand) {
    if (do_rand) {
        for (MKL_INT idx = 0; idx < n; idx++) {
            array[idx].real = rand() / (double) RAND_MAX - .5;
            array[idx].imag = rand() / (double) RAND_MAX - .5;
        }
    }
    else {
        for (MKL_INT idx = 0; idx < n; idx++) {
            array[idx].real = (double) (idx + 1);
            array[idx].imag = (double) (idx + 1);
        }
    }
}
// Fill the first n entries of `array` with complex values. Random mode
// draws real then imaginary (two rand() calls per element, same order as
// before); deterministic mode sets both parts to i + 1.
static inline void init_single_complex_array(MKL_INT n, MKL_Complex8 *array, MKL_INT do_rand) {
    if (do_rand) {
        for (MKL_INT idx = 0; idx < n; idx++) {
            array[idx].real = (float) rand() / (float) RAND_MAX - .5f;
            array[idx].imag = (float) rand() / (float) RAND_MAX - .5f;
        }
    }
    else {
        for (MKL_INT idx = 0; idx < n; idx++) {
            array[idx].real = (float) (idx + 1);
            array[idx].imag = (float) (idx + 1);
        }
    }
}
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/23_omp_work_group/test_omp_work_group.cpp | //==============================================================
// Copyright © 203 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
#include <stdio.h>
#include <omp.h>
// Allocate an n-element array whose element i holds value / (100.0 + i).
// Returns NULL when the allocation fails; the caller owns (and frees) the
// returned memory.
double * make_array(int n, double value) {
  double* buf = static_cast<double*>(malloc(n * sizeof(double)));
  if (buf == NULL) {
    return NULL;
  }
  int i = 0;
  while (i < n) {
    buf[i] = value / (100.0 + i);
    ++i;
  }
  return buf;
}
// Computes val = sum_{i,j} C[i][j] * A[i] * B[j] on the device with a
// collapsed teams/parallel-for reduction and prints the result.
// Fix: the printf format "%f10.3" printed the value with the default format
// followed by a literal "10.3"; the intended width/precision spec is %10.3f.
int main() {
  // begin
  int N = 2048;
  double* A = make_array(N, 0.8);
  double* B = make_array(N, 0.65);
  double* C = make_array(N*N, 2.5);
  if ((A == NULL) || (B == NULL) || (C == NULL))
    exit(1);

  int i, j;
  double val = 0.0;

  // Offload the weighted double sum; `val` is reduced across all teams.
  #pragma omp target map(to:A[0:N],B[0:N],C[0:N*N]) map(tofrom:val)
  {
    #pragma omp teams distribute parallel for collapse(2) reduction(+ : val)
    for (i = 0; i < N; i++) {
      for (j = 0; j < N; j++) {
        val += C[i * N + j] * A[i] * B[j];
      }
    }
  }

  printf("val = %10.3f\n", val);

  free(A);
  free(B);
  free(C);
  // end
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/24_device_ptr_addr_clauses/c_use_device_ptr_01.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <math.h>
#include <omp.h>
#define length 65536
// Demonstrates `use_device_ptr`: A, B, C live only in device memory
// (omp_target_alloc_device), so every target region that touches them is
// nested in a `target data use_device_ptr(A,B,C)` region. Computes
// C = A + scalar*B and validates a checksum of C against the analytic value.
// Returns 0 on success, 1 on allocation or validation failure.
int main(void)
{
    int device_id = omp_get_default_device();
    size_t bytes = length*sizeof(double);
    double * __restrict A;
    double * __restrict B;
    double * __restrict C;
    double scalar = 3.0;
    double ar;
    double br;
    double cr;
    double asum;

    // Allocate arrays in device memory
    A = (double *) omp_target_alloc_device(bytes, device_id);
    if (A == NULL){
        printf(" ERROR: Cannot allocate space for A using omp_target_alloc_device().\n");
        exit(1);
    }
    B = (double *) omp_target_alloc_device(bytes, device_id);
    if (B == NULL){
        printf(" ERROR: Cannot allocate space for B using omp_target_alloc_device().\n");
        exit(1);
    }
    C = (double *) omp_target_alloc_device(bytes, device_id);
    if (C == NULL){
        printf(" ERROR: Cannot allocate space for C using omp_target_alloc_device().\n");
        exit(1);
    }

    // All kernels below dereference A/B/C, so they need the device addresses.
    #pragma omp target data use_device_ptr(A,B,C)
    {
        // Initialize the arrays
        #pragma omp target teams distribute parallel for
        for (size_t i=0; i<length; i++) {
            A[i] = 2.0;
            B[i] = 2.0;
            C[i] = 0.0;
        }

        // Perform the computation
        #pragma omp target teams distribute parallel for
        for (size_t i=0; i<length; i++) {
            C[i] += A[i] + scalar * B[i];
        }

        // Validate and output results
        // Analytic checksum computed on the host from the init values.
        ar = 2.0;
        br = 2.0;
        cr = 0.0;
        for (int i=0; i<length; i++) {
            cr += ar + scalar * br;
        }

        asum = 0.0;
        #pragma omp target teams distribute parallel for reduction(+:asum)
        for (size_t i=0; i<length; i++) {
            asum += fabs(C[i]);
        }
    } // end target data

    omp_target_free(A, device_id);
    omp_target_free(B, device_id);
    omp_target_free(C, device_id);

    // Relative-error comparison of the device checksum vs. the analytic one.
    double epsilon=1.e-8;
    if (fabs(cr - asum)/asum > epsilon) {
        printf("Failed Validation on output array\n"
               "   Expected checksum: %lf\n"
               "   Observed checksum: %lf\n"
               "ERROR: solution did not validate\n", cr, asum);
        return 1;
    } else {
        printf("Solution validates. Checksum = %lf\n", asum);
    }
    return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/24_device_ptr_addr_clauses/c_is_device_ptr_01.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <math.h>
#include <omp.h>
#define N 100
int main(void)
{
int *arr_host = NULL;
int *arr_device = NULL;
arr_host = (int *) malloc(N * sizeof(int));
arr_device = (int *) omp_target_alloc_device(N * sizeof(int),
omp_get_default_device());
#pragma omp target is_device_ptr(arr_device) map(from: arr_host[0:N])
{
for (int i = 0; i < N; ++i) {
arr_device[i] = i;
arr_host[i] = arr_device[i];
}
}
printf ("%d, %d, %d \n", arr_host[0], arr_host[N/2], arr_host[N-1]);
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/00_omp_thread_num/test_omp_thread_num_1.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
#include <stdio.h>
#include <omp.h>
// Offloads a 100-iteration loop and has every iteration print its team
// number, team size, and thread id — used to observe how the runtime
// partitions iterations across teams and threads (no simd clause).
void foo() {
  #pragma omp target teams distribute parallel for
  for (int i = 0; i < 100; ++i) {
    printf ("team_num=%d num_threads=%d thread_id=%d \n",
            omp_get_team_num(),
            omp_get_num_threads(),
            omp_get_thread_num());
  }
}
// Entry point: run the offloaded diagnostic loop once.
int main(void) {
  foo();
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/00_omp_thread_num/test_omp_thread_num_2.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
#include <stdio.h>
#include <omp.h>
// Same diagnostic loop as the no-simd variant, but requests a SIMD width
// of 16 via simdlen(16) so the reported team/thread layout can be compared
// against other simdlen choices.
void foo() {
  #pragma omp target teams distribute parallel for simd simdlen(16)
  for (int i = 0; i < 100; ++i) {
    printf ("team_num=%d num_threads=%d thread_id=%d \n",
            omp_get_team_num(),
            omp_get_num_threads(),
            omp_get_thread_num());
  }
}
// Entry point: run the offloaded diagnostic loop once.
int main(void) {
  foo();
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/00_omp_thread_num/test_omp_thread_num_3.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
#include <stdio.h>
#include <omp.h>
// Same diagnostic loop again, requesting a SIMD width of 64 via
// simdlen(64) — the widest variant in this set of examples.
void foo() {
  #pragma omp target teams distribute parallel for simd simdlen(64)
  for (int i = 0; i < 100; ++i) {
    printf ("team_num=%d num_threads=%d thread_id=%d \n",
            omp_get_team_num(),
            omp_get_num_threads(),
            omp_get_thread_num());
  }
}
// Entry point: run the offloaded diagnostic loop once.
int main(void) {
  foo();
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/21_omp_target_alloc/test_target_map.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <math.h>
#include <omp.h>
#define iterations 100
#define length 64*1024*1024
// nstream-style benchmark (C += A + scalar*B) that maps A, B, C on EVERY
// kernel launch — the worst-case data-transfer pattern. Intended as the
// baseline for test_target_map2.cpp, which hoists the mapping out of the
// iteration loop. Validates a checksum of C. Returns 0 on success.
int main(void)
{
    size_t bytes = length*sizeof(double);
    double * __restrict A;
    double * __restrict B;
    double * __restrict C;
    double scalar = 3.0;
    double nstream_time = 0.0;

    // Allocate arrays on the host using plain malloc()
    A = (double *) malloc(bytes);
    if (A == NULL){
        printf(" ERROR: Cannot allocate space for A using plain malloc().\n");
        exit(1);
    }
    B = (double *) malloc(bytes);
    if (B == NULL){
        printf(" ERROR: Cannot allocate space for B using plain malloc().\n");
        exit(1);
    }
    C = (double *) malloc(bytes);
    if (C == NULL){
        printf(" ERROR: Cannot allocate space for C using plain malloc().\n");
        exit(1);
    }

    // Initialize the arrays
    #pragma omp parallel for
    for (size_t i=0; i<length; i++) {
        A[i] = 2.0;
        B[i] = 2.0;
        C[i] = 0.0;
    }

    // Perform the computation
    nstream_time = omp_get_wtime();
    for (int iter = 0; iter<iterations; iter++) {
        // The map clauses sit on the kernel itself, so A and B are copied
        // to the device and C is copied both ways on every iteration.
        #pragma omp target teams distribute parallel for \
                map(to: A[0:length], B[0:length])        \
                map(tofrom: C[0:length])
        for (size_t i=0; i<length; i++) {
            C[i] += A[i] + scalar * B[i];
        }
    }
    nstream_time = omp_get_wtime() - nstream_time;

    // Validate and output results
    // Analytic checksum accumulated over the same number of iterations.
    double ar = 2.0;
    double br = 2.0;
    double cr = 0.0;
    for (int iter = 0; iter<iterations; iter++) {
        for (int i=0; i<length; i++) {
            cr += ar + scalar * br;
        }
    }

    double asum = 0.0;
    #pragma omp parallel for reduction(+:asum)
    for (size_t i=0; i<length; i++) {
        asum += fabs(C[i]);
    }

    free(A);
    free(B);
    free(C);

    // Relative-error comparison of the computed checksum vs. the analytic one.
    double epsilon=1.e-8;
    if (fabs(cr - asum)/asum > epsilon) {
        printf("Failed Validation on output array\n"
               "   Expected checksum: %lf\n"
               "   Observed checksum: %lf\n"
               "ERROR: solution did not validate\n", cr, asum);
        return 1;
    } else {
        printf("Solution validates\n");
        double avgtime = nstream_time/iterations;
        printf("Checksum = %lf; Avg time (s): %lf\n", asum, avgtime);
    }
    return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/21_omp_target_alloc/test_target_map2.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <math.h>
#include <omp.h>
#define iterations 100
#define length 64*1024*1024
int main(void)
{
size_t bytes = length*sizeof(double);
double * __restrict A;
double * __restrict B;
double * __restrict C;
double scalar = 3.0;
double nstream_time = 0.0;
// Allocate arrays on the host using plain malloc()
A = (double *) malloc(bytes);
if (A == NULL){
printf(" ERROR: Cannot allocate space for A using plain malloc().\n");
exit(1);
}
B = (double *) malloc(bytes);
if (B == NULL){
printf(" ERROR: Cannot allocate space for B using plain malloc().\n");
exit(1);
}
C = (double *) malloc(bytes);
if (C == NULL){
printf(" ERROR: Cannot allocate space for C using plain malloc().\n");
exit(1);
}
// Initialize the arrays
#pragma omp parallel for
for (size_t i=0; i<length; i++) {
A[i] = 2.0;
B[i] = 2.0;
C[i] = 0.0;
}
// Perform the computation
nstream_time = omp_get_wtime();
#pragma omp target data map(to: A[0:length], B[0:length]) \
map(tofrom: C[0:length])
{
for (int iter = 0; iter<iterations; iter++) {
#pragma omp target teams distribute parallel for
for (size_t i=0; i<length; i++) {
C[i] += A[i] + scalar * B[i];
}
}
}
nstream_time = omp_get_wtime() - nstream_time;
// Validate and output results
double ar = 2.0;
double br = 2.0;
double cr = 0.0;
for (int iter = 0; iter<iterations; iter++) {
for (int i=0; i<length; i++) {
cr += ar + scalar * br;
}
}
double asum = 0.0;
#pragma omp parallel for reduction(+:asum)
for (size_t i=0; i<length; i++) {
asum += fabs(C[i]);
}
free(A);
free(B);
free(C);
double epsilon=1.e-8;
if (fabs(cr - asum)/asum > epsilon) {
printf("Failed Validation on output array\n"
" Expected checksum: %lf\n"
" Observed checksum: %lf\n"
"ERROR: solution did not validate\n", cr, asum);
return 1;
} else {
printf("Solution validates\n");
double avgtime = nstream_time/iterations;
printf("Checksum = %lf; Avg time (s): %lf\n", asum, avgtime);
}
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/21_omp_target_alloc/test_omp_target_memcpy.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <math.h>
#include <omp.h>
#define iterations 100
#define length 64*1024*1024
int main(void)
{
int device_id = omp_get_default_device();
int host_id = omp_get_initial_device();
size_t bytes = length*sizeof(double);
double * __restrict h_A;
double * __restrict h_B;
double * __restrict h_C;
double * __restrict d_A;
double * __restrict d_B;
double * __restrict d_C;
double scalar = 3.0;
double nstream_time = 0.0;
// Allocate arrays h_A, h_B, and h_C on the host using plain malloc()
h_A = (double *) malloc(bytes);
if (h_A == NULL){
printf(" ERROR: Cannot allocate space for h_A using plain malloc().\n");
exit(1);
}
h_B = (double *) malloc(bytes);
if (h_B == NULL){
printf(" ERROR: Cannot allocate space for h_B using plain malloc().\n");
exit(1);
}
h_C = (double *) malloc(bytes);
if (h_C == NULL){
printf(" ERROR: Cannot allocate space for h_C using plain malloc().\n");
exit(1);
}
// Allocate arrays d_A, d_B, and d_C on the device using omp_target_alloc()
d_A = (double *) omp_target_alloc(bytes, device_id);
if (d_A == NULL){
printf(" ERROR: Cannot allocate space for d_A using omp_target_alloc().\n");
exit(1);
}
d_B = (double *) omp_target_alloc(bytes, device_id);
if (d_B == NULL){
printf(" ERROR: Cannot allocate space for d_B using omp_target_alloc().\n");
exit(1);
}
d_C = (double *) omp_target_alloc(bytes, device_id);
if (d_C == NULL){
printf(" ERROR: Cannot allocate space for d_C using omp_target_alloc().\n");
exit(1);
}
// Initialize the arrays on the host
#pragma omp parallel for
for (size_t i=0; i<length; i++) {
h_A[i] = 2.0;
h_B[i] = 2.0;
h_C[i] = 0.0;
}
// Call omp_target_memcpy() to copy values from host to device
int rc = 0;
rc = omp_target_memcpy(d_A, h_A, bytes, 0, 0, device_id, host_id);
if (rc) {
printf("ERROR: omp_target_memcpy(A) returned %d\n", rc);
exit(1);
}
rc = omp_target_memcpy(d_B, h_B, bytes, 0, 0, device_id, host_id);
if (rc) {
printf("ERROR: omp_target_memcpy(B) returned %d\n", rc);
exit(1);
}
rc = omp_target_memcpy(d_C, h_C, bytes, 0, 0, device_id, host_id);
if (rc) {
printf("ERROR: omp_target_memcpy(C) returned %d\n", rc);
exit(1);
}
// Perform the computation
nstream_time = omp_get_wtime();
for (int iter = 0; iter<iterations; iter++) {
#pragma omp target teams distribute parallel for \
is_device_ptr(d_A,d_B,d_C)
for (size_t i=0; i<length; i++) {
d_C[i] += d_A[i] + scalar * d_B[i];
}
}
nstream_time = omp_get_wtime() - nstream_time;
// Call omp_target_memcpy() to copy values from device to host
rc = omp_target_memcpy(h_C, d_C, bytes, 0, 0, host_id, device_id);
if (rc) {
printf("ERROR: omp_target_memcpy(A) returned %d\n", rc);
exit(1);
}
// Validate and output results
double ar = 2.0;
double br = 2.0;
double cr = 0.0;
for (int iter = 0; iter<iterations; iter++) {
for (int i=0; i<length; i++) {
cr += ar + scalar * br;
}
}
double asum = 0.0;
#pragma omp parallel for reduction(+:asum)
for (size_t i=0; i<length; i++) {
asum += fabs(h_C[i]);
}
free(h_A);
free(h_B);
free(h_C);
omp_target_free(d_A, device_id);
omp_target_free(d_B, device_id);
omp_target_free(d_C, device_id);
double epsilon=1.e-8;
if (fabs(cr - asum)/asum > epsilon) {
printf("Failed Validation on output array\n"
" Expected checksum: %lf\n"
" Observed checksum: %lf\n"
"ERROR: solution did not validate\n", cr, asum);
return 1;
} else {
printf("Solution validates\n");
double avgtime = nstream_time/iterations;
printf("Checksum = %lf; Avg time (s): %lf\n", asum, avgtime);
}
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/21_omp_target_alloc/test_omp_target_alloc_shared.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <math.h>
#include <omp.h>
#pragma omp requires unified_address
#define iterations 100
#define length 64*1024*1024
int main(void)
{
int device_id = omp_get_default_device();
size_t bytes = length*sizeof(double);
double * __restrict A;
double * __restrict B;
double * __restrict C;
double scalar = 3.0;
double nstream_time = 0.0;
// Allocate arrays in shared memory
A = (double *) omp_target_alloc_shared(bytes, device_id);
if (A == NULL){
printf(" ERROR: Cannot allocate space for A using omp_target_alloc_shared().\n");
exit(1);
}
B = (double *) omp_target_alloc_shared(bytes, device_id);
if (B == NULL){
printf(" ERROR: Cannot allocate space for B using omp_target_alloc_shared().\n");
exit(1);
}
C = (double *) omp_target_alloc_shared(bytes, device_id);
if (C == NULL){
printf(" ERROR: Cannot allocate space for C using omp_target_alloc_shared().\n");
exit(1);
}
// Initialize the arrays
#pragma omp parallel for
for (size_t i=0; i<length; i++) {
A[i] = 2.0;
B[i] = 2.0;
C[i] = 0.0;
}
// Perform the computation
nstream_time = omp_get_wtime();
for (int iter = 0; iter<iterations; iter++) {
#pragma omp target teams distribute parallel for
for (size_t i=0; i<length; i++) {
C[i] += A[i] + scalar * B[i];
}
}
nstream_time = omp_get_wtime() - nstream_time;
// Validate and output results
double ar = 2.0;
double br = 2.0;
double cr = 0.0;
for (int iter = 0; iter<iterations; iter++) {
for (int i=0; i<length; i++) {
cr += ar + scalar * br;
}
}
double asum = 0.0;
#pragma omp parallel for reduction(+:asum)
for (size_t i=0; i<length; i++) {
asum += fabs(C[i]);
}
omp_target_free(A, device_id);
omp_target_free(B, device_id);
omp_target_free(C, device_id);
double epsilon=1.e-8;
if (fabs(cr - asum)/asum > epsilon) {
printf("Failed Validation on output array\n"
" Expected checksum: %lf\n"
" Observed checksum: %lf\n"
"ERROR: solution did not validate\n", cr, asum);
return 1;
} else {
printf("Solution validates\n");
double avgtime = nstream_time/iterations;
printf("Checksum = %lf; Avg time (s): %lf\n", asum, avgtime);
}
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/21_omp_target_alloc/test_omp_target_alloc_host.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <math.h>
#include <omp.h>
#pragma omp requires unified_address
#define iterations 100
#define length 64*1024*1024
int main(void)
{
int device_id = omp_get_default_device();
size_t bytes = length*sizeof(double);
double * __restrict A;
double * __restrict B;
double * __restrict C;
double scalar = 3.0;
double nstream_time = 0.0;
// Allocate arrays in host memory
A = (double *) omp_target_alloc_host(bytes, device_id);
if (A == NULL){
printf(" ERROR: Cannot allocate space for A using omp_target_alloc_host().\n");
exit(1);
}
B = (double *) omp_target_alloc_host(bytes, device_id);
if (B == NULL){
printf(" ERROR: Cannot allocate space for B using omp_target_alloc_host().\n");
exit(1);
}
C = (double *) omp_target_alloc_host(bytes, device_id);
if (C == NULL){
printf(" ERROR: Cannot allocate space for C using omp_target_alloc_host().\n");
exit(1);
}
// Initialize the arrays
#pragma omp parallel for
for (size_t i=0; i<length; i++) {
A[i] = 2.0;
B[i] = 2.0;
C[i] = 0.0;
}
// Perform the computation
nstream_time = omp_get_wtime();
for (int iter = 0; iter<iterations; iter++) {
#pragma omp target teams distribute parallel for
for (size_t i=0; i<length; i++) {
C[i] += A[i] + scalar * B[i];
}
}
nstream_time = omp_get_wtime() - nstream_time;
// Validate and output results
double ar = 2.0;
double br = 2.0;
double cr = 0.0;
for (int iter = 0; iter<iterations; iter++) {
for (int i=0; i<length; i++) {
cr += ar + scalar * br;
}
}
double asum = 0.0;
#pragma omp parallel for reduction(+:asum)
for (size_t i=0; i<length; i++) {
asum += fabs(C[i]);
}
omp_target_free(A, device_id);
omp_target_free(B, device_id);
omp_target_free(C, device_id);
double epsilon=1.e-8;
if (fabs(cr - asum)/asum > epsilon) {
printf("Failed Validation on output array\n"
" Expected checksum: %lf\n"
" Observed checksum: %lf\n"
"ERROR: solution did not validate\n", cr, asum);
return 1;
} else {
printf("Solution validates\n");
double avgtime = nstream_time/iterations;
printf("Checksum = %lf; Avg time (s): %lf\n", asum, avgtime);
}
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/21_omp_target_alloc/test_omp_target_alloc_device.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <math.h>
#include <omp.h>
#define iterations 100
#define length 64*1024*1024
int main(void)
{
int device_id = omp_get_default_device();
size_t bytes = length*sizeof(double);
double * __restrict A;
double * __restrict B;
double * __restrict C;
double scalar = 3.0;
double nstream_time = 0.0;
// Allocate arrays in device memory
A = (double *) omp_target_alloc_device(bytes, device_id);
if (A == NULL){
printf(" ERROR: Cannot allocate space for A using omp_target_alloc_device().\n");
exit(1);
}
B = (double *) omp_target_alloc_device(bytes, device_id);
if (B == NULL){
printf(" ERROR: Cannot allocate space for B using omp_target_alloc_device().\n");
exit(1);
}
C = (double *) omp_target_alloc_device(bytes, device_id);
if (C == NULL){
printf(" ERROR: Cannot allocate space for C using omp_target_alloc_device().\n");
exit(1);
}
// Initialize the arrays
#pragma omp target teams distribute parallel for \
is_device_ptr(A,B,C)
for (size_t i=0; i<length; i++) {
A[i] = 2.0;
B[i] = 2.0;
C[i] = 0.0;
}
// Perform the computation
nstream_time = omp_get_wtime();
for (int iter = 0; iter<iterations; iter++) {
#pragma omp target teams distribute parallel for \
is_device_ptr(A,B,C)
for (size_t i=0; i<length; i++) {
C[i] += A[i] + scalar * B[i];
}
}
nstream_time = omp_get_wtime() - nstream_time;
// Validate and output results
double ar = 2.0;
double br = 2.0;
double cr = 0.0;
for (int iter = 0; iter<iterations; iter++) {
for (int i=0; i<length; i++) {
cr += ar + scalar * br;
}
}
double asum = 0.0;
#pragma omp target teams distribute parallel for reduction(+:asum) \
map(tofrom: asum) is_device_ptr(C)
for (size_t i=0; i<length; i++) {
asum += fabs(C[i]);
}
omp_target_free(A, device_id);
omp_target_free(B, device_id);
omp_target_free(C, device_id);
double epsilon=1.e-8;
if (fabs(cr - asum)/asum > epsilon) {
printf("Failed Validation on output array\n"
" Expected checksum: %lf\n"
" Observed checksum: %lf\n"
"ERROR: solution did not validate\n", cr, asum);
return 1;
} else {
printf("Solution validates\n");
double avgtime = nstream_time/iterations;
printf("Checksum = %lf; Avg time (s): %lf\n", asum, avgtime);
}
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/21_omp_target_alloc/test_omp_target_alloc.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <math.h>
#include <omp.h>
#define iterations 100
#define length 64*1024*1024
int main(void)
{
int device_id = omp_get_default_device();
size_t bytes = length*sizeof(double);
double * __restrict A;
double * __restrict B;
double * __restrict C;
double scalar = 3.0;
double nstream_time = 0.0;
// Allocate arrays in device memory
A = (double *) omp_target_alloc(bytes, device_id);
if (A == NULL){
printf(" ERROR: Cannot allocate space for A using omp_target_alloc().\n");
exit(1);
}
B = (double *) omp_target_alloc(bytes, device_id);
if (B == NULL){
printf(" ERROR: Cannot allocate space for B using omp_target_alloc().\n");
exit(1);
}
C = (double *) omp_target_alloc(bytes, device_id);
if (C == NULL){
printf(" ERROR: Cannot allocate space for C using omp_target_alloc().\n");
exit(1);
}
// Initialize the arrays
#pragma omp target teams distribute parallel for \
is_device_ptr(A,B,C)
for (size_t i=0; i<length; i++) {
A[i] = 2.0;
B[i] = 2.0;
C[i] = 0.0;
}
// Perform the computation 'iterations' number of times
nstream_time = omp_get_wtime();
for (int iter = 0; iter<iterations; iter++) {
#pragma omp target teams distribute parallel for \
is_device_ptr(A,B,C)
for (size_t i=0; i<length; i++) {
C[i] += A[i] + scalar * B[i];
}
}
nstream_time = omp_get_wtime() - nstream_time;
// Validate and output results
double ar = 2.0;
double br = 2.0;
double cr = 0.0;
for (int iter = 0; iter<iterations; iter++) {
for (int i=0; i<length; i++) {
cr += ar + scalar * br;
}
}
double asum = 0.0;
#pragma omp target teams distribute parallel for reduction(+:asum) \
map(tofrom: asum) is_device_ptr(C)
for (size_t i=0; i<length; i++) {
asum += fabs(C[i]);
}
omp_target_free(A, device_id);
omp_target_free(B, device_id);
omp_target_free(C, device_id);
double epsilon=1.e-8;
if (fabs(cr - asum)/asum > epsilon) {
printf("Failed Validation on output array\n"
" Expected checksum: %lf\n"
" Observed checksum: %lf\n"
"ERROR: solution did not validate\n", cr, asum);
return 1;
} else {
printf("Solution validates\n");
double avgtime = nstream_time/iterations;
printf("Checksum = %lf; Avg time (s): %lf\n", asum, avgtime);
}
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/04_target_nowait/test_target_nowait.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
/*
* This test is taken from OpenMP API 5.0.1 Examples (June 2020)
* https://www.openmp.org/wp-content/uploads/openmp-examples-5-0-1.pdf
* (4.13.2 nowait Clause on target Construct)
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>
#define N 100000 // N must be even
void init(int n, float *v1, float *v2) {
int i;
for(i=0; i<n; i++){
v1[i] = i * 0.25;
v2[i] = i - 1.25;
}
}
int main() {
int i, n=N;
float v1[N],v2[N],vxv[N];
double start,end; // timers
init(n, v1,v2);
/* Dummy parallel and target (nowait) regions, so as not to measure
startup time. */
#pragma omp parallel
{
#pragma omp master
#pragma omp target nowait
{;}
}
start=omp_get_wtime();
#pragma omp parallel
{
#pragma omp master
#pragma omp target teams distribute parallel for nowait \
map(to: v1[0:n/2]) \
map(to: v2[0:n/2]) \
map(from: vxv[0:n/2])
for(i=0; i<n/2; i++){
vxv[i] = v1[i]*v2[i];
}
#pragma omp for
for(i=n/2; i<n; i++) {
vxv[i] = v1[i]*v2[i];
}
/* Implicit barrier at end of worksharing for. Target region is
guaranteed to be completed by this point. */
}
end=omp_get_wtime();
printf("vxv[1]=%f, vxv[n-1]=%f, time=%lf\n", vxv[1], vxv[n-1], end-start);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/04_target_nowait/test_target_no_nowait.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
/*
* This test is taken from OpenMP API 5.0.1 Examples (June 2020)
* https://www.openmp.org/wp-content/uploads/openmp-examples-5-0-1.pdf
* (4.13.2 nowait Clause on target Construct)
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>
#define N 100000 // N must be even
void init(int n, float *v1, float *v2) {
int i;
for(i=0; i<n; i++){
v1[i] = i * 0.25;
v2[i] = i - 1.25;
}
}
int main() {
int i, n=N;
float v1[N],v2[N],vxv[N];
double start,end; // timers
init(n, v1, v2);
/* Dummy parallel and target regions, so as not to measure startup
time. */
#pragma omp parallel
{
#pragma omp master
#pragma omp target
{;}
}
start=omp_get_wtime();
#pragma omp parallel
{
#pragma omp master
#pragma omp target teams distribute parallel for \
map(to: v1[0:n/2]) \
map(to: v2[0:n/2]) \
map(from: vxv[0:n/2])
for(i=0; i<n/2; i++){
vxv[i] = v1[i]*v2[i];
}
/* Master thread will wait for target region to be completed
before proceeding beyond this point. */
#pragma omp for
for(i=n/2; i<n; i++) {
vxv[i] = v1[i]*v2[i];
}
/* Implicit barrier at end of worksharing for. */
}
end=omp_get_wtime();
printf("vxv[0]=%f, vxv[n-1]=%f, time=%lf\n", vxv[0], vxv[n-1], end-start);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/09_invariant_computations/test_loop_invariant.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <omp.h>
#define P 16
#define BLOCKS 8
#define SIZE (BLOCKS * P * P * P)
#define MAX 100
#define scaled_rand() ((rand() % MAX) / (1.0 * MAX))
#define IDX2(i, j) (i * P + j)
#define IDX4(b, i, j, k) (b * P * P * P + i * P * P + j * P + k)
int main(void) {
double w[SIZE]; /* output */
double u[SIZE], dx[P * P]; /* input */
int b, i, j, k, l, n;
double start, end; /* timers */
omp_set_default_device(0);
/* dummy target region, so as not to measure startup time. */
#pragma omp target
{ ; }
/* initialize input with random values */
srand(0);
for (i = 0; i < SIZE; i++)
u[i] = scaled_rand();
for (i = 0; i < P * P; i++)
dx[i] = scaled_rand();
for (i = 0; i < SIZE; i++)
w[i] = 0;
start = omp_get_wtime();
/* offload kernel */
#pragma omp target teams distribute parallel for \
private(b,i,j,k,l,n) \
map(to: u[0:SIZE], dx[0:P * P]) \
map(tofrom: w[0:SIZE])
for (n = 0; n < SIZE; n++) {
double ur = 0.;
double us = 0.;
double ut = 0.;
k = 0;
j = k / P;
i = (j * P + k) / (P * P);
b = 1;
for (l = 0; l < P; l++) {
ur += dx[IDX2(i, l)] * u[IDX4(b, l, j, k)];
us += dx[IDX2(k, l)] * u[IDX4(b, i, l, k)];
ut += dx[IDX2(j, l)] * u[IDX4(b, i, j, l)];
}
w[IDX4(n - (n / P) * P, n / (P * P), n / P, n - (n / P) * P)] =
ur * us * ut;
}
end = omp_get_wtime();
printf("offload: w[0]=%lf time=%lf\n", w[0], end - start);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/09_invariant_computations/test_no_loop_invariant.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <omp.h>
#define P 16
#define BLOCKS 8
#define SIZE (BLOCKS * P * P * P)
#define MAX 100
#define scaled_rand() ((rand() % MAX) / (1.0 * MAX))
#define IDX2(i, j) (i * P + j)
#define IDX4(b, i, j, k) (b * P * P * P + i * P * P + j * P + k)
int main(void) {
double w[SIZE]; /* output */
double u[SIZE], dx[P * P]; /* input */
int b, i, j, k, l, n;
double start, end; /* timers */
omp_set_default_device(0);
/* dummy target region, so as not to measure startup time. */
#pragma omp target
{ ; }
/* initialize input with random values */
srand(0);
for (i = 0; i < SIZE; i++)
u[i] = scaled_rand();
for (i = 0; i < P * P; i++)
dx[i] = scaled_rand();
for (i = 0; i < SIZE; i++)
w[i] = 0;
double ur = 0.;
double us = 0.;
double ut = 0.;
k = 0;
j = k / P;
i = (j * P + k) / (P * P);
b = 1;
for (l = 0; l < P; l++) {
ur += dx[IDX2(i, l)] * u[IDX4(b, l, j, k)];
us += dx[IDX2(k, l)] * u[IDX4(b, i, l, k)];
ut += dx[IDX2(j, l)] * u[IDX4(b, i, j, l)];
}
start = omp_get_wtime();
/* offload kernel */
#pragma omp target teams distribute parallel for \
private(n) \
map(tofrom: w[0:SIZE])
for (n = 0; n < SIZE; n++) {
w[IDX4(n - (n / P) * P, n / (P * P), n / P, n - (n / P) * P)] =
ur * us * ut;
}
end = omp_get_wtime();
printf("offload: w[0]=%lf time=%lf\n", w[0], end - start);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/07_loop_bounds/test_loop_bounds_fp.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <omp.h>
#define P 16
#define BLOCKS 8
#define SIZE (BLOCKS * P * P * P)
#define MAX 100
#define scaled_rand() ((rand() % MAX) / (1.0 * MAX))
#define IDX2(i, j) (i * P + j)
#define IDX4(b, i, j, k) (b * P * P * P + i * P * P + j * P + k)
int main(void) {
double w[SIZE]; /* output */
double u[SIZE], dx[P * P]; /* input */
int b, i, j, k, l; /* loop counters */
int upper;
double start, end; /* timers */
omp_set_default_device(0);
/* dummy target region, so as not to measure startup time. */
#pragma omp target
{ ; }
/* initialize input with random values */
srand(0);
for (int i = 0; i < SIZE; i++)
u[i] = scaled_rand();
for (int i = 0; i < P * P; i++)
dx[i] = scaled_rand();
upper = (int)dx[0] + SIZE;
/* map data to device */
#pragma omp target enter data map(to: u[0:SIZE], dx[0:P * P])
start = omp_get_wtime();
/* offload kernel */
#pragma omp target teams distribute parallel for private(b, i, j, k, l) \
firstprivate(upper)
for (int n = 0; n < upper; n++) {
double ur = 0.;
double us = 0.;
double ut = 0.;
k = n - (n / P) * P;
j = (n - k) / P;
i = (n - (j * P + k)) / (P * P);
b = n / (P * P * P);
for (l = 0; l < P; l++) {
ur += dx[IDX2(i, l)] * u[IDX4(b, l, j, k)];
us += dx[IDX2(k, l)] * u[IDX4(b, i, l, k)];
ut += dx[IDX2(j, l)] * u[IDX4(b, i, j, l)];
}
w[IDX4(b, i, j, k)] = ur * us * ut;
}
end = omp_get_wtime();
/* map data from device */
#pragma omp target exit data map(from: w[0:SIZE])
printf("offload: w[0]=%lf time=%lf\n", w[0], end - start);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/07_loop_bounds/test_loop_bounds_nofp_nomap.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <omp.h>
#define P 16
#define BLOCKS 8
#define SIZE (BLOCKS * P * P * P)
#define MAX 100
#define scaled_rand() ((rand() % MAX) / (1.0 * MAX))
#define IDX2(i, j) (i * P + j)
#define IDX4(b, i, j, k) (b * P * P * P + i * P * P + j * P + k)
int main(void) {
double w[SIZE]; /* output */
double u[SIZE], dx[P * P]; /* input */
int b, i, j, k, l; /* loop counters */
int upper;
double start, end; /* timers */
omp_set_default_device(0);
/* dummy target region, so as not to measure startup time. */
#pragma omp target
{ ; }
/* initialize input with random values */
srand(0);
for (int i = 0; i < SIZE; i++)
u[i] = scaled_rand();
for (int i = 0; i < P * P; i++)
dx[i] = scaled_rand();
upper = (int)dx[0] + SIZE;
/* map data to device */
#pragma omp target enter data map(to: u[0:SIZE], dx[0:P * P])
start = omp_get_wtime();
/* offload kernel */
#pragma omp target teams distribute parallel for private(b, i, j, k, l)
for (int n = 0; n < upper; n++) {
double ur = 0.;
double us = 0.;
double ut = 0.;
k = n - (n / P) * P;
j = (n - k) / P;
i = (n - (j * P + k)) / (P * P);
b = n / (P * P * P);
for (l = 0; l < P; l++) {
ur += dx[IDX2(i, l)] * u[IDX4(b, l, j, k)];
us += dx[IDX2(k, l)] * u[IDX4(b, i, l, k)];
ut += dx[IDX2(j, l)] * u[IDX4(b, i, j, l)];
}
w[IDX4(b, i, j, k)] = ur * us * ut;
}
end = omp_get_wtime();
/* map data from device */
#pragma omp target exit data map(from: w[0:SIZE])
printf("offload: w[0]=%lf time=%lf\n", w[0], end - start);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/07_loop_bounds/test_loop_bounds_map.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <omp.h>
#define P 16
#define BLOCKS 8
#define SIZE (BLOCKS * P * P * P)
#define MAX 100
#define scaled_rand() ((rand() % MAX) / (1.0 * MAX))
#define IDX2(i, j) (i * P + j)
#define IDX4(b, i, j, k) (b * P * P * P + i * P * P + j * P + k)
int main(void) {
double w[SIZE]; /* output */
double u[SIZE], dx[P * P]; /* input */
int b, i, j, k, l; /* loop counters */
int upper;
double start, end; /* timers */
omp_set_default_device(0);
/* dummy target region, so as not to measure startup time. */
#pragma omp target
{ ; }
/* initialize input with random values */
srand(0);
for (int i = 0; i < SIZE; i++)
u[i] = scaled_rand();
for (int i = 0; i < P * P; i++)
dx[i] = scaled_rand();
upper = (int)dx[0] + SIZE;
/* map data to device */
#pragma omp target enter data map(to: u[0:SIZE], dx[0:P * P])
start = omp_get_wtime();
/* offload kernel */
#pragma omp target teams distribute parallel for private(b, i, j, k, l) \
map(to: upper)
for (int n = 0; n < upper; n++) {
double ur = 0.;
double us = 0.;
double ut = 0.;
k = n - (n / P) * P;
j = (n - k) / P;
i = (n - (j * P + k)) / (P * P);
b = n / (P * P * P);
for (l = 0; l < P; l++) {
ur += dx[IDX2(i, l)] * u[IDX4(b, l, j, k)];
us += dx[IDX2(k, l)] * u[IDX4(b, i, l, k)];
ut += dx[IDX2(j, l)] * u[IDX4(b, i, j, l)];
}
w[IDX4(b, i, j, k)] = ur * us * ut;
}
end = omp_get_wtime();
/* map data from device */
#pragma omp target exit data map(from: w[0:SIZE])
printf("offload: w[0]=%lf time=%lf\n", w[0], end - start);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/26_omp_prefetch/c_simd/nbody_c_simd.cpp | #include <chrono>
#include <math.h>
#include <omp.h>
#include <stdio.h>
#define CACHE_CLEAN_SIZE 100000000
#define ITERATIONS 100
#define ARRAYLEN1 4096
#define ARRAYLEN2 32768
#define VECLEN 16
// snippet-begin
#define WORKGROUP_SIZE 1024
#define PREFETCH_HINT 4 // 4 = prefetch to L1 and L3; 2 = prefetch to L3
#define TILE_SIZE 64
void nbody_1d_gpu(float *c, float *a, float *b, int n1, int n2) {
#pragma omp target teams distribute parallel for thread_limit(WORKGROUP_SIZE / \
VECLEN)
for (int i = 0; i < n1; i += VECLEN) {
const float ma0 = 0.269327f, ma1 = -0.0750978f, ma2 = 0.0114808f;
const float ma3 = -0.00109313f, ma4 = 0.0000605491f, ma5 = -0.00000147177f;
const float eps = 0.01f;
float dx[VECLEN];
float aa[VECLEN], bb[TILE_SIZE];
#pragma omp simd simdlen(VECLEN)
#pragma unroll(0)
for (int v = 0; v < VECLEN; ++v) {
dx[v] = 0.0f;
aa[v] = a[i + v];
}
for (int j = 0; j < n2; j += TILE_SIZE) {
// load tile from b
for (int u = 0; u < TILE_SIZE; u += VECLEN) {
#pragma omp simd simdlen(VECLEN)
#pragma unroll(0)
for (int v = 0; v < VECLEN; ++v)
bb[u + v] = b[j + u + v];
#ifdef PREFETCH
int next_tile = j + TILE_SIZE + u;
#pragma ompx prefetch data(PREFETCH_HINT : b[next_tile]) if (next_tile < n2)
#endif
}
// compute current tile
#pragma omp simd simdlen(VECLEN)
#pragma unroll(0)
for (int v = 0; v < VECLEN; ++v) {
#pragma unroll(TILE_SIZE)
for (int u = 0; u < TILE_SIZE; ++u) {
float delta = bb[u] - aa[v];
float r2 = delta * delta;
float s0 = r2 + eps;
float s1 = 1.0f / sqrtf(s0);
float f =
(s1 * s1 * s1) -
(ma0 + r2 * (ma1 + r2 * (ma2 + r2 * (ma3 + r2 * (ma4 + ma5)))));
dx[v] += f * delta;
}
}
}
#pragma omp simd simdlen(VECLEN)
#pragma unroll(0)
for (int v = 0; v < VECLEN; ++v) {
c[i + v] = dx[v] * 0.23f;
}
}
}
// snippet-end
void nbody_1d_cpu(float *c, float *a, float *b, int n1, int n2) {
for (int i = 0; i < n1; ++i) {
const float ma0 = 0.269327f, ma1 = -0.0750978f, ma2 = 0.0114808f;
const float ma3 = -0.00109313f, ma4 = 0.0000605491f, ma5 = -0.00000147177f;
const float eps = 0.01f;
float dx = 0.0f;
for (int j = 0; j < n2; ++j) {
float delta = b[j] - a[i];
float r2 = delta * delta;
float s0 = r2 + eps;
float s1 = 1.0f / sqrtf(s0);
float f = (s1 * s1 * s1) -
(ma0 + r2 * (ma1 + r2 * (ma2 + r2 * (ma3 + r2 * (ma4 + ma5)))));
dx += f * delta;
}
c[i] = dx * 0.23f;
}
}
void clean_cache_gpu(double *d, int n) {
#pragma omp target teams distribute parallel for thread_limit(1024)
for (int i = 0; i < n; ++i)
d[i] = i;
return;
}
int main() {
  // Driver: times the offloaded n-body kernel over ITERATIONS runs and
  // compares its reduced output against the serial CPU reference.
  // a, b = inputs; c = output; d = scratch used only to flush GPU caches.
  float *a, *b, *c;
  double *d;
  a = new float[ARRAYLEN1];
  b = new float[ARRAYLEN2];
  c = new float[ARRAYLEN1];
  d = new double[CACHE_CLEAN_SIZE];
  // initialize: b holds a uniform grid on [0, 1); a copies its first entries
  float dx = 1.0f / (float)ARRAYLEN2;
  b[0] = 0.0f;
  for (int i = 1; i < ARRAYLEN2; ++i) {
    b[i] = b[i - 1] + dx;
  }
  for (int i = 0; i < ARRAYLEN1; ++i) {
    a[i] = b[i];
    c[i] = 0.0f;
  }
  // dummy target region so device start-up cost is not part of the timing
#pragma omp target
  {}
  // allocate device storage once; inputs are uploaded a single time below
#pragma omp target enter data map(alloc \
                                  : a [0:ARRAYLEN1], b [0:ARRAYLEN2], \
                                    c [0:ARRAYLEN1])
#pragma omp target enter data map(alloc : d [0:CACHE_CLEAN_SIZE])
#pragma omp target update to(a [0:ARRAYLEN1], b [0:ARRAYLEN2])
  double t1, t2, elapsed_s = 0.0;
  for (int i = 0; i < ITERATIONS; ++i) {
    // evict kernel data from GPU caches so every iteration is timed cold
    clean_cache_gpu(d, CACHE_CLEAN_SIZE);
    t1 = omp_get_wtime();
    nbody_1d_gpu(c, a, b, ARRAYLEN1, ARRAYLEN2);
    t2 = omp_get_wtime();
    elapsed_s += (t2 - t1);
  }
  // bring the device result back and reduce it to one checkable scalar
#pragma omp target update from(c [0:ARRAYLEN1])
  float sum = 0.0f;
  for (int i = 0; i < ARRAYLEN1; ++i)
    sum += c[i];
  printf("Obtained output = %8.3f\n", sum);
  // recompute on the host as the reference answer
  for (int i = 0; i < ARRAYLEN1; ++i)
    c[i] = 0.0f;
  nbody_1d_cpu(c, a, b, ARRAYLEN1, ARRAYLEN2);
  sum = 0.0f;
  for (int i = 0; i < ARRAYLEN1; ++i)
    sum += c[i];
  printf("Expected output = %8.3f\n", sum);
  printf("\nTotal time = %8.1f milliseconds\n", (elapsed_s * 1000));
#pragma omp target exit data map(delete \
                                 : a [0:ARRAYLEN1], b [0:ARRAYLEN2], \
                                   c [0:ARRAYLEN1])
#pragma omp target exit data map(delete : d [0:CACHE_CLEAN_SIZE])
  delete[] a;
  delete[] b;
  delete[] c;
  delete[] d;
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/26_omp_prefetch/c/nbody_c.cpp | #include <math.h>
#include <omp.h>
#include <stdio.h>
#define CACHE_CLEAN_SIZE 100000000
#define ITERATIONS 100
#define ARRAYLEN1 4096
#define ARRAYLEN2 32768
// snippet-begin
#define WORKGROUP_SIZE 1024
#define PREFETCH_HINT 4 // 4 = prefetch to L1 and L3; 2 = prefetch to L3
#define TILE_SIZE 64
// Offloaded kernel: for each of the n1 elements of a, accumulate the
// softened pairwise interaction against all n2 elements of b into c.
// b is consumed in TILE_SIZE chunks staged into the private array bb.
// When PREFETCH is defined, upcoming tiles of b are requested early via
// the "ompx prefetch" extension pragma using PREFETCH_HINT (4 = L1 and L3,
// 2 = L3 only, per the comment on PREFETCH_HINT above).
void nbody_1d_gpu(float *c, float *a, float *b, int n1, int n2) {
#pragma omp target teams distribute parallel for thread_limit(WORKGROUP_SIZE)
  for (int i = 0; i < n1; i++) {
    // coefficients of the polynomial correction term and softening factor
    const float ma0 = 0.269327f, ma1 = -0.0750978f, ma2 = 0.0114808f;
    const float ma3 = -0.00109313f, ma4 = 0.0000605491f, ma5 = -0.00000147177f;
    const float eps = 0.01f;
    float dx = 0.0;
    float bb[TILE_SIZE]; // per-iteration staging buffer for one tile of b
    for (int j = 0; j < n2; j += TILE_SIZE) {
      // load tile from b
      for (int u = 0; u < TILE_SIZE; ++u) {
        bb[u] = b[j + u];
#ifdef PREFETCH
        // issue one prefetch per 16 elements of the *next* tile; the if()
        // clause suppresses it when the next tile runs past the end of b
        int next_tile = j + TILE_SIZE + u;
        if ((next_tile % 16) == 0) {
#pragma ompx prefetch data(PREFETCH_HINT : b[next_tile]) if (next_tile < n2)
        }
#endif
      }
#pragma unroll(TILE_SIZE)
      for (int u = 0; u < TILE_SIZE; ++u) {
        float delta = bb[u] - a[i];
        float r2 = delta * delta;
        float s0 = r2 + eps;
        float s1 = 1.0f / sqrtf(s0); // softened inverse distance
        float f =
            (s1 * s1 * s1) -
            (ma0 + r2 * (ma1 + r2 * (ma2 + r2 * (ma3 + r2 * (ma4 + ma5)))));
        dx += f * delta;
      }
    }
    c[i] = dx * 0.23f;
  }
}
// snippet-end
void nbody_1d_cpu(float *c, float *a, float *b, int n1, int n2) {
for (int i = 0; i < n1; ++i) {
const float ma0 = 0.269327f, ma1 = -0.0750978f, ma2 = 0.0114808f;
const float ma3 = -0.00109313f, ma4 = 0.0000605491f, ma5 = -0.00000147177f;
const float eps = 0.01f;
float dx = 0.0f;
for (int j = 0; j < n2; ++j) {
float delta = b[j] - a[i];
float r2 = delta * delta;
float s0 = r2 + eps;
float s1 = 1.0f / sqrtf(s0);
float f = (s1 * s1 * s1) -
(ma0 + r2 * (ma1 + r2 * (ma2 + r2 * (ma3 + r2 * (ma4 + ma5)))));
dx += f * delta;
}
c[i] = dx * 0.23f;
}
}
// Fill a throwaway device buffer so data cached by the previous kernel
// launch is evicted before the next timed measurement.
void clean_cache_gpu(double *d, int n) {
#pragma omp target teams distribute parallel for thread_limit(1024)
  for (int k = 0; k < n; ++k) {
    d[k] = k;
  }
}
int main() {
  // Driver: times the tiled/prefetching n-body kernel over ITERATIONS runs
  // and compares its reduced output against the serial CPU reference.
  // a, b = inputs; c = output; d = scratch used only to flush GPU caches.
  float *a, *b, *c;
  double *d;
  a = new float[ARRAYLEN1];
  b = new float[ARRAYLEN2];
  c = new float[ARRAYLEN1];
  d = new double[CACHE_CLEAN_SIZE];
  // initialize: b holds a uniform grid on [0, 1); a copies its first entries
  float dx = 1.0f / (float)ARRAYLEN2;
  b[0] = 0.0f;
  for (int i = 1; i < ARRAYLEN2; ++i) {
    b[i] = b[i - 1] + dx;
  }
  for (int i = 0; i < ARRAYLEN1; ++i) {
    a[i] = b[i];
    c[i] = 0.0f;
  }
  // dummy target region so device start-up cost is not part of the timing
#pragma omp target
  {}
  // allocate device storage once; inputs are uploaded a single time below
#pragma omp target enter data map(alloc \
                                  : a [0:ARRAYLEN1], b [0:ARRAYLEN2], \
                                    c [0:ARRAYLEN1])
#pragma omp target enter data map(alloc : d [0:CACHE_CLEAN_SIZE])
#pragma omp target update to(a [0:ARRAYLEN1], b [0:ARRAYLEN2])
  double t1, t2, elapsed_s = 0.0;
  for (int i = 0; i < ITERATIONS; ++i) {
    // evict kernel data from GPU caches so every iteration is timed cold
    clean_cache_gpu(d, CACHE_CLEAN_SIZE);
    t1 = omp_get_wtime();
    nbody_1d_gpu(c, a, b, ARRAYLEN1, ARRAYLEN2);
    t2 = omp_get_wtime();
    elapsed_s += (t2 - t1);
  }
  // bring the device result back and reduce it to one checkable scalar
#pragma omp target update from(c [0:ARRAYLEN1])
  double sum = 0.0f;
  for (int i = 0; i < ARRAYLEN1; ++i)
    sum += c[i];
  printf("Obtained output = %8.3f\n", sum);
  // recompute on the host as the reference answer
  for (int i = 0; i < ARRAYLEN1; ++i)
    c[i] = 0.0f;
  nbody_1d_cpu(c, a, b, ARRAYLEN1, ARRAYLEN2);
  sum = 0.0f;
  for (int i = 0; i < ARRAYLEN1; ++i)
    sum += c[i];
  printf("Expected output = %8.3f\n", sum);
  printf("\nTotal time = %8.1f milliseconds\n", (elapsed_s * 1000));
#pragma omp target exit data map(delete \
                                 : a [0:ARRAYLEN1], b [0:ARRAYLEN2], \
                                   c [0:ARRAYLEN1])
#pragma omp target exit data map(delete : d [0:CACHE_CLEAN_SIZE])
  delete[] a;
  delete[] b;
  delete[] c;
  delete[] d;
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/05_scalars_fp/test_scalars_nofp_nomap.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <omp.h>
#define P 16
#define BLOCKS 8
#define SIZE (BLOCKS * P * P * P)
#define MAX 100
#define scaled_rand() ((rand() % MAX) / (1.0 * MAX))
#define IDX2(i, j) (i * P + j)
#define IDX4(b, i, j, k) (b * P * P * P + i * P * P + j * P + k)
int main(void) {
  /* Variant "no firstprivate / no map": the scalars s1, s2, s3 read in the
     kernel are not listed in any data-sharing or map clause, so the
     implementation's default treatment of scalars referenced in a target
     region is exercised. */
  double w[SIZE]; /* output */
  double u[SIZE], dx[P * P]; /* input */
  double s1, s2, s3; /* scalars */
  int b, i, j, k, l; /* loop counters */
  double start, end; /* timers */
  omp_set_default_device(0);
  /* dummy target region, so as not to measure startup time. */
#pragma omp target
  { ; }
  /* initialize input with random values */
  srand(0);
  for (int i = 0; i < SIZE; i++)
    u[i] = scaled_rand();
  for (int i = 0; i < P * P; i++)
    dx[i] = scaled_rand();
  /* initialize scalars */
  s1 = u[SIZE / 2];
  s2 = scaled_rand();
  s3 = 0.145;
  /* map data to device */
#pragma omp target enter data map(to: u[0:SIZE], dx[0:P * P])
  start = omp_get_wtime();
  /* offload the kernel with collapse clause */
  /* NOTE(review): w is not in the enter-data map above; presumably the
     target construct maps it implicitly -- confirm against the other
     variants of this sample. */
#pragma omp target teams distribute parallel for collapse(4) \
    private(b, i, j, k, l)
  for (b = 0; b < BLOCKS; b++) {
    for (i = 0; i < P; i++) {
      for (j = 0; j < P; j++) {
        for (k = 0; k < P; k++) {
          double ur = 0.;
          double us = 0.;
          double ut = 0.;
          for (l = 0; l < P; l++) {
            ur += dx[IDX2(i, l)] * u[IDX4(b, l, j, k)] + s1;
            us += dx[IDX2(k, l)] * u[IDX4(b, i, l, k)] - s2;
            ut += dx[IDX2(j, l)] * u[IDX4(b, i, j, l)] * s3;
          }
          w[IDX4(b, i, j, k)] = ur * us * ut;
        }
      }
    }
  }
  end = omp_get_wtime();
#pragma omp target exit data map(from: w[0:SIZE])
  /* print result */
  printf("collapse-clause: w[0]=%lf time=%lf\n", w[0], end - start);
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/05_scalars_fp/test_scalars_fp.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <omp.h>
#define P 16
#define BLOCKS 8
#define SIZE (BLOCKS * P * P * P)
#define MAX 100
#define scaled_rand() ((rand() % MAX) / (1.0 * MAX))
#define IDX2(i, j) (i * P + j)
#define IDX4(b, i, j, k) (b * P * P * P + i * P * P + j * P + k)
int main(void) {
  /* Variant "firstprivate": the scalars s1, s2, s3 read in the kernel are
     passed explicitly via a firstprivate clause on the combined target
     construct, so each thread gets its own initialized copy. */
  double w[SIZE]; /* output */
  double u[SIZE], dx[P * P]; /* input */
  double s1, s2, s3; /* scalars */
  int b, i, j, k, l; /* loop counters */
  double start, end; /* timers */
  omp_set_default_device(0);
  /* dummy target region, so as not to measure startup time. */
#pragma omp target
  { ; }
  /* initialize input with random values */
  srand(0);
  for (int i = 0; i < SIZE; i++)
    u[i] = scaled_rand();
  for (int i = 0; i < P * P; i++)
    dx[i] = scaled_rand();
  /* initialize scalars */
  s1 = u[SIZE / 2];
  s2 = scaled_rand();
  s3 = 0.145;
  /* map data to device */
#pragma omp target enter data map(to: u[0:SIZE], dx[0:P * P])
  start = omp_get_wtime();
  /* offload the kernel with collapse clause */
#pragma omp target teams distribute parallel for collapse(4) \
    firstprivate(s1, s2, s3) private(b, i, j, k, l)
  for (b = 0; b < BLOCKS; b++) {
    for (i = 0; i < P; i++) {
      for (j = 0; j < P; j++) {
        for (k = 0; k < P; k++) {
          double ur = 0.;
          double us = 0.;
          double ut = 0.;
          for (l = 0; l < P; l++) {
            ur += dx[IDX2(i, l)] * u[IDX4(b, l, j, k)] + s1;
            us += dx[IDX2(k, l)] * u[IDX4(b, i, l, k)] - s2;
            ut += dx[IDX2(j, l)] * u[IDX4(b, i, j, l)] * s3;
          }
          w[IDX4(b, i, j, k)] = ur * us * ut;
        }
      }
    }
  }
  end = omp_get_wtime();
#pragma omp target exit data map(from: w[0:SIZE])
  /* print result */
  printf("collapse-clause: w[0]=%lf time=%lf\n", w[0], end - start);
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/05_scalars_fp/test_scalars_map.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <omp.h>
#define P 16
#define BLOCKS 8
#define SIZE (BLOCKS * P * P * P)
#define MAX 100
#define scaled_rand() ((rand() % MAX) / (1.0 * MAX))
#define IDX2(i, j) (i * P + j)
#define IDX4(b, i, j, k) (b * P * P * P + i * P * P + j * P + k)
int main(void) {
  /* Variant "map(to:)": the scalars s1, s2, s3 read in the kernel are
     transferred with an explicit map(to:) clause instead of firstprivate,
     for comparison with the other variants of this sample. */
  double w[SIZE]; /* output */
  double u[SIZE], dx[P * P]; /* input */
  double s1, s2, s3; /* scalars */
  int b, i, j, k, l; /* loop counters */
  double start, end; /* timers */
  omp_set_default_device(0);
  /* dummy target region, so as not to measure startup time. */
#pragma omp target
  { ; }
  /* initialize input with random values */
  srand(0);
  for (int i = 0; i < SIZE; i++)
    u[i] = scaled_rand();
  for (int i = 0; i < P * P; i++)
    dx[i] = scaled_rand();
  /* initialize scalars */
  s1 = u[SIZE / 2];
  s2 = scaled_rand();
  s3 = 0.145;
  /* map data to device */
#pragma omp target enter data map(to: u[0:SIZE], dx[0:P * P])
  start = omp_get_wtime();
  /* offload the kernel with collapse clause */
#pragma omp target teams distribute parallel for collapse(4) \
    map(to: s1, s2, s3) private(b, i, j, k, l)
  for (b = 0; b < BLOCKS; b++) {
    for (i = 0; i < P; i++) {
      for (j = 0; j < P; j++) {
        for (k = 0; k < P; k++) {
          double ur = 0.;
          double us = 0.;
          double ut = 0.;
          for (l = 0; l < P; l++) {
            ur += dx[IDX2(i, l)] * u[IDX4(b, l, j, k)] + s1;
            us += dx[IDX2(k, l)] * u[IDX4(b, i, l, k)] - s2;
            ut += dx[IDX2(j, l)] * u[IDX4(b, i, j, l)] * s3;
          }
          w[IDX4(b, i, j, k)] = ur * us * ut;
        }
      }
    }
  }
  end = omp_get_wtime();
#pragma omp target exit data map(from: w[0:SIZE])
  /* print result */
  printf("collapse-clause: w[0]=%lf time=%lf\n", w[0], end - start);
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/10_map/test_map_to_or_from.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <omp.h>
#define P 16
#define BLOCKS 8
#define SIZE (BLOCKS * P * P * P)
#define MAX 100
#define scaled_rand() ((rand() % MAX) / (1.0 * MAX))
#define IDX2(i, j) (i * P + j)
#define IDX4(b, i, j, k) (b * P * P * P + i * P * P + j * P + k)
int main(void) {
  /* Variant "map(to:)/map(from:)": inputs are mapped to the device and only
     the output is mapped back, directly on the target construct (no
     enter/exit data). Compare with the all-tofrom variant of this sample. */
  double w[SIZE]; /* output */
  double u[SIZE], dx[P * P]; /* input */
  int b, i, j, k, l; /* loop counters */
  double start, end; /* timers */
  omp_set_default_device(0);
  /* dummy target region, so as not to measure startup time. */
#pragma omp target
  { ; }
  /* initialize input with random values */
  srand(0);
  for (int i = 0; i < SIZE; i++)
    u[i] = scaled_rand();
  for (int i = 0; i < P * P; i++)
    dx[i] = scaled_rand();
  start = omp_get_wtime();
#pragma omp target teams distribute parallel for \
    private(b, i, j, k, l) \
    map(to: u[0:SIZE], dx[0:P * P]) \
    map(from: w [0:SIZE])
  for (int n = 0; n < SIZE; n++) {
    /* recover the 4-D indices (b, i, j, k) from the flattened index n */
    k = n - (n / P) * P;
    j = (n - k) / P;
    i = (n - (j * P + k)) / (P * P);
    b = n / (P * P * P);
    double ur = 0.;
    double us = 0.;
    double ut = 0.;
    for (l = 0; l < P; l++) {
      ur += dx[IDX2(i, l)] * u[IDX4(b, l, j, k)];
      us += dx[IDX2(k, l)] * u[IDX4(b, i, l, k)];
      ut += dx[IDX2(j, l)] * u[IDX4(b, i, j, l)];
    }
    w[IDX4(b, i, j, k)] = ur * us * ut;
  }
  end = omp_get_wtime();
  printf("offload: w[0]=%lf time=%lf\n", w[0], end - start);
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/10_map/test_map_tofrom.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <omp.h>
#define P 16
#define BLOCKS 8
#define SIZE (BLOCKS * P * P * P)
#define MAX 100
#define scaled_rand() ((rand() % MAX) / (1.0 * MAX))
#define IDX2(i, j) (i * P + j)
#define IDX4(b, i, j, k) (b * P * P * P + i * P * P + j * P + k)
int main(void) {
  /* Variant "map(tofrom:)": every array, including the read-only inputs,
     is mapped tofrom on the target construct -- the less selective mapping,
     for comparison with the map(to:)/map(from:) variant of this sample. */
  double w[SIZE]; /* output */
  double u[SIZE], dx[P * P]; /* input */
  int b, i, j, k, l; /* loop counters */
  double start, end; /* timers */
  omp_set_default_device(0);
  /* dummy target region, so as not to measure startup time. */
#pragma omp target
  { ; }
  /* initialize input with random values */
  srand(0);
  for (int i = 0; i < SIZE; i++)
    u[i] = scaled_rand();
  for (int i = 0; i < P * P; i++)
    dx[i] = scaled_rand();
  start = omp_get_wtime();
#pragma omp target teams distribute parallel for \
    private(b, i, j, k, l) \
    map(tofrom: u[0:SIZE], dx[0:P * P]) \
    map(tofrom: w [0:SIZE])
  for (int n = 0; n < SIZE; n++) {
    /* recover the 4-D indices (b, i, j, k) from the flattened index n */
    k = n - (n / P) * P;
    j = (n - k) / P;
    i = (n - (j * P + k)) / (P * P);
    b = n / (P * P * P);
    double ur = 0.;
    double us = 0.;
    double ut = 0.;
    for (l = 0; l < P; l++) {
      ur += dx[IDX2(i, l)] * u[IDX4(b, l, j, k)];
      us += dx[IDX2(k, l)] * u[IDX4(b, i, l, k)];
      ut += dx[IDX2(j, l)] * u[IDX4(b, i, j, l)];
    }
    w[IDX4(b, i, j, k)] = ur * us * ut;
  }
  end = omp_get_wtime();
  printf("offload: w[0]=%lf time=%lf\n", w[0], end - start);
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/06_scalars_private/test_scalars_private.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <omp.h>
#define P 16
#define BLOCKS 8
#define SIZE (BLOCKS * P * P * P)
#define MAX 100
#define scaled_rand() ((rand() % MAX) / (1.0 * MAX))
#define IDX2(i, j) (i * j)
#define IDX4(b, i, j, k) (b * P * P * P + i * P * P + j * P + k)
int main(void) {
  /* Variant "private scalars": s1, s2, s3 are assigned inside the kernel
     loop and listed in a private clause so each thread has its own copy.
     NOTE(review): IDX2 is defined as (i * j) in this file, unlike
     (i * P + j) in the sibling samples -- confirm this is intentional. */
  double w[SIZE]; /* output */
  double u[SIZE], dx[P * P]; /* input */
  double s1, s2, s3; /* scalars */
  int b, i, j, k, l; /* loop counters */
  double start, end; /* timers */
  omp_set_default_device(0);
  /* dummy target region, so as not to measure startup time. */
#pragma omp target
  { ; }
  /* initialize input with random values */
  srand(0);
  for (int i = 0; i < SIZE; i++)
    u[i] = scaled_rand();
  for (int i = 0; i < P * P; i++)
    dx[i] = scaled_rand();
  /* map data to device */
#pragma omp target enter data map(to: u[0:SIZE], dx[0:P*P])
  start = omp_get_wtime();
  /* offload the kernel with collapse clause */
#pragma omp target teams distribute parallel for collapse(4) \
    private(s1, s2, s3) private(b, i, j, k, l)
  for (b = 0; b < BLOCKS; b++) {
    for (i = 0; i < P; i++) {
      for (j = 0; j < P; j++) {
        for (k = 0; k < P; k++) {
          double ur = 0.;
          double us = 0.;
          double ut = 0.;
          s1 = dx[IDX2(b, k)];
          s2 = u[IDX4(b, 0, 0, k)] + BLOCKS;
          s3 = 0.145;
          for (l = 0; l < P; l++) {
            ur += dx[IDX2(i, l)] * u[IDX4(b, l, j, k)] + s1;
            us += dx[IDX2(k, l)] * u[IDX4(b, i, l, k)] - s2;
            ut += dx[IDX2(j, l)] * u[IDX4(b, i, j, l)] * s3;
          }
          w[IDX4(b, i, j, k)] = ur * us * ut;
        }
      }
    }
  }
  end = omp_get_wtime();
#pragma omp target exit data map(from: w[0:SIZE])
  /* print result */
  printf("collapse-clause: w[0]=%lf time=%lf\n", w[0], end - start);
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/06_scalars_private/test_scalars.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <omp.h>
#define P 16
#define BLOCKS 8
#define SIZE (BLOCKS * P * P * P)
#define MAX 100
#define scaled_rand() ((rand() % MAX) / (1.0 * MAX))
#define IDX2(i, j) (i * j)
#define IDX4(b, i, j, k) (b * P * P * P + i * P * P + j * P + k)
int main(void) {
  /* Variant "shared scalars": s1, s2, s3 are assigned inside the collapsed
     parallel loop but are NOT listed in a private clause.
     NOTE(review): the sibling samples add private(s1, s2, s3) or make them
     loop-local; this appears to be the "before" version illustrating the
     resulting data sharing -- confirm. Also note IDX2 here is (i * j),
     unlike (i * P + j) elsewhere. */
  double w[SIZE]; /* output */
  double u[SIZE], dx[P * P]; /* input */
  double s1, s2, s3; /* scalars */
  int b, i, j, k, l; /* loop counters */
  double start, end; /* timers */
  omp_set_default_device(0);
  /* dummy target region, so as not to measure startup time. */
#pragma omp target
  { ; }
  /* initialize input with random values */
  srand(0);
  for (int i = 0; i < SIZE; i++)
    u[i] = scaled_rand();
  for (int i = 0; i < P * P; i++)
    dx[i] = scaled_rand();
  /* map data to device */
#pragma omp target enter data map(to: u[0:SIZE], dx[0:P*P])
  start = omp_get_wtime();
  /* offload the kernel with collapse clause */
#pragma omp target teams distribute parallel for collapse(4) \
    private(b, i, j, k, l)
  for (b = 0; b < BLOCKS; b++) {
    for (i = 0; i < P; i++) {
      for (j = 0; j < P; j++) {
        for (k = 0; k < P; k++) {
          double ur = 0.;
          double us = 0.;
          double ut = 0.;
          s1 = dx[IDX2(b, k)];
          s2 = u[IDX4(b, 0, 0, k)] + BLOCKS;
          s3 = 0.145;
          for (l = 0; l < P; l++) {
            ur += dx[IDX2(i, l)] * u[IDX4(b, l, j, k)] + s1;
            us += dx[IDX2(k, l)] * u[IDX4(b, i, l, k)] - s2;
            ut += dx[IDX2(j, l)] * u[IDX4(b, i, j, l)] * s3;
          }
          w[IDX4(b, i, j, k)] = ur * us * ut;
        }
      }
    }
  }
  end = omp_get_wtime();
#pragma omp target exit data map(from: w[0:SIZE])
  /* print result */
  printf("collapse-clause: w[0]=%lf time=%lf\n", w[0], end - start);
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/06_scalars_private/test_scalars_private_2.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <omp.h>
#define P 16
#define BLOCKS 8
#define SIZE (BLOCKS * P * P * P)
#define MAX 100
#define scaled_rand() ((rand() % MAX) / (1.0 * MAX))
#define IDX2(i, j) (i * j)
#define IDX4(b, i, j, k) (b * P * P * P + i * P * P + j * P + k)
int main(void) {
  /* Variant "loop-local scalars": s1, s2, s3 are declared inside the
     innermost loop body, so they are automatically private to each
     iteration -- no private clause needed. NOTE(review): IDX2 here is
     (i * j), unlike (i * P + j) in the sibling samples -- confirm. */
  double w[SIZE]; /* output */
  double u[SIZE], dx[P * P]; /* input */
  int b, i, j, k, l; /* loop counters */
  double start, end; /* timers */
  omp_set_default_device(0);
  /* dummy target region, so as not to measure startup time. */
#pragma omp target
  { ; }
  /* initialize input with random values */
  srand(0);
  for (int i = 0; i < SIZE; i++)
    u[i] = scaled_rand();
  for (int i = 0; i < P * P; i++)
    dx[i] = scaled_rand();
  /* map data to device */
#pragma omp target enter data map(to: u[0:SIZE], dx[0:P*P])
  start = omp_get_wtime();
  /* offload the kernel with collapse clause */
#pragma omp target teams distribute parallel for collapse(4) \
    private(b, i, j, k, l)
  for (b = 0; b < BLOCKS; b++) {
    for (i = 0; i < P; i++) {
      for (j = 0; j < P; j++) {
        for (k = 0; k < P; k++) {
          double ur = 0.;
          double us = 0.;
          double ut = 0.;
          double s1 = dx[IDX2(b, k)];
          double s2 = u[IDX4(b, 0, 0, k)] + BLOCKS;
          double s3 = 0.145;
          for (l = 0; l < P; l++) {
            ur += dx[IDX2(i, l)] * u[IDX4(b, l, j, k)] + s1;
            us += dx[IDX2(k, l)] * u[IDX4(b, i, l, k)] - s2;
            ut += dx[IDX2(j, l)] * u[IDX4(b, i, j, l)] * s3;
          }
          w[IDX4(b, i, j, k)] = ur * us * ut;
        }
      }
    }
  }
  end = omp_get_wtime();
#pragma omp target exit data map(from: w[0:SIZE])
  /* print result */
  printf("collapse-clause: w[0]=%lf time=%lf\n", w[0], end - start);
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/11_device_alloc/test_declare_target.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <omp.h>
#define P 16
#define BLOCKS 8
#define SIZE (BLOCKS * P * P * P)
#define MAX 100
#define scaled_rand() ((rand() % MAX) / (1.0 * MAX))
#define IDX2(i, j) (i * P + j)
#define IDX4(b, i, j, k) (b * P * P * P + i * P * P + j * P + k)
/* Work arrays declared inside a declare-target region so they get static
   storage on the device as well as the host; the kernel below then uses
   them without any map clause. */
#pragma omp declare target
double ur[SIZE], us[SIZE], ut[SIZE]; /* work arrays */
#pragma omp end declare target
int main(void) {
  /* Variant "declare target": the work arrays ur/us/ut are declare-target
     globals (see above), so they appear in no map clause on the kernel. */
  double w[SIZE]; /* output */
  double u[SIZE], dx[P * P]; /* input */
  int b, i, j, k, l; /* loop counters */
  double start, end; /* timers */
  omp_set_default_device(0);
  /* dummy target region, so as not to measure startup time. */
#pragma omp target
  { ; }
  /* initialize input with random values */
  srand(0);
  for (int i = 0; i < SIZE; i++)
    u[i] = scaled_rand();
  for (int i = 0; i < P * P; i++)
    dx[i] = scaled_rand();
  start = omp_get_wtime();
  /* offload the kernel */
#pragma omp target teams distribute parallel for simd simdlen(16) collapse(4) \
    map(to:u[0:SIZE],dx[0:P*P]) \
    map(from:w[0:SIZE]) \
    private(b,i,j,k,l)
  for (b = 0; b < BLOCKS; b++) {
    for (i = 0; i < P; i++) {
      for (j = 0; j < P; j++) {
        for (k = 0; k < P; k++) {
          w[IDX4(b, i, j, k)] = 0.;
          ur[IDX4(b, i, j, k)] = 0.;
          us[IDX4(b, i, j, k)] = 0.;
          ut[IDX4(b, i, j, k)] = 0.;
          for (l = 0; l < P; l++) {
            ur[IDX4(b, i, j, k)] += dx[IDX2(i, l)] * u[IDX4(b, l, j, k)];
            us[IDX4(b, i, j, k)] += dx[IDX2(k, l)] * u[IDX4(b, i, l, k)];
            ut[IDX4(b, i, j, k)] += dx[IDX2(j, l)] * u[IDX4(b, i, j, l)];
          }
          w[IDX4(b, i, j, k)] = ur[IDX4(b, i, j, k)] * us[IDX4(b, i, j, k)] *
                                ut[IDX4(b, i, j, k)];
        }
      }
    }
  }
  end = omp_get_wtime();
  /* print result */
  printf("collapse-clause: w[0]=%lf time=%lf\n", w[0], end - start);
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/11_device_alloc/test_map_to.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <omp.h>
#define P 16
#define BLOCKS 8
#define SIZE (BLOCKS * P * P * P)
#define MAX 100
#define scaled_rand() ((rand() % MAX) / (1.0 * MAX))
#define IDX2(i, j) (i * P + j)
#define IDX4(b, i, j, k) (b * P * P * P + i * P * P + j * P + k)
int main(void) {
  /* Variant "map(to:) work arrays": the scratch arrays ur/us/ut are host
     arrays copied to the device with map(to:), even though their initial
     host contents are never read by the kernel -- compare with the
     map(alloc:) and omp_target_alloc variants of this sample. */
  double w[SIZE]; /* output */
  double u[SIZE], dx[P * P]; /* input */
  double ur[SIZE], us[SIZE], ut[SIZE]; /* work arrays */
  int b, i, j, k, l; /* loop counters */
  double start, end; /* timers */
  omp_set_default_device(0);
  /* dummy target region, so as not to measure startup time. */
#pragma omp target
  { ; }
  /* initialize input with random values */
  srand(0);
  for (int i = 0; i < SIZE; i++)
    u[i] = scaled_rand();
  for (int i = 0; i < P * P; i++)
    dx[i] = scaled_rand();
  start = omp_get_wtime();
  /* offload the kernel */
#pragma omp target teams distribute parallel for simd simdlen(16) collapse(4) \
    map(to:u[0:SIZE],dx[0:P*P]) \
    map(from:w[0:SIZE]) \
    map(to:ur[0:SIZE]) \
    map(to:us[0:SIZE]) \
    map(to:ut[0:SIZE]) \
    private(b,i,j,k,l)
  for (b = 0; b < BLOCKS; b++) {
    for (i = 0; i < P; i++) {
      for (j = 0; j < P; j++) {
        for (k = 0; k < P; k++) {
          w[IDX4(b, i, j, k)] = 0.;
          ur[IDX4(b, i, j, k)] = 0.;
          us[IDX4(b, i, j, k)] = 0.;
          ut[IDX4(b, i, j, k)] = 0.;
          for (l = 0; l < P; l++) {
            ur[IDX4(b, i, j, k)] += dx[IDX2(i, l)] * u[IDX4(b, l, j, k)];
            us[IDX4(b, i, j, k)] += dx[IDX2(k, l)] * u[IDX4(b, i, l, k)];
            ut[IDX4(b, i, j, k)] += dx[IDX2(j, l)] * u[IDX4(b, i, j, l)];
          }
          w[IDX4(b, i, j, k)] = ur[IDX4(b, i, j, k)] * us[IDX4(b, i, j, k)] *
                                ut[IDX4(b, i, j, k)];
        }
      }
    }
  }
  end = omp_get_wtime();
  /* print result */
  printf("collapse-clause: w[0]=%lf time=%lf\n", w[0], end - start);
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/11_device_alloc/test_map_alloc.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <omp.h>
#define P 16
#define BLOCKS 8
#define SIZE (BLOCKS * P * P * P)
#define MAX 100
#define scaled_rand() ((rand() % MAX) / (1.0 * MAX))
#define IDX2(i, j) (i * P + j)
#define IDX4(b, i, j, k) (b * P * P * P + i * P * P + j * P + k)
int main(void) {
  /* Variant "map(alloc:) work arrays": the scratch arrays ur/us/ut are
     only allocated on the device (map(alloc:)), avoiding the host-to-device
     copy done by the map(to:) variant of this sample. */
  double w[SIZE]; /* output */
  double u[SIZE], dx[P * P]; /* input */
  double ur[SIZE], us[SIZE], ut[SIZE]; /* work arrays */
  int b, i, j, k, l; /* loop counters */
  double start, end; /* timers */
  omp_set_default_device(0);
  /* dummy target region, so as not to measure startup time. */
#pragma omp target
  { ; }
  /* initialize input with random values */
  srand(0);
  for (int i = 0; i < SIZE; i++)
    u[i] = scaled_rand();
  for (int i = 0; i < P * P; i++)
    dx[i] = scaled_rand();
  start = omp_get_wtime();
  /* offload the kernel */
#pragma omp target teams distribute parallel for simd simdlen(16) collapse(4) \
    map(to:u[0:SIZE],dx[0:P*P]) \
    map(from:w[0:SIZE]) \
    map(alloc:ur[0:SIZE]) \
    map(alloc:us[0:SIZE]) \
    map(alloc:ut[0:SIZE]) \
    private(b,i,j,k,l)
  for (b = 0; b < BLOCKS; b++) {
    for (i = 0; i < P; i++) {
      for (j = 0; j < P; j++) {
        for (k = 0; k < P; k++) {
          w[IDX4(b, i, j, k)] = 0.;
          ur[IDX4(b, i, j, k)] = 0.;
          us[IDX4(b, i, j, k)] = 0.;
          ut[IDX4(b, i, j, k)] = 0.;
          for (l = 0; l < P; l++) {
            ur[IDX4(b, i, j, k)] += dx[IDX2(i, l)] * u[IDX4(b, l, j, k)];
            us[IDX4(b, i, j, k)] += dx[IDX2(k, l)] * u[IDX4(b, i, l, k)];
            ut[IDX4(b, i, j, k)] += dx[IDX2(j, l)] * u[IDX4(b, i, j, l)];
          }
          w[IDX4(b, i, j, k)] = ur[IDX4(b, i, j, k)] * us[IDX4(b, i, j, k)] *
                                ut[IDX4(b, i, j, k)];
        }
      }
    }
  }
  end = omp_get_wtime();
  /* print result */
  printf("collapse-clause: w[0]=%lf time=%lf\n", w[0], end - start);
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/11_device_alloc/test_target_alloc.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <omp.h>
#define P 16
#define BLOCKS 8
#define SIZE (BLOCKS * P * P * P)
#define MAX 100
#define scaled_rand() ((rand() % MAX) / (1.0 * MAX))
#define IDX2(i, j) (i * P + j)
#define IDX4(b, i, j, k) (b * P * P * P + i * P * P + j * P + k)
int main(void) {
  /* Variant "omp_target_alloc": the scratch arrays ur/us/ut are allocated
     directly in device memory with omp_target_alloc and handed to the
     kernel via is_device_ptr, so they never exist on the host. */
  double w[SIZE]; /* output */
  double u[SIZE], dx[P * P]; /* input */
  double *ur, *us, *ut; /* pointers to work arrays allocated on device */
  int b, i, j, k, l; /* loop counters */
  double start, end; /* timers */
  omp_set_default_device(0);
  /* dummy target region, so as not to measure startup time. */
#pragma omp target
  { ; }
  /* initialize input with random values */
  srand(0);
  for (int i = 0; i < SIZE; i++)
    u[i] = scaled_rand();
  for (int i = 0; i < P * P; i++)
    dx[i] = scaled_rand();
  start = omp_get_wtime();
  /* allocate work arrays (ur, us, and ut) on device */
  ur = (double *)omp_target_alloc(sizeof(double) * SIZE, 0);
  if (ur == NULL) {
    printf(" ERROR: Cannot allocate memory on device.\n");
    exit(1);
  }
  us = (double *)omp_target_alloc(sizeof(double) * SIZE, 0);
  if (us == NULL) {
    printf(" ERROR: Cannot allocate memory on device.\n");
    exit(1);
  }
  ut = (double *)omp_target_alloc(sizeof(double) * SIZE, 0);
  if (ut == NULL) {
    printf(" ERROR: Cannot allocate memory on device.\n");
    exit(1);
  }
  /* offload the kernel */
  /* NOTE(review): unlike the sibling variants, dx is absent from the map
     clauses here even though the kernel reads it -- presumably it is
     implicitly mapped; confirm this omission is intentional. */
#pragma omp target teams distribute parallel for simd simdlen(16) collapse(4) \
    map(to:u[0:SIZE]) \
    map(from:w[0:SIZE]) \
    is_device_ptr(ur, us, ut) \
    private(b,i,j,k,l)
  for (b = 0; b < BLOCKS; b++) {
    for (i = 0; i < P; i++) {
      for (j = 0; j < P; j++) {
        for (k = 0; k < P; k++) {
          w[IDX4(b, i, j, k)] = 0.;
          ur[IDX4(b, i, j, k)] = 0.;
          us[IDX4(b, i, j, k)] = 0.;
          ut[IDX4(b, i, j, k)] = 0.;
          for (l = 0; l < P; l++) {
            ur[IDX4(b, i, j, k)] += dx[IDX2(i, l)] * u[IDX4(b, l, j, k)];
            us[IDX4(b, i, j, k)] += dx[IDX2(k, l)] * u[IDX4(b, i, l, k)];
            ut[IDX4(b, i, j, k)] += dx[IDX2(j, l)] * u[IDX4(b, i, j, l)];
          }
          w[IDX4(b, i, j, k)] = ur[IDX4(b, i, j, k)] * us[IDX4(b, i, j, k)] *
                                ut[IDX4(b, i, j, k)];
        }
      }
    }
  }
  end = omp_get_wtime();
  /* print result */
  printf("collapse-clause: w[0]=%lf time=%lf\n", w[0], end - start);
  /* release the device-side work arrays */
  omp_target_free(ur, 0);
  omp_target_free(us, 0);
  omp_target_free(ut, 0);
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/03_target_enter_exit_data/test_no_target_enter_exit_data.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <omp.h>
#define P 16
#define BLOCKS 8
#define SIZE (BLOCKS * P * P * P)
#define MAX 100
#define scaled_rand() ((rand() % MAX) / (1.0 * MAX))
#define IDX2(i, j) (i * P + j)
#define IDX4(b, i, j, k) (b * P * P * P + i * P * P + j * P + k)
int main(void) {
  /* Variant "no enter/exit data": each of the two kernels carries its own
     full map clauses, so u and dx are transferred twice and w round-trips
     between the kernels -- compare with the enter/exit-data variant. */
  double w[SIZE]; /* output */
  double u[SIZE], dx[P * P]; /* input */
  int b, i, j, k, l; /* loop counters */
  double start, end; /* timers */
  omp_set_default_device(0);
  /* dummy target region, so as not to measure startup time. */
#pragma omp target
  { ; }
  /* initialize input with random values */
  srand(0);
  for (int i = 0; i < SIZE; i++)
    u[i] = scaled_rand();
  for (int i = 0; i < P * P; i++)
    dx[i] = scaled_rand();
  start = omp_get_wtime();
  /* offload kernel #1 */
#pragma omp target teams distribute parallel for collapse(4) \
    map(to: u[0:SIZE], dx[0:P * P]) map(from: w[0:SIZE]) \
    private(b, i, j, k, l)
  for (b = 0; b < BLOCKS; b++) {
    for (i = 0; i < P; i++) {
      for (j = 0; j < P; j++) {
        for (k = 0; k < P; k++) {
          double ur = 0.;
          double us = 0.;
          double ut = 0.;
          for (l = 0; l < P; l++) {
            ur += dx[IDX2(i, l)] * u[IDX4(b, l, j, k)];
            us += dx[IDX2(k, l)] * u[IDX4(b, i, l, k)];
            ut += dx[IDX2(j, l)] * u[IDX4(b, i, j, l)];
          }
          w[IDX4(b, i, j, k)] = ur * us * ut;
        }
      }
    }
  }
  /* offload kernel #2: accumulates into w, hence the tofrom map */
#pragma omp target teams distribute parallel for collapse(4) \
    map(to: u[0:SIZE], dx[0:P * P]) map(tofrom: w[0:SIZE]) \
    private(b, i, j, k, l)
  for (b = 0; b < BLOCKS; b++) {
    for (i = 0; i < P; i++) {
      for (j = 0; j < P; j++) {
        for (k = 0; k < P; k++) {
          double ur = b + i + j - k;
          double us = b + i + j - k;
          double ut = b + i + j - k;
          for (l = 0; l < P; l++) {
            ur += dx[IDX2(i, l)] * u[IDX4(b, l, j, k)];
            us += dx[IDX2(k, l)] * u[IDX4(b, i, l, k)];
            ut += dx[IDX2(j, l)] * u[IDX4(b, i, j, l)];
          }
          w[IDX4(b, i, j, k)] += ur * us * ut;
        }
      }
    }
  }
  end = omp_get_wtime();
  /* print result */
  printf("target region: w[0]=%lf time=%lf\n", w[0], end - start);
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/03_target_enter_exit_data/test_target_enter_exit_data.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <omp.h>
#define P 16
#define BLOCKS 8
#define SIZE (BLOCKS * P * P * P)
#define MAX 100
#define scaled_rand() ((rand() % MAX) / (1.0 * MAX))
#define IDX2(i, j) (i * P + j)
#define IDX4(b, i, j, k) (b * P * P * P + i * P * P + j * P + k)
int main(void) {
  /* Variant "enter/exit data": data is mapped once around both kernels,
     so the arrays stay resident on the device between the two launches --
     compare with the per-kernel-map variant of this sample. */
  double w[SIZE]; /* output */
  double u[SIZE], dx[P * P]; /* input */
  int b, i, j, k, l; /* loop counters */
  double start, end; /* timers */
  omp_set_default_device(0);
  /* dummy target region, so as not to measure startup time. */
#pragma omp target
  { ; }
  /* initialize input with random values */
  srand(0);
  for (int i = 0; i < SIZE; i++)
    u[i] = scaled_rand();
  for (int i = 0; i < P * P; i++)
    dx[i] = scaled_rand();
  start = omp_get_wtime();
  /* map data to device. alloc for w avoids map(tofrom: w[0:SIZE])
     on target by default. */
#pragma omp target enter data map(to: u[0:SIZE], dx[0:P * P]) \
    map(alloc: w[0:SIZE])
  /* offload kernel #1: w is already present, so no map clauses needed */
#pragma omp target teams distribute parallel for collapse(4) \
    private(b, i, j, k, l)
  for (b = 0; b < BLOCKS; b++) {
    for (i = 0; i < P; i++) {
      for (j = 0; j < P; j++) {
        for (k = 0; k < P; k++) {
          double ur = 0.;
          double us = 0.;
          double ut = 0.;
          for (l = 0; l < P; l++) {
            ur += dx[IDX2(i, l)] * u[IDX4(b, l, j, k)];
            us += dx[IDX2(k, l)] * u[IDX4(b, i, l, k)];
            ut += dx[IDX2(j, l)] * u[IDX4(b, i, j, l)];
          }
          w[IDX4(b, i, j, k)] = ur * us * ut;
        }
      }
    }
  }
  /* offload kernel #2: accumulates into the device-resident w */
#pragma omp target teams distribute parallel for collapse(4) \
    private(b, i, j, k, l)
  for (b = 0; b < BLOCKS; b++) {
    for (i = 0; i < P; i++) {
      for (j = 0; j < P; j++) {
        for (k = 0; k < P; k++) {
          double ur = b + i + j - k;
          double us = b + i + j - k;
          double ut = b + i + j - k;
          for (l = 0; l < P; l++) {
            ur += dx[IDX2(i, l)] * u[IDX4(b, l, j, k)];
            us += dx[IDX2(k, l)] * u[IDX4(b, i, l, k)];
            ut += dx[IDX2(j, l)] * u[IDX4(b, i, j, l)];
          }
          w[IDX4(b, i, j, k)] += ur * us * ut;
        }
      }
    }
  }
  /* copy the final result back once, after both kernels */
#pragma omp target exit data map(from: w[0:SIZE])
  end = omp_get_wtime()
;
  /* print result */
  printf("target region: w[0]=%lf time=%lf\n", w[0], end - start);
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/08_num_teams/test_num_teams.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <omp.h>
#define P 16
#define BLOCKS 8
#define SIZE (BLOCKS * P * P * P)
#define MAX 100
#define scaled_rand() ((rand() % MAX) / (1.0 * MAX))
#define IDX2(i, j) (i * P + j)
#define IDX4(b, i, j, k) (b * P * P * P + i * P * P + j * P + k)
/* Offload benchmark variant that caps the league at 16 teams via
   num_teams(16) (contrast with test_no_num_teams.cpp). */
int main(void) {
  double w[SIZE];            /* output */
  double u[SIZE], dx[P * P]; /* input */
  int b, i, j, k, l;         /* loop counters */
  double start, end;         /* timers */
  omp_set_default_device(0);
  /* dummy target region, so as not to measure startup time. */
#pragma omp target
  { ; }
  /* initialize input with random values */
  srand(0);
  for (int i = 0; i < SIZE; i++)
    u[i] = scaled_rand();
  for (int i = 0; i < P * P; i++)
    dx[i] = scaled_rand();
  /* map data to device */
#pragma omp target enter data map(to: u[0:SIZE], dx[0:P * P])
  start = omp_get_wtime();
  /* offload the kernel with collapse clause */
  /* collapse(4) flattens b/i/j/k; num_teams(16) limits the team count */
#pragma omp target teams distribute parallel for collapse(4) \
    private(b, i, j, k, l) num_teams(16)
  for (b = 0; b < BLOCKS; b++) {
    for (i = 0; i < P; i++) {
      for (j = 0; j < P; j++) {
        for (k = 0; k < P; k++) {
          double ur = 0.;
          double us = 0.;
          double ut = 0.;
          for (l = 0; l < P; l++) {
            ur += dx[IDX2(i, l)] * u[IDX4(b, l, j, k)];
            us += dx[IDX2(k, l)] * u[IDX4(b, i, l, k)];
            ut += dx[IDX2(j, l)] * u[IDX4(b, i, j, l)];
          }
          w[IDX4(b, i, j, k)] = ur * us * ut;
        }
      }
    }
  }
  end = omp_get_wtime();
#pragma omp target exit data map(from: w[0:SIZE])
  /* print result */
  printf("collapse-clause: w[0]=%lf time=%lf\n", w[0], end - start);
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/08_num_teams/test_no_num_teams.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <omp.h>
#define P 16
#define BLOCKS 8
#define SIZE (BLOCKS * P * P * P)
#define MAX 100
#define scaled_rand() ((rand() % MAX) / (1.0 * MAX))
#define IDX2(i, j) (i * P + j)
#define IDX4(b, i, j, k) (b * P * P * P + i * P * P + j * P + k)
/* Offload benchmark with no num_teams clause: the runtime chooses the
   number of teams (baseline for test_num_teams.cpp). */
int main(void) {
  double w[SIZE];            /* output */
  double u[SIZE], dx[P * P]; /* input */
  int b, i, j, k, l;         /* loop counters */
  double start, end;         /* timers */
  omp_set_default_device(0);
  /* dummy target region, so as not to measure startup time. */
#pragma omp target
  { ; }
  /* initialize input with random values */
  srand(0);
  for (int i = 0; i < SIZE; i++)
    u[i] = scaled_rand();
  for (int i = 0; i < P * P; i++)
    dx[i] = scaled_rand();
  /* map data to device */
#pragma omp target enter data map(to: u[0:SIZE], dx[0:P * P])
  start = omp_get_wtime();
  /* offload the kernel with collapse clause */
#pragma omp target teams distribute parallel for collapse(4) \
    private(b, i, j, k, l)
  for (b = 0; b < BLOCKS; b++) {
    for (i = 0; i < P; i++) {
      for (j = 0; j < P; j++) {
        for (k = 0; k < P; k++) {
          double ur = 0.;
          double us = 0.;
          double ut = 0.;
          for (l = 0; l < P; l++) {
            ur += dx[IDX2(i, l)] * u[IDX4(b, l, j, k)];
            us += dx[IDX2(k, l)] * u[IDX4(b, i, l, k)];
            ut += dx[IDX2(j, l)] * u[IDX4(b, i, j, l)];
          }
          w[IDX4(b, i, j, k)] = ur * us * ut;
        }
      }
    }
  }
  end = omp_get_wtime();
#pragma omp target exit data map(from: w[0:SIZE])
  /* print result */
  printf("collapse-clause: w[0]=%lf time=%lf\n", w[0], end - start);
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/01_collapse/test_collapse_3levels.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <omp.h>
#define P 16
#define BLOCKS 8
#define SIZE (BLOCKS * P * P * P)
#define MAX 100
#define scaled_rand() ((rand() % MAX) / (1.0 * MAX))
#define IDX2(i, j) (i * P + j)
#define IDX4(b, i, j, k) (b * P * P * P + i * P * P + j * P + k)
/* Collapse-depth study: collapse(3) flattens only the b/i/j loops;
   the k loop remains sequential inside each collapsed iteration. */
int main(void) {
  double w[SIZE];            /* output */
  double u[SIZE], dx[P * P]; /* input */
  int b, i, j, k, l;         /* loop counters */
  double start, end;         /* timers */
  omp_set_default_device(0);
  /* dummy target region, so as not to measure startup time. */
#pragma omp target
  { ; }
  /* initialize input with random values */
  srand(0);
  for (int i = 0; i < SIZE; i++)
    u[i] = scaled_rand();
  for (int i = 0; i < P * P; i++)
    dx[i] = scaled_rand();
  /* map data to device */
#pragma omp target enter data map(to: u[0:SIZE], dx[0:P * P])
  start = omp_get_wtime();
  /* offload the kernel with collapse clause */
#pragma omp target teams distribute parallel for collapse(3) \
    private(b, i, j, k, l)
  for (b = 0; b < BLOCKS; b++) {
    for (i = 0; i < P; i++) {
      for (j = 0; j < P; j++) {
        for (k = 0; k < P; k++) {
          double ur = 0.;
          double us = 0.;
          double ut = 0.;
          for (l = 0; l < P; l++) {
            ur += dx[IDX2(i, l)] * u[IDX4(b, l, j, k)];
            us += dx[IDX2(k, l)] * u[IDX4(b, i, l, k)];
            ut += dx[IDX2(j, l)] * u[IDX4(b, i, j, l)];
          }
          w[IDX4(b, i, j, k)] = ur * us * ut;
        }
      }
    }
  }
  end = omp_get_wtime();
#pragma omp target exit data map(from: w[0:SIZE])
  /* print result */
  printf("collapse-clause: w[0]=%lf time=%lf\n", w[0], end - start);
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/01_collapse/test_no_collapse.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
// Snippet begin
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <omp.h>
#define P 16
#define BLOCKS 8
#define SIZE (BLOCKS * P * P * P)
#define MAX 100
#define scaled_rand() ((rand() % MAX) / (1.0 * MAX))
#define IDX2(i, j) (i * P + j)
#define IDX4(b, i, j, k) (b * P * P * P + i * P * P + j * P + k)
/* Baseline for the collapse study: without a collapse clause only the
   outer b loop (BLOCKS iterations) is distributed/parallelized. */
int main(void) {
  double w[SIZE];            /* output */
  double u[SIZE], dx[P * P]; /* input */
  int b, i, j, k, l;         /* loop counters */
  double start, end;         /* timers */
  omp_set_default_device(0);
  /* dummy target region, so as not to measure startup time. */
#pragma omp target
  { ; }
  /* initialize input with random values */
  srand(0);
  for (int i = 0; i < SIZE; i++)
    u[i] = scaled_rand();
  for (int i = 0; i < P * P; i++)
    dx[i] = scaled_rand();
  /* map data to device */
#pragma omp target enter data map(to: u[0:SIZE], dx[0:P * P])
  start = omp_get_wtime();
  /* offload the kernel with no collapse clause */
#pragma omp target teams distribute parallel for \
    private(b, i, j, k, l)
  for (b = 0; b < BLOCKS; b++) {
    for (i = 0; i < P; i++) {
      for (j = 0; j < P; j++) {
        for (k = 0; k < P; k++) {
          double ur = 0.;
          double us = 0.;
          double ut = 0.;
          for (l = 0; l < P; l++) {
            ur += dx[IDX2(i, l)] * u[IDX4(b, l, j, k)];
            us += dx[IDX2(k, l)] * u[IDX4(b, i, l, k)];
            ut += dx[IDX2(j, l)] * u[IDX4(b, i, j, l)];
          }
          w[IDX4(b, i, j, k)] = ur * us * ut;
        }
      }
    }
  }
  end = omp_get_wtime();
#pragma omp target exit data map(from: w[0:SIZE])
  /* print result */
  printf("no-collapse-clause: w[0]=%lf time=%lf\n", w[0], end - start);
  return 0;
}
// Snippet end
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/01_collapse/test_collapse_2levels.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <omp.h>
#define P 16
#define BLOCKS 8
#define SIZE (BLOCKS * P * P * P)
#define MAX 100
#define scaled_rand() ((rand() % MAX) / (1.0 * MAX))
#define IDX2(i, j) (i * P + j)
#define IDX4(b, i, j, k) (b * P * P * P + i * P * P + j * P + k)
/* Collapse-depth study: collapse(2) flattens only the b/i loops;
   the j and k loops remain sequential inside each iteration. */
int main(void) {
  double w[SIZE];            /* output */
  double u[SIZE], dx[P * P]; /* input */
  int b, i, j, k, l;         /* loop counters */
  double start, end;         /* timers */
  omp_set_default_device(0);
  /* dummy target region, so as not to measure startup time. */
#pragma omp target
  { ; }
  /* initialize input with random values */
  srand(0);
  for (int i = 0; i < SIZE; i++)
    u[i] = scaled_rand();
  for (int i = 0; i < P * P; i++)
    dx[i] = scaled_rand();
  /* map data to device */
#pragma omp target enter data map(to: u[0:SIZE], dx[0:P * P])
  start = omp_get_wtime();
  /* offload the kernel with collapse clause */
#pragma omp target teams distribute parallel for collapse(2) \
    private(b, i, j, k, l)
  for (b = 0; b < BLOCKS; b++) {
    for (i = 0; i < P; i++) {
      for (j = 0; j < P; j++) {
        for (k = 0; k < P; k++) {
          double ur = 0.;
          double us = 0.;
          double ut = 0.;
          for (l = 0; l < P; l++) {
            ur += dx[IDX2(i, l)] * u[IDX4(b, l, j, k)];
            us += dx[IDX2(k, l)] * u[IDX4(b, i, l, k)];
            ut += dx[IDX2(j, l)] * u[IDX4(b, i, j, l)];
          }
          w[IDX4(b, i, j, k)] = ur * us * ut;
        }
      }
    }
  }
  end = omp_get_wtime();
#pragma omp target exit data map(from: w[0:SIZE])
  /* print result */
  printf("collapse-clause: w[0]=%lf time=%lf\n", w[0], end - start);
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/01_collapse/test_collapse_4levels.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <omp.h>
#define P 16
#define BLOCKS 8
#define SIZE (BLOCKS * P * P * P)
#define MAX 100
#define scaled_rand() ((rand() % MAX) / (1.0 * MAX))
#define IDX2(i, j) (i * P + j)
#define IDX4(b, i, j, k) (b * P * P * P + i * P * P + j * P + k)
/* Collapse-depth study: collapse(4) flattens all four b/i/j/k loops
   into one iteration space of BLOCKS*P*P*P work items. */
int main(void) {
  double w[SIZE];            /* output */
  double u[SIZE], dx[P * P]; /* input */
  int b, i, j, k, l;         /* loop counters */
  double start, end;         /* timers */
  omp_set_default_device(0);
  /* dummy target region, so as not to measure startup time. */
#pragma omp target
  { ; }
  /* initialize input with random values */
  srand(0);
  for (int i = 0; i < SIZE; i++)
    u[i] = scaled_rand();
  for (int i = 0; i < P * P; i++)
    dx[i] = scaled_rand();
  /* map data to device */
#pragma omp target enter data map(to: u[0:SIZE], dx[0:P * P])
  start = omp_get_wtime();
  /* offload the kernel with collapse clause */
#pragma omp target teams distribute parallel for collapse(4) \
    private(b, i, j, k, l)
  for (b = 0; b < BLOCKS; b++) {
    for (i = 0; i < P; i++) {
      for (j = 0; j < P; j++) {
        for (k = 0; k < P; k++) {
          double ur = 0.;
          double us = 0.;
          double ut = 0.;
          for (l = 0; l < P; l++) {
            ur += dx[IDX2(i, l)] * u[IDX4(b, l, j, k)];
            us += dx[IDX2(k, l)] * u[IDX4(b, i, l, k)];
            ut += dx[IDX2(j, l)] * u[IDX4(b, i, j, l)];
          }
          w[IDX4(b, i, j, k)] = ur * us * ut;
        }
      }
    }
  }
  end = omp_get_wtime();
#pragma omp target exit data map(from: w[0:SIZE])
  /* print result */
  printf("collapse-clause: w[0]=%lf time=%lf\n", w[0], end - start);
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/22_mkl_pad/dgemm_pad_c_01.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
// Snippet begin
#include "mkl.h"
#include "mkl_omp_offload.h"
#include <float.h>
#include <math.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#if defined(PRECISION)
#if PRECISION == 1
#define FLOAT double
#else
#define FLOAT float
#endif
#else
#define PRECISION 1
#define FLOAT double
#endif
#define index(i, j, ld) (((j) * (ld)) + (i))
#define RAND() ((FLOAT)rand() / (FLOAT)RAND_MAX * 2.0 - 1.0)
#define MALLOC(x) mkl_malloc((x), 64);
#define FREE mkl_free
#define MALLOC_CHECK(p) \
if (p == NULL) { \
fprintf(stderr, "%s:%d: memory allocation error\n", __FILE__, __LINE__); \
return EXIT_FAILURE; \
}
#ifndef max
#define max(a, b) (((a) > (b)) ? (a) : (b))
#endif
#ifndef min
#define min(a, b) (((a) < (b)) ? (a) : (b))
#endif
/* Print the m x n top-left corner of the column-major matrix P
   (leading dimension ld), one matrix row per output line. */
void printMat(FLOAT *P, int m, int n, int ld) {
  for (int row = 0; row < m; row++) {
    printf("\n");
    for (int col = 0; col < n; col++) {
      printf("%f ", P[index(row, col, ld)]);
    }
  }
  printf("\n");
}
void gemm_naive(int m, int n, int k, FLOAT alpha, const FLOAT *A, int lda,
const FLOAT *B, int ldb, FLOAT beta, FLOAT *C, int ldc) {
int i, j, kk;
FLOAT temp;
for (j = 0; j < n; j++) {
for (i = 0; i < m; i++) {
temp = 0.0;
for (kk = 0; kk < k; kk++) {
temp += alpha * A[index(i, kk, lda)] * B[index(kk, j, ldb)];
}
C[index(i, j, ldc)] = temp + beta * C[index(i, j, ldc)];
}
}
}
/* Infinity norm of (ref - ans) for two m x n column-major matrices
   with leading dimension ld: the maximum over all rows of the sum of
   absolute per-element differences in that row. */
FLOAT infinity_norm_error(int m, int n, int ld, const FLOAT *ans,
                          const FLOAT *ref) {
  FLOAT norm = 0.0;
  for (int i = 0; i < m; i++) {
    FLOAT row_sum = 0.0;
    for (int j = 0; j < n; j++) {
      const int ind = index(i, j, ld);
      row_sum += fabs(ref[ind] - ans[ind]);
    }
    norm = max(norm, row_sum);
  }
  return norm;
}
/* Wall-clock time in seconds since the Epoch, with microsecond
   resolution. On the (practically impossible) failure of
   gettimeofday() a diagnostic is printed and EXIT_FAILURE is
   returned, as before. */
double mysecond() {
  struct timeval tp;
  /* The struct timezone argument is obsolete and its behavior is
     unspecified by POSIX; pass NULL instead of a dummy struct. */
  int rc = gettimeofday(&tp, NULL);
  if (rc != 0) {
    fprintf(stderr, "%s:%d: timing error %d\n", __FILE__, __LINE__, rc);
    return EXIT_FAILURE;
  }
  return ((double)tp.tv_sec + (double)tp.tv_usec * 1.e-6);
}
#if defined(USE_MKL)
int dnum = 0;
#endif
#if PRECISION == 1
#define LD_ALIGN 256
#define LD_BIAS 8
#else
#define LD_ALIGN 512
#define LD_BIAS 16
#endif
#define HPL_PTR(ptr_, al_) ((((size_t)(ptr_) + (al_)-1) / (al_)) * (al_))
#if defined(PAD_LD)
/* Pad a leading dimension x to a cache-friendlier value:
   Rule 1: round x up to a multiple of LD_ALIGN;
   Rule 2: offset the result by LD_BIAS (subtract if the padded value
   still covers x, otherwise add) to avoid power-of-two strides. */
static inline int getld(int x) {
  int ld;
  ld = HPL_PTR(x, LD_ALIGN); // Rule 1
  if (ld - LD_BIAS >= x)
    ld -= LD_BIAS;
  else
    ld += LD_BIAS; // Rule 2
  return ld;
}
#else
/* Padding disabled: use the natural leading dimension unchanged. */
static inline int getld(int x) { return x; }
#endif
/* DGEMM/SGEMM offload benchmark: C = alpha*A*B + beta*C on the GPU via
   oneMKL's cblas_*gemm under "#pragma omp dispatch", optionally
   verified against the column-major gemm_naive() reference.
   Fix: the cblas calls previously passed CblasRowMajor, but every
   matrix in this program is stored column-major (see the index()
   macro, gemm_naive(), printMat() and the usage text), and the padded
   leading dimensions ldA/ldB/ldC are row counts — valid only for
   column-major layout. Use CblasColMajor so layout, leading
   dimensions and the verification reference all agree. */
int main(int argc, char **argv) {
  int i, j;
  if ((argc < 4) || (argc > 4 && argc < 8)) {
    printf("Performs a DGEMM test C = alpha*A*B + beta*C\n");
    printf("A matrix is MxK and B matrix is KxN\n");
    printf("All matrices are stored in column-major format\n");
    printf("Run as ./dgemm_cublas <M> <K> <N> [<alpha> <beta> <iterations> "
           "<verify>]\n");
    printf("Required inputs are:\n");
    printf("  M: number of rows of matrix A\n");
    printf("  K: number of cols of matrix A\n");
    printf("  N: number of cols of matrix B\n");
    printf("Optional inputs are (all must be provided if providing any):\n");
    printf("  alpha: scalar multiplier (default: 1.0)\n");
    printf("  beta: scalar multiplier (default: 0.0)\n");
    printf("  iterations: number of blocking DGEMM calls to perform "
           "(default: 10)\n");
    printf("  verify: set to 1 to check solution against CPU reference, "
           "not recommended for large M|K|N (default: 0)\n");
    return EXIT_FAILURE;
  }
  FLOAT alpha, beta;
  int niter, verify;
  /* problem dimensions: A is HA x WA, B is HB x WB */
  int HA = atoi(argv[1]);
  int WA = atoi(argv[2]);
  int WB = atoi(argv[3]);
  if ((HA == 0) || (WA == 0) || (WB == 0))
    exit(1);
  if (argc > 4) {
#if PRECISION == 1
    sscanf(argv[4], "%lf", &alpha);
    sscanf(argv[5], "%lf", &beta);
#else
    sscanf(argv[4], "%f", &alpha);
    sscanf(argv[5], "%f", &beta);
#endif
    niter = atoi(argv[6]);
    verify = atoi(argv[7]);
  } else {
    alpha = 1.0;
    beta = 0.0;
    niter = 10;
    verify = 0;
  }
#if PRECISION == 1
  printf("DGEMM performance test\n");
#else
  printf("SGEMM performance test\n");
#endif
  int HB = WA;
  int WC = WB;
  int HC = HA;
  /* leading dimensions, optionally padded by getld() (see PAD_LD) */
  int ldA = getld(HA);
  int ldB = getld(HB);
  int ldC = getld(HC);
  printf("M = %d, K = %d, N = %d, ldA = %d, ldB = %d, ldC = %d, alpha = %f, "
         "beta = %f, iterations = %d, verify? = %d\n",
         HA, WA, WB, ldA, ldB, ldC, alpha, beta, niter, verify);
  double start_t, end_t, tot_t = 0.0, best_t = DBL_MAX;
  /*ALLOCATE HOST ARRAYS*/
  FLOAT *A = (FLOAT *)MALLOC(ldA * WA * sizeof(FLOAT));
  MALLOC_CHECK(A);
  FLOAT *B = (FLOAT *)MALLOC(ldB * WB * sizeof(FLOAT));
  MALLOC_CHECK(B);
  FLOAT *C = (FLOAT *)MALLOC(ldC * WC * sizeof(FLOAT));
  MALLOC_CHECK(C);
  FLOAT *Cref = NULL;
  if (verify) {
    Cref = (FLOAT *)MALLOC(ldC * WC * sizeof(FLOAT));
    MALLOC_CHECK(Cref);
  }
  printf("\n---------------------\n");
  printf("Array A: %d x %d\n", ldA, WA);
  printf("Array B: %d x %d\n", ldB, WB);
  printf("Array C: %d x %d\n", ldC, WC);
  printf("---------------------\n");
  /*INITIALIZE WITH PSEUDO-RANDOM DATA*/
  srand(2864);
  for (j = 0; j < WA; j++)
    for (i = 0; i < HA; i++)
      A[index(i, j, ldA)] = RAND();
  for (j = 0; j < WB; j++)
    for (i = 0; i < HB; i++)
      B[index(i, j, ldB)] = RAND();
  if (beta != 0.0) {
    for (j = 0; j < WC; j++)
      for (i = 0; i < HC; i++)
        C[index(i, j, ldC)] = RAND();
  } else {
    for (j = 0; j < WC; j++)
      for (i = 0; i < HC; i++)
        C[index(i, j, ldC)] = 0.0;
  }
  if (verify) {
    /* keep an identical copy of C for the CPU reference run */
    for (j = 0; j < WC; j++)
      for (i = 0; i < HC; i++)
        Cref[index(i, j, ldC)] = C[index(i, j, ldC)];
  }
#if defined(USE_MKL)
  size_t sizea = (size_t)ldA * WA;
  size_t sizeb = (size_t)ldB * WB;
  size_t sizec = (size_t)ldC * WC;
#pragma omp target data map(to: A [0:sizea], B [0:sizeb]) map(tofrom: C [0:sizec])
  {
    // warm-up run
#pragma omp dispatch
#if PRECISION == 1
    cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, HA, WB, WA, alpha, A,
                ldA, B, ldB, beta, C, ldC);
#else
    cblas_sgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, HA, WB, WA, alpha, A,
                ldA, B, ldB, beta, C, ldC);
#endif
    // run gemm on gpu, using dispatch construct
    for (i = 0; i < niter; i++) {
      start_t = mysecond();
#pragma omp dispatch
#if PRECISION == 1
      cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, HA, WB, WA, alpha,
                  A, ldA, B, ldB, beta, C, ldC);
#else
      cblas_sgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, HA, WB, WA, alpha,
                  A, ldA, B, ldB, beta, C, ldC);
#endif
      end_t = mysecond();
      tot_t += end_t - start_t;
      best_t = min(best_t, end_t - start_t);
    } // end for
  }
#endif // end #pragma omp target data
  double tflop_count;
  tflop_count = (double)2.0 * HA * WB * WA;
  if (beta != 0.0)
    tflop_count += (double)HA * WB;
  tflop_count *= 1.E-12;
  printf("Total runtime for %d iterations: %f seconds.\n", niter, tot_t);
  printf("Mean TFLOP/s: %f\n", (double)niter * tflop_count / tot_t);
  printf("Best TFLOP/s: %f\n", (double)tflop_count / best_t);
  if (verify) {
    // compute reference solution on host (1 added to niter to account for the
    // warm-up run)
    for (i = 0; i < (niter + 1); i++) {
      gemm_naive(HA, WB, WA, alpha, A, ldA, B, ldB, beta, Cref, ldC);
    }
    printf("Error Matrix Infinity Norm = %f\n",
           infinity_norm_error(HA, WB, ldC, C, Cref));
  }
  FREE(A);
  FREE(B);
  FREE(C);
  if (verify)
    FREE(Cref);
  return EXIT_SUCCESS;
}
// Snippet end
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/02_teams_distribute/test_teams_distribute.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <omp.h>
#define P 16
#define BLOCKS 8
#define SIZE (BLOCKS * P * P * P)
#define MAX 100
#define scaled_rand() ((rand() % MAX) / (1.0 * MAX))
#define IDX2(i, j) (i * P + j)
#define IDX4(b, i, j, k) (b * P * P * P + i * P * P + j * P + k)
/* Uses "target teams distribute parallel for": the collapsed loop nest
   is spread over a league of teams (contrast with
   test_no_teams_distribute.cpp, which runs a single team). */
int main(void) {
  double w[SIZE];            /* output */
  double u[SIZE], dx[P * P]; /* input */
  int b, i, j, k, l;         /* loop counters */
  double start, end;         /* timers */
  omp_set_default_device(0);
  /* dummy target region, so as not to measure startup time. */
#pragma omp target
  { ; }
  /* initialize input with random values */
  srand(0);
  for (int i = 0; i < SIZE; i++)
    u[i] = scaled_rand();
  for (int i = 0; i < P * P; i++)
    dx[i] = scaled_rand();
  /* map data to device */
#pragma omp target enter data map(to: u[0:SIZE], dx[0:P * P])
  start = omp_get_wtime();
  /* offload the kernel */
#pragma omp target teams distribute parallel for collapse(4) \
    private(b, i, j, k, l)
  for (b = 0; b < BLOCKS; b++) {
    for (i = 0; i < P; i++) {
      for (j = 0; j < P; j++) {
        for (k = 0; k < P; k++) {
          double ur = 0.;
          double us = 0.;
          double ut = 0.;
          for (l = 0; l < P; l++) {
            ur += dx[IDX2(i, l)] * u[IDX4(b, l, j, k)];
            us += dx[IDX2(k, l)] * u[IDX4(b, i, l, k)];
            ut += dx[IDX2(j, l)] * u[IDX4(b, i, j, l)];
          }
          w[IDX4(b, i, j, k)] = ur * us * ut;
        }
      }
    }
  }
  end = omp_get_wtime();
#pragma omp target exit data map(from: w[0:SIZE])
  /* print result */
  printf("target region: w[0]=%lf time=%lf\n", w[0], end - start);
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/OpenMP/02_teams_distribute/test_no_teams_distribute.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <omp.h>
#define P 16
#define BLOCKS 8
#define SIZE (BLOCKS * P * P * P)
#define MAX 100
#define scaled_rand() ((rand() % MAX) / (1.0 * MAX))
#define IDX2(i, j) (i * P + j)
#define IDX4(b, i, j, k) (b * P * P * P + i * P * P + j * P + k)
/* Uses plain "target parallel for" (no teams distribute): the whole
   collapsed loop nest runs in a single team, limiting available
   device parallelism (contrast with test_teams_distribute.cpp). */
int main(void) {
  double w[SIZE];            /* output */
  double u[SIZE], dx[P * P]; /* input */
  int b, i, j, k, l;         /* loop counters */
  double start, end;         /* timers */
  omp_set_default_device(0);
  /* dummy target region, so as not to measure startup time. */
#pragma omp target
  { ; }
  /* initialize input with random values */
  srand(0);
  for (int i = 0; i < SIZE; i++)
    u[i] = scaled_rand();
  for (int i = 0; i < P * P; i++)
    dx[i] = scaled_rand();
  /* map data to device */
#pragma omp target enter data map(to: u[0:SIZE], dx[0:P * P])
  start = omp_get_wtime();
  /* offload the kernel */
#pragma omp target parallel for collapse(4) \
    private(b, i, j, k, l)
  for (b = 0; b < BLOCKS; b++) {
    for (i = 0; i < P; i++) {
      for (j = 0; j < P; j++) {
        for (k = 0; k < P; k++) {
          double ur = 0.;
          double us = 0.;
          double ut = 0.;
          for (l = 0; l < P; l++) {
            ur += dx[IDX2(i, l)] * u[IDX4(b, l, j, k)];
            us += dx[IDX2(k, l)] * u[IDX4(b, i, l, k)];
            ut += dx[IDX2(j, l)] * u[IDX4(b, i, j, l)];
          }
          w[IDX4(b, i, j, k)] = ur * us * ut;
        }
      }
    }
  }
  end = omp_get_wtime();
#pragma omp target exit data map(from: w[0:SIZE])
  /* print result */
  printf("target region: w[0]=%lf time=%lf\n", w[0], end - start);
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/kernels/launch.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
const int iters = 10000;
// Simple stopwatch: remembers its construction time and reports the
// elapsed wall-clock time in seconds as a double.
class Timer {
public:
  Timer() { start_ = std::chrono::steady_clock::now(); }
  // Seconds elapsed since this Timer was constructed.
  double Elapsed() {
    using Seconds = std::chrono::duration<double>;
    const auto delta = std::chrono::steady_clock::now() - start_;
    return std::chrono::duration_cast<Seconds>(delta).count();
  }

private:
  std::chrono::steady_clock::time_point start_;
};
// Launches `iters` empty kernels, blocking on each one, and prints the
// mean per-kernel submit+execute time.
void emptyKernel1(sycl::queue &q) {
  Timer timer;
  for (int rep = 0; rep < iters; ++rep) {
    q.parallel_for(1, [=](auto) {
       /* NOP */
     }).wait();
  }
  std::cout << " emptyKernel1: Elapsed time: " << timer.Elapsed() / iters
            << " sec\n";
} // end emptyKernel1
// Launches `iters` empty kernels WITHOUT waiting, so the printed mean
// reflects submission cost only (compare with emptyKernel1).
void emptyKernel2(sycl::queue &q) {
  Timer timer;
  for (int rep = 0; rep < iters; ++rep) {
    q.parallel_for(1, [=](auto) {
      /* NOP */
    });
  }
  std::cout << " emptyKernel2: Elapsed time: " << timer.Elapsed() / iters
            << " sec\n";
} // end emptyKernel2
// Driver: measure kernel-launch overhead with and without a blocking
// wait after every submission, on the default SYCL device.
int main() {
  sycl::queue q;
  emptyKernel1(q); // submit + wait per kernel
  emptyKernel2(q); // submit only
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/kernels/profiling-api.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
// Wall-clock stopwatch started at construction; Elapsed() yields the
// time since then in seconds (double precision).
class Timer {
public:
  Timer() { start_ = std::chrono::steady_clock::now(); }
  // Seconds elapsed since construction.
  double Elapsed() {
    using Seconds = std::chrono::duration<double>;
    const auto delta = std::chrono::steady_clock::now() - start_;
    return std::chrono::duration_cast<Seconds>(delta).count();
  }

private:
  std::chrono::steady_clock::time_point start_;
};
// Demonstrates SYCL event profiling: compares host-side submission /
// completion timings with the device-side timestamps recorded on the
// event.
// Fix: command_start/command_end are reported in nanoseconds, so the
// difference must be divided by 1e9 (not 1e6) for the printed value to
// be in seconds as the "secs" label states.
int main() {
  Timer timer;
  sycl::queue q{sycl::property::queue::enable_profiling()};
  auto evt = q.parallel_for(1000, [=](auto) {
    /* kernel statements here */
  });
  double t1 = timer.Elapsed(); // host time to submit
  evt.wait();
  double t2 = timer.Elapsed(); // host time to submit + execute
  auto startK =
      evt.get_profiling_info<sycl::info::event_profiling::command_start>();
  auto endK =
      evt.get_profiling_info<sycl::info::event_profiling::command_end>();
  std::cout << "Kernel submission time: " << t1 << "secs\n";
  std::cout << "Kernel submission + execution time: " << t2 << "secs\n";
  std::cout << "Kernel execution time: "
            << ((double)(endK - startK)) / 1000000000.0 << "secs\n";
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/conditionals/convolution-global-conditionals-min-max.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <iostream>
#include <random>
#include <vector>
/* 1-D convolution of an N-element signal with an M-tap kernel.
   Boundary handling uses sycl::max/sycl::min to clamp the kernel
   window at the array edges, avoiding per-element branches inside the
   inner loop (compare with the zero-padded variant). */
int main() {
  constexpr size_t N = 8192 * 8192;
  constexpr size_t M = 257;
  std::vector<int> input(N);
  std::vector<int> output(N);
  std::vector<int> kernel(M);
  srand(2009);
  for (size_t i = 0; i < N; ++i) {
    input[i] = rand();
  }
  for (size_t i = 0; i < M; ++i) {
    kernel[i] = rand();
  }
  sycl::queue q{sycl::gpu_selector_v,
                sycl::property::queue::enable_profiling{}};
  std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>()
            << "\n";
  {
    // Snippet begin
    sycl::buffer<int> ibuf(input.data(), N);
    sycl::buffer<int> obuf(output.data(), N);
    sycl::buffer<int> kbuf(kernel.data(), M);
    auto e = q.submit([&](auto &h) {
      sycl::accessor iacc(ibuf, h, sycl::read_only);
      sycl::accessor oacc(obuf, h);
      sycl::accessor kacc(kbuf, h, sycl::read_only);
      h.parallel_for(sycl::nd_range(sycl::range{N}, sycl::range{256}),
                     [=](sycl::nd_item<1> it) {
                       int i = it.get_global_linear_id();
                       int t = 0;
                       /* clamp the kernel window [startj, endj) and the
                          matching input start index so out-of-range
                          elements are skipped rather than tested */
                       int startj = sycl::max<int>(M / 2 - i, 0);
                       int endj = sycl::min<int>(M / 2 + N - i, M);
                       int startk = sycl::max<int>(i - M / 2, 0);
                       for (int j = startj, k = startk; j < endj; j++, k++) {
                         t += iacc[k] * kacc[j];
                       }
                       oacc[i] = t;
                     });
    });
    // Snippet end
    q.wait();
    size_t kernel_ns = (e.template get_profiling_info<
                            sycl::info::event_profiling::command_end>() -
                        e.template get_profiling_info<
                            sycl::info::event_profiling::command_start>());
    std::cout << "Kernel Execution Time Average: total = " << kernel_ns * 1e-6
              << " msec" << std::endl;
  }
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/conditionals/convolution-global-conditionals.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <iostream>
#include <random>
#include <vector>
/* 1-D convolution of an N-element signal with an M-tap kernel.
   The input is zero-padded by M/2 elements on each side up front, so
   the device inner loop needs no boundary conditionals at all
   (compare with the min/max-clamped variant). */
int main() {
  constexpr size_t N = 8192 * 8192;
  constexpr size_t M = 257;
  sycl::queue q{sycl::gpu_selector_v,
                sycl::property::queue::enable_profiling{}};
  std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>()
            << "\n";
  // Snippet begin
  std::vector<int> input(N + M / 2 + M / 2);
  std::vector<int> output(N);
  std::vector<int> kernel(M);
  srand(2009);
  /* real data lives in [M/2, N + M/2); the halo regions are zeroed */
  for (size_t i = M / 2; i < N + M / 2; ++i) {
    input[i] = rand();
  }
  for (size_t i = 0; i < M / 2; ++i) {
    input[i] = 0;
    input[i + N + M / 2] = 0;
  }
  for (size_t i = 0; i < M; ++i) {
    kernel[i] = rand();
  }
  {
    sycl::buffer<int> ibuf(input.data(), N + M / 2 + M / 2);
    sycl::buffer<int> obuf(output.data(), N);
    sycl::buffer<int> kbuf(kernel.data(), M);
    auto e = q.submit([&](auto &h) {
      sycl::accessor iacc(ibuf, h, sycl::read_only);
      sycl::accessor oacc(obuf, h);
      sycl::accessor kacc(kbuf, h, sycl::read_only);
      h.parallel_for(sycl::nd_range(sycl::range{N}, sycl::range{256}),
                     [=](sycl::nd_item<1> it) {
                       int i = it.get_global_linear_id();
                       int t = 0;
                       /* branch-free: padding guarantees iacc[i + j]
                          is always in range */
                       for (size_t j = 0; j < M; ++j) {
                         t += iacc[i + j] * kacc[j];
                       }
                       oacc[i] = t;
                     });
    });
    q.wait();
    size_t kernel_ns = (e.template get_profiling_info<
                            sycl::info::event_profiling::command_end>() -
                        e.template get_profiling_info<
                            sycl::info::event_profiling::command_start>());
    std::cout << "Kernel Execution Time Average: total = " << kernel_ns * 1e-6
              << " msec" << std::endl;
  }
  // Snippet end
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/multiple-queue-submission/multi-queue-heavy-kernel.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <array>
#include <chrono>
#include <iostream>
// Array type and data size for this example.
constexpr size_t array_size = (1 << 15);
typedef std::array<int, array_size> IntArray;
#define iter 10
// Submits 3*iter reduction-style vector-add kernels, three per
// iteration round-robin across q1/q2/q3, waits on all three queues,
// and returns the elapsed steady_clock tick count for the whole batch.
// Used to compare same-queue vs. same-context vs. separate-context
// submission (see main).
int VectorAdd(sycl::queue &q1, sycl::queue &q2, sycl::queue &q3,
              const IntArray &a, const IntArray &b) {
  sycl::buffer a_buf(a);
  sycl::buffer b_buf(b);
  // One 256-element result buffer per kernel, heap-allocated so all of
  // them stay alive until every queue has been waited on.
  sycl::buffer<int> *sum_buf[3 * iter];
  for (size_t i = 0; i < (3 * iter); i++)
    sum_buf[i] = new sycl::buffer<int>(256);
  size_t num_groups = 1;
  size_t wg_size = 256;
  auto start = std::chrono::steady_clock::now();
  for (int i = 0; i < iter; i++) {
    // kernel 3*i on q1
    q1.submit([&](auto &h) {
      sycl::accessor a_acc(a_buf, h, sycl::read_only);
      sycl::accessor b_acc(b_buf, h, sycl::read_only);
      auto sum_acc = sum_buf[3 * i]->get_access<sycl::access::mode::write>(h);
      h.parallel_for(sycl::nd_range<1>(num_groups * wg_size, wg_size),
                     [=](sycl::nd_item<1> index) {
                       size_t loc_id = index.get_local_id();
                       sum_acc[loc_id] = 0;
                       for (int j = 0; j < 1000; j++)
                         for (size_t i = loc_id; i < array_size; i += wg_size) {
                           sum_acc[loc_id] += a_acc[i] + b_acc[i];
                         }
                     });
    });
    // kernel 3*i+1 on q2
    q2.submit([&](auto &h) {
      sycl::accessor a_acc(a_buf, h, sycl::read_only);
      sycl::accessor b_acc(b_buf, h, sycl::read_only);
      auto sum_acc =
          sum_buf[3 * i + 1]->get_access<sycl::access::mode::write>(h);
      h.parallel_for(sycl::nd_range<1>(num_groups * wg_size, wg_size),
                     [=](sycl::nd_item<1> index) {
                       size_t loc_id = index.get_local_id();
                       sum_acc[loc_id] = 0;
                       for (int j = 0; j < 1000; j++)
                         for (size_t i = loc_id; i < array_size; i += wg_size) {
                           sum_acc[loc_id] += a_acc[i] + b_acc[i];
                         }
                     });
    });
    // kernel 3*i+2 on q3
    q3.submit([&](auto &h) {
      sycl::accessor a_acc(a_buf, h, sycl::read_only);
      sycl::accessor b_acc(b_buf, h, sycl::read_only);
      auto sum_acc =
          sum_buf[3 * i + 2]->get_access<sycl::access::mode::write>(h);
      h.parallel_for(sycl::nd_range<1>(num_groups * wg_size, wg_size),
                     [=](sycl::nd_item<1> index) {
                       size_t loc_id = index.get_local_id();
                       sum_acc[loc_id] = 0;
                       for (int j = 0; j < 1000; j++)
                         for (size_t i = loc_id; i < array_size; i += wg_size) {
                           sum_acc[loc_id] += a_acc[i] + b_acc[i];
                         }
                     });
    });
  }
  q1.wait();
  q2.wait();
  q3.wait();
  auto end = std::chrono::steady_clock::now();
  // NOTE(review): steady_clock's tick period is implementation-defined
  // (commonly nanoseconds), so the "u-secs" label presumably misstates
  // the unit of .count() -- confirm before comparing absolute numbers.
  std::cout << "Vector add completed on device - took " << (end - start).count()
            << " u-secs\n";
  // check results
  // (no numerical verification is actually performed; the per-kernel
  // result buffers are simply released here)
  for (size_t i = 0; i < (3 * iter); i++)
    delete sum_buf[i];
  return ((end - start).count());
} // end VectorAdd
// Set every element of the array to 1.
void InitializeArray(IntArray &a) { a.fill(1); }
// Zero every element of the array.
void Initialize(IntArray &a) { a.fill(0); }
IntArray a, b;
int main() {
sycl::queue q(sycl::default_selector_v);
InitializeArray(a);
InitializeArray(b);
std::cout << "Running on device: "
<< q.get_device().get_info<sycl::info::device::name>() << "\n";
std::cout << "Vector size: " << a.size() << "\n";
// jit the code
VectorAdd(q, q, q, a, b);
std::cout << "Submission to same queue\n";
// Submission to same queue
VectorAdd(q, q, q, a, b);
// End Submission to same queue
std::cout << "Submission to different queues with same context\n";
// Submission to different queues with same context
sycl::queue q1(sycl::default_selector_v);
sycl::queue q2(q1.get_context(), sycl::default_selector_v);
sycl::queue q3(q1.get_context(), sycl::default_selector_v);
VectorAdd(q1, q2, q3, a, b);
// End Submission to different queues with same context
std::cout << "Submission to diffferent queues with different contexts\n";
// Submission to different queues with different contexts
sycl::queue q4(sycl::default_selector_v);
sycl::queue q5(sycl::default_selector_v);
sycl::queue q6(sycl::default_selector_v);
VectorAdd(q4, q5, q6, a, b);
// End Submission to different queues with different contexts
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/multiple-queue-submission/multi-queue-light-kernel.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <array>
#include <chrono>
#include <iostream>
// Array type and data size for this example.
constexpr size_t array_size = (1 << 15);
typedef std::array<int, array_size> IntArray;
#define iter 10
// Light-kernel variant: adds vectors a and b with three short kernels per
// iteration submitted to q1/q2/q3, to expose queue-submission overhead.
// Prints and returns the elapsed wall-clock time in microseconds.
int VectorAdd(sycl::queue &q1, sycl::queue &q2, sycl::queue &q3,
              const IntArray &a, const IntArray &b) {
  sycl::buffer a_buf(a);
  sycl::buffer b_buf(b);
  // One 256-element result buffer per submission (3 per iteration).
  sycl::buffer<int> *sum_buf[3 * iter];
  for (size_t i = 0; i < (3 * iter); i++)
    sum_buf[i] = new sycl::buffer<int>(256);
  size_t num_groups = 1;
  size_t wg_size = 256;
  auto start = std::chrono::steady_clock::now();
  for (int i = 0; i < iter; i++) {
    q1.submit([&](auto &h) {
      // Input accessors
      sycl::accessor a_acc(a_buf, h, sycl::read_only);
      sycl::accessor b_acc(b_buf, h, sycl::read_only);
      // Output accessor
      auto sum_acc = sum_buf[3 * i]->get_access<sycl::access::mode::write>(h);
      h.parallel_for(sycl::nd_range<1>(num_groups * wg_size, wg_size),
                     [=](sycl::nd_item<1> index) {
                       size_t loc_id = index.get_local_id();
                       sum_acc[loc_id] = 0;
                       for (size_t i = loc_id; i < array_size; i += wg_size) {
                         sum_acc[loc_id] += a_acc[i] + b_acc[i];
                       }
                     });
    });
    q2.submit([&](auto &h) {
      sycl::accessor a_acc(a_buf, h, sycl::read_only);
      sycl::accessor b_acc(b_buf, h, sycl::read_only);
      auto sum_acc =
          sum_buf[3 * i + 1]->get_access<sycl::access::mode::write>(h);
      h.parallel_for(sycl::nd_range<1>(num_groups * wg_size, wg_size),
                     [=](sycl::nd_item<1> index) {
                       size_t loc_id = index.get_local_id();
                       sum_acc[loc_id] = 0;
                       for (size_t i = loc_id; i < array_size; i += wg_size) {
                         sum_acc[loc_id] += a_acc[i] + b_acc[i];
                       }
                     });
    });
    q3.submit([&](auto &h) {
      sycl::accessor a_acc(a_buf, h, sycl::read_only);
      sycl::accessor b_acc(b_buf, h, sycl::read_only);
      auto sum_acc =
          sum_buf[3 * i + 2]->get_access<sycl::access::mode::write>(h);
      h.parallel_for(sycl::nd_range<1>(num_groups * wg_size, wg_size),
                     [=](sycl::nd_item<1> index) {
                       size_t loc_id = index.get_local_id();
                       sum_acc[loc_id] = 0;
                       for (size_t i = loc_id; i < array_size; i += wg_size) {
                         sum_acc[loc_id] += a_acc[i] + b_acc[i];
                       }
                     });
    });
  }
  q1.wait();
  q2.wait();
  q3.wait();
  auto end = std::chrono::steady_clock::now();
  // BUG FIX: raw steady_clock ticks (typically ns) were printed as "u-secs";
  // convert explicitly to microseconds.
  auto elapsed_us =
      std::chrono::duration_cast<std::chrono::microseconds>(end - start)
          .count();
  std::cout << "Vector add completed on device - took " << elapsed_us
            << " u-secs\n";
  for (size_t i = 0; i < (3 * iter); i++)
    delete sum_buf[i];
  return static_cast<int>(elapsed_us);
} // end VectorAdd
// Set every element of the array to 1.
void InitializeArray(IntArray &a) { a.fill(1); }
// Zero every element of the array.
void Initialize(IntArray &a) { a.fill(0); }
IntArray a, b;
int main() {
sycl::queue q(sycl::default_selector_v);
InitializeArray(a);
InitializeArray(b);
std::cout << "Running on device: "
<< q.get_device().get_info<sycl::info::device::name>() << "\n";
std::cout << "Vector size: " << a.size() << "\n";
// jit the code
VectorAdd(q, q, q, a, b);
std::cout << "Submission to same queue\n";
// Submission to same queue
VectorAdd(q, q, q, a, b);
// End Submission to same queue
std::cout << "Submission to different queues with same context\n";
// Submission to different queues with same context
sycl::queue q1(sycl::default_selector_v);
sycl::queue q2(q1.get_context(), sycl::default_selector_v);
sycl::queue q3(q1.get_context(), sycl::default_selector_v);
VectorAdd(q1, q2, q3, a, b);
// End Submission to different queues with same context
std::cout << "Submission to diffferent queues with different contexts\n";
// Submission to different queues with different contexts
sycl::queue q4(sycl::default_selector_v);
sycl::queue q5(sycl::default_selector_v);
sycl::queue q6(sycl::default_selector_v);
VectorAdd(q4, q5, q6, a, b);
// End Submission to different queues with different contexts
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/explicit-scaling/01_memory_order/memory_order.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
#include <CL/sycl.hpp>
#include <chrono>
#include <iostream>
#include <string>
#include <unistd.h>
#include <vector>
#ifndef SCALE
#define SCALE 1
#endif
#define N 1024*SCALE
#define SG_SIZE 32
// Number of repetitions
constexpr int repetitions = 16;
constexpr int warm_up_token = -1;
static auto exception_handler = [](sycl::exception_list eList) {
for (std::exception_ptr const &e : eList) {
try {
std::rethrow_exception(e);
} catch (std::exception const &e) {
std::cout << "Failure" << std::endl;
std::terminate();
}
}
};
// Wall-clock stopwatch: construction records the start instant; Elapsed()
// reports the seconds since then as a double.
class Timer {
public:
  Timer() : start_(std::chrono::steady_clock::now()) {}

  // Seconds elapsed since construction.
  double Elapsed() {
    const auto stop = std::chrono::steady_clock::now();
    return std::chrono::duration<double>(stop - start_).count();
  }

private:
  std::chrono::steady_clock::time_point start_;
};
#ifdef FLUSH_CACHE
// Overwrite flush_buf on the device so cached lines belonging to the timed
// buffer are evicted between measurement iterations.
void flush_cache(sycl::queue &q, sycl::buffer<int> &flush_buf) {
  // FIX: byte_size() replaces the deprecated get_size(), matching the
  // spelling used elsewhere in this file.
  auto flush_size = flush_buf.byte_size() / sizeof(int);
  auto ev = q.submit([&](auto &h) {
    // FIX: sycl::no_init is the SYCL 2020 spelling (the file's kernels use
    // it); previous buffer contents are not needed.
    sycl::accessor flush_acc(flush_buf, h, sycl::write_only, sycl::no_init);
    h.parallel_for(flush_size, [=](auto index) { flush_acc[index] = 1; });
  });
  ev.wait_and_throw();
}
#endif
// Measures the latency of 1024 back-to-back atomic fetch_adds per work-item
// under either relaxed or acq_rel memory order (selected at compile time).
// Buffers are passed by value intentionally: SYCL buffers are shared handles.
// `res`/`sum_buf` are currently unused by the kernel but kept for API
// stability with callers.
void atomicLatencyTest(sycl::queue &q, sycl::buffer<int> inbuf,
                       sycl::buffer<int> flush_buf, int &res, int iter) {
  sycl::buffer<int> sum_buf(&res, 1);
  double elapsed = 0;
  // k == warm_up_token (-1) is a warm-up pass that JITs the kernel; it must
  // not count toward the measured time.
  for (int k = warm_up_token; k < iter; k++) {
#ifdef FLUSH_CACHE
    flush_cache(q, flush_buf);
#endif
    Timer timer;
    q.submit([&](auto &h) {
      sycl::accessor buf_acc(inbuf, h, sycl::write_only, sycl::no_init);
      h.parallel_for(sycl::nd_range<1>(sycl::range<>{N}, sycl::range<>{SG_SIZE}), [=](sycl::nd_item<1> item)
        [[intel::reqd_sub_group_size(SG_SIZE)]] {
        int i = item.get_global_id(0);
        for (int ii = 0; ii < 1024; ++ii) {
          auto v =
#ifdef ATOMIC_RELAXED
            sycl::atomic_ref<int, sycl::memory_order::relaxed,
                             sycl::memory_scope::device,
                             sycl::access::address_space::global_space>(buf_acc[i]);
#else
            sycl::atomic_ref<int, sycl::memory_order::acq_rel,
                             sycl::memory_scope::device,
                             sycl::access::address_space::global_space>(buf_acc[i]);
#endif
          v.fetch_add(1);
        }
      });
    });
    q.wait();
    // BUG FIX: this previously tested `iter == warm_up_token`, which never
    // matches (iter is the trip count), so the warm-up pass was included in
    // the measurement. Compare the loop variable k instead.
    elapsed += (k == warm_up_token) ? 0 : timer.Elapsed();
  }
  std::cout << "SUCCESS: Time atomicLatency = " << elapsed << "s" << std::endl;
}
// Driver: fills N-element data/flush arrays with 1, wraps them in
// host-backed buffers, and runs the atomic-latency measurement with 16 timed
// iterations.
int main(int argc, char *argv[]) {
  sycl::queue q{sycl::gpu_selector_v, exception_handler};
  std::cout << q.get_device().get_info<sycl::info::device::name>() << std::endl;
  std::vector<int> data(N);
  std::vector<int> extra(N); // backing store for the cache-flush buffer
  for (size_t i = 0; i < N ; ++i) {
    data[i] = 1;
    extra[i] = 1;
  }
  int res=0;
  // use_host_ptr lets the buffers reuse the vectors' storage (no extra copy).
  const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
  sycl::buffer<int> buf(data.data(), data.size(), props);
  sycl::buffer<int> flush_buf(extra.data(), extra.size(), props);
  atomicLatencyTest(q, buf, flush_buf, res, 16);
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/explicit-scaling/13_openmp_explicit_subdevice/openmp_explicit_subdevice.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <assert.h>
#include <iostream>
#include <omp.h>
#include <stdint.h>
#ifndef NUM_SUBDEVICES
#define NUM_SUBDEVICES 1
#endif
#ifndef DEVKIND
#define DEVKIND 0
#endif
// Per-subdevice pointer bundle: p[i] is the host allocation mapped onto
// subdevice i.
template <int num_subdevices> struct mptr {
  float *p[num_subdevices];
};
// Streams c = a + b across NUM_SUBDEVICES subdevices of one OpenMP offload
// device (Intel `subdevice` clause extension), timing the achieved bandwidth.
int main(int argc, char **argv) {
  constexpr int SIZE = 8e6;
  constexpr int SIMD_SIZE = 32;
  constexpr std::size_t TOTAL_SIZE = SIZE * SIMD_SIZE;
  constexpr int num_subdevices = NUM_SUBDEVICES;
  mptr<num_subdevices> device_ptr_a;
  mptr<num_subdevices> device_ptr_b;
  mptr<num_subdevices> device_ptr_c;
  const int device_id = omp_get_default_device();
  std::cout << "device_id = " << device_id << std::endl;
  // Allocate one host array per subdevice and map it onto that subdevice.
  for (int sdev = 0; sdev < num_subdevices; ++sdev) {
    device_ptr_a.p[sdev] =
        static_cast<float *>(malloc(TOTAL_SIZE * sizeof(float)));
    device_ptr_b.p[sdev] =
        static_cast<float *>(malloc(TOTAL_SIZE * sizeof(float)));
    device_ptr_c.p[sdev] =
        static_cast<float *>(malloc(TOTAL_SIZE * sizeof(float)));
#pragma omp target enter data map(alloc : device_ptr_a.p[sdev] [0:TOTAL_SIZE]) device(device_id) subdevice(DEVKIND, sdev)
#pragma omp target enter data map(alloc : device_ptr_b.p[sdev] [0:TOTAL_SIZE]) device(device_id) subdevice(DEVKIND, sdev)
#pragma omp target enter data map(alloc : device_ptr_c.p[sdev] [0:TOTAL_SIZE]) device(device_id) subdevice(DEVKIND, sdev)
  }
  std::cout << "memory footprint per GPU = "
            << 3 * (std::size_t)(TOTAL_SIZE) * sizeof(float) * 1E-9 << " GB"
            << std::endl;
  // Initialize a and b on each subdevice.
  // BUG FIX: the subdevice clauses below used the undefined macro LEVEL;
  // the macro defined at the top of this file is DEVKIND.
#pragma omp parallel for
  for (int sdev = 0; sdev < num_subdevices; ++sdev) {
    float *a = device_ptr_a.p[sdev];
    float *b = device_ptr_b.p[sdev];
#pragma omp target teams distribute parallel for device(device_id) subdevice(DEVKIND, sdev)
    for (int i = 0; i < TOTAL_SIZE; ++i) {
      a[i] = i + 0.5;
      b[i] = i - 0.5;
    }
  }
  const int no_max_rep = 200;
  double time = 0.0;
  // Iteration 0 is a warm-up; timing starts at irep == 1.
  for (int irep = 0; irep < no_max_rep + 1; ++irep) {
    if (irep == 1)
      time = omp_get_wtime();
#pragma omp parallel for num_threads(num_subdevices)
    for (int sdev = 0; sdev < num_subdevices; ++sdev) {
      float *a = device_ptr_a.p[sdev];
      float *b = device_ptr_b.p[sdev];
      float *c = device_ptr_c.p[sdev];
#pragma omp target teams distribute parallel for device(device_id) subdevice(DEVKIND, sdev)
      for (int i = 0; i < TOTAL_SIZE; ++i) {
        c[i] = a[i] + b[i];
      }
    }
  }
  time = omp_get_wtime() - time;
  time = time / no_max_rep;
  const std::size_t streamed_bytes =
      3 * (std::size_t)(TOTAL_SIZE)*num_subdevices * sizeof(float);
  std::cout << "bandwidth = " << (streamed_bytes / time) * 1E-9 << " GB/s"
            << std::endl;
  std::cout << "time = " << time << " s" << std::endl;
  std::cout.precision(10);
  // Copy results back and spot-print a few elements per subdevice.
  for (int sdev = 0; sdev < num_subdevices; ++sdev) {
#pragma omp target update from(device_ptr_c.p[sdev][:TOTAL_SIZE]) device(device_id) subdevice(DEVKIND, sdev)
    std::cout << "-GPU: device id = : " << sdev << std::endl;
    std::cout << "target result:" << std::endl;
    std::cout << "c[" << 0 << "] = " << device_ptr_c.p[sdev][0] << std::endl;
    std::cout << "c[" << SIMD_SIZE - 1
              << "] = " << device_ptr_c.p[sdev][SIMD_SIZE - 1] << std::endl;
    std::cout << "c[" << TOTAL_SIZE / 2
              << "] = " << device_ptr_c.p[sdev][TOTAL_SIZE / 2] << std::endl;
    std::cout << "c[" << TOTAL_SIZE - 1
              << "] = " << device_ptr_c.p[sdev][TOTAL_SIZE - 1] << std::endl;
  }
  // NOTE(review): this assert compares (int)c[i] with (int)(c[i] + a[i]*b[i]);
  // it only makes sense if the product is meant to vanish under float
  // rounding. Confirm whether a plain c == a + b check was intended.
  for (int sdev = 0; sdev < num_subdevices; ++sdev) {
    for (int i = 0; i < TOTAL_SIZE; ++i) {
      assert((int)(device_ptr_c.p[sdev][i]) ==
             (int)(device_ptr_c.p[sdev][i] +
                   device_ptr_a.p[sdev][i] * device_ptr_b.p[sdev][i]));
    }
  }
  for (int sdev = 0; sdev < num_subdevices; ++sdev) {
    // BUG FIX: the third release previously mapped device_ptr_a again,
    // leaving device_ptr_c mapped. Also free the host allocations.
#pragma omp target exit data map(release : device_ptr_a.p[sdev][:TOTAL_SIZE])
#pragma omp target exit data map(release : device_ptr_b.p[sdev][:TOTAL_SIZE])
#pragma omp target exit data map(release : device_ptr_c.p[sdev][:TOTAL_SIZE])
    free(device_ptr_a.p[sdev]);
    free(device_ptr_b.p[sdev]);
    free(device_ptr_c.p[sdev]);
  }
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/explicit-scaling/04_multi_tile_subdevices/multi_tile_subdevices.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// Snippet begin
try {
vector<device> SubDevices = ...;
auto C = context(SubDevices);
for (auto &D : SubDevices) {
// All queues share the same context, data can be shared across
// queues.
auto Q = queue(C, D);
Q.submit([&](handler &cgh) { ... });
}
}
// Snippet end
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/explicit-scaling/14_explicit_subsubdevice/ccs.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <iostream>
using namespace sycl;
int main() {
sycl::device d(sycl::gpu_selector_v);
std::vector<sycl::device> *subdevices = new std::vector<sycl::device>();
std::vector<sycl::device> *CCS = new std::vector<sycl::device>();
auto part_prop = d.get_info<sycl::info::device::partition_properties>();
size_t num_of_tiles;
size_t num_of_ccs;
if (part_prop.empty()) {
num_of_tiles = 1;
} else {
for (int i = 0; i < part_prop.size(); i++) {
if (part_prop[i] ==
sycl::info::partition_property::partition_by_affinity_domain) {
auto sub_devices = d.create_sub_devices<
sycl::info::partition_property::partition_by_affinity_domain>(
sycl::info::partition_affinity_domain::numa);
num_of_tiles = sub_devices.size();
for (int j = 0; j < num_of_tiles; j++)
subdevices->push_back(sub_devices[j]);
break;
} else {
num_of_tiles = 1;
}
}
}
std::cout << "List of Tiles:\n";
for (int i = 0; i < num_of_tiles; i++) {
std::cout << i << ") Device name: "
<< (*subdevices)[i].get_info<sycl::info::device::name>() << "\n";
std::cout
<< " Max Compute Units: "
<< (*subdevices)[i].get_info<sycl::info::device::max_compute_units>()
<< "\n";
}
for (int j = 0; j < num_of_tiles; j++) {
auto part_prop1 =
(*subdevices)[j].get_info<sycl::info::device::partition_properties>();
if (part_prop1.empty()) {
std::cout << "No partition support\n";
} else {
for (int i = 0; i < part_prop1.size(); i++) {
if (part_prop1[i] ==
sycl::info::partition_property::partition_by_affinity_domain) {
auto ccses =
(*subdevices)[j]
.create_sub_devices<sycl::info::partition_property::
partition_by_affinity_domain>(
sycl::info::partition_affinity_domain::numa);
num_of_ccs = ccses.size();
for (int k = 0; k < num_of_ccs; k++)
CCS->push_back(ccses[k]);
break;
} else {
num_of_ccs = 1;
}
}
}
}
std::cout << "List of Compute Command Streamers:\n";
for (int i = 0; i < CCS->size(); i++) {
std::cout << i << ") Device name: "
<< (*CCS)[i].get_info<sycl::info::device::name>() << "\n";
std::cout << " Max Compute Units: "
<< (*CCS)[i].get_info<sycl::info::device::max_compute_units>()
<< "\n";
}
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/explicit-scaling/11_openmp_root_device/openmp_root_device.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// Snippet begin
// Offload the whole loop to the default (root) device; the runtime fans the
// work out across its subdevices, if any.
int root_id = omp_get_default_device();
#pragma omp target teams distribute parallel for device(root_id) map(…)
for (int i = 0; i < N; i++) { // FIX: the for-clauses were comma-separated
  …
}
// Snippet end
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/explicit-scaling/12_openmp_multi_roots/openmp_multi_roots.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// Snippet begin
Int num_devices = omp_get_num_devices();
#pragma omp parallel for
for (int root_id = 0; root_id < num_devices; root_id++) {
#pragma omp target teams distribute parallel for device(root_id) map(…)
for (int i = lb(root_id); I < ub(root_id); i++) {
…
}
}
// Snippet end
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/explicit-scaling/08_openmp_usm/openmp_usm.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
// Snippet begin
if (exists(“#pragma omp requires unified_shared_memory”)) {
if (LIBOMPTARGET_USM_HOST_MEM == 1)
return "host memory";
else
return "shared memory";
} else {
return "device memory";
}
// Snippet end
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/explicit-scaling/07_explicit_subdevice/explicit_subdevice.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: MIT
#include <CL/sycl.hpp>
#include <algorithm>
#include <cassert>
#include <cfloat>
#include <iostream>
#include <string>
using namespace sycl;
constexpr int num_runs = 10;
constexpr size_t scalar = 3;
// STREAM-style triad (C = A + scalar * B) run concurrently on the two NUMA
// tiles of one GPU, each tile processing half the array on its own queue.
// Returns the slower tile's fastest kernel time in ns, or (cl_ulong)-1 if a
// result mismatch is detected.
cl_ulong triad(size_t array_size) {
  // BUG FIX: the minima were initialized with DBL_MAX; converting a double
  // larger than the integer's range to cl_ulong is undefined behavior.
  // Use the all-ones maximum of the unsigned type instead.
  cl_ulong min_time_ns0 = (cl_ulong)-1;
  cl_ulong min_time_ns1 = (cl_ulong)-1;
  device dev = device(gpu_selector_v);
  std::vector<device> subdev = {};
  subdev = dev.create_sub_devices<sycl::info::partition_property::
      partition_by_affinity_domain>(sycl::info::partition_affinity_domain::numa);
  queue q[2] = {queue(subdev[0], property::queue::enable_profiling{}),
                queue(subdev[1], property::queue::enable_profiling{})};
  std::cout << "Running on device: " <<
            q[0].get_device().get_info<info::device::name>() << "\n";
  std::cout << "Running on device: " <<
            q[1].get_device().get_info<info::device::name>() << "\n";
  // BUG FIX: malloc_shared<T>(count, q) takes an ELEMENT count; the original
  // passed count * sizeof(double), over-allocating 8x.
  double *A0 = malloc_shared<double>(array_size / 2, q[0]);
  double *B0 = malloc_shared<double>(array_size / 2, q[0]);
  double *C0 = malloc_shared<double>(array_size / 2, q[0]);
  double *A1 = malloc_shared<double>(array_size / 2, q[1]);
  double *B1 = malloc_shared<double>(array_size / 2, q[1]);
  double *C1 = malloc_shared<double>(array_size / 2, q[1]);
  for ( int i = 0; i < array_size/2; i++) {
    A0[i]= 1.0; B0[i]= 2.0; C0[i]= 0.0;
    A1[i]= 1.0; B1[i]= 2.0; C1[i]= 0.0;
  }
  for (int i = 0; i < num_runs; i++) {
    auto q0_event = q[0].submit([&](handler& h) {
      h.parallel_for(array_size/2, [=](id<1> idx) {
        C0[idx] = A0[idx] + B0[idx] * scalar;
      });
    });
    auto q1_event = q[1].submit([&](handler& h) {
      h.parallel_for(array_size/2, [=](id<1> idx) {
        C1[idx] = A1[idx] + B1[idx] * scalar;
      });
    });
    q[0].wait();
    q[1].wait();
    cl_ulong exec_time_ns0 =
        q0_event.get_profiling_info<info::event_profiling::command_end>() -
        q0_event.get_profiling_info<info::event_profiling::command_start>();
    std::cout << "Tile-0 Execution time (iteration " << i << ") [sec]: "
              << (double)exec_time_ns0 * 1.0E-9 << "\n";
    min_time_ns0 = std::min(min_time_ns0, exec_time_ns0);
    cl_ulong exec_time_ns1 =
        q1_event.get_profiling_info<info::event_profiling::command_end>() -
        q1_event.get_profiling_info<info::event_profiling::command_start>();
    std::cout << "Tile-1 Execution time (iteration " << i << ") [sec]: "
              << (double)exec_time_ns1 * 1.0E-9 << "\n";
    min_time_ns1 = std::min(min_time_ns1, exec_time_ns1);
  }
  // Check correctness
  bool error = false;
  for ( int i = 0; i < array_size/2; i++) {
    if ((C0[i] != A0[i] + scalar * B0[i]) || (C1[i] != A1[i] + scalar * B1[i])) {
      std::cout << "\nResult incorrect (element " << i << " is " << C0[i] << ")!\n";
      error = true;
    }
  }
  sycl::free(A0, q[0]);
  sycl::free(B0, q[0]);
  sycl::free(C0, q[0]);
  sycl::free(A1, q[1]);
  sycl::free(B1, q[1]);
  sycl::free(C1, q[1]);
  // -1 converts to the all-ones sentinel that main() tests against.
  if (error) return -1;
  std::cout << "Results are correct!\n\n";
  return std::max(min_time_ns0, min_time_ns1);
}
int main(int argc, char *argv[]) {
size_t array_size;
if (argc > 1 ) {
array_size = std::stoi(argv[1]);
}
else {
std::cout << "Run as ./<progname> <arraysize in elements>\n";
return 1;
}
std::cout << "Running with stream size of " << array_size
<< " elements (" << (array_size * sizeof(double))/(double)1024/1024 << "MB)\n";
cl_ulong min_time = triad(array_size);
if (min_time == -1) return 1;
size_t triad_bytes = 3 * sizeof(double) * array_size;
std::cout << "Triad Bytes: " << triad_bytes << "\n";
std::cout << "Time in sec (fastest run): " << min_time * 1.0E-9 << "\n";
double triad_bandwidth = 1.0E-09 * triad_bytes/(min_time*1.0E-9);
std::cout << "Bandwidth of fastest run in GB/s: " << triad_bandwidth << "\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/explicit-scaling/05_multi_tile_root/multi_tile_root.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// Snippet begin
try {
// The queue is attached to the root-device, driver distributes to
// sub - devices, if any.
auto D = device(gpu_selector{});
auto Q = queue(D);
Q.submit([&](handler &cgh) { ... });
}
// Snippet end
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/explicit-scaling/03_single_tile_subdevices/single_tile_subdevices.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// Snippet begin
try {
vector<device> SubDevices = ...;
for (auto &D : SubDevices) {
// Each queue is in its own context, no data sharing across them.
auto Q = queue(D);
Q.submit([&](handler &cgh) { ... });
}
}
// Snippet end
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/explicit-scaling/10_openmp_subdevice/openmp_subdevice.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// Snippet begin
#define DEVKIND 0 // TILE
// Dispatch a slice [lb(id), ub(id)) of the loop to each tile of the root
// device, one host thread per tile.
int root_id = omp_get_default_device();
#pragma omp parallel for
for (int id = 0; id < NUM_SUBDEVICES; ++id) {
  #pragma omp target teams distribute parallel for device(root_id) \
              subdevice(DEVKIND, id) map(…)
  for (int i = lb(id); i < ub(id); i++) { // FIX: clauses were comma-separated
    …
  }
}
// Snippet end
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/explicit-scaling/02_create_subdevices/create_subdevices.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// Snippet begin
try {
vector<device> SubDevices = RootDevice.create_sub_devices<
cl::sycl::info::partition_property::partition_by_affinity_domain>(
cl::sycl::info::partition_affinity_domain::numa);
}
// Snippet end
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/explicit-scaling/06_multi_roots/multi_roots.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// Snippet begin
try {
auto P = platform(gpu_selector{});
auto RootDevices = P.get_devices();
auto C = context(RootDevices);
for (auto &D : RootDevices) {
// Context has multiple root-devices, data can be shared across
// multi - card(requires explict copying)
auto Q = queue(C, D);
Q.submit([&](handler &cgh) { ... });
}
}
// Snippet end
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/explicit-scaling/09_openmp_subsubdevice/openmp_subsubdevice.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// Snippet begin
#define DEVKIND 1 // C-Slice
// Dispatch a slice [lb(id), ub(id)) of the loop to each compute slice of the
// root device, one host thread per slice.
int root_id = omp_get_default_device();
#pragma omp parallel for
for (int id = 0; id < NUM_SUBSUBDEVICES; ++id) {
  #pragma omp target teams distribute parallel for device(root_id) \
              subdevice(DEVKIND, id) map(…)
  for (int i = lb(id); i < ub(id); i++) { // FIX: clauses were comma-separated
    ...;
  }
}
// Snippet end
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/multiple-devices/overlap.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <array>
#include <chrono>
#include <iostream>
// Array type and data size for this example.
constexpr size_t array_size = 3 * 5 * 7 * (1 << 17);
typedef std::array<int, array_size> IntArray;
// #define mysize (1 << 17)
// Executing entire kernel on the GPU
size_t VectorAdd1(sycl::queue &q, const IntArray &a, const IntArray &b,
IntArray &sum, int iter) {
sycl::range num_items{a.size()};
sycl::buffer a_buf(a);
sycl::buffer b_buf(b);
sycl::buffer sum_buf(sum.data(), num_items);
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < iter; i++) {
auto e = q.submit([&](auto &h) {
// Input accessors
sycl::accessor a_acc(a_buf, h, sycl::read_only);
sycl::accessor b_acc(b_buf, h, sycl::read_only);
// Output accessor
sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init);
h.parallel_for(num_items,
[=](auto i) { sum_acc[i] = a_acc[i] + b_acc[i]; });
});
}
q.wait();
auto end = std::chrono::steady_clock::now();
std::cout << "Vector add1 completed on device - took "
<< (end - start).count() << " u-secs\n";
return ((end - start).count());
} // end VectorAdd1
// Executing half on GPU and the other half on CPU
size_t VectorAdd2(sycl::queue &q1, sycl::queue &q2, const IntArray &a,
const IntArray &b, IntArray &sum, int iter) {
sycl::range num_items{a.size() / 2};
auto start = std::chrono::steady_clock::now();
{
sycl::buffer a1_buf(a.data(), num_items);
sycl::buffer b1_buf(b.data(), num_items);
sycl::buffer sum1_buf(sum.data(), num_items);
sycl::buffer a2_buf(a.data() + a.size() / 2, num_items);
sycl::buffer b2_buf(b.data() + a.size() / 2, num_items);
sycl::buffer sum2_buf(sum.data() + a.size() / 2, num_items);
for (int i = 0; i < iter; i++) {
q1.submit([&](auto &h) {
// Input accessors
sycl::accessor a_acc(a1_buf, h, sycl::read_only);
sycl::accessor b_acc(b1_buf, h, sycl::read_only);
// Output accessor
sycl::accessor sum_acc(sum1_buf, h, sycl::write_only, sycl::no_init);
h.parallel_for(num_items,
[=](auto i) { sum_acc[i] = a_acc[i] + b_acc[i]; });
});
// do the work on host
q2.submit([&](auto &h) {
// Input accessors
sycl::accessor a_acc(a2_buf, h, sycl::read_only);
sycl::accessor b_acc(b2_buf, h, sycl::read_only);
// Output accessor
sycl::accessor sum_acc(sum2_buf, h, sycl::write_only, sycl::no_init);
h.parallel_for(num_items,
[=](auto i) { sum_acc[i] = a_acc[i] + b_acc[i]; });
});
}
// On some platforms this explicit flush of queues is needed
// to ensure the overlap in execution between the CPU and GPU
// cl_command_queue cq = q1.get();
// clFlush(cq);
// cq=q2.get();
// clFlush(cq);
}
q1.wait();
q2.wait();
auto end = std::chrono::steady_clock::now();
std::cout << "Vector add2 completed on device - took "
<< (end - start).count() << " u-secs\n";
return ((end - start).count());
} // end VectorAdd2
void InitializeArray(IntArray &a) {
for (size_t i = 0; i < a.size(); i++)
a[i] = i;
}
void Initialize(IntArray &a) {
for (size_t i = 0; i < a.size(); i++)
a[i] = 0;
}
IntArray a, b, sum;
int main() {
sycl::queue q(sycl::default_selector_v);
sycl::queue q1(sycl::gpu_selector_v);
sycl::queue q2(sycl::cpu_selector_v);
InitializeArray(a);
InitializeArray(b);
std::cout << "Running on device: "
<< q.get_device().get_info<sycl::info::device::name>() << "\n";
std::cout << "Vector size: " << a.size() << "\n";
// jit the code
VectorAdd1(q, a, b, sum, 10);
// check results
Initialize(sum);
VectorAdd1(q, a, b, sum, 1);
for (size_t i = 0; i < array_size; i++)
if (sum[i] != static_cast<int>(2 * i)) {
std::cout << "add1 Did not match\n";
}
Initialize(sum);
VectorAdd2(q1, q2, a, b, sum, 1);
for (size_t i = 0; i < array_size; i++)
if (sum[i] != static_cast<int>(2 * i)) {
std::cout << "add2 Did not match\n";
}
Initialize(sum);
VectorAdd1(q, a, b, sum, 10);
Initialize(sum);
VectorAdd2(q1, q2, a, b, sum, 10);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/usm/utils.hpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <chrono>
// Wall-clock stopwatch: construction records the start instant; elapsed()
// returns the seconds since then as a double.
class timer {
public:
  timer() : start_(std::chrono::steady_clock::now()) {}

  // Seconds since construction.
  double elapsed() {
    const auto stop = std::chrono::steady_clock::now();
    return std::chrono::duration<double>(stop - start_).count();
  }

private:
  std::chrono::steady_clock::time_point start_;
};
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/usm/usm-buffer.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <iomanip>
#include <CL/sycl.hpp>
#include "utils.hpp"
const int data_size = 10000000;
const int time_steps = 10;
const int device_steps = 1000;
sycl::property_list q_prop{sycl::property::queue::in_order()};
sycl::queue q(q_prop);
float ref_data[data_size];
// Compare `data` (a raw pointer or a host accessor) against the precomputed
// ref_data; exits with status 1 on the first mismatch.
// NOTE(review): the unconditional `return;` below disables the comparison
// entirely — it looks like a leftover from timing runs; confirm whether the
// validation should be re-enabled.
template <typename T> static void check(T data) {
  return;
  for (int i = 0; i < data_size; i++) {
    if (data[i] != ref_data[i]) {
      std::cout << "Expected: " << ref_data[i] << " got " << data[i] << "\n";
      exit(1);
    }
  }
}
// Zero all data_size elements of `data` (a raw pointer or a host accessor).
template <typename T> static void init(T data) {
  for (int idx = 0; idx != data_size; ++idx)
    data[idx] = 0;
}
// Print the stopwatch reading, fixed-point with 4 decimals in a 7-char field.
void put_elapsed_time(timer it) {
  const double seconds = it.elapsed();
  std::cout << std::setw(7) << std::fixed << std::setprecision(4) << seconds
            << std::endl;
}
// Compute the expected result entirely on the host: for each of time_steps
// outer steps, apply device_steps increments to every element (mirroring the
// device kernel) followed by one strided host-side increment.
static void reference(int stride) {
  init(ref_data);
  for (int step = 0; step < time_steps; step++) {
    // Mirror of the device kernel's work.
    for (int rep = 0; rep < device_steps; rep++) {
      for (int k = 0; k < data_size; k++)
        ref_data[k] += 1.0;
    }
    // Mirror of the host-side update between kernels.
    for (int k = 0; k < data_size; k += stride)
      ref_data[k] += 1.0;
  }
}
// Buffer/accessor variant: data lives in a sycl::buffer and the SYCL
// runtime moves it between host and device implicitly, driven by accessor
// creation. Per time step: a device kernel adds 1.0 to every element
// device_steps times, then the host adds 1.0 to every stride-th element.
// Prints the elapsed time and verifies the result against ref_data.
void buffer_data(int stride) {
  // Allocate buffer, initialize on host
  sycl::buffer<float> buffer_data{data_size};
  // no_init: skip any device-to-host copy-in; the accessor writes all data.
  init(sycl::host_accessor(buffer_data, sycl::write_only, sycl::no_init));
  timer it;
  for (int i = 0; i < time_steps; i++) {
    // Compute on device
    q.submit([&](auto &h) {
      sycl::accessor device_data(buffer_data, h);
      auto compute = [=](auto id) {
        for (int k = 0; k < device_steps; k++)
          device_data[id] += 1.0;
      };
      h.parallel_for(data_size, compute);
    });
    // Compute on host. Per SYCL buffer synchronization rules, constructing
    // this host_accessor blocks until the kernel above finishes and the
    // data is available on the host.
    sycl::host_accessor host_data(buffer_data);
    for (int i = 0; i < data_size; i += stride)  // NOTE: inner i shadows outer i
      host_data[i] += 1.0;
  }
  put_elapsed_time(it);
  // Final host view of the buffer for verification.
  const sycl::host_accessor h(buffer_data);
  check(h);
} // buffer_data
// Device-USM variant: data is allocated on the device with malloc_device
// and transferred with explicit memcpy calls each time step. Ordering of
// copy -> kernel -> copy relies on the in-order queue `q`; the .wait()
// after the copy-back ensures the host sees the kernel's results before
// doing its own update. Prints elapsed time and verifies against ref_data.
void device_usm_data(int stride) {
  // Allocate and initialize host data
  float *host_data = new float[data_size];
  init(host_data);
  // Allocate device data
  float *device_data = sycl::malloc_device<float>(data_size, q);
  timer it;
  for (int i = 0; i < time_steps; i++) {
    // Copy data to device and compute
    q.memcpy(device_data, host_data, sizeof(float) * data_size);
    auto compute = [=](auto id) {
      for (int k = 0; k < device_steps; k++)
        device_data[id] += 1.0;
    };
    q.parallel_for(data_size, compute);
    // Copy data to host and compute; wait() blocks until the copy-back
    // (and, on the in-order queue, the kernel before it) has finished.
    q.memcpy(host_data, device_data, sizeof(float) * data_size).wait();
    // Host phase happens in host_data; the next iteration's memcpy pushes
    // these updates back to the device.
    for (int k = 0; k < data_size; k += stride)
      host_data[k] += 1.0;
  }
  q.wait();
  put_elapsed_time(it);
  check(host_data);
  sycl::free(device_data, q);
  delete[] host_data;
} // device_usm_data
// Shared-USM variant: a single malloc_shared allocation is accessible from
// both host and device, so no explicit copies are needed; the runtime
// migrates the data on demand. Prints elapsed time and verifies against
// ref_data.
void shared_usm_data(int stride) {
  float *data = sycl::malloc_shared<float>(data_size, q);
  init(data);
  timer it;
  for (int i = 0; i < time_steps; i++) {
    auto compute = [=](auto id) {
      for (int k = 0; k < device_steps; k++)
        data[id] += 1.0;
    };
    // wait() before the host touches the same allocation.
    q.parallel_for(data_size, compute).wait();
    for (int k = 0; k < data_size; k += stride)
      data[k] += 1.0;
  }
  q.wait();
  put_elapsed_time(it);
  check(data);
  sycl::free(data, q);
} // shared_usm_data
void serial(int stride) {
// Allocate and initialize data
float *data = new float[data_size];
init(data);
timer it;
for (int i = 0; i < time_steps; i++) {
for (int j = 0; j < data_size; j++) {
for (int k = 0; k < device_steps; k++)
data[j] += 1.0;
}
for (int j = 0; j < data_size; j += stride)
data[j] += 1.0;
}
put_elapsed_time(it);
check(data);
delete[] data;
} // serial
// Driver: prints the device name, then for each host-access stride times
// every data-management strategy (shared USM, device USM, buffers, plain
// serial CPU), validating each result against a host-computed reference.
int main() {
  const std::vector<int> strides = {1, 200000};
  std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>()
            << "\n";
  for (const int stride : strides) {
    std::cout << "stride: " << stride << "\n";
    // Fill ref_data with the expected result for this stride.
    reference(stride);
    std::cout << "  usm malloc_shared: ";
    shared_usm_data(stride);
    std::cout << "  usm malloc_device: ";
    device_usm_data(stride);
    std::cout << "            buffer: ";
    buffer_data(stride);
    std::cout << "            serial: ";
    serial(stride);
  }
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/slm/convolution-slm-cache.cpp | //==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <iostream>
#include <random>
#include <vector>
// 1-D convolution (M taps) of N ints on the GPU, caching each work-group's
// input tile plus its halo in shared local memory (SLM) before computing.
// Out-of-range neighbors at the array edges are treated as zero.
// Prints the device name and the kernel execution time from event profiling.
int main() {
  constexpr size_t N = 8192 * 8192; // number of input/output elements
  constexpr size_t M = 257;         // filter taps; odd, so M/2 halo per side
  std::vector<int> input(N);
  std::vector<int> output(N);
  std::vector<int> kernel(M);
  // Deterministic pseudo-random input and filter (fixed seed).
  srand(2009);
  for (size_t i = 0; i < N; ++i) {
    input[i] = rand();
  }
  for (size_t i = 0; i < M; ++i) {
    kernel[i] = rand();
  }
  sycl::queue q{sycl::gpu_selector_v,
                sycl::property::queue::enable_profiling{}};
  std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>()
            << "\n";
  {
    // Snippet begin
    sycl::buffer<int> ibuf(input.data(), N);
    sycl::buffer<int> obuf(output.data(), N);
    sycl::buffer<int> kbuf(kernel.data(), M);
    auto e = q.submit([&](auto &h) {
      sycl::accessor iacc(ibuf, h, sycl::read_only);
      sycl::accessor oacc(obuf, h);
      sycl::accessor kacc(kbuf, h, sycl::read_only);
      // SLM tile: 256 work-items' own inputs plus an M/2 halo on each side.
      sycl::local_accessor<int, 1> ciacc(sycl::range(256 + (M / 2) * 2), h);
      h.parallel_for(
          sycl::nd_range(sycl::range{N}, sycl::range{256}),
          [=](sycl::nd_item<1> it) {
            int i = it.get_global_linear_id();
            int group = it.get_group()[0];
            int gSize = it.get_local_range()[0]; // 256
            int local_id = it.get_local_id()[0];
            int _M = static_cast<int>(M); // signed copy for index arithmetic
            // Each work-item stages its own input element into SLM,
            // offset past the left halo region.
            ciacc[local_id + M / 2] = iacc[i];
            // First work-item fills the left halo: zeros at the start of
            // the array, otherwise the tail of the previous group's data.
            if (local_id == 0) {
              if (group == 0) {
                for (int j = 0; j < _M / 2; ++j) {
                  ciacc[j] = 0;
                }
              } else {
                for (int j = 0, k = i - _M / 2; j < _M / 2; ++j, ++k) {
                  ciacc[j] = iacc[k];
                }
              }
            }
            // Last work-item fills the right halo: zeros at the end of
            // the array, otherwise the head of the next group's data.
            if (local_id == gSize - 1) {
              if (group == static_cast<int>(it.get_group_range()[0]) - 1) {
                for (int j = gSize + _M / 2; j < gSize + _M / 2 + _M / 2; ++j) {
                  ciacc[j] = 0;
                }
              } else {
                for (int j = gSize + _M / 2, k = i + 1;
                     j < gSize + _M / 2 + _M / 2; ++j, ++k) {
                  ciacc[j] = iacc[k];
                }
              }
            }
            // All tile/halo stores must be visible group-wide before use.
            it.barrier(sycl::access::fence_space::local_space);
            // M-tap dot product, reading the input entirely from SLM.
            int t = 0;
            for (int j = 0, k = local_id; j < _M; ++j, ++k) {
              t += ciacc[k] * kacc[j];
            }
            oacc[i] = t;
          });
    });
    // Snippet end
    q.wait();
    // Kernel-only time from the profiling-enabled event, in nanoseconds.
    size_t kernel_ns = (e.template get_profiling_info<
                            sycl::info::event_profiling::command_end>() -
                        e.template get_profiling_info<
                            sycl::info::event_profiling::command_start>());
    std::cout << "Kernel Execution Time Average: total = " << kernel_ns * 1e-6
              << " msec" << std::endl;
  }
  return 0;
}
| cpp |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.