repo_name (string, 10 classes) | file_path (string, lengths 29-222) | content (string, lengths 24-926k) | extention (string, 5 classes) |
---|---|---|---|
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/10_SYCL_Graphs_Scheduling_Data_management/src/inorder_queues.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
constexpr int N = 42;
int main() {
queue Q{property::queue::in_order()};
int *data = malloc_shared<int>(N, Q);
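// With an in-order queue the two kernels below execute in submission order,
// so the reduction reads fully initialized data without explicit dependencies.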
Q.parallel_for(N, [=](id<1> i) { data[i] = 1; });
Q.single_task([=]() {
for (int i = 1; i < N; i++)
data[0] += data[i];
});
Q.wait();
assert(data[0] == N);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/10_SYCL_Graphs_Scheduling_Data_management/src/y_pattern_buffers.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
constexpr int N = 42;
int main() {
queue Q;
buffer<int> data1{range{N}};
buffer<int> data2{range{N}};
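// The runtime builds the task graph from accessor data dependencies: the two
// initialization kernels are independent, the third kernel reads both buffers,
// and the final reduction reads data1, forming a Y-shaped graph.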
Q.submit([&](handler &h) {
accessor a{data1, h};
h.parallel_for(N, [=](id<1> i) { a[i] = 1; });
});
Q.submit([&](handler &h) {
accessor b{data2, h};
h.parallel_for(N, [=](id<1> i) { b[i] = 2; });
});
Q.submit([&](handler &h) {
accessor a{data1, h};
accessor b{data2, h, read_only};
h.parallel_for(N, [=](id<1> i) { a[i] += b[i]; });
});
Q.submit([&](handler &h) {
accessor a{data1, h};
h.single_task([=]() {
for (int i = 1; i < N; i++)
a[0] += a[i];
a[0] /= 3;
});
});
host_accessor h_a{data1};
assert(h_a[0] == N);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/10_SYCL_Graphs_Scheduling_Data_management/src/accessors_RAW.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <array>
using namespace sycl;
constexpr int N = 42;
int main() {
std::array<int,N> a, b, c;
for (int i = 0; i < N; i++) {
a[i] = b[i] = c[i] = 0;
}
queue Q;
//Create Buffers
buffer A{a};
buffer B{b};
buffer C{c};
Q.submit([&](handler &h) {
accessor accA(A, h, read_only);
accessor accB(B, h, write_only);
h.parallel_for( // computeB
N,
[=](id<1> i) { accB[i] = accA[i] + 1; });
});
Q.submit([&](handler &h) {
accessor accA(A, h, read_only);
h.parallel_for( // readA
N,
[=](id<1> i) {
// Useful only as an example
int data = accA[i];
});
});
Q.submit([&](handler &h) {
// RAW of buffer B
accessor accB(B, h, read_only);
accessor accC(C, h, write_only);
h.parallel_for( // computeC
N,
[=](id<1> i) { accC[i] = accB[i] + 2; });
});
// read C on host
host_accessor host_accC(C, read_only);
for (int i = 0; i < N; i++) {
std::cout << host_accC[i] << " ";
}
std::cout << "\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/10_SYCL_Graphs_Scheduling_Data_management/lab/USM_implicit.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
constexpr int N = 42;
int main() {
queue Q;
int *host_array = malloc_host<int>(N, Q);
int *shared_array = malloc_shared<int>(N, Q);
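// Both host and shared USM allocations are directly accessible inside device
// kernels; data movement is handled implicitly by the runtime.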
for (int i = 0; i < N; i++) {
// Initialize hostArray on host
host_array[i] = i;
}
// Submit the queue
Q.submit([&](handler &h) {
h.parallel_for(N, [=](id<1> i) {
// access sharedArray and hostArray on device
shared_array[i] = host_array[i] + 1;
});
});
Q.wait();
for (int i = 0; i < N; i++) {
// access sharedArray on host
host_array[i] = shared_array[i];
}
free(shared_array, Q);
free(host_array, Q);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/10_SYCL_Graphs_Scheduling_Data_management/lab/event_graphs.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
constexpr int N = 42;
int main() {
queue Q;
int *data = malloc_shared<int>(N, Q);
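// The event returned by the first kernel is passed to depends_on() so the
// reduction kernel waits for the initialization to complete.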
auto e = Q.parallel_for(N, [=](id<1> i) { data[i] = 1; });
Q.submit([&](handler &h) {
h.depends_on(e);
h.single_task([=]() {
for (int i = 1; i < N; i++)
data[0] += data[i];
});
});
Q.wait();
assert(data[0] == N);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/10_SYCL_Graphs_Scheduling_Data_management/lab/accessors_WAR_WAW.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <array>
using namespace sycl;
constexpr int N = 42;
int main() {
std::array<int,N> a, b;
for (int i = 0; i < N; i++) {
a[i] = b[i] = 0;
}
queue Q;
buffer A{a};
buffer B{b};
Q.submit([&](handler &h) {
accessor accA(A, h, read_only);
accessor accB(B, h, write_only);
h.parallel_for( // computeB
N, [=](id<1> i) {
accB[i] = accA[i] + 1;
});
});
Q.submit([&](handler &h) {
// WAR of buffer A
accessor accA(A, h, write_only);
h.parallel_for( // rewriteA
N, [=](id<1> i) {
accA[i] = 21 + 21;
});
});
Q.submit([&](handler &h) {
// WAW of buffer B
accessor accB(B, h, write_only);
h.parallel_for( // rewriteB
N, [=](id<1> i) {
accB[i] = 30 + 12;
});
});
host_accessor host_accA(A, read_only);
host_accessor host_accB(B, read_only);
for (int i = 0; i < N; i++) {
std::cout << host_accA[i] << " " << host_accB[i] << " ";
}
std::cout << "\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/10_SYCL_Graphs_Scheduling_Data_management/lab/y_pattern_events.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
constexpr int N = 42;
int main() {
queue Q;
int *data1 = malloc_shared<int>(N, Q);
int *data2 = malloc_shared<int>(N, Q);
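// Events e1 and e2 are passed as dependencies to the add kernel, and e3 to
// the final reduction, expressing the Y-shaped task graph explicitly.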
auto e1 = Q.parallel_for(N, [=](id<1> i) { data1[i] = 1; });
auto e2 = Q.parallel_for(N, [=](id<1> i) { data2[i] = 2; });
auto e3 = Q.parallel_for(range{N}, {e1, e2},
[=](id<1> i) { data1[i] += data2[i]; });
Q.single_task(e3, [=]() {
for (int i = 1; i < N; i++)
data1[0] += data1[i];
data1[0] /= 3;
});
Q.wait();
assert(data1[0] == N);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/10_SYCL_Graphs_Scheduling_Data_management/lab/accessors_sample.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <cassert>
using namespace sycl;
constexpr int N = 42;
int main() {
queue Q;
// Create 3 buffers of 42 ints
buffer<int> A{range{N}};
buffer<int> B{range{N}};
buffer<int> C{range{N}};
Q.submit([&](handler &h) {
// create device accessors
accessor aA{A, h, write_only, no_init};
accessor aB{B, h, write_only, no_init};
accessor aC{C, h, write_only, no_init};
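// no_init above indicates the previous contents of these buffers are not
// needed, so the runtime can skip copying any data to the device.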
h.parallel_for(N, [=](id<1> i) {
aA[i] = 1;
aB[i] = 40;
aC[i] = 0;
});
});
Q.submit([&](handler &h) {
// create device accessors
accessor aA{A, h, read_only};
accessor aB{B, h, read_only};
accessor aC{C, h, read_write};
h.parallel_for(N, [=](id<1> i) { aC[i] += aA[i] + aB[i]; });
});
host_accessor result{C, read_only};
for (int i = 0; i < N; i++) std::cout << result[i] << " ";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/10_SYCL_Graphs_Scheduling_Data_management/lab/linear_buffers_graphs.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
constexpr int N = 42;
int main() {
queue Q;
buffer<int> data{range{N}};
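// Both command groups create default read-write accessors to the same buffer,
// so the runtime serializes them: the reduction runs after the initialization.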
Q.submit([&](handler &h) {
accessor a{data, h};
h.parallel_for(N, [=](id<1> i) { a[i] = 1; });
});
Q.submit([&](handler &h) {
accessor a{data, h};
h.single_task([=]() {
for (int i = 1; i < N; i++)
a[0] += a[i];
});
});
host_accessor h_a{data};
assert(h_a[0] == N);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/10_SYCL_Graphs_Scheduling_Data_management/lab/y_pattern_inorder_queues.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
constexpr int N = 42;
int main() {
queue Q{property::queue::in_order()};
int *data1 = malloc_shared<int>(N, Q);
int *data2 = malloc_shared<int>(N, Q);
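// The in-order queue runs the four kernels below strictly in submission order,
// so no events or explicit dependencies are required.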
Q.parallel_for(N, [=](id<1> i) { data1[i] = 1; });
Q.parallel_for(N, [=](id<1> i) { data2[i] = 2; });
Q.parallel_for(N, [=](id<1> i) { data1[i] += data2[i]; });
Q.single_task([=]() {
for (int i = 1; i < N; i++)
data1[0] += data1[i];
data1[0] /= 3;
});
Q.wait();
assert(data1[0] == N);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/10_SYCL_Graphs_Scheduling_Data_management/lab/Linear_inorder_queues.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
constexpr int N = 42;
int main() {
queue Q{property::queue::in_order()};
int *data = malloc_shared<int>(N, Q);
Q.parallel_for(N, [=](id<1> i) { data[i] = 1; });
Q.single_task([=]() {
for (int i = 1; i < N; i++)
data[0] += data[i];
});
Q.wait();
assert(data[0] == N);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/10_SYCL_Graphs_Scheduling_Data_management/lab/task_scheduling.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
constexpr int N = 256;
int main() {
queue q;
//# 3 vectors initialized to values
std::vector<int> data1(N, 1);
std::vector<int> data2(N, 2);
std::vector<int> data3(N, 3);
//# STEP 1 : Create buffers for data1, data2 and data3
//# YOUR CODE GOES HERE
//# STEP 2 : Create a kernel to update data1 += data3, set accessor permissions
//# YOUR CODE GOES HERE
//# STEP 3 : Create a kernel to update data2 *= 2, set accessor permissions
//# YOUR CODE GOES HERE
//# STEP 4 : Create a kernel to update data3 = data1 + data2, set accessor permissions
//# YOUR CODE GOES HERE
//# STEP 5 : Create a host accessor to copy back data3
//# YOUR CODE GOES HERE
std::cout << "Output = " << data3[0] << "\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/10_SYCL_Graphs_Scheduling_Data_management/lab/linear_event_graphs.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
constexpr int N = 42;
int main() {
queue Q;
int *data = malloc_shared<int>(N, Q);
auto e = Q.parallel_for(N, [=](id<1> i) { data[i] = 1; });
Q.submit([&](handler &h) {
h.depends_on(e);
h.single_task([=]() {
for (int i = 1; i < N; i++)
data[0] += data[i];
});
});
Q.wait();
assert(data[0] == N);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/10_SYCL_Graphs_Scheduling_Data_management/lab/USM_explicit.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include<array>
using namespace sycl;
constexpr int N = 42;
int main() {
queue Q;
std::array<int,N> host_array;
int *device_array = malloc_device<int>(N, Q);
for (int i = 0; i < N; i++)
host_array[i] = N;
// Submit the queue
Q.submit([&](handler &h) {
// copy hostArray to deviceArray
h.memcpy(device_array, &host_array[0], N * sizeof(int));
});
Q.wait();
Q.submit([&](handler &h) {
h.parallel_for(N, [=](id<1> i) { device_array[i]++; });
});
Q.wait();
Q.submit([&](handler &h) {
// copy deviceArray back to hostArray
h.memcpy(&host_array[0], device_array, N * sizeof(int));
});
Q.wait();
free(device_array, Q);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/10_SYCL_Graphs_Scheduling_Data_management/lab/inorder_queues.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
constexpr int N = 42;
int main() {
queue Q{property::queue::in_order()};
int *data = malloc_shared<int>(N, Q);
Q.parallel_for(N, [=](id<1> i) { data[i] = 1; });
Q.single_task([=]() {
for (int i = 1; i < N; i++)
data[0] += data[i];
});
Q.wait();
assert(data[0] == N);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/10_SYCL_Graphs_Scheduling_Data_management/lab/y_pattern_buffers.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
constexpr int N = 42;
int main() {
queue Q;
buffer<int> data1{range{N}};
buffer<int> data2{range{N}};
Q.submit([&](handler &h) {
accessor a{data1, h};
h.parallel_for(N, [=](id<1> i) { a[i] = 1; });
});
Q.submit([&](handler &h) {
accessor b{data2, h};
h.parallel_for(N, [=](id<1> i) { b[i] = 2; });
});
Q.submit([&](handler &h) {
accessor a{data1, h};
accessor b{data2, h, read_only};
h.parallel_for(N, [=](id<1> i) { a[i] += b[i]; });
});
Q.submit([&](handler &h) {
accessor a{data1, h};
h.single_task([=]() {
for (int i = 1; i < N; i++)
a[0] += a[i];
a[0] /= 3;
});
});
host_accessor h_a{data1};
assert(h_a[0] == N);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/10_SYCL_Graphs_Scheduling_Data_management/lab/accessors_RAW.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <array>
using namespace sycl;
constexpr int N = 42;
int main() {
std::array<int,N> a, b, c;
for (int i = 0; i < N; i++) {
a[i] = b[i] = c[i] = 0;
}
queue Q;
//Create Buffers
buffer A{a};
buffer B{b};
buffer C{c};
Q.submit([&](handler &h) {
accessor accA(A, h, read_only);
accessor accB(B, h, write_only);
h.parallel_for( // computeB
N,
[=](id<1> i) { accB[i] = accA[i] + 1; });
});
Q.submit([&](handler &h) {
accessor accA(A, h, read_only);
h.parallel_for( // readA
N,
[=](id<1> i) {
// Useful only as an example
int data = accA[i];
});
});
Q.submit([&](handler &h) {
// RAW of buffer B
accessor accB(B, h, read_only);
accessor accC(C, h, write_only);
h.parallel_for( // computeC
N,
[=](id<1> i) { accC[i] = accB[i] + 2; });
});
// read C on host
host_accessor host_accC(C, read_only);
for (int i = 0; i < N; i++) {
std::cout << host_accC[i] << " ";
}
std::cout << "\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/00_Introduction_to_Jupyter/src/hello.cpp |
#include <iostream>
int main(){
std::cout << "Hello World";
} | cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/09_SYCL_Buffers_And_Accessors_Indepth/src/sub_buffers.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
int main() {
const int N = 64;
const int num1 = 2;
const int num2 = 3;
int data[N];
for (int i = 0; i < N; i++) data[i] = i;
std::cout<<"BUffer Values: ";
for (int i = 0; i < N; i++) std::cout << data[i] << " ";
std::cout<<"\n";
buffer B(data, range(N));
//Create sub buffers with offsets and half of the range.
buffer<int> B1(B, 0, range{ N / 2 });
buffer<int> B2(B, 32, range{ N / 2 });
//Multiply the elements in the first sub buffer by 2
queue q1;
q1.submit([&](handler& h) {
accessor a1(B1, h);
h.parallel_for(N/2, [=](auto i) { a1[i] *= num1; });
});
//Multiply the elements in the second sub buffer by 3
queue q2;
q2.submit([&](handler& h) {
accessor a2(B2, h);
h.parallel_for(N/2, [=](auto i) { a2[i] *= num2; });
});
//Host accessors to get the results back to the host from the device
host_accessor b1(B1, read_only);
host_accessor b2(B2, read_only);
std::cout<<"Sub Buffer1: ";
for (int i = 0; i < N/2; i++) std::cout<< b1[i] << " ";
std::cout<<"\n";
std::cout<<"Sub Buffer2: ";
for (int i = 0; i < N/2; i++) std::cout << b2[i] << " ";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/09_SYCL_Buffers_And_Accessors_Indepth/src/buffer_creation.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
int main() {
// Create a buffer of ints from an input iterator
std::vector<int> myVec;
buffer b1{myVec};
buffer b2{myVec.begin(), myVec.end()};
// Create a buffer of ints from std::array
std::array<int,42> my_data;
buffer b3{my_data};
// Create a buffer of 4 doubles and initialize it from a host pointer
double myDoubles[4] = {1.1, 2.2, 3.3, 4.4};
buffer b4{myDoubles, range{4}};
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/09_SYCL_Buffers_And_Accessors_Indepth/src/buffer_creation_uncommon.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
int main() {
// Create a buffer of 2x5 ints using the default allocator and CTAD for dimensions
buffer<int, 2> b1{range{2, 5}};
//Dimensions defaults to 1
// Create a buffer of 20 floats using a default-constructed std::allocator
buffer<float> b2{range{20}};
// Create a buffer from a shared pointer to int
auto sharedPtr = std::make_shared<int>(42);
buffer b3{sharedPtr, range{1}};
// Create a buffer of 2x5 ints and 2 non-overlapping sub-buffers of 5 ints.
buffer<int, 2> b4{range{2, 5}};
buffer b5{b4, id{0, 0}, range{1, 5}};
buffer b6{b4, id{1, 0}, range{1, 5}};
// Create a buffer of 5 doubles and initialize it from a host pointer to
// const double
const double myConstDbls[5] = {1.0, 2.0, 3.0, 4.0, 5.0};
buffer b7{myConstDbls, range{5}};
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/09_SYCL_Buffers_And_Accessors_Indepth/src/buffer_set_write_back.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <array>
using namespace sycl;
constexpr int N = 42;
int main() {
std::array<int,N> my_data;
for (int i = 0; i < N; i++)
my_data[i] = i;
{
queue q;
buffer my_buffer(my_data);
//Call the set_write_back method to control whether the data is written back to the host when the buffer is destroyed.
//Setting it to false means the host array will not be updated with the modified values.
my_buffer.set_write_back(false);
q.submit([&](handler &h) {
// create an accessor to update
// the buffer on the device
accessor my_accessor(my_buffer, h);
h.parallel_for(N, [=](id<1> i) {
my_accessor[i]*=2;
});
});
}
// Because write-back was disabled, my_data is NOT updated
// when my_buffer is destroyed upon exiting the scope
for (int i = 0; i < N; i++) {
std::cout << my_data[i] << " ";
}
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/09_SYCL_Buffers_And_Accessors_Indepth/src/accessors_sample.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <cassert>
using namespace sycl;
constexpr int N = 42;
int main() {
queue Q;
// Create 3 buffers of 42 ints
buffer<int> A{range{N}};
buffer<int> B{range{N}};
buffer<int> C{range{N}};
Q.submit([&](handler &h) {
// create device accessors
accessor aA{A, h, write_only, no_init};
accessor aB{B, h, write_only, no_init};
accessor aC{C, h, write_only, no_init};
h.parallel_for(N, [=](id<1> i) {
aA[i] = 1;
aB[i] = 40;
aC[i] = 0;
});
});
Q.submit([&](handler &h) {
// create device accessors
accessor aA{A, h, read_only};
accessor aB{B, h, read_only};
accessor aC{C, h, read_write};
h.parallel_for(N, [=](id<1> i) { aC[i] += aA[i] + aB[i]; });
});
host_accessor result{C, read_only};
for (int i = 0; i < N; i++) std::cout << result[i] << " ";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/09_SYCL_Buffers_And_Accessors_Indepth/src/buffer_host_ptr.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <mutex>
#include <sycl/sycl.hpp>
using namespace sycl;
static const int N = 20;
int main() {
int myInts[N];
queue q;
//Initialize vector a,b and c
std::vector<float> a(N, 10.0f);
std::vector<float> b(N, 20.0f);
auto R = range<1>(N);
{
//Create host_ptr buffers for a and b
buffer buf_a(a,{property::buffer::use_host_ptr()});
buffer buf_b(b,{property::buffer::use_host_ptr()});
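// use_host_ptr tells the runtime to use the existing host allocation directly
// rather than allocating its own host-side copy of the data.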
q.submit([&](handler& h) {
//create Accessors for a and b
accessor A(buf_a,h);
accessor B(buf_b,h,read_only);
h.parallel_for(R, [=](auto i) { A[i] += B[1] ; });
});
}
for (int i = 0; i < N; i++) std::cout << a[i] << " ";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/09_SYCL_Buffers_And_Accessors_Indepth/src/buffer_sample.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
constexpr int num=16;
using namespace sycl;
int main() {
auto R = range<1>{ num };
//Create Buffers A and B
buffer<int> A{ R }, B{ R };
//Create a device queue
queue Q;
//Submit Kernel 1
Q.submit([&](handler& h) {
//Accessor for buffer A
accessor out(A,h,write_only);
h.parallel_for(R, [=](auto idx) {
out[idx] = idx[0]; }); });
//Submit Kernel 2
Q.submit([&](handler& h) {
//This task will wait till kernel 1 is complete
accessor out(A,h,write_only);
h.parallel_for(R, [=](auto idx) {
out[idx] += idx[0]; }); });
//Submit Kernel 3
Q.submit([&](handler& h) {
//Accessor for Buffer B
accessor out(B,h,write_only);
h.parallel_for(R, [=](auto idx) {
out[idx] = idx[0]; }); });
//Submit task 4
Q.submit([&](handler& h) {
//This task will wait till kernel 2 and 3 are complete
accessor in (A,h,read_only);
accessor inout(B,h);
h.parallel_for(R, [=](auto idx) {
inout[idx] *= in[idx]; }); });
// And the following is back to host code
host_accessor result(B,read_only);
for (int i=0; i<num; ++i)
std::cout << result[i] << "\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/09_SYCL_Buffers_And_Accessors_Indepth/src/buffer_set_final_data.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <array>
using namespace sycl;
constexpr int N = 42;
int main() {
std::array<int,N> my_data;
for (int i = 0; i < N; i++)
my_data[i] = i;
auto buff = std::make_shared<std::array<int, N>>();
{
queue q;
buffer my_buffer(my_data);
//Call the set_final_data to the created shared ptr where the values will be written back when the buffer gets destructed.
//my_buffer.set_final_data(nullptr);
my_buffer.set_final_data(buff);
q.submit([&](handler &h) {
// create an accessor to update
// the buffer on the device
accessor my_accessor(my_buffer, h);
h.parallel_for(N, [=](id<1> i) {
my_accessor[i]*=2;
});
});
}
// my_data is not written back; the updated values go to the
// shared_ptr passed to set_final_data when my_buffer is destroyed
for (int i = 0; i < N; i++) {
std::cout << my_data[i] << " ";
}
std::cout << "\n";
for (int i = 0; i < N; i++) {
std::cout <<(*buff)[i] << " ";
}
std::cout << "\n";
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/09_SYCL_Buffers_And_Accessors_Indepth/src/lab_buffers.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
int main() {
const int N = 256;
int data[N];
for (int i = 0; i < N; i++) data[i] = i;
std::cout<<"\nInput Values: ";
for (int i = 0; i < N; i++) std::cout << data[i] << " ";
std::cout<<"\n";
buffer buf_data(data, range(N));
//# STEP 1 : Create 3 non-overlapping/disjoint sub-buffers for buf_data with length 64, 128 and 64.
buffer<int> buf_data1(buf_data, 0, range{ 64 });
buffer<int> buf_data2(buf_data, 64, range{ 128 });
buffer<int> buf_data3(buf_data, 192, range{ 64 });
//# STEP 2 : Submit task to Multiply the elements in first sub buffer by 2
queue q1;
q1.submit([&](handler& h) {
accessor a(buf_data1, h);
h.parallel_for(64, [=](auto i) { a[i] *= 2; });
});
//# STEP 3 : Submit task to Multiply the elements in second sub buffer by 3
queue q2;
q2.submit([&](handler& h) {
accessor a(buf_data2, h);
h.parallel_for(128, [=](auto i) { a[i] *= 3; });
});
//# STEP 4 : Submit task to Multiply the elements in third sub buffer by 2
queue q3;
q3.submit([&](handler& h) {
accessor a(buf_data3, h);
h.parallel_for(64, [=](auto i) { a[i] *= 2; });
});
//# STEP 5 : Create Host accessors to get the results back to the host from the device
host_accessor a(buf_data, read_only);
std::cout<<"\nOutput Values: ";
for (int i = 0; i < N; i++) std::cout<< data[i] << " ";
std::cout<<"\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/09_SYCL_Buffers_And_Accessors_Indepth/src/host_accessor_init.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <algorithm>
#include <iostream>
using namespace sycl;
int main() {
constexpr size_t N = 1024;
// Set up queue on any available device
queue q;
// Create buffers of size N
buffer<int> in_buf{N}, out_buf{N};
// Use host accessors to initialize the data
{ // CRITICAL: Begin scope for host_accessor lifetime!
host_accessor in_acc{ in_buf }, out_acc{ out_buf };
for (int i=0; i < N; i++) {
in_acc[i] = i;
out_acc[i] = 0;
}
} //Close scope to make host accessors go out of scope!
// Submit the kernel to the queue
q.submit([&](handler& h) {
accessor in{in_buf, h};
accessor out{out_buf, h};
h.parallel_for(range{N}, [=](id<1> idx) {
out[idx] = in[idx];
});
});
// Check that all outputs match expected value
// Use host accessor! Buffer is still in scope / alive
host_accessor A{out_buf};
//for (int i=0; i<N; i++) std::cout << "A[" << i << "]=" << A[i] << "\n";
int indices[]{0, 1, 2, 3, 4, (N - 1)};
constexpr size_t indices_size = sizeof(indices) / sizeof(int);
for (int i = 0; i < indices_size; i++) {
int j = indices[i];
if (i == indices_size - 1) std::cout << "...\n";
std::cout << "A[" << j << "]=" << A[j] << "\n";
}
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/09_SYCL_Buffers_And_Accessors_Indepth/src/host_accessor_sample.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <algorithm>
#include <iostream>
using namespace sycl;
int main() {
// BEGIN CODE SNIP
static const int N = 1024;
// Set up queue on any available device
queue q;
// Create host containers to initialize on the host
std::vector<int> in_vec(N), out_vec(N);
// Initialize input and output vectors
for (int i=0; i < N; i++) in_vec[i] = i;
std::fill(out_vec.begin(), out_vec.end(), 0);
// Create buffers using host allocations (vector in this case)
buffer in_buf{in_vec}, out_buf{out_vec};
// Submit the kernel to the queue
q.submit([&](handler& h) {
accessor in{in_buf, h};
accessor out{out_buf, h};
h.parallel_for(range{N}, [=](id<1> idx) {
out[idx] = in[idx] * 2;
});
});
// Check that all outputs match expected value
// Use host accessor! Buffer is still in scope / alive
host_accessor A{out_buf};
//for (int i=0; i<N; i++) std::cout << "A[" << i << "]=" << A[i] << "\n";
int indices[]{0, 1, 2, 3, 4, (N - 1)};
constexpr size_t indices_size = sizeof(indices) / sizeof(int);
for (int i = 0; i < indices_size; i++) {
int j = indices[i];
if (i == indices_size - 1) std::cout << "...\n";
std::cout << "A[" << j << "]=" << A[j] << "\n";
}
// END CODE SNIP
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/09_SYCL_Buffers_And_Accessors_Indepth/lab/sub_buffers.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
int main() {
const int N = 64;
const int num1 = 2;
const int num2 = 3;
int data[N];
for (int i = 0; i < N; i++) data[i] = i;
std::cout<<"BUffer Values: ";
for (int i = 0; i < N; i++) std::cout << data[i] << " ";
std::cout<<"\n";
buffer B(data, range(N));
//Create sub buffers with offsets and half of the range.
buffer<int> B1(B, 0, range{ N / 2 });
buffer<int> B2(B, 32, range{ N / 2 });
//Multiply the elements in the first sub buffer by 2
queue q1;
q1.submit([&](handler& h) {
accessor a1(B1, h);
h.parallel_for(N/2, [=](auto i) { a1[i] *= num1; });
});
//Multiply the elements in the second sub buffer by 3
queue q2;
q2.submit([&](handler& h) {
accessor a2(B2, h);
h.parallel_for(N/2, [=](auto i) { a2[i] *= num2; });
});
//Host accessors to get the results back to the host from the device
host_accessor b1(B1, read_only);
host_accessor b2(B2, read_only);
std::cout<<"Sub Buffer1: ";
for (int i = 0; i < N/2; i++) std::cout<< b1[i] << " ";
std::cout<<"\n";
std::cout<<"Sub Buffer2: ";
for (int i = 0; i < N/2; i++) std::cout << b2[i] << " ";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/09_SYCL_Buffers_And_Accessors_Indepth/lab/buffer_creation.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
int main() {
// Create a buffer of ints from an input iterator
std::vector<int> myVec;
buffer b1{myVec};
buffer b2{myVec.begin(), myVec.end()};
// Create a buffer of ints from std::array
std::array<int,42> my_data;
buffer b3{my_data};
// Create a buffer of 4 doubles and initialize it from a host pointer
double myDoubles[4] = {1.1, 2.2, 3.3, 4.4};
buffer b4{myDoubles, range{4}};
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/09_SYCL_Buffers_And_Accessors_Indepth/lab/buffer_creation_uncommon.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
int main() {
// Create a buffer of 2x5 ints using the default allocator and CTAD for dimensions
buffer<int, 2> b1{range{2, 5}};
//Dimensions defaults to 1
// Create a buffer of 20 floats using a default-constructed std::allocator
buffer<float> b2{range{20}};
// Create a buffer from a shared pointer to int
auto sharedPtr = std::make_shared<int>(42);
buffer b3{sharedPtr, range{1}};
// Create a buffer of 2x5 ints and 2 non-overlapping sub-buffers of 5 ints.
buffer<int, 2> b4{range{2, 5}};
buffer b5{b4, id{0, 0}, range{1, 5}};
buffer b6{b4, id{1, 0}, range{1, 5}};
// Create a buffer of 5 doubles and initialize it from a host pointer to
// const double
const double myConstDbls[5] = {1.0, 2.0, 3.0, 4.0, 5.0};
buffer b7{myConstDbls, range{5}};
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/09_SYCL_Buffers_And_Accessors_Indepth/lab/buffer_set_write_back.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <array>
using namespace sycl;
constexpr int N = 42;
int main() {
std::array<int,N> my_data;
for (int i = 0; i < N; i++)
my_data[i] = i;
{
queue q;
buffer my_buffer(my_data);
//Call the set_write_back method to control whether the data is written back to the host when the buffer is destroyed.
//Setting it to false means the host array will not be updated with the modified values.
my_buffer.set_write_back(false);
q.submit([&](handler &h) {
// create an accessor to update
// the buffer on the device
accessor my_accessor(my_buffer, h);
h.parallel_for(N, [=](id<1> i) {
my_accessor[i]*=2;
});
});
}
// Because write-back was disabled, my_data is NOT updated
// when my_buffer is destroyed upon exiting the scope
for (int i = 0; i < N; i++) {
std::cout << my_data[i] << " ";
}
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/09_SYCL_Buffers_And_Accessors_Indepth/lab/accessors_sample.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <cassert>
using namespace sycl;
constexpr int N = 42;
int main() {
queue Q;
// Create 3 buffers of 42 ints
buffer<int> A{range{N}};
buffer<int> B{range{N}};
buffer<int> C{range{N}};
Q.submit([&](handler &h) {
// create device accessors
accessor aA{A, h, write_only, no_init};
accessor aB{B, h, write_only, no_init};
accessor aC{C, h, write_only, no_init};
h.parallel_for(N, [=](id<1> i) {
aA[i] = 1;
aB[i] = 40;
aC[i] = 0;
});
});
Q.submit([&](handler &h) {
// create device accessors
accessor aA{A, h, read_only};
accessor aB{B, h, read_only};
accessor aC{C, h, read_write};
h.parallel_for(N, [=](id<1> i) { aC[i] += aA[i] + aB[i]; });
});
host_accessor result{C, read_only};
for (int i = 0; i < N; i++) std::cout << result[i] << " ";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/09_SYCL_Buffers_And_Accessors_Indepth/lab/buffer_host_ptr.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <mutex>
#include <sycl/sycl.hpp>
using namespace sycl;
static const int N = 20;
int main() {
int myInts[N];
queue q;
//Initialize vector a,b and c
std::vector<float> a(N, 10.0f);
std::vector<float> b(N, 20.0f);
auto R = range<1>(N);
{
//Create host_ptr buffers for a and b
buffer buf_a(a,{property::buffer::use_host_ptr()});
buffer buf_b(b,{property::buffer::use_host_ptr()});
q.submit([&](handler& h) {
//create Accessors for a and b
accessor A(buf_a,h);
accessor B(buf_b,h,read_only);
h.parallel_for(R, [=](auto i) { A[i] += B[1] ; });
});
}
for (int i = 0; i < N; i++) std::cout << a[i] << " ";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/09_SYCL_Buffers_And_Accessors_Indepth/lab/buffer_sample.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
constexpr int num=16;
using namespace sycl;
int main() {
auto R = range<1>{ num };
//Create Buffers A and B
buffer<int> A{ R }, B{ R };
//Create a device queue
queue Q;
//Submit Kernel 1
Q.submit([&](handler& h) {
//Accessor for buffer A
accessor out(A,h,write_only);
h.parallel_for(R, [=](auto idx) {
out[idx] = idx[0]; }); });
//Submit Kernel 2
Q.submit([&](handler& h) {
//This task will wait till kernel 1 is complete
accessor out(A,h,write_only);
h.parallel_for(R, [=](auto idx) {
out[idx] += idx[0]; }); });
//Submit Kernel 3
Q.submit([&](handler& h) {
//Accessor for Buffer B
accessor out(B,h,write_only);
h.parallel_for(R, [=](auto idx) {
out[idx] = idx[0]; }); });
//Submit task 4
Q.submit([&](handler& h) {
//This task will wait till kernel 2 and 3 are complete
accessor in (A,h,read_only);
accessor inout(B,h);
h.parallel_for(R, [=](auto idx) {
inout[idx] *= in[idx]; }); });
// And the following is back to host code
host_accessor result(B,read_only);
for (int i=0; i<num; ++i)
std::cout << result[i] << "\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/09_SYCL_Buffers_And_Accessors_Indepth/lab/buffer_set_final_data.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <array>
using namespace sycl;
constexpr int N = 42;
int main() {
std::array<int,N> my_data;
for (int i = 0; i < N; i++)
my_data[i] = i;
auto buff = std::make_shared<std::array<int, N>>();
{
queue q;
buffer my_buffer(my_data);
//Call the set_final_data to the created shared ptr where the values will be written back when the buffer gets destructed.
//my_buffer.set_final_data(nullptr);
my_buffer.set_final_data(buff);
q.submit([&](handler &h) {
// create an accessor to update
// the buffer on the device
accessor my_accessor(my_buffer, h);
h.parallel_for(N, [=](id<1> i) {
my_accessor[i]*=2;
});
});
}
// my_data is not written back; the updated values go to the
// shared_ptr passed to set_final_data when my_buffer is destroyed
for (int i = 0; i < N; i++) {
std::cout << my_data[i] << " ";
}
std::cout << "\n";
for (int i = 0; i < N; i++) {
std::cout <<(*buff)[i] << " ";
}
std::cout << "\n";
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/09_SYCL_Buffers_And_Accessors_Indepth/lab/lab_buffers.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
int main() {
const int N = 256;
int data[N];
for (int i = 0; i < N; i++) data[i] = i;
std::cout<<"\nInput Values: ";
for (int i = 0; i < N; i++) std::cout << data[i] << " ";
std::cout<<"\n";
buffer buf_data(data, range(N));
//# STEP 1 : Create 3 non-overlapping/disjoint sub-buffers for buf_data with length 64, 128 and 64.
//# YOUR CODE GOES HERE
//# STEP 2 : Submit task to Multiply the elements in first sub buffer by 2
queue q1;
q1.submit([&](handler& h) {
//# YOUR CODE GOES HERE
});
//# STEP 3 : Submit task to Multiply the elements in second sub buffer by 3
queue q2;
q2.submit([&](handler& h) {
//# YOUR CODE GOES HERE
});
//# STEP 4 : Submit task to Multiply the elements in third sub buffer by 2
queue q3;
q3.submit([&](handler& h) {
//# YOUR CODE GOES HERE
});
//# STEP 5 : Create Host accessors to get the results back to the host from the device
//# YOUR CODE GOES HERE
std::cout<<"\nOutput Values: ";
for (int i = 0; i < N; i++) std::cout<< data[i] << " ";
std::cout<<"\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/09_SYCL_Buffers_And_Accessors_Indepth/lab/host_accessor_init.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <algorithm>
#include <iostream>
using namespace sycl;
int main() {
constexpr size_t N = 1024;
// Set up queue on any available device
queue q;
// Create buffers of size N
buffer<int> in_buf{N}, out_buf{N};
// Use host accessors to initialize the data
{ // CRITICAL: Begin scope for host_accessor lifetime!
host_accessor in_acc{ in_buf }, out_acc{ out_buf };
for (int i=0; i < N; i++) {
in_acc[i] = i;
out_acc[i] = 0;
}
} //Close scope to make host accessors go out of scope!
// Submit the kernel to the queue
q.submit([&](handler& h) {
accessor in{in_buf, h};
accessor out{out_buf, h};
h.parallel_for(range{N}, [=](id<1> idx) {
out[idx] = in[idx];
});
});
// Check that all outputs match expected value
// Use host accessor! Buffer is still in scope / alive
host_accessor A{out_buf};
//for (int i=0; i<N; i++) std::cout << "A[" << i << "]=" << A[i] << "\n";
int indices[]{0, 1, 2, 3, 4, (N - 1)};
constexpr size_t indices_size = sizeof(indices) / sizeof(int);
for (int i = 0; i < indices_size; i++) {
int j = indices[i];
if (i == indices_size - 1) std::cout << "...\n";
std::cout << "A[" << j << "]=" << A[j] << "\n";
}
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/09_SYCL_Buffers_And_Accessors_Indepth/lab/host_accessor_sample.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <algorithm>
#include <iostream>
using namespace sycl;
int main() {
// BEGIN CODE SNIP
static const int N = 1024;
// Set up queue on any available device
queue q;
// Create host containers to initialize on the host
std::vector<int> in_vec(N), out_vec(N);
// Initialize input and output vectors
for (int i=0; i < N; i++) in_vec[i] = i;
std::fill(out_vec.begin(), out_vec.end(), 0);
// Create buffers using host allocations (vector in this case)
buffer in_buf{in_vec}, out_buf{out_vec};
// Submit the kernel to the queue
q.submit([&](handler& h) {
accessor in{in_buf, h};
accessor out{out_buf, h};
h.parallel_for(range{N}, [=](id<1> idx) {
out[idx] = in[idx] * 2;
});
});
// Check that all outputs match expected value
// Use host accessor! Buffer is still in scope / alive
host_accessor A{out_buf};
//for (int i=0; i<N; i++) std::cout << "A[" << i << "]=" << A[i] << "\n";
int indices[]{0, 1, 2, 3, 4, (N - 1)};
constexpr size_t indices_size = sizeof(indices) / sizeof(int);
for (int i = 0; i < indices_size; i++) {
int j = indices[i];
if (i == indices_size - 1) std::cout << "...\n";
std::cout << "A[" << j << "]=" << A[j] << "\n";
}
// END CODE SNIP
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/01_oneAPI_Intro/src/sycl_vector_add.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <iostream>
//# STEP 1 : Include header for SYCL
#include <sycl/sycl.hpp>
int main(){
//# STEP 2: Create a SYCL queue and device selection for offload
sycl::queue q;
//# initialize some data array
const int N = 16;
//# STEP 3: Allocate memory so that both host and device can access
auto a = sycl::malloc_shared<float>(N, q);
auto b = sycl::malloc_shared<float>(N, q);
auto c = sycl::malloc_shared<float>(N, q);
for(int i=0;i<N;i++) {
a[i] = 1;
b[i] = 2;
c[i] = 0;
}
//# STEP 4: Submit computation to Offload device
q.parallel_for(N, [=](auto i){
//# computation: each work-item adds one element
c[i] = a[i] + b[i];
}).wait();
//# print output
for(int i=0;i<N;i++) std::cout << c[i] << "\n";
sycl::free(a, q);
sycl::free(b, q);
sycl::free(c, q);
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/01_oneAPI_Intro/src/simple.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static const int N = 16;
int main(){
//# define queue which has default device associated for offload
queue q;
std::cout << "Device: " << q.get_device().get_info<info::device::name>() << "\n";
//# Unified Shared Memory Allocation enables data access on host and device
int *data = malloc_shared<int>(N, q);
//# Initialization
for(int i=0; i<N; i++) data[i] = i;
//# Offload parallel computation to device
q.parallel_for(range<1>(N), [=] (id<1> i){
data[i] *= 2;
}).wait();
//# Print Output
for(int i=0; i<N; i++) std::cout << data[i] << "\n";
free(data, q);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/01_oneAPI_Intro/src/onedpl_compute_offload.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <iostream>
int main(){
//# initialize some data array
const int N = 16;
std::vector<int> data(N);
for(int i=0;i<N;i++) data[i] = i;
//# parallel computation on GPU using SYCL library (oneDPL)
oneapi::dpl::for_each(oneapi::dpl::execution::dpcpp_default, data.begin(), data.end(), [](int &tmp){ tmp *= 5; });
//# print output
for(int i=0;i<N;i++) std::cout << data[i] << "\n";
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/01_oneAPI_Intro/src/sycl_compute_offload_parallelism.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <iostream>
int main(){
//# select device for offload
sycl::queue q(sycl::gpu_selector_v);
std::cout << "Offload Device: " << q.get_device().get_info<sycl::info::device::name>() << "\n";
//# initialize some data array
const int N = 16;
auto data = sycl::malloc_shared<float>(N, q);
for(int i=0;i<N;i++) data[i] = i;
//# parallel computation on GPU
q.parallel_for(N,[=](auto i){
data[i] = data[i] * 5;
}).wait();
//# print output
for(int i=0;i<N;i++) std::cout << data[i] << "\n";
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/01_oneAPI_Intro/src/sycl_compute_offload.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <iostream>
int main(){
//# select device for offload
sycl::queue q(sycl::gpu_selector_v);
std::cout << "Offload Device: " << q.get_device().get_info<sycl::info::device::name>() << "\n";
//# initialize some data array
const int N = 16;
auto data = sycl::malloc_shared<float>(N, q);
for(int i=0;i<N;i++) data[i] = i;
//# computation on GPU
q.single_task([=](){
for(int i=0;i<N;i++) data[i] = data[i] * 5;
}).wait();
//# print output
for(int i=0;i<N;i++) std::cout << data[i] << "\n";
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/01_oneAPI_Intro/src/cpp_compute.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <iostream>
int main(){
//# initialize some data array
const int N = 16;
float data[N];
for(int i=0;i<N;i++) data[i] = i;
//# computation on CPU
for(int i=0;i<N;i++) data[i] = data[i] * 5;
//# print output
for(int i=0;i<N;i++) std::cout << data[i] << "\n";
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/01_oneAPI_Intro/lab/sycl_vector_add.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <iostream>
//# STEP 1 : Include header for SYCL
//# YOUR CODE GOES HERE
int main(){
//# STEP 2: Create a SYCL queue and device selection for offload
//# YOUR CODE GOES HERE
//# initialize some data array
const int N = 16;
//# STEP 3: Allocate memory so that both host and device can access
//# MODIFY THE CODE BELOW
float a[N], b[N], c[N];
for(int i=0;i<N;i++) {
a[i] = 1;
b[i] = 2;
c[i] = 0;
}
//# STEP 4: Submit computation to Offload device
//# MODIFY THE CODE BELOW
//# computation
for(int i=0;i<N;i++) c[i] = a[i] + b[i];
//# print output
for(int i=0;i<N;i++) std::cout << c[i] << "\n";
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/01_oneAPI_Intro/lab/simple.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static const int N = 16;
int main(){
//# define queue which has default device associated for offload
queue q;
std::cout << "Device: " << q.get_device().get_info<info::device::name>() << "\n";
//# Unified Shared Memory Allocation enables data access on host and device
int *data = malloc_shared<int>(N, q);
//# Initialization
for(int i=0; i<N; i++) data[i] = i;
//# Offload parallel computation to device
q.parallel_for(range<1>(N), [=] (id<1> i){
data[i] *= 2;
}).wait();
//# Print Output
for(int i=0; i<N; i++) std::cout << data[i] << "\n";
free(data, q);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/01_oneAPI_Intro/lab/onedpl_compute_offload.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <iostream>
int main(){
//# initialize some data array
const int N = 16;
std::vector<int> data(N);
for(int i=0;i<N;i++) data[i] = i;
//# parallel computation on GPU using SYCL library (oneDPL)
oneapi::dpl::for_each(oneapi::dpl::execution::dpcpp_default, data.begin(), data.end(), [](int &tmp){ tmp *= 5; });
//# print output
for(int i=0;i<N;i++) std::cout << data[i] << "\n";
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/01_oneAPI_Intro/lab/sycl_compute_offload_parallelism.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <iostream>
int main(){
//# select device for offload
sycl::queue q(sycl::gpu_selector_v);
std::cout << "Offload Device: " << q.get_device().get_info<sycl::info::device::name>() << "\n";
//# initialize some data array
const int N = 16;
auto data = sycl::malloc_shared<float>(N, q);
for(int i=0;i<N;i++) data[i] = i;
//# parallel computation on GPU
q.parallel_for(N,[=](auto i){
data[i] = data[i] * 5;
}).wait();
//# print output
for(int i=0;i<N;i++) std::cout << data[i] << "\n";
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/01_oneAPI_Intro/lab/sycl_compute_offload.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <iostream>
int main(){
//# select device for offload
sycl::queue q(sycl::gpu_selector_v);
std::cout << "Offload Device: " << q.get_device().get_info<sycl::info::device::name>() << "\n";
//# initialize some data array
const int N = 16;
auto data = sycl::malloc_shared<float>(N, q);
for(int i=0;i<N;i++) data[i] = i;
//# computation on GPU
q.single_task([=](){
for(int i=0;i<N;i++) data[i] = data[i] * 5;
}).wait();
//# print output
for(int i=0;i<N;i++) std::cout << data[i] << "\n";
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/01_oneAPI_Intro/lab/cpp_compute.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <iostream>
int main(){
//# initialize some data array
const int N = 16;
float data[N];
for(int i=0;i<N;i++) data[i] = i;
//# computation on CPU
for(int i=0;i<N;i++) data[i] = data[i] * 5;
//# print output
for(int i=0;i<N;i++) std::cout << data[i] << "\n";
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/03_SYCL_Unified_Shared_Memory/src/usm_lab.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static const int N = 1024;
int main() {
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
//initialize 2 arrays on host
int *data1 = static_cast<int *>(malloc(N * sizeof(int)));
int *data2 = static_cast<int *>(malloc(N * sizeof(int)));
for (int i = 0; i < N; i++) {
data1[i] = 25;
data2[i] = 49;
}
//# STEP 1 : Create USM device allocation for data1 and data2
auto data1_device = malloc_device<int>(N, q);
auto data2_device = malloc_device<int>(N, q);
//# STEP 2 : Copy data1 and data2 to USM device allocation
auto e1 = q.memcpy(data1_device, data1, N * sizeof(int));
auto e2 = q.memcpy(data2_device, data2, N * sizeof(int));
//# STEP 3 : Write kernel code to update data1 on device with square of its value
auto e3 = q.parallel_for(N, e1, [=](auto i) {
data1_device[i] = (data1_device[i]*data1_device[i]);
});
//# STEP 4 : Write kernel code to update data2 on device with square of its value
auto e4 = q.parallel_for(N, e2, [=](auto i) {
data2_device[i] = (data2_device[i]*data2_device[i]);
});
//# STEP 5 : Write kernel code to add data2 on device to data1
auto e5 = q.parallel_for(N, {e3,e4}, [=](auto i) { data1_device[i] += data2_device[i]; });
//# STEP 6 : Copy result from device to data1
q.memcpy(data1, data1_device, N * sizeof(int), e5).wait();
//# verify results
int fail = 0;
for (int i = 0; i < N; i++) if(data1[i] != 3026) {fail = 1; break;}
if(fail == 1) std::cout << " FAIL"; else std::cout << " PASS";
std::cout << "\n";
free(data1_device, q);
free(data2_device, q);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/03_SYCL_Unified_Shared_Memory/src/usm_data.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static const int N = 256;
int main() {
/* in_order queue property */
queue q{property::queue::in_order()};
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
int *data = static_cast<int *>(malloc_shared(N * sizeof(int), q));
for (int i = 0; i < N; i++) data[i] = 10;
q.parallel_for(range<1>(N), [=](id<1> i) { data[i] += 2; });
q.parallel_for(range<1>(N), [=](id<1> i) { data[i] += 3; });
q.parallel_for(range<1>(N), [=](id<1> i) { data[i] += 5; });
q.wait();
for (int i = 0; i < N; i++) std::cout << data[i] << " ";
std::cout << "\n";
free(data, q);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/03_SYCL_Unified_Shared_Memory/src/usm.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static const int N = 16;
int main() {
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
//# USM allocation using malloc_shared
int *data = static_cast<int *>(malloc_shared(N * sizeof(int), q));
//# Initialize data array
for (int i = 0; i < N; i++) data[i] = i;
//# Modify data array on device
q.parallel_for(range<1>(N), [=](id<1> i) { data[i] *= 2; }).wait();
//# print output
for (int i = 0; i < N; i++) std::cout << data[i] << "\n";
free(data, q);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/03_SYCL_Unified_Shared_Memory/src/usm_explicit.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static const int N = 16;
int main() {
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
//# initialize data on host
int *data = static_cast<int *>(malloc(N * sizeof(int)));
for (int i = 0; i < N; i++) data[i] = i;
//# Explicit USM allocation using malloc_device
int *data_device = malloc_device<int>(N, q);
//# copy mem from host to device
q.memcpy(data_device, data, sizeof(int) * N).wait();
//# update device memory
q.parallel_for(range<1>(N), [=](id<1> i) { data_device[i] *= 2; }).wait();
//# copy mem from device to host
q.memcpy(data, data_device, sizeof(int) * N).wait();
//# print output
for (int i = 0; i < N; i++) std::cout << data[i] << "\n";
free(data_device, q);
free(data);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/03_SYCL_Unified_Shared_Memory/src/usm_data2.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static const int N = 1024;
int main() {
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
int *data1 = malloc_shared<int>(N, q);
int *data2 = malloc_shared<int>(N, q);
for (int i = 0; i < N; i++) {
data1[i] = 10;
data2[i] = 10;
}
auto e1 = q.parallel_for(range<1>(N), [=](id<1> i) { data1[i] += 2; });
auto e2 = q.parallel_for(range<1>(N), [=](id<1> i) { data2[i] += 3; });
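//# the kernel below depends on events e1 and e2, so it executes only after both kernels above complete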
q.parallel_for(range<1>(N), {e1, e2}, [=](id<1> i) { data1[i] += data2[i]; }).wait();
for (int i = 0; i < N; i++) std::cout << data1[i] << " ";
std::cout << "\n";
free(data1, q);
free(data2, q);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/03_SYCL_Unified_Shared_Memory/lab/usm_lab.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static const int N = 1024;
int main() {
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
//initialize 2 arrays on host
int *data1 = static_cast<int *>(malloc(N * sizeof(int)));
int *data2 = static_cast<int *>(malloc(N * sizeof(int)));
for (int i = 0; i < N; i++) {
data1[i] = 25;
data2[i] = 49;
}
//# STEP 1 : Create USM device allocation for data1 and data2
//# YOUR CODE GOES HERE
//# STEP 2 : Copy data1 and data2 to USM device allocation
//# YOUR CODE GOES HERE
//# STEP 3 : Write kernel code to update data1 on device with square of its value
q.parallel_for(N, [=](auto i) {
//# YOUR CODE GOES HERE
});
//# STEP 4 : Write kernel code to update data2 on device with square of its value
q.parallel_for(N, [=](auto i) {
//# YOUR CODE GOES HERE
});
//# STEP 5 : Write kernel code to add data2 on device to data1
q.parallel_for(N, [=](auto i) {
//# YOUR CODE GOES HERE
});
//# STEP 6 : Copy data1 on device to host
//# YOUR CODE GOES HERE
//# verify results
int fail = 0;
for (int i = 0; i < N; i++) if(data1[i] != 3026) {fail = 1; break;}
if(fail == 1) std::cout << " FAIL"; else std::cout << " PASS";
std::cout << "\n";
//# STEP 7 : Free USM device allocations
//# YOUR CODE GOES HERE
//# STEP 8 : Add event based kernel dependency for the Steps 2 - 6
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/03_SYCL_Unified_Shared_Memory/lab/usm_data.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static const int N = 256;
int main() {
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
int *data = static_cast<int *>(malloc_shared(N * sizeof(int), q));
for (int i = 0; i < N; i++) data[i] = 10;
q.parallel_for(range<1>(N), [=](id<1> i) { data[i] += 2; });
q.parallel_for(range<1>(N), [=](id<1> i) { data[i] += 3; });
q.parallel_for(range<1>(N), [=](id<1> i) { data[i] += 5; });
q.wait();
for (int i = 0; i < N; i++) std::cout << data[i] << " ";
std::cout << "\n";
free(data, q);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/03_SYCL_Unified_Shared_Memory/lab/usm.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static const int N = 16;
int main() {
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
//# USM allocation using malloc_shared
int *data = malloc_shared<int>(N, q);
//# Initialize data array
for (int i = 0; i < N; i++) data[i] = i;
//# Modify data array on device
q.parallel_for(range<1>(N), [=](id<1> i) { data[i] *= 2; }).wait();
//# print output
for (int i = 0; i < N; i++) std::cout << data[i] << "\n";
free(data, q);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/03_SYCL_Unified_Shared_Memory/lab/usm_explicit.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static const int N = 16;
int main() {
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
//# initialize data on host
int *data = static_cast<int *>(malloc(N * sizeof(int)));
for (int i = 0; i < N; i++) data[i] = i;
//# Explicit USM allocation using malloc_device
int *data_device = malloc_device<int>(N, q);
//# copy mem from host to device
q.memcpy(data_device, data, sizeof(int) * N).wait();
//# update device memory
q.parallel_for(range<1>(N), [=](id<1> i) { data_device[i] *= 2; }).wait();
//# copy mem from device to host
q.memcpy(data, data_device, sizeof(int) * N).wait();
//# print output
for (int i = 0; i < N; i++) std::cout << data[i] << "\n";
free(data_device, q);
free(data);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/03_SYCL_Unified_Shared_Memory/lab/usm_data2.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static const int N = 1024;
int main() {
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
int *data1 = malloc_shared<int>(N, q);
int *data2 = malloc_shared<int>(N, q);
for (int i = 0; i < N; i++) {
data1[i] = 10;
data2[i] = 10;
}
q.parallel_for(range<1>(N), [=](id<1> i) { data1[i] += 2; });
q.parallel_for(range<1>(N), [=](id<1> i) { data2[i] += 3; });
q.parallel_for(range<1>(N), [=](id<1> i) { data1[i] += data2[i]; }).wait();
for (int i = 0; i < N; i++) std::cout << data1[i] << " ";
std::cout << "\n";
free(data1, q);
free(data2, q);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/11_Intel_Distribution_for_GDB/src/selector.hpp | //==============================================================
// Copyright (C) Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <iostream>
// Custom device selector to select a device of the specified type.
// The platform of the device has to contain the phrase "Intel". If
// the platform or the type are not as expected, the queried device is
// not selected, as indicated by a negative score.
using namespace std;
using namespace sycl;
class CustomSelector {
public:
CustomSelector(info::device_type type) : type{type} {}
int operator()(const device& dev) const {
if (type != dev.get_info<info::device::device_type>()) return -1;
string platform_name =
dev.get_platform().get_info<info::platform::name>();
if (platform_name.find("Intel") != string::npos)
return 10;
else
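// Non-Intel platforms are acceptable only when the requested device type is host.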
return (type == info::device_type::host) ? 10 : -1;
}
private:
info::device_type type;
};
// Return the device type based on the program arguments.
static info::device_type GetDeviceType(int argc, char* argv[]) {
if (argc < 2) {
cerr << "Usage: " << argv[0] << " "
<< "<host|cpu|gpu|accelerator>\n";
exit(1);
}
string type_arg{argv[1]};
info::device_type type;
if (type_arg.compare("host") == 0)
type = info::device_type::host;
else if (type_arg.compare("cpu") == 0)
type = info::device_type::cpu;
else if (type_arg.compare("gpu") == 0)
type = info::device_type::gpu;
else if (type_arg.compare("accelerator") == 0)
type = info::device_type::accelerator;
else {
cerr << "fail; unrecognized device type '" << type_arg << "'\n";
exit(-1);
}
return type;
} | hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/11_Intel_Distribution_for_GDB/src/array-transform.cpp | //==============================================================
// Copyright (C) Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// This is a simple SYCL program that accompanies the Getting Started
// Guide of the debugger. The kernel does not compute anything
// particularly interesting; it is designed to illustrate the most
// essential features of the debugger when the target device is CPU or
// GPU.
#include <sycl/sycl.hpp>
#include <iostream>
#include "selector.hpp"
using namespace std;
using namespace sycl;
// A device function, called from inside the kernel.
static size_t GetDim(id<1> wi, int dim) {
return wi[dim];
}
int main(int argc, char *argv[]) {
constexpr size_t length = 64;
int input[length];
int output[length];
// Initialize the input
for (int i = 0; i < length; i++)
input[i] = i + 100;
try {
CustomSelector selector(GetDeviceType(argc, argv));
queue q(selector);
cout << "[SYCL] Using device: ["
<< q.get_device().get_info<info::device::name>()
<< "] from ["
<< q.get_device().get_platform().get_info<info::platform::name>()
<< "]\n";
range data_range{length};
buffer buffer_in{input, data_range};
buffer buffer_out{output, data_range};
q.submit([&](auto &h) {
accessor in(buffer_in, h, read_only);
accessor out(buffer_out, h, write_only);
// kernel-start
h.parallel_for(data_range, [=](id<1> index) {
size_t id0 = GetDim(index, 0);
int element = in[index]; // breakpoint-here
int result = element + 50;
if (id0 % 2 == 0) {
result = result + 50; // then-branch
} else {
result = -1; // else-branch
}
out[index] = result;
});
// kernel-end
});
q.wait_and_throw();
} catch (sycl::exception const& e) {
cout << "fail; synchronous exception occurred: " << e.what() << "\n";
return -1;
}
// Verify the output
for (int i = 0; i < length; i++) {
int result = (i % 2 == 0) ? (input[i] + 100) : -1;
if (output[i] != result) {
cout << "fail; element " << i << " is " << output[i] << "\n";
return -1;
}
}
cout << "success; result is correct.\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/04_SYCL_Sub_Groups/src/sub_group_reqd_size.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static constexpr size_t N = 64; // global size
static constexpr size_t B = 64; // work-group size
int main() {
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
//# get all supported sub_group sizes and print
auto sg_sizes = q.get_device().get_info<info::device::sub_group_sizes>();
std::cout << "Supported Sub-Group Sizes : ";
for (int i=0; i<sg_sizes.size(); i++) std::cout << sg_sizes[i] << " "; std::cout << "\n";
//# find out maximum supported sub_group size
auto max_sg_size = std::max_element(sg_sizes.begin(), sg_sizes.end());
std::cout << "Max Sub-Group Size : " << max_sg_size[0] << "\n";
q.submit([&](handler &h) {
//# setup sycl stream class to print standard output from device code
auto out = stream(1024, 768, h);
//# nd-range kernel with user specified sub_group size
h.parallel_for(nd_range<1>(N, B), [=](nd_item<1> item)[[intel::reqd_sub_group_size(32)]] {
//# get sub_group handle
auto sg = item.get_sub_group();
//# query sub_group and print sub_group info once per sub_group
if (sg.get_local_id()[0] == 0) {
out << "sub_group id: " << sg.get_group_id()[0] << " of "
<< sg.get_group_range()[0] << ", size=" << sg.get_local_range()[0]
<< "\n";
}
});
}).wait();
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/04_SYCL_Sub_Groups/src/sub_group_info.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static constexpr size_t N = 64; // global size
static constexpr size_t B = 64; // work-group size
int main() {
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
q.submit([&](handler &h) {
//# setup sycl stream class to print standard output from device code
auto out = stream(1024, 768, h);
//# nd-range kernel
h.parallel_for(nd_range<1>(N, B), [=](nd_item<1> item) {
//# get sub_group handle
auto sg = item.get_sub_group();
//# query sub_group and print sub_group info once per sub_group
if (sg.get_local_id()[0] == 0) {
out << "sub_group id: " << sg.get_group_id()[0] << " of "
<< sg.get_group_range()[0] << ", size=" << sg.get_local_range()[0]
<< "\n";
}
});
}).wait();
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/04_SYCL_Sub_Groups/src/sub_group_shuffle.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static constexpr size_t N = 256; // global size
static constexpr size_t B = 64; // work-group size
int main() {
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
//# initialize data array using usm
int *data = malloc_shared<int>(N, q);
for (int i = 0; i < N; i++) data[i] = i;
for (int i = 0; i < N; i++) std::cout << data[i] << " ";
std::cout << "\n\n";
q.parallel_for(nd_range<1>(N, B), [=](nd_item<1> item) {
auto sg = item.get_sub_group();
auto i = item.get_global_id(0);
//# swap adjacent items in array using sub_group permute_group_by_xor
data[i] = permute_group_by_xor(sg, data[i], 1);
//# reverse the order of items in sub_group using permute_group_by_xor
//data[i] = permute_group_by_xor(sg, data[i], sg.get_max_local_range()[0] - 1);
}).wait();
for (int i = 0; i < N; i++) std::cout << data[i] << " ";
std::cout << "\n";
free(data, q);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/04_SYCL_Sub_Groups/src/sub_group_reduce.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static constexpr size_t N = 256; // global size
static constexpr size_t B = 64; // work-group size
int main() {
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
//# initialize data array using usm
int *data = malloc_shared<int>(N, q);
for (int i = 0; i < N; i++) data[i] = i;
for (int i = 0; i < N; i++) std::cout << data[i] << " ";
std::cout << "\n\n";
q.parallel_for(nd_range<1>(N, B), [=](nd_item<1> item) {
auto sg = item.get_sub_group();
auto i = item.get_global_id(0);
//# Adds all elements in sub_group using sub_group collectives
int sum = reduce_over_group(sg, data[i], plus<>());
//# write sub_group sum in first location for each sub_group
if (sg.get_local_id()[0] == 0) {
data[i] = sum;
} else {
data[i] = 0;
}
}).wait();
for (int i = 0; i < N; i++) std::cout << data[i] << " ";
std::cout << "\n";
free(data, q);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/04_SYCL_Sub_Groups/src/sub_group_lab.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static constexpr size_t N = 1024; // global size
static constexpr size_t B = 256; // work-group size
static constexpr size_t S = 32; // sub-group size
int main() {
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
//# allocate USM shared allocation for input data array and sg_data array
int *data = malloc_shared<int>(N, q);
int *sg_data = malloc_shared<int>(N/S, q);
//# initialize input data array
for (int i = 0; i < N; i++) data[i] = i;
for (int i = 0; i < N; i++) std::cout << data[i] << " ";
std::cout << "\n\n";
//# Kernel task to compute sub-group sum and save to sg_data array
//# STEP 1 : fix the sub_group size to value S in the kernel below
q.parallel_for(nd_range<1>(N, B), [=](nd_item<1> item)[[intel::reqd_sub_group_size(S)]] {
auto sg = item.get_sub_group();
auto i = item.get_global_id(0);
//# STEP 2: Add all elements in sub_group using sub_group reduce
int sg_sum = reduce_over_group(sg, data[i], plus<>());
//# STEP 3 : save each sub-group sum to sg_data array
if (sg.get_local_id()[0] == 0) {
sg_data[i/S] = sg_sum;
}
}).wait();
//# print sg_data array
for (int i = 0; i < N/S; i++) std::cout << sg_data[i] << " ";
std::cout << "\n";
//# STEP 4: compute sum of all elements in sg_data array
int sum = 0;
for (int i = 0; i < N/S; i++) sum += sg_data[i];
std::cout << "\nSum = " << sum << "\n";
//# free USM allocations
free(data, q);
free(sg_data, q);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/04_SYCL_Sub_Groups/src/sub_group_votes.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static constexpr size_t N = 32; // global size
static constexpr size_t B = 16; // work-group size
int main() {
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
//# initialize input and output array using usm
auto input = malloc_shared<int>(N, q);
auto all = malloc_shared<int>(N, q);
auto any = malloc_shared<int>(N, q);
auto none = malloc_shared<int>(N, q);
//# initialize values for input array
for(int i=0; i<N; i++) { if (i< 10) input[i] = 0; else input[i] = i; }
std::cout << "input:\n";
for(int i=0; i<N; i++) std::cout << input[i] << " "; std::cout << "\n";
//# use parallel_for and sub_groups
q.parallel_for(nd_range<1>(N, B), [=](nd_item<1> item)[[intel::reqd_sub_group_size(8)]] {
auto sg = item.get_sub_group();
auto i = item.get_global_id(0);
//# write items with vote functions
all[i] = all_of_group(sg, input[i]);
any[i] = any_of_group(sg, input[i]);
none[i] = none_of_group(sg, input[i]);
}).wait();
std::cout << "all_of:\n";
for(int i=0; i<N; i++) std::cout << all[i] << " "; std::cout << "\n";
std::cout << "any_of:\n";
for(int i=0; i<N; i++) std::cout << any[i] << " "; std::cout << "\n";
std::cout << "none_of:\n";
for(int i=0; i<N; i++) std::cout << none[i] << " "; std::cout << "\n";
free(input, q);
free(all, q);
free(any, q);
free(none, q);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/04_SYCL_Sub_Groups/src/sub_group_broadcast.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static constexpr size_t N = 256; // global size
static constexpr size_t B = 64; // work-group size
int main() {
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
//# initialize data array using usm
int *data = malloc_shared<int>(N, q);
for(int i=0; i<N; i++) data[i] = i;
for(int i=0; i<N; i++) std::cout << data[i] << " ";
std::cout << "\n";
//# use parallel_for and sub_groups
q.parallel_for(nd_range<1>(N, B), [=](nd_item<1> item) {
auto sg = item.get_sub_group();
auto i = item.get_global_id(0);
//# write sub_group item values to broadcast value at index 3
data[i] = group_broadcast(sg, data[i], 3);
}).wait();
for(int i=0; i<N; i++) std::cout << data[i] << " ";
std::cout << "\n";
free(data, q);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/04_SYCL_Sub_Groups/lab/sub_group_reqd_size.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static constexpr size_t N = 64; // global size
static constexpr size_t B = 64; // work-group size
int main() {
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
//# get all supported sub_group sizes and print
auto sg_sizes = q.get_device().get_info<info::device::sub_group_sizes>();
std::cout << "Supported Sub-Group Sizes : ";
for (int i=0; i<sg_sizes.size(); i++) std::cout << sg_sizes[i] << " "; std::cout << "\n";
//# find out maximum supported sub_group size
auto max_sg_size = std::max_element(sg_sizes.begin(), sg_sizes.end());
std::cout << "Max Sub-Group Size : " << max_sg_size[0] << "\n";
q.submit([&](handler &h) {
//# setup sycl stream class to print standard output from device code
auto out = stream(1024, 768, h);
//# nd-range kernel with user specified sub_group size
h.parallel_for(nd_range<1>(N, B), [=](nd_item<1> item)[[intel::reqd_sub_group_size(32)]] {
//# get sub_group handle
auto sg = item.get_sub_group();
//# query sub_group and print sub_group info once per sub_group
if (sg.get_local_id()[0] == 0) {
out << "sub_group id: " << sg.get_group_id()[0] << " of "
<< sg.get_group_range()[0] << ", size=" << sg.get_local_range()[0]
<< "\n";
}
});
}).wait();
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/04_SYCL_Sub_Groups/lab/sub_group_info.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static constexpr size_t N = 64; // global size
static constexpr size_t B = 64; // work-group size
int main() {
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
q.submit([&](handler &h) {
//# setup sycl stream class to print standard output from device code
auto out = stream(1024, 768, h);
//# nd-range kernel
h.parallel_for(nd_range<1>(N, B), [=](nd_item<1> item) {
//# get sub_group handle
auto sg = item.get_sub_group();
//# query sub_group and print sub_group info once per sub_group
if (sg.get_local_id()[0] == 0) {
out << "sub_group id: " << sg.get_group_id()[0] << " of "
<< sg.get_group_range()[0] << ", size=" << sg.get_local_range()[0]
<< "\n";
}
});
}).wait();
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/04_SYCL_Sub_Groups/lab/sub_group_shuffle.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static constexpr size_t N = 256; // global size
static constexpr size_t B = 64; // work-group size
int main() {
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
//# initialize data array using usm
int *data = malloc_shared<int>(N, q);
for (int i = 0; i < N; i++) data[i] = i;
for (int i = 0; i < N; i++) std::cout << data[i] << " ";
std::cout << "\n\n";
q.parallel_for(nd_range<1>(N, B), [=](nd_item<1> item) {
auto sg = item.get_sub_group();
auto i = item.get_global_id(0);
//# swap adjacent items in array using sub_group permute_group_by_xor
data[i] = permute_group_by_xor(sg, data[i], 1);
//# reverse the order of items in sub_group using permute_group_by_xor
//data[i] = permute_group_by_xor(sg, data[i], sg.get_max_local_range()[0] - 1);
}).wait();
for (int i = 0; i < N; i++) std::cout << data[i] << " ";
std::cout << "\n";
free(data, q);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/04_SYCL_Sub_Groups/lab/sub_group_reduce.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static constexpr size_t N = 256; // global size
static constexpr size_t B = 64; // work-group size
int main() {
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
//# initialize data array using usm
int *data = malloc_shared<int>(N, q);
for (int i = 0; i < N; i++) data[i] = i;
for (int i = 0; i < N; i++) std::cout << data[i] << " ";
std::cout << "\n\n";
q.parallel_for(nd_range<1>(N, B), [=](nd_item<1> item) {
auto sg = item.get_sub_group();
auto i = item.get_global_id(0);
//# Add all elements in sub_group using sub_group collectives
int result = reduce_over_group(sg, data[i], plus<>());
//# write sub_group sum in first location for each sub_group
if (sg.get_local_id()[0] == 0) {
data[i] = result;
} else {
data[i] = 0;
}
}).wait();
for (int i = 0; i < N; i++) std::cout << data[i] << " ";
std::cout << "\n";
free(data, q);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/04_SYCL_Sub_Groups/lab/sub_group_lab.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static constexpr size_t N = 1024; // global size
static constexpr size_t B = 256; // work-group size
static constexpr size_t S = 32; // sub-group size
int main() {
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
//# allocate USM shared allocation for input data array and sg_data array
int *data = malloc_shared<int>(N, q);
int *sg_data = malloc_shared<int>(N/S, q);
//# initialize input data array
for (int i = 0; i < N; i++) data[i] = i;
for (int i = 0; i < N; i++) std::cout << data[i] << " ";
std::cout << "\n\n";
//# Kernel task to compute sub-group sum and save to sg_data array
//# STEP 1 : set fixed sub_group size of value S in the kernel below
q.parallel_for(nd_range<1>(N, B), [=](nd_item<1> item) {
auto sg = item.get_sub_group();
auto i = item.get_global_id(0);
//# STEP 2: Add all elements in sub_group using sub_group reduce
//# YOUR CODE GOES HERE
//# STEP 3 : save each sub-group sum to sg_data array
//# YOUR CODE GOES HERE
}).wait();
//# print sg_data array
for (int i = 0; i < N/S; i++) std::cout << sg_data[i] << " ";
std::cout << "\n";
//# STEP 4: compute sum of all elements in sg_data array
int sum = 0;
//# YOUR CODE GOES HERE
std::cout << "\nSum = " << sum << "\n";
//# free USM allocations
free(data, q);
free(sg_data, q);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/04_SYCL_Sub_Groups/lab/sub_group_votes.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static constexpr size_t N = 32; // global size
static constexpr size_t B = 16; // work-group size
int main() {
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
//# initialize input and output array using usm
auto input = malloc_shared<int>(N, q);
auto all = malloc_shared<int>(N, q);
auto any = malloc_shared<int>(N, q);
auto none = malloc_shared<int>(N, q);
//# initialize values for input array
for(int i=0; i<N; i++) { if (i< 10) input[i] = 0; else input[i] = i; }
std::cout << "input:\n";
for(int i=0; i<N; i++) std::cout << input[i] << " "; std::cout << "\n";
//# use parallel_for and sub_groups
q.parallel_for(nd_range<1>(N, B), [=](nd_item<1> item)[[intel::reqd_sub_group_size(8)]] {
auto sg = item.get_sub_group();
auto i = item.get_global_id(0);
//# write items with vote functions
all[i] = all_of_group(sg, input[i]);
any[i] = any_of_group(sg, input[i]);
none[i] = none_of_group(sg, input[i]);
}).wait();
std::cout << "all_of:\n";
for(int i=0; i<N; i++) std::cout << all[i] << " "; std::cout << "\n";
std::cout << "any_of:\n";
for(int i=0; i<N; i++) std::cout << any[i] << " "; std::cout << "\n";
std::cout << "none_of:\n";
for(int i=0; i<N; i++) std::cout << none[i] << " "; std::cout << "\n";
free(input, q);
free(all, q);
free(any, q);
free(none, q);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/04_SYCL_Sub_Groups/lab/sub_group_broadcast.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static constexpr size_t N = 256; // global size
static constexpr size_t B = 64; // work-group size
int main() {
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
//# initialize data array using usm
int *data = malloc_shared<int>(N, q);
for(int i=0; i<N; i++) data[i] = i;
for(int i=0; i<N; i++) std::cout << data[i] << " ";
std::cout << "\n\n";
//# use parallel_for and sub_groups
q.parallel_for(nd_range<1>(N, B), [=](nd_item<1> item) {
auto sg = item.get_sub_group();
auto i = item.get_global_id(0);
//# write sub_group item values to broadcast value at index 3
data[i] = group_broadcast(sg, data[i], 3);
}).wait();
for(int i=0; i<N; i++) std::cout << data[i] << " ";
std::cout << "\n";
free(data, q);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/02_SYCL_Program_Structure/src/buffer_destruction2.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
constexpr int N = 16;
using namespace sycl;
// Buffer creation happens within a separate function scope.
void SYCL_code(std::vector<int> &v, queue &q) {
auto R = range<1>(N);
buffer buf(v);
q.submit([&](handler &h) {
accessor a(buf,h);
h.parallel_for(R, [=](auto i) { a[i] -= 2; });
});
}
int main() {
std::vector<int> v(N, 10);
queue q;
SYCL_code(v, q);
// When execution advances beyond the scope of SYCL_code, the buffer destructor is
// invoked, which relinquishes ownership of the data and copies it back to
// host memory.
for (int i = 0; i < N; i++) std::cout << v[i] << "\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/02_SYCL_Program_Structure/src/complex_mult_solution.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <iomanip>
#include <vector>
#include "Complex.hpp"
using namespace sycl;
using namespace std;
// Number of complex numbers passing to the SYCL code
static const int num_elements = 10000;
class CustomDeviceSelector {
public:
CustomDeviceSelector(std::string vendorName) : vendorName_(vendorName){};
int operator()(const device &dev) const {
int device_rating = 0;
//We query for a GPU device from the specified vendor and give it the highest
//rating of 3. Any other GPU device gets a rating of 2, and a CPU device gets
//a rating of 1.
//**************Step1: Uncomment the following lines where you are setting the rating for the devices********
if (dev.is_gpu() && (dev.get_info<info::device::name>().find(vendorName_) !=
std::string::npos))
device_rating = 3;
else if (dev.is_gpu())
device_rating = 2;
else if (dev.is_cpu())
device_rating = 1;
return device_rating;
};
private:
std::string vendorName_;
};
// in_vect1 and in_vect2 are vectors of num_elements complex numbers and
// are inputs to the parallel function
void SYCLParallel(queue &q, std::vector<Complex2> &in_vect1,
std::vector<Complex2> &in_vect2,
std::vector<Complex2> &out_vect) {
auto R = range(in_vect1.size());
if (in_vect2.size() != in_vect1.size() || out_vect.size() != in_vect1.size()){
std::cout << "ERROR: Vector sizes do not match"<< "\n";
return;
}
// Setup input buffers
buffer bufin_vect1(in_vect1);
buffer bufin_vect2(in_vect2);
// Setup Output buffers
buffer bufout_vect(out_vect);
std::cout << "Target Device: "
<< q.get_device().get_info<info::device::name>() << "\n";
// Submit Command group function object to the queue
q.submit([&](auto &h) {
// Accessors set as read mode
accessor V1(bufin_vect1,h,read_only);
accessor V2(bufin_vect2,h,read_only);
// Accessor set to Write mode
//**************STEP 2: Uncomment the below line to set the Write Accessor********************
accessor V3 (bufout_vect,h,write_only);
h.parallel_for(R, [=](auto i) {
//**************STEP 3: Uncomment the below line to call the complex_mul function that computes the multiplication
//of the complex numbers********************
V3[i] = V1[i].complex_mul(V2[i]);
});
});
q.wait_and_throw();
}
void Scalar(std::vector<Complex2> &in_vect1,
std::vector<Complex2> &in_vect2,
std::vector<Complex2> &out_vect) {
if ((in_vect2.size() != in_vect1.size()) || (out_vect.size() != in_vect1.size())){
std::cout<<"ERROR: Vector sizes do not match"<<"\n";
return;
}
for (int i = 0; i < in_vect1.size(); i++) {
out_vect[i] = in_vect1[i].complex_mul(in_vect2[i]);
}
}
// Compare the results of the two output vectors from parallel and scalar. They
// should be equal
int Compare(std::vector<Complex2> &v1, std::vector<Complex2> &v2) {
int ret_code = 1;
if(v1.size() != v2.size()){
ret_code = -1;
}
for (int i = 0; i < v1.size(); i++) {
if (v1[i] != v2[i]) {
ret_code = -1;
break;
}
}
return ret_code;
}
int main() {
// Declare your Input and Output vectors of the Complex2 class
vector<Complex2> input_vect1;
vector<Complex2> input_vect2;
vector<Complex2> out_vect_parallel;
vector<Complex2> out_vect_scalar;
for (int i = 0; i < num_elements; i++) {
input_vect1.push_back(Complex2(i + 2, i + 4));
input_vect2.push_back(Complex2(i + 4, i + 6));
out_vect_parallel.push_back(Complex2(0, 0));
out_vect_scalar.push_back(Complex2(0, 0));
}
// Initialize your Input and Output Vectors. Inputs are initialized as below.
// Outputs are initialized with 0
try {
// Pass in the name of the vendor for which the device you want to query
std::string vendor_name = "Intel";
// std::string vendor_name = "AMD";
// std::string vendor_name = "Nvidia";
// Create a queue using the custom device selector
CustomDeviceSelector selector(vendor_name);
queue q(selector);
// Call the SYCLParallel with the required inputs and outputs
SYCLParallel(q, input_vect1, input_vect2, out_vect_parallel);
} catch (...) {
// some other exception detected
std::cout << "Failure" << "\n";
std::terminate();
}
std::cout
<< "****************************************Multiplying Complex numbers "
"in Parallel********************************************************"
<< "\n";
// Print the outputs of the Parallel function
int indices[]{0, 1, 2, 3, 4, (num_elements - 1)};
constexpr size_t indices_size = sizeof(indices) / sizeof(int);
for (int i = 0; i < indices_size; i++) {
int j = indices[i];
if (i == indices_size - 1) std::cout << "...\n";
std::cout << "[" << j << "] " << input_vect1[j] << " * " << input_vect2[j]
<< " = " << out_vect_parallel[j] << "\n";
}
// Call the Scalar function with the required input and outputs
Scalar(input_vect1, input_vect2, out_vect_scalar);
// Compare the outputs from the parallel and the scalar functions. They should
// be equal
int ret_code = Compare(out_vect_parallel, out_vect_scalar);
if (ret_code == 1) {
std::cout << "Complex multiplication successfully run on the device"
<< "\n";
} else
std::cout
<< "*********************************************Verification Failed. Results are "
"not matched**************************"
<< "\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/02_SYCL_Program_Structure/src/vector_add_usm_buffers.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
// kernel function to compute vector add using Unified Shared memory model (USM)
void kernel_usm(int* a, int* b, int* c, int N) {
//Step 1: create a device queue
queue q;
//Step 2: create USM device allocation
auto a_device = malloc_device<int>(N, q);
auto b_device = malloc_device<int>(N, q);
auto c_device = malloc_device<int>(N, q);
//Step 3: copy memory from host to device
q.memcpy(a_device, a, N*sizeof(int));
q.memcpy(b_device, b, N*sizeof(int));
q.wait();
//Step 4: send a kernel (lambda) for execution
q.parallel_for(N, [=](auto i){
//Step 5: write a kernel
c_device[i] = a_device[i] + b_device[i];
}).wait();
//Step 6: copy the result back to host
q.memcpy(c, c_device, N*sizeof(int)).wait();
//Step 7: free device allocation
free(a_device, q);
free(b_device, q);
free(c_device, q);
}
// kernel function to compute vector add using Buffer memory model
void kernel_buffers(int* a, int* b, int* c, int N) {
//Step 1: create a device queue
queue q;
//Step 2: create buffers
buffer buf_a(a, range<1>(N));
buffer buf_b(b, range<1>(N));
buffer buf_c(c, range<1>(N));
//Step 3: submit a command for (asynchronous) execution
q.submit([&](handler &h){
//Step 4: create buffer accessors to access buffer data on the device
accessor A(buf_a, h, read_only);
accessor B(buf_b, h, read_only);
accessor C(buf_c, h, write_only);
//Step 5: send a kernel (lambda) for execution
h.parallel_for(N, [=](auto i){
//Step 6: write a kernel
C[i] = A[i] + B[i];
});
});
}
int main() {
// initialize data arrays on host
constexpr int N = 256;
int a[N], b[N], c[N];
for (int i=0; i<N; i++){
a[i] = 1;
b[i] = 2;
}
// initialize c = 0 and offload computation using USM, print output
for (int i=0; i<N; i++) c[i] = 0;
kernel_usm(a, b, c, N);
std::cout << "Vector Add Output (USM): \n";
for (int i=0; i<N; i++)std::cout << c[i] << " ";std::cout << "\n";
// initialize c = 0 and offload computation using Buffers, print output
for (int i=0; i<N; i++) c[i] = 0;
std::cout << "Vector Add Output (Buffers): \n";
kernel_buffers(a, b, c, N);
for (int i=0; i<N; i++)std::cout << c[i] << " ";std::cout << "\n";
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/02_SYCL_Program_Structure/src/custom_device_sample.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <iostream>
using namespace sycl;
class my_device_selector {
public:
my_device_selector(std::string vendorName) : vendorName_(vendorName){};
int operator()(const device& dev) const {
int rating = 0;
//We query for a GPU device from the specified vendor and give it the highest
//rating of 3. Any other GPU device gets a rating of 2, and a CPU device gets
//a rating of 1.
if (dev.is_gpu() && (dev.get_info<info::device::name>().find(vendorName_) != std::string::npos))
rating = 3;
else if (dev.is_gpu()) rating = 2;
else if (dev.is_cpu()) rating = 1;
return rating;
};
private:
std::string vendorName_;
};
int main() {
//pass in the name of the vendor for which the device you want to query
std::string vendor_name = "Intel";
//std::string vendor_name = "AMD";
//std::string vendor_name = "Nvidia";
my_device_selector selector(vendor_name);
queue q(selector);
std::cout << "Device: "
<< q.get_device().get_info<info::device::name>() << "\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/02_SYCL_Program_Structure/src/buffer_sample.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
constexpr int num=16;
using namespace sycl;
int main() {
auto R = range<1>{ num };
//Create Buffers A and B
buffer<int> A{ R }, B{ R };
//Create a device queue
queue Q;
//Submit Kernel 1
Q.submit([&](handler& h) {
//Accessor for buffer A
accessor out(A,h,write_only);
h.parallel_for(R, [=](auto idx) {
out[idx] = idx[0]; }); });
//Submit Kernel 2
Q.submit([&](handler& h) {
//This task will wait till the first kernel is complete
//Default read_write access is needed because out[idx] += both reads and writes buffer A
accessor out(A,h);
h.parallel_for(R, [=](auto idx) {
out[idx] += idx[0]; }); });
//Submit Kernel 3
Q.submit([&](handler& h) {
//Accessor for Buffer B
accessor out(B,h,write_only);
h.parallel_for(R, [=](auto idx) {
out[idx] = idx[0]; }); });
//Submit task 4
Q.submit([&](handler& h) {
//This task will wait till kernel 2 and 3 are complete
accessor in (A,h,read_only);
accessor inout(B,h);
h.parallel_for(R, [=](auto idx) {
inout[idx] *= in[idx]; }); });
// And the following is back to host code
host_accessor result(B,read_only);
for (int i=0; i<num; ++i)
std::cout << result[i] << "\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/02_SYCL_Program_Structure/src/Complex.hpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <iostream>
#include <vector>
using namespace std;
class Complex2 {
private:
int m_real_, m_imag_;
public:
Complex2() {
m_real_ = 0;
m_imag_ = 0;
}
Complex2(int x, int y) {
m_real_ = x;
m_imag_ = y;
}
// Overloading the != operator
friend bool operator!=(const Complex2& a, const Complex2& b) {
return (a.m_real_ != b.m_real_) || (a.m_imag_ != b.m_imag_);
}
// The function performs Complex number multiplication and returns a Complex2
// object.
Complex2 complex_mul(const Complex2& obj) const {
return Complex2(((m_real_ * obj.m_real_) - (m_imag_ * obj.m_imag_)),
((m_real_ * obj.m_imag_) + (m_imag_ * obj.m_real_)));
}
// Overloading the ostream operator to print the objects of the Complex2
// object
friend ostream& operator<<(ostream& out, const Complex2& obj) {
out << "(" << obj.m_real_ << " : " << obj.m_imag_ << "i)";
return out;
}
};
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/02_SYCL_Program_Structure/src/complex_mult.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <iomanip>
#include <vector>
#include "Complex.hpp"
using namespace sycl;
using namespace std;
// Number of complex numbers passing to the SYCL code
static const int num_elements = 10000;
class CustomDeviceSelector {
public:
CustomDeviceSelector(std::string vendorName) : vendorName_(vendorName){};
int operator()(const device &dev) const {
int device_rating = 0;
//We query for a GPU device from the specified vendor and give it the highest
//rating of 3. Any other GPU device gets a rating of 2, and a CPU device gets
//a rating of 1.
if (dev.is_gpu() && (dev.get_info<info::device::name>().find(vendorName_) !=
std::string::npos))
device_rating = 3;
else if (dev.is_gpu())
device_rating = 2;
else if (dev.is_cpu())
device_rating = 1;
return device_rating;
};
private:
std::string vendorName_;
};
// in_vect1 and in_vect2 are vectors of num_elements complex numbers and
// are inputs to the parallel function
void SYCLParallel(queue &q, std::vector<Complex2> &in_vect1,
std::vector<Complex2> &in_vect2,
std::vector<Complex2> &out_vect) {
auto R = range(in_vect1.size());
if (in_vect2.size() != in_vect1.size() || out_vect.size() != in_vect1.size()){
std::cout << "ERROR: Vector sizes do not match"<< "\n";
return;
}
// Setup input buffers
buffer bufin_vect1(in_vect1);
buffer bufin_vect2(in_vect2);
// Setup Output buffers
buffer bufout_vect(out_vect);
std::cout << "Target Device: "
<< q.get_device().get_info<info::device::name>() << "\n";
// Submit Command group function object to the queue
q.submit([&](auto &h) {
// Accessors set as read mode
accessor V1(bufin_vect1,h,read_only);
accessor V2(bufin_vect2,h,read_only);
// Accessor set to Write mode
accessor V3 (bufout_vect,h,write_only);
h.parallel_for(R, [=](auto i) {
V3[i] = V1[i].complex_mul(V2[i]);
});
});
q.wait_and_throw();
}
void Scalar(std::vector<Complex2> &in_vect1,
std::vector<Complex2> &in_vect2,
std::vector<Complex2> &out_vect) {
if ((in_vect2.size() != in_vect1.size()) || (out_vect.size() != in_vect1.size())){
std::cout<<"ERROR: Vector sizes do not match"<<"\n";
return;
}
for (int i = 0; i < in_vect1.size(); i++) {
out_vect[i] = in_vect1[i].complex_mul(in_vect2[i]);
}
}
// Compare the results of the two output vectors from parallel and scalar. They
// should be equal
int Compare(std::vector<Complex2> &v1, std::vector<Complex2> &v2) {
int ret_code = 1;
if(v1.size() != v2.size()){
ret_code = -1;
}
for (int i = 0; i < v1.size(); i++) {
if (v1[i] != v2[i]) {
ret_code = -1;
break;
}
}
return ret_code;
}
int main() {
// Declare your Input and Output vectors of the Complex2 class
vector<Complex2> input_vect1;
vector<Complex2> input_vect2;
vector<Complex2> out_vect_parallel;
vector<Complex2> out_vect_scalar;
for (int i = 0; i < num_elements; i++) {
input_vect1.push_back(Complex2(i + 2, i + 4));
input_vect2.push_back(Complex2(i + 4, i + 6));
out_vect_parallel.push_back(Complex2(0, 0));
out_vect_scalar.push_back(Complex2(0, 0));
}
// Initialize your Input and Output Vectors. Inputs are initialized as below.
// Outputs are initialized with 0
try {
// Pass in the name of the vendor for which the device you want to query
std::string vendor_name = "Intel";
// std::string vendor_name = "AMD";
// std::string vendor_name = "Nvidia";
CustomDeviceSelector selector(vendor_name);
queue q(selector);
// Call the SYCLParallel with the required inputs and outputs
SYCLParallel(q, input_vect1, input_vect2, out_vect_parallel);
} catch (...) {
// some other exception detected
std::cout << "Failure" << "\n";
std::terminate();
}
std::cout
<< "****************************************Multiplying Complex numbers "
"in Parallel********************************************************"
<< "\n";
// Print the outputs of the Parallel function
int indices[]{0, 1, 2, 3, 4, (num_elements - 1)};
constexpr size_t indices_size = sizeof(indices) / sizeof(int);
for (int i = 0; i < indices_size; i++) {
int j = indices[i];
if (i == indices_size - 1) std::cout << "...\n";
std::cout << "[" << j << "] " << input_vect1[j] << " * " << input_vect2[j]
<< " = " << out_vect_parallel[j] << "\n";
}
// Call the Scalar function with the required input and outputs
Scalar(input_vect1, input_vect2, out_vect_scalar);
// Compare the outputs from the parallel and the scalar functions. They should
// be equal
int ret_code = Compare(out_vect_parallel, out_vect_scalar);
if (ret_code == 1) {
std::cout << "Complex multiplication successfully run on the device"
<< "\n";
} else
std::cout
<< "*********************************************Verification Failed. Results are "
"not matched**************************"
<< "\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/02_SYCL_Program_Structure/src/gpu_sample.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
int main() {
//# Create a device queue with device selector
queue q(gpu_selector_v);
//queue q(cpu_selector_v);
//queue q(accelerator_selector_v);
//queue q(default_selector_v);
//queue q;
//# Print the device name
std::cout << "Device: " << q.get_device().get_info<info::device::name>() << "\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/02_SYCL_Program_Structure/src/vector_add.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
int main() {
const int N = 256;
//# Initialize a vector and print values
std::vector<int> vector1(N, 10);
std::cout<<"\nInput Vector1: ";
for (int i = 0; i < N; i++) {
std::cout << vector1[i] << " ";
}
//# STEP 1 : Create second vector, initialize to 20 and print values
std::vector<int> vector2(N, 20);
std::cout<<"\nInput Vector2: ";
for (int i = 0; i < N; i++) std::cout << vector2[i] << " ";
//# Create Buffer
buffer vector1_buffer(vector1);
//# STEP 2 : Create buffer for second vector
buffer vector2_buffer(vector2);
//# Submit task to add vector
queue q;
q.submit([&](handler &h) {
//# Create accessor for vector1_buffer
accessor vector1_accessor (vector1_buffer,h);
//# STEP 3 - add second accessor for second buffer
accessor vector2_accessor (vector2_buffer,h, read_only);
h.parallel_for(range<1>(N), [=](id<1> index) {
//# STEP 4 : Modify the code below to add the second vector to first one
vector1_accessor[index] += vector2_accessor[index];
});
});
//# Create a host accessor to copy data from device to host
host_accessor h_a(vector1_buffer,read_only);
//# Print Output values
std::cout<<"\nOutput Values: ";
for (int i = 0; i < N; i++) std::cout<< vector1[i] << " ";
std::cout<<"\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/02_SYCL_Program_Structure/src/host_accessor_sample.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
int main() {
constexpr int N = 16;
auto R = range<1>(N);
std::vector<int> v(N, 10);
queue q;
// Buffer takes ownership of the data stored in vector.
buffer buf(v);
q.submit([&](handler& h) {
accessor a(buf,h);
h.parallel_for(R, [=](auto i) { a[i] -= 2; });
});
// Creating host accessor is a blocking call and will only return after all
// enqueued SYCL kernels that modify the same buffer in any queue completes
// execution and the data is available to the host via this host accessor.
host_accessor b(buf,read_only);
for (int i = 0; i < N; i++) std::cout << b[i] << " ";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/02_SYCL_Program_Structure/lab/buffer_destruction2.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
constexpr int N = 16;
using namespace sycl;
// Buffer creation happens within a separate function scope.
void SYCL_code(std::vector<int> &v, queue &q) {
auto R = range<1>(N);
buffer buf(v);
q.submit([&](handler &h) {
accessor a(buf,h);
h.parallel_for(R, [=](auto i) { a[i] -= 2; });
});
}
int main() {
std::vector<int> v(N, 10);
queue q;
SYCL_code(v, q);
// When execution advances beyond the scope of SYCL_code, the buffer destructor is
// invoked, which relinquishes ownership of the data and copies it back to
// host memory.
for (int i = 0; i < N; i++) std::cout << v[i] << " ";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/02_SYCL_Program_Structure/lab/vector_add_usm_buffers.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
// kernel function to compute vector add using Unified Shared memory model (USM)
void kernel_usm(int* a, int* b, int* c, int N) {
//Step 1: create a device queue
queue q;
//Step 2: create USM device allocation
auto a_device = malloc_device<int>(N, q);
auto b_device = malloc_device<int>(N, q);
auto c_device = malloc_device<int>(N, q);
//Step 3: copy memory from host to device
q.memcpy(a_device, a, N*sizeof(int));
q.memcpy(b_device, b, N*sizeof(int));
q.wait();
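//Note: the default queue is out-of-order, so the wait() above is required to
//ensure both memcpy operations finish before the kernel reads a_device and
//b_device. (A malloc_shared allocation would avoid the explicit copies at the
//cost of implicit data migration.)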
//Step 4: send a kernel (lambda) for execution
q.parallel_for(N, [=](auto i){
//Step 5: write a kernel
c_device[i] = a_device[i] + b_device[i];
}).wait();
//Step 6: copy the result back to host
q.memcpy(c, c_device, N*sizeof(int)).wait();
//Step 7: free device allocation
free(a_device, q);
free(b_device, q);
free(c_device, q);
}
// kernel function to compute vector add using Buffer memory model
void kernel_buffers(int* a, int* b, int* c, int N) {
//Step 1: create a device queue
queue q;
//Step 2: create buffers
buffer buf_a(a, range<1>(N));
buffer buf_b(b, range<1>(N));
buffer buf_c(c, range<1>(N));
//Step 3: submit a command for (asynchronous) execution
q.submit([&](handler &h){
//Step 4: create buffer accessors to access buffer data on the device
accessor A(buf_a, h, read_only);
accessor B(buf_b, h, read_only);
accessor C(buf_c, h, write_only);
//Step 5: send a kernel (lambda) for execution
h.parallel_for(N, [=](auto i){
//Step 6: write a kernel
C[i] = A[i] + B[i];
});
});
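//No explicit wait or copy-back is needed here: when buf_a, buf_b and buf_c go
//out of scope at the end of this function, their destructors block until the
//kernel finishes and the results are written back to the host arrays.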
}
int main() {
// initialize data arrays on host
constexpr int N = 256;
int a[N], b[N], c[N];
for (int i=0; i<N; i++){
a[i] = 1;
b[i] = 2;
}
// initialize c = 0 and offload computation using USM, print output
for (int i=0; i<N; i++) c[i] = 0;
kernel_usm(a, b, c, N);
std::cout << "Vector Add Output (USM): \n";
for (int i=0; i<N; i++)std::cout << c[i] << " ";std::cout << "\n";
// initialize c = 0 and offload computation using Buffers, print output
for (int i=0; i<N; i++) c[i] = 0;
std::cout << "Vector Add Output (Buffers): \n";
kernel_buffers(a, b, c, N);
for (int i=0; i<N; i++)std::cout << c[i] << " ";std::cout << "\n";
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/02_SYCL_Program_Structure/lab/custom_device_sample.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <iostream>
using namespace sycl;
class my_device_selector {
public:
my_device_selector(std::string vendorName) : vendorName_(vendorName){};
int operator()(const device& dev) const {
int rating = 0;
//Rate the device: a GPU whose name contains the requested vendor gets the
//highest rating (3), any other GPU gets 2, and a CPU gets 1.
if (dev.is_gpu() && (dev.get_info<info::device::name>().find(vendorName_) != std::string::npos))
rating = 3;
else if (dev.is_gpu()) rating = 2;
else if (dev.is_cpu()) rating = 1;
return rating;
};
private:
std::string vendorName_;
};
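//The queue constructor below evaluates this callable for every available
//device and selects the one with the highest score; a device that returns a
//negative score is never selected.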
int main() {
//pass in the name of the vendor for which the device you want to query
std::string vendor_name = "Intel";
//std::string vendor_name = "AMD";
//std::string vendor_name = "Nvidia";
my_device_selector selector(vendor_name);
queue q(selector);
std::cout << "Device: "
<< q.get_device().get_info<info::device::name>() << "\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/02_SYCL_Program_Structure/lab/buffer_sample.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
constexpr int num=16;
using namespace sycl;
int main() {
auto R = range<1>{ num };
//Create Buffers A and B
buffer<int> A{ R }, B{ R };
//Create a device queue
queue Q;
//Submit Kernel 1
Q.submit([&](handler& h) {
//Accessor for buffer A
accessor out(A,h,write_only);
h.parallel_for(R, [=](auto idx) {
out[idx] = idx[0]; }); });
//Submit Kernel 2
Q.submit([&](handler& h) {
//This task will wait till kernel 1 is complete since both use buffer A. Default access mode is read_write
accessor out(A,h);
h.parallel_for(R, [=](auto idx) {
out[idx] += idx[0]; }); });
//Submit Kernel 3
Q.submit([&](handler& h) {
//Accessor for Buffer B
accessor out(B,h,write_only);
h.parallel_for(R, [=](auto idx) {
out[idx] = idx[0]; }); });
//Submit task 4
Q.submit([&](handler& h) {
//This task will wait till kernel 2 and 3 are complete
accessor in (A,h,read_only);
accessor inout(B,h);
h.parallel_for(R, [=](auto idx) {
inout[idx] *= in[idx]; }); });
// The host_accessor below synchronizes the results back to the host
host_accessor result(B,read_only);
for (int i=0; i<num; ++i)
std::cout << result[i] << "\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/02_SYCL_Program_Structure/lab/complex_mult.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <iomanip>
#include <vector>
#include "Complex.hpp"
using namespace sycl;
using namespace std;
// Number of complex numbers passed to the SYCL code
static const int num_elements = 10000;
class CustomDeviceSelector {
public:
CustomDeviceSelector(std::string vendorName) : vendorName_(vendorName){};
int operator()(const device &dev) {
int device_rating = 0;
//Rate the device: a GPU whose name contains the requested vendor gets the
//highest rating (3), any other GPU gets 2, and a CPU gets 1.
if (dev.is_gpu() && (dev.get_info<info::device::name>().find(vendorName_) !=
std::string::npos))
device_rating = 3;
else if (dev.is_gpu())
device_rating = 2;
else if (dev.is_cpu())
device_rating = 1;
return device_rating;
};
private:
std::string vendorName_;
};
// in_vect1 and in_vect2 are the vectors with num_elements complex numbers and
// are inputs to the parallel function
void SYCLParallel(queue &q, std::vector<Complex2> &in_vect1,
std::vector<Complex2> &in_vect2,
std::vector<Complex2> &out_vect) {
auto R = range(in_vect1.size());
if (in_vect2.size() != in_vect1.size() || out_vect.size() != in_vect1.size()){
std::cout << "ERROR: Vector sizes do not match"<< "\n";
return;
}
// Setup input buffers
buffer bufin_vect1(in_vect1);
buffer bufin_vect2(in_vect2);
// Setup Output buffers
buffer bufout_vect(out_vect);
std::cout << "Target Device: "
<< q.get_device().get_info<info::device::name>() << "\n";
// Submit Command group function object to the queue
q.submit([&](auto &h) {
// Accessors set as read mode
accessor V1(bufin_vect1,h,read_only);
accessor V2(bufin_vect2,h,read_only);
// Accessor set to Write mode
accessor V3 (bufout_vect,h,write_only);
h.parallel_for(R, [=](auto i) {
V3[i] = V1[i].complex_mul(V2[i]);
});
});
q.wait_and_throw();
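// wait_and_throw() waits for the kernel and rethrows any asynchronous errors;
// the results become visible in out_vect when bufout_vect is destroyed at the
// end of this function scope.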
}
void Scalar(std::vector<Complex2> &in_vect1,
std::vector<Complex2> &in_vect2,
std::vector<Complex2> &out_vect) {
if ((in_vect2.size() != in_vect1.size()) || (out_vect.size() != in_vect1.size())){
std::cout<<"ERROR: Vector sizes do not match"<<"\n";
return;
}
for (int i = 0; i < in_vect1.size(); i++) {
out_vect[i] = in_vect1[i].complex_mul(in_vect2[i]);
}
}
// Compare the results of the two output vectors from parallel and scalar. They
// should be equal
int Compare(std::vector<Complex2> &v1, std::vector<Complex2> &v2) {
int ret_code = 1;
if(v1.size() != v2.size()){
ret_code = -1;
}
for (int i = 0; i < v1.size(); i++) {
if (v1[i] != v2[i]) {
ret_code = -1;
break;
}
}
return ret_code;
}
int main() {
// Declare your Input and Output vectors of the Complex2 class
vector<Complex2> input_vect1;
vector<Complex2> input_vect2;
vector<Complex2> out_vect_parallel;
vector<Complex2> out_vect_scalar;
for (int i = 0; i < num_elements; i++) {
input_vect1.push_back(Complex2(i + 2, i + 4));
input_vect2.push_back(Complex2(i + 4, i + 6));
out_vect_parallel.push_back(Complex2(0, 0));
out_vect_scalar.push_back(Complex2(0, 0));
}
// Initialize your Input and Output Vectors. Inputs are initialized as below.
// Outputs are initialized with 0
try {
// Pass in the name of the vendor for which the device you want to query
std::string vendor_name = "Intel";
// std::string vendor_name = "AMD";
// std::string vendor_name = "Nvidia";
CustomDeviceSelector selector(vendor_name);
queue q(selector);
// Call the SYCLParallel with the required inputs and outputs
SYCLParallel(q, input_vect1, input_vect2, out_vect_parallel);
} catch (...) {
// some other exception detected
std::cout << "Failure" << "\n";
std::terminate();
}
std::cout
<< "****************************************Multiplying Complex numbers "
"in Parallel********************************************************"
<< "\n";
// Print the outputs of the Parallel function
int indices[]{0, 1, 2, 3, 4, (num_elements - 1)};
constexpr size_t indices_size = sizeof(indices) / sizeof(int);
for (int i = 0; i < indices_size; i++) {
int j = indices[i];
if (i == indices_size - 1) std::cout << "...\n";
std::cout << "[" << j << "] " << input_vect1[j] << " * " << input_vect2[j]
<< " = " << out_vect_parallel[j] << "\n";
}
// Call the Scalar function with the required input and outputs
Scalar(input_vect1, input_vect2, out_vect_scalar);
// Compare the outputs from the parallel and the scalar functions. They should
// be equal
int ret_code = Compare(out_vect_parallel, out_vect_scalar);
if (ret_code == 1) {
std::cout << "Complex multiplication successfully run on the device"
<< "\n";
} else
std::cout
<< "*********************************************Verification Failed. Results are "
"not matched**************************"
<< "\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/02_SYCL_Program_Structure/lab/gpu_sample.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
int main() {
//# Create a device queue with device selector
queue q(gpu_selector_v);
//queue q(cpu_selector_v);
//queue q(accelerator_selector_v);
//queue q(default_selector_v);
//queue q;
//# Print the device name
std::cout << "Device: " << q.get_device().get_info<info::device::name>() << "\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/02_SYCL_Program_Structure/lab/vector_add.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
int main() {
const int N = 256;
//# Initialize a vector and print values
std::vector<int> vector1(N, 10);
std::cout<<"\nInput Vector1: ";
for (int i = 0; i < N; i++) std::cout << vector1[i] << " ";
//# STEP 1 : Create second vector, initialize to 20 and print values
//# YOUR CODE GOES HERE
//# Create Buffer
buffer vector1_buffer(vector1);
//# STEP 2 : Create buffer for second vector
//# YOUR CODE GOES HERE
//# Submit task to add vector
queue q;
q.submit([&](handler &h) {
//# Create accessor for vector1_buffer
accessor vector1_accessor (vector1_buffer,h);
//# STEP 3 - add second accessor for second buffer
//# YOUR CODE GOES HERE
h.parallel_for(range<1>(N), [=](id<1> index) {
//# STEP 4 : Modify the code below to add the second vector to first one
vector1_accessor[index] += 1;
});
});
//# Create a host accessor to copy data from device to host
host_accessor h_a(vector1_buffer,read_only);
//# Print Output values
std::cout<<"\nOutput Values: ";
for (int i = 0; i < N; i++) std::cout<< vector1[i] << " ";
std::cout<<"\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/02_SYCL_Program_Structure/lab/host_accessor_sample.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
int main() {
constexpr int N = 16;
auto R = range<1>(N);
std::vector<int> v(N, 10);
queue q;
// Buffer takes ownership of the data stored in vector.
buffer buf(v);
q.submit([&](handler& h) {
accessor a(buf,h);
h.parallel_for(R, [=](auto i) { a[i] -= 2; });
});
// Creating a host accessor is a blocking call and will only return after all
// enqueued SYCL kernels that modify the same buffer in any queue complete
// execution and the data is available to the host via this host accessor.
host_accessor b(buf,read_only);
for (int i = 0; i < N; i++) std::cout << b[i] << " ";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/08_SYCL_Reduction/src/reduction_custom_operator.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <time.h>
using namespace sycl;
static constexpr size_t N = 256; // global size
static constexpr size_t B = 64; // work-group size
template <typename T, typename I>
struct pair {
bool operator<(const pair& o) const {
// Strict weak ordering: compare by value first, then by index on ties.
return val < o.val || (val == o.val && idx < o.idx);
}
T val;
I idx;
};
int main() {
//# setup queue with default selector
queue q;
//# initialize input data and result using usm
auto result = malloc_shared<pair<int, int>>(1, q);
auto data = malloc_shared<int>(N, q);
//# initialize input data with random numbers
srand(time(0));
for (int i = 0; i < N; ++i) data[i] = rand() % 256;
std::cout << "Input Data:\n";
for (int i = 0; i < N; i++) std::cout << data[i] << " "; std::cout << "\n\n";
//# custom operator for reduction to find minimum value and its index
pair<int, int> operator_identity = {std::numeric_limits<int>::max(), std::numeric_limits<int>::min()};
*result = operator_identity;
auto reduction_object = reduction(result, operator_identity, minimum<pair<int, int>>());
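//# The identity pair {INT_MAX, INT_MIN} compares greater than any pair built
//# from the data, so minimum<> (which uses the operator< defined above) can
//# safely start from it when combining partial results.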
//# parallel_for with user defined reduction object
q.parallel_for(nd_range<1>{N, B}, reduction_object, [=](nd_item<1> item, auto& temp) {
int i = item.get_global_id(0);
temp.combine({data[i], i});
}).wait();
std::cout << "Minimum value and index = " << result->val << " at " << result->idx << "\n";
free(result, q);
free(data, q);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/08_SYCL_Reduction/src/sum_single_task.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static constexpr size_t N = 1024; // global size
int main() {
//# setup queue with default selector
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
//# initialize data array using usm
auto data = malloc_shared<int>(N, q);
for (int i = 0; i < N; i++) data[i] = i;
//# use single_task to add all numbers
q.single_task([=](){
int sum = 0;
for(int i=0;i<N;i++){
sum += data[i];
}
data[0] = sum;
}).wait();
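//# single_task launches exactly one work-item, so the sum above is computed
//# serially on the device; the work-group and reduction versions parallelize it.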
std::cout << "Sum = " << data[0] << "\n";
free(data, q);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/08_SYCL_Reduction/src/sum_reduction_usm.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static constexpr size_t N = 1024; // global size
static constexpr size_t B = 128; // work-group size
int main() {
//# setup queue with default selector
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
//# initialize data array using usm
auto data = malloc_shared<int>(N, q);
for (int i = 0; i < N; i++) data[i] = i;
//# shared USM allocation for the sum value (accessible on host and device)
int* sum = malloc_shared<int>(1, q);
*sum = 0;
//# nd-range kernel parallel_for with reduction parameter
q.parallel_for(nd_range<1>{N, B}, reduction(sum, plus<>()), [=](nd_item<1> it, auto& temp) {
auto i = it.get_global_id(0);
temp.combine(data[i]);
}).wait();
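//# reduction(sum, plus<>()) directs the runtime to accumulate per-work-item
//# contributions (typically via work-group partial sums) into *sum; the kernel
//# contributes only through temp.combine().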
std::cout << "Sum = " << *sum << "\n";
free(data, q);
free(sum, q);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/08_SYCL_Reduction/src/sum_work_group.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static constexpr size_t N = 1024; // global size
static constexpr size_t B = 128; // work-group size
int main() {
//# setup queue with in_order property
queue q(property::queue::in_order{});
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
//# initialize data array using usm
auto data = malloc_shared<int>(N, q);
for (int i = 0; i < N; i++) data[i] = i;
//# use parallel_for to calculate sum for each work_group
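//# Each work-group's leader (local id 0) sums its B contiguous elements and
//# stores the partial result at the group's first global index; the in-order
//# queue guarantees the single_task below runs after all partial sums are written.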
q.parallel_for(nd_range<1>(N, B), [=](nd_item<1> item){
size_t index = item.get_global_id(0);
if(item.get_local_id(0) == 0 ){
int sum_wg = 0;
for(int i=index; i<index+B; i++){
sum_wg += data[i];
}
data[index] = sum_wg;
}
});
q.single_task([=](){
int sum = 0;
for(int i=0;i<N;i+=B){
sum += data[i];
}
data[0] = sum;
}).wait();
std::cout << "Sum = " << data[0] << "\n";
free(data, q);
return 0;
}
| cpp |