3 Commits

Bram Veenboer | d7a7af5de9 | 2025-09-01 15:52:04 +02:00
TODO: first changes to add expf

Wiebe van Breukelen | 9d3af8c202 | 2025-08-27 17:07:30 +02:00
Fixed broken pybind11 target check (#28)

* Fixed broken pybind11 target check

* Update python/CMakeLists.txt

Co-authored-by: Bram Veenboer <bram.veenboer@gmail.com>

---------

Co-authored-by: Bram Veenboer <bram.veenboer@gmail.com>

Bram Veenboer | 0f7fd06be8 | 2025-08-21 15:15:25 +02:00
Extend CI (#27)

* Add build with Intel compiler
* Switch to upload/download-artifacts that retain permissions
13 changed files with 89 additions and 7 deletions

View File

@@ -13,6 +13,11 @@ jobs:
 gres: gpu:A4000
 cmake_flags: "-DTRIGDX_USE_MKL=1 -DTRIGDX_USE_GPU=1 -DTRIGDX_USE_MKL=1 -DTRIGDX_USE_XSIMD=1 -DCMAKE_CUDA_ARCHITECTURES=86"
 environment_modules: "spack/20250403 intel-oneapi-mkl cuda python"
+- name: Intel, NVIDIA A4000
+partition: defq
+gres: gpu:A4000
+cmake_flags: "-DTRIGDX_USE_MKL=1 -DTRIGDX_USE_GPU=1 -DTRIGDX_USE_MKL=1 -DTRIGDX_USE_XSIMD=1 -DCMAKE_CUDA_ARCHITECTURES=86"
+environment_modules: "spack/20250403 intel-oneapi-compilers intel-oneapi-mkl cuda python"
 - name: NVIDIA GH200
 partition: ghq
 gres: gpu:GH200
@@ -38,7 +43,7 @@ jobs:
 cmake -S . -B build ${CMAKE_FLAGS}
 make -C build -j
 - name: Upload build
-uses: actions/upload-artifact@v4
+uses: pyTooling/upload-artifact@v4
 with:
 name: build-${{ matrix.name }}
 path: build
@@ -53,7 +58,7 @@ jobs:
 PARTITION_NAME: ${{ matrix.partition }}
 steps:
 - *cleanup
-- uses: actions/download-artifact@v4
+- uses: pyTooling/download-artifact@v4
 with:
 name: build-${{ matrix.name }}
 - uses: astron-rd/slurm-action@v1.2
@@ -61,7 +66,7 @@ jobs:
 partition: ${{ matrix.partition }}
 gres: ${{ matrix.gres }}
 commands: |
-find tests -type f -executable -exec {} \;
+find build/tests -type f -executable -exec {} \;
 benchmark:
 runs-on: [slurm]
@@ -73,7 +78,7 @@ jobs:
 PARTITION_NAME: ${{ matrix.partition }}
 steps:
 - *cleanup
-- uses: actions/download-artifact@v4
+- uses: pyTooling/download-artifact@v4
 with:
 name: build-${{ matrix.name }}
 - uses: astron-rd/slurm-action@v1.2
@@ -81,4 +86,4 @@ jobs:
 partition: ${{ matrix.partition }}
 gres: ${{ matrix.gres }}
 commands: |
-find benchmarks -type f -executable -exec {} \;
+find build/benchmarks -type f -executable -exec {} \;

View File

@@ -16,6 +16,7 @@ public:
 void compute_cosf(size_t n, const float *x, float *c) const override;
 void compute_sincosf(size_t n, const float *x, float *s,
 float *c) const override;
+void compute_expf(size_t n, const float *x, float *e) const override;
 private:
 struct Impl;

View File

@@ -19,4 +19,7 @@ public:
 // Compute sine and cosine for n elements
 virtual void compute_sincosf(size_t n, const float *x, float *s,
 float *c) const = 0;
+// Compute exponent for n elements
+virtual void compute_expf(size_t n, const float *x, float *e) const = 0;
 };
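The hunk above only extends the backend interface with a declaration; each backend further down supplies the implementation. As a usage sketch of the contract, assuming (as the test helpers at the end of this diff do) that a backend is initialized with the problem size before the first compute call; the exp_all helper itself is hypothetical and not part of this change:

#include <cstddef>
#include <vector>

// Hypothetical helper: run compute_expf of any backend over a whole vector.
template <typename Backend>
std::vector<float> exp_all(const std::vector<float> &x) {
  Backend backend;
  backend.init(x.size()); // assumed: backends are initialized with n, as in the test helpers below
  std::vector<float> e(x.size());
  backend.compute_expf(x.size(), x.data(), e.data()); // fills e[i] = expf(x[i])
  return e;
}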

View File

@@ -10,4 +10,6 @@ public:
 void compute_sincosf(size_t n, const float *x, float *s,
 float *c) const override;
+void compute_expf(size_t n, const float *x, float *e) const override;
 };

View File

@@ -10,4 +10,6 @@ public:
 void compute_sincosf(size_t n, const float *x, float *s,
 float *c) const override;
+void compute_expf(size_t n, const float *x, float *e) const override;
 };

View File

@@ -1,4 +1,6 @@
-if(NOT TARGET pybind11)
+find_package(pybind11 CONFIG QUIET)
+if(NOT pybind11_FOUND)
 FetchContent_Declare(
 pybind11
 GIT_REPOSITORY https://github.com/pybind/pybind11.git

View File

@@ -20,6 +20,9 @@ struct GPUBackend::Impl {
 if (h_c) {
 cudaFreeHost(h_c);
 }
+if (h_e) {
+cudaFreeHost(h_e);
+}
 if (d_x) {
 cudaFree(d_x);
 }
@@ -29,6 +32,9 @@ struct GPUBackend::Impl {
 if (d_c) {
 cudaFree(d_c);
 }
+if (d_e) {
+cudaFree(d_e);
+}
 }
 void init(size_t n) {
@@ -36,9 +42,11 @@ struct GPUBackend::Impl {
 cudaMallocHost(&h_x, bytes);
 cudaMallocHost(&h_s, bytes);
 cudaMallocHost(&h_c, bytes);
+cudaMallocHost(&h_e, bytes);
 cudaMalloc(&d_x, bytes);
 cudaMalloc(&d_s, bytes);
 cudaMalloc(&d_c, bytes);
+cudaMalloc(&d_e, bytes);
 }
 void compute_sinf(size_t n, const float *x, float *s) const {
@@ -70,12 +78,23 @@ struct GPUBackend::Impl {
 std::memcpy(c, h_c, bytes);
 }
+void compute_expf(size_t n, const float *x, float *e) const {
+const size_t bytes = n * sizeof(float);
+std::memcpy(h_x, x, bytes);
+cudaMemcpy(d_x, h_x, bytes, cudaMemcpyHostToDevice);
+launch_expf_kernel(d_x, d_e, n);
+cudaMemcpy(h_e, d_e, bytes, cudaMemcpyDeviceToHost);
+std::memcpy(e, h_e, bytes);
+}
 float *h_x = nullptr;
 float *h_s = nullptr;
 float *h_c = nullptr;
+float *h_e = nullptr;
 float *d_x = nullptr;
 float *d_s = nullptr;
 float *d_c = nullptr;
+float *d_e = nullptr;
 };
 GPUBackend::GPUBackend() : impl(std::make_unique<Impl>()) {}
@@ -96,3 +115,7 @@ void GPUBackend::compute_sincosf(size_t n, const float *x, float *s,
 float *c) const {
 impl->compute_sincosf(n, x, s, c);
 }
+void GPUBackend::compute_expf(size_t n, const float *x, float *e) const {
+impl->compute_expf(n, x, e);
+}
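The new compute_expf path reuses the staging scheme of the existing sinf/cosf routines: the input is copied into the pinned buffer h_x, moved to d_x, the kernel fills d_e, and the result comes back through h_e. A minimal end-to-end sketch of calling it; the header name and the init(n) call are assumptions based on the test helpers later in this diff, not something this hunk shows:

#include <cstddef>
#include <vector>

#include "GPUBackend.h" // assumed header name; adjust to the actual repository layout

int main() {
  const std::size_t n = 1 << 20;
  std::vector<float> x(n, 1.0f), e(n);
  GPUBackend backend;
  backend.init(n); // allocates the pinned host buffers (h_x, h_e) and device buffers (d_x, d_e)
  backend.compute_expf(n, x.data(), e.data()); // h_x -> d_x, kernel_expf, d_e -> h_e -> e
  return (e[0] > 2.71f && e[0] < 2.72f) ? 0 : 1; // expf(1.0f) is about 2.71828
}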

View File

@@ -31,6 +31,15 @@ __global__ void kernel_sincosf(const float *__restrict__ x,
 }
 }
+__global__ void kernel_expf(const float *__restrict__ x, float *__restrict__ e,
+size_t n) {
+size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
+if (idx < n) {
+// e[idx] = __expf(x[idx]);
+e[idx] = expf(x[idx]);
+}
+}
 namespace {
 inline dim3 make_grid(size_t n, size_t threadsPerBlock = 256) {
 return dim3((n + threadsPerBlock - 1) / threadsPerBlock);
@@ -54,3 +63,9 @@ void launch_sincosf_kernel(const float *d_x, float *d_s, float *d_c, size_t n) {
 dim3 grid = make_grid(n, blocks.x);
 kernel_sincosf<<<grid, blocks>>>(d_x, d_s, d_c, n);
 }
+void launch_expf_kernel(const float *d_x, float *d_e, size_t n) {
+dim3 blocks(256);
+dim3 grid = make_grid(n, blocks.x);
+kernel_expf<<<grid, blocks>>>(d_x, d_e, n);
+}
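The launch helper keeps the geometry of the existing kernels: 256 threads per block and a grid obtained by ceiling division of n, with the if (idx < n) guard discarding the padding threads in the last block. The commented-out __expf line keeps CUDA's faster but less accurate intrinsic as an option while plain expf remains the default; given the "TODO" commit message, that choice appears to be still open. A small stand-alone check of the same grid arithmetic (hypothetical, only to illustrate make_grid's formula):

#include <cstddef>
#include <cstdio>

int main() {
  const std::size_t threadsPerBlock = 256; // same default as make_grid
  const std::size_t sizes[] = {1, 256, 257, 1000000};
  for (std::size_t n : sizes) {
    // ceiling division, identical to (n + threadsPerBlock - 1) / threadsPerBlock in make_grid
    const std::size_t blocks = (n + threadsPerBlock - 1) / threadsPerBlock;
    std::printf("n = %zu -> %zu block(s) of %zu threads\n", n, blocks, threadsPerBlock);
  }
  return 0;
}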

View File

@@ -6,3 +6,4 @@ void launch_sinf_kernel(const float *d_x, float *d_s, size_t n);
 void launch_cosf_kernel(const float *d_x, float *d_c, size_t n);
 void launch_sincosf_kernel(const float *d_x, float *d_s, float *d_c,
 std::size_t n);
+void launch_expf_kernel(const float *d_x, float *d_e, size_t n);

View File

@@ -14,3 +14,7 @@ void MKLBackend::compute_sincosf(size_t n, const float *x, float *s,
 float *c) const {
 vmsSinCos(static_cast<MKL_INT>(n), x, s, c, VML_HA);
 }
+void MKLBackend::compute_expf(size_t n, const float *x, float *e) const {
+vmsExp(static_cast<MKL_INT>(n), x, e, VML_HA);
+}

View File

@@ -21,3 +21,9 @@ void ReferenceBackend::compute_sincosf(size_t n, const float *x, float *s,
 c[i] = cosf(x[i]);
 }
 }
+void ReferenceBackend::compute_expf(size_t n, const float *x, float *e) const {
+for (size_t i = 0; i < n; ++i) {
+e[i] = expf(x[i]);
+}
+}

View File

@@ -8,3 +8,5 @@ TEST_CASE("sinf") { test_sinf<MKLBackend>(1e-6f); }
TEST_CASE("cosf") { test_cosf<MKLBackend>(1e-6f); } TEST_CASE("cosf") { test_cosf<MKLBackend>(1e-6f); }
TEST_CASE("sincosf") { test_sincosf<MKLBackend>(1e-6f); } TEST_CASE("sincosf") { test_sincosf<MKLBackend>(1e-6f); }
TEST_CASE("expf") { test_expf<MKLBackend>(1e-6f); }

View File

@@ -63,3 +63,19 @@ template <typename Backend> inline void test_sincosf(float tol) {
 REQUIRE_THAT(c[i], Catch::Matchers::WithinAbs(c_ref[i], tol));
 }
 }
+template <typename Backend> inline void test_expf(float tol) {
+std::vector<float> x(N), e_ref(N), e(N);
+init_x(x);
+ReferenceBackend ref;
+Backend backend;
+backend.init(N);
+ref.compute_expf(N, x.data(), e_ref.data());
+backend.compute_expf(N, x.data(), e.data());
+for (size_t i = 0; i < N; ++i) {
+REQUIRE_THAT(e[i], Catch::Matchers::WithinAbs(e_ref[i], tol));
+}
+}
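Only the MKL test file registers the new helper in this change set; a registration for another backend would presumably mirror it as sketched below. Everything in this sketch, including the header names and the tolerance, is an assumption for illustration rather than part of the commits:

#include <catch2/catch_test_macros.hpp> // assumed Catch2 v3 include layout
#include "GPUBackend.h"  // assumed header name
#include "test_common.h" // assumed name of the header providing test_expf<>

TEST_CASE("expf") { test_expf<GPUBackend>(1e-3f); } // tolerance is illustrative only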