6 Commits

Author SHA1 Message Date
Wiebe van Breukelen
5f00c5d304 Add README.md (#38)
* Add README.md

* Improve README description of TrigDx library

* Apply suggestion from @mickveldhuis

Co-authored-by: Mick Veldhuis <mickveldhuis@hotmail.nl>

* Apply suggestion from @mickveldhuis

Co-authored-by: Mick Veldhuis <mickveldhuis@hotmail.nl>

* Apply suggestion from @mickveldhuis

Co-authored-by: Mick Veldhuis <mickveldhuis@hotmail.nl>

---------

Co-authored-by: Mick Veldhuis <mickveldhuis@hotmail.nl>
2025-10-22 16:55:37 +02:00
Wiebe van Breukelen
f85e67e669 Fix compiler warnings (#37) 2025-10-22 16:48:26 +02:00
mmancini-skao
76998a137a Fix error in Taylor expansion (#36)
Replaced term3 with term4 in calculations for t4.
2025-10-20 17:09:35 +02:00
Bram Veenboer
500d35070e Fix formatting (#35)
* Run pre-commit

* Skip line-length check in cmake-lint
2025-10-10 09:19:18 +02:00
Dantali0n
bfe752433f Fixes #30, Add CMake steps to install python bindings (#31) 2025-09-17 20:03:28 +02:00
Bram Veenboer
8fe8314905 Update GPU backend (#29)
* Update GPU memory management
* Add allocate_memory and free_memory
2025-09-03 09:16:28 +02:00
19 changed files with 186 additions and 151 deletions

View File

@@ -7,4 +7,5 @@ repos:
rev: v0.6.13
hooks:
- id: cmake-format
- id: cmake-lint
- id: cmake-lint
args: [--disabled-codes=C0301]

View File

@@ -12,6 +12,11 @@ option(TRIGDX_BUILD_TESTS "Build tests" ON)
option(TRIGDX_BUILD_BENCHMARKS "Build benchmarks" ON)
option(TRIGDX_BUILD_PYTHON "Build Python interface" ON)
# Add compiler flags
set(CMAKE_CXX_FLAGS
"${CMAKE_CXX_FLAGS} -Wall -Wnon-virtual-dtor -Wduplicated-branches -Wvla -Wpointer-arith -Wextra -Wno-unused-parameter"
)
list(APPEND CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/cmake")
configure_file(
${CMAKE_CURRENT_SOURCE_DIR}/cmake/trigdx_config.hpp.in

README.md (new file, 54 lines)
View File

@@ -0,0 +1,54 @@
# TrigDx
High-performance C++ library offering multiple implementations of transcendental trigonometric functions (e.g., sin, cos, tan and their variants). It is designed for numerical, signal-processing, and real-time systems where trading a small loss of accuracy for significantly higher throughput on modern CPUs (scalar and SIMD) and NVIDIA GPUs is acceptable.
## Why TrigDx?
Many applications use the standard library implementations, which prioritise correctness but are not always optimal for throughput on vectorized or GPU hardware. TrigDx gives you multiple interchangeable implementations (see the usage sketch after this list) so you can:
- Replace `std::sin` / `std::cos` calls with faster approximations when a small, bounded reduction in accuracy is acceptable.
- Use SIMD/vectorized implementations backed by compact lookup tables for high-throughput evaluation.
- Run massively parallel kernels that take advantage of a GPU's _Special Function Units_ (SFUs).
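A minimal usage sketch (the include path below is illustrative; use the actual TrigDx header locations):

```cpp
#include <cstddef>
#include <vector>

#include "reference_backend.hpp" // illustrative path; adjust to the installed headers

int main() {
  const std::size_t n = 1024;
  std::vector<float> x(n), s(n);
  for (std::size_t i = 0; i < n; ++i) {
    x[i] = (i % 360) * 0.0174533f; // degrees to radians
  }

  ReferenceBackend backend; // any backend sharing the common interface works here
  backend.init(n);          // optional per-backend initialization
  backend.compute_sinf(n, x.data(), s.data());
}
```

Because all backends share the same interface, switching to a lookup-table, MKL, or GPU implementation only changes the type that is instantiated.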
## Requirements
- A C++ compiler with at least C++17 support (GCC, Clang)
- CMake 3.15+
- Optional: NVIDIA CUDA Toolkit 11+ to build GPU kernels
- Optional: GoogleTest (for unit tests) and GoogleBenchmark (for microbenchmarks)
## Building
```bash
git clone https://github.com/astron-rd/TrigDx.git
cd TrigDx
mkdir build && cd build
# CPU-only:
cmake -DCMAKE_BUILD_TYPE=Release -DTRIGDX_USE_XSIMD=ON ..
cmake --build . -j
# Enable CUDA (if available):
cmake -DCMAKE_BUILD_TYPE=Release -DTRIGDX_USE_GPU=ON ..
cmake --build . -j
# Run tests:
ctest --output-on-failure -j
```
Common CMake options:
- `TRIGDX_USE_GPU=ON/OFF` — build GPU support.
- `TRIGDX_BUILD_TESTS=ON/OFF` — build tests.
- `TRIGDX_BUILD_BENCHMARKS=ON/OFF` — build benchmarks.
- `TRIGDX_BUILD_PYTHON=ON/OFF` — build the Python interface.
## Contributing
- Fork → create a feature branch → open a PR.
- Include unit tests for correctness-sensitive changes and benchmark results for performance changes.
- Follow project style (clang-format) and run tests locally before submitting.
## Reporting issues
When opening an issue for incorrect results or performance regressions, please include:
- Platform and CPU/GPU model.
- Compiler and version with exact compile flags.
- Small reproducer (input data and the TrigDx implementation used).
## License
See the LICENSE file in the repository for licensing details.

View File

@@ -2,13 +2,14 @@
#include <chrono>
#include <cmath>
#include <stdexcept>
#include <string>
#include <vector>
#include <benchmark/benchmark.h>
void init_x(std::vector<float> &x) {
for (size_t i = 0; i < x.size(); ++i) {
void init_x(float *x, size_t n) {
for (size_t i = 0; i < n; ++i) {
x[i] = (i % 360) * 0.0174533f; // degrees to radians
}
}
@@ -16,24 +17,31 @@ void init_x(std::vector<float> &x) {
template <typename Backend>
static void benchmark_sinf(benchmark::State &state) {
const size_t N = static_cast<size_t>(state.range(0));
std::vector<float> x(N), s(N);
init_x(x);
Backend backend;
auto start = std::chrono::high_resolution_clock::now();
backend.init(N);
float *x =
reinterpret_cast<float *>(backend.allocate_memory(N * sizeof(float)));
float *s =
reinterpret_cast<float *>(backend.allocate_memory(N * sizeof(float)));
auto end = std::chrono::high_resolution_clock::now();
state.counters["init_ms"] =
std::chrono::duration_cast<std::chrono::microseconds>(end - start)
.count() /
1.e3;
init_x(x, N);
for (auto _ : state) {
backend.compute_sinf(N, x.data(), s.data());
backend.compute_sinf(N, x, s);
benchmark::DoNotOptimize(s);
}
backend.free_memory(x);
backend.free_memory(s);
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) *
static_cast<int64_t>(N));
}
@@ -41,24 +49,35 @@ static void benchmark_sinf(benchmark::State &state) {
template <typename Backend>
static void benchmark_cosf(benchmark::State &state) {
const size_t N = static_cast<size_t>(state.range(0));
std::vector<float> x(N), c(N);
init_x(x);
Backend backend;
auto start = std::chrono::high_resolution_clock::now();
backend.init(N);
float *x =
reinterpret_cast<float *>(backend.allocate_memory(N * sizeof(float)));
float *c =
reinterpret_cast<float *>(backend.allocate_memory(N * sizeof(float)));
if (!x || !c) {
throw std::runtime_error("Buffer allocation failed");
}
auto end = std::chrono::high_resolution_clock::now();
state.counters["init_ms"] =
std::chrono::duration_cast<std::chrono::microseconds>(end - start)
.count() /
1.e3;
init_x(x, N);
for (auto _ : state) {
backend.compute_cosf(N, x.data(), c.data());
backend.compute_cosf(N, x, c);
benchmark::DoNotOptimize(c);
}
backend.free_memory(x);
backend.free_memory(c);
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) *
static_cast<int64_t>(N));
}
@@ -66,25 +85,38 @@ static void benchmark_cosf(benchmark::State &state) {
template <typename Backend>
static void benchmark_sincosf(benchmark::State &state) {
const size_t N = static_cast<size_t>(state.range(0));
std::vector<float> x(N), s(N), c(N);
init_x(x);
Backend backend;
auto start = std::chrono::high_resolution_clock::now();
backend.init(N);
float *x =
reinterpret_cast<float *>(backend.allocate_memory(N * sizeof(float)));
float *s =
reinterpret_cast<float *>(backend.allocate_memory(N * sizeof(float)));
float *c =
reinterpret_cast<float *>(backend.allocate_memory(N * sizeof(float)));
if (!x || !s || !c) {
throw std::runtime_error("Buffer allocation failed");
}
auto end = std::chrono::high_resolution_clock::now();
state.counters["init_ms"] =
std::chrono::duration_cast<std::chrono::microseconds>(end - start)
.count() /
1.e3;
init_x(x, N);
for (auto _ : state) {
backend.compute_sincosf(N, x.data(), s.data(), c.data());
backend.compute_sincosf(N, x, s, c);
benchmark::DoNotOptimize(s);
benchmark::DoNotOptimize(c);
}
backend.free_memory(x);
backend.free_memory(s);
backend.free_memory(c);
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) *
static_cast<int64_t>(N));
}

View File

@@ -11,12 +11,12 @@ public:
GPUBackend();
~GPUBackend() override;
void init(size_t n = 0) override;
void *allocate_memory(size_t bytes) const override;
void free_memory(void *ptr) const override;
void compute_sinf(size_t n, const float *x, float *s) const override;
void compute_cosf(size_t n, const float *x, float *c) const override;
void compute_sincosf(size_t n, const float *x, float *s,
float *c) const override;
void compute_expf(size_t n, const float *x, float *e) const override;
private:
struct Impl;

View File

@@ -1,6 +1,8 @@
#pragma once
#include <cstddef>
#include <cstdint>
#include <cstdlib>
// Base interface for all math backends
class Backend {
@@ -10,6 +12,12 @@ public:
// Optional initialization
virtual void init(size_t n = 0) {}
virtual void *allocate_memory(size_t bytes) const {
return static_cast<void *>(new uint8_t[bytes]);
};
virtual void free_memory(void *ptr) const { delete[] static_cast<uint8_t *>(ptr); };
// Compute sine for n elements
virtual void compute_sinf(size_t n, const float *x, float *s) const = 0;
@@ -19,7 +27,4 @@ public:
// Compute sine and cosine for n elements
virtual void compute_sincosf(size_t n, const float *x, float *s,
float *c) const = 0;
// Compute exponent for n elements
virtual void compute_expf(size_t n, const float *x, float *e) const = 0;
};
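A condensed sketch of the allocation pattern these hooks introduce, mirroring the benchmark changes above (the templated wrapper is illustrative and not part of the library):

```cpp
#include <cstddef>
#include <stdexcept>

// Illustrative wrapper showing the intended allocate_memory / free_memory
// usage for any concrete backend type (e.g. ReferenceBackend or GPUBackend).
template <typename Backend>
void run_sinf(std::size_t n) {
  Backend backend;
  backend.init(n);
  float *x = static_cast<float *>(backend.allocate_memory(n * sizeof(float)));
  float *s = static_cast<float *>(backend.allocate_memory(n * sizeof(float)));
  if (!x || !s) {
    throw std::runtime_error("Buffer allocation failed");
  }
  for (std::size_t i = 0; i < n; ++i) {
    x[i] = (i % 360) * 0.0174533f; // degrees to radians
  }
  backend.compute_sinf(n, x, s);
  backend.free_memory(x);
  backend.free_memory(s);
}
```

In the GPU backend further below, allocate_memory returns pinned host memory via cudaMallocHost, so the host-device copies inside the compute calls operate on pinned buffers.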

View File

@@ -10,6 +10,4 @@ public:
void compute_sincosf(size_t n, const float *x, float *s,
float *c) const override;
void compute_expf(size_t n, const float *x, float *e) const override;
};

View File

@@ -10,6 +10,4 @@ public:
void compute_sincosf(size_t n, const float *x, float *s,
float *c) const override;
void compute_expf(size_t n, const float *x, float *e) const override;
};

View File

@@ -8,5 +8,16 @@ if(NOT pybind11_FOUND)
FetchContent_MakeAvailable(pybind11)
endif()
# Needed to set ${Python_VERSION_MAJOR} and ${Python_VERSION_MINOR}
find_package(Python REQUIRED)
pybind11_add_module(pytrigdx bindings.cpp)
target_link_libraries(pytrigdx PRIVATE trigdx)
set_target_properties(pytrigdx PROPERTIES OUTPUT_NAME "trigdx")
set(PYTHON_SITE_PACKAGES
"${CMAKE_INSTALL_LIBDIR}/python${Python_VERSION_MAJOR}.${Python_VERSION_MINOR}/site-packages/trigdx"
)
install(TARGETS pytrigdx DESTINATION ${PYTHON_SITE_PACKAGES})
install(FILES __init__.py DESTINATION ${PYTHON_SITE_PACKAGES})

python/__init__.py (new file, 16 lines)
View File

@@ -0,0 +1,16 @@
from .trigdx import Reference, Lookup16K, Lookup32K, LookupAVX16K, LookupAVX32K
try:
from .trigdx import MKL
except ImportError:
pass
try:
from .trigdx import GPU
except ImportError:
pass
try:
from .trigdx import LookupXSIMD16K, LookupXSIMD32K
except ImportError:
pass

View File

@@ -72,7 +72,9 @@ void bind_backend(py::module &m, const char *name) {
.def("compute_sincosf", &compute_sincos<float>);
}
PYBIND11_MODULE(pytrigdx, m) {
PYBIND11_MODULE(trigdx, m) {
m.doc() = "TrigDx python bindings";
py::class_<Backend, std::shared_ptr<Backend>>(m, "Backend")
.def("init", &Backend::init);
@@ -91,4 +93,4 @@ PYBIND11_MODULE(pytrigdx, m) {
bind_backend<LookupXSIMDBackend<16384>>(m, "LookupXSIMD16K");
bind_backend<LookupXSIMDBackend<32768>>(m, "LookupXSIMD32K");
#endif
}
}

View File

@@ -10,98 +10,63 @@
struct GPUBackend::Impl {
~Impl() {
if (h_x) {
cudaFreeHost(h_x);
}
if (h_s) {
cudaFreeHost(h_s);
}
if (h_c) {
cudaFreeHost(h_c);
}
if (h_e) {
cudaFreeHost(h_e);
}
if (d_x) {
cudaFree(d_x);
}
if (d_s) {
cudaFree(d_s);
}
if (d_c) {
cudaFree(d_c);
}
if (d_e) {
cudaFree(d_e);
}
void *allocate_memory(size_t bytes) const {
void *ptr;
cudaMallocHost(&ptr, bytes);
return ptr;
}
void init(size_t n) {
const size_t bytes = n * sizeof(float);
cudaMallocHost(&h_x, bytes);
cudaMallocHost(&h_s, bytes);
cudaMallocHost(&h_c, bytes);
cudaMallocHost(&h_e, bytes);
cudaMalloc(&d_x, bytes);
cudaMalloc(&d_s, bytes);
cudaMalloc(&d_c, bytes);
cudaMalloc(&d_e, bytes);
}
void free_memory(void *ptr) const { cudaFreeHost(ptr); }
void compute_sinf(size_t n, const float *x, float *s) const {
const size_t bytes = n * sizeof(float);
std::memcpy(h_x, x, bytes);
cudaMemcpy(d_x, h_x, bytes, cudaMemcpyHostToDevice);
float *d_x, *d_s;
cudaMalloc(&d_x, bytes);
cudaMalloc(&d_s, bytes);
cudaMemcpy(d_x, x, bytes, cudaMemcpyHostToDevice);
launch_sinf_kernel(d_x, d_s, n);
cudaMemcpy(h_s, d_s, bytes, cudaMemcpyDeviceToHost);
std::memcpy(s, h_s, bytes);
cudaMemcpy(s, d_s, bytes, cudaMemcpyDeviceToHost);
cudaFree(d_x);
cudaFree(d_s);
}
void compute_cosf(size_t n, const float *x, float *c) const {
const size_t bytes = n * sizeof(float);
std::memcpy(h_x, x, bytes);
cudaMemcpy(d_x, h_x, bytes, cudaMemcpyHostToDevice);
float *d_x, *d_c;
cudaMalloc(&d_x, bytes);
cudaMalloc(&d_c, bytes);
cudaMemcpy(d_x, x, bytes, cudaMemcpyHostToDevice);
launch_cosf_kernel(d_x, d_c, n);
cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);
std::memcpy(c, h_c, bytes);
cudaMemcpy(c, d_c, bytes, cudaMemcpyDeviceToHost);
cudaFree(d_x);
cudaFree(d_c);
}
void compute_sincosf(size_t n, const float *x, float *s, float *c) const {
const size_t bytes = n * sizeof(float);
std::memcpy(h_x, x, bytes);
cudaMemcpy(d_x, h_x, bytes, cudaMemcpyHostToDevice);
float *d_x, *d_s, *d_c;
cudaMalloc(&d_x, bytes);
cudaMalloc(&d_s, bytes);
cudaMalloc(&d_c, bytes);
cudaMemcpy(d_x, x, bytes, cudaMemcpyHostToDevice);
launch_sincosf_kernel(d_x, d_s, d_c, n);
cudaMemcpy(h_s, d_s, bytes, cudaMemcpyDeviceToHost);
cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);
std::memcpy(s, h_s, bytes);
std::memcpy(c, h_c, bytes);
cudaMemcpy(s, d_s, bytes, cudaMemcpyDeviceToHost);
cudaMemcpy(c, d_c, bytes, cudaMemcpyDeviceToHost);
cudaFree(d_x);
cudaFree(d_s);
cudaFree(d_c);
}
void compute_expf(size_t n, const float *x, float *e) const {
const size_t bytes = n * sizeof(float);
std::memcpy(h_x, x, bytes);
cudaMemcpy(d_x, h_x, bytes, cudaMemcpyHostToDevice);
launch_expf_kernel(d_x, d_e, n);
cudaMemcpy(h_e, d_e, bytes, cudaMemcpyDeviceToHost);
std::memcpy(e, h_e, bytes);
}
float *h_x = nullptr;
float *h_s = nullptr;
float *h_c = nullptr;
float *h_e = nullptr;
float *d_x = nullptr;
float *d_s = nullptr;
float *d_c = nullptr;
float *d_e = nullptr;
};
GPUBackend::GPUBackend() : impl(std::make_unique<Impl>()) {}
GPUBackend::~GPUBackend() = default;
void GPUBackend::init(size_t n) { impl->init(n); }
void *GPUBackend::allocate_memory(size_t bytes) const {
return impl->allocate_memory(bytes);
}
void GPUBackend::free_memory(void *ptr) const { impl->free_memory(ptr); }
void GPUBackend::compute_sinf(size_t n, const float *x, float *s) const {
impl->compute_sinf(n, x, s);
@@ -114,8 +79,4 @@ void GPUBackend::compute_cosf(size_t n, const float *x, float *c) const {
void GPUBackend::compute_sincosf(size_t n, const float *x, float *s,
float *c) const {
impl->compute_sincosf(n, x, s, c);
}
void GPUBackend::compute_expf(size_t n, const float *x, float *e) const {
impl->compute_expf(n, x, e);
}
}

View File

@@ -31,15 +31,6 @@ __global__ void kernel_sincosf(const float *__restrict__ x,
}
}
__global__ void kernel_expf(const float *__restrict__ x, float *__restrict__ e,
size_t n) {
size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < n) {
// e[idx] = __expf(x[idx]);
e[idx] = expf(x[idx]);
}
}
namespace {
inline dim3 make_grid(size_t n, size_t threadsPerBlock = 256) {
return dim3((n + threadsPerBlock - 1) / threadsPerBlock);
@@ -63,9 +54,3 @@ void launch_sincosf_kernel(const float *d_x, float *d_s, float *d_c, size_t n) {
dim3 grid = make_grid(n, blocks.x);
kernel_sincosf<<<grid, blocks>>>(d_x, d_s, d_c, n);
}
void launch_expf_kernel(const float *d_x, float *d_e, size_t n) {
dim3 blocks(256);
dim3 grid = make_grid(n, blocks.x);
kernel_expf<<<grid, blocks>>>(d_x, d_e, n);
}

View File

@@ -6,4 +6,3 @@ void launch_sinf_kernel(const float *d_x, float *d_s, size_t n);
void launch_cosf_kernel(const float *d_x, float *d_c, size_t n);
void launch_sincosf_kernel(const float *d_x, float *d_s, float *d_c,
std::size_t n);
void launch_expf_kernel(const float *d_x, float *d_e, size_t n);

View File

@@ -20,8 +20,8 @@ template <std::size_t NR_SAMPLES> struct lookup_table {
cos_values[i] = cosf(i * PI_FRAC);
}
}
std::array<float, NR_SAMPLES> cos_values;
std::array<float, NR_SAMPLES> sin_values;
std::array<float, NR_SAMPLES> cos_values;
};
template <std::size_t NR_SAMPLES> struct cosf_dispatcher {
@@ -33,7 +33,6 @@ template <std::size_t NR_SAMPLES> struct cosf_dispatcher {
constexpr uint_fast32_t VL = b_type::size;
const uint_fast32_t VS = n - n % VL;
const uint_fast32_t Q_PI = NR_SAMPLES / 4U;
const b_type scale = b_type::broadcast(lookup_table_.SCALE);
const b_type pi_frac = b_type::broadcast(lookup_table_.PI_FRAC);
const m_type mask = m_type::broadcast(lookup_table_.MASK);
@@ -42,7 +41,7 @@ template <std::size_t NR_SAMPLES> struct cosf_dispatcher {
const b_type term2 = b_type::broadcast(lookup_table_.TERM2); // 1/2!
const b_type term3 = b_type::broadcast(lookup_table_.TERM3); // 1/3!
const b_type term4 = b_type::broadcast(lookup_table_.TERM4); // 1/4!
const m_type quarter_pi = m_type::broadcast(Q_PI);
uint_fast32_t i;
for (i = 0; i < VS; i += VL) {
const b_type vx = b_type::load(a + i, Tag());
@@ -60,7 +59,7 @@ template <std::size_t NR_SAMPLES> struct cosf_dispatcher {
const b_type dx4 = xsimd::mul(dx2, dx2);
const b_type t2 = xsimd::mul(dx2, term2);
const b_type t3 = xsimd::mul(dx3, term3);
const b_type t4 = xsimd::mul(dx4, term3);
const b_type t4 = xsimd::mul(dx4, term4);
const b_type cosdx = xsimd::add(xsimd::sub(term1, t2), t4);
@@ -98,7 +97,6 @@ template <std::size_t NR_SAMPLES> struct sinf_dispatcher {
constexpr uint_fast32_t VL = b_type::size;
const uint_fast32_t VS = n - n % VL;
const uint_fast32_t Q_PI = NR_SAMPLES / 4U;
const b_type scale = b_type::broadcast(lookup_table_.SCALE);
const b_type pi_frac = b_type::broadcast(lookup_table_.PI_FRAC);
const m_type mask = m_type::broadcast(lookup_table_.MASK);
@@ -107,7 +105,7 @@ template <std::size_t NR_SAMPLES> struct sinf_dispatcher {
const b_type term2 = b_type::broadcast(lookup_table_.TERM2); // 1/2!
const b_type term3 = b_type::broadcast(lookup_table_.TERM3); // 1/3!
const b_type term4 = b_type::broadcast(lookup_table_.TERM4); // 1/4!
const m_type quarter_pi = m_type::broadcast(Q_PI);
uint_fast32_t i;
for (i = 0; i < VS; i += VL) {
const b_type vx = b_type::load(a + i, Tag());
@@ -120,7 +118,7 @@ template <std::size_t NR_SAMPLES> struct sinf_dispatcher {
const b_type dx4 = xsimd::mul(dx2, dx2);
const b_type t2 = xsimd::mul(dx2, term2);
const b_type t3 = xsimd::mul(dx3, term3);
const b_type t4 = xsimd::mul(dx4, term3);
const b_type t4 = xsimd::mul(dx4, term4);
const b_type cosdx = xsimd::add(xsimd::sub(term1, t2), t4);
const b_type sindx = xsimd::sub(dx, t3);
@@ -160,7 +158,6 @@ template <std::size_t NR_SAMPLES> struct sin_cosf_dispatcher {
constexpr uint_fast32_t VL = b_type::size;
const uint_fast32_t VS = n - n % VL;
const uint_fast32_t Q_PI = NR_SAMPLES / 4U;
const b_type scale = b_type::broadcast(lookup_table_.SCALE);
const m_type mask = m_type::broadcast(lookup_table_.MASK);
const b_type pi_frac = b_type::broadcast(lookup_table_.PI_FRAC);
@@ -170,7 +167,6 @@ template <std::size_t NR_SAMPLES> struct sin_cosf_dispatcher {
const b_type term3 = b_type::broadcast(lookup_table_.TERM3); // 1/3!
const b_type term4 = b_type::broadcast(lookup_table_.TERM4); // 1/4!
const m_type quarter_pi = m_type::broadcast(Q_PI);
uint_fast32_t i;
for (i = 0; i < VS; i += VL) {
const b_type vx = b_type::load(a + i, Tag());
@@ -183,7 +179,7 @@ template <std::size_t NR_SAMPLES> struct sin_cosf_dispatcher {
const b_type dx4 = xsimd::mul(dx2, dx2);
const b_type t2 = xsimd::mul(dx2, term2);
const b_type t3 = xsimd::mul(dx3, term3);
const b_type t4 = xsimd::mul(dx4, term3);
const b_type t4 = xsimd::mul(dx4, term4);
idx = xsimd::bitwise_and(idx, mask);
b_type sinv = b_type::gather(lookup_table_.sin_values.data(), idx);
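For context on commit 76998a137a (Fix error in Taylor expansion, #36): the dispatchers above refine the table lookup with a truncated Taylor expansion around the nearest sample. Judging from the broadcast constants (TERM2 = 1/2!, TERM3 = 1/3!, TERM4 = 1/4!), the intended series are

$$\cos(\Delta x) \approx 1 - \frac{\Delta x^2}{2!} + \frac{\Delta x^4}{4!}, \qquad \sin(\Delta x) \approx \Delta x - \frac{\Delta x^3}{3!},$$

so the $\Delta x^4$ term (t4) must be scaled by TERM4 = 1/4! rather than TERM3 = 1/3!, which is exactly the term3 → term4 change applied in this file.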

View File

@@ -14,7 +14,3 @@ void MKLBackend::compute_sincosf(size_t n, const float *x, float *s,
float *c) const {
vmsSinCos(static_cast<MKL_INT>(n), x, s, c, VML_HA);
}
void MKLBackend::compute_expf(size_t n, const float *x, float *e) const {
vmsExp(static_cast<MKL_INT>(n), x, e, VML_HA);
}

View File

@@ -21,9 +21,3 @@ void ReferenceBackend::compute_sincosf(size_t n, const float *x, float *s,
c[i] = cosf(x[i]);
}
}
void ReferenceBackend::compute_expf(size_t n, const float *x, float *e) const {
for (size_t i = 0; i < n; ++i) {
e[i] = expf(x[i]);
}
}

View File

@@ -8,5 +8,3 @@ TEST_CASE("sinf") { test_sinf<MKLBackend>(1e-6f); }
TEST_CASE("cosf") { test_cosf<MKLBackend>(1e-6f); }
TEST_CASE("sincosf") { test_sincosf<MKLBackend>(1e-6f); }
TEST_CASE("expf") { test_expf<MKLBackend>(1e-6f); }

View File

@@ -63,19 +63,3 @@ template <typename Backend> inline void test_sincosf(float tol) {
REQUIRE_THAT(c[i], Catch::Matchers::WithinAbs(c_ref[i], tol));
}
}
template <typename Backend> inline void test_expf(float tol) {
std::vector<float> x(N), e_ref(N), e(N);
init_x(x);
ReferenceBackend ref;
Backend backend;
backend.init(N);
ref.compute_expf(N, x.data(), e_ref.data());
backend.compute_expf(N, x.data(), e.data());
for (size_t i = 0; i < N; ++i) {
REQUIRE_THAT(e[i], Catch::Matchers::WithinAbs(e_ref[i], tol));
}
}