From af217660e927172beec9bc2a4e283a986aba7b94 Mon Sep 17 00:00:00 2001
From: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com>
Date: Fri, 21 Nov 2025 15:23:01 +0000
Subject: [PATCH 1/2] cpu: optimize ggml_vec_cvar_f32 with cascading SIMD for remaining elements

Implement cascading SIMD instruction sets to process remaining elements efficiently in ggml_vec_cvar_f32, addressing TODO at lines 410-411.

Changes:
- AVX512 builds now cascade through AVX2 (8 elements) and SSE2 (4 elements) before falling back to scalar operations
- AVX2 builds now cascade through SSE2 (4 elements) before scalar fallback
- Reduces scalar iterations for non-aligned vector sizes
- Follows the pattern used in ARM SVE implementations

Performance impact:
- Minimal impact on standard benchmarks (common dimensions are well-aligned)
- Improves performance for non-standard vector sizes
- Example: vector size 110 with AVX512 now uses 6 AVX512 + 1 AVX2 + 1 SSE2 + 2 scalar iterations instead of 6 AVX512 + 14 scalar iterations

Testing:
- All 37 tests pass including test-backend-ops
- Build succeeds with -DLLAMA_FATAL_WARNINGS=ON
- No performance regression on standard benchmarks

Co-Authored-By: Jake Cosme
---
 ggml/src/ggml-cpu/vec.cpp | 42 ++++++++++++++++++++++++++++++++++++---
 1 file changed, 39 insertions(+), 3 deletions(-)

diff --git a/ggml/src/ggml-cpu/vec.cpp b/ggml/src/ggml-cpu/vec.cpp
index 43dc7537c33..a7cb36d0426 100644
--- a/ggml/src/ggml-cpu/vec.cpp
+++ b/ggml/src/ggml-cpu/vec.cpp
@@ -407,8 +407,6 @@ void ggml_vec_swiglu_f32(const int n, float * y, const float * x, const float *
 ggml_float ggml_vec_cvar_f32(const int n, float * y, const float * x, const float mean) {
     int i = 0;
     ggml_float sum = 0;
-// TODO: optimize to process the remaining elements in groups using the smaller vector sizes from AVX2 and SSE
-// ref: https://github.com/ggml-org/llama.cpp/pull/15953#pullrequestreview-3310928344
 #if defined(__AVX512F__) && defined(__AVX512DQ__)
     for (; i + 15 < n; i += 16) {
         __m512 val = _mm512_sub_ps(_mm512_loadu_ps(x + i),
@@ -416,18 +414,56 @@ ggml_float ggml_vec_cvar_f32(const int n, float * y, const float * x, const floa
         _mm512_storeu_ps(y + i, val);
         sum += (ggml_float)_mm512_reduce_add_ps(_mm512_mul_ps(val, val));
     }
+    #if defined(__AVX2__) && defined(__FMA__)
+    for (; i + 7 < n; i += 8) {
+        __m256 val = _mm256_sub_ps(_mm256_loadu_ps(x + i),
+                                   _mm256_set1_ps(mean));
+        _mm256_storeu_ps(y + i, val);
+        val = _mm256_mul_ps(val, val);
+        __m128 val2 = _mm_add_ps(_mm256_extractf128_ps(val, 1),
+                                 _mm256_castps256_ps128(val));
+        val2 = _mm_add_ps(val2, _mm_movehl_ps(val2, val2));
+        val2 = _mm_add_ss(val2, _mm_movehdup_ps(val2));
+        sum += (ggml_float)_mm_cvtss_f32(val2);
+    }
+    #endif // __AVX2__ && __FMA__
+    for (; i + 3 < n; i += 4) {
+        __m128 val = _mm_sub_ps(_mm_loadu_ps(x + i),
+                                _mm_set1_ps(mean));
+        _mm_storeu_ps(y + i, val);
+        val = _mm_mul_ps(val, val);
+        val = _mm_add_ps(val, _mm_movehl_ps(val, val));
+        val = _mm_add_ss(val, _mm_movehdup_ps(val));
+        sum += (ggml_float)_mm_cvtss_f32(val);
+    }
 #elif defined(__AVX2__) && defined(__FMA__)
     for (; i + 7 < n; i += 8) {
         __m256 val = _mm256_sub_ps(_mm256_loadu_ps(x + i),
                                    _mm256_set1_ps(mean));
         _mm256_storeu_ps(y + i, val);
-        val = _mm256_mul_ps(val,val);
+        val = _mm256_mul_ps(val, val);
         __m128 val2 = _mm_add_ps(_mm256_extractf128_ps(val, 1),
                                  _mm256_castps256_ps128(val));
         val2 = _mm_add_ps(val2, _mm_movehl_ps(val2, val2));
         val2 = _mm_add_ss(val2, _mm_movehdup_ps(val2));
         sum += (ggml_float)_mm_cvtss_f32(val2);
     }
+    for (; i + 3 < n; i += 4) {
+        __m128 val = _mm_sub_ps(_mm_loadu_ps(x + i),
+                                _mm_set1_ps(mean));
+        _mm_storeu_ps(y + i, val);
+        val = _mm_mul_ps(val, val);
+#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__)
+        val = _mm_add_ps(val, _mm_movehl_ps(val, val));
+        val = _mm_add_ss(val, _mm_movehdup_ps(val));
+#else
+        __m128 tmp = _mm_shuffle_ps(val, val, _MM_SHUFFLE(2, 3, 0, 1));
+        val = _mm_add_ps(val, tmp);
+        tmp = _mm_movehl_ps(tmp, val);
+        val = _mm_add_ss(val, tmp);
+#endif // __AVX__ || __AVX2__ || __AVX512F__
+        sum += (ggml_float)_mm_cvtss_f32(val);
+    }
 #elif defined(__SSE2__)
     for (; i + 3 < n; i += 4) {
         __m128 val = _mm_sub_ps(_mm_loadu_ps(x + i),
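For reference, the following small standalone program (not part of the patch; purely illustrative) reproduces the iteration breakdown quoted in the commit message for a vector of size 110, using the same "i + (width - 1) < n" bounds as the cascaded loops:

// Illustrative only: counts how many iterations each stage of the cascade
// handles for a given vector size, mirroring the loop bounds used in
// ggml_vec_cvar_f32 after this patch.
#include <cstdio>

int main() {
    const int    widths[] = { 16, 8, 4, 1 };                       // AVX512, AVX2, SSE2, scalar
    const char * names[]  = { "AVX512", "AVX2", "SSE2", "scalar" };

    const int n = 110;   // example size from the commit message
    int i = 0;
    for (int s = 0; s < 4; ++s) {
        int iters = 0;
        while (i + widths[s] - 1 < n) {   // same condition as "i + 15 < n", "i + 7 < n", ...
            i += widths[s];
            ++iters;
        }
        std::printf("%-6s iterations: %d\n", names[s], iters);
    }
    return 0;
}

Compiled with any C++ compiler, it prints 6, 1, 1 and 2 iterations for the 16-, 8-, 4- and 1-wide stages, matching the figures above.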
From 91fc7d78681f7a4e263a3d532db0e5ca3af947fc Mon Sep 17 00:00:00 2001
From: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com>
Date: Fri, 21 Nov 2025 15:32:56 +0000
Subject: [PATCH 2/2] cpu: add SSE instruction guards for AVX512 cascade

Add proper SSE instruction guards in the AVX512 cascade's SSE reduction to match the pattern used in the pure SSE2 branch. This ensures proper instruction availability across different Windows/MSVC configurations.

The _mm_movehdup_ps intrinsic requires SSE3, which is available when AVX/AVX2/AVX512 is defined, but may not be available in pure SSE2 builds. The fallback uses _mm_shuffle_ps, which only requires SSE and is therefore available in SSE2 builds.

Co-Authored-By: Jake Cosme
---
 ggml/src/ggml-cpu/vec.cpp | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/ggml/src/ggml-cpu/vec.cpp b/ggml/src/ggml-cpu/vec.cpp
index a7cb36d0426..2745490bf5e 100644
--- a/ggml/src/ggml-cpu/vec.cpp
+++ b/ggml/src/ggml-cpu/vec.cpp
@@ -432,8 +432,15 @@ ggml_float ggml_vec_cvar_f32(const int n, float * y, const float * x, const floa
                                 _mm_set1_ps(mean));
         _mm_storeu_ps(y + i, val);
         val = _mm_mul_ps(val, val);
+#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__)
         val = _mm_add_ps(val, _mm_movehl_ps(val, val));
         val = _mm_add_ss(val, _mm_movehdup_ps(val));
+#else
+        __m128 tmp = _mm_shuffle_ps(val, val, _MM_SHUFFLE(2, 3, 0, 1));
+        val = _mm_add_ps(val, tmp);
+        tmp = _mm_movehl_ps(tmp, val);
+        val = _mm_add_ss(val, tmp);
+#endif // __AVX__ || __AVX2__ || __AVX512F__
         sum += (ggml_float)_mm_cvtss_f32(val);
     }
 #elif defined(__AVX2__) && defined(__FMA__)
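For reference, here is a standalone sketch (not taken from vec.cpp; the helper names hsum_sse3 and hsum_sse2 are invented for illustration) of the two horizontal-sum sequences the new guard chooses between. Both reduce the four float lanes of a __m128 to the same scalar; the second does it without any SSE3 instruction:

// Illustrative only: the two 4-lane horizontal sums guarded by the #if above.
#include <immintrin.h>
#include <cstdio>

static float hsum_sse3(__m128 v) {
    // SSE3 path (available whenever __AVX__/__AVX2__/__AVX512F__ is defined)
    v = _mm_add_ps(v, _mm_movehl_ps(v, v));    // lanes {0+2, 1+3, ., .}
    v = _mm_add_ss(v, _mm_movehdup_ps(v));     // lane 0 = 0+2 + 1+3
    return _mm_cvtss_f32(v);
}

static float hsum_sse2(__m128 v) {
    // SSE2-only fallback: avoids the SSE3 _mm_movehdup_ps
    __m128 tmp = _mm_shuffle_ps(v, v, _MM_SHUFFLE(2, 3, 0, 1));  // {1, 0, 3, 2}
    v   = _mm_add_ps(v, tmp);                  // {0+1, 0+1, 2+3, 2+3}
    tmp = _mm_movehl_ps(tmp, v);               // lane 0 = 2+3
    v   = _mm_add_ss(v, tmp);                  // lane 0 = 0+1 + 2+3
    return _mm_cvtss_f32(v);
}

int main() {
    __m128 v = _mm_setr_ps(1.0f, 2.0f, 3.0f, 4.0f);
    std::printf("%f %f\n", hsum_sse3(v), hsum_sse2(v));  // both print 10.000000
    return 0;
}

Built with, e.g., g++ -msse3 on GCC/Clang (MSVC typically needs no extra flag); both calls return the same value, which is why the guard only affects which instructions are emitted, not the result.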