mirror of https://github.com/wolfpld/tracy.git (synced 2025-03-20 07:40:02 +08:00)

Add AVX2 version of etcpak.

This commit is contained in:
parent 7e9539ef2d
commit 9ef128995a
@@ -5,10 +5,562 @@
#include <stdint.h>
#include <string.h>

typedef std::array<uint16_t, 4> v4i;

#if defined __AVX__ && !defined __SSE4_1__
#  define __SSE4_1__
#endif

#ifdef __AVX2__

#ifdef _MSC_VER
#  include <intrin.h>
#  include <Windows.h>
#  define _bswap(x) _byteswap_ulong(x)
#  define VS_VECTORCALL __vectorcall
#else
#  include <x86intrin.h>
#  pragma GCC push_options
#  pragma GCC target ("avx2,fma,bmi2")
#  define VS_VECTORCALL
#endif

#ifndef _bswap
#  define _bswap(x) __builtin_bswap32(x)
#endif

namespace tracy
{

const __m128i g_table128_SIMD[2] =
{
    _mm_setr_epi16( 2*128, 5*128, 9*128, 13*128, 18*128, 24*128, 33*128, 47*128),
    _mm_setr_epi16( 8*128, 17*128, 29*128, 42*128, 60*128, 80*128, 106*128, 183*128)
};
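
// Editorial note: these constants are the positive halves of the ETC1
// intensity modifier table, { 2, 8 }, { 5, 17 }, { 9, 29 }, { 13, 42 },
// { 18, 60 }, { 24, 80 }, { 33, 106 }, { 47, 183 }, pre-scaled by 128.
// The per-channel weights used in the FindBestFit functions below sum to
// 128 ( 14 + 76 + 38 ), so the weighted pixel differences land on the
// same x128 scale and can be compared against these entries directly.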

#ifdef _MSC_VER
static inline unsigned long _bit_scan_forward( unsigned long mask )
{
    unsigned long ret;
    _BitScanForward( &ret, mask );
    return ret;
}
#endif

static __m256i VS_VECTORCALL Sum4_AVX2( const uint8_t* data) noexcept
{
    __m128i d0 = _mm_loadu_si128(((__m128i*)data) + 0);
    __m128i d1 = _mm_loadu_si128(((__m128i*)data) + 1);
    __m128i d2 = _mm_loadu_si128(((__m128i*)data) + 2);
    __m128i d3 = _mm_loadu_si128(((__m128i*)data) + 3);

    __m128i dm0 = _mm_and_si128(d0, _mm_set1_epi32(0x00FFFFFF));
    __m128i dm1 = _mm_and_si128(d1, _mm_set1_epi32(0x00FFFFFF));
    __m128i dm2 = _mm_and_si128(d2, _mm_set1_epi32(0x00FFFFFF));
    __m128i dm3 = _mm_and_si128(d3, _mm_set1_epi32(0x00FFFFFF));

    __m256i t0 = _mm256_cvtepu8_epi16(dm0);
    __m256i t1 = _mm256_cvtepu8_epi16(dm1);
    __m256i t2 = _mm256_cvtepu8_epi16(dm2);
    __m256i t3 = _mm256_cvtepu8_epi16(dm3);

    __m256i sum0 = _mm256_add_epi16(t0, t1);
    __m256i sum1 = _mm256_add_epi16(t2, t3);

    __m256i s0 = _mm256_permute2x128_si256(sum0, sum1, (0) | (3 << 4)); // 0, 0, 3, 3
    __m256i s1 = _mm256_permute2x128_si256(sum0, sum1, (1) | (2 << 4)); // 1, 1, 2, 2

    __m256i s2 = _mm256_permute4x64_epi64(s0, _MM_SHUFFLE(1, 3, 0, 2));
    __m256i s3 = _mm256_permute4x64_epi64(s0, _MM_SHUFFLE(0, 2, 1, 3));
    __m256i s4 = _mm256_permute4x64_epi64(s1, _MM_SHUFFLE(3, 1, 0, 2));
    __m256i s5 = _mm256_permute4x64_epi64(s1, _MM_SHUFFLE(2, 0, 1, 3));

    __m256i sum5 = _mm256_add_epi16(s2, s3); // 3, 0, 3, 0
    __m256i sum6 = _mm256_add_epi16(s4, s5); // 2, 1, 1, 2
    return _mm256_add_epi16(sum5, sum6); // 3+2, 0+1, 3+1, 0+2
}
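
// Editorial sketch, not part of this commit: a scalar equivalent of
// Sum4_AVX2, assuming a 4x4 RGBA block in row-major order. By the lane
// annotations above (2x2 quadrants numbered row-major), each output is
// the channel-wise sum of one 8-pixel half-block, in the order bottom
// (3+2), top (0+1), right (3+1), left (0+2), with alpha masked to zero.
// Sum4_Scalar is a name introduced here for illustration only.
static void Sum4_Scalar( const uint8_t* data, uint16_t sum[4][4] )
{
    memset( sum, 0, 4 * 4 * sizeof( uint16_t ) );
    for( int y = 0; y < 4; ++y )
    {
        for( int x = 0; x < 4; ++x )
        {
            const uint8_t* px = data + ( y * 4 + x ) * 4;
            if( y < 2 ) { sum[1][0] += px[0]; sum[1][1] += px[1]; sum[1][2] += px[2]; } // top
            else        { sum[0][0] += px[0]; sum[0][1] += px[1]; sum[0][2] += px[2]; } // bottom
            if( x < 2 ) { sum[3][0] += px[0]; sum[3][1] += px[1]; sum[3][2] += px[2]; } // left
            else        { sum[2][0] += px[0]; sum[2][1] += px[1]; sum[2][2] += px[2]; } // right
        }
    }
}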

__m256i VS_VECTORCALL Average_AVX2( const __m256i data) noexcept
{
    __m256i a = _mm256_add_epi16(data, _mm256_set1_epi16(4));

    return _mm256_srli_epi16(a, 3);
}
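
// I.e. the rounded average of the 8 pixels in each half-block:
// avg = ( sum + 4 ) / 8, still in the 0..255 range per channel.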

static __m128i VS_VECTORCALL CalcErrorBlock_AVX2( const __m256i data, const v4i a[8]) noexcept
{
    __m256i a0 = _mm256_load_si256((__m256i*)a[0].data());
    __m256i a1 = _mm256_load_si256((__m256i*)a[4].data());

    // err = 8 * ( sq( average[0] ) + sq( average[1] ) + sq( average[2] ) );
    __m256i a4 = _mm256_madd_epi16(a0, a0);
    __m256i a5 = _mm256_madd_epi16(a1, a1);

    __m256i a6 = _mm256_hadd_epi32(a4, a5);
    __m256i a7 = _mm256_slli_epi32(a6, 3);

    __m256i a8 = _mm256_add_epi32(a7, _mm256_set1_epi32(0x3FFFFFFF)); // Big value to prevent negative values, but small enough to prevent overflow

    // average is not swapped
    // err -= block[0] * 2 * average[0];
    // err -= block[1] * 2 * average[1];
    // err -= block[2] * 2 * average[2];
    __m256i a2 = _mm256_slli_epi16(a0, 1);
    __m256i a3 = _mm256_slli_epi16(a1, 1);
    __m256i b0 = _mm256_madd_epi16(a2, data);
    __m256i b1 = _mm256_madd_epi16(a3, data);

    __m256i b2 = _mm256_hadd_epi32(b0, b1);
    __m256i b3 = _mm256_sub_epi32(a8, b2);
    __m256i b4 = _mm256_hadd_epi32(b3, b3);

    __m256i b5 = _mm256_permutevar8x32_epi32(b4, _mm256_set_epi32(0, 0, 0, 0, 5, 1, 4, 0));

    return _mm256_castsi256_si128(b5);
}
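
// Editorial note: for one 8-pixel half-block with channel sums S and
// averages a, the exact squared error against the average expands to
//   sum( px^2 ) - 2 * a . S + 8 * a^2   (summed over R, G, B).
// sum( px^2 ) over the full 4x4 block is identical for every candidate
// split, so it can be dropped when comparing them; the 0x3FFFFFFF bias
// keeps the remaining 8*a^2 - 2*a.S term non-negative for the unsigned
// minimum search in ProcessRGB.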

static void VS_VECTORCALL ProcessAverages_AVX2(const __m256i d, v4i a[8] ) noexcept
{
    __m256i t = _mm256_add_epi16(_mm256_mullo_epi16(d, _mm256_set1_epi16(31)), _mm256_set1_epi16(128));

    __m256i c = _mm256_srli_epi16(_mm256_add_epi16(t, _mm256_srli_epi16(t, 8)), 8);

    __m256i c1 = _mm256_shuffle_epi32(c, _MM_SHUFFLE(3, 2, 3, 2));
    __m256i diff = _mm256_sub_epi16(c, c1);
    diff = _mm256_max_epi16(diff, _mm256_set1_epi16(-4));
    diff = _mm256_min_epi16(diff, _mm256_set1_epi16(3));

    __m256i co = _mm256_add_epi16(c1, diff);

    c = _mm256_blend_epi16(co, c, 0xF0);

    __m256i a0 = _mm256_or_si256(_mm256_slli_epi16(c, 3), _mm256_srli_epi16(c, 2));

    _mm256_store_si256((__m256i*)a[4].data(), a0);

    __m256i t0 = _mm256_add_epi16(_mm256_mullo_epi16(d, _mm256_set1_epi16(15)), _mm256_set1_epi16(128));
    __m256i t1 = _mm256_srli_epi16(_mm256_add_epi16(t0, _mm256_srli_epi16(t0, 8)), 8);

    __m256i t2 = _mm256_or_si256(t1, _mm256_slli_epi16(t1, 4));

    _mm256_store_si256((__m256i*)a[0].data(), t2);
}
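
// Editorial note: with t = x * 31 + 128, ( t + ( t >> 8 ) ) >> 8 is the
// usual multiply-shift form of round( x * 31 / 255 ), i.e. 8-bit to 5-bit
// quantization, and ( c << 3 ) | ( c >> 2 ) expands the result back to
// 8 bits. The x * 15 variant below does the same for the 4-bit channels
// of individual mode, re-expanding with ( t1 << 4 ) | t1. The max/min
// clamp keeps the second base color within the 3-bit signed delta range
// of differential mode.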

static uint64_t VS_VECTORCALL EncodeAverages_AVX2( const v4i a[8], size_t idx ) noexcept
{
    uint64_t d = ( idx << 24 );
    size_t base = idx << 1;

    __m128i a0 = _mm_load_si128((const __m128i*)a[base].data());

    __m128i r0, r1;

    if( ( idx & 0x2 ) == 0 )
    {
        r0 = _mm_srli_epi16(a0, 4);

        __m128i a1 = _mm_unpackhi_epi64(r0, r0);
        r1 = _mm_slli_epi16(a1, 4);
    }
    else
    {
        __m128i a1 = _mm_and_si128(a0, _mm_set1_epi16(-8));

        r0 = _mm_unpackhi_epi64(a1, a1);
        __m128i a2 = _mm_sub_epi16(a1, r0);
        __m128i a3 = _mm_srai_epi16(a2, 3);
        r1 = _mm_and_si128(a3, _mm_set1_epi16(0x07));
    }

    __m128i r2 = _mm_or_si128(r0, r1);
    // do missing swap for average values
    __m128i r3 = _mm_shufflelo_epi16(r2, _MM_SHUFFLE(3, 0, 1, 2));
    __m128i r4 = _mm_packus_epi16(r3, _mm_setzero_si128());
    d |= _mm_cvtsi128_si32(r4);

    return d;
}
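
// Editorial note: the two branches mirror ETC1's two encodings. When the
// differential bit of idx is clear ( idx & 0x2 ) the two 4-bit base
// colors are packed as high/low nibbles (individual mode); otherwise the
// first color is stored as 5 bits plus a 3-bit signed delta to the
// second. The shufflelo exchanges the R and B words before packing, the
// swap the averaging code skipped ("average is not swapped" above).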

static uint64_t VS_VECTORCALL CheckSolid_AVX2( const uint8_t* src ) noexcept
{
    __m256i d0 = _mm256_loadu_si256(((__m256i*)src) + 0);
    __m256i d1 = _mm256_loadu_si256(((__m256i*)src) + 1);

    __m256i c = _mm256_broadcastd_epi32(_mm256_castsi256_si128(d0));

    __m256i c0 = _mm256_cmpeq_epi8(d0, c);
    __m256i c1 = _mm256_cmpeq_epi8(d1, c);

    __m256i m = _mm256_and_si256(c0, c1);

    if (!_mm256_testc_si256(m, _mm256_set1_epi32(-1)))
    {
        return 0;
    }

    return 0x02000000 |
        ( (unsigned int)( src[0] & 0xF8 ) << 16 ) |
        ( (unsigned int)( src[1] & 0xF8 ) << 8 ) |
        ( (unsigned int)( src[2] & 0xF8 ) );
}
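
// Editorial note: the test above requires all 16 RGBA pixels (alpha
// bytes included) to equal pixel 0. Solid blocks short-circuit to
// differential mode (bit 0x02000000) with the 555-quantized color, zero
// deltas, table index 0 and all-zero selectors in the upper 32 bits.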

static __m128i VS_VECTORCALL PrepareAverages_AVX2( v4i a[8], const uint8_t* src) noexcept
{
    __m256i sum4 = Sum4_AVX2( src );

    ProcessAverages_AVX2(Average_AVX2( sum4 ), a );

    return CalcErrorBlock_AVX2( sum4, a);
}

static void VS_VECTORCALL FindBestFit_4x2_AVX2( uint32_t terr[2][8], uint32_t tsel[8], v4i a[8], const uint32_t offset, const uint8_t* data) noexcept
{
    __m256i sel0 = _mm256_setzero_si256();
    __m256i sel1 = _mm256_setzero_si256();

    for (unsigned int j = 0; j < 2; ++j)
    {
        unsigned int bid = offset + 1 - j;

        __m256i squareErrorSum = _mm256_setzero_si256();

        __m128i a0 = _mm_loadl_epi64((const __m128i*)a[bid].data());
        __m256i a1 = _mm256_broadcastq_epi64(a0);

        // Processing one full row each iteration
        for (size_t i = 0; i < 8; i += 4)
        {
            __m128i rgb = _mm_loadu_si128((const __m128i*)(data + i * 4));

            __m256i rgb16 = _mm256_cvtepu8_epi16(rgb);
            __m256i d = _mm256_sub_epi16(a1, rgb16);

            // The scaling values are divided by two and rounded to keep the differences within the range of a signed int16
            // This produces slightly different results, but is significantly faster
            __m256i pixel0 = _mm256_madd_epi16(d, _mm256_set_epi16(0, 38, 76, 14, 0, 38, 76, 14, 0, 38, 76, 14, 0, 38, 76, 14));
            __m256i pixel1 = _mm256_packs_epi32(pixel0, pixel0);
            __m256i pixel2 = _mm256_hadd_epi16(pixel1, pixel1);
            __m128i pixel3 = _mm256_castsi256_si128(pixel2);

            __m128i pix0 = _mm_broadcastw_epi16(pixel3);
            __m128i pix1 = _mm_broadcastw_epi16(_mm_srli_epi32(pixel3, 16));
            __m256i pixel = _mm256_insertf128_si256(_mm256_castsi128_si256(pix0), pix1, 1);

            // Processing first two pixels of the row
            {
                __m256i pix = _mm256_abs_epi16(pixel);

                // Taking the absolute value is way faster. The values are only used to sort, so the result will be the same.
                // Since the selector table is symmetrical, we need to calculate the difference only for half of the entries.
                __m256i error0 = _mm256_abs_epi16(_mm256_sub_epi16(pix, _mm256_broadcastsi128_si256(g_table128_SIMD[0])));
                __m256i error1 = _mm256_abs_epi16(_mm256_sub_epi16(pix, _mm256_broadcastsi128_si256(g_table128_SIMD[1])));

                __m256i minIndex0 = _mm256_and_si256(_mm256_cmpgt_epi16(error0, error1), _mm256_set1_epi16(1));
                __m256i minError = _mm256_min_epi16(error0, error1);

                // Exploiting the symmetry of the selector table and using the sign bit
                // This produces slightly different results, but is significantly faster
                __m256i minIndex1 = _mm256_srli_epi16(pixel, 15);

                // Interleaving values so the madd instruction can be used
                __m256i minErrorLo = _mm256_permute4x64_epi64(minError, _MM_SHUFFLE(1, 1, 0, 0));
                __m256i minErrorHi = _mm256_permute4x64_epi64(minError, _MM_SHUFFLE(3, 3, 2, 2));

                __m256i minError2 = _mm256_unpacklo_epi16(minErrorLo, minErrorHi);
                // Squaring the minimum error to produce correct values when adding
                __m256i squareError = _mm256_madd_epi16(minError2, minError2);

                squareErrorSum = _mm256_add_epi32(squareErrorSum, squareError);

                // Packing selector bits
                __m256i minIndexLo2 = _mm256_sll_epi16(minIndex0, _mm_cvtsi64_si128(i + j * 8));
                __m256i minIndexHi2 = _mm256_sll_epi16(minIndex1, _mm_cvtsi64_si128(i + j * 8));

                sel0 = _mm256_or_si256(sel0, minIndexLo2);
                sel1 = _mm256_or_si256(sel1, minIndexHi2);
            }

            pixel3 = _mm256_extracti128_si256(pixel2, 1);
            pix0 = _mm_broadcastw_epi16(pixel3);
            pix1 = _mm_broadcastw_epi16(_mm_srli_epi32(pixel3, 16));
            pixel = _mm256_insertf128_si256(_mm256_castsi128_si256(pix0), pix1, 1);

            // Processing second two pixels of the row
            {
                __m256i pix = _mm256_abs_epi16(pixel);

                // Taking the absolute value is way faster. The values are only used to sort, so the result will be the same.
                // Since the selector table is symmetrical, we need to calculate the difference only for half of the entries.
                __m256i error0 = _mm256_abs_epi16(_mm256_sub_epi16(pix, _mm256_broadcastsi128_si256(g_table128_SIMD[0])));
                __m256i error1 = _mm256_abs_epi16(_mm256_sub_epi16(pix, _mm256_broadcastsi128_si256(g_table128_SIMD[1])));

                __m256i minIndex0 = _mm256_and_si256(_mm256_cmpgt_epi16(error0, error1), _mm256_set1_epi16(1));
                __m256i minError = _mm256_min_epi16(error0, error1);

                // Exploiting the symmetry of the selector table and using the sign bit
                __m256i minIndex1 = _mm256_srli_epi16(pixel, 15);

                // Interleaving values so the madd instruction can be used
                __m256i minErrorLo = _mm256_permute4x64_epi64(minError, _MM_SHUFFLE(1, 1, 0, 0));
                __m256i minErrorHi = _mm256_permute4x64_epi64(minError, _MM_SHUFFLE(3, 3, 2, 2));

                __m256i minError2 = _mm256_unpacklo_epi16(minErrorLo, minErrorHi);
                // Squaring the minimum error to produce correct values when adding
                __m256i squareError = _mm256_madd_epi16(minError2, minError2);

                squareErrorSum = _mm256_add_epi32(squareErrorSum, squareError);

                // Packing selector bits
                __m256i minIndexLo2 = _mm256_sll_epi16(minIndex0, _mm_cvtsi64_si128(i + j * 8));
                __m256i minIndexHi2 = _mm256_sll_epi16(minIndex1, _mm_cvtsi64_si128(i + j * 8));
                __m256i minIndexLo3 = _mm256_slli_epi16(minIndexLo2, 2);
                __m256i minIndexHi3 = _mm256_slli_epi16(minIndexHi2, 2);

                sel0 = _mm256_or_si256(sel0, minIndexLo3);
                sel1 = _mm256_or_si256(sel1, minIndexHi3);
            }
        }

        data += 8 * 4;

        _mm256_store_si256((__m256i*)terr[1 - j], squareErrorSum);
    }

    // Interleave selector bits
    __m256i minIndexLo0 = _mm256_unpacklo_epi16(sel0, sel1);
    __m256i minIndexHi0 = _mm256_unpackhi_epi16(sel0, sel1);

    __m256i minIndexLo1 = _mm256_permute2x128_si256(minIndexLo0, minIndexHi0, (0) | (2 << 4));
    __m256i minIndexHi1 = _mm256_permute2x128_si256(minIndexLo0, minIndexHi0, (1) | (3 << 4));

    __m256i minIndexHi2 = _mm256_slli_epi32(minIndexHi1, 1);

    __m256i sel = _mm256_or_si256(minIndexLo1, minIndexHi2);

    _mm256_store_si256((__m256i*)tsel, sel);
}
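
// Editorial sketch, not part of this commit: the per-pixel decision the
// two inner blocks above vectorize, assuming the halved weights
// { 14, 76, 38 } and the x128 tables. Here w is the weighted
// average-minus-pixel difference saturated to int16, and table128, sq,
// min are hypothetical helpers introduced for illustration:
//
//   uint16_t p = abs( w );
//   for( int k = 0; k < 8; ++k )                    // candidate table rows
//   {
//       uint16_t e0 = abs( p - table128[0][k] );    // small modifier
//       uint16_t e1 = abs( p - table128[1][k] );    // large modifier
//       err[k] += sq( min( e0, e1 ) );              // per-row error sum
//       sel[k]  = ( e1 < e0 ) ? 1 : 0;              // magnitude bit
//       sign[k] = ( w < 0 ) ? 1 : 0;                // sign bit, from MSB
//   }
//
// Per table row k only the best of the four modifiers { -large, -small,
// +small, +large } is kept, encoded as a magnitude bit plus a sign bit;
// EncodeSelectors_AVX2 later picks the row with the smallest error sum.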

static void VS_VECTORCALL FindBestFit_2x4_AVX2( uint32_t terr[2][8], uint32_t tsel[8], v4i a[8], const uint32_t offset, const uint8_t* data) noexcept
{
    __m256i sel0 = _mm256_setzero_si256();
    __m256i sel1 = _mm256_setzero_si256();

    __m256i squareErrorSum0 = _mm256_setzero_si256();
    __m256i squareErrorSum1 = _mm256_setzero_si256();

    __m128i a0 = _mm_loadl_epi64((const __m128i*)a[offset + 1].data());
    __m128i a1 = _mm_loadl_epi64((const __m128i*)a[offset + 0].data());

    __m128i a2 = _mm_broadcastq_epi64(a0);
    __m128i a3 = _mm_broadcastq_epi64(a1);
    __m256i a4 = _mm256_insertf128_si256(_mm256_castsi128_si256(a2), a3, 1);

    // Processing one full row each iteration
    for (size_t i = 0; i < 16; i += 4)
    {
        __m128i rgb = _mm_loadu_si128((const __m128i*)(data + i * 4));

        __m256i rgb16 = _mm256_cvtepu8_epi16(rgb);
        __m256i d = _mm256_sub_epi16(a4, rgb16);

        // The scaling values are divided by two and rounded to keep the differences within the range of a signed int16
        // This produces slightly different results, but is significantly faster
        __m256i pixel0 = _mm256_madd_epi16(d, _mm256_set_epi16(0, 38, 76, 14, 0, 38, 76, 14, 0, 38, 76, 14, 0, 38, 76, 14));
        __m256i pixel1 = _mm256_packs_epi32(pixel0, pixel0);
        __m256i pixel2 = _mm256_hadd_epi16(pixel1, pixel1);
        __m128i pixel3 = _mm256_castsi256_si128(pixel2);

        __m128i pix0 = _mm_broadcastw_epi16(pixel3);
        __m128i pix1 = _mm_broadcastw_epi16(_mm_srli_epi32(pixel3, 16));
        __m256i pixel = _mm256_insertf128_si256(_mm256_castsi128_si256(pix0), pix1, 1);

        // Processing first two pixels of the row
        {
            __m256i pix = _mm256_abs_epi16(pixel);

            // Taking the absolute value is way faster. The values are only used to sort, so the result will be the same.
            // Since the selector table is symmetrical, we need to calculate the difference only for half of the entries.
            __m256i error0 = _mm256_abs_epi16(_mm256_sub_epi16(pix, _mm256_broadcastsi128_si256(g_table128_SIMD[0])));
            __m256i error1 = _mm256_abs_epi16(_mm256_sub_epi16(pix, _mm256_broadcastsi128_si256(g_table128_SIMD[1])));

            __m256i minIndex0 = _mm256_and_si256(_mm256_cmpgt_epi16(error0, error1), _mm256_set1_epi16(1));
            __m256i minError = _mm256_min_epi16(error0, error1);

            // Exploiting the symmetry of the selector table and using the sign bit
            __m256i minIndex1 = _mm256_srli_epi16(pixel, 15);

            // Interleaving values so the madd instruction can be used
            __m256i minErrorLo = _mm256_permute4x64_epi64(minError, _MM_SHUFFLE(1, 1, 0, 0));
            __m256i minErrorHi = _mm256_permute4x64_epi64(minError, _MM_SHUFFLE(3, 3, 2, 2));

            __m256i minError2 = _mm256_unpacklo_epi16(minErrorLo, minErrorHi);
            // Squaring the minimum error to produce correct values when adding
            __m256i squareError = _mm256_madd_epi16(minError2, minError2);

            squareErrorSum0 = _mm256_add_epi32(squareErrorSum0, squareError);

            // Packing selector bits
            __m256i minIndexLo2 = _mm256_sll_epi16(minIndex0, _mm_cvtsi64_si128(i));
            __m256i minIndexHi2 = _mm256_sll_epi16(minIndex1, _mm_cvtsi64_si128(i));

            sel0 = _mm256_or_si256(sel0, minIndexLo2);
            sel1 = _mm256_or_si256(sel1, minIndexHi2);
        }

        pixel3 = _mm256_extracti128_si256(pixel2, 1);
        pix0 = _mm_broadcastw_epi16(pixel3);
        pix1 = _mm_broadcastw_epi16(_mm_srli_epi32(pixel3, 16));
        pixel = _mm256_insertf128_si256(_mm256_castsi128_si256(pix0), pix1, 1);

        // Processing second two pixels of the row
        {
            __m256i pix = _mm256_abs_epi16(pixel);

            // Taking the absolute value is way faster. The values are only used to sort, so the result will be the same.
            // Since the selector table is symmetrical, we need to calculate the difference only for half of the entries.
            __m256i error0 = _mm256_abs_epi16(_mm256_sub_epi16(pix, _mm256_broadcastsi128_si256(g_table128_SIMD[0])));
            __m256i error1 = _mm256_abs_epi16(_mm256_sub_epi16(pix, _mm256_broadcastsi128_si256(g_table128_SIMD[1])));

            __m256i minIndex0 = _mm256_and_si256(_mm256_cmpgt_epi16(error0, error1), _mm256_set1_epi16(1));
            __m256i minError = _mm256_min_epi16(error0, error1);

            // Exploiting the symmetry of the selector table and using the sign bit
            __m256i minIndex1 = _mm256_srli_epi16(pixel, 15);

            // Interleaving values so the madd instruction can be used
            __m256i minErrorLo = _mm256_permute4x64_epi64(minError, _MM_SHUFFLE(1, 1, 0, 0));
            __m256i minErrorHi = _mm256_permute4x64_epi64(minError, _MM_SHUFFLE(3, 3, 2, 2));

            __m256i minError2 = _mm256_unpacklo_epi16(minErrorLo, minErrorHi);
            // Squaring the minimum error to produce correct values when adding
            __m256i squareError = _mm256_madd_epi16(minError2, minError2);

            squareErrorSum1 = _mm256_add_epi32(squareErrorSum1, squareError);

            // Packing selector bits
            __m256i minIndexLo2 = _mm256_sll_epi16(minIndex0, _mm_cvtsi64_si128(i));
            __m256i minIndexHi2 = _mm256_sll_epi16(minIndex1, _mm_cvtsi64_si128(i));
            __m256i minIndexLo3 = _mm256_slli_epi16(minIndexLo2, 2);
            __m256i minIndexHi3 = _mm256_slli_epi16(minIndexHi2, 2);

            sel0 = _mm256_or_si256(sel0, minIndexLo3);
            sel1 = _mm256_or_si256(sel1, minIndexHi3);
        }
    }

    _mm256_store_si256((__m256i*)terr[1], squareErrorSum0);
    _mm256_store_si256((__m256i*)terr[0], squareErrorSum1);

    // Interleave selector bits
    __m256i minIndexLo0 = _mm256_unpacklo_epi16(sel0, sel1);
    __m256i minIndexHi0 = _mm256_unpackhi_epi16(sel0, sel1);

    __m256i minIndexLo1 = _mm256_permute2x128_si256(minIndexLo0, minIndexHi0, (0) | (2 << 4));
    __m256i minIndexHi1 = _mm256_permute2x128_si256(minIndexLo0, minIndexHi0, (1) | (3 << 4));

    __m256i minIndexHi2 = _mm256_slli_epi32(minIndexHi1, 1);

    __m256i sel = _mm256_or_si256(minIndexLo1, minIndexHi2);

    _mm256_store_si256((__m256i*)tsel, sel);
}

uint64_t VS_VECTORCALL EncodeSelectors_AVX2( uint64_t d, const uint32_t terr[2][8], const uint32_t tsel[8], const bool rotate) noexcept
{
    size_t tidx[2];

    // Get index of minimum error (terr[0] and terr[1])
    __m256i err0 = _mm256_load_si256((const __m256i*)terr[0]);
    __m256i err1 = _mm256_load_si256((const __m256i*)terr[1]);

    __m256i errLo = _mm256_permute2x128_si256(err0, err1, (0) | (2 << 4));
    __m256i errHi = _mm256_permute2x128_si256(err0, err1, (1) | (3 << 4));

    __m256i errMin0 = _mm256_min_epu32(errLo, errHi);

    __m256i errMin1 = _mm256_shuffle_epi32(errMin0, _MM_SHUFFLE(2, 3, 0, 1));
    __m256i errMin2 = _mm256_min_epu32(errMin0, errMin1);

    __m256i errMin3 = _mm256_shuffle_epi32(errMin2, _MM_SHUFFLE(1, 0, 3, 2));
    __m256i errMin4 = _mm256_min_epu32(errMin3, errMin2);

    __m256i errMin5 = _mm256_permute2x128_si256(errMin4, errMin4, (0) | (0 << 4));
    __m256i errMin6 = _mm256_permute2x128_si256(errMin4, errMin4, (1) | (1 << 4));

    __m256i errMask0 = _mm256_cmpeq_epi32(errMin5, err0);
    __m256i errMask1 = _mm256_cmpeq_epi32(errMin6, err1);

    uint32_t mask0 = _mm256_movemask_epi8(errMask0);
    uint32_t mask1 = _mm256_movemask_epi8(errMask1);

    tidx[0] = _bit_scan_forward(mask0) >> 2;
    tidx[1] = _bit_scan_forward(mask1) >> 2;

    d |= tidx[0] << 26;
    d |= tidx[1] << 29;

    unsigned int t0 = tsel[tidx[0]];
    unsigned int t1 = tsel[tidx[1]];

    if (!rotate)
    {
        t0 &= 0xFF00FF00;
        t1 &= 0x00FF00FF;
    }
    else
    {
        t0 &= 0xCCCCCCCC;
        t1 &= 0x33333333;
    }

    // Flip selectors from sign bit
    unsigned int t2 = (t0 | t1) ^ 0xFFFF0000;

    return d | static_cast<uint64_t>(_bswap(t2)) << 32;
}
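
// Editorial note: the reductions above find, for each half-block, the
// table row with the smallest accumulated error (an unsigned minimum,
// then cmpeq/movemask/bit-scan to recover its index). The row indices
// land in bits 26..28 and 29..31 of the block word; the two selector
// planes are merged, XORed with 0xFFFF0000 to undo the sign-bit encoding
// of the upper plane, byte-swapped and placed in the upper 32 bits of
// the big-endian ETC1 block.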

static uint64_t ProcessRGB( const uint8_t* src )
{
    uint64_t d = CheckSolid_AVX2( src );
    if( d != 0 ) return d;

    alignas(32) v4i a[8];

    __m128i err0 = PrepareAverages_AVX2( a, src );

    // Get index of minimum error (err0)
    __m128i err1 = _mm_shuffle_epi32(err0, _MM_SHUFFLE(2, 3, 0, 1));
    __m128i errMin0 = _mm_min_epu32(err0, err1);

    __m128i errMin1 = _mm_shuffle_epi32(errMin0, _MM_SHUFFLE(1, 0, 3, 2));
    __m128i errMin2 = _mm_min_epu32(errMin1, errMin0);

    __m128i errMask = _mm_cmpeq_epi32(errMin2, err0);

    uint32_t mask = _mm_movemask_epi8(errMask);

    uint32_t idx = _bit_scan_forward(mask) >> 2;

    d |= EncodeAverages_AVX2( a, idx );

    alignas(32) uint32_t terr[2][8] = {};
    alignas(32) uint32_t tsel[8];

    if ((idx == 0) || (idx == 2))
    {
        FindBestFit_4x2_AVX2( terr, tsel, a, idx * 2, src );
    }
    else
    {
        FindBestFit_2x4_AVX2( terr, tsel, a, idx * 2, src );
    }

    return EncodeSelectors_AVX2( d, terr, tsel, (idx % 2) == 1 );
}
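
// Editorial sketch, not part of this commit: ProcessRGB consumes one 4x4
// RGBA block (64 contiguous bytes, row-major) and returns the 64-bit
// ETC1 block, so a caller walks the image in 4x4 tiles, e.g.:
//
//   uint64_t block = ProcessRGB( rgba );    // rgba: 64 bytes, one tile
//   memcpy( dst, &block, sizeof( block ) ); dst += sizeof( block );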

#else

#ifdef __SSE4_1__
# ifdef _MSC_VER
#  include <intrin.h>

@@ -33,8 +585,6 @@
namespace tracy
{

typedef std::array<uint16_t, 4> v4i;

const uint32_t g_avg2[16] = {
    0x00,
    0x11,

@@ -569,6 +1119,8 @@ static uint64_t ProcessRGB( const uint8_t* src )
    return FixByteOrder( EncodeSelectors( d, terr, tsel, id ) );
}

#endif

void CompressImageEtc1( const char* src, char* dst, int w, int h )
{
    assert( (w % 4) == 0 && (h % 4) == 0 );