
Commit

qwq
nihui committed Feb 5, 2025
1 parent ac8e174 commit b05e650
Showing 2 changed files with 12 additions and 207 deletions.
src/layer/x86/convolution_x86.cpp (13 changes: 12 additions & 1 deletion)
@@ -993,7 +993,18 @@ int Convolution_x86::forward_int8_x86(const Mat& bottom_blob, Mat& top_blob, con
 #if __SSE2__
     if (opt.use_packing_layout)
     {
-        out_elempack_int32 = num_output % 4 == 0 ? 4 : 1;
+        if (use_int8_requantize)
+        {
+#if __AVX__
+            out_elempack_int32 = num_output % 8 == 0 ? 8 : 1;
+#else
+            out_elempack_int32 = num_output % 4 == 0 ? 4 : 1;
+#endif
+        }
+        else
+        {
+            out_elempack_int32 = num_output % 4 == 0 ? 4 : 1;
+        }
     }
 #endif // __SSE2__

src/layer/x86/requantize_x86.cpp (206 changes: 0 additions & 206 deletions)
@@ -331,70 +331,6 @@ static void requantize(const int* intptr, signed char* ptr, const Mat& scale_in_
 }
 
 #if __SSE2__
-#if __AVX512F__
-static void requantize_pack16to8(const int* intptr, signed char* ptr0, signed char* ptr1, const Mat& scale_in_data, const Mat& bias_data, const Mat& scale_out_data, int activation_type, const Mat& activation_params, int elemcount)
-{
-    const int scale_in_data_size = scale_in_data.w;
-    const int bias_data_size = bias_data.w;
-    const int scale_out_data_size = scale_out_data.w;
-
-    // NCNN_LOGE("requantize_pack16to8 %d %d %d %d", scale_in_data_size, bias_data_size, scale_out_data_size, elemcount);
-
-    __m512 _scale_in = _mm512_set1_ps(scale_in_data[0]);
-    if (scale_in_data_size > 1)
-    {
-        _scale_in = _mm512_loadu_ps((const float*)scale_in_data);
-    }
-
-    __m512 _scale_out = _mm512_set1_ps(scale_out_data[0]);
-    if (scale_out_data_size > 1)
-    {
-        _scale_out = _mm512_loadu_ps((const float*)scale_out_data);
-    }
-
-    if (bias_data_size == 0)
-    {
-        int i = 0;
-        for (; i < elemcount; i++)
-        {
-            __m512 _v = _mm512_cvtepi32_ps(_mm512_loadu_si512((const __m512i*)intptr));
-            _v = _mm512_mul_ps(_v, _scale_in);
-            _v = activation_avx512(_v, activation_type, activation_params);
-            _v = _mm512_mul_ps(_v, _scale_out);
-            __m128i v = float2int8_avx512(_v);
-            _mm_storel_pd((double*)ptr0, _mm_castsi128_pd(v));
-            _mm_storeh_pd((double*)ptr1, _mm_castsi128_pd(v));
-            intptr += 16;
-            ptr0 += 8;
-            ptr1 += 8;
-        }
-    }
-    else
-    {
-        __m512 _bias = _mm512_set1_ps(bias_data[0]);
-        if (bias_data_size > 1)
-        {
-            _bias = _mm512_loadu_ps((const float*)bias_data);
-        }
-
-        int i = 0;
-        for (; i < elemcount; i++)
-        {
-            __m512 _v = _mm512_cvtepi32_ps(_mm512_loadu_si512((const __m512i*)intptr));
-            _v = _mm512_fmadd_ps(_v, _scale_in, _bias);
-            _v = activation_avx512(_v, activation_type, activation_params);
-            _v = _mm512_mul_ps(_v, _scale_out);
-            __m128i v = float2int8_avx512(_v);
-            _mm_storel_pd((double*)ptr0, _mm_castsi128_pd(v));
-            _mm_storeh_pd((double*)ptr1, _mm_castsi128_pd(v));
-            intptr += 16;
-            ptr0 += 8;
-            ptr1 += 8;
-        }
-    }
-}
-#endif // __AVX512F__
-
 #if !__AVX__
 static void requantize_pack4to8(const int* intptr0, const int* intptr1, signed char* ptr, const Mat& scale_in_data, const Mat& bias_data, const Mat& scale_out_data, int activation_type, const Mat& activation_params, int elemcount)
 {
@@ -468,76 +404,6 @@ static void requantize_pack4to8(const int* intptr0, const int* intptr1, signed c
     }
 }
 #endif // !__AVX__
-
-static void requantize_pack4to1(const int* intptr, signed char* ptr0, signed char* ptr1, signed char* ptr2, signed char* ptr3, const Mat& scale_in_data, const Mat& bias_data, const Mat& scale_out_data, int activation_type, const Mat& activation_params, int elemcount)
-{
-    const int scale_in_data_size = scale_in_data.w;
-    const int bias_data_size = bias_data.w;
-    const int scale_out_data_size = scale_out_data.w;
-
-    // NCNN_LOGE("requantize_pack4to1 %d %d %d %d", scale_in_data_size, bias_data_size, scale_out_data_size, elemcount);
-
-    __m128 _scale_in = _mm_set1_ps(scale_in_data[0]);
-    if (scale_in_data_size > 1)
-    {
-        _scale_in = _mm_loadu_ps((const float*)scale_in_data);
-    }
-
-    __m128 _scale_out = _mm_set1_ps(scale_out_data[0]);
-    if (scale_out_data_size > 1)
-    {
-        _scale_out = _mm_loadu_ps((const float*)scale_out_data);
-    }
-
-    if (bias_data_size == 0)
-    {
-        int i = 0;
-        for (; i < elemcount; i++)
-        {
-            __m128 _v = _mm_cvtepi32_ps(_mm_loadu_si128((const __m128i*)intptr));
-            _v = _mm_mul_ps(_v, _scale_in);
-            _v = activation_sse(_v, activation_type, activation_params);
-            _v = _mm_mul_ps(_v, _scale_out);
-            int32_t v = float2int8_sse(_v);
-            ptr0[0] = (v >> 0) & 0xff;
-            ptr1[0] = (v >> 8) & 0xff;
-            ptr2[0] = (v >> 16) & 0xff;
-            ptr3[0] = (v >> 24) & 0xff;
-            intptr += 4;
-            ptr0 += 1;
-            ptr1 += 1;
-            ptr2 += 1;
-            ptr3 += 1;
-        }
-    }
-    else
-    {
-        __m128 _bias = _mm_set1_ps(bias_data[0]);
-        if (bias_data_size > 1)
-        {
-            _bias = _mm_loadu_ps((const float*)bias_data);
-        }
-
-        int i = 0;
-        for (; i < elemcount; i++)
-        {
-            __m128 _v = _mm_cvtepi32_ps(_mm_loadu_si128((const __m128i*)intptr));
-            _v = _mm_comp_fmadd_ps(_v, _scale_in, _bias);
-            _v = activation_sse(_v, activation_type, activation_params);
-            _v = _mm_mul_ps(_v, _scale_out);
-            int32_t v = float2int8_sse(_v);
-            ptr0[0] = (v >> 0) & 0xff;
-            ptr1[0] = (v >> 8) & 0xff;
-            ptr2[0] = (v >> 16) & 0xff;
-            ptr3[0] = (v >> 24) & 0xff;
-            intptr += 4;
-            ptr0 += 1;
-            ptr1 += 1;
-            ptr2 += 1;
-            ptr3 += 1;
-        }
-    }
-}
 #endif // __SSE2__
 
 int Requantize_x86::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
@@ -602,24 +468,6 @@ int Requantize_x86::forward(const Mat& bottom_blob, Mat& top_blob, const Option&
             return -100;
 
 #if __SSE2__
-#if __AVX512F__
-        if (elempack == 16 && out_elempack == 8)
-        {
-            #pragma omp parallel for num_threads(opt.num_threads)
-            for (int i = 0; i < h; i++)
-            {
-                const int* intptr = bottom_blob.row<const int>(i);
-                signed char* ptr0 = top_blob.row<signed char>(i * 2);
-                signed char* ptr1 = top_blob.row<signed char>(i * 2 + 1);
-
-                const Mat scale_in_data_i = scale_in_data_size > 1 ? scale_in_data.range(i * elempack, elempack) : scale_in_data;
-                const Mat bias_data_i = bias_data_size > 1 ? bias_data.range(i * elempack, elempack) : bias_data;
-                const Mat scale_out_data_i = scale_out_data_size > 1 ? scale_out_data.range(i * elempack, elempack) : scale_out_data;
-
-                requantize_pack16to8(intptr, ptr0, ptr1, scale_in_data_i, bias_data_i, scale_out_data_i, activation_type, activation_params, w);
-            }
-        }
-#endif // __AVX512F__
 #if !__AVX__
         if (elempack == 4 && out_elempack == 8)
         {
@@ -638,24 +486,6 @@ int Requantize_x86::forward(const Mat& bottom_blob, Mat& top_blob, const Option&
             }
         }
 #endif // !__AVX__
-        if (elempack == 4 && out_elempack == 1)
-        {
-            #pragma omp parallel for num_threads(opt.num_threads)
-            for (int i = 0; i < h; i++)
-            {
-                const int* intptr = bottom_blob.row<const int>(i);
-                signed char* ptr0 = top_blob.row<signed char>(i * 4);
-                signed char* ptr1 = top_blob.row<signed char>(i * 4 + 1);
-                signed char* ptr2 = top_blob.row<signed char>(i * 4 + 2);
-                signed char* ptr3 = top_blob.row<signed char>(i * 4 + 3);
-
-                const Mat scale_in_data_i = scale_in_data_size > 1 ? scale_in_data.range(i * elempack, elempack) : scale_in_data;
-                const Mat bias_data_i = bias_data_size > 1 ? bias_data.range(i * elempack, elempack) : bias_data;
-                const Mat scale_out_data_i = scale_out_data_size > 1 ? scale_out_data.range(i * elempack, elempack) : scale_out_data;
-
-                requantize_pack4to1(intptr, ptr0, ptr1, ptr2, ptr3, scale_in_data_i, bias_data_i, scale_out_data_i, activation_type, activation_params, w);
-            }
-        }
 #endif // __SSE2__
         if (elempack == out_elempack)
         {
@@ -691,24 +521,6 @@ int Requantize_x86::forward(const Mat& bottom_blob, Mat& top_blob, const Option&
             return -100;
 
 #if __SSE2__
-#if __AVX512F__
-        if (elempack == 16 && out_elempack == 8)
-        {
-            #pragma omp parallel for num_threads(opt.num_threads)
-            for (int q = 0; q < channels; q++)
-            {
-                const int* intptr = bottom_blob.channel(q);
-                signed char* ptr0 = top_blob.channel(q * 2);
-                signed char* ptr1 = top_blob.channel(q * 2 + 1);
-
-                const Mat scale_in_data_q = scale_in_data_size > 1 ? scale_in_data.range(q * elempack, elempack) : scale_in_data;
-                const Mat bias_data_q = bias_data_size > 1 ? bias_data.range(q * elempack, elempack) : bias_data;
-                const Mat scale_out_data_q = scale_out_data_size > 1 ? scale_out_data.range(q * elempack, elempack) : scale_out_data;
-
-                requantize_pack16to8(intptr, ptr0, ptr1, scale_in_data_q, bias_data_q, scale_out_data_q, activation_type, activation_params, w * h);
-            }
-        }
-#endif // __AVX512F__
 #if !__AVX__
         if (elempack == 4 && out_elempack == 8)
         {
@@ -727,24 +539,6 @@ int Requantize_x86::forward(const Mat& bottom_blob, Mat& top_blob, const Option&
             }
         }
 #endif // !__AVX__
-        if (elempack == 4 && out_elempack == 1)
-        {
-            #pragma omp parallel for num_threads(opt.num_threads)
-            for (int q = 0; q < channels; q++)
-            {
-                const int* intptr = bottom_blob.channel(q);
-                signed char* ptr0 = top_blob.channel(q * 4);
-                signed char* ptr1 = top_blob.channel(q * 4 + 1);
-                signed char* ptr2 = top_blob.channel(q * 4 + 2);
-                signed char* ptr3 = top_blob.channel(q * 4 + 3);
-
-                const Mat scale_in_data_q = scale_in_data_size > 1 ? scale_in_data.range(q * elempack, elempack) : scale_in_data;
-                const Mat bias_data_q = bias_data_size > 1 ? bias_data.range(q * elempack, elempack) : bias_data;
-                const Mat scale_out_data_q = scale_out_data_size > 1 ? scale_out_data.range(q * elempack, elempack) : scale_out_data;
-
-                requantize_pack4to1(intptr, ptr0, ptr1, ptr2, ptr3, scale_in_data_q, bias_data_q, scale_out_data_q, activation_type, activation_params, w * h);
-            }
-        }
 #endif // __SSE2__
         if (elempack == out_elempack)
        {
