author     Frank Barchard <fbarchard@google.com>    2022-01-31 11:49:55 -0800
committer  libyuv LUCI CQ <libyuv-scoped@luci-project-accounts.iam.gserviceaccount.com>    2022-01-31 20:05:55 +0000
commit     804980bbab748fd0e180cd6e7d9292ff49baf704 (patch)
tree       79348716636fcffddf9630ce9654ad899cde68d6
parent     2c6bfc02d5265c95df69190c785f5d00d13bb444 (diff)
download   libyuv-804980bbab748fd0e180cd6e7d9292ff49baf704.tar.gz
DetilePlane and unittest for NEON
Bug: libyuv:915, b/215425056
Change-Id: Iccab1ed3f6d385f02895d44faa94d198ad79d693
Reviewed-on: https://chromium-review.googlesource.com/c/libyuv/libyuv/+/3424820
Reviewed-by: Justin Green <greenjustin@google.com>
Reviewed-by: Frank Barchard <fbarchard@chromium.org>
Commit-Queue: Frank Barchard <fbarchard@chromium.org>
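The headline change is a new DetilePlane() entry point (presumably declared in include/libyuv/planar_functions.h and exercised by the new unit_test/planar_test.cc cases, both touched in the diffstat below) that copies a tiled source plane into a linear destination, with a NEON row kernel. As a rough, hypothetical scalar sketch of what detiling does — the tile layout, function name, and parameters below are illustrative assumptions, not the libyuv API:

/* Hypothetical scalar sketch of plane detiling (illustration only; the tile
 * layout and the name DetilePlaneSketch are assumptions, not the libyuv API).
 * Assumed layout: the tiled source is a sequence of 16 x tile_height byte
 * tiles stored left-to-right, top-to-bottom, each tile itself row-major. */
#include <stdint.h>
#include <string.h>

enum { kTileWidth = 16 };

static void DetilePlaneSketch(const uint8_t* src_tiled, uint8_t* dst,
                              int dst_stride, int width, int height,
                              int tile_height) {
  int tiles_per_row = width / kTileWidth;  /* width assumed a multiple of 16 */
  for (int y = 0; y < height; ++y) {
    int tile_row = y / tile_height;
    int row_in_tile = y % tile_height;
    for (int tx = 0; tx < tiles_per_row; ++tx) {
      const uint8_t* tile =
          src_tiled +
          (tile_row * tiles_per_row + tx) * kTileWidth * tile_height;
      memcpy(dst + y * dst_stride + tx * kTileWidth,
             tile + row_in_tile * kTileWidth, kTileWidth);
    }
  }
}

A SIMD fast path would typically replace the inner memcpy with vector loads and stores over whole 16-byte tile rows.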
-rw-r--r--   .gitignore                              |    1
-rw-r--r--   README.chromium                         |    2
-rw-r--r--   include/libyuv/loongson_intrinsics.h    | 1640
-rw-r--r--   include/libyuv/planar_functions.h       |   10
-rw-r--r--   include/libyuv/row.h                    |   23
-rw-r--r--   include/libyuv/scale_row.h              |    1
-rw-r--r--   include/libyuv/version.h                |    2
-rw-r--r--   source/convert.cc                       |    3
-rw-r--r--   source/convert_argb.cc                  |    9
-rw-r--r--   source/cpu_id.cc                        |   14
-rw-r--r--   source/planar_functions.cc              |   47
-rw-r--r--   source/rotate_lsx.cc                    |   61
-rw-r--r--   source/row_common.cc                    |   15
-rw-r--r--   source/row_lasx.cc                      |  633
-rw-r--r--   source/row_lsx.cc                       |  640
-rw-r--r--   source/row_neon64.cc                    |   10
-rw-r--r--   source/scale_argb.cc                    |    3
-rw-r--r--   source/scale_lsx.cc                     |  143
-rw-r--r--   unit_test/cpu_test.cc                   |    3
-rwxr-xr-x [-rw-r--r--]  unit_test/planar_test.cc    |   39
20 files changed, 1708 insertions, 1591 deletions
diff --git a/.gitignore b/.gitignore
index 7095d417..20d679b7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -12,6 +12,7 @@ pin-log.txt
/native_client
/net
/out
+/unit_test/out
/source/out
/sde-avx-sse-transition-out.txt
/testing
diff --git a/README.chromium b/README.chromium
index 1f165bfb..b6a06814 100644
--- a/README.chromium
+++ b/README.chromium
@@ -1,6 +1,6 @@
Name: libyuv
URL: http://code.google.com/p/libyuv/
-Version: 1809
+Version: 1810
License: BSD
License File: LICENSE
diff --git a/include/libyuv/loongson_intrinsics.h b/include/libyuv/loongson_intrinsics.h
index ae19eb9c..d6cb7a06 100644
--- a/include/libyuv/loongson_intrinsics.h
+++ b/include/libyuv/loongson_intrinsics.h
@@ -35,42 +35,42 @@
#define LSOM_VERSION_MICRO 3
#define DUP2_ARG1(_INS, _IN0, _IN1, _OUT0, _OUT1) \
-{ \
- _OUT0 = _INS(_IN0); \
- _OUT1 = _INS(_IN1); \
-}
+ { \
+ _OUT0 = _INS(_IN0); \
+ _OUT1 = _INS(_IN1); \
+ }
#define DUP2_ARG2(_INS, _IN0, _IN1, _IN2, _IN3, _OUT0, _OUT1) \
-{ \
- _OUT0 = _INS(_IN0, _IN1); \
- _OUT1 = _INS(_IN2, _IN3); \
-}
+ { \
+ _OUT0 = _INS(_IN0, _IN1); \
+ _OUT1 = _INS(_IN2, _IN3); \
+ }
#define DUP2_ARG3(_INS, _IN0, _IN1, _IN2, _IN3, _IN4, _IN5, _OUT0, _OUT1) \
-{ \
- _OUT0 = _INS(_IN0, _IN1, _IN2); \
- _OUT1 = _INS(_IN3, _IN4, _IN5); \
-}
+ { \
+ _OUT0 = _INS(_IN0, _IN1, _IN2); \
+ _OUT1 = _INS(_IN3, _IN4, _IN5); \
+ }
#define DUP4_ARG1(_INS, _IN0, _IN1, _IN2, _IN3, _OUT0, _OUT1, _OUT2, _OUT3) \
-{ \
- DUP2_ARG1(_INS, _IN0, _IN1, _OUT0, _OUT1); \
- DUP2_ARG1(_INS, _IN2, _IN3, _OUT2, _OUT3); \
-}
-
-#define DUP4_ARG2(_INS, _IN0, _IN1, _IN2, _IN3, _IN4, _IN5, _IN6, _IN7, \
- _OUT0, _OUT1, _OUT2, _OUT3) \
-{ \
- DUP2_ARG2(_INS, _IN0, _IN1, _IN2, _IN3, _OUT0, _OUT1); \
- DUP2_ARG2(_INS, _IN4, _IN5, _IN6, _IN7, _OUT2, _OUT3); \
-}
-
-#define DUP4_ARG3(_INS, _IN0, _IN1, _IN2, _IN3, _IN4, _IN5, _IN6, _IN7, \
- _IN8, _IN9, _IN10, _IN11, _OUT0, _OUT1, _OUT2, _OUT3) \
-{ \
- DUP2_ARG3(_INS, _IN0, _IN1, _IN2, _IN3, _IN4, _IN5, _OUT0, _OUT1); \
- DUP2_ARG3(_INS, _IN6, _IN7, _IN8, _IN9, _IN10, _IN11, _OUT2, _OUT3); \
-}
+ { \
+ DUP2_ARG1(_INS, _IN0, _IN1, _OUT0, _OUT1); \
+ DUP2_ARG1(_INS, _IN2, _IN3, _OUT2, _OUT3); \
+ }
+
+#define DUP4_ARG2(_INS, _IN0, _IN1, _IN2, _IN3, _IN4, _IN5, _IN6, _IN7, _OUT0, \
+ _OUT1, _OUT2, _OUT3) \
+ { \
+ DUP2_ARG2(_INS, _IN0, _IN1, _IN2, _IN3, _OUT0, _OUT1); \
+ DUP2_ARG2(_INS, _IN4, _IN5, _IN6, _IN7, _OUT2, _OUT3); \
+ }
+
+#define DUP4_ARG3(_INS, _IN0, _IN1, _IN2, _IN3, _IN4, _IN5, _IN6, _IN7, _IN8, \
+ _IN9, _IN10, _IN11, _OUT0, _OUT1, _OUT2, _OUT3) \
+ { \
+ DUP2_ARG3(_INS, _IN0, _IN1, _IN2, _IN3, _IN4, _IN5, _OUT0, _OUT1); \
+ DUP2_ARG3(_INS, _IN6, _IN7, _IN8, _IN9, _IN10, _IN11, _OUT2, _OUT3); \
+ }
#ifdef __loongarch_sx
#include <lsxintrin.h>
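The DUP2_*/DUP4_* macros reformatted in the hunk above are simple fan-out helpers: one intrinsic applied to several independent argument/result pairs in a single statement. A hypothetical usage sketch (illustration only, assuming <lsxintrin.h> and the macros above are in scope):

/* Hypothetical usage sketch of DUP2_ARG2 (illustration only). */
static inline void AddTwoPairs(__m128i a0, __m128i b0, __m128i a1, __m128i b1,
                               __m128i* sum0, __m128i* sum1) {
  __m128i s0, s1;
  DUP2_ARG2(__lsx_vadd_b, a0, b0, a1, b1, s0, s1);
  /* Expands to: s0 = __lsx_vadd_b(a0, b0); s1 = __lsx_vadd_b(a1, b1); */
  *sum0 = s0;
  *sum1 = s1;
}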
@@ -91,13 +91,14 @@
* out : 23,40,41,26, 23,40,41,26
* =============================================================================
*/
-static inline __m128i __lsx_vdp2add_h_b(__m128i in_c, __m128i in_h, __m128i in_l)
-{
- __m128i out;
+static inline __m128i __lsx_vdp2add_h_b(__m128i in_c,
+ __m128i in_h,
+ __m128i in_l) {
+ __m128i out;
- out = __lsx_vmaddwev_h_b(in_c, in_h, in_l);
- out = __lsx_vmaddwod_h_b(out, in_h, in_l);
- return out;
+ out = __lsx_vmaddwev_h_b(in_c, in_h, in_l);
+ out = __lsx_vmaddwod_h_b(out, in_h, in_l);
+ return out;
}
/*
@@ -117,13 +118,14 @@ static inline __m128i __lsx_vdp2add_h_b(__m128i in_c, __m128i in_h, __m128i in_l
* out : 23,40,41,26, 23,40,41,26
* =============================================================================
*/
-static inline __m128i __lsx_vdp2add_h_bu(__m128i in_c, __m128i in_h, __m128i in_l)
-{
- __m128i out;
+static inline __m128i __lsx_vdp2add_h_bu(__m128i in_c,
+ __m128i in_h,
+ __m128i in_l) {
+ __m128i out;
- out = __lsx_vmaddwev_h_bu(in_c, in_h, in_l);
- out = __lsx_vmaddwod_h_bu(out, in_h, in_l);
- return out;
+ out = __lsx_vmaddwev_h_bu(in_c, in_h, in_l);
+ out = __lsx_vmaddwod_h_bu(out, in_h, in_l);
+ return out;
}
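The __lsx_vdp2add_* helpers implement a widening dot-product-accumulate: adjacent even/odd pairs of narrow lanes are multiplied, summed, and added into the wider accumulator lane. A scalar-equivalent sketch for __lsx_vdp2add_h_b (illustration only):

/* Scalar-equivalent sketch of __lsx_vdp2add_h_b (illustration only).
 * Each of the 8 halfword lanes accumulates the product of one even/odd
 * pair of signed byte lanes: out[i] = c[i] + h[2i]*l[2i] + h[2i+1]*l[2i+1]. */
#include <stdint.h>

static void Dp2AddHB_Scalar(const int16_t c[8], const int8_t h[16],
                            const int8_t l[16], int16_t out[8]) {
  for (int i = 0; i < 8; ++i) {
    out[i] = (int16_t)(c[i] + h[2 * i] * l[2 * i] +
                       h[2 * i + 1] * l[2 * i + 1]);
  }
}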
/*
@@ -143,13 +145,14 @@ static inline __m128i __lsx_vdp2add_h_bu(__m128i in_c, __m128i in_h, __m128i in_
* out : 23,40,41,26
* =============================================================================
*/
-static inline __m128i __lsx_vdp2add_w_h(__m128i in_c, __m128i in_h, __m128i in_l)
-{
- __m128i out;
+static inline __m128i __lsx_vdp2add_w_h(__m128i in_c,
+ __m128i in_h,
+ __m128i in_l) {
+ __m128i out;
- out = __lsx_vmaddwev_w_h(in_c, in_h, in_l);
- out = __lsx_vmaddwod_w_h(out, in_h, in_l);
- return out;
+ out = __lsx_vmaddwev_w_h(in_c, in_h, in_l);
+ out = __lsx_vmaddwod_w_h(out, in_h, in_l);
+ return out;
}
/*
@@ -167,13 +170,12 @@ static inline __m128i __lsx_vdp2add_w_h(__m128i in_c, __m128i in_h, __m128i in_l
* out : 22,38,38,22, 22,38,38,22
* =============================================================================
*/
-static inline __m128i __lsx_vdp2_h_b(__m128i in_h, __m128i in_l)
-{
- __m128i out;
+static inline __m128i __lsx_vdp2_h_b(__m128i in_h, __m128i in_l) {
+ __m128i out;
- out = __lsx_vmulwev_h_b(in_h, in_l);
- out = __lsx_vmaddwod_h_b(out, in_h, in_l);
- return out;
+ out = __lsx_vmulwev_h_b(in_h, in_l);
+ out = __lsx_vmaddwod_h_b(out, in_h, in_l);
+ return out;
}
/*
@@ -191,13 +193,12 @@ static inline __m128i __lsx_vdp2_h_b(__m128i in_h, __m128i in_l)
* out : 22,38,38,22, 22,38,38,22
* =============================================================================
*/
-static inline __m128i __lsx_vdp2_h_bu(__m128i in_h, __m128i in_l)
-{
- __m128i out;
+static inline __m128i __lsx_vdp2_h_bu(__m128i in_h, __m128i in_l) {
+ __m128i out;
- out = __lsx_vmulwev_h_bu(in_h, in_l);
- out = __lsx_vmaddwod_h_bu(out, in_h, in_l);
- return out;
+ out = __lsx_vmulwev_h_bu(in_h, in_l);
+ out = __lsx_vmaddwod_h_bu(out, in_h, in_l);
+ return out;
}
/*
@@ -215,13 +216,12 @@ static inline __m128i __lsx_vdp2_h_bu(__m128i in_h, __m128i in_l)
* out : 22,38,38,22, 22,38,38,6
* =============================================================================
*/
-static inline __m128i __lsx_vdp2_h_bu_b(__m128i in_h, __m128i in_l)
-{
- __m128i out;
+static inline __m128i __lsx_vdp2_h_bu_b(__m128i in_h, __m128i in_l) {
+ __m128i out;
- out = __lsx_vmulwev_h_bu_b(in_h, in_l);
- out = __lsx_vmaddwod_h_bu_b(out, in_h, in_l);
- return out;
+ out = __lsx_vmulwev_h_bu_b(in_h, in_l);
+ out = __lsx_vmaddwod_h_bu_b(out, in_h, in_l);
+ return out;
}
/*
@@ -239,20 +239,19 @@ static inline __m128i __lsx_vdp2_h_bu_b(__m128i in_h, __m128i in_l)
* out : 22,38,38,22
* =============================================================================
*/
-static inline __m128i __lsx_vdp2_w_h(__m128i in_h, __m128i in_l)
-{
- __m128i out;
+static inline __m128i __lsx_vdp2_w_h(__m128i in_h, __m128i in_l) {
+ __m128i out;
- out = __lsx_vmulwev_w_h(in_h, in_l);
- out = __lsx_vmaddwod_w_h(out, in_h, in_l);
- return out;
+ out = __lsx_vmulwev_w_h(in_h, in_l);
+ out = __lsx_vmaddwod_w_h(out, in_h, in_l);
+ return out;
}
/*
* =============================================================================
* Description : Clip all halfword elements of input vector between min & max
- * out = ((_in) < (min)) ? (min) : (((_in) > (max)) ? (max) : (_in))
- * Arguments : Inputs - _in (input vector)
+ * out = ((_in) < (min)) ? (min) : (((_in) > (max)) ? (max) :
+ * (_in)) Arguments : Inputs - _in (input vector)
* - min (min threshold)
* - max (max threshold)
* Outputs - out (output vector with clipped elements)
@@ -264,13 +263,12 @@ static inline __m128i __lsx_vdp2_w_h(__m128i in_h, __m128i in_l)
* out : 1,2,9,9, 1,9,9,9
* =============================================================================
*/
-static inline __m128i __lsx_vclip_h(__m128i _in, __m128i min, __m128i max)
-{
- __m128i out;
+static inline __m128i __lsx_vclip_h(__m128i _in, __m128i min, __m128i max) {
+ __m128i out;
- out = __lsx_vmax_h(min, _in);
- out = __lsx_vmin_h(max, out);
- return out;
+ out = __lsx_vmax_h(min, _in);
+ out = __lsx_vmin_h(max, out);
+ return out;
}
/*
@@ -285,13 +283,12 @@ static inline __m128i __lsx_vclip_h(__m128i _in, __m128i min, __m128i max)
* out : 0,255,255,249, 0,255,255,249
* =============================================================================
*/
-static inline __m128i __lsx_vclip255_h(__m128i _in)
-{
- __m128i out;
+static inline __m128i __lsx_vclip255_h(__m128i _in) {
+ __m128i out;
- out = __lsx_vmaxi_h(_in, 0);
- out = __lsx_vsat_hu(out, 7);
- return out;
+ out = __lsx_vmaxi_h(_in, 0);
+ out = __lsx_vsat_hu(out, 7);
+ return out;
}
/*
@@ -306,13 +303,12 @@ static inline __m128i __lsx_vclip255_h(__m128i _in)
* out : 0,255,255,249
* =============================================================================
*/
-static inline __m128i __lsx_vclip255_w(__m128i _in)
-{
- __m128i out;
+static inline __m128i __lsx_vclip255_w(__m128i _in) {
+ __m128i out;
- out = __lsx_vmaxi_w(_in, 0);
- out = __lsx_vsat_wu(out, 7);
- return out;
+ out = __lsx_vmaxi_w(_in, 0);
+ out = __lsx_vsat_wu(out, 7);
+ return out;
}
/*
@@ -328,12 +324,12 @@ static inline __m128i __lsx_vclip255_w(__m128i _in)
* _in1(out) : 1,2,3,4
* =============================================================================
*/
-#define LSX_SWAP(_in0, _in1) \
-{ \
- _in0 = __lsx_vxor_v(_in0, _in1); \
- _in1 = __lsx_vxor_v(_in0, _in1); \
- _in0 = __lsx_vxor_v(_in0, _in1); \
-} \
+#define LSX_SWAP(_in0, _in1) \
+ { \
+ _in0 = __lsx_vxor_v(_in0, _in1); \
+ _in1 = __lsx_vxor_v(_in0, _in1); \
+ _in0 = __lsx_vxor_v(_in0, _in1); \
+ }
/*
* =============================================================================
@@ -349,34 +345,34 @@ static inline __m128i __lsx_vclip255_w(__m128i _in)
* =============================================================================
*/
#define LSX_TRANSPOSE4x4_W(_in0, _in1, _in2, _in3, _out0, _out1, _out2, _out3) \
-{ \
+ { \
__m128i _t0, _t1, _t2, _t3; \
\
- _t0 = __lsx_vilvl_w(_in1, _in0); \
- _t1 = __lsx_vilvh_w(_in1, _in0); \
- _t2 = __lsx_vilvl_w(_in3, _in2); \
- _t3 = __lsx_vilvh_w(_in3, _in2); \
+ _t0 = __lsx_vilvl_w(_in1, _in0); \
+ _t1 = __lsx_vilvh_w(_in1, _in0); \
+ _t2 = __lsx_vilvl_w(_in3, _in2); \
+ _t3 = __lsx_vilvh_w(_in3, _in2); \
_out0 = __lsx_vilvl_d(_t2, _t0); \
_out1 = __lsx_vilvh_d(_t2, _t0); \
_out2 = __lsx_vilvl_d(_t3, _t1); \
_out3 = __lsx_vilvh_d(_t3, _t1); \
-}
+ }
/*
* =============================================================================
* Description : Transpose 8x8 block with byte elements in vectors
* Arguments : Inputs - _in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7
- * Outputs - _out0, _out1, _out2, _out3, _out4, _out5, _out6, _out7
- * Details : The rows of the matrix become columns, and the columns become rows.
- * Example : LSX_TRANSPOSE8x8_B
- * _in0 : 00,01,02,03,04,05,06,07, 00,00,00,00,00,00,00,00
- * _in1 : 10,11,12,13,14,15,16,17, 00,00,00,00,00,00,00,00
- * _in2 : 20,21,22,23,24,25,26,27, 00,00,00,00,00,00,00,00
- * _in3 : 30,31,32,33,34,35,36,37, 00,00,00,00,00,00,00,00
- * _in4 : 40,41,42,43,44,45,46,47, 00,00,00,00,00,00,00,00
- * _in5 : 50,51,52,53,54,55,56,57, 00,00,00,00,00,00,00,00
- * _in6 : 60,61,62,63,64,65,66,67, 00,00,00,00,00,00,00,00
- * _in7 : 70,71,72,73,74,75,76,77, 00,00,00,00,00,00,00,00
+ * Outputs - _out0, _out1, _out2, _out3, _out4, _out5, _out6,
+ * _out7 Details : The rows of the matrix become columns, and the columns
+ * become rows. Example : LSX_TRANSPOSE8x8_B _in0 : 00,01,02,03,04,05,06,07,
+ * 00,00,00,00,00,00,00,00 _in1 : 10,11,12,13,14,15,16,17,
+ * 00,00,00,00,00,00,00,00 _in2 : 20,21,22,23,24,25,26,27,
+ * 00,00,00,00,00,00,00,00 _in3 : 30,31,32,33,34,35,36,37,
+ * 00,00,00,00,00,00,00,00 _in4 : 40,41,42,43,44,45,46,47,
+ * 00,00,00,00,00,00,00,00 _in5 : 50,51,52,53,54,55,56,57,
+ * 00,00,00,00,00,00,00,00 _in6 : 60,61,62,63,64,65,66,67,
+ * 00,00,00,00,00,00,00,00 _in7 : 70,71,72,73,74,75,76,77,
+ * 00,00,00,00,00,00,00,00
*
* _ out0 : 00,10,20,30,40,50,60,70, 00,00,00,00,00,00,00,00
* _ out1 : 01,11,21,31,41,51,61,71, 00,00,00,00,00,00,00,00
@@ -388,30 +384,31 @@ static inline __m128i __lsx_vclip255_w(__m128i _in)
* _ out7 : 07,17,27,37,47,57,67,77, 00,00,00,00,00,00,00,00
* =============================================================================
*/
-#define LSX_TRANSPOSE8x8_B(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \
- _out0, _out1, _out2, _out3, _out4, _out5, _out6, _out7)\
-{ \
- __m128i zero = {0}; \
- __m128i shuf8 = {0x0F0E0D0C0B0A0908, 0x1716151413121110}; \
- __m128i _t0, _t1, _t2, _t3, _t4, _t5, _t6, _t7; \
- \
- _t0 = __lsx_vilvl_b(_in2, _in0); \
- _t1 = __lsx_vilvl_b(_in3, _in1); \
- _t2 = __lsx_vilvl_b(_in6, _in4); \
- _t3 = __lsx_vilvl_b(_in7, _in5); \
- _t4 = __lsx_vilvl_b(_t1, _t0); \
- _t5 = __lsx_vilvh_b(_t1, _t0); \
- _t6 = __lsx_vilvl_b(_t3, _t2); \
- _t7 = __lsx_vilvh_b(_t3, _t2); \
- _out0 = __lsx_vilvl_w(_t6, _t4); \
- _out2 = __lsx_vilvh_w(_t6, _t4); \
- _out4 = __lsx_vilvl_w(_t7, _t5); \
- _out6 = __lsx_vilvh_w(_t7, _t5); \
- _out1 = __lsx_vshuf_b(zero, _out0, shuf8); \
- _out3 = __lsx_vshuf_b(zero, _out2, shuf8); \
- _out5 = __lsx_vshuf_b(zero, _out4, shuf8); \
- _out7 = __lsx_vshuf_b(zero, _out6, shuf8); \
-}
+#define LSX_TRANSPOSE8x8_B(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \
+ _out0, _out1, _out2, _out3, _out4, _out5, _out6, \
+ _out7) \
+ { \
+ __m128i zero = {0}; \
+ __m128i shuf8 = {0x0F0E0D0C0B0A0908, 0x1716151413121110}; \
+ __m128i _t0, _t1, _t2, _t3, _t4, _t5, _t6, _t7; \
+ \
+ _t0 = __lsx_vilvl_b(_in2, _in0); \
+ _t1 = __lsx_vilvl_b(_in3, _in1); \
+ _t2 = __lsx_vilvl_b(_in6, _in4); \
+ _t3 = __lsx_vilvl_b(_in7, _in5); \
+ _t4 = __lsx_vilvl_b(_t1, _t0); \
+ _t5 = __lsx_vilvh_b(_t1, _t0); \
+ _t6 = __lsx_vilvl_b(_t3, _t2); \
+ _t7 = __lsx_vilvh_b(_t3, _t2); \
+ _out0 = __lsx_vilvl_w(_t6, _t4); \
+ _out2 = __lsx_vilvh_w(_t6, _t4); \
+ _out4 = __lsx_vilvl_w(_t7, _t5); \
+ _out6 = __lsx_vilvh_w(_t7, _t5); \
+ _out1 = __lsx_vshuf_b(zero, _out0, shuf8); \
+ _out3 = __lsx_vshuf_b(zero, _out2, shuf8); \
+ _out5 = __lsx_vshuf_b(zero, _out4, shuf8); \
+ _out7 = __lsx_vshuf_b(zero, _out6, shuf8); \
+ }
/*
* =============================================================================
@@ -430,37 +427,38 @@ static inline __m128i __lsx_vclip255_w(__m128i _in)
* 70,71,72,73,74,75,76,77 07,17,27,37,47,57,67,77
* =============================================================================
*/
-#define LSX_TRANSPOSE8x8_H(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \
- _out0, _out1, _out2, _out3, _out4, _out5, _out6, _out7)\
-{ \
- __m128i _s0, _s1, _t0, _t1, _t2, _t3, _t4, _t5, _t6, _t7; \
- \
- _s0 = __lsx_vilvl_h(_in6, _in4); \
- _s1 = __lsx_vilvl_h(_in7, _in5); \
- _t0 = __lsx_vilvl_h(_s1, _s0); \
- _t1 = __lsx_vilvh_h(_s1, _s0); \
- _s0 = __lsx_vilvh_h(_in6, _in4); \
- _s1 = __lsx_vilvh_h(_in7, _in5); \
- _t2 = __lsx_vilvl_h(_s1, _s0); \
- _t3 = __lsx_vilvh_h(_s1, _s0); \
- _s0 = __lsx_vilvl_h(_in2, _in0); \
- _s1 = __lsx_vilvl_h(_in3, _in1); \
- _t4 = __lsx_vilvl_h(_s1, _s0); \
- _t5 = __lsx_vilvh_h(_s1, _s0); \
- _s0 = __lsx_vilvh_h(_in2, _in0); \
- _s1 = __lsx_vilvh_h(_in3, _in1); \
- _t6 = __lsx_vilvl_h(_s1, _s0); \
- _t7 = __lsx_vilvh_h(_s1, _s0); \
- \
- _out0 = __lsx_vpickev_d(_t0, _t4); \
- _out2 = __lsx_vpickev_d(_t1, _t5); \
- _out4 = __lsx_vpickev_d(_t2, _t6); \
- _out6 = __lsx_vpickev_d(_t3, _t7); \
- _out1 = __lsx_vpickod_d(_t0, _t4); \
- _out3 = __lsx_vpickod_d(_t1, _t5); \
- _out5 = __lsx_vpickod_d(_t2, _t6); \
- _out7 = __lsx_vpickod_d(_t3, _t7); \
-}
+#define LSX_TRANSPOSE8x8_H(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \
+ _out0, _out1, _out2, _out3, _out4, _out5, _out6, \
+ _out7) \
+ { \
+ __m128i _s0, _s1, _t0, _t1, _t2, _t3, _t4, _t5, _t6, _t7; \
+ \
+ _s0 = __lsx_vilvl_h(_in6, _in4); \
+ _s1 = __lsx_vilvl_h(_in7, _in5); \
+ _t0 = __lsx_vilvl_h(_s1, _s0); \
+ _t1 = __lsx_vilvh_h(_s1, _s0); \
+ _s0 = __lsx_vilvh_h(_in6, _in4); \
+ _s1 = __lsx_vilvh_h(_in7, _in5); \
+ _t2 = __lsx_vilvl_h(_s1, _s0); \
+ _t3 = __lsx_vilvh_h(_s1, _s0); \
+ _s0 = __lsx_vilvl_h(_in2, _in0); \
+ _s1 = __lsx_vilvl_h(_in3, _in1); \
+ _t4 = __lsx_vilvl_h(_s1, _s0); \
+ _t5 = __lsx_vilvh_h(_s1, _s0); \
+ _s0 = __lsx_vilvh_h(_in2, _in0); \
+ _s1 = __lsx_vilvh_h(_in3, _in1); \
+ _t6 = __lsx_vilvl_h(_s1, _s0); \
+ _t7 = __lsx_vilvh_h(_s1, _s0); \
+ \
+ _out0 = __lsx_vpickev_d(_t0, _t4); \
+ _out2 = __lsx_vpickev_d(_t1, _t5); \
+ _out4 = __lsx_vpickev_d(_t2, _t6); \
+ _out6 = __lsx_vpickev_d(_t3, _t7); \
+ _out1 = __lsx_vpickod_d(_t0, _t4); \
+ _out3 = __lsx_vpickod_d(_t1, _t5); \
+ _out5 = __lsx_vpickod_d(_t2, _t6); \
+ _out7 = __lsx_vpickod_d(_t3, _t7); \
+ }
/*
* =============================================================================
@@ -468,16 +466,16 @@ static inline __m128i __lsx_vclip255_w(__m128i _in)
* Arguments : Inputs - _in0, _in1, _in2, _in3 (input 8x4 byte block)
* Outputs - _out0, _out1, _out2, _out3 (output 4x8 byte block)
* Return Type - as per RTYPE
- * Details : The rows of the matrix become columns, and the columns become rows.
- * Example : LSX_TRANSPOSE8x4_B
- * _in0 : 00,01,02,03,00,00,00,00, 00,00,00,00,00,00,00,00
- * _in1 : 10,11,12,13,00,00,00,00, 00,00,00,00,00,00,00,00
- * _in2 : 20,21,22,23,00,00,00,00, 00,00,00,00,00,00,00,00
- * _in3 : 30,31,32,33,00,00,00,00, 00,00,00,00,00,00,00,00
- * _in4 : 40,41,42,43,00,00,00,00, 00,00,00,00,00,00,00,00
- * _in5 : 50,51,52,53,00,00,00,00, 00,00,00,00,00,00,00,00
- * _in6 : 60,61,62,63,00,00,00,00, 00,00,00,00,00,00,00,00
- * _in7 : 70,71,72,73,00,00,00,00, 00,00,00,00,00,00,00,00
+ * Details : The rows of the matrix become columns, and the columns become
+ * rows. Example : LSX_TRANSPOSE8x4_B _in0 : 00,01,02,03,00,00,00,00,
+ * 00,00,00,00,00,00,00,00 _in1 : 10,11,12,13,00,00,00,00,
+ * 00,00,00,00,00,00,00,00 _in2 : 20,21,22,23,00,00,00,00,
+ * 00,00,00,00,00,00,00,00 _in3 : 30,31,32,33,00,00,00,00,
+ * 00,00,00,00,00,00,00,00 _in4 : 40,41,42,43,00,00,00,00,
+ * 00,00,00,00,00,00,00,00 _in5 : 50,51,52,53,00,00,00,00,
+ * 00,00,00,00,00,00,00,00 _in6 : 60,61,62,63,00,00,00,00,
+ * 00,00,00,00,00,00,00,00 _in7 : 70,71,72,73,00,00,00,00,
+ * 00,00,00,00,00,00,00,00
*
* _out0 : 00,10,20,30,40,50,60,70, 00,00,00,00,00,00,00,00
* _out1 : 01,11,21,31,41,51,61,71, 00,00,00,00,00,00,00,00
@@ -485,26 +483,26 @@ static inline __m128i __lsx_vclip255_w(__m128i _in)
* _out3 : 03,13,23,33,43,53,63,73, 00,00,00,00,00,00,00,00
* =============================================================================
*/
-#define LSX_TRANSPOSE8x4_B(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \
- _out0, _out1, _out2, _out3) \
-{ \
- __m128i _tmp0_m, _tmp1_m, _tmp2_m, _tmp3_m; \
- \
- _tmp0_m = __lsx_vpackev_w(_in4, _in0); \
- _tmp1_m = __lsx_vpackev_w(_in5, _in1); \
- _tmp2_m = __lsx_vilvl_b(_tmp1_m, _tmp0_m); \
- _tmp0_m = __lsx_vpackev_w(_in6, _in2); \
- _tmp1_m = __lsx_vpackev_w(_in7, _in3); \
- \
- _tmp3_m = __lsx_vilvl_b(_tmp1_m, _tmp0_m); \
- _tmp0_m = __lsx_vilvl_h(_tmp3_m, _tmp2_m); \
- _tmp1_m = __lsx_vilvh_h(_tmp3_m, _tmp2_m); \
- \
- _out0 = __lsx_vilvl_w(_tmp1_m, _tmp0_m); \
- _out2 = __lsx_vilvh_w(_tmp1_m, _tmp0_m); \
- _out1 = __lsx_vilvh_d(_out2, _out0); \
- _out3 = __lsx_vilvh_d(_out0, _out2); \
-}
+#define LSX_TRANSPOSE8x4_B(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \
+ _out0, _out1, _out2, _out3) \
+ { \
+ __m128i _tmp0_m, _tmp1_m, _tmp2_m, _tmp3_m; \
+ \
+ _tmp0_m = __lsx_vpackev_w(_in4, _in0); \
+ _tmp1_m = __lsx_vpackev_w(_in5, _in1); \
+ _tmp2_m = __lsx_vilvl_b(_tmp1_m, _tmp0_m); \
+ _tmp0_m = __lsx_vpackev_w(_in6, _in2); \
+ _tmp1_m = __lsx_vpackev_w(_in7, _in3); \
+ \
+ _tmp3_m = __lsx_vilvl_b(_tmp1_m, _tmp0_m); \
+ _tmp0_m = __lsx_vilvl_h(_tmp3_m, _tmp2_m); \
+ _tmp1_m = __lsx_vilvh_h(_tmp3_m, _tmp2_m); \
+ \
+ _out0 = __lsx_vilvl_w(_tmp1_m, _tmp0_m); \
+ _out2 = __lsx_vilvh_w(_tmp1_m, _tmp0_m); \
+ _out1 = __lsx_vilvh_d(_out2, _out0); \
+ _out3 = __lsx_vilvh_d(_out0, _out2); \
+ }
/*
* =============================================================================
@@ -532,29 +530,30 @@ static inline __m128i __lsx_vclip255_w(__m128i _in)
* 120,121,122,123,124,125,126,127
* =============================================================================
*/
-#define LSX_TRANSPOSE16x8_B(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, _in8, \
- _in9, _in10, _in11, _in12, _in13, _in14, _in15, _out0, \
- _out1, _out2, _out3, _out4, _out5, _out6, _out7) \
-{ \
- __m128i _tmp0, _tmp1, _tmp2, _tmp3, _tmp4, _tmp5, _tmp6, _tmp7; \
- __m128i _t0, _t1, _t2, _t3, _t4, _t5, _t6, _t7; \
- DUP4_ARG2(__lsx_vilvl_b, _in2, _in0, _in3, _in1, _in6, _in4, _in7, _in5, \
- _tmp0, _tmp1, _tmp2, _tmp3); \
- DUP4_ARG2(__lsx_vilvl_b, _in10, _in8, _in11, _in9, _in14, _in12, _in15, \
- _in13, _tmp4, _tmp5, _tmp6, _tmp7); \
- DUP2_ARG2(__lsx_vilvl_b, _tmp1, _tmp0, _tmp3, _tmp2, _t0, _t2); \
- DUP2_ARG2(__lsx_vilvh_b, _tmp1, _tmp0, _tmp3, _tmp2, _t1, _t3); \
- DUP2_ARG2(__lsx_vilvl_b, _tmp5, _tmp4, _tmp7, _tmp6, _t4, _t6); \
- DUP2_ARG2(__lsx_vilvh_b, _tmp5, _tmp4, _tmp7, _tmp6, _t5, _t7); \
- DUP2_ARG2(__lsx_vilvl_w, _t2, _t0, _t3, _t1, _tmp0, _tmp4); \
- DUP2_ARG2(__lsx_vilvh_w, _t2, _t0, _t3, _t1, _tmp2, _tmp6); \
- DUP2_ARG2(__lsx_vilvl_w, _t6, _t4, _t7, _t5, _tmp1, _tmp5); \
- DUP2_ARG2(__lsx_vilvh_w, _t6, _t4, _t7, _t5, _tmp3, _tmp7); \
- DUP2_ARG2(__lsx_vilvl_d, _tmp1, _tmp0, _tmp3, _tmp2, _out0, _out2); \
- DUP2_ARG2(__lsx_vilvh_d, _tmp1, _tmp0, _tmp3, _tmp2, _out1, _out3); \
- DUP2_ARG2(__lsx_vilvl_d, _tmp5, _tmp4, _tmp7, _tmp6, _out4, _out6); \
- DUP2_ARG2(__lsx_vilvh_d, _tmp5, _tmp4, _tmp7, _tmp6, _out5, _out7); \
-}
+#define LSX_TRANSPOSE16x8_B(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \
+ _in8, _in9, _in10, _in11, _in12, _in13, _in14, \
+ _in15, _out0, _out1, _out2, _out3, _out4, _out5, \
+ _out6, _out7) \
+ { \
+ __m128i _tmp0, _tmp1, _tmp2, _tmp3, _tmp4, _tmp5, _tmp6, _tmp7; \
+ __m128i _t0, _t1, _t2, _t3, _t4, _t5, _t6, _t7; \
+ DUP4_ARG2(__lsx_vilvl_b, _in2, _in0, _in3, _in1, _in6, _in4, _in7, _in5, \
+ _tmp0, _tmp1, _tmp2, _tmp3); \
+ DUP4_ARG2(__lsx_vilvl_b, _in10, _in8, _in11, _in9, _in14, _in12, _in15, \
+ _in13, _tmp4, _tmp5, _tmp6, _tmp7); \
+ DUP2_ARG2(__lsx_vilvl_b, _tmp1, _tmp0, _tmp3, _tmp2, _t0, _t2); \
+ DUP2_ARG2(__lsx_vilvh_b, _tmp1, _tmp0, _tmp3, _tmp2, _t1, _t3); \
+ DUP2_ARG2(__lsx_vilvl_b, _tmp5, _tmp4, _tmp7, _tmp6, _t4, _t6); \
+ DUP2_ARG2(__lsx_vilvh_b, _tmp5, _tmp4, _tmp7, _tmp6, _t5, _t7); \
+ DUP2_ARG2(__lsx_vilvl_w, _t2, _t0, _t3, _t1, _tmp0, _tmp4); \
+ DUP2_ARG2(__lsx_vilvh_w, _t2, _t0, _t3, _t1, _tmp2, _tmp6); \
+ DUP2_ARG2(__lsx_vilvl_w, _t6, _t4, _t7, _t5, _tmp1, _tmp5); \
+ DUP2_ARG2(__lsx_vilvh_w, _t6, _t4, _t7, _t5, _tmp3, _tmp7); \
+ DUP2_ARG2(__lsx_vilvl_d, _tmp1, _tmp0, _tmp3, _tmp2, _out0, _out2); \
+ DUP2_ARG2(__lsx_vilvh_d, _tmp1, _tmp0, _tmp3, _tmp2, _out1, _out3); \
+ DUP2_ARG2(__lsx_vilvl_d, _tmp5, _tmp4, _tmp7, _tmp6, _out4, _out6); \
+ DUP2_ARG2(__lsx_vilvh_d, _tmp5, _tmp4, _tmp7, _tmp6, _out5, _out7); \
+ }
/*
* =============================================================================
@@ -570,33 +569,33 @@ static inline __m128i __lsx_vclip255_w(__m128i _in)
* =============================================================================
*/
#define LSX_BUTTERFLY_4_B(_in0, _in1, _in2, _in3, _out0, _out1, _out2, _out3) \
-{ \
+ { \
_out0 = __lsx_vadd_b(_in0, _in3); \
_out1 = __lsx_vadd_b(_in1, _in2); \
_out2 = __lsx_vsub_b(_in1, _in2); \
_out3 = __lsx_vsub_b(_in0, _in3); \
-}
+ }
#define LSX_BUTTERFLY_4_H(_in0, _in1, _in2, _in3, _out0, _out1, _out2, _out3) \
-{ \
+ { \
_out0 = __lsx_vadd_h(_in0, _in3); \
_out1 = __lsx_vadd_h(_in1, _in2); \
_out2 = __lsx_vsub_h(_in1, _in2); \
_out3 = __lsx_vsub_h(_in0, _in3); \
-}
+ }
#define LSX_BUTTERFLY_4_W(_in0, _in1, _in2, _in3, _out0, _out1, _out2, _out3) \
-{ \
+ { \
_out0 = __lsx_vadd_w(_in0, _in3); \
_out1 = __lsx_vadd_w(_in1, _in2); \
_out2 = __lsx_vsub_w(_in1, _in2); \
_out3 = __lsx_vsub_w(_in0, _in3); \
-}
+ }
#define LSX_BUTTERFLY_4_D(_in0, _in1, _in2, _in3, _out0, _out1, _out2, _out3) \
-{ \
+ { \
_out0 = __lsx_vadd_d(_in0, _in3); \
_out1 = __lsx_vadd_d(_in1, _in2); \
_out2 = __lsx_vsub_d(_in1, _in2); \
_out3 = __lsx_vsub_d(_in0, _in3); \
-}
+ }
/*
* =============================================================================
@@ -615,59 +614,63 @@ static inline __m128i __lsx_vclip255_w(__m128i _in)
* _out7 = _in0 - _in7;
* =============================================================================
*/
-#define LSX_BUTTERFLY_8_B(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \
- _out0, _out1, _out2, _out3, _out4, _out5, _out6, _out7)\
-{ \
- _out0 = __lsx_vadd_b(_in0, _in7); \
- _out1 = __lsx_vadd_b(_in1, _in6); \
- _out2 = __lsx_vadd_b(_in2, _in5); \
- _out3 = __lsx_vadd_b(_in3, _in4); \
- _out4 = __lsx_vsub_b(_in3, _in4); \
- _out5 = __lsx_vsub_b(_in2, _in5); \
- _out6 = __lsx_vsub_b(_in1, _in6); \
- _out7 = __lsx_vsub_b(_in0, _in7); \
-}
-
-#define LSX_BUTTERFLY_8_H(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \
- _out0, _out1, _out2, _out3, _out4, _out5, _out6, _out7)\
-{ \
- _out0 = __lsx_vadd_h(_in0, _in7); \
- _out1 = __lsx_vadd_h(_in1, _in6); \
- _out2 = __lsx_vadd_h(_in2, _in5); \
- _out3 = __lsx_vadd_h(_in3, _in4); \
- _out4 = __lsx_vsub_h(_in3, _in4); \
- _out5 = __lsx_vsub_h(_in2, _in5); \
- _out6 = __lsx_vsub_h(_in1, _in6); \
- _out7 = __lsx_vsub_h(_in0, _in7); \
-}
-
-#define LSX_BUTTERFLY_8_W(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \
- _out0, _out1, _out2, _out3, _out4, _out5, _out6, _out7)\
-{ \
- _out0 = __lsx_vadd_w(_in0, _in7); \
- _out1 = __lsx_vadd_w(_in1, _in6); \
- _out2 = __lsx_vadd_w(_in2, _in5); \
- _out3 = __lsx_vadd_w(_in3, _in4); \
- _out4 = __lsx_vsub_w(_in3, _in4); \
- _out5 = __lsx_vsub_w(_in2, _in5); \
- _out6 = __lsx_vsub_w(_in1, _in6); \
- _out7 = __lsx_vsub_w(_in0, _in7); \
-}
-
-#define LSX_BUTTERFLY_8_D(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \
- _out0, _out1, _out2, _out3, _out4, _out5, _out6, _out7)\
-{ \
- _out0 = __lsx_vadd_d(_in0, _in7); \
- _out1 = __lsx_vadd_d(_in1, _in6); \
- _out2 = __lsx_vadd_d(_in2, _in5); \
- _out3 = __lsx_vadd_d(_in3, _in4); \
- _out4 = __lsx_vsub_d(_in3, _in4); \
- _out5 = __lsx_vsub_d(_in2, _in5); \
- _out6 = __lsx_vsub_d(_in1, _in6); \
- _out7 = __lsx_vsub_d(_in0, _in7); \
-}
-
-#endif //LSX
+#define LSX_BUTTERFLY_8_B(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \
+ _out0, _out1, _out2, _out3, _out4, _out5, _out6, \
+ _out7) \
+ { \
+ _out0 = __lsx_vadd_b(_in0, _in7); \
+ _out1 = __lsx_vadd_b(_in1, _in6); \
+ _out2 = __lsx_vadd_b(_in2, _in5); \
+ _out3 = __lsx_vadd_b(_in3, _in4); \
+ _out4 = __lsx_vsub_b(_in3, _in4); \
+ _out5 = __lsx_vsub_b(_in2, _in5); \
+ _out6 = __lsx_vsub_b(_in1, _in6); \
+ _out7 = __lsx_vsub_b(_in0, _in7); \
+ }
+
+#define LSX_BUTTERFLY_8_H(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \
+ _out0, _out1, _out2, _out3, _out4, _out5, _out6, \
+ _out7) \
+ { \
+ _out0 = __lsx_vadd_h(_in0, _in7); \
+ _out1 = __lsx_vadd_h(_in1, _in6); \
+ _out2 = __lsx_vadd_h(_in2, _in5); \
+ _out3 = __lsx_vadd_h(_in3, _in4); \
+ _out4 = __lsx_vsub_h(_in3, _in4); \
+ _out5 = __lsx_vsub_h(_in2, _in5); \
+ _out6 = __lsx_vsub_h(_in1, _in6); \
+ _out7 = __lsx_vsub_h(_in0, _in7); \
+ }
+
+#define LSX_BUTTERFLY_8_W(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \
+ _out0, _out1, _out2, _out3, _out4, _out5, _out6, \
+ _out7) \
+ { \
+ _out0 = __lsx_vadd_w(_in0, _in7); \
+ _out1 = __lsx_vadd_w(_in1, _in6); \
+ _out2 = __lsx_vadd_w(_in2, _in5); \
+ _out3 = __lsx_vadd_w(_in3, _in4); \
+ _out4 = __lsx_vsub_w(_in3, _in4); \
+ _out5 = __lsx_vsub_w(_in2, _in5); \
+ _out6 = __lsx_vsub_w(_in1, _in6); \
+ _out7 = __lsx_vsub_w(_in0, _in7); \
+ }
+
+#define LSX_BUTTERFLY_8_D(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \
+ _out0, _out1, _out2, _out3, _out4, _out5, _out6, \
+ _out7) \
+ { \
+ _out0 = __lsx_vadd_d(_in0, _in7); \
+ _out1 = __lsx_vadd_d(_in1, _in6); \
+ _out2 = __lsx_vadd_d(_in2, _in5); \
+ _out3 = __lsx_vadd_d(_in3, _in4); \
+ _out4 = __lsx_vsub_d(_in3, _in4); \
+ _out5 = __lsx_vsub_d(_in2, _in5); \
+ _out6 = __lsx_vsub_d(_in1, _in6); \
+ _out7 = __lsx_vsub_d(_in0, _in7); \
+ }
+
+#endif // LSX
#ifdef __loongarch_asx
#include <lasxintrin.h>
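The LSX_BUTTERFLY_* macros reformatted in the hunks above compute pairwise sums and differences of mirrored inputs (out0 = in0 + inN-1, ..., outN-1 = in0 - inN-1), the usual first stage of small DCT/Hadamard-style transforms. A hypothetical usage sketch for the 4-element halfword variant (illustration only, assuming <lsxintrin.h> and the macros above are in scope):

/* Hypothetical usage sketch of LSX_BUTTERFLY_4_H (illustration only). */
static inline void Butterfly4Rows(__m128i r0, __m128i r1, __m128i r2,
                                  __m128i r3, __m128i out[4]) {
  __m128i s0, s1, d0, d1;
  LSX_BUTTERFLY_4_H(r0, r1, r2, r3, s0, s1, d1, d0);
  /* Per lane: s0 = r0 + r3, s1 = r1 + r2, d1 = r1 - r2, d0 = r0 - r3 */
  out[0] = s0;
  out[1] = s1;
  out[2] = d1;
  out[3] = d0;
}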
@@ -685,13 +688,12 @@ static inline __m128i __lsx_vclip255_w(__m128i _in)
* Example : See out = __lasx_xvdp2_w_h(in_h, in_l)
* =============================================================================
*/
-static inline __m256i __lasx_xvdp2_h_bu(__m256i in_h, __m256i in_l)
-{
- __m256i out;
+static inline __m256i __lasx_xvdp2_h_bu(__m256i in_h, __m256i in_l) {
+ __m256i out;
- out = __lasx_xvmulwev_h_bu(in_h, in_l);
- out = __lasx_xvmaddwod_h_bu(out, in_h, in_l);
- return out;
+ out = __lasx_xvmulwev_h_bu(in_h, in_l);
+ out = __lasx_xvmaddwod_h_bu(out, in_h, in_l);
+ return out;
}
/*
@@ -708,13 +710,12 @@ static inline __m256i __lasx_xvdp2_h_bu(__m256i in_h, __m256i in_l)
* Example : See out = __lasx_xvdp2_w_h(in_h, in_l)
* =============================================================================
*/
-static inline __m256i __lasx_xvdp2_h_b(__m256i in_h, __m256i in_l)
-{
- __m256i out;
+static inline __m256i __lasx_xvdp2_h_b(__m256i in_h, __m256i in_l) {
+ __m256i out;
- out = __lasx_xvmulwev_h_b(in_h, in_l);
- out = __lasx_xvmaddwod_h_b(out, in_h, in_l);
- return out;
+ out = __lasx_xvmulwev_h_b(in_h, in_l);
+ out = __lasx_xvmaddwod_h_b(out, in_h, in_l);
+ return out;
}
/*
@@ -734,13 +735,12 @@ static inline __m256i __lasx_xvdp2_h_b(__m256i in_h, __m256i in_l)
* out : 22,38,38,22, 22,38,38,22
* =============================================================================
*/
-static inline __m256i __lasx_xvdp2_w_h(__m256i in_h, __m256i in_l)
-{
- __m256i out;
+static inline __m256i __lasx_xvdp2_w_h(__m256i in_h, __m256i in_l) {
+ __m256i out;
- out = __lasx_xvmulwev_w_h(in_h, in_l);
- out = __lasx_xvmaddwod_w_h(out, in_h, in_l);
- return out;
+ out = __lasx_xvmulwev_w_h(in_h, in_l);
+ out = __lasx_xvmaddwod_w_h(out, in_h, in_l);
+ return out;
}
/*
@@ -757,13 +757,12 @@ static inline __m256i __lasx_xvdp2_w_h(__m256i in_h, __m256i in_l)
* Example : See out = __lasx_xvdp2_w_h(in_h, in_l)
* =============================================================================
*/
-static inline __m256i __lasx_xvdp2_d_w(__m256i in_h, __m256i in_l)
-{
- __m256i out;
+static inline __m256i __lasx_xvdp2_d_w(__m256i in_h, __m256i in_l) {
+ __m256i out;
- out = __lasx_xvmulwev_d_w(in_h, in_l);
- out = __lasx_xvmaddwod_d_w(out, in_h, in_l);
- return out;
+ out = __lasx_xvmulwev_d_w(in_h, in_l);
+ out = __lasx_xvmaddwod_d_w(out, in_h, in_l);
+ return out;
}
/*
@@ -780,13 +779,12 @@ static inline __m256i __lasx_xvdp2_d_w(__m256i in_h, __m256i in_l)
* Example : See out = __lasx_xvdp2_w_h(in_h, in_l)
* =============================================================================
*/
-static inline __m256i __lasx_xvdp2_w_hu_h(__m256i in_h, __m256i in_l)
-{
- __m256i out;
+static inline __m256i __lasx_xvdp2_w_hu_h(__m256i in_h, __m256i in_l) {
+ __m256i out;
- out = __lasx_xvmulwev_w_hu_h(in_h, in_l);
- out = __lasx_xvmaddwod_w_hu_h(out, in_h, in_l);
- return out;
+ out = __lasx_xvmulwev_w_hu_h(in_h, in_l);
+ out = __lasx_xvmaddwod_w_hu_h(out, in_h, in_l);
+ return out;
}
/*
@@ -803,13 +801,14 @@ static inline __m256i __lasx_xvdp2_w_hu_h(__m256i in_h, __m256i in_l)
* Example : See out = __lasx_xvdp2add_w_h(in_c, in_h, in_l)
* =============================================================================
*/
-static inline __m256i __lasx_xvdp2add_h_b(__m256i in_c,__m256i in_h, __m256i in_l)
-{
- __m256i out;
+static inline __m256i __lasx_xvdp2add_h_b(__m256i in_c,
+ __m256i in_h,
+ __m256i in_l) {
+ __m256i out;
- out = __lasx_xvmaddwev_h_b(in_c, in_h, in_l);
- out = __lasx_xvmaddwod_h_b(out, in_h, in_l);
- return out;
+ out = __lasx_xvmaddwev_h_b(in_c, in_h, in_l);
+ out = __lasx_xvmaddwod_h_b(out, in_h, in_l);
+ return out;
}
/*
@@ -830,13 +829,14 @@ static inline __m256i __lasx_xvdp2add_h_b(__m256i in_c,__m256i in_h, __m256i in_
* out : 23,40,41,26, 23,40,41,26
* =============================================================================
*/
-static inline __m256i __lasx_xvdp2add_w_h(__m256i in_c, __m256i in_h, __m256i in_l)
-{
- __m256i out;
+static inline __m256i __lasx_xvdp2add_w_h(__m256i in_c,
+ __m256i in_h,
+ __m256i in_l) {
+ __m256i out;
- out = __lasx_xvmaddwev_w_h(in_c, in_h, in_l);
- out = __lasx_xvmaddwod_w_h(out, in_h, in_l);
- return out;
+ out = __lasx_xvmaddwev_w_h(in_c, in_h, in_l);
+ out = __lasx_xvmaddwod_w_h(out, in_h, in_l);
+ return out;
}
/*
@@ -853,13 +853,14 @@ static inline __m256i __lasx_xvdp2add_w_h(__m256i in_c, __m256i in_h, __m256i in
* Example : See out = __lasx_xvdp2add_w_h(in_c, in_h, in_l)
* =============================================================================
*/
-static inline __m256i __lasx_xvdp2add_w_hu(__m256i in_c, __m256i in_h, __m256i in_l)
-{
- __m256i out;
+static inline __m256i __lasx_xvdp2add_w_hu(__m256i in_c,
+ __m256i in_h,
+ __m256i in_l) {
+ __m256i out;
- out = __lasx_xvmaddwev_w_hu(in_c, in_h, in_l);
- out = __lasx_xvmaddwod_w_hu(out, in_h, in_l);
- return out;
+ out = __lasx_xvmaddwev_w_hu(in_c, in_h, in_l);
+ out = __lasx_xvmaddwod_w_hu(out, in_h, in_l);
+ return out;
}
/*
@@ -876,13 +877,14 @@ static inline __m256i __lasx_xvdp2add_w_hu(__m256i in_c, __m256i in_h, __m256i i
* Example : See out = __lasx_xvdp2add_w_h(in_c, in_h, in_l)
* =============================================================================
*/
-static inline __m256i __lasx_xvdp2add_w_hu_h(__m256i in_c, __m256i in_h, __m256i in_l)
-{
- __m256i out;
+static inline __m256i __lasx_xvdp2add_w_hu_h(__m256i in_c,
+ __m256i in_h,
+ __m256i in_l) {
+ __m256i out;
- out = __lasx_xvmaddwev_w_hu_h(in_c, in_h, in_l);
- out = __lasx_xvmaddwod_w_hu_h(out, in_h, in_l);
- return out;
+ out = __lasx_xvmaddwev_w_hu_h(in_c, in_h, in_l);
+ out = __lasx_xvmaddwod_w_hu_h(out, in_h, in_l);
+ return out;
}
/*
@@ -900,14 +902,15 @@ static inline __m256i __lasx_xvdp2add_w_hu_h(__m256i in_c, __m256i in_h, __m256i
* Example : See out = __lasx_xvdp2sub_w_h(in_c, in_h, in_l)
* =============================================================================
*/
-static inline __m256i __lasx_xvdp2sub_h_bu(__m256i in_c, __m256i in_h, __m256i in_l)
-{
- __m256i out;
-
- out = __lasx_xvmulwev_h_bu(in_h, in_l);
- out = __lasx_xvmaddwod_h_bu(out, in_h, in_l);
- out = __lasx_xvsub_h(in_c, out);
- return out;
+static inline __m256i __lasx_xvdp2sub_h_bu(__m256i in_c,
+ __m256i in_h,
+ __m256i in_l) {
+ __m256i out;
+
+ out = __lasx_xvmulwev_h_bu(in_h, in_l);
+ out = __lasx_xvmaddwod_h_bu(out, in_h, in_l);
+ out = __lasx_xvsub_h(in_c, out);
+ return out;
}
/*
@@ -929,14 +932,15 @@ static inline __m256i __lasx_xvdp2sub_h_bu(__m256i in_c, __m256i in_h, __m256i i
* out : -7,-3,0,0, 0,-1,0,-1
* =============================================================================
*/
-static inline __m256i __lasx_xvdp2sub_w_h(__m256i in_c, __m256i in_h, __m256i in_l)
-{
- __m256i out;
-
- out = __lasx_xvmulwev_w_h(in_h, in_l);
- out = __lasx_xvmaddwod_w_h(out, in_h, in_l);
- out = __lasx_xvsub_w(in_c, out);
- return out;
+static inline __m256i __lasx_xvdp2sub_w_h(__m256i in_c,
+ __m256i in_h,
+ __m256i in_l) {
+ __m256i out;
+
+ out = __lasx_xvmulwev_w_h(in_h, in_l);
+ out = __lasx_xvmaddwod_w_h(out, in_h, in_l);
+ out = __lasx_xvsub_w(in_c, out);
+ return out;
}
/*
@@ -956,14 +960,13 @@ static inline __m256i __lasx_xvdp2sub_w_h(__m256i in_c, __m256i in_h, __m256i in
* out : -2,0,1,1
* =============================================================================
*/
-static inline __m256i __lasx_xvdp4_d_h(__m256i in_h, __m256i in_l)
-{
- __m256i out;
-
- out = __lasx_xvmulwev_w_h(in_h, in_l);
- out = __lasx_xvmaddwod_w_h(out, in_h, in_l);
- out = __lasx_xvhaddw_d_w(out, out);
- return out;
+static inline __m256i __lasx_xvdp4_d_h(__m256i in_h, __m256i in_l) {
+ __m256i out;
+
+ out = __lasx_xvmulwev_w_h(in_h, in_l);
+ out = __lasx_xvmaddwod_w_h(out, in_h, in_l);
+ out = __lasx_xvhaddw_d_w(out, out);
+ return out;
}
/*
@@ -978,13 +981,12 @@ static inline __m256i __lasx_xvdp4_d_h(__m256i in_h, __m256i in_l)
* Example : See out = __lasx_xvaddwh_w_h(in_h, in_l)
* =============================================================================
*/
-static inline __m256i __lasx_xvaddwh_h_b(__m256i in_h, __m256i in_l)
-{
- __m256i out;
+static inline __m256i __lasx_xvaddwh_h_b(__m256i in_h, __m256i in_l) {
+ __m256i out;
- out = __lasx_xvilvh_b(in_h, in_l);
- out = __lasx_xvhaddw_h_b(out, out);
- return out;
+ out = __lasx_xvilvh_b(in_h, in_l);
+ out = __lasx_xvhaddw_h_b(out, out);
+ return out;
}
/*
@@ -1002,13 +1004,12 @@ static inline __m256i __lasx_xvaddwh_h_b(__m256i in_h, __m256i in_l)
* out : 1,0,0,-1, 1,0,0, 2
* =============================================================================
*/
- static inline __m256i __lasx_xvaddwh_w_h(__m256i in_h, __m256i in_l)
-{
- __m256i out;
+static inline __m256i __lasx_xvaddwh_w_h(__m256i in_h, __m256i in_l) {
+ __m256i out;
- out = __lasx_xvilvh_h(in_h, in_l);
- out = __lasx_xvhaddw_w_h(out, out);
- return out;
+ out = __lasx_xvilvh_h(in_h, in_l);
+ out = __lasx_xvhaddw_w_h(out, out);
+ return out;
}
/*
@@ -1023,13 +1024,12 @@ static inline __m256i __lasx_xvaddwh_h_b(__m256i in_h, __m256i in_l)
* Example : See out = __lasx_xvaddwl_w_h(in_h, in_l)
* =============================================================================
*/
-static inline __m256i __lasx_xvaddwl_h_b(__m256i in_h, __m256i in_l)
-{
- __m256i out;
+static inline __m256i __lasx_xvaddwl_h_b(__m256i in_h, __m256i in_l) {
+ __m256i out;
- out = __lasx_xvilvl_b(in_h, in_l);
- out = __lasx_xvhaddw_h_b(out, out);
- return out;
+ out = __lasx_xvilvl_b(in_h, in_l);
+ out = __lasx_xvhaddw_h_b(out, out);
+ return out;
}
/*
@@ -1047,13 +1047,12 @@ static inline __m256i __lasx_xvaddwl_h_b(__m256i in_h, __m256i in_l)
* out : 5,-1,4,2, 1,0,2,-1
* =============================================================================
*/
-static inline __m256i __lasx_xvaddwl_w_h(__m256i in_h, __m256i in_l)
-{
- __m256i out;
+static inline __m256i __lasx_xvaddwl_w_h(__m256i in_h, __m256i in_l) {
+ __m256i out;
- out = __lasx_xvilvl_h(in_h, in_l);
- out = __lasx_xvhaddw_w_h(out, out);
- return out;
+ out = __lasx_xvilvl_h(in_h, in_l);
+ out = __lasx_xvhaddw_w_h(out, out);
+ return out;
}
/*
@@ -1068,13 +1067,12 @@ static inline __m256i __lasx_xvaddwl_w_h(__m256i in_h, __m256i in_l)
* Example : See out = __lasx_xvaddwl_w_h(in_h, in_l)
* =============================================================================
*/
-static inline __m256i __lasx_xvaddwl_h_bu(__m256i in_h, __m256i in_l)
-{
- __m256i out;
+static inline __m256i __lasx_xvaddwl_h_bu(__m256i in_h, __m256i in_l) {
+ __m256i out;
- out = __lasx_xvilvl_b(in_h, in_l);
- out = __lasx_xvhaddw_hu_bu(out, out);
- return out;
+ out = __lasx_xvilvl_b(in_h, in_l);
+ out = __lasx_xvhaddw_hu_bu(out, out);
+ return out;
}
/*
@@ -1088,13 +1086,12 @@ static inline __m256i __lasx_xvaddwl_h_bu(__m256i in_h, __m256i in_l)
* Example : See out = __lasx_xvaddw_w_w_h(in_h, in_l)
* =============================================================================
*/
-static inline __m256i __lasx_xvaddw_h_h_bu(__m256i in_h, __m256i in_l)
-{
- __m256i out;
+static inline __m256i __lasx_xvaddw_h_h_bu(__m256i in_h, __m256i in_l) {
+ __m256i out;
- out = __lasx_xvsllwil_hu_bu(in_l, 0);
- out = __lasx_xvadd_h(in_h, out);
- return out;
+ out = __lasx_xvsllwil_hu_bu(in_l, 0);
+ out = __lasx_xvadd_h(in_h, out);
+ return out;
}
/*
@@ -1111,13 +1108,12 @@ static inline __m256i __lasx_xvaddw_h_h_bu(__m256i in_h, __m256i in_l)
* out : 2, 0,1,2, -1,0,1,1,
* =============================================================================
*/
-static inline __m256i __lasx_xvaddw_w_w_h(__m256i in_h, __m256i in_l)
-{
- __m256i out;
+static inline __m256i __lasx_xvaddw_w_w_h(__m256i in_h, __m256i in_l) {
+ __m256i out;
- out = __lasx_xvsllwil_w_h(in_l, 0);
- out = __lasx_xvadd_w(in_h, out);
- return out;
+ out = __lasx_xvsllwil_w_h(in_l, 0);
+ out = __lasx_xvadd_w(in_h, out);
+ return out;
}
/*
@@ -1138,15 +1134,16 @@ static inline __m256i __lasx_xvaddw_w_w_h(__m256i in_h, __m256i in_l)
* out : 201, 602,1203,2004, -995, -1794,-2793,-3992
* =============================================================================
*/
-static inline __m256i __lasx_xvmaddwl_w_h(__m256i in_c, __m256i in_h, __m256i in_l)
-{
- __m256i tmp0, tmp1, out;
-
- tmp0 = __lasx_xvsllwil_w_h(in_h, 0);
- tmp1 = __lasx_xvsllwil_w_h(in_l, 0);
- tmp0 = __lasx_xvmul_w(tmp0, tmp1);
- out = __lasx_xvadd_w(tmp0, in_c);
- return out;
+static inline __m256i __lasx_xvmaddwl_w_h(__m256i in_c,
+ __m256i in_h,
+ __m256i in_l) {
+ __m256i tmp0, tmp1, out;
+
+ tmp0 = __lasx_xvsllwil_w_h(in_h, 0);
+ tmp1 = __lasx_xvsllwil_w_h(in_l, 0);
+ tmp0 = __lasx_xvmul_w(tmp0, tmp1);
+ out = __lasx_xvadd_w(tmp0, in_c);
+ return out;
}
/*
@@ -1162,15 +1159,16 @@ static inline __m256i __lasx_xvmaddwl_w_h(__m256i in_c, __m256i in_h, __m256i in
* Example : See out = __lasx_xvmaddwl_w_h(in_c, in_h, in_l)
* =============================================================================
*/
-static inline __m256i __lasx_xvmaddwh_w_h(__m256i in_c, __m256i in_h, __m256i in_l)
-{
- __m256i tmp0, tmp1, out;
-
- tmp0 = __lasx_xvilvh_h(in_h, in_h);
- tmp1 = __lasx_xvilvh_h(in_l, in_l);
- tmp0 = __lasx_xvmulwev_w_h(tmp0, tmp1);
- out = __lasx_xvadd_w(tmp0, in_c);
- return out;
+static inline __m256i __lasx_xvmaddwh_w_h(__m256i in_c,
+ __m256i in_h,
+ __m256i in_l) {
+ __m256i tmp0, tmp1, out;
+
+ tmp0 = __lasx_xvilvh_h(in_h, in_h);
+ tmp1 = __lasx_xvilvh_h(in_l, in_l);
+ tmp0 = __lasx_xvmulwev_w_h(tmp0, tmp1);
+ out = __lasx_xvadd_w(tmp0, in_c);
+ return out;
}
/*
@@ -1188,14 +1186,13 @@ static inline __m256i __lasx_xvmaddwh_w_h(__m256i in_c, __m256i in_h, __m256i in
* out : 6,1,3,0, 0,0,1,0
* =============================================================================
*/
-static inline __m256i __lasx_xvmulwl_w_h(__m256i in_h, __m256i in_l)
-{
- __m256i tmp0, tmp1, out;
-
- tmp0 = __lasx_xvsllwil_w_h(in_h, 0);
- tmp1 = __lasx_xvsllwil_w_h(in_l, 0);
- out = __lasx_xvmul_w(tmp0, tmp1);
- return out;
+static inline __m256i __lasx_xvmulwl_w_h(__m256i in_h, __m256i in_l) {
+ __m256i tmp0, tmp1, out;
+
+ tmp0 = __lasx_xvsllwil_w_h(in_h, 0);
+ tmp1 = __lasx_xvsllwil_w_h(in_l, 0);
+ out = __lasx_xvmul_w(tmp0, tmp1);
+ return out;
}
/*
@@ -1213,14 +1210,13 @@ static inline __m256i __lasx_xvmulwl_w_h(__m256i in_h, __m256i in_l)
* out : 0,0,0,0, 0,0,0,1
* =============================================================================
*/
-static inline __m256i __lasx_xvmulwh_w_h(__m256i in_h, __m256i in_l)
-{
- __m256i tmp0, tmp1, out;
-
- tmp0 = __lasx_xvilvh_h(in_h, in_h);
- tmp1 = __lasx_xvilvh_h(in_l, in_l);
- out = __lasx_xvmulwev_w_h(tmp0, tmp1);
- return out;
+static inline __m256i __lasx_xvmulwh_w_h(__m256i in_h, __m256i in_l) {
+ __m256i tmp0, tmp1, out;
+
+ tmp0 = __lasx_xvilvh_h(in_h, in_h);
+ tmp1 = __lasx_xvilvh_h(in_l, in_l);
+ out = __lasx_xvmulwev_w_h(tmp0, tmp1);
+ return out;
}
/*
@@ -1234,18 +1230,17 @@ static inline __m256i __lasx_xvmulwh_w_h(__m256i in_h, __m256i in_l)
* halfword) and the results are stored to the out vector.
* Example : out = __lasx_xvsaddw_hu_hu_bu(in_h, in_l)
* in_h : 2,65532,1,2, 1,0,0,0, 0,0,1,0, 1,0,0,1
- * in_l : 3,6,3,0, 0,0,0,1, 0,0,1,1, 0,0,0,1, 3,18,3,0, 0,0,0,1, 0,0,1,1, 0,0,0,1
- * out : 5,65535,4,2, 1,0,0,1, 3,18,4,0, 1,0,0,2,
+ * in_l : 3,6,3,0, 0,0,0,1, 0,0,1,1, 0,0,0,1, 3,18,3,0, 0,0,0,1, 0,0,1,1,
+ * 0,0,0,1 out : 5,65535,4,2, 1,0,0,1, 3,18,4,0, 1,0,0,2,
* =============================================================================
*/
-static inline __m256i __lasx_xvsaddw_hu_hu_bu(__m256i in_h, __m256i in_l)
-{
- __m256i tmp1, out;
- __m256i zero = {0};
-
- tmp1 = __lasx_xvilvl_b(zero, in_l);
- out = __lasx_xvsadd_hu(in_h, tmp1);
- return out;
+static inline __m256i __lasx_xvsaddw_hu_hu_bu(__m256i in_h, __m256i in_l) {
+ __m256i tmp1, out;
+ __m256i zero = {0};
+
+ tmp1 = __lasx_xvilvl_b(zero, in_l);
+ out = __lasx_xvsadd_hu(in_h, tmp1);
+ return out;
}
/*
@@ -1264,13 +1259,12 @@ static inline __m256i __lasx_xvsaddw_hu_hu_bu(__m256i in_h, __m256i in_l)
* out : 1,2,9,9, 1,9,9,9, 4,4,4,4, 5,5,5,5
* =============================================================================
*/
-static inline __m256i __lasx_xvclip_h(__m256i in, __m256i min, __m256i max)
-{
- __m256i out;
+static inline __m256i __lasx_xvclip_h(__m256i in, __m256i min, __m256i max) {
+ __m256i out;
- out = __lasx_xvmax_h(min, in);
- out = __lasx_xvmin_h(max, out);
- return out;
+ out = __lasx_xvmax_h(min, in);
+ out = __lasx_xvmin_h(max, out);
+ return out;
}
/*
@@ -1283,13 +1277,12 @@ static inline __m256i __lasx_xvclip_h(__m256i in, __m256i min, __m256i max)
* Example : See out = __lasx_xvclip255_w(in)
* =============================================================================
*/
-static inline __m256i __lasx_xvclip255_h(__m256i in)
-{
- __m256i out;
+static inline __m256i __lasx_xvclip255_h(__m256i in) {
+ __m256i out;
- out = __lasx_xvmaxi_h(in, 0);
- out = __lasx_xvsat_hu(out, 7);
- return out;
+ out = __lasx_xvmaxi_h(in, 0);
+ out = __lasx_xvsat_hu(out, 7);
+ return out;
}
/*
@@ -1304,13 +1297,12 @@ static inline __m256i __lasx_xvclip255_h(__m256i in)
* out : 0,255,255,249, 0,255,255,249
* =============================================================================
*/
-static inline __m256i __lasx_xvclip255_w(__m256i in)
-{
- __m256i out;
+static inline __m256i __lasx_xvclip255_w(__m256i in) {
+ __m256i out;
- out = __lasx_xvmaxi_w(in, 0);
- out = __lasx_xvsat_wu(out, 7);
- return out;
+ out = __lasx_xvmaxi_w(in, 0);
+ out = __lasx_xvsat_wu(out, 7);
+ return out;
}
/*
@@ -1329,13 +1321,12 @@ static inline __m256i __lasx_xvclip255_w(__m256i in)
* out : 11,11,11,11, 11,11,11,11, 11,11,11,11, 11,11,11,11
* =============================================================================
*/
-static inline __m256i __lasx_xvsplati_l_h(__m256i in, int idx)
-{
- __m256i out;
+static inline __m256i __lasx_xvsplati_l_h(__m256i in, int idx) {
+ __m256i out;
- out = __lasx_xvpermi_q(in, in, 0x02);
- out = __lasx_xvreplve_h(out, idx);
- return out;
+ out = __lasx_xvpermi_q(in, in, 0x02);
+ out = __lasx_xvreplve_h(out, idx);
+ return out;
}
/*
@@ -1354,13 +1345,12 @@ static inline __m256i __lasx_xvsplati_l_h(__m256i in, int idx)
* out : 2,2,2,2, 2,2,2,2, 2,2,2,2, 2,2,2,2
* =============================================================================
*/
-static inline __m256i __lasx_xvsplati_h_h(__m256i in, int idx)
-{
- __m256i out;
+static inline __m256i __lasx_xvsplati_h_h(__m256i in, int idx) {
+ __m256i out;
- out = __lasx_xvpermi_q(in, in, 0x13);
- out = __lasx_xvreplve_h(out, idx);
- return out;
+ out = __lasx_xvpermi_q(in, in, 0x13);
+ out = __lasx_xvreplve_h(out, idx);
+ return out;
}
/*
@@ -1380,33 +1370,29 @@ static inline __m256i __lasx_xvsplati_h_h(__m256i in, int idx)
* _out3 : 4,4,4,4
* =============================================================================
*/
-#define LASX_TRANSPOSE4x4_D(_in0, _in1, _in2, _in3, _out0, _out1, _out2, _out3) \
-{ \
- __m256i _tmp0, _tmp1, _tmp2, _tmp3; \
- _tmp0 = __lasx_xvilvl_d(_in1, _in0); \
- _tmp1 = __lasx_xvilvh_d(_in1, _in0); \
- _tmp2 = __lasx_xvilvl_d(_in3, _in2); \
- _tmp3 = __lasx_xvilvh_d(_in3, _in2); \
- _out0 = __lasx_xvpermi_q(_tmp2, _tmp0, 0x20); \
- _out2 = __lasx_xvpermi_q(_tmp2, _tmp0, 0x31); \
- _out1 = __lasx_xvpermi_q(_tmp3, _tmp1, 0x20); \
- _out3 = __lasx_xvpermi_q(_tmp3, _tmp1, 0x31); \
-}
+#define LASX_TRANSPOSE4x4_D(_in0, _in1, _in2, _in3, _out0, _out1, _out2, \
+ _out3) \
+ { \
+ __m256i _tmp0, _tmp1, _tmp2, _tmp3; \
+ _tmp0 = __lasx_xvilvl_d(_in1, _in0); \
+ _tmp1 = __lasx_xvilvh_d(_in1, _in0); \
+ _tmp2 = __lasx_xvilvl_d(_in3, _in2); \
+ _tmp3 = __lasx_xvilvh_d(_in3, _in2); \
+ _out0 = __lasx_xvpermi_q(_tmp2, _tmp0, 0x20); \
+ _out2 = __lasx_xvpermi_q(_tmp2, _tmp0, 0x31); \
+ _out1 = __lasx_xvpermi_q(_tmp3, _tmp1, 0x20); \
+ _out3 = __lasx_xvpermi_q(_tmp3, _tmp1, 0x31); \
+ }
/*
* =============================================================================
* Description : Transpose 8x8 block with word elements in vectors
* Arguments : Inputs - _in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7
- * Outputs - _out0, _out1, _out2, _out3, _out4, _out5, _out6, _out7
- * Example : LASX_TRANSPOSE8x8_W
- * _in0 : 1,2,3,4,5,6,7,8
- * _in1 : 2,2,3,4,5,6,7,8
- * _in2 : 3,2,3,4,5,6,7,8
- * _in3 : 4,2,3,4,5,6,7,8
- * _in4 : 5,2,3,4,5,6,7,8
- * _in5 : 6,2,3,4,5,6,7,8
- * _in6 : 7,2,3,4,5,6,7,8
- * _in7 : 8,2,3,4,5,6,7,8
+ * Outputs - _out0, _out1, _out2, _out3, _out4, _out5, _out6,
+ * _out7 Example : LASX_TRANSPOSE8x8_W _in0 : 1,2,3,4,5,6,7,8 _in1 :
+ * 2,2,3,4,5,6,7,8 _in2 : 3,2,3,4,5,6,7,8 _in3 : 4,2,3,4,5,6,7,8 _in4 :
+ * 5,2,3,4,5,6,7,8 _in5 : 6,2,3,4,5,6,7,8 _in6 : 7,2,3,4,5,6,7,8 _in7 :
+ * 8,2,3,4,5,6,7,8
*
* _out0 : 1,2,3,4,5,6,7,8
* _out1 : 2,2,2,2,2,2,2,2
@@ -1418,38 +1404,39 @@ static inline __m256i __lasx_xvsplati_h_h(__m256i in, int idx)
* _out7 : 8,8,8,8,8,8,8,8
* =============================================================================
*/
-#define LASX_TRANSPOSE8x8_W(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \
- _out0, _out1, _out2, _out3, _out4, _out5, _out6, _out7) \
-{ \
- __m256i _s0_m, _s1_m; \
- __m256i _tmp0_m, _tmp1_m, _tmp2_m, _tmp3_m; \
- __m256i _tmp4_m, _tmp5_m, _tmp6_m, _tmp7_m; \
- \
- _s0_m = __lasx_xvilvl_w(_in2, _in0); \
- _s1_m = __lasx_xvilvl_w(_in3, _in1); \
- _tmp0_m = __lasx_xvilvl_w(_s1_m, _s0_m); \
- _tmp1_m = __lasx_xvilvh_w(_s1_m, _s0_m); \
- _s0_m = __lasx_xvilvh_w(_in2, _in0); \
- _s1_m = __lasx_xvilvh_w(_in3, _in1); \
- _tmp2_m = __lasx_xvilvl_w(_s1_m, _s0_m); \
- _tmp3_m = __lasx_xvilvh_w(_s1_m, _s0_m); \
- _s0_m = __lasx_xvilvl_w(_in6, _in4); \
- _s1_m = __lasx_xvilvl_w(_in7, _in5); \
- _tmp4_m = __lasx_xvilvl_w(_s1_m, _s0_m); \
- _tmp5_m = __lasx_xvilvh_w(_s1_m, _s0_m); \
- _s0_m = __lasx_xvilvh_w(_in6, _in4); \
- _s1_m = __lasx_xvilvh_w(_in7, _in5); \
- _tmp6_m = __lasx_xvilvl_w(_s1_m, _s0_m); \
- _tmp7_m = __lasx_xvilvh_w(_s1_m, _s0_m); \
- _out0 = __lasx_xvpermi_q(_tmp4_m, _tmp0_m, 0x20); \
- _out1 = __lasx_xvpermi_q(_tmp5_m, _tmp1_m, 0x20); \
- _out2 = __lasx_xvpermi_q(_tmp6_m, _tmp2_m, 0x20); \
- _out3 = __lasx_xvpermi_q(_tmp7_m, _tmp3_m, 0x20); \
- _out4 = __lasx_xvpermi_q(_tmp4_m, _tmp0_m, 0x31); \
- _out5 = __lasx_xvpermi_q(_tmp5_m, _tmp1_m, 0x31); \
- _out6 = __lasx_xvpermi_q(_tmp6_m, _tmp2_m, 0x31); \
- _out7 = __lasx_xvpermi_q(_tmp7_m, _tmp3_m, 0x31); \
-}
+#define LASX_TRANSPOSE8x8_W(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \
+ _out0, _out1, _out2, _out3, _out4, _out5, _out6, \
+ _out7) \
+ { \
+ __m256i _s0_m, _s1_m; \
+ __m256i _tmp0_m, _tmp1_m, _tmp2_m, _tmp3_m; \
+ __m256i _tmp4_m, _tmp5_m, _tmp6_m, _tmp7_m; \
+ \
+ _s0_m = __lasx_xvilvl_w(_in2, _in0); \
+ _s1_m = __lasx_xvilvl_w(_in3, _in1); \
+ _tmp0_m = __lasx_xvilvl_w(_s1_m, _s0_m); \
+ _tmp1_m = __lasx_xvilvh_w(_s1_m, _s0_m); \
+ _s0_m = __lasx_xvilvh_w(_in2, _in0); \
+ _s1_m = __lasx_xvilvh_w(_in3, _in1); \
+ _tmp2_m = __lasx_xvilvl_w(_s1_m, _s0_m); \
+ _tmp3_m = __lasx_xvilvh_w(_s1_m, _s0_m); \
+ _s0_m = __lasx_xvilvl_w(_in6, _in4); \
+ _s1_m = __lasx_xvilvl_w(_in7, _in5); \
+ _tmp4_m = __lasx_xvilvl_w(_s1_m, _s0_m); \
+ _tmp5_m = __lasx_xvilvh_w(_s1_m, _s0_m); \
+ _s0_m = __lasx_xvilvh_w(_in6, _in4); \
+ _s1_m = __lasx_xvilvh_w(_in7, _in5); \
+ _tmp6_m = __lasx_xvilvl_w(_s1_m, _s0_m); \
+ _tmp7_m = __lasx_xvilvh_w(_s1_m, _s0_m); \
+ _out0 = __lasx_xvpermi_q(_tmp4_m, _tmp0_m, 0x20); \
+ _out1 = __lasx_xvpermi_q(_tmp5_m, _tmp1_m, 0x20); \
+ _out2 = __lasx_xvpermi_q(_tmp6_m, _tmp2_m, 0x20); \
+ _out3 = __lasx_xvpermi_q(_tmp7_m, _tmp3_m, 0x20); \
+ _out4 = __lasx_xvpermi_q(_tmp4_m, _tmp0_m, 0x31); \
+ _out5 = __lasx_xvpermi_q(_tmp5_m, _tmp1_m, 0x31); \
+ _out6 = __lasx_xvpermi_q(_tmp6_m, _tmp2_m, 0x31); \
+ _out7 = __lasx_xvpermi_q(_tmp7_m, _tmp3_m, 0x31); \
+ }
/*
* =============================================================================
@@ -1457,52 +1444,52 @@ static inline __m256i __lasx_xvsplati_h_h(__m256i in, int idx)
* Arguments : Inputs - _in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7,
* _in8, _in9, _in10, _in11, _in12, _in13, _in14, _in15
* (input 16x8 byte block)
- * Outputs - _out0, _out1, _out2, _out3, _out4, _out5, _out6, _out7
- * (output 8x16 byte block)
- * Details : The rows of the matrix become columns, and the columns become rows.
- * Example : See LASX_TRANSPOSE16x8_H
+ * Outputs - _out0, _out1, _out2, _out3, _out4, _out5, _out6,
+ * _out7 (output 8x16 byte block) Details : The rows of the matrix become
+ * columns, and the columns become rows. Example : See LASX_TRANSPOSE16x8_H
* =============================================================================
*/
-#define LASX_TRANSPOSE16x8_B(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \
- _in8, _in9, _in10, _in11, _in12, _in13, _in14, _in15, \
- _out0, _out1, _out2, _out3, _out4, _out5, _out6, _out7) \
-{ \
- __m256i _tmp0_m, _tmp1_m, _tmp2_m, _tmp3_m; \
- __m256i _tmp4_m, _tmp5_m, _tmp6_m, _tmp7_m; \
- \
- _tmp0_m = __lasx_xvilvl_b(_in2, _in0); \
- _tmp1_m = __lasx_xvilvl_b(_in3, _in1); \
- _tmp2_m = __lasx_xvilvl_b(_in6, _in4); \
- _tmp3_m = __lasx_xvilvl_b(_in7, _in5); \
- _tmp4_m = __lasx_xvilvl_b(_in10, _in8); \
- _tmp5_m = __lasx_xvilvl_b(_in11, _in9); \
- _tmp6_m = __lasx_xvilvl_b(_in14, _in12); \
- _tmp7_m = __lasx_xvilvl_b(_in15, _in13); \
- _out0 = __lasx_xvilvl_b(_tmp1_m, _tmp0_m); \
- _out1 = __lasx_xvilvh_b(_tmp1_m, _tmp0_m); \
- _out2 = __lasx_xvilvl_b(_tmp3_m, _tmp2_m); \
- _out3 = __lasx_xvilvh_b(_tmp3_m, _tmp2_m); \
- _out4 = __lasx_xvilvl_b(_tmp5_m, _tmp4_m); \
- _out5 = __lasx_xvilvh_b(_tmp5_m, _tmp4_m); \
- _out6 = __lasx_xvilvl_b(_tmp7_m, _tmp6_m); \
- _out7 = __lasx_xvilvh_b(_tmp7_m, _tmp6_m); \
- _tmp0_m = __lasx_xvilvl_w(_out2, _out0); \
- _tmp2_m = __lasx_xvilvh_w(_out2, _out0); \
- _tmp4_m = __lasx_xvilvl_w(_out3, _out1); \
- _tmp6_m = __lasx_xvilvh_w(_out3, _out1); \
- _tmp1_m = __lasx_xvilvl_w(_out6, _out4); \
- _tmp3_m = __lasx_xvilvh_w(_out6, _out4); \
- _tmp5_m = __lasx_xvilvl_w(_out7, _out5); \
- _tmp7_m = __lasx_xvilvh_w(_out7, _out5); \
- _out0 = __lasx_xvilvl_d(_tmp1_m, _tmp0_m); \
- _out1 = __lasx_xvilvh_d(_tmp1_m, _tmp0_m); \
- _out2 = __lasx_xvilvl_d(_tmp3_m, _tmp2_m); \
- _out3 = __lasx_xvilvh_d(_tmp3_m, _tmp2_m); \
- _out4 = __lasx_xvilvl_d(_tmp5_m, _tmp4_m); \
- _out5 = __lasx_xvilvh_d(_tmp5_m, _tmp4_m); \
- _out6 = __lasx_xvilvl_d(_tmp7_m, _tmp6_m); \
- _out7 = __lasx_xvilvh_d(_tmp7_m, _tmp6_m); \
-}
+#define LASX_TRANSPOSE16x8_B(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \
+ _in8, _in9, _in10, _in11, _in12, _in13, _in14, \
+ _in15, _out0, _out1, _out2, _out3, _out4, _out5, \
+ _out6, _out7) \
+ { \
+ __m256i _tmp0_m, _tmp1_m, _tmp2_m, _tmp3_m; \
+ __m256i _tmp4_m, _tmp5_m, _tmp6_m, _tmp7_m; \
+ \
+ _tmp0_m = __lasx_xvilvl_b(_in2, _in0); \
+ _tmp1_m = __lasx_xvilvl_b(_in3, _in1); \
+ _tmp2_m = __lasx_xvilvl_b(_in6, _in4); \
+ _tmp3_m = __lasx_xvilvl_b(_in7, _in5); \
+ _tmp4_m = __lasx_xvilvl_b(_in10, _in8); \
+ _tmp5_m = __lasx_xvilvl_b(_in11, _in9); \
+ _tmp6_m = __lasx_xvilvl_b(_in14, _in12); \
+ _tmp7_m = __lasx_xvilvl_b(_in15, _in13); \
+ _out0 = __lasx_xvilvl_b(_tmp1_m, _tmp0_m); \
+ _out1 = __lasx_xvilvh_b(_tmp1_m, _tmp0_m); \
+ _out2 = __lasx_xvilvl_b(_tmp3_m, _tmp2_m); \
+ _out3 = __lasx_xvilvh_b(_tmp3_m, _tmp2_m); \
+ _out4 = __lasx_xvilvl_b(_tmp5_m, _tmp4_m); \
+ _out5 = __lasx_xvilvh_b(_tmp5_m, _tmp4_m); \
+ _out6 = __lasx_xvilvl_b(_tmp7_m, _tmp6_m); \
+ _out7 = __lasx_xvilvh_b(_tmp7_m, _tmp6_m); \
+ _tmp0_m = __lasx_xvilvl_w(_out2, _out0); \
+ _tmp2_m = __lasx_xvilvh_w(_out2, _out0); \
+ _tmp4_m = __lasx_xvilvl_w(_out3, _out1); \
+ _tmp6_m = __lasx_xvilvh_w(_out3, _out1); \
+ _tmp1_m = __lasx_xvilvl_w(_out6, _out4); \
+ _tmp3_m = __lasx_xvilvh_w(_out6, _out4); \
+ _tmp5_m = __lasx_xvilvl_w(_out7, _out5); \
+ _tmp7_m = __lasx_xvilvh_w(_out7, _out5); \
+ _out0 = __lasx_xvilvl_d(_tmp1_m, _tmp0_m); \
+ _out1 = __lasx_xvilvh_d(_tmp1_m, _tmp0_m); \
+ _out2 = __lasx_xvilvl_d(_tmp3_m, _tmp2_m); \
+ _out3 = __lasx_xvilvh_d(_tmp3_m, _tmp2_m); \
+ _out4 = __lasx_xvilvl_d(_tmp5_m, _tmp4_m); \
+ _out5 = __lasx_xvilvh_d(_tmp5_m, _tmp4_m); \
+ _out6 = __lasx_xvilvl_d(_tmp7_m, _tmp6_m); \
+ _out7 = __lasx_xvilvh_d(_tmp7_m, _tmp6_m); \
+ }
/*
* =============================================================================
@@ -1510,20 +1497,14 @@ static inline __m256i __lasx_xvsplati_h_h(__m256i in, int idx)
* Arguments : Inputs - _in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7,
* _in8, _in9, _in10, _in11, _in12, _in13, _in14, _in15
* (input 16x8 byte block)
- * Outputs - _out0, _out1, _out2, _out3, _out4, _out5, _out6, _out7
- * (output 8x16 byte block)
- * Details : The rows of the matrix become columns, and the columns become rows.
- * Example : LASX_TRANSPOSE16x8_H
- * _in0 : 1,2,3,4,5,6,7,8,0,0,0,0,0,0,0,0
- * _in1 : 2,2,3,4,5,6,7,8,0,0,0,0,0,0,0,0
- * _in2 : 3,2,3,4,5,6,7,8,0,0,0,0,0,0,0,0
- * _in3 : 4,2,3,4,5,6,7,8,0,0,0,0,0,0,0,0
- * _in4 : 5,2,3,4,5,6,7,8,0,0,0,0,0,0,0,0
- * _in5 : 6,2,3,4,5,6,7,8,0,0,0,0,0,0,0,0
- * _in6 : 7,2,3,4,5,6,7,8,0,0,0,0,0,0,0,0
- * _in7 : 8,2,3,4,5,6,7,8,0,0,0,0,0,0,0,0
- * _in8 : 9,2,3,4,5,6,7,8,0,0,0,0,0,0,0,0
- * _in9 : 1,2,3,4,5,6,7,8,0,0,0,0,0,0,0,0
+ * Outputs - _out0, _out1, _out2, _out3, _out4, _out5, _out6,
+ * _out7 (output 8x16 byte block) Details : The rows of the matrix become
+ * columns, and the columns become rows. Example : LASX_TRANSPOSE16x8_H _in0
+ * : 1,2,3,4,5,6,7,8,0,0,0,0,0,0,0,0 _in1 : 2,2,3,4,5,6,7,8,0,0,0,0,0,0,0,0 _in2
+ * : 3,2,3,4,5,6,7,8,0,0,0,0,0,0,0,0 _in3 : 4,2,3,4,5,6,7,8,0,0,0,0,0,0,0,0 _in4
+ * : 5,2,3,4,5,6,7,8,0,0,0,0,0,0,0,0 _in5 : 6,2,3,4,5,6,7,8,0,0,0,0,0,0,0,0 _in6
+ * : 7,2,3,4,5,6,7,8,0,0,0,0,0,0,0,0 _in7 : 8,2,3,4,5,6,7,8,0,0,0,0,0,0,0,0 _in8
+ * : 9,2,3,4,5,6,7,8,0,0,0,0,0,0,0,0 _in9 : 1,2,3,4,5,6,7,8,0,0,0,0,0,0,0,0
* _in10 : 0,2,3,4,5,6,7,8,0,0,0,0,0,0,0,0
* _in11 : 2,2,3,4,5,6,7,8,0,0,0,0,0,0,0,0
* _in12 : 3,2,3,4,5,6,7,8,0,0,0,0,0,0,0,0
@@ -1541,72 +1522,73 @@ static inline __m256i __lasx_xvsplati_h_h(__m256i in, int idx)
* _out7 : 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8
* =============================================================================
*/
-#define LASX_TRANSPOSE16x8_H(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \
- _in8, _in9, _in10, _in11, _in12, _in13, _in14, _in15, \
- _out0, _out1, _out2, _out3, _out4, _out5, _out6, _out7) \
- { \
- __m256i _tmp0_m, _tmp1_m, _tmp2_m, _tmp3_m; \
- __m256i _tmp4_m, _tmp5_m, _tmp6_m, _tmp7_m; \
- __m256i _t0, _t1, _t2, _t3, _t4, _t5, _t6, _t7; \
- \
- _tmp0_m = __lasx_xvilvl_h(_in2, _in0); \
- _tmp1_m = __lasx_xvilvl_h(_in3, _in1); \
- _tmp2_m = __lasx_xvilvl_h(_in6, _in4); \
- _tmp3_m = __lasx_xvilvl_h(_in7, _in5); \
- _tmp4_m = __lasx_xvilvl_h(_in10, _in8); \
- _tmp5_m = __lasx_xvilvl_h(_in11, _in9); \
- _tmp6_m = __lasx_xvilvl_h(_in14, _in12); \
- _tmp7_m = __lasx_xvilvl_h(_in15, _in13); \
- _t0 = __lasx_xvilvl_h(_tmp1_m, _tmp0_m); \
- _t1 = __lasx_xvilvh_h(_tmp1_m, _tmp0_m); \
- _t2 = __lasx_xvilvl_h(_tmp3_m, _tmp2_m); \
- _t3 = __lasx_xvilvh_h(_tmp3_m, _tmp2_m); \
- _t4 = __lasx_xvilvl_h(_tmp5_m, _tmp4_m); \
- _t5 = __lasx_xvilvh_h(_tmp5_m, _tmp4_m); \
- _t6 = __lasx_xvilvl_h(_tmp7_m, _tmp6_m); \
- _t7 = __lasx_xvilvh_h(_tmp7_m, _tmp6_m); \
- _tmp0_m = __lasx_xvilvl_d(_t2, _t0); \
- _tmp2_m = __lasx_xvilvh_d(_t2, _t0); \
- _tmp4_m = __lasx_xvilvl_d(_t3, _t1); \
- _tmp6_m = __lasx_xvilvh_d(_t3, _t1); \
- _tmp1_m = __lasx_xvilvl_d(_t6, _t4); \
- _tmp3_m = __lasx_xvilvh_d(_t6, _t4); \
- _tmp5_m = __lasx_xvilvl_d(_t7, _t5); \
- _tmp7_m = __lasx_xvilvh_d(_t7, _t5); \
- _out0 = __lasx_xvpermi_q(_tmp1_m, _tmp0_m, 0x20); \
- _out1 = __lasx_xvpermi_q(_tmp3_m, _tmp2_m, 0x20); \
- _out2 = __lasx_xvpermi_q(_tmp5_m, _tmp4_m, 0x20); \
- _out3 = __lasx_xvpermi_q(_tmp7_m, _tmp6_m, 0x20); \
- \
- _tmp0_m = __lasx_xvilvh_h(_in2, _in0); \
- _tmp1_m = __lasx_xvilvh_h(_in3, _in1); \
- _tmp2_m = __lasx_xvilvh_h(_in6, _in4); \
- _tmp3_m = __lasx_xvilvh_h(_in7, _in5); \
- _tmp4_m = __lasx_xvilvh_h(_in10, _in8); \
- _tmp5_m = __lasx_xvilvh_h(_in11, _in9); \
- _tmp6_m = __lasx_xvilvh_h(_in14, _in12); \
- _tmp7_m = __lasx_xvilvh_h(_in15, _in13); \
- _t0 = __lasx_xvilvl_h(_tmp1_m, _tmp0_m); \
- _t1 = __lasx_xvilvh_h(_tmp1_m, _tmp0_m); \
- _t2 = __lasx_xvilvl_h(_tmp3_m, _tmp2_m); \
- _t3 = __lasx_xvilvh_h(_tmp3_m, _tmp2_m); \
- _t4 = __lasx_xvilvl_h(_tmp5_m, _tmp4_m); \
- _t5 = __lasx_xvilvh_h(_tmp5_m, _tmp4_m); \
- _t6 = __lasx_xvilvl_h(_tmp7_m, _tmp6_m); \
- _t7 = __lasx_xvilvh_h(_tmp7_m, _tmp6_m); \
- _tmp0_m = __lasx_xvilvl_d(_t2, _t0); \
- _tmp2_m = __lasx_xvilvh_d(_t2, _t0); \
- _tmp4_m = __lasx_xvilvl_d(_t3, _t1); \
- _tmp6_m = __lasx_xvilvh_d(_t3, _t1); \
- _tmp1_m = __lasx_xvilvl_d(_t6, _t4); \
- _tmp3_m = __lasx_xvilvh_d(_t6, _t4); \
- _tmp5_m = __lasx_xvilvl_d(_t7, _t5); \
- _tmp7_m = __lasx_xvilvh_d(_t7, _t5); \
- _out4 = __lasx_xvpermi_q(_tmp1_m, _tmp0_m, 0x20); \
- _out5 = __lasx_xvpermi_q(_tmp3_m, _tmp2_m, 0x20); \
- _out6 = __lasx_xvpermi_q(_tmp5_m, _tmp4_m, 0x20); \
- _out7 = __lasx_xvpermi_q(_tmp7_m, _tmp6_m, 0x20); \
-}
+#define LASX_TRANSPOSE16x8_H(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \
+ _in8, _in9, _in10, _in11, _in12, _in13, _in14, \
+ _in15, _out0, _out1, _out2, _out3, _out4, _out5, \
+ _out6, _out7) \
+ { \
+ __m256i _tmp0_m, _tmp1_m, _tmp2_m, _tmp3_m; \
+ __m256i _tmp4_m, _tmp5_m, _tmp6_m, _tmp7_m; \
+ __m256i _t0, _t1, _t2, _t3, _t4, _t5, _t6, _t7; \
+ \
+ _tmp0_m = __lasx_xvilvl_h(_in2, _in0); \
+ _tmp1_m = __lasx_xvilvl_h(_in3, _in1); \
+ _tmp2_m = __lasx_xvilvl_h(_in6, _in4); \
+ _tmp3_m = __lasx_xvilvl_h(_in7, _in5); \
+ _tmp4_m = __lasx_xvilvl_h(_in10, _in8); \
+ _tmp5_m = __lasx_xvilvl_h(_in11, _in9); \
+ _tmp6_m = __lasx_xvilvl_h(_in14, _in12); \
+ _tmp7_m = __lasx_xvilvl_h(_in15, _in13); \
+ _t0 = __lasx_xvilvl_h(_tmp1_m, _tmp0_m); \
+ _t1 = __lasx_xvilvh_h(_tmp1_m, _tmp0_m); \
+ _t2 = __lasx_xvilvl_h(_tmp3_m, _tmp2_m); \
+ _t3 = __lasx_xvilvh_h(_tmp3_m, _tmp2_m); \
+ _t4 = __lasx_xvilvl_h(_tmp5_m, _tmp4_m); \
+ _t5 = __lasx_xvilvh_h(_tmp5_m, _tmp4_m); \
+ _t6 = __lasx_xvilvl_h(_tmp7_m, _tmp6_m); \
+ _t7 = __lasx_xvilvh_h(_tmp7_m, _tmp6_m); \
+ _tmp0_m = __lasx_xvilvl_d(_t2, _t0); \
+ _tmp2_m = __lasx_xvilvh_d(_t2, _t0); \
+ _tmp4_m = __lasx_xvilvl_d(_t3, _t1); \
+ _tmp6_m = __lasx_xvilvh_d(_t3, _t1); \
+ _tmp1_m = __lasx_xvilvl_d(_t6, _t4); \
+ _tmp3_m = __lasx_xvilvh_d(_t6, _t4); \
+ _tmp5_m = __lasx_xvilvl_d(_t7, _t5); \
+ _tmp7_m = __lasx_xvilvh_d(_t7, _t5); \
+ _out0 = __lasx_xvpermi_q(_tmp1_m, _tmp0_m, 0x20); \
+ _out1 = __lasx_xvpermi_q(_tmp3_m, _tmp2_m, 0x20); \
+ _out2 = __lasx_xvpermi_q(_tmp5_m, _tmp4_m, 0x20); \
+ _out3 = __lasx_xvpermi_q(_tmp7_m, _tmp6_m, 0x20); \
+ \
+ _tmp0_m = __lasx_xvilvh_h(_in2, _in0); \
+ _tmp1_m = __lasx_xvilvh_h(_in3, _in1); \
+ _tmp2_m = __lasx_xvilvh_h(_in6, _in4); \
+ _tmp3_m = __lasx_xvilvh_h(_in7, _in5); \
+ _tmp4_m = __lasx_xvilvh_h(_in10, _in8); \
+ _tmp5_m = __lasx_xvilvh_h(_in11, _in9); \
+ _tmp6_m = __lasx_xvilvh_h(_in14, _in12); \
+ _tmp7_m = __lasx_xvilvh_h(_in15, _in13); \
+ _t0 = __lasx_xvilvl_h(_tmp1_m, _tmp0_m); \
+ _t1 = __lasx_xvilvh_h(_tmp1_m, _tmp0_m); \
+ _t2 = __lasx_xvilvl_h(_tmp3_m, _tmp2_m); \
+ _t3 = __lasx_xvilvh_h(_tmp3_m, _tmp2_m); \
+ _t4 = __lasx_xvilvl_h(_tmp5_m, _tmp4_m); \
+ _t5 = __lasx_xvilvh_h(_tmp5_m, _tmp4_m); \
+ _t6 = __lasx_xvilvl_h(_tmp7_m, _tmp6_m); \
+ _t7 = __lasx_xvilvh_h(_tmp7_m, _tmp6_m); \
+ _tmp0_m = __lasx_xvilvl_d(_t2, _t0); \
+ _tmp2_m = __lasx_xvilvh_d(_t2, _t0); \
+ _tmp4_m = __lasx_xvilvl_d(_t3, _t1); \
+ _tmp6_m = __lasx_xvilvh_d(_t3, _t1); \
+ _tmp1_m = __lasx_xvilvl_d(_t6, _t4); \
+ _tmp3_m = __lasx_xvilvh_d(_t6, _t4); \
+ _tmp5_m = __lasx_xvilvl_d(_t7, _t5); \
+ _tmp7_m = __lasx_xvilvh_d(_t7, _t5); \
+ _out4 = __lasx_xvpermi_q(_tmp1_m, _tmp0_m, 0x20); \
+ _out5 = __lasx_xvpermi_q(_tmp3_m, _tmp2_m, 0x20); \
+ _out6 = __lasx_xvpermi_q(_tmp5_m, _tmp4_m, 0x20); \
+ _out7 = __lasx_xvpermi_q(_tmp7_m, _tmp6_m, 0x20); \
+ }
/*
* =============================================================================
@@ -1614,70 +1596,67 @@ static inline __m256i __lasx_xvsplati_h_h(__m256i in, int idx)
* Arguments : Inputs - _in0, _in1, _in2, _in3
* Outputs - _out0, _out1, _out2, _out3
* Return Type - signed halfword
- * Details : The rows of the matrix become columns, and the columns become rows.
- * Example : See LASX_TRANSPOSE8x8_H
+ * Details : The rows of the matrix become columns, and the columns become
+ * rows. Example : See LASX_TRANSPOSE8x8_H
* =============================================================================
*/
-#define LASX_TRANSPOSE4x4_H(_in0, _in1, _in2, _in3, _out0, _out1, _out2, _out3) \
-{ \
- __m256i _s0_m, _s1_m; \
- \
- _s0_m = __lasx_xvilvl_h(_in1, _in0); \
- _s1_m = __lasx_xvilvl_h(_in3, _in2); \
- _out0 = __lasx_xvilvl_w(_s1_m, _s0_m); \
- _out2 = __lasx_xvilvh_w(_s1_m, _s0_m); \
- _out1 = __lasx_xvilvh_d(_out0, _out0); \
- _out3 = __lasx_xvilvh_d(_out2, _out2); \
-}
+#define LASX_TRANSPOSE4x4_H(_in0, _in1, _in2, _in3, _out0, _out1, _out2, \
+ _out3) \
+ { \
+ __m256i _s0_m, _s1_m; \
+ \
+ _s0_m = __lasx_xvilvl_h(_in1, _in0); \
+ _s1_m = __lasx_xvilvl_h(_in3, _in2); \
+ _out0 = __lasx_xvilvl_w(_s1_m, _s0_m); \
+ _out2 = __lasx_xvilvh_w(_s1_m, _s0_m); \
+ _out1 = __lasx_xvilvh_d(_out0, _out0); \
+ _out3 = __lasx_xvilvh_d(_out2, _out2); \
+ }
/*
* =============================================================================
* Description : Transpose input 8x8 byte block
* Arguments : Inputs - _in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7
* (input 8x8 byte block)
- * Outputs - _out0, _out1, _out2, _out3, _out4, _out5, _out6, _out7
- * (output 8x8 byte block)
- * Example : See LASX_TRANSPOSE8x8_H
+ * Outputs - _out0, _out1, _out2, _out3, _out4, _out5, _out6,
+ * _out7 (output 8x8 byte block) Example : See LASX_TRANSPOSE8x8_H
* =============================================================================
*/
-#define LASX_TRANSPOSE8x8_B(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, _out0, \
- _out1, _out2, _out3, _out4, _out5, _out6, _out7) \
-{ \
- __m256i _tmp0_m, _tmp1_m, _tmp2_m, _tmp3_m; \
- __m256i _tmp4_m, _tmp5_m, _tmp6_m, _tmp7_m; \
- _tmp0_m = __lasx_xvilvl_b(_in2, _in0); \
- _tmp1_m = __lasx_xvilvl_b(_in3, _in1); \
- _tmp2_m = __lasx_xvilvl_b(_in6, _in4); \
- _tmp3_m = __lasx_xvilvl_b(_in7, _in5); \
- _tmp4_m = __lasx_xvilvl_b(_tmp1_m, _tmp0_m); \
- _tmp5_m = __lasx_xvilvh_b(_tmp1_m, _tmp0_m); \
- _tmp6_m = __lasx_xvilvl_b(_tmp3_m, _tmp2_m); \
- _tmp7_m = __lasx_xvilvh_b(_tmp3_m, _tmp2_m); \
- _out0 = __lasx_xvilvl_w(_tmp6_m, _tmp4_m); \
- _out2 = __lasx_xvilvh_w(_tmp6_m, _tmp4_m); \
- _out4 = __lasx_xvilvl_w(_tmp7_m, _tmp5_m); \
- _out6 = __lasx_xvilvh_w(_tmp7_m, _tmp5_m); \
- _out1 = __lasx_xvbsrl_v(_out0, 8); \
- _out3 = __lasx_xvbsrl_v(_out2, 8); \
- _out5 = __lasx_xvbsrl_v(_out4, 8); \
- _out7 = __lasx_xvbsrl_v(_out6, 8); \
-}
+#define LASX_TRANSPOSE8x8_B(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \
+ _out0, _out1, _out2, _out3, _out4, _out5, _out6, \
+ _out7) \
+ { \
+ __m256i _tmp0_m, _tmp1_m, _tmp2_m, _tmp3_m; \
+ __m256i _tmp4_m, _tmp5_m, _tmp6_m, _tmp7_m; \
+ _tmp0_m = __lasx_xvilvl_b(_in2, _in0); \
+ _tmp1_m = __lasx_xvilvl_b(_in3, _in1); \
+ _tmp2_m = __lasx_xvilvl_b(_in6, _in4); \
+ _tmp3_m = __lasx_xvilvl_b(_in7, _in5); \
+ _tmp4_m = __lasx_xvilvl_b(_tmp1_m, _tmp0_m); \
+ _tmp5_m = __lasx_xvilvh_b(_tmp1_m, _tmp0_m); \
+ _tmp6_m = __lasx_xvilvl_b(_tmp3_m, _tmp2_m); \
+ _tmp7_m = __lasx_xvilvh_b(_tmp3_m, _tmp2_m); \
+ _out0 = __lasx_xvilvl_w(_tmp6_m, _tmp4_m); \
+ _out2 = __lasx_xvilvh_w(_tmp6_m, _tmp4_m); \
+ _out4 = __lasx_xvilvl_w(_tmp7_m, _tmp5_m); \
+ _out6 = __lasx_xvilvh_w(_tmp7_m, _tmp5_m); \
+ _out1 = __lasx_xvbsrl_v(_out0, 8); \
+ _out3 = __lasx_xvbsrl_v(_out2, 8); \
+ _out5 = __lasx_xvbsrl_v(_out4, 8); \
+ _out7 = __lasx_xvbsrl_v(_out6, 8); \
+ }
/*
* =============================================================================
* Description : Transpose 8x8 block with halfword elements in vectors.
* Arguments : Inputs - _in0, _in1, ~
* Outputs - _out0, _out1, ~
- * Details : The rows of the matrix become columns, and the columns become rows.
- * Example : LASX_TRANSPOSE8x8_H
- * _in0 : 1,2,3,4, 5,6,7,8, 1,2,3,4, 5,6,7,8
- * _in1 : 8,2,3,4, 5,6,7,8, 8,2,3,4, 5,6,7,8
- * _in2 : 8,2,3,4, 5,6,7,8, 8,2,3,4, 5,6,7,8
- * _in3 : 1,2,3,4, 5,6,7,8, 1,2,3,4, 5,6,7,8
- * _in4 : 9,2,3,4, 5,6,7,8, 9,2,3,4, 5,6,7,8
- * _in5 : 1,2,3,4, 5,6,7,8, 1,2,3,4, 5,6,7,8
- * _in6 : 1,2,3,4, 5,6,7,8, 1,2,3,4, 5,6,7,8
- * _in7 : 9,2,3,4, 5,6,7,8, 9,2,3,4, 5,6,7,8
+ * Details : The rows of the matrix become columns, and the columns become
+ * rows. Example : LASX_TRANSPOSE8x8_H _in0 : 1,2,3,4, 5,6,7,8, 1,2,3,4,
+ * 5,6,7,8 _in1 : 8,2,3,4, 5,6,7,8, 8,2,3,4, 5,6,7,8 _in2 : 8,2,3,4, 5,6,7,8,
+ * 8,2,3,4, 5,6,7,8 _in3 : 1,2,3,4, 5,6,7,8, 1,2,3,4, 5,6,7,8 _in4 : 9,2,3,4,
+ * 5,6,7,8, 9,2,3,4, 5,6,7,8 _in5 : 1,2,3,4, 5,6,7,8, 1,2,3,4, 5,6,7,8 _in6 :
+ * 1,2,3,4, 5,6,7,8, 1,2,3,4, 5,6,7,8 _in7 : 9,2,3,4, 5,6,7,8, 9,2,3,4, 5,6,7,8
*
* _out0 : 1,8,8,1, 9,1,1,9, 1,8,8,1, 9,1,1,9
* _out1 : 2,2,2,2, 2,2,2,2, 2,2,2,2, 2,2,2,2
@@ -1689,40 +1668,41 @@ static inline __m256i __lasx_xvsplati_h_h(__m256i in, int idx)
* _out7 : 8,8,8,8, 8,8,8,8, 8,8,8,8, 8,8,8,8
* =============================================================================
*/
-#define LASX_TRANSPOSE8x8_H(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, _out0, \
- _out1, _out2, _out3, _out4, _out5, _out6, _out7) \
-{ \
- __m256i _s0_m, _s1_m; \
- __m256i _tmp0_m, _tmp1_m, _tmp2_m, _tmp3_m; \
- __m256i _tmp4_m, _tmp5_m, _tmp6_m, _tmp7_m; \
- \
- _s0_m = __lasx_xvilvl_h(_in6, _in4); \
- _s1_m = __lasx_xvilvl_h(_in7, _in5); \
- _tmp0_m = __lasx_xvilvl_h(_s1_m, _s0_m); \
- _tmp1_m = __lasx_xvilvh_h(_s1_m, _s0_m); \
- _s0_m = __lasx_xvilvh_h(_in6, _in4); \
- _s1_m = __lasx_xvilvh_h(_in7, _in5); \
- _tmp2_m = __lasx_xvilvl_h(_s1_m, _s0_m); \
- _tmp3_m = __lasx_xvilvh_h(_s1_m, _s0_m); \
- \
- _s0_m = __lasx_xvilvl_h(_in2, _in0); \
- _s1_m = __lasx_xvilvl_h(_in3, _in1); \
- _tmp4_m = __lasx_xvilvl_h(_s1_m, _s0_m); \
- _tmp5_m = __lasx_xvilvh_h(_s1_m, _s0_m); \
- _s0_m = __lasx_xvilvh_h(_in2, _in0); \
- _s1_m = __lasx_xvilvh_h(_in3, _in1); \
- _tmp6_m = __lasx_xvilvl_h(_s1_m, _s0_m); \
- _tmp7_m = __lasx_xvilvh_h(_s1_m, _s0_m); \
- \
- _out0 = __lasx_xvpickev_d(_tmp0_m, _tmp4_m); \
- _out2 = __lasx_xvpickev_d(_tmp1_m, _tmp5_m); \
- _out4 = __lasx_xvpickev_d(_tmp2_m, _tmp6_m); \
- _out6 = __lasx_xvpickev_d(_tmp3_m, _tmp7_m); \
- _out1 = __lasx_xvpickod_d(_tmp0_m, _tmp4_m); \
- _out3 = __lasx_xvpickod_d(_tmp1_m, _tmp5_m); \
- _out5 = __lasx_xvpickod_d(_tmp2_m, _tmp6_m); \
- _out7 = __lasx_xvpickod_d(_tmp3_m, _tmp7_m); \
-}
+#define LASX_TRANSPOSE8x8_H(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \
+ _out0, _out1, _out2, _out3, _out4, _out5, _out6, \
+ _out7) \
+ { \
+ __m256i _s0_m, _s1_m; \
+ __m256i _tmp0_m, _tmp1_m, _tmp2_m, _tmp3_m; \
+ __m256i _tmp4_m, _tmp5_m, _tmp6_m, _tmp7_m; \
+ \
+ _s0_m = __lasx_xvilvl_h(_in6, _in4); \
+ _s1_m = __lasx_xvilvl_h(_in7, _in5); \
+ _tmp0_m = __lasx_xvilvl_h(_s1_m, _s0_m); \
+ _tmp1_m = __lasx_xvilvh_h(_s1_m, _s0_m); \
+ _s0_m = __lasx_xvilvh_h(_in6, _in4); \
+ _s1_m = __lasx_xvilvh_h(_in7, _in5); \
+ _tmp2_m = __lasx_xvilvl_h(_s1_m, _s0_m); \
+ _tmp3_m = __lasx_xvilvh_h(_s1_m, _s0_m); \
+ \
+ _s0_m = __lasx_xvilvl_h(_in2, _in0); \
+ _s1_m = __lasx_xvilvl_h(_in3, _in1); \
+ _tmp4_m = __lasx_xvilvl_h(_s1_m, _s0_m); \
+ _tmp5_m = __lasx_xvilvh_h(_s1_m, _s0_m); \
+ _s0_m = __lasx_xvilvh_h(_in2, _in0); \
+ _s1_m = __lasx_xvilvh_h(_in3, _in1); \
+ _tmp6_m = __lasx_xvilvl_h(_s1_m, _s0_m); \
+ _tmp7_m = __lasx_xvilvh_h(_s1_m, _s0_m); \
+ \
+ _out0 = __lasx_xvpickev_d(_tmp0_m, _tmp4_m); \
+ _out2 = __lasx_xvpickev_d(_tmp1_m, _tmp5_m); \
+ _out4 = __lasx_xvpickev_d(_tmp2_m, _tmp6_m); \
+ _out6 = __lasx_xvpickev_d(_tmp3_m, _tmp7_m); \
+ _out1 = __lasx_xvpickod_d(_tmp0_m, _tmp4_m); \
+ _out3 = __lasx_xvpickod_d(_tmp1_m, _tmp5_m); \
+ _out5 = __lasx_xvpickod_d(_tmp2_m, _tmp6_m); \
+ _out7 = __lasx_xvpickod_d(_tmp3_m, _tmp7_m); \
+ }
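For readers unfamiliar with the interleave-based transposes, the macros above are the lane-parallel form of a plain row/column swap. A scalar reference for one 8x8 halfword block (a sketch for illustration only, not part of this change):

#include <stdint.h>

// Reference-only sketch: what the 8x8 halfword transpose computes, one element
// at a time. The SIMD macros produce the same result with interleaves/picks.
static void Transpose8x8_H_Reference(const int16_t src[8][8],
                                     int16_t dst[8][8]) {
  int r, c;
  for (r = 0; r < 8; ++r) {
    for (c = 0; c < 8; ++c) {
      dst[c][r] = src[r][c];  // Rows become columns; columns become rows.
    }
  }
}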
/*
* =============================================================================
@@ -1737,34 +1717,34 @@ static inline __m256i __lasx_xvsplati_h_h(__m256i in, int idx)
* _out3 = _in0 - _in3;
* =============================================================================
*/
-#define LASX_BUTTERFLY_4_B(_in0, _in1, _in2, _in3, _out0, _out1, _out2, _out3) \
-{ \
- _out0 = __lasx_xvadd_b(_in0, _in3); \
- _out1 = __lasx_xvadd_b(_in1, _in2); \
- _out2 = __lasx_xvsub_b(_in1, _in2); \
- _out3 = __lasx_xvsub_b(_in0, _in3); \
-}
-#define LASX_BUTTERFLY_4_H(_in0, _in1, _in2, _in3, _out0, _out1, _out2, _out3) \
-{ \
- _out0 = __lasx_xvadd_h(_in0, _in3); \
- _out1 = __lasx_xvadd_h(_in1, _in2); \
- _out2 = __lasx_xvsub_h(_in1, _in2); \
- _out3 = __lasx_xvsub_h(_in0, _in3); \
-}
-#define LASX_BUTTERFLY_4_W(_in0, _in1, _in2, _in3, _out0, _out1, _out2, _out3) \
-{ \
- _out0 = __lasx_xvadd_w(_in0, _in3); \
- _out1 = __lasx_xvadd_w(_in1, _in2); \
- _out2 = __lasx_xvsub_w(_in1, _in2); \
- _out3 = __lasx_xvsub_w(_in0, _in3); \
-}
-#define LASX_BUTTERFLY_4_D(_in0, _in1, _in2, _in3, _out0, _out1, _out2, _out3) \
-{ \
- _out0 = __lasx_xvadd_d(_in0, _in3); \
- _out1 = __lasx_xvadd_d(_in1, _in2); \
- _out2 = __lasx_xvsub_d(_in1, _in2); \
- _out3 = __lasx_xvsub_d(_in0, _in3); \
-}
+#define LASX_BUTTERFLY_4_B(_in0, _in1, _in2, _in3, _out0, _out1, _out2, _out3) \
+ { \
+ _out0 = __lasx_xvadd_b(_in0, _in3); \
+ _out1 = __lasx_xvadd_b(_in1, _in2); \
+ _out2 = __lasx_xvsub_b(_in1, _in2); \
+ _out3 = __lasx_xvsub_b(_in0, _in3); \
+ }
+#define LASX_BUTTERFLY_4_H(_in0, _in1, _in2, _in3, _out0, _out1, _out2, _out3) \
+ { \
+ _out0 = __lasx_xvadd_h(_in0, _in3); \
+ _out1 = __lasx_xvadd_h(_in1, _in2); \
+ _out2 = __lasx_xvsub_h(_in1, _in2); \
+ _out3 = __lasx_xvsub_h(_in0, _in3); \
+ }
+#define LASX_BUTTERFLY_4_W(_in0, _in1, _in2, _in3, _out0, _out1, _out2, _out3) \
+ { \
+ _out0 = __lasx_xvadd_w(_in0, _in3); \
+ _out1 = __lasx_xvadd_w(_in1, _in2); \
+ _out2 = __lasx_xvsub_w(_in1, _in2); \
+ _out3 = __lasx_xvsub_w(_in0, _in3); \
+ }
+#define LASX_BUTTERFLY_4_D(_in0, _in1, _in2, _in3, _out0, _out1, _out2, _out3) \
+ { \
+ _out0 = __lasx_xvadd_d(_in0, _in3); \
+ _out1 = __lasx_xvadd_d(_in1, _in2); \
+ _out2 = __lasx_xvsub_d(_in1, _in2); \
+ _out3 = __lasx_xvsub_d(_in0, _in3); \
+ }
/*
* =============================================================================
@@ -1783,59 +1763,63 @@ static inline __m256i __lasx_xvsplati_h_h(__m256i in, int idx)
* _out7 = _in0 - _in7;
* =============================================================================
*/
-#define LASX_BUTTERFLY_8_B(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \
- _out0, _out1, _out2, _out3, _out4, _out5, _out6, _out7)\
-{ \
- _out0 = __lasx_xvadd_b(_in0, _in7); \
- _out1 = __lasx_xvadd_b(_in1, _in6); \
- _out2 = __lasx_xvadd_b(_in2, _in5); \
- _out3 = __lasx_xvadd_b(_in3, _in4); \
- _out4 = __lasx_xvsub_b(_in3, _in4); \
- _out5 = __lasx_xvsub_b(_in2, _in5); \
- _out6 = __lasx_xvsub_b(_in1, _in6); \
- _out7 = __lasx_xvsub_b(_in0, _in7); \
-}
-
-#define LASX_BUTTERFLY_8_H(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \
- _out0, _out1, _out2, _out3, _out4, _out5, _out6, _out7)\
-{ \
- _out0 = __lasx_xvadd_h(_in0, _in7); \
- _out1 = __lasx_xvadd_h(_in1, _in6); \
- _out2 = __lasx_xvadd_h(_in2, _in5); \
- _out3 = __lasx_xvadd_h(_in3, _in4); \
- _out4 = __lasx_xvsub_h(_in3, _in4); \
- _out5 = __lasx_xvsub_h(_in2, _in5); \
- _out6 = __lasx_xvsub_h(_in1, _in6); \
- _out7 = __lasx_xvsub_h(_in0, _in7); \
-}
-
-#define LASX_BUTTERFLY_8_W(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \
- _out0, _out1, _out2, _out3, _out4, _out5, _out6, _out7)\
-{ \
- _out0 = __lasx_xvadd_w(_in0, _in7); \
- _out1 = __lasx_xvadd_w(_in1, _in6); \
- _out2 = __lasx_xvadd_w(_in2, _in5); \
- _out3 = __lasx_xvadd_w(_in3, _in4); \
- _out4 = __lasx_xvsub_w(_in3, _in4); \
- _out5 = __lasx_xvsub_w(_in2, _in5); \
- _out6 = __lasx_xvsub_w(_in1, _in6); \
- _out7 = __lasx_xvsub_w(_in0, _in7); \
-}
-
-#define LASX_BUTTERFLY_8_D(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \
- _out0, _out1, _out2, _out3, _out4, _out5, _out6, _out7)\
-{ \
- _out0 = __lasx_xvadd_d(_in0, _in7); \
- _out1 = __lasx_xvadd_d(_in1, _in6); \
- _out2 = __lasx_xvadd_d(_in2, _in5); \
- _out3 = __lasx_xvadd_d(_in3, _in4); \
- _out4 = __lasx_xvsub_d(_in3, _in4); \
- _out5 = __lasx_xvsub_d(_in2, _in5); \
- _out6 = __lasx_xvsub_d(_in1, _in6); \
- _out7 = __lasx_xvsub_d(_in0, _in7); \
-}
-
-#endif //LASX
+#define LASX_BUTTERFLY_8_B(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \
+ _out0, _out1, _out2, _out3, _out4, _out5, _out6, \
+ _out7) \
+ { \
+ _out0 = __lasx_xvadd_b(_in0, _in7); \
+ _out1 = __lasx_xvadd_b(_in1, _in6); \
+ _out2 = __lasx_xvadd_b(_in2, _in5); \
+ _out3 = __lasx_xvadd_b(_in3, _in4); \
+ _out4 = __lasx_xvsub_b(_in3, _in4); \
+ _out5 = __lasx_xvsub_b(_in2, _in5); \
+ _out6 = __lasx_xvsub_b(_in1, _in6); \
+ _out7 = __lasx_xvsub_b(_in0, _in7); \
+ }
+
+#define LASX_BUTTERFLY_8_H(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \
+ _out0, _out1, _out2, _out3, _out4, _out5, _out6, \
+ _out7) \
+ { \
+ _out0 = __lasx_xvadd_h(_in0, _in7); \
+ _out1 = __lasx_xvadd_h(_in1, _in6); \
+ _out2 = __lasx_xvadd_h(_in2, _in5); \
+ _out3 = __lasx_xvadd_h(_in3, _in4); \
+ _out4 = __lasx_xvsub_h(_in3, _in4); \
+ _out5 = __lasx_xvsub_h(_in2, _in5); \
+ _out6 = __lasx_xvsub_h(_in1, _in6); \
+ _out7 = __lasx_xvsub_h(_in0, _in7); \
+ }
+
+#define LASX_BUTTERFLY_8_W(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \
+ _out0, _out1, _out2, _out3, _out4, _out5, _out6, \
+ _out7) \
+ { \
+ _out0 = __lasx_xvadd_w(_in0, _in7); \
+ _out1 = __lasx_xvadd_w(_in1, _in6); \
+ _out2 = __lasx_xvadd_w(_in2, _in5); \
+ _out3 = __lasx_xvadd_w(_in3, _in4); \
+ _out4 = __lasx_xvsub_w(_in3, _in4); \
+ _out5 = __lasx_xvsub_w(_in2, _in5); \
+ _out6 = __lasx_xvsub_w(_in1, _in6); \
+ _out7 = __lasx_xvsub_w(_in0, _in7); \
+ }
+
+#define LASX_BUTTERFLY_8_D(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \
+ _out0, _out1, _out2, _out3, _out4, _out5, _out6, \
+ _out7) \
+ { \
+ _out0 = __lasx_xvadd_d(_in0, _in7); \
+ _out1 = __lasx_xvadd_d(_in1, _in6); \
+ _out2 = __lasx_xvadd_d(_in2, _in5); \
+ _out3 = __lasx_xvadd_d(_in3, _in4); \
+ _out4 = __lasx_xvsub_d(_in3, _in4); \
+ _out5 = __lasx_xvsub_d(_in2, _in5); \
+ _out6 = __lasx_xvsub_d(_in1, _in6); \
+ _out7 = __lasx_xvsub_d(_in0, _in7); \
+ }
+
+#endif // LASX
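The BUTTERFLY_4/_8 macros above compute per-element sums and differences of mirrored input pairs, exactly as listed in their descriptions. A scalar sketch of the 4-input halfword case (illustration only):

#include <stdint.h>

// Sketch: per-element effect of LASX_BUTTERFLY_4_H on one lane.
static void Butterfly4_Reference(int16_t in0, int16_t in1, int16_t in2,
                                 int16_t in3, int16_t out[4]) {
  out[0] = (int16_t)(in0 + in3);
  out[1] = (int16_t)(in1 + in2);
  out[2] = (int16_t)(in1 - in2);
  out[3] = (int16_t)(in0 - in3);
}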
/*
* =============================================================================
@@ -1848,15 +1832,15 @@ static inline __m256i __lasx_xvsplati_h_h(__m256i in, int idx)
* VP:1,2,3,4,
* =============================================================================
*/
-#define VECT_PRINT(RTYPE, element_num, in0, enter) \
-{ \
- RTYPE _tmp0 = (RTYPE)in0; \
- int _i = 0; \
- if (enter) \
- printf("\nVP:"); \
- for(_i = 0; _i < element_num; _i++) \
- printf("%d,",_tmp0[_i]); \
-}
+#define VECT_PRINT(RTYPE, element_num, in0, enter) \
+ { \
+ RTYPE _tmp0 = (RTYPE)in0; \
+ int _i = 0; \
+ if (enter) \
+ printf("\nVP:"); \
+ for (_i = 0; _i < element_num; _i++) \
+ printf("%d,", _tmp0[_i]); \
+ }
#endif /* LOONGSON_INTRINSICS_H */
#endif /* INCLUDE_LIBYUV_LOONGSON_INTRINSICS_H */
diff --git a/include/libyuv/planar_functions.h b/include/libyuv/planar_functions.h
index def773cb..47f3446a 100644
--- a/include/libyuv/planar_functions.h
+++ b/include/libyuv/planar_functions.h
@@ -83,6 +83,16 @@ void SetPlane(uint8_t* dst_y,
int height,
uint32_t value);
+// Convert a plane of 16 x H tiles to a linear plane.
+LIBYUV_API
+void DetilePlane(const uint8_t* src_y,
+ int src_stride_y,
+ uint8_t* dst_y,
+ int dst_stride_y,
+ int width,
+ int height,
+ int tile_height);
+
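A hedged usage sketch for the new DetilePlane entry point; the plane geometry below (640x480, 32-row tiles) is illustrative only and not taken from this change:

#include "libyuv/planar_functions.h"

// Convert a 640x480 tiled Y plane (16-wide tiles, tile_height 32) to linear.
// Both strides are 640 bytes here; tile_height is 16 or 32 for MM21.
void DetileLumaExample(const uint8_t* tiled_y, uint8_t* linear_y) {
  DetilePlane(tiled_y, 640, linear_y, 640, 640, 480, 32);
}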
// Split interleaved UV plane into separate U and V planes.
LIBYUV_API
void SplitUVPlane(const uint8_t* src_uv,
diff --git a/include/libyuv/row.h b/include/libyuv/row.h
index 1cc40298..12e53dbe 100644
--- a/include/libyuv/row.h
+++ b/include/libyuv/row.h
@@ -400,8 +400,8 @@ extern "C" {
// The following are available for AVX512 clang x64 platforms:
// TODO(fbarchard): Port to x86
-#if !defined(LIBYUV_DISABLE_X86) && \
- defined(__x86_64__) && (defined(CLANG_HAS_AVX512))
+#if !defined(LIBYUV_DISABLE_X86) && defined(__x86_64__) && \
+ (defined(CLANG_HAS_AVX512))
#define HAS_I422TOARGBROW_AVX512BW
#endif
@@ -536,7 +536,7 @@ extern "C" {
#define HAS_SCALESUMSAMPLES_NEON
#define HAS_GAUSSROW_F32_NEON
#define HAS_GAUSSCOL_F32_NEON
-
+#define HAS_DETILEROW_NEON
#endif
#if !defined(LIBYUV_DISABLE_MSA) && defined(__mips_msa)
#define HAS_ABGRTOUVROW_MSA
@@ -1768,7 +1768,9 @@ void ARGBMirrorRow_Any_NEON(const uint8_t* src_ptr,
uint8_t* dst_ptr,
int width);
void ARGBMirrorRow_Any_MSA(const uint8_t* src_ptr, uint8_t* dst_ptr, int width);
-void ARGBMirrorRow_Any_LASX(const uint8_t* src_ptr, uint8_t* dst_ptr, int width);
+void ARGBMirrorRow_Any_LASX(const uint8_t* src_ptr,
+ uint8_t* dst_ptr,
+ int width);
void RGB24MirrorRow_SSSE3(const uint8_t* src_rgb24,
uint8_t* dst_rgb24,
@@ -1828,7 +1830,15 @@ void SplitUVRow_Any_LSX(const uint8_t* src_ptr,
uint8_t* dst_u,
uint8_t* dst_v,
int width);
+void DetileRow_C(const uint8_t* src,
+ ptrdiff_t src_tile_stride,
+ uint8_t* dst,
+ int width);
+void DetileRow_NEON(const uint8_t* src,
+ ptrdiff_t src_tile_stride,
+ uint8_t* dst,
+ int width);
void MergeUVRow_C(const uint8_t* src_u,
const uint8_t* src_v,
uint8_t* dst_uv,
@@ -2802,7 +2812,6 @@ void ARGBToARGB4444Row_LASX(const uint8_t* src_argb,
uint8_t* dst_rgb,
int width);
-
void ARGBToRGBARow_C(const uint8_t* src_argb, uint8_t* dst_rgb, int width);
void ARGBToRGB24Row_C(const uint8_t* src_argb, uint8_t* dst_rgb, int width);
void ARGBToRAWRow_C(const uint8_t* src_argb, uint8_t* dst_rgb, int width);
@@ -4097,7 +4106,6 @@ void ARGBToARGB4444Row_Any_LASX(const uint8_t* src_ptr,
uint8_t* dst_ptr,
int width);
-
void I444ToARGBRow_Any_NEON(const uint8_t* y_buf,
const uint8_t* u_buf,
const uint8_t* v_buf,
@@ -4878,7 +4886,6 @@ void ARGBQuantizeRow_LSX(uint8_t* dst_argb,
int interval_offset,
int width);
-
void ARGBShadeRow_C(const uint8_t* src_argb,
uint8_t* dst_argb,
int width,
@@ -4912,7 +4919,6 @@ void ComputeCumulativeSumRow_SSE2(const uint8_t* row,
const int32_t* previous_cumsum,
int width);
-
void CumulativeSumToAverageRow_C(const int32_t* tl,
const int32_t* bl,
int w,
@@ -5259,7 +5265,6 @@ float ScaleSumSamples_NEON(const float* src,
void ScaleSamples_C(const float* src, float* dst, float scale, int width);
void ScaleSamples_NEON(const float* src, float* dst, float scale, int width);
-
void GaussRow_F32_NEON(const float* src, float* dst, int width);
void GaussRow_F32_C(const float* src, float* dst, int width);
diff --git a/include/libyuv/scale_row.h b/include/libyuv/scale_row.h
index 05d8406a..682b3342 100644
--- a/include/libyuv/scale_row.h
+++ b/include/libyuv/scale_row.h
@@ -1564,7 +1564,6 @@ void ScaleRowDown34_1_Box_Any_MSA(const uint8_t* src_ptr,
uint8_t* dst_ptr,
int dst_width);
-
void ScaleRowDown2_LSX(const uint8_t* src_ptr,
ptrdiff_t src_stride,
uint8_t* dst,
diff --git a/include/libyuv/version.h b/include/libyuv/version.h
index 30e0a9dd..acb50f9e 100644
--- a/include/libyuv/version.h
+++ b/include/libyuv/version.h
@@ -11,6 +11,6 @@
#ifndef INCLUDE_LIBYUV_VERSION_H_
#define INCLUDE_LIBYUV_VERSION_H_
-#define LIBYUV_VERSION 1809
+#define LIBYUV_VERSION 1810
#endif  // INCLUDE_LIBYUV_VERSION_H_
\ No newline at end of file
diff --git a/source/convert.cc b/source/convert.cc
index c7c48c62..b54f88b7 100644
--- a/source/convert.cc
+++ b/source/convert.cc
@@ -2448,8 +2448,7 @@ int RGB565ToI420(const uint8_t* src_rgb565,
}
}
// MSA version does direct RGB565 to YUV.
-#elif (defined(HAS_RGB565TOYROW_MSA) \
- || defined(HAS_RGB565TOYROW_LSX))
+#elif (defined(HAS_RGB565TOYROW_MSA) || defined(HAS_RGB565TOYROW_LSX))
#if defined(HAS_RGB565TOYROW_MSA) && defined(HAS_RGB565TOUVROW_MSA)
if (TestCpuFlag(kCpuHasMSA)) {
RGB565ToUVRow = RGB565ToUVRow_Any_MSA;
diff --git a/source/convert_argb.cc b/source/convert_argb.cc
index 598aabbd..f23b5d11 100644
--- a/source/convert_argb.cc
+++ b/source/convert_argb.cc
@@ -90,7 +90,8 @@ int I420ToARGBMatrix(const uint8_t* src_y,
}
#endif
#if defined(HAS_I422TOARGBROW_AVX512BW)
- if (TestCpuFlag(kCpuHasAVX512BW | kCpuHasAVX512VL) == (kCpuHasAVX512BW | kCpuHasAVX512VL)) {
+ if (TestCpuFlag(kCpuHasAVX512BW | kCpuHasAVX512VL) ==
+ (kCpuHasAVX512BW | kCpuHasAVX512VL)) {
I422ToARGBRow = I422ToARGBRow_Any_AVX512BW;
if (IS_ALIGNED(width, 32)) {
I422ToARGBRow = I422ToARGBRow_AVX512BW;
@@ -329,7 +330,8 @@ int I422ToARGBMatrix(const uint8_t* src_y,
}
#endif
#if defined(HAS_I422TOARGBROW_AVX512BW)
- if (TestCpuFlag(kCpuHasAVX512BW | kCpuHasAVX512VL) == (kCpuHasAVX512BW | kCpuHasAVX512VL)) {
+ if (TestCpuFlag(kCpuHasAVX512BW | kCpuHasAVX512VL) ==
+ (kCpuHasAVX512BW | kCpuHasAVX512VL)) {
I422ToARGBRow = I422ToARGBRow_Any_AVX512BW;
if (IS_ALIGNED(width, 32)) {
I422ToARGBRow = I422ToARGBRow_AVX512BW;
@@ -5094,7 +5096,8 @@ int I420ToRGB565Dither(const uint8_t* src_y,
}
#endif
#if defined(HAS_I422TOARGBROW_AVX512BW)
- if (TestCpuFlag(kCpuHasAVX512BW | kCpuHasAVX512VL) == (kCpuHasAVX512BW | kCpuHasAVX512VL)) {
+ if (TestCpuFlag(kCpuHasAVX512BW | kCpuHasAVX512VL) ==
+ (kCpuHasAVX512BW | kCpuHasAVX512VL)) {
I422ToARGBRow = I422ToARGBRow_Any_AVX512BW;
if (IS_ALIGNED(width, 32)) {
I422ToARGBRow = I422ToARGBRow_AVX512BW;
diff --git a/source/cpu_id.cc b/source/cpu_id.cc
index 6f66446b..39744384 100644
--- a/source/cpu_id.cc
+++ b/source/cpu_id.cc
@@ -193,25 +193,21 @@ LIBYUV_API SAFEBUFFERS int MipsCpuCaps(const char* cpuinfo_name) {
// TODO(fbarchard): Consider read_loongarch_ir().
#define LOONGARCH_CFG2 0x2
-#define LOONGARCH_CFG2_LSX (1 << 6)
-#define LOONGARCH_CFG2_LASX (1 << 7)
+#define LOONGARCH_CFG2_LSX (1 << 6)
+#define LOONGARCH_CFG2_LASX (1 << 7)
#if defined(__loongarch__) && defined(__linux__)
LIBYUV_API SAFEBUFFERS int LoongarchCpuCaps(void) {
int flag = 0x0;
uint32_t cfg2 = 0;
- __asm__ volatile(
- "cpucfg %0, %1 \n\t"
- : "+&r"(cfg2)
- : "r"(LOONGARCH_CFG2)
- );
+ __asm__ volatile("cpucfg %0, %1 \n\t" : "+&r"(cfg2) : "r"(LOONGARCH_CFG2));
if (cfg2 & LOONGARCH_CFG2_LSX)
- flag |= kCpuHasLSX;
+ flag |= kCpuHasLSX;
if (cfg2 & LOONGARCH_CFG2_LASX)
- flag |= kCpuHasLASX;
+ flag |= kCpuHasLASX;
return flag;
}
#endif
diff --git a/source/planar_functions.cc b/source/planar_functions.cc
index 4147cfbb..d7cb8dc7 100644
--- a/source/planar_functions.cc
+++ b/source/planar_functions.cc
@@ -853,6 +853,53 @@ int NV21ToNV12(const uint8_t* src_y,
return 0;
}
+// Detile a plane of data.
+// A tile width of 16 is assumed.
+// tile_height is 16 or 32 for MM21.
+// src_stride_y is bytes per row of source, ignoring tiling. e.g. 640
+// TODO: Add more detile row functions.
+
+LIBYUV_API
+void DetilePlane(const uint8_t* src_y,
+ int src_stride_y,
+ uint8_t* dst_y,
+ int dst_stride_y,
+ int width,
+ int height,
+ int tile_height) {
+ const ptrdiff_t src_tile_stride = 16 * tile_height;
+ int y;
+ void (*DetileRow)(const uint8_t* src, ptrdiff_t src_tile_stride, uint8_t* dst,
+ int width) = DetileRow_C;
+ assert(src_stride_y >= 0);
+ assert(tile_height > 0);
+ assert(src_stride_y > 0);
+
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ dst_y = dst_y + (height - 1) * dst_stride_y;
+ dst_stride_y = -dst_stride_y;
+ }
+
+#if defined(HAS_DETILEROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON) && IS_ALIGNED(width, 16)) {
+ DetileRow = DetileRow_NEON;
+ }
+#endif
+
+ // Detile plane
+ for (y = 0; y < height; ++y) {
+ DetileRow(src_y, src_tile_stride, dst_y, width);
+ dst_y += dst_stride_y;
+ src_y += 16;
+ // Advance to next row of tiles.
+ if ((y & (tile_height - 1)) == (tile_height - 1)) {
+ src_y = src_y - src_tile_stride + src_stride_y * tile_height;
+ }
+ }
+}
+
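The loop above walks the tiled source incrementally; equivalently, the byte offset of linear pixel (x, y) in a 16-wide tiled plane can be computed directly. A sketch of that arithmetic (illustration only, consistent with the loop above):

#include <stddef.h>
#include <stdint.h>

// Offset of linear pixel (x, y) within a plane stored as 16 x tile_height
// tiles, laid out tile-row by tile-row. src_stride_y ignores tiling.
static size_t TiledOffset(int x, int y, int src_stride_y, int tile_height) {
  size_t tile_row = (size_t)(y / tile_height) * src_stride_y * tile_height;
  size_t tile_col = (size_t)(x / 16) * 16 * tile_height;
  return tile_row + tile_col + (size_t)(y % tile_height) * 16 + (x % 16);
}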
// Support function for NV12 etc RGB channels.
// Width and height are plane sizes (typically half pixel width).
LIBYUV_API
diff --git a/source/rotate_lsx.cc b/source/rotate_lsx.cc
index 0a288b28..94a2b91c 100644
--- a/source/rotate_lsx.cc
+++ b/source/rotate_lsx.cc
@@ -20,28 +20,28 @@ namespace libyuv {
extern "C" {
#endif
-#define ILVLH_B(in0, in1, in2, in3, out0, out1, out2, out3) \
- { \
- DUP2_ARG2(__lsx_vilvl_b, in1, in0, in3, in2, out0, out2); \
- DUP2_ARG2(__lsx_vilvh_b, in1, in0, in3, in2, out1, out3); \
+#define ILVLH_B(in0, in1, in2, in3, out0, out1, out2, out3) \
+ { \
+ DUP2_ARG2(__lsx_vilvl_b, in1, in0, in3, in2, out0, out2); \
+ DUP2_ARG2(__lsx_vilvh_b, in1, in0, in3, in2, out1, out3); \
}
-#define ILVLH_H(in0, in1, in2, in3, out0, out1, out2, out3) \
- { \
- DUP2_ARG2(__lsx_vilvl_h, in1, in0, in3, in2, out0, out2); \
- DUP2_ARG2(__lsx_vilvh_h, in1, in0, in3, in2, out1, out3); \
+#define ILVLH_H(in0, in1, in2, in3, out0, out1, out2, out3) \
+ { \
+ DUP2_ARG2(__lsx_vilvl_h, in1, in0, in3, in2, out0, out2); \
+ DUP2_ARG2(__lsx_vilvh_h, in1, in0, in3, in2, out1, out3); \
}
-#define ILVLH_W(in0, in1, in2, in3, out0, out1, out2, out3) \
- { \
- DUP2_ARG2(__lsx_vilvl_w, in1, in0, in3, in2, out0, out2); \
- DUP2_ARG2(__lsx_vilvh_w, in1, in0, in3, in2, out1, out3); \
+#define ILVLH_W(in0, in1, in2, in3, out0, out1, out2, out3) \
+ { \
+ DUP2_ARG2(__lsx_vilvl_w, in1, in0, in3, in2, out0, out2); \
+ DUP2_ARG2(__lsx_vilvh_w, in1, in0, in3, in2, out1, out3); \
}
-#define ILVLH_D(in0, in1, in2, in3, out0, out1, out2, out3) \
- { \
- DUP2_ARG2(__lsx_vilvl_d, in1, in0, in3, in2, out0, out2); \
- DUP2_ARG2(__lsx_vilvh_d, in1, in0, in3, in2, out1, out3); \
+#define ILVLH_D(in0, in1, in2, in3, out0, out1, out2, out3) \
+ { \
+ DUP2_ARG2(__lsx_vilvl_d, in1, in0, in3, in2, out0, out2); \
+ DUP2_ARG2(__lsx_vilvh_d, in1, in0, in3, in2, out1, out3); \
}
#define LSX_ST_4(_dst0, _dst1, _dst2, _dst3, _dst, _stride, _stride2, \
@@ -54,11 +54,11 @@ extern "C" {
_dst += _stride4; \
}
-#define LSX_ST_2(_dst0, _dst1, _dst, _stride, _stride2) \
- { \
- __lsx_vst(_dst0, _dst, 0); \
- __lsx_vstx(_dst1, _dst, _stride); \
- _dst += _stride2; \
+#define LSX_ST_2(_dst0, _dst1, _dst, _stride, _stride2) \
+ { \
+ __lsx_vst(_dst0, _dst, 0); \
+ __lsx_vstx(_dst1, _dst, _stride); \
+ _dst += _stride2; \
}
void TransposeWx16_C(const uint8_t* src,
@@ -84,7 +84,6 @@ void TransposeUVWx16_C(const uint8_t* src,
dst_stride_a, (dst_b + 8), dst_stride_b, width);
}
-
void TransposeWx16_LSX(const uint8_t* src,
int src_stride,
uint8_t* dst,
@@ -92,7 +91,7 @@ void TransposeWx16_LSX(const uint8_t* src,
int width) {
int x;
int len = width / 16;
- uint8_t *s;
+ uint8_t* s;
int src_stride2 = src_stride << 1;
int src_stride3 = src_stride + src_stride2;
int src_stride4 = src_stride2 << 1;
@@ -139,23 +138,23 @@ void TransposeWx16_LSX(const uint8_t* src,
res8 = __lsx_vilvl_w(reg4, reg0);
res9 = __lsx_vilvh_w(reg4, reg0);
ILVLH_D(res0, res8, res1, res9, dst0, dst1, dst2, dst3);
- LSX_ST_4(dst0, dst1, dst2, dst3, dst, dst_stride, dst_stride2,
- dst_stride3, dst_stride4);
+ LSX_ST_4(dst0, dst1, dst2, dst3, dst, dst_stride, dst_stride2, dst_stride3,
+ dst_stride4);
res8 = __lsx_vilvl_w(reg5, reg1);
res9 = __lsx_vilvh_w(reg5, reg1);
ILVLH_D(res2, res8, res3, res9, dst0, dst1, dst2, dst3);
- LSX_ST_4(dst0, dst1, dst2, dst3, dst, dst_stride, dst_stride2,
- dst_stride3, dst_stride4);
+ LSX_ST_4(dst0, dst1, dst2, dst3, dst, dst_stride, dst_stride2, dst_stride3,
+ dst_stride4);
res8 = __lsx_vilvl_w(reg6, reg2);
res9 = __lsx_vilvh_w(reg6, reg2);
ILVLH_D(res4, res8, res5, res9, dst0, dst1, dst2, dst3);
- LSX_ST_4(dst0, dst1, dst2, dst3, dst, dst_stride, dst_stride2,
- dst_stride3, dst_stride4);
+ LSX_ST_4(dst0, dst1, dst2, dst3, dst, dst_stride, dst_stride2, dst_stride3,
+ dst_stride4);
res8 = __lsx_vilvl_w(reg7, reg3);
res9 = __lsx_vilvh_w(reg7, reg3);
ILVLH_D(res6, res8, res7, res9, dst0, dst1, dst2, dst3);
- LSX_ST_4(dst0, dst1, dst2, dst3, dst, dst_stride, dst_stride2,
- dst_stride3, dst_stride4);
+ LSX_ST_4(dst0, dst1, dst2, dst3, dst, dst_stride, dst_stride2, dst_stride3,
+ dst_stride4);
src += 16;
}
}
diff --git a/source/row_common.cc b/source/row_common.cc
index 0f7aa820..84b395b6 100644
--- a/source/row_common.cc
+++ b/source/row_common.cc
@@ -2659,6 +2659,21 @@ void RGB24MirrorRow_C(const uint8_t* src_rgb24, uint8_t* dst_rgb24, int width) {
}
}
+void DetileRow_C(const uint8_t* src,
+ ptrdiff_t src_tile_stride,
+ uint8_t* dst,
+ int width) {
+ int x;
+ for (x = 0; x < width - 15; x += 16) {
+ memcpy(dst, src, 16);
+ dst += 16;
+ src += src_tile_stride;
+ }
+ if (width & 15) {
+ memcpy(dst, src, width & 15);
+ }
+}
+
void SplitUVRow_C(const uint8_t* src_uv,
uint8_t* dst_u,
uint8_t* dst_v,
diff --git a/source/row_lasx.cc b/source/row_lasx.cc
index b9c7cc16..0d43714a 100644
--- a/source/row_lasx.cc
+++ b/source/row_lasx.cc
@@ -23,178 +23,176 @@ extern "C" {
#define ALPHA_VAL (-1)
// Fill YUV -> RGB conversion constants into vectors
-#define YUVTORGB_SETUP(yuvconst, ubvr, ugvg, yg, yb) \
- { \
- __m256i ub, vr, ug, vg; \
- \
- ub = __lasx_xvreplgr2vr_h(yuvconst->kUVToB[0]); \
- vr = __lasx_xvreplgr2vr_h(yuvconst->kUVToR[1]); \
- ug = __lasx_xvreplgr2vr_h(yuvconst->kUVToG[0]); \
- vg = __lasx_xvreplgr2vr_h(yuvconst->kUVToG[1]); \
- yg = __lasx_xvreplgr2vr_h(yuvconst->kYToRgb[0]); \
- yb = __lasx_xvreplgr2vr_w(yuvconst->kYBiasToRgb[0]); \
- ubvr = __lasx_xvilvl_h(ub, vr); \
- ugvg = __lasx_xvilvl_h(ug, vg); \
+#define YUVTORGB_SETUP(yuvconst, ubvr, ugvg, yg, yb) \
+ { \
+ __m256i ub, vr, ug, vg; \
+ \
+ ub = __lasx_xvreplgr2vr_h(yuvconst->kUVToB[0]); \
+ vr = __lasx_xvreplgr2vr_h(yuvconst->kUVToR[1]); \
+ ug = __lasx_xvreplgr2vr_h(yuvconst->kUVToG[0]); \
+ vg = __lasx_xvreplgr2vr_h(yuvconst->kUVToG[1]); \
+ yg = __lasx_xvreplgr2vr_h(yuvconst->kYToRgb[0]); \
+ yb = __lasx_xvreplgr2vr_w(yuvconst->kYBiasToRgb[0]); \
+ ubvr = __lasx_xvilvl_h(ub, vr); \
+ ugvg = __lasx_xvilvl_h(ug, vg); \
}
// Load 32 YUV422 pixel data
-#define READYUV422_D(psrc_y, psrc_u, psrc_v, out_y, uv_l, uv_h) \
- { \
- __m256i temp0, temp1; \
- \
- DUP2_ARG2(__lasx_xvld, psrc_y, 0, psrc_u, 0, out_y, temp0); \
- temp1 = __lasx_xvld(psrc_v, 0); \
- temp0 = __lasx_xvsub_b(temp0, const_0x80); \
- temp1 = __lasx_xvsub_b(temp1, const_0x80); \
- temp0 = __lasx_vext2xv_h_b(temp0); \
- temp1 = __lasx_vext2xv_h_b(temp1); \
- uv_l = __lasx_xvilvl_h(temp0, temp1); \
- uv_h = __lasx_xvilvh_h(temp0, temp1); \
+#define READYUV422_D(psrc_y, psrc_u, psrc_v, out_y, uv_l, uv_h) \
+ { \
+ __m256i temp0, temp1; \
+ \
+ DUP2_ARG2(__lasx_xvld, psrc_y, 0, psrc_u, 0, out_y, temp0); \
+ temp1 = __lasx_xvld(psrc_v, 0); \
+ temp0 = __lasx_xvsub_b(temp0, const_0x80); \
+ temp1 = __lasx_xvsub_b(temp1, const_0x80); \
+ temp0 = __lasx_vext2xv_h_b(temp0); \
+ temp1 = __lasx_vext2xv_h_b(temp1); \
+ uv_l = __lasx_xvilvl_h(temp0, temp1); \
+ uv_h = __lasx_xvilvh_h(temp0, temp1); \
}
// Load 16 YUV422 pixel data
-#define READYUV422(psrc_y, psrc_u, psrc_v, out_y, uv) \
- { \
- __m256i temp0, temp1; \
- \
- out_y = __lasx_xvld(psrc_y, 0); \
- temp0 = __lasx_xvldrepl_d(psrc_u, 0); \
- temp1 = __lasx_xvldrepl_d(psrc_v, 0); \
- uv = __lasx_xvilvl_b(temp0, temp1); \
- uv = __lasx_xvsub_b(uv, const_0x80); \
- uv = __lasx_vext2xv_h_b(uv); \
+#define READYUV422(psrc_y, psrc_u, psrc_v, out_y, uv) \
+ { \
+ __m256i temp0, temp1; \
+ \
+ out_y = __lasx_xvld(psrc_y, 0); \
+ temp0 = __lasx_xvldrepl_d(psrc_u, 0); \
+ temp1 = __lasx_xvldrepl_d(psrc_v, 0); \
+ uv = __lasx_xvilvl_b(temp0, temp1); \
+ uv = __lasx_xvsub_b(uv, const_0x80); \
+ uv = __lasx_vext2xv_h_b(uv); \
}
// Convert 16 pixels of YUV420 to RGB.
-#define YUVTORGB_D(in_y, in_uvl, in_uvh, ubvr, ugvg, \
- yg, yb, b_l, b_h, g_l, g_h, r_l, r_h) \
- { \
- __m256i u_l, u_h, v_l, v_h; \
- __m256i yl_ev, yl_od, yh_ev, yh_od; \
- __m256i temp0, temp1, temp2, temp3; \
- \
- temp0 = __lasx_xvilvl_b(in_y, in_y); \
- temp1 = __lasx_xvilvh_b(in_y, in_y); \
- yl_ev = __lasx_xvmulwev_w_hu_h(temp0, yg); \
- yl_od = __lasx_xvmulwod_w_hu_h(temp0, yg); \
- yh_ev = __lasx_xvmulwev_w_hu_h(temp1, yg); \
- yh_od = __lasx_xvmulwod_w_hu_h(temp1, yg); \
- DUP4_ARG2(__lasx_xvsrai_w, yl_ev, 16, yl_od, 16, yh_ev, 16, yh_od, 16, \
- yl_ev, yl_od, yh_ev, yh_od); \
- yl_ev = __lasx_xvadd_w(yl_ev, yb); \
- yl_od = __lasx_xvadd_w(yl_od, yb); \
- yh_ev = __lasx_xvadd_w(yh_ev, yb); \
- yh_od = __lasx_xvadd_w(yh_od, yb); \
- v_l = __lasx_xvmulwev_w_h(in_uvl, ubvr); \
- u_l = __lasx_xvmulwod_w_h(in_uvl, ubvr); \
- v_h = __lasx_xvmulwev_w_h(in_uvh, ubvr); \
- u_h = __lasx_xvmulwod_w_h(in_uvh, ubvr); \
- temp0 = __lasx_xvadd_w(yl_ev, u_l); \
- temp1 = __lasx_xvadd_w(yl_od, u_l); \
- temp2 = __lasx_xvadd_w(yh_ev, u_h); \
- temp3 = __lasx_xvadd_w(yh_od, u_h); \
- DUP4_ARG2(__lasx_xvsrai_w, temp0, 6, temp1, 6, temp2, 6, temp3, 6, \
- temp0, temp1, temp2, temp3); \
- DUP4_ARG1(__lasx_xvclip255_w, temp0, temp1, temp2, temp3, \
- temp0, temp1, temp2, temp3); \
- b_l = __lasx_xvpackev_h(temp1, temp0); \
- b_h = __lasx_xvpackev_h(temp3, temp2); \
- temp0 = __lasx_xvadd_w(yl_ev, v_l); \
- temp1 = __lasx_xvadd_w(yl_od, v_l); \
- temp2 = __lasx_xvadd_w(yh_ev, v_h); \
- temp3 = __lasx_xvadd_w(yh_od, v_h); \
- DUP4_ARG2(__lasx_xvsrai_w, temp0, 6, temp1, 6, temp2, 6, temp3, 6, \
- temp0, temp1, temp2, temp3); \
- DUP4_ARG1(__lasx_xvclip255_w, temp0, temp1, temp2, temp3, \
- temp0, temp1, temp2, temp3); \
- r_l = __lasx_xvpackev_h(temp1, temp0); \
- r_h = __lasx_xvpackev_h(temp3, temp2); \
- DUP2_ARG2(__lasx_xvdp2_w_h, in_uvl, ugvg, in_uvh, ugvg, u_l, u_h); \
- temp0 = __lasx_xvsub_w(yl_ev, u_l); \
- temp1 = __lasx_xvsub_w(yl_od, u_l); \
- temp2 = __lasx_xvsub_w(yh_ev, u_h); \
- temp3 = __lasx_xvsub_w(yh_od, u_h); \
- DUP4_ARG2(__lasx_xvsrai_w, temp0, 6, temp1, 6, temp2, 6, temp3, 6, \
- temp0, temp1, temp2, temp3); \
- DUP4_ARG1(__lasx_xvclip255_w, temp0, temp1, temp2, temp3, \
- temp0, temp1, temp2, temp3); \
- g_l = __lasx_xvpackev_h(temp1, temp0); \
- g_h = __lasx_xvpackev_h(temp3, temp2); \
+#define YUVTORGB_D(in_y, in_uvl, in_uvh, ubvr, ugvg, yg, yb, b_l, b_h, g_l, \
+ g_h, r_l, r_h) \
+ { \
+ __m256i u_l, u_h, v_l, v_h; \
+ __m256i yl_ev, yl_od, yh_ev, yh_od; \
+ __m256i temp0, temp1, temp2, temp3; \
+ \
+ temp0 = __lasx_xvilvl_b(in_y, in_y); \
+ temp1 = __lasx_xvilvh_b(in_y, in_y); \
+ yl_ev = __lasx_xvmulwev_w_hu_h(temp0, yg); \
+ yl_od = __lasx_xvmulwod_w_hu_h(temp0, yg); \
+ yh_ev = __lasx_xvmulwev_w_hu_h(temp1, yg); \
+ yh_od = __lasx_xvmulwod_w_hu_h(temp1, yg); \
+ DUP4_ARG2(__lasx_xvsrai_w, yl_ev, 16, yl_od, 16, yh_ev, 16, yh_od, 16, \
+ yl_ev, yl_od, yh_ev, yh_od); \
+ yl_ev = __lasx_xvadd_w(yl_ev, yb); \
+ yl_od = __lasx_xvadd_w(yl_od, yb); \
+ yh_ev = __lasx_xvadd_w(yh_ev, yb); \
+ yh_od = __lasx_xvadd_w(yh_od, yb); \
+ v_l = __lasx_xvmulwev_w_h(in_uvl, ubvr); \
+ u_l = __lasx_xvmulwod_w_h(in_uvl, ubvr); \
+ v_h = __lasx_xvmulwev_w_h(in_uvh, ubvr); \
+ u_h = __lasx_xvmulwod_w_h(in_uvh, ubvr); \
+ temp0 = __lasx_xvadd_w(yl_ev, u_l); \
+ temp1 = __lasx_xvadd_w(yl_od, u_l); \
+ temp2 = __lasx_xvadd_w(yh_ev, u_h); \
+ temp3 = __lasx_xvadd_w(yh_od, u_h); \
+ DUP4_ARG2(__lasx_xvsrai_w, temp0, 6, temp1, 6, temp2, 6, temp3, 6, temp0, \
+ temp1, temp2, temp3); \
+ DUP4_ARG1(__lasx_xvclip255_w, temp0, temp1, temp2, temp3, temp0, temp1, \
+ temp2, temp3); \
+ b_l = __lasx_xvpackev_h(temp1, temp0); \
+ b_h = __lasx_xvpackev_h(temp3, temp2); \
+ temp0 = __lasx_xvadd_w(yl_ev, v_l); \
+ temp1 = __lasx_xvadd_w(yl_od, v_l); \
+ temp2 = __lasx_xvadd_w(yh_ev, v_h); \
+ temp3 = __lasx_xvadd_w(yh_od, v_h); \
+ DUP4_ARG2(__lasx_xvsrai_w, temp0, 6, temp1, 6, temp2, 6, temp3, 6, temp0, \
+ temp1, temp2, temp3); \
+ DUP4_ARG1(__lasx_xvclip255_w, temp0, temp1, temp2, temp3, temp0, temp1, \
+ temp2, temp3); \
+ r_l = __lasx_xvpackev_h(temp1, temp0); \
+ r_h = __lasx_xvpackev_h(temp3, temp2); \
+ DUP2_ARG2(__lasx_xvdp2_w_h, in_uvl, ugvg, in_uvh, ugvg, u_l, u_h); \
+ temp0 = __lasx_xvsub_w(yl_ev, u_l); \
+ temp1 = __lasx_xvsub_w(yl_od, u_l); \
+ temp2 = __lasx_xvsub_w(yh_ev, u_h); \
+ temp3 = __lasx_xvsub_w(yh_od, u_h); \
+ DUP4_ARG2(__lasx_xvsrai_w, temp0, 6, temp1, 6, temp2, 6, temp3, 6, temp0, \
+ temp1, temp2, temp3); \
+ DUP4_ARG1(__lasx_xvclip255_w, temp0, temp1, temp2, temp3, temp0, temp1, \
+ temp2, temp3); \
+ g_l = __lasx_xvpackev_h(temp1, temp0); \
+ g_h = __lasx_xvpackev_h(temp3, temp2); \
}
// Convert 8 pixels of YUV420 to RGB.
-#define YUVTORGB(in_y, in_uv, ubvr, ugvg, \
- yg, yb, out_b, out_g, out_r) \
- { \
- __m256i u_l, v_l, yl_ev, yl_od; \
- __m256i temp0, temp1; \
- \
- in_y = __lasx_xvpermi_d(in_y, 0xD8); \
- temp0 = __lasx_xvilvl_b(in_y, in_y); \
- yl_ev = __lasx_xvmulwev_w_hu_h(temp0, yg); \
- yl_od = __lasx_xvmulwod_w_hu_h(temp0, yg); \
- DUP2_ARG2(__lasx_xvsrai_w, yl_ev, 16, yl_od, 16, yl_ev, yl_od); \
- yl_ev = __lasx_xvadd_w(yl_ev, yb); \
- yl_od = __lasx_xvadd_w(yl_od, yb); \
- v_l = __lasx_xvmulwev_w_h(in_uv, ubvr); \
- u_l = __lasx_xvmulwod_w_h(in_uv, ubvr); \
- temp0 = __lasx_xvadd_w(yl_ev, u_l); \
- temp1 = __lasx_xvadd_w(yl_od, u_l); \
- DUP2_ARG2(__lasx_xvsrai_w, temp0, 6, temp1, 6, temp0, temp1); \
- DUP2_ARG1(__lasx_xvclip255_w, temp0, temp1, temp0, temp1); \
- out_b = __lasx_xvpackev_h(temp1, temp0); \
- temp0 = __lasx_xvadd_w(yl_ev, v_l); \
- temp1 = __lasx_xvadd_w(yl_od, v_l); \
- DUP2_ARG2(__lasx_xvsrai_w, temp0, 6, temp1, 6, temp0, temp1); \
- DUP2_ARG1(__lasx_xvclip255_w, temp0, temp1, temp0, temp1); \
- out_r = __lasx_xvpackev_h(temp1, temp0); \
- u_l = __lasx_xvdp2_w_h(in_uv, ugvg); \
- temp0 = __lasx_xvsub_w(yl_ev, u_l); \
- temp1 = __lasx_xvsub_w(yl_od, u_l); \
- DUP2_ARG2(__lasx_xvsrai_w, temp0, 6, temp1, 6, temp0, temp1); \
- DUP2_ARG1(__lasx_xvclip255_w, temp0, temp1, temp0, temp1); \
- out_g = __lasx_xvpackev_h(temp1, temp0); \
+#define YUVTORGB(in_y, in_uv, ubvr, ugvg, yg, yb, out_b, out_g, out_r) \
+ { \
+ __m256i u_l, v_l, yl_ev, yl_od; \
+ __m256i temp0, temp1; \
+ \
+ in_y = __lasx_xvpermi_d(in_y, 0xD8); \
+ temp0 = __lasx_xvilvl_b(in_y, in_y); \
+ yl_ev = __lasx_xvmulwev_w_hu_h(temp0, yg); \
+ yl_od = __lasx_xvmulwod_w_hu_h(temp0, yg); \
+ DUP2_ARG2(__lasx_xvsrai_w, yl_ev, 16, yl_od, 16, yl_ev, yl_od); \
+ yl_ev = __lasx_xvadd_w(yl_ev, yb); \
+ yl_od = __lasx_xvadd_w(yl_od, yb); \
+ v_l = __lasx_xvmulwev_w_h(in_uv, ubvr); \
+ u_l = __lasx_xvmulwod_w_h(in_uv, ubvr); \
+ temp0 = __lasx_xvadd_w(yl_ev, u_l); \
+ temp1 = __lasx_xvadd_w(yl_od, u_l); \
+ DUP2_ARG2(__lasx_xvsrai_w, temp0, 6, temp1, 6, temp0, temp1); \
+ DUP2_ARG1(__lasx_xvclip255_w, temp0, temp1, temp0, temp1); \
+ out_b = __lasx_xvpackev_h(temp1, temp0); \
+ temp0 = __lasx_xvadd_w(yl_ev, v_l); \
+ temp1 = __lasx_xvadd_w(yl_od, v_l); \
+ DUP2_ARG2(__lasx_xvsrai_w, temp0, 6, temp1, 6, temp0, temp1); \
+ DUP2_ARG1(__lasx_xvclip255_w, temp0, temp1, temp0, temp1); \
+ out_r = __lasx_xvpackev_h(temp1, temp0); \
+ u_l = __lasx_xvdp2_w_h(in_uv, ugvg); \
+ temp0 = __lasx_xvsub_w(yl_ev, u_l); \
+ temp1 = __lasx_xvsub_w(yl_od, u_l); \
+ DUP2_ARG2(__lasx_xvsrai_w, temp0, 6, temp1, 6, temp0, temp1); \
+ DUP2_ARG1(__lasx_xvclip255_w, temp0, temp1, temp0, temp1); \
+ out_g = __lasx_xvpackev_h(temp1, temp0); \
}
// Pack and Store 16 ARGB values.
-#define STOREARGB_D(a_l, a_h, r_l, r_h, g_l, g_h, \
- b_l, b_h, pdst_argb) \
- { \
- __m256i temp0, temp1, temp2, temp3; \
- \
- temp0 = __lasx_xvpackev_b(g_l, b_l); \
- temp1 = __lasx_xvpackev_b(a_l, r_l); \
- temp2 = __lasx_xvpackev_b(g_h, b_h); \
- temp3 = __lasx_xvpackev_b(a_h, r_h); \
- r_l = __lasx_xvilvl_h(temp1, temp0); \
- r_h = __lasx_xvilvh_h(temp1, temp0); \
- g_l = __lasx_xvilvl_h(temp3, temp2); \
- g_h = __lasx_xvilvh_h(temp3, temp2); \
- temp0 = __lasx_xvpermi_q(r_h, r_l, 0x20); \
- temp1 = __lasx_xvpermi_q(g_h, g_l, 0x20); \
- temp2 = __lasx_xvpermi_q(r_h, r_l, 0x31); \
- temp3 = __lasx_xvpermi_q(g_h, g_l, 0x31); \
- __lasx_xvst(temp0, pdst_argb, 0); \
- __lasx_xvst(temp1, pdst_argb, 32); \
- __lasx_xvst(temp2, pdst_argb, 64); \
- __lasx_xvst(temp3, pdst_argb, 96); \
- pdst_argb += 128; \
+#define STOREARGB_D(a_l, a_h, r_l, r_h, g_l, g_h, b_l, b_h, pdst_argb) \
+ { \
+ __m256i temp0, temp1, temp2, temp3; \
+ \
+ temp0 = __lasx_xvpackev_b(g_l, b_l); \
+ temp1 = __lasx_xvpackev_b(a_l, r_l); \
+ temp2 = __lasx_xvpackev_b(g_h, b_h); \
+ temp3 = __lasx_xvpackev_b(a_h, r_h); \
+ r_l = __lasx_xvilvl_h(temp1, temp0); \
+ r_h = __lasx_xvilvh_h(temp1, temp0); \
+ g_l = __lasx_xvilvl_h(temp3, temp2); \
+ g_h = __lasx_xvilvh_h(temp3, temp2); \
+ temp0 = __lasx_xvpermi_q(r_h, r_l, 0x20); \
+ temp1 = __lasx_xvpermi_q(g_h, g_l, 0x20); \
+ temp2 = __lasx_xvpermi_q(r_h, r_l, 0x31); \
+ temp3 = __lasx_xvpermi_q(g_h, g_l, 0x31); \
+ __lasx_xvst(temp0, pdst_argb, 0); \
+ __lasx_xvst(temp1, pdst_argb, 32); \
+ __lasx_xvst(temp2, pdst_argb, 64); \
+ __lasx_xvst(temp3, pdst_argb, 96); \
+ pdst_argb += 128; \
}
// Pack and Store 8 ARGB values.
-#define STOREARGB(in_a, in_r, in_g, in_b, pdst_argb) \
- { \
- __m256i temp0, temp1; \
- \
- temp0 = __lasx_xvpackev_b(in_g, in_b); \
- temp1 = __lasx_xvpackev_b(in_a, in_r); \
- in_a = __lasx_xvilvl_h(temp1, temp0); \
- in_r = __lasx_xvilvh_h(temp1, temp0); \
- temp0 = __lasx_xvpermi_q(in_r, in_a, 0x20); \
- temp1 = __lasx_xvpermi_q(in_r, in_a, 0x31); \
- __lasx_xvst(temp0, pdst_argb, 0); \
- __lasx_xvst(temp1, pdst_argb, 32); \
- pdst_argb += 64; \
+#define STOREARGB(in_a, in_r, in_g, in_b, pdst_argb) \
+ { \
+ __m256i temp0, temp1; \
+ \
+ temp0 = __lasx_xvpackev_b(in_g, in_b); \
+ temp1 = __lasx_xvpackev_b(in_a, in_r); \
+ in_a = __lasx_xvilvl_h(temp1, temp0); \
+ in_r = __lasx_xvilvh_h(temp1, temp0); \
+ temp0 = __lasx_xvpermi_q(in_r, in_a, 0x20); \
+ temp1 = __lasx_xvpermi_q(in_r, in_a, 0x31); \
+ __lasx_xvst(temp0, pdst_argb, 0); \
+ __lasx_xvst(temp1, pdst_argb, 32); \
+ pdst_argb += 64; \
}
void MirrorRow_LASX(const uint8_t* src, uint8_t* dst, int width) {
@@ -205,15 +203,15 @@ void MirrorRow_LASX(const uint8_t* src, uint8_t* dst, int width) {
0x08090A0B0C0D0E0F, 0x0001020304050607};
src += width - 64;
for (x = 0; x < len; x++) {
- DUP2_ARG2(__lasx_xvld, src, 0, src, 32, src0, src1);
- DUP2_ARG3(__lasx_xvshuf_b, src0, src0, shuffler,
- src1, src1, shuffler, src0, src1);
- src0 = __lasx_xvpermi_q(src0, src0, 0x01);
- src1 = __lasx_xvpermi_q(src1, src1, 0x01);
- __lasx_xvst(src1, dst, 0);
- __lasx_xvst(src0, dst, 32);
- dst += 64;
- src -= 64;
+ DUP2_ARG2(__lasx_xvld, src, 0, src, 32, src0, src1);
+ DUP2_ARG3(__lasx_xvshuf_b, src0, src0, shuffler, src1, src1, shuffler, src0,
+ src1);
+ src0 = __lasx_xvpermi_q(src0, src0, 0x01);
+ src1 = __lasx_xvpermi_q(src1, src1, 0x01);
+ __lasx_xvst(src1, dst, 0);
+ __lasx_xvst(src0, dst, 32);
+ dst += 64;
+ src -= 64;
}
}
@@ -226,12 +224,12 @@ void MirrorUVRow_LASX(const uint8_t* src_uv, uint8_t* dst_uv, int width) {
src_uv += (width - 16) << 1;
for (x = 0; x < len; x++) {
- src = __lasx_xvld(src_uv, 0);
- dst = __lasx_xvshuf_h(shuffler, src, src);
- dst = __lasx_xvpermi_q(dst, dst, 0x01);
- __lasx_xvst(dst, dst_uv, 0);
- src_uv -= 32;
- dst_uv += 32;
+ src = __lasx_xvld(src_uv, 0);
+ dst = __lasx_xvshuf_h(shuffler, src, src);
+ dst = __lasx_xvpermi_q(dst, dst, 0x01);
+ __lasx_xvst(dst, dst_uv, 0);
+ src_uv -= 32;
+ dst_uv += 32;
}
}
@@ -244,15 +242,15 @@ void ARGBMirrorRow_LASX(const uint8_t* src, uint8_t* dst, int width) {
0x0B0A09080F0E0D0C, 0x0302010007060504};
src += (width * 4) - 64;
for (x = 0; x < len; x++) {
- DUP2_ARG2(__lasx_xvld, src, 0, src, 32, src0, src1);
- DUP2_ARG3(__lasx_xvshuf_b, src0, src0, shuffler,
- src1, src1, shuffler, src0, src1);
- dst1 = __lasx_xvpermi_q(src0, src0, 0x01);
- dst0 = __lasx_xvpermi_q(src1, src1, 0x01);
- __lasx_xvst(dst0, dst, 0);
- __lasx_xvst(dst1, dst, 32);
- dst += 64;
- src -= 64;
+ DUP2_ARG2(__lasx_xvld, src, 0, src, 32, src0, src1);
+ DUP2_ARG3(__lasx_xvshuf_b, src0, src0, shuffler, src1, src1, shuffler, src0,
+ src1);
+ dst1 = __lasx_xvpermi_q(src0, src0, 0x01);
+ dst0 = __lasx_xvpermi_q(src1, src1, 0x01);
+ __lasx_xvst(dst0, dst, 0);
+ __lasx_xvst(dst1, dst, 32);
+ dst += 64;
+ src -= 64;
}
}
@@ -268,21 +266,21 @@ void I422ToYUY2Row_LASX(const uint8_t* src_y,
__m256i dst_yuy2_0, dst_yuy2_1;
for (x = 0; x < len; x++) {
- DUP2_ARG2(__lasx_xvld, src_u, 0, src_v, 0, src_u0, src_v0);
- src_y0 = __lasx_xvld(src_y, 0);
- src_u0 = __lasx_xvpermi_d(src_u0, 0xD8);
- src_v0 = __lasx_xvpermi_d(src_v0, 0xD8);
- vec_uv0 = __lasx_xvilvl_b(src_v0, src_u0);
- vec_yuy2_0 = __lasx_xvilvl_b(vec_uv0, src_y0);
- vec_yuy2_1 = __lasx_xvilvh_b(vec_uv0, src_y0);
- dst_yuy2_0 = __lasx_xvpermi_q(vec_yuy2_1, vec_yuy2_0, 0x20);
- dst_yuy2_1 = __lasx_xvpermi_q(vec_yuy2_1, vec_yuy2_0, 0x31);
- __lasx_xvst(dst_yuy2_0, dst_yuy2, 0);
- __lasx_xvst(dst_yuy2_1, dst_yuy2, 32);
- src_u += 16;
- src_v += 16;
- src_y += 32;
- dst_yuy2 += 64;
+ DUP2_ARG2(__lasx_xvld, src_u, 0, src_v, 0, src_u0, src_v0);
+ src_y0 = __lasx_xvld(src_y, 0);
+ src_u0 = __lasx_xvpermi_d(src_u0, 0xD8);
+ src_v0 = __lasx_xvpermi_d(src_v0, 0xD8);
+ vec_uv0 = __lasx_xvilvl_b(src_v0, src_u0);
+ vec_yuy2_0 = __lasx_xvilvl_b(vec_uv0, src_y0);
+ vec_yuy2_1 = __lasx_xvilvh_b(vec_uv0, src_y0);
+ dst_yuy2_0 = __lasx_xvpermi_q(vec_yuy2_1, vec_yuy2_0, 0x20);
+ dst_yuy2_1 = __lasx_xvpermi_q(vec_yuy2_1, vec_yuy2_0, 0x31);
+ __lasx_xvst(dst_yuy2_0, dst_yuy2, 0);
+ __lasx_xvst(dst_yuy2_1, dst_yuy2, 32);
+ src_u += 16;
+ src_v += 16;
+ src_y += 32;
+ dst_yuy2 += 64;
}
}
@@ -298,21 +296,21 @@ void I422ToUYVYRow_LASX(const uint8_t* src_y,
__m256i dst_uyvy0, dst_uyvy1;
for (x = 0; x < len; x++) {
- DUP2_ARG2(__lasx_xvld, src_u, 0, src_v, 0, src_u0, src_v0);
- src_y0 = __lasx_xvld(src_y, 0);
- src_u0 = __lasx_xvpermi_d(src_u0, 0xD8);
- src_v0 = __lasx_xvpermi_d(src_v0, 0xD8);
- vec_uv0 = __lasx_xvilvl_b(src_v0, src_u0);
- vec_uyvy0 = __lasx_xvilvl_b(src_y0, vec_uv0);
- vec_uyvy1 = __lasx_xvilvh_b(src_y0, vec_uv0);
- dst_uyvy0 = __lasx_xvpermi_q(vec_uyvy1, vec_uyvy0, 0x20);
- dst_uyvy1 = __lasx_xvpermi_q(vec_uyvy1, vec_uyvy0, 0x31);
- __lasx_xvst(dst_uyvy0, dst_uyvy, 0);
- __lasx_xvst(dst_uyvy1, dst_uyvy, 32);
- src_u += 16;
- src_v += 16;
- src_y += 32;
- dst_uyvy +=64;
+ DUP2_ARG2(__lasx_xvld, src_u, 0, src_v, 0, src_u0, src_v0);
+ src_y0 = __lasx_xvld(src_y, 0);
+ src_u0 = __lasx_xvpermi_d(src_u0, 0xD8);
+ src_v0 = __lasx_xvpermi_d(src_v0, 0xD8);
+ vec_uv0 = __lasx_xvilvl_b(src_v0, src_u0);
+ vec_uyvy0 = __lasx_xvilvl_b(src_y0, vec_uv0);
+ vec_uyvy1 = __lasx_xvilvh_b(src_y0, vec_uv0);
+ dst_uyvy0 = __lasx_xvpermi_q(vec_uyvy1, vec_uyvy0, 0x20);
+ dst_uyvy1 = __lasx_xvpermi_q(vec_uyvy1, vec_uyvy0, 0x31);
+ __lasx_xvst(dst_uyvy0, dst_uyvy, 0);
+ __lasx_xvst(dst_uyvy1, dst_uyvy, 32);
+ src_u += 16;
+ src_v += 16;
+ src_y += 32;
+ dst_uyvy += 64;
}
}
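The I422To* rows below chain READYUV422 and YUVTORGB. In scalar terms, the fixed-point math those macros perform per pixel is roughly the following sketch (rounding and lane handling simplified; constant indices as used by YUVTORGB_SETUP above, field names assumed from that macro):

#include "libyuv/row.h"

static uint8_t Clip255(int32_t v) {
  return (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
}

// Sketch only: per-pixel form of the YUVTORGB fixed-point pipeline.
static void YuvPixelSketch(uint8_t y, uint8_t u, uint8_t v,
                           const struct YuvConstants* c,
                           uint8_t* b, uint8_t* g, uint8_t* r) {
  int32_t y1 = ((y * 0x0101 * c->kYToRgb[0]) >> 16) + c->kYBiasToRgb[0];
  int32_t u1 = u - 128;
  int32_t v1 = v - 128;
  *b = Clip255((y1 + u1 * c->kUVToB[0]) >> 6);
  *r = Clip255((y1 + v1 * c->kUVToR[1]) >> 6);
  *g = Clip255((y1 - (u1 * c->kUVToG[0] + v1 * c->kUVToG[1])) >> 6);
}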
@@ -335,8 +333,8 @@ void I422ToARGBRow_LASX(const uint8_t* src_y,
__m256i y, uv_l, uv_h, b_l, b_h, g_l, g_h, r_l, r_h;
READYUV422_D(src_y, src_u, src_v, y, uv_l, uv_h);
- YUVTORGB_D(y, uv_l, uv_h, vec_ubvr, vec_ugvg, vec_yg,
- vec_yb, b_l, b_h, g_l, g_h, r_l, r_h);
+ YUVTORGB_D(y, uv_l, uv_h, vec_ubvr, vec_ugvg, vec_yg, vec_yb, b_l, b_h, g_l,
+ g_h, r_l, r_h);
STOREARGB_D(alpha, alpha, r_l, r_h, g_l, g_h, b_l, b_h, dst_argb);
src_y += 32;
src_u += 16;
@@ -363,8 +361,8 @@ void I422ToRGBARow_LASX(const uint8_t* src_y,
__m256i y, uv_l, uv_h, b_l, b_h, g_l, g_h, r_l, r_h;
READYUV422_D(src_y, src_u, src_v, y, uv_l, uv_h);
- YUVTORGB_D(y, uv_l, uv_h, vec_ubvr, vec_ugvg, vec_yg,
- vec_yb, b_l, b_h, g_l, g_h, r_l, r_h);
+ YUVTORGB_D(y, uv_l, uv_h, vec_ubvr, vec_ugvg, vec_yg, vec_yb, b_l, b_h, g_l,
+ g_h, r_l, r_h);
STOREARGB_D(r_l, r_h, g_l, g_h, b_l, b_h, alpha, alpha, dst_argb);
src_y += 32;
src_u += 16;
@@ -392,12 +390,12 @@ void I422AlphaToARGBRow_LASX(const uint8_t* src_y,
for (x = 0; x < len; x++) {
__m256i y, uv_l, uv_h, b_l, b_h, g_l, g_h, r_l, r_h, a_l, a_h;
- y = __lasx_xvld(src_a, 0);
+ y = __lasx_xvld(src_a, 0);
a_l = __lasx_xvilvl_b(zero, y);
a_h = __lasx_xvilvh_b(zero, y);
READYUV422_D(src_y, src_u, src_v, y, uv_l, uv_h);
- YUVTORGB_D(y, uv_l, uv_h, vec_ubvr, vec_ugvg, vec_yg,
- vec_yb, b_l, b_h, g_l, g_h, r_l, r_h);
+ YUVTORGB_D(y, uv_l, uv_h, vec_ubvr, vec_ugvg, vec_yg, vec_yb, b_l, b_h, g_l,
+ g_h, r_l, r_h);
STOREARGB_D(a_l, a_h, r_l, r_h, g_l, g_h, b_l, b_h, dst_argb);
src_y += 32;
src_u += 16;
@@ -437,12 +435,13 @@ void I422ToRGB24Row_LASX(const uint8_t* src_y,
__m256i temp0, temp1, temp2, temp3;
READYUV422_D(src_y, src_u, src_v, y, uv_l, uv_h);
- YUVTORGB_D(y, uv_l, uv_h, vec_ubvr, vec_ugvg, vec_yg,
- vec_yb, b_l, b_h, g_l, g_h, r_l, r_h);
+ YUVTORGB_D(y, uv_l, uv_h, vec_ubvr, vec_ugvg, vec_yg, vec_yb, b_l, b_h, g_l,
+ g_h, r_l, r_h);
temp0 = __lasx_xvpackev_b(g_l, b_l);
temp1 = __lasx_xvpackev_b(g_h, b_h);
DUP4_ARG3(__lasx_xvshuf_b, r_l, temp0, shuffler1, r_h, temp1, shuffler1,
- r_l, temp0, shuffler0, r_h, temp1, shuffler0, temp2, temp3, temp0, temp1);
+ r_l, temp0, shuffler0, r_h, temp1, shuffler0, temp2, temp3, temp0,
+ temp1);
b_l = __lasx_xvilvl_d(temp1, temp2);
b_h = __lasx_xvilvh_d(temp3, temp1);
@@ -479,22 +478,22 @@ void I422ToRGB565Row_LASX(const uint8_t* src_y,
__m256i dst_l, dst_h;
READYUV422_D(src_y, src_u, src_v, y, uv_l, uv_h);
- YUVTORGB_D(y, uv_l, uv_h, vec_ubvr, vec_ugvg, vec_yg,
- vec_yb, b_l, b_h, g_l, g_h, r_l, r_h);
- b_l = __lasx_xvsrli_h(b_l, 3);
- b_h = __lasx_xvsrli_h(b_h, 3);
- g_l = __lasx_xvsrli_h(g_l, 2);
- g_h = __lasx_xvsrli_h(g_h, 2);
- r_l = __lasx_xvsrli_h(r_l, 3);
- r_h = __lasx_xvsrli_h(r_h, 3);
- r_l = __lasx_xvslli_h(r_l, 11);
- r_h = __lasx_xvslli_h(r_h, 11);
- g_l = __lasx_xvslli_h(g_l, 5);
- g_h = __lasx_xvslli_h(g_h, 5);
- r_l = __lasx_xvor_v(r_l, g_l);
- r_l = __lasx_xvor_v(r_l, b_l);
- r_h = __lasx_xvor_v(r_h, g_h);
- r_h = __lasx_xvor_v(r_h, b_h);
+ YUVTORGB_D(y, uv_l, uv_h, vec_ubvr, vec_ugvg, vec_yg, vec_yb, b_l, b_h, g_l,
+ g_h, r_l, r_h);
+ b_l = __lasx_xvsrli_h(b_l, 3);
+ b_h = __lasx_xvsrli_h(b_h, 3);
+ g_l = __lasx_xvsrli_h(g_l, 2);
+ g_h = __lasx_xvsrli_h(g_h, 2);
+ r_l = __lasx_xvsrli_h(r_l, 3);
+ r_h = __lasx_xvsrli_h(r_h, 3);
+ r_l = __lasx_xvslli_h(r_l, 11);
+ r_h = __lasx_xvslli_h(r_h, 11);
+ g_l = __lasx_xvslli_h(g_l, 5);
+ g_h = __lasx_xvslli_h(g_h, 5);
+ r_l = __lasx_xvor_v(r_l, g_l);
+ r_l = __lasx_xvor_v(r_l, b_l);
+ r_h = __lasx_xvor_v(r_h, g_h);
+ r_h = __lasx_xvor_v(r_h, b_h);
dst_l = __lasx_xvpermi_q(r_h, r_l, 0x20);
dst_h = __lasx_xvpermi_q(r_h, r_l, 0x31);
__lasx_xvst(dst_l, dst_rgb565, 0);
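
Note (not part of the patch): the shift-and-or chain reflowed above is plain RGB565 packing, keeping the top 5/6/5 bits of R/G/B. A one-pixel scalar sketch with a hypothetical helper name:

#include <stdint.h>

// 8-bit R,G,B -> 16-bit RGB565 (RRRRRGGG GGGBBBBB), stored little-endian.
static inline uint16_t PackRGB565(uint8_t r, uint8_t g, uint8_t b) {
  return (uint16_t)(((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3));
}
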
@@ -518,10 +517,10 @@ void I422ToARGB4444Row_LASX(const uint8_t* src_y,
__m256i vec_yb, vec_yg;
__m256i vec_ubvr, vec_ugvg;
__m256i const_0x80 = __lasx_xvldi(0x80);
- __m256i alpha = {0xF000F000F000F000, 0xF000F000F000F000,
- 0xF000F000F000F000, 0xF000F000F000F000};
- __m256i mask = {0x00F000F000F000F0, 0x00F000F000F000F0,
- 0x00F000F000F000F0, 0x00F000F000F000F0};
+ __m256i alpha = {0xF000F000F000F000, 0xF000F000F000F000, 0xF000F000F000F000,
+ 0xF000F000F000F000};
+ __m256i mask = {0x00F000F000F000F0, 0x00F000F000F000F0, 0x00F000F000F000F0,
+ 0x00F000F000F000F0};
YUVTORGB_SETUP(yuvconstants, vec_ubvr, vec_ugvg, vec_yg, vec_yb);
@@ -530,8 +529,8 @@ void I422ToARGB4444Row_LASX(const uint8_t* src_y,
__m256i dst_l, dst_h;
READYUV422_D(src_y, src_u, src_v, y, uv_l, uv_h);
- YUVTORGB_D(y, uv_l, uv_h, vec_ubvr, vec_ugvg, vec_yg,
- vec_yb, b_l, b_h, g_l, g_h, r_l, r_h);
+ YUVTORGB_D(y, uv_l, uv_h, vec_ubvr, vec_ugvg, vec_yg, vec_yb, b_l, b_h, g_l,
+ g_h, r_l, r_h);
b_l = __lasx_xvsrli_h(b_l, 4);
b_h = __lasx_xvsrli_h(b_h, 4);
r_l = __lasx_xvsrli_h(r_l, 4);
@@ -568,8 +567,8 @@ void I422ToARGB1555Row_LASX(const uint8_t* src_y,
__m256i vec_yb, vec_yg;
__m256i vec_ubvr, vec_ugvg;
__m256i const_0x80 = __lasx_xvldi(0x80);
- __m256i alpha = {0x8000800080008000, 0x8000800080008000,
- 0x8000800080008000, 0x8000800080008000};
+ __m256i alpha = {0x8000800080008000, 0x8000800080008000, 0x8000800080008000,
+ 0x8000800080008000};
YUVTORGB_SETUP(yuvconstants, vec_ubvr, vec_ugvg, vec_yg, vec_yb);
@@ -578,8 +577,8 @@ void I422ToARGB1555Row_LASX(const uint8_t* src_y,
__m256i dst_l, dst_h;
READYUV422_D(src_y, src_u, src_v, y, uv_l, uv_h);
- YUVTORGB_D(y, uv_l, uv_h, vec_ubvr, vec_ugvg, vec_yg,
- vec_yb, b_l, b_h, g_l, g_h, r_l, r_h);
+ YUVTORGB_D(y, uv_l, uv_h, vec_ubvr, vec_ugvg, vec_yg, vec_yb, b_l, b_h, g_l,
+ g_h, r_l, r_h);
b_l = __lasx_xvsrli_h(b_l, 3);
b_h = __lasx_xvsrli_h(b_h, 3);
g_l = __lasx_xvsrli_h(g_l, 3);
@@ -751,13 +750,13 @@ void ARGBToYRow_LASX(const uint8_t* src_argb0, uint8_t* dst_y, int width) {
int len = width / 32;
__m256i src0, src1, src2, src3, vec0, vec1, vec2, vec3;
__m256i tmp0, tmp1, dst0;
- __m256i const_19 = __lasx_xvldi(0x19);
- __m256i const_42 = __lasx_xvldi(0x42);
- __m256i const_81 = __lasx_xvldi(0x81);
+ __m256i const_19 = __lasx_xvldi(0x19);
+ __m256i const_42 = __lasx_xvldi(0x42);
+ __m256i const_81 = __lasx_xvldi(0x81);
__m256i const_1080 = {0x1080108010801080, 0x1080108010801080,
0x1080108010801080, 0x1080108010801080};
- __m256i control = {0x0000000400000000, 0x0000000500000001,
- 0x0000000600000002, 0x0000000700000003};
+ __m256i control = {0x0000000400000000, 0x0000000500000001, 0x0000000600000002,
+ 0x0000000700000003};
for (x = 0; x < len; x++) {
DUP4_ARG2(__lasx_xvld, src_argb0, 0, src_argb0, 32, src_argb0, 64,
@@ -802,8 +801,8 @@ void ARGBToUVRow_LASX(const uint8_t* src_argb0,
0x002f002f002f002f, 0x002f002f002f002f};
__m256i const_0x12 = {0x0009000900090009, 0x0009000900090009,
0x0009000900090009, 0x0009000900090009};
- __m256i control = {0x0000000400000000, 0x0000000500000001,
- 0x0000000600000002, 0x0000000700000003};
+ __m256i control = {0x0000000400000000, 0x0000000500000001, 0x0000000600000002,
+ 0x0000000700000003};
__m256i const_0x8080 = {0x8080808080808080, 0x8080808080808080,
0x8080808080808080, 0x8080808080808080};
@@ -861,13 +860,13 @@ void ARGBToRGB24Row_LASX(const uint8_t* src_argb, uint8_t* dst_rgb, int width) {
int len = (width / 32) - 1;
__m256i src0, src1, src2, src3;
__m256i tmp0, tmp1, tmp2, tmp3;
- __m256i shuf = {0x0908060504020100, 0x000000000E0D0C0A,
- 0x0908060504020100, 0x000000000E0D0C0A};
- __m256i control = {0x0000000100000000, 0x0000000400000002,
- 0x0000000600000005, 0x0000000700000003};
+ __m256i shuf = {0x0908060504020100, 0x000000000E0D0C0A, 0x0908060504020100,
+ 0x000000000E0D0C0A};
+ __m256i control = {0x0000000100000000, 0x0000000400000002, 0x0000000600000005,
+ 0x0000000700000003};
for (x = 0; x < len; x++) {
- DUP4_ARG2(__lasx_xvld, src_argb, 0, src_argb, 32, src_argb, 64,
- src_argb, 96, src0, src1, src2, src3);
+ DUP4_ARG2(__lasx_xvld, src_argb, 0, src_argb, 32, src_argb, 64, src_argb,
+ 96, src0, src1, src2, src3);
tmp0 = __lasx_xvshuf_b(src0, src0, shuf);
tmp1 = __lasx_xvshuf_b(src1, src1, shuf);
tmp2 = __lasx_xvshuf_b(src2, src2, shuf);
@@ -883,8 +882,8 @@ void ARGBToRGB24Row_LASX(const uint8_t* src_argb, uint8_t* dst_rgb, int width) {
dst_rgb += 96;
src_argb += 128;
}
- DUP4_ARG2(__lasx_xvld, src_argb, 0, src_argb, 32, src_argb, 64,
- src_argb, 96, src0, src1, src2, src3);
+ DUP4_ARG2(__lasx_xvld, src_argb, 0, src_argb, 32, src_argb, 64, src_argb, 96,
+ src0, src1, src2, src3);
tmp0 = __lasx_xvshuf_b(src0, src0, shuf);
tmp1 = __lasx_xvshuf_b(src1, src1, shuf);
tmp2 = __lasx_xvshuf_b(src2, src2, shuf);
@@ -907,13 +906,13 @@ void ARGBToRAWRow_LASX(const uint8_t* src_argb, uint8_t* dst_rgb, int width) {
int len = (width / 32) - 1;
__m256i src0, src1, src2, src3;
__m256i tmp0, tmp1, tmp2, tmp3;
- __m256i shuf = {0x090A040506000102, 0x000000000C0D0E08,
- 0x090A040506000102, 0x000000000C0D0E08};
- __m256i control = {0x0000000100000000, 0x0000000400000002,
- 0x0000000600000005, 0x0000000700000003};
+ __m256i shuf = {0x090A040506000102, 0x000000000C0D0E08, 0x090A040506000102,
+ 0x000000000C0D0E08};
+ __m256i control = {0x0000000100000000, 0x0000000400000002, 0x0000000600000005,
+ 0x0000000700000003};
for (x = 0; x < len; x++) {
- DUP4_ARG2(__lasx_xvld, src_argb, 0, src_argb, 32, src_argb, 64,
- src_argb, 96, src0, src1, src2, src3);
+ DUP4_ARG2(__lasx_xvld, src_argb, 0, src_argb, 32, src_argb, 64, src_argb,
+ 96, src0, src1, src2, src3);
tmp0 = __lasx_xvshuf_b(src0, src0, shuf);
tmp1 = __lasx_xvshuf_b(src1, src1, shuf);
tmp2 = __lasx_xvshuf_b(src2, src2, shuf);
@@ -929,8 +928,8 @@ void ARGBToRAWRow_LASX(const uint8_t* src_argb, uint8_t* dst_rgb, int width) {
dst_rgb += 96;
src_argb += 128;
}
- DUP4_ARG2(__lasx_xvld, src_argb, 0, src_argb, 32, src_argb, 64,
- src_argb, 96, src0, src1, src2, src3);
+ DUP4_ARG2(__lasx_xvld, src_argb, 0, src_argb, 32, src_argb, 64, src_argb, 96,
+ src0, src1, src2, src3);
tmp0 = __lasx_xvshuf_b(src0, src0, shuf);
tmp1 = __lasx_xvshuf_b(src1, src1, shuf);
tmp2 = __lasx_xvshuf_b(src2, src2, shuf);
@@ -948,13 +947,15 @@ void ARGBToRAWRow_LASX(const uint8_t* src_argb, uint8_t* dst_rgb, int width) {
__lasx_xvstelm_d(tmp3, dst_rgb, 16, 2);
}
-void ARGBToRGB565Row_LASX(const uint8_t* src_argb, uint8_t* dst_rgb, int width) {
+void ARGBToRGB565Row_LASX(const uint8_t* src_argb,
+ uint8_t* dst_rgb,
+ int width) {
int x;
int len = width / 16;
__m256i zero = __lasx_xvldi(0);
__m256i src0, src1, tmp0, tmp1, dst0;
- __m256i shift = {0x0300030003000300, 0x0300030003000300,
- 0x0300030003000300, 0x0300030003000300};
+ __m256i shift = {0x0300030003000300, 0x0300030003000300, 0x0300030003000300,
+ 0x0300030003000300};
for (x = 0; x < len; x++) {
DUP2_ARG2(__lasx_xvld, src_argb, 0, src_argb, 32, src0, src1);
@@ -980,10 +981,10 @@ void ARGBToARGB1555Row_LASX(const uint8_t* src_argb,
int len = width / 16;
__m256i zero = __lasx_xvldi(0);
__m256i src0, src1, tmp0, tmp1, tmp2, tmp3, dst0;
- __m256i shift1 = {0x0703070307030703, 0x0703070307030703,
- 0x0703070307030703, 0x0703070307030703};
- __m256i shift2 = {0x0200020002000200, 0x0200020002000200,
- 0x0200020002000200, 0x0200020002000200};
+ __m256i shift1 = {0x0703070307030703, 0x0703070307030703, 0x0703070307030703,
+ 0x0703070307030703};
+ __m256i shift2 = {0x0200020002000200, 0x0200020002000200, 0x0200020002000200,
+ 0x0200020002000200};
for (x = 0; x < len; x++) {
DUP2_ARG2(__lasx_xvld, src_argb, 0, src_argb, 32, src0, src1);
@@ -1036,17 +1037,17 @@ void ARGBToUV444Row_LASX(const uint8_t* src_argb,
__m256i tmp0, tmp1, tmp2, tmp3;
__m256i reg0, reg1, reg2, reg3, dst0, dst1;
__m256i const_112 = __lasx_xvldi(112);
- __m256i const_74 = __lasx_xvldi(74);
- __m256i const_38 = __lasx_xvldi(38);
- __m256i const_94 = __lasx_xvldi(94);
- __m256i const_18 = __lasx_xvldi(18);
+ __m256i const_74 = __lasx_xvldi(74);
+ __m256i const_38 = __lasx_xvldi(38);
+ __m256i const_94 = __lasx_xvldi(94);
+ __m256i const_18 = __lasx_xvldi(18);
__m256i const_0x8080 = {0x8080808080808080, 0x8080808080808080,
0x8080808080808080, 0x8080808080808080};
- __m256i control = {0x0000000400000000, 0x0000000500000001,
- 0x0000000600000002, 0x0000000700000003};
+ __m256i control = {0x0000000400000000, 0x0000000500000001, 0x0000000600000002,
+ 0x0000000700000003};
for (x = 0; x < len; x++) {
- DUP4_ARG2(__lasx_xvld, src_argb, 0, src_argb, 32, src_argb, 64,
- src_argb, 96, src0, src1, src2, src3);
+ DUP4_ARG2(__lasx_xvld, src_argb, 0, src_argb, 32, src_argb, 64, src_argb,
+ 96, src0, src1, src2, src3);
tmp0 = __lasx_xvpickev_h(src1, src0);
tmp1 = __lasx_xvpickod_h(src1, src0);
tmp2 = __lasx_xvpickev_h(src3, src2);
@@ -1101,7 +1102,7 @@ void ARGBMultiplyRow_LASX(const uint8_t* src_argb0,
__lasx_xvst(dst0, dst_argb, 0);
src_argb0 += 32;
src_argb1 += 32;
- dst_argb += 32;
+ dst_argb += 32;
}
}
@@ -1119,7 +1120,7 @@ void ARGBAddRow_LASX(const uint8_t* src_argb0,
__lasx_xvst(dst0, dst_argb, 0);
src_argb0 += 32;
src_argb1 += 32;
- dst_argb += 32;
+ dst_argb += 32;
}
}
@@ -1137,7 +1138,7 @@ void ARGBSubtractRow_LASX(const uint8_t* src_argb0,
__lasx_xvst(dst0, dst_argb, 0);
src_argb0 += 32;
src_argb1 += 32;
- dst_argb += 32;
+ dst_argb += 32;
}
}
@@ -1149,8 +1150,8 @@ void ARGBAttenuateRow_LASX(const uint8_t* src_argb,
__m256i src0, src1, tmp0, tmp1;
__m256i reg0, reg1, reg2, reg3, reg4, reg5;
__m256i b, g, r, a, dst0, dst1;
- __m256i control = {0x0005000100040000, 0x0007000300060002,
- 0x0005000100040000, 0x0007000300060002};
+ __m256i control = {0x0005000100040000, 0x0007000300060002, 0x0005000100040000,
+ 0x0007000300060002};
for (x = 0; x < len; x++) {
DUP2_ARG2(__lasx_xvld, src_argb, 0, src_argb, 32, src0, src1);
@@ -1199,12 +1200,12 @@ void ARGBToRGB565DitherRow_LASX(const uint8_t* src_argb,
DUP2_ARG2(__lasx_xvld, src_argb, 0, src_argb, 32, src0, src1);
tmp0 = __lasx_xvpickev_b(src1, src0);
tmp1 = __lasx_xvpickod_b(src1, src0);
- b = __lasx_xvpackev_b(zero, tmp0);
- r = __lasx_xvpackod_b(zero, tmp0);
- g = __lasx_xvpackev_b(zero, tmp1);
- b = __lasx_xvadd_h(b, vec_dither);
- g = __lasx_xvadd_h(g, vec_dither);
- r = __lasx_xvadd_h(r, vec_dither);
+ b = __lasx_xvpackev_b(zero, tmp0);
+ r = __lasx_xvpackod_b(zero, tmp0);
+ g = __lasx_xvpackev_b(zero, tmp1);
+ b = __lasx_xvadd_h(b, vec_dither);
+ g = __lasx_xvadd_h(g, vec_dither);
+ r = __lasx_xvadd_h(r, vec_dither);
DUP2_ARG1(__lasx_xvclip255_h, b, g, b, g);
r = __lasx_xvclip255_h(r);
b = __lasx_xvsrai_h(b, 3);
@@ -1228,8 +1229,8 @@ void ARGBShuffleRow_LASX(const uint8_t* src_argb,
int x;
int len = width / 16;
__m256i src0, src1, dst0, dst1;
- __m256i shuf = {0x0404040400000000, 0x0C0C0C0C08080808,
- 0x0404040400000000, 0x0C0C0C0C08080808};
+ __m256i shuf = {0x0404040400000000, 0x0C0C0C0C08080808, 0x0404040400000000,
+ 0x0C0C0C0C08080808};
__m256i temp = __lasx_xvldrepl_w(shuffler, 0);
shuf = __lasx_xvadd_b(shuf, temp);
@@ -1274,8 +1275,8 @@ void ARGBGrayRow_LASX(const uint8_t* src_argb, uint8_t* dst_argb, int width) {
__m256i reg0, reg1, reg2, dst0, dst1;
__m256i const_128 = __lasx_xvldi(0x480);
__m256i const_150 = __lasx_xvldi(0x96);
- __m256i const_br = {0x4D1D4D1D4D1D4D1D, 0x4D1D4D1D4D1D4D1D,
- 0x4D1D4D1D4D1D4D1D, 0x4D1D4D1D4D1D4D1D};
+ __m256i const_br = {0x4D1D4D1D4D1D4D1D, 0x4D1D4D1D4D1D4D1D,
+ 0x4D1D4D1D4D1D4D1D, 0x4D1D4D1D4D1D4D1D};
for (x = 0; x < len; x++) {
DUP2_ARG2(__lasx_xvld, src_argb, 0, src_argb, 32, src0, src1);
@@ -1301,17 +1302,17 @@ void ARGBSepiaRow_LASX(uint8_t* dst_argb, int width) {
__m256i src0, src1, tmp0, tmp1;
__m256i reg0, reg1, spb, spg, spr;
__m256i dst0, dst1;
- __m256i spb_g = __lasx_xvldi(68);
- __m256i spg_g = __lasx_xvldi(88);
- __m256i spr_g = __lasx_xvldi(98);
- __m256i spb_br = {0x2311231123112311, 0x2311231123112311,
- 0x2311231123112311, 0x2311231123112311};
- __m256i spg_br = {0x2D162D162D162D16, 0x2D162D162D162D16,
- 0x2D162D162D162D16, 0x2D162D162D162D16};
- __m256i spr_br = {0x3218321832183218, 0x3218321832183218,
- 0x3218321832183218, 0x3218321832183218};
- __m256i shuff = {0x1706150413021100, 0x1F0E1D0C1B0A1908,
- 0x1706150413021100, 0x1F0E1D0C1B0A1908};
+ __m256i spb_g = __lasx_xvldi(68);
+ __m256i spg_g = __lasx_xvldi(88);
+ __m256i spr_g = __lasx_xvldi(98);
+ __m256i spb_br = {0x2311231123112311, 0x2311231123112311, 0x2311231123112311,
+ 0x2311231123112311};
+ __m256i spg_br = {0x2D162D162D162D16, 0x2D162D162D162D16, 0x2D162D162D162D16,
+ 0x2D162D162D162D16};
+ __m256i spr_br = {0x3218321832183218, 0x3218321832183218, 0x3218321832183218,
+ 0x3218321832183218};
+ __m256i shuff = {0x1706150413021100, 0x1F0E1D0C1B0A1908, 0x1706150413021100,
+ 0x1F0E1D0C1B0A1908};
for (x = 0; x < len; x++) {
DUP2_ARG2(__lasx_xvld, dst_argb, 0, dst_argb, 32, src0, src1);
@@ -1319,14 +1320,14 @@ void ARGBSepiaRow_LASX(uint8_t* dst_argb, int width) {
tmp1 = __lasx_xvpickod_b(src1, src0);
DUP2_ARG2(__lasx_xvdp2_h_bu, tmp0, spb_br, tmp0, spg_br, spb, spg);
spr = __lasx_xvdp2_h_bu(tmp0, spr_br);
- spb = __lasx_xvmaddwev_h_bu(spb, tmp1, spb_g);
- spg = __lasx_xvmaddwev_h_bu(spg, tmp1, spg_g);
- spr = __lasx_xvmaddwev_h_bu(spr, tmp1, spr_g);
- spb = __lasx_xvsrli_h(spb, 7);
- spg = __lasx_xvsrli_h(spg, 7);
- spr = __lasx_xvsrli_h(spr, 7);
- spg = __lasx_xvsat_hu(spg, 7);
- spr = __lasx_xvsat_hu(spr, 7);
+ spb = __lasx_xvmaddwev_h_bu(spb, tmp1, spb_g);
+ spg = __lasx_xvmaddwev_h_bu(spg, tmp1, spg_g);
+ spr = __lasx_xvmaddwev_h_bu(spr, tmp1, spr_g);
+ spb = __lasx_xvsrli_h(spb, 7);
+ spg = __lasx_xvsrli_h(spg, 7);
+ spr = __lasx_xvsrli_h(spr, 7);
+ spg = __lasx_xvsat_hu(spg, 7);
+ spr = __lasx_xvsat_hu(spr, 7);
reg0 = __lasx_xvpackev_b(spg, spb);
reg1 = __lasx_xvshuf_b(tmp1, spr, shuff);
dst0 = __lasx_xvilvl_h(reg1, reg0);
diff --git a/source/row_lsx.cc b/source/row_lsx.cc
index 6fe93b57..a445e636 100644
--- a/source/row_lsx.cc
+++ b/source/row_lsx.cc
@@ -21,139 +21,138 @@ extern "C" {
#endif
// Fill YUV -> RGB conversion constants into vectors
-#define YUVTORGB_SETUP(yuvconst, vr, ub, vg, ug, yg, yb) \
- { \
- ub = __lsx_vreplgr2vr_h(yuvconst->kUVToB[0]); \
- vr = __lsx_vreplgr2vr_h(yuvconst->kUVToR[1]); \
- ug = __lsx_vreplgr2vr_h(yuvconst->kUVToG[0]); \
- vg = __lsx_vreplgr2vr_h(yuvconst->kUVToG[1]); \
- yg = __lsx_vreplgr2vr_h(yuvconst->kYToRgb[0]); \
- yb = __lsx_vreplgr2vr_w(yuvconst->kYBiasToRgb[0]); \
+#define YUVTORGB_SETUP(yuvconst, vr, ub, vg, ug, yg, yb) \
+ { \
+ ub = __lsx_vreplgr2vr_h(yuvconst->kUVToB[0]); \
+ vr = __lsx_vreplgr2vr_h(yuvconst->kUVToR[1]); \
+ ug = __lsx_vreplgr2vr_h(yuvconst->kUVToG[0]); \
+ vg = __lsx_vreplgr2vr_h(yuvconst->kUVToG[1]); \
+ yg = __lsx_vreplgr2vr_h(yuvconst->kYToRgb[0]); \
+ yb = __lsx_vreplgr2vr_w(yuvconst->kYBiasToRgb[0]); \
}
// Convert 8 pixels of YUV420 to RGB.
-#define YUVTORGB(in_y, in_vu, vrub, vgug, \
- yg, yb, out_b, out_g, out_r) \
- { \
- __m128i y_ev, y_od, u_l, v_l; \
- __m128i tmp0, tmp1, tmp2, tmp3; \
- \
- tmp0 = __lsx_vilvl_b(in_y, in_y); \
- y_ev = __lsx_vmulwev_w_hu_h(tmp0, yg); \
- y_od = __lsx_vmulwod_w_hu_h(tmp0, yg); \
- y_ev = __lsx_vsrai_w(y_ev, 16); \
- y_od = __lsx_vsrai_w(y_od, 16); \
- y_ev = __lsx_vadd_w(y_ev, yb); \
- y_od = __lsx_vadd_w(y_od, yb); \
- in_vu = __lsx_vilvl_b(zero, in_vu); \
- in_vu = __lsx_vsub_h(in_vu, const_80); \
- u_l = __lsx_vmulwev_w_h(in_vu, vrub); \
- v_l = __lsx_vmulwod_w_h(in_vu, vrub); \
- tmp0 = __lsx_vadd_w(y_ev, u_l); \
- tmp1 = __lsx_vadd_w(y_od, u_l); \
- tmp2 = __lsx_vadd_w(y_ev, v_l); \
- tmp3 = __lsx_vadd_w(y_od, v_l); \
- tmp0 = __lsx_vsrai_w(tmp0, 6); \
- tmp1 = __lsx_vsrai_w(tmp1, 6); \
- tmp2 = __lsx_vsrai_w(tmp2, 6); \
- tmp3 = __lsx_vsrai_w(tmp3, 6); \
- tmp0 = __lsx_vclip255_w(tmp0); \
- tmp1 = __lsx_vclip255_w(tmp1); \
- tmp2 = __lsx_vclip255_w(tmp2); \
- tmp3 = __lsx_vclip255_w(tmp3); \
- out_b = __lsx_vpackev_h(tmp1, tmp0); \
- out_r = __lsx_vpackev_h(tmp3, tmp2); \
- tmp0 = __lsx_vdp2_w_h(in_vu, vgug); \
- tmp1 = __lsx_vsub_w(y_ev, tmp0); \
- tmp2 = __lsx_vsub_w(y_od, tmp0); \
- tmp1 = __lsx_vsrai_w(tmp1, 6); \
- tmp2 = __lsx_vsrai_w(tmp2, 6); \
- tmp1 = __lsx_vclip255_w(tmp1); \
- tmp2 = __lsx_vclip255_w(tmp2); \
- out_g = __lsx_vpackev_h(tmp2, tmp1); \
+#define YUVTORGB(in_y, in_vu, vrub, vgug, yg, yb, out_b, out_g, out_r) \
+ { \
+ __m128i y_ev, y_od, u_l, v_l; \
+ __m128i tmp0, tmp1, tmp2, tmp3; \
+ \
+ tmp0 = __lsx_vilvl_b(in_y, in_y); \
+ y_ev = __lsx_vmulwev_w_hu_h(tmp0, yg); \
+ y_od = __lsx_vmulwod_w_hu_h(tmp0, yg); \
+ y_ev = __lsx_vsrai_w(y_ev, 16); \
+ y_od = __lsx_vsrai_w(y_od, 16); \
+ y_ev = __lsx_vadd_w(y_ev, yb); \
+ y_od = __lsx_vadd_w(y_od, yb); \
+ in_vu = __lsx_vilvl_b(zero, in_vu); \
+ in_vu = __lsx_vsub_h(in_vu, const_80); \
+ u_l = __lsx_vmulwev_w_h(in_vu, vrub); \
+ v_l = __lsx_vmulwod_w_h(in_vu, vrub); \
+ tmp0 = __lsx_vadd_w(y_ev, u_l); \
+ tmp1 = __lsx_vadd_w(y_od, u_l); \
+ tmp2 = __lsx_vadd_w(y_ev, v_l); \
+ tmp3 = __lsx_vadd_w(y_od, v_l); \
+ tmp0 = __lsx_vsrai_w(tmp0, 6); \
+ tmp1 = __lsx_vsrai_w(tmp1, 6); \
+ tmp2 = __lsx_vsrai_w(tmp2, 6); \
+ tmp3 = __lsx_vsrai_w(tmp3, 6); \
+ tmp0 = __lsx_vclip255_w(tmp0); \
+ tmp1 = __lsx_vclip255_w(tmp1); \
+ tmp2 = __lsx_vclip255_w(tmp2); \
+ tmp3 = __lsx_vclip255_w(tmp3); \
+ out_b = __lsx_vpackev_h(tmp1, tmp0); \
+ out_r = __lsx_vpackev_h(tmp3, tmp2); \
+ tmp0 = __lsx_vdp2_w_h(in_vu, vgug); \
+ tmp1 = __lsx_vsub_w(y_ev, tmp0); \
+ tmp2 = __lsx_vsub_w(y_od, tmp0); \
+ tmp1 = __lsx_vsrai_w(tmp1, 6); \
+ tmp2 = __lsx_vsrai_w(tmp2, 6); \
+ tmp1 = __lsx_vclip255_w(tmp1); \
+ tmp2 = __lsx_vclip255_w(tmp2); \
+ out_g = __lsx_vpackev_h(tmp2, tmp1); \
}
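
Note (not part of the patch): YUVTORGB is the vector form of libyuv's fixed-point YUV-to-RGB step. Y is widened, scaled by yg and biased by yb in 32-bit precision, U/V are recentered on 128, and B/G/R fall out after a >>6 and a clamp to 255. The one-pixel sketch below is an illustration only; the coefficient names mirror the macro arguments, and their signs are assumed to be folded into the YuvConstants tables rather than written out here.

#include <stdint.h>

static inline int Clamp255(int v) {
  return v < 0 ? 0 : (v > 255 ? 255 : v);
}

// One-pixel sketch of the fixed-point conversion done by YUVTORGB.
static inline void YuvToRgbSketch(uint8_t y, uint8_t u, uint8_t v,
                                  int ub, int vr, int ug, int vg,
                                  int yg, int yb,
                                  uint8_t* b, uint8_t* g, uint8_t* r) {
  int y32 = ((y * 0x0101 * yg) >> 16) + yb;  // vilvl_b(y, y) duplicates Y: y * 257.
  int u16 = u - 0x80;                        // const_80 recenters chroma.
  int v16 = v - 0x80;
  *b = (uint8_t)Clamp255((y32 + u16 * ub) >> 6);
  *r = (uint8_t)Clamp255((y32 + v16 * vr) >> 6);
  *g = (uint8_t)Clamp255((y32 - (u16 * ug + v16 * vg)) >> 6);
}
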
// Convert I444 pixels of YUV420 to RGB.
-#define I444TORGB(in_yy, in_u, in_v, ub, vr, ugvg, \
- yg, yb, out_b, out_g, out_r) \
- { \
- __m128i y_ev, y_od, u_ev, v_ev, u_od, v_od; \
- __m128i tmp0, tmp1, tmp2, tmp3; \
- \
- y_ev = __lsx_vmulwev_w_hu_h(in_yy, yg); \
- y_od = __lsx_vmulwod_w_hu_h(in_yy, yg); \
- y_ev = __lsx_vsrai_w(y_ev, 16); \
- y_od = __lsx_vsrai_w(y_od, 16); \
- y_ev = __lsx_vadd_w(y_ev, yb); \
- y_od = __lsx_vadd_w(y_od, yb); \
- in_u = __lsx_vsub_h(in_u, const_80); \
- in_v = __lsx_vsub_h(in_v, const_80); \
- u_ev = __lsx_vmulwev_w_h(in_u, ub); \
- u_od = __lsx_vmulwod_w_h(in_u, ub); \
- v_ev = __lsx_vmulwev_w_h(in_v, vr); \
- v_od = __lsx_vmulwod_w_h(in_v, vr); \
- tmp0 = __lsx_vadd_w(y_ev, u_ev); \
- tmp1 = __lsx_vadd_w(y_od, u_od); \
- tmp2 = __lsx_vadd_w(y_ev, v_ev); \
- tmp3 = __lsx_vadd_w(y_od, v_od); \
- tmp0 = __lsx_vsrai_w(tmp0, 6); \
- tmp1 = __lsx_vsrai_w(tmp1, 6); \
- tmp2 = __lsx_vsrai_w(tmp2, 6); \
- tmp3 = __lsx_vsrai_w(tmp3, 6); \
- tmp0 = __lsx_vclip255_w(tmp0); \
- tmp1 = __lsx_vclip255_w(tmp1); \
- tmp2 = __lsx_vclip255_w(tmp2); \
- tmp3 = __lsx_vclip255_w(tmp3); \
- out_b = __lsx_vpackev_h(tmp1, tmp0); \
- out_r = __lsx_vpackev_h(tmp3, tmp2); \
- u_ev = __lsx_vpackev_h(in_u, in_v); \
- u_od = __lsx_vpackod_h(in_u, in_v); \
- v_ev = __lsx_vdp2_w_h(u_ev, ugvg); \
- v_od = __lsx_vdp2_w_h(u_od, ugvg); \
- tmp0 = __lsx_vsub_w(y_ev, v_ev); \
- tmp1 = __lsx_vsub_w(y_od, v_od); \
- tmp0 = __lsx_vsrai_w(tmp0, 6); \
- tmp1 = __lsx_vsrai_w(tmp1, 6); \
- tmp0 = __lsx_vclip255_w(tmp0); \
- tmp1 = __lsx_vclip255_w(tmp1); \
- out_g = __lsx_vpackev_h(tmp1, tmp0); \
+#define I444TORGB(in_yy, in_u, in_v, ub, vr, ugvg, yg, yb, out_b, out_g, \
+ out_r) \
+ { \
+ __m128i y_ev, y_od, u_ev, v_ev, u_od, v_od; \
+ __m128i tmp0, tmp1, tmp2, tmp3; \
+ \
+ y_ev = __lsx_vmulwev_w_hu_h(in_yy, yg); \
+ y_od = __lsx_vmulwod_w_hu_h(in_yy, yg); \
+ y_ev = __lsx_vsrai_w(y_ev, 16); \
+ y_od = __lsx_vsrai_w(y_od, 16); \
+ y_ev = __lsx_vadd_w(y_ev, yb); \
+ y_od = __lsx_vadd_w(y_od, yb); \
+ in_u = __lsx_vsub_h(in_u, const_80); \
+ in_v = __lsx_vsub_h(in_v, const_80); \
+ u_ev = __lsx_vmulwev_w_h(in_u, ub); \
+ u_od = __lsx_vmulwod_w_h(in_u, ub); \
+ v_ev = __lsx_vmulwev_w_h(in_v, vr); \
+ v_od = __lsx_vmulwod_w_h(in_v, vr); \
+ tmp0 = __lsx_vadd_w(y_ev, u_ev); \
+ tmp1 = __lsx_vadd_w(y_od, u_od); \
+ tmp2 = __lsx_vadd_w(y_ev, v_ev); \
+ tmp3 = __lsx_vadd_w(y_od, v_od); \
+ tmp0 = __lsx_vsrai_w(tmp0, 6); \
+ tmp1 = __lsx_vsrai_w(tmp1, 6); \
+ tmp2 = __lsx_vsrai_w(tmp2, 6); \
+ tmp3 = __lsx_vsrai_w(tmp3, 6); \
+ tmp0 = __lsx_vclip255_w(tmp0); \
+ tmp1 = __lsx_vclip255_w(tmp1); \
+ tmp2 = __lsx_vclip255_w(tmp2); \
+ tmp3 = __lsx_vclip255_w(tmp3); \
+ out_b = __lsx_vpackev_h(tmp1, tmp0); \
+ out_r = __lsx_vpackev_h(tmp3, tmp2); \
+ u_ev = __lsx_vpackev_h(in_u, in_v); \
+ u_od = __lsx_vpackod_h(in_u, in_v); \
+ v_ev = __lsx_vdp2_w_h(u_ev, ugvg); \
+ v_od = __lsx_vdp2_w_h(u_od, ugvg); \
+ tmp0 = __lsx_vsub_w(y_ev, v_ev); \
+ tmp1 = __lsx_vsub_w(y_od, v_od); \
+ tmp0 = __lsx_vsrai_w(tmp0, 6); \
+ tmp1 = __lsx_vsrai_w(tmp1, 6); \
+ tmp0 = __lsx_vclip255_w(tmp0); \
+ tmp1 = __lsx_vclip255_w(tmp1); \
+ out_g = __lsx_vpackev_h(tmp1, tmp0); \
}
// Pack and Store 8 ARGB values.
-#define STOREARGB(in_a, in_r, in_g, in_b, pdst_argb) \
- { \
- __m128i temp0, temp1; \
- __m128i dst0, dst1; \
- \
- temp0 = __lsx_vpackev_b(in_g, in_b); \
- temp1 = __lsx_vpackev_b(in_a, in_r); \
- dst0 = __lsx_vilvl_h(temp1, temp0); \
- dst1 = __lsx_vilvh_h(temp1, temp0); \
- __lsx_vst(dst0, pdst_argb, 0); \
- __lsx_vst(dst1, pdst_argb, 16); \
- pdst_argb += 32; \
+#define STOREARGB(in_a, in_r, in_g, in_b, pdst_argb) \
+ { \
+ __m128i temp0, temp1; \
+ __m128i dst0, dst1; \
+ \
+ temp0 = __lsx_vpackev_b(in_g, in_b); \
+ temp1 = __lsx_vpackev_b(in_a, in_r); \
+ dst0 = __lsx_vilvl_h(temp1, temp0); \
+ dst1 = __lsx_vilvh_h(temp1, temp0); \
+ __lsx_vst(dst0, pdst_argb, 0); \
+ __lsx_vst(dst1, pdst_argb, 16); \
+ pdst_argb += 32; \
}
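
Note (not part of the patch): STOREARGB interleaves the four planes back into the memory byte order B,G,R,A that libyuv calls ARGB (a little-endian 32-bit ARGB word). Scalar sketch for one pixel, hypothetical helper name:

#include <stdint.h>

static inline void StoreARGB1(uint8_t a, uint8_t r, uint8_t g, uint8_t b,
                              uint8_t* dst_argb) {
  dst_argb[0] = b;
  dst_argb[1] = g;
  dst_argb[2] = r;
  dst_argb[3] = a;
}
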
-#define RGBTOUV(_tmpb, _tmpg, _tmpr, _nexb, _nexg, _nexr, _dst0) \
- { \
- __m128i _tmp0, _tmp1, _tmp2, _tmp3; \
- __m128i _reg0, _reg1; \
- _tmp0 = __lsx_vaddwev_h_bu(_tmpb, _nexb); \
- _tmp1 = __lsx_vaddwod_h_bu(_tmpb, _nexb); \
- _tmp2 = __lsx_vaddwev_h_bu(_tmpg, _nexg); \
- _tmp3 = __lsx_vaddwod_h_bu(_tmpg, _nexg); \
- _reg0 = __lsx_vaddwev_h_bu(_tmpr, _nexr); \
- _reg1 = __lsx_vaddwod_h_bu(_tmpr, _nexr); \
- _tmpb = __lsx_vavgr_hu(_tmp0, _tmp1); \
- _tmpg = __lsx_vavgr_hu(_tmp2, _tmp3); \
- _tmpr = __lsx_vavgr_hu(_reg0, _reg1); \
- _reg0 = __lsx_vmadd_h(const_8080, const_112, _tmpb); \
- _reg1 = __lsx_vmadd_h(const_8080, const_112, _tmpr); \
- _reg0 = __lsx_vmsub_h(_reg0, const_74, _tmpg); \
- _reg1 = __lsx_vmsub_h(_reg1, const_94, _tmpg); \
- _reg0 = __lsx_vmsub_h(_reg0, const_38, _tmpr); \
- _reg1 = __lsx_vmsub_h(_reg1, const_18, _tmpb); \
- _dst0 = __lsx_vsrlni_b_h(_reg1, _reg0, 8); \
+#define RGBTOUV(_tmpb, _tmpg, _tmpr, _nexb, _nexg, _nexr, _dst0) \
+ { \
+ __m128i _tmp0, _tmp1, _tmp2, _tmp3; \
+ __m128i _reg0, _reg1; \
+ _tmp0 = __lsx_vaddwev_h_bu(_tmpb, _nexb); \
+ _tmp1 = __lsx_vaddwod_h_bu(_tmpb, _nexb); \
+ _tmp2 = __lsx_vaddwev_h_bu(_tmpg, _nexg); \
+ _tmp3 = __lsx_vaddwod_h_bu(_tmpg, _nexg); \
+ _reg0 = __lsx_vaddwev_h_bu(_tmpr, _nexr); \
+ _reg1 = __lsx_vaddwod_h_bu(_tmpr, _nexr); \
+ _tmpb = __lsx_vavgr_hu(_tmp0, _tmp1); \
+ _tmpg = __lsx_vavgr_hu(_tmp2, _tmp3); \
+ _tmpr = __lsx_vavgr_hu(_reg0, _reg1); \
+ _reg0 = __lsx_vmadd_h(const_8080, const_112, _tmpb); \
+ _reg1 = __lsx_vmadd_h(const_8080, const_112, _tmpr); \
+ _reg0 = __lsx_vmsub_h(_reg0, const_74, _tmpg); \
+ _reg1 = __lsx_vmsub_h(_reg1, const_94, _tmpg); \
+ _reg0 = __lsx_vmsub_h(_reg0, const_38, _tmpr); \
+ _reg1 = __lsx_vmsub_h(_reg1, const_18, _tmpb); \
+ _dst0 = __lsx_vsrlni_b_h(_reg1, _reg0, 8); \
}
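
Note (not part of the patch): RGBTOUV folds a 2x2 box average (current row plus the next row, even and odd columns) into U/V dot products with the 0x8080 rounding bias and an 8-bit narrowing shift. Because the vector code keeps the 2x2 sum at twice the average, the multipliers it loads are effectively half-scale. The scalar sketch below uses the conventional full-scale coefficients, assumed to match libyuv's C reference:

#include <stdint.h>

// b, g, r are the 2x2 box-averaged channel values (0..255).
static inline uint8_t RgbToU(int r, int g, int b) {
  return (uint8_t)((112 * b - 74 * g - 38 * r + 0x8080) >> 8);
}
static inline uint8_t RgbToV(int r, int g, int b) {
  return (uint8_t)((112 * r - 94 * g - 18 * b + 0x8080) >> 8);
}
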
void ARGB4444ToARGBRow_LSX(const uint8_t* src_argb4444,
@@ -177,8 +176,8 @@ void ARGB4444ToARGBRow_LSX(const uint8_t* src_argb4444,
reg2 = __lsx_vslli_b(tmp2, 4);
reg1 = __lsx_vsrli_b(tmp1, 4);
reg3 = __lsx_vsrli_b(tmp3, 4);
- DUP4_ARG2(__lsx_vor_v, tmp0, reg0, tmp1, reg1, tmp2, reg2,
- tmp3, reg3, tmp0, tmp1, tmp2, tmp3);
+ DUP4_ARG2(__lsx_vor_v, tmp0, reg0, tmp1, reg1, tmp2, reg2, tmp3, reg3, tmp0,
+ tmp1, tmp2, tmp3);
dst0 = __lsx_vilvl_b(tmp1, tmp0);
dst2 = __lsx_vilvl_b(tmp3, tmp2);
dst1 = __lsx_vilvh_b(tmp1, tmp0);
@@ -352,9 +351,9 @@ void ARGB1555ToYRow_LSX(const uint8_t* src_argb1555,
__m128i src0, src1;
__m128i tmp0, tmp1, tmpb, tmpg, tmpr;
__m128i reg0, reg1, reg2, dst0;
- __m128i const_66 = __lsx_vldi(66);
+ __m128i const_66 = __lsx_vldi(66);
__m128i const_129 = __lsx_vldi(129);
- __m128i const_25 = __lsx_vldi(25);
+ __m128i const_25 = __lsx_vldi(25);
__m128i const_1080 = {0x1080108010801080, 0x1080108010801080};
__m128i shuff = {0x0B030A0209010800, 0x0F070E060D050C04};
@@ -406,15 +405,15 @@ void ARGB1555ToUVRow_LSX(const uint8_t* src_argb1555,
__m128i tmpb, tmpg, tmpr, nexb, nexg, nexr;
__m128i reg0, reg1, reg2, reg3, dst0;
__m128i const_112 = __lsx_vldi(0x438);
- __m128i const_74 = __lsx_vldi(0x425);
- __m128i const_38 = __lsx_vldi(0x413);
- __m128i const_94 = __lsx_vldi(0x42F);
- __m128i const_18 = __lsx_vldi(0x409);
+ __m128i const_74 = __lsx_vldi(0x425);
+ __m128i const_38 = __lsx_vldi(0x413);
+ __m128i const_94 = __lsx_vldi(0x42F);
+ __m128i const_18 = __lsx_vldi(0x409);
__m128i const_8080 = {0x8080808080808080, 0x8080808080808080};
for (x = 0; x < len; x++) {
- DUP4_ARG2(__lsx_vld, src_argb1555, 0, src_argb1555, 16,
- next_argb1555, 0, next_argb1555, 16, src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vld, src_argb1555, 0, src_argb1555, 16, next_argb1555, 0,
+ next_argb1555, 16, src0, src1, src2, src3);
DUP2_ARG2(__lsx_vpickev_b, src1, src0, src3, src2, tmp0, tmp2);
DUP2_ARG2(__lsx_vpickod_b, src1, src0, src3, src2, tmp1, tmp3);
tmpb = __lsx_vandi_b(tmp0, 0x1F);
@@ -465,9 +464,9 @@ void RGB565ToYRow_LSX(const uint8_t* src_rgb565, uint8_t* dst_y, int width) {
__m128i src0, src1;
__m128i tmp0, tmp1, tmpb, tmpg, tmpr;
__m128i reg0, reg1, dst0;
- __m128i const_66 = __lsx_vldi(66);
+ __m128i const_66 = __lsx_vldi(66);
__m128i const_129 = __lsx_vldi(129);
- __m128i const_25 = __lsx_vldi(25);
+ __m128i const_25 = __lsx_vldi(25);
__m128i const_1080 = {0x1080108010801080, 0x1080108010801080};
__m128i shuff = {0x0B030A0209010800, 0x0F070E060D050C04};
@@ -517,15 +516,15 @@ void RGB565ToUVRow_LSX(const uint8_t* src_rgb565,
__m128i tmpb, tmpg, tmpr, nexb, nexg, nexr;
__m128i reg0, reg1, reg2, reg3, dst0;
__m128i const_112 = __lsx_vldi(0x438);
- __m128i const_74 = __lsx_vldi(0x425);
- __m128i const_38 = __lsx_vldi(0x413);
- __m128i const_94 = __lsx_vldi(0x42F);
- __m128i const_18 = __lsx_vldi(0x409);
+ __m128i const_74 = __lsx_vldi(0x425);
+ __m128i const_38 = __lsx_vldi(0x413);
+ __m128i const_94 = __lsx_vldi(0x42F);
+ __m128i const_18 = __lsx_vldi(0x409);
__m128i const_8080 = {0x8080808080808080, 0x8080808080808080};
for (x = 0; x < len; x++) {
- DUP4_ARG2(__lsx_vld, src_rgb565, 0, src_rgb565, 16,
- next_rgb565, 0, next_rgb565, 16, src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vld, src_rgb565, 0, src_rgb565, 16, next_rgb565, 0,
+ next_rgb565, 16, src0, src1, src2, src3);
DUP2_ARG2(__lsx_vpickev_b, src1, src0, src3, src2, tmp0, tmp2);
DUP2_ARG2(__lsx_vpickod_b, src1, src0, src3, src2, tmp1, tmp3);
tmpb = __lsx_vandi_b(tmp0, 0x1F);
@@ -611,10 +610,10 @@ void RGB24ToUVRow_LSX(const uint8_t* src_rgb24,
__m128i nex0, nex1, nex2, dst0;
__m128i tmpb, tmpg, tmpr, nexb, nexg, nexr;
__m128i const_112 = __lsx_vldi(0x438);
- __m128i const_74 = __lsx_vldi(0x425);
- __m128i const_38 = __lsx_vldi(0x413);
- __m128i const_94 = __lsx_vldi(0x42F);
- __m128i const_18 = __lsx_vldi(0x409);
+ __m128i const_74 = __lsx_vldi(0x425);
+ __m128i const_38 = __lsx_vldi(0x413);
+ __m128i const_94 = __lsx_vldi(0x42F);
+ __m128i const_18 = __lsx_vldi(0x409);
__m128i const_8080 = {0x8080808080808080, 0x8080808080808080};
__m128i shuff0_b = {0x15120F0C09060300, 0x00000000001E1B18};
__m128i shuff1_b = {0x0706050403020100, 0x1D1A1714110A0908};
@@ -630,12 +629,18 @@ void RGB24ToUVRow_LSX(const uint8_t* src_rgb24,
nex0 = __lsx_vld(next_rgb24, 0);
nex1 = __lsx_vld(next_rgb24, 16);
nex2 = __lsx_vld(next_rgb24, 32);
- DUP2_ARG3(__lsx_vshuf_b, src1, src0, shuff0_b, nex1, nex0, shuff0_b, tmpb, nexb);
- DUP2_ARG3(__lsx_vshuf_b, src1, src0, shuff0_g, nex1, nex0, shuff0_g, tmpg, nexg);
- DUP2_ARG3(__lsx_vshuf_b, src1, src0, shuff0_r, nex1, nex0, shuff0_r, tmpr, nexr);
- DUP2_ARG3(__lsx_vshuf_b, src2, tmpb, shuff1_b, nex2, nexb, shuff1_b, tmpb, nexb);
- DUP2_ARG3(__lsx_vshuf_b, src2, tmpg, shuff1_g, nex2, nexg, shuff1_g, tmpg, nexg);
- DUP2_ARG3(__lsx_vshuf_b, src2, tmpr, shuff1_r, nex2, nexr, shuff1_r, tmpr, nexr);
+ DUP2_ARG3(__lsx_vshuf_b, src1, src0, shuff0_b, nex1, nex0, shuff0_b, tmpb,
+ nexb);
+ DUP2_ARG3(__lsx_vshuf_b, src1, src0, shuff0_g, nex1, nex0, shuff0_g, tmpg,
+ nexg);
+ DUP2_ARG3(__lsx_vshuf_b, src1, src0, shuff0_r, nex1, nex0, shuff0_r, tmpr,
+ nexr);
+ DUP2_ARG3(__lsx_vshuf_b, src2, tmpb, shuff1_b, nex2, nexb, shuff1_b, tmpb,
+ nexb);
+ DUP2_ARG3(__lsx_vshuf_b, src2, tmpg, shuff1_g, nex2, nexg, shuff1_g, tmpg,
+ nexg);
+ DUP2_ARG3(__lsx_vshuf_b, src2, tmpr, shuff1_r, nex2, nexr, shuff1_r, tmpr,
+ nexr);
RGBTOUV(tmpb, tmpg, tmpr, nexb, nexg, nexr, dst0);
__lsx_vstelm_d(dst0, dst_u, 0, 0);
__lsx_vstelm_d(dst0, dst_v, 0, 1);
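
Note (not part of the patch): the six reflowed DUP2_ARG3 shuffles gather packed RGB24 (memory byte order B,G,R) into separate B/G/R lanes for the current row and the row below before RGBTOUV runs. A scalar sketch of the same gather for one 16-pixel row, hypothetical helper name:

#include <stdint.h>

static inline void SplitRGB24x16(const uint8_t* row, uint8_t b[16],
                                 uint8_t g[16], uint8_t r[16]) {
  for (int i = 0; i < 16; ++i) {
    b[i] = row[3 * i + 0];
    g[i] = row[3 * i + 1];
    r[i] = row[3 * i + 2];
  }
}
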
@@ -691,10 +696,10 @@ void RAWToUVRow_LSX(const uint8_t* src_raw,
__m128i nex0, nex1, nex2, dst0;
__m128i tmpb, tmpg, tmpr, nexb, nexg, nexr;
__m128i const_112 = __lsx_vldi(0x438);
- __m128i const_74 = __lsx_vldi(0x425);
- __m128i const_38 = __lsx_vldi(0x413);
- __m128i const_94 = __lsx_vldi(0x42F);
- __m128i const_18 = __lsx_vldi(0x409);
+ __m128i const_74 = __lsx_vldi(0x425);
+ __m128i const_38 = __lsx_vldi(0x413);
+ __m128i const_94 = __lsx_vldi(0x42F);
+ __m128i const_18 = __lsx_vldi(0x409);
__m128i const_8080 = {0x8080808080808080, 0x8080808080808080};
__m128i shuff0_r = {0x15120F0C09060300, 0x00000000001E1B18};
__m128i shuff1_r = {0x0706050403020100, 0x1D1A1714110A0908};
@@ -710,12 +715,18 @@ void RAWToUVRow_LSX(const uint8_t* src_raw,
nex0 = __lsx_vld(next_raw, 0);
nex1 = __lsx_vld(next_raw, 16);
nex2 = __lsx_vld(next_raw, 32);
- DUP2_ARG3(__lsx_vshuf_b, src1, src0, shuff0_b, nex1, nex0, shuff0_b, tmpb, nexb);
- DUP2_ARG3(__lsx_vshuf_b, src1, src0, shuff0_g, nex1, nex0, shuff0_g, tmpg, nexg);
- DUP2_ARG3(__lsx_vshuf_b, src1, src0, shuff0_r, nex1, nex0, shuff0_r, tmpr, nexr);
- DUP2_ARG3(__lsx_vshuf_b, src2, tmpb, shuff1_b, nex2, nexb, shuff1_b, tmpb, nexb);
- DUP2_ARG3(__lsx_vshuf_b, src2, tmpg, shuff1_g, nex2, nexg, shuff1_g, tmpg, nexg);
- DUP2_ARG3(__lsx_vshuf_b, src2, tmpr, shuff1_r, nex2, nexr, shuff1_r, tmpr, nexr);
+ DUP2_ARG3(__lsx_vshuf_b, src1, src0, shuff0_b, nex1, nex0, shuff0_b, tmpb,
+ nexb);
+ DUP2_ARG3(__lsx_vshuf_b, src1, src0, shuff0_g, nex1, nex0, shuff0_g, tmpg,
+ nexg);
+ DUP2_ARG3(__lsx_vshuf_b, src1, src0, shuff0_r, nex1, nex0, shuff0_r, tmpr,
+ nexr);
+ DUP2_ARG3(__lsx_vshuf_b, src2, tmpb, shuff1_b, nex2, nexb, shuff1_b, tmpb,
+ nexb);
+ DUP2_ARG3(__lsx_vshuf_b, src2, tmpg, shuff1_g, nex2, nexg, shuff1_g, tmpg,
+ nexg);
+ DUP2_ARG3(__lsx_vshuf_b, src2, tmpr, shuff1_r, nex2, nexr, shuff1_r, tmpr,
+ nexr);
RGBTOUV(tmpb, tmpg, tmpr, nexb, nexg, nexr, dst0);
__lsx_vstelm_d(dst0, dst_u, 0, 0);
__lsx_vstelm_d(dst0, dst_v, 0, 1);
@@ -739,19 +750,19 @@ void NV12ToARGBRow_LSX(const uint8_t* src_y,
__m128i out_b, out_g, out_r;
__m128i const_80 = __lsx_vldi(0x480);
__m128i alpha = __lsx_vldi(0xFF);
- __m128i zero = __lsx_vldi(0);
+ __m128i zero = __lsx_vldi(0);
YUVTORGB_SETUP(yuvconstants, vec_vr, vec_ub, vec_vg, vec_ug, vec_yg, vec_yb);
vec_vrub = __lsx_vilvl_h(vec_vr, vec_ub);
vec_vgug = __lsx_vilvl_h(vec_vg, vec_ug);
for (x = 0; x < len; x++) {
- vec_y = __lsx_vld(src_y, 0);
+ vec_y = __lsx_vld(src_y, 0);
vec_vu = __lsx_vld(src_uv, 0);
- YUVTORGB(vec_y, vec_vu, vec_vrub, vec_vgug, vec_yg, vec_yb,
- out_b, out_g, out_r);
+ YUVTORGB(vec_y, vec_vu, vec_vrub, vec_vgug, vec_yg, vec_yb, out_b, out_g,
+ out_r);
STOREARGB(alpha, out_r, out_g, out_b, dst_argb);
- src_y += 8;
+ src_y += 8;
src_uv += 8;
}
}
@@ -768,17 +779,17 @@ void NV12ToRGB565Row_LSX(const uint8_t* src_y,
__m128i vec_vrub, vec_vgug;
__m128i out_b, out_g, out_r;
__m128i const_80 = __lsx_vldi(0x480);
- __m128i zero = __lsx_vldi(0);
+ __m128i zero = __lsx_vldi(0);
YUVTORGB_SETUP(yuvconstants, vec_vr, vec_ub, vec_vg, vec_ug, vec_yg, vec_yb);
vec_vrub = __lsx_vilvl_h(vec_vr, vec_ub);
vec_vgug = __lsx_vilvl_h(vec_vg, vec_ug);
for (x = 0; x < len; x++) {
- vec_y = __lsx_vld(src_y, 0);
+ vec_y = __lsx_vld(src_y, 0);
vec_vu = __lsx_vld(src_uv, 0);
- YUVTORGB(vec_y, vec_vu, vec_vrub, vec_vgug, vec_yg, vec_yb,
- out_b, out_g, out_r);
+ YUVTORGB(vec_y, vec_vu, vec_vrub, vec_vgug, vec_yg, vec_yb, out_b, out_g,
+ out_r);
out_b = __lsx_vsrli_h(out_b, 3);
out_g = __lsx_vsrli_h(out_g, 2);
out_r = __lsx_vsrli_h(out_r, 3);
@@ -787,7 +798,7 @@ void NV12ToRGB565Row_LSX(const uint8_t* src_y,
out_r = __lsx_vor_v(out_r, out_g);
out_r = __lsx_vor_v(out_r, out_b);
__lsx_vst(out_r, dst_rgb565, 0);
- src_y += 8;
+ src_y += 8;
src_uv += 8;
dst_rgb565 += 16;
}
@@ -806,19 +817,19 @@ void NV21ToARGBRow_LSX(const uint8_t* src_y,
__m128i out_b, out_g, out_r;
__m128i const_80 = __lsx_vldi(0x480);
__m128i alpha = __lsx_vldi(0xFF);
- __m128i zero = __lsx_vldi(0);
+ __m128i zero = __lsx_vldi(0);
YUVTORGB_SETUP(yuvconstants, vec_vr, vec_ub, vec_vg, vec_ug, vec_yg, vec_yb);
vec_ubvr = __lsx_vilvl_h(vec_ub, vec_vr);
vec_ugvg = __lsx_vilvl_h(vec_ug, vec_vg);
for (x = 0; x < len; x++) {
- vec_y = __lsx_vld(src_y, 0);
+ vec_y = __lsx_vld(src_y, 0);
vec_uv = __lsx_vld(src_vu, 0);
- YUVTORGB(vec_y, vec_uv, vec_ubvr, vec_ugvg, vec_yg, vec_yb,
- out_r, out_g, out_b);
+ YUVTORGB(vec_y, vec_uv, vec_ubvr, vec_ugvg, vec_yg, vec_yb, out_r, out_g,
+ out_b);
STOREARGB(alpha, out_r, out_g, out_b, dst_argb);
- src_y += 8;
+ src_y += 8;
src_vu += 8;
}
}
@@ -831,7 +842,7 @@ void SobelRow_LSX(const uint8_t* src_sobelx,
int len = width / 16;
__m128i src0, src1, tmp0;
__m128i out0, out1, out2, out3;
- __m128i alpha = __lsx_vldi(0xFF);
+ __m128i alpha = __lsx_vldi(0xFF);
__m128i shuff0 = {0x1001010110000000, 0x1003030310020202};
__m128i shuff1 = __lsx_vaddi_bu(shuff0, 0x04);
__m128i shuff2 = __lsx_vaddi_bu(shuff1, 0x04);
@@ -915,11 +926,11 @@ void ARGBToYJRow_LSX(const uint8_t* src_argb, uint8_t* dst_y, int width) {
__m128i reg0, reg1;
__m128i const_128 = __lsx_vldi(0x480);
__m128i const_150 = __lsx_vldi(0x96);
- __m128i const_br = {0x4D1D4D1D4D1D4D1D, 0x4D1D4D1D4D1D4D1D};
+ __m128i const_br = {0x4D1D4D1D4D1D4D1D, 0x4D1D4D1D4D1D4D1D};
for (x = 0; x < len; x++) {
- DUP4_ARG2(__lsx_vld, src_argb, 0, src_argb, 16, src_argb, 32,
- src_argb, 48, src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vld, src_argb, 0, src_argb, 16, src_argb, 32, src_argb, 48,
+ src0, src1, src2, src3);
tmp0 = __lsx_vpickev_b(src1, src0);
tmp1 = __lsx_vpickod_b(src1, src0);
tmp2 = __lsx_vpickev_b(src3, src2);
@@ -942,12 +953,12 @@ void BGRAToYRow_LSX(const uint8_t* src_bgra, uint8_t* dst_y, int width) {
__m128i tmp0, tmp1, tmp2, tmp3;
__m128i reg0, reg1;
__m128i const_129 = __lsx_vldi(0x81);
- __m128i const_br = {0x1942194219421942, 0x1942194219421942};
+ __m128i const_br = {0x1942194219421942, 0x1942194219421942};
__m128i const_1080 = {0x1080108010801080, 0x1080108010801080};
for (x = 0; x < len; x++) {
- DUP4_ARG2(__lsx_vld, src_bgra, 0, src_bgra, 16, src_bgra, 32,
- src_bgra, 48, src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vld, src_bgra, 0, src_bgra, 16, src_bgra, 32, src_bgra, 48,
+ src0, src1, src2, src3);
tmp0 = __lsx_vpickod_b(src1, src0);
tmp1 = __lsx_vpickev_b(src1, src0);
tmp2 = __lsx_vpickod_b(src3, src2);
@@ -976,17 +987,17 @@ void BGRAToUVRow_LSX(const uint8_t* src_bgra,
__m128i tmp0, tmp1, tmp2, tmp3, dst0;
__m128i tmpb, tmpg, tmpr, nexb, nexg, nexr;
__m128i const_112 = __lsx_vldi(0x438);
- __m128i const_74 = __lsx_vldi(0x425);
- __m128i const_38 = __lsx_vldi(0x413);
- __m128i const_94 = __lsx_vldi(0x42F);
- __m128i const_18 = __lsx_vldi(0x409);
+ __m128i const_74 = __lsx_vldi(0x425);
+ __m128i const_38 = __lsx_vldi(0x413);
+ __m128i const_94 = __lsx_vldi(0x42F);
+ __m128i const_18 = __lsx_vldi(0x409);
__m128i const_8080 = {0x8080808080808080, 0x8080808080808080};
for (x = 0; x < len; x++) {
- DUP4_ARG2(__lsx_vld, src_bgra, 0, src_bgra, 16, src_bgra, 32,
- src_bgra, 48, src0, src1, src2, src3);
- DUP4_ARG2(__lsx_vld, next_bgra, 0, next_bgra, 16, next_bgra, 32,
- next_bgra, 48, nex0, nex1, nex2, nex3);
+ DUP4_ARG2(__lsx_vld, src_bgra, 0, src_bgra, 16, src_bgra, 32, src_bgra, 48,
+ src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vld, next_bgra, 0, next_bgra, 16, next_bgra, 32, next_bgra,
+ 48, nex0, nex1, nex2, nex3);
tmp0 = __lsx_vpickod_b(src1, src0);
tmp1 = __lsx_vpickev_b(src1, src0);
tmp2 = __lsx_vpickod_b(src3, src2);
@@ -1018,12 +1029,12 @@ void ABGRToYRow_LSX(const uint8_t* src_abgr, uint8_t* dst_y, int width) {
__m128i tmp0, tmp1, tmp2, tmp3;
__m128i reg0, reg1;
__m128i const_129 = __lsx_vldi(0x81);
- __m128i const_br = {0x1942194219421942, 0x1942194219421942};
+ __m128i const_br = {0x1942194219421942, 0x1942194219421942};
__m128i const_1080 = {0x1080108010801080, 0x1080108010801080};
for (x = 0; x < len; x++) {
- DUP4_ARG2(__lsx_vld, src_abgr, 0, src_abgr, 16, src_abgr, 32,
- src_abgr, 48, src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vld, src_abgr, 0, src_abgr, 16, src_abgr, 32, src_abgr, 48,
+ src0, src1, src2, src3);
tmp0 = __lsx_vpickev_b(src1, src0);
tmp1 = __lsx_vpickod_b(src1, src0);
tmp2 = __lsx_vpickev_b(src3, src2);
@@ -1052,17 +1063,17 @@ void ABGRToUVRow_LSX(const uint8_t* src_abgr,
__m128i tmp0, tmp1, tmp2, tmp3, dst0;
__m128i tmpb, tmpg, tmpr, nexb, nexg, nexr;
__m128i const_112 = __lsx_vldi(0x438);
- __m128i const_74 = __lsx_vldi(0x425);
- __m128i const_38 = __lsx_vldi(0x413);
- __m128i const_94 = __lsx_vldi(0x42F);
- __m128i const_18 = __lsx_vldi(0x409);
+ __m128i const_74 = __lsx_vldi(0x425);
+ __m128i const_38 = __lsx_vldi(0x413);
+ __m128i const_94 = __lsx_vldi(0x42F);
+ __m128i const_18 = __lsx_vldi(0x409);
__m128i const_8080 = {0x8080808080808080, 0x8080808080808080};
for (x = 0; x < len; x++) {
- DUP4_ARG2(__lsx_vld, src_abgr, 0, src_abgr, 16, src_abgr, 32,
- src_abgr, 48, src0, src1, src2, src3);
- DUP4_ARG2(__lsx_vld, next_abgr, 0, next_abgr, 16, next_abgr, 32,
- next_abgr, 48, nex0, nex1, nex2, nex3);
+ DUP4_ARG2(__lsx_vld, src_abgr, 0, src_abgr, 16, src_abgr, 32, src_abgr, 48,
+ src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vld, next_abgr, 0, next_abgr, 16, next_abgr, 32, next_abgr,
+ 48, nex0, nex1, nex2, nex3);
tmp0 = __lsx_vpickev_b(src1, src0);
tmp1 = __lsx_vpickod_b(src1, src0);
tmp2 = __lsx_vpickev_b(src3, src2);
@@ -1094,12 +1105,12 @@ void RGBAToYRow_LSX(const uint8_t* src_rgba, uint8_t* dst_y, int width) {
__m128i tmp0, tmp1, tmp2, tmp3;
__m128i reg0, reg1;
__m128i const_129 = __lsx_vldi(0x81);
- __m128i const_br = {0x4219421942194219, 0x4219421942194219};
+ __m128i const_br = {0x4219421942194219, 0x4219421942194219};
__m128i const_1080 = {0x1080108010801080, 0x1080108010801080};
for (x = 0; x < len; x++) {
- DUP4_ARG2(__lsx_vld, src_rgba, 0, src_rgba, 16, src_rgba, 32,
- src_rgba, 48, src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vld, src_rgba, 0, src_rgba, 16, src_rgba, 32, src_rgba, 48,
+ src0, src1, src2, src3);
tmp0 = __lsx_vpickod_b(src1, src0);
tmp1 = __lsx_vpickev_b(src1, src0);
tmp2 = __lsx_vpickod_b(src3, src2);
@@ -1128,17 +1139,17 @@ void RGBAToUVRow_LSX(const uint8_t* src_rgba,
__m128i tmp0, tmp1, tmp2, tmp3, dst0;
__m128i tmpb, tmpg, tmpr, nexb, nexg, nexr;
__m128i const_112 = __lsx_vldi(0x438);
- __m128i const_74 = __lsx_vldi(0x425);
- __m128i const_38 = __lsx_vldi(0x413);
- __m128i const_94 = __lsx_vldi(0x42F);
- __m128i const_18 = __lsx_vldi(0x409);
+ __m128i const_74 = __lsx_vldi(0x425);
+ __m128i const_38 = __lsx_vldi(0x413);
+ __m128i const_94 = __lsx_vldi(0x42F);
+ __m128i const_18 = __lsx_vldi(0x409);
__m128i const_8080 = {0x8080808080808080, 0x8080808080808080};
for (x = 0; x < len; x++) {
- DUP4_ARG2(__lsx_vld, src_rgba, 0, src_rgba, 16, src_rgba, 32,
- src_rgba, 48, src0, src1, src2, src3);
- DUP4_ARG2(__lsx_vld, next_rgba, 0, next_rgba, 16, next_rgba, 32,
- next_rgba, 48, nex0, nex1, nex2, nex3);
+ DUP4_ARG2(__lsx_vld, src_rgba, 0, src_rgba, 16, src_rgba, 32, src_rgba, 48,
+ src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vld, next_rgba, 0, next_rgba, 16, next_rgba, 32, next_rgba,
+ 48, nex0, nex1, nex2, nex3);
tmp0 = __lsx_vpickod_b(src1, src0);
tmp1 = __lsx_vpickev_b(src1, src0);
tmp2 = __lsx_vpickod_b(src3, src2);
@@ -1174,20 +1185,20 @@ void ARGBToUVJRow_LSX(const uint8_t* src_argb,
__m128i src0, src1, src2, src3;
__m128i nex0, nex1, nex2, nex3;
__m128i tmp0, tmp1, tmp2, tmp3;
- __m128i reg0, reg1, dst0;
+ __m128i reg0, reg1, dst0;
__m128i tmpb, tmpg, tmpr, nexb, nexg, nexr;
- __m128i const_63 = __lsx_vldi(0x43F);
- __m128i const_42 = __lsx_vldi(0x42A);
- __m128i const_21 = __lsx_vldi(0x415);
- __m128i const_53 = __lsx_vldi(0x435);
- __m128i const_10 = __lsx_vldi(0x40A);
+ __m128i const_63 = __lsx_vldi(0x43F);
+ __m128i const_42 = __lsx_vldi(0x42A);
+ __m128i const_21 = __lsx_vldi(0x415);
+ __m128i const_53 = __lsx_vldi(0x435);
+ __m128i const_10 = __lsx_vldi(0x40A);
__m128i const_8080 = {0x8080808080808080, 0x8080808080808080};
for (x = 0; x < len; x++) {
- DUP4_ARG2(__lsx_vld, src_argb, 0, src_argb, 16, src_argb, 32,
- src_argb, 48, src0, src1, src2, src3);
- DUP4_ARG2(__lsx_vld, next_argb, 0, next_argb, 16, next_argb, 32,
- next_argb, 48, nex0, nex1, nex2, nex3);
+ DUP4_ARG2(__lsx_vld, src_argb, 0, src_argb, 16, src_argb, 32, src_argb, 48,
+ src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vld, next_argb, 0, next_argb, 16, next_argb, 32, next_argb,
+ 48, nex0, nex1, nex2, nex3);
tmp0 = __lsx_vpickev_b(src1, src0);
tmp1 = __lsx_vpickod_b(src1, src0);
tmp2 = __lsx_vpickev_b(src3, src2);
@@ -1240,26 +1251,26 @@ void I444ToARGBRow_LSX(const uint8_t* src_y,
__m128i vec_vr, vec_ub, vec_vg, vec_ug, vec_yg, vec_yb, vec_ugvg;
__m128i const_80 = __lsx_vldi(0x480);
__m128i alpha = __lsx_vldi(0xFF);
- __m128i zero = __lsx_vldi(0);
+ __m128i zero = __lsx_vldi(0);
YUVTORGB_SETUP(yuvconstants, vec_vr, vec_ub, vec_vg, vec_ug, vec_yg, vec_yb);
vec_ugvg = __lsx_vilvl_h(vec_ug, vec_vg);
for (x = 0; x < len; x++) {
- vec_y = __lsx_vld(src_y, 0);
- vec_u = __lsx_vld(src_u, 0);
- vec_v = __lsx_vld(src_v, 0);
+ vec_y = __lsx_vld(src_y, 0);
+ vec_u = __lsx_vld(src_u, 0);
+ vec_v = __lsx_vld(src_v, 0);
vec_yl = __lsx_vilvl_b(vec_y, vec_y);
vec_ul = __lsx_vilvl_b(zero, vec_u);
vec_vl = __lsx_vilvl_b(zero, vec_v);
- I444TORGB(vec_yl, vec_ul, vec_vl, vec_ub, vec_vr, vec_ugvg,
- vec_yg, vec_yb, out_b, out_g, out_r);
+ I444TORGB(vec_yl, vec_ul, vec_vl, vec_ub, vec_vr, vec_ugvg, vec_yg, vec_yb,
+ out_b, out_g, out_r);
STOREARGB(alpha, out_r, out_g, out_b, dst_argb);
vec_yh = __lsx_vilvh_b(vec_y, vec_y);
vec_uh = __lsx_vilvh_b(zero, vec_u);
vec_vh = __lsx_vilvh_b(zero, vec_v);
- I444TORGB(vec_yh, vec_uh, vec_vh, vec_ub, vec_vr, vec_ugvg,
- vec_yg, vec_yb, out_b, out_g, out_r);
+ I444TORGB(vec_yh, vec_uh, vec_vh, vec_ub, vec_vr, vec_ugvg, vec_yg, vec_yb,
+ out_b, out_g, out_r);
STOREARGB(alpha, out_r, out_g, out_b, dst_argb);
src_y += 16;
src_u += 16;
@@ -1283,37 +1294,37 @@ void I400ToARGBRow_LSX(const uint8_t* src_y,
for (x = 0; x < len; x++) {
vec_y = __lsx_vld(src_y, 0);
vec_yl = __lsx_vilvl_b(vec_y, vec_y);
- y_ev = __lsx_vmulwev_w_hu_h(vec_yl, vec_yg);
- y_od = __lsx_vmulwod_w_hu_h(vec_yl, vec_yg);
- y_ev = __lsx_vsrai_w(y_ev, 16);
- y_od = __lsx_vsrai_w(y_od, 16);
- y_ev = __lsx_vadd_w(y_ev, vec_yb);
- y_od = __lsx_vadd_w(y_od, vec_yb);
- y_ev = __lsx_vsrai_w(y_ev, 6);
- y_od = __lsx_vsrai_w(y_od, 6);
- y_ev = __lsx_vclip255_w(y_ev);
- y_od = __lsx_vclip255_w(y_od);
- out0 = __lsx_vpackev_h(y_od, y_ev);
+ y_ev = __lsx_vmulwev_w_hu_h(vec_yl, vec_yg);
+ y_od = __lsx_vmulwod_w_hu_h(vec_yl, vec_yg);
+ y_ev = __lsx_vsrai_w(y_ev, 16);
+ y_od = __lsx_vsrai_w(y_od, 16);
+ y_ev = __lsx_vadd_w(y_ev, vec_yb);
+ y_od = __lsx_vadd_w(y_od, vec_yb);
+ y_ev = __lsx_vsrai_w(y_ev, 6);
+ y_od = __lsx_vsrai_w(y_od, 6);
+ y_ev = __lsx_vclip255_w(y_ev);
+ y_od = __lsx_vclip255_w(y_od);
+ out0 = __lsx_vpackev_h(y_od, y_ev);
temp0 = __lsx_vpackev_b(out0, out0);
temp1 = __lsx_vpackev_b(alpha, out0);
- dst0 = __lsx_vilvl_h(temp1, temp0);
- dst1 = __lsx_vilvh_h(temp1, temp0);
+ dst0 = __lsx_vilvl_h(temp1, temp0);
+ dst1 = __lsx_vilvh_h(temp1, temp0);
vec_yh = __lsx_vilvh_b(vec_y, vec_y);
- y_ev = __lsx_vmulwev_w_hu_h(vec_yh, vec_yg);
- y_od = __lsx_vmulwod_w_hu_h(vec_yh, vec_yg);
- y_ev = __lsx_vsrai_w(y_ev, 16);
- y_od = __lsx_vsrai_w(y_od, 16);
- y_ev = __lsx_vadd_w(y_ev, vec_yb);
- y_od = __lsx_vadd_w(y_od, vec_yb);
- y_ev = __lsx_vsrai_w(y_ev, 6);
- y_od = __lsx_vsrai_w(y_od, 6);
- y_ev = __lsx_vclip255_w(y_ev);
- y_od = __lsx_vclip255_w(y_od);
- out0 = __lsx_vpackev_h(y_od, y_ev);
+ y_ev = __lsx_vmulwev_w_hu_h(vec_yh, vec_yg);
+ y_od = __lsx_vmulwod_w_hu_h(vec_yh, vec_yg);
+ y_ev = __lsx_vsrai_w(y_ev, 16);
+ y_od = __lsx_vsrai_w(y_od, 16);
+ y_ev = __lsx_vadd_w(y_ev, vec_yb);
+ y_od = __lsx_vadd_w(y_od, vec_yb);
+ y_ev = __lsx_vsrai_w(y_ev, 6);
+ y_od = __lsx_vsrai_w(y_od, 6);
+ y_ev = __lsx_vclip255_w(y_ev);
+ y_od = __lsx_vclip255_w(y_od);
+ out0 = __lsx_vpackev_h(y_od, y_ev);
temp0 = __lsx_vpackev_b(out0, out0);
temp1 = __lsx_vpackev_b(alpha, out0);
- dst2 = __lsx_vilvl_h(temp1, temp0);
- dst3 = __lsx_vilvh_h(temp1, temp0);
+ dst2 = __lsx_vilvl_h(temp1, temp0);
+ dst3 = __lsx_vilvh_h(temp1, temp0);
__lsx_vst(dst0, dst_argb, 0);
__lsx_vst(dst1, dst_argb, 16);
__lsx_vst(dst2, dst_argb, 32);
@@ -1360,7 +1371,7 @@ void YUY2ToARGBRow_LSX(const uint8_t* src_yuy2,
__m128i vec_vrub, vec_vgug;
__m128i out_b, out_g, out_r;
__m128i const_80 = __lsx_vldi(0x480);
- __m128i zero = __lsx_vldi(0);
+ __m128i zero = __lsx_vldi(0);
__m128i alpha = __lsx_vldi(0xFF);
YUVTORGB_SETUP(yuvconstants, vec_vr, vec_ub, vec_vg, vec_ug, vec_yg, vec_yb);
@@ -1369,10 +1380,10 @@ void YUY2ToARGBRow_LSX(const uint8_t* src_yuy2,
for (x = 0; x < len; x++) {
src0 = __lsx_vld(src_yuy2, 0);
- vec_y = __lsx_vpickev_b(src0, src0);
+ vec_y = __lsx_vpickev_b(src0, src0);
vec_vu = __lsx_vpickod_b(src0, src0);
- YUVTORGB(vec_y, vec_vu, vec_vrub, vec_vgug, vec_yg, vec_yb,
- out_b, out_g, out_r);
+ YUVTORGB(vec_y, vec_vu, vec_vrub, vec_vgug, vec_yg, vec_yb, out_b, out_g,
+ out_r);
STOREARGB(alpha, out_r, out_g, out_b, dst_argb);
src_yuy2 += 16;
}
@@ -1389,7 +1400,7 @@ void UYVYToARGBRow_LSX(const uint8_t* src_uyvy,
__m128i vec_vrub, vec_vgug;
__m128i out_b, out_g, out_r;
__m128i const_80 = __lsx_vldi(0x480);
- __m128i zero = __lsx_vldi(0);
+ __m128i zero = __lsx_vldi(0);
__m128i alpha = __lsx_vldi(0xFF);
YUVTORGB_SETUP(yuvconstants, vec_vr, vec_ub, vec_vg, vec_ug, vec_yg, vec_yb);
@@ -1398,10 +1409,10 @@ void UYVYToARGBRow_LSX(const uint8_t* src_uyvy,
for (x = 0; x < len; x++) {
src0 = __lsx_vld(src_uyvy, 0);
- vec_y = __lsx_vpickod_b(src0, src0);
+ vec_y = __lsx_vpickod_b(src0, src0);
vec_vu = __lsx_vpickev_b(src0, src0);
- YUVTORGB(vec_y, vec_vu, vec_vrub, vec_vgug, vec_yg, vec_yb,
- out_b, out_g, out_r);
+ YUVTORGB(vec_y, vec_vu, vec_vrub, vec_vgug, vec_yg, vec_yb, out_b, out_g,
+ out_r);
STOREARGB(alpha, out_r, out_g, out_b, dst_argb);
src_uyvy += 16;
}
@@ -1535,8 +1546,8 @@ void ARGBExtractAlphaRow_LSX(const uint8_t* src_argb,
__m128i src0, src1, src2, src3, tmp0, tmp1, dst0;
for (x = 0; x < len; x++) {
- DUP4_ARG2(__lsx_vld, src_argb, 0, src_argb, 16, src_argb, 32,
- src_argb, 48, src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vld, src_argb, 0, src_argb, 16, src_argb, 32, src_argb, 48,
+ src0, src1, src2, src3);
tmp0 = __lsx_vpickod_b(src1, src0);
tmp1 = __lsx_vpickod_b(src3, src2);
dst0 = __lsx_vpickod_b(tmp1, tmp0);
@@ -1562,22 +1573,22 @@ void ARGBBlendRow_LSX(const uint8_t* src_argb,
__m128i control = {0xFF000000FF000000, 0xFF000000FF000000};
for (x = 0; x < len; x++) {
- DUP4_ARG2(__lsx_vld, src_argb, 0, src_argb, 16,
- src_argb1, 0, src_argb1, 16, src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vld, src_argb, 0, src_argb, 16, src_argb1, 0, src_argb1, 16,
+ src0, src1, src2, src3);
tmp0 = __lsx_vshuf4i_b(src0, 0xFF);
tmp1 = __lsx_vshuf4i_b(src1, 0xFF);
- a0 = __lsx_vilvl_b(zero, tmp0);
- a1 = __lsx_vilvh_b(zero, tmp0);
- a2 = __lsx_vilvl_b(zero, tmp1);
- a3 = __lsx_vilvh_b(zero, tmp1);
+ a0 = __lsx_vilvl_b(zero, tmp0);
+ a1 = __lsx_vilvh_b(zero, tmp0);
+ a2 = __lsx_vilvl_b(zero, tmp1);
+ a3 = __lsx_vilvh_b(zero, tmp1);
reg0 = __lsx_vilvl_b(zero, src2);
reg1 = __lsx_vilvh_b(zero, src2);
reg2 = __lsx_vilvl_b(zero, src3);
reg3 = __lsx_vilvh_b(zero, src3);
DUP4_ARG2(__lsx_vsub_h, const_256, a0, const_256, a1, const_256, a2,
const_256, a3, a0, a1, a2, a3);
- DUP4_ARG2(__lsx_vmul_h, a0, reg0, a1, reg1, a2, reg2, a3, reg3,
- reg0, reg1, reg2, reg3);
+ DUP4_ARG2(__lsx_vmul_h, a0, reg0, a1, reg1, a2, reg2, a3, reg3, reg0, reg1,
+ reg2, reg3);
DUP2_ARG3(__lsx_vsrani_b_h, reg1, reg0, 8, reg3, reg2, 8, dst0, dst1);
dst0 = __lsx_vsadd_bu(dst0, src0);
dst1 = __lsx_vsadd_bu(dst1, src1);
@@ -1608,8 +1619,8 @@ void ARGBQuantizeRow_LSX(uint8_t* dst_argb,
__m128i control = {0xFF000000FF000000, 0xFF000000FF000000};
for (x = 0; x < len; x++) {
- DUP4_ARG2(__lsx_vld, dst_argb, 0, dst_argb, 16, dst_argb, 32,
- dst_argb, 48, src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vld, dst_argb, 0, dst_argb, 16, dst_argb, 32, dst_argb, 48,
+ src0, src1, src2, src3);
reg0 = __lsx_vilvl_b(zero, src0);
reg1 = __lsx_vilvh_b(zero, src0);
reg2 = __lsx_vilvl_b(zero, src1);
@@ -1652,10 +1663,10 @@ void ARGBQuantizeRow_LSX(uint8_t* dst_argb,
dst3 = __lsx_vpickev_b(reg3, reg2);
DUP4_ARG2(__lsx_vmul_b, dst0, vec_size, dst1, vec_size, dst2, vec_size,
dst3, vec_size, dst0, dst1, dst2, dst3);
- DUP4_ARG2(__lsx_vadd_b, dst0, vec_offset, dst1, vec_offset, dst2, vec_offset,
- dst3, vec_offset, dst0, dst1, dst2, dst3);
- DUP4_ARG3(__lsx_vbitsel_v, dst0, src0, control, dst1, src1, control,
- dst2, src2, control, dst3, src3, control, dst0, dst1, dst2, dst3);
+ DUP4_ARG2(__lsx_vadd_b, dst0, vec_offset, dst1, vec_offset, dst2,
+ vec_offset, dst3, vec_offset, dst0, dst1, dst2, dst3);
+ DUP4_ARG3(__lsx_vbitsel_v, dst0, src0, control, dst1, src1, control, dst2,
+ src2, control, dst3, src3, control, dst0, dst1, dst2, dst3);
__lsx_vst(dst0, dst_argb, 0);
__lsx_vst(dst1, dst_argb, 16);
__lsx_vst(dst2, dst_argb, 32);
@@ -1684,22 +1695,24 @@ void ARGBColorMatrixRow_LSX(const uint8_t* src_argb,
src0, matrix_a, tmp_b, tmp_g, tmp_r, tmp_a);
DUP4_ARG2(__lsx_vdp2_h_bu_b, src1, matrix_b, src1, matrix_g, src1, matrix_r,
src1, matrix_a, reg_b, reg_g, reg_r, reg_a);
- DUP4_ARG2(__lsx_vhaddw_w_h, tmp_b, tmp_b, tmp_g, tmp_g, tmp_r, tmp_r,
- tmp_a, tmp_a, tmp_b, tmp_g, tmp_r, tmp_a);
- DUP4_ARG2(__lsx_vhaddw_w_h, reg_b, reg_b, reg_g, reg_g, reg_r, reg_r,
- reg_a, reg_a, reg_b, reg_g, reg_r, reg_a);
- DUP4_ARG2(__lsx_vsrai_w, tmp_b, 6, tmp_g, 6, tmp_r, 6,
- tmp_a, 6, tmp_b, tmp_g, tmp_r, tmp_a);
- DUP4_ARG2(__lsx_vsrai_w, reg_b, 6, reg_g, 6, reg_r, 6,
- reg_a, 6, reg_b, reg_g, reg_r, reg_a);
- DUP4_ARG1(__lsx_vclip255_w, tmp_b, tmp_g, tmp_r, tmp_a, tmp_b, tmp_g, tmp_r, tmp_a)
- DUP4_ARG1(__lsx_vclip255_w, reg_b, reg_g, reg_r, reg_a, reg_b, reg_g, reg_r, reg_a)
- DUP4_ARG2(__lsx_vpickev_h, reg_b, tmp_b, reg_g, tmp_g, reg_r, tmp_r,
- reg_a, tmp_a, tmp_b, tmp_g, tmp_r, tmp_a);
- tmp0 = __lsx_vpackev_b(tmp_g, tmp_b);
- tmp1 = __lsx_vpackev_b(tmp_a, tmp_r);
- dst0 = __lsx_vilvl_h(tmp1, tmp0);
- dst1 = __lsx_vilvh_h(tmp1, tmp0);
+ DUP4_ARG2(__lsx_vhaddw_w_h, tmp_b, tmp_b, tmp_g, tmp_g, tmp_r, tmp_r, tmp_a,
+ tmp_a, tmp_b, tmp_g, tmp_r, tmp_a);
+ DUP4_ARG2(__lsx_vhaddw_w_h, reg_b, reg_b, reg_g, reg_g, reg_r, reg_r, reg_a,
+ reg_a, reg_b, reg_g, reg_r, reg_a);
+ DUP4_ARG2(__lsx_vsrai_w, tmp_b, 6, tmp_g, 6, tmp_r, 6, tmp_a, 6, tmp_b,
+ tmp_g, tmp_r, tmp_a);
+ DUP4_ARG2(__lsx_vsrai_w, reg_b, 6, reg_g, 6, reg_r, 6, reg_a, 6, reg_b,
+ reg_g, reg_r, reg_a);
+ DUP4_ARG1(__lsx_vclip255_w, tmp_b, tmp_g, tmp_r, tmp_a, tmp_b, tmp_g, tmp_r,
+ tmp_a)
+ DUP4_ARG1(__lsx_vclip255_w, reg_b, reg_g, reg_r, reg_a, reg_b, reg_g, reg_r,
+ reg_a)
+ DUP4_ARG2(__lsx_vpickev_h, reg_b, tmp_b, reg_g, tmp_g, reg_r, tmp_r, reg_a,
+ tmp_a, tmp_b, tmp_g, tmp_r, tmp_a);
+ tmp0 = __lsx_vpackev_b(tmp_g, tmp_b);
+ tmp1 = __lsx_vpackev_b(tmp_a, tmp_r);
+ dst0 = __lsx_vilvl_h(tmp1, tmp0);
+ dst1 = __lsx_vilvh_h(tmp1, tmp0);
__lsx_vst(dst0, dst_argb, 0);
__lsx_vst(dst1, dst_argb, 16);
src_argb += 32;
@@ -1717,8 +1730,8 @@ void SplitUVRow_LSX(const uint8_t* src_uv,
__m128i dst0, dst1, dst2, dst3;
for (x = 0; x < len; x++) {
- DUP4_ARG2(__lsx_vld, src_uv, 0, src_uv, 16, src_uv, 32,
- src_uv, 48, src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vld, src_uv, 0, src_uv, 16, src_uv, 32, src_uv, 48, src0,
+ src1, src2, src3);
DUP2_ARG2(__lsx_vpickev_b, src1, src0, src3, src2, dst0, dst1);
DUP2_ARG2(__lsx_vpickod_b, src1, src0, src3, src2, dst2, dst3);
__lsx_vst(dst0, dst_u, 0);
@@ -1756,10 +1769,10 @@ void MirrorSplitUVRow_LSX(const uint8_t* src_uv,
src_uv += (width << 1);
for (x = 0; x < len; x++) {
src_uv -= 64;
- DUP4_ARG2(__lsx_vld, src_uv, 0, src_uv, 16, src_uv, 32,
- src_uv, 48, src2, src3, src0, src1);
- DUP4_ARG3(__lsx_vshuf_b, src1, src0, shuff1, src3, src2, shuff1,
- src1, src0, shuff0, src3, src2, shuff0, dst0, dst1, dst2, dst3);
+ DUP4_ARG2(__lsx_vld, src_uv, 0, src_uv, 16, src_uv, 32, src_uv, 48, src2,
+ src3, src0, src1);
+ DUP4_ARG3(__lsx_vshuf_b, src1, src0, shuff1, src3, src2, shuff1, src1, src0,
+ shuff0, src3, src2, shuff0, dst0, dst1, dst2, dst3);
__lsx_vst(dst0, dst_v, 0);
__lsx_vst(dst1, dst_v, 16);
__lsx_vst(dst2, dst_u, 0);
@@ -1778,18 +1791,21 @@ void HalfFloatRow_LSX(const uint16_t* src,
float mult = 1.9259299444e-34f * scale;
__m128i src0, src1, src2, src3, dst0, dst1, dst2, dst3;
__m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
- __m128 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
+ __m128 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
__m128 vec_mult = (__m128)__lsx_vldrepl_w(&mult, 0);
__m128i zero = __lsx_vldi(0);
for (x = 0; x < len; x++) {
- DUP4_ARG2(__lsx_vld, src, 0, src, 16, src, 32, src, 48, src0, src1, src2, src3);
- DUP4_ARG2(__lsx_vilvl_h, zero, src0, zero, src1, zero, src2,
- zero, src3, tmp0, tmp2, tmp4, tmp6);
- DUP4_ARG2(__lsx_vilvh_h, zero, src0, zero, src1, zero, src2,
- zero, src3, tmp1, tmp3, tmp5, tmp7);
- DUP4_ARG1(__lsx_vffint_s_wu, tmp0, tmp2, tmp4, tmp6, reg0, reg2, reg4, reg6);
- DUP4_ARG1(__lsx_vffint_s_wu, tmp1, tmp3, tmp5, tmp7, reg1, reg3, reg5, reg7);
+ DUP4_ARG2(__lsx_vld, src, 0, src, 16, src, 32, src, 48, src0, src1, src2,
+ src3);
+ DUP4_ARG2(__lsx_vilvl_h, zero, src0, zero, src1, zero, src2, zero, src3,
+ tmp0, tmp2, tmp4, tmp6);
+ DUP4_ARG2(__lsx_vilvh_h, zero, src0, zero, src1, zero, src2, zero, src3,
+ tmp1, tmp3, tmp5, tmp7);
+ DUP4_ARG1(__lsx_vffint_s_wu, tmp0, tmp2, tmp4, tmp6, reg0, reg2, reg4,
+ reg6);
+ DUP4_ARG1(__lsx_vffint_s_wu, tmp1, tmp3, tmp5, tmp7, reg1, reg3, reg5,
+ reg7);
DUP4_ARG2(__lsx_vfmul_s, reg0, vec_mult, reg1, vec_mult, reg2, vec_mult,
reg3, vec_mult, reg0, reg1, reg2, reg3);
DUP4_ARG2(__lsx_vfmul_s, reg4, vec_mult, reg5, vec_mult, reg6, vec_mult,
@@ -1798,8 +1814,8 @@ void HalfFloatRow_LSX(const uint16_t* src,
(v4u32)reg3, 13, tmp0, tmp1, tmp2, tmp3);
DUP4_ARG2(__lsx_vsrli_w, (v4u32)reg4, 13, (v4u32)reg5, 13, (v4u32)reg6, 13,
(v4u32)reg7, 13, tmp4, tmp5, tmp6, tmp7);
- DUP4_ARG2(__lsx_vpickev_h, tmp1, tmp0, tmp3, tmp2, tmp5, tmp4,
- tmp7, tmp6, dst0, dst1, dst2, dst3);
+ DUP4_ARG2(__lsx_vpickev_h, tmp1, tmp0, tmp3, tmp2, tmp5, tmp4, tmp7, tmp6,
+ dst0, dst1, dst2, dst3);
__lsx_vst(dst0, dst, 0);
__lsx_vst(dst1, dst, 16);
__lsx_vst(dst2, dst, 32);
diff --git a/source/row_neon64.cc b/source/row_neon64.cc
index 92af20d2..1d1f9bb1 100644
--- a/source/row_neon64.cc
+++ b/source/row_neon64.cc
@@ -616,13 +616,13 @@ void DetileRow_NEON(const uint8_t* src,
"1: \n"
"ld1 {v0.16b}, [%0], %3 \n" // load 16 bytes
"subs %w2, %w2, #16 \n" // 16 processed per loop
- "prfm pldl1keep, [%0, 448] \n"
+ "prfm pldl1keep, [%0, 1792] \n" // 7 tiles of 256b ahead
"st1 {v0.16b}, [%1], #16 \n" // store 16 bytes
"b.gt 1b \n"
- : "+r"(src), // %0
- "+r"(dst), // %1
- "+r"(width) // %2
- : "r"(src_tile_stride) // %3
+ : "+r"(src), // %0
+ "+r"(dst), // %1
+ "+r"(width) // %2
+ : "r"(src_tile_stride) // %3
: "cc", "memory", "v0" // Clobber List
);
}
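
Note (not part of the patch): the functional change in this hunk is the prefetch distance, raised from 448 to 1792 bytes, i.e. seven 256-byte tiles ahead of the load pointer; the constraint lines only gained alignment. For orientation, the detile loop copies 16 contiguous bytes per tile and advances the source by the tile stride. A hedged scalar sketch of that inner loop (not the patch's NEON code):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

// Copy one destination row out of a tiled source: 16 bytes per tile,
// stepping src by the tile stride between tiles.
static void DetileRowSketch(const uint8_t* src, ptrdiff_t src_tile_stride,
                            uint8_t* dst, int width) {
  for (int x = 0; x < width; x += 16) {
    memcpy(dst, src, 16);
    dst += 16;
    src += src_tile_stride;
  }
}
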
diff --git a/source/scale_argb.cc b/source/scale_argb.cc
index 320a8442..3238df05 100644
--- a/source/scale_argb.cc
+++ b/source/scale_argb.cc
@@ -631,7 +631,8 @@ static void ScaleYUVToARGBBilinearUp(int src_width,
}
#endif
#if defined(HAS_I422TOARGBROW_AVX512BW)
- if (TestCpuFlag(kCpuHasAVX512BW | kCpuHasAVX512VL) == (kCpuHasAVX512BW | kCpuHasAVX512VL)) {
+ if (TestCpuFlag(kCpuHasAVX512BW | kCpuHasAVX512VL) ==
+ (kCpuHasAVX512BW | kCpuHasAVX512VL)) {
I422ToARGBRow = I422ToARGBRow_Any_AVX512BW;
if (IS_ALIGNED(src_width, 32)) {
I422ToARGBRow = I422ToARGBRow_AVX512BW;
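The reflowed condition above is the usual pattern for requiring two CPU features at once: TestCpuFlag returns the subset of the requested bits that are set, so the result equals the OR'd mask only when both AVX512BW and AVX512VL are available. A small sketch of that check (the wrapper name is hypothetical):

#include "libyuv/cpu_id.h"

// Nonzero only when both AVX512BW and AVX512VL are reported by the CPU.
static int HasAvx512BwAndVl(void) {
  const int mask = kCpuHasAVX512BW | kCpuHasAVX512VL;
  return TestCpuFlag(mask) == mask;
}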
diff --git a/source/scale_lsx.cc b/source/scale_lsx.cc
index d8181b3e..bfe5e9fb 100644
--- a/source/scale_lsx.cc
+++ b/source/scale_lsx.cc
@@ -22,15 +22,15 @@ namespace libyuv {
extern "C" {
#endif
-#define LOAD_DATA(_src, _in, _out) \
- { \
- int _tmp1, _tmp2, _tmp3, _tmp4; \
- DUP4_ARG2(__lsx_vpickve2gr_w, _in, 0, _in, 1, _in, 2, \
- _in, 3, _tmp1, _tmp2, _tmp3, _tmp4); \
- _out = __lsx_vinsgr2vr_w(_out, _src[_tmp1], 0); \
- _out = __lsx_vinsgr2vr_w(_out, _src[_tmp2], 1); \
- _out = __lsx_vinsgr2vr_w(_out, _src[_tmp3], 2); \
- _out = __lsx_vinsgr2vr_w(_out, _src[_tmp4], 3); \
+#define LOAD_DATA(_src, _in, _out) \
+ { \
+ int _tmp1, _tmp2, _tmp3, _tmp4; \
+ DUP4_ARG2(__lsx_vpickve2gr_w, _in, 0, _in, 1, _in, 2, _in, 3, _tmp1, \
+ _tmp2, _tmp3, _tmp4); \
+ _out = __lsx_vinsgr2vr_w(_out, _src[_tmp1], 0); \
+ _out = __lsx_vinsgr2vr_w(_out, _src[_tmp2], 1); \
+ _out = __lsx_vinsgr2vr_w(_out, _src[_tmp3], 2); \
+ _out = __lsx_vinsgr2vr_w(_out, _src[_tmp4], 3); \
}
void ScaleARGBRowDown2_LSX(const uint8_t* src_argb,
@@ -157,8 +157,8 @@ void ScaleARGBRowDownEvenBox_LSX(const uint8_t* src_argb,
next_argb += stepx;
tmp7 = __lsx_vldrepl_d(next_argb, 0);
next_argb += stepx;
- DUP4_ARG2(__lsx_vilvl_d, tmp1, tmp0, tmp3, tmp2, tmp5, tmp4,
- tmp7, tmp6, src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vilvl_d, tmp1, tmp0, tmp3, tmp2, tmp5, tmp4, tmp7, tmp6,
+ src0, src1, src2, src3);
DUP2_ARG2(__lsx_vaddwev_h_bu, src0, src2, src1, src3, tmp0, tmp2);
DUP2_ARG2(__lsx_vaddwod_h_bu, src0, src2, src1, src3, tmp1, tmp3);
DUP2_ARG2(__lsx_vpackev_w, tmp1, tmp0, tmp3, tmp2, reg0, reg1);
@@ -181,8 +181,8 @@ void ScaleRowDown2_LSX(const uint8_t* src_ptr,
(void)src_stride;
for (x = 0; x < len; x++) {
- DUP4_ARG2(__lsx_vld, src_ptr, 0, src_ptr, 16, src_ptr, 32, src_ptr,
- 48, src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vld, src_ptr, 0, src_ptr, 16, src_ptr, 32, src_ptr, 48,
+ src0, src1, src2, src3);
DUP2_ARG2(__lsx_vpickod_b, src1, src0, src3, src2, dst0, dst1);
__lsx_vst(dst0, dst, 0);
__lsx_vst(dst1, dst, 16);
@@ -201,9 +201,9 @@ void ScaleRowDown2Linear_LSX(const uint8_t* src_ptr,
__m128i tmp0, tmp1, tmp2, tmp3, dst0, dst1;
(void)src_stride;
- for(x = 0; x < len; x++) {
- DUP4_ARG2(__lsx_vld, src_ptr, 0, src_ptr, 16, src_ptr, 32, src_ptr,
- 48, src0, src1, src2, src3);
+ for (x = 0; x < len; x++) {
+ DUP4_ARG2(__lsx_vld, src_ptr, 0, src_ptr, 16, src_ptr, 32, src_ptr, 48,
+ src0, src1, src2, src3);
DUP2_ARG2(__lsx_vpickev_b, src1, src0, src3, src2, tmp0, tmp2);
DUP2_ARG2(__lsx_vpickod_b, src1, src0, src3, src2, tmp1, tmp3);
DUP2_ARG2(__lsx_vavgr_bu, tmp0, tmp1, tmp2, tmp3, dst0, dst1);
@@ -220,20 +220,20 @@ void ScaleRowDown2Box_LSX(const uint8_t* src_ptr,
int dst_width) {
int x;
int len = dst_width / 32;
- const uint8_t *src_nex = src_ptr + src_stride;
+ const uint8_t* src_nex = src_ptr + src_stride;
__m128i src0, src1, src2, src3, src4, src5, src6, src7;
__m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
__m128i dst0, dst1;
for (x = 0; x < len; x++) {
- DUP4_ARG2(__lsx_vld, src_ptr, 0, src_ptr, 16, src_ptr, 32, src_ptr,
- 48, src0, src1, src2, src3);
- DUP4_ARG2(__lsx_vld, src_nex, 0, src_nex, 16, src_nex, 32, src_nex,
- 48, src4, src5, src6, src7);
- DUP4_ARG2(__lsx_vaddwev_h_bu, src0, src4, src1, src5, src2, src6, src3, src7,
- tmp0, tmp2, tmp4, tmp6);
- DUP4_ARG2(__lsx_vaddwod_h_bu, src0, src4, src1, src5, src2, src6, src3, src7,
- tmp1, tmp3, tmp5, tmp7);
+ DUP4_ARG2(__lsx_vld, src_ptr, 0, src_ptr, 16, src_ptr, 32, src_ptr, 48,
+ src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vld, src_nex, 0, src_nex, 16, src_nex, 32, src_nex, 48,
+ src4, src5, src6, src7);
+ DUP4_ARG2(__lsx_vaddwev_h_bu, src0, src4, src1, src5, src2, src6, src3,
+ src7, tmp0, tmp2, tmp4, tmp6);
+ DUP4_ARG2(__lsx_vaddwod_h_bu, src0, src4, src1, src5, src2, src6, src3,
+ src7, tmp1, tmp3, tmp5, tmp7);
DUP4_ARG2(__lsx_vadd_h, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7,
tmp0, tmp1, tmp2, tmp3);
DUP2_ARG3(__lsx_vsrarni_b_h, tmp1, tmp0, 2, tmp3, tmp2, 2, dst0, dst1);
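The ScaleRowDown2Box hunk above averages 2x2 blocks: the widening even/odd adds sum each block, and __lsx_vsrarni_b_h with a shift of 2 applies the rounded divide by 4. A scalar sketch of the per-pixel arithmetic, for illustration:

#include <stddef.h>
#include <stdint.h>

// dst[x] is the rounded mean of the 2x2 source block starting at column 2*x.
static void ScaleRowDown2BoxSketch(const uint8_t* src_ptr,
                                   ptrdiff_t src_stride,
                                   uint8_t* dst,
                                   int dst_width) {
  const uint8_t* s = src_ptr;
  const uint8_t* t = src_ptr + src_stride;  // next source row
  int x;
  for (x = 0; x < dst_width; ++x) {
    dst[x] = (uint8_t)((s[0] + s[1] + t[0] + t[1] + 2) >> 2);
    s += 2;
    t += 2;
  }
}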
@@ -255,8 +255,8 @@ void ScaleRowDown4_LSX(const uint8_t* src_ptr,
(void)src_stride;
for (x = 0; x < len; x++) {
- DUP4_ARG2(__lsx_vld, src_ptr, 0, src_ptr, 16, src_ptr, 32, src_ptr,
- 48, src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vld, src_ptr, 0, src_ptr, 16, src_ptr, 32, src_ptr, 48,
+ src0, src1, src2, src3);
DUP2_ARG2(__lsx_vpickev_b, src1, src0, src3, src2, tmp0, tmp1);
dst0 = __lsx_vpickod_b(tmp1, tmp0);
__lsx_vst(dst0, dst, 0);
@@ -279,30 +279,30 @@ void ScaleRowDown4Box_LSX(const uint8_t* src_ptr,
__m128i reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, dst0;
for (x = 0; x < len; x++) {
- DUP4_ARG2(__lsx_vld, src_ptr, 0, src_ptr, 16, src_ptr, 32, src_ptr,
- 48, src0, src1, src2, src3);
- DUP4_ARG2(__lsx_vld, ptr1, 0, ptr1, 16, ptr1, 32, ptr1, 48,
- src4, src5, src6, src7);
- DUP4_ARG2(__lsx_vaddwev_h_bu, src0, src4, src1, src5, src2, src6, src3, src7,
- tmp0, tmp2, tmp4, tmp6);
- DUP4_ARG2(__lsx_vaddwod_h_bu, src0, src4, src1, src5, src2, src6, src3, src7,
- tmp1, tmp3, tmp5, tmp7);
+ DUP4_ARG2(__lsx_vld, src_ptr, 0, src_ptr, 16, src_ptr, 32, src_ptr, 48,
+ src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vld, ptr1, 0, ptr1, 16, ptr1, 32, ptr1, 48, src4, src5,
+ src6, src7);
+ DUP4_ARG2(__lsx_vaddwev_h_bu, src0, src4, src1, src5, src2, src6, src3,
+ src7, tmp0, tmp2, tmp4, tmp6);
+ DUP4_ARG2(__lsx_vaddwod_h_bu, src0, src4, src1, src5, src2, src6, src3,
+ src7, tmp1, tmp3, tmp5, tmp7);
DUP4_ARG2(__lsx_vadd_h, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7,
reg0, reg1, reg2, reg3);
- DUP4_ARG2(__lsx_vld, ptr2, 0, ptr2, 16, ptr2, 32, ptr2, 48,
- src0, src1, src2, src3);
- DUP4_ARG2(__lsx_vld, ptr3, 0, ptr3, 16, ptr3, 32, ptr3, 48,
- src4, src5, src6, src7);
- DUP4_ARG2(__lsx_vaddwev_h_bu, src0, src4, src1, src5, src2, src6, src3, src7,
- tmp0, tmp2, tmp4, tmp6);
- DUP4_ARG2(__lsx_vaddwod_h_bu, src0, src4, src1, src5, src2, src6, src3, src7,
- tmp1, tmp3, tmp5, tmp7);
+ DUP4_ARG2(__lsx_vld, ptr2, 0, ptr2, 16, ptr2, 32, ptr2, 48, src0, src1,
+ src2, src3);
+ DUP4_ARG2(__lsx_vld, ptr3, 0, ptr3, 16, ptr3, 32, ptr3, 48, src4, src5,
+ src6, src7);
+ DUP4_ARG2(__lsx_vaddwev_h_bu, src0, src4, src1, src5, src2, src6, src3,
+ src7, tmp0, tmp2, tmp4, tmp6);
+ DUP4_ARG2(__lsx_vaddwod_h_bu, src0, src4, src1, src5, src2, src6, src3,
+ src7, tmp1, tmp3, tmp5, tmp7);
DUP4_ARG2(__lsx_vadd_h, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7,
reg4, reg5, reg6, reg7);
DUP4_ARG2(__lsx_vadd_h, reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7,
reg0, reg1, reg2, reg3);
- DUP4_ARG2(__lsx_vhaddw_wu_hu, reg0, reg0, reg1, reg1, reg2, reg2, reg3, reg3,
- reg0, reg1, reg2, reg3);
+ DUP4_ARG2(__lsx_vhaddw_wu_hu, reg0, reg0, reg1, reg1, reg2, reg2, reg3,
+ reg3, reg0, reg1, reg2, reg3);
DUP2_ARG3(__lsx_vsrarni_h_w, reg1, reg0, 4, reg3, reg2, 4, tmp0, tmp1);
dst0 = __lsx_vpickev_b(tmp1, tmp0);
__lsx_vst(dst0, dst, 0);
@@ -353,8 +353,8 @@ void ScaleRowDown38_2_Box_LSX(const uint8_t* src_ptr,
len = dst_width / 12;
for (x = 0; x < len; x++) {
- DUP4_ARG2(__lsx_vld, src_ptr, 0, src_ptr, 16, src_nex, 0, src_nex,
- 16, src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vld, src_ptr, 0, src_ptr, 16, src_nex, 0, src_nex, 16, src0,
+ src1, src2, src3);
DUP2_ARG2(__lsx_vaddwev_h_bu, src0, src2, src1, src3, tmp0, tmp2);
DUP2_ARG2(__lsx_vaddwod_h_bu, src0, src2, src1, src3, tmp1, tmp3);
DUP2_ARG2(__lsx_vpickev_h, tmp2, tmp0, tmp3, tmp1, reg0, reg1);
@@ -394,15 +394,15 @@ void ScaleRowDown38_3_Box_LSX(const uint8_t* src_ptr,
len = dst_width / 12;
for (x = 0; x < len; x++) {
- DUP4_ARG2(__lsx_vld, src_ptr, 0, src_ptr, 16, ptr1, 0, ptr1, 16,
- src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vld, src_ptr, 0, src_ptr, 16, ptr1, 0, ptr1, 16, src0, src1,
+ src2, src3);
DUP2_ARG2(__lsx_vld, ptr2, 0, ptr2, 16, src4, src5);
DUP2_ARG2(__lsx_vaddwev_h_bu, src0, src2, src1, src3, tmp0, tmp2);
DUP2_ARG2(__lsx_vaddwod_h_bu, src0, src2, src1, src3, tmp1, tmp3);
DUP2_ARG2(__lsx_vpackev_b, zero, src4, zero, src5, tmp4, tmp6);
DUP2_ARG2(__lsx_vpackod_b, zero, src4, zero, src5, tmp5, tmp7);
- DUP4_ARG2(__lsx_vadd_h, tmp0, tmp4, tmp1, tmp5, tmp2, tmp6, tmp3,
- tmp7, tmp0, tmp1, tmp2, tmp3);
+ DUP4_ARG2(__lsx_vadd_h, tmp0, tmp4, tmp1, tmp5, tmp2, tmp6, tmp3, tmp7,
+ tmp0, tmp1, tmp2, tmp3);
DUP2_ARG2(__lsx_vpickev_h, tmp2, tmp0, tmp3, tmp1, reg0, reg1);
DUP2_ARG2(__lsx_vpackod_h, tmp1, tmp0, tmp3, tmp2, reg2, reg3);
tmp4 = __lsx_vpickev_w(reg3, reg2);
@@ -476,28 +476,28 @@ void ScaleFilterCols_LSX(uint8_t* dst_ptr,
tmp3 = __lsx_vsrai_w(vec_x, 16);
tmp7 = __lsx_vand_v(vec_x, const1);
vec_x = __lsx_vadd_w(vec_x, vec1);
- DUP4_ARG2(__lsx_vsrai_w, tmp4, 9, tmp5, 9, tmp6, 9, tmp7, 9,
- tmp4, tmp5, tmp6, tmp7);
+ DUP4_ARG2(__lsx_vsrai_w, tmp4, 9, tmp5, 9, tmp6, 9, tmp7, 9, tmp4, tmp5,
+ tmp6, tmp7);
LOAD_DATA(src_ptr, tmp0, reg0);
LOAD_DATA(src_ptr, tmp1, reg1);
LOAD_DATA(src_ptr, tmp2, reg2);
LOAD_DATA(src_ptr, tmp3, reg3);
- DUP4_ARG2(__lsx_vaddi_wu, tmp0, 1, tmp1, 1, tmp2, 1, tmp3, 1,
- tmp0, tmp1, tmp2, tmp3);
+ DUP4_ARG2(__lsx_vaddi_wu, tmp0, 1, tmp1, 1, tmp2, 1, tmp3, 1, tmp0, tmp1,
+ tmp2, tmp3);
LOAD_DATA(src_ptr, tmp0, reg4);
LOAD_DATA(src_ptr, tmp1, reg5);
LOAD_DATA(src_ptr, tmp2, reg6);
LOAD_DATA(src_ptr, tmp3, reg7);
- DUP4_ARG2(__lsx_vsub_w, reg4, reg0, reg5, reg1, reg6, reg2, reg7,
- reg3, reg4, reg5, reg6, reg7);
- DUP4_ARG2(__lsx_vmul_w, reg4, tmp4, reg5, tmp5, reg6, tmp6, reg7,
- tmp7, reg4, reg5, reg6, reg7);
- DUP4_ARG2(__lsx_vadd_w, reg4, const2, reg5, const2, reg6, const2,
- reg7, const2, reg4, reg5, reg6, reg7);
- DUP4_ARG2(__lsx_vsrai_w, reg4, 7, reg5, 7, reg6, 7, reg7, 7,
+ DUP4_ARG2(__lsx_vsub_w, reg4, reg0, reg5, reg1, reg6, reg2, reg7, reg3,
+ reg4, reg5, reg6, reg7);
+ DUP4_ARG2(__lsx_vmul_w, reg4, tmp4, reg5, tmp5, reg6, tmp6, reg7, tmp7,
reg4, reg5, reg6, reg7);
- DUP4_ARG2(__lsx_vadd_w, reg0, reg4, reg1, reg5, reg2, reg6, reg3,
- reg7, reg0, reg1, reg2, reg3);
+ DUP4_ARG2(__lsx_vadd_w, reg4, const2, reg5, const2, reg6, const2, reg7,
+ const2, reg4, reg5, reg6, reg7);
+ DUP4_ARG2(__lsx_vsrai_w, reg4, 7, reg5, 7, reg6, 7, reg7, 7, reg4, reg5,
+ reg6, reg7);
+ DUP4_ARG2(__lsx_vadd_w, reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7,
+ reg0, reg1, reg2, reg3);
DUP2_ARG2(__lsx_vpickev_h, reg1, reg0, reg3, reg2, tmp0, tmp1);
dst0 = __lsx_vpickev_b(tmp1, tmp0);
__lsx_vst(dst0, dst_ptr, 0);
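ScaleFilterCols above gathers byte pairs through LOAD_DATA and blends them with a 7-bit fraction taken from the 16.16 x accumulator. A per-pixel sketch of that fixed-point lerp; const1 is assumed to mask the low 16 fraction bits and const2 to be the rounding term 64, since both are defined outside this hunk:

#include <stdint.h>

// One output pixel: a + (((b - a) * frac7 + 64) >> 7), where frac7 is the
// top 7 bits of the 16-bit x fraction. The +64 rounding term stands in for
// const2 and is an assumption.
static uint8_t FilterColSketch(const uint8_t* src, int x /* 16.16 fixed point */) {
  int xi = x >> 16;            // __lsx_vsrai_w(vec_x, 16)
  int f7 = (x & 0xffff) >> 9;  // vand with const1, then __lsx_vsrai_w by 9
  int a = src[xi];             // LOAD_DATA at the index
  int b = src[xi + 1];         // LOAD_DATA after __lsx_vaddi_wu(..., 1)
  return (uint8_t)(a + (((b - a) * f7 + 64) >> 7));
}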
@@ -598,7 +598,8 @@ void ScaleRowDown34_LSX(const uint8_t* src_ptr,
for (x = 0; x < dst_width; x += 48) {
DUP4_ARG2(__lsx_vld, src_ptr, 0, src_ptr, 16, src_ptr, 32, src_ptr, 48,
src0, src1, src2, src3);
- DUP2_ARG3(__lsx_vshuf_b, src1, src0, shuff0, src2, src1, shuff1, dst0, dst1);
+ DUP2_ARG3(__lsx_vshuf_b, src1, src0, shuff0, src2, src1, shuff1, dst0,
+ dst1);
dst2 = __lsx_vshuf_b(src3, src2, shuff2);
__lsx_vst(dst0, dst, 0);
__lsx_vst(dst1, dst, 16);
@@ -644,16 +645,16 @@ void ScaleRowDown34_0_Box_LSX(const uint8_t* src_ptr,
const0, src0, src1, src2, src3);
DUP4_ARG2(__lsx_vdp2_h_bu, tmp4, const1, tmp5, const2, tmp6, const0, tmp7,
const1, src4, src5, src6, src7);
- DUP4_ARG2(__lsx_vdp2_h_bu, tmp8, const2, tmp9, const0, tmp10, const1,
- tmp11, const2, tmp0, tmp1, tmp2, tmp3);
+ DUP4_ARG2(__lsx_vdp2_h_bu, tmp8, const2, tmp9, const0, tmp10, const1, tmp11,
+ const2, tmp0, tmp1, tmp2, tmp3);
DUP4_ARG2(__lsx_vsrar_h, src0, shift0, src1, shift1, src2, shift2, src3,
shift0, src0, src1, src2, src3);
DUP4_ARG2(__lsx_vsrar_h, src4, shift1, src5, shift2, src6, shift0, src7,
shift1, src4, src5, src6, src7);
DUP4_ARG2(__lsx_vsrar_h, tmp0, shift2, tmp1, shift0, tmp2, shift1, tmp3,
shift2, tmp0, tmp1, tmp2, tmp3);
- DUP4_ARG2(__lsx_vslli_h, src0, 1, src1, 1, src2, 1, src3, 1,
- tmp5, tmp6, tmp7, tmp8);
+ DUP4_ARG2(__lsx_vslli_h, src0, 1, src1, 1, src2, 1, src3, 1, tmp5, tmp6,
+ tmp7, tmp8);
DUP2_ARG2(__lsx_vslli_h, src4, 1, src5, 1, tmp9, tmp10);
DUP4_ARG2(__lsx_vadd_h, src0, tmp5, src1, tmp6, src2, tmp7, src3, tmp8,
src0, src1, src2, src3);
@@ -708,8 +709,8 @@ void ScaleRowDown34_1_Box_LSX(const uint8_t* src_ptr,
const0, src0, src1, src2, src3);
DUP4_ARG2(__lsx_vdp2_h_bu, tmp4, const1, tmp5, const2, tmp6, const0, tmp7,
const1, src4, src5, src6, src7);
- DUP4_ARG2(__lsx_vdp2_h_bu, tmp8, const2, tmp9, const0, tmp10, const1,
- tmp11, const2, tmp0, tmp1, tmp2, tmp3);
+ DUP4_ARG2(__lsx_vdp2_h_bu, tmp8, const2, tmp9, const0, tmp10, const1, tmp11,
+ const2, tmp0, tmp1, tmp2, tmp3);
DUP4_ARG2(__lsx_vsrar_h, src0, shift0, src1, shift1, src2, shift2, src3,
shift0, src0, src1, src2, src3);
DUP4_ARG2(__lsx_vsrar_h, src4, shift1, src5, shift2, src6, shift0, src7,
diff --git a/unit_test/cpu_test.cc b/unit_test/cpu_test.cc
index 41961124..080778f5 100644
--- a/unit_test/cpu_test.cc
+++ b/unit_test/cpu_test.cc
@@ -257,7 +257,8 @@ TEST_F(LibYUVBaseTest, TestLinuxMipsMsa) {
EXPECT_EQ(0, MipsCpuCaps("../../unit_test/testdata/mips.txt"));
EXPECT_EQ(kCpuHasMSA, MipsCpuCaps("../../unit_test/testdata/mips_msa.txt"));
- EXPECT_EQ(kCpuHasMSA, MipsCpuCaps("../../unit_test/testdata/mips_loongson2k.txt"));
+ EXPECT_EQ(kCpuHasMSA,
+ MipsCpuCaps("../../unit_test/testdata/mips_loongson2k.txt"));
} else {
printf("WARNING: unable to load \"../../unit_test/testdata/mips.txt\"\n");
}
diff --git a/unit_test/planar_test.cc b/unit_test/planar_test.cc
index efd725c1..bdbdb6a4 100644..100755
--- a/unit_test/planar_test.cc
+++ b/unit_test/planar_test.cc
@@ -1484,6 +1484,45 @@ TEST_F(LibYUVPlanarTest, TestCopyPlane) {
EXPECT_EQ(0, err);
}
+TEST_F(LibYUVPlanarTest, TestDetilePlane) {
+ int i, j;
+
+ // orig is tiled. Allocate enough memory for tiles.
+ int orig_width = (benchmark_width_ + 15) & ~15;
+ int orig_height = (benchmark_height_ + 15) & ~15;
+ int orig_plane_size = orig_width * orig_height;
+ int y_plane_size = benchmark_width_ * benchmark_height_;
+ align_buffer_page_end(orig_y, orig_plane_size);
+ align_buffer_page_end(dst_c, y_plane_size);
+ align_buffer_page_end(dst_opt, y_plane_size);
+
+ MemRandomize(orig_y, orig_plane_size);
+ memset(dst_c, 0, y_plane_size);
+ memset(dst_opt, 0, y_plane_size);
+
+ // Disable all optimizations.
+ MaskCpuFlags(disable_cpu_flags_);
+ for (j = 0; j < benchmark_iterations_; j++) {
+ DetilePlane(orig_y, orig_width, dst_c, benchmark_width_,
+ benchmark_width_, benchmark_height_, 16);
+ }
+
+ // Enable optimizations.
+ MaskCpuFlags(benchmark_cpu_info_);
+ for (j = 0; j < benchmark_iterations_; j++) {
+ DetilePlane(orig_y, orig_width, dst_opt, benchmark_width_,
+ benchmark_width_, benchmark_height_, 16);
+ }
+
+ for (i = 0; i < y_plane_size; ++i) {
+ EXPECT_EQ(dst_c[i], dst_opt[i]);
+ }
+
+ free_aligned_buffer_page_end(orig_y);
+ free_aligned_buffer_page_end(dst_c);
+ free_aligned_buffer_page_end(dst_opt);
+}
+
static int TestMultiply(int width,
int height,
int benchmark_iterations,