 files/source/row_lsx.cc | 1514 ++++++++++++++++++++++++++++++++++++--------
 1 file changed, 1336 insertions(+), 178 deletions(-)
diff --git a/files/source/row_lsx.cc b/files/source/row_lsx.cc
index 3e8b901a..e626072a 100644
--- a/files/source/row_lsx.cc
+++ b/files/source/row_lsx.cc
@@ -31,6 +31,91 @@ extern "C" {
yb = __lsx_vreplgr2vr_w(yuvconst->kYBiasToRgb[0]); \
}
+// Load 16 pixels of YUV422 data (16 Y, 8 U, 8 V).
+#define READYUV422_D(psrc_y, psrc_u, psrc_v, out_y, uv_l, uv_h) \
+ { \
+ __m128i temp0, temp1; \
+ \
+ DUP2_ARG2(__lsx_vld, psrc_y, 0, psrc_u, 0, out_y, temp0); \
+ temp1 = __lsx_vld(psrc_v, 0); \
+ temp0 = __lsx_vsub_b(temp0, const_80); \
+ temp1 = __lsx_vsub_b(temp1, const_80); \
+ temp0 = __lsx_vsllwil_h_b(temp0, 0); \
+ temp1 = __lsx_vsllwil_h_b(temp1, 0); \
+ uv_l = __lsx_vilvl_h(temp0, temp1); \
+ uv_h = __lsx_vilvh_h(temp0, temp1); \
+ }
+
+// Load 8 pixels of YUV422 data (8 Y, 4 U, 4 V).
+#define READYUV422(psrc_y, psrc_u, psrc_v, out_y, uv) \
+ { \
+ __m128i temp0, temp1; \
+ \
+ out_y = __lsx_vld(psrc_y, 0); \
+ temp0 = __lsx_vldrepl_d(psrc_u, 0); \
+ temp1 = __lsx_vldrepl_d(psrc_v, 0); \
+ uv = __lsx_vilvl_b(temp0, temp1); \
+ uv = __lsx_vsub_b(uv, const_80); \
+ uv = __lsx_vsllwil_h_b(uv, 0); \
+ }
+
+// Convert 16 pixels of YUV420 to RGB.
+#define YUVTORGB_D(in_y, in_uvl, in_uvh, ubvr, ugvg, yg, yb, b_l, b_h, g_l, \
+ g_h, r_l, r_h) \
+ { \
+ __m128i u_l, u_h, v_l, v_h; \
+ __m128i yl_ev, yl_od, yh_ev, yh_od; \
+ __m128i temp0, temp1, temp2, temp3; \
+ \
+ temp0 = __lsx_vilvl_b(in_y, in_y); \
+ temp1 = __lsx_vilvh_b(in_y, in_y); \
+ yl_ev = __lsx_vmulwev_w_hu_h(temp0, yg); \
+ yl_od = __lsx_vmulwod_w_hu_h(temp0, yg); \
+ yh_ev = __lsx_vmulwev_w_hu_h(temp1, yg); \
+ yh_od = __lsx_vmulwod_w_hu_h(temp1, yg); \
+ DUP4_ARG2(__lsx_vsrai_w, yl_ev, 16, yl_od, 16, yh_ev, 16, yh_od, 16, \
+ yl_ev, yl_od, yh_ev, yh_od); \
+ yl_ev = __lsx_vadd_w(yl_ev, yb); \
+ yl_od = __lsx_vadd_w(yl_od, yb); \
+ yh_ev = __lsx_vadd_w(yh_ev, yb); \
+ yh_od = __lsx_vadd_w(yh_od, yb); \
+ v_l = __lsx_vmulwev_w_h(in_uvl, ubvr); \
+ u_l = __lsx_vmulwod_w_h(in_uvl, ubvr); \
+ v_h = __lsx_vmulwev_w_h(in_uvh, ubvr); \
+ u_h = __lsx_vmulwod_w_h(in_uvh, ubvr); \
+ temp0 = __lsx_vadd_w(yl_ev, u_l); \
+ temp1 = __lsx_vadd_w(yl_od, u_l); \
+ temp2 = __lsx_vadd_w(yh_ev, u_h); \
+ temp3 = __lsx_vadd_w(yh_od, u_h); \
+ DUP4_ARG2(__lsx_vsrai_w, temp0, 6, temp1, 6, temp2, 6, temp3, 6, temp0, \
+ temp1, temp2, temp3); \
+ DUP4_ARG1(__lsx_vclip255_w, temp0, temp1, temp2, temp3, temp0, temp1, \
+ temp2, temp3); \
+ b_l = __lsx_vpackev_h(temp1, temp0); \
+ b_h = __lsx_vpackev_h(temp3, temp2); \
+ temp0 = __lsx_vadd_w(yl_ev, v_l); \
+ temp1 = __lsx_vadd_w(yl_od, v_l); \
+ temp2 = __lsx_vadd_w(yh_ev, v_h); \
+ temp3 = __lsx_vadd_w(yh_od, v_h); \
+ DUP4_ARG2(__lsx_vsrai_w, temp0, 6, temp1, 6, temp2, 6, temp3, 6, temp0, \
+ temp1, temp2, temp3); \
+ DUP4_ARG1(__lsx_vclip255_w, temp0, temp1, temp2, temp3, temp0, temp1, \
+ temp2, temp3); \
+ r_l = __lsx_vpackev_h(temp1, temp0); \
+ r_h = __lsx_vpackev_h(temp3, temp2); \
+ DUP2_ARG2(__lsx_vdp2_w_h, in_uvl, ugvg, in_uvh, ugvg, u_l, u_h); \
+ temp0 = __lsx_vsub_w(yl_ev, u_l); \
+ temp1 = __lsx_vsub_w(yl_od, u_l); \
+ temp2 = __lsx_vsub_w(yh_ev, u_h); \
+ temp3 = __lsx_vsub_w(yh_od, u_h); \
+ DUP4_ARG2(__lsx_vsrai_w, temp0, 6, temp1, 6, temp2, 6, temp3, 6, temp0, \
+ temp1, temp2, temp3); \
+ DUP4_ARG1(__lsx_vclip255_w, temp0, temp1, temp2, temp3, temp0, temp1, \
+ temp2, temp3); \
+ g_l = __lsx_vpackev_h(temp1, temp0); \
+ g_h = __lsx_vpackev_h(temp3, temp2); \
+ }
+
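+// For reference, a scalar sketch of the fixed-point math the macros above
+// vectorize. The coefficients ub/ug/vg/vr/yg/yb come from YUVTORGB_SETUP,
+// u and v have already been re-centered by subtracting 0x80 in the READYUV422
+// macros, and clamp255() denotes clamping to [0, 255]:
+//   int y1 = (int)(((uint32_t)(y * 0x0101) * yg) >> 16) + yb;
+//   b = clamp255((y1 + u * ub) >> 6);
+//   g = clamp255((y1 - (u * ug + v * vg)) >> 6);
+//   r = clamp255((y1 + v * vr) >> 6);
+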
// Convert 8 pixels of YUV420 to RGB.
#define YUVTORGB(in_y, in_vu, vrub, vgug, yg, yb, out_b, out_g, out_r) \
{ \
@@ -118,6 +203,25 @@ extern "C" {
out_g = __lsx_vpackev_h(tmp1, tmp0); \
}
+// Pack and Store 16 ARGB values.
+#define STOREARGB_D(a_l, a_h, r_l, r_h, g_l, g_h, b_l, b_h, pdst_argb) \
+ { \
+ __m128i temp0, temp1, temp2, temp3; \
+ temp0 = __lsx_vpackev_b(g_l, b_l); \
+ temp1 = __lsx_vpackev_b(a_l, r_l); \
+ temp2 = __lsx_vpackev_b(g_h, b_h); \
+ temp3 = __lsx_vpackev_b(a_h, r_h); \
+ r_l = __lsx_vilvl_h(temp1, temp0); \
+ r_h = __lsx_vilvh_h(temp1, temp0); \
+ g_l = __lsx_vilvl_h(temp3, temp2); \
+ g_h = __lsx_vilvh_h(temp3, temp2); \
+ __lsx_vst(r_l, pdst_argb, 0); \
+ __lsx_vst(r_h, pdst_argb, 16); \
+ __lsx_vst(g_l, pdst_argb, 32); \
+ __lsx_vst(g_h, pdst_argb, 48); \
+ pdst_argb += 64; \
+ }
+
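+// In STOREARGB_D, vpackev_b forms {B,G} and {R,A} byte pairs and the
+// vilvl_h/vilvh_h steps merge the pairs into {B,G,R,A} quads, i.e.
+// little-endian ARGB memory order, before the four 16-byte stores.
+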
// Pack and Store 8 ARGB values.
#define STOREARGB(in_a, in_r, in_g, in_b, pdst_argb) \
{ \
@@ -155,6 +259,1028 @@ extern "C" {
_dst0 = __lsx_vpickod_b(_reg1, _reg0); \
}
+void MirrorRow_LSX(const uint8_t* src, uint8_t* dst, int width) {
+ int x;
+ int len = width / 32;
+ __m128i src0, src1;
+ __m128i shuffler = {0x08090A0B0C0D0E0F, 0x0001020304050607};
+ src += width - 32;
+ for (x = 0; x < len; x++) {
+ DUP2_ARG2(__lsx_vld, src, 0, src, 16, src0, src1);
+ DUP2_ARG3(__lsx_vshuf_b, src0, src0, shuffler, src1, src1, shuffler, src0,
+ src1);
+ __lsx_vst(src1, dst, 0);
+ __lsx_vst(src0, dst, 16);
+ dst += 32;
+ src -= 32;
+ }
+}
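+
+// The vshuf_b control in MirrorRow_LSX reverses the 16 bytes within each
+// vector, and the two stores are swapped to reverse the vector pair. Widths
+// that are not a multiple of 32 are presumably left to the callers (e.g. the
+// _Any dispatch wrappers) to handle.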
+
+void MirrorUVRow_LSX(const uint8_t* src_uv, uint8_t* dst_uv, int width) {
+ int x;
+ int len = width / 8;
+ __m128i src, dst;
+ __m128i shuffler = {0x0004000500060007, 0x0000000100020003};
+
+ src_uv += (width - 8) << 1;
+ for (x = 0; x < len; x++) {
+ src = __lsx_vld(src_uv, 0);
+ dst = __lsx_vshuf_h(shuffler, src, src);
+ __lsx_vst(dst, dst_uv, 0);
+ src_uv -= 16;
+ dst_uv += 16;
+ }
+}
+
+void ARGBMirrorRow_LSX(const uint8_t* src, uint8_t* dst, int width) {
+ int x;
+ int len = width / 8;
+ __m128i src0, src1;
+ __m128i shuffler = {0x0B0A09080F0E0D0C, 0x0302010007060504};
+
+ src += (width * 4) - 32;
+ for (x = 0; x < len; x++) {
+ DUP2_ARG2(__lsx_vld, src, 0, src, 16, src0, src1);
+ DUP2_ARG3(__lsx_vshuf_b, src0, src0, shuffler, src1, src1, shuffler, src0,
+ src1);
+ __lsx_vst(src1, dst, 0);
+ __lsx_vst(src0, dst, 16);
+ dst += 32;
+ src -= 32;
+ }
+}
+
+void I422ToYUY2Row_LSX(const uint8_t* src_y,
+ const uint8_t* src_u,
+ const uint8_t* src_v,
+ uint8_t* dst_yuy2,
+ int width) {
+ int x;
+ int len = width / 16;
+ __m128i src_u0, src_v0, src_y0, vec_uv0;
+ __m128i vec_yuy2_0, vec_yuy2_1;
+
+ for (x = 0; x < len; x++) {
+ DUP2_ARG2(__lsx_vld, src_u, 0, src_v, 0, src_u0, src_v0);
+ src_y0 = __lsx_vld(src_y, 0);
+ vec_uv0 = __lsx_vilvl_b(src_v0, src_u0);
+ vec_yuy2_0 = __lsx_vilvl_b(vec_uv0, src_y0);
+ vec_yuy2_1 = __lsx_vilvh_b(vec_uv0, src_y0);
+ __lsx_vst(vec_yuy2_0, dst_yuy2, 0);
+ __lsx_vst(vec_yuy2_1, dst_yuy2, 16);
+ src_u += 8;
+ src_v += 8;
+ src_y += 16;
+ dst_yuy2 += 32;
+ }
+}
+
+void I422ToUYVYRow_LSX(const uint8_t* src_y,
+ const uint8_t* src_u,
+ const uint8_t* src_v,
+ uint8_t* dst_uyvy,
+ int width) {
+ int x;
+ int len = width / 16;
+ __m128i src_u0, src_v0, src_y0, vec_uv0;
+ __m128i vec_uyvy0, vec_uyvy1;
+
+ for (x = 0; x < len; x++) {
+ DUP2_ARG2(__lsx_vld, src_u, 0, src_v, 0, src_u0, src_v0);
+ src_y0 = __lsx_vld(src_y, 0);
+ vec_uv0 = __lsx_vilvl_b(src_v0, src_u0);
+ vec_uyvy0 = __lsx_vilvl_b(src_y0, vec_uv0);
+ vec_uyvy1 = __lsx_vilvh_b(src_y0, vec_uv0);
+ __lsx_vst(vec_uyvy0, dst_uyvy, 0);
+ __lsx_vst(vec_uyvy1, dst_uyvy, 16);
+ src_u += 8;
+ src_v += 8;
+ src_y += 16;
+ dst_uyvy += 32;
+ }
+}
+
+void I422ToARGBRow_LSX(const uint8_t* src_y,
+ const uint8_t* src_u,
+ const uint8_t* src_v,
+ uint8_t* dst_argb,
+ const struct YuvConstants* yuvconstants,
+ int width) {
+ int x;
+ int len = width / 16;
+ __m128i vec_yb, vec_yg, vec_ub, vec_ug, vec_vr, vec_vg;
+ __m128i vec_ubvr, vec_ugvg;
+ __m128i alpha = __lsx_vldi(0xFF);
+ __m128i const_80 = __lsx_vldi(0x80);
+
+ YUVTORGB_SETUP(yuvconstants, vec_vr, vec_ub, vec_vg, vec_ug, vec_yg, vec_yb);
+ vec_ubvr = __lsx_vilvl_h(vec_ub, vec_vr);
+ vec_ugvg = __lsx_vilvl_h(vec_ug, vec_vg);
+
+ for (x = 0; x < len; x++) {
+ __m128i y, uv_l, uv_h, b_l, b_h, g_l, g_h, r_l, r_h;
+
+ READYUV422_D(src_y, src_u, src_v, y, uv_l, uv_h);
+ YUVTORGB_D(y, uv_l, uv_h, vec_ubvr, vec_ugvg, vec_yg, vec_yb, b_l, b_h, g_l,
+ g_h, r_l, r_h);
+ STOREARGB_D(alpha, alpha, r_l, r_h, g_l, g_h, b_l, b_h, dst_argb);
+ src_y += 16;
+ src_u += 8;
+ src_v += 8;
+ }
+}
+
+void I422ToRGBARow_LSX(const uint8_t* src_y,
+ const uint8_t* src_u,
+ const uint8_t* src_v,
+ uint8_t* dst_argb,
+ const struct YuvConstants* yuvconstants,
+ int width) {
+ int x;
+ int len = width / 16;
+ __m128i vec_yb, vec_yg, vec_ub, vec_vr, vec_ug, vec_vg;
+ __m128i vec_ubvr, vec_ugvg;
+ __m128i alpha = __lsx_vldi(0xFF);
+ __m128i const_80 = __lsx_vldi(0x80);
+
+ YUVTORGB_SETUP(yuvconstants, vec_vr, vec_ub, vec_vg, vec_ug, vec_yg, vec_yb);
+ vec_ubvr = __lsx_vilvl_h(vec_ub, vec_vr);
+ vec_ugvg = __lsx_vilvl_h(vec_ug, vec_vg);
+
+ for (x = 0; x < len; x++) {
+ __m128i y, uv_l, uv_h, b_l, b_h, g_l, g_h, r_l, r_h;
+
+ READYUV422_D(src_y, src_u, src_v, y, uv_l, uv_h);
+ YUVTORGB_D(y, uv_l, uv_h, vec_ubvr, vec_ugvg, vec_yg, vec_yb, b_l, b_h, g_l,
+ g_h, r_l, r_h);
+ STOREARGB_D(r_l, r_h, g_l, g_h, b_l, b_h, alpha, alpha, dst_argb);
+ src_y += 16;
+ src_u += 8;
+ src_v += 8;
+ }
+}
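+
+// I422ToRGBARow_LSX reuses STOREARGB_D with its operands rotated: passing
+// (r, g, b, alpha) in the (a, r, g, b) slots makes the bytes land in memory
+// as A,B,G,R, i.e. libyuv's RGBA order.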
+
+void I422AlphaToARGBRow_LSX(const uint8_t* src_y,
+ const uint8_t* src_u,
+ const uint8_t* src_v,
+ const uint8_t* src_a,
+ uint8_t* dst_argb,
+ const struct YuvConstants* yuvconstants,
+ int width) {
+ int x;
+ int len = width / 16;
+ int res = width & 15;
+ __m128i vec_yb, vec_yg, vec_ub, vec_vr, vec_ug, vec_vg;
+ __m128i vec_ubvr, vec_ugvg;
+ __m128i zero = __lsx_vldi(0);
+ __m128i const_80 = __lsx_vldi(0x80);
+
+ YUVTORGB_SETUP(yuvconstants, vec_vr, vec_ub, vec_vg, vec_ug, vec_yg, vec_yb);
+ vec_ubvr = __lsx_vilvl_h(vec_ub, vec_vr);
+ vec_ugvg = __lsx_vilvl_h(vec_ug, vec_vg);
+
+ for (x = 0; x < len; x++) {
+ __m128i y, uv_l, uv_h, b_l, b_h, g_l, g_h, r_l, r_h, a_l, a_h;
+
+ y = __lsx_vld(src_a, 0);
+ a_l = __lsx_vilvl_b(zero, y);
+ a_h = __lsx_vilvh_b(zero, y);
+ READYUV422_D(src_y, src_u, src_v, y, uv_l, uv_h);
+ YUVTORGB_D(y, uv_l, uv_h, vec_ubvr, vec_ugvg, vec_yg, vec_yb, b_l, b_h, g_l,
+ g_h, r_l, r_h);
+ STOREARGB_D(a_l, a_h, r_l, r_h, g_l, g_h, b_l, b_h, dst_argb);
+ src_y += 16;
+ src_u += 8;
+ src_v += 8;
+ src_a += 16;
+ }
+ if (res) {
+ __m128i y, uv, r, g, b, a;
+ a = __lsx_vld(src_a, 0);
+ a = __lsx_vsllwil_hu_bu(a, 0);
+ READYUV422(src_y, src_u, src_v, y, uv);
+ YUVTORGB(y, uv, vec_ubvr, vec_ugvg, vec_yg, vec_yb, b, g, r);
+ STOREARGB(a, r, g, b, dst_argb);
+ }
+}
+
+void I422ToRGB24Row_LSX(const uint8_t* src_y,
+ const uint8_t* src_u,
+ const uint8_t* src_v,
+ uint8_t* dst_argb,
+ const struct YuvConstants* yuvconstants,
+ int32_t width) {
+ int x;
+ int len = width / 16;
+ __m128i vec_yb, vec_yg, vec_ub, vec_vr, vec_ug, vec_vg;
+ __m128i vec_ubvr, vec_ugvg;
+ __m128i const_80 = __lsx_vldi(0x80);
+ __m128i shuffler0 = {0x0504120302100100, 0x0A18090816070614};
+ __m128i shuffler1 = {0x1E0F0E1C0D0C1A0B, 0x1E0F0E1C0D0C1A0B};
+
+ YUVTORGB_SETUP(yuvconstants, vec_vr, vec_ub, vec_vg, vec_ug, vec_yg, vec_yb);
+ vec_ubvr = __lsx_vilvl_h(vec_ub, vec_vr);
+ vec_ugvg = __lsx_vilvl_h(vec_ug, vec_vg);
+
+ for (x = 0; x < len; x++) {
+ __m128i y, uv_l, uv_h, b_l, b_h, g_l, g_h, r_l, r_h;
+ __m128i temp0, temp1, temp2, temp3;
+
+ READYUV422_D(src_y, src_u, src_v, y, uv_l, uv_h);
+ YUVTORGB_D(y, uv_l, uv_h, vec_ubvr, vec_ugvg, vec_yg, vec_yb, b_l, b_h, g_l,
+ g_h, r_l, r_h);
+ temp0 = __lsx_vpackev_b(g_l, b_l);
+ temp1 = __lsx_vpackev_b(g_h, b_h);
+ DUP4_ARG3(__lsx_vshuf_b, r_l, temp0, shuffler1, r_h, temp1, shuffler1, r_l,
+ temp0, shuffler0, r_h, temp1, shuffler0, temp2, temp3, temp0,
+ temp1);
+
+ b_l = __lsx_vilvl_d(temp1, temp2);
+ b_h = __lsx_vilvh_d(temp3, temp1);
+ __lsx_vst(temp0, dst_argb, 0);
+ __lsx_vst(b_l, dst_argb, 16);
+ __lsx_vst(b_h, dst_argb, 32);
+ dst_argb += 48;
+ src_y += 16;
+ src_u += 8;
+ src_v += 8;
+ }
+}
+
+// TODO(fbarchard): Consider AND instead of shift to isolate 5 upper bits of R.
+void I422ToRGB565Row_LSX(const uint8_t* src_y,
+ const uint8_t* src_u,
+ const uint8_t* src_v,
+ uint8_t* dst_rgb565,
+ const struct YuvConstants* yuvconstants,
+ int width) {
+ int x;
+ int len = width / 16;
+ __m128i vec_yb, vec_yg, vec_ub, vec_vr, vec_ug, vec_vg;
+ __m128i vec_ubvr, vec_ugvg;
+ __m128i const_80 = __lsx_vldi(0x80);
+
+ YUVTORGB_SETUP(yuvconstants, vec_vr, vec_ub, vec_vg, vec_ug, vec_yg, vec_yb);
+ vec_ubvr = __lsx_vilvl_h(vec_ub, vec_vr);
+ vec_ugvg = __lsx_vilvl_h(vec_ug, vec_vg);
+
+ for (x = 0; x < len; x++) {
+ __m128i y, uv_l, uv_h, b_l, b_h, g_l, g_h, r_l, r_h;
+
+ READYUV422_D(src_y, src_u, src_v, y, uv_l, uv_h);
+ YUVTORGB_D(y, uv_l, uv_h, vec_ubvr, vec_ugvg, vec_yg, vec_yb, b_l, b_h, g_l,
+ g_h, r_l, r_h);
+ b_l = __lsx_vsrli_h(b_l, 3);
+ b_h = __lsx_vsrli_h(b_h, 3);
+ g_l = __lsx_vsrli_h(g_l, 2);
+ g_h = __lsx_vsrli_h(g_h, 2);
+ r_l = __lsx_vsrli_h(r_l, 3);
+ r_h = __lsx_vsrli_h(r_h, 3);
+ r_l = __lsx_vslli_h(r_l, 11);
+ r_h = __lsx_vslli_h(r_h, 11);
+ g_l = __lsx_vslli_h(g_l, 5);
+ g_h = __lsx_vslli_h(g_h, 5);
+ r_l = __lsx_vor_v(r_l, g_l);
+ r_l = __lsx_vor_v(r_l, b_l);
+ r_h = __lsx_vor_v(r_h, g_h);
+ r_h = __lsx_vor_v(r_h, b_h);
+ __lsx_vst(r_l, dst_rgb565, 0);
+ __lsx_vst(r_h, dst_rgb565, 16);
+ dst_rgb565 += 32;
+ src_y += 16;
+ src_u += 8;
+ src_v += 8;
+ }
+}
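+
+// The shift/or sequence above packs each pixel as
+//   RGB565 = (R >> 3) << 11 | (G >> 2) << 5 | (B >> 3)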
+
+// TODO(fbarchard): Consider AND instead of shift to isolate 4 upper bits of G.
+void I422ToARGB4444Row_LSX(const uint8_t* src_y,
+ const uint8_t* src_u,
+ const uint8_t* src_v,
+ uint8_t* dst_argb4444,
+ const struct YuvConstants* yuvconstants,
+ int width) {
+ int x;
+ int len = width / 16;
+ __m128i vec_yb, vec_yg, vec_ub, vec_vr, vec_ug, vec_vg;
+ __m128i vec_ubvr, vec_ugvg;
+ __m128i const_80 = __lsx_vldi(0x80);
+ __m128i alpha = {0xF000F000F000F000, 0xF000F000F000F000};
+ __m128i mask = {0x00F000F000F000F0, 0x00F000F000F000F0};
+
+ YUVTORGB_SETUP(yuvconstants, vec_vr, vec_ub, vec_vg, vec_ug, vec_yg, vec_yb);
+ vec_ubvr = __lsx_vilvl_h(vec_ub, vec_vr);
+ vec_ugvg = __lsx_vilvl_h(vec_ug, vec_vg);
+
+ for (x = 0; x < len; x++) {
+ __m128i y, uv_l, uv_h, b_l, b_h, g_l, g_h, r_l, r_h;
+
+ READYUV422_D(src_y, src_u, src_v, y, uv_l, uv_h);
+ YUVTORGB_D(y, uv_l, uv_h, vec_ubvr, vec_ugvg, vec_yg, vec_yb, b_l, b_h, g_l,
+ g_h, r_l, r_h);
+ b_l = __lsx_vsrli_h(b_l, 4);
+ b_h = __lsx_vsrli_h(b_h, 4);
+ r_l = __lsx_vsrli_h(r_l, 4);
+ r_h = __lsx_vsrli_h(r_h, 4);
+ g_l = __lsx_vand_v(g_l, mask);
+ g_h = __lsx_vand_v(g_h, mask);
+ r_l = __lsx_vslli_h(r_l, 8);
+ r_h = __lsx_vslli_h(r_h, 8);
+ r_l = __lsx_vor_v(r_l, alpha);
+ r_h = __lsx_vor_v(r_h, alpha);
+ r_l = __lsx_vor_v(r_l, g_l);
+ r_h = __lsx_vor_v(r_h, g_h);
+ r_l = __lsx_vor_v(r_l, b_l);
+ r_h = __lsx_vor_v(r_h, b_h);
+ __lsx_vst(r_l, dst_argb4444, 0);
+ __lsx_vst(r_h, dst_argb4444, 16);
+ dst_argb4444 += 32;
+ src_y += 16;
+ src_u += 8;
+ src_v += 8;
+ }
+}
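+
+// The packing above computes, per pixel,
+//   ARGB4444 = 0xF000 | (R >> 4) << 8 | (G & 0xF0) | (B >> 4)
+// with alpha forced to 0xF.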
+
+void I422ToARGB1555Row_LSX(const uint8_t* src_y,
+ const uint8_t* src_u,
+ const uint8_t* src_v,
+ uint8_t* dst_argb1555,
+ const struct YuvConstants* yuvconstants,
+ int width) {
+ int x;
+ int len = width / 16;
+ __m128i vec_yb, vec_yg, vec_ub, vec_vr, vec_ug, vec_vg;
+ __m128i vec_ubvr, vec_ugvg;
+ __m128i const_80 = __lsx_vldi(0x80);
+ __m128i alpha = {0x8000800080008000, 0x8000800080008000};
+
+ YUVTORGB_SETUP(yuvconstants, vec_vr, vec_ub, vec_vg, vec_ug, vec_yg, vec_yb);
+ vec_ubvr = __lsx_vilvl_h(vec_ub, vec_vr);
+ vec_ugvg = __lsx_vilvl_h(vec_ug, vec_vg);
+
+ for (x = 0; x < len; x++) {
+ __m128i y, uv_l, uv_h, b_l, b_h, g_l, g_h, r_l, r_h;
+
+ READYUV422_D(src_y, src_u, src_v, y, uv_l, uv_h);
+ YUVTORGB_D(y, uv_l, uv_h, vec_ubvr, vec_ugvg, vec_yg, vec_yb, b_l, b_h, g_l,
+ g_h, r_l, r_h);
+ b_l = __lsx_vsrli_h(b_l, 3);
+ b_h = __lsx_vsrli_h(b_h, 3);
+    g_l = __lsx_vsrli_h(g_l, 3);
+    g_h = __lsx_vsrli_h(g_h, 3);
+ g_l = __lsx_vslli_h(g_l, 5);
+ g_h = __lsx_vslli_h(g_h, 5);
+ r_l = __lsx_vsrli_h(r_l, 3);
+ r_h = __lsx_vsrli_h(r_h, 3);
+ r_l = __lsx_vslli_h(r_l, 10);
+ r_h = __lsx_vslli_h(r_h, 10);
+ r_l = __lsx_vor_v(r_l, alpha);
+ r_h = __lsx_vor_v(r_h, alpha);
+ r_l = __lsx_vor_v(r_l, g_l);
+ r_h = __lsx_vor_v(r_h, g_h);
+ r_l = __lsx_vor_v(r_l, b_l);
+ r_h = __lsx_vor_v(r_h, b_h);
+ __lsx_vst(r_l, dst_argb1555, 0);
+ __lsx_vst(r_h, dst_argb1555, 16);
+ dst_argb1555 += 32;
+ src_y += 16;
+ src_u += 8;
+ src_v += 8;
+ }
+}
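+
+// The packing above computes, per pixel,
+//   ARGB1555 = 0x8000 | (R >> 3) << 10 | (G >> 3) << 5 | (B >> 3)
+// with alpha forced to 1.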
+
+void YUY2ToYRow_LSX(const uint8_t* src_yuy2, uint8_t* dst_y, int width) {
+ int x;
+ int len = width / 16;
+ __m128i src0, src1, dst0;
+
+ for (x = 0; x < len; x++) {
+ DUP2_ARG2(__lsx_vld, src_yuy2, 0, src_yuy2, 16, src0, src1);
+ dst0 = __lsx_vpickev_b(src1, src0);
+ __lsx_vst(dst0, dst_y, 0);
+ src_yuy2 += 32;
+ dst_y += 16;
+ }
+}
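+
+// YUY2 stores pixels as {Y0, U, Y1, V}, so luma sits in the even bytes and
+// vpickev_b extracts it. UYVY (handled below) is the same format with the
+// byte order swapped ({U, Y0, V, Y1}), hence the vpickod_b there.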
+
+void YUY2ToUVRow_LSX(const uint8_t* src_yuy2,
+ int src_stride_yuy2,
+ uint8_t* dst_u,
+ uint8_t* dst_v,
+ int width) {
+ const uint8_t* src_yuy2_next = src_yuy2 + src_stride_yuy2;
+ int x;
+ int len = width / 16;
+ __m128i src0, src1, src2, src3;
+ __m128i tmp0, dst0, dst1;
+
+ for (x = 0; x < len; x++) {
+ DUP4_ARG2(__lsx_vld, src_yuy2, 0, src_yuy2, 16, src_yuy2_next, 0,
+ src_yuy2_next, 16, src0, src1, src2, src3);
+ src0 = __lsx_vpickod_b(src1, src0);
+ src1 = __lsx_vpickod_b(src3, src2);
+ tmp0 = __lsx_vavgr_bu(src1, src0);
+ dst0 = __lsx_vpickev_b(tmp0, tmp0);
+ dst1 = __lsx_vpickod_b(tmp0, tmp0);
+ __lsx_vstelm_d(dst0, dst_u, 0, 0);
+ __lsx_vstelm_d(dst1, dst_v, 0, 0);
+ src_yuy2 += 32;
+ src_yuy2_next += 32;
+ dst_u += 8;
+ dst_v += 8;
+ }
+}
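+
+// YUY2ToUVRow_LSX picks the interleaved chroma bytes from two adjacent rows,
+// averages them vertically with vavgr_bu, then splits the result into the
+// U and V planes.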
+
+void YUY2ToUV422Row_LSX(const uint8_t* src_yuy2,
+ uint8_t* dst_u,
+ uint8_t* dst_v,
+ int width) {
+ int x;
+ int len = width / 16;
+ __m128i src0, src1, tmp0, dst0, dst1;
+
+ for (x = 0; x < len; x++) {
+ DUP2_ARG2(__lsx_vld, src_yuy2, 0, src_yuy2, 16, src0, src1);
+ tmp0 = __lsx_vpickod_b(src1, src0);
+ dst0 = __lsx_vpickev_b(tmp0, tmp0);
+ dst1 = __lsx_vpickod_b(tmp0, tmp0);
+ __lsx_vstelm_d(dst0, dst_u, 0, 0);
+ __lsx_vstelm_d(dst1, dst_v, 0, 0);
+ src_yuy2 += 32;
+ dst_u += 8;
+ dst_v += 8;
+ }
+}
+
+void UYVYToYRow_LSX(const uint8_t* src_uyvy, uint8_t* dst_y, int width) {
+ int x;
+ int len = width / 16;
+ __m128i src0, src1, dst0;
+
+ for (x = 0; x < len; x++) {
+ DUP2_ARG2(__lsx_vld, src_uyvy, 0, src_uyvy, 16, src0, src1);
+ dst0 = __lsx_vpickod_b(src1, src0);
+ __lsx_vst(dst0, dst_y, 0);
+ src_uyvy += 32;
+ dst_y += 16;
+ }
+}
+
+void UYVYToUVRow_LSX(const uint8_t* src_uyvy,
+ int src_stride_uyvy,
+ uint8_t* dst_u,
+ uint8_t* dst_v,
+ int width) {
+ const uint8_t* src_uyvy_next = src_uyvy + src_stride_uyvy;
+ int x;
+ int len = width / 16;
+ __m128i src0, src1, src2, src3, tmp0, dst0, dst1;
+
+ for (x = 0; x < len; x++) {
+ DUP4_ARG2(__lsx_vld, src_uyvy, 0, src_uyvy, 16, src_uyvy_next, 0,
+ src_uyvy_next, 16, src0, src1, src2, src3);
+ src0 = __lsx_vpickev_b(src1, src0);
+ src1 = __lsx_vpickev_b(src3, src2);
+ tmp0 = __lsx_vavgr_bu(src1, src0);
+ dst0 = __lsx_vpickev_b(tmp0, tmp0);
+ dst1 = __lsx_vpickod_b(tmp0, tmp0);
+ __lsx_vstelm_d(dst0, dst_u, 0, 0);
+ __lsx_vstelm_d(dst1, dst_v, 0, 0);
+ src_uyvy += 32;
+ src_uyvy_next += 32;
+ dst_u += 8;
+ dst_v += 8;
+ }
+}
+
+void UYVYToUV422Row_LSX(const uint8_t* src_uyvy,
+ uint8_t* dst_u,
+ uint8_t* dst_v,
+ int width) {
+ int x;
+ int len = width / 16;
+ __m128i src0, src1, tmp0, dst0, dst1;
+
+ for (x = 0; x < len; x++) {
+ DUP2_ARG2(__lsx_vld, src_uyvy, 0, src_uyvy, 16, src0, src1);
+ tmp0 = __lsx_vpickev_b(src1, src0);
+ dst0 = __lsx_vpickev_b(tmp0, tmp0);
+ dst1 = __lsx_vpickod_b(tmp0, tmp0);
+ __lsx_vstelm_d(dst0, dst_u, 0, 0);
+ __lsx_vstelm_d(dst1, dst_v, 0, 0);
+ src_uyvy += 32;
+ dst_u += 8;
+ dst_v += 8;
+ }
+}
+
+void ARGBToUVRow_LSX(const uint8_t* src_argb0,
+ int src_stride_argb,
+ uint8_t* dst_u,
+ uint8_t* dst_v,
+ int width) {
+ int x;
+ int len = width / 16;
+ const uint8_t* src_argb1 = src_argb0 + src_stride_argb;
+
+ __m128i src0, src1, src2, src3, src4, src5, src6, src7;
+ __m128i vec0, vec1, vec2, vec3;
+ __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, dst0, dst1;
+ __m128i const_0x70 = {0x0038003800380038, 0x0038003800380038};
+ __m128i const_0x4A = {0x0025002500250025, 0x0025002500250025};
+ __m128i const_0x26 = {0x0013001300130013, 0x0013001300130013};
+ __m128i const_0x5E = {0x002f002f002f002f, 0x002f002f002f002f};
+ __m128i const_0x12 = {0x0009000900090009, 0x0009000900090009};
+ __m128i const_0x8080 = {0x8080808080808080, 0x8080808080808080};
+ for (x = 0; x < len; x++) {
+ DUP4_ARG2(__lsx_vld, src_argb0, 0, src_argb0, 16, src_argb0, 32, src_argb0,
+ 48, src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vld, src_argb1, 0, src_argb1, 16, src_argb1, 32, src_argb1,
+ 48, src4, src5, src6, src7);
+ vec0 = __lsx_vaddwev_h_bu(src0, src4);
+ vec1 = __lsx_vaddwev_h_bu(src1, src5);
+ vec2 = __lsx_vaddwev_h_bu(src2, src6);
+ vec3 = __lsx_vaddwev_h_bu(src3, src7);
+ tmp0 = __lsx_vpickev_h(vec1, vec0);
+ tmp1 = __lsx_vpickev_h(vec3, vec2);
+ tmp2 = __lsx_vpickod_h(vec1, vec0);
+ tmp3 = __lsx_vpickod_h(vec3, vec2);
+ vec0 = __lsx_vaddwod_h_bu(src0, src4);
+ vec1 = __lsx_vaddwod_h_bu(src1, src5);
+ vec2 = __lsx_vaddwod_h_bu(src2, src6);
+ vec3 = __lsx_vaddwod_h_bu(src3, src7);
+ tmp4 = __lsx_vpickev_h(vec1, vec0);
+ tmp5 = __lsx_vpickev_h(vec3, vec2);
+ vec0 = __lsx_vpickev_h(tmp1, tmp0);
+ vec1 = __lsx_vpickod_h(tmp1, tmp0);
+ src0 = __lsx_vavgr_h(vec0, vec1);
+ vec0 = __lsx_vpickev_h(tmp3, tmp2);
+ vec1 = __lsx_vpickod_h(tmp3, tmp2);
+ src1 = __lsx_vavgr_h(vec0, vec1);
+ vec0 = __lsx_vpickev_h(tmp5, tmp4);
+ vec1 = __lsx_vpickod_h(tmp5, tmp4);
+ src2 = __lsx_vavgr_h(vec0, vec1);
+ dst0 = __lsx_vmadd_h(const_0x8080, src0, const_0x70);
+ dst0 = __lsx_vmsub_h(dst0, src2, const_0x4A);
+ dst0 = __lsx_vmsub_h(dst0, src1, const_0x26);
+ dst1 = __lsx_vmadd_h(const_0x8080, src1, const_0x70);
+ dst1 = __lsx_vmsub_h(dst1, src2, const_0x5E);
+ dst1 = __lsx_vmsub_h(dst1, src0, const_0x12);
+ dst0 = __lsx_vsrai_h(dst0, 8);
+ dst1 = __lsx_vsrai_h(dst1, 8);
+ dst0 = __lsx_vpickev_b(dst1, dst0);
+ __lsx_vstelm_d(dst0, dst_u, 0, 0);
+ __lsx_vstelm_d(dst0, dst_v, 0, 1);
+ src_argb0 += 64;
+ src_argb1 += 64;
+ dst_u += 8;
+ dst_v += 8;
+ }
+}
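+
+// The UV constants above are the BT.601 coefficients halved (0x38 = 112/2,
+// 0x25 = 74/2, 0x13 = 38/2, 0x2F = 94/2, 0x09 = 18/2) because src0/src1/src2
+// hold twice the 2x2-averaged B/R/G values. Per output sample this computes
+//   U = (112*B - 74*G - 38*R + 0x8080) >> 8
+//   V = (112*R - 94*G - 18*B + 0x8080) >> 8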
+
+void ARGBToRGB24Row_LSX(const uint8_t* src_argb, uint8_t* dst_rgb, int width) {
+ int x;
+ int len = (width / 16) - 1;
+ __m128i src0, src1, src2, src3;
+ __m128i tmp0, tmp1, tmp2, tmp3;
+ __m128i shuf = {0x0908060504020100, 0x000000000E0D0C0A};
+ for (x = 0; x < len; x++) {
+ DUP4_ARG2(__lsx_vld, src_argb, 0, src_argb, 16, src_argb, 32, src_argb, 48,
+ src0, src1, src2, src3);
+ tmp0 = __lsx_vshuf_b(src0, src0, shuf);
+ tmp1 = __lsx_vshuf_b(src1, src1, shuf);
+ tmp2 = __lsx_vshuf_b(src2, src2, shuf);
+ tmp3 = __lsx_vshuf_b(src3, src3, shuf);
+ __lsx_vst(tmp0, dst_rgb, 0);
+ __lsx_vst(tmp1, dst_rgb, 12);
+ __lsx_vst(tmp2, dst_rgb, 24);
+ __lsx_vst(tmp3, dst_rgb, 36);
+ dst_rgb += 48;
+ src_argb += 64;
+ }
+ DUP4_ARG2(__lsx_vld, src_argb, 0, src_argb, 16, src_argb, 32, src_argb, 48,
+ src0, src1, src2, src3);
+ tmp0 = __lsx_vshuf_b(src0, src0, shuf);
+ tmp1 = __lsx_vshuf_b(src1, src1, shuf);
+ tmp2 = __lsx_vshuf_b(src2, src2, shuf);
+ tmp3 = __lsx_vshuf_b(src3, src3, shuf);
+ __lsx_vst(tmp0, dst_rgb, 0);
+ __lsx_vst(tmp1, dst_rgb, 12);
+ __lsx_vst(tmp2, dst_rgb, 24);
+ dst_rgb += 36;
+ __lsx_vst(tmp3, dst_rgb, 0);
+}
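+
+// Each vshuf_b above compacts 4 ARGB pixels (16 bytes) into 12 RGB bytes in
+// the low lanes, so the 16-byte stores are placed at 12-byte strides and the
+// 4 junk bytes at the top of each store are overwritten by the next one.
+// ARGBToRAWRow below follows the same pattern with R and B swapped in its
+// shuffle control.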
+
+void ARGBToRAWRow_LSX(const uint8_t* src_argb, uint8_t* dst_rgb, int width) {
+ int x;
+ int len = (width / 16) - 1;
+ __m128i src0, src1, src2, src3;
+ __m128i tmp0, tmp1, tmp2, tmp3;
+ __m128i shuf = {0x090A040506000102, 0x000000000C0D0E08};
+ for (x = 0; x < len; x++) {
+ DUP4_ARG2(__lsx_vld, src_argb, 0, src_argb, 16, src_argb, 32, src_argb, 48,
+ src0, src1, src2, src3);
+ tmp0 = __lsx_vshuf_b(src0, src0, shuf);
+ tmp1 = __lsx_vshuf_b(src1, src1, shuf);
+ tmp2 = __lsx_vshuf_b(src2, src2, shuf);
+ tmp3 = __lsx_vshuf_b(src3, src3, shuf);
+ __lsx_vst(tmp0, dst_rgb, 0);
+ __lsx_vst(tmp1, dst_rgb, 12);
+ __lsx_vst(tmp2, dst_rgb, 24);
+ __lsx_vst(tmp3, dst_rgb, 36);
+ dst_rgb += 48;
+ src_argb += 64;
+ }
+ DUP4_ARG2(__lsx_vld, src_argb, 0, src_argb, 16, src_argb, 32, src_argb, 48,
+ src0, src1, src2, src3);
+ tmp0 = __lsx_vshuf_b(src0, src0, shuf);
+ tmp1 = __lsx_vshuf_b(src1, src1, shuf);
+ tmp2 = __lsx_vshuf_b(src2, src2, shuf);
+ tmp3 = __lsx_vshuf_b(src3, src3, shuf);
+ __lsx_vst(tmp0, dst_rgb, 0);
+ __lsx_vst(tmp1, dst_rgb, 12);
+ __lsx_vst(tmp2, dst_rgb, 24);
+ dst_rgb += 36;
+ __lsx_vst(tmp3, dst_rgb, 0);
+}
+
+void ARGBToRGB565Row_LSX(const uint8_t* src_argb, uint8_t* dst_rgb, int width) {
+ int x;
+ int len = width / 8;
+ __m128i zero = __lsx_vldi(0);
+ __m128i src0, src1, tmp0, tmp1, dst0;
+ __m128i shift = {0x0300030003000300, 0x0300030003000300};
+
+ for (x = 0; x < len; x++) {
+ DUP2_ARG2(__lsx_vld, src_argb, 0, src_argb, 16, src0, src1);
+ tmp0 = __lsx_vpickev_b(src1, src0);
+ tmp1 = __lsx_vpickod_b(src1, src0);
+ tmp0 = __lsx_vsrli_b(tmp0, 3);
+ tmp1 = __lsx_vpackev_b(zero, tmp1);
+ tmp1 = __lsx_vsrli_h(tmp1, 2);
+ tmp0 = __lsx_vsll_b(tmp0, shift);
+ tmp1 = __lsx_vslli_h(tmp1, 5);
+ dst0 = __lsx_vor_v(tmp0, tmp1);
+ __lsx_vst(dst0, dst_rgb, 0);
+ dst_rgb += 16;
+ src_argb += 32;
+ }
+}
+
+void ARGBToARGB1555Row_LSX(const uint8_t* src_argb,
+ uint8_t* dst_rgb,
+ int width) {
+ int x;
+ int len = width / 8;
+ __m128i zero = __lsx_vldi(0);
+ __m128i src0, src1, tmp0, tmp1, tmp2, tmp3, dst0;
+ __m128i shift1 = {0x0703070307030703, 0x0703070307030703};
+ __m128i shift2 = {0x0200020002000200, 0x0200020002000200};
+
+ for (x = 0; x < len; x++) {
+ DUP2_ARG2(__lsx_vld, src_argb, 0, src_argb, 16, src0, src1);
+ tmp0 = __lsx_vpickev_b(src1, src0);
+ tmp1 = __lsx_vpickod_b(src1, src0);
+ tmp0 = __lsx_vsrli_b(tmp0, 3);
+ tmp1 = __lsx_vsrl_b(tmp1, shift1);
+ tmp0 = __lsx_vsll_b(tmp0, shift2);
+ tmp2 = __lsx_vpackev_b(zero, tmp1);
+ tmp3 = __lsx_vpackod_b(zero, tmp1);
+ tmp2 = __lsx_vslli_h(tmp2, 5);
+ tmp3 = __lsx_vslli_h(tmp3, 15);
+ dst0 = __lsx_vor_v(tmp0, tmp2);
+ dst0 = __lsx_vor_v(dst0, tmp3);
+ __lsx_vst(dst0, dst_rgb, 0);
+ dst_rgb += 16;
+ src_argb += 32;
+ }
+}
+
+void ARGBToARGB4444Row_LSX(const uint8_t* src_argb,
+ uint8_t* dst_rgb,
+ int width) {
+ int x;
+ int len = width / 8;
+ __m128i src0, src1, tmp0, tmp1, dst0;
+
+ for (x = 0; x < len; x++) {
+ DUP2_ARG2(__lsx_vld, src_argb, 0, src_argb, 16, src0, src1);
+ tmp0 = __lsx_vpickev_b(src1, src0);
+ tmp1 = __lsx_vpickod_b(src1, src0);
+ tmp1 = __lsx_vandi_b(tmp1, 0xF0);
+ tmp0 = __lsx_vsrli_b(tmp0, 4);
+ dst0 = __lsx_vor_v(tmp1, tmp0);
+ __lsx_vst(dst0, dst_rgb, 0);
+ dst_rgb += 16;
+ src_argb += 32;
+ }
+}
+
+void ARGBToUV444Row_LSX(const uint8_t* src_argb,
+ uint8_t* dst_u,
+ uint8_t* dst_v,
+ int32_t width) {
+ int x;
+ int len = width / 16;
+ __m128i src0, src1, src2, src3;
+ __m128i tmp0, tmp1, tmp2, tmp3;
+ __m128i reg0, reg1, reg2, reg3, dst0, dst1;
+ __m128i const_112 = __lsx_vldi(112);
+ __m128i const_74 = __lsx_vldi(74);
+ __m128i const_38 = __lsx_vldi(38);
+ __m128i const_94 = __lsx_vldi(94);
+ __m128i const_18 = __lsx_vldi(18);
+ __m128i const_0x8080 = {0x8080808080808080, 0x8080808080808080};
+ for (x = 0; x < len; x++) {
+ DUP4_ARG2(__lsx_vld, src_argb, 0, src_argb, 16, src_argb, 32, src_argb, 48,
+ src0, src1, src2, src3);
+ tmp0 = __lsx_vpickev_h(src1, src0);
+ tmp1 = __lsx_vpickod_h(src1, src0);
+ tmp2 = __lsx_vpickev_h(src3, src2);
+ tmp3 = __lsx_vpickod_h(src3, src2);
+ reg0 = __lsx_vmaddwev_h_bu(const_0x8080, tmp0, const_112);
+ reg1 = __lsx_vmaddwev_h_bu(const_0x8080, tmp2, const_112);
+ reg2 = __lsx_vmulwod_h_bu(tmp0, const_74);
+ reg3 = __lsx_vmulwod_h_bu(tmp2, const_74);
+ reg2 = __lsx_vmaddwev_h_bu(reg2, tmp1, const_38);
+ reg3 = __lsx_vmaddwev_h_bu(reg3, tmp3, const_38);
+ reg0 = __lsx_vsub_h(reg0, reg2);
+ reg1 = __lsx_vsub_h(reg1, reg3);
+ reg0 = __lsx_vsrai_h(reg0, 8);
+ reg1 = __lsx_vsrai_h(reg1, 8);
+ dst0 = __lsx_vpickev_b(reg1, reg0);
+
+ reg0 = __lsx_vmaddwev_h_bu(const_0x8080, tmp1, const_112);
+ reg1 = __lsx_vmaddwev_h_bu(const_0x8080, tmp3, const_112);
+ reg2 = __lsx_vmulwev_h_bu(tmp0, const_18);
+ reg3 = __lsx_vmulwev_h_bu(tmp2, const_18);
+ reg2 = __lsx_vmaddwod_h_bu(reg2, tmp0, const_94);
+ reg3 = __lsx_vmaddwod_h_bu(reg3, tmp2, const_94);
+ reg0 = __lsx_vsub_h(reg0, reg2);
+ reg1 = __lsx_vsub_h(reg1, reg3);
+ reg0 = __lsx_vsrai_h(reg0, 8);
+ reg1 = __lsx_vsrai_h(reg1, 8);
+ dst1 = __lsx_vpickev_b(reg1, reg0);
+
+ __lsx_vst(dst0, dst_u, 0);
+ __lsx_vst(dst1, dst_v, 0);
+ dst_u += 16;
+ dst_v += 16;
+ src_argb += 64;
+ }
+}
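+
+// ARGBToUV444Row samples every pixel (no 2x2 box filter), so it uses the
+// full BT.601 coefficients (112, 74, 38, 94, 18) rather than the halved ones
+// in ARGBToUVRow.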
+
+void ARGBMultiplyRow_LSX(const uint8_t* src_argb0,
+ const uint8_t* src_argb1,
+ uint8_t* dst_argb,
+ int width) {
+ int x;
+ int len = width / 4;
+ __m128i zero = __lsx_vldi(0);
+ __m128i src0, src1, dst0, dst1;
+ __m128i tmp0, tmp1, tmp2, tmp3;
+
+ for (x = 0; x < len; x++) {
+ DUP2_ARG2(__lsx_vld, src_argb0, 0, src_argb1, 0, src0, src1);
+ tmp0 = __lsx_vilvl_b(src0, src0);
+ tmp1 = __lsx_vilvh_b(src0, src0);
+ tmp2 = __lsx_vilvl_b(zero, src1);
+ tmp3 = __lsx_vilvh_b(zero, src1);
+ dst0 = __lsx_vmuh_hu(tmp0, tmp2);
+ dst1 = __lsx_vmuh_hu(tmp1, tmp3);
+ dst0 = __lsx_vpickev_b(dst1, dst0);
+ __lsx_vst(dst0, dst_argb, 0);
+ src_argb0 += 16;
+ src_argb1 += 16;
+ dst_argb += 16;
+ }
+}
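+
+// ARGBMultiplyRow approximates (a * b) / 255 per channel: vilvl_b/vilvh_b
+// replicate each src_argb0 byte into both halves of a 16-bit lane, giving
+// a * 0x101, and vmuh_hu keeps the high half of the product, so each output
+// byte is (a * 0x101 * b) >> 16.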
+
+void ARGBAddRow_LSX(const uint8_t* src_argb0,
+ const uint8_t* src_argb1,
+ uint8_t* dst_argb,
+ int width) {
+ int x;
+ int len = width / 4;
+ __m128i src0, src1, dst0;
+
+ for (x = 0; x < len; x++) {
+ DUP2_ARG2(__lsx_vld, src_argb0, 0, src_argb1, 0, src0, src1);
+ dst0 = __lsx_vsadd_bu(src0, src1);
+ __lsx_vst(dst0, dst_argb, 0);
+ src_argb0 += 16;
+ src_argb1 += 16;
+ dst_argb += 16;
+ }
+}
+
+void ARGBSubtractRow_LSX(const uint8_t* src_argb0,
+ const uint8_t* src_argb1,
+ uint8_t* dst_argb,
+ int width) {
+ int x;
+ int len = width / 4;
+ __m128i src0, src1, dst0;
+
+ for (x = 0; x < len; x++) {
+ DUP2_ARG2(__lsx_vld, src_argb0, 0, src_argb1, 0, src0, src1);
+ dst0 = __lsx_vssub_bu(src0, src1);
+ __lsx_vst(dst0, dst_argb, 0);
+ src_argb0 += 16;
+ src_argb1 += 16;
+ dst_argb += 16;
+ }
+}
+
+void ARGBAttenuateRow_LSX(const uint8_t* src_argb,
+ uint8_t* dst_argb,
+ int width) {
+ int x;
+ int len = width / 8;
+ __m128i src0, src1, tmp0, tmp1;
+ __m128i reg0, reg1, reg2, reg3, reg4, reg5;
+ __m128i b, g, r, a, dst0, dst1;
+ __m128i control = {0x0005000100040000, 0x0007000300060002};
+
+ for (x = 0; x < len; x++) {
+ DUP2_ARG2(__lsx_vld, src_argb, 0, src_argb, 16, src0, src1);
+ tmp0 = __lsx_vpickev_b(src1, src0);
+ tmp1 = __lsx_vpickod_b(src1, src0);
+ b = __lsx_vpackev_b(tmp0, tmp0);
+ r = __lsx_vpackod_b(tmp0, tmp0);
+ g = __lsx_vpackev_b(tmp1, tmp1);
+ a = __lsx_vpackod_b(tmp1, tmp1);
+ reg0 = __lsx_vmulwev_w_hu(b, a);
+ reg1 = __lsx_vmulwod_w_hu(b, a);
+ reg2 = __lsx_vmulwev_w_hu(r, a);
+ reg3 = __lsx_vmulwod_w_hu(r, a);
+ reg4 = __lsx_vmulwev_w_hu(g, a);
+ reg5 = __lsx_vmulwod_w_hu(g, a);
+ reg0 = __lsx_vssrani_h_w(reg1, reg0, 24);
+ reg2 = __lsx_vssrani_h_w(reg3, reg2, 24);
+ reg4 = __lsx_vssrani_h_w(reg5, reg4, 24);
+ reg0 = __lsx_vshuf_h(control, reg0, reg0);
+ reg2 = __lsx_vshuf_h(control, reg2, reg2);
+ reg4 = __lsx_vshuf_h(control, reg4, reg4);
+ tmp0 = __lsx_vpackev_b(reg4, reg0);
+ tmp1 = __lsx_vpackev_b(a, reg2);
+ dst0 = __lsx_vilvl_h(tmp1, tmp0);
+ dst1 = __lsx_vilvh_h(tmp1, tmp0);
+ __lsx_vst(dst0, dst_argb, 0);
+ __lsx_vst(dst1, dst_argb, 16);
+ dst_argb += 32;
+ src_argb += 32;
+ }
+}
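+
+// ARGBAttenuateRow multiplies B, G and R by alpha with the replicated-byte
+// trick: ((c * 0x101) * (a * 0x101)) >> 24 approximates c * a / 255, and the
+// original alpha bytes are re-inserted by the final vpackev_b(a, reg2).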
+
+void ARGBToRGB565DitherRow_LSX(const uint8_t* src_argb,
+ uint8_t* dst_rgb,
+ uint32_t dither4,
+ int width) {
+ int x;
+ int len = width / 8;
+ __m128i src0, src1, tmp0, tmp1, dst0;
+ __m128i b, g, r;
+ __m128i zero = __lsx_vldi(0);
+ __m128i vec_dither = __lsx_vldrepl_w(&dither4, 0);
+
+ vec_dither = __lsx_vilvl_b(zero, vec_dither);
+ for (x = 0; x < len; x++) {
+ DUP2_ARG2(__lsx_vld, src_argb, 0, src_argb, 16, src0, src1);
+ tmp0 = __lsx_vpickev_b(src1, src0);
+ tmp1 = __lsx_vpickod_b(src1, src0);
+ b = __lsx_vpackev_b(zero, tmp0);
+ r = __lsx_vpackod_b(zero, tmp0);
+ g = __lsx_vpackev_b(zero, tmp1);
+ b = __lsx_vadd_h(b, vec_dither);
+ g = __lsx_vadd_h(g, vec_dither);
+ r = __lsx_vadd_h(r, vec_dither);
+ DUP2_ARG1(__lsx_vclip255_h, b, g, b, g);
+ r = __lsx_vclip255_h(r);
+ b = __lsx_vsrai_h(b, 3);
+ g = __lsx_vsrai_h(g, 2);
+ r = __lsx_vsrai_h(r, 3);
+ g = __lsx_vslli_h(g, 5);
+ r = __lsx_vslli_h(r, 11);
+ dst0 = __lsx_vor_v(b, g);
+ dst0 = __lsx_vor_v(dst0, r);
+ __lsx_vst(dst0, dst_rgb, 0);
+ src_argb += 32;
+ dst_rgb += 16;
+ }
+}
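+
+// The 4-byte dither pattern is replicated across the vector, widened to 16
+// bits and added to each channel, and the sums are clipped to 255 before the
+// 5/6/5 truncation, so dithering cannot overflow a channel.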
+
+void ARGBShuffleRow_LSX(const uint8_t* src_argb,
+ uint8_t* dst_argb,
+ const uint8_t* shuffler,
+ int width) {
+ int x;
+ int len = width / 8;
+ __m128i src0, src1, dst0, dst1;
+ __m128i shuf = {0x0404040400000000, 0x0C0C0C0C08080808};
+ __m128i temp = __lsx_vldrepl_w(shuffler, 0);
+
+ shuf = __lsx_vadd_b(shuf, temp);
+ for (x = 0; x < len; x++) {
+ DUP2_ARG2(__lsx_vld, src_argb, 0, src_argb, 16, src0, src1);
+ dst0 = __lsx_vshuf_b(src0, src0, shuf);
+ dst1 = __lsx_vshuf_b(src1, src1, shuf);
+ __lsx_vst(dst0, dst_argb, 0);
+ __lsx_vst(dst1, dst_argb, 16);
+ src_argb += 32;
+ dst_argb += 32;
+ }
+}
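+
+// ARGBShuffleRow builds a full vshuf_b control from the caller's 4-byte
+// shuffler: the base vector {0,0,0,0, 4,4,4,4, 8,8,8,8, 12,12,12,12} plus
+// the replicated shuffler gives a source byte index for every output byte.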
+
+void ARGBShadeRow_LSX(const uint8_t* src_argb,
+ uint8_t* dst_argb,
+ int width,
+ uint32_t value) {
+ int x;
+ int len = width / 4;
+ __m128i src0, dst0, tmp0, tmp1;
+ __m128i vec_value = __lsx_vreplgr2vr_w(value);
+
+ vec_value = __lsx_vilvl_b(vec_value, vec_value);
+ for (x = 0; x < len; x++) {
+ src0 = __lsx_vld(src_argb, 0);
+ tmp0 = __lsx_vilvl_b(src0, src0);
+ tmp1 = __lsx_vilvh_b(src0, src0);
+ tmp0 = __lsx_vmuh_hu(tmp0, vec_value);
+ tmp1 = __lsx_vmuh_hu(tmp1, vec_value);
+ dst0 = __lsx_vpickod_b(tmp1, tmp0);
+ __lsx_vst(dst0, dst_argb, 0);
+ src_argb += 16;
+ dst_argb += 16;
+ }
+}
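+
+// ARGBShadeRow uses the same replicated-byte multiply: vmuh_hu followed by
+// vpickod_b yields ((c * 0x101) * (v * 0x101)) >> 24 per channel, roughly
+// c * v / 255.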
+
+void ARGBGrayRow_LSX(const uint8_t* src_argb, uint8_t* dst_argb, int width) {
+ int x;
+ int len = width / 8;
+ __m128i src0, src1, tmp0, tmp1;
+ __m128i reg0, reg1, reg2, dst0, dst1;
+ __m128i const_128 = __lsx_vldi(0x480);
+ __m128i const_150 = __lsx_vldi(0x96);
+ __m128i const_br = {0x4D1D4D1D4D1D4D1D, 0x4D1D4D1D4D1D4D1D};
+
+ for (x = 0; x < len; x++) {
+ DUP2_ARG2(__lsx_vld, src_argb, 0, src_argb, 16, src0, src1);
+ tmp0 = __lsx_vpickev_b(src1, src0);
+ tmp1 = __lsx_vpickod_b(src1, src0);
+ reg0 = __lsx_vdp2_h_bu(tmp0, const_br);
+ reg1 = __lsx_vmaddwev_h_bu(const_128, tmp1, const_150);
+ reg2 = __lsx_vadd_h(reg0, reg1);
+ tmp0 = __lsx_vpackod_b(reg2, reg2);
+ tmp1 = __lsx_vpackod_b(tmp1, reg2);
+ dst0 = __lsx_vilvl_h(tmp1, tmp0);
+ dst1 = __lsx_vilvh_h(tmp1, tmp0);
+ __lsx_vst(dst0, dst_argb, 0);
+ __lsx_vst(dst1, dst_argb, 16);
+ src_argb += 32;
+ dst_argb += 32;
+ }
+}
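+
+// ARGBGrayRow computes the JPEG luma Y = (29*B + 150*G + 77*R + 128) >> 8 and
+// replicates it into the B, G and R lanes while keeping the original alpha.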
+
+void ARGBSepiaRow_LSX(uint8_t* dst_argb, int width) {
+ int x;
+ int len = width / 8;
+ __m128i src0, src1, tmp0, tmp1;
+ __m128i reg0, reg1, spb, spg, spr;
+ __m128i dst0, dst1;
+ __m128i spb_g = __lsx_vldi(68);
+ __m128i spg_g = __lsx_vldi(88);
+ __m128i spr_g = __lsx_vldi(98);
+ __m128i spb_br = {0x2311231123112311, 0x2311231123112311};
+ __m128i spg_br = {0x2D162D162D162D16, 0x2D162D162D162D16};
+ __m128i spr_br = {0x3218321832183218, 0x3218321832183218};
+ __m128i shuff = {0x1706150413021100, 0x1F0E1D0C1B0A1908};
+
+ for (x = 0; x < len; x++) {
+ DUP2_ARG2(__lsx_vld, dst_argb, 0, dst_argb, 16, src0, src1);
+ tmp0 = __lsx_vpickev_b(src1, src0);
+ tmp1 = __lsx_vpickod_b(src1, src0);
+ DUP2_ARG2(__lsx_vdp2_h_bu, tmp0, spb_br, tmp0, spg_br, spb, spg);
+ spr = __lsx_vdp2_h_bu(tmp0, spr_br);
+ spb = __lsx_vmaddwev_h_bu(spb, tmp1, spb_g);
+ spg = __lsx_vmaddwev_h_bu(spg, tmp1, spg_g);
+ spr = __lsx_vmaddwev_h_bu(spr, tmp1, spr_g);
+ spb = __lsx_vsrli_h(spb, 7);
+ spg = __lsx_vsrli_h(spg, 7);
+ spr = __lsx_vsrli_h(spr, 7);
+ spg = __lsx_vsat_hu(spg, 7);
+ spr = __lsx_vsat_hu(spr, 7);
+ reg0 = __lsx_vpackev_b(spg, spb);
+ reg1 = __lsx_vshuf_b(tmp1, spr, shuff);
+ dst0 = __lsx_vilvl_h(reg1, reg0);
+ dst1 = __lsx_vilvh_h(reg1, reg0);
+ __lsx_vst(dst0, dst_argb, 0);
+ __lsx_vst(dst1, dst_argb, 16);
+ dst_argb += 32;
+ }
+}
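+
+// Sepia weights used above, each sum shifted right by 7 and saturated to 255:
+//   B' = (17*B + 68*G + 35*R) >> 7
+//   G' = (22*B + 88*G + 45*R) >> 7
+//   R' = (24*B + 98*G + 50*R) >> 7
+// Alpha passes through unchanged.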
+
void ARGB4444ToARGBRow_LSX(const uint8_t* src_argb4444,
uint8_t* dst_argb,
int width) {
@@ -561,39 +1687,6 @@ void RGB565ToUVRow_LSX(const uint8_t* src_rgb565,
}
}
-void RGB24ToYRow_LSX(const uint8_t* src_rgb24, uint8_t* dst_y, int width) {
- int x;
- int len = width / 16;
- __m128i src0, src1, src2;
- __m128i tmp0, tmp1, tmp2, tmp3;
- __m128i reg0, reg1, dst0;
- __m128i const_129 = __lsx_vldi(129);
- __m128i const_br = {0x4219421942194219, 0x4219421942194219};
- __m128i const_1080 = {0x1080108010801080, 0x1080108010801080};
- __m128i shuff0 = {0x0B09080605030200, 0x17151412110F0E0C};
- __m128i shuff1 = {0x0301001E1D1B1A18, 0x0F0D0C0A09070604};
- __m128i shuff2 = {0x000A000700040001, 0x001600130010000D};
- __m128i shuff3 = {0x0002001F001C0019, 0x000E000B00080005};
-
- for (x = 0; x < len; x++) {
- src0 = __lsx_vld(src_rgb24, 0);
- src1 = __lsx_vld(src_rgb24, 16);
- src2 = __lsx_vld(src_rgb24, 32);
- tmp0 = __lsx_vshuf_b(src1, src0, shuff0);
- tmp1 = __lsx_vshuf_b(src1, src2, shuff1);
- tmp2 = __lsx_vshuf_b(src1, src0, shuff2);
- tmp3 = __lsx_vshuf_b(src1, src2, shuff3);
- reg0 = __lsx_vmaddwev_h_bu(const_1080, tmp2, const_129);
- reg1 = __lsx_vmaddwev_h_bu(const_1080, tmp3, const_129);
- reg0 = __lsx_vdp2add_h_bu(reg0, const_br, tmp0);
- reg1 = __lsx_vdp2add_h_bu(reg1, const_br, tmp1);
- dst0 = __lsx_vpickod_b(reg1, reg0);
- __lsx_vst(dst0, dst_y, 0);
- dst_y += 16;
- src_rgb24 += 48;
- }
-}
-
void RGB24ToUVRow_LSX(const uint8_t* src_rgb24,
int src_stride_rgb24,
uint8_t* dst_u,
@@ -647,39 +1740,6 @@ void RGB24ToUVRow_LSX(const uint8_t* src_rgb24,
}
}
-void RAWToYRow_LSX(const uint8_t* src_raw, uint8_t* dst_y, int width) {
- int x;
- int len = width / 16;
- __m128i src0, src1, src2;
- __m128i tmp0, tmp1, tmp2, tmp3;
- __m128i reg0, reg1, dst0;
- __m128i const_129 = __lsx_vldi(129);
- __m128i const_br = {0x1942194219421942, 0x1942194219421942};
- __m128i const_1080 = {0x1080108010801080, 0x1080108010801080};
- __m128i shuff0 = {0x0B09080605030200, 0x17151412110F0E0C};
- __m128i shuff1 = {0x0301001E1D1B1A18, 0x0F0D0C0A09070604};
- __m128i shuff2 = {0x000A000700040001, 0x001600130010000D};
- __m128i shuff3 = {0x0002001F001C0019, 0x000E000B00080005};
-
- for (x = 0; x < len; x++) {
- src0 = __lsx_vld(src_raw, 0);
- src1 = __lsx_vld(src_raw, 16);
- src2 = __lsx_vld(src_raw, 32);
- tmp0 = __lsx_vshuf_b(src1, src0, shuff0);
- tmp1 = __lsx_vshuf_b(src1, src2, shuff1);
- tmp2 = __lsx_vshuf_b(src1, src0, shuff2);
- tmp3 = __lsx_vshuf_b(src1, src2, shuff3);
- reg0 = __lsx_vmaddwev_h_bu(const_1080, tmp2, const_129);
- reg1 = __lsx_vmaddwev_h_bu(const_1080, tmp3, const_129);
- reg0 = __lsx_vdp2add_h_bu(reg0, const_br, tmp0);
- reg1 = __lsx_vdp2add_h_bu(reg1, const_br, tmp1);
- dst0 = __lsx_vsrlni_b_h(reg1, reg0, 8);
- __lsx_vst(dst0, dst_y, 0);
- dst_y += 16;
- src_raw += 48;
- }
-}
-
void RAWToUVRow_LSX(const uint8_t* src_raw,
int src_stride_raw,
uint8_t* dst_u,
@@ -914,62 +1974,6 @@ void SobelXYRow_LSX(const uint8_t* src_sobelx,
}
}
-void ARGBToYJRow_LSX(const uint8_t* src_argb, uint8_t* dst_y, int width) {
- int x;
- int len = width / 16;
- __m128i src0, src1, src2, src3, dst0;
- __m128i tmp0, tmp1, tmp2, tmp3;
- __m128i reg0, reg1;
- __m128i const_128 = __lsx_vldi(0x480);
- __m128i const_150 = __lsx_vldi(0x96);
- __m128i const_br = {0x4D1D4D1D4D1D4D1D, 0x4D1D4D1D4D1D4D1D};
-
- for (x = 0; x < len; x++) {
- DUP4_ARG2(__lsx_vld, src_argb, 0, src_argb, 16, src_argb, 32, src_argb, 48,
- src0, src1, src2, src3);
- tmp0 = __lsx_vpickev_b(src1, src0);
- tmp1 = __lsx_vpickod_b(src1, src0);
- tmp2 = __lsx_vpickev_b(src3, src2);
- tmp3 = __lsx_vpickod_b(src3, src2);
- reg0 = __lsx_vmaddwev_h_bu(const_128, tmp1, const_150);
- reg1 = __lsx_vmaddwev_h_bu(const_128, tmp3, const_150);
- reg0 = __lsx_vdp2add_h_bu(reg0, const_br, tmp0);
- reg1 = __lsx_vdp2add_h_bu(reg1, const_br, tmp2);
- dst0 = __lsx_vpickod_b(reg1, reg0);
- __lsx_vst(dst0, dst_y, 0);
- dst_y += 16;
- src_argb += 64;
- }
-}
-
-void BGRAToYRow_LSX(const uint8_t* src_bgra, uint8_t* dst_y, int width) {
- int x;
- int len = width / 16;
- __m128i src0, src1, src2, src3, dst0;
- __m128i tmp0, tmp1, tmp2, tmp3;
- __m128i reg0, reg1;
- __m128i const_129 = __lsx_vldi(0x81);
- __m128i const_br = {0x1942194219421942, 0x1942194219421942};
- __m128i const_1080 = {0x1080108010801080, 0x1080108010801080};
-
- for (x = 0; x < len; x++) {
- DUP4_ARG2(__lsx_vld, src_bgra, 0, src_bgra, 16, src_bgra, 32, src_bgra, 48,
- src0, src1, src2, src3);
- tmp0 = __lsx_vpickod_b(src1, src0);
- tmp1 = __lsx_vpickev_b(src1, src0);
- tmp2 = __lsx_vpickod_b(src3, src2);
- tmp3 = __lsx_vpickev_b(src3, src2);
- reg0 = __lsx_vmaddwod_h_bu(const_1080, tmp1, const_129);
- reg1 = __lsx_vmaddwod_h_bu(const_1080, tmp3, const_129);
- reg0 = __lsx_vdp2add_h_bu(reg0, const_br, tmp0);
- reg1 = __lsx_vdp2add_h_bu(reg1, const_br, tmp2);
- dst0 = __lsx_vsrlni_b_h(reg1, reg0, 8);
- __lsx_vst(dst0, dst_y, 0);
- dst_y += 16;
- src_bgra += 64;
- }
-}
-
void BGRAToUVRow_LSX(const uint8_t* src_bgra,
int src_stride_bgra,
uint8_t* dst_u,
@@ -1018,34 +2022,6 @@ void BGRAToUVRow_LSX(const uint8_t* src_bgra,
}
}
-void ABGRToYRow_LSX(const uint8_t* src_abgr, uint8_t* dst_y, int width) {
- int x;
- int len = width / 16;
- __m128i src0, src1, src2, src3, dst0;
- __m128i tmp0, tmp1, tmp2, tmp3;
- __m128i reg0, reg1;
- __m128i const_129 = __lsx_vldi(0x81);
- __m128i const_br = {0x1942194219421942, 0x1942194219421942};
- __m128i const_1080 = {0x1080108010801080, 0x1080108010801080};
-
- for (x = 0; x < len; x++) {
- DUP4_ARG2(__lsx_vld, src_abgr, 0, src_abgr, 16, src_abgr, 32, src_abgr, 48,
- src0, src1, src2, src3);
- tmp0 = __lsx_vpickev_b(src1, src0);
- tmp1 = __lsx_vpickod_b(src1, src0);
- tmp2 = __lsx_vpickev_b(src3, src2);
- tmp3 = __lsx_vpickod_b(src3, src2);
- reg0 = __lsx_vmaddwev_h_bu(const_1080, tmp1, const_129);
- reg1 = __lsx_vmaddwev_h_bu(const_1080, tmp3, const_129);
- reg0 = __lsx_vdp2add_h_bu(reg0, const_br, tmp0);
- reg1 = __lsx_vdp2add_h_bu(reg1, const_br, tmp2);
- dst0 = __lsx_vsrlni_b_h(reg1, reg0, 8);
- __lsx_vst(dst0, dst_y, 0);
- dst_y += 16;
- src_abgr += 64;
- }
-}
-
void ABGRToUVRow_LSX(const uint8_t* src_abgr,
int src_stride_abgr,
uint8_t* dst_u,
@@ -1094,34 +2070,6 @@ void ABGRToUVRow_LSX(const uint8_t* src_abgr,
}
}
-void RGBAToYRow_LSX(const uint8_t* src_rgba, uint8_t* dst_y, int width) {
- int x;
- int len = width / 16;
- __m128i src0, src1, src2, src3, dst0;
- __m128i tmp0, tmp1, tmp2, tmp3;
- __m128i reg0, reg1;
- __m128i const_129 = __lsx_vldi(0x81);
- __m128i const_br = {0x4219421942194219, 0x4219421942194219};
- __m128i const_1080 = {0x1080108010801080, 0x1080108010801080};
-
- for (x = 0; x < len; x++) {
- DUP4_ARG2(__lsx_vld, src_rgba, 0, src_rgba, 16, src_rgba, 32, src_rgba, 48,
- src0, src1, src2, src3);
- tmp0 = __lsx_vpickod_b(src1, src0);
- tmp1 = __lsx_vpickev_b(src1, src0);
- tmp2 = __lsx_vpickod_b(src3, src2);
- tmp3 = __lsx_vpickev_b(src3, src2);
- reg0 = __lsx_vmaddwod_h_bu(const_1080, tmp1, const_129);
- reg1 = __lsx_vmaddwod_h_bu(const_1080, tmp3, const_129);
- reg0 = __lsx_vdp2add_h_bu(reg0, const_br, tmp0);
- reg1 = __lsx_vdp2add_h_bu(reg1, const_br, tmp2);
- dst0 = __lsx_vsrlni_b_h(reg1, reg0, 8);
- __lsx_vst(dst0, dst_y, 0);
- dst_y += 16;
- src_rgba += 64;
- }
-}
-
void RGBAToUVRow_LSX(const uint8_t* src_rgba,
int src_stride_rgba,
uint8_t* dst_u,
@@ -1821,6 +2769,216 @@ void HalfFloatRow_LSX(const uint16_t* src,
}
}
+struct RgbConstants {
+ uint8_t kRGBToY[4];
+ uint16_t kAddY;
+ uint16_t pad;
+};
+
+// RGB to JPEG coefficients
+// B * 0.1140 coefficient = 29
+// G * 0.5870 coefficient = 150
+// R * 0.2990 coefficient = 77
+// Add 0.5 = 0x80
+static const struct RgbConstants kRgb24JPEGConstants = {{29, 150, 77, 0},
+ 128,
+ 0};
+
+static const struct RgbConstants kRawJPEGConstants = {{77, 150, 29, 0}, 128, 0};
+
+// RGB to BT.601 coefficients
+// B * 0.1016 coefficient = 25
+// G * 0.5078 coefficient = 129
+// R * 0.2578 coefficient = 66
+// Add 16.5 = 0x1080
+
+static const struct RgbConstants kRgb24I601Constants = {{25, 129, 66, 0},
+ 0x1080,
+ 0};
+
+static const struct RgbConstants kRawI601Constants = {{66, 129, 25, 0},
+ 0x1080,
+ 0};
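+
+// Scalar form of what the matrix row helpers below compute, with the
+// coefficients and bias taken from one of the RgbConstants tables above:
+//   Y = (kRGBToY[0]*B + kRGBToY[1]*G + kRGBToY[2]*R + kAddY) >> 8
+// For BT.601, kAddY = 0x1080 folds the +16 range offset into the +0.5
+// rounding term; for JPEG it is the plain 0x80 rounding bias.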
+
+// ARGB expects the first 3 values to contain RGB and the 4th value to be
+// ignored.
+static void ARGBToYMatrixRow_LSX(const uint8_t* src_argb,
+ uint8_t* dst_y,
+ int width,
+ const struct RgbConstants* rgbconstants) {
+ asm volatile(
+ "vldrepl.b $vr0, %3, 0 \n\t" // load rgbconstants
+ "vldrepl.b $vr1, %3, 1 \n\t" // load rgbconstants
+ "vldrepl.b $vr2, %3, 2 \n\t" // load rgbconstants
+ "vldrepl.h $vr3, %3, 4 \n\t" // load rgbconstants
+ "1: \n\t"
+ "vld $vr4, %0, 0 \n\t"
+ "vld $vr5, %0, 16 \n\t"
+ "vld $vr6, %0, 32 \n\t"
+ "vld $vr7, %0, 48 \n\t" // load 16 pixels of
+ // ARGB
+ "vor.v $vr12, $vr3, $vr3 \n\t"
+ "vor.v $vr13, $vr3, $vr3 \n\t"
+ "addi.d %2, %2, -16 \n\t" // 16 processed per
+ // loop.
+ "vpickev.b $vr8, $vr5, $vr4 \n\t" // BR
+ "vpickev.b $vr10, $vr7, $vr6 \n\t"
+ "vpickod.b $vr9, $vr5, $vr4 \n\t" // GA
+ "vpickod.b $vr11, $vr7, $vr6 \n\t"
+ "vmaddwev.h.bu $vr12, $vr8, $vr0 \n\t" // B
+ "vmaddwev.h.bu $vr13, $vr10, $vr0 \n\t"
+ "vmaddwev.h.bu $vr12, $vr9, $vr1 \n\t" // G
+ "vmaddwev.h.bu $vr13, $vr11, $vr1 \n\t"
+ "vmaddwod.h.bu $vr12, $vr8, $vr2 \n\t" // R
+ "vmaddwod.h.bu $vr13, $vr10, $vr2 \n\t"
+ "addi.d %0, %0, 64 \n\t"
+ "vpickod.b $vr10, $vr13, $vr12 \n\t"
+ "vst $vr10, %1, 0 \n\t"
+ "addi.d %1, %1, 16 \n\t"
+ "bnez %2, 1b \n\t"
+ : "+&r"(src_argb), // %0
+ "+&r"(dst_y), // %1
+ "+&r"(width) // %2
+ : "r"(rgbconstants)
+ : "memory");
+}
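+
+// Note: these matrix helpers assume width is a positive multiple of 16; the
+// loop decrements by 16 and exits only when the counter reaches zero.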
+
+void ARGBToYRow_LSX(const uint8_t* src_argb, uint8_t* dst_y, int width) {
+ ARGBToYMatrixRow_LSX(src_argb, dst_y, width, &kRgb24I601Constants);
+}
+
+void ARGBToYJRow_LSX(const uint8_t* src_argb, uint8_t* dst_yj, int width) {
+ ARGBToYMatrixRow_LSX(src_argb, dst_yj, width, &kRgb24JPEGConstants);
+}
+
+void ABGRToYRow_LSX(const uint8_t* src_abgr, uint8_t* dst_y, int width) {
+ ARGBToYMatrixRow_LSX(src_abgr, dst_y, width, &kRawI601Constants);
+}
+
+void ABGRToYJRow_LSX(const uint8_t* src_abgr, uint8_t* dst_yj, int width) {
+ ARGBToYMatrixRow_LSX(src_abgr, dst_yj, width, &kRawJPEGConstants);
+}
+
+// RGBA expects the first value to be A (ignored), followed by 3 values
+// containing RGB.
+// Same code as ARGB, except the channels land in different even/odd byte
+// lanes because of the leading alpha byte.
+static void RGBAToYMatrixRow_LSX(const uint8_t* src_rgba,
+ uint8_t* dst_y,
+ int width,
+ const struct RgbConstants* rgbconstants) {
+ asm volatile(
+ "vldrepl.b $vr0, %3, 0 \n\t" // load rgbconstants
+ "vldrepl.b $vr1, %3, 1 \n\t" // load rgbconstants
+ "vldrepl.b $vr2, %3, 2 \n\t" // load rgbconstants
+ "vldrepl.h $vr3, %3, 4 \n\t" // load rgbconstants
+ "1: \n\t"
+ "vld $vr4, %0, 0 \n\t"
+ "vld $vr5, %0, 16 \n\t"
+ "vld $vr6, %0, 32 \n\t"
+ "vld $vr7, %0, 48 \n\t" // load 16 pixels of
+ // RGBA
+ "vor.v $vr12, $vr3, $vr3 \n\t"
+ "vor.v $vr13, $vr3, $vr3 \n\t"
+ "addi.d %2, %2, -16 \n\t" // 16 processed per
+ // loop.
+ "vpickev.b $vr8, $vr5, $vr4 \n\t" // AG
+ "vpickev.b $vr10, $vr7, $vr6 \n\t"
+ "vpickod.b $vr9, $vr5, $vr4 \n\t" // BR
+ "vpickod.b $vr11, $vr7, $vr6 \n\t"
+ "vmaddwev.h.bu $vr12, $vr9, $vr0 \n\t" // B
+ "vmaddwev.h.bu $vr13, $vr11, $vr0 \n\t"
+ "vmaddwod.h.bu $vr12, $vr8, $vr1 \n\t" // G
+ "vmaddwod.h.bu $vr13, $vr10, $vr1 \n\t"
+ "vmaddwod.h.bu $vr12, $vr9, $vr2 \n\t" // R
+ "vmaddwod.h.bu $vr13, $vr11, $vr2 \n\t"
+ "addi.d %0, %0, 64 \n\t"
+ "vpickod.b $vr10, $vr13, $vr12 \n\t"
+ "vst $vr10, %1, 0 \n\t"
+ "addi.d %1, %1, 16 \n\t"
+ "bnez %2, 1b \n\t"
+ : "+&r"(src_rgba), // %0
+ "+&r"(dst_y), // %1
+ "+&r"(width) // %2
+ : "r"(rgbconstants)
+ : "memory");
+}
+
+void RGBAToYRow_LSX(const uint8_t* src_rgba, uint8_t* dst_y, int width) {
+ RGBAToYMatrixRow_LSX(src_rgba, dst_y, width, &kRgb24I601Constants);
+}
+
+void RGBAToYJRow_LSX(const uint8_t* src_rgba, uint8_t* dst_yj, int width) {
+ RGBAToYMatrixRow_LSX(src_rgba, dst_yj, width, &kRgb24JPEGConstants);
+}
+
+void BGRAToYRow_LSX(const uint8_t* src_bgra, uint8_t* dst_y, int width) {
+ RGBAToYMatrixRow_LSX(src_bgra, dst_y, width, &kRawI601Constants);
+}
+
+static void RGBToYMatrixRow_LSX(const uint8_t* src_rgba,
+ uint8_t* dst_y,
+ int width,
+ const struct RgbConstants* rgbconstants) {
+ int8_t shuff[64] = {0, 2, 3, 5, 6, 8, 9, 11, 12, 14, 15, 17, 18,
+ 20, 21, 23, 24, 26, 27, 29, 30, 0, 1, 3, 4, 6,
+ 7, 9, 10, 12, 13, 15, 1, 0, 4, 0, 7, 0, 10,
+ 0, 13, 0, 16, 0, 19, 0, 22, 0, 25, 0, 28, 0,
+ 31, 0, 2, 0, 5, 0, 8, 0, 11, 0, 14, 0};
+ asm volatile(
+ "vldrepl.b $vr0, %3, 0 \n\t" // load rgbconstants
+ "vldrepl.b $vr1, %3, 1 \n\t" // load rgbconstants
+ "vldrepl.b $vr2, %3, 2 \n\t" // load rgbconstants
+ "vldrepl.h $vr3, %3, 4 \n\t" // load rgbconstants
+ "vld $vr4, %4, 0 \n\t" // load shuff
+ "vld $vr5, %4, 16 \n\t"
+ "vld $vr6, %4, 32 \n\t"
+ "vld $vr7, %4, 48 \n\t"
+ "1: \n\t"
+ "vld $vr8, %0, 0 \n\t"
+ "vld $vr9, %0, 16 \n\t"
+ "vld $vr10, %0, 32 \n\t" // load 16 pixels of
+ // RGB
+ "vor.v $vr12, $vr3, $vr3 \n\t"
+ "vor.v $vr13, $vr3, $vr3 \n\t"
+ "addi.d %2, %2, -16 \n\t" // 16 processed per
+ // loop.
+ "vshuf.b $vr14, $vr9, $vr8, $vr4 \n\t"
+ "vshuf.b $vr15, $vr9, $vr10, $vr5 \n\t"
+ "vshuf.b $vr16, $vr9, $vr8, $vr6 \n\t"
+ "vshuf.b $vr17, $vr9, $vr10, $vr7 \n\t"
+ "vmaddwev.h.bu $vr12, $vr16, $vr1 \n\t" // G
+ "vmaddwev.h.bu $vr13, $vr17, $vr1 \n\t"
+ "vmaddwev.h.bu $vr12, $vr14, $vr0 \n\t" // B
+ "vmaddwev.h.bu $vr13, $vr15, $vr0 \n\t"
+ "vmaddwod.h.bu $vr12, $vr14, $vr2 \n\t" // R
+ "vmaddwod.h.bu $vr13, $vr15, $vr2 \n\t"
+ "addi.d %0, %0, 48 \n\t"
+ "vpickod.b $vr10, $vr13, $vr12 \n\t"
+ "vst $vr10, %1, 0 \n\t"
+ "addi.d %1, %1, 16 \n\t"
+ "bnez %2, 1b \n\t"
+ : "+&r"(src_rgba), // %0
+ "+&r"(dst_y), // %1
+ "+&r"(width) // %2
+ : "r"(rgbconstants), // %3
+ "r"(shuff) // %4
+ : "memory");
+}
+
+void RGB24ToYJRow_LSX(const uint8_t* src_rgb24, uint8_t* dst_yj, int width) {
+ RGBToYMatrixRow_LSX(src_rgb24, dst_yj, width, &kRgb24JPEGConstants);
+}
+
+void RAWToYJRow_LSX(const uint8_t* src_raw, uint8_t* dst_yj, int width) {
+ RGBToYMatrixRow_LSX(src_raw, dst_yj, width, &kRawJPEGConstants);
+}
+
+void RGB24ToYRow_LSX(const uint8_t* src_rgb24, uint8_t* dst_y, int width) {
+ RGBToYMatrixRow_LSX(src_rgb24, dst_y, width, &kRgb24I601Constants);
+}
+
+void RAWToYRow_LSX(const uint8_t* src_raw, uint8_t* dst_y, int width) {
+ RGBToYMatrixRow_LSX(src_raw, dst_y, width, &kRawI601Constants);
+}
+
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv