author     Frank Barchard <fbarchard@google.com>     2023-06-29 22:53:42 -0700
committer  Frank Barchard <fbarchard@chromium.org>   2023-06-30 17:46:56 +0000
commit     650be7496fe171cc0d93935a4c161d45101533f4 (patch)
tree       354291b96845915a654fba3c61e353edb407cff8
parent     a34a0ba68781d0d1914597449d871775dad50984 (diff)
download   libyuv-650be7496fe171cc0d93935a4c161d45101533f4.tar.gz
Fix warnings for missing prototypes
- Add static to internal scale and rotate functions
- Remove unittest that tested an internal scale function
- Remove unused private functions
- Include missing scale_argb.h header
- Bump version and apply clang format

Bug: libyuv:830
Change-Id: I45bab0423b86334f9707f935aedd0c6efc442dd4
Reviewed-on: https://chromium-review.googlesource.com/c/libyuv/libyuv/+/4658956
Reviewed-by: Mirko Bonadei <mbonadei@chromium.org>
-rw-r--r--  README.chromium                      2
-rw-r--r--  include/libyuv/planar_functions.h    9
-rw-r--r--  include/libyuv/version.h             2
-rw-r--r--  source/planar_functions.cc          54
-rw-r--r--  source/rotate.cc                    13
-rw-r--r--  source/rotate_common.cc             31
-rw-r--r--  source/row_neon64.cc                 4
-rw-r--r--  source/row_rvv.cc                    6
-rw-r--r--  source/scale.cc                    168
-rw-r--r--  source/scale_argb.cc                 1
-rw-r--r--  source/scale_common.cc              29
-rw-r--r--  source/scale_rvv.cc                  6
-rw-r--r--  source/scale_uv.cc                  64
-rw-r--r--  unit_test/scale_test.cc             42
14 files changed, 156 insertions, 275 deletions
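
The recurring fix in the diffs below is to mark functions that are only used inside their own translation unit as static, which is what silences clang's -Wmissing-prototypes warning for them; functions that remain part of the public API instead get their prototypes pulled in through the previously missing header include. A minimal sketch of the warning and the fix, compiled as plain C here for simplicity (HalveWidth and ScaleWidth are hypothetical names for illustration, not libyuv functions):

    // example.c -- compile with: clang -Wmissing-prototypes -c example.c

    int ScaleWidth(int width);  // prototype for the public function (normally in a header)

    // Internal helper: without 'static', clang reports
    //   "no previous prototype for function 'HalveWidth'".
    // Marking it static makes it file-local, so no separate prototype is expected.
    static int HalveWidth(int width) {
      return (width + 1) / 2;
    }

    // Public function: a prototype precedes it, so -Wmissing-prototypes stays quiet.
    int ScaleWidth(int width) {
      return HalveWidth(width);
    }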
diff --git a/README.chromium b/README.chromium
index aecf7085..527cc80c 100644
--- a/README.chromium
+++ b/README.chromium
@@ -1,6 +1,6 @@
Name: libyuv
URL: http://code.google.com/p/libyuv/
-Version: 1873
+Version: 1874
License: BSD
License File: LICENSE
diff --git a/include/libyuv/planar_functions.h b/include/libyuv/planar_functions.h
index 154f2f21..8256fcaa 100644
--- a/include/libyuv/planar_functions.h
+++ b/include/libyuv/planar_functions.h
@@ -827,15 +827,6 @@ int ARGBCopyYToAlpha(const uint8_t* src_y,
int width,
int height);
-typedef void (*ARGBBlendRow)(const uint8_t* src_argb0,
- const uint8_t* src_argb1,
- uint8_t* dst_argb,
- int width);
-
-// Get function to Alpha Blend ARGB pixels and store to destination.
-LIBYUV_API
-ARGBBlendRow GetARGBBlend();
-
// Alpha Blend ARGB images and store to destination.
// Source is pre-multiplied by alpha using ARGBAttenuate.
// Alpha of destination is set to 255.
diff --git a/include/libyuv/version.h b/include/libyuv/version.h
index 1888d67a..7c9cc54e 100644
--- a/include/libyuv/version.h
+++ b/include/libyuv/version.h
@@ -11,6 +11,6 @@
#ifndef INCLUDE_LIBYUV_VERSION_H_
#define INCLUDE_LIBYUV_VERSION_H_
-#define LIBYUV_VERSION 1873
+#define LIBYUV_VERSION 1874
#endif // INCLUDE_LIBYUV_VERSION_H_
diff --git a/source/planar_functions.cc b/source/planar_functions.cc
index dcc37836..ca9d2151 100644
--- a/source/planar_functions.cc
+++ b/source/planar_functions.cc
@@ -2783,37 +2783,6 @@ int RGB24Mirror(const uint8_t* src_rgb24,
return 0;
}
-// Get a blender that optimized for the CPU and pixel count.
-// As there are 6 blenders to choose from, the caller should try to use
-// the same blend function for all pixels if possible.
-LIBYUV_API
-ARGBBlendRow GetARGBBlend() {
- void (*ARGBBlendRow)(const uint8_t* src_argb, const uint8_t* src_argb1,
- uint8_t* dst_argb, int width) = ARGBBlendRow_C;
-#if defined(HAS_ARGBBLENDROW_SSSE3)
- if (TestCpuFlag(kCpuHasSSSE3)) {
- ARGBBlendRow = ARGBBlendRow_SSSE3;
- return ARGBBlendRow;
- }
-#endif
-#if defined(HAS_ARGBBLENDROW_NEON)
- if (TestCpuFlag(kCpuHasNEON)) {
- ARGBBlendRow = ARGBBlendRow_NEON;
- }
-#endif
-#if defined(HAS_ARGBBLENDROW_MSA)
- if (TestCpuFlag(kCpuHasMSA)) {
- ARGBBlendRow = ARGBBlendRow_MSA;
- }
-#endif
-#if defined(HAS_ARGBBLENDROW_LSX)
- if (TestCpuFlag(kCpuHasLSX)) {
- ARGBBlendRow = ARGBBlendRow_LSX;
- }
-#endif
- return ARGBBlendRow;
-}
-
// Alpha Blend 2 ARGB images and store to destination.
LIBYUV_API
int ARGBBlend(const uint8_t* src_argb0,
@@ -2826,7 +2795,7 @@ int ARGBBlend(const uint8_t* src_argb0,
int height) {
int y;
void (*ARGBBlendRow)(const uint8_t* src_argb, const uint8_t* src_argb1,
- uint8_t* dst_argb, int width) = GetARGBBlend();
+ uint8_t* dst_argb, int width) = ARGBBlendRow_C;
if (!src_argb0 || !src_argb1 || !dst_argb || width <= 0 || height == 0) {
return -1;
}
@@ -2843,7 +2812,26 @@ int ARGBBlend(const uint8_t* src_argb0,
height = 1;
src_stride_argb0 = src_stride_argb1 = dst_stride_argb = 0;
}
-
+#if defined(HAS_ARGBBLENDROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ ARGBBlendRow = ARGBBlendRow_SSSE3;
+ }
+#endif
+#if defined(HAS_ARGBBLENDROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGBBlendRow = ARGBBlendRow_NEON;
+ }
+#endif
+#if defined(HAS_ARGBBLENDROW_MSA)
+ if (TestCpuFlag(kCpuHasMSA)) {
+ ARGBBlendRow = ARGBBlendRow_MSA;
+ }
+#endif
+#if defined(HAS_ARGBBLENDROW_LSX)
+ if (TestCpuFlag(kCpuHasLSX)) {
+ ARGBBlendRow = ARGBBlendRow_LSX;
+ }
+#endif
for (y = 0; y < height; ++y) {
ARGBBlendRow(src_argb0, src_argb1, dst_argb, width);
src_argb0 += src_stride_argb0;
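
With GetARGBBlend() removed from the public API above, the CPU-specific row-function selection now happens inside ARGBBlend() itself, so callers simply invoke the high-level function. A minimal caller sketch (the 64x64 size, the strides, and the BlendExample name are illustrative assumptions, not part of this change):

    #include <stdint.h>
    #include "libyuv/planar_functions.h"

    // Blend two 64x64 premultiplied ARGB buffers into dst.
    // The optimized row function (SSSE3/NEON/MSA/LSX) is chosen inside
    // ARGBBlend() on each call, so no GetARGBBlend() lookup is needed.
    int BlendExample(const uint8_t* src0, const uint8_t* src1, uint8_t* dst) {
      return libyuv::ARGBBlend(src0, 64 * 4,  // src0 and its stride in bytes
                               src1, 64 * 4,  // src1 and its stride in bytes
                               dst, 64 * 4,   // dst and its stride in bytes
                               64, 64);       // width, height
    }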
diff --git a/source/rotate.cc b/source/rotate.cc
index 8d3978c7..3678b80a 100644
--- a/source/rotate.cc
+++ b/source/rotate.cc
@@ -489,13 +489,12 @@ int RotatePlane(const uint8_t* src,
return -1;
}
-LIBYUV_API
-void TransposePlane_16(const uint16_t* src,
- int src_stride,
- uint16_t* dst,
- int dst_stride,
- int width,
- int height) {
+static void TransposePlane_16(const uint16_t* src,
+ int src_stride,
+ uint16_t* dst,
+ int dst_stride,
+ int width,
+ int height) {
int i = height;
// Work across the source in 8x8 tiles
while (i >= 8) {
diff --git a/source/rotate_common.cc b/source/rotate_common.cc
index 4b496d1b..e72608e9 100644
--- a/source/rotate_common.cc
+++ b/source/rotate_common.cc
@@ -120,37 +120,6 @@ void TransposeWx8_16_C(const uint16_t* src,
}
}
-void TransposeUVWx8_16_C(const uint16_t* src,
- int src_stride,
- uint16_t* dst_a,
- int dst_stride_a,
- uint16_t* dst_b,
- int dst_stride_b,
- int width) {
- int i;
- for (i = 0; i < width; ++i) {
- dst_a[0] = src[0 * src_stride + 0];
- dst_b[0] = src[0 * src_stride + 1];
- dst_a[1] = src[1 * src_stride + 0];
- dst_b[1] = src[1 * src_stride + 1];
- dst_a[2] = src[2 * src_stride + 0];
- dst_b[2] = src[2 * src_stride + 1];
- dst_a[3] = src[3 * src_stride + 0];
- dst_b[3] = src[3 * src_stride + 1];
- dst_a[4] = src[4 * src_stride + 0];
- dst_b[4] = src[4 * src_stride + 1];
- dst_a[5] = src[5 * src_stride + 0];
- dst_b[5] = src[5 * src_stride + 1];
- dst_a[6] = src[6 * src_stride + 0];
- dst_b[6] = src[6 * src_stride + 1];
- dst_a[7] = src[7 * src_stride + 0];
- dst_b[7] = src[7 * src_stride + 1];
- src += 2;
- dst_a += dst_stride_a;
- dst_b += dst_stride_b;
- }
-}
-
void TransposeWxH_16_C(const uint16_t* src,
int src_stride,
uint16_t* dst,
diff --git a/source/row_neon64.cc b/source/row_neon64.cc
index a5c24e84..1679f87c 100644
--- a/source/row_neon64.cc
+++ b/source/row_neon64.cc
@@ -3449,8 +3449,8 @@ void ARGBAttenuateRow_NEON(const uint8_t* src_argb,
"subs %w2, %w2, #8 \n" // 8 processed per loop.
"umull v4.8h, v0.8b, v3.8b \n" // b * a
"prfm pldl1keep, [%0, 448] \n"
- "umull v5.8h, v1.8b, v3.8b \n" // g * a
- "umull v6.8h, v2.8b, v3.8b \n" // r * a
+ "umull v5.8h, v1.8b, v3.8b \n" // g * a
+ "umull v6.8h, v2.8b, v3.8b \n" // r * a
"addhn v0.8b, v4.8h, v7.8h \n" // (b + 255) >> 8
"addhn v1.8b, v5.8h, v7.8h \n" // (g + 255) >> 8
"addhn v2.8b, v6.8h, v7.8h \n" // (r + 255) >> 8
diff --git a/source/row_rvv.cc b/source/row_rvv.cc
index b49e5b1b..ad5ccbcd 100644
--- a/source/row_rvv.cc
+++ b/source/row_rvv.cc
@@ -18,7 +18,8 @@
#include "libyuv/row.h"
// This module is for clang rvv. GCC hasn't supported segment load & store.
-#if !defined(LIBYUV_DISABLE_RVV) && defined(__riscv_vector) && defined(__clang__)
+#if !defined(LIBYUV_DISABLE_RVV) && defined(__riscv_vector) && \
+ defined(__clang__)
#include <assert.h>
#include <riscv_vector.h>
@@ -988,4 +989,5 @@ void ARGBCopyYToAlphaRow_RVV(const uint8_t* src, uint8_t* dst, int width) {
} // namespace libyuv
#endif
-#endif // !defined(LIBYUV_DISABLE_RVV) && defined(__riscv_vector) && defined(__clang__)
+#endif // !defined(LIBYUV_DISABLE_RVV) && defined(__riscv_vector) &&
+ // defined(__clang__)
diff --git a/source/scale.cc b/source/scale.cc
index fe706dd6..8fa6b7cd 100644
--- a/source/scale.cc
+++ b/source/scale.cc
@@ -1077,15 +1077,15 @@ static void ScalePlaneBox_16(int src_width,
}
// Scale plane down with bilinear interpolation.
-void ScalePlaneBilinearDown(int src_width,
- int src_height,
- int dst_width,
- int dst_height,
- int src_stride,
- int dst_stride,
- const uint8_t* src_ptr,
- uint8_t* dst_ptr,
- enum FilterMode filtering) {
+static void ScalePlaneBilinearDown(int src_width,
+ int src_height,
+ int dst_width,
+ int dst_height,
+ int src_stride,
+ int dst_stride,
+ const uint8_t* src_ptr,
+ uint8_t* dst_ptr,
+ enum FilterMode filtering) {
// Initial source x/y coordinate and step values as 16.16 fixed point.
int x = 0;
int y = 0;
@@ -1205,15 +1205,15 @@ void ScalePlaneBilinearDown(int src_width,
free_aligned_buffer_64(row);
}
-void ScalePlaneBilinearDown_16(int src_width,
- int src_height,
- int dst_width,
- int dst_height,
- int src_stride,
- int dst_stride,
- const uint16_t* src_ptr,
- uint16_t* dst_ptr,
- enum FilterMode filtering) {
+static void ScalePlaneBilinearDown_16(int src_width,
+ int src_height,
+ int dst_width,
+ int dst_height,
+ int src_stride,
+ int dst_stride,
+ const uint16_t* src_ptr,
+ uint16_t* dst_ptr,
+ enum FilterMode filtering) {
// Initial source x/y coordinate and step values as 16.16 fixed point.
int x = 0;
int y = 0;
@@ -1297,15 +1297,15 @@ void ScalePlaneBilinearDown_16(int src_width,
}
// Scale up down with bilinear interpolation.
-void ScalePlaneBilinearUp(int src_width,
- int src_height,
- int dst_width,
- int dst_height,
- int src_stride,
- int dst_stride,
- const uint8_t* src_ptr,
- uint8_t* dst_ptr,
- enum FilterMode filtering) {
+static void ScalePlaneBilinearUp(int src_width,
+ int src_height,
+ int dst_width,
+ int dst_height,
+ int src_stride,
+ int dst_stride,
+ const uint8_t* src_ptr,
+ uint8_t* dst_ptr,
+ enum FilterMode filtering) {
int j;
// Initial source x/y coordinate and step values as 16.16 fixed point.
int x = 0;
@@ -1454,14 +1454,14 @@ void ScalePlaneBilinearUp(int src_width,
// This is an optimized version for scaling up a plane to 2 times of
// its original width, using linear interpolation.
// This is used to scale U and V planes of I422 to I444.
-void ScalePlaneUp2_Linear(int src_width,
- int src_height,
- int dst_width,
- int dst_height,
- int src_stride,
- int dst_stride,
- const uint8_t* src_ptr,
- uint8_t* dst_ptr) {
+static void ScalePlaneUp2_Linear(int src_width,
+ int src_height,
+ int dst_width,
+ int dst_height,
+ int src_stride,
+ int dst_stride,
+ const uint8_t* src_ptr,
+ uint8_t* dst_ptr) {
void (*ScaleRowUp)(const uint8_t* src_ptr, uint8_t* dst_ptr, int dst_width) =
ScaleRowUp2_Linear_Any_C;
int i;
@@ -1513,14 +1513,14 @@ void ScalePlaneUp2_Linear(int src_width,
// This is an optimized version for scaling up a plane to 2 times of
// its original size, using bilinear interpolation.
// This is used to scale U and V planes of I420 to I444.
-void ScalePlaneUp2_Bilinear(int src_width,
- int src_height,
- int dst_width,
- int dst_height,
- int src_stride,
- int dst_stride,
- const uint8_t* src_ptr,
- uint8_t* dst_ptr) {
+static void ScalePlaneUp2_Bilinear(int src_width,
+ int src_height,
+ int dst_width,
+ int dst_height,
+ int src_stride,
+ int dst_stride,
+ const uint8_t* src_ptr,
+ uint8_t* dst_ptr) {
void (*Scale2RowUp)(const uint8_t* src_ptr, ptrdiff_t src_stride,
uint8_t* dst_ptr, ptrdiff_t dst_stride, int dst_width) =
ScaleRowUp2_Bilinear_Any_C;
@@ -1573,14 +1573,14 @@ void ScalePlaneUp2_Bilinear(int src_width,
// its original width, using linear interpolation.
// stride is in count of uint16_t.
// This is used to scale U and V planes of I210 to I410 and I212 to I412.
-void ScalePlaneUp2_12_Linear(int src_width,
- int src_height,
- int dst_width,
- int dst_height,
- int src_stride,
- int dst_stride,
- const uint16_t* src_ptr,
- uint16_t* dst_ptr) {
+static void ScalePlaneUp2_12_Linear(int src_width,
+ int src_height,
+ int dst_width,
+ int dst_height,
+ int src_stride,
+ int dst_stride,
+ const uint16_t* src_ptr,
+ uint16_t* dst_ptr) {
void (*ScaleRowUp)(const uint16_t* src_ptr, uint16_t* dst_ptr,
int dst_width) = ScaleRowUp2_Linear_16_Any_C;
int i;
@@ -1627,14 +1627,14 @@ void ScalePlaneUp2_12_Linear(int src_width,
// its original size, using bilinear interpolation.
// stride is in count of uint16_t.
// This is used to scale U and V planes of I010 to I410 and I012 to I412.
-void ScalePlaneUp2_12_Bilinear(int src_width,
- int src_height,
- int dst_width,
- int dst_height,
- int src_stride,
- int dst_stride,
- const uint16_t* src_ptr,
- uint16_t* dst_ptr) {
+static void ScalePlaneUp2_12_Bilinear(int src_width,
+ int src_height,
+ int dst_width,
+ int dst_height,
+ int src_stride,
+ int dst_stride,
+ const uint16_t* src_ptr,
+ uint16_t* dst_ptr) {
void (*Scale2RowUp)(const uint16_t* src_ptr, ptrdiff_t src_stride,
uint16_t* dst_ptr, ptrdiff_t dst_stride, int dst_width) =
ScaleRowUp2_Bilinear_16_Any_C;
@@ -1674,14 +1674,14 @@ void ScalePlaneUp2_12_Bilinear(int src_width,
}
}
-void ScalePlaneUp2_16_Linear(int src_width,
- int src_height,
- int dst_width,
- int dst_height,
- int src_stride,
- int dst_stride,
- const uint16_t* src_ptr,
- uint16_t* dst_ptr) {
+static void ScalePlaneUp2_16_Linear(int src_width,
+ int src_height,
+ int dst_width,
+ int dst_height,
+ int src_stride,
+ int dst_stride,
+ const uint16_t* src_ptr,
+ uint16_t* dst_ptr) {
void (*ScaleRowUp)(const uint16_t* src_ptr, uint16_t* dst_ptr,
int dst_width) = ScaleRowUp2_Linear_16_Any_C;
int i;
@@ -1723,14 +1723,14 @@ void ScalePlaneUp2_16_Linear(int src_width,
}
}
-void ScalePlaneUp2_16_Bilinear(int src_width,
- int src_height,
- int dst_width,
- int dst_height,
- int src_stride,
- int dst_stride,
- const uint16_t* src_ptr,
- uint16_t* dst_ptr) {
+static void ScalePlaneUp2_16_Bilinear(int src_width,
+ int src_height,
+ int dst_width,
+ int dst_height,
+ int src_stride,
+ int dst_stride,
+ const uint16_t* src_ptr,
+ uint16_t* dst_ptr) {
void (*Scale2RowUp)(const uint16_t* src_ptr, ptrdiff_t src_stride,
uint16_t* dst_ptr, ptrdiff_t dst_stride, int dst_width) =
ScaleRowUp2_Bilinear_16_Any_C;
@@ -1770,15 +1770,15 @@ void ScalePlaneUp2_16_Bilinear(int src_width,
}
}
-void ScalePlaneBilinearUp_16(int src_width,
- int src_height,
- int dst_width,
- int dst_height,
- int src_stride,
- int dst_stride,
- const uint16_t* src_ptr,
- uint16_t* dst_ptr,
- enum FilterMode filtering) {
+static void ScalePlaneBilinearUp_16(int src_width,
+ int src_height,
+ int dst_width,
+ int dst_height,
+ int src_stride,
+ int dst_stride,
+ const uint16_t* src_ptr,
+ uint16_t* dst_ptr,
+ enum FilterMode filtering) {
int j;
// Initial source x/y coordinate and step values as 16.16 fixed point.
int x = 0;
diff --git a/source/scale_argb.cc b/source/scale_argb.cc
index 214f932c..1d5c1b60 100644
--- a/source/scale_argb.cc
+++ b/source/scale_argb.cc
@@ -16,6 +16,7 @@
#include "libyuv/cpu_id.h"
#include "libyuv/planar_functions.h" // For CopyARGB
#include "libyuv/row.h"
+#include "libyuv/scale_argb.h"
#include "libyuv/scale_row.h"
#ifdef __cplusplus
diff --git a/source/scale_common.cc b/source/scale_common.cc
index 77455903..d07a39af 100644
--- a/source/scale_common.cc
+++ b/source/scale_common.cc
@@ -1964,35 +1964,6 @@ void ScaleSlope(int src_width,
}
#undef CENTERSTART
-// Read 8x2 upsample with filtering and write 16x1.
-// actually reads an extra pixel, so 9x2.
-void ScaleRowUp2_16_C(const uint16_t* src_ptr,
- ptrdiff_t src_stride,
- uint16_t* dst,
- int dst_width) {
- const uint16_t* src2 = src_ptr + src_stride;
-
- int x;
- for (x = 0; x < dst_width - 1; x += 2) {
- uint16_t p0 = src_ptr[0];
- uint16_t p1 = src_ptr[1];
- uint16_t p2 = src2[0];
- uint16_t p3 = src2[1];
- dst[0] = (p0 * 9 + p1 * 3 + p2 * 3 + p3 + 8) >> 4;
- dst[1] = (p0 * 3 + p1 * 9 + p2 + p3 * 3 + 8) >> 4;
- ++src_ptr;
- ++src2;
- dst += 2;
- }
- if (dst_width & 1) {
- uint16_t p0 = src_ptr[0];
- uint16_t p1 = src_ptr[1];
- uint16_t p2 = src2[0];
- uint16_t p3 = src2[1];
- dst[0] = (p0 * 9 + p1 * 3 + p2 * 3 + p3 + 8) >> 4;
- }
-}
-
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
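
The ScaleRowUp2_16_C() removed above is the standard 2x bilinear upsample kernel: each output sample sits at a 1/4 or 3/4 offset between source samples in each direction, so the four taps are the products of those weights (this reading of the 9/3/3/1 constants is an interpretation, though it matches the removed code exactly):

    dst[0] = (9*p0 + 3*p1 + 3*p2 + 1*p3 + 8) / 16  // weights (3/4)(3/4), (1/4)(3/4), (3/4)(1/4), (1/4)(1/4)
    dst[1] = (3*p0 + 9*p1 + 1*p2 + 3*p3 + 8) / 16  // same taps mirrored for the second output pixel; +8 rounds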
diff --git a/source/scale_rvv.cc b/source/scale_rvv.cc
index 137da77e..33c913f8 100644
--- a/source/scale_rvv.cc
+++ b/source/scale_rvv.cc
@@ -19,7 +19,8 @@
#include "libyuv/scale_row.h"
// This module is for clang rvv. GCC hasn't supported segment load & store.
-#if !defined(LIBYUV_DISABLE_RVV) && defined(__riscv_vector) && defined(__clang__)
+#if !defined(LIBYUV_DISABLE_RVV) && defined(__riscv_vector) && \
+ defined(__clang__)
#include <riscv_vector.h>
#ifdef __cplusplus
@@ -599,4 +600,5 @@ void ScaleUVRowDownEven_RVV(const uint8_t* src_uv,
} // namespace libyuv
#endif
-#endif // !defined(LIBYUV_DISABLE_RVV) && defined(__riscv_vector) && defined(__clang__)
+#endif // !defined(LIBYUV_DISABLE_RVV) && defined(__riscv_vector) &&
+ // defined(__clang__)
diff --git a/source/scale_uv.cc b/source/scale_uv.cc
index 5246f8f6..40ec1b14 100644
--- a/source/scale_uv.cc
+++ b/source/scale_uv.cc
@@ -657,14 +657,14 @@ static void ScaleUVBilinearUp(int src_width,
// This is an optimized version for scaling up a plane to 2 times of
// its original width, using linear interpolation.
// This is used to scale U and V planes of NV16 to NV24.
-void ScaleUVLinearUp2(int src_width,
- int src_height,
- int dst_width,
- int dst_height,
- int src_stride,
- int dst_stride,
- const uint8_t* src_uv,
- uint8_t* dst_uv) {
+static void ScaleUVLinearUp2(int src_width,
+ int src_height,
+ int dst_width,
+ int dst_height,
+ int src_stride,
+ int dst_stride,
+ const uint8_t* src_uv,
+ uint8_t* dst_uv) {
void (*ScaleRowUp)(const uint8_t* src_uv, uint8_t* dst_uv, int dst_width) =
ScaleUVRowUp2_Linear_Any_C;
int i;
@@ -710,14 +710,14 @@ void ScaleUVLinearUp2(int src_width,
// This is an optimized version for scaling up a plane to 2 times of
// its original size, using bilinear interpolation.
// This is used to scale U and V planes of NV12 to NV24.
-void ScaleUVBilinearUp2(int src_width,
- int src_height,
- int dst_width,
- int dst_height,
- int src_stride,
- int dst_stride,
- const uint8_t* src_ptr,
- uint8_t* dst_ptr) {
+static void ScaleUVBilinearUp2(int src_width,
+ int src_height,
+ int dst_width,
+ int dst_height,
+ int src_stride,
+ int dst_stride,
+ const uint8_t* src_ptr,
+ uint8_t* dst_ptr) {
void (*Scale2RowUp)(const uint8_t* src_ptr, ptrdiff_t src_stride,
uint8_t* dst_ptr, ptrdiff_t dst_stride, int dst_width) =
ScaleUVRowUp2_Bilinear_Any_C;
@@ -764,14 +764,14 @@ void ScaleUVBilinearUp2(int src_width,
// This is an optimized version for scaling up a plane to 2 times of
// its original width, using linear interpolation.
// This is used to scale U and V planes of P210 to P410.
-void ScaleUVLinearUp2_16(int src_width,
- int src_height,
- int dst_width,
- int dst_height,
- int src_stride,
- int dst_stride,
- const uint16_t* src_uv,
- uint16_t* dst_uv) {
+static void ScaleUVLinearUp2_16(int src_width,
+ int src_height,
+ int dst_width,
+ int dst_height,
+ int src_stride,
+ int dst_stride,
+ const uint16_t* src_uv,
+ uint16_t* dst_uv) {
void (*ScaleRowUp)(const uint16_t* src_uv, uint16_t* dst_uv, int dst_width) =
ScaleUVRowUp2_Linear_16_Any_C;
int i;
@@ -817,14 +817,14 @@ void ScaleUVLinearUp2_16(int src_width,
// This is an optimized version for scaling up a plane to 2 times of
// its original size, using bilinear interpolation.
// This is used to scale U and V planes of P010 to P410.
-void ScaleUVBilinearUp2_16(int src_width,
- int src_height,
- int dst_width,
- int dst_height,
- int src_stride,
- int dst_stride,
- const uint16_t* src_ptr,
- uint16_t* dst_ptr) {
+static void ScaleUVBilinearUp2_16(int src_width,
+ int src_height,
+ int dst_width,
+ int dst_height,
+ int src_stride,
+ int dst_stride,
+ const uint16_t* src_ptr,
+ uint16_t* dst_ptr) {
void (*Scale2RowUp)(const uint16_t* src_ptr, ptrdiff_t src_stride,
uint16_t* dst_ptr, ptrdiff_t dst_stride, int dst_width) =
ScaleUVRowUp2_Bilinear_16_Any_C;
diff --git a/unit_test/scale_test.cc b/unit_test/scale_test.cc
index a8c95268..c2232e66 100644
--- a/unit_test/scale_test.cc
+++ b/unit_test/scale_test.cc
@@ -1217,48 +1217,6 @@ TEST_F(LibYUVScaleTest, TestScaleRowDown2Box_Odd_SSSE3) {
}
#endif // HAS_SCALEROWDOWN2_SSSE3
-extern "C" void ScaleRowUp2_16_NEON(const uint16_t* src_ptr,
- ptrdiff_t src_stride,
- uint16_t* dst,
- int dst_width);
-extern "C" void ScaleRowUp2_16_C(const uint16_t* src_ptr,
- ptrdiff_t src_stride,
- uint16_t* dst,
- int dst_width);
-
-TEST_F(LibYUVScaleTest, TestScaleRowUp2_16) {
- SIMD_ALIGNED(uint16_t orig_pixels[640 * 2 + 1]); // 2 rows + 1 pixel overrun.
- SIMD_ALIGNED(uint16_t dst_pixels_opt[1280]);
- SIMD_ALIGNED(uint16_t dst_pixels_c[1280]);
-
- memset(orig_pixels, 0, sizeof(orig_pixels));
- memset(dst_pixels_opt, 1, sizeof(dst_pixels_opt));
- memset(dst_pixels_c, 2, sizeof(dst_pixels_c));
-
- for (int i = 0; i < 640 * 2 + 1; ++i) {
- orig_pixels[i] = i;
- }
- ScaleRowUp2_16_C(&orig_pixels[0], 640, &dst_pixels_c[0], 1280);
- for (int i = 0; i < benchmark_pixels_div1280_; ++i) {
-#if !defined(LIBYUV_DISABLE_NEON) && defined(__aarch64__)
- int has_neon = TestCpuFlag(kCpuHasNEON);
- if (has_neon) {
- ScaleRowUp2_16_NEON(&orig_pixels[0], 640, &dst_pixels_opt[0], 1280);
- } else {
- ScaleRowUp2_16_C(&orig_pixels[0], 640, &dst_pixels_opt[0], 1280);
- }
-#else
- ScaleRowUp2_16_C(&orig_pixels[0], 640, &dst_pixels_opt[0], 1280);
-#endif
- }
-
- for (int i = 0; i < 1280; ++i) {
- EXPECT_EQ(dst_pixels_c[i], dst_pixels_opt[i]);
- }
- EXPECT_EQ(dst_pixels_c[0], (0 * 9 + 1 * 3 + 640 * 3 + 641 * 1 + 8) / 16);
- EXPECT_EQ(dst_pixels_c[1279], 800);
-}
-
extern "C" void ScaleRowDown2Box_16_NEON(const uint16_t* src_ptr,
ptrdiff_t src_stride,
uint16_t* dst,