author     Frank Barchard <fbarchard@google.com>   2021-02-12 09:59:00 -0800
committer  Frank Barchard <fbarchard@chromium.org> 2021-02-12 19:45:16 +0000
commit     d7687742995c4cb004fbbc5cffc7c9e0d22ec7e5 (patch)
tree       1d32a5216d0406f20841bb4b20a033fc1fb857c6
parent     d4ecb70610325fdaaeec6af074d6e3ceab9866d1 (diff)
download   libyuv-d7687742995c4cb004fbbc5cffc7c9e0d22ec7e5.tar.gz
add yuvconstants util
miscellaneous cleanup of other code/comments

Bug: libyuv:873, libyuv:877
Change-Id: I0d8caf9a65908ff8898b25494f7c724775f84fa3
Reviewed-on: https://chromium-review.googlesource.com/c/libyuv/libyuv/+/2692930
Reviewed-by: Wan-Teh Chang <wtc@google.com>
Reviewed-by: Frank Barchard <fbarchard@chromium.org>
-rw-r--r--  Android.bp                       9
-rw-r--r--  BUILD.gn                        12
-rw-r--r--  docs/deprecated_builds.md        1
-rw-r--r--  docs/getting_started.md          1
-rw-r--r--  include/libyuv/convert_argb.h   10
-rw-r--r--  include/libyuv/scale_row.h      24
-rw-r--r--  include/libyuv/scale_uv.h        2
-rw-r--r--  linux.mk                         8
-rw-r--r--  source/convert.cc               10
-rw-r--r--  source/row_gcc.cc                4
-rw-r--r--  source/scale.cc                 10
-rw-r--r--  source/scale_gcc.cc             17
-rw-r--r--  source/scale_neon.cc             3
-rw-r--r--  source/scale_neon64.cc           3
-rw-r--r--  source/scale_uv.cc              26
-rw-r--r--  unit_test/convert_test.cc       30
-rw-r--r--  util/yuvconstants.cc           106
17 files changed, 205 insertions, 71 deletions
diff --git a/Android.bp b/Android.bp
index d0b23432..ae59dffb 100644
--- a/Android.bp
+++ b/Android.bp
@@ -145,3 +145,12 @@ cc_test {
static_libs: ["libyuv"],
shared_libs: ["libjpeg"],
}
+
+cc_test {
+ name: "yuvconstants",
+ gtest: false,
+ srcs: [
+ "util/yuvconstants.cc",
+ ],
+ static_libs: ["libyuv"],
+}
diff --git a/BUILD.gn b/BUILD.gn
index d733e71d..40a67c79 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -41,6 +41,7 @@ group("default") {
":libyuv_unittest",
":psnr",
":yuvconvert",
+ ":yuvconstants",
]
}
}
@@ -357,6 +358,17 @@ if (libyuv_include_tests) {
}
}
+ executable("yuvconstants") {
+ sources = [
+ # sources
+ "util/yuvconstants.cc",
+ ]
+ deps = [ ":libyuv" ]
+ if (is_linux || is_chromeos) {
+ cflags = [ "-fexceptions" ]
+ }
+ }
+
executable("psnr") {
sources = [
# sources
diff --git a/docs/deprecated_builds.md b/docs/deprecated_builds.md
index 29e0bf9b..ba42966c 100644
--- a/docs/deprecated_builds.md
+++ b/docs/deprecated_builds.md
@@ -239,6 +239,7 @@ If you get a compile error for atlthunk.lib on Windows, read http://www.chromium
ninja -C out/Debug libyuv_unittest
ninja -C out/Debug compare
ninja -C out/Debug yuvconvert
+ ninja -C out/Debug yuvconstants
ninja -C out/Debug psnr
ninja -C out/Debug cpuid
diff --git a/docs/getting_started.md b/docs/getting_started.md
index 3e339712..e363c4ef 100644
--- a/docs/getting_started.md
+++ b/docs/getting_started.md
@@ -165,6 +165,7 @@ Running test with C code:
ninja -C out/Debug libyuv_unittest
ninja -C out/Debug compare
ninja -C out/Debug yuvconvert
+ ninja -C out/Debug yuvconstants
ninja -C out/Debug psnr
ninja -C out/Debug cpuid
diff --git a/include/libyuv/convert_argb.h b/include/libyuv/convert_argb.h
index cf7f923e..297de151 100644
--- a/include/libyuv/convert_argb.h
+++ b/include/libyuv/convert_argb.h
@@ -22,17 +22,17 @@ extern "C" {
// Conversion matrix for YUV to RGB
LIBYUV_API extern const struct YuvConstants kYuvI601Constants; // BT.601
-LIBYUV_API extern const struct YuvConstants kYuvJPEGConstants; // JPeg
-LIBYUV_API extern const struct YuvConstants kYuvF709Constants; // BT.709 full
+LIBYUV_API extern const struct YuvConstants kYuvJPEGConstants; // BT.601 full
LIBYUV_API extern const struct YuvConstants kYuvH709Constants; // BT.709
+LIBYUV_API extern const struct YuvConstants kYuvF709Constants; // BT.709 full
LIBYUV_API extern const struct YuvConstants kYuv2020Constants; // BT.2020
LIBYUV_API extern const struct YuvConstants kYuvV2020Constants; // BT.2020 full
// Conversion matrix for YVU to BGR
LIBYUV_API extern const struct YuvConstants kYvuI601Constants; // BT.601
-LIBYUV_API extern const struct YuvConstants kYvuJPEGConstants; // JPeg
-LIBYUV_API extern const struct YuvConstants kYvuF709Constants; // BT.709 full
+LIBYUV_API extern const struct YuvConstants kYvuJPEGConstants; // BT.601 full
LIBYUV_API extern const struct YuvConstants kYvuH709Constants; // BT.709
+LIBYUV_API extern const struct YuvConstants kYvuF709Constants; // BT.709 full
LIBYUV_API extern const struct YuvConstants kYvu2020Constants; // BT.2020
LIBYUV_API extern const struct YuvConstants kYvuV2020Constants; // BT.2020 full
@@ -41,8 +41,8 @@ LIBYUV_API extern const struct YuvConstants kYvuV2020Constants; // BT.2020 full
// TODO(fbarchard): Add macro for each Matrix function.
#define kYuvI601ConstantsVU kYvuI601Constants
#define kYuvJPEGConstantsVU kYvuJPEGConstants
-#define kYuvF709ConstantsVU kYvuF709Constants
#define kYuvH709ConstantsVU kYvuH709Constants
+#define kYuvF709ConstantsVU kYvuF709Constants
#define kYuv2020ConstantsVU kYvu2020Constants
#define kYuvV2020ConstantsVU kYvuV2020Constants
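The hunk above groups each colorspace's limited and full range matrices together and corrects the kYuvJPEGConstants comment (JPEG is full range BT.601). As a minimal usage sketch, a caller picks one of these matrices and passes it to the matching *Matrix conversion entry point; the buffer handling below is illustrative and assumes I420ToARGBMatrix() keeps its usual libyuv signature.

// Sketch: I420 to ARGB with an explicit conversion matrix. Buffer setup is
// illustrative only; I420ToARGBMatrix() is assumed to have its usual signature.
#include "libyuv/convert_argb.h"
#include <cstdint>
#include <vector>

bool ConvertBt709FullRange(const uint8_t* src_y, int stride_y,
                           const uint8_t* src_u, int stride_u,
                           const uint8_t* src_v, int stride_v,
                           int width, int height, std::vector<uint8_t>* argb) {
  argb->resize(static_cast<size_t>(width) * height * 4);
  // kYuvF709Constants is the BT.709 full range matrix declared above;
  // use kYuvH709Constants instead for limited (video) range.
  return libyuv::I420ToARGBMatrix(src_y, stride_y, src_u, stride_u, src_v,
                                  stride_v, argb->data(), width * 4,
                                  &libyuv::kYuvF709Constants, width,
                                  height) == 0;
}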
diff --git a/include/libyuv/scale_row.h b/include/libyuv/scale_row.h
index e972b533..18ffb546 100644
--- a/include/libyuv/scale_row.h
+++ b/include/libyuv/scale_row.h
@@ -612,13 +612,13 @@ void ScaleRowUp2_Bilinear_SSE2(const uint8_t* src_ptr,
ptrdiff_t dst_stride,
int dst_width);
void ScaleRowUp2_Linear_16_SSSE3(const uint16_t* src_ptr,
- uint16_t* dst_ptr,
- int dst_width);
+ uint16_t* dst_ptr,
+ int dst_width);
void ScaleRowUp2_Bilinear_16_SSSE3(const uint16_t* src_ptr,
- ptrdiff_t src_stride,
- uint16_t* dst_ptr,
- ptrdiff_t dst_stride,
- int dst_width);
+ ptrdiff_t src_stride,
+ uint16_t* dst_ptr,
+ ptrdiff_t dst_stride,
+ int dst_width);
void ScaleRowUp2_Linear_SSSE3(const uint8_t* src_ptr,
uint8_t* dst_ptr,
int dst_width);
@@ -652,13 +652,13 @@ void ScaleRowUp2_Bilinear_Any_SSE2(const uint8_t* src_ptr,
ptrdiff_t dst_stride,
int dst_width);
void ScaleRowUp2_Linear_16_Any_SSSE3(const uint16_t* src_ptr,
- uint16_t* dst_ptr,
- int dst_width);
+ uint16_t* dst_ptr,
+ int dst_width);
void ScaleRowUp2_Bilinear_16_Any_SSSE3(const uint16_t* src_ptr,
- ptrdiff_t src_stride,
- uint16_t* dst_ptr,
- ptrdiff_t dst_stride,
- int dst_width);
+ ptrdiff_t src_stride,
+ uint16_t* dst_ptr,
+ ptrdiff_t dst_stride,
+ int dst_width);
void ScaleRowUp2_Linear_Any_SSSE3(const uint8_t* src_ptr,
uint8_t* dst_ptr,
int dst_width);
diff --git a/include/libyuv/scale_uv.h b/include/libyuv/scale_uv.h
index 7b212178..8e74e319 100644
--- a/include/libyuv/scale_uv.h
+++ b/include/libyuv/scale_uv.h
@@ -30,7 +30,7 @@ int UVScale(const uint8_t* src_uv,
int dst_height,
enum FilterMode filtering);
-// Scale an 16 bit UV image.
+// Scale a 16 bit UV image.
// This function is currently incomplete, it can't handle all cases.
LIBYUV_API
int UVScale_16(const uint16_t* src_uv,
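UVScale_16 scales an interleaved 16 bit UV plane such as P010/P210 chroma. A minimal half-size downscale sketch follows, assuming the signature shown in this header and that widths are in UV pairs with strides counted in uint16_t elements (believed to be libyuv's convention for the _16 variants); since the function is documented as incomplete, the return value is propagated so callers can detect unsupported cases.

// Sketch: halve an interleaved 16 bit UV plane with bilinear filtering.
// Assumes UVScale_16 as declared above; widths in UV pairs, strides in
// uint16_t elements (2 elements per UV pair).
#include "libyuv/scale.h"  // enum FilterMode
#include "libyuv/scale_uv.h"

int HalveUVPlane16(const uint16_t* src_uv, int src_width, int src_height,
                   uint16_t* dst_uv) {
  const int dst_width = src_width / 2;
  const int dst_height = src_height / 2;
  return libyuv::UVScale_16(src_uv, src_width * 2, src_width, src_height,
                            dst_uv, dst_width * 2, dst_width, dst_height,
                            libyuv::kFilterBilinear);
}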
diff --git a/linux.mk b/linux.mk
index 3e93b710..083f1fa2 100644
--- a/linux.mk
+++ b/linux.mk
@@ -66,7 +66,7 @@ LOCAL_OBJ_FILES := \
.c.o:
$(CC) -c $(CFLAGS) $*.c -o $*.o
-all: libyuv.a i444tonv12_eg yuvconvert cpuid psnr
+all: libyuv.a i444tonv12_eg yuvconvert yuvconstants cpuid psnr
libyuv.a: $(LOCAL_OBJ_FILES)
$(AR) $(ARFLAGS) $@ $(LOCAL_OBJ_FILES)
@@ -75,6 +75,10 @@ libyuv.a: $(LOCAL_OBJ_FILES)
yuvconvert: util/yuvconvert.cc libyuv.a
$(CXX) $(CXXFLAGS) -Iutil/ -o $@ util/yuvconvert.cc libyuv.a
+# A C++ test utility that generates yuvconstants for yuv to rgb.
+yuvconstants: util/yuvconstants.cc libyuv.a
+ $(CXX) $(CXXFLAGS) -Iutil/ -o $@ util/yuvconstants.cc libyuv.a
+
# A standalone test utility
psnr: util/psnr.cc
$(CXX) $(CXXFLAGS) -Iutil/ -o $@ util/psnr.cc util/psnr_main.cc util/ssim.cc
@@ -90,4 +94,4 @@ cpuid: util/cpuid.c libyuv.a
$(CC) $(CFLAGS) -o $@ util/cpuid.c libyuv.a
clean:
- /bin/rm -f source/*.o *.ii *.s libyuv.a i444tonv12_eg yuvconvert cpuid psnr
+ /bin/rm -f source/*.o *.ii *.s libyuv.a i444tonv12_eg yuvconvert yuvconstants cpuid psnr
diff --git a/source/convert.cc b/source/convert.cc
index 8bf02b76..b0314df4 100644
--- a/source/convert.cc
+++ b/source/convert.cc
@@ -49,7 +49,7 @@ static int I4xxToI420(const uint8_t* src_y,
const int dst_y_height = Abs(src_y_height);
const int dst_uv_width = SUBSAMPLE(dst_y_width, 1, 1);
const int dst_uv_height = SUBSAMPLE(dst_y_height, 1, 1);
- if (src_uv_width == 0 || src_uv_height == 0) {
+ if (src_uv_width <= 0 || src_uv_height == 0) {
return -1;
}
if (dst_y) {
@@ -625,7 +625,7 @@ int NV12ToNV24(const uint8_t* src_y,
int dst_stride_uv,
int width,
int height) {
- if (width == 0 || height == 0) {
+ if (width <= 0 || height == 0) {
return -1;
}
@@ -650,7 +650,7 @@ int NV16ToNV24(const uint8_t* src_y,
int dst_stride_uv,
int width,
int height) {
- if (width == 0 || height == 0) {
+ if (width <= 0 || height == 0) {
return -1;
}
@@ -674,7 +674,7 @@ int P010ToP410(const uint16_t* src_y,
int dst_stride_uv,
int width,
int height) {
- if (width == 0 || height == 0) {
+ if (width <= 0 || height == 0) {
return -1;
}
@@ -699,7 +699,7 @@ int P210ToP410(const uint16_t* src_y,
int dst_stride_uv,
int width,
int height) {
- if (width == 0 || height == 0) {
+ if (width <= 0 || height == 0) {
return -1;
}
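The hunks above tighten argument validation: a width that is zero or negative is now rejected, while height keeps its == 0 check because a negative height is meaningful (it requests a vertical flip). A tiny sketch of the pattern, with hypothetical names:

// Illustration only: width must be positive; height may be negative
// (vertical flip) but not zero. Function and argument names are hypothetical.
static int ValidatePlaneArgs(const void* src, const void* dst, int width,
                             int height) {
  if (!src || !dst || width <= 0 || height == 0) {
    return -1;  // invalid arguments
  }
  return 0;
}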
diff --git a/source/row_gcc.cc b/source/row_gcc.cc
index c7e3fb95..cf87d46e 100644
--- a/source/row_gcc.cc
+++ b/source/row_gcc.cc
@@ -4444,7 +4444,7 @@ void SplitARGBRow_AVX2(const uint8_t* src_argb,
"sub %1,%3 \n"
"sub %1,%4 \n"
"vmovdqa %7,%%ymm3 \n"
- "vbroadcastf128 %6,%%ymm4 \n"
+ "vbroadcastf128 %6,%%ymm4 \n"
LABELALIGN
"1: \n"
@@ -4491,7 +4491,7 @@ void SplitXRGBRow_AVX2(const uint8_t* src_argb,
asm volatile(
"vmovdqa %6,%%ymm3 \n"
- "vbroadcastf128 %5,%%ymm4 \n"
+ "vbroadcastf128 %5,%%ymm4 \n"
LABELALIGN
"1: \n"
diff --git a/source/scale.cc b/source/scale.cc
index 84c78711..5f0ff646 100644
--- a/source/scale.cc
+++ b/source/scale.cc
@@ -2021,7 +2021,7 @@ int I420Scale(const uint8_t* src_y,
int src_halfheight = SUBSAMPLE(src_height, 1, 1);
int dst_halfwidth = SUBSAMPLE(dst_width, 1, 1);
int dst_halfheight = SUBSAMPLE(dst_height, 1, 1);
- if (!src_y || !src_u || !src_v || src_width == 0 || src_height == 0 ||
+ if (!src_y || !src_u || !src_v || src_width <= 0 || src_height == 0 ||
src_width > 32768 || src_height > 32768 || !dst_y || !dst_u || !dst_v ||
dst_width <= 0 || dst_height <= 0) {
return -1;
@@ -2058,7 +2058,7 @@ int I420Scale_16(const uint16_t* src_y,
int src_halfheight = SUBSAMPLE(src_height, 1, 1);
int dst_halfwidth = SUBSAMPLE(dst_width, 1, 1);
int dst_halfheight = SUBSAMPLE(dst_height, 1, 1);
- if (!src_y || !src_u || !src_v || src_width == 0 || src_height == 0 ||
+ if (!src_y || !src_u || !src_v || src_width <= 0 || src_height == 0 ||
src_width > 32768 || src_height > 32768 || !dst_y || !dst_u || !dst_v ||
dst_width <= 0 || dst_height <= 0) {
return -1;
@@ -2094,7 +2094,7 @@ int I444Scale(const uint8_t* src_y,
int dst_width,
int dst_height,
enum FilterMode filtering) {
- if (!src_y || !src_u || !src_v || src_width == 0 || src_height == 0 ||
+ if (!src_y || !src_u || !src_v || src_width <= 0 || src_height == 0 ||
src_width > 32768 || src_height > 32768 || !dst_y || !dst_u || !dst_v ||
dst_width <= 0 || dst_height <= 0) {
return -1;
@@ -2127,7 +2127,7 @@ int I444Scale_16(const uint16_t* src_y,
int dst_width,
int dst_height,
enum FilterMode filtering) {
- if (!src_y || !src_u || !src_v || src_width == 0 || src_height == 0 ||
+ if (!src_y || !src_u || !src_v || src_width <= 0 || src_height == 0 ||
src_width > 32768 || src_height > 32768 || !dst_y || !dst_u || !dst_v ||
dst_width <= 0 || dst_height <= 0) {
return -1;
@@ -2163,7 +2163,7 @@ int NV12Scale(const uint8_t* src_y,
int src_halfheight = SUBSAMPLE(src_height, 1, 1);
int dst_halfwidth = SUBSAMPLE(dst_width, 1, 1);
int dst_halfheight = SUBSAMPLE(dst_height, 1, 1);
- if (!src_y || !src_uv || src_width == 0 || src_height == 0 ||
+ if (!src_y || !src_uv || src_width <= 0 || src_height == 0 ||
src_width > 32768 || src_height > 32768 || !dst_y || !dst_uv ||
dst_width <= 0 || dst_height <= 0) {
return -1;
diff --git a/source/scale_gcc.cc b/source/scale_gcc.cc
index d1fb7de1..f03903f0 100644
--- a/source/scale_gcc.cc
+++ b/source/scale_gcc.cc
@@ -1234,7 +1234,7 @@ void ScaleRowUp2_Linear_AVX2(const uint8_t* src_ptr,
"vpcmpeqw %%ymm4,%%ymm4,%%ymm4 \n"
"vpsrlw $15,%%ymm4,%%ymm4 \n"
"vpsllw $1,%%ymm4,%%ymm4 \n" // all 2
- "vbroadcastf128 %3,%%ymm3 \n"
+ "vbroadcastf128 %3,%%ymm3 \n"
LABELALIGN
"1: \n"
@@ -1278,7 +1278,7 @@ void ScaleRowUp2_Bilinear_AVX2(const uint8_t* src_ptr,
"vpcmpeqw %%ymm6,%%ymm6,%%ymm6 \n"
"vpsrlw $15,%%ymm6,%%ymm6 \n"
"vpsllw $3,%%ymm6,%%ymm6 \n" // all 8
- "vbroadcastf128 %5,%%ymm7 \n"
+ "vbroadcastf128 %5,%%ymm7 \n"
LABELALIGN
"1: \n"
@@ -1357,7 +1357,7 @@ void ScaleRowUp2_Linear_16_AVX2(const uint16_t* src_ptr,
uint16_t* dst_ptr,
int dst_width) {
asm volatile(
- "vbroadcastf128 %3,%%ymm5 \n"
+ "vbroadcastf128 %3,%%ymm5 \n"
"vpcmpeqw %%ymm4,%%ymm4,%%ymm4 \n"
"vpsrlw $15,%%ymm4,%%ymm4 \n"
"vpsllw $1,%%ymm4,%%ymm4 \n" // all 2
@@ -1409,7 +1409,7 @@ void ScaleRowUp2_Bilinear_16_AVX2(const uint16_t* src_ptr,
ptrdiff_t dst_stride,
int dst_width) {
asm volatile(
- "vbroadcastf128 %5,%%ymm5 \n"
+ "vbroadcastf128 %5,%%ymm5 \n"
"vpcmpeqw %%ymm4,%%ymm4,%%ymm4 \n"
"vpsrlw $15,%%ymm4,%%ymm4 \n"
"vpsllw $3,%%ymm4,%%ymm4 \n" // all 8
@@ -2123,8 +2123,8 @@ void ScaleUVRowDown2Box_AVX2(const uint8_t* src_ptr,
}
#endif // HAS_SCALEUVROWDOWN2BOX_AVX2
-static const uvec8 kUVLinearMadd31 = {3, 1, 3, 1, 1, 3, 1, 3, 3, 1, 3,
- 1, 1, 3, 1, 3};
+static const uvec8 kUVLinearMadd31 = {3, 1, 3, 1, 1, 3, 1, 3,
+ 3, 1, 3, 1, 1, 3, 1, 3};
#ifdef HAS_SCALEUVROWUP2LINEAR_SSSE3
void ScaleUVRowUp2_Linear_SSSE3(const uint8_t* src_ptr,
@@ -2259,7 +2259,7 @@ void ScaleUVRowUp2_Linear_AVX2(const uint8_t* src_ptr,
"vpcmpeqw %%ymm4,%%ymm4,%%ymm4 \n"
"vpsrlw $15,%%ymm4,%%ymm4 \n"
"vpsllw $1,%%ymm4,%%ymm4 \n" // all 2
- "vbroadcastf128 %3,%%ymm3 \n"
+ "vbroadcastf128 %3,%%ymm3 \n"
LABELALIGN
"1: \n"
@@ -2302,7 +2302,7 @@ void ScaleUVRowUp2_Bilinear_AVX2(const uint8_t* src_ptr,
"vpcmpeqw %%ymm6,%%ymm6,%%ymm6 \n"
"vpsrlw $15,%%ymm6,%%ymm6 \n"
"vpsllw $3,%%ymm6,%%ymm6 \n" // all 8
- "vbroadcastf128 %5,%%ymm7 \n"
+ "vbroadcastf128 %5,%%ymm7 \n"
LABELALIGN
"1: \n"
@@ -2416,7 +2416,6 @@ void ScaleUVRowUp2_Linear_16_SSE2(const uint16_t* src_ptr,
"lea 0x10(%1),%1 \n" // 2 uv to 4 uv
"sub $0x4,%2 \n"
"jg 1b \n"
- "vzeroupper \n"
: "+r"(src_ptr), // %0
"+r"(dst_ptr), // %1
"+r"(dst_width) // %2
diff --git a/source/scale_neon.cc b/source/scale_neon.cc
index 14d8fcd8..41dba3e8 100644
--- a/source/scale_neon.cc
+++ b/source/scale_neon.cc
@@ -825,7 +825,8 @@ void ScaleUVRowUp2_Linear_16_NEON(const uint16_t* src_ptr,
"+r"(dst_width), // %2
"+r"(src_temp) // %3
:
- : "memory", "cc", "q0", "q1", "q2", "q3", "q4", "q5", "d30" // Clobber List
+ : "memory", "cc", "q0", "q1", "q2", "q3", "q4", "q5",
+ "d30" // Clobber List
);
}
diff --git a/source/scale_neon64.cc b/source/scale_neon64.cc
index 0ac4e2ea..22fedcb5 100644
--- a/source/scale_neon64.cc
+++ b/source/scale_neon64.cc
@@ -852,7 +852,8 @@ void ScaleUVRowUp2_Linear_16_NEON(const uint16_t* src_ptr,
"+r"(dst_ptr), // %2
"+r"(dst_width) // %3
:
- : "memory", "cc", "v0", "v1", "v2", "v3", "v4", "v5", "v31" // Clobber List
+ : "memory", "cc", "v0", "v1", "v2", "v3", "v4", "v5",
+ "v31" // Clobber List
);
}
diff --git a/source/scale_uv.cc b/source/scale_uv.cc
index 7baeae6d..7b977912 100644
--- a/source/scale_uv.cc
+++ b/source/scale_uv.cc
@@ -934,9 +934,9 @@ static void ScaleUVSimple(int src_width,
// Copy UV with optional flipping
#if HAS_UVCOPY
static int UVCopy(const uint8_t* src_UV,
- int src_stride_UV,
+ int src_stride_uv,
uint8_t* dst_UV,
- int dst_stride_UV,
+ int dst_stride_uv,
int width,
int height) {
if (!src_UV || !dst_UV || width <= 0 || height == 0) {
@@ -945,18 +945,18 @@ static int UVCopy(const uint8_t* src_UV,
// Negative height means invert the image.
if (height < 0) {
height = -height;
- src_UV = src_UV + (height - 1) * src_stride_UV;
- src_stride_UV = -src_stride_UV;
+ src_UV = src_UV + (height - 1) * src_stride_uv;
+ src_stride_uv = -src_stride_uv;
}
- CopyPlane(src_UV, src_stride_UV, dst_UV, dst_stride_UV, width * 2, height);
+ CopyPlane(src_UV, src_stride_uv, dst_UV, dst_stride_uv, width * 2, height);
return 0;
}
static int UVCopy_16(const uint16_t* src_UV,
- int src_stride_UV,
+ int src_stride_uv,
uint16_t* dst_UV,
- int dst_stride_UV,
+ int dst_stride_uv,
int width,
int height) {
if (!src_UV || !dst_UV || width <= 0 || height == 0) {
@@ -965,11 +965,11 @@ static int UVCopy_16(const uint16_t* src_UV,
// Negative height means invert the image.
if (height < 0) {
height = -height;
- src_UV = src_UV + (height - 1) * src_stride_UV;
- src_stride_UV = -src_stride_UV;
+ src_UV = src_UV + (height - 1) * src_stride_uv;
+ src_stride_uv = -src_stride_uv;
}
- CopyPlane_16(src_UV, src_stride_UV, dst_UV, dst_stride_UV, width * 2, height);
+ CopyPlane_16(src_UV, src_stride_uv, dst_UV, dst_stride_uv, width * 2, height);
return 0;
}
#endif // HAS_UVCOPY
@@ -1117,7 +1117,7 @@ int UVScale(const uint8_t* src_uv,
int dst_width,
int dst_height,
enum FilterMode filtering) {
- if (!src_uv || src_width == 0 || src_height == 0 || src_width > 32768 ||
+ if (!src_uv || src_width <= 0 || src_height == 0 || src_width > 32768 ||
src_height > 32768 || !dst_uv || dst_width <= 0 || dst_height <= 0) {
return -1;
}
@@ -1126,7 +1126,7 @@ int UVScale(const uint8_t* src_uv,
return 0;
}
-// Scale an 16 bit UV image.
+// Scale a 16 bit UV image.
// This function is currently incomplete, it can't handle all cases.
LIBYUV_API
int UVScale_16(const uint16_t* src_uv,
@@ -1140,7 +1140,7 @@ int UVScale_16(const uint16_t* src_uv,
enum FilterMode filtering) {
int dy = 0;
- if (!src_uv || src_width == 0 || src_height == 0 || src_width > 32768 ||
+ if (!src_uv || src_width <= 0 || src_height == 0 || src_width > 32768 ||
src_height > 32768 || !dst_uv || dst_width <= 0 || dst_height <= 0) {
return -1;
}
diff --git a/unit_test/convert_test.cc b/unit_test/convert_test.cc
index 2922fc40..50593160 100644
--- a/unit_test/convert_test.cc
+++ b/unit_test/convert_test.cc
@@ -55,13 +55,13 @@ namespace libyuv {
static_assert(SRC_BPC == 1 || SRC_BPC == 2, "SRC BPC unsupported"); \
static_assert(DST_BPC == 1 || DST_BPC == 2, "DST BPC unsupported"); \
static_assert(SRC_SUBSAMP_X == 1 || SRC_SUBSAMP_X == 2, \
- "DST SRC_SUBSAMP_X unsupported"); \
+ "SRC_SUBSAMP_X unsupported"); \
static_assert(SRC_SUBSAMP_Y == 1 || SRC_SUBSAMP_Y == 2, \
- "DST SRC_SUBSAMP_Y unsupported"); \
+ "SRC_SUBSAMP_Y unsupported"); \
static_assert(DST_SUBSAMP_X == 1 || DST_SUBSAMP_X == 2, \
- "DST DST_SUBSAMP_X unsupported"); \
+ "DST_SUBSAMP_X unsupported"); \
static_assert(DST_SUBSAMP_Y == 1 || DST_SUBSAMP_Y == 2, \
- "DST DST_SUBSAMP_Y unsupported"); \
+ "DST_SUBSAMP_Y unsupported"); \
const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
const int kHeight = benchmark_height_; \
const int kSrcHalfWidth = SUBSAMPLE(kWidth, SRC_SUBSAMP_X); \
@@ -385,13 +385,13 @@ TESTPLANARTOBP(I400, 2, 2, NV21, 2, 2)
static_assert(SRC_BPC == 1 || SRC_BPC == 2, "SRC BPC unsupported"); \
static_assert(DST_BPC == 1 || DST_BPC == 2, "DST BPC unsupported"); \
static_assert(SRC_SUBSAMP_X == 1 || SRC_SUBSAMP_X == 2, \
- "DST SRC_SUBSAMP_X unsupported"); \
+ "SRC_SUBSAMP_X unsupported"); \
static_assert(SRC_SUBSAMP_Y == 1 || SRC_SUBSAMP_Y == 2, \
- "DST SRC_SUBSAMP_Y unsupported"); \
+ "SRC_SUBSAMP_Y unsupported"); \
static_assert(DST_SUBSAMP_X == 1 || DST_SUBSAMP_X == 2, \
- "DST DST_SUBSAMP_X unsupported"); \
+ "DST_SUBSAMP_X unsupported"); \
static_assert(DST_SUBSAMP_Y == 1 || DST_SUBSAMP_Y == 2, \
- "DST DST_SUBSAMP_Y unsupported"); \
+ "DST_SUBSAMP_Y unsupported"); \
const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
const int kHeight = benchmark_height_; \
const int kSrcHalfWidth = SUBSAMPLE(kWidth, SRC_SUBSAMP_X); \
@@ -423,17 +423,17 @@ TESTPLANARTOBP(I400, 2, 2, NV21, 2, 2)
memset(dst_uv_opt, 102, 2 * kDstHalfWidth * kDstHalfHeight * DST_BPC); \
MaskCpuFlags(disable_cpu_flags_); \
SRC_FMT_PLANAR##To##FMT_PLANAR( \
- src_y_p, kWidth, src_uv_p, 2 * kSrcHalfWidth, \
+ src_y_p, kWidth, src_uv_p, 2 * kSrcHalfWidth, \
DOY ? reinterpret_cast<DST_T*>(dst_y_c) : NULL, kWidth, \
- reinterpret_cast<DST_T*>(dst_uv_c), 2 * kDstHalfWidth, \
- kWidth, NEG kHeight); \
+ reinterpret_cast<DST_T*>(dst_uv_c), 2 * kDstHalfWidth, kWidth, \
+ NEG kHeight); \
MaskCpuFlags(benchmark_cpu_info_); \
for (int i = 0; i < benchmark_iterations_; ++i) { \
SRC_FMT_PLANAR##To##FMT_PLANAR( \
- src_y_p, kWidth, src_uv_p, 2 * kSrcHalfWidth, \
+ src_y_p, kWidth, src_uv_p, 2 * kSrcHalfWidth, \
DOY ? reinterpret_cast<DST_T*>(dst_y_opt) : NULL, kWidth, \
- reinterpret_cast<DST_T*>(dst_uv_opt), 2 * kDstHalfWidth, \
- kWidth, NEG kHeight); \
+ reinterpret_cast<DST_T*>(dst_uv_opt), 2 * kDstHalfWidth, kWidth, \
+ NEG kHeight); \
} \
if (DOY) { \
for (int i = 0; i < kHeight; ++i) { \
@@ -483,7 +483,7 @@ TESTBIPLANARTOBP(NV21, uint8_t, 1, 2, 2, NV12, uint8_t, 1, 2, 2, 8)
TESTBIPLANARTOBP(NV12, uint8_t, 1, 2, 2, NV12Mirror, uint8_t, 1, 2, 2, 8)
TESTBIPLANARTOBP(NV12, uint8_t, 1, 2, 2, NV24, uint8_t, 1, 1, 1, 8)
TESTBIPLANARTOBP(NV16, uint8_t, 1, 2, 1, NV24, uint8_t, 1, 1, 1, 8)
-// These formats put data at high bits, so test on full 16bit range.
+// These formats put data in high bits, so test on full 16bit range.
TESTBIPLANARTOBP(P010, uint16_t, 2, 2, 2, P410, uint16_t, 2, 1, 1, 16)
TESTBIPLANARTOBP(P210, uint16_t, 2, 2, 1, P410, uint16_t, 2, 1, 1, 16)
TESTBIPLANARTOBP(P012, uint16_t, 2, 2, 2, P412, uint16_t, 2, 1, 1, 16)
diff --git a/util/yuvconstants.cc b/util/yuvconstants.cc
new file mode 100644
index 00000000..be900878
--- /dev/null
+++ b/util/yuvconstants.cc
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2021 The LibYuv Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+// This utility computes values needed to generate yuvconstants based on
+// white point values.
+// The yuv formulas are tuned for 8 bit YUV channels.
+
+// See Also
+// https://mymusing.co/bt601-yuv-to-rgb-conversion-color/
+
+// BT.709 full range YUV to RGB reference
+// R = Y + V * 1.5748
+// G = Y - U * 0.18732 - V * 0.46812
+// B = Y + U * 1.8556
+// KR = 0.2126
+// KB = 0.0722
+
+// // Y contribution to R,G,B. Scale and bias.
+// #define YG 16320 /* round(1.000 * 64 * 256 * 256 / 257) */
+// #define YB 32 /* 64 / 2 */
+//
+// // U and V contributions to R,G,B.
+// #define UB 113 /* round(1.77200 * 64) */
+// #define UG 22 /* round(0.34414 * 64) */
+// #define VG 46 /* round(0.71414 * 64) */
+// #define VR 90 /* round(1.40200 * 64) */
+//
+// // Bias values to round, and subtract 128 from U and V.
+// #define BB (-UB * 128 + YB)
+// #define BG (UG * 128 + VG * 128 + YB)
+// #define BR (-VR * 128 + YB)
+
+#define round(v) ((int)(v + 0.5))
+
+int main(int argc, const char* argv[]) {
+ if (argc < 2) {
+ printf("yuvconstants Kr Kb\n");
+ printf(" MC BT KR = 0.2126; KB = 0.0722\n");
+ printf(" 1 BT.709 KR = 0.2126; KB = 0.0722\n");
+ printf(" 4 FCC KR = 0.30; KB = 0.11\n");
+ printf(" 6 BT.601 KR = 0.299; KB = 0.114\n");
+ printf(" 7 SMPTE 240M KR = 0.212; KB = 0.087\n");
+ printf(" 9 BT.2020 KR = 0.2627; KB = 0.0593\n");
+ return -1;
+ }
+ float kr = atof(argv[1]);
+ float kb = atof(argv[2]);
+ float kg = 1 - kr - kb;
+
+ float vr = 2 * (1 - kr);
+ float ug = 2 * ((1 - kb) * kb / kg);
+ float vg = 2 * ((1 - kr) * kr / kg);
+ float ub = 2 * (1 - kb);
+
+ printf("Full range\n");
+ printf("R = Y + V * %5f\n", vr);
+ printf("G = Y - U * %6f - V * %6f\n", ug, vg);
+ printf("B = Y + U * %5f\n", ub);
+
+ printf("KR = %4f; ", kr);
+ printf("KB = %4f\n", kb);
+ // printf("KG = %4f\n", kg);
+ // #define YG 16320 /* round(1.000 * 64 * 256 * 256 / 257) */
+ // #define YB 32 /* 64 / 2 */
+ //
+ // // U and V contributions to R,G,B.
+
+ printf("UB %-3d /* round(%f * 64 = %f) */\n", round(ub * 64), ub, ub * 64);
+ printf("UG %-3d /* round(%f * 64 = %f) */\n", round(ug * 64), ug, ug * 64);
+ printf("VG %-3d /* round(%f * 64 = %f) */\n", round(vg * 64), vg, vg * 64);
+ printf("VR %-3d /* round(%f * 64 = %f) */\n", round(vr * 64), vr, vr * 64);
+
+ vr = 255.f / 224.f * 2 * (1 - kr);
+ ug = 255.f / 224.f * 2 * ((1 - kb) * kb / kg);
+ vg = 255.f / 224.f * 2 * ((1 - kr) * kr / kg);
+ ub = 255.f / 224.f * 2 * (1 - kb);
+
+ printf("\nLimited range\n");
+ printf("R = (Y - 16) * 1.164 + V * %5f\n", vr);
+ printf("G = (Y - 16) * 1.164 - U * %6f - V * %6f\n", ug, vg);
+ printf("B = (Y - 16) * 1.164 + U * %5f\n", ub);
+
+ // printf("KG = %4f\n", kg);
+ // #define YG 16320 /* round(1.000 * 64 * 256 * 256 / 257) */
+ // #define YB 32 /* 64 / 2 */
+ //
+ // // U and V contributions to R,G,B.
+
+ printf("UB %-3d /* round(%f * 64 = %f) */\n", round(ub * 64), ub, ub * 64);
+ printf("UG %-3d /* round(%f * 64 = %f) */\n", round(ug * 64), ug, ug * 64);
+ printf("VG %-3d /* round(%f * 64 = %f) */\n", round(vg * 64), vg, vg * 64);
+ printf("VR %-3d /* round(%f * 64 = %f) */\n", round(vr * 64), vr, vr * 64);
+
+ return 0;
+}
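As a sanity check on the formulas above, plugging in the BT.601 primaries (KR = 0.299, KB = 0.114) reproduces the full range constants quoted in the file's comment block (UB 113, UG 22, VG 46, VR 90); running the tool as "yuvconstants 0.299 0.114" prints the same coefficients plus the limited range variants. A small standalone sketch of that computation:

// Standalone check of the full range math for BT.601 (KR = 0.299, KB = 0.114).
// Expected output: UB 113 UG 22 VG 46 VR 90, matching the comments above.
#include <cmath>
#include <cstdio>

int main() {
  const double kr = 0.299, kb = 0.114;
  const double kg = 1.0 - kr - kb;
  const double ub = 2.0 * (1.0 - kb);              // 1.772
  const double ug = 2.0 * ((1.0 - kb) * kb / kg);  // 0.34414
  const double vg = 2.0 * ((1.0 - kr) * kr / kg);  // 0.71414
  const double vr = 2.0 * (1.0 - kr);              // 1.402
  printf("UB %d UG %d VG %d VR %d\n", (int)lround(ub * 64),
         (int)lround(ug * 64), (int)lround(vg * 64), (int)lround(vr * 64));
  return 0;
}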