From def473f501acbd652cd4593fd2a90a067e8c9f1a Mon Sep 17 00:00:00 2001
From: Frank Barchard
Date: Mon, 4 Dec 2023 01:16:59 -0800
Subject: malloc return 1 for failures and assert for internal functions

Bug: libyuv:968
Change-Id: Iea2f907061532d2e00347996124bc80d079a7bdc
Reviewed-on: https://chromium-review.googlesource.com/c/libyuv/libyuv/+/5010874
Reviewed-by: Wan-Teh Chang
Commit-Queue: Frank Barchard
---
 source/convert.cc           | 30 ++++++++++++++++---
 source/convert_argb.cc      | 54 ++++++++++++++++++++++++++++++---------------
 source/convert_from_argb.cc | 42 +++++++++++++++++++++--------------
 source/planar_functions.cc  | 12 ++++++----
 source/rotate.cc            | 23 ++++++++++++-------
 source/rotate_argb.cc       |  5 +++--
 source/scale.cc             | 21 +++++++++++-------
 source/scale_argb.cc        | 26 ++++++++++------------
 source/scale_uv.cc          | 12 +++++-----
 9 files changed, 139 insertions(+), 86 deletions(-)

(limited to 'source')

diff --git a/source/convert.cc b/source/convert.cc
index 5f779a81..6ac5bc43 100644
--- a/source/convert.cc
+++ b/source/convert.cc
@@ -790,8 +790,9 @@ int I422ToNV21(const uint8_t* src_y,
   // Allocate u and v buffers
   align_buffer_64(plane_u, halfwidth * halfheight * 2);
-  if (!plane_u) return 1;
   uint8_t* plane_v = plane_u + halfwidth * halfheight;
+  if (!plane_u)
+    return 1;
   I422ToI420(src_y, src_stride_y, src_u, src_stride_u, src_v, src_stride_v,
              dst_y, dst_stride_y, plane_u, halfwidth, plane_v, halfwidth, width,
@@ -907,7 +908,8 @@ int MT2TToP010(const uint8_t* src_y,
   void (*UnpackMT2T)(const uint8_t* src, uint16_t* dst, size_t size) =
       UnpackMT2T_C;
   align_buffer_64(row_buf, row_buf_size);
-  if (!row_buf) return 1;
+  if (!row_buf)
+    return 1;
 #if defined(HAS_UNPACKMT2T_NEON)
   if (TestCpuFlag(kCpuHasNEON)) {
@@ -1107,8 +1109,9 @@ int I422ToNV21(const uint8_t* src_y,
   // Allocate 2 rows of vu.
   int awidth = halfwidth * 2;
   align_buffer_64(row_vu_0, awidth * 2);
-  if (!row_vu_0) return 1;
   uint8_t* row_vu_1 = row_vu_0 + awidth;
+  if (!row_vu_0)
+    return 1;
   for (y = 0; y < height - 1; y += 2) {
     MergeUVRow(src_v, src_u, row_vu_0, halfwidth);
@@ -2693,7 +2696,8 @@ int RGB24ToI420(const uint8_t* src_rgb24,
   // Allocate 2 rows of ARGB.
   const int row_size = (width * 4 + 31) & ~31;
   align_buffer_64(row, row_size * 2);
-  if (!row) return 1;
+  if (!row)
+    return 1;
 #endif
   for (y = 0; y < height - 1; y += 2) {
@@ -2870,7 +2874,8 @@ int RGB24ToJ420(const uint8_t* src_rgb24,
   // Allocate 2 rows of ARGB.
   const int row_size = (width * 4 + 31) & ~31;
   align_buffer_64(row, row_size * 2);
-  if (!row) return 1;
+  if (!row)
+    return 1;
 #endif
   for (y = 0; y < height - 1; y += 2) {
@@ -3050,7 +3055,8 @@ int RAWToI420(const uint8_t* src_raw,
   // Allocate 2 rows of ARGB.
   const int row_size = (width * 4 + 31) & ~31;
   align_buffer_64(row, row_size * 2);
-  if (!row) return 1;
+  if (!row)
+    return 1;
 #endif
   for (y = 0; y < height - 1; y += 2) {
@@ -3227,7 +3233,8 @@ int RAWToJ420(const uint8_t* src_raw,
   // Allocate 2 rows of ARGB.
   const int row_size = (width * 4 + 31) & ~31;
   align_buffer_64(row, row_size * 2);
-  if (!row) return 1;
+  if (!row)
+    return 1;
 #endif
   for (y = 0; y < height - 1; y += 2) {
@@ -3406,7 +3413,8 @@ int RGB565ToI420(const uint8_t* src_rgb565,
   // Allocate 2 rows of ARGB.
   const int row_size = (width * 4 + 31) & ~31;
   align_buffer_64(row, row_size * 2);
-  if (!row) return 1;
+  if (!row)
+    return 1;
 #endif
   for (y = 0; y < height - 1; y += 2) {
 #if (defined(HAS_RGB565TOYROW_NEON) || defined(HAS_RGB565TOYROW_MSA) || \
@@ -3587,7 +3595,8 @@ int ARGB1555ToI420(const uint8_t* src_argb1555,
   // Allocate 2 rows of ARGB.
const int row_size = (width * 4 + 31) & ~31; align_buffer_64(row, row_size * 2); - if (!row) return 1; + if (!row) + return 1; #endif for (y = 0; y < height - 1; y += 2) { @@ -3801,7 +3810,8 @@ int ARGB4444ToI420(const uint8_t* src_argb4444, // Allocate 2 rows of ARGB. const int row_size = (width * 4 + 31) & ~31; align_buffer_64(row, row_size * 2); - if (!row) return 1; + if (!row) + return 1; #endif for (y = 0; y < height - 1; y += 2) { diff --git a/source/convert_argb.cc b/source/convert_argb.cc index ad4470c7..871fea59 100644 --- a/source/convert_argb.cc +++ b/source/convert_argb.cc @@ -4670,7 +4670,8 @@ int Android420ToARGBMatrix(const uint8_t* src_y, // General case fallback creates NV12 align_buffer_64(plane_uv, halfwidth * 2 * halfheight); - if (!plane_uv) return 1; + if (!plane_uv) + return 1; dst_uv = plane_uv; for (y = 0; y < halfheight; ++y) { WeavePixels(src_u, src_v, src_pixel_stride_uv, dst_uv, halfwidth); @@ -5983,7 +5984,8 @@ int I420ToRGB565Dither(const uint8_t* src_y, { // Allocate a row of argb. align_buffer_64(row_argb, width * 4); - if (!row_argb) return 1; + if (!row_argb) + return 1; for (y = 0; y < height; ++y) { I422ToARGBRow(src_y, src_u, src_v, row_argb, &kYuvI601Constants, width); ARGBToRGB565DitherRow(row_argb, dst_rgb565, @@ -6242,11 +6244,12 @@ static int I420ToARGBMatrixBilinear(const uint8_t* src_y, // alloc 4 lines temp const int row_size = (width + 31) & ~31; align_buffer_64(row, row_size * 4); - if (!row) return 1; uint8_t* temp_u_1 = row; uint8_t* temp_u_2 = row + row_size; uint8_t* temp_v_1 = row + row_size * 2; uint8_t* temp_v_2 = row + row_size * 3; + if (!row) + return 1; ScaleRowUp2_Linear(src_u, temp_u_1, width); ScaleRowUp2_Linear(src_v, temp_v_1, width); @@ -6379,9 +6382,10 @@ static int I422ToARGBMatrixLinear(const uint8_t* src_y, // alloc 2 lines temp const int row_size = (width + 31) & ~31; align_buffer_64(row, row_size * 2); - if (!row) return 1; uint8_t* temp_u = row; uint8_t* temp_v = row + row_size; + if (!row) + return 1; for (y = 0; y < height; ++y) { ScaleRowUp2_Linear(src_u, temp_u, width); @@ -6511,11 +6515,12 @@ static int I420ToRGB24MatrixBilinear(const uint8_t* src_y, // alloc 4 lines temp const int row_size = (width + 31) & ~31; align_buffer_64(row, row_size * 4); - if (!row) return 1; uint8_t* temp_u_1 = row; uint8_t* temp_u_2 = row + row_size; uint8_t* temp_v_1 = row + row_size * 2; uint8_t* temp_v_2 = row + row_size * 3; + if (!row) + return 1; ScaleRowUp2_Linear(src_u, temp_u_1, width); ScaleRowUp2_Linear(src_v, temp_v_1, width); @@ -6618,11 +6623,12 @@ static int I010ToAR30MatrixBilinear(const uint16_t* src_y, // alloc 4 lines temp const int row_size = (width + 31) & ~31; align_buffer_64(row, row_size * 4 * sizeof(uint16_t)); - if (!row) return 1; uint16_t* temp_u_1 = (uint16_t*)(row); uint16_t* temp_u_2 = (uint16_t*)(row) + row_size; uint16_t* temp_v_1 = (uint16_t*)(row) + row_size * 2; uint16_t* temp_v_2 = (uint16_t*)(row) + row_size * 3; + if (!row) + return 1; ScaleRowUp2_Linear_12(src_u, temp_u_1, width); ScaleRowUp2_Linear_12(src_v, temp_v_1, width); @@ -6718,9 +6724,10 @@ static int I210ToAR30MatrixLinear(const uint16_t* src_y, // alloc 2 lines temp const int row_size = (width + 31) & ~31; align_buffer_64(row, row_size * 2 * sizeof(uint16_t)); - if (!row) return 1; uint16_t* temp_u = (uint16_t*)(row); uint16_t* temp_v = (uint16_t*)(row) + row_size; + if (!row) + return 1; for (y = 0; y < height; ++y) { ScaleRowUp2_Linear_12(src_u, temp_u, width); @@ -6807,11 +6814,12 @@ static int I010ToARGBMatrixBilinear(const 
uint16_t* src_y, // alloc 4 lines temp const int row_size = (width + 31) & ~31; align_buffer_64(row, row_size * 4 * sizeof(uint16_t)); - if (!row) return 1; uint16_t* temp_u_1 = (uint16_t*)(row); uint16_t* temp_u_2 = (uint16_t*)(row) + row_size; uint16_t* temp_v_1 = (uint16_t*)(row) + row_size * 2; uint16_t* temp_v_2 = (uint16_t*)(row) + row_size * 3; + if (!row) + return 1; ScaleRowUp2_Linear_12(src_u, temp_u_1, width); ScaleRowUp2_Linear_12(src_v, temp_v_1, width); @@ -6906,9 +6914,10 @@ static int I210ToARGBMatrixLinear(const uint16_t* src_y, // alloc 2 lines temp const int row_size = (width + 31) & ~31; align_buffer_64(row, row_size * 2 * sizeof(uint16_t)); - if (!row) return 1; uint16_t* temp_u = (uint16_t*)(row); uint16_t* temp_v = (uint16_t*)(row) + row_size; + if (!row) + return 1; for (y = 0; y < height; ++y) { ScaleRowUp2_Linear_12(src_u, temp_u, width); @@ -7083,11 +7092,12 @@ static int I420AlphaToARGBMatrixBilinear( // alloc 4 lines temp const int row_size = (width + 31) & ~31; align_buffer_64(row, row_size * 4); - if (!row) return 1; uint8_t* temp_u_1 = row; uint8_t* temp_u_2 = row + row_size; uint8_t* temp_v_1 = row + row_size * 2; uint8_t* temp_v_2 = row + row_size * 3; + if (!row) + return 1; ScaleRowUp2_Linear(src_u, temp_u_1, width); ScaleRowUp2_Linear(src_v, temp_v_1, width); @@ -7284,9 +7294,10 @@ static int I422AlphaToARGBMatrixLinear(const uint8_t* src_y, // alloc 2 lines temp const int row_size = (width + 31) & ~31; align_buffer_64(row, row_size * 2); - if (!row) return 1; uint8_t* temp_u = row; uint8_t* temp_v = row + row_size; + if (!row) + return 1; for (y = 0; y < height; ++y) { ScaleRowUp2_Linear(src_u, temp_u, width); @@ -7424,11 +7435,12 @@ static int I010AlphaToARGBMatrixBilinear( // alloc 4 lines temp const int row_size = (width + 31) & ~31; align_buffer_64(row, row_size * 4 * sizeof(uint16_t)); - if (!row) return 1; uint16_t* temp_u_1 = (uint16_t*)(row); uint16_t* temp_u_2 = (uint16_t*)(row) + row_size; uint16_t* temp_v_1 = (uint16_t*)(row) + row_size * 2; uint16_t* temp_v_2 = (uint16_t*)(row) + row_size * 3; + if (!row) + return 1; ScaleRowUp2_Linear_12(src_u, temp_u_1, width); ScaleRowUp2_Linear_12(src_v, temp_v_1, width); @@ -7586,9 +7598,10 @@ static int I210AlphaToARGBMatrixLinear(const uint16_t* src_y, // alloc 2 lines temp const int row_size = (width + 31) & ~31; align_buffer_64(row, row_size * 2 * sizeof(uint16_t)); - if (!row) return 1; uint16_t* temp_u = (uint16_t*)(row); uint16_t* temp_v = (uint16_t*)(row) + row_size; + if (!row) + return 1; for (y = 0; y < height; ++y) { ScaleRowUp2_Linear(src_u, temp_u, width); @@ -7672,9 +7685,10 @@ static int P010ToARGBMatrixBilinear(const uint16_t* src_y, // alloc 2 lines temp const int row_size = (2 * width + 31) & ~31; align_buffer_64(row, row_size * 2 * sizeof(uint16_t)); - if (!row) return 1; uint16_t* temp_uv_1 = (uint16_t*)(row); uint16_t* temp_uv_2 = (uint16_t*)(row) + row_size; + if (!row) + return 1; Scale2RowUp_Bilinear_16(src_uv, 0, temp_uv_1, row_size, width); P410ToARGBRow(src_y, temp_uv_1, dst_argb, yuvconstants, width); @@ -7763,8 +7777,9 @@ static int P210ToARGBMatrixLinear(const uint16_t* src_y, const int row_size = (2 * width + 31) & ~31; align_buffer_64(row, row_size * sizeof(uint16_t)); - if (!row) return 1; uint16_t* temp_uv = (uint16_t*)(row); + if (!row) + return 1; for (y = 0; y < height; ++y) { ScaleRowUp2_Linear(src_uv, temp_uv, width); @@ -7842,9 +7857,10 @@ static int P010ToAR30MatrixBilinear(const uint16_t* src_y, // alloc 2 lines temp const int row_size = (2 * width + 31) & 
~31; align_buffer_64(row, row_size * 2 * sizeof(uint16_t)); - if (!row) return 1; uint16_t* temp_uv_1 = (uint16_t*)(row); uint16_t* temp_uv_2 = (uint16_t*)(row) + row_size; + if (!row) + return 1; Scale2RowUp_Bilinear_16(src_uv, 0, temp_uv_1, row_size, width); P410ToAR30Row(src_y, temp_uv_1, dst_ar30, yuvconstants, width); @@ -7933,8 +7949,9 @@ static int P210ToAR30MatrixLinear(const uint16_t* src_y, const int row_size = (2 * width + 31) & ~31; align_buffer_64(row, row_size * sizeof(uint16_t)); - if (!row) return 1; uint16_t* temp_uv = (uint16_t*)(row); + if (!row) + return 1; for (y = 0; y < height; ++y) { ScaleRowUp2_Linear(src_uv, temp_uv, width); @@ -8034,9 +8051,10 @@ static int I422ToRGB24MatrixLinear(const uint8_t* src_y, // alloc 2 lines temp const int row_size = (width + 31) & ~31; align_buffer_64(row, row_size * 2); - if (!row) return 1; uint8_t* temp_u = row; uint8_t* temp_v = row + row_size; + if (!row) + return 1; for (y = 0; y < height; ++y) { ScaleRowUp2_Linear(src_u, temp_u, width); diff --git a/source/convert_from_argb.cc b/source/convert_from_argb.cc index 1c0d250d..b45de8c8 100644 --- a/source/convert_from_argb.cc +++ b/source/convert_from_argb.cc @@ -462,8 +462,9 @@ int ARGBToNV12(const uint8_t* src_argb, { // Allocate a rows of uv. align_buffer_64(row_u, ((halfwidth + 31) & ~31) * 2); - if (!row_u) return 1; uint8_t* row_v = row_u + ((halfwidth + 31) & ~31); + if (!row_u) + return 1; for (y = 0; y < height - 1; y += 2) { ARGBToUVRow(src_argb, src_stride_argb, row_u, row_v, width); @@ -661,8 +662,9 @@ int ARGBToNV21(const uint8_t* src_argb, { // Allocate a rows of uv. align_buffer_64(row_u, ((halfwidth + 31) & ~31) * 2); - if (!row_u) return 1; uint8_t* row_v = row_u + ((halfwidth + 31) & ~31); + if (!row_u) + return 1; for (y = 0; y < height - 1; y += 2) { ARGBToUVRow(src_argb, src_stride_argb, row_u, row_v, width); @@ -847,8 +849,9 @@ int ABGRToNV12(const uint8_t* src_abgr, { // Allocate a rows of uv. align_buffer_64(row_u, ((halfwidth + 31) & ~31) * 2); - if (!row_u) return 1; uint8_t* row_v = row_u + ((halfwidth + 31) & ~31); + if (!row_u) + return 1; for (y = 0; y < height - 1; y += 2) { ABGRToUVRow(src_abgr, src_stride_abgr, row_u, row_v, width); @@ -1034,8 +1037,9 @@ int ABGRToNV21(const uint8_t* src_abgr, { // Allocate a rows of uv. align_buffer_64(row_u, ((halfwidth + 31) & ~31) * 2); - if (!row_u) return 1; uint8_t* row_v = row_u + ((halfwidth + 31) & ~31); + if (!row_u) + return 1; for (y = 0; y < height - 1; y += 2) { ABGRToUVRow(src_abgr, src_stride_abgr, row_u, row_v, width); @@ -1234,9 +1238,10 @@ int ARGBToYUY2(const uint8_t* src_argb, { // Allocate a rows of yuv. align_buffer_64(row_y, ((width + 63) & ~63) * 2); - if (!row_y) return 1; uint8_t* row_u = row_y + ((width + 63) & ~63); uint8_t* row_v = row_u + ((width + 63) & ~63) / 2; + if (!row_y) + return 1; for (y = 0; y < height; ++y) { ARGBToUVRow(src_argb, 0, row_u, row_v, width); @@ -1429,9 +1434,10 @@ int ARGBToUYVY(const uint8_t* src_argb, { // Allocate a rows of yuv. align_buffer_64(row_y, ((width + 63) & ~63) * 2); - if (!row_y) return 1; uint8_t* row_u = row_y + ((width + 63) & ~63); uint8_t* row_v = row_u + ((width + 63) & ~63) / 2; + if (!row_y) + return 1; for (y = 0; y < height; ++y) { ARGBToUVRow(src_argb, 0, row_u, row_v, width); @@ -3278,16 +3284,21 @@ int RAWToJNV21(const uint8_t* src_raw, } #endif { +#if defined(HAS_RAWTOYJROW) // Allocate a row of uv. 
- align_buffer_64(row_uj, ((halfwidth + 31) & ~31) * 2); - if (!row_uj) return 1; - uint8_t* row_vj = row_uj + ((halfwidth + 31) & ~31); -#if !defined(HAS_RAWTOYJROW) - // Allocate 2 rows of ARGB. - const int row_size = (width * 4 + 31) & ~31; - align_buffer_64(row, row_size * 2); - if (!row) return 1; + const int row_uv_size = ((halfwidth + 31) & ~31); + align_buffer_64(row_uj, row_uv_size * 2); + uint8_t* row_vj = row_uj + row_uv_size; +#else + // Allocate row of uv and 2 rows of ARGB. + const int row_size = ((width * 4 + 31) & ~31); + const int row_uv_size = ((halfwidth + 31) & ~31); + align_buffer_64(row_uj, row_uv_size * 2 + row_size * 2); + uint8_t* row_vj = row_uj + row_uv_size; + uint8_t* row = row_vj + row_uv_size; #endif + if (!row_uj) + return 1; for (y = 0; y < height - 1; y += 2) { #if defined(HAS_RAWTOYJROW) @@ -3319,9 +3330,6 @@ int RAWToJNV21(const uint8_t* src_raw, ARGBToYJRow(row, dst_y, width); #endif } -#if !defined(HAS_RAWTOYJROW) - free_aligned_buffer_64(row); -#endif free_aligned_buffer_64(row_uj); } return 0; diff --git a/source/planar_functions.cc b/source/planar_functions.cc index 93a714e6..1c94e260 100644 --- a/source/planar_functions.cc +++ b/source/planar_functions.cc @@ -3027,7 +3027,8 @@ int I420Blend(const uint8_t* src_y0, // Row buffer for intermediate alpha pixels. align_buffer_64(halfalpha, halfwidth); - if (!halfalpha) return 1; + if (!halfalpha) + return 1; for (y = 0; y < height; y += 2) { // last row of odd height image use 1 row of alpha instead of 2. if (y == (height - 1)) { @@ -4711,7 +4712,8 @@ int GaussPlane_F32(const float* src, { // 2 pixels on each side, but aligned out to 16 bytes. align_buffer_64(rowbuf, (4 + width + 4) * 4); - if (!rowbuf) return 1; + if (!rowbuf) + return 1; memset(rowbuf, 0, 16); memset(rowbuf + (4 + width) * 4, 0, 16); float* row = (float*)(rowbuf + 16); @@ -4862,7 +4864,6 @@ static int ARGBSobelize(const uint8_t* src_argb, // 3 rows with edges before/after. const int row_size = (width + kEdge + 31) & ~31; align_buffer_64(rows, row_size * 2 + (kEdge + row_size * 3 + kEdge)); - if (!rows) return 1; uint8_t* row_sobelx = rows; uint8_t* row_sobely = rows + row_size; uint8_t* row_y = rows + row_size * 2; @@ -4871,6 +4872,8 @@ static int ARGBSobelize(const uint8_t* src_argb, uint8_t* row_y0 = row_y + kEdge; uint8_t* row_y1 = row_y0 + row_size; uint8_t* row_y2 = row_y1 + row_size; + if (!rows) + return 1; ARGBToYJRow(src_argb, row_y0, width); row_y0[-1] = row_y0[0]; memset(row_y0 + width, row_y0[width - 1], 16); // Extrude 16 for valgrind. @@ -5657,7 +5660,8 @@ int UYVYToNV12(const uint8_t* src_uyvy, int awidth = halfwidth * 2; // row of y and 2 rows of uv align_buffer_64(rows, awidth * 3); - if (!rows) return 1; + if (!rows) + return 1; for (y = 0; y < height - 1; y += 2) { // Split Y from UV. diff --git a/source/rotate.cc b/source/rotate.cc index 09ba2106..3f8332c3 100644 --- a/source/rotate.cc +++ b/source/rotate.cc @@ -8,6 +8,8 @@ * be found in the AUTHORS file in the root of the source tree. */ +#include + #include "libyuv/rotate.h" #include "libyuv/convert.h" @@ -140,7 +142,9 @@ void RotatePlane180(const uint8_t* src, int height) { // Swap top and bottom row and mirror the content. Uses a temporary row. 
align_buffer_64(row, width); - if (!row) return; + assert(row); + if (!row) + return; const uint8_t* src_bot = src + src_stride * (height - 1); uint8_t* dst_bot = dst + dst_stride * (height - 1); int half_height = (height + 1) >> 1; @@ -544,20 +548,23 @@ static void RotatePlane180_16(const uint16_t* src, int dst_stride, int width, int height) { - // Swap top and bottom row and mirror the content. Uses a temporary row. - align_buffer_64(row, width * 2); - if (!row) return; - uint16_t* row_bot = (uint16_t*) row; const uint16_t* src_bot = src + src_stride * (height - 1); uint16_t* dst_bot = dst + dst_stride * (height - 1); int half_height = (height + 1) >> 1; int y; + // Swap top and bottom row and mirror the content. Uses a temporary row. + align_buffer_64(row, width * 2); + uint16_t* row_tmp = (uint16_t*)row; + assert(row); + if (!row) + return; + // Odd height will harmlessly mirror the middle row twice. for (y = 0; y < half_height; ++y) { - CopyRow_16_C(src, row_bot, width); // Copy top row into buffer - MirrorRow_16_C(src_bot, dst, width); // Mirror bottom row into top row - MirrorRow_16_C(row_bot, dst_bot, width); // Mirror buffer into bottom row + CopyRow_16_C(src, row_tmp, width); // Copy top row into buffer + MirrorRow_16_C(src_bot, dst, width); // Mirror bottom row into top row + MirrorRow_16_C(row_tmp, dst_bot, width); // Mirror buffer into bottom row src += src_stride; dst += dst_stride; src_bot -= src_stride; diff --git a/source/rotate_argb.cc b/source/rotate_argb.cc index 70a1ee98..d55fac4f 100644 --- a/source/rotate_argb.cc +++ b/source/rotate_argb.cc @@ -120,8 +120,6 @@ static int ARGBRotate180(const uint8_t* src_argb, int width, int height) { // Swap first and last row and mirror the content. Uses a temporary row. - align_buffer_64(row, width * 4); - if (!row) return 1; const uint8_t* src_bot = src_argb + src_stride_argb * (height - 1); uint8_t* dst_bot = dst_argb + dst_stride_argb * (height - 1); int half_height = (height + 1) >> 1; @@ -130,6 +128,9 @@ static int ARGBRotate180(const uint8_t* src_argb, ARGBMirrorRow_C; void (*CopyRow)(const uint8_t* src_argb, uint8_t* dst_argb, int width) = CopyRow_C; + align_buffer_64(row, width * 4); + if (!row) + return 1; #if defined(HAS_ARGBMIRRORROW_NEON) if (TestCpuFlag(kCpuHasNEON)) { ARGBMirrorRow = ARGBMirrorRow_Any_NEON; diff --git a/source/scale.cc b/source/scale.cc index b518a39d..b7a602ba 100644 --- a/source/scale.cc +++ b/source/scale.cc @@ -960,7 +960,8 @@ static int ScalePlaneBox(int src_width, { // Allocate a row buffer of uint16_t. align_buffer_64(row16, src_width * 2); - if (!row16) return 1; + if (!row16) + return 1; void (*ScaleAddCols)(int dst_width, int boxheight, int x, int dx, const uint16_t* src_ptr, uint8_t* dst_ptr) = (dx & 0xffff) ? ScaleAddCols2_C @@ -1056,7 +1057,8 @@ static int ScalePlaneBox_16(int src_width, { // Allocate a row buffer of uint32_t. align_buffer_64(row32, src_width * 4); - if (!row32) return 1; + if (!row32) + return 1; void (*ScaleAddCols)(int dst_width, int boxheight, int x, int dx, const uint32_t* src_ptr, uint16_t* dst_ptr) = (dx & 0xffff) ? ScaleAddCols2_16_C : ScaleAddCols1_16_C; @@ -1109,7 +1111,8 @@ static int ScalePlaneBilinearDown(int src_width, // TODO(fbarchard): Consider not allocating row buffer for kFilterLinear. // Allocate a row buffer. 
align_buffer_64(row, src_width); - if (!row) return 1; + if (!row) + return 1; const int max_y = (src_height - 1) << 16; int j; @@ -1239,7 +1242,8 @@ static int ScalePlaneBilinearDown_16(int src_width, // TODO(fbarchard): Consider not allocating row buffer for kFilterLinear. // Allocate a row buffer. align_buffer_64(row, src_width * 2); - if (!row) return 1; + if (!row) + return 1; const int max_y = (src_height - 1) << 16; int j; @@ -1423,7 +1427,8 @@ static int ScalePlaneBilinearUp(int src_width, // Allocate 2 row buffers. const int row_size = (dst_width + 31) & ~31; align_buffer_64(row, row_size * 2); - if (!row) return 1; + if (!row) + return 1; uint8_t* rowptr = row; int rowstride = row_size; @@ -1892,11 +1897,11 @@ static int ScalePlaneBilinearUp_16(int src_width, // Allocate 2 row buffers. const int row_size = (dst_width + 31) & ~31; align_buffer_64(row, row_size * 4); - if (!row) return 1; - - uint16_t* rowptr = (uint16_t*)row; int rowstride = row_size; int lasty = yi; + uint16_t* rowptr = (uint16_t*)row; + if (!row) + return 1; ScaleFilterCols(rowptr, src, dst_width, x, dx); if (src_height > 1) { diff --git a/source/scale_argb.cc b/source/scale_argb.cc index 65d0c892..c8e0db9e 100644 --- a/source/scale_argb.cc +++ b/source/scale_argb.cc @@ -170,7 +170,8 @@ static int ScaleARGBDown4Box(int src_width, // but implemented via a 2 pass wrapper that uses a very small array on the // stack with a horizontal loop. align_buffer_64(row, row_size * 2); - if (!row) return 1; + if (!row) + return 1; int row_stride = src_stride * (dy >> 16); void (*ScaleARGBRowDown2)(const uint8_t* src_argb, ptrdiff_t src_stride, uint8_t* dst_argb, int dst_width) = @@ -412,7 +413,8 @@ static int ScaleARGBBilinearDown(int src_width, // Allocate a row of ARGB. { align_buffer_64(row, clip_src_width * 4); - if (!row) return 1; + if (!row) + return 1; const int max_y = (src_height - 1) << 16; if (y > max_y) { @@ -588,7 +590,8 @@ static int ScaleARGBBilinearUp(int src_width, // Allocate 2 rows of ARGB. const int row_size = (dst_width * 4 + 31) & ~31; align_buffer_64(row, row_size * 2); - if (!row) return 1; + if (!row) + return 1; uint8_t* rowptr = row; int rowstride = row_size; @@ -855,21 +858,17 @@ static int ScaleYUVToARGBBilinearUp(int src_width, const uint8_t* src_row_u = src_u + uv_yi * (intptr_t)src_stride_u; const uint8_t* src_row_v = src_v + uv_yi * (intptr_t)src_stride_v; - // Allocate 2 rows of ARGB. + // Allocate 1 row of ARGB for source conversion and 2 rows of ARGB + // scaled horizontally to the destination width. const int row_size = (dst_width * 4 + 31) & ~31; - align_buffer_64(row, row_size * 2); - if (!row) return 1; - - // Allocate 1 row of ARGB for source conversion. - align_buffer_64(argb_row, src_width * 4); - if (!argb_row) { - free_aligned_buffer_64(row); - return 1; - } + align_buffer_64(row, row_size * 2 + src_width * 4); + uint8_t* argb_row = row + row_size * 2; uint8_t* rowptr = row; int rowstride = row_size; int lasty = yi; + if (!row) + return 1; // TODO(fbarchard): Convert first 2 rows of YUV to ARGB. ScaleARGBFilterCols(rowptr, src_row_y, dst_width, x, dx); @@ -924,7 +923,6 @@ static int ScaleYUVToARGBBilinearUp(int src_width, y += dy; } free_aligned_buffer_64(row); - free_aligned_buffer_64(row_argb); return 0; } #endif diff --git a/source/scale_uv.cc b/source/scale_uv.cc index b006d657..0931c89a 100644 --- a/source/scale_uv.cc +++ b/source/scale_uv.cc @@ -204,7 +204,8 @@ static int ScaleUVDown4Box(int src_width, // Allocate 2 rows of UV. 
const int row_size = (dst_width * 2 * 2 + 15) & ~15; align_buffer_64(row, row_size * 2); - if (!row) return 1; + if (!row) + return 1; int row_stride = src_stride * (dy >> 16); void (*ScaleUVRowDown2)(const uint8_t* src_uv, ptrdiff_t src_stride, uint8_t* dst_uv, int dst_width) = @@ -448,10 +449,10 @@ static int ScaleUVBilinearDown(int src_width, // TODO(fbarchard): Consider not allocating row buffer for kFilterLinear. // Allocate a row of UV. { - align_buffer_64(row, clip_src_width * 2); - if (!row) return 1; - const int max_y = (src_height - 1) << 16; + align_buffer_64(row, clip_src_width * 2); + if (!row) + return 1; if (y > max_y) { y = max_y; } @@ -610,7 +611,8 @@ static int ScaleUVBilinearUp(int src_width, // Allocate 2 rows of UV. const int row_size = (dst_width * 2 + 15) & ~15; align_buffer_64(row, row_size * 2); - if (!row) return 1; + if (!row) + return 1; uint8_t* rowptr = row; int rowstride = row_size; -- cgit v1.2.3
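The convention this patch applies throughout libyuv: public conversion, rotate, and scale functions allocate their temporary rows with align_buffer_64, compute any derived sub-pointers, then perform a single NULL check on the base pointer and return 1 on allocation failure, while internal void helpers (RotatePlane180, RotatePlane180_16) assert() and return early instead, since they have no way to report the error. The sketch below illustrates that convention. The align_buffer_64/free_aligned_buffer_64 macros here are simplified stand-ins, not libyuv's actual definitions, and both example functions are hypothetical, not part of the library.

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

// Simplified stand-ins (assumption): malloc with 63 bytes of slack, round the
// pointer up to a 64-byte boundary, and leave the aligned pointer NULL when
// malloc fails.  libyuv's real macros may differ in detail.
#define align_buffer_64(var, size)                                         \
  void* var##_mem = malloc((size) + 63);                                   \
  uint8_t* var =                                                           \
      var##_mem ? (uint8_t*)(((intptr_t)(var##_mem) + 63) & ~63) : NULL

#define free_aligned_buffer_64(var) \
  free(var##_mem);                  \
  var = NULL

// Public-API style (hypothetical function): allocate one temp block, derive
// secondary pointers from it, then do a single NULL check on the base pointer
// and return 1 -- libyuv's error value -- if malloc failed.
static int InterleaveUVRows_Example(const uint8_t* src_u,
                                    const uint8_t* src_v,
                                    uint8_t* dst_uv,
                                    int width,
                                    int height) {
  align_buffer_64(row_u, width * 2);  // one block holds both temp rows
  uint8_t* row_v = row_u + width;     // derived pointer; plain arithmetic,
  if (!row_u)                         // safe to compute before the check
    return 1;                         // allocation failed; tell the caller
  for (int y = 0; y < height; ++y) {
    memcpy(row_u, src_u + (intptr_t)y * width, width);
    memcpy(row_v, src_v + (intptr_t)y * width, width);
    for (int x = 0; x < width; ++x) {
      dst_uv[2 * x + 0] = row_u[x];
      dst_uv[2 * x + 1] = row_v[x];
    }
    dst_uv += 2 * width;
  }
  free_aligned_buffer_64(row_u);
  return 0;
}

// Internal-helper style (hypothetical function): a void helper cannot report
// failure, so it asserts in debug builds and silently returns in release
// builds, mirroring RotatePlane180 in this patch.
static void FlipPlaneVertically_Example(uint8_t* plane,
                                        int stride,
                                        int width,
                                        int height) {
  align_buffer_64(row, width);
  assert(row);
  if (!row)
    return;
  uint8_t* top = plane;
  uint8_t* bot = plane + (intptr_t)(height - 1) * stride;
  for (int y = 0; y < height / 2; ++y) {
    memcpy(row, top, width);   // swap top and bottom rows via the temp row
    memcpy(top, bot, width);
    memcpy(bot, row, width);
    top += stride;
    bot -= stride;
  }
  free_aligned_buffer_64(row);
}

Placing the NULL check after the derived-pointer declarations keeps all declarations together and means only one check and one free are needed per function; the same motivation drives the RAWToJNV21 and ScaleYUVToARGBBilinearUp changes above, which merge two allocations into a single block so the failure path never has to free a partially allocated pair.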
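The recurring row_size expressions in this patch, such as (width * 4 + 31) & ~31 and (dst_width * 2 + 15) & ~15, are the usual round-up-to-a-power-of-two idiom: add (alignment - 1), then clear the low bits, so every temporary row is padded to a 32-byte (or 16-byte) multiple before the 64-byte-aligned allocation. A minimal illustration follows; RoundUp32 is a hypothetical helper, not a libyuv function.

#include <assert.h>

// Round n up to the next multiple of 32: add 31, then clear the low 5 bits.
static int RoundUp32(int n) {
  return (n + 31) & ~31;
}

int main(void) {
  assert(RoundUp32(0) == 0);
  assert(RoundUp32(1) == 32);
  assert(RoundUp32(32) == 32);
  assert(RoundUp32(33) == 64);
  assert(RoundUp32(100) == 128);
  return 0;
}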