author     Frank Barchard <fbarchard@google.com>  2021-10-14 13:06:54 -0700
committer  libyuv LUCI CQ <libyuv-scoped@luci-project-accounts.iam.gserviceaccount.com>  2021-10-14 20:37:39 +0000
commit     11cbf8f976a41ccb279dc67489832ea9f12d56d7 (patch)
tree       07e2ffa24eaadc89eb70feb18de8d039de60b214 /source/scale_argb.cc
parent     daf9778a24a138cf7578b1ddf70ca867c2882c2c (diff)
download   libyuv-11cbf8f976a41ccb279dc67489832ea9f12d56d7.tar.gz
Add LIBYUV_BIT_EXACT macro to force C to match SIMD
- C code uses the ARM path, so NEON and C match.
- C is used on Intel platforms, disabling AVX.

Bug: libyuv:908, b/202888439
Change-Id: Ie035a150a60d3cf4ee7c849a96819d43640cf020
Reviewed-on: https://chromium-review.googlesource.com/c/libyuv/libyuv/+/3223507
Commit-Queue: Frank Barchard <fbarchard@chromium.org>
Reviewed-by: richard winterton <rrwinterton@gmail.com>
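(For orientation, a hedged sketch of the kind of compile-time gating the title describes. LIBYUV_BIT_EXACT and LIBYUV_DISABLE_X86 are libyuv macros, and HAS_SCALEARGBROWDOWN2_SSE2 appears in the diff below, but the guard layout shown here is illustrative only; the real guards live in libyuv's row/scale headers and may differ.)

    // Illustrative sketch only; not the verbatim guards from libyuv's headers.
    // When LIBYUV_BIT_EXACT is defined, the x86 row kernel guard is never
    // defined, so the portable C rows (written to match the NEON rounding)
    // are used on Intel as well.
    #if !defined(LIBYUV_BIT_EXACT) && !defined(LIBYUV_DISABLE_X86) && \
        (defined(__x86_64__) || defined(_M_X64) || defined(__i386__) || \
         defined(_M_IX86))
    #define HAS_SCALEARGBROWDOWN2_SSE2
    #endif

With the guard left undefined, runtime-dispatch blocks such as the "#if defined(HAS_SCALEARGBROWDOWN2_SSE2)" section in the diff below compile to nothing, so only the C rows remain.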
Diffstat (limited to 'source/scale_argb.cc')
-rw-r--r--  source/scale_argb.cc  36
1 file changed, 18 insertions(+), 18 deletions(-)
diff --git a/source/scale_argb.cc b/source/scale_argb.cc
index 451d4ec4..073df1ae 100644
--- a/source/scale_argb.cc
+++ b/source/scale_argb.cc
@@ -58,9 +58,9 @@ static void ScaleARGBDown2(int src_width,
assert((dy & 0x1ffff) == 0); // Test vertical scale is multiple of 2.
// Advance to odd row, even column.
if (filtering == kFilterBilinear) {
- src_argb += (y >> 16) * src_stride + (x >> 16) * 4;
+ src_argb += (y >> 16) * (int64_t)src_stride + (x >> 16) * 4;
} else {
- src_argb += (y >> 16) * src_stride + ((x >> 16) - 1) * 4;
+ src_argb += (y >> 16) * (int64_t)src_stride + ((x >> 16) - 1) * 4;
}
#if defined(HAS_SCALEARGBROWDOWN2_SSE2)
@@ -162,7 +162,7 @@ static void ScaleARGBDown4Box(int src_width,
uint8_t* dst_argb, int dst_width) =
ScaleARGBRowDown2Box_C;
// Advance to odd row, even column.
- src_argb += (y >> 16) * src_stride + (x >> 16) * 4;
+ src_argb += (y >> 16) * (int64_t)src_stride + (x >> 16) * 4;
(void)src_width;
(void)src_height;
(void)dx;
@@ -214,7 +214,7 @@ static void ScaleARGBDownEven(int src_width,
enum FilterMode filtering) {
int j;
int col_step = dx >> 16;
- int row_stride = (dy >> 16) * src_stride;
+ int row_stride = (dy >> 16) * (int64_t)src_stride;
void (*ScaleARGBRowDownEven)(const uint8_t* src_argb, ptrdiff_t src_stride,
int src_step, uint8_t* dst_argb, int dst_width) =
filtering ? ScaleARGBRowDownEvenBox_C : ScaleARGBRowDownEven_C;
@@ -222,7 +222,7 @@ static void ScaleARGBDownEven(int src_width,
(void)src_height;
assert(IS_ALIGNED(src_width, 2));
assert(IS_ALIGNED(src_height, 2));
- src_argb += (y >> 16) * src_stride + (x >> 16) * 4;
+ src_argb += (y >> 16) * (int64_t)src_stride + (x >> 16) * 4;
#if defined(HAS_SCALEARGBROWDOWNEVEN_SSE2)
if (TestCpuFlag(kCpuHasSSE2)) {
ScaleARGBRowDownEven = filtering ? ScaleARGBRowDownEvenBox_Any_SSE2
@@ -372,7 +372,7 @@ static void ScaleARGBBilinearDown(int src_width,
}
for (j = 0; j < dst_height; ++j) {
int yi = y >> 16;
- const uint8_t* src = src_argb + yi * src_stride;
+ const uint8_t* src = src_argb + yi * (int64_t)src_stride;
if (filtering == kFilterLinear) {
ScaleARGBFilterCols(dst_argb, src, dst_width, x, dx);
} else {
@@ -526,7 +526,7 @@ static void ScaleARGBBilinearUp(int src_width,
{
int yi = y >> 16;
- const uint8_t* src = src_argb + yi * src_stride;
+ const uint8_t* src = src_argb + yi * (int64_t)src_stride;
// Allocate 2 rows of ARGB.
const int kRowSize = (dst_width * 4 + 31) & ~31;
@@ -549,7 +549,7 @@ static void ScaleARGBBilinearUp(int src_width,
if (y > max_y) {
y = max_y;
yi = y >> 16;
- src = src_argb + yi * src_stride;
+ src = src_argb + yi * (int64_t)src_stride;
}
if (yi != lasty) {
ScaleARGBFilterCols(rowptr, src, dst_width, x, dx);
@@ -750,9 +750,9 @@ static void ScaleYUVToARGBBilinearUp(int src_width,
const int kYShift = 1; // Shift Y by 1 to convert Y plane to UV coordinate.
int yi = y >> 16;
int uv_yi = yi >> kYShift;
- const uint8_t* src_row_y = src_y + yi * src_stride_y;
- const uint8_t* src_row_u = src_u + uv_yi * src_stride_u;
- const uint8_t* src_row_v = src_v + uv_yi * src_stride_v;
+ const uint8_t* src_row_y = src_y + yi * (int64_t)src_stride_y;
+ const uint8_t* src_row_u = src_u + uv_yi * (int64_t)src_stride_u;
+ const uint8_t* src_row_v = src_v + uv_yi * (int64_t)src_stride_v;
// Allocate 2 rows of ARGB.
const int kRowSize = (dst_width * 4 + 31) & ~31;
@@ -790,9 +790,9 @@ static void ScaleYUVToARGBBilinearUp(int src_width,
y = max_y;
yi = y >> 16;
uv_yi = yi >> kYShift;
- src_row_y = src_y + yi * src_stride_y;
- src_row_u = src_u + uv_yi * src_stride_u;
- src_row_v = src_v + uv_yi * src_stride_v;
+ src_row_y = src_y + yi * (int64_t)src_stride_y;
+ src_row_u = src_u + uv_yi * (int64_t)src_stride_u;
+ src_row_v = src_v + uv_yi * (int64_t)src_stride_v;
}
if (yi != lasty) {
// TODO(fbarchard): Convert the clipped region of row.
@@ -888,7 +888,7 @@ static void ScaleARGBSimple(int src_width,
}
for (j = 0; j < dst_height; ++j) {
- ScaleARGBCols(dst_argb, src_argb + (y >> 16) * src_stride, dst_width, x,
+ ScaleARGBCols(dst_argb, src_argb + (y >> 16) * (int64_t)src_stride, dst_width, x,
dx);
dst_argb += dst_stride;
y += dy;
@@ -924,7 +924,7 @@ static void ScaleARGB(const uint8_t* src,
// Negative src_height means invert the image.
if (src_height < 0) {
src_height = -src_height;
- src = src + (src_height - 1) * src_stride;
+ src = src + (src_height - 1) * (int64_t)src_stride;
src_stride = -src_stride;
}
ScaleSlope(src_width, src_height, dst_width, dst_height, filtering, &x, &y,
@@ -939,7 +939,7 @@ static void ScaleARGB(const uint8_t* src,
if (clip_y) {
int64_t clipf = (int64_t)(clip_y)*dy;
y += (clipf & 0xffff);
- src += (clipf >> 16) * src_stride;
+ src += (clipf >> 16) * (int64_t)src_stride;
dst += clip_y * dst_stride;
}
@@ -973,7 +973,7 @@ static void ScaleARGB(const uint8_t* src,
filtering = kFilterNone;
if (dx == 0x10000 && dy == 0x10000) {
// Straight copy.
- ARGBCopy(src + (y >> 16) * src_stride + (x >> 16) * 4, src_stride,
+ ARGBCopy(src + (y >> 16) * (int64_t)src_stride + (x >> 16) * 4, src_stride,
dst, dst_stride, clip_width, clip_height);
return;
}
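All 18 changed lines apply the same fix: the row offset (y >> 16) * src_stride was a 32-bit int-by-int multiply, which can overflow before it is added to the pointer once the row index times the stride exceeds INT_MAX; casting one operand to int64_t promotes the product to 64 bits. Below is a minimal standalone sketch of the failure mode, with illustrative values that are not taken from the commit:

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      // Illustrative values: a 32768-pixel-wide ARGB surface, row 20000.
      int src_stride = 32768 * 4;  // 131072 bytes per row
      int y = 20000 << 16;         // row position in 16.16 fixed point

      // 32-bit multiply: 20000 * 131072 = 2621440000, which overflows int
      // (undefined behavior), so the pointer would be advanced by garbage:
      //   const uint8_t* src = src_argb + (y >> 16) * src_stride;

      // With the cast added in this commit, the multiply is done in 64 bits:
      int64_t offset = (y >> 16) * (int64_t)src_stride;
      printf("%lld\n", (long long)offset);  // 2621440000
      return 0;
    }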