author    Amit Pundir <amit.pundir@linaro.org>  2023-09-06 01:39:10 +0000
committer Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>  2023-09-06 01:39:10 +0000
commit    f628c0e2ced48e443078fe4e6baf7840eceaea16 (patch)
tree      881269af3c22664e6571be093ca36e8de721f344
parent    cdbe4f653c003ad167fe214490d79e570a7eb3e2 (diff)
parent    9a379409ec9bf476197097bbdd71fa8ebd160daa (diff)
download  minigbm-f628c0e2ced48e443078fe4e6baf7840eceaea16.tar.gz
Merge remote-tracking branch 'aosp/upstream-main' into 'aosp/main' am: 21fc4f10c8 am: 2f8cb4cd96 am: 47a79c7f79 am: 9a379409ec

Original change: https://android-review.googlesource.com/c/platform/external/minigbm/+/2606528

Change-Id: Ic58e83033eb2a230e06ee1c0646672f4f3441069
Signed-off-by: Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>
-rw-r--r--  amdgpu.c                               29
-rw-r--r--  cros_gralloc/cros_gralloc_buffer.cc     9
-rw-r--r--  cros_gralloc/cros_gralloc_driver.cc    22
-rw-r--r--  cros_gralloc/cros_gralloc_driver.h      1
-rw-r--r--  cros_gralloc/cros_gralloc_helpers.cc    9
-rw-r--r--  cros_gralloc/gralloc0/gralloc0.cc      46
-rw-r--r--  dri.c                                 109
-rw-r--r--  drv.c                                  57
-rw-r--r--  drv.h                                   7
-rw-r--r--  drv_helpers.c                         126
-rw-r--r--  drv_helpers.h                          22
-rw-r--r--  drv_priv.h                              2
-rw-r--r--  i915.c                                 28
-rw-r--r--  mediatek.c                             76
-rw-r--r--  msm.c                                  10
-rw-r--r--  rockchip.c                              6
-rw-r--r--  vc4.c                                   5
-rw-r--r--  virtgpu_cross_domain.c                 48
-rw-r--r--  virtgpu_virgl.c                       204
19 files changed, 544 insertions, 272 deletions
diff --git a/amdgpu.c b/amdgpu.c
index a3f5e45..a775cb7 100644
--- a/amdgpu.c
+++ b/amdgpu.c
@@ -518,14 +518,28 @@ static int amdgpu_create_bo_linear(struct bo *bo, uint32_t width, uint32_t heigh
uint64_t use_flags)
{
int ret;
+ bool need_align = false;
uint32_t stride_align = 1;
- uint32_t plane, stride;
+ uint32_t stride;
union drm_amdgpu_gem_create gem_create = { { 0 } };
struct amdgpu_priv *priv = bo->drv->priv;
stride = drv_stride_from_format(format, width, 0);
- if (use_flags & BO_USE_HW_MASK) {
+ /* some clients (e.g., virtio-wl) set BO_USE_LINEAR to mean
+ * BO_USE_SCANOUT or BO_USE_TEXTURE
+ */
+ need_align = use_flags & (BO_USE_HW_MASK | BO_USE_LINEAR);
+
+#if defined(ANDROID) && ANDROID_API_LEVEL < 30
+ /* work around
+ * android.hardware.camera2.cts.ImageWriterTest#testYuvImageWriterReaderOperation
+ * failure before R
+ */
+ need_align = true;
+#endif
+
+ if (need_align) {
/* GFX9+ requires the stride to be aligned to 256 bytes */
stride_align = 256;
stride = ALIGN(stride, stride_align);
@@ -595,8 +609,7 @@ static int amdgpu_create_bo_linear(struct bo *bo, uint32_t width, uint32_t heigh
if (ret < 0)
return ret;
- for (plane = 0; plane < bo->meta.num_planes; plane++)
- bo->handles[plane].u32 = gem_create.out.handle;
+ bo->handle.u32 = gem_create.out.handle;
bo->meta.format_modifier = DRM_FORMAT_MOD_LINEAR;
@@ -689,7 +702,7 @@ static void *amdgpu_map_bo(struct bo *bo, struct vma *vma, uint32_t map_flags)
union drm_amdgpu_gem_mmap gem_map = { { 0 } };
struct drm_amdgpu_gem_create_in bo_info = { 0 };
struct drm_amdgpu_gem_op gem_op = { 0 };
- uint32_t handle = bo->handles[0].u32;
+ uint32_t handle = bo->handle.u32;
struct amdgpu_linear_vma_priv *priv = NULL;
struct amdgpu_priv *drv_priv;
@@ -731,7 +744,7 @@ static void *amdgpu_map_bo(struct bo *bo, struct vma *vma, uint32_t map_flags)
priv->map_flags = map_flags;
handle = priv->handle = gem_create.out.handle;
- ret = sdma_copy(bo->drv->priv, bo->drv->fd, bo->handles[0].u32, priv->handle,
+ ret = sdma_copy(bo->drv->priv, bo->drv->fd, bo->handle.u32, priv->handle,
bo_info.bo_size);
if (ret) {
drv_loge("SDMA copy for read failed\n");
@@ -779,7 +792,7 @@ static int amdgpu_unmap_bo(struct bo *bo, struct vma *vma)
if (BO_MAP_WRITE & priv->map_flags) {
r = sdma_copy(bo->drv->priv, bo->drv->fd, priv->handle,
- bo->handles[0].u32, vma->length);
+ bo->handle.u32, vma->length);
if (r)
return r;
}
@@ -800,7 +813,7 @@ static int amdgpu_bo_invalidate(struct bo *bo, struct mapping *mapping)
if (bo->priv)
return 0;
- wait_idle.in.handle = bo->handles[0].u32;
+ wait_idle.in.handle = bo->handle.u32;
wait_idle.in.timeout = AMDGPU_TIMEOUT_INFINITE;
ret = drmCommandWriteRead(bo->drv->fd, DRM_AMDGPU_GEM_WAIT_IDLE, &wait_idle,
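
The 256-byte stride requirement above is satisfied by plain round-up arithmetic. A minimal sketch of the alignment step, assuming an ALIGN macro equivalent to the one minigbm uses and hypothetical example values:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for minigbm's ALIGN(): round A up to the next multiple of B. */
#define ALIGN(A, B) (((A) + (B) - 1) / (B) * (B))

int main(void)
{
	/* Hypothetical 4-byte-per-pixel stride that is not 256-byte aligned. */
	uint32_t stride = 1283 * 4; /* 5132 */

	/* GFX9+ requires the stride to be aligned to 256 bytes. */
	stride = ALIGN(stride, 256);
	printf("%u\n", stride); /* 5376 */
	return 0;
}
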
diff --git a/cros_gralloc/cros_gralloc_buffer.cc b/cros_gralloc/cros_gralloc_buffer.cc
index 44652a9..dba8061 100644
--- a/cros_gralloc/cros_gralloc_buffer.cc
+++ b/cros_gralloc/cros_gralloc_buffer.cc
@@ -138,15 +138,6 @@ int32_t cros_gralloc_buffer::lock(const struct rectangle *rect, uint32_t map_fla
memset(addr, 0, DRV_MAX_PLANES * sizeof(*addr));
- /*
- * Gralloc consumers don't support more than one kernel buffer per buffer object yet, so
- * just use the first kernel buffer.
- */
- if (drv_num_buffers_per_bo(bo_) != 1) {
- ALOGE("Can only support one buffer per bo.");
- return -EINVAL;
- }
-
if (map_flags) {
if (lock_data_[0]) {
drv_bo_invalidate(bo_, lock_data_[0]);
diff --git a/cros_gralloc/cros_gralloc_driver.cc b/cros_gralloc/cros_gralloc_driver.cc
index a714798..e8adf42 100644
--- a/cros_gralloc/cros_gralloc_driver.cc
+++ b/cros_gralloc/cros_gralloc_driver.cc
@@ -155,9 +155,6 @@ static void drv_destroy_and_close(struct driver *drv)
cros_gralloc_driver::cros_gralloc_driver() : drv_(init_try_nodes(), drv_destroy_and_close)
{
- char buf[PROP_VALUE_MAX];
- property_get("ro.product.device", buf, "unknown");
- mt8183_camera_quirk_ = !strncmp(buf, "kukui", strlen("kukui"));
}
cros_gralloc_driver::~cros_gralloc_driver()
@@ -179,14 +176,6 @@ bool cros_gralloc_driver::get_resolved_format_and_use_flags(
uint64_t resolved_use_flags;
struct combination *combo;
- if (mt8183_camera_quirk_ && (descriptor->use_flags & BO_USE_CAMERA_READ) &&
- !(descriptor->use_flags & BO_USE_SCANOUT) &&
- descriptor->drm_format == DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED) {
- *out_use_flags = descriptor->use_flags;
- *out_format = DRM_FORMAT_MTISP_SXYZW10;
- return true;
- }
-
drv_resolve_format_and_use_flags(drv_.get(), descriptor->drm_format, descriptor->use_flags,
&resolved_format, &resolved_use_flags);
@@ -273,16 +262,6 @@ int32_t cros_gralloc_driver::allocate(const struct cros_gralloc_buffer_descripto
return -errno;
}
- /*
- * If there is a desire for more than one kernel buffer, this can be
- * removed once the ArcCodec and Wayland service have the ability to
- * send more than one fd. GL/Vulkan drivers may also have to modified.
- */
- if (drv_num_buffers_per_bo(bo) != 1) {
- ALOGE("Can only support one buffer per bo.");
- goto destroy_bo;
- }
-
num_planes = drv_bo_get_num_planes(bo);
num_fds = num_planes;
@@ -359,7 +338,6 @@ destroy_hnd:
native_handle_close(hnd);
native_handle_delete(hnd);
-destroy_bo:
drv_bo_destroy(bo);
return ret;
}
diff --git a/cros_gralloc/cros_gralloc_driver.h b/cros_gralloc/cros_gralloc_driver.h
index f35757c..2189c33 100644
--- a/cros_gralloc/cros_gralloc_driver.h
+++ b/cros_gralloc/cros_gralloc_driver.h
@@ -83,7 +83,6 @@ class cros_gralloc_driver
std::mutex mutex_;
std::unordered_map<uint32_t, std::unique_ptr<cros_gralloc_buffer>> buffers_;
std::unordered_map<cros_gralloc_handle_t, cros_gralloc_imported_handle_info> handles_;
- bool mt8183_camera_quirk_ = false;
};
#endif
diff --git a/cros_gralloc/cros_gralloc_helpers.cc b/cros_gralloc/cros_gralloc_helpers.cc
index 8c86c66..67a53a0 100644
--- a/cros_gralloc/cros_gralloc_helpers.cc
+++ b/cros_gralloc/cros_gralloc_helpers.cc
@@ -153,10 +153,19 @@ uint32_t cros_gralloc_convert_map_usage(uint64_t usage)
cros_gralloc_handle_t cros_gralloc_convert_handle(buffer_handle_t handle)
{
+ if (sizeof(native_handle_t) + (sizeof(int) * (handle->numFds + handle->numInts)) !=
+ sizeof(struct cros_gralloc_handle))
+ return nullptr;
+
auto hnd = reinterpret_cast<cros_gralloc_handle_t>(handle);
if (!hnd || hnd->magic != cros_gralloc_magic)
return nullptr;
+ // if hnd->reserved_region_size == 0, handle->numFds is hnd->num_planes
+ // if hnd->reserved_region_size > 0, handle->numFds is hnd->num_planes + 1
+ if ((uint32_t)handle->numFds != hnd->num_planes + (hnd->reserved_region_size > 0))
+ return nullptr;
+
return hnd;
}
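
The two checks added above pin down the handle layout: a cros_gralloc_handle must be exactly the size its native_handle header advertises, and its fd count must equal num_planes plus one optional reserved-region fd. A hedged sketch of the same arithmetic in isolation (the header fields are as in libcutils' native_handle_t; `expected` stands in for sizeof(struct cros_gralloc_handle)):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Simplified native_handle header, as declared in libcutils. */
typedef struct {
	int version;
	int numFds;  /* number of file descriptors in the payload */
	int numInts; /* number of ints in the payload */
} native_handle_hdr;

/* True iff the advertised payload fills exactly `expected` bytes. */
static bool size_matches(const native_handle_hdr *h, size_t expected)
{
	return sizeof(*h) + sizeof(int) * (size_t)(h->numFds + h->numInts) == expected;
}

/* True iff the fd count is num_planes, plus one when a reserved region exists. */
static bool fd_count_matches(const native_handle_hdr *h, uint32_t num_planes,
			     uint64_t reserved_region_size)
{
	return (uint32_t)h->numFds == num_planes + (reserved_region_size > 0);
}
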
diff --git a/cros_gralloc/gralloc0/gralloc0.cc b/cros_gralloc/gralloc0/gralloc0.cc
index 3412e85..d04d87e 100644
--- a/cros_gralloc/gralloc0/gralloc0.cc
+++ b/cros_gralloc/gralloc0/gralloc0.cc
@@ -107,6 +107,11 @@ static int gralloc0_free(alloc_device_t *dev, buffer_handle_t handle)
int32_t ret;
auto mod = (struct gralloc0_module const *)dev->common.module;
+ if (!handle) {
+ ALOGE("Invalid buffer handle.");
+ return -EINVAL;
+ }
+
ret = mod->driver->release(handle);
if (ret)
return ret;
@@ -173,6 +178,11 @@ static int gralloc0_open(const struct hw_module_t *mod, const char *name, struct
static int gralloc0_register_buffer(struct gralloc_module_t const *module, buffer_handle_t handle)
{
+ if (!handle) {
+ ALOGE("Invalid buffer handle.");
+ return -EINVAL;
+ }
+
auto const_module = reinterpret_cast<const struct gralloc0_module *>(module);
auto mod = const_cast<struct gralloc0_module *>(const_module);
@@ -186,6 +196,11 @@ static int gralloc0_register_buffer(struct gralloc_module_t const *module, buffe
static int gralloc0_unregister_buffer(struct gralloc_module_t const *module, buffer_handle_t handle)
{
+ if (!handle) {
+ ALOGE("Invalid buffer handle.");
+ return -EINVAL;
+ }
+
auto mod = (struct gralloc0_module const *)module;
return mod->driver->release(handle);
}
@@ -193,12 +208,23 @@ static int gralloc0_unregister_buffer(struct gralloc_module_t const *module, buf
static int gralloc0_lock(struct gralloc_module_t const *module, buffer_handle_t handle, int usage,
int l, int t, int w, int h, void **vaddr)
{
+ if (!handle) {
+ ALOGE("Invalid buffer handle.");
+ return -EINVAL;
+ }
+
return module->lockAsync(module, handle, usage, l, t, w, h, vaddr, -1);
}
static int gralloc0_unlock(struct gralloc_module_t const *module, buffer_handle_t handle)
{
int32_t fence_fd, ret;
+
+ if (!handle) {
+ ALOGE("Invalid buffer handle.");
+ return -EINVAL;
+ }
+
auto mod = (struct gralloc0_module const *)module;
ret = mod->driver->unlock(handle, &fence_fd);
if (ret)
@@ -244,6 +270,10 @@ static int gralloc0_perform(struct gralloc_module_t const *module, int op, ...)
case GRALLOC_DRM_GET_BUFFER_INFO:
/* retrieve handles for ops with buffer_handle_t */
handle = va_arg(args, buffer_handle_t);
+ if (!handle) {
+ ALOGE("Invalid buffer handle.");
+ return -EINVAL;
+ }
hnd = cros_gralloc_convert_handle(handle);
if (!hnd) {
va_end(args);
@@ -349,6 +379,11 @@ static int gralloc0_lock_async(struct gralloc_module_t const *module, buffer_han
return -ENODEV;
}
+ if (!handle) {
+ ALOGE("Invalid buffer handle.");
+ return -EINVAL;
+ }
+
auto hnd = cros_gralloc_convert_handle(handle);
if (!hnd) {
ALOGE("Invalid handle.");
@@ -375,6 +410,12 @@ static int gralloc0_unlock_async(struct gralloc_module_t const *module, buffer_h
int *fence_fd)
{
auto mod = (struct gralloc0_module const *)module;
+
+ if (!handle) {
+ ALOGE("Invalid buffer handle.");
+ return -EINVAL;
+ }
+
return mod->driver->unlock(handle, fence_fd);
}
@@ -400,6 +441,11 @@ static int gralloc0_lock_async_ycbcr(struct gralloc_module_t const *module, buff
return -ENODEV;
}
+ if (!handle) {
+ ALOGE("Invalid buffer handle.");
+ return -EINVAL;
+ }
+
auto hnd = cros_gralloc_convert_handle(handle);
if (!hnd) {
ALOGE("Invalid handle.");
diff --git a/dri.c b/dri.c
index 5a30a77..ccce32a 100644
--- a/dri.c
+++ b/dri.c
@@ -68,28 +68,14 @@ static bool lookup_extension(const __DRIextension *const *extensions, const char
}
/*
- * Close Gem Handle
- */
-static void close_gem_handle(uint32_t handle, int fd)
-{
- struct drm_gem_close gem_close = { 0 };
- int ret = 0;
-
- gem_close.handle = handle;
- ret = drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
- if (ret)
- drv_loge("DRM_IOCTL_GEM_CLOSE failed (handle=%x) error %d\n", handle, ret);
-}
-
-/*
* The DRI GEM namespace may be different from the minigbm's driver GEM namespace. We have
* to import into minigbm.
*/
static int import_into_minigbm(struct dri_driver *dri, struct bo *bo)
{
- uint32_t handle;
- int ret, modifier_upper, modifier_lower, num_planes, i, j;
- off_t dmabuf_sizes[DRV_MAX_PLANES];
+ uint32_t handle = 0;
+ int ret, modifier_upper, modifier_lower, num_planes, prime_fd;
+ off_t dmabuf_size;
__DRIimage *plane_image = NULL;
if (dri->image_extension->queryImage(bo->priv, __DRI_IMAGE_ATTRIB_MODIFIER_UPPER,
@@ -105,84 +91,61 @@ static int import_into_minigbm(struct dri_driver *dri, struct bo *bo)
return -errno;
bo->meta.num_planes = num_planes;
- for (i = 0; i < num_planes; ++i) {
- int prime_fd, stride, offset;
- plane_image = dri->image_extension->fromPlanar(bo->priv, i, NULL);
- __DRIimage *image = plane_image ? plane_image : bo->priv;
- if (!dri->image_extension->queryImage(image, __DRI_IMAGE_ATTRIB_STRIDE, &stride) ||
- !dri->image_extension->queryImage(image, __DRI_IMAGE_ATTRIB_OFFSET, &offset)) {
- ret = -errno;
- goto cleanup;
- }
+ if (!dri->image_extension->queryImage(bo->priv, __DRI_IMAGE_ATTRIB_FD, &prime_fd))
+ return -errno;
- if (!dri->image_extension->queryImage(image, __DRI_IMAGE_ATTRIB_FD, &prime_fd)) {
- ret = -errno;
- goto cleanup;
- }
+ dmabuf_size = lseek(prime_fd, 0, SEEK_END);
+ if (dmabuf_size == (off_t)-1) {
+ close(prime_fd);
+ return -errno;
+ }
- dmabuf_sizes[i] = lseek(prime_fd, 0, SEEK_END);
- if (dmabuf_sizes[i] == (off_t)-1) {
- ret = -errno;
- close(prime_fd);
- goto cleanup;
- }
+ lseek(prime_fd, 0, SEEK_SET);
- lseek(prime_fd, 0, SEEK_SET);
+ ret = drmPrimeFDToHandle(bo->drv->fd, prime_fd, &handle);
- ret = drmPrimeFDToHandle(bo->drv->fd, prime_fd, &handle);
+ close(prime_fd);
- close(prime_fd);
+ if (ret) {
+ drv_loge("drmPrimeFDToHandle failed with %s\n", strerror(errno));
+ return ret;
+ }
- if (ret) {
- drv_loge("drmPrimeFDToHandle failed with %s\n", strerror(errno));
+ bo->handle.u32 = handle;
+ for (int i = 0; i < num_planes; ++i) {
+ int stride, offset;
+ plane_image = dri->image_extension->fromPlanar(bo->priv, i, NULL);
+ __DRIimage *image = plane_image ? plane_image : bo->priv;
+
+ if (!dri->image_extension->queryImage(image, __DRI_IMAGE_ATTRIB_STRIDE, &stride) ||
+ !dri->image_extension->queryImage(image, __DRI_IMAGE_ATTRIB_OFFSET, &offset)) {
+ ret = -errno;
goto cleanup;
}
- bo->handles[i].u32 = handle;
-
bo->meta.strides[i] = stride;
bo->meta.offsets[i] = offset;
if (plane_image)
dri->image_extension->destroyImage(plane_image);
- }
-
- for (i = 0; i < num_planes; ++i) {
- off_t next_plane = dmabuf_sizes[i];
- for (j = 0; j < num_planes; ++j) {
- if (bo->meta.offsets[j] < next_plane &&
- bo->meta.offsets[j] > bo->meta.offsets[i] &&
- bo->handles[j].u32 == bo->handles[i].u32)
- next_plane = bo->meta.offsets[j];
- }
- bo->meta.sizes[i] = next_plane - bo->meta.offsets[i];
-
- /* This is kind of misleading if different planes use
- different dmabufs. */
- bo->meta.total_size += bo->meta.sizes[i];
+ if (i > 0)
+ bo->meta.sizes[i - 1] = bo->meta.offsets[i] - bo->meta.offsets[i - 1];
}
+ bo->meta.sizes[num_planes - 1] = dmabuf_size - bo->meta.offsets[num_planes - 1];
+ bo->meta.total_size = dmabuf_size;
+
return 0;
cleanup:
if (plane_image)
dri->image_extension->destroyImage(plane_image);
- while (--i >= 0) {
- for (j = 0; j <= i; ++j)
- if (bo->handles[j].u32 == bo->handles[i].u32)
- break;
-
- /* Multiple equivalent handles) */
- if (i == j)
- break;
-
- /* This kind of goes horribly wrong when we already imported
- * the same handles earlier, as we should really reference
- * count handles. */
- close_gem_handle(bo->handles[i].u32, bo->drv->fd);
- }
+
+ if (handle != 0)
+ drv_gem_close(bo->drv, handle);
+
return ret;
}
@@ -416,7 +379,7 @@ int dri_bo_release(struct bo *bo)
int dri_bo_destroy(struct bo *bo)
{
assert(bo->priv);
- close_gem_handle(bo->handles[0].u32, bo->drv->fd);
+ drv_gem_close(bo->drv, bo->handle.u32);
bo->priv = NULL;
return 0;
}
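
The rewritten import above computes each plane's size as the gap to the next plane's offset, with the last plane running to the end of the dmabuf. A sketch of that arithmetic, assuming offsets ascend within a single dmabuf (which the loop relies on):

#include <stdint.h>
#include <sys/types.h>

/* Derive per-plane sizes from ascending offsets within one dmabuf, as
 * import_into_minigbm() now does. */
static void plane_sizes_from_offsets(const uint32_t *offsets, uint32_t *sizes,
				     int num_planes, off_t dmabuf_size)
{
	for (int i = 1; i < num_planes; i++)
		sizes[i - 1] = offsets[i] - offsets[i - 1];
	sizes[num_planes - 1] = (uint32_t)(dmabuf_size - offsets[num_planes - 1]);
}

/* e.g. NV12 1280x720 with offsets {0, 921600} and dmabuf_size 1382400
 * yields sizes {921600, 460800}. */
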
diff --git a/drv.c b/drv.c
index cbd7b4b..a2eef82 100644
--- a/drv.c
+++ b/drv.c
@@ -18,6 +18,9 @@
#ifdef __ANDROID__
#include <cutils/log.h>
#include <libgen.h>
+#define MINIGBM_DEBUG "vendor.minigbm.debug"
+#else
+#define MINIGBM_DEBUG "MINIGBM_DEBUG"
#endif
#include "drv_helpers.h"
@@ -113,8 +116,8 @@ struct driver *drv_create(int fd)
if (!drv)
return NULL;
- char *minigbm_debug;
- minigbm_debug = getenv("MINIGBM_DEBUG");
+ const char *minigbm_debug;
+ minigbm_debug = drv_get_os_option(MINIGBM_DEBUG);
drv->compression = (minigbm_debug == NULL) || (strcmp(minigbm_debug, "nocompression") != 0);
drv->fd = fd;
@@ -250,7 +253,7 @@ static void drv_bo_mapping_destroy(struct bo *bo)
while (idx < drv_array_size(drv->mappings)) {
struct mapping *mapping =
(struct mapping *)drv_array_at_idx(drv->mappings, idx);
- if (mapping->vma->handle != bo->handles[plane].u32) {
+ if (mapping->vma->handle != bo->handle.u32) {
idx++;
continue;
}
@@ -285,10 +288,10 @@ static void drv_bo_acquire(struct bo *bo)
for (size_t plane = 0; plane < bo->meta.num_planes; plane++) {
uintptr_t num = 0;
- if (!drmHashLookup(drv->buffer_table, bo->handles[plane].u32, (void **)&num))
- drmHashDelete(drv->buffer_table, bo->handles[plane].u32);
+ if (!drmHashLookup(drv->buffer_table, bo->handle.u32, (void **)&num))
+ drmHashDelete(drv->buffer_table, bo->handle.u32);
- drmHashInsert(drv->buffer_table, bo->handles[plane].u32, (void *)(num + 1));
+ drmHashInsert(drv->buffer_table, bo->handle.u32, (void *)(num + 1));
}
pthread_mutex_unlock(&drv->buffer_table_lock);
}
@@ -307,19 +310,18 @@ static bool drv_bo_release(struct bo *bo)
pthread_mutex_lock(&drv->buffer_table_lock);
for (size_t plane = 0; plane < bo->meta.num_planes; plane++) {
- if (!drmHashLookup(drv->buffer_table, bo->handles[plane].u32, (void **)&num)) {
- drmHashDelete(drv->buffer_table, bo->handles[plane].u32);
+ if (!drmHashLookup(drv->buffer_table, bo->handle.u32, (void **)&num)) {
+ drmHashDelete(drv->buffer_table, bo->handle.u32);
if (num > 1) {
- drmHashInsert(drv->buffer_table, bo->handles[plane].u32,
- (void *)(num - 1));
+ drmHashInsert(drv->buffer_table, bo->handle.u32, (void *)(num - 1));
}
}
}
/* The same buffer can back multiple planes with different offsets. */
for (size_t plane = 0; plane < bo->meta.num_planes; plane++) {
- if (!drmHashLookup(drv->buffer_table, bo->handles[plane].u32, (void **)&num)) {
+ if (!drmHashLookup(drv->buffer_table, bo->handle.u32, (void **)&num)) {
/* num is positive if found in the hashmap. */
pthread_mutex_unlock(&drv->buffer_table_lock);
return false;
@@ -491,8 +493,7 @@ void *drv_bo_map(struct bo *bo, const struct rectangle *rect, uint32_t map_flags
for (i = 0; i < drv_array_size(drv->mappings); i++) {
struct mapping *prior = (struct mapping *)drv_array_at_idx(drv->mappings, i);
- if (prior->vma->handle != bo->handles[plane].u32 ||
- prior->vma->map_flags != map_flags)
+ if (prior->vma->handle != bo->handle.u32 || prior->vma->map_flags != map_flags)
continue;
if (rect->x != prior->rect.x || rect->y != prior->rect.y ||
@@ -506,8 +507,7 @@ void *drv_bo_map(struct bo *bo, const struct rectangle *rect, uint32_t map_flags
for (i = 0; i < drv_array_size(drv->mappings); i++) {
struct mapping *prior = (struct mapping *)drv_array_at_idx(drv->mappings, i);
- if (prior->vma->handle != bo->handles[plane].u32 ||
- prior->vma->map_flags != map_flags)
+ if (prior->vma->handle != bo->handle.u32 || prior->vma->map_flags != map_flags)
continue;
prior->vma->refcount++;
@@ -533,7 +533,7 @@ void *drv_bo_map(struct bo *bo, const struct rectangle *rect, uint32_t map_flags
mapping.vma->refcount = 1;
mapping.vma->addr = addr;
- mapping.vma->handle = bo->handles[plane].u32;
+ mapping.vma->handle = bo->handle.u32;
mapping.vma->map_flags = map_flags;
success:
@@ -639,7 +639,7 @@ size_t drv_bo_get_num_planes(struct bo *bo)
union bo_handle drv_bo_get_plane_handle(struct bo *bo, size_t plane)
{
- return bo->handles[plane];
+ return bo->handle;
}
#ifndef DRM_RDWR
@@ -655,11 +655,11 @@ int drv_bo_get_plane_fd(struct bo *bo, size_t plane)
if (bo->is_test_buffer)
return -EINVAL;
- ret = drmPrimeHandleToFD(bo->drv->fd, bo->handles[plane].u32, DRM_CLOEXEC | DRM_RDWR, &fd);
+ ret = drmPrimeHandleToFD(bo->drv->fd, bo->handle.u32, DRM_CLOEXEC | DRM_RDWR, &fd);
// Older DRM implementations blocked DRM_RDWR, but gave a read/write mapping anyways
if (ret)
- ret = drmPrimeHandleToFD(bo->drv->fd, bo->handles[plane].u32, DRM_CLOEXEC, &fd);
+ ret = drmPrimeHandleToFD(bo->drv->fd, bo->handle.u32, DRM_CLOEXEC, &fd);
if (ret)
drv_loge("Failed to get plane fd: %s\n", strerror(errno));
@@ -727,25 +727,6 @@ void drv_resolve_format_and_use_flags(struct driver *drv, uint32_t format, uint6
out_use_flags);
}
-uint32_t drv_num_buffers_per_bo(struct bo *bo)
-{
- uint32_t count = 0;
- size_t plane, p;
-
- if (bo->is_test_buffer)
- return 0;
-
- for (plane = 0; plane < bo->meta.num_planes; plane++) {
- for (p = 0; p < plane; p++)
- if (bo->handles[p].u32 == bo->handles[plane].u32)
- break;
- if (p == plane)
- count++;
- }
-
- return count;
-}
-
void drv_log_prefix(enum drv_log_level level, const char *prefix, const char *file, int line,
const char *format, ...)
{
diff --git a/drv.h b/drv.h
index b824fc5..106c29c 100644
--- a/drv.h
+++ b/drv.h
@@ -207,9 +207,6 @@ uint64_t drv_resolve_use_flags(struct driver *drv, uint32_t format, uint64_t use
size_t drv_num_planes_from_format(uint32_t format);
size_t drv_num_planes_from_modifier(struct driver *drv, uint32_t format, uint64_t modifier);
-
-uint32_t drv_num_buffers_per_bo(struct bo *bo);
-
int drv_resource_info(struct bo *bo, uint32_t strides[DRV_MAX_PLANES],
uint32_t offsets[DRV_MAX_PLANES], uint64_t *format_modifier);
@@ -228,7 +225,11 @@ enum drv_log_level {
} while (0)
#define drv_loge(format, ...) _drv_log(DRV_LOGE, format, ##__VA_ARGS__)
+#ifdef NDEBUG
+#define drv_logv(format, ...)
+#else
#define drv_logv(format, ...) _drv_log(DRV_LOGV, format, ##__VA_ARGS__)
+#endif
#define drv_logd(format, ...) _drv_log(DRV_LOGD, format, ##__VA_ARGS__)
#define drv_logi(format, ...) _drv_log(DRV_LOGI, format, ##__VA_ARGS__)
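
One consequence of the NDEBUG branch above: an empty macro body removes the call and its argument evaluation entirely, so verbose-log arguments must be side-effect free in release builds. A hedged sketch of the pattern with a hypothetical logv macro:

#include <stdio.h>

#ifdef NDEBUG
#define logv(fmt, ...) /* compiled out entirely, arguments included */
#else
#define logv(fmt, ...) fprintf(stderr, fmt, ##__VA_ARGS__)
#endif

int main(void)
{
	int count = 0;
	logv("count is %d\n", ++count); /* ++count never executes under NDEBUG */
	printf("%d\n", count);          /* 0 with NDEBUG, 1 without */
	return 0;
}
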
diff --git a/drv_helpers.c b/drv_helpers.c
index cd51881..c810d8e 100644
--- a/drv_helpers.c
+++ b/drv_helpers.c
@@ -16,6 +16,10 @@
#include <unistd.h>
#include <xf86drm.h>
+#ifdef __ANDROID__
+#include <cutils/properties.h>
+#endif
+
#include "drv_priv.h"
#include "util.h"
@@ -329,7 +333,6 @@ int drv_dumb_bo_create_ex(struct bo *bo, uint32_t width, uint32_t height, uint32
uint64_t use_flags, uint64_t quirks)
{
int ret;
- size_t plane;
uint32_t aligned_width, aligned_height;
struct drm_mode_create_dumb create_dumb = { 0 };
@@ -387,8 +390,7 @@ int drv_dumb_bo_create_ex(struct bo *bo, uint32_t width, uint32_t height, uint32
drv_bo_from_format(bo, create_dumb.pitch, 1, height, format);
- for (plane = 0; plane < bo->meta.num_planes; plane++)
- bo->handles[plane].u32 = create_dumb.handle;
+ bo->handle.u32 = create_dumb.handle;
bo->meta.total_size = create_dumb.size;
return 0;
@@ -405,44 +407,38 @@ int drv_dumb_bo_destroy(struct bo *bo)
int ret;
struct drm_mode_destroy_dumb destroy_dumb = { 0 };
- destroy_dumb.handle = bo->handles[0].u32;
+ destroy_dumb.handle = bo->handle.u32;
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy_dumb);
if (ret) {
- drv_loge("DRM_IOCTL_MODE_DESTROY_DUMB failed (handle=%x)\n", bo->handles[0].u32);
+ drv_loge("DRM_IOCTL_MODE_DESTROY_DUMB failed (handle=%x)\n", bo->handle.u32);
return -errno;
}
return 0;
}
-int drv_gem_bo_destroy(struct bo *bo)
+int drv_gem_close(struct driver *drv, uint32_t gem_handle)
{
struct drm_gem_close gem_close;
int ret, error = 0;
- size_t plane, i;
-
- for (plane = 0; plane < bo->meta.num_planes; plane++) {
- for (i = 0; i < plane; i++)
- if (bo->handles[i].u32 == bo->handles[plane].u32)
- break;
- /* Make sure close hasn't already been called on this handle */
- if (i != plane)
- continue;
- memset(&gem_close, 0, sizeof(gem_close));
- gem_close.handle = bo->handles[plane].u32;
+ memset(&gem_close, 0, sizeof(gem_close));
+ gem_close.handle = gem_handle;
- ret = drmIoctl(bo->drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
- if (ret) {
- drv_loge("DRM_IOCTL_GEM_CLOSE failed (handle=%x) error %d\n",
- bo->handles[plane].u32, ret);
- error = -errno;
- }
+ ret = drmIoctl(drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
+ if (ret) {
+ drv_loge("DRM_IOCTL_GEM_CLOSE failed (handle=%x) error %d\n", gem_handle, ret);
+ error = -errno;
}
return error;
}
+int drv_gem_bo_destroy(struct bo *bo)
+{
+ return drv_gem_close(bo->drv, bo->handle.u32);
+}
+
int drv_prime_bo_import(struct bo *bo, struct drv_import_fd_data *data)
{
int ret;
@@ -455,21 +451,20 @@ int drv_prime_bo_import(struct bo *bo, struct drv_import_fd_data *data)
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &prime_handle);
+ if (plane > 0 && !ret && bo->handle.u32 != prime_handle.handle) {
+ drv_gem_close(bo->drv, prime_handle.handle);
+ ret = -1;
+ errno = EINVAL;
+ }
+
if (ret) {
drv_loge("DRM_IOCTL_PRIME_FD_TO_HANDLE failed (fd=%u)\n", prime_handle.fd);
-
- /*
- * Need to call GEM close on planes that were opened,
- * if any. Adjust the num_planes variable to be the
- * plane that failed, so GEM close will be called on
- * planes before that plane.
- */
- bo->meta.num_planes = plane;
- drv_gem_bo_destroy(bo);
+ if (plane > 0)
+ drv_gem_close(bo->drv, bo->handle.u32);
return -errno;
}
- bo->handles[plane].u32 = prime_handle.handle;
+ bo->handle.u32 = prime_handle.handle;
}
bo->meta.tiling = data->tiling;
@@ -483,7 +478,7 @@ void *drv_dumb_bo_map(struct bo *bo, struct vma *vma, uint32_t map_flags)
struct drm_mode_map_dumb map_dumb;
memset(&map_dumb, 0, sizeof(map_dumb));
- map_dumb.handle = bo->handles[0].u32;
+ map_dumb.handle = bo->handle.u32;
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MODE_MAP_DUMB, &map_dumb);
if (ret) {
@@ -620,3 +615,66 @@ void drv_resolve_format_and_use_flags_helper(struct driver *drv, uint32_t format
break;
}
}
+
+const char *drv_get_os_option(const char *name)
+{
+ const char *ret = getenv(name);
+#ifdef __ANDROID__
+ if (!ret) {
+ static char prop[PROPERTY_VALUE_MAX];
+ return property_get(name, prop, NULL) > 1 ? prop : NULL;
+ }
+#endif
+ return ret;
+}
+
+static void lru_remove_entry(struct lru_entry *entry)
+{
+ entry->prev->next = entry->next;
+ entry->next->prev = entry->prev;
+}
+
+static void lru_link_entry(struct lru *lru, struct lru_entry *entry)
+{
+ struct lru_entry *head = &lru->head;
+ entry->prev = head;
+ entry->next = head->next;
+
+ head->next->prev = entry;
+ head->next = entry;
+}
+
+struct lru_entry *lru_find(struct lru *lru, bool (*eq)(struct lru_entry *e, void *data), void *data)
+{
+ struct lru_entry *head = &lru->head;
+ struct lru_entry *cur = head->next;
+
+ while (cur != head) {
+ if (eq(cur, data)) {
+ lru_remove_entry(cur);
+ lru_link_entry(lru, cur);
+ return cur;
+ }
+ cur = cur->next;
+ }
+
+ return NULL;
+}
+
+void lru_insert(struct lru *lru, struct lru_entry *entry)
+{
+ if (lru->count == lru->max) {
+ lru_remove_entry(lru->head.prev);
+ } else {
+ lru->count++;
+ }
+ lru_link_entry(lru, entry);
+}
+
+void lru_init(struct lru *lru, int max)
+{
+ lru->head.next = &lru->head;
+ lru->head.prev = &lru->head;
+ lru->count = 0;
+ lru->max = max;
+}
diff --git a/drv_helpers.h b/drv_helpers.h
index 0ea9ba9..873bd08 100644
--- a/drv_helpers.h
+++ b/drv_helpers.h
@@ -31,6 +31,7 @@ int drv_dumb_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t
int drv_dumb_bo_create_ex(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
uint64_t use_flags, uint64_t quirks);
int drv_dumb_bo_destroy(struct bo *bo);
+int drv_gem_close(struct driver *drv, uint32_t gem_handle);
int drv_gem_bo_destroy(struct bo *bo);
int drv_prime_bo_import(struct bo *bo, struct drv_import_fd_data *data);
void *drv_dumb_bo_map(struct bo *bo, struct vma *vma, uint32_t map_flags);
@@ -50,4 +51,25 @@ void drv_resolve_format_and_use_flags_helper(struct driver *drv, uint32_t format
uint64_t use_flags, uint32_t *out_format,
uint64_t *out_use_flags);
+/*
+ * Get an option. Should return NULL if specified option is not set.
+ */
+const char *drv_get_os_option(const char *name);
+
+struct lru_entry {
+ struct lru_entry *next;
+ struct lru_entry *prev;
+};
+
+struct lru {
+ struct lru_entry head;
+ int count;
+ int max;
+};
+
+struct lru_entry *lru_find(struct lru *lru, bool (*eq)(struct lru_entry *e, void *data),
+ void *data);
+void lru_insert(struct lru *lru, struct lru_entry *entry);
+void lru_init(struct lru *lru, int max);
+
#endif
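
The lru helpers declared above form an intrusive, fixed-capacity LRU: callers embed struct lru_entry as the first member of their own struct and cast back, the pattern virtgpu_virgl.c uses for its blob-metadata cache. A minimal usage sketch with a hypothetical entry type, assuming drv_helpers.h is included:

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

struct width_cache_entry {
	struct lru_entry entry; /* must be first so the casts below are valid */
	uint32_t width;
};

static bool width_eq(struct lru_entry *e, void *data)
{
	return ((struct width_cache_entry *)(void *)e)->width == *(uint32_t *)data;
}

static struct width_cache_entry *width_cache_lookup(struct lru *lru, uint32_t width)
{
	/* lru_find() also moves a hit to the front of the list. */
	return (struct width_cache_entry *)(void *)lru_find(lru, width_eq, &width);
}

static void width_cache_add(struct lru *lru, uint32_t width)
{
	struct width_cache_entry *e = calloc(1, sizeof(*e));
	e->width = width;
	/* At capacity, lru_insert() only unlinks the oldest entry; the
	 * caller still owns (and must eventually free) its memory. */
	lru_insert(lru, &e->entry);
}
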
diff --git a/drv_priv.h b/drv_priv.h
index a2ccdda..d9888d0 100644
--- a/drv_priv.h
+++ b/drv_priv.h
@@ -44,7 +44,7 @@ struct bo {
struct driver *drv;
struct bo_metadata meta;
bool is_test_buffer;
- union bo_handle handles[DRV_MAX_PLANES];
+ union bo_handle handle;
void *priv;
};
diff --git a/i915.c b/i915.c
index 8619580..b18a07e 100644
--- a/i915.c
+++ b/i915.c
@@ -246,6 +246,9 @@ static int i915_add_combinations(struct driver *drv)
BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER |
hw_protected);
+ /* P010 linear can be used for scanout too. */
+ drv_modify_combination(drv, DRM_FORMAT_P010, &metadata_linear, BO_USE_SCANOUT);
+
/* Android CTS tests require this. */
drv_add_combination(drv, DRM_FORMAT_BGR888, &metadata_linear, BO_USE_SW_MASK);
@@ -290,7 +293,7 @@ static int i915_add_combinations(struct driver *drv)
&metadata_4_tiled, render_not_linear);
drv_add_combinations(drv, scanout_render_formats,
ARRAY_SIZE(scanout_render_formats), &metadata_4_tiled,
- render_not_linear);
+ scanout_and_render_not_linear);
} else {
struct format_metadata metadata_y_tiled = { .tiling = I915_TILING_Y,
.priority = 3,
@@ -406,9 +409,14 @@ static void i915_clflush(void *start, size_t size)
__builtin_ia32_mfence();
while (p < end) {
+#if defined(__CLFLUSHOPT__)
+ __builtin_ia32_clflushopt(p);
+#else
__builtin_ia32_clflush(p);
+#endif
p = (void *)((uintptr_t)p + I915_CACHELINE_SIZE);
}
+ __builtin_ia32_mfence();
}
static int i915_init(struct driver *drv)
@@ -716,7 +724,6 @@ static int i915_bo_compute_metadata(struct bo *bo, uint32_t width, uint32_t heig
static int i915_bo_create_from_metadata(struct bo *bo)
{
int ret;
- size_t plane;
uint32_t gem_handle;
struct drm_i915_gem_set_tiling gem_set_tiling = { 0 };
struct i915_device *i915 = bo->drv->priv;
@@ -752,21 +759,20 @@ static int i915_bo_create_from_metadata(struct bo *bo)
gem_handle = gem_create.handle;
}
- for (plane = 0; plane < bo->meta.num_planes; plane++)
- bo->handles[plane].u32 = gem_handle;
+ bo->handle.u32 = gem_handle;
/* Set/Get tiling ioctl not supported based on fence availability
Refer : "https://patchwork.freedesktop.org/patch/325343/"
*/
if (i915->num_fences_avail) {
- gem_set_tiling.handle = bo->handles[0].u32;
+ gem_set_tiling.handle = bo->handle.u32;
gem_set_tiling.tiling_mode = bo->meta.tiling;
gem_set_tiling.stride = bo->meta.strides[0];
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_I915_GEM_SET_TILING, &gem_set_tiling);
if (ret) {
struct drm_gem_close gem_close = { 0 };
- gem_close.handle = bo->handles[0].u32;
+ gem_close.handle = bo->handle.u32;
drmIoctl(bo->drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
drv_loge("DRM_IOCTL_I915_GEM_SET_TILING failed with %d\n", errno);
@@ -800,7 +806,7 @@ static int i915_bo_import(struct bo *bo, struct drv_import_fd_data *data)
*/
if (i915->num_fences_avail) {
/* TODO(gsingh): export modifiers and get rid of backdoor tiling. */
- gem_get_tiling.handle = bo->handles[0].u32;
+ gem_get_tiling.handle = bo->handle.u32;
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_I915_GEM_GET_TILING, &gem_get_tiling);
if (ret) {
@@ -827,7 +833,7 @@ static void *i915_bo_map(struct bo *bo, struct vma *vma, uint32_t map_flags)
if (bo->meta.tiling == I915_TILING_NONE) {
if (i915->has_mmap_offset) {
struct drm_i915_gem_mmap_offset gem_map = { 0 };
- gem_map.handle = bo->handles[0].u32;
+ gem_map.handle = bo->handle.u32;
gem_map.flags = I915_MMAP_OFFSET_WB;
/* Get the fake offset back */
@@ -850,7 +856,7 @@ static void *i915_bo_map(struct bo *bo, struct vma *vma, uint32_t map_flags)
(BO_USE_RENDERSCRIPT | BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE)))
gem_map.flags = I915_MMAP_WC;
- gem_map.handle = bo->handles[0].u32;
+ gem_map.handle = bo->handle.u32;
gem_map.offset = 0;
gem_map.size = bo->meta.total_size;
@@ -870,7 +876,7 @@ static void *i915_bo_map(struct bo *bo, struct vma *vma, uint32_t map_flags)
if (addr == MAP_FAILED) {
struct drm_i915_gem_mmap_gtt gem_map = { 0 };
- gem_map.handle = bo->handles[0].u32;
+ gem_map.handle = bo->handle.u32;
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &gem_map);
if (ret) {
drv_loge("DRM_IOCTL_I915_GEM_MMAP_GTT failed\n");
@@ -895,7 +901,7 @@ static int i915_bo_invalidate(struct bo *bo, struct mapping *mapping)
int ret;
struct drm_i915_gem_set_domain set_domain = { 0 };
- set_domain.handle = bo->handles[0].u32;
+ set_domain.handle = bo->handle.u32;
if (bo->meta.tiling == I915_TILING_NONE) {
set_domain.read_domains = I915_GEM_DOMAIN_CPU;
if (mapping->vma->map_flags & BO_MAP_WRITE)
diff --git a/mediatek.c b/mediatek.c
index 113273b..2d60462 100644
--- a/mediatek.c
+++ b/mediatek.c
@@ -29,10 +29,11 @@
#if defined(MTK_MT8183) || \
defined(MTK_MT8186)
// clang-format on
-#define SUPPORTS_YUV422
+#define SUPPORT_YUV422
#endif
-// All platforms except MT8173 should USE_NV12_FOR_HW_VIDEO_DECODING.
+// All platforms except MT8173 should USE_NV12_FOR_HW_VIDEO_DECODING
+// and SUPPORT_FP16_AND_10BIT_ABGR
// clang-format off
#if defined(MTK_MT8183) || \
defined(MTK_MT8186) || \
@@ -41,6 +42,7 @@
defined(MTK_MT8195)
// clang-format on
#define USE_NV12_FOR_HW_VIDEO_DECODING
+#define SUPPORT_FP16_AND_10BIT_ABGR
#else
#define DONT_USE_64_ALIGNMENT_FOR_VIDEO_BUFFERS
#endif
@@ -65,12 +67,14 @@ static const uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMA
// clang-format off
static const uint32_t texture_source_formats[] = {
-#ifdef SUPPORTS_YUV422
+#ifdef SUPPORT_YUV422
DRM_FORMAT_NV21,
DRM_FORMAT_YUYV,
#endif
+#ifdef SUPPORT_FP16_AND_10BIT_ABGR
DRM_FORMAT_ABGR2101010,
DRM_FORMAT_ABGR16161616F,
+#endif
DRM_FORMAT_NV12,
DRM_FORMAT_YVU420,
DRM_FORMAT_YVU420_ANDROID
@@ -100,17 +104,18 @@ static int mediatek_init(struct driver *drv)
struct format_metadata metadata;
drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &LINEAR_METADATA, BO_USE_RENDER_MASK | BO_USE_SCANOUT);
+ &LINEAR_METADATA,
+ BO_USE_RENDER_MASK | BO_USE_SCANOUT | BO_USE_PROTECTED);
drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
- &LINEAR_METADATA, BO_USE_TEXTURE_MASK);
+ &LINEAR_METADATA, BO_USE_TEXTURE_MASK | BO_USE_PROTECTED);
- drv_add_combination(drv, DRM_FORMAT_R8, &LINEAR_METADATA, BO_USE_SW_MASK | BO_USE_LINEAR);
+ drv_add_combination(drv, DRM_FORMAT_R8, &LINEAR_METADATA, BO_USE_SW_MASK | BO_USE_LINEAR | BO_USE_PROTECTED);
/* YUYV format for video overlay and camera subsystem. */
drv_add_combination(drv, DRM_FORMAT_YUYV, &LINEAR_METADATA,
BO_USE_HW_VIDEO_DECODER | BO_USE_SCANOUT | BO_USE_LINEAR |
- BO_USE_TEXTURE);
+ BO_USE_TEXTURE | BO_USE_PROTECTED);
/* Android CTS tests require this. */
drv_add_combination(drv, DRM_FORMAT_BGR888, &LINEAR_METADATA, BO_USE_SW_MASK);
@@ -119,11 +124,11 @@ static int mediatek_init(struct driver *drv)
metadata.tiling = TILE_TYPE_LINEAR;
metadata.priority = 1;
metadata.modifier = DRM_FORMAT_MOD_LINEAR;
- drv_modify_combination(drv, DRM_FORMAT_YVU420, &metadata, BO_USE_HW_VIDEO_DECODER);
- drv_modify_combination(drv, DRM_FORMAT_YVU420_ANDROID, &metadata, BO_USE_HW_VIDEO_DECODER);
+ drv_modify_combination(drv, DRM_FORMAT_YVU420, &metadata, BO_USE_HW_VIDEO_DECODER | BO_USE_PROTECTED);
+ drv_modify_combination(drv, DRM_FORMAT_YVU420_ANDROID, &metadata, BO_USE_HW_VIDEO_DECODER | BO_USE_PROTECTED);
#ifdef USE_NV12_FOR_HW_VIDEO_DECODING
// TODO(hiroh): Switch to use NV12 for video decoder on MT8173 as well.
- drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata, BO_USE_HW_VIDEO_DECODER);
+ drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata, BO_USE_HW_VIDEO_DECODER | BO_USE_PROTECTED);
#endif
/*
@@ -170,6 +175,13 @@ static int mediatek_bo_create_with_modifiers(struct bo *bo, uint32_t width, uint
*/
const bool is_camera_preview =
(bo->meta.use_flags & BO_USE_SCANOUT) && (bo->meta.use_flags & BO_USE_CAMERA_WRITE);
+ const bool is_hw_video_encoder = bo->meta.use_flags & BO_USE_HW_VIDEO_ENCODER;
+ /*
+ * Android sends blobs for encoding in the shape of a single-row pixel buffer. Use R8 +
+ * single row as a proxy for Android HAL_PIXEL_FORMAT_BLOB until a drm equivalent is
+ * defined.
+ */
+ const bool is_format_blob = format == DRM_FORMAT_R8 && height == 1;
if (!drv_has_modifier(modifiers, count, DRM_FORMAT_MOD_LINEAR)) {
errno = EINVAL;
@@ -191,7 +203,7 @@ static int mediatek_bo_create_with_modifiers(struct bo *bo, uint32_t width, uint
stride = ALIGN(stride, 64);
#endif
- if ((bo->meta.use_flags & BO_USE_HW_VIDEO_ENCODER) || is_camera_preview) {
+ if ((is_hw_video_encoder && !is_format_blob) || is_camera_preview) {
uint32_t aligned_height = ALIGN(height, 32);
uint32_t padding[DRV_MAX_PLANES] = { 0 };
@@ -203,7 +215,23 @@ static int mediatek_bo_create_with_modifiers(struct bo *bo, uint32_t width, uint
drv_bo_from_format_and_padding(bo, stride, 1, aligned_height, format, padding);
} else {
-#ifdef SUPPORTS_YUV422
+#ifdef USE_EXTRA_PADDING_FOR_YVU420
+ /*
+ * Apply extra padding for YV12 if the height does not meet round up requirement and
+ * the image is to be sampled by gpu.
+ */
+ static const uint32_t required_round_up = 4;
+ const uint32_t height_mod = height % required_round_up;
+ const bool is_texture = bo->meta.use_flags & BO_USE_TEXTURE;
+ /*
+ * YVU420 and YVU420_ANDROID treatments have been aligned in mediatek backend. Check
+ * both since gbm frontend still maps linear YVU420 to YVU420_ANDROID for other hw
+ * backends.
+ */
+ const bool is_format_yv12 =
+ format == DRM_FORMAT_YVU420 || format == DRM_FORMAT_YVU420_ANDROID;
+#endif
+#ifdef SUPPORT_YUV422
/*
* JPEG Encoder Accelerator requires 16x16 alignment. We want the buffer
* from camera can be put in JEA directly so align the height to 16
@@ -215,17 +243,13 @@ static int mediatek_bo_create_with_modifiers(struct bo *bo, uint32_t width, uint
drv_bo_from_format(bo, stride, 1, height, format);
#ifdef USE_EXTRA_PADDING_FOR_YVU420
- /*
- * Apply extra padding for YV12 if the height does not meet round up requirement and
- * the image is to be sampled by gpu.
- */
- static const uint32_t required_round_up = 4;
- const uint32_t height_mod = height % required_round_up;
- if ((format == DRM_FORMAT_YVU420 || format == DRM_FORMAT_YVU420_ANDROID) &&
- (bo->meta.use_flags & BO_USE_TEXTURE) && height_mod) {
+ if (is_format_yv12 && is_texture && height_mod) {
const uint32_t height_padding = required_round_up - height_mod;
+ const uint32_t y_padding =
+ drv_size_from_format(format, bo->meta.strides[0], height_padding, 0);
const uint32_t u_padding =
drv_size_from_format(format, bo->meta.strides[2], height_padding, 2);
+ const uint32_t vu_size = drv_bo_get_plane_size(bo, 2) * 2;
bo->meta.total_size += u_padding;
@@ -233,9 +257,6 @@ static int mediatek_bo_create_with_modifiers(struct bo *bo, uint32_t width, uint
* Since we are not aligning Y, we must make sure that its padding fits
* inside the rest of the space allocated for the V/U planes.
*/
- const uint32_t y_padding =
- drv_size_from_format(format, bo->meta.strides[0], height_padding, 0);
- const uint32_t vu_size = drv_bo_get_plane_size(bo, 2) * 2;
if (y_padding > vu_size) {
/* Align with mali workaround to pad all 3 planes. */
bo->meta.total_size += y_padding + u_padding;
@@ -246,14 +267,17 @@ static int mediatek_bo_create_with_modifiers(struct bo *bo, uint32_t width, uint
gem_create.size = bo->meta.total_size;
+ /* Protected data buffers need to be allocated from GEM */
+ if (bo->meta.use_flags & BO_USE_PROTECTED)
+ gem_create.flags |= DRM_MTK_GEM_CREATE_ENCRYPTED;
+
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MTK_GEM_CREATE, &gem_create);
if (ret) {
drv_loge("DRM_IOCTL_MTK_GEM_CREATE failed (size=%" PRIu64 ")\n", gem_create.size);
return -errno;
}
- for (plane = 0; plane < bo->meta.num_planes; plane++)
- bo->handles[plane].u32 = gem_create.handle;
+ bo->handle.u32 = gem_create.handle;
return 0;
}
@@ -273,7 +297,7 @@ static void *mediatek_bo_map(struct bo *bo, struct vma *vma, uint32_t map_flags)
struct mediatek_private_map_data *priv;
void *addr = NULL;
- gem_map.handle = bo->handles[0].u32;
+ gem_map.handle = bo->handle.u32;
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MTK_GEM_MAP_OFFSET, &gem_map);
if (ret) {
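
The YV12 padding hunk above boils down to: always add one round of V/U-plane padding rows, and pad all three planes only when the Y padding cannot hide inside the space already allocated for V and U. A hedged sketch of that rule, with y_pad, u_pad, and vu_size standing in for the drv_size_from_format() and drv_bo_get_plane_size() results (which account for plane subsampling):

#include <stdint.h>

/* Extra bytes appended to total_size by the YV12 rule above. */
static uint32_t yv12_extra_padding(uint32_t y_pad, uint32_t u_pad, uint32_t vu_size)
{
	uint32_t extra = u_pad;
	/* Mali workaround: pad all three planes when the Y-plane padding
	 * does not fit inside the V/U region. */
	if (y_pad > vu_size)
		extra += y_pad + u_pad;
	return extra;
}
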
diff --git a/msm.c b/msm.c
index 0e86d95..d6f74cc 100644
--- a/msm.c
+++ b/msm.c
@@ -296,7 +296,6 @@ static int msm_bo_create_for_modifier(struct bo *bo, uint32_t width, uint32_t he
{
struct drm_msm_gem_new req = { 0 };
int ret;
- size_t i;
bo->meta.tiling = (modifier == DRM_FORMAT_MOD_QCOM_COMPRESSED) ? MSM_UBWC_TILING : 0;
msm_calculate_layout(bo);
@@ -310,12 +309,7 @@ static int msm_bo_create_for_modifier(struct bo *bo, uint32_t width, uint32_t he
return -errno;
}
- /*
- * Though we use only one plane, we need to set handle for
- * all planes to pass kernel checks
- */
- for (i = 0; i < bo->meta.num_planes; i++)
- bo->handles[i].u32 = req.handle;
+ bo->handle.u32 = req.handle;
bo->meta.format_modifier = modifier;
return 0;
@@ -360,7 +354,7 @@ static void *msm_bo_map(struct bo *bo, struct vma *vma, uint32_t map_flags)
if (bo->meta.format_modifier)
return MAP_FAILED;
- req.handle = bo->handles[0].u32;
+ req.handle = bo->handle.u32;
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MSM_GEM_INFO, &req);
if (ret) {
drv_loge("DRM_IOCLT_MSM_GEM_INFO failed with %s\n", strerror(errno));
diff --git a/rockchip.c b/rockchip.c
index 2dc7146..4b32fc5 100644
--- a/rockchip.c
+++ b/rockchip.c
@@ -118,7 +118,6 @@ static int rockchip_bo_create_with_modifiers(struct bo *bo, uint32_t width, uint
uint32_t count)
{
int ret;
- size_t plane;
struct drm_rockchip_gem_create gem_create = { 0 };
uint64_t afbc_modifier;
@@ -178,8 +177,7 @@ static int rockchip_bo_create_with_modifiers(struct bo *bo, uint32_t width, uint
return -errno;
}
- for (plane = 0; plane < bo->meta.num_planes; plane++)
- bo->handles[plane].u32 = gem_create.handle;
+ bo->handle.u32 = gem_create.handle;
return 0;
}
@@ -205,7 +203,7 @@ static void *rockchip_bo_map(struct bo *bo, struct vma *vma, uint32_t map_flags)
bo->meta.format_modifier == DRM_FORMAT_MOD_ROCKCHIP_AFBC)
return MAP_FAILED;
- gem_map.handle = bo->handles[0].u32;
+ gem_map.handle = bo->handle.u32;
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_ROCKCHIP_GEM_MAP_OFFSET, &gem_map);
if (ret) {
drv_loge("DRM_IOCTL_ROCKCHIP_GEM_MAP_OFFSET failed\n");
diff --git a/vc4.c b/vc4.c
index d53fabf..ab8e43e 100644
--- a/vc4.c
+++ b/vc4.c
@@ -74,8 +74,7 @@ static int vc4_bo_create_for_modifier(struct bo *bo, uint32_t width, uint32_t he
return -errno;
}
- for (plane = 0; plane < bo->meta.num_planes; plane++)
- bo->handles[plane].u32 = bo_create.handle;
+ bo->handle.u32 = bo_create.handle;
return 0;
}
@@ -110,7 +109,7 @@ static void *vc4_bo_map(struct bo *bo, struct vma *vma, uint32_t map_flags)
int ret;
struct drm_vc4_mmap_bo bo_map = { 0 };
- bo_map.handle = bo->handles[0].u32;
+ bo_map.handle = bo->handle.u32;
ret = drmCommandWriteRead(bo->drv->fd, DRM_VC4_MMAP_BO, &bo_map, sizeof(bo_map));
if (ret) {
drv_loge("DRM_VC4_MMAP_BO failed\n");
diff --git a/virtgpu_cross_domain.c b/virtgpu_cross_domain.c
index 45b5580..dc3fbc1 100644
--- a/virtgpu_cross_domain.c
+++ b/virtgpu_cross_domain.c
@@ -37,6 +37,7 @@ struct cross_domain_private {
void *ring_addr;
struct drv_array *metadata_cache;
pthread_mutex_t metadata_cache_lock;
+ bool mt8183_camera_quirk_;
};
static void cross_domain_release_private(struct driver *drv)
@@ -344,6 +345,10 @@ static int cross_domain_init(struct driver *drv)
if (ret < 0)
goto free_private;
+ const char *name;
+ name = drv_get_os_option("ro.product.name");
+ priv->mt8183_camera_quirk_ = name && !strcmp(name, "kukui");
+
// minigbm bookkeeping
add_combinations(drv);
return 0;
@@ -404,8 +409,7 @@ static int cross_domain_bo_create(struct bo *bo, uint32_t width, uint32_t height
return -errno;
}
- for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++)
- bo->handles[plane].u32 = drm_rc_blob.bo_handle;
+ bo->handle.u32 = drm_rc_blob.bo_handle;
return 0;
}
@@ -415,7 +419,7 @@ static void *cross_domain_bo_map(struct bo *bo, struct vma *vma, uint32_t map_fl
int ret;
struct drm_virtgpu_map gem_map = { 0 };
- gem_map.handle = bo->handles[0].u32;
+ gem_map.handle = bo->handle.u32;
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_MAP, &gem_map);
if (ret) {
drv_loge("DRM_IOCTL_VIRTGPU_MAP failed with %s\n", strerror(errno));
@@ -427,6 +431,42 @@ static void *cross_domain_bo_map(struct bo *bo, struct vma *vma, uint32_t map_fl
gem_map.offset);
}
+static void cross_domain_resolve_format_and_use_flags(struct driver *drv, uint32_t format,
+ uint64_t use_flags, uint32_t *out_format,
+ uint64_t *out_use_flags)
+{
+ struct cross_domain_private *priv = drv->priv;
+ *out_format = format;
+ *out_use_flags = use_flags;
+
+ switch (format) {
+ case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
+ if (priv->mt8183_camera_quirk_ && (use_flags & BO_USE_CAMERA_READ) &&
+ !(use_flags & BO_USE_SCANOUT)) {
+ *out_format = DRM_FORMAT_MTISP_SXYZW10;
+ break;
+ }
+ /* Common camera implementation defined format. */
+ if (use_flags & (BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE)) {
+ *out_format = DRM_FORMAT_NV12;
+ } else {
+ /* HACK: See b/28671744 */
+ *out_format = DRM_FORMAT_XBGR8888;
+ *out_use_flags &= ~BO_USE_HW_VIDEO_ENCODER;
+ }
+ break;
+ case DRM_FORMAT_FLEX_YCbCr_420_888:
+ /* Common flexible video format. */
+ *out_format = DRM_FORMAT_NV12;
+ break;
+ case DRM_FORMAT_YVU420_ANDROID:
+ *out_use_flags &= ~BO_USE_SCANOUT;
+ break;
+ default:
+ break;
+ }
+}
+
const struct backend virtgpu_cross_domain = {
.name = "virtgpu_cross_domain",
.init = cross_domain_init,
@@ -436,5 +476,5 @@ const struct backend virtgpu_cross_domain = {
.bo_destroy = drv_gem_bo_destroy,
.bo_map = cross_domain_bo_map,
.bo_unmap = drv_bo_munmap,
- .resolve_format_and_use_flags = drv_resolve_format_and_use_flags_helper,
+ .resolve_format_and_use_flags = cross_domain_resolve_format_and_use_flags,
};
diff --git a/virtgpu_virgl.c b/virtgpu_virgl.c
index 9474c40..b8517fc 100644
--- a/virtgpu_virgl.c
+++ b/virtgpu_virgl.c
@@ -7,6 +7,7 @@
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
+#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
@@ -53,11 +54,23 @@ static const uint32_t texture_source_formats[] = {
extern struct virtgpu_param params[];
+struct virgl_blob_metadata_cache {
+ struct lru_entry entry;
+ struct bo_metadata meta;
+};
+
+#define lru_entry_to_metadata(entry) ((struct virgl_blob_metadata_cache *)(void *)(entry))
+
+#define MAX_CACHED_FORMATS 128
+
struct virgl_priv {
int caps_is_v2;
union virgl_caps caps;
int host_gbm_enabled;
atomic_int next_blob_id;
+
+ pthread_mutex_t host_blob_format_lock;
+ struct lru virgl_blob_metadata_cache;
};
static uint32_t translate_format(uint32_t drm_fourcc)
@@ -499,8 +512,7 @@ static int virgl_3d_bo_create(struct bo *bo, uint32_t width, uint32_t height, ui
return ret;
}
- for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++)
- bo->handles[plane].u32 = res_create.bo_handle;
+ bo->handle.u32 = res_create.bo_handle;
return 0;
}
@@ -510,7 +522,7 @@ static void *virgl_3d_bo_map(struct bo *bo, struct vma *vma, uint32_t map_flags)
int ret;
struct drm_virtgpu_map gem_map = { 0 };
- gem_map.handle = bo->handles[0].u32;
+ gem_map.handle = bo->handle.u32;
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_MAP, &gem_map);
if (ret) {
drv_loge("DRM_IOCTL_VIRTGPU_MAP failed with %s\n", strerror(errno));
@@ -592,7 +604,12 @@ static int virgl_init(struct driver *drv)
if (!priv)
return -ENOMEM;
+ int ret = pthread_mutex_init(&priv->host_blob_format_lock, NULL);
+ if (ret)
+ return ret;
+
drv->priv = priv;
+ lru_init(&priv->virgl_blob_metadata_cache, MAX_CACHED_FORMATS);
virgl_init_params_and_caps(drv);
@@ -677,41 +694,56 @@ static void virgl_close(struct driver *drv)
drv->priv = NULL;
}
-static int virgl_bo_create_blob(struct driver *drv, struct bo *bo)
+static uint32_t blob_flags_from_use_flags(uint32_t use_flags)
{
- int ret;
- uint32_t stride;
- uint32_t cur_blob_id;
- uint32_t cmd[VIRGL_PIPE_RES_CREATE_SIZE + 1] = { 0 };
- struct drm_virtgpu_resource_create_blob drm_rc_blob = { 0 };
- struct virgl_priv *priv = (struct virgl_priv *)drv->priv;
-
uint32_t blob_flags = VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
- if (bo->meta.use_flags & (BO_USE_SW_MASK | BO_USE_GPU_DATA_BUFFER))
+ if (use_flags & (BO_USE_SW_MASK | BO_USE_GPU_DATA_BUFFER))
blob_flags |= VIRTGPU_BLOB_FLAG_USE_MAPPABLE;
// For now, all blob use cases are cross device. When we add wider
// support for blobs, we can revisit making this unconditional.
blob_flags |= VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE;
+ return blob_flags;
+}
+
+static bool virgl_blob_metadata_eq(struct lru_entry *entry, void *data)
+{
+ struct virgl_blob_metadata_cache *e = lru_entry_to_metadata(entry);
+ struct bo_metadata *meta = data;
+ uint32_t virgl_format1 = translate_format(e->meta.format);
+ uint32_t virgl_format2 = translate_format(meta->format);
+
+ return e->meta.height == meta->height && e->meta.width == meta->width &&
+ e->meta.use_flags == meta->use_flags && virgl_format1 == virgl_format2;
+}
+
+static int virgl_blob_do_create(struct driver *drv, uint32_t width, uint32_t height,
+ uint32_t use_flags, uint32_t virgl_format, uint32_t total_size,
+ uint32_t *bo_handle)
+{
+ int ret;
+ uint32_t cur_blob_id;
+ uint32_t cmd[VIRGL_PIPE_RES_CREATE_SIZE + 1] = { 0 };
+ struct drm_virtgpu_resource_create_blob drm_rc_blob = { 0 };
+ struct virgl_priv *priv = (struct virgl_priv *)drv->priv;
+ uint32_t virgl_bind_flags = compute_virgl_bind_flags(use_flags);
+ uint32_t blob_flags = blob_flags_from_use_flags(use_flags);
+
cur_blob_id = atomic_fetch_add(&priv->next_blob_id, 1);
- stride = drv_stride_from_format(bo->meta.format, bo->meta.width, 0);
- drv_bo_from_format(bo, stride, 1, bo->meta.height, bo->meta.format);
- bo->meta.total_size = ALIGN(bo->meta.total_size, PAGE_SIZE);
- bo->meta.tiling = blob_flags;
cmd[0] = VIRGL_CMD0(VIRGL_CCMD_PIPE_RESOURCE_CREATE, 0, VIRGL_PIPE_RES_CREATE_SIZE);
cmd[VIRGL_PIPE_RES_CREATE_TARGET] = PIPE_TEXTURE_2D;
- cmd[VIRGL_PIPE_RES_CREATE_WIDTH] = bo->meta.width;
- cmd[VIRGL_PIPE_RES_CREATE_HEIGHT] = bo->meta.height;
- cmd[VIRGL_PIPE_RES_CREATE_FORMAT] = translate_format(bo->meta.format);
- cmd[VIRGL_PIPE_RES_CREATE_BIND] = compute_virgl_bind_flags(bo->meta.use_flags);
+ cmd[VIRGL_PIPE_RES_CREATE_WIDTH] = width;
+ cmd[VIRGL_PIPE_RES_CREATE_HEIGHT] = height;
+ cmd[VIRGL_PIPE_RES_CREATE_FORMAT] = virgl_format;
+ cmd[VIRGL_PIPE_RES_CREATE_BIND] = virgl_bind_flags;
cmd[VIRGL_PIPE_RES_CREATE_DEPTH] = 1;
cmd[VIRGL_PIPE_RES_CREATE_BLOB_ID] = cur_blob_id;
drm_rc_blob.cmd = (uint64_t)&cmd;
drm_rc_blob.cmd_size = 4 * (VIRGL_PIPE_RES_CREATE_SIZE + 1);
- drm_rc_blob.size = bo->meta.total_size;
+ drm_rc_blob.size = total_size;
drm_rc_blob.blob_mem = VIRTGPU_BLOB_MEM_HOST3D;
drm_rc_blob.blob_flags = blob_flags;
drm_rc_blob.blob_id = cur_blob_id;
@@ -722,8 +754,119 @@ static int virgl_bo_create_blob(struct driver *drv, struct bo *bo)
return -errno;
}
- for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++)
- bo->handles[plane].u32 = drm_rc_blob.bo_handle;
+ *bo_handle = drm_rc_blob.bo_handle;
+ return 0;
+}
+
+// Queries the host layout for the requested buffer metadata.
+//
+// Of particular interest is total_size. This value is passed to the kernel when creating
+// a buffer via drm_virtgpu_resource_create_blob.size, to specify how much "vram" to
+// allocate for use when exposing the host buffer to the guest. As such, we need to know
+// this value before allocating a buffer to ensure that the full host buffer is actually
+// visible to the guest.
+//
+// Note that we can't reuse these test buffers as actual allocations because our guess for
+// total_size is insufficient if width!=stride or padding!=0.
+static int virgl_blob_get_host_format(struct driver *drv, struct bo_metadata *meta)
+{
+ struct virgl_priv *priv = (struct virgl_priv *)drv->priv;
+ int num_planes = drv_num_planes_from_format(meta->format);
+
+ pthread_mutex_lock(&priv->host_blob_format_lock);
+ if (meta->format == DRM_FORMAT_R8) {
+ meta->offsets[0] = 0;
+ meta->sizes[0] = meta->width;
+ meta->strides[0] = meta->width;
+ meta->total_size = meta->width;
+ } else {
+ uint32_t virgl_format = translate_format(meta->format);
+ struct virgl_blob_metadata_cache *entry;
+
+ entry = lru_entry_to_metadata(
+ lru_find(&priv->virgl_blob_metadata_cache, virgl_blob_metadata_eq, meta));
+
+ if (!entry) {
+ uint32_t total_size = 0;
+ for (int i = 0; i < num_planes; i++) {
+ uint32_t stride =
+ drv_stride_from_format(meta->format, meta->width, i);
+ total_size +=
+ drv_size_from_format(meta->format, stride, meta->height, i);
+ }
+
+ uint32_t handle;
+ int ret =
+ virgl_blob_do_create(drv, meta->width, meta->height, meta->use_flags,
+ virgl_format, total_size, &handle);
+ if (ret) {
+ pthread_mutex_unlock(&priv->host_blob_format_lock);
+ return ret;
+ }
+
+ struct drm_virtgpu_resource_info_cros info = { 0 };
+ info.bo_handle = handle;
+ info.type = VIRTGPU_RESOURCE_INFO_TYPE_EXTENDED;
+ int info_ret =
+ drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO_CROS, &info);
+
+ struct drm_gem_close gem_close = { 0 };
+ gem_close.handle = handle;
+ int close_ret = drmIoctl(drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
+ if (close_ret)
+ drv_loge("DRM_IOCTL_GEM_CLOSE failed (handle=%x) error %d\n",
+ handle, close_ret);
+
+ if (info_ret) {
+ pthread_mutex_unlock(&priv->host_blob_format_lock);
+ drv_loge("Getting resource info failed with %s\n", strerror(errno));
+ return info_ret;
+ }
+
+ entry = calloc(1, sizeof(*entry));
+ entry->meta = *meta;
+
+ for (int i = 0; i < num_planes; i++) {
+ entry->meta.strides[i] = info.strides[i];
+ entry->meta.sizes[i] =
+ info.strides[i] *
+ drv_height_from_format(meta->format, meta->height, i);
+ entry->meta.offsets[i] = info.offsets[i];
+ }
+ entry->meta.total_size =
+ entry->meta.offsets[num_planes - 1] + entry->meta.sizes[num_planes - 1];
+ entry->meta.format_modifier = info.format_modifier;
+
+ lru_insert(&priv->virgl_blob_metadata_cache, &entry->entry);
+ }
+
+ memcpy(meta->offsets, entry->meta.offsets, sizeof(meta->offsets));
+ memcpy(meta->sizes, entry->meta.sizes, sizeof(meta->sizes));
+ memcpy(meta->strides, entry->meta.strides, sizeof(meta->strides));
+ meta->total_size = entry->meta.total_size;
+ meta->format_modifier = entry->meta.format_modifier;
+ }
+ pthread_mutex_unlock(&priv->host_blob_format_lock);
+
+ meta->total_size = ALIGN(meta->total_size, PAGE_SIZE);
+ meta->tiling = blob_flags_from_use_flags(meta->use_flags);
+
+ return 0;
+}
+
+static int virgl_bo_create_blob(struct driver *drv, struct bo *bo)
+{
+ int ret;
+ uint32_t virgl_format = translate_format(bo->meta.format);
+ uint32_t bo_handle;
+
+ virgl_blob_get_host_format(drv, &bo->meta);
+ ret = virgl_blob_do_create(drv, bo->meta.width, bo->meta.height, bo->meta.use_flags,
+ virgl_format, bo->meta.total_size, &bo_handle);
+ if (ret)
+ return ret;
+
+ bo->handle.u32 = bo_handle;
return 0;
}
@@ -751,10 +894,17 @@ static bool should_use_blob(struct driver *drv, uint32_t format, uint64_t use_fl
case DRM_FORMAT_R8:
// Formats with strictly defined strides are supported
return true;
+ case DRM_FORMAT_ABGR8888:
+ // Formats used with frequent software reads are supported as long as they
+ // will not be used by non-GPU hardware.
+ return (use_flags & BO_USE_SW_READ_OFTEN) && !(use_flags & BO_USE_NON_GPU_HW);
case DRM_FORMAT_YVU420_ANDROID:
case DRM_FORMAT_NV12:
- // Knowing buffer metadata at buffer creation isn't yet supported, so buffers
- // can't be properly mapped into the guest.
+ // Zero copy buffers are exposed for guest software access via a persistent
+ // mapping, with no flush/invalidate messages. However, the virtio-video
+ // device relies transfers to/from the host waiting on implicit fences in
+ // the host kernel to synchronize with hardware output. As such, we can only
+ // use zero copy if the guest doesn't need software access.
return (use_flags & BO_USE_SW_MASK) == 0;
default:
return false;
@@ -815,7 +965,7 @@ static bool is_arc_screen_capture_bo(struct bo *bo)
(bo->meta.format != DRM_FORMAT_ABGR8888 && bo->meta.format != DRM_FORMAT_ARGB8888 &&
bo->meta.format != DRM_FORMAT_XRGB8888 && bo->meta.format != DRM_FORMAT_XBGR8888))
return false;
- prime_handle.handle = bo->handles[0].u32;
+ prime_handle.handle = bo->handle.u32;
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &prime_handle);
if (ret < 0)
return false;
@@ -1125,7 +1275,7 @@ static int virgl_resource_info(struct bo *bo, uint32_t strides[DRV_MAX_PLANES],
if (!params[param_3d].value)
return 0;
- res_info.bo_handle = bo->handles[0].u32;
+ res_info.bo_handle = bo->handle.u32;
res_info.type = VIRTGPU_RESOURCE_INFO_TYPE_EXTENDED;
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO_CROS, &res_info);
if (ret) {
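
For reference, the guest-side size guess that virgl_blob_get_host_format() feeds into the probe allocation is just the tightly packed per-plane sum; the host may then report a larger layout (stride or padding), which is why the probe allocation and metadata cache exist at all. A sketch using the minigbm helpers already seen in this patch:

/* First-guess blob size: sum of tightly packed plane sizes, as computed in
 * virgl_blob_get_host_format() before the probe allocation. */
static uint32_t guess_blob_size(uint32_t format, uint32_t width, uint32_t height)
{
	uint32_t total = 0;
	size_t num_planes = drv_num_planes_from_format(format);

	for (size_t i = 0; i < num_planes; i++) {
		uint32_t stride = drv_stride_from_format(format, width, i);
		total += drv_size_from_format(format, stride, height, i);
	}
	return total;
}
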