author     Jason Macnak <natsu@google.com>    2021-06-14 13:14:49 -0700
committer  Jason Macnak <natsu@google.com>    2021-06-14 13:14:49 -0700
commit     43750d7950263221139a60b3bcce6780318d7103 (patch)
tree       2c241a7ad325d9ec00b7dad5881fcb0111269335
parent     836a28c3bcb88eae49209f3cbe3b5e4ad2649e65 (diff)
parent     20e8ce7d5f3cfe3bc51c221e70693b6085914b9d (diff)
... to update Minigbm for Cuttlefish to pull in crrev.com/c/2946768 to support PixelFormat::RGBA_1010102.

Bug: b/179501410
Test: cts -m CtsMediaV2TestCases
Change-Id: Iad719b16b68a4a9bb15c535f2296e9476f9f0472
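
For context on the merge subject above: PixelFormat::RGBA_1010102 is requested by Android clients through AHardwareBuffer, and the Cuttlefish gralloc built from this tree services that request. A minimal sketch of the client side using only standard NDK calls; the dimensions and usage bits are illustrative assumptions, not taken from this change:

#include <android/hardware_buffer.h>

/* Sketch: request a 10:10:10:2 buffer; on Cuttlefish the minigbm-based
 * gralloc is expected to resolve this to DRM_FORMAT_ABGR2101010. */
static int alloc_rgba1010102(AHardwareBuffer **out)
{
	AHardwareBuffer_Desc desc = {
		.width = 1920,
		.height = 1080,
		.layers = 1,
		.format = AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM,
		.usage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE |
			 AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT,
	};
	return AHardwareBuffer_allocate(&desc, out); /* 0 on success */
}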
-rw-r--r--  Android.bp  29
-rw-r--r--  OWNERS  11
-rw-r--r--  amdgpu.c  20
-rw-r--r--  cros_gralloc/cros_gralloc_buffer.cc  5
-rw-r--r--  cros_gralloc/cros_gralloc_buffer.h  3
-rw-r--r--  cros_gralloc/cros_gralloc_driver.cc  15
-rw-r--r--  cros_gralloc/cros_gralloc_driver.h  2
-rw-r--r--  cros_gralloc/gralloc0/gralloc0.cc  110
-rw-r--r--  cros_gralloc/gralloc0/tests/gralloctest.c  20
-rw-r--r--  cros_gralloc/gralloc4/Android.bp  49
-rw-r--r--  cros_gralloc/gralloc4/android.hardware.graphics.allocator@4.0-service.minigbm_msm.rc  24
-rw-r--r--  dri.c  1
-rw-r--r--  drv.c  24
-rw-r--r--  drv.h  5
-rw-r--r--  drv_priv.h  19
-rw-r--r--  external/virtgpu_cross_domain_protocol.h  58
-rw-r--r--  external/virtgpu_drm.h  47
-rw-r--r--  gbm.c  5
-rw-r--r--  gbm.h  15
-rw-r--r--  gbm_helpers.c  2
-rw-r--r--  helpers.h  4
-rw-r--r--  i915.c  148
-rw-r--r--  mediatek.c  4
-rw-r--r--  minigbm_helpers.c  3
-rw-r--r--  msm.c  12
-rw-r--r--  tegra.c  357
-rw-r--r--  virtgpu.c  71
-rw-r--r--  virtgpu.h  25
-rw-r--r--  virtgpu_cross_domain.c  407
-rw-r--r--  virtgpu_virgl.c (renamed from virtio_gpu.c)  315
30 files changed, 1127 insertions(+), 683 deletions(-)
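
Two of the hunks below add client-visible GBM API: the GBM_BO_USE_FRONT_RENDERING flag (gbm.h, gbm_helpers.c) and gbm_bo_get_fd_for_plane() (gbm.c, gbm.h). A minimal sketch of how a GBM client could exercise both; the device node path and the omitted error handling are assumptions for brevity:

#include <fcntl.h>
#include <unistd.h>
#include <gbm.h>

/* Sketch: allocate a front-rendered buffer and export plane 0 as a dma-buf.
 * Front rendering tells the backend to pick a layout whose metadata cannot go
 * stale while the buffer is scanned out (e.g. msm drops UBWC compression). */
static int front_rendered_plane_fd(void)
{
	int drm_fd = open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);
	struct gbm_device *gbm = gbm_create_device(drm_fd);
	struct gbm_bo *bo = gbm_bo_create(gbm, 256, 256, GBM_FORMAT_XRGB8888,
					  GBM_BO_USE_FRONT_RENDERING);
	int plane_fd = gbm_bo_get_fd_for_plane(bo, 0); /* added in this change */

	gbm_bo_destroy(bo);
	gbm_device_destroy(gbm);
	close(drm_fd);
	return plane_fd;
}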
diff --git a/Android.bp b/Android.bp
index 437451a..e7dfff8 100644
--- a/Android.bp
+++ b/Android.bp
@@ -46,9 +46,10 @@ cc_defaults {
"mediatek.c",
"msm.c",
"rockchip.c",
- "tegra.c",
"vc4.c",
- "virtio_gpu.c",
+ "virtgpu.c",
+ "virtgpu_cross_domain.c",
+ "virtgpu_virgl.c",
],
cflags: [
@@ -146,8 +147,8 @@ cc_library {
],
}
-cc_library_static {
- name: "libminigbm_cros_gralloc",
+cc_defaults {
+ name: "libminigbm_cros_gralloc_defaults",
defaults: ["minigbm_cros_gralloc_defaults"],
shared_libs: ["liblog"],
static_libs: ["libdrm"],
@@ -155,6 +156,11 @@ cc_library_static {
export_include_dirs: ["."],
}
+cc_library_static {
+ name: "libminigbm_cros_gralloc",
+ defaults: ["libminigbm_cros_gralloc_defaults"],
+}
+
cc_library_shared {
name: "gralloc.minigbm",
defaults: ["minigbm_cros_gralloc_defaults"],
@@ -183,3 +189,18 @@ cc_library_shared {
cflags: ["-DDRV_MESON"],
srcs: ["cros_gralloc/gralloc0/gralloc0.cc"],
}
+
+cc_library_shared {
+ name: "gralloc.minigbm_msm",
+ defaults: ["minigbm_cros_gralloc_defaults"],
+ cflags: ["-DDRV_MSM"],
+ srcs: [
+ "cros_gralloc/gralloc0/gralloc0.cc",
+ ],
+}
+
+cc_library_static {
+ name: "libminigbm_cros_gralloc_msm",
+ defaults: ["libminigbm_cros_gralloc_defaults"],
+ cflags: ["-DDRV_MSM"],
+}
diff --git a/OWNERS b/OWNERS
index d9d5bf3..6a69a37 100644
--- a/OWNERS
+++ b/OWNERS
@@ -1,11 +1,10 @@
-dbehr@chromium.org
-dcastagna@chromium.org
-ddavenport@chromium.org
gurchetansingh@chromium.org
hoegsberg@chromium.org
-ihf@chromium.org
-lepton@chromium.org
marcheu@chromium.org
stevensd@chromium.org
tfiga@chromium.org
-tutankhamen@chromium.org
+
+# Also natsu@google.com is great for gralloc/Android stuff but doesn't have a
+# chromium account.
+# So any team members can +2
+*
diff --git a/amdgpu.c b/amdgpu.c
index 359a8c2..f987f6f 100644
--- a/amdgpu.c
+++ b/amdgpu.c
@@ -48,9 +48,10 @@ struct amdgpu_linear_vma_priv {
};
const static uint32_t render_target_formats[] = {
- DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888, DRM_FORMAT_RGB565,
- DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB8888, DRM_FORMAT_ABGR2101010,
- DRM_FORMAT_ARGB2101010, DRM_FORMAT_XBGR2101010, DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888, DRM_FORMAT_RGB565,
+ DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB8888, DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_ARGB2101010, DRM_FORMAT_XBGR2101010, DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_ABGR16161616F,
};
const static uint32_t texture_source_formats[] = {
@@ -354,10 +355,12 @@ static int amdgpu_init(struct driver *drv)
/* NV12 format for camera, display, decoding and encoding. */
drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata,
BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_SCANOUT |
- BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER);
+ BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER |
+ BO_USE_PROTECTED);
drv_modify_combination(drv, DRM_FORMAT_P010, &metadata,
- BO_USE_SCANOUT | BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER);
+ BO_USE_SCANOUT | BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER |
+ BO_USE_PROTECTED);
/* Android CTS tests require this. */
drv_add_combination(drv, DRM_FORMAT_BGR888, &metadata, BO_USE_SW_MASK);
@@ -471,6 +474,10 @@ static int amdgpu_create_bo_linear(struct bo *bo, uint32_t width, uint32_t heigh
if ((use_flags & BO_USE_SCANOUT) || !(use_flags & BO_USE_SW_READ_OFTEN))
gem_create.in.domain_flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+ /* For protected data Buffer needs to be allocated from TMZ */
+ if (use_flags & BO_USE_PROTECTED)
+ gem_create.in.domain_flags |= AMDGPU_GEM_CREATE_ENCRYPTED;
+
/* Allocate the buffer with the preferred heap. */
ret = drmCommandWriteRead(drv_get_fd(bo->drv), DRM_AMDGPU_GEM_CREATE, &gem_create,
sizeof(gem_create));
@@ -489,6 +496,7 @@ static int amdgpu_create_bo(struct bo *bo, uint32_t width, uint32_t height, uint
uint64_t use_flags)
{
struct combination *combo;
+ struct amdgpu_priv *priv = bo->drv->priv;
combo = drv_get_combination(bo->drv, format, use_flags);
if (!combo)
@@ -508,7 +516,7 @@ static int amdgpu_create_bo(struct bo *bo, uint32_t width, uint32_t height, uint
needs_alignment = true;
#endif
// See b/122049612
- if (use_flags & (BO_USE_SCANOUT))
+ if (use_flags & (BO_USE_SCANOUT) && priv->dev_info.family == AMDGPU_FAMILY_CZ)
needs_alignment = true;
if (needs_alignment) {
diff --git a/cros_gralloc/cros_gralloc_buffer.cc b/cros_gralloc/cros_gralloc_buffer.cc
index 783180f..2f4ceb0 100644
--- a/cros_gralloc/cros_gralloc_buffer.cc
+++ b/cros_gralloc/cros_gralloc_buffer.cc
@@ -116,9 +116,10 @@ int32_t cros_gralloc_buffer::unlock()
}
int32_t cros_gralloc_buffer::resource_info(uint32_t strides[DRV_MAX_PLANES],
- uint32_t offsets[DRV_MAX_PLANES])
+ uint32_t offsets[DRV_MAX_PLANES],
+ uint64_t *format_modifier)
{
- return drv_resource_info(bo_, strides, offsets);
+ return drv_resource_info(bo_, strides, offsets, format_modifier);
}
int32_t cros_gralloc_buffer::invalidate()
diff --git a/cros_gralloc/cros_gralloc_buffer.h b/cros_gralloc/cros_gralloc_buffer.h
index cb6cb4b..9bc0ef0 100644
--- a/cros_gralloc/cros_gralloc_buffer.h
+++ b/cros_gralloc/cros_gralloc_buffer.h
@@ -27,7 +27,8 @@ class cros_gralloc_buffer
int32_t lock(const struct rectangle *rect, uint32_t map_flags,
uint8_t *addr[DRV_MAX_PLANES]);
int32_t unlock();
- int32_t resource_info(uint32_t strides[DRV_MAX_PLANES], uint32_t offsets[DRV_MAX_PLANES]);
+ int32_t resource_info(uint32_t strides[DRV_MAX_PLANES], uint32_t offsets[DRV_MAX_PLANES],
+ uint64_t *format_modifier);
int32_t invalidate();
int32_t flush();
diff --git a/cros_gralloc/cros_gralloc_driver.cc b/cros_gralloc/cros_gralloc_driver.cc
index b383782..f0c0392 100644
--- a/cros_gralloc/cros_gralloc_driver.cc
+++ b/cros_gralloc/cros_gralloc_driver.cc
@@ -170,14 +170,6 @@ int32_t cros_gralloc_driver::allocate(const struct cros_gralloc_buffer_descripto
resolved_format = drv_resolve_format(drv_, descriptor->drm_format, descriptor->use_flags);
use_flags = descriptor->use_flags;
- /*
- * TODO(b/79682290): ARC++ assumes NV12 is always linear and doesn't
- * send modifiers across Wayland protocol, so we or in the
- * BO_USE_LINEAR flag here. We need to fix ARC++ to allocate and work
- * with tiled buffers.
- */
- if (resolved_format == DRM_FORMAT_NV12)
- use_flags |= BO_USE_LINEAR;
/*
* This unmask is a backup in the case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED is resolved
@@ -217,7 +209,7 @@ int32_t cros_gralloc_driver::allocate(const struct cros_gralloc_buffer_descripto
* native_handle_clone() copies data based on hnd->base.numInts.
*/
num_bytes = ALIGN(num_bytes, sizeof(int));
- num_ints = num_bytes - sizeof(native_handle_t) - num_fds;
+ num_ints = ((num_bytes - sizeof(native_handle_t)) / sizeof(int)) - num_fds;
hnd =
reinterpret_cast<struct cros_gralloc_handle *>(native_handle_create(num_fds, num_ints));
@@ -473,7 +465,8 @@ int32_t cros_gralloc_driver::get_backing_store(buffer_handle_t handle, uint64_t
}
int32_t cros_gralloc_driver::resource_info(buffer_handle_t handle, uint32_t strides[DRV_MAX_PLANES],
- uint32_t offsets[DRV_MAX_PLANES])
+ uint32_t offsets[DRV_MAX_PLANES],
+ uint64_t *format_modifier)
{
std::lock_guard<std::mutex> lock(mutex_);
@@ -489,7 +482,7 @@ int32_t cros_gralloc_driver::resource_info(buffer_handle_t handle, uint32_t stri
return -EINVAL;
}
- return buffer->resource_info(strides, offsets);
+ return buffer->resource_info(strides, offsets, format_modifier);
}
int32_t cros_gralloc_driver::get_reserved_region(buffer_handle_t handle,
diff --git a/cros_gralloc/cros_gralloc_driver.h b/cros_gralloc/cros_gralloc_driver.h
index 37692ac..ef9e21f 100644
--- a/cros_gralloc/cros_gralloc_driver.h
+++ b/cros_gralloc/cros_gralloc_driver.h
@@ -37,7 +37,7 @@ class cros_gralloc_driver
int32_t get_backing_store(buffer_handle_t handle, uint64_t *out_store);
int32_t resource_info(buffer_handle_t handle, uint32_t strides[DRV_MAX_PLANES],
- uint32_t offsets[DRV_MAX_PLANES]);
+ uint32_t offsets[DRV_MAX_PLANES], uint64_t *format_modifier);
int32_t get_reserved_region(buffer_handle_t handle, void **reserved_region_addr,
uint64_t *reserved_region_size);
diff --git a/cros_gralloc/gralloc0/gralloc0.cc b/cros_gralloc/gralloc0/gralloc0.cc
index bbf75a8..5899d5a 100644
--- a/cros_gralloc/gralloc0/gralloc0.cc
+++ b/cros_gralloc/gralloc0/gralloc0.cc
@@ -42,6 +42,22 @@ enum {
GRALLOC_DRM_GET_DIMENSIONS,
GRALLOC_DRM_GET_BACKING_STORE,
GRALLOC_DRM_GET_BUFFER_INFO,
+ GRALLOC_DRM_GET_USAGE,
+};
+
+/* This enumeration corresponds to the GRALLOC_DRM_GET_USAGE query op, which
+ * defines a set of bit flags used by the client to query vendor usage bits.
+ *
+ * Here is the common flow:
+ * 1) EGL/Vulkan calls GRALLOC_DRM_GET_USAGE to append one or multiple vendor
+ * usage bits to the existing usage and sets onto the ANativeWindow.
+ * 2) Some implicit GL draw cmd or the explicit vkCreateSwapchainKHR kicks off
+ * the next dequeueBuffer on the ANativeWindow with the combined usage.
+ * 3) dequeueBuffer then asks gralloc hal for an allocation/re-allocation, and
+ * calls into the below `gralloc0_alloc(...)` api.
+ */
+enum {
+ GRALLOC_DRM_GET_USAGE_FRONT_RENDERING_BIT = 0x00000001,
};
// clang-format on
@@ -50,6 +66,11 @@ enum {
// entirety, so we can detect the video decoder flag passed by IAllocator clients.
#define BUFFER_USAGE_VIDEO_DECODER (1 << 22)
+// Reserve the GRALLOC_USAGE_PRIVATE_0 bit for buffers used for front rendering.
+// minigbm backend later decides to use BO_USE_FRONT_RENDERING or BO_USE_LINEAR
+// upon buffer allocaton.
+#define BUFFER_USAGE_FRONT_RENDERING GRALLOC_USAGE_PRIVATE_0
+
static uint64_t gralloc0_convert_usage(int usage)
{
uint64_t use_flags = BO_USE_NONE;
@@ -97,6 +118,8 @@ static uint64_t gralloc0_convert_usage(int usage)
use_flags |= BO_USE_RENDERSCRIPT;
if (usage & BUFFER_USAGE_VIDEO_DECODER)
use_flags |= BO_USE_HW_VIDEO_DECODER;
+ if (usage & BUFFER_USAGE_FRONT_RENDERING)
+ use_flags |= BO_USE_FRONT_RENDERING;
return use_flags;
}
@@ -151,6 +174,11 @@ static int gralloc0_alloc(alloc_device_t *dev, int w, int h, int format, int usa
drv_log("Retrying format %u allocation without encoder flag", format);
supported = mod->driver->is_supported(&descriptor);
}
+ if (!supported && (usage & BUFFER_USAGE_FRONT_RENDERING)) {
+ descriptor.use_flags &= ~BO_USE_FRONT_RENDERING;
+ descriptor.use_flags |= BO_USE_LINEAR;
+ supported = mod->driver->is_supported(&descriptor);
+ }
if (!supported) {
drv_log("Unsupported combination -- HAL format: %u, HAL usage: %u, "
@@ -236,9 +264,10 @@ static int gralloc0_register_buffer(struct gralloc_module_t const *module, buffe
auto const_module = reinterpret_cast<const struct gralloc0_module *>(module);
auto mod = const_cast<struct gralloc0_module *>(const_module);
- if (!mod->initialized)
+ if (!mod->initialized) {
if (gralloc0_init(mod, false))
return -ENODEV;
+ }
return mod->driver->retain(handle);
}
@@ -276,11 +305,24 @@ static int gralloc0_perform(struct gralloc_module_t const *module, int op, ...)
int32_t *out_format, ret;
uint64_t *out_store;
buffer_handle_t handle;
+ cros_gralloc_handle_t hnd;
uint32_t *out_width, *out_height, *out_stride;
uint32_t strides[DRV_MAX_PLANES] = { 0, 0, 0, 0 };
uint32_t offsets[DRV_MAX_PLANES] = { 0, 0, 0, 0 };
+ uint64_t format_modifier = 0;
struct cros_gralloc0_buffer_info *info;
- auto mod = (struct gralloc0_module const *)module;
+ auto const_module = reinterpret_cast<const struct gralloc0_module *>(module);
+ auto mod = const_cast<struct gralloc0_module *>(const_module);
+ uint32_t req_usage;
+ uint32_t gralloc_usage = 0;
+ uint32_t *out_gralloc_usage;
+
+ if (!mod->initialized) {
+ if (gralloc0_init(mod, false))
+ return -ENODEV;
+ }
+
+ va_start(args, op);
switch (op) {
case GRALLOC_DRM_GET_STRIDE:
@@ -288,25 +330,27 @@ static int gralloc0_perform(struct gralloc_module_t const *module, int op, ...)
case GRALLOC_DRM_GET_DIMENSIONS:
case GRALLOC_DRM_GET_BACKING_STORE:
case GRALLOC_DRM_GET_BUFFER_INFO:
+ /* retrieve handles for ops with buffer_handle_t */
+ handle = va_arg(args, buffer_handle_t);
+ hnd = cros_gralloc_convert_handle(handle);
+ if (!hnd) {
+ va_end(args);
+ drv_log("Invalid handle.\n");
+ return -EINVAL;
+ }
+ break;
+ case GRALLOC_DRM_GET_USAGE:
break;
default:
+ va_end(args);
return -EINVAL;
}
- va_start(args, op);
-
ret = 0;
- handle = va_arg(args, buffer_handle_t);
- auto hnd = cros_gralloc_convert_handle(handle);
- if (!hnd) {
- drv_log("Invalid handle.\n");
- return -EINVAL;
- }
-
switch (op) {
case GRALLOC_DRM_GET_STRIDE:
out_stride = va_arg(args, uint32_t *);
- ret = mod->driver->resource_info(handle, strides, offsets);
+ ret = mod->driver->resource_info(handle, strides, offsets, &format_modifier);
if (ret)
break;
@@ -334,15 +378,32 @@ static int gralloc0_perform(struct gralloc_module_t const *module, int op, ...)
break;
case GRALLOC_DRM_GET_BUFFER_INFO:
info = va_arg(args, struct cros_gralloc0_buffer_info *);
+ memset(info, 0, sizeof(*info));
info->drm_fourcc = drv_get_standard_fourcc(hnd->format);
info->num_fds = hnd->num_planes;
- info->modifier = hnd->format_modifier;
+ ret = mod->driver->resource_info(handle, strides, offsets, &format_modifier);
+ if (ret)
+ break;
+
+ info->modifier = format_modifier ? format_modifier : hnd->format_modifier;
for (uint32_t i = 0; i < hnd->num_planes; i++) {
info->fds[i] = hnd->fds[i];
- info->offset[i] = hnd->offsets[i];
- info->stride[i] = hnd->strides[i];
+ if (strides[i]) {
+ info->stride[i] = strides[i];
+ info->offset[i] = offsets[i];
+ } else {
+ info->stride[i] = hnd->strides[i];
+ info->offset[i] = hnd->offsets[i];
+ }
}
break;
+ case GRALLOC_DRM_GET_USAGE:
+ req_usage = va_arg(args, uint32_t);
+ out_gralloc_usage = va_arg(args, uint32_t *);
+ if (req_usage & GRALLOC_DRM_GET_USAGE_FRONT_RENDERING_BIT)
+ gralloc_usage |= BUFFER_USAGE_FRONT_RENDERING;
+ *out_gralloc_usage = gralloc_usage;
+ break;
default:
ret = -EINVAL;
}
@@ -364,12 +425,18 @@ static int gralloc0_lock_async(struct gralloc_module_t const *module, buffer_han
int32_t ret;
uint32_t map_flags;
uint8_t *addr[DRV_MAX_PLANES];
- auto mod = (struct gralloc0_module const *)module;
+ auto const_module = reinterpret_cast<const struct gralloc0_module *>(module);
+ auto mod = const_cast<struct gralloc0_module *>(const_module);
struct rectangle rect = { .x = static_cast<uint32_t>(l),
.y = static_cast<uint32_t>(t),
.width = static_cast<uint32_t>(w),
.height = static_cast<uint32_t>(h) };
+ if (!mod->initialized) {
+ if (gralloc0_init(mod, false))
+ return -ENODEV;
+ }
+
auto hnd = cros_gralloc_convert_handle(handle);
if (!hnd) {
drv_log("Invalid handle.\n");
@@ -407,13 +474,20 @@ static int gralloc0_lock_async_ycbcr(struct gralloc_module_t const *module, buff
uint32_t map_flags;
uint32_t strides[DRV_MAX_PLANES] = { 0, 0, 0, 0 };
uint32_t offsets[DRV_MAX_PLANES] = { 0, 0, 0, 0 };
+ uint64_t format_modifier = 0;
uint8_t *addr[DRV_MAX_PLANES] = { nullptr, nullptr, nullptr, nullptr };
- auto mod = (struct gralloc0_module const *)module;
+ auto const_module = reinterpret_cast<const struct gralloc0_module *>(module);
+ auto mod = const_cast<struct gralloc0_module *>(const_module);
struct rectangle rect = { .x = static_cast<uint32_t>(l),
.y = static_cast<uint32_t>(t),
.width = static_cast<uint32_t>(w),
.height = static_cast<uint32_t>(h) };
+ if (!mod->initialized) {
+ if (gralloc0_init(mod, false))
+ return -ENODEV;
+ }
+
auto hnd = cros_gralloc_convert_handle(handle);
if (!hnd) {
drv_log("Invalid handle.\n");
@@ -437,7 +511,7 @@ static int gralloc0_lock_async_ycbcr(struct gralloc_module_t const *module, buff
return ret;
if (!map_flags) {
- ret = mod->driver->resource_info(handle, strides, offsets);
+ ret = mod->driver->resource_info(handle, strides, offsets, &format_modifier);
if (ret)
return ret;
diff --git a/cros_gralloc/gralloc0/tests/gralloctest.c b/cros_gralloc/gralloc0/tests/gralloctest.c
index c1b0a6e..eea36e4 100644
--- a/cros_gralloc/gralloc0/tests/gralloctest.c
+++ b/cros_gralloc/gralloc0/tests/gralloctest.c
@@ -41,12 +41,20 @@
} \
} while (0)
+#define BUFFER_USAGE_FRONT_RENDERING GRALLOC_USAGE_PRIVATE_0
+
/* Private API enumeration -- see <gralloc_drm.h> */
enum {
GRALLOC_DRM_GET_STRIDE,
GRALLOC_DRM_GET_FORMAT,
GRALLOC_DRM_GET_DIMENSIONS,
GRALLOC_DRM_GET_BACKING_STORE,
+ GRALLOC_DRM_GET_BUFFER_INFO,
+ GRALLOC_DRM_GET_USAGE,
+};
+
+enum {
+ GRALLOC_DRM_GET_USAGE_FRONT_RENDERING_BIT = 0x00000001,
};
struct gralloctest_context {
@@ -467,7 +475,7 @@ static int test_perform(struct gralloctest_context *ctx)
{
int32_t format;
uint64_t id1, id2;
- uint32_t stride, width, height;
+ uint32_t stride, width, height, req_usage, gralloc_usage;
struct grallocinfo info, duplicate;
struct gralloc_module_t *mod = ctx->module;
@@ -498,6 +506,16 @@ static int test_perform(struct gralloctest_context *ctx)
CHECK(unregister_buffer(mod, &duplicate));
CHECK(deallocate(ctx->device, &info));
+ req_usage = 0;
+ gralloc_usage = 0;
+ CHECK(mod->perform(mod, GRALLOC_DRM_GET_USAGE, req_usage, &gralloc_usage) == 0);
+ CHECK(gralloc_usage == 0);
+
+ req_usage = GRALLOC_DRM_GET_USAGE_FRONT_RENDERING_BIT;
+ gralloc_usage = 0;
+ CHECK(mod->perform(mod, GRALLOC_DRM_GET_USAGE, req_usage, &gralloc_usage) == 0);
+ CHECK(gralloc_usage == BUFFER_USAGE_FRONT_RENDERING);
+
return 1;
}
diff --git a/cros_gralloc/gralloc4/Android.bp b/cros_gralloc/gralloc4/Android.bp
index bbe70e1..4d2b542 100644
--- a/cros_gralloc/gralloc4/Android.bp
+++ b/cros_gralloc/gralloc4/Android.bp
@@ -23,11 +23,10 @@ package {
default_applicable_licenses: ["external_minigbm_license"],
}
-cc_binary {
- name: "android.hardware.graphics.allocator@4.0-service.minigbm",
+cc_defaults {
+ name: "service_minigbm_defaults",
relative_install_path: "hw",
vendor: true,
- init_rc: ["android.hardware.graphics.allocator@4.0-service.minigbm.rc"],
vintf_fragments: ["android.hardware.graphics.allocator@4.0.xml"],
@@ -50,7 +49,6 @@ cc_binary {
static_libs: [
"libdrm",
- "libminigbm_cros_gralloc",
],
srcs: [
@@ -60,8 +58,26 @@ cc_binary {
],
}
-cc_library_shared {
- name: "android.hardware.graphics.mapper@4.0-impl.minigbm",
+cc_binary {
+ name: "android.hardware.graphics.allocator@4.0-service.minigbm",
+ init_rc: ["android.hardware.graphics.allocator@4.0-service.minigbm.rc"],
+ defaults: ["service_minigbm_defaults"],
+ static_libs: [
+ "libminigbm_cros_gralloc",
+ ],
+}
+
+cc_binary {
+ name: "android.hardware.graphics.allocator@4.0-service.minigbm_msm",
+ init_rc: ["android.hardware.graphics.allocator@4.0-service.minigbm_msm.rc"],
+ defaults: ["service_minigbm_defaults"],
+ static_libs: [
+ "libminigbm_cros_gralloc_msm",
+ ],
+}
+
+cc_defaults {
+ name: "impl_minigbm_defaults",
relative_install_path: "hw",
vendor: true,
@@ -85,7 +101,6 @@ cc_library_shared {
static_libs: [
"libdrm",
- "libminigbm_cros_gralloc",
],
srcs: [
@@ -93,3 +108,23 @@ cc_library_shared {
"CrosGralloc4Utils.cc",
],
}
+
+cc_library_shared {
+ name: "android.hardware.graphics.mapper@4.0-impl.minigbm",
+ defaults: ["impl_minigbm_defaults"],
+
+ static_libs: [
+ "libminigbm_cros_gralloc",
+ ],
+
+}
+
+cc_library_shared {
+ name: "android.hardware.graphics.mapper@4.0-impl.minigbm_msm",
+ defaults: ["impl_minigbm_defaults"],
+
+ static_libs: [
+ "libminigbm_cros_gralloc_msm",
+ ],
+
+}
diff --git a/cros_gralloc/gralloc4/android.hardware.graphics.allocator@4.0-service.minigbm_msm.rc b/cros_gralloc/gralloc4/android.hardware.graphics.allocator@4.0-service.minigbm_msm.rc
new file mode 100644
index 0000000..dceb747
--- /dev/null
+++ b/cros_gralloc/gralloc4/android.hardware.graphics.allocator@4.0-service.minigbm_msm.rc
@@ -0,0 +1,24 @@
+#
+# Copyright 2020 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+service vendor.graphics.allocator-4-0 /vendor/bin/hw/android.hardware.graphics.allocator@4.0-service.minigbm_msm
+ interface android.hardware.graphics.allocator@4.0::IAllocator default
+ class hal animation
+ user system
+ group graphics drmrpc
+ capabilities SYS_NICE
+ onrestart restart surfaceflinger
+ writepid /dev/cpuset/system-background/tasks
diff --git a/dri.c b/dri.c
index ad4bf1e..13d4833 100644
--- a/dri.c
+++ b/dri.c
@@ -37,6 +37,7 @@ static const struct {
{ DRM_FORMAT_XBGR2101010, __DRI_IMAGE_FORMAT_XBGR2101010 },
{ DRM_FORMAT_ARGB2101010, __DRI_IMAGE_FORMAT_ARGB2101010 },
{ DRM_FORMAT_ABGR2101010, __DRI_IMAGE_FORMAT_ABGR2101010 },
+ { DRM_FORMAT_ABGR16161616F, __DRI_IMAGE_FORMAT_ABGR16161616F },
};
static int drm_format_to_dri_format(uint32_t drm_format)
diff --git a/drv.c b/drv.c
index 5489ee6..9b43e9f 100644
--- a/drv.c
+++ b/drv.c
@@ -43,9 +43,6 @@ extern const struct backend backend_msm;
#ifdef DRV_ROCKCHIP
extern const struct backend backend_rockchip;
#endif
-#ifdef DRV_TEGRA
-extern const struct backend backend_tegra;
-#endif
#ifdef DRV_VC4
extern const struct backend backend_vc4;
#endif
@@ -58,7 +55,7 @@ extern const struct backend backend_nouveau;
extern const struct backend backend_komeda;
extern const struct backend backend_radeon;
extern const struct backend backend_synaptics;
-extern const struct backend backend_virtio_gpu;
+extern const struct backend backend_virtgpu;
extern const struct backend backend_udl;
extern const struct backend backend_vkms;
@@ -94,21 +91,13 @@ static const struct backend *drv_get_backend(int fd)
#ifdef DRV_VC4
&backend_vc4,
#endif
- &backend_evdi, &backend_marvell, &backend_meson, &backend_nouveau,
- &backend_komeda, &backend_radeon, &backend_synaptics, &backend_virtio_gpu,
- &backend_udl, &backend_virtio_gpu, &backend_vkms
+ &backend_evdi, &backend_marvell, &backend_meson, &backend_nouveau,
+ &backend_komeda, &backend_radeon, &backend_synaptics, &backend_virtgpu,
+ &backend_udl, &backend_virtgpu, &backend_vkms
};
for (i = 0; i < ARRAY_SIZE(backend_list); i++) {
const struct backend *b = backend_list[i];
- // Exactly one of the main create functions must be defined.
- assert((b->bo_create != NULL) ^ (b->bo_create_from_metadata != NULL));
- // Either both or neither must be implemented.
- assert((b->bo_compute_metadata != NULL) == (b->bo_create_from_metadata != NULL));
- // Both can't be defined, but it's okay for neither to be (i.e. only bo_create).
- assert((b->bo_create_with_modifiers == NULL) ||
- (b->bo_create_from_metadata == NULL));
-
if (!strcmp(drm_version->name, b->name)) {
drmFreeVersion(drm_version);
return b;
@@ -694,15 +683,16 @@ void drv_log_prefix(const char *prefix, const char *file, int line, const char *
}
int drv_resource_info(struct bo *bo, uint32_t strides[DRV_MAX_PLANES],
- uint32_t offsets[DRV_MAX_PLANES])
+ uint32_t offsets[DRV_MAX_PLANES], uint64_t *format_modifier)
{
for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++) {
strides[plane] = bo->meta.strides[plane];
offsets[plane] = bo->meta.offsets[plane];
}
+ *format_modifier = bo->meta.format_modifier;
if (bo->drv->backend->resource_info)
- return bo->drv->backend->resource_info(bo, strides, offsets);
+ return bo->drv->backend->resource_info(bo, strides, offsets, format_modifier);
return 0;
}
diff --git a/drv.h b/drv.h
index 50ff6ab..4689558 100644
--- a/drv.h
+++ b/drv.h
@@ -37,7 +37,8 @@ extern "C" {
#define BO_USE_HW_VIDEO_DECODER (1ull << 13)
#define BO_USE_HW_VIDEO_ENCODER (1ull << 14)
#define BO_USE_TEST_ALLOC (1ull << 15)
-#define BO_USE_RENDERSCRIPT (1ull << 16)
+#define BO_USE_FRONT_RENDERING (1ull << 16)
+#define BO_USE_RENDERSCRIPT (1ull << 17)
/* Quirks for allocating a buffer. */
#define BO_QUIRK_NONE 0
@@ -183,7 +184,7 @@ size_t drv_num_planes_from_modifier(struct driver *drv, uint32_t format, uint64_
uint32_t drv_num_buffers_per_bo(struct bo *bo);
int drv_resource_info(struct bo *bo, uint32_t strides[DRV_MAX_PLANES],
- uint32_t offsets[DRV_MAX_PLANES]);
+ uint32_t offsets[DRV_MAX_PLANES], uint64_t *format_modifier);
#define drv_log(format, ...) \
do { \
diff --git a/drv_priv.h b/drv_priv.h
index 825ec79..c720077 100644
--- a/drv_priv.h
+++ b/drv_priv.h
@@ -27,6 +27,17 @@ struct bo_metadata {
uint64_t format_modifier;
uint64_t use_flags;
size_t total_size;
+
+ /*
+ * Most of the following metadata is virtgpu cross_domain specific. However, that backend
+ * needs to know traditional metadata (strides, offsets) in addition to this backend
+ * specific metadata. It's easiest just to stuff all the metadata here rather than
+ * having two metadata structs.
+ */
+ uint64_t blob_id;
+ uint32_t map_info;
+ int32_t memory_idx;
+ int32_t physical_device_idx;
};
struct bo {
@@ -82,20 +93,20 @@ struct backend {
uint32_t (*resolve_format)(struct driver *drv, uint32_t format, uint64_t use_flags);
size_t (*num_planes_from_modifier)(struct driver *drv, uint32_t format, uint64_t modifier);
int (*resource_info)(struct bo *bo, uint32_t strides[DRV_MAX_PLANES],
- uint32_t offsets[DRV_MAX_PLANES]);
+ uint32_t offsets[DRV_MAX_PLANES], uint64_t *format_modifier);
};
// clang-format off
#define BO_USE_RENDER_MASK (BO_USE_LINEAR | BO_USE_RENDERING | BO_USE_RENDERSCRIPT | \
BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN | BO_USE_SW_READ_RARELY | \
- BO_USE_SW_WRITE_RARELY | BO_USE_TEXTURE)
+ BO_USE_SW_WRITE_RARELY | BO_USE_TEXTURE | BO_USE_FRONT_RENDERING)
#define BO_USE_TEXTURE_MASK (BO_USE_LINEAR | BO_USE_RENDERSCRIPT | BO_USE_SW_READ_OFTEN | \
BO_USE_SW_WRITE_OFTEN | BO_USE_SW_READ_RARELY | \
- BO_USE_SW_WRITE_RARELY | BO_USE_TEXTURE)
+ BO_USE_SW_WRITE_RARELY | BO_USE_TEXTURE | BO_USE_FRONT_RENDERING)
#define BO_USE_SW_MASK (BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN | \
- BO_USE_SW_READ_RARELY | BO_USE_SW_WRITE_RARELY)
+ BO_USE_SW_READ_RARELY | BO_USE_SW_WRITE_RARELY | BO_USE_FRONT_RENDERING)
#define BO_USE_NON_GPU_HW (BO_USE_SCANOUT | BO_USE_CAMERA_WRITE | BO_USE_CAMERA_READ | \
BO_USE_HW_VIDEO_ENCODER | BO_USE_HW_VIDEO_DECODER)
diff --git a/external/virtgpu_cross_domain_protocol.h b/external/virtgpu_cross_domain_protocol.h
new file mode 100644
index 0000000..eaeebd3
--- /dev/null
+++ b/external/virtgpu_cross_domain_protocol.h
@@ -0,0 +1,58 @@
+// Copyright 2021 The Chromium OS Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef VIRTGPU_CROSS_DOMAIN_PROTOCOL_H
+#define VIRTGPU_CROSS_DOMAIN_PROTOCOL_H
+
+#include <stdint.h>
+
+// Cross-domain commands (only a maximum of 255 supported)
+#define CROSS_DOMAIN_CMD_INIT 1
+#define CROSS_DOMAIN_CMD_GET_IMAGE_REQUIREMENTS 2
+
+// Channel types (must match rutabaga channel types)
+#define CROSS_DOMAIN_CHANNEL_TYPE_WAYLAND 0x0001
+#define CROSS_DOMAIN_CHANNEL_TYPE_CAMERA 0x0002
+
+struct CrossDomainCapabilities {
+ uint32_t version;
+ uint32_t supported_channels;
+ uint32_t supports_dmabuf;
+ uint32_t supports_external_gpu_memory;
+};
+
+struct CrossDomainImageRequirements {
+ uint32_t strides[4];
+ uint32_t offsets[4];
+ uint64_t modifier;
+ uint64_t size;
+ uint64_t blob_id;
+ uint32_t map_info;
+ uint32_t pad;
+ int32_t memory_idx;
+ int32_t physical_device_idx;
+};
+
+struct CrossDomainHeader {
+ uint8_t cmd;
+ uint8_t fence_ctx_idx;
+ uint16_t cmd_size;
+ uint32_t pad;
+};
+
+struct CrossDomainInit {
+ struct CrossDomainHeader hdr;
+ uint32_t ring_id;
+ uint32_t channel_type;
+};
+
+struct CrossDomainGetImageRequirements {
+ struct CrossDomainHeader hdr;
+ uint32_t width;
+ uint32_t height;
+ uint32_t drm_format;
+ uint32_t flags;
+};
+
+#endif
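
The structures above are consumed by the new virtgpu_cross_domain.c backend (listed in the diffstat but not reproduced here). As a rough, hedged sketch, a guest could submit CROSS_DOMAIN_CMD_GET_IMAGE_REQUIREMENTS through the execbuffer ioctl declared in the virtgpu_drm.h hunk that follows; the field usage below is inferred from the headers, not copied from the actual backend:

#include <stdint.h>
#include <xf86drm.h>

#include "external/virtgpu_cross_domain_protocol.h"
#include "external/virtgpu_drm.h"

/* Sketch: ask the host for the layout of a width x height buffer. The reply
 * (struct CrossDomainImageRequirements) is written by the host into the shared
 * ring established earlier with CROSS_DOMAIN_CMD_INIT (not shown). */
static int cross_domain_get_reqs(int virtgpu_fd, uint32_t width, uint32_t height,
				 uint32_t drm_format, uint32_t use_flags)
{
	struct CrossDomainGetImageRequirements cmd = { 0 };
	struct drm_virtgpu_execbuffer exec = { 0 };

	cmd.hdr.cmd = CROSS_DOMAIN_CMD_GET_IMAGE_REQUIREMENTS;
	cmd.hdr.cmd_size = sizeof(cmd);
	cmd.width = width;
	cmd.height = height;
	cmd.drm_format = drm_format;
	cmd.flags = use_flags;

	exec.command = (uint64_t)(uintptr_t)&cmd;
	exec.size = sizeof(cmd);

	return drmIoctl(virtgpu_fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &exec);
}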
diff --git a/external/virtgpu_drm.h b/external/virtgpu_drm.h
index 5d53b37..9b46138 100644
--- a/external/virtgpu_drm.h
+++ b/external/virtgpu_drm.h
@@ -47,12 +47,15 @@ extern "C" {
#define DRM_VIRTGPU_WAIT 0x08
#define DRM_VIRTGPU_GET_CAPS 0x09
#define DRM_VIRTGPU_RESOURCE_CREATE_BLOB 0x0a
+#define DRM_VIRTGPU_CONTEXT_INIT 0x0b
#define VIRTGPU_EXECBUF_FENCE_FD_IN 0x01
#define VIRTGPU_EXECBUF_FENCE_FD_OUT 0x02
+#define VIRTGPU_EXECBUF_FENCE_CONTEXT 0x04
#define VIRTGPU_EXECBUF_FLAGS (\
VIRTGPU_EXECBUF_FENCE_FD_IN |\
VIRTGPU_EXECBUF_FENCE_FD_OUT |\
+ VIRTGPU_EXECBUF_FENCE_CONTEXT |\
0)
struct drm_virtgpu_map {
@@ -68,6 +71,8 @@ struct drm_virtgpu_execbuffer {
__u64 bo_handles;
__u32 num_bo_handles;
__s32 fence_fd; /* in/out fence fd (see VIRTGPU_EXECBUF_FENCE_FD_IN/OUT) */
+ __u32 fence_ctx_idx; /* which fence timeline to use */
+ __u32 pad;
};
#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
@@ -75,6 +80,11 @@ struct drm_virtgpu_execbuffer {
#define VIRTGPU_PARAM_RESOURCE_BLOB 3 /* DRM_VIRTGPU_RESOURCE_CREATE_BLOB */
#define VIRTGPU_PARAM_HOST_VISIBLE 4 /* Host blob resources are mappable */
#define VIRTGPU_PARAM_CROSS_DEVICE 5 /* Cross virtio-device resource sharing */
+#define VIRTGPU_PARAM_CONTEXT_INIT 6 /* DRM_VIRTGPU_CONTEXT_INIT */
+#define VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs 7 /* Bitmask of supported capability set ids */
+#define VIRTGPU_PARAM_CREATE_GUEST_HANDLE 8 /* Host OS handle can be created from guest memory. */
+#define VIRTGPU_PARAM_RESOURCE_SYNC 9 /* Synchronization resources */
+#define VIRTGPU_PARAM_GUEST_VRAM 10 /* All guest allocations happen via virtgpu dedicated heap. */
struct drm_virtgpu_getparam {
__u64 param;
@@ -104,7 +114,7 @@ struct drm_virtgpu_resource_info {
__u32 bo_handle;
__u32 res_handle;
__u32 size;
- __u32 stride;
+ __u32 blob_mem;
};
/* CHROMIUM */
@@ -172,13 +182,15 @@ struct drm_virtgpu_get_caps {
};
struct drm_virtgpu_resource_create_blob {
-#define VIRTGPU_BLOB_MEM_GUEST 0x0001
-#define VIRTGPU_BLOB_MEM_HOST3D 0x0002
-#define VIRTGPU_BLOB_MEM_HOST3D_GUEST 0x0003
-
-#define VIRTGPU_BLOB_FLAG_USE_MAPPABLE 0x0001
-#define VIRTGPU_BLOB_FLAG_USE_SHAREABLE 0x0002
-#define VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE 0x0004
+#define VIRTGPU_BLOB_MEM_GUEST 0x0001
+#define VIRTGPU_BLOB_MEM_HOST3D 0x0002
+#define VIRTGPU_BLOB_MEM_HOST3D_GUEST 0x0003
+
+#define VIRTGPU_BLOB_FLAG_USE_MAPPABLE 0x0001
+#define VIRTGPU_BLOB_FLAG_USE_SHAREABLE 0x0002
+#define VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE 0x0004
+#define VIRTGPU_BLOB_FLAG_CREATE_GUEST_HANDLE 0x0008
+#define VIRTGPU_BLOB_FLAG_CREATE_GUEST_CONTIG 0x0010
/* zero is invalid blob_mem */
__u32 blob_mem;
__u32 blob_flags;
@@ -196,6 +208,21 @@ struct drm_virtgpu_resource_create_blob {
__u64 blob_id;
};
+#define VIRTGPU_CONTEXT_PARAM_CAPSET_ID 0x0001
+#define VIRTGPU_CONTEXT_PARAM_NUM_FENCE_CONTEXTS 0x0002
+struct drm_virtgpu_context_set_param {
+ __u64 param;
+ __u64 value;
+};
+
+struct drm_virtgpu_context_init {
+ __u32 num_params;
+ __u32 pad;
+
+ /* pointer to drm_virtgpu_context_set_param array */
+ __u64 ctx_set_params;
+};
+
#define DRM_IOCTL_VIRTGPU_MAP \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)
@@ -240,6 +267,10 @@ struct drm_virtgpu_resource_create_blob {
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_CREATE_BLOB, \
struct drm_virtgpu_resource_create_blob)
+#define DRM_IOCTL_VIRTGPU_CONTEXT_INIT \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_CONTEXT_INIT, \
+ struct drm_virtgpu_context_init)
+
#if defined(__cplusplus)
}
#endif
diff --git a/gbm.c b/gbm.c
index 049b035..ecbfb88 100644
--- a/gbm.c
+++ b/gbm.c
@@ -328,6 +328,11 @@ PUBLIC union gbm_bo_handle gbm_bo_get_handle_for_plane(struct gbm_bo *bo, size_t
return (union gbm_bo_handle)drv_bo_get_plane_handle(bo->bo, (size_t)plane).u64;
}
+PUBLIC int gbm_bo_get_fd_for_plane(struct gbm_bo *bo, int plane)
+{
+ return drv_bo_get_plane_fd(bo->bo, plane);
+}
+
PUBLIC uint32_t gbm_bo_get_offset(struct gbm_bo *bo, size_t plane)
{
return drv_bo_get_plane_offset(bo->bo, (size_t)plane);
diff --git a/gbm.h b/gbm.h
index 3c491cc..9acfaa2 100644
--- a/gbm.h
+++ b/gbm.h
@@ -72,7 +72,7 @@ union gbm_bo_handle {
/** Format of the allocated buffer */
enum gbm_bo_format {
/** RGB with 8 bits per channel in a 32 bit value */
- GBM_BO_FORMAT_XRGB8888,
+ GBM_BO_FORMAT_XRGB8888,
/** ARGB with 8 bits per channel in a 32 bit value */
GBM_BO_FORMAT_ARGB8888
};
@@ -179,6 +179,8 @@ enum gbm_bo_format {
#define GBM_FORMAT_NV16 __gbm_fourcc_code('N', 'V', '1', '6') /* 2x1 subsampled Cr:Cb plane */
#define GBM_FORMAT_NV61 __gbm_fourcc_code('N', 'V', '6', '1') /* 2x1 subsampled Cb:Cr plane */
+#define GBM_FORMAT_P010 __gbm_fourcc_code('P', '0', '1', '0') /* 2x2 subsampled Cr:Cb plane */
+
/*
* 3 plane YCbCr
* index 0: Y plane, [7:0] Y
@@ -280,6 +282,14 @@ enum gbm_bo_flags {
* which would otherwise access the underlying buffer will fail.
*/
GBM_TEST_ALLOC = (1 << 15),
+
+ /**
+ * The buffer will be used for front buffer rendering. On some
+ * platforms this may (for example) disable framebuffer compression
+ * to avoid problems with compression flags data being out of sync
+ * with pixel data.
+ */
+ GBM_BO_USE_FRONT_RENDERING = (1 << 16),
};
int
@@ -421,6 +431,9 @@ union gbm_bo_handle
gbm_bo_get_handle_for_plane(struct gbm_bo *bo, size_t plane);
int
+gbm_bo_get_fd_for_plane(struct gbm_bo *bo, int plane);
+
+int
gbm_bo_write(struct gbm_bo *bo, const void *buf, size_t count);
void
diff --git a/gbm_helpers.c b/gbm_helpers.c
index 0626a6d..17dcf1f 100644
--- a/gbm_helpers.c
+++ b/gbm_helpers.c
@@ -44,6 +44,8 @@ uint64_t gbm_convert_usage(uint32_t usage)
use_flags |= BO_USE_HW_VIDEO_DECODER;
if (usage & GBM_BO_USE_HW_VIDEO_ENCODER)
use_flags |= BO_USE_HW_VIDEO_ENCODER;
+ if (usage & GBM_BO_USE_FRONT_RENDERING)
+ use_flags |= BO_USE_FRONT_RENDERING;
return use_flags;
}
diff --git a/helpers.h b/helpers.h
index 1919016..d906dcf 100644
--- a/helpers.h
+++ b/helpers.h
@@ -16,6 +16,10 @@ extern "C" {
#include "drv.h"
#include "helpers_array.h"
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 0x1000
+#endif
+
uint32_t drv_height_from_format(uint32_t format, uint32_t height, size_t plane);
uint32_t drv_vertical_subsampling_from_format(uint32_t format, size_t plane);
uint32_t drv_size_from_format(uint32_t format, uint32_t stride, uint32_t height, size_t plane);
diff --git a/i915.c b/i915.c
index b00913c..9de38b2 100644
--- a/i915.c
+++ b/i915.c
@@ -50,30 +50,44 @@ struct i915_device {
int32_t has_llc;
int32_t has_hw_protection;
struct modifier_support_t modifier;
+ int device_id;
+ bool is_adlp;
};
-static uint32_t i915_get_gen(int device_id)
+static void i915_info_from_device_id(struct i915_device *i915)
{
const uint16_t gen3_ids[] = { 0x2582, 0x2592, 0x2772, 0x27A2, 0x27AE,
0x29C2, 0x29B2, 0x29D2, 0xA001, 0xA011 };
const uint16_t gen11_ids[] = { 0x4E71, 0x4E61, 0x4E51, 0x4E55, 0x4E57 };
const uint16_t gen12_ids[] = { 0x9A40, 0x9A49, 0x9A59, 0x9A60, 0x9A68, 0x9A70,
0x9A78, 0x9AC0, 0x9AC9, 0x9AD9, 0x9AF8 };
+ const uint16_t adlp_ids[] = { 0x46A0, 0x46A1, 0x46A2, 0x46A3, 0x46A6,
+ 0x46A8, 0x46AA, 0x462A, 0x4626, 0x4628,
+ 0x46B0, 0x46B1, 0x46B2, 0x46B3, 0x46C0,
+ 0x46C1, 0x46C2, 0x46C3 };
unsigned i;
+ i915->gen = 4;
+ i915->is_adlp = false;
+
for (i = 0; i < ARRAY_SIZE(gen3_ids); i++)
- if (gen3_ids[i] == device_id)
- return 3;
+ if (gen3_ids[i] == i915->device_id)
+ i915->gen = 3;
+
/* Gen 11 */
for (i = 0; i < ARRAY_SIZE(gen11_ids); i++)
- if (gen11_ids[i] == device_id)
- return 11;
+ if (gen11_ids[i] == i915->device_id)
+ i915->gen = 11;
/* Gen 12 */
for (i = 0; i < ARRAY_SIZE(gen12_ids); i++)
- if (gen12_ids[i] == device_id)
- return 12;
+ if (gen12_ids[i] == i915->device_id)
+ i915->gen = 12;
- return 4;
+ for (i = 0; i < ARRAY_SIZE(adlp_ids); i++)
+ if (adlp_ids[i] == i915->device_id) {
+ i915->is_adlp = true;
+ i915->gen = 12;
+ }
}
static void i915_get_modifier_order(struct i915_device *i915)
@@ -95,85 +109,88 @@ static uint64_t unset_flags(uint64_t current_flags, uint64_t mask)
static int i915_add_combinations(struct driver *drv)
{
- struct format_metadata metadata;
- uint64_t render, scanout_and_render, texture_only, hw_protected;
struct i915_device *i915 = drv->priv;
- scanout_and_render = BO_USE_RENDER_MASK | BO_USE_SCANOUT;
- render = BO_USE_RENDER_MASK;
- texture_only = BO_USE_TEXTURE_MASK;
+ const uint64_t scanout_and_render = BO_USE_RENDER_MASK | BO_USE_SCANOUT;
+ const uint64_t render = BO_USE_RENDER_MASK;
+ const uint64_t texture_only = BO_USE_TEXTURE_MASK;
// HW protected buffers also need to be scanned out.
- hw_protected = i915->has_hw_protection ? (BO_USE_PROTECTED | BO_USE_SCANOUT) : 0;
+ const uint64_t hw_protected =
+ i915->has_hw_protection ? (BO_USE_PROTECTED | BO_USE_SCANOUT) : 0;
- uint64_t linear_mask =
- BO_USE_RENDERSCRIPT | BO_USE_LINEAR | BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN;
+ const uint64_t linear_mask = BO_USE_RENDERSCRIPT | BO_USE_LINEAR | BO_USE_SW_READ_OFTEN |
+ BO_USE_SW_WRITE_OFTEN | BO_USE_SW_READ_RARELY |
+ BO_USE_SW_WRITE_RARELY;
- metadata.tiling = I915_TILING_NONE;
- metadata.priority = 1;
- metadata.modifier = DRM_FORMAT_MOD_LINEAR;
+ struct format_metadata metadata_linear = { .tiling = I915_TILING_NONE,
+ .priority = 1,
+ .modifier = DRM_FORMAT_MOD_LINEAR };
drv_add_combinations(drv, scanout_render_formats, ARRAY_SIZE(scanout_render_formats),
- &metadata, scanout_and_render);
+ &metadata_linear, scanout_and_render);
- drv_add_combinations(drv, render_formats, ARRAY_SIZE(render_formats), &metadata, render);
+ drv_add_combinations(drv, render_formats, ARRAY_SIZE(render_formats), &metadata_linear,
+ render);
- drv_add_combinations(drv, texture_only_formats, ARRAY_SIZE(texture_only_formats), &metadata,
- texture_only);
+ drv_add_combinations(drv, texture_only_formats, ARRAY_SIZE(texture_only_formats),
+ &metadata_linear, texture_only);
drv_modify_linear_combinations(drv);
/* NV12 format for camera, display, decoding and encoding. */
/* IPU3 camera ISP supports only NV12 output. */
- drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata,
+ drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata_linear,
BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_SCANOUT |
BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER |
hw_protected);
/* Android CTS tests require this. */
- drv_add_combination(drv, DRM_FORMAT_BGR888, &metadata, BO_USE_SW_MASK);
+ drv_add_combination(drv, DRM_FORMAT_BGR888, &metadata_linear, BO_USE_SW_MASK);
/*
* R8 format is used for Android's HAL_PIXEL_FORMAT_BLOB and is used for JPEG snapshots
* from camera and input/output from hardware decoder/encoder.
*/
- drv_modify_combination(drv, DRM_FORMAT_R8, &metadata,
+ drv_modify_combination(drv, DRM_FORMAT_R8, &metadata_linear,
BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
BO_USE_HW_VIDEO_ENCODER);
- render = unset_flags(render, linear_mask);
- scanout_and_render = unset_flags(scanout_and_render, linear_mask);
+ const uint64_t render_not_linear = unset_flags(render, linear_mask);
+ const uint64_t scanout_and_render_not_linear = render_not_linear | BO_USE_SCANOUT;
- metadata.tiling = I915_TILING_X;
- metadata.priority = 2;
- metadata.modifier = I915_FORMAT_MOD_X_TILED;
+ struct format_metadata metadata_x_tiled = { .tiling = I915_TILING_X,
+ .priority = 2,
+ .modifier = I915_FORMAT_MOD_X_TILED };
- drv_add_combinations(drv, render_formats, ARRAY_SIZE(render_formats), &metadata, render);
+ drv_add_combinations(drv, render_formats, ARRAY_SIZE(render_formats), &metadata_x_tiled,
+ render_not_linear);
drv_add_combinations(drv, scanout_render_formats, ARRAY_SIZE(scanout_render_formats),
- &metadata, scanout_and_render);
+ &metadata_x_tiled, scanout_and_render_not_linear);
- metadata.tiling = I915_TILING_Y;
- metadata.priority = 3;
- metadata.modifier = I915_FORMAT_MOD_Y_TILED;
+ struct format_metadata metadata_y_tiled = { .tiling = I915_TILING_Y,
+ .priority = 3,
+ .modifier = I915_FORMAT_MOD_Y_TILED };
- scanout_and_render =
- unset_flags(scanout_and_render, BO_USE_SW_READ_RARELY | BO_USE_SW_WRITE_RARELY);
/* Support y-tiled NV12 and P010 for libva */
#ifdef I915_SCANOUT_Y_TILED
- uint64_t nv12_usage =
+ const uint64_t nv12_usage =
BO_USE_TEXTURE | BO_USE_HW_VIDEO_DECODER | BO_USE_SCANOUT | hw_protected;
- uint64_t p010_usage = BO_USE_TEXTURE | BO_USE_HW_VIDEO_DECODER | hw_protected;
+ const uint64_t p010_usage = BO_USE_TEXTURE | BO_USE_HW_VIDEO_DECODER | hw_protected |
+ (i915->gen >= 11 ? BO_USE_SCANOUT : 0);
#else
- uint64_t nv12_usage = BO_USE_TEXTURE | BO_USE_HW_VIDEO_DECODER;
- uint64_t p010_usage = nv12_usage;
+ const uint64_t nv12_usage = BO_USE_TEXTURE | BO_USE_HW_VIDEO_DECODER;
+ const uint64_t p010_usage = nv12_usage;
#endif
- drv_add_combination(drv, DRM_FORMAT_NV12, &metadata, nv12_usage);
- drv_add_combination(drv, DRM_FORMAT_P010, &metadata, p010_usage);
+ drv_add_combination(drv, DRM_FORMAT_NV12, &metadata_y_tiled, nv12_usage);
+ drv_add_combination(drv, DRM_FORMAT_P010, &metadata_y_tiled, p010_usage);
- scanout_and_render = unset_flags(scanout_and_render, BO_USE_SCANOUT);
+ drv_add_combinations(drv, render_formats, ARRAY_SIZE(render_formats), &metadata_y_tiled,
+ render_not_linear);
- drv_add_combinations(drv, render_formats, ARRAY_SIZE(render_formats), &metadata, render);
+ // Y-tiled scanout isn't available on old platforms so we add
+ // |scanout_render_formats| without that USE flag.
drv_add_combinations(drv, scanout_render_formats, ARRAY_SIZE(scanout_render_formats),
- &metadata, scanout_and_render);
+ &metadata_y_tiled, render_not_linear);
return 0;
}
@@ -232,6 +249,10 @@ static int i915_align_dimensions(struct bo *bo, uint32_t tiling, uint32_t *strid
*stride = horizontal_alignment;
}
+ /* stride must be power-of-two aligned for ADL-P tiled buffers*/
+ if (i915->is_adlp && (*stride > 1) && (tiling != I915_TILING_NONE))
+ *stride = 1 << (32 - __builtin_clz(*stride -1));
+
if (i915->gen <= 3 && *stride > 8192)
return -EINVAL;
@@ -253,7 +274,6 @@ static void i915_clflush(void *start, size_t size)
static int i915_init(struct driver *drv)
{
int ret;
- int device_id;
struct i915_device *i915;
drm_i915_getparam_t get_param = { 0 };
@@ -262,15 +282,16 @@ static int i915_init(struct driver *drv)
return -ENOMEM;
get_param.param = I915_PARAM_CHIPSET_ID;
- get_param.value = &device_id;
+ get_param.value = &(i915->device_id);
ret = drmIoctl(drv->fd, DRM_IOCTL_I915_GETPARAM, &get_param);
if (ret) {
drv_log("Failed to get I915_PARAM_CHIPSET_ID\n");
free(i915);
return -EINVAL;
}
+ /* must call before i915->gen is used anywhere else */
+ i915_info_from_device_id(i915);
- i915->gen = i915_get_gen(device_id);
i915_get_modifier_order(i915);
memset(&get_param, 0, sizeof(get_param));
@@ -290,14 +311,32 @@ static int i915_init(struct driver *drv)
return i915_add_combinations(drv);
}
+/*
+ * Returns true if the height of a buffer of the given format should be aligned
+ * to the largest coded unit (LCU) assuming that it will be used for video. This
+ * is based on gmmlib's GmmIsYUVFormatLCUAligned().
+ */
+static bool i915_format_needs_LCU_alignment(uint32_t format, size_t plane, const struct i915_device* i915)
+{
+ switch (format) {
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_P010:
+ case DRM_FORMAT_P016:
+ return (i915->gen == 11 || i915->gen == 12) && plane == 1;
+ }
+ return false;
+}
+
static int i915_bo_from_format(struct bo *bo, uint32_t width, uint32_t height, uint32_t format)
{
uint32_t offset;
size_t plane;
int ret, pagesize;
+ struct i915_device *i915 = bo->drv->priv;
offset = 0;
pagesize = getpagesize();
+
for (plane = 0; plane < drv_num_planes_from_format(format); plane++) {
uint32_t stride = drv_stride_from_format(format, width, plane);
uint32_t plane_height = drv_height_from_format(format, height, plane);
@@ -309,6 +348,15 @@ static int i915_bo_from_format(struct bo *bo, uint32_t width, uint32_t height, u
if (ret)
return ret;
+ if (i915_format_needs_LCU_alignment(format, plane, i915)) {
+ /*
+ * Align the height of the V plane for certain formats to the
+ * largest coded unit (assuming that this BO may be used for video)
+ * to be consistent with gmmlib.
+ */
+ plane_height = ALIGN(plane_height, 64);
+ }
+
bo->meta.strides[plane] = stride;
bo->meta.sizes[plane] = stride * plane_height;
bo->meta.offsets[plane] = offset;
diff --git a/mediatek.c b/mediatek.c
index a0b77e6..de492e2 100644
--- a/mediatek.c
+++ b/mediatek.c
@@ -65,7 +65,7 @@ static int mediatek_init(struct driver *drv)
metadata.modifier = DRM_FORMAT_MOD_LINEAR;
drv_modify_combination(drv, DRM_FORMAT_YVU420, &metadata, BO_USE_HW_VIDEO_DECODER);
drv_modify_combination(drv, DRM_FORMAT_YVU420_ANDROID, &metadata, BO_USE_HW_VIDEO_DECODER);
-#if defined(MTK_MT8183) || defined(MTK_MT8192)
+#if defined(MTK_MT8183) || defined(MTK_MT8192) || defined(MTK_MT8195)
// TODO(hiroh): Switch to use NV12 for video decoder on MT8173 as well.
drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata, BO_USE_HW_VIDEO_DECODER);
#endif
@@ -275,7 +275,7 @@ static uint32_t mediatek_resolve_format(struct driver *drv, uint32_t format, uin
/*HACK: See b/28671744 */
return DRM_FORMAT_XBGR8888;
case DRM_FORMAT_FLEX_YCbCr_420_888:
-#ifdef MTK_MT8183
+#if defined(MTK_MT8183) || defined(MTK_MT8192) || defined(MTK_MT8195)
// TODO(hiroh): Switch to use NV12 for video decoder on MT8173 as well.
if (use_flags & (BO_USE_HW_VIDEO_DECODER)) {
return DRM_FORMAT_NV12;
diff --git a/minigbm_helpers.c b/minigbm_helpers.c
index 781d675..137e5a1 100644
--- a/minigbm_helpers.c
+++ b/minigbm_helpers.c
@@ -169,9 +169,6 @@ static int detect_device_info(unsigned int detect_flags, int fd, struct gbm_devi
} else if (strncmp("nouveau", version->name, version->name_len) == 0) {
info->dev_type_flags |=
GBM_DEV_TYPE_FLAG_DISPLAY | GBM_DEV_TYPE_FLAG_3D | GBM_DEV_TYPE_FLAG_DISCRETE;
- } else if (strncmp("tegra", version->name, version->name_len) == 0) {
- info->dev_type_flags |=
- GBM_DEV_TYPE_FLAG_DISPLAY | GBM_DEV_TYPE_FLAG_3D | GBM_DEV_TYPE_FLAG_ARMSOC;
} else if (strncmp("msm", version->name, version->name_len) == 0) {
info->dev_type_flags |=
GBM_DEV_TYPE_FLAG_DISPLAY | GBM_DEV_TYPE_FLAG_3D | GBM_DEV_TYPE_FLAG_ARMSOC;
diff --git a/msm.c b/msm.c
index cdc915d..16e781f 100644
--- a/msm.c
+++ b/msm.c
@@ -223,7 +223,14 @@ static int msm_init(struct driver *drv)
struct format_metadata metadata;
uint64_t render_use_flags = BO_USE_RENDER_MASK | BO_USE_SCANOUT;
uint64_t texture_use_flags = BO_USE_TEXTURE_MASK | BO_USE_HW_VIDEO_DECODER;
- uint64_t sw_flags = (BO_USE_RENDERSCRIPT | BO_USE_SW_MASK | BO_USE_LINEAR);
+ /*
+ * NOTE: we actually could use tiled in the BO_USE_FRONT_RENDERING case,
+ * if we had a modifier for tiled-but-not-compressed. But we *cannot* use
+ * compressed in this case because the UBWC flags/meta data can be out of
+ * sync with pixel data while the GPU is writing a frame out to memory.
+ */
+ uint64_t sw_flags =
+ (BO_USE_RENDERSCRIPT | BO_USE_SW_MASK | BO_USE_LINEAR | BO_USE_FRONT_RENDERING);
drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
&LINEAR_METADATA, render_use_flags);
@@ -267,8 +274,7 @@ static int msm_init(struct driver *drv)
&metadata, texture_use_flags);
drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata,
- BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_SCANOUT |
- BO_USE_HW_VIDEO_ENCODER);
+ BO_USE_SCANOUT | BO_USE_HW_VIDEO_ENCODER);
return 0;
}
diff --git a/tegra.c b/tegra.c
deleted file mode 100644
index 9dd6b70..0000000
--- a/tegra.c
+++ /dev/null
@@ -1,357 +0,0 @@
-/*
- * Copyright 2014 The Chromium OS Authors. All rights reserved.
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#ifdef DRV_TEGRA
-
-#include <assert.h>
-#include <errno.h>
-#include <stdio.h>
-#include <string.h>
-#include <sys/mman.h>
-#include <tegra_drm.h>
-#include <xf86drm.h>
-
-#include "drv_priv.h"
-#include "helpers.h"
-#include "util.h"
-
-/*
- * GOB (Group Of Bytes) is the basic unit of the blocklinear layout.
- * GOBs are arranged to blocks, where the height of the block (measured
- * in GOBs) is configurable.
- */
-#define NV_BLOCKLINEAR_GOB_HEIGHT 8
-#define NV_BLOCKLINEAR_GOB_WIDTH 64
-#define NV_DEFAULT_BLOCK_HEIGHT_LOG2 4
-#define NV_PREFERRED_PAGE_SIZE (128 * 1024)
-
-// clang-format off
-enum nv_mem_kind
-{
- NV_MEM_KIND_PITCH = 0,
- NV_MEM_KIND_C32_2CRA = 0xdb,
- NV_MEM_KIND_GENERIC_16Bx2 = 0xfe,
-};
-
-enum tegra_map_type {
- TEGRA_READ_TILED_BUFFER = 0,
- TEGRA_WRITE_TILED_BUFFER = 1,
-};
-// clang-format on
-
-struct tegra_private_map_data {
- void *tiled;
- void *untiled;
-};
-
-static const uint32_t render_target_formats[] = { DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB8888 };
-
-static int compute_block_height_log2(int height)
-{
- int block_height_log2 = NV_DEFAULT_BLOCK_HEIGHT_LOG2;
-
- if (block_height_log2 > 0) {
- /* Shrink, if a smaller block height could cover the whole
- * surface height. */
- int proposed = NV_BLOCKLINEAR_GOB_HEIGHT << (block_height_log2 - 1);
- while (proposed >= height) {
- block_height_log2--;
- if (block_height_log2 == 0)
- break;
- proposed /= 2;
- }
- }
- return block_height_log2;
-}
-
-static void compute_layout_blocklinear(int width, int height, int format, enum nv_mem_kind *kind,
- uint32_t *block_height_log2, uint32_t *stride,
- uint32_t *size)
-{
- int pitch = drv_stride_from_format(format, width, 0);
-
- /* Align to blocklinear blocks. */
- pitch = ALIGN(pitch, NV_BLOCKLINEAR_GOB_WIDTH);
-
- /* Compute padded height. */
- *block_height_log2 = compute_block_height_log2(height);
- int block_height = 1 << *block_height_log2;
- int padded_height = ALIGN(height, NV_BLOCKLINEAR_GOB_HEIGHT * block_height);
-
- int bytes = pitch * padded_height;
-
- /* Pad the allocation to the preferred page size.
- * This will reduce the required page table size (see discussion in NV
- * bug 1321091), and also acts as a WAR for NV bug 1325421.
- */
- bytes = ALIGN(bytes, NV_PREFERRED_PAGE_SIZE);
-
- *kind = NV_MEM_KIND_C32_2CRA;
- *stride = pitch;
- *size = bytes;
-}
-
-static void compute_layout_linear(int width, int height, int format, uint32_t *stride,
- uint32_t *size)
-{
- *stride = ALIGN(drv_stride_from_format(format, width, 0), 64);
- *size = *stride * height;
-}
-
-static void transfer_tile(struct bo *bo, uint8_t *tiled, uint8_t *untiled, enum tegra_map_type type,
- uint32_t bytes_per_pixel, uint32_t gob_top, uint32_t gob_left,
- uint32_t gob_size_pixels, uint8_t *tiled_last)
-{
- uint8_t *tmp;
- uint32_t x, y, k;
- for (k = 0; k < gob_size_pixels; k++) {
- /*
- * Given the kth pixel starting from the tile specified by
- * gob_top and gob_left, unswizzle to get the standard (x, y)
- * representation.
- */
- x = gob_left + (((k >> 3) & 8) | ((k >> 1) & 4) | (k & 3));
- y = gob_top + ((k >> 7 << 3) | ((k >> 3) & 6) | ((k >> 2) & 1));
-
- if (tiled >= tiled_last)
- return;
-
- if (x >= bo->meta.width || y >= bo->meta.height) {
- tiled += bytes_per_pixel;
- continue;
- }
-
- tmp = untiled + y * bo->meta.strides[0] + x * bytes_per_pixel;
-
- if (type == TEGRA_READ_TILED_BUFFER)
- memcpy(tmp, tiled, bytes_per_pixel);
- else if (type == TEGRA_WRITE_TILED_BUFFER)
- memcpy(tiled, tmp, bytes_per_pixel);
-
- /* Move on to next pixel. */
- tiled += bytes_per_pixel;
- }
-}
-
-static void transfer_tiled_memory(struct bo *bo, uint8_t *tiled, uint8_t *untiled,
- enum tegra_map_type type)
-{
- uint32_t gob_width, gob_height, gob_size_bytes, gob_size_pixels, gob_count_x, gob_count_y,
- gob_top, gob_left;
- uint32_t i, j, offset;
- uint8_t *tmp, *tiled_last;
- uint32_t bytes_per_pixel = drv_stride_from_format(bo->meta.format, 1, 0);
-
- /*
- * The blocklinear format consists of 8*(2^n) x 64 byte sized tiles,
- * where 0 <= n <= 4.
- */
- gob_width = DIV_ROUND_UP(NV_BLOCKLINEAR_GOB_WIDTH, bytes_per_pixel);
- gob_height = NV_BLOCKLINEAR_GOB_HEIGHT * (1 << NV_DEFAULT_BLOCK_HEIGHT_LOG2);
- /* Calculate the height from maximum possible gob height */
- while (gob_height > NV_BLOCKLINEAR_GOB_HEIGHT && gob_height >= 2 * bo->meta.height)
- gob_height /= 2;
-
- gob_size_bytes = gob_height * NV_BLOCKLINEAR_GOB_WIDTH;
- gob_size_pixels = gob_height * gob_width;
-
- gob_count_x = DIV_ROUND_UP(bo->meta.strides[0], NV_BLOCKLINEAR_GOB_WIDTH);
- gob_count_y = DIV_ROUND_UP(bo->meta.height, gob_height);
-
- tiled_last = tiled + bo->meta.total_size;
-
- offset = 0;
- for (j = 0; j < gob_count_y; j++) {
- gob_top = j * gob_height;
- for (i = 0; i < gob_count_x; i++) {
- tmp = tiled + offset;
- gob_left = i * gob_width;
-
- transfer_tile(bo, tmp, untiled, type, bytes_per_pixel, gob_top, gob_left,
- gob_size_pixels, tiled_last);
-
- offset += gob_size_bytes;
- }
- }
-}
-
-static int tegra_init(struct driver *drv)
-{
- struct format_metadata metadata;
- uint64_t use_flags = BO_USE_RENDER_MASK;
-
- metadata.tiling = NV_MEM_KIND_PITCH;
- metadata.priority = 1;
- metadata.modifier = DRM_FORMAT_MOD_LINEAR;
-
- drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &metadata, use_flags);
-
- drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
- drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
-
- use_flags &= ~BO_USE_SW_WRITE_OFTEN;
- use_flags &= ~BO_USE_SW_READ_OFTEN;
- use_flags &= ~BO_USE_LINEAR;
-
- metadata.tiling = NV_MEM_KIND_C32_2CRA;
- metadata.priority = 2;
-
- drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &metadata, use_flags);
-
- drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_SCANOUT);
- drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_SCANOUT);
- return 0;
-}
-
-static int tegra_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
- uint64_t use_flags)
-{
- uint32_t size, stride, block_height_log2 = 0;
- enum nv_mem_kind kind = NV_MEM_KIND_PITCH;
- struct drm_tegra_gem_create gem_create = { 0 };
- int ret;
-
- if (use_flags &
- (BO_USE_CURSOR | BO_USE_LINEAR | BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN))
- compute_layout_linear(width, height, format, &stride, &size);
- else
- compute_layout_blocklinear(width, height, format, &kind, &block_height_log2,
- &stride, &size);
-
- gem_create.size = size;
- gem_create.flags = 0;
-
- ret = drmIoctl(bo->drv->fd, DRM_IOCTL_TEGRA_GEM_CREATE, &gem_create);
- if (ret) {
- drv_log("DRM_IOCTL_TEGRA_GEM_CREATE failed (size=%zu)\n", size);
- return -errno;
- }
-
- bo->handles[0].u32 = gem_create.handle;
- bo->meta.offsets[0] = 0;
- bo->meta.total_size = bo->meta.sizes[0] = size;
- bo->meta.strides[0] = stride;
-
- if (kind != NV_MEM_KIND_PITCH) {
- struct drm_tegra_gem_set_tiling gem_tile = { 0 };
-
- gem_tile.handle = bo->handles[0].u32;
- gem_tile.mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
- gem_tile.value = block_height_log2;
-
- ret = drmCommandWriteRead(bo->drv->fd, DRM_TEGRA_GEM_SET_TILING, &gem_tile,
- sizeof(gem_tile));
- if (ret < 0) {
- drv_gem_bo_destroy(bo);
- return ret;
- }
-
- /* Encode blocklinear parameters for EGLImage creation. */
- bo->meta.tiling = (kind & 0xff) | ((block_height_log2 & 0xf) << 8);
- bo->meta.format_modifier = fourcc_mod_code(NV, bo->meta.tiling);
- }
-
- return 0;
-}
-
-static int tegra_bo_import(struct bo *bo, struct drv_import_fd_data *data)
-{
- int ret;
- struct drm_tegra_gem_get_tiling gem_get_tiling = { 0 };
-
- ret = drv_prime_bo_import(bo, data);
- if (ret)
- return ret;
-
- /* TODO(gsingh): export modifiers and get rid of backdoor tiling. */
- gem_get_tiling.handle = bo->handles[0].u32;
- ret = drmIoctl(bo->drv->fd, DRM_IOCTL_TEGRA_GEM_GET_TILING, &gem_get_tiling);
- if (ret) {
- drv_gem_bo_destroy(bo);
- return -errno;
- }
-
- /* NOTE(djmk): we only know about one tiled format, so if our drmIoctl call tells us we are
- tiled, assume it is this format (NV_MEM_KIND_C32_2CRA) otherwise linear (KIND_PITCH). */
- if (gem_get_tiling.mode == DRM_TEGRA_GEM_TILING_MODE_PITCH) {
- bo->meta.tiling = NV_MEM_KIND_PITCH;
- } else if (gem_get_tiling.mode == DRM_TEGRA_GEM_TILING_MODE_BLOCK) {
- bo->meta.tiling = NV_MEM_KIND_C32_2CRA;
- } else {
- drv_log("%s: unknown tile format %d\n", __func__, gem_get_tiling.mode);
- drv_gem_bo_destroy(bo);
- assert(0);
- }
-
- bo->meta.format_modifier = fourcc_mod_code(NV, bo->meta.tiling);
- return 0;
-}
-
-static void *tegra_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
-{
- int ret;
- struct drm_tegra_gem_mmap gem_map = { 0 };
- struct tegra_private_map_data *priv;
-
- gem_map.handle = bo->handles[0].u32;
- ret = drmCommandWriteRead(bo->drv->fd, DRM_TEGRA_GEM_MMAP, &gem_map, sizeof(gem_map));
- if (ret < 0) {
- drv_log("DRM_TEGRA_GEM_MMAP failed\n");
- return MAP_FAILED;
- }
-
- void *addr = mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
- gem_map.offset);
- vma->length = bo->meta.total_size;
- if ((bo->meta.tiling & 0xFF) == NV_MEM_KIND_C32_2CRA && addr != MAP_FAILED) {
- priv = calloc(1, sizeof(*priv));
- priv->untiled = calloc(1, bo->meta.total_size);
- priv->tiled = addr;
- vma->priv = priv;
- transfer_tiled_memory(bo, priv->tiled, priv->untiled, TEGRA_READ_TILED_BUFFER);
- addr = priv->untiled;
- }
-
- return addr;
-}
-
-static int tegra_bo_unmap(struct bo *bo, struct vma *vma)
-{
- if (vma->priv) {
- struct tegra_private_map_data *priv = vma->priv;
- vma->addr = priv->tiled;
- free(priv->untiled);
- free(priv);
- vma->priv = NULL;
- }
-
- return munmap(vma->addr, vma->length);
-}
-
-static int tegra_bo_flush(struct bo *bo, struct mapping *mapping)
-{
- struct tegra_private_map_data *priv = mapping->vma->priv;
-
- if (priv && (mapping->vma->map_flags & BO_MAP_WRITE))
- transfer_tiled_memory(bo, priv->tiled, priv->untiled, TEGRA_WRITE_TILED_BUFFER);
-
- return 0;
-}
-
-const struct backend backend_tegra = {
- .name = "tegra",
- .init = tegra_init,
- .bo_create = tegra_bo_create,
- .bo_destroy = drv_gem_bo_destroy,
- .bo_import = tegra_bo_import,
- .bo_map = tegra_bo_map,
- .bo_unmap = tegra_bo_unmap,
- .bo_flush = tegra_bo_flush,
-};
-
-#endif
diff --git a/virtgpu.c b/virtgpu.c
new file mode 100644
index 0000000..23e90b3
--- /dev/null
+++ b/virtgpu.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2021 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <errno.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <xf86drm.h>
+
+#include "drv_priv.h"
+#include "external/virtgpu_drm.h"
+#include "helpers.h"
+#include "util.h"
+#include "virtgpu.h"
+
+#define PARAM(x) \
+ (struct virtgpu_param) \
+ { \
+ x, #x, 0 \
+ }
+
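+// virtgpu parameters queried once at init; the backends read the cached values from this table.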
+struct virtgpu_param params[] = {
+ PARAM(VIRTGPU_PARAM_3D_FEATURES), PARAM(VIRTGPU_PARAM_CAPSET_QUERY_FIX),
+ PARAM(VIRTGPU_PARAM_RESOURCE_BLOB), PARAM(VIRTGPU_PARAM_HOST_VISIBLE),
+ PARAM(VIRTGPU_PARAM_CROSS_DEVICE), PARAM(VIRTGPU_PARAM_CONTEXT_INIT),
+ PARAM(VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs), PARAM(VIRTGPU_PARAM_CREATE_GUEST_HANDLE),
+ PARAM(VIRTGPU_PARAM_RESOURCE_SYNC), PARAM(VIRTGPU_PARAM_GUEST_VRAM),
+};
+
+extern const struct backend virtgpu_virgl;
+extern const struct backend virtgpu_cross_domain;
+
+static int virtgpu_init(struct driver *drv)
+{
+ int ret = 0;
+ const struct backend *virtgpu_backends[2] = {
+ &virtgpu_cross_domain,
+ &virtgpu_virgl,
+ };
+
+ for (uint32_t i = 0; i < ARRAY_SIZE(params); i++) {
+ struct drm_virtgpu_getparam get_param = { 0 };
+
+ get_param.param = params[i].param;
+ get_param.value = (uint64_t)(uintptr_t)&params[i].value;
+ int ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GETPARAM, &get_param);
+ if (ret)
+ drv_log("DRM_IOCTL_VIRTGPU_GETPARAM failed with %s\n", strerror(errno));
+ }
+
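+ // Try the cross-domain backend first and fall back to virgl if it declines to initialize.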
+ for (uint32_t i = 0; i < ARRAY_SIZE(virtgpu_backends); i++) {
+ const struct backend *backend = virtgpu_backends[i];
+ ret = backend->init(drv);
+ if (ret)
+ continue;
+
+ drv->backend = backend;
+ return 0;
+ }
+
+ return ret;
+}
+
+const struct backend backend_virtgpu = {
+ .name = "virtio_gpu",
+ .init = virtgpu_init,
+};
diff --git a/virtgpu.h b/virtgpu.h
new file mode 100644
index 0000000..3f68731
--- /dev/null
+++ b/virtgpu.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2021 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <stdint.h>
+
+struct virtgpu_param {
+ uint64_t param;
+ const char *name;
+ uint32_t value;
+};
+
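+// Indices into the params[] table defined in virtgpu.c; keep this list in the same order as
+// the PARAM() entries there.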
+enum virtgpu_param_id {
+ param_3d,
+ param_capset_fix,
+ param_resource_blob,
+ param_host_visible,
+ param_cross_device,
+ param_context_init,
+ param_supported_capset_ids,
+ param_create_guest_handle,
+ param_resource_sync,
+ param_guest_vram,
+ param_max,
+};
diff --git a/virtgpu_cross_domain.c b/virtgpu_cross_domain.c
new file mode 100644
index 0000000..b02a949
--- /dev/null
+++ b/virtgpu_cross_domain.c
@@ -0,0 +1,407 @@
+/*
+ * Copyright 2021 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <errno.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <xf86drm.h>
+
+#include "drv_priv.h"
+#include "external/virtgpu_cross_domain_protocol.h"
+#include "external/virtgpu_drm.h"
+#include "helpers.h"
+#include "util.h"
+#include "virtgpu.h"
+
+#define CAPSET_CROSS_DOMAIN 5
+#define CAPSET_CROSS_FAKE 30
+
+static const uint32_t scanout_render_formats[] = { DRM_FORMAT_ABGR2101010, DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_ARGB2101010, DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGB565, DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XRGB8888 };
+
+static const uint32_t render_formats[] = { DRM_FORMAT_ABGR16161616F };
+
+static const uint32_t texture_only_formats[] = { DRM_FORMAT_R8, DRM_FORMAT_NV12, DRM_FORMAT_P010,
+ DRM_FORMAT_YVU420, DRM_FORMAT_YVU420_ANDROID };
+
+extern struct virtgpu_param params[];
+
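+// Driver-private state: the GEM handle and mapping of the shared query ring, plus a cache of
+// previously computed bo_metadata results.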
+struct cross_domain_private {
+ uint32_t ring_handle;
+ void *ring_addr;
+ struct drv_array *metadata_cache;
+};
+
+static void cross_domain_release_private(struct driver *drv)
+{
+ int ret;
+ struct cross_domain_private *priv = drv->priv;
+ struct drm_gem_close gem_close = { 0 };
+
+ if (priv->ring_addr != MAP_FAILED)
+ munmap(priv->ring_addr, PAGE_SIZE);
+
+ if (priv->ring_handle) {
+ gem_close.handle = priv->ring_handle;
+
+ ret = drmIoctl(drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
+ if (ret) {
+ drv_log("DRM_IOCTL_GEM_CLOSE failed (handle=%x) error %d\n",
+ priv->ring_handle, ret);
+ }
+ }
+
+ drv_array_destroy(priv->metadata_cache);
+ free(priv);
+}
+
+static void add_combinations(struct driver *drv)
+{
+ struct format_metadata metadata;
+
+ // Linear metadata always supported.
+ metadata.tiling = 0;
+ metadata.priority = 1;
+ metadata.modifier = DRM_FORMAT_MOD_LINEAR;
+
+ drv_add_combinations(drv, scanout_render_formats, ARRAY_SIZE(scanout_render_formats),
+ &metadata, BO_USE_RENDER_MASK | BO_USE_SCANOUT);
+
+ drv_add_combinations(drv, render_formats, ARRAY_SIZE(render_formats), &metadata,
+ BO_USE_RENDER_MASK);
+
+ drv_add_combinations(drv, texture_only_formats, ARRAY_SIZE(texture_only_formats), &metadata,
+ BO_USE_TEXTURE_MASK);
+
+ /* Android CTS tests require this. */
+ drv_add_combination(drv, DRM_FORMAT_BGR888, &metadata, BO_USE_SW_MASK);
+
+ drv_modify_combination(drv, DRM_FORMAT_YVU420, &metadata, BO_USE_HW_VIDEO_ENCODER);
+ drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata,
+ BO_USE_HW_VIDEO_DECODER | BO_USE_SCANOUT | BO_USE_HW_VIDEO_ENCODER);
+
+ /*
+ * The R8 format backs Android's HAL_PIXEL_FORMAT_BLOB, which is used for JPEG snapshots
+ * from the camera and for input/output buffers of the hardware decoder/encoder.
+ */
+ drv_modify_combination(drv, DRM_FORMAT_R8, &metadata,
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
+ BO_USE_HW_VIDEO_ENCODER);
+
+ drv_modify_linear_combinations(drv);
+}
+
+static int cross_domain_submit_cmd(struct driver *drv, uint32_t *cmd, uint32_t cmd_size, bool wait)
+{
+ int ret;
+ struct drm_virtgpu_3d_wait wait_3d = { 0 };
+ struct drm_virtgpu_execbuffer exec = { 0 };
+ struct cross_domain_private *priv = drv->priv;
+
+ exec.command = (uint64_t)&cmd[0];
+ exec.size = cmd_size;
+ if (wait) {
+ exec.flags = VIRTGPU_EXECBUF_FENCE_CONTEXT;
+ exec.bo_handles = (uint64_t)&priv->ring_handle;
+ exec.num_bo_handles = 1;
+ }
+
+ ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &exec);
+ if (ret < 0) {
+ drv_log("DRM_IOCTL_VIRTGPU_EXECBUFFER failed with %s\n", strerror(errno));
+ return -EINVAL;
+ }
+
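+ // Block until the host is done with the ring buffer; retry while the wait ioctl reports
+ // EAGAIN.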
+ ret = -EAGAIN;
+ while (ret == -EAGAIN) {
+ wait_3d.handle = priv->ring_handle;
+ ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_WAIT, &wait_3d);
+ }
+
+ if (ret < 0) {
+ drv_log("DRM_IOCTL_VIRTGPU_WAIT failed with %s\n", strerror(errno));
+ return ret;
+ }
+
+ return 0;
+}
+
+static bool metadata_equal(struct bo_metadata *current, struct bo_metadata *cached)
+{
+ if ((current->width == cached->width) && (current->height == cached->height) &&
+ (current->format == cached->format) && (current->use_flags == cached->use_flags))
+ return true;
+ return false;
+}
+
+static int cross_domain_metadata_query(struct driver *drv, struct bo_metadata *metadata)
+{
+ int ret = 0;
+ struct bo_metadata *cached_data = NULL;
+ struct cross_domain_private *priv = drv->priv;
+ struct CrossDomainGetImageRequirements cmd_get_reqs;
+ uint32_t *addr = (uint32_t *)priv->ring_addr;
+ uint32_t plane, remaining_size;
+
+ memset(&cmd_get_reqs, 0, sizeof(cmd_get_reqs));
+ pthread_mutex_lock(&drv->driver_lock);
+ for (uint32_t i = 0; i < drv_array_size(priv->metadata_cache); i++) {
+ cached_data = (struct bo_metadata *)drv_array_at_idx(priv->metadata_cache, i);
+ if (!metadata_equal(metadata, cached_data))
+ continue;
+
+ memcpy(metadata, cached_data, sizeof(*cached_data));
+ goto out_unlock;
+ }
+
+ cmd_get_reqs.hdr.cmd = CROSS_DOMAIN_CMD_GET_IMAGE_REQUIREMENTS;
+ cmd_get_reqs.hdr.cmd_size = sizeof(struct CrossDomainGetImageRequirements);
+
+ cmd_get_reqs.width = metadata->width;
+ cmd_get_reqs.height = metadata->height;
+ cmd_get_reqs.drm_format =
+ (metadata->format == DRM_FORMAT_YVU420_ANDROID) ? DRM_FORMAT_YVU420 : metadata->format;
+ cmd_get_reqs.flags = metadata->use_flags;
+
+ /*
+ * It is possible to avoid blocking other bo_create() calls by unlocking before
+ * cross_domain_submit_cmd() and re-locking afterwards. However, that would require
+ * another scan of the metadata cache before drv_array_append in case two bo_create() calls
+ * do the same metadata query. Until cross_domain functionality is more widely tested,
+ * leave this optimization out for now.
+ */
+ ret = cross_domain_submit_cmd(drv, (uint32_t *)&cmd_get_reqs, cmd_get_reqs.hdr.cmd_size,
+ true);
+ if (ret < 0)
+ goto out_unlock;
+
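+ // Unpack the reply the host wrote into the shared ring: per-plane strides and offsets,
+ // followed by the format modifier, total size, blob id, map info and memory indices.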
+ memcpy(&metadata->strides, &addr[0], 4 * sizeof(uint32_t));
+ memcpy(&metadata->offsets, &addr[4], 4 * sizeof(uint32_t));
+ memcpy(&metadata->format_modifier, &addr[8], sizeof(uint64_t));
+ memcpy(&metadata->total_size, &addr[10], sizeof(uint64_t));
+ memcpy(&metadata->blob_id, &addr[12], sizeof(uint64_t));
+
+ metadata->map_info = addr[14];
+ metadata->memory_idx = addr[16];
+ metadata->physical_device_idx = addr[17];
+
+ remaining_size = metadata->total_size;
+ for (plane = 0; plane < metadata->num_planes; plane++) {
+ if (plane != 0) {
+ metadata->sizes[plane - 1] = metadata->offsets[plane];
+ remaining_size -= metadata->offsets[plane];
+ }
+ }
+
+ metadata->sizes[plane - 1] = remaining_size;
+ drv_array_append(priv->metadata_cache, metadata);
+
+out_unlock:
+ pthread_mutex_unlock(&drv->driver_lock);
+ return ret;
+}
+
+static int cross_domain_init(struct driver *drv)
+{
+ int ret;
+ struct cross_domain_private *priv;
+ struct drm_virtgpu_map map = { 0 };
+ struct drm_virtgpu_get_caps args = { 0 };
+ struct drm_virtgpu_context_init init = { 0 };
+ struct drm_virtgpu_resource_create_blob drm_rc_blob = { 0 };
+ struct drm_virtgpu_context_set_param ctx_set_params[2] = { { 0 } };
+
+ struct CrossDomainInit cmd_init;
+ struct CrossDomainCapabilities cross_domain_caps;
+
+ memset(&cmd_init, 0, sizeof(cmd_init));
+ if (!params[param_context_init].value)
+ return -ENOTSUP;
+
+ if ((params[param_supported_capset_ids].value & (1 << CAPSET_CROSS_DOMAIN)) == 0)
+ return -ENOTSUP;
+
+ if (!params[param_resource_blob].value)
+ return -ENOTSUP;
+
+ // Need zero-copy memory.
+ if (!params[param_host_visible].value && !params[param_create_guest_handle].value)
+ return -ENOTSUP;
+
+ /*
+ * crosvm never reports the fake capset. This is just an extra check to make sure we
+ * don't use the cross-domain context by accident. Developers may remove this for
+ * testing purposes.
+ */
+ if ((params[param_supported_capset_ids].value & (1 << CAPSET_CROSS_FAKE)) == 0)
+ return -ENOTSUP;
+
+ priv = calloc(1, sizeof(*priv));
+ priv->metadata_cache = drv_array_init(sizeof(struct bo_metadata));
+ priv->ring_addr = MAP_FAILED;
+ drv->priv = priv;
+
+ args.cap_set_id = CAPSET_CROSS_DOMAIN;
+ args.size = sizeof(struct CrossDomainCapabilities);
+ args.addr = (unsigned long long)&cross_domain_caps;
+
+ ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &args);
+ if (ret) {
+ drv_log("DRM_IOCTL_VIRTGPU_GET_CAPS failed with %s\n", strerror(errno));
+ goto free_private;
+ }
+
+ // When 3D features are available but the host does not support external memory, fall back
+ // to the virgl minigbm backend. This typically means the guest-side minigbm resource will
+ // be backed by a host OpenGL texture.
+ if (!cross_domain_caps.supports_external_gpu_memory && params[param_3d].value) {
+ ret = -ENOTSUP;
+ goto free_private;
+ }
+
+ // Initialize the cross-domain context. Create one fence context to wait for metadata
+ // queries.
+ ctx_set_params[0].param = VIRTGPU_CONTEXT_PARAM_CAPSET_ID;
+ ctx_set_params[0].value = CAPSET_CROSS_DOMAIN;
+ ctx_set_params[1].param = VIRTGPU_CONTEXT_PARAM_NUM_FENCE_CONTEXTS;
+ ctx_set_params[1].value = 1;
+
+ init.ctx_set_params = (unsigned long long)&ctx_set_params[0];
+ init.num_params = 2;
+ ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_CONTEXT_INIT, &init);
+ if (ret) {
+ drv_log("DRM_IOCTL_VIRTGPU_CONTEXT_INIT failed with %s\n", strerror(errno));
+ goto free_private;
+ }
+
+ // Create a shared ring buffer to read metadata queries.
+ drm_rc_blob.size = PAGE_SIZE;
+ drm_rc_blob.blob_mem = VIRTGPU_BLOB_MEM_GUEST;
+ drm_rc_blob.blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE;
+
+ ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &drm_rc_blob);
+ if (ret < 0) {
+ drv_log("DRM_VIRTGPU_RESOURCE_CREATE_BLOB failed with %s\n", strerror(errno));
+ goto free_private;
+ }
+
+ priv->ring_handle = drm_rc_blob.bo_handle;
+
+ // Map shared ring buffer.
+ map.handle = priv->ring_handle;
+ ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_MAP, &map);
+ if (ret < 0) {
+ drv_log("DRM_IOCTL_VIRTGPU_MAP failed with %s\n", strerror(errno));
+ goto free_private;
+ }
+
+ priv->ring_addr =
+ mmap(0, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, drv->fd, map.offset);
+
+ if (priv->ring_addr == MAP_FAILED) {
+ ret = -errno;
+ drv_log("mmap failed with %s\n", strerror(errno));
+ goto free_private;
+ }
+
+ // Notify the host about the ring buffer.
+ cmd_init.hdr.cmd = CROSS_DOMAIN_CMD_INIT;
+ cmd_init.hdr.cmd_size = sizeof(struct CrossDomainInit);
+ cmd_init.ring_id = drm_rc_blob.res_handle;
+ ret = cross_domain_submit_cmd(drv, (uint32_t *)&cmd_init, cmd_init.hdr.cmd_size, false);
+ if (ret < 0)
+ goto free_private;
+
+ // minigbm bookkeeping
+ add_combinations(drv);
+ return 0;
+
+free_private:
+ cross_domain_release_private(drv);
+ return ret;
+}
+
+static void cross_domain_close(struct driver *drv)
+{
+ cross_domain_release_private(drv);
+}
+
+static int cross_domain_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+ uint64_t use_flags)
+{
+ int ret;
+ uint32_t blob_flags = VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
+ struct drm_virtgpu_resource_create_blob drm_rc_blob = { 0 };
+
+ ret = cross_domain_metadata_query(bo->drv, &bo->meta);
+ if (ret < 0) {
+ drv_log("Metadata query failed\n");
+ return ret;
+ }
+
+ if (use_flags & BO_USE_SW_MASK)
+ blob_flags |= VIRTGPU_BLOB_FLAG_USE_MAPPABLE;
+
+ if (params[param_cross_device].value && (use_flags & BO_USE_NON_GPU_HW))
+ blob_flags |= VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE;
+
+ // It may be possible to have host3d blobs and guest memory handles at the same time, but
+ // for the immediate use cases we will have one or the other. For now, prefer guest memory:
+ // enabling it is more involved (it requires the --udmabuf flag to crosvm), so anyone who
+ // turned it on most likely intends to exercise it.
+ if (params[param_create_guest_handle].value) {
+ drm_rc_blob.blob_mem = VIRTGPU_BLOB_MEM_GUEST;
+ blob_flags |= VIRTGPU_BLOB_FLAG_CREATE_GUEST_HANDLE;
+ } else if (params[param_host_visible].value) {
+ drm_rc_blob.blob_mem = VIRTGPU_BLOB_MEM_HOST3D;
+ }
+
+ drm_rc_blob.size = bo->meta.total_size;
+ drm_rc_blob.blob_flags = blob_flags;
+ drm_rc_blob.blob_id = bo->meta.blob_id;
+
+ ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &drm_rc_blob);
+ if (ret < 0) {
+ drv_log("DRM_VIRTGPU_RESOURCE_CREATE_BLOB failed with %s\n", strerror(errno));
+ return -errno;
+ }
+
+ for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++)
+ bo->handles[plane].u32 = drm_rc_blob.bo_handle;
+
+ return 0;
+}
+
+static void *cross_domain_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
+{
+ int ret;
+ struct drm_virtgpu_map gem_map = { 0 };
+
+ gem_map.handle = bo->handles[0].u32;
+ ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_MAP, &gem_map);
+ if (ret) {
+ drv_log("DRM_IOCTL_VIRTGPU_MAP failed with %s\n", strerror(errno));
+ return MAP_FAILED;
+ }
+
+ vma->length = bo->meta.total_size;
+ return mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
+ gem_map.offset);
+}
+
+const struct backend virtgpu_cross_domain = {
+ .name = "virtgpu_cross_domain",
+ .init = cross_domain_init,
+ .close = cross_domain_close,
+ .bo_create = cross_domain_bo_create,
+ .bo_import = drv_prime_bo_import,
+ .bo_destroy = drv_gem_bo_destroy,
+ .bo_map = cross_domain_bo_map,
+ .bo_unmap = drv_bo_munmap,
+ .resolve_format = drv_resolve_format_helper,
+};
diff --git a/virtio_gpu.c b/virtgpu_virgl.c
index 4c3e7cb..32ca6a1 100644
--- a/virtio_gpu.c
+++ b/virtgpu_virgl.c
@@ -8,7 +8,6 @@
#include <errno.h>
#include <stdatomic.h>
#include <stdint.h>
-#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <xf86drm.h>
@@ -19,42 +18,13 @@
#include "external/virtgpu_drm.h"
#include "helpers.h"
#include "util.h"
+#include "virtgpu.h"
-#ifndef PAGE_SIZE
-#define PAGE_SIZE 0x1000
-#endif
#define PIPE_TEXTURE_2D 2
#define MESA_LLVMPIPE_TILE_ORDER 6
#define MESA_LLVMPIPE_TILE_SIZE (1 << MESA_LLVMPIPE_TILE_ORDER)
-struct feature {
- uint64_t feature;
- const char *name;
- uint32_t enabled;
-};
-
-enum feature_id {
- feat_3d,
- feat_capset_fix,
- feat_resource_blob,
- feat_host_visible,
- feat_host_cross_device,
- feat_max,
-};
-
-#define FEATURE(x) \
- (struct feature) \
- { \
- x, #x, 0 \
- }
-
-static struct feature features[] = {
- FEATURE(VIRTGPU_PARAM_3D_FEATURES), FEATURE(VIRTGPU_PARAM_CAPSET_QUERY_FIX),
- FEATURE(VIRTGPU_PARAM_RESOURCE_BLOB), FEATURE(VIRTGPU_PARAM_HOST_VISIBLE),
- FEATURE(VIRTGPU_PARAM_CROSS_DEVICE),
-};
-
static const uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888,
DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888 };
@@ -68,7 +38,9 @@ static const uint32_t texture_source_formats[] = { DRM_FORMAT_NV12, DRM_FORMAT_N
DRM_FORMAT_R8, DRM_FORMAT_R16,
DRM_FORMAT_RG88, DRM_FORMAT_YVU420_ANDROID };
-struct virtio_gpu_priv {
+extern struct virtgpu_param params[];
+
+struct virgl_priv {
int caps_is_v2;
union virgl_caps caps;
int host_gbm_enabled;
@@ -112,8 +84,8 @@ static uint32_t translate_format(uint32_t drm_fourcc)
}
}
-static bool virtio_gpu_bitmask_supports_format(struct virgl_supported_format_mask *supported,
- uint32_t drm_format)
+static bool virgl_bitmask_supports_format(struct virgl_supported_format_mask *supported,
+ uint32_t drm_format)
{
uint32_t virgl_format = translate_format(drm_format);
if (!virgl_format)
@@ -165,7 +137,7 @@ static bool virtio_gpu_bitmask_supports_format(struct virgl_supported_format_mas
// Additional note: the V-plane is not placed to the right of the U-plane due to some
// observed failures in media framework code which assumes the V-plane is not
// "row-interlaced" with the U-plane.
-static void virtio_gpu_get_emulated_metadata(const struct bo *bo, struct bo_metadata *metadata)
+static void virgl_get_emulated_metadata(const struct bo *bo, struct bo_metadata *metadata)
{
uint32_t y_plane_height;
uint32_t c_plane_height;
@@ -235,9 +207,9 @@ struct virtio_transfers_params {
struct rectangle xfer_boxes[DRV_MAX_PLANES];
};
-static void virtio_gpu_get_emulated_transfers_params(const struct bo *bo,
- const struct rectangle *transfer_box,
- struct virtio_transfers_params *xfer_params)
+static void virgl_get_emulated_transfers_params(const struct bo *bo,
+ const struct rectangle *transfer_box,
+ struct virtio_transfers_params *xfer_params)
{
uint32_t y_plane_height;
uint32_t c_plane_height;
@@ -245,7 +217,7 @@ static void virtio_gpu_get_emulated_transfers_params(const struct bo *bo,
if (transfer_box->x == 0 && transfer_box->y == 0 && transfer_box->width == bo->meta.width &&
transfer_box->height == bo->meta.height) {
- virtio_gpu_get_emulated_metadata(bo, &emulated_metadata);
+ virgl_get_emulated_metadata(bo, &emulated_metadata);
xfer_params->xfers_needed = 1;
xfer_params->xfer_boxes[0].x = 0;
@@ -308,24 +280,24 @@ static void virtio_gpu_get_emulated_transfers_params(const struct bo *bo,
}
}
-static bool virtio_gpu_supports_combination_natively(struct driver *drv, uint32_t drm_format,
- uint64_t use_flags)
+static bool virgl_supports_combination_natively(struct driver *drv, uint32_t drm_format,
+ uint64_t use_flags)
{
- struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;
+ struct virgl_priv *priv = (struct virgl_priv *)drv->priv;
if (priv->caps.max_version == 0)
return true;
if ((use_flags & BO_USE_RENDERING) &&
- !virtio_gpu_bitmask_supports_format(&priv->caps.v1.render, drm_format))
+ !virgl_bitmask_supports_format(&priv->caps.v1.render, drm_format))
return false;
if ((use_flags & BO_USE_TEXTURE) &&
- !virtio_gpu_bitmask_supports_format(&priv->caps.v1.sampler, drm_format))
+ !virgl_bitmask_supports_format(&priv->caps.v1.sampler, drm_format))
return false;
if ((use_flags & BO_USE_SCANOUT) && priv->caps_is_v2 &&
- !virtio_gpu_bitmask_supports_format(&priv->caps.v2.scanout, drm_format))
+ !virgl_bitmask_supports_format(&priv->caps.v2.scanout, drm_format))
return false;
return true;
@@ -334,11 +306,10 @@ static bool virtio_gpu_supports_combination_natively(struct driver *drv, uint32_
// For virtio backends that do not support formats natively (e.g. multi-planar formats are not
// supported in virglrenderer when gbm is unavailable on the host machine), whether or not the
// format and usage combination can be handled as a blob (byte buffer).
-static bool virtio_gpu_supports_combination_through_emulation(struct driver *drv,
- uint32_t drm_format,
- uint64_t use_flags)
+static bool virgl_supports_combination_through_emulation(struct driver *drv, uint32_t drm_format,
+ uint64_t use_flags)
{
- struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;
+ struct virgl_priv *priv = (struct virgl_priv *)drv->priv;
// Only enable emulation on non-gbm virtio backends.
if (priv->host_gbm_enabled)
@@ -347,7 +318,7 @@ static bool virtio_gpu_supports_combination_through_emulation(struct driver *drv
if (use_flags & (BO_USE_RENDERING | BO_USE_SCANOUT))
return false;
- if (!virtio_gpu_supports_combination_natively(drv, DRM_FORMAT_R8, use_flags))
+ if (!virgl_supports_combination_natively(drv, DRM_FORMAT_R8, use_flags))
return false;
return drm_format == DRM_FORMAT_NV12 || drm_format == DRM_FORMAT_NV21 ||
@@ -356,21 +327,20 @@ static bool virtio_gpu_supports_combination_through_emulation(struct driver *drv
// Adds the given buffer combination to the list of supported buffer combinations if the
// combination is supported by the virtio backend.
-static void virtio_gpu_add_combination(struct driver *drv, uint32_t drm_format,
- struct format_metadata *metadata, uint64_t use_flags)
+static void virgl_add_combination(struct driver *drv, uint32_t drm_format,
+ struct format_metadata *metadata, uint64_t use_flags)
{
- struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;
+ struct virgl_priv *priv = (struct virgl_priv *)drv->priv;
- if (features[feat_3d].enabled && priv->caps.max_version >= 1) {
+ if (params[param_3d].value && priv->caps.max_version >= 1) {
if ((use_flags & BO_USE_SCANOUT) && priv->caps_is_v2 &&
- !virtio_gpu_supports_combination_natively(drv, drm_format, use_flags)) {
+ !virgl_supports_combination_natively(drv, drm_format, use_flags)) {
drv_log("Scanout format: %d\n", drm_format);
use_flags &= ~BO_USE_SCANOUT;
}
- if (!virtio_gpu_supports_combination_natively(drv, drm_format, use_flags) &&
- !virtio_gpu_supports_combination_through_emulation(drv, drm_format,
- use_flags)) {
+ if (!virgl_supports_combination_natively(drv, drm_format, use_flags) &&
+ !virgl_supports_combination_through_emulation(drv, drm_format, use_flags)) {
drv_log("Skipping unsupported combination format:%d\n", drm_format);
return;
}
@@ -381,14 +351,14 @@ static void virtio_gpu_add_combination(struct driver *drv, uint32_t drm_format,
// Adds each given buffer combination to the list of supported buffer combinations if the
// combination supported by the virtio backend.
-static void virtio_gpu_add_combinations(struct driver *drv, const uint32_t *drm_formats,
- uint32_t num_formats, struct format_metadata *metadata,
- uint64_t use_flags)
+static void virgl_add_combinations(struct driver *drv, const uint32_t *drm_formats,
+ uint32_t num_formats, struct format_metadata *metadata,
+ uint64_t use_flags)
{
uint32_t i;
for (i = 0; i < num_formats; i++)
- virtio_gpu_add_combination(drv, drm_formats[i], metadata, use_flags);
+ virgl_add_combination(drv, drm_formats[i], metadata, use_flags);
}
static int virtio_dumb_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
@@ -464,8 +434,8 @@ static uint32_t compute_virgl_bind_flags(uint64_t use_flags, uint32_t format)
return bind;
}
-static int virtio_virgl_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
- uint64_t use_flags)
+static int virgl_3d_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+ uint64_t use_flags)
{
int ret;
size_t i;
@@ -473,14 +443,13 @@ static int virtio_virgl_bo_create(struct bo *bo, uint32_t width, uint32_t height
struct drm_virtgpu_resource_create res_create = { 0 };
struct bo_metadata emulated_metadata;
- if (virtio_gpu_supports_combination_natively(bo->drv, format, use_flags)) {
+ if (virgl_supports_combination_natively(bo->drv, format, use_flags)) {
stride = drv_stride_from_format(format, width, 0);
drv_bo_from_format(bo, stride, height, format);
} else {
- assert(
- virtio_gpu_supports_combination_through_emulation(bo->drv, format, use_flags));
+ assert(virgl_supports_combination_through_emulation(bo->drv, format, use_flags));
- virtio_gpu_get_emulated_metadata(bo, &emulated_metadata);
+ virgl_get_emulated_metadata(bo, &emulated_metadata);
format = emulated_metadata.format;
width = emulated_metadata.width;
@@ -526,7 +495,7 @@ static int virtio_virgl_bo_create(struct bo *bo, uint32_t width, uint32_t height
return 0;
}
-static void *virtio_virgl_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
+static void *virgl_3d_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
int ret;
struct drm_virtgpu_map gem_map = { 0 };
@@ -543,14 +512,14 @@ static void *virtio_virgl_bo_map(struct bo *bo, struct vma *vma, size_t plane, u
gem_map.offset);
}
-static int virtio_gpu_get_caps(struct driver *drv, union virgl_caps *caps, int *caps_is_v2)
+static int virgl_get_caps(struct driver *drv, union virgl_caps *caps, int *caps_is_v2)
{
int ret;
struct drm_virtgpu_get_caps cap_args = { 0 };
*caps_is_v2 = 0;
cap_args.addr = (unsigned long long)caps;
- if (features[feat_capset_fix].enabled) {
+ if (params[param_capset_fix].value) {
*caps_is_v2 = 1;
cap_args.cap_set_id = 2;
cap_args.size = sizeof(union virgl_caps);
@@ -576,79 +545,73 @@ static int virtio_gpu_get_caps(struct driver *drv, union virgl_caps *caps, int *
return ret;
}
-static void virtio_gpu_init_features_and_caps(struct driver *drv)
+static void virgl_init_params_and_caps(struct driver *drv)
{
- struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;
-
- for (uint32_t i = 0; i < ARRAY_SIZE(features); i++) {
- struct drm_virtgpu_getparam params = { 0 };
-
- params.param = features[i].feature;
- params.value = (uint64_t)(uintptr_t)&features[i].enabled;
- int ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GETPARAM, &params);
- if (ret)
- drv_log("DRM_IOCTL_VIRTGPU_GET_PARAM failed with %s\n", strerror(errno));
+ struct virgl_priv *priv = (struct virgl_priv *)drv->priv;
+ if (params[param_3d].value) {
+ virgl_get_caps(drv, &priv->caps, &priv->caps_is_v2);
+
+ // We use two criteria to determine whether minigbm is used on the host for
+ // swapchain allocations.
+ //
+ // (1) Host minigbm is only available via virglrenderer, and only virglrenderer
+ // advertises capabilities.
+ // (2) Only host minigbm doesn't emulate YUV formats. Checking this is a bit of a
+ // proxy, but it works.
+ priv->host_gbm_enabled =
+ priv->caps.max_version > 0 &&
+ virgl_supports_combination_natively(drv, DRM_FORMAT_NV12, BO_USE_TEXTURE);
}
-
- if (features[feat_3d].enabled)
- virtio_gpu_get_caps(drv, &priv->caps, &priv->caps_is_v2);
-
- priv->host_gbm_enabled =
- // 2D mode does not create resources on the host so it does not enable host gbm.
- features[feat_3d].enabled &&
- // Gfxstream does not enable host gbm. Virglrenderer sets caps while Gfxstream does not
- // so filter out if we are running with Gfxstream.
- priv->caps.max_version > 0 &&
- // Virglrenderer only supports multi-planar formats through host gbm.
- virtio_gpu_supports_combination_natively(drv, DRM_FORMAT_NV12, BO_USE_TEXTURE);
}
-static int virtio_gpu_init(struct driver *drv)
+static int virgl_init(struct driver *drv)
{
- struct virtio_gpu_priv *priv;
+ struct virgl_priv *priv;
priv = calloc(1, sizeof(*priv));
drv->priv = priv;
- virtio_gpu_init_features_and_caps(drv);
+ virgl_init_params_and_caps(drv);
- if (features[feat_3d].enabled) {
+ if (params[param_3d].value) {
/* This doesn't mean host can scanout everything, it just means host
* hypervisor can show it. */
- virtio_gpu_add_combinations(drv, render_target_formats,
- ARRAY_SIZE(render_target_formats), &LINEAR_METADATA,
- BO_USE_RENDER_MASK | BO_USE_SCANOUT);
- virtio_gpu_add_combinations(drv, texture_source_formats,
- ARRAY_SIZE(texture_source_formats), &LINEAR_METADATA,
- BO_USE_TEXTURE_MASK);
+ virgl_add_combinations(drv, render_target_formats,
+ ARRAY_SIZE(render_target_formats), &LINEAR_METADATA,
+ BO_USE_RENDER_MASK | BO_USE_SCANOUT);
+ virgl_add_combinations(drv, texture_source_formats,
+ ARRAY_SIZE(texture_source_formats), &LINEAR_METADATA,
+ BO_USE_TEXTURE_MASK);
} else {
/* Virtio primary plane only allows this format. */
- virtio_gpu_add_combination(drv, DRM_FORMAT_XRGB8888, &LINEAR_METADATA,
- BO_USE_RENDER_MASK | BO_USE_SCANOUT);
+ virgl_add_combination(drv, DRM_FORMAT_XRGB8888, &LINEAR_METADATA,
+ BO_USE_RENDER_MASK | BO_USE_SCANOUT);
/* Virtio cursor plane only allows this format and Chrome cannot live without
* ARGB888 renderable format. */
- virtio_gpu_add_combination(drv, DRM_FORMAT_ARGB8888, &LINEAR_METADATA,
- BO_USE_RENDER_MASK | BO_USE_CURSOR);
+ virgl_add_combination(drv, DRM_FORMAT_ARGB8888, &LINEAR_METADATA,
+ BO_USE_RENDER_MASK | BO_USE_CURSOR);
/* Android needs more, but they cannot be bound as scanouts anymore after
* "drm/virtio: fix DRM_FORMAT_* handling" */
- virtio_gpu_add_combinations(drv, render_target_formats,
- ARRAY_SIZE(render_target_formats), &LINEAR_METADATA,
- BO_USE_RENDER_MASK);
- virtio_gpu_add_combinations(drv, dumb_texture_source_formats,
- ARRAY_SIZE(dumb_texture_source_formats),
- &LINEAR_METADATA, BO_USE_TEXTURE_MASK);
- virtio_gpu_add_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
- BO_USE_SW_MASK | BO_USE_LINEAR);
- virtio_gpu_add_combination(drv, DRM_FORMAT_NV21, &LINEAR_METADATA,
- BO_USE_SW_MASK | BO_USE_LINEAR);
+ virgl_add_combinations(drv, render_target_formats,
+ ARRAY_SIZE(render_target_formats), &LINEAR_METADATA,
+ BO_USE_RENDER_MASK);
+ virgl_add_combinations(drv, dumb_texture_source_formats,
+ ARRAY_SIZE(dumb_texture_source_formats), &LINEAR_METADATA,
+ BO_USE_TEXTURE_MASK);
+ virgl_add_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
+ BO_USE_SW_MASK | BO_USE_LINEAR);
+ virgl_add_combination(drv, DRM_FORMAT_NV21, &LINEAR_METADATA,
+ BO_USE_SW_MASK | BO_USE_LINEAR);
}
/* Android CTS tests require this. */
- virtio_gpu_add_combination(drv, DRM_FORMAT_RGB888, &LINEAR_METADATA, BO_USE_SW_MASK);
- virtio_gpu_add_combination(drv, DRM_FORMAT_BGR888, &LINEAR_METADATA, BO_USE_SW_MASK);
- virtio_gpu_add_combination(drv, DRM_FORMAT_ABGR16161616F, &LINEAR_METADATA,
+ virgl_add_combination(drv, DRM_FORMAT_RGB888, &LINEAR_METADATA, BO_USE_SW_MASK);
+ virgl_add_combination(drv, DRM_FORMAT_BGR888, &LINEAR_METADATA, BO_USE_SW_MASK);
+ virgl_add_combination(drv, DRM_FORMAT_ABGR16161616F, &LINEAR_METADATA,
BO_USE_SW_MASK | BO_USE_TEXTURE_MASK);
- virtio_gpu_add_combination(drv, DRM_FORMAT_P010, &LINEAR_METADATA,
+ virgl_add_combination(drv, DRM_FORMAT_ABGR2101010, &LINEAR_METADATA,
+ BO_USE_SW_MASK | BO_USE_TEXTURE_MASK);
+ virgl_add_combination(drv, DRM_FORMAT_P010, &LINEAR_METADATA,
BO_USE_SW_MASK | BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);
drv_modify_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
@@ -682,20 +645,20 @@ static int virtio_gpu_init(struct driver *drv)
return drv_modify_linear_combinations(drv);
}
-static void virtio_gpu_close(struct driver *drv)
+static void virgl_close(struct driver *drv)
{
free(drv->priv);
drv->priv = NULL;
}
-static int virtio_gpu_bo_create_blob(struct driver *drv, struct bo *bo)
+static int virgl_bo_create_blob(struct driver *drv, struct bo *bo)
{
int ret;
uint32_t stride;
uint32_t cur_blob_id;
uint32_t cmd[VIRGL_PIPE_RES_CREATE_SIZE + 1] = { 0 };
struct drm_virtgpu_resource_create_blob drm_rc_blob = { 0 };
- struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;
+ struct virgl_priv *priv = (struct virgl_priv *)drv->priv;
uint32_t blob_flags = VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
if (bo->meta.use_flags & BO_USE_SW_MASK)
@@ -740,7 +703,7 @@ static int virtio_gpu_bo_create_blob(struct driver *drv, struct bo *bo)
static bool should_use_blob(struct driver *drv, uint32_t format, uint64_t use_flags)
{
- struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;
+ struct virgl_priv *priv = (struct virgl_priv *)drv->priv;
// TODO(gurchetansingh): remove once all minigbm users are blob-safe
#ifndef VIRTIO_GPU_NEXT
@@ -770,46 +733,46 @@ static bool should_use_blob(struct driver *drv, uint32_t format, uint64_t use_fl
}
}
-static int virtio_gpu_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
- uint64_t use_flags)
+static int virgl_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+ uint64_t use_flags)
{
- if (features[feat_resource_blob].enabled && features[feat_host_visible].enabled &&
+ if (params[param_resource_blob].value && params[param_host_visible].value &&
should_use_blob(bo->drv, format, use_flags))
- return virtio_gpu_bo_create_blob(bo->drv, bo);
+ return virgl_bo_create_blob(bo->drv, bo);
- if (features[feat_3d].enabled)
- return virtio_virgl_bo_create(bo, width, height, format, use_flags);
+ if (params[param_3d].value)
+ return virgl_3d_bo_create(bo, width, height, format, use_flags);
else
return virtio_dumb_bo_create(bo, width, height, format, use_flags);
}
-static int virtio_gpu_bo_destroy(struct bo *bo)
+static int virgl_bo_destroy(struct bo *bo)
{
- if (features[feat_3d].enabled)
+ if (params[param_3d].value)
return drv_gem_bo_destroy(bo);
else
return drv_dumb_bo_destroy(bo);
}
-static void *virtio_gpu_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
+static void *virgl_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
- if (features[feat_3d].enabled)
- return virtio_virgl_bo_map(bo, vma, plane, map_flags);
+ if (params[param_3d].value)
+ return virgl_3d_bo_map(bo, vma, plane, map_flags);
else
return drv_dumb_bo_map(bo, vma, plane, map_flags);
}
-static int virtio_gpu_bo_invalidate(struct bo *bo, struct mapping *mapping)
+static int virgl_bo_invalidate(struct bo *bo, struct mapping *mapping)
{
int ret;
size_t i;
struct drm_virtgpu_3d_transfer_from_host xfer = { 0 };
struct drm_virtgpu_3d_wait waitcmd = { 0 };
struct virtio_transfers_params xfer_params;
- struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
+ struct virgl_priv *priv = (struct virgl_priv *)bo->drv->priv;
uint64_t host_write_flags;
- if (!features[feat_3d].enabled)
+ if (!params[param_3d].value)
return 0;
// Invalidate is only necessary if the host writes to the buffer. The encoder and
@@ -824,8 +787,7 @@ static int virtio_gpu_bo_invalidate(struct bo *bo, struct mapping *mapping)
if ((bo->meta.use_flags & host_write_flags) == 0)
return 0;
- if (features[feat_resource_blob].enabled &&
- (bo->meta.tiling & VIRTGPU_BLOB_FLAG_USE_MAPPABLE))
+ if (params[param_resource_blob].value && (bo->meta.tiling & VIRTGPU_BLOB_FLAG_USE_MAPPABLE))
return 0;
xfer.bo_handle = mapping->vma->handle;
@@ -844,7 +806,7 @@ static int virtio_gpu_bo_invalidate(struct bo *bo, struct mapping *mapping)
if ((bo->meta.use_flags & BO_USE_RENDERING) == 0) {
// Unfortunately, the kernel doesn't actually pass the guest layer_stride
- // and guest stride to the host (compare virtio_gpu.h and virtgpu_drm.h).
+ // and guest stride to the host (compare virgl.h and virtgpu_drm.h).
// For gbm based resources, we can work around this by using the level field
// to pass the stride to virglrenderer's gbm transfer code. However, we need
// to avoid doing this for resources which don't rely on that transfer code,
@@ -854,15 +816,14 @@ static int virtio_gpu_bo_invalidate(struct bo *bo, struct mapping *mapping)
xfer.level = bo->meta.strides[0];
}
- if (virtio_gpu_supports_combination_natively(bo->drv, bo->meta.format,
- bo->meta.use_flags)) {
+ if (virgl_supports_combination_natively(bo->drv, bo->meta.format, bo->meta.use_flags)) {
xfer_params.xfers_needed = 1;
xfer_params.xfer_boxes[0] = mapping->rect;
} else {
- assert(virtio_gpu_supports_combination_through_emulation(bo->drv, bo->meta.format,
- bo->meta.use_flags));
+ assert(virgl_supports_combination_through_emulation(bo->drv, bo->meta.format,
+ bo->meta.use_flags));
- virtio_gpu_get_emulated_transfers_params(bo, &mapping->rect, &xfer_params);
+ virgl_get_emulated_transfers_params(bo, &mapping->rect, &xfer_params);
}
for (i = 0; i < xfer_params.xfers_needed; i++) {
@@ -893,23 +854,22 @@ static int virtio_gpu_bo_invalidate(struct bo *bo, struct mapping *mapping)
return 0;
}
-static int virtio_gpu_bo_flush(struct bo *bo, struct mapping *mapping)
+static int virgl_bo_flush(struct bo *bo, struct mapping *mapping)
{
int ret;
size_t i;
struct drm_virtgpu_3d_transfer_to_host xfer = { 0 };
struct drm_virtgpu_3d_wait waitcmd = { 0 };
struct virtio_transfers_params xfer_params;
- struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
+ struct virgl_priv *priv = (struct virgl_priv *)bo->drv->priv;
- if (!features[feat_3d].enabled)
+ if (!params[param_3d].value)
return 0;
if (!(mapping->vma->map_flags & BO_MAP_WRITE))
return 0;
- if (features[feat_resource_blob].enabled &&
- (bo->meta.tiling & VIRTGPU_BLOB_FLAG_USE_MAPPABLE))
+ if (params[param_resource_blob].value && (bo->meta.tiling & VIRTGPU_BLOB_FLAG_USE_MAPPABLE))
return 0;
xfer.bo_handle = mapping->vma->handle;
@@ -927,20 +887,19 @@ static int virtio_gpu_bo_flush(struct bo *bo, struct mapping *mapping)
}
// Unfortunately, the kernel doesn't actually pass the guest layer_stride and
- // guest stride to the host (compare virtio_gpu.h and virtgpu_drm.h). We can use
+ // guest stride to the host (compare virgl.h and virtgpu_drm.h). We can use
// the level to work around this.
if (priv->host_gbm_enabled)
xfer.level = bo->meta.strides[0];
- if (virtio_gpu_supports_combination_natively(bo->drv, bo->meta.format,
- bo->meta.use_flags)) {
+ if (virgl_supports_combination_natively(bo->drv, bo->meta.format, bo->meta.use_flags)) {
xfer_params.xfers_needed = 1;
xfer_params.xfer_boxes[0] = mapping->rect;
} else {
- assert(virtio_gpu_supports_combination_through_emulation(bo->drv, bo->meta.format,
- bo->meta.use_flags));
+ assert(virgl_supports_combination_through_emulation(bo->drv, bo->meta.format,
+ bo->meta.use_flags));
- virtio_gpu_get_emulated_transfers_params(bo, &mapping->rect, &xfer_params);
+ virgl_get_emulated_transfers_params(bo, &mapping->rect, &xfer_params);
}
for (i = 0; i < xfer_params.xfers_needed; i++) {
@@ -975,7 +934,7 @@ static int virtio_gpu_bo_flush(struct bo *bo, struct mapping *mapping)
return 0;
}
-static uint32_t virtio_gpu_resolve_format(struct driver *drv, uint32_t format, uint64_t use_flags)
+static uint32_t virgl_resolve_format(struct driver *drv, uint32_t format, uint64_t use_flags)
{
switch (format) {
case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
@@ -989,7 +948,7 @@ static uint32_t virtio_gpu_resolve_format(struct driver *drv, uint32_t format, u
* All of our host drivers prefer NV12 as their flexible media format.
* If that changes, this will need to be modified.
*/
- if (features[feat_3d].enabled)
+ if (params[param_3d].value)
return DRM_FORMAT_NV12;
else
return DRM_FORMAT_YVU420_ANDROID;
@@ -997,14 +956,13 @@ static uint32_t virtio_gpu_resolve_format(struct driver *drv, uint32_t format, u
return format;
}
}
-
-static int virtio_gpu_resource_info(struct bo *bo, uint32_t strides[DRV_MAX_PLANES],
- uint32_t offsets[DRV_MAX_PLANES])
+static int virgl_resource_info(struct bo *bo, uint32_t strides[DRV_MAX_PLANES],
+ uint32_t offsets[DRV_MAX_PLANES], uint64_t *format_modifier)
{
int ret;
struct drm_virtgpu_resource_info_cros res_info = { 0 };
- if (!features[feat_3d].enabled)
+ if (!params[param_3d].value)
return 0;
res_info.bo_handle = bo->handles[0].u32;
@@ -1025,21 +983,20 @@ static int virtio_gpu_resource_info(struct bo *bo, uint32_t strides[DRV_MAX_PLAN
offsets[plane] = res_info.offsets[plane];
}
}
+ *format_modifier = res_info.format_modifier;
return 0;
}
-const struct backend backend_virtio_gpu = {
- .name = "virtio_gpu",
- .init = virtio_gpu_init,
- .close = virtio_gpu_close,
- .bo_create = virtio_gpu_bo_create,
- .bo_destroy = virtio_gpu_bo_destroy,
- .bo_import = drv_prime_bo_import,
- .bo_map = virtio_gpu_bo_map,
- .bo_unmap = drv_bo_munmap,
- .bo_invalidate = virtio_gpu_bo_invalidate,
- .bo_flush = virtio_gpu_bo_flush,
- .resolve_format = virtio_gpu_resolve_format,
- .resource_info = virtio_gpu_resource_info,
-};
+const struct backend virtgpu_virgl = {
+ .name = "virtgpu_virgl",
+ .init = virgl_init,
+ .close = virgl_close,
+ .bo_create = virgl_bo_create,
+ .bo_destroy = virgl_bo_destroy,
+ .bo_import = drv_prime_bo_import,
+ .bo_map = virgl_bo_map,
+ .bo_unmap = drv_bo_munmap,
+ .bo_invalidate = virgl_bo_invalidate,
+ .bo_flush = virgl_bo_flush,
+ .resolve_format = virgl_resolve_format,
+ .resource_info = virgl_resource_info,
+};