diff options
author | Lingfeng Yang <lfy@google.com> | 2020-04-16 12:01:25 -0700 |
---|---|---|
committer | Alistair Delva <adelva@google.com> | 2020-04-22 16:27:11 -0700 |
commit | 53ba619e2f5b3d60379953433860517c3905a09b (patch) | |
tree | 25585060d9c791d50dc46b780006085b9217e33b | |
parent | 2c325456fed41d60e3877290bb367ee16c6f562a (diff) | |
download | cuttlefish-modules-53ba619e2f5b3d60379953433860517c3905a09b.tar.gz |
FROMLIST: drm/virtio: Support virtgpu exported resources
Add support for UUID-based resource sharing mechanism to virtgpu. This
implements the new virtgpu commands and hooks them up to dma-buf's
get_uuid callback.
Signed-off-by: David Stevens <stevensd@chromium.org>
(am from https://patchwork.kernel.org/patch/11431393/)
Fixes:
- Shove virtio_gpu_object in data_buf since objs isn't available
- Remove virtio specific dma_buf_ops, but make sure that
virtio_gpu_dma_buf_to_handle continues to work. This effectively
reverts b7f70332c5c52f614641a33418b9b2a16204f4e6.
BUG=b:136269340
TEST=boot ARCVM and launch play store
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/kernel/+/2059087
Reviewed-by: Gurchetan Singh <gurchetansingh@chromium.org>
Commit-Queue: David Stevens <stevensd@chromium.org>
Tested-by: David Stevens <stevensd@chromium.org>
Bug: 153580313
Signed-off-by: Lingfeng Yang <lfy@google.com>
Change-Id: I621b2a123679320da153f665b435cd3810fb002e
-rw-r--r-- | virtio_gpu/virtgpu_drv.c | 4 | ||||
-rw-r--r-- | virtio_gpu/virtgpu_drv.h | 17 | ||||
-rw-r--r-- | virtio_gpu/virtgpu_kms.c | 5 | ||||
-rw-r--r-- | virtio_gpu/virtgpu_object.c | 4 | ||||
-rw-r--r-- | virtio_gpu/virtgpu_prime.c | 47 | ||||
-rw-r--r-- | virtio_gpu/virtgpu_vq.c | 61 |
6 files changed, 111 insertions(+), 27 deletions(-)
diff --git a/virtio_gpu/virtgpu_drv.c b/virtio_gpu/virtgpu_drv.c index 4b5107c..eba4a46 100644 --- a/virtio_gpu/virtgpu_drv.c +++ b/virtio_gpu/virtgpu_drv.c @@ -179,6 +179,7 @@ static unsigned int features[] = { VIRTIO_GPU_F_VIRGL, #endif VIRTIO_GPU_F_EDID, + VIRTIO_GPU_F_RESOURCE_UUID, VIRTIO_GPU_F_RESOURCE_V2, VIRTIO_GPU_F_SHARED_GUEST, VIRTIO_GPU_F_HOST_COHERENT, @@ -229,12 +230,13 @@ static struct drm_driver driver = { .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_export = virtgpu_gem_prime_export, - .gem_prime_import = virtgpu_gem_prime_import, + .gem_prime_import = drm_gem_prime_import, .gem_prime_get_sg_table = virtgpu_gem_prime_get_sg_table, .gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table, .gem_prime_vmap = virtgpu_gem_prime_vmap, .gem_prime_vunmap = virtgpu_gem_prime_vunmap, .gem_prime_mmap = virtgpu_gem_prime_mmap, + .gem_prime_get_uuid = virtgpu_gem_prime_get_uuid, .gem_free_object_unlocked = virtio_gpu_gem_free_object, .gem_open_object = virtio_gpu_gem_object_open, diff --git a/virtio_gpu/virtgpu_drv.h b/virtio_gpu/virtgpu_drv.h index 2fabee4..fbf9f39 100644 --- a/virtio_gpu/virtgpu_drv.h +++ b/virtio_gpu/virtgpu_drv.h @@ -50,6 +50,10 @@ #define DRIVER_MINOR 1 #define DRIVER_PATCHLEVEL 0 +#define UUID_INITIALIZING 0 +#define UUID_INITIALIZED 1 +#define UUID_INITIALIZATION_FAILED 2 + struct virtio_gpu_object_params { uint32_t format; uint32_t width; @@ -93,6 +97,9 @@ struct virtio_gpu_object { bool created; enum virtio_gpu_memory_type guest_memory_type; enum virtio_gpu_caching_type caching_type; + + int uuid_state; + uuid_t uuid; }; #define gem_to_virtio_gpu_obj(gobj) \ container_of((gobj), struct virtio_gpu_object, gem_base) @@ -244,6 +251,8 @@ struct virtio_gpu_device { struct idr request_idr; spinlock_t request_idr_lock; + + spinlock_t resource_export_lock; }; struct virtio_gpu_fpriv { @@ -383,6 +392,10 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work); void 
virtio_gpu_dequeue_cursor_func(struct work_struct *work); void virtio_gpu_dequeue_fence_func(struct work_struct *work); +int +virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object *bo); + /* virtio_gpu_display.c */ int virtio_gpu_framebuffer_init(struct drm_device *dev, struct virtio_gpu_framebuffer *vgfb, @@ -430,8 +443,6 @@ extern const struct dma_buf_ops virtgpu_dmabuf_ops; struct dma_buf *virtgpu_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags); -struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev, - struct dma_buf *buf); struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj); struct drm_gem_object *virtgpu_gem_prime_import_sg_table( struct drm_device *dev, struct dma_buf_attachment *attach, @@ -440,6 +451,8 @@ void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj); void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); int virtgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); +int virtgpu_gem_prime_get_uuid(struct drm_gem_object *obj, + uuid_t *uuid); static inline struct virtio_gpu_object* virtio_gpu_object_ref(struct virtio_gpu_object *bo) diff --git a/virtio_gpu/virtgpu_kms.c b/virtio_gpu/virtgpu_kms.c index eab2df8..9b9535d 100644 --- a/virtio_gpu/virtgpu_kms.c +++ b/virtio_gpu/virtgpu_kms.c @@ -134,6 +134,7 @@ int virtio_gpu_init(struct drm_device *dev) spin_lock_init(&vgdev->display_info_lock); spin_lock_init(&vgdev->request_idr_lock); + spin_lock_init(&vgdev->resource_export_lock); ida_init(&vgdev->ctx_id_ida); ida_init(&vgdev->resource_ida); idr_init(&vgdev->request_idr); @@ -160,6 +161,10 @@ int virtio_gpu_init(struct drm_device *dev) vgdev->has_edid = true; DRM_INFO("EDID support available.\n"); } + if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_UUID)) { + vgdev->has_resource_assign_uuid = true; + DRM_INFO("Virtio cross device support available.\n"); + } if 
(virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_V2)) { if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_HOST_COHERENT)) { diff --git a/virtio_gpu/virtgpu_object.c b/virtio_gpu/virtgpu_object.c index 17d297b..3a3ac9d 100644 --- a/virtio_gpu/virtgpu_object.c +++ b/virtio_gpu/virtgpu_object.c @@ -27,6 +27,7 @@ #include <drm/ttm/ttm_execbuf_util.h> #include <linux/dma-buf.h> +#include <linux/uuid.h> #include "virtgpu_drv.h" static int virtio_gpu_virglrenderer_workaround = 1; @@ -285,8 +286,9 @@ int virtio_gpu_dma_buf_to_handle(struct dma_buf *dma_buf, bool no_wait, { struct virtio_gpu_object *qobj; struct virtio_gpu_device *vgdev; + uuid_t uuid; - if (dma_buf->ops != &virtgpu_dmabuf_ops) + if (dma_buf_get_uuid(dma_buf, &uuid) != 0) return -EINVAL; qobj = gem_to_virtio_gpu_obj(dma_buf->priv); diff --git a/virtio_gpu/virtgpu_prime.c b/virtio_gpu/virtgpu_prime.c index 3b43c4c..ddcc5cd 100644 --- a/virtio_gpu/virtgpu_prime.c +++ b/virtio_gpu/virtgpu_prime.c @@ -41,37 +41,38 @@ const struct dma_buf_ops virtgpu_dmabuf_ops = { .vunmap = drm_gem_dmabuf_vunmap, }; -struct dma_buf *virtgpu_gem_prime_export(struct drm_device *dev, - struct drm_gem_object *obj, - int flags) +int virtgpu_gem_prime_get_uuid(struct drm_gem_object *obj, + uuid_t *uuid) { - struct dma_buf *buf; + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); + struct virtio_gpu_device *vgdev = obj->dev->dev_private; + + wait_event(vgdev->resp_wq, bo->uuid_state != UUID_INITIALIZING); + if (bo->uuid_state != UUID_INITIALIZED) + return -ENODEV; - buf = drm_gem_prime_export(dev, obj, flags); - if (!IS_ERR(buf)) - buf->ops = &virtgpu_dmabuf_ops; + uuid_copy(uuid, &bo->uuid); - return buf; + return 0; } -struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev, - struct dma_buf *buf) +struct dma_buf *virtgpu_gem_prime_export(struct drm_device *dev, + struct drm_gem_object *obj, + int flags) { - struct drm_gem_object *obj; - - if (buf->ops == &virtgpu_dmabuf_ops) { - obj = buf->priv; - if 
(obj->dev == dev) { - /* - * Importing dmabuf exported from our own gem increases - * refcount on gem itself instead of f_count of dmabuf. - */ - drm_gem_object_get(obj); - return obj; - } + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); + struct virtio_gpu_device *vgdev = dev->dev_private; + int ret = 0; + + if (vgdev->has_resource_assign_uuid) { + ret = virtio_gpu_cmd_resource_assign_uuid(vgdev, bo); + if (ret) + return ERR_PTR(ret); + } else { + bo->uuid_state = UUID_INITIALIZATION_FAILED; } - return drm_gem_prime_import(dev, buf); + return drm_gem_prime_export(dev, obj, flags); } struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj) diff --git a/virtio_gpu/virtgpu_vq.c b/virtio_gpu/virtgpu_vq.c index d6b657c..dcec716 100644 --- a/virtio_gpu/virtgpu_vq.c +++ b/virtio_gpu/virtgpu_vq.c @@ -1210,3 +1210,64 @@ void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev, memcpy(cur_p, &output->cursor, sizeof(output->cursor)); virtio_gpu_queue_cursor(vgdev, vbuf); } + +static void virtio_gpu_cmd_resource_uuid_cb(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer *vbuf) +{ + struct virtio_gpu_resp_resource_uuid *resp = + (struct virtio_gpu_resp_resource_uuid *)vbuf->resp_buf; + struct virtio_gpu_object *obj = + (struct virtio_gpu_object *)vbuf->data_buf; + uint32_t resp_type = le32_to_cpu(resp->hdr.type); + + /* + * Keeps the data_buf, which points to this virtio_gpu_object, from + * getting kfree'd after this cb returns. 
+ */ + vbuf->data_buf = NULL; + + spin_lock(&vgdev->resource_export_lock); + if (resp_type == VIRTIO_GPU_RESP_OK_RESOURCE_UUID && + obj->uuid_state == UUID_INITIALIZING) { + memcpy(&obj->uuid.b, resp->uuid, sizeof(obj->uuid.b)); + obj->uuid_state = UUID_INITIALIZED; + } else { + obj->uuid_state = UUID_INITIALIZATION_FAILED; + } + spin_unlock(&vgdev->resource_export_lock); + + drm_gem_object_put_unlocked(&obj->gem_base); + wake_up_all(&vgdev->resp_wq); +} + +int +virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object *bo) +{ + struct virtio_gpu_resource_assign_uuid *cmd_p; + struct virtio_gpu_vbuffer *vbuf; + struct virtio_gpu_resp_resource_uuid *resp_buf; + + resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL); + if (!resp_buf) { + spin_lock(&vgdev->resource_export_lock); + bo->uuid_state = UUID_INITIALIZATION_FAILED; + spin_unlock(&vgdev->resource_export_lock); + return -ENOMEM; + } + + cmd_p = virtio_gpu_alloc_cmd_resp(vgdev, + virtio_gpu_cmd_resource_uuid_cb, &vbuf, sizeof(*cmd_p), + sizeof(struct virtio_gpu_resp_resource_uuid), resp_buf); + memset(cmd_p, 0, sizeof(*cmd_p)); + + cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID); + cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); + + /* Reuse the data_buf pointer for the object pointer. */ + vbuf->data_buf = bo; + drm_gem_object_get(&bo->gem_base); + virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); + return 0; +} + |