author     Lingfeng Yang <lfy@google.com>       2020-04-16 11:45:01 -0700
committer  Alistair Delva <adelva@google.com>   2020-04-22 16:26:40 -0700
commit     3dad80a7f34c4b2dafeb1a9cf8ac3790699ba6d5 (patch)
tree       d5b4db35850c7783f321dc0302b94402cf9edda1
parent     6fd5a4f7b2aa8045f5fdbae49e34f4a2932b0b5f (diff)
download   cuttlefish-modules-3dad80a7f34c4b2dafeb1a9cf8ac3790699ba6d5.tar.gz
CHROMIUM: drm/virtgpu: implement metadata allocation ioctl
No guest-side buffers are allocated with this. This just sends and receives metadata to/from the host. With this, we can expose new features to userspace.

BUG=chromium:924405
TEST=compile

Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/kernel/+/1629917
Reviewed-by: Robert Tarasov <tutankhamen@chromium.org>
Commit-Queue: Gurchetan Singh <gurchetansingh@chromium.org>
Tested-by: Gurchetan Singh <gurchetansingh@chromium.org>
Bug: 153580313
Signed-off-by: Lingfeng Yang <lfy@google.com>
Change-Id: I1c55b4f4cc0a320c35586380ec37a6e03436274c
-rw-r--r--  virtio_gpu/virtgpu_drv.h    13
-rw-r--r--  virtio_gpu/virtgpu_ioctl.c  78
-rw-r--r--  virtio_gpu/virtgpu_vq.c     66
3 files changed, 156 insertions(+), 1 deletion(-)
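
[Editor's note] For context, userspace pairs the two new ioctls: it fires the request (the kernel hands back a request_id), then blocks on the response ioctl until the host reply lands. A minimal userspace sketch follows, assuming the struct layouts implied by this patch's field accesses and hypothetical DRM_IOCTL_VIRTGPU_ALLOCATION_METADATA_* request macros; the UAPI header defining the real ones is not part of this diff.

/*
 * Sketch only. Struct fields mirror the accesses made by this patch's
 * ioctl handlers; the ioctl macro names are assumptions, since the UAPI
 * header is not included in this diff.
 */
#include <stdint.h>
#include <sys/ioctl.h>

struct drm_virtgpu_allocation_metadata_request {
	uint64_t request;        /* userspace pointer to the request blob */
	uint32_t request_size;
	uint32_t response_size;
	uint32_t request_id;     /* written back by the kernel */
};

struct drm_virtgpu_allocation_metadata_response {
	uint64_t response;       /* userspace pointer to the response buffer */
	uint32_t request_id;
	uint32_t response_size;
};

static int query_allocation_metadata(int drm_fd,
				     void *req, uint32_t req_size,
				     void *resp, uint32_t resp_size)
{
	struct drm_virtgpu_allocation_metadata_request amr = {
		.request = (uintptr_t)req,
		.request_size = req_size,
		.response_size = resp_size,
	};
	struct drm_virtgpu_allocation_metadata_response rcr = {
		.response = (uintptr_t)resp,
		.response_size = resp_size,
	};

	/* Hypothetical macro names; take the real ones from the UAPI header. */
	if (ioctl(drm_fd, DRM_IOCTL_VIRTGPU_ALLOCATION_METADATA_REQUEST, &amr))
		return -1;

	rcr.request_id = amr.request_id;  /* id allocated by the request path */
	return ioctl(drm_fd, DRM_IOCTL_VIRTGPU_ALLOCATION_METADATA_RESPONSE, &rcr);
}
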
diff --git a/virtio_gpu/virtgpu_drv.h b/virtio_gpu/virtgpu_drv.h
index c0a7f7d..2fabee4 100644
--- a/virtio_gpu/virtgpu_drv.h
+++ b/virtio_gpu/virtgpu_drv.h
@@ -192,6 +192,12 @@ struct virtio_gpu_drv_cap_cache {
atomic_t is_valid;
};
+struct virtio_gpu_allocation_metadata_response {
+ bool callback_done;
+ struct virtio_gpu_resp_allocation_metadata info;
+ uint32_t response_data[];
+};
+
struct virtio_gpu_device {
struct device *dev;
struct drm_device *ddev;
@@ -363,6 +369,13 @@ void
virtio_gpu_cmd_resource_v2_unref(struct virtio_gpu_device *vgdev,
uint32_t resource_id,
struct virtio_gpu_fence *fence);
+int
+virtio_gpu_cmd_allocation_metadata(struct virtio_gpu_device *vgdev,
+ uint32_t request_id,
+ uint32_t request_size,
+ uint32_t response_size,
+ void *request,
+ struct virtio_gpu_fence *fence);
void virtio_gpu_ctrl_ack(struct virtqueue *vq);
void virtio_gpu_cursor_ack(struct virtqueue *vq);
void virtio_gpu_fence_ack(struct virtqueue *vq);
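
[Editor's note] Note the trailing flexible array member in the new response struct: the driver can then make a single allocation covering the bookkeeping flag, the copied virtio response header, and response_size bytes of payload. The allocation pattern in isolation, as a sketch rather than driver code (newer kernels would typically use the struct_size() helper):

#include <linux/slab.h>
#include <linux/types.h>

struct example_response {
	bool done;
	u32 payload[];           /* flexible array, sized at allocation time */
};

/* One kzalloc covers header plus payload, mirroring how the ioctl
 * handler in virtgpu_ioctl.c sizes its allocation below. */
static struct example_response *alloc_example(size_t payload_bytes)
{
	return kzalloc(sizeof(struct example_response) + payload_bytes,
		       GFP_KERNEL);
}
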
diff --git a/virtio_gpu/virtgpu_ioctl.c b/virtio_gpu/virtgpu_ioctl.c
index 9ff0a82..ca89bc4 100644
--- a/virtio_gpu/virtgpu_ioctl.c
+++ b/virtio_gpu/virtgpu_ioctl.c
@@ -696,13 +696,89 @@ err_free_obj:
static int virtio_gpu_allocation_metadata_request_ioctl(struct drm_device *dev,
void *data, struct drm_file *file)
{
+ void *request;
+ uint32_t request_id;
+ struct drm_virtgpu_allocation_metadata_request *amr = data;
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct virtio_gpu_allocation_metadata_response *response;
+ void __user *params = u64_to_user_ptr(amr->request);
+
+ if (!amr->request_size)
+ return -EINVAL;
+
+ request = kzalloc(amr->request_size, GFP_KERNEL);
+ if (!request) {
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(request, params,
+ amr->request_size)) {
+ kfree(request);
+ return -EFAULT;
+ }
+
+ if (amr->response_size) {
+ response = kzalloc(sizeof(struct virtio_gpu_allocation_metadata_response) +
+ amr->response_size, GFP_KERNEL);
+ if (!response) {
+ kfree(request);
+ return -ENOMEM;
+ }
+
+ response->callback_done = false;
+ idr_preload(GFP_KERNEL);
+ spin_lock(&vgdev->request_idr_lock);
+ request_id = idr_alloc(&vgdev->request_idr, response, 1, 0,
+ GFP_NOWAIT);
+ spin_unlock(&vgdev->request_idr_lock);
+ idr_preload_end();
+ amr->request_id = request_id;
+ }
+
+ virtio_gpu_cmd_allocation_metadata(vgdev, request_id,
+ amr->request_size,
+ amr->response_size,
+ request,
+ NULL);
return 0;
}
static int virtio_gpu_allocation_metadata_response_ioctl(struct drm_device *dev,
void *data, struct drm_file *file)
{
- return 0;
+ int ret = -EINVAL;
+ struct virtio_gpu_allocation_metadata_response *response;
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct drm_virtgpu_allocation_metadata_response *rcr = data;
+ void __user *user_data = u64_to_user_ptr(rcr->response);
+
+ spin_lock(&vgdev->request_idr_lock);
+ response = idr_find(&vgdev->request_idr, rcr->request_id);
+ spin_unlock(&vgdev->request_idr_lock);
+
+ if (!response)
+ goto out;
+
+ ret = wait_event_interruptible(vgdev->resp_wq,
+ response->callback_done);
+ if (ret)
+ goto out_remove;
+
+ if (copy_to_user(user_data, &response->response_data,
+ rcr->response_size)) {
+ ret = -EFAULT;
+ goto out_remove;
+ }
+
+ ret = 0;
+
+out_remove:
+ spin_lock(&vgdev->request_idr_lock);
+ response = idr_remove(&vgdev->request_idr, rcr->request_id);
+ spin_unlock(&vgdev->request_idr_lock);
+ kfree(response);
+out:
+ return ret;
}
struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
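
[Editor's note] The two handlers above cooperate with the virtqueue callback (added in virtgpu_vq.c below) through the device's request_idr table and resp_wq waitqueue: the request path publishes an entry, the callback fills it in and wakes waiters, and the response path sleeps until callback_done flips. The bare handshake pattern, stripped of driver detail, looks roughly like this (a sketch under those assumptions, not driver code):

#include <linux/errno.h>
#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

static DEFINE_IDR(request_idr);
static DEFINE_SPINLOCK(request_idr_lock);
static DECLARE_WAIT_QUEUE_HEAD(resp_wq);

struct pending {
	bool done;
};

/* Producer side (virtqueue callback): flag the entry, wake sleepers. */
static void complete_request(int id)
{
	struct pending *p;

	spin_lock(&request_idr_lock);
	p = idr_find(&request_idr, id);
	spin_unlock(&request_idr_lock);
	if (!p)
		return;
	p->done = true;
	wake_up_all(&resp_wq);
}

/* Consumer side (response ioctl): sleep until the callback has run. */
static int wait_request(int id)
{
	struct pending *p;

	spin_lock(&request_idr_lock);
	p = idr_find(&request_idr, id);
	spin_unlock(&request_idr_lock);
	if (!p)
		return -EINVAL;
	return wait_event_interruptible(resp_wq, p->done);
}
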
diff --git a/virtio_gpu/virtgpu_vq.c b/virtio_gpu/virtgpu_vq.c
index f867f62..210b69f 100644
--- a/virtio_gpu/virtgpu_vq.c
+++ b/virtio_gpu/virtgpu_vq.c
@@ -937,6 +937,31 @@ finish_pending:
wake_up_all(&vgdev->resp_wq);
}
+static void virtio_gpu_cmd_allocation_metadata_cb(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf)
+{
+ struct virtio_gpu_allocation_metadata_response *response;
+ struct virtio_gpu_resp_allocation_metadata *resp =
+ (struct virtio_gpu_resp_allocation_metadata *)vbuf->resp_buf;
+ uint32_t resp_type = le32_to_cpu(resp->hdr.type);
+ uint32_t handle = le32_to_cpu(resp->request_id);
+ size_t total_size = sizeof(struct virtio_gpu_resp_allocation_metadata) +
+ le32_to_cpu(resp->response_size);
+
+ spin_lock(&vgdev->request_idr_lock);
+ response = idr_find(&vgdev->request_idr, handle);
+ spin_unlock(&vgdev->request_idr_lock);
+
+ if (!response)
+ return;
+
+ if (resp_type == VIRTIO_GPU_RESP_OK_ALLOCATION_METADATA)
+ memcpy(&response->info, resp, total_size);
+
+ response->callback_done = true;
+ wake_up_all(&vgdev->resp_wq);
+}
+
int
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo,
@@ -981,6 +1006,47 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
return 0;
}
+int
+virtio_gpu_cmd_allocation_metadata(struct virtio_gpu_device *vgdev,
+ uint32_t request_id,
+ uint32_t request_size,
+ uint32_t response_size,
+ void *request,
+ struct virtio_gpu_fence *fence)
+{
+ struct virtio_gpu_vbuffer *vbuf;
+ struct virtio_gpu_allocation_metadata *cmd_p;
+
+ if (response_size) {
+ struct virtio_gpu_resp_allocation_metadata *resp_buf;
+ size_t resp_size = sizeof(struct virtio_gpu_resp_allocation_metadata) +
+ response_size;
+ resp_buf = kzalloc(resp_size, GFP_KERNEL);
+ if (!resp_buf)
+ return -ENOMEM;
+
+ cmd_p = virtio_gpu_alloc_cmd_resp(vgdev,
+ &virtio_gpu_cmd_allocation_metadata_cb, &vbuf,
+ sizeof(*cmd_p), resp_size,
+ resp_buf);
+ resp_buf->request_id = cpu_to_le32(request_id);
+ } else {
+ cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+ }
+
+ memset(cmd_p, 0, sizeof(*cmd_p));
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_ALLOCATION_METADATA);
+ cmd_p->request_id = cpu_to_le32(request_id);
+ cmd_p->request_size = request_size;
+ cmd_p->response_size = response_size;
+
+ vbuf->data_buf = request;
+ vbuf->data_size = request_size;
+
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+ return 0;
+}
+
void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo,
uint32_t ctx_id,
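
[Editor's note] The wire structures exchanged with the host are not defined in this diff. Inferred purely from the field accesses above, their shape would be roughly the following; this is an inference, not the authoritative header definitions.

/* Inferred from the accesses in virtgpu_vq.c; the real definitions live
 * in the (not shown) virtio_gpu headers and may differ. All fields are
 * little-endian on the wire. */
struct virtio_gpu_allocation_metadata {
	struct virtio_gpu_ctrl_hdr hdr;  /* type = VIRTIO_GPU_CMD_ALLOCATION_METADATA */
	__le32 request_id;
	__le32 request_size;
	__le32 response_size;
};

struct virtio_gpu_resp_allocation_metadata {
	struct virtio_gpu_ctrl_hdr hdr;  /* type = VIRTIO_GPU_RESP_OK_ALLOCATION_METADATA */
	__le32 request_id;
	__le32 response_size;
	/* response_size bytes of payload follow */
};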