summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAurora zuma automerger <aurora-zuma-automerger@google.com>2023-01-09 06:05:47 +0000
committerCopybara-Service <copybara-worker@google.com>2023-01-09 04:17:08 -0800
commitfaa60e619e819769e825306a9cc25e8867023cce (patch)
treed44e1600b4f214516b2be01d9adc974f352eed47
parentf9810bce749c240728b36e833e58aea43291512e (diff)
downloadzuma-faa60e619e819769e825306a9cc25e8867023cce.tar.gz
gxp: [Copybara Auto Merge] Merge branch 'zuma' into 'android14-gs-pixel-5.15'
gxp: return vdid on VD allocation ioctl Bug: 264827584 gxp: add VDID support Bug: 264827584 (repeat) gxp: remove a cleanup TODO from vd.h gxp: add refcount to virtual device Bug: 264739996 gxp: add sync fence interfaces Bug: 258876786 gxp: detach vd->domain when the state is invalid Bug: 263830035 gxp: call vd_block_unready when failed to run core fw Bug: 263830035 (repeat) gxp: apply clang-format to gxp-vd.c Bug: 263830035 (repeat) gxp: not send KCI commands when vd is invalid Bug: 263830035 (repeat) gxp: allow debugfs mailbox only with valid vd Bug: 264629015 gxp: add debugfs_client to client_list Bug: 264629015 (repeat) gxp: hold vd_semaphore only when direct mode Bug: 263830035 (repeat) gxp: fix vd_semaphore locking Bug: 263215610 gxp: revert VD state on after_vd_block_ready fail Bug: 263215610 (repeat) gcip: implement gcip_dma_fence_show Bug: 264220687 gcip: implement DMA fence status and signal Bug: 264220687 (repeat) gcip: add gcip-dma-fence.c Bug: 258876786 (repeat) GCIP_MAIN_REV_ID: e0508279555335c215d577814beca89bea0ba2cf gcip: fix GCIP_DMA_FENCE_LIST_UNLOCK Bug: 258876786 (repeat) gcip: correct path of gcip-dma-fence.h Bug: 258876786 (repeat) gcip: add gcip-dma-fence.h Bug: 258876786 (repeat) GCIP_HEADERS_REV_ID: b2a107cde8ef2f8fe4c173d2d609eb002a98483d gxp: amalthea uses different edgetpu path Bug: 263918299 gxp: fix uninit warn of gxp_debugfs_mailbox gcip: introduce firmware crash type Bug: 237739631 GCIP_HEADERS_REV_ID: 41e11f10e6e44013ecee5b021aef2d7960b01d11 gcip: style fix up GCIP_MAIN_REV_ID: a1895a61a873d1f52896f955e09d2e263b22a0e8 gxp: bump GXP version to 1.7 GitOrigin-RevId: bfd1694965b1db8adc521df6c54f3d18ecebea27 Change-Id: I508dc2f1cfa127cc953724c2401d1ca6357987c5
-rw-r--r--Makefile3
-rw-r--r--callisto-platform.c2
-rw-r--r--gcip-kernel-driver/drivers/gcip/Makefile10
-rw-r--r--gcip-kernel-driver/drivers/gcip/gcip-alloc-helper.c5
-rw-r--r--gcip-kernel-driver/drivers/gcip/gcip-dma-fence.c199
-rw-r--r--gcip-kernel-driver/drivers/gcip/gcip-image-config.c2
-rw-r--r--gcip-kernel-driver/include/gcip/gcip-dma-fence.h139
-rw-r--r--gcip-kernel-driver/include/gcip/gcip-firmware.h16
-rw-r--r--gxp-client.c2
-rw-r--r--gxp-common-platform.c14
-rw-r--r--gxp-dci.c3
-rw-r--r--gxp-debugfs.c47
-rw-r--r--gxp-internal.h3
-rw-r--r--gxp-vd.c131
-rw-r--r--gxp-vd.h35
-rw-r--r--gxp.h67
16 files changed, 624 insertions, 54 deletions
diff --git a/Makefile b/Makefile
index be9dd50..80592a1 100644
--- a/Makefile
+++ b/Makefile
@@ -50,6 +50,7 @@ gxp-objs += \
gxp-uci.o \
gxp-usage-stats.o
+GMODULE_PATH := $(OUT_DIR)/../private/google-modules
EDGETPU_CHIP := rio
@@ -94,7 +95,7 @@ ccflags-y += $(EXTRA_CFLAGS) $(gxp-flags)
KBUILD_OPTIONS += GXP_CHIP=$(GXP_CHIP) GXP_PLATFORM=$(GXP_PLATFORM)
# Access TPU driver's exported symbols.
-EXTRA_SYMBOLS += $(OUT_DIR)/../private/google-modules/edgetpu/$(EDGETPU_CHIP)/drivers/edgetpu/Module.symvers
+EXTRA_SYMBOLS += $(GMODULE_PATH)/edgetpu/$(EDGETPU_CHIP)/drivers/edgetpu/Module.symvers
modules modules_install:
$(MAKE) -C $(KERNEL_SRC) M=$(M)/$(GCIP_DIR) gcip.o
diff --git a/callisto-platform.c b/callisto-platform.c
index 46a2e29..e9335b0 100644
--- a/callisto-platform.c
+++ b/callisto-platform.c
@@ -254,7 +254,7 @@ callisto_platform_before_vd_block_unready(struct gxp_dev *gxp,
{
if (gxp_is_direct_mode(gxp))
return;
- if (vd->client_id < 0)
+ if (vd->client_id < 0 || vd->state == GXP_VD_UNAVAILABLE)
return;
if (vd->tpu_client_id >= 0)
unlink_offload_vmbox(gxp, vd, vd->tpu_client_id,
diff --git a/gcip-kernel-driver/drivers/gcip/Makefile b/gcip-kernel-driver/drivers/gcip/Makefile
index c3424ee..ab68776 100644
--- a/gcip-kernel-driver/drivers/gcip/Makefile
+++ b/gcip-kernel-driver/drivers/gcip/Makefile
@@ -6,8 +6,14 @@
CONFIG_GCIP ?= m
obj-$(CONFIG_GCIP) += gcip.o
-gcip-objs := gcip-alloc-helper.o gcip-domain-pool.o gcip-firmware.o \
- gcip-image-config.o gcip-kci.o gcip-mailbox.o gcip-mem-pool.o \
+gcip-objs := gcip-alloc-helper.o \
+ gcip-dma-fence.o \
+ gcip-domain-pool.o \
+ gcip-firmware.o \
+ gcip-image-config.o \
+ gcip-kci.o \
+ gcip-mailbox.o \
+ gcip-mem-pool.o \
gcip-telemetry.o
CURRENT_DIR=$(dir $(abspath $(lastword $(MAKEFILE_LIST))))
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-alloc-helper.c b/gcip-kernel-driver/drivers/gcip/gcip-alloc-helper.c
index f79149f..33c95e2 100644
--- a/gcip-kernel-driver/drivers/gcip/gcip-alloc-helper.c
+++ b/gcip-kernel-driver/drivers/gcip/gcip-alloc-helper.c
@@ -25,9 +25,10 @@ static int gcip_vmalloc_to_pages(void *mem, size_t count, struct page **pages)
size_t i = 0;
while (count--) {
- pages[i++] = vmalloc_to_page(mem);
- if (!pages[i - 1])
+ pages[i] = vmalloc_to_page(mem);
+ if (!pages[i])
return -ENOMEM;
+ i++;
mem += PAGE_SIZE;
}
return 0;
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-dma-fence.c b/gcip-kernel-driver/drivers/gcip/gcip-dma-fence.c
new file mode 100644
index 0000000..9da1f24
--- /dev/null
+++ b/gcip-kernel-driver/drivers/gcip/gcip-dma-fence.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * GCIP support of DMA fences.
+ *
+ * Copyright (C) 2023 Google LLC
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/dma-fence.h>
+#include <linux/err.h>
+#include <linux/file.h>
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/sync_file.h>
+#include <linux/time.h>
+
+#include <gcip/gcip-dma-fence.h>
+
+#define to_gfence(fence) container_of(fence, struct gcip_dma_fence, fence)
+
+static int _gcip_dma_fence_signal(struct dma_fence *fence, int error, bool ignore_signaled)
+{
+ int ret;
+
+ if (error > 0)
+ error = -error;
+ if (unlikely(error < -MAX_ERRNO))
+ return -EINVAL;
+
+ spin_lock_irq(fence->lock);
+ /* don't signal fence twice */
+ if (unlikely(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))) {
+ ret = ignore_signaled ? 0 : -EBUSY;
+ goto out_unlock;
+ }
+ if (error)
+ dma_fence_set_error(fence, error);
+ ret = dma_fence_signal_locked(fence);
+
+out_unlock:
+ spin_unlock_irq(fence->lock);
+ return ret;
+}
+
+static const char *sync_status_str(int status)
+{
+ if (status < 0)
+ return "error";
+ if (status > 0)
+ return "signaled";
+ return "active";
+}
+
+struct gcip_dma_fence_manager *gcip_dma_fence_manager_create(struct device *dev)
+{
+ struct gcip_dma_fence_manager *mgr = devm_kzalloc(dev, sizeof(*mgr), GFP_KERNEL);
+
+ if (!mgr)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&mgr->fence_list_head);
+ spin_lock_init(&mgr->fence_list_lock);
+ mgr->dev = dev;
+
+ return mgr;
+}
+
+const char *gcip_dma_fence_get_timeline_name(struct dma_fence *fence)
+{
+ struct gcip_dma_fence *gfence = to_gfence(fence);
+
+ return gfence->timeline_name;
+}
+
+bool gcip_dma_fence_always_true(struct dma_fence *fence)
+{
+ return true;
+}
+
+int gcip_dma_fence_init(struct gcip_dma_fence_manager *mgr, struct gcip_dma_fence *gfence,
+ struct gcip_dma_fence_data *data)
+{
+ size_t timeline_len = strlen(data->timeline_name);
+ unsigned long flags;
+ int fd;
+ struct sync_file *sync_file;
+ int ret;
+
+ if (timeline_len >= GCIP_FENCE_TIMELINE_NAME_LEN)
+ return -EINVAL;
+ memcpy(gfence->timeline_name, data->timeline_name, timeline_len);
+ gfence->timeline_name[timeline_len] = '\0';
+ spin_lock_init(&gfence->lock);
+ INIT_LIST_HEAD(&gfence->fence_list);
+ gfence->mgr = mgr;
+
+ dma_fence_init(&gfence->fence, data->ops, &gfence->lock, dma_fence_context_alloc(1),
+ data->seqno);
+ GCIP_DMA_FENCE_LIST_LOCK(mgr, flags);
+ list_add_tail(&gfence->fence_list, &mgr->fence_list_head);
+ GCIP_DMA_FENCE_LIST_UNLOCK(mgr, flags);
+
+ if (data->after_init) {
+ ret = data->after_init(gfence);
+ if (ret) {
+ dev_err(mgr->dev, "DMA fence init failed on after_init: %d", ret);
+ goto err_put_fence;
+ }
+ }
+ fd = get_unused_fd_flags(O_CLOEXEC);
+ if (fd < 0) {
+ ret = fd;
+ dev_err(mgr->dev, "Failed to get FD: %d", ret);
+ goto err_put_fence;
+ }
+ sync_file = sync_file_create(&gfence->fence);
+ if (!sync_file) {
+ dev_err(mgr->dev, "Failed to create sync file");
+ ret = -ENOMEM;
+ goto err_put_fd;
+ }
+ /* sync_file holds the reference to fence, so we can drop our reference. */
+ dma_fence_put(&gfence->fence);
+
+ fd_install(fd, sync_file->file);
+ data->fence = fd;
+ return 0;
+
+err_put_fd:
+ put_unused_fd(fd);
+err_put_fence:
+ dma_fence_put(&gfence->fence);
+ return ret;
+}
+
+void gcip_dma_fence_exit(struct gcip_dma_fence *gfence)
+{
+ unsigned long flags;
+
+ GCIP_DMA_FENCE_LIST_LOCK(gfence->mgr, flags);
+ list_del(&gfence->fence_list);
+ GCIP_DMA_FENCE_LIST_UNLOCK(gfence->mgr, flags);
+}
+
+int gcip_dma_fence_status(int fence, int *status)
+{
+ struct dma_fence *fencep;
+
+ fencep = sync_file_get_fence(fence);
+ if (!fencep)
+ return -EBADF;
+ *status = dma_fence_get_status(fencep);
+ dma_fence_put(fencep);
+ return 0;
+}
+
+int gcip_dma_fence_signal(int fence, int error, bool ignore_signaled)
+{
+ struct dma_fence *fencep;
+ int ret;
+
+ fencep = sync_file_get_fence(fence);
+ if (!fencep)
+ return -EBADF;
+ ret = _gcip_dma_fence_signal(fencep, error, ignore_signaled);
+ dma_fence_put(fencep);
+ return ret;
+}
+
+int gcip_dma_fenceptr_signal(struct gcip_dma_fence *gfence, int error, bool ignore_signaled)
+{
+ return _gcip_dma_fence_signal(&gfence->fence, error, ignore_signaled);
+}
+
+void gcip_dma_fence_show(struct gcip_dma_fence *gfence, struct seq_file *s)
+{
+ struct dma_fence *fence = &gfence->fence;
+
+ spin_lock_irq(&gfence->lock);
+
+ seq_printf(s, "%s-%s %llu-%llu %s", fence->ops->get_driver_name(fence),
+ fence->ops->get_timeline_name(fence), fence->context, fence->seqno,
+ sync_status_str(dma_fence_get_status_locked(fence)));
+
+ if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags)) {
+ struct timespec64 ts = ktime_to_timespec64(fence->timestamp);
+
+ seq_printf(s, " @%lld.%09ld", (s64)ts.tv_sec, ts.tv_nsec);
+ }
+
+ if (fence->error)
+ seq_printf(s, " err=%d", fence->error);
+
+ spin_unlock_irq(&gfence->lock);
+}
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-image-config.c b/gcip-kernel-driver/drivers/gcip/gcip-image-config.c
index 5fed69c..312bbdc 100644
--- a/gcip-kernel-driver/drivers/gcip/gcip-image-config.c
+++ b/gcip-kernel-driver/drivers/gcip/gcip-image-config.c
@@ -19,7 +19,7 @@
#define ADDR_MASK ~(BIT(ADDR_SHIFT) - 1u)
/* used by ns_iommu_mappings */
-#define CONFIG_TO_MBSIZE(a) (((a)&NS_SIZE_MASK) << 20)
+#define CONFIG_TO_MBSIZE(a) (((a) & NS_SIZE_MASK) << 20)
/* used by iommu_mappings */
static inline __u32 config_to_size(__u32 cfg)
diff --git a/gcip-kernel-driver/include/gcip/gcip-dma-fence.h b/gcip-kernel-driver/include/gcip/gcip-dma-fence.h
new file mode 100644
index 0000000..232360b
--- /dev/null
+++ b/gcip-kernel-driver/include/gcip/gcip-dma-fence.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * GCIP support of DMA fences.
+ *
+ * Copyright (C) 2023 Google LLC
+ */
+
+#ifndef __GCIP_DMA_FENCE_H__
+#define __GCIP_DMA_FENCE_H__
+
+#include <linux/device.h>
+#include <linux/dma-fence.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#define GCIP_FENCE_TIMELINE_NAME_LEN 128
+
+/* Used before accessing the list headed by mgr->fence_list_head. */
+#define GCIP_DMA_FENCE_LIST_LOCK(mgr, flags) spin_lock_irqsave(&mgr->fence_list_lock, flags)
+#define GCIP_DMA_FENCE_LIST_UNLOCK(mgr, flags) spin_unlock_irqrestore(&mgr->fence_list_lock, flags)
+
+/*
+ * A macro to loop through all fences under a gcip_dma_fence_manager.
+ * @mgr: struct gcip_dma_fence_manager
+ * @gfence: struct gcip_dma_fence
+ *
+ * This macro must be wrapped by GCIP_DMA_FENCE_LIST_(UN)LOCK.
+ */
+#define gcip_for_each_fence(mgr, gfence) \
+ list_for_each_entry(gfence, &mgr->fence_list_head, fence_list)
+
+struct gcip_dma_fence_manager {
+ /* The list of all fence objects for debugging. */
+ struct list_head fence_list_head;
+ /* Protects the list headed by @fence_list_head. */
+ spinlock_t fence_list_lock;
+ /* For logging. */
+ struct device *dev;
+};
+
+struct gcip_dma_fence {
+ struct dma_fence fence;
+ /* The manager used to init this object. */
+ struct gcip_dma_fence_manager *mgr;
+ char timeline_name[GCIP_FENCE_TIMELINE_NAME_LEN];
+ /* Protects @fence. */
+ spinlock_t lock;
+ /* Is protected by manager->fence_list_lock. */
+ struct list_head fence_list;
+};
+
+struct gcip_dma_fence_data {
+ /*
+ * A null-terminated string with length less than GCIP_FENCE_TIMELINE_NAME_LEN.
+ * The content of this buffer will be copied so it's fine to release this pointer after
+ * the gcip_dma_fence_init() call.
+ */
+ char *timeline_name;
+ /*
+ * The DMA fence operators to initialize the fence with.
+ *
+ * Note: @ops->release is called when after_init() fails.
+ */
+ const struct dma_fence_ops *ops;
+ /* The sequence number to initialize the fence with. */
+ u32 seqno;
+ /* Output: The fd of the new sync_file with the new fence. */
+ int fence;
+ /*
+ * The callback to be called after @gfence is initialized, before an FD has been installed.
+ * Returns 0 on success. A non-zero return value will revert the initialization of
+ * @gfence and the returned error is returned by gcip_dma_fence_init().
+ *
+ * There is no 'before_exit' callback because the user is supposed to set a custom
+ * dma_fence_ops.release callback which does the revert of after_init and then call
+ * gcip_dma_fence_exit().
+ *
+ * This callback is optional.
+ */
+ int (*after_init)(struct gcip_dma_fence *gfence);
+};
+
+/*
+ * Allocates and returns a GCIP DMA fence manager. Memory is allocated as @dev managed so there is
+ * no release function of the manager.
+ *
+ * Returns a negative errno on error.
+ */
+struct gcip_dma_fence_manager *gcip_dma_fence_manager_create(struct device *dev);
+
+/* Helpers for setting dma_fence_ops. */
+
+/* Returns the timeline name. @fence must be contained within a gcip_dma_fence. */
+const char *gcip_dma_fence_get_timeline_name(struct dma_fence *fence);
+
+/* Always return true. Can be used for the enable_signaling callback. */
+bool gcip_dma_fence_always_true(struct dma_fence *fence);
+
+/* End of helpers for setting dma_fence_ops. */
+
+int gcip_dma_fence_init(struct gcip_dma_fence_manager *mgr, struct gcip_dma_fence *gfence,
+ struct gcip_dma_fence_data *data);
+
+/*
+ * Reverts gcip_dma_fence_init(). Removes @gfence from the manager's list.
+ * This function will not free @gfence.
+ */
+void gcip_dma_fence_exit(struct gcip_dma_fence *gfence);
+
+/*
+ * Sets @status to the DMA fence status of DMA fence FD @fence.
+ * @status is only set when this function returns 0.
+ *
+ * It is OK if @fence does not refer to a gcip_dma_fence.
+ *
+ * Returns 0 on success. Otherwise a negative errno.
+ */
+int gcip_dma_fence_status(int fence, int *status);
+
+/*
+ * Signals the fence error of DMA fence FD @fence.
+ *
+ * If the fence has been signaled,
+ * - if @ignore_signaled is true, this function does nothing.
+ * - otherwise, returns -EBUSY.
+ *
+ * It is OK if @fence does not refer to a gcip_dma_fence.
+ *
+ * Returns 0 on success. Otherwise a negative errno.
+ */
+int gcip_dma_fence_signal(int fence, int error, bool ignore_signaled);
+/* Identical to gcip_dma_fence_signal except this function accepts gcip_dma_fence as the input. */
+int gcip_dma_fenceptr_signal(struct gcip_dma_fence *gfence, int error, bool ignore_signaled);
+
+/* Prints data of @gfence to the sequence file @s. For debug purpose only. */
+void gcip_dma_fence_show(struct gcip_dma_fence *gfence, struct seq_file *s);
+
+#endif /* __GCIP_DMA_FENCE_H__ */
diff --git a/gcip-kernel-driver/include/gcip/gcip-firmware.h b/gcip-kernel-driver/include/gcip/gcip-firmware.h
index b856e5c..012a79a 100644
--- a/gcip-kernel-driver/include/gcip/gcip-firmware.h
+++ b/gcip-kernel-driver/include/gcip/gcip-firmware.h
@@ -35,6 +35,22 @@ enum gcip_fw_flavor {
GCIP_FW_FLAVOR_CUSTOM = 4,
};
+/* Type of firmware crash which will be sent by GCIP_RKCI_FIRMWARE_CRASH RKCI command. */
+enum gcip_fw_crash_type {
+ /* Assert happened. */
+ GCIP_FW_CRASH_ASSERT_FAIL = 0,
+ /* Data abort exception. */
+ GCIP_FW_CRASH_DATA_ABORT = 1,
+ /* Prefetch abort exception. */
+ GCIP_FW_CRASH_PREFETCH_ABORT = 2,
+ /* Undefined exception. */
+ GCIP_FW_CRASH_UNDEFINED_EXCEPTION = 3,
+ /* Exception which cannot be recovered by the firmware itself. */
+ GCIP_FW_CRASH_UNRECOVERABLE_FAULT = 4,
+ /* Used in debug dump. */
+ GCIP_FW_CRASH_DUMMY_CRASH_TYPE = 0xFF,
+};
+
/* Firmware info filled out via KCI FIRMWARE_INFO command. */
struct gcip_fw_info {
uint64_t fw_build_time; /* BuildData::Timestamp() */
diff --git a/gxp-client.c b/gxp-client.c
index f96a100..16fb5bc 100644
--- a/gxp-client.c
+++ b/gxp-client.c
@@ -181,7 +181,9 @@ int gxp_client_acquire_block_wakelock(struct gxp_client *client,
return ret;
*acquired_wakelock = true;
if (client->vd) {
+ down_write(&gxp->vd_semaphore);
ret = gxp_vd_block_ready(client->vd);
+ up_write(&gxp->vd_semaphore);
if (ret)
goto err_wakelock_release;
}
diff --git a/gxp-common-platform.c b/gxp-common-platform.c
index d9ac532..3cdb436 100644
--- a/gxp-common-platform.c
+++ b/gxp-common-platform.c
@@ -570,8 +570,20 @@ static int gxp_allocate_vd(struct gxp_client *client,
ret = gxp_client_allocate_virtual_device(client, ibuf.core_count,
ibuf.flags);
up_write(&client->semaphore);
+ if (ret)
+ return ret;
- return ret;
+ ibuf.vdid = client->vd->vdid;
+ if (copy_to_user(argp, &ibuf, sizeof(ibuf))) {
+ /*
+ * VD will be released once the client FD has been closed, we
+ * don't need to release VD here as this branch should never
+ * happen in usual cases.
+ */
+ return -EFAULT;
+ }
+
+ return 0;
}
static int
diff --git a/gxp-dci.c b/gxp-dci.c
index 9fdf25c..8c0aa2d 100644
--- a/gxp-dci.c
+++ b/gxp-dci.c
@@ -26,6 +26,7 @@ static int gxp_dci_mailbox_manager_execute_cmd(
u32 cmd_flags, u8 num_cores, struct gxp_power_states power_states,
u64 *resp_seq, u16 *resp_status)
{
+ struct gxp_dev *gxp = client->gxp;
struct gxp_dci_command cmd;
struct gxp_dci_response resp;
struct gxp_dci_buffer_descriptor buffer;
@@ -40,7 +41,9 @@ static int gxp_dci_mailbox_manager_execute_cmd(
cmd.priority = cmd_priority; /* currently unused */
cmd.buffer_descriptor = buffer;
+ down_read(&gxp->vd_semaphore);
ret = gxp_dci_execute_cmd(mailbox, &cmd, &resp);
+ up_read(&gxp->vd_semaphore);
/* resp.seq and resp.status can be updated even though it failed to process the command */
if (resp_seq)
diff --git a/gxp-debugfs.c b/gxp-debugfs.c
index a719062..0497ee3 100644
--- a/gxp-debugfs.c
+++ b/gxp-debugfs.c
@@ -44,6 +44,7 @@ static int gxp_debugfs_mailbox(void *data, u64 val)
u16 status;
struct gxp_dev *gxp = (struct gxp_dev *)data;
struct gxp_mailbox *mbx;
+ struct gxp_client *client;
struct gxp_power_states power_states = {
.power = GXP_POWER_STATE_NOM,
.memory = MEMORY_POWER_STATE_UNDEFINED,
@@ -52,8 +53,11 @@ static int gxp_debugfs_mailbox(void *data, u64 val)
int ret;
mutex_lock(&gxp->debugfs_client_lock);
+ client = gxp->debugfs_client;
+#if GXP_HAS_MCU
if (gxp_is_direct_mode(gxp)) {
+#endif
core = val / 1000;
if (core >= GXP_NUM_CORES) {
dev_notice(gxp->dev,
@@ -72,17 +76,28 @@ static int gxp_debugfs_mailbox(void *data, u64 val)
goto out;
}
+ /* Create a dummy client to access @client->gxp from the `execute_cmd` callback. */
+ if (!client)
+ client = gxp_client_create(gxp);
mbx = gxp->mailbox_mgr->mailboxes[core];
cmd_code = GXP_MBOX_CODE_DISPATCH;
#if GXP_HAS_MCU
} else {
- if (!gxp->debugfs_client) {
+ if (!client) {
dev_err(gxp->dev,
"You should load firmwares via gxp/firmware_run first\n");
ret = -EIO;
goto out;
}
+ down_read(&gxp->debugfs_client->semaphore);
+ if (!gxp_client_has_available_vd(gxp->debugfs_client,
+ "GXP_MAILBOX_COMMAND")) {
+ ret = -ENODEV;
+ goto out;
+ }
+ up_read(&gxp->debugfs_client->semaphore);
+
mbx = to_mcu_dev(gxp)->mcu.uci.mbx;
if (!mbx) {
dev_err(gxp->dev, "UCI is not initialized.\n");
@@ -91,15 +106,12 @@ static int gxp_debugfs_mailbox(void *data, u64 val)
}
cmd_code = CORE_COMMAND;
-#endif
}
+#endif
- down_read(&gxp->vd_semaphore);
- /* In direct mode, gxp->debugfs_client and core will be ignored. */
- retval = gxp->mailbox_mgr->execute_cmd(gxp->debugfs_client, mbx, core,
- cmd_code, 0, 0, 0, 0, 1,
- power_states, NULL, &status);
- up_read(&gxp->vd_semaphore);
+ retval = gxp->mailbox_mgr->execute_cmd(client, mbx, core, cmd_code, 0,
+ 0, 0, 0, 1, power_states, NULL,
+ &status);
dev_info(
gxp->dev,
@@ -107,6 +119,8 @@ static int gxp_debugfs_mailbox(void *data, u64 val)
core, status, retval);
ret = 0;
out:
+ if (client && client != gxp->debugfs_client)
+ gxp_client_destroy(client);
mutex_unlock(&gxp->debugfs_client_lock);
return ret;
}
@@ -166,9 +180,14 @@ static int gxp_firmware_run_set(void *data, u64 val)
}
gxp->debugfs_client = client;
+ mutex_lock(&gxp->client_list_lock);
+ list_add(&client->list_entry, &gxp->client_list);
+ mutex_unlock(&gxp->client_list_lock);
+
down_write(&client->semaphore);
- ret = gxp_client_allocate_virtual_device(client, GXP_NUM_CORES, 0);
+ ret = gxp_client_allocate_virtual_device(client, GXP_NUM_CORES,
+ 0);
if (ret) {
dev_err(gxp->dev, "Failed to allocate VD\n");
goto err_destroy_client;
@@ -199,8 +218,7 @@ static int gxp_firmware_run_set(void *data, u64 val)
* Cleaning up the client will stop the VD it owns and release
* the BLOCK wakelock it is holding.
*/
- gxp_client_destroy(gxp->debugfs_client);
- gxp->debugfs_client = NULL;
+ goto out_destroy_client;
}
out:
@@ -212,8 +230,13 @@ err_release_block_wakelock:
gxp_client_release_block_wakelock(client);
err_destroy_client:
up_write(&client->semaphore);
+out_destroy_client:
+ mutex_lock(&gxp->client_list_lock);
+ list_del(&gxp->debugfs_client->list_entry);
+ mutex_unlock(&gxp->client_list_lock);
+
/* Destroying a client cleans up any VDs or wakelocks it held. */
- gxp_client_destroy(client);
+ gxp_client_destroy(gxp->debugfs_client);
gxp->debugfs_client = NULL;
mutex_unlock(&gxp->debugfs_client_lock);
return ret;
diff --git a/gxp-internal.h b/gxp-internal.h
index 6988bf8..df1b4e9 100644
--- a/gxp-internal.h
+++ b/gxp-internal.h
@@ -7,6 +7,7 @@
#ifndef __GXP_INTERNAL_H__
#define __GXP_INTERNAL_H__
+#include <linux/atomic.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/firmware.h>
@@ -142,6 +143,8 @@ struct gxp_dev {
struct gxp_usage_stats *usage_stats; /* Stores the usage stats */
void __iomem *sysreg_shareability; /* sysreg shareability csr base */
+ /* Next virtual device ID. */
+ atomic_t next_vdid;
/* callbacks for chip-dependent implementations */
diff --git a/gxp-vd.c b/gxp-vd.c
index 3bd01cd..66de461 100644
--- a/gxp-vd.c
+++ b/gxp-vd.c
@@ -5,8 +5,10 @@
* Copyright (C) 2021 Google LLC
*/
+#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/idr.h>
+#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -43,6 +45,7 @@ void gxp_vd_init(struct gxp_dev *gxp)
/* All cores start as free */
for (core = 0; core < GXP_NUM_CORES; core++)
gxp->core_to_vd[core] = NULL;
+ atomic_set(&gxp->next_vdid, 0);
}
void gxp_vd_destroy(struct gxp_dev *gxp)
@@ -130,7 +133,7 @@ static void unmap_core_telemetry_buffers(struct gxp_dev *gxp,
}
static int map_debug_dump_buffer(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd)
+ struct gxp_virtual_device *vd)
{
if (!gxp->debug_dump_mgr)
return 0;
@@ -164,7 +167,8 @@ static int assign_cores(struct gxp_virtual_device *vd)
}
}
if (available_cores < vd->num_cores) {
- dev_err(gxp->dev, "Insufficient available cores. Available: %u. Requested: %u\n",
+ dev_err(gxp->dev,
+ "Insufficient available cores. Available: %u. Requested: %u\n",
available_cores, vd->num_cores);
return -EBUSY;
}
@@ -209,6 +213,7 @@ struct gxp_virtual_device *gxp_vd_allocate(struct gxp_dev *gxp,
vd->client_id = -1;
vd->tpu_client_id = -1;
spin_lock_init(&vd->credit_lock);
+ refcount_set(&vd->refcount, 1);
vd->credit = GXP_COMMAND_CREDIT_PER_VD;
vd->first_open = true;
@@ -279,13 +284,15 @@ struct gxp_virtual_device *gxp_vd_allocate(struct gxp_dev *gxp,
err = map_debug_dump_buffer(gxp, vd);
if (err)
goto error_unmap_core_telemetry_buffer;
+ vd->vdid = atomic_inc_return(&gxp->next_vdid);
return vd;
error_unmap_core_telemetry_buffer:
unmap_core_telemetry_buffers(gxp, vd, vd->core_list);
error_unmap_fw_data:
- gxp_dma_unmap_iova_sgt(gxp, vd->domain, GXP_IOVA_PRIV_FW_DATA, vd->fwdata_sgt);
+ gxp_dma_unmap_iova_sgt(gxp, vd->domain, GXP_IOVA_PRIV_FW_DATA,
+ vd->fwdata_sgt);
error_unmap_core_resources:
gxp_dma_unmap_core_resources(gxp, vd->domain, vd->core_list);
error_destroy_fw_data:
@@ -325,7 +332,8 @@ void gxp_vd_release(struct gxp_virtual_device *vd)
unassign_cores(vd);
unmap_debug_dump_buffer(gxp, vd);
unmap_core_telemetry_buffers(gxp, vd, core_list);
- gxp_dma_unmap_iova_sgt(gxp, vd->domain, GXP_IOVA_PRIV_FW_DATA, vd->fwdata_sgt);
+ gxp_dma_unmap_iova_sgt(gxp, vd->domain, GXP_IOVA_PRIV_FW_DATA,
+ vd->fwdata_sgt);
gxp_dma_unmap_core_resources(gxp, vd->domain, core_list);
if (!IS_ERR_OR_NULL(vd->fw_app)) {
@@ -353,7 +361,8 @@ void gxp_vd_release(struct gxp_virtual_device *vd)
if (vd->slice_index >= 0)
ida_free(&vd->gxp->shared_slice_idp, vd->slice_index);
gxp_domain_pool_free(vd->gxp->domain_pool, vd->domain);
- kfree(vd);
+ vd->state = GXP_VD_RELEASED;
+ gxp_vd_put(vd);
}
int gxp_vd_block_ready(struct gxp_virtual_device *vd)
@@ -361,6 +370,8 @@ int gxp_vd_block_ready(struct gxp_virtual_device *vd)
struct gxp_dev *gxp = vd->gxp;
int ret;
+ lockdep_assert_held_write(&gxp->vd_semaphore);
+
if (vd->state == GXP_VD_SUSPENDED)
return 0;
if (vd->state != GXP_VD_OFF)
@@ -373,6 +384,7 @@ int gxp_vd_block_ready(struct gxp_virtual_device *vd)
ret = gxp->after_vd_block_ready(gxp, vd);
if (ret) {
gxp_dma_domain_detach_device(gxp, vd->domain);
+ vd->state = GXP_VD_OFF;
return ret;
}
}
@@ -395,17 +407,59 @@ int gxp_vd_run(struct gxp_virtual_device *vd)
lockdep_assert_held(&gxp->vd_semaphore);
if (vd->state != GXP_VD_READY && vd->state != GXP_VD_OFF)
return -EINVAL;
- if (vd->state == GXP_VD_OFF)
- gxp_vd_block_ready(vd);
+ if (vd->state == GXP_VD_OFF) {
+ ret = gxp_vd_block_ready(vd);
+ /*
+ * The failure of `gxp_vd_block_ready` function means following two things:
+ *
+ * 1. The MCU firmware is not working for some reason and if it was crash,
+ * @vd->state would be set to UNAVAILABLE by the crash handler. However, by the
+ * race, if this function holds @gxp->vd_semaphore earlier than that handler,
+ * it is reasonable to set @vd->state to UNAVAILABLE from here.
+ *
+ * 2. Some information of vd (or client) such as client_id, slice_index are
+ * incorrect or not allowed by the MCU firmware for some reasons and the
+ * `allocate_vmbox` or `link_offload_vmbox` has been failed. In this case,
+ * setting the @vd->state to UNAVAILABLE and letting the runtime close its fd
+ * and reallocate a vd would be better than setting @vd->state to OFF.
+ *
+ * Therefore, let's set @vd->state to UNAVAILABLE if it returns an error.
+ */
+ if (ret)
+ goto err_vd_unavailable;
+ }
ret = gxp_firmware_run(gxp, vd, vd->core_list);
if (ret)
- vd->state = GXP_VD_UNAVAILABLE;
- else
- vd->state = GXP_VD_RUNNING;
+ goto err_vd_block_unready;
+ vd->state = GXP_VD_RUNNING;
+ return ret;
+
+err_vd_block_unready:
+ gxp_vd_block_unready(vd);
+err_vd_unavailable:
+ vd->state = GXP_VD_UNAVAILABLE;
return ret;
}
-/* Caller must hold gxp->vd_semaphore */
+/*
+ * Caller must hold gxp->vd_semaphore.
+ *
+ * This function will be called from the `gxp_client_destroy` function if @vd->state is not
+ * GXP_VD_OFF.
+ *
+ * Note for the case where the MCU firmware crashes:
+ *
+ * In the MCU mode, the `gxp_vd_suspend` function will redirect to this function, but it will not
+ * happen when the @vd->state is GXP_VD_UNAVAILABLE. Therefore, if the MCU firmware crashes,
+ * @vd->state will be changed to GXP_VD_UNAVAILABLE and this function will not be called even
+ * though the runtime is going to release the vd wakelock.
+ *
+ * It means @vd->state will not be changed to GXP_VD_OFF when the vd wakelock is released (i.e., the
+ * state will be kept as GXP_VD_UNAVAILABLE) and when the `gxp_vd_block_unready` function is called
+ * by releasing the block wakelock, it will not send `release_vmbox` and `unlink_offload_vmbox` KCI
+ * commands to the crashed MCU firmware. This function will be finally called when the runtime
+ * closes the fd of the device file.
+ */
void gxp_vd_stop(struct gxp_virtual_device *vd)
{
struct gxp_dev *gxp = vd->gxp;
@@ -421,7 +475,8 @@ void gxp_vd_stop(struct gxp_virtual_device *vd)
*/
for (core = 0; core < GXP_NUM_CORES; core++) {
if (gxp->core_to_vd[core] == vd) {
- lpm_state = gxp_lpm_get_state(gxp, CORE_TO_PSM(core));
+ lpm_state = gxp_lpm_get_state(
+ gxp, CORE_TO_PSM(core));
if (lpm_state != LPM_PG_STATE)
hold_core_in_reset(gxp, core);
}
@@ -429,13 +484,19 @@ void gxp_vd_stop(struct gxp_virtual_device *vd)
}
gxp_firmware_stop(gxp, vd, vd->core_list);
- if (vd->state == GXP_VD_READY || vd->state == GXP_VD_RUNNING)
+ if (vd->state == GXP_VD_READY || vd->state == GXP_VD_RUNNING ||
+ vd->state == GXP_VD_UNAVAILABLE)
gxp_dma_domain_detach_device(gxp, vd->domain);
vd->state = GXP_VD_OFF;
}
/*
* Caller must have locked `gxp->vd_semaphore` for writing.
+ *
+ * This function will be called from the `gxp_client_release_vd_wakelock` function when the runtime
+ * is going to release the vd wakelock only if the @vd->state is not GXP_VD_UNAVAILABLE.
+ *
+ * In the MCU mode, this function will redirect to the `gxp_vd_stop` function.
*/
void gxp_vd_suspend(struct gxp_virtual_device *vd)
{
@@ -462,16 +523,19 @@ void gxp_vd_suspend(struct gxp_virtual_device *vd)
*/
for (core = 0; core < GXP_NUM_CORES; core++) {
if (gxp->core_to_vd[core] == vd) {
- if (!gxp_lpm_wait_state_ne(gxp, CORE_TO_PSM(core), LPM_ACTIVE_STATE)) {
+ if (!gxp_lpm_wait_state_ne(gxp, CORE_TO_PSM(core),
+ LPM_ACTIVE_STATE)) {
vd->state = GXP_VD_UNAVAILABLE;
failed_cores |= BIT(core);
hold_core_in_reset(gxp, core);
- dev_err(gxp->dev, "Core %u stuck at LPM_ACTIVE_STATE", core);
+ dev_err(gxp->dev,
+ "Core %u stuck at LPM_ACTIVE_STATE",
+ core);
continue;
}
/* Mark the boot mode as a suspend event */
- gxp_firmware_set_boot_mode(gxp, core,
- GXP_BOOT_MODE_REQUEST_SUSPEND);
+ gxp_firmware_set_boot_mode(
+ gxp, core, GXP_BOOT_MODE_REQUEST_SUSPEND);
/*
* Request a suspend event by sending a mailbox
* notification.
@@ -484,10 +548,11 @@ void gxp_vd_suspend(struct gxp_virtual_device *vd)
for (core = 0; core < GXP_NUM_CORES; core++) {
if (gxp->core_to_vd[core] == vd) {
if (!(failed_cores & BIT(core))) {
- if (!gxp_lpm_wait_state_eq(gxp, CORE_TO_PSM(core),
+ if (!gxp_lpm_wait_state_eq(gxp,
+ CORE_TO_PSM(core),
LPM_PG_STATE)) {
boot_state = gxp_firmware_get_boot_mode(
- gxp, core);
+ gxp, core);
if (boot_state !=
GXP_BOOT_MODE_STATUS_SUSPEND_COMPLETED) {
dev_err(gxp->dev,
@@ -499,7 +564,8 @@ void gxp_vd_suspend(struct gxp_virtual_device *vd)
}
} else {
/* Re-set PS1 as the default low power state. */
- gxp_lpm_enable_state(gxp, CORE_TO_PSM(core),
+ gxp_lpm_enable_state(gxp,
+ CORE_TO_PSM(core),
LPM_CG_STATE);
}
}
@@ -559,24 +625,28 @@ int gxp_vd_resume(struct gxp_virtual_device *vd)
* changed. If it's changed, it means the block is rebooted and
* therefore we need to set up the hardware again.
*/
- if (vd->blk_switch_count_when_suspended != curr_blk_switch_count) {
+ if (vd->blk_switch_count_when_suspended !=
+ curr_blk_switch_count) {
ret = gxp_firmware_setup_hw_after_block_off(
gxp, core, /*verbose=*/false);
if (ret) {
vd->state = GXP_VD_UNAVAILABLE;
failed_cores |= BIT(core);
- dev_err(gxp->dev, "Failed to power up core %u\n", core);
+ dev_err(gxp->dev,
+ "Failed to power up core %u\n",
+ core);
continue;
}
}
/* Mark this as a resume power-up event. */
- gxp_firmware_set_boot_mode(gxp, core,
- GXP_BOOT_MODE_REQUEST_RESUME);
+ gxp_firmware_set_boot_mode(
+ gxp, core, GXP_BOOT_MODE_REQUEST_RESUME);
/*
* Power on the core by explicitly switching its PSM to
* PS0 (LPM_ACTIVE_STATE).
*/
- gxp_lpm_set_state(gxp, CORE_TO_PSM(core), LPM_ACTIVE_STATE,
+ gxp_lpm_set_state(gxp, CORE_TO_PSM(core),
+ LPM_ACTIVE_STATE,
/*verbose=*/false);
}
}
@@ -657,8 +727,7 @@ uint gxp_vd_phys_core_list(struct gxp_virtual_device *vd)
return core_list;
}
-int gxp_vd_mapping_store(struct gxp_virtual_device *vd,
- struct gxp_mapping *map)
+int gxp_vd_mapping_store(struct gxp_virtual_device *vd, struct gxp_mapping *map)
{
struct rb_node **link;
struct rb_node *parent = NULL;
@@ -824,3 +893,11 @@ void gxp_vd_release_credit(struct gxp_virtual_device *vd)
vd->credit++;
spin_unlock_irqrestore(&vd->credit_lock, flags);
}
+
+void gxp_vd_put(struct gxp_virtual_device *vd)
+{
+ if (!vd)
+ return;
+ if (refcount_dec_and_test(&vd->refcount))
+ kfree(vd);
+}
diff --git a/gxp-vd.h b/gxp-vd.h
index 704e40f..d9c28c9 100644
--- a/gxp-vd.h
+++ b/gxp-vd.h
@@ -11,6 +11,7 @@
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/rbtree.h>
+#include <linux/refcount.h>
#include <linux/rwsem.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
@@ -48,6 +49,12 @@ enum gxp_virtual_device_state {
* Note: this state will only be set on suspend/resume failure.
*/
GXP_VD_UNAVAILABLE,
+ /*
+	 * gxp_vd_release() has been called. A VD in this state is waiting
+	 * for the last reference to be put(). All fields in the VD are
+	 * invalid in this state.
+ */
+ GXP_VD_RELEASED,
};
struct gxp_virtual_device {
@@ -106,14 +113,12 @@ struct gxp_virtual_device {
/* Whether it's the first time allocating a VMBox for this VD. */
bool first_open;
bool is_secure;
+ refcount_t refcount;
+ /* A constant ID assigned after VD is allocated. For debug only. */
+ int vdid;
};
/*
- * TODO(b/193180931) cleanup the relationship between the internal GXP modules.
- * For example, whether or not gxp_vd owns the gxp_fw module, and if so, if
- * other modules are expected to access the gxp_fw directly or only via gxp_vd.
- */
-/*
* Initializes the device management subsystem and allocates resources for it.
* This is expected to be called once per driver lifecycle.
*/
@@ -145,12 +150,15 @@ struct gxp_virtual_device *gxp_vd_allocate(struct gxp_dev *gxp,
u16 requested_cores);
/**
- * gxp_vd_release() - Cleanup and free a struct gxp_virtual_device
+ * gxp_vd_release() - Cleanup a struct gxp_virtual_device
* @vd: The virtual device to be released
*
* The caller must have locked gxp->vd_semaphore for writing.
*
* A virtual device must be stopped before it can be released.
+ *
+ * If @vd's reference count is 1 before this call, this function frees @vd.
+ * Otherwise @vd's state is set to GXP_VD_RELEASED.
*/
void gxp_vd_release(struct gxp_virtual_device *vd);
@@ -330,4 +338,19 @@ bool gxp_vd_has_and_use_credit(struct gxp_virtual_device *vd);
*/
void gxp_vd_release_credit(struct gxp_virtual_device *vd);
+/* Increases reference count of @vd by one and returns @vd. */
+static inline struct gxp_virtual_device *
+gxp_vd_get(struct gxp_virtual_device *vd)
+{
+ WARN_ON_ONCE(!refcount_inc_not_zero(&vd->refcount));
+ return vd;
+}
+
+/*
+ * Decreases reference count of @vd by one.
+ *
+ * If @vd->refcount becomes 0, @vd will be freed.
+ */
+void gxp_vd_put(struct gxp_virtual_device *vd);
+
#endif /* __GXP_VD_H__ */
diff --git a/gxp.h b/gxp.h
index 0048584..707785d 100644
--- a/gxp.h
+++ b/gxp.h
@@ -13,7 +13,7 @@
/* Interface Version */
#define GXP_INTERFACE_VERSION_MAJOR 1
-#define GXP_INTERFACE_VERSION_MINOR 6
+#define GXP_INTERFACE_VERSION_MINOR 7
#define GXP_INTERFACE_VERSION_BUILD 0
/*
@@ -932,4 +932,69 @@ struct gxp_mailbox_uci_response_ioctl {
#define GXP_MAILBOX_UCI_RESPONSE \
_IOR(GXP_IOCTL_BASE, 31, struct gxp_mailbox_uci_response_ioctl)
+/*
+ * struct gxp_create_sync_fence_data
+ * @seqno: the seqno to initialize the fence with
+ * @timeline_name: the name of the timeline the fence belongs to
+ * @fence: returns the fd of the new sync_file with the new fence
+ *
+ * Timeline names can be up to 128 characters (including trailing NUL byte)
+ * for gxp debugfs and kernel debug logs. These names are truncated to 32
+ * characters in the data returned by the standard SYNC_IOC_FILE_INFO
+ * ioctl.
+ */
+#define GXP_SYNC_TIMELINE_NAME_LEN 128
+struct gxp_create_sync_fence_data {
+ __u32 seqno;
+ char timeline_name[GXP_SYNC_TIMELINE_NAME_LEN];
+ __s32 fence;
+};
+
+/*
+ * Create a DMA sync fence, return the sync_file fd for the new fence.
+ *
+ * The client must have allocated a virtual device.
+ */
+#define GXP_CREATE_SYNC_FENCE \
+ _IOWR(GXP_IOCTL_BASE, 32, struct gxp_create_sync_fence_data)
+
+/*
+ * struct gxp_signal_sync_fence_data
+ * @fence: fd of the sync_file for the fence
+ * @error: error status errno value or zero for success
+ */
+struct gxp_signal_sync_fence_data {
+ __s32 fence;
+ __s32 error;
+};
+
+/*
+ * Signal a DMA sync fence with optional error status.
+ * Can pass a sync_file fd created by any driver.
+ * Signals the first DMA sync fence in the sync file.
+ */
+#define GXP_SIGNAL_SYNC_FENCE \
+ _IOW(GXP_IOCTL_BASE, 33, struct gxp_signal_sync_fence_data)
+
+/*
+ * struct gxp_sync_fence_status
+ * @fence: fd of the sync_file for the fence
+ * @status: returns:
+ * 0 if active
+ * 1 if signaled with no error
+ * negative errno value if signaled with error
+ */
+struct gxp_sync_fence_status {
+ __s32 fence;
+ __s32 status;
+};
+
+/*
+ * Retrieve DMA sync fence status.
+ * Can pass a sync_file fd created by any driver.
+ * Returns the status of the first DMA sync fence in the sync file.
+ */
+#define GXP_SYNC_FENCE_STATUS \
+ _IOWR(GXP_IOCTL_BASE, 34, struct gxp_sync_fence_status)
+
#endif /* __GXP_H__ */