author     Robin Peng <robinpeng@google.com>  2022-10-22 13:43:22 +0000
committer  Robin Peng <robinpeng@google.com>  2022-10-22 13:43:22 +0000
commit     b2c3ba8f0c9f660535adcf036755c5f8c997b20f (patch)
tree       bac1761e7c851ce376acc314e9d43337df9b3890
parent     ad2c605080db811f11b776b62cf436a095e78689 (diff)
parent     917cacdabbd90bf94ebcdca5f6bf51d3b316f3a7 (diff)
download   zuma-b2c3ba8f0c9f660535adcf036755c5f8c997b20f.tar.gz
Merge android13-gs-pixel-5.15 into android14-gs-pixel-5.15
Bug: 236259002
Signed-off-by: Robin Peng <robinpeng@google.com>
Change-Id: I9b811be2d61ea51a0d94603542ea660509ca3575
-rw-r--r--  Makefile  12
-rw-r--r--  callisto-platform.c  260
-rw-r--r--  callisto/config.h  3
-rw-r--r--  callisto/csrs.h  4
-rw-r--r--  gcip-kernel-driver/drivers/gcip/Makefile  3
-rw-r--r--  gcip-kernel-driver/drivers/gcip/gcip-kci.c  7
-rw-r--r--  gcip-kernel-driver/drivers/gcip/gcip-mailbox.c  19
-rw-r--r--  gcip-kernel-driver/drivers/gcip/gcip-telemetry.c  267
-rw-r--r--  gcip-kernel-driver/include/gcip/gcip-kci.h  2
-rw-r--r--  gcip-kernel-driver/include/gcip/gcip-mailbox.h  8
-rw-r--r--  gcip-kernel-driver/include/gcip/gcip-telemetry.h  123
-rw-r--r--  gxp-client.c  2
-rw-r--r--  gxp-client.h  9
-rw-r--r--  gxp-common-platform.c  299
-rw-r--r--  gxp-config.h  4
-rw-r--r--  gxp-core-telemetry.c (renamed from gxp-telemetry.c)  269
-rw-r--r--  gxp-core-telemetry.h (renamed from gxp-telemetry.h)  75
-rw-r--r--  gxp-dci.c  165
-rw-r--r--  gxp-debug-dump.c  14
-rw-r--r--  gxp-debug-dump.h  9
-rw-r--r--  gxp-debugfs.c  47
-rw-r--r--  gxp-dma-iommu.c  179
-rw-r--r--  gxp-dma.h  140
-rw-r--r--  gxp-dmabuf.c  2
-rw-r--r--  gxp-dmabuf.h  2
-rw-r--r--  gxp-domain-pool.c  57
-rw-r--r--  gxp-domain-pool.h  6
-rw-r--r--  gxp-firmware-data.c  73
-rw-r--r--  gxp-firmware-data.h  36
-rw-r--r--  gxp-firmware.c  39
-rw-r--r--  gxp-host-device-structs.h  34
-rw-r--r--  gxp-internal.h  46
-rw-r--r--  gxp-kci.c  231
-rw-r--r--  gxp-kci.h  89
-rw-r--r--  gxp-lpm.h  3
-rw-r--r--  gxp-mailbox-driver.c  5
-rw-r--r--  gxp-mailbox-driver.h  6
-rw-r--r--  gxp-mailbox.c  1030
-rw-r--r--  gxp-mailbox.h  241
-rw-r--r--  gxp-mapping.c  2
-rw-r--r--  gxp-mapping.h  4
-rw-r--r--  gxp-mcu-firmware.c  21
-rw-r--r--  gxp-mcu-telemetry.c  241
-rw-r--r--  gxp-mcu-telemetry.h  69
-rw-r--r--  gxp-mcu.c  21
-rw-r--r--  gxp-mcu.h  2
-rw-r--r--  gxp-notification.h  2
-rw-r--r--  gxp-pm.c  36
-rw-r--r--  gxp-uci.c  158
-rw-r--r--  gxp-uci.h  21
-rw-r--r--  gxp-vd.c  78
-rw-r--r--  gxp-vd.h  17
-rw-r--r--  gxp-wakelock.h  2
-rw-r--r--  gxp.h  977
54 files changed, 2821 insertions, 2650 deletions
diff --git a/Makefile b/Makefile
index 4e02b67..acbf0ed 100644
--- a/Makefile
+++ b/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_$(GXP_CHIP)) += gxp.o
gxp-objs += \
gxp-bpm.o \
gxp-client.o \
+ gxp-core-telemetry.o \
gxp-debug-dump.o \
gxp-debugfs.o \
gxp-dma-iommu.o \
@@ -30,19 +31,12 @@ gxp-objs += \
gxp-pm.o \
gxp-range-alloc.o \
gxp-ssmt.o \
- gxp-telemetry.o \
gxp-thermal.o \
gxp-vd.o \
gxp-wakelock.o
-ifeq ($(GXP_CHIP),AMALTHEA)
-gxp-objs += \
- gxp-platform.o
-
-EDGETPU_CHIP := janeiro
-
-else ifeq ($(GXP_CHIP),CALLISTO)
+ifeq ($(GXP_CHIP),CALLISTO)
USE_GCIP := TRUE
@@ -51,12 +45,14 @@ gxp-objs += \
gxp-dci.o \
gxp-kci.o \
gxp-mcu-firmware.o \
+ gxp-mcu-telemetry.o \
gxp-mcu.o \
gxp-uci.o \
gxp-usage-stats.o
EDGETPU_CHIP := rio
+
endif
ifeq ($(CONFIG_$(GXP_CHIP)),m)
diff --git a/callisto-platform.c b/callisto-platform.c
index 8a067ba..65b40ca 100644
--- a/callisto-platform.c
+++ b/callisto-platform.c
@@ -10,10 +10,13 @@
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
+#include <gcip/gcip-telemetry.h>
+
#include "callisto-platform.h"
#include "gxp-common-platform.c"
#include "gxp-kci.h"
+#include "gxp-mcu-telemetry.h"
#include "gxp-uci.h"
#include "gxp-usage-stats.h"
@@ -25,12 +28,8 @@ static char *callisto_work_mode_name = "direct";
module_param_named(work_mode, callisto_work_mode_name, charp, 0660);
-/*
- * TODO(b/245238253):
- * Set default to false once we have most folks move to use B0 samples.
- */
-static bool zuma_a0 = true;
-module_param_named(a0, zuma_a0, bool, 0660);
+static char *zuma_revision = "a0";
+module_param_named(chip_rev, zuma_revision, charp, 0660);
static int callisto_platform_parse_dt(struct platform_device *pdev,
struct gxp_dev *gxp)
@@ -117,15 +116,31 @@ static int gxp_ioctl_uci_command_helper(struct gxp_client *client,
cmd.core_command_params.memory_operating_point = ibuf->memory_power_state;
/* cmd.seq is assigned by mailbox implementation */
cmd.type = CORE_COMMAND;
- cmd.priority = 0; /* currently unused */
- cmd.client_id = iommu_aux_get_pasid(client->vd->domain, gxp->dev);
+	/* TODO(b/248179414): Remove core assignment when MCU fw re-enables the sticky core scheduler. */
+ down_read(&gxp->vd_semaphore);
+ cmd.priority = gxp_vd_virt_core_to_phys_core(client->vd, ibuf->virtual_core_id);
+ up_read(&gxp->vd_semaphore);
+ if (cmd.priority < 0) {
+ dev_err(gxp->dev,
+ "Mailbox command failed: Invalid virtual core id (%u)\n",
+ ibuf->virtual_core_id);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ cmd.client_id = client->vd->client_id;
+
+ /*
+	 * TODO(b/248196344): Use the only permitted eventfd for the virtual device
+	 * when MCU fw re-enables the sticky core scheduler.
+ */
ret = gxp_uci_send_command(
&callisto->mcu.uci, &cmd,
&client->vd->mailbox_resp_queues[UCI_RESOURCE_ID].queue,
&client->vd->mailbox_resp_queues[UCI_RESOURCE_ID].lock,
&client->vd->mailbox_resp_queues[UCI_RESOURCE_ID].waitq,
- client->mb_eventfds[UCI_RESOURCE_ID]);
+ client->mb_eventfds[ibuf->virtual_core_id]);
if (ret) {
dev_err(gxp->dev, "Failed to enqueue mailbox command (ret=%d)\n",
ret);
@@ -157,38 +172,6 @@ static int gxp_ioctl_uci_command(struct gxp_client *client,
return 0;
}
-static int gxp_ioctl_uci_command_compat(
- struct gxp_client *client,
- struct gxp_mailbox_command_compat_ioctl __user *argp)
-{
- struct gxp_mailbox_command_compat_ioctl ibuf;
- struct gxp_mailbox_command_ioctl mailbox_command_buf;
- int ret;
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
- return -EFAULT;
-
- mailbox_command_buf.num_cores = ibuf.num_cores;
- mailbox_command_buf.sequence_number = ibuf.sequence_number;
- mailbox_command_buf.device_address = ibuf.device_address;
- mailbox_command_buf.size = ibuf.size;
- mailbox_command_buf.flags = ibuf.flags;
- mailbox_command_buf.gxp_power_state = GXP_POWER_STATE_OFF;
- mailbox_command_buf.memory_power_state = MEMORY_POWER_STATE_UNDEFINED;
- mailbox_command_buf.power_flags = 0;
-
- ret = gxp_ioctl_uci_command_helper(client, &mailbox_command_buf);
- if (ret)
- return ret;
-
- ibuf.sequence_number = mailbox_command_buf.sequence_number;
-
- if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
- return -EFAULT;
-
- return 0;
-}
-
static int
gxp_ioctl_uci_response(struct gxp_client *client,
struct gxp_mailbox_response_ioctl __user *argp)
@@ -229,6 +212,42 @@ out:
return ret;
}
+static inline enum gcip_telemetry_type to_gcip_telemetry_type(u8 type)
+{
+ if (type == GXP_TELEMETRY_TYPE_LOGGING)
+ return GCIP_TELEMETRY_LOG;
+ else
+ return GCIP_TELEMETRY_TRACE;
+}
+
+static int gxp_register_mcu_telemetry_eventfd(
+ struct gxp_client *client,
+ struct gxp_register_telemetry_eventfd_ioctl __user *argp)
+{
+ struct gxp_mcu *mcu = gxp_mcu_of(client->gxp);
+ struct gxp_register_telemetry_eventfd_ioctl ibuf;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ return gxp_mcu_telemetry_register_eventfd(
+ mcu, to_gcip_telemetry_type(ibuf.type), ibuf.eventfd);
+}
+
+static int gxp_unregister_mcu_telemetry_eventfd(
+ struct gxp_client *client,
+ struct gxp_register_telemetry_eventfd_ioctl __user *argp)
+{
+ struct gxp_mcu *mcu = gxp_mcu_of(client->gxp);
+ struct gxp_register_telemetry_eventfd_ioctl ibuf;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ return gxp_mcu_telemetry_unregister_eventfd(
+ mcu, to_gcip_telemetry_type(ibuf.type));
+}
+
static long callisto_platform_ioctl(struct file *file, uint cmd, ulong arg)
{
struct gxp_client *client = file->private_data;
@@ -241,12 +260,15 @@ static long callisto_platform_ioctl(struct file *file, uint cmd, ulong arg)
case GXP_MAILBOX_COMMAND:
ret = gxp_ioctl_uci_command(client, argp);
break;
- case GXP_MAILBOX_COMMAND_COMPAT:
- ret = gxp_ioctl_uci_command_compat(client, argp);
- break;
case GXP_MAILBOX_RESPONSE:
ret = gxp_ioctl_uci_response(client, argp);
break;
+ case GXP_REGISTER_MCU_TELEMETRY_EVENTFD:
+ ret = gxp_register_mcu_telemetry_eventfd(client, argp);
+ break;
+ case GXP_UNREGISTER_MCU_TELEMETRY_EVENTFD:
+ ret = gxp_unregister_mcu_telemetry_eventfd(client, argp);
+ break;
default:
ret = -ENOTTY; /* unknown command */
}
@@ -254,6 +276,31 @@ static long callisto_platform_ioctl(struct file *file, uint cmd, ulong arg)
return ret;
}
+static int callisto_platform_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct gxp_client *client = file->private_data;
+ struct gxp_mcu *mcu = gxp_mcu_of(client->gxp);
+ int ret;
+
+ if (gxp_is_direct_mode(client->gxp))
+ return -EOPNOTSUPP;
+
+ switch (vma->vm_pgoff << PAGE_SHIFT) {
+ case GXP_MMAP_MCU_LOG_BUFFER_OFFSET:
+ ret = gxp_mcu_telemetry_mmap_buffer(mcu, GCIP_TELEMETRY_LOG,
+ vma);
+ break;
+ case GXP_MMAP_MCU_TRACE_BUFFER_OFFSET:
+ ret = gxp_mcu_telemetry_mmap_buffer(mcu, GCIP_TELEMETRY_TRACE,
+ vma);
+ break;
+ default:
+ ret = -EOPNOTSUPP; /* unknown offset */
+ }
+
+ return ret;
+}
+
static int callisto_request_power_states(struct gxp_client *client,
struct gxp_power_states power_states)
{
@@ -270,7 +317,7 @@ static int callisto_request_power_states(struct gxp_client *client,
cmd.wakelock_command_params.memory_operating_point = power_states.memory;
cmd.type = WAKELOCK_COMMAND;
cmd.priority = 0; /* currently unused */
- cmd.client_id = iommu_aux_get_pasid(client->vd->domain, gxp->dev);
+ cmd.client_id = client->vd->client_id;
ret = gxp_uci_send_command(
&callisto->mcu.uci, &cmd,
@@ -286,29 +333,37 @@ static int callisto_platform_after_vd_block_ready(struct gxp_dev *gxp,
{
struct gxp_kci *kci = &(to_callisto_dev(gxp)->mcu.kci);
int pasid, ret;
+ u8 operation = KCI_ALLOCATE_VMBOX_OP_ALLOCATE_VMBOX;
if (gxp_is_direct_mode(gxp))
return 0;
- pasid = iommu_aux_get_pasid(vd->domain, gxp->dev);
- ret = gxp_kci_allocate_vmbox(kci, vd->num_cores, pasid,
- vd->slice_index);
+ if (vd->tpu_client_id >= 0)
+ operation |= KCI_ALLOCATE_VMBOX_OP_LINK_OFFLOAD_VMBOX;
+
+ pasid = gxp_iommu_aux_get_pasid(gxp, vd->domain);
+ ret = gxp_kci_allocate_vmbox(kci, pasid, vd->num_cores, vd->slice_index,
+ vd->tpu_client_id, operation);
if (ret) {
+ if (ret != GCIP_KCI_ERROR_UNIMPLEMENTED) {
+ dev_err(gxp->dev,
+ "Failed to allocate VMBox for client %d, TPU client %d: %d",
+ pasid, vd->tpu_client_id, ret);
+ return ret;
+ }
+
/*
* TODO(241057541): Remove this conditional branch after the firmware side
* implements handling allocate_vmbox command.
*/
- if (ret == GCIP_KCI_ERROR_UNIMPLEMENTED) {
- dev_info(
- gxp->dev,
- "Allocating vmbox is not implemented from the firmware side");
- return 0;
- }
- dev_err(gxp->dev, "Failed to allocate virtual mailbox: ret=%d",
- ret);
+ dev_info(
+ gxp->dev,
+ "Allocating VMBox is not implemented from the firmware side");
}
- return ret;
+ vd->client_id = pasid;
+
+ return 0;
}
static void
@@ -316,27 +371,31 @@ callisto_platform_before_vd_block_unready(struct gxp_dev *gxp,
struct gxp_virtual_device *vd)
{
struct gxp_kci *kci = &(to_callisto_dev(gxp)->mcu.kci);
- int pasid, ret;
+ int ret;
if (gxp_is_direct_mode(gxp))
return;
- pasid = iommu_aux_get_pasid(vd->domain, gxp->dev);
- ret = gxp_kci_release_vmbox(kci, pasid);
+ if (vd->client_id < 0)
+ return;
+
+ ret = gxp_kci_release_vmbox(kci, vd->client_id);
if (ret) {
/*
* TODO(241057541): Remove this conditional branch after the firmware side
* implements handling allocate_vmbox command.
*/
- if (ret == GCIP_KCI_ERROR_UNIMPLEMENTED) {
+ if (ret == GCIP_KCI_ERROR_UNIMPLEMENTED)
dev_info(
gxp->dev,
- "Releasing vmbox is not implemented from the firmware side");
- return;
- }
- dev_err(gxp->dev, "Failed to release virtual mailbox: ret=%d",
- ret);
+ "Releasing VMBox is not implemented from the firmware side");
+ else
+ dev_err(gxp->dev,
+ "Failed to release VMBox for client %d: %d",
+ vd->client_id, ret);
}
+
+ vd->client_id = -1;
}
static int callisto_wakelock_after_blk_on(struct gxp_dev *gxp)
@@ -357,6 +416,62 @@ static void callisto_wakelock_before_blk_off(struct gxp_dev *gxp)
gxp_mcu_firmware_stop(mcu_fw);
}
+static int callisto_after_map_tpu_mbx_queue(struct gxp_dev *gxp,
+ struct gxp_client *client)
+{
+ struct gxp_kci *kci = &(to_callisto_dev(gxp)->mcu.kci);
+ int tpu_client_id = -1, ret;
+
+ /*
+ * TODO(b/247923533): Get a client ID from the TPU kernel driver and remove this workaround
+ * condition.
+ */
+ if (tpu_client_id < 0)
+ return 0;
+
+ if (client->vd->client_id >= 0) {
+ ret = gxp_kci_allocate_vmbox(
+ kci, client->vd->client_id, 0, 0, tpu_client_id,
+ KCI_ALLOCATE_VMBOX_OP_LINK_OFFLOAD_VMBOX);
+ if (ret) {
+ if (ret != GCIP_KCI_ERROR_UNIMPLEMENTED) {
+ dev_err(gxp->dev,
+ "Failed to link TPU VMbox client %d, TPU client %d: %d",
+ client->vd->client_id, tpu_client_id,
+ ret);
+ return ret;
+ }
+
+ /*
+ * TODO(241057541): Remove this conditional branch after the firmware side
+ * implements handling allocate_vmbox command.
+ */
+ dev_info(
+ gxp->dev,
+			"Linking TPU VMBox is not implemented from the firmware side");
+ }
+ }
+
+ client->vd->tpu_client_id = tpu_client_id;
+
+ return 0;
+}
+
+static void callisto_before_unmap_tpu_mbx_queue(struct gxp_dev *gxp,
+ struct gxp_client *client)
+{
+ /*
+	 * We don't need to handle the case where the client releases a TPU vmbox linked to the
+	 * DSP client without notifying the DSP MCU firmware, because the client always releases
+	 * the DSP vmbox before the TPU vmbox. (I.e., the `release_vmbox` KCI command is always
+	 * sent to the DSP MCU firmware to release the DSP vmbox before the TPU vmbox is released,
+	 * and the firmware will stop TPU offloading.) Also, starting from Callisto, we don't have
+	 * to handle mapping/unmapping the TPU mailbox buffer here either. Therefore, just unset
+	 * the TPU client ID here.
+ */
+ client->vd->tpu_client_id = -1;
+}
+
static int gxp_platform_probe(struct platform_device *pdev)
{
struct callisto_dev *callisto =
@@ -371,6 +486,7 @@ static int gxp_platform_probe(struct platform_device *pdev)
callisto->gxp.after_probe = callisto_platform_after_probe;
callisto->gxp.before_remove = callisto_platform_before_remove;
callisto->gxp.handle_ioctl = callisto_platform_ioctl;
+ callisto->gxp.handle_mmap = callisto_platform_mmap;
callisto->gxp.after_vd_block_ready =
callisto_platform_after_vd_block_ready;
callisto->gxp.before_vd_block_unready =
@@ -379,6 +495,10 @@ static int gxp_platform_probe(struct platform_device *pdev)
callisto->gxp.wakelock_after_blk_on = callisto_wakelock_after_blk_on;
callisto->gxp.wakelock_before_blk_off =
callisto_wakelock_before_blk_off;
+ callisto->gxp.after_map_tpu_mbx_queue =
+ callisto_after_map_tpu_mbx_queue;
+ callisto->gxp.before_unmap_tpu_mbx_queue =
+ callisto_before_unmap_tpu_mbx_queue;
return gxp_common_platform_probe(pdev, &callisto->gxp);
}
@@ -437,9 +557,13 @@ bool gxp_is_direct_mode(struct gxp_dev *gxp)
return callisto->mode == DIRECT;
}
-bool gxp_is_a0(struct gxp_dev *gxp)
+enum gxp_chip_revision gxp_get_chip_revision(struct gxp_dev *gxp)
{
- return zuma_a0;
+ if (!strcmp(zuma_revision, "a0"))
+ return GXP_CHIP_A0;
+ if (!strcmp(zuma_revision, "b0"))
+ return GXP_CHIP_B0;
+ return GXP_CHIP_ANY;
}
enum callisto_work_mode callisto_dev_parse_work_mode(const char *work_mode)
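
The hunk above replaces the boolean `a0` module parameter with a string `chip_rev` parameter that gxp_get_chip_revision() parses on each query. A minimal standalone sketch of the same string-to-revision mapping, assuming only the enum values introduced in this diff (`parse_chip_revision` is a hypothetical helper name):

#include <string.h>

enum gxp_chip_revision { GXP_CHIP_A0, GXP_CHIP_B0, GXP_CHIP_ANY };

/* Hypothetical mirror of gxp_get_chip_revision(): maps the "chip_rev"
 * module-parameter string to a revision enum; unknown strings fall
 * back to GXP_CHIP_ANY instead of failing. */
static enum gxp_chip_revision parse_chip_revision(const char *rev)
{
	if (!strcmp(rev, "a0"))
		return GXP_CHIP_A0;
	if (!strcmp(rev, "b0"))
		return GXP_CHIP_B0;
	return GXP_CHIP_ANY;
}

Loading the module with `chip_rev=b0` (parameter name per module_param_named() above) would thus select GXP_CHIP_B0, and an unrecognized string degrades to GXP_CHIP_ANY rather than failing probe.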
diff --git a/callisto/config.h b/callisto/config.h
index b1d986d..cd47e1d 100644
--- a/callisto/config.h
+++ b/callisto/config.h
@@ -39,9 +39,6 @@
*/
#define GXP_IS_DMA_COHERENT
-/* Uses DCI from the direct mode */
-#define GXP_HAS_DCI
-
#include "context.h"
#include "csrs.h"
#include "iova.h"
diff --git a/callisto/csrs.h b/callisto/csrs.h
index fba6428..14659a8 100644
--- a/callisto/csrs.h
+++ b/callisto/csrs.h
@@ -82,6 +82,10 @@ enum gxp_csrs {
#define GXP_CORE_REG_ALT_RESET_VECTOR(core) GXP_CORE_REG(core, 0x4008)
#define GXP_CORE_REG_COMMON_INT_MASK_0(core) \
GXP_CORE_MCU_REG(core, 0x4010, 0x4020)
+#define GXP_CORE_REG_COMMON_INT_MASK_1(core) \
+ GXP_CORE_MCU_REG(core, 0x4014, 0x4024)
+#define GXP_CORE_REG_DEDICATED_INT_MASK(core) \
+ GXP_CORE_MCU_REG(core, 0x401c, 0x4028)
#define GXP_CORE_REG_ETM_PWRCTL(core) GXP_CORE_REG(core, 0xB020)
#define SYNC_BARRIER_SHADOW_OFFSET 0x800
diff --git a/gcip-kernel-driver/drivers/gcip/Makefile b/gcip-kernel-driver/drivers/gcip/Makefile
index 1b988e4..7f6d2f0 100644
--- a/gcip-kernel-driver/drivers/gcip/Makefile
+++ b/gcip-kernel-driver/drivers/gcip/Makefile
@@ -6,7 +6,8 @@
CONFIG_GCIP ?= m
obj-$(CONFIG_GCIP) += gcip.o
-gcip-objs := gcip-firmware.o gcip-image-config.o gcip-kci.o gcip-mailbox.o gcip-mem-pool.o
+gcip-objs := gcip-firmware.o gcip-image-config.o gcip-kci.o gcip-mailbox.o \
+ gcip-mem-pool.o gcip-telemetry.o
CURRENT_DIR=$(dir $(abspath $(lastword $(MAKEFILE_LIST))))
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-kci.c b/gcip-kernel-driver/drivers/gcip/gcip-kci.c
index 215c533..e11d2a1 100644
--- a/gcip-kernel-driver/drivers/gcip/gcip-kci.c
+++ b/gcip-kernel-driver/drivers/gcip/gcip-kci.c
@@ -245,8 +245,9 @@ static bool gcip_kci_before_handle_resp(struct gcip_mailbox *mailbox, const void
int ret = gcip_reverse_kci_add_resp(kci, elem);
if (ret)
- dev_warn(kci->dev, "Failed to handle reverse KCI code %u (%d)\n",
- elem->code, ret);
+ dev_warn_ratelimited(kci->dev,
+ "Failed to handle reverse KCI code %u (%d)\n",
+ elem->code, ret);
return false;
}
@@ -448,7 +449,7 @@ static inline void gcip_kci_set_data(struct gcip_kci *kci, void *data)
kci->data = data;
}
-int gcip_kci_init(struct gcip_kci *kci, struct gcip_kci_args *args)
+int gcip_kci_init(struct gcip_kci *kci, const struct gcip_kci_args *args)
{
int ret;
struct gcip_mailbox_args mailbox_args;
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-mailbox.c b/gcip-kernel-driver/drivers/gcip/gcip-mailbox.c
index ec19ef8..305407d 100644
--- a/gcip-kernel-driver/drivers/gcip/gcip-mailbox.c
+++ b/gcip-kernel-driver/drivers/gcip/gcip-mailbox.c
@@ -186,10 +186,11 @@ out:
* 2. #seq == @resp->seq:
* - Copy @resp, pop the head and we're done.
* 3. #seq < @resp->seq:
- * - Should not happen, this implies the sequence number of either entries in wait_list or
- * responses are out-of-order, or remote didn't respond to a command. In this case, the status
- * of response will be set to GCIP_MAILBOX_STATUS_NO_RESPONSE if the command is sync request.
- * - Pop until case 1. or 2.
+ * - If @mailbox->ignore_seq_order is specified, this is a normal case and the entry is skipped.
+ *    - Otherwise, it *should* not happen; it implies that the sequence numbers of entries in the
+ *      wait_list or of the responses are out of order, or that the remote didn't respond to a
+ *      command. In this case, the status of the response will be set to
+ *      GCIP_MAILBOX_STATUS_NO_RESPONSE. Then pop until case 1. or 2.
*/
static void gcip_mailbox_handle_response(struct gcip_mailbox *mailbox, void *resp)
{
@@ -235,10 +236,11 @@ static void gcip_mailbox_handle_response(struct gcip_mailbox *mailbox, void *res
kfree(cur);
break;
}
- /* cur_seq < seq */
- SET_RESP_ELEM_STATUS(cur->resp, GCIP_MAILBOX_STATUS_NO_RESPONSE);
- list_del(&cur->list);
- kfree(cur);
+ if (!mailbox->ignore_seq_order && cur_seq < seq) {
+ SET_RESP_ELEM_STATUS(cur->resp, GCIP_MAILBOX_STATUS_NO_RESPONSE);
+ list_del(&cur->list);
+ kfree(cur);
+ }
}
RELEASE_WAIT_LIST_LOCK(true, flags);
@@ -485,6 +487,7 @@ int gcip_mailbox_init(struct gcip_mailbox *mailbox, const struct gcip_mailbox_ar
mailbox->resp_elem_size = args->resp_elem_size;
mailbox->timeout = args->timeout;
mailbox->cur_seq = 0;
+ mailbox->ignore_seq_order = args->ignore_seq_order;
gcip_mailbox_set_data(mailbox, args->data);
ret = gcip_mailbox_set_ops(mailbox, args->ops);
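
The new `ignore_seq_order` flag relaxes the wait-list sweep above: when it is set, an entry whose sequence number is lower than that of the arrived response is skipped instead of being marked GCIP_MAILBOX_STATUS_NO_RESPONSE, so responses may legitimately arrive out of order. A hedged sketch of opting in at init time; only the `gcip_mailbox_args` fields visible in this diff are filled in, and the remaining required fields (queue pointers, element sizes) are elided:

#include <gcip/gcip-mailbox.h>

/* Sketch only: opts a mailbox into out-of-order responses. The queue
 * and element-size fields that gcip_mailbox_init() also needs are
 * omitted here for brevity. */
static int my_mailbox_init_ooo(struct gcip_mailbox *mailbox,
			       const struct gcip_mailbox_ops *ops, void *data)
{
	struct gcip_mailbox_args args = {
		.ops = ops,
		.data = data,
		.timeout = 1000,		/* ms; illustrative value */
		.ignore_seq_order = true,	/* accept out-of-order responses */
	};

	return gcip_mailbox_init(mailbox, &args);
}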
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-telemetry.c b/gcip-kernel-driver/drivers/gcip/gcip-telemetry.c
new file mode 100644
index 0000000..f557c24
--- /dev/null
+++ b/gcip-kernel-driver/drivers/gcip/gcip-telemetry.c
@@ -0,0 +1,267 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * GCIP telemetry: logging and tracing.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include <linux/delay.h>
+#include <linux/dev_printk.h>
+#include <linux/eventfd.h>
+#include <linux/log2.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include <gcip/gcip-telemetry.h>
+
+int gcip_telemetry_kci(struct gcip_telemetry *tel,
+ int (*send_kci)(struct gcip_telemetry_kci_args *),
+ struct gcip_telemetry_kci_args *args)
+{
+ int err;
+
+ dev_dbg(tel->dev, "Sending KCI %s", tel->name);
+ err = send_kci(args);
+
+ if (err < 0) {
+ dev_err(tel->dev, "KCI %s failed - %d", tel->name, err);
+ return err;
+ }
+
+ if (err > 0) {
+ dev_err(tel->dev, "KCI %s returned %d", tel->name, err);
+ return -EBADMSG;
+ }
+
+ dev_dbg(tel->dev, "KCI %s Succeeded", tel->name);
+
+ return 0;
+}
+
+int gcip_telemetry_set_event(struct gcip_telemetry *tel, u32 eventfd)
+{
+ struct eventfd_ctx *ctx;
+ ulong flags;
+
+ ctx = eventfd_ctx_fdget(eventfd);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ write_lock_irqsave(&tel->ctx_lock, flags);
+ if (tel->ctx)
+ eventfd_ctx_put(tel->ctx);
+ tel->ctx = ctx;
+ write_unlock_irqrestore(&tel->ctx_lock, flags);
+
+ return 0;
+}
+
+void gcip_telemetry_unset_event(struct gcip_telemetry *tel)
+{
+ ulong flags;
+
+ write_lock_irqsave(&tel->ctx_lock, flags);
+ if (tel->ctx)
+ eventfd_ctx_put(tel->ctx);
+ tel->ctx = NULL;
+ write_unlock_irqrestore(&tel->ctx_lock, flags);
+}
+
+/* Copy data out of the log buffer with wrapping. */
+static void copy_with_wrap(struct gcip_telemetry_header *header, void *dest, u32 length, u32 size,
+ void *start)
+{
+ const u32 wrap_bit = size + sizeof(*header);
+ u32 remaining = 0;
+ u32 head = header->head & (wrap_bit - 1);
+
+ if (head + length < size) {
+ memcpy(dest, start + head, length);
+ header->head += length;
+ } else {
+ remaining = size - head;
+ memcpy(dest, start + head, remaining);
+ memcpy(dest + remaining, start, length - remaining);
+ header->head = (header->head & wrap_bit) ^ wrap_bit;
+ header->head |= length - remaining;
+ }
+}
+
+void gcip_telemetry_fw_log(struct gcip_telemetry *log)
+{
+ struct device *dev = log->dev;
+ struct gcip_telemetry_header *header = log->header;
+ struct gcip_log_entry_header entry;
+ u8 *start;
+ const size_t queue_size = header->size - sizeof(*header);
+ const size_t max_length = queue_size - sizeof(entry);
+ char *buffer = kmalloc(max_length + 1, GFP_ATOMIC);
+
+ if (!buffer) {
+ header->head = header->tail;
+ return;
+ }
+ start = (u8 *)header + sizeof(*header);
+
+ while (header->head != header->tail) {
+ copy_with_wrap(header, &entry, sizeof(entry), queue_size, start);
+ if (entry.length == 0 || entry.length > max_length) {
+ header->head = header->tail;
+ dev_err(dev, "log queue is corrupted");
+ break;
+ }
+ copy_with_wrap(header, buffer, entry.length, queue_size, start);
+ buffer[entry.length] = 0;
+
+ if (entry.code > GCIP_FW_DMESG_LOG_LEVEL)
+ continue;
+
+ switch (entry.code) {
+ case GCIP_FW_LOG_LEVEL_VERBOSE:
+ case GCIP_FW_LOG_LEVEL_DEBUG:
+ dev_dbg(dev, "%s", buffer);
+ break;
+ case GCIP_FW_LOG_LEVEL_WARN:
+ dev_warn(dev, "%s", buffer);
+ break;
+ case GCIP_FW_LOG_LEVEL_ERROR:
+ dev_err(dev, "%s", buffer);
+ break;
+ case GCIP_FW_LOG_LEVEL_INFO:
+ default:
+ dev_info(dev, "%s", buffer);
+ break;
+ }
+ }
+ kfree(buffer);
+}
+
+void gcip_telemetry_fw_trace(struct gcip_telemetry *trace)
+{
+ struct gcip_telemetry_header *header = trace->header;
+
+ header->head = header->tail;
+}
+
+void gcip_telemetry_irq_handler(struct gcip_telemetry *tel)
+{
+ spin_lock(&tel->state_lock);
+
+ if (tel->state == GCIP_TELEMETRY_ENABLED && tel->header->head != tel->header->tail)
+ schedule_work(&tel->work);
+
+ spin_unlock(&tel->state_lock);
+}
+
+void gcip_telemetry_inc_mmap_count(struct gcip_telemetry *tel, int dif)
+{
+ mutex_lock(&tel->mmap_lock);
+ tel->mmapped_count += dif;
+ mutex_unlock(&tel->mmap_lock);
+}
+
+int gcip_telemetry_mmap_buffer(struct gcip_telemetry *tel, int (*mmap)(void *), void *args)
+{
+ int ret;
+
+ mutex_lock(&tel->mmap_lock);
+
+ if (!tel->mmapped_count) {
+ ret = mmap(args);
+
+ if (!ret)
+ tel->mmapped_count = 1;
+ } else {
+ ret = -EBUSY;
+ dev_warn(tel->dev, "%s is already mmapped %ld times", tel->name,
+ tel->mmapped_count);
+ }
+
+ mutex_unlock(&tel->mmap_lock);
+
+ return ret;
+}
+
+/* Worker for processing log/trace buffers. */
+static void gcip_telemetry_worker(struct work_struct *work)
+{
+ struct gcip_telemetry *tel = container_of(work, struct gcip_telemetry, work);
+ u32 prev_head;
+ ulong flags;
+
+ /*
+	 * Loops while telemetry is enabled, there is data to be consumed, and the previous
+	 * iteration made progress. If another IRQ arrives just after the last head != tail
+	 * check, we should get another worker schedule.
+ */
+ do {
+ spin_lock_irqsave(&tel->state_lock, flags);
+ if (tel->state != GCIP_TELEMETRY_ENABLED) {
+ spin_unlock_irqrestore(&tel->state_lock, flags);
+ return;
+ }
+
+ prev_head = tel->header->head;
+ if (tel->header->head != tel->header->tail) {
+ read_lock(&tel->ctx_lock);
+ if (tel->ctx)
+ eventfd_signal(tel->ctx, 1);
+ else
+ tel->fallback_fn(tel);
+ read_unlock(&tel->ctx_lock);
+ }
+
+ spin_unlock_irqrestore(&tel->state_lock, flags);
+ msleep(GCIP_TELEMETRY_LOG_RECHECK_DELAY);
+ } while (tel->header->head != tel->header->tail && tel->header->head != prev_head);
+}
+
+int gcip_telemetry_init(struct device *dev, struct gcip_telemetry *tel, const char *name,
+ void *vaddr, const size_t size,
+ void (*fallback_fn)(struct gcip_telemetry *))
+{
+ if (!is_power_of_2(size) || size <= sizeof(struct gcip_telemetry_header)) {
+ dev_err(dev,
+ "Size of GCIP telemetry buffer must be a power of 2 and greater than %zu.",
+ sizeof(struct gcip_telemetry_header));
+ return -EINVAL;
+ }
+
+ rwlock_init(&tel->ctx_lock);
+ tel->name = name;
+ tel->dev = dev;
+
+ tel->header = vaddr;
+ tel->header->head = 0;
+ tel->header->tail = 0;
+ tel->header->size = size;
+ tel->header->entries_dropped = 0;
+
+ tel->ctx = NULL;
+
+ spin_lock_init(&tel->state_lock);
+ INIT_WORK(&tel->work, gcip_telemetry_worker);
+ tel->fallback_fn = fallback_fn;
+ tel->state = GCIP_TELEMETRY_ENABLED;
+ mutex_init(&tel->mmap_lock);
+ tel->mmapped_count = 0;
+
+ return 0;
+}
+
+void gcip_telemetry_exit(struct gcip_telemetry *tel)
+{
+ ulong flags;
+
+ spin_lock_irqsave(&tel->state_lock, flags);
+ /* Prevents racing with the IRQ handler or worker. */
+ tel->state = GCIP_TELEMETRY_INVALID;
+ spin_unlock_irqrestore(&tel->state_lock, flags);
+ cancel_work_sync(&tel->work);
+
+ if (tel->ctx)
+ eventfd_ctx_put(tel->ctx);
+ tel->ctx = NULL;
+}
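
copy_with_wrap() leans on gcip_telemetry_init() rejecting non-power-of-two buffer sizes: `wrap_bit` (queue size plus header size, i.e. the total buffer size) is the single bit just above any valid offset, `head & (wrap_bit - 1)` extracts the byte offset, and the bit itself toggles each time a copy crosses the end of the queue. A self-contained illustration of that arithmetic, using assumed toy sizes:

#include <stdio.h>

/* Standalone mirror of the head arithmetic in copy_with_wrap(), with a
 * 16-byte queue inside a 32-byte (power-of-two) buffer, so wrap_bit is
 * 32 and valid offsets are 0..15. */
int main(void)
{
	const unsigned int size = 16;		/* queue payload bytes */
	const unsigned int wrap_bit = 32;	/* total buffer size */
	unsigned int head = 12;			/* 4 bytes before the end */
	const unsigned int length = 8;		/* this copy crosses the end */
	unsigned int offset = head & (wrap_bit - 1);

	if (offset + length < size) {
		head += length;			/* contiguous, no wrap */
	} else {
		unsigned int remaining = size - offset;
		/* copy `remaining` bytes from the tail, the rest from offset 0 */
		head = (head & wrap_bit) ^ wrap_bit;	/* toggle wrap phase */
		head |= length - remaining;		/* offset past the wrap */
	}
	printf("new head = 0x%x (offset %u)\n", head, head & (wrap_bit - 1));
	return 0;
}

Keeping the wrap phase in that spare bit lets `head == tail` still mean "empty" while distinguishing an empty queue from a full one whose indices share the same offset.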
diff --git a/gcip-kernel-driver/include/gcip/gcip-kci.h b/gcip-kernel-driver/include/gcip/gcip-kci.h
index c4b78be..74e44ae 100644
--- a/gcip-kernel-driver/include/gcip/gcip-kci.h
+++ b/gcip-kernel-driver/include/gcip/gcip-kci.h
@@ -281,7 +281,7 @@ struct gcip_kci_args {
};
/* Initializes a KCI object. */
-int gcip_kci_init(struct gcip_kci *kci, struct gcip_kci_args *args);
+int gcip_kci_init(struct gcip_kci *kci, const struct gcip_kci_args *args);
/* Cancels KCI and reverse KCI workers and workers that may send KCIs. */
void gcip_kci_cancel_work_queues(struct gcip_kci *kci);
diff --git a/gcip-kernel-driver/include/gcip/gcip-mailbox.h b/gcip-kernel-driver/include/gcip/gcip-mailbox.h
index 71a1e27..02af65e 100644
--- a/gcip-kernel-driver/include/gcip/gcip-mailbox.h
+++ b/gcip-kernel-driver/include/gcip/gcip-mailbox.h
@@ -346,6 +346,12 @@ struct gcip_mailbox {
const struct gcip_mailbox_ops *ops;
/* User-defined data. */
void *data;
+
+ /*
+ * The flag to specify sequence numbers of command responses are not
+ * required to be in order.
+ */
+ bool ignore_seq_order;
};
/* Arguments for gcip_mailbox_init. See struct gcip_mailbox for details. */
@@ -362,6 +368,8 @@ struct gcip_mailbox_args {
u32 timeout;
const struct gcip_mailbox_ops *ops;
void *data;
+
+ bool ignore_seq_order;
};
/* Initializes a mailbox object. */
diff --git a/gcip-kernel-driver/include/gcip/gcip-telemetry.h b/gcip-kernel-driver/include/gcip/gcip-telemetry.h
new file mode 100644
index 0000000..4556291
--- /dev/null
+++ b/gcip-kernel-driver/include/gcip/gcip-telemetry.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * GCIP telemetry: logging and tracing.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __GCIP_TELEMETRY_H__
+#define __GCIP_TELEMETRY_H__
+
+#include <linux/device.h>
+#include <linux/eventfd.h>
+#include <linux/mutex.h>
+#include <linux/rwlock_types.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+/* Log level codes used by gcip firmware. */
+#define GCIP_FW_LOG_LEVEL_VERBOSE (2)
+#define GCIP_FW_LOG_LEVEL_DEBUG (1)
+#define GCIP_FW_LOG_LEVEL_INFO (0)
+#define GCIP_FW_LOG_LEVEL_WARN (-1)
+#define GCIP_FW_LOG_LEVEL_ERROR (-2)
+
+#define GCIP_FW_DMESG_LOG_LEVEL (GCIP_FW_LOG_LEVEL_WARN)
+
+/* When log data arrives, recheck for more log data after this delay. */
+#define GCIP_TELEMETRY_LOG_RECHECK_DELAY 200 /* ms */
+
+enum gcip_telemetry_state {
+ GCIP_TELEMETRY_DISABLED = 0,
+ GCIP_TELEMETRY_ENABLED = 1,
+ GCIP_TELEMETRY_INVALID = -1,
+};
+
+/* To specify the target of operation. */
+enum gcip_telemetry_type {
+ GCIP_TELEMETRY_LOG = 0,
+ GCIP_TELEMETRY_TRACE = 1,
+};
+
+struct gcip_telemetry_header {
+ u32 head;
+ u32 size;
+ u32 reserved0[14]; /* Place head and tail into different cache lines */
+ u32 tail;
+ u32 entries_dropped; /* Number of entries dropped due to buffer full */
+ u32 reserved1[14]; /* Pad to 128 bytes in total */
+};
+
+struct gcip_log_entry_header {
+ s16 code;
+ u16 length;
+ u64 timestamp;
+ u16 crc16;
+} __packed;
+
+struct gcip_telemetry {
+ /* Device used for logging and memory allocation. */
+ struct device *dev;
+
+ /*
+ * State transitioning is to prevent racing in IRQ handlers. e.g. the interrupt comes when
+ * the kernel is releasing buffers.
+ */
+ enum gcip_telemetry_state state;
+ spinlock_t state_lock; /* protects state */
+
+ struct gcip_telemetry_header *header;
+
+ struct eventfd_ctx *ctx; /* signal this to notify the runtime */
+ rwlock_t ctx_lock; /* protects ctx */
+ const char *name; /* for debugging */
+
+ struct work_struct work; /* worker for handling data */
+ /* Fallback function to call for default log/trace handling. */
+ void (*fallback_fn)(struct gcip_telemetry *tel);
+ struct mutex mmap_lock; /* protects mmapped_count */
+ long mmapped_count; /* number of VMAs that are mapped to this telemetry buffer */
+};
+
+struct gcip_kci;
+
+struct gcip_telemetry_kci_args {
+ struct gcip_kci *kci;
+ u64 addr;
+ u32 size;
+};
+
+/* Sends telemetry KCI through send kci callback and args. */
+int gcip_telemetry_kci(struct gcip_telemetry *tel,
+ int (*send_kci)(struct gcip_telemetry_kci_args *),
+ struct gcip_telemetry_kci_args *args);
+/* Sets the eventfd for telemetry. */
+int gcip_telemetry_set_event(struct gcip_telemetry *tel, u32 eventfd);
+/* Unsets the eventfd for telemetry. */
+void gcip_telemetry_unset_event(struct gcip_telemetry *tel);
+/* Fallback to log messages from host CPU to dmesg. */
+void gcip_telemetry_fw_log(struct gcip_telemetry *log);
+/* Fallback to consume the trace buffer. */
+void gcip_telemetry_fw_trace(struct gcip_telemetry *trace);
+/* Interrupt handler to schedule the worker when the buffer is not empty. */
+void gcip_telemetry_irq_handler(struct gcip_telemetry *tel);
+/* Increases the telemetry mmap count. */
+void gcip_telemetry_inc_mmap_count(struct gcip_telemetry *tel, int dif);
+/* Mmaps the telemetry buffer through mmap callback and args. */
+int gcip_telemetry_mmap_buffer(struct gcip_telemetry *tel, int (*mmap)(void *), void *args);
+/*
+ * Initializes struct gcip_telemetry.
+ *
+ * @vaddr: Virtual address of the queue buffer.
+ * @size: Size of the queue buffer. Must be power of 2 and greater than the size of struct
+ * gcip_telemetry_header.
+ * @fallback_fn: Fallback function to call for default log/trace handling.
+ */
+int gcip_telemetry_init(struct device *dev, struct gcip_telemetry *tel, const char *name,
+ void *vaddr, const size_t size,
+ void (*fallback_fn)(struct gcip_telemetry *));
+/* Exits and sets the telemetry state to GCIP_TELEMETRY_INVALID. */
+void gcip_telemetry_exit(struct gcip_telemetry *tel);
+
+#endif /* __GCIP_TELEMETRY_H__ */
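
The reserved fields above are sized so that `head` and `tail` land in separate 64-byte cache lines (2 + 14 u32 words per half, 128 bytes total), per the inline comments. A compile-time check of that layout, assuming C11 static_assert and a mirrored struct definition:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Mirror of gcip_telemetry_header, for layout checking only. */
struct gcip_telemetry_header {
	uint32_t head;
	uint32_t size;
	uint32_t reserved0[14];
	uint32_t tail;
	uint32_t entries_dropped;
	uint32_t reserved1[14];
};

static_assert(offsetof(struct gcip_telemetry_header, tail) == 64,
	      "tail must start on its own 64-byte cache line");
static_assert(sizeof(struct gcip_telemetry_header) == 128,
	      "header must occupy exactly two cache lines");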
diff --git a/gxp-client.c b/gxp-client.c
index 6fe02cb..148435b 100644
--- a/gxp-client.c
+++ b/gxp-client.c
@@ -103,7 +103,7 @@ int gxp_client_allocate_virtual_device(struct gxp_client *client,
up_write(&gxp->vd_semaphore);
if (client->has_block_wakelock) {
- ret = gxp_vd_block_ready(client->vd);
+ ret = gxp_vd_block_ready(vd);
if (ret) {
gxp_vd_release(vd);
goto error;
diff --git a/gxp-client.h b/gxp-client.h
index 219ce4f..c47de42 100644
--- a/gxp-client.h
+++ b/gxp-client.h
@@ -46,11 +46,12 @@ struct gxp_client {
pid_t pid;
/*
- * Indicates whether the driver needs to disable telemetry when this
- * client closes. For when the client fails to disable telemetry itself.
+ * Indicates whether the driver needs to disable core telemetry when
+ * this client closes. For when the client fails to disable core
+ * telemetry itself.
*/
- bool enabled_telemetry_logging;
- bool enabled_telemetry_tracing;
+ bool enabled_core_telemetry_logging;
+ bool enabled_core_telemetry_tracing;
};
/*
diff --git a/gxp-common-platform.c b/gxp-common-platform.c
index 6082856..e7d5a27 100644
--- a/gxp-common-platform.c
+++ b/gxp-common-platform.c
@@ -32,6 +32,7 @@
#include "gxp-client.h"
#include "gxp-config.h"
+#include "gxp-core-telemetry.h"
#include "gxp-debug-dump.h"
#include "gxp-debugfs.h"
#include "gxp-dma.h"
@@ -45,13 +46,14 @@
#include "gxp-mapping.h"
#include "gxp-notification.h"
#include "gxp-pm.h"
-#include "gxp-telemetry.h"
#include "gxp-thermal.h"
#include "gxp-vd.h"
#include "gxp-wakelock.h"
#include "gxp.h"
-#ifdef GXP_HAS_DCI
+#if GXP_USE_LEGACY_MAILBOX
+#include "gxp-mailbox-impl.h"
+#else
#include "gxp-dci.h"
#endif
@@ -195,10 +197,12 @@ static int gxp_release(struct inode *inode, struct file *file)
if (!client)
return 0;
- if (client->enabled_telemetry_logging)
- gxp_telemetry_disable(client->gxp, GXP_TELEMETRY_TYPE_LOGGING);
- if (client->enabled_telemetry_tracing)
- gxp_telemetry_disable(client->gxp, GXP_TELEMETRY_TYPE_TRACING);
+ if (client->enabled_core_telemetry_logging)
+ gxp_core_telemetry_disable(client->gxp,
+ GXP_TELEMETRY_TYPE_LOGGING);
+ if (client->enabled_core_telemetry_tracing)
+ gxp_core_telemetry_disable(client->gxp,
+ GXP_TELEMETRY_TYPE_TRACING);
mutex_lock(&client->gxp->client_list_lock);
list_del(&client->list_entry);
@@ -382,81 +386,6 @@ out:
return ret;
}
-static int
-gxp_mailbox_command_compat(struct gxp_client *client,
- struct gxp_mailbox_command_compat_ioctl __user *argp)
-{
- struct gxp_dev *gxp = client->gxp;
- struct gxp_mailbox_command_compat_ioctl ibuf;
- int virt_core, phys_core;
- int ret = 0;
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf))) {
- dev_err(gxp->dev,
- "Unable to copy ioctl data from user-space\n");
- return -EFAULT;
- }
-
- /* Caller must hold VIRTUAL_DEVICE wakelock */
- down_read(&client->semaphore);
-
- if (!check_client_has_available_vd_wakelock(client,
- "GXP_MAILBOX_COMMAND")) {
- ret = -ENODEV;
- goto out_unlock_client_semaphore;
- }
-
- down_read(&gxp->vd_semaphore);
-
- virt_core = ibuf.virtual_core_id;
- phys_core = gxp_vd_virt_core_to_phys_core(client->vd, virt_core);
- if (phys_core < 0) {
- dev_err(gxp->dev,
- "Mailbox command failed: Invalid virtual core id (%u)\n",
- virt_core);
- ret = -EINVAL;
- goto out;
- }
-
- if (!gxp_is_fw_running(gxp, phys_core)) {
- dev_err(gxp->dev,
- "Cannot process mailbox command for core %d when firmware isn't running\n",
- phys_core);
- ret = -EINVAL;
- goto out;
- }
-
- if (gxp->mailbox_mgr->mailboxes[phys_core] == NULL) {
- dev_err(gxp->dev, "Mailbox not initialized for core %d\n",
- phys_core);
- ret = -EIO;
- goto out;
- }
-
- ret = gxp->mailbox_mgr->execute_cmd_async(
- client, gxp->mailbox_mgr->mailboxes[phys_core], virt_core,
- GXP_MBOX_CODE_DISPATCH, 0, ibuf.device_address, ibuf.size,
- ibuf.flags, off_states, &ibuf.sequence_number);
- if (ret) {
- dev_err(gxp->dev, "Failed to enqueue mailbox command (ret=%d)\n",
- ret);
- goto out;
- }
-
- if (copy_to_user(argp, &ibuf, sizeof(ibuf))) {
- dev_err(gxp->dev, "Failed to copy back sequence number!\n");
- ret = -EFAULT;
- goto out;
- }
-
-out:
- up_read(&gxp->vd_semaphore);
-out_unlock_client_semaphore:
- up_read(&client->semaphore);
-
- return ret;
-}
-
static int gxp_mailbox_command(struct gxp_client *client,
struct gxp_mailbox_command_ioctl __user *argp)
{
@@ -879,8 +808,8 @@ out_unlock_client_semaphore:
return ret;
}
-static int gxp_enable_telemetry(struct gxp_client *client,
- __u8 __user *argp)
+static int gxp_enable_core_telemetry(struct gxp_client *client,
+ __u8 __user *argp)
{
struct gxp_dev *gxp = client->gxp;
__u8 type;
@@ -893,21 +822,22 @@ static int gxp_enable_telemetry(struct gxp_client *client,
type != GXP_TELEMETRY_TYPE_TRACING)
return -EINVAL;
- ret = gxp_telemetry_enable(gxp, type);
+ ret = gxp_core_telemetry_enable(gxp, type);
/*
- * Record what telemetry types this client enabled so they can be
+ * Record what core telemetry types this client enabled so they can be
* cleaned-up if the client closes without disabling them.
*/
if (!ret && type == GXP_TELEMETRY_TYPE_LOGGING)
- client->enabled_telemetry_logging = true;
+ client->enabled_core_telemetry_logging = true;
if (!ret && type == GXP_TELEMETRY_TYPE_TRACING)
- client->enabled_telemetry_tracing = true;
+ client->enabled_core_telemetry_tracing = true;
return ret;
}
-static int gxp_disable_telemetry(struct gxp_client *client, __u8 __user *argp)
+static int gxp_disable_core_telemetry(struct gxp_client *client,
+ __u8 __user *argp)
{
struct gxp_dev *gxp = client->gxp;
__u8 type;
@@ -920,16 +850,22 @@ static int gxp_disable_telemetry(struct gxp_client *client, __u8 __user *argp)
type != GXP_TELEMETRY_TYPE_TRACING)
return -EINVAL;
- ret = gxp_telemetry_disable(gxp, type);
+ ret = gxp_core_telemetry_disable(gxp, type);
if (!ret && type == GXP_TELEMETRY_TYPE_LOGGING)
- client->enabled_telemetry_logging = false;
+ client->enabled_core_telemetry_logging = false;
if (!ret && type == GXP_TELEMETRY_TYPE_TRACING)
- client->enabled_telemetry_tracing = false;
+ client->enabled_core_telemetry_tracing = false;
return ret;
}
+/*
+ * TODO(b/249440369): As the DSP KD will no longer be involved in mapping the TPU mailbox buffer
+ * starting from Zuma, remove the corresponding logic from this function. Note that we still have
+ * to do it here in direct mode. Also, we have to investigate whether it is still proper to call
+ * the `ALLOCATE_EXTERNAL_MAILBOX` TPU external command from here in MCU mode.
+ */
static int gxp_map_tpu_mbx_queue(struct gxp_client *client,
struct gxp_tpu_mbx_queue_ioctl __user *argp)
{
@@ -1018,18 +954,28 @@ static int gxp_map_tpu_mbx_queue(struct gxp_client *client,
phys_core_list, mbx_info);
if (ret) {
dev_err(gxp->dev, "Failed to map TPU mailbox buffer %d", ret);
- fput(client->tpu_file);
- client->tpu_file = NULL;
- edgetpu_ext_driver_cmd(gxp->tpu_dev.dev,
- EDGETPU_EXTERNAL_CLIENT_TYPE_DSP,
- FREE_EXTERNAL_MAILBOX, &gxp_tpu_info,
- NULL);
- goto out_free;
+ goto err_fput;
}
client->mbx_desc.phys_core_list = phys_core_list;
client->mbx_desc.cmdq_size = mbx_info->cmdq_size;
client->mbx_desc.respq_size = mbx_info->respq_size;
+ if (gxp->after_map_tpu_mbx_queue) {
+ ret = gxp->after_map_tpu_mbx_queue(gxp, client);
+ if (ret)
+ goto err_unmap;
+ }
+
+ goto out_free;
+
+err_unmap:
+ gxp_dma_unmap_tpu_buffer(gxp, client->vd->domain, client->mbx_desc);
+err_fput:
+ fput(client->tpu_file);
+ client->tpu_file = NULL;
+ edgetpu_ext_driver_cmd(gxp->tpu_dev.dev,
+ EDGETPU_EXTERNAL_CLIENT_TYPE_DSP,
+ FREE_EXTERNAL_MAILBOX, &gxp_tpu_info, NULL);
out_free:
kfree(mbx_info);
@@ -1044,6 +990,7 @@ out_unlock_client_semaphore:
#endif
}
+/* TODO(b/249440369): The same as the `gxp_map_tpu_mbx_queue` function. */
static int gxp_unmap_tpu_mbx_queue(struct gxp_client *client,
struct gxp_tpu_mbx_queue_ioctl __user *argp)
{
@@ -1071,6 +1018,9 @@ static int gxp_unmap_tpu_mbx_queue(struct gxp_client *client,
goto out;
}
+ if (gxp->before_unmap_tpu_mbx_queue)
+ gxp->before_unmap_tpu_mbx_queue(gxp, client);
+
gxp_dma_unmap_tpu_buffer(gxp, client->vd->domain, client->mbx_desc);
gxp_tpu_info.tpu_fd = ibuf.tpu_fd;
@@ -1089,7 +1039,7 @@ out:
#endif
}
-static int gxp_register_telemetry_eventfd(
+static int gxp_register_core_telemetry_eventfd(
struct gxp_client *client,
struct gxp_register_telemetry_eventfd_ioctl __user *argp)
{
@@ -1099,10 +1049,11 @@ static int gxp_register_telemetry_eventfd(
if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
return -EFAULT;
- return gxp_telemetry_register_eventfd(gxp, ibuf.type, ibuf.eventfd);
+ return gxp_core_telemetry_register_eventfd(gxp, ibuf.type,
+ ibuf.eventfd);
}
-static int gxp_unregister_telemetry_eventfd(
+static int gxp_unregister_core_telemetry_eventfd(
struct gxp_client *client,
struct gxp_register_telemetry_eventfd_ioctl __user *argp)
{
@@ -1112,7 +1063,7 @@ static int gxp_unregister_telemetry_eventfd(
if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
return -EFAULT;
- return gxp_telemetry_unregister_eventfd(gxp, ibuf.type);
+ return gxp_core_telemetry_unregister_eventfd(gxp, ibuf.type);
}
static int gxp_read_global_counter(struct gxp_client *client,
@@ -1156,99 +1107,6 @@ out:
return ret;
}
-static int gxp_acquire_wake_lock_compat(
- struct gxp_client *client,
- struct gxp_acquire_wakelock_compat_ioctl __user *argp)
-{
- struct gxp_dev *gxp = client->gxp;
- struct gxp_acquire_wakelock_compat_ioctl ibuf;
- bool acquired_block_wakelock = false;
- struct gxp_power_states power_states;
- int ret = 0;
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
- return -EFAULT;
-
- if (ibuf.gxp_power_state == GXP_POWER_STATE_OFF) {
- dev_err(gxp->dev,
- "GXP_POWER_STATE_OFF is not a valid value when acquiring a wakelock\n");
- return -EINVAL;
- }
- if (ibuf.gxp_power_state < GXP_POWER_STATE_OFF ||
- ibuf.gxp_power_state >= GXP_NUM_POWER_STATES) {
- dev_err(gxp->dev, "Requested power state is invalid\n");
- return -EINVAL;
- }
- if ((ibuf.memory_power_state < MEMORY_POWER_STATE_MIN ||
- ibuf.memory_power_state > MEMORY_POWER_STATE_MAX) &&
- ibuf.memory_power_state != MEMORY_POWER_STATE_UNDEFINED) {
- dev_err(gxp->dev,
- "Requested memory power state %d is invalid\n",
- ibuf.memory_power_state);
- return -EINVAL;
- }
-
- if (ibuf.gxp_power_state == GXP_POWER_STATE_READY) {
- dev_warn_once(
- gxp->dev,
- "GXP_POWER_STATE_READY is deprecated, please set GXP_POWER_LOW_FREQ_CLKMUX with GXP_POWER_STATE_UUD state");
- ibuf.gxp_power_state = GXP_POWER_STATE_UUD;
- }
-
- down_write(&client->semaphore);
- if ((ibuf.components_to_wake & WAKELOCK_VIRTUAL_DEVICE) &&
- (!client->vd)) {
- dev_err(gxp->dev,
- "Must allocate a virtual device to acquire VIRTUAL_DEVICE wakelock\n");
- ret = -EINVAL;
- goto out;
- }
-
- /* Acquire a BLOCK wakelock if requested */
- if (ibuf.components_to_wake & WAKELOCK_BLOCK) {
- power_states.power = aur_state_array[ibuf.gxp_power_state];
- power_states.memory = aur_memory_state_array[ibuf.memory_power_state];
- power_states.low_clkmux = false;
- ret = gxp_client_acquire_block_wakelock(
- client, &acquired_block_wakelock, power_states);
- if (ret) {
- dev_err(gxp->dev,
- "Failed to acquire BLOCK wakelock for client (ret=%d)\n",
- ret);
- goto out;
- }
- }
-
- /* Acquire a VIRTUAL_DEVICE wakelock if requested */
- if (ibuf.components_to_wake & WAKELOCK_VIRTUAL_DEVICE) {
- ret = gxp_client_acquire_vd_wakelock(client);
- if (ret) {
- dev_err(gxp->dev,
- "Failed to acquire VIRTUAL_DEVICE wakelock for client (ret=%d)\n",
- ret);
- goto err_acquiring_vd_wl;
- }
- }
-out:
- up_write(&client->semaphore);
-
- return ret;
-
-err_acquiring_vd_wl:
- /*
- * In a single call, if any wakelock acquisition fails, all of them do.
- * If the client was acquiring both wakelocks and failed to acquire the
- * VIRTUAL_DEVICE wakelock after successfully acquiring the BLOCK
- * wakelock, then release it before returning the error code.
- */
- if (acquired_block_wakelock)
- gxp_client_release_block_wakelock(client);
-
- up_write(&client->semaphore);
-
- return ret;
-}
-
static int gxp_acquire_wake_lock(struct gxp_client *client,
struct gxp_acquire_wakelock_ioctl __user *argp)
{
@@ -1659,9 +1517,6 @@ static long gxp_ioctl(struct file *file, uint cmd, ulong arg)
case GXP_SYNC_BUFFER:
ret = gxp_sync_buffer(client, argp);
break;
- case GXP_MAILBOX_COMMAND_COMPAT:
- ret = gxp_mailbox_command_compat(client, argp);
- break;
case GXP_MAILBOX_RESPONSE:
ret = gxp_mailbox_response(client, argp);
break;
@@ -1683,11 +1538,11 @@ static long gxp_ioctl(struct file *file, uint cmd, ulong arg)
case GXP_ETM_GET_TRACE_INFO_COMMAND:
ret = gxp_etm_get_trace_info_command(client, argp);
break;
- case GXP_ENABLE_TELEMETRY:
- ret = gxp_enable_telemetry(client, argp);
+ case GXP_ENABLE_CORE_TELEMETRY:
+ ret = gxp_enable_core_telemetry(client, argp);
break;
- case GXP_DISABLE_TELEMETRY:
- ret = gxp_disable_telemetry(client, argp);
+ case GXP_DISABLE_CORE_TELEMETRY:
+ ret = gxp_disable_core_telemetry(client, argp);
break;
case GXP_MAP_TPU_MBX_QUEUE:
ret = gxp_map_tpu_mbx_queue(client, argp);
@@ -1695,18 +1550,15 @@ static long gxp_ioctl(struct file *file, uint cmd, ulong arg)
case GXP_UNMAP_TPU_MBX_QUEUE:
ret = gxp_unmap_tpu_mbx_queue(client, argp);
break;
- case GXP_REGISTER_TELEMETRY_EVENTFD:
- ret = gxp_register_telemetry_eventfd(client, argp);
+ case GXP_REGISTER_CORE_TELEMETRY_EVENTFD:
+ ret = gxp_register_core_telemetry_eventfd(client, argp);
break;
- case GXP_UNREGISTER_TELEMETRY_EVENTFD:
- ret = gxp_unregister_telemetry_eventfd(client, argp);
+ case GXP_UNREGISTER_CORE_TELEMETRY_EVENTFD:
+ ret = gxp_unregister_core_telemetry_eventfd(client, argp);
break;
case GXP_READ_GLOBAL_COUNTER:
ret = gxp_read_global_counter(client, argp);
break;
- case GXP_ACQUIRE_WAKE_LOCK_COMPAT:
- ret = gxp_acquire_wake_lock_compat(client, argp);
- break;
case GXP_RELEASE_WAKE_LOCK:
ret = gxp_release_wake_lock(client, argp);
break;
@@ -1744,19 +1596,24 @@ static long gxp_ioctl(struct file *file, uint cmd, ulong arg)
static int gxp_mmap(struct file *file, struct vm_area_struct *vma)
{
struct gxp_client *client = file->private_data;
+ int ret;
if (!client)
return -ENODEV;
+ if (client->gxp->handle_mmap) {
+ ret = client->gxp->handle_mmap(file, vma);
+ if (ret != -EOPNOTSUPP)
+ return ret;
+ }
+
switch (vma->vm_pgoff << PAGE_SHIFT) {
- case GXP_MMAP_LOG_BUFFER_OFFSET:
- return gxp_telemetry_mmap_buffers(client->gxp,
- GXP_TELEMETRY_TYPE_LOGGING,
- vma);
- case GXP_MMAP_TRACE_BUFFER_OFFSET:
- return gxp_telemetry_mmap_buffers(client->gxp,
- GXP_TELEMETRY_TYPE_TRACING,
- vma);
+ case GXP_MMAP_CORE_LOG_BUFFER_OFFSET:
+ return gxp_core_telemetry_mmap_buffers(
+ client->gxp, GXP_TELEMETRY_TYPE_LOGGING, vma);
+ case GXP_MMAP_CORE_TRACE_BUFFER_OFFSET:
+ return gxp_core_telemetry_mmap_buffers(
+ client->gxp, GXP_TELEMETRY_TYPE_TRACING, vma);
default:
return -EINVAL;
}
@@ -1903,10 +1760,10 @@ static int gxp_common_platform_probe(struct platform_device *pdev, struct gxp_de
}
if (gxp_is_direct_mode(gxp)) {
-#ifdef GXP_HAS_DCI
- gxp_dci_init(gxp->mailbox_mgr);
-#else
+#if GXP_USE_LEGACY_MAILBOX
gxp_mailbox_init(gxp->mailbox_mgr);
+#else
+ gxp_dci_init(gxp->mailbox_mgr);
#endif
}
@@ -1976,7 +1833,7 @@ static int gxp_common_platform_probe(struct platform_device *pdev, struct gxp_de
}
gxp_fw_data_init(gxp);
- gxp_telemetry_init(gxp);
+ gxp_core_telemetry_init(gxp);
gxp_create_debugfs(gxp);
gxp->thermal_mgr = gxp_thermal_init(gxp);
if (!gxp->thermal_mgr)
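
gxp_mmap() above now gives the chip-specific handle_mmap hook first claim on an offset and only falls through to the core-telemetry offsets when the hook returns -EOPNOTSUPP, so Callisto's MCU log/trace offsets (added earlier in this patch) take precedence in MCU mode. A hedged userspace sketch of mapping the MCU log buffer; the device node path and mapping length are assumptions, while the offset macro is the one callisto_platform_mmap() dispatches on:

#include <fcntl.h>
#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>

#include "gxp.h"	/* assumed to define GXP_MMAP_MCU_LOG_BUFFER_OFFSET */

int main(void)
{
	int fd = open("/dev/gxp", O_RDWR);	/* node name assumed */
	const size_t len = 64 * 1024;		/* illustrative length */
	void *buf;

	if (fd < 0)
		return 1;
	buf = mmap(NULL, len, PROT_READ, MAP_SHARED, fd,
		   GXP_MMAP_MCU_LOG_BUFFER_OFFSET);
	if (buf == MAP_FAILED) {
		close(fd);
		return 1;
	}
	/* ... read gcip_telemetry_header + log entries from buf ... */
	munmap(buf, len);
	close(fd);
	return 0;
}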
diff --git a/gxp-config.h b/gxp-config.h
index ded8ca5..2f50bd8 100644
--- a/gxp-config.h
+++ b/gxp-config.h
@@ -30,6 +30,10 @@
#define SYNC_BARRIER_COUNT 16
+#ifndef GXP_USE_LEGACY_MAILBOX
+#define GXP_USE_LEGACY_MAILBOX 0
+#endif
+
/* LPM address space starts at lpm_version register */
#define GXP_LPM_BASE GXP_REG_LPM_VERSION
#define GXP_LPM_PSM_0_BASE GXP_REG_LPM_PSM_0
diff --git a/gxp-telemetry.c b/gxp-core-telemetry.c
index 7f63c49..0572648 100644
--- a/gxp-telemetry.c
+++ b/gxp-core-telemetry.c
@@ -1,59 +1,58 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * GXP telemetry support
+ * GXP core telemetry support
*
- * Copyright (C) 2021 Google LLC
+ * Copyright (C) 2021-2022 Google LLC
*/
#include <linux/slab.h>
#include <linux/wait.h>
#include "gxp-config.h"
+#include "gxp-core-telemetry.h"
#include "gxp-dma.h"
#include "gxp-firmware.h"
#include "gxp-firmware-data.h"
#include "gxp-host-device-structs.h"
#include "gxp-notification.h"
-#include "gxp-telemetry.h"
#include "gxp-vd.h"
-static inline bool is_core_telemetry_enabled(struct gxp_dev *gxp, uint core,
- u8 type)
+static inline bool is_telemetry_enabled(struct gxp_dev *gxp, uint core, u8 type)
{
u32 device_status =
- gxp_fw_data_get_telemetry_device_status(gxp, core, type);
+ gxp_fw_data_get_core_telemetry_device_status(gxp, core, type);
- return device_status & GXP_TELEMETRY_DEVICE_STATUS_ENABLED;
+ return device_status & GXP_CORE_TELEMETRY_DEVICE_STATUS_ENABLED;
}
static void telemetry_status_notification_work(struct work_struct *work)
{
- struct gxp_telemetry_work *telem_work =
- container_of(work, struct gxp_telemetry_work, work);
+ struct gxp_core_telemetry_work *telem_work =
+ container_of(work, struct gxp_core_telemetry_work, work);
struct gxp_dev *gxp = telem_work->gxp;
uint core = telem_work->core;
- struct gxp_telemetry_manager *mgr = telem_work->gxp->telemetry_mgr;
+ struct gxp_core_telemetry_manager *mgr = gxp->core_telemetry_mgr;
- /* Wake any threads waiting on an telemetry disable ACK */
+ /* Wake any threads waiting on a core telemetry disable ACK */
wake_up(&mgr->waitq);
- /* Signal the appropriate eventfd for any active telemetry types */
+ /* Signal the appropriate eventfd for any active core telemetry types */
mutex_lock(&mgr->lock);
- if (is_core_telemetry_enabled(gxp, core, GXP_TELEMETRY_TYPE_LOGGING) &&
+ if (is_telemetry_enabled(gxp, core, GXP_TELEMETRY_TYPE_LOGGING) &&
mgr->logging_efd)
eventfd_signal(mgr->logging_efd, 1);
- if (is_core_telemetry_enabled(gxp, core, GXP_TELEMETRY_TYPE_TRACING) &&
+ if (is_telemetry_enabled(gxp, core, GXP_TELEMETRY_TYPE_TRACING) &&
mgr->tracing_efd)
eventfd_signal(mgr->tracing_efd, 1);
mutex_unlock(&mgr->lock);
}
-int gxp_telemetry_init(struct gxp_dev *gxp)
+int gxp_core_telemetry_init(struct gxp_dev *gxp)
{
- struct gxp_telemetry_manager *mgr;
+ struct gxp_core_telemetry_manager *mgr;
uint i;
mgr = devm_kzalloc(gxp->dev, sizeof(*mgr), GFP_KERNEL);
@@ -70,12 +69,12 @@ int gxp_telemetry_init(struct gxp_dev *gxp)
}
init_waitqueue_head(&mgr->waitq);
- gxp->telemetry_mgr = mgr;
+ gxp->core_telemetry_mgr = mgr;
return 0;
}
-/* Wrapper struct to be used by the telemetry vma_ops. */
+/* Wrapper struct to be used by the core telemetry vma_ops. */
struct telemetry_vma_data {
struct gxp_dev *gxp;
struct buffer_data *buff_data;
@@ -83,22 +82,22 @@ struct telemetry_vma_data {
refcount_t ref_count;
};
-static void gxp_telemetry_vma_open(struct vm_area_struct *vma)
+static void telemetry_vma_open(struct vm_area_struct *vma)
{
struct telemetry_vma_data *vma_data =
(struct telemetry_vma_data *)vma->vm_private_data;
struct gxp_dev *gxp = vma_data->gxp;
- mutex_lock(&gxp->telemetry_mgr->lock);
+ mutex_lock(&gxp->core_telemetry_mgr->lock);
refcount_inc(&vma_data->ref_count);
- mutex_unlock(&gxp->telemetry_mgr->lock);
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
}
static void free_telemetry_buffers(struct gxp_dev *gxp, struct buffer_data *data);
-static void gxp_telemetry_vma_close(struct vm_area_struct *vma)
+static void telemetry_vma_close(struct vm_area_struct *vma)
{
struct telemetry_vma_data *vma_data =
(struct telemetry_vma_data *)vma->vm_private_data;
@@ -106,29 +105,29 @@ static void gxp_telemetry_vma_close(struct vm_area_struct *vma)
struct buffer_data *buff_data = vma_data->buff_data;
u8 type = vma_data->type;
- mutex_lock(&gxp->telemetry_mgr->lock);
+ mutex_lock(&gxp->core_telemetry_mgr->lock);
if (!refcount_dec_and_test(&vma_data->ref_count))
goto out;
/*
- * Free the telemetry buffers if they are no longer in use.
+ * Free the core telemetry buffers if they are no longer in use.
*
- * If a client enabled telemetry, then closed their VMA without
+ * If a client enabled core telemetry, then closed their VMA without
* disabling it, firmware will still be expecting those buffers to be
- * mapped. If this is the case, telemetry will be disabled, and the
+ * mapped. If this is the case, core telemetry will be disabled, and the
* buffers freed, when the client is closed.
*
- * We cannot disable telemetry here, since attempting to lock the
+ * We cannot disable core telemetry here, since attempting to lock the
* `vd_semaphore` while holding the mmap lock can lead to deadlocks.
*/
if (refcount_dec_and_test(&buff_data->ref_count)) {
switch (type) {
case GXP_TELEMETRY_TYPE_LOGGING:
- gxp->telemetry_mgr->logging_buff_data = NULL;
+ gxp->core_telemetry_mgr->logging_buff_data = NULL;
break;
case GXP_TELEMETRY_TYPE_TRACING:
- gxp->telemetry_mgr->tracing_buff_data = NULL;
+ gxp->core_telemetry_mgr->tracing_buff_data = NULL;
break;
default:
dev_warn(gxp->dev, "%s called with invalid type %u\n",
@@ -140,12 +139,12 @@ static void gxp_telemetry_vma_close(struct vm_area_struct *vma)
kfree(vma_data);
out:
- mutex_unlock(&gxp->telemetry_mgr->lock);
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
}
-static const struct vm_operations_struct gxp_telemetry_vma_ops = {
- .open = gxp_telemetry_vma_open,
- .close = gxp_telemetry_vma_close,
+static const struct vm_operations_struct telemetry_vma_ops = {
+ .open = telemetry_vma_open,
+ .close = telemetry_vma_close,
};
/**
@@ -154,24 +153,24 @@ static const struct vm_operations_struct gxp_telemetry_vma_ops = {
* @gxp: The GXP device to check availability for
* @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
*
- * Caller must hold the telemetry_manager's lock.
+ * Caller must hold the core telemetry_manager's lock.
*
* Return:
* * 0 - @type is valid and can have new buffers created
* * -EBUSY - Buffers already exist for @type
- * * -EINVAL - @type is not a valid telemetry type
+ * * -EINVAL - @type is not a valid core telemetry type
*/
static int check_telemetry_type_availability(struct gxp_dev *gxp, u8 type)
{
- lockdep_assert_held(&gxp->telemetry_mgr->lock);
+ lockdep_assert_held(&gxp->core_telemetry_mgr->lock);
switch (type) {
case GXP_TELEMETRY_TYPE_LOGGING:
- if (gxp->telemetry_mgr->logging_buff_data)
+ if (gxp->core_telemetry_mgr->logging_buff_data)
return -EBUSY;
break;
case GXP_TELEMETRY_TYPE_TRACING:
- if (gxp->telemetry_mgr->tracing_buff_data)
+ if (gxp->core_telemetry_mgr->tracing_buff_data)
return -EBUSY;
break;
default:
@@ -188,21 +187,20 @@ static int check_telemetry_type_availability(struct gxp_dev *gxp, u8 type)
* @gxp: The GXP device to allocate the buffers for
* @size: The size of buffer to allocate for each core
*
- * Caller must hold the telemetry_manager's lock.
+ * Caller must hold the core telemetry_manager's lock.
*
- * Return: A pointer to the `struct buffer_data` if successful, NULL otherwise
+ * Return: A pointer to the `struct buffer_data` if successful, an ERR_PTR otherwise
*/
static struct buffer_data *allocate_telemetry_buffers(struct gxp_dev *gxp,
size_t size)
{
struct buffer_data *data;
int i;
- void *buf;
- dma_addr_t daddr;
+ int ret = 0;
size = size < PAGE_SIZE ? PAGE_SIZE : size;
- lockdep_assert_held(&gxp->telemetry_mgr->lock);
+ lockdep_assert_held(&gxp->core_telemetry_mgr->lock);
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
@@ -211,14 +209,13 @@ static struct buffer_data *allocate_telemetry_buffers(struct gxp_dev *gxp,
/* Allocate cache-coherent buffers for logging/tracing to */
for (i = 0; i < GXP_NUM_CORES; i++) {
/* Allocate a coherent buffer in the default domain */
- buf = dma_alloc_coherent(gxp->dev, size, &daddr, GFP_KERNEL);
- if (!buf) {
+ ret = gxp_dma_alloc_coherent_buf(gxp, NULL, size, GFP_KERNEL, 0,
+ &data->buffers[i]);
+ if (ret) {
dev_err(gxp->dev,
- "Failed to allocate coherent buffer\n");
+ "Failed to allocate coherent buffer\n");
goto err_alloc;
}
- data->buffers[i] = buf;
- data->buffer_daddrs[i] = daddr;
}
data->size = size;
refcount_set(&data->ref_count, 1);
@@ -228,11 +225,10 @@ static struct buffer_data *allocate_telemetry_buffers(struct gxp_dev *gxp,
err_alloc:
while (i--)
- dma_free_coherent(gxp->dev, size, data->buffers[i],
- data->buffer_daddrs[i]);
+ gxp_dma_free_coherent_buf(gxp, NULL, &data->buffers[i]);
kfree(data);
- return NULL;
+ return ERR_PTR(ret);
}
/**
@@ -240,29 +236,28 @@ err_alloc:
* @gxp: The GXP device the buffers were allocated for
* @data: The descriptor of the buffers to unmap and free
*
- * Caller must hold the telemetry_manager's lock.
+ * Caller must hold the core telemetry_manager's lock.
*/
static void free_telemetry_buffers(struct gxp_dev *gxp, struct buffer_data *data)
{
int i;
- lockdep_assert_held(&gxp->telemetry_mgr->lock);
+ lockdep_assert_held(&gxp->core_telemetry_mgr->lock);
for (i = 0; i < GXP_NUM_CORES; i++)
- dma_free_coherent(gxp->dev, data->size, data->buffers[i],
- data->buffer_daddrs[i]);
+ gxp_dma_free_coherent_buf(gxp, NULL, &data->buffers[i]);
kfree(data);
}
/**
- * remap_telemetry_buffers() - Remaps a set of telemetry buffers into a
+ * remap_telemetry_buffers() - Remaps a set of core telemetry buffers into a
* user-space vm_area.
* @gxp: The GXP device the buffers were allocated for
* @vma: A vm area to remap the buffers into
- * @buff_data: The data describing a set of telemetry buffers to remap
+ * @buff_data: The data describing a set of core telemetry buffers to remap
*
- * Caller must hold the telemetry_manager's lock.
+ * Caller must hold the core telemetry_manager's lock.
*
* Return:
* * 0 - Success
@@ -278,7 +273,7 @@ static int remap_telemetry_buffers(struct gxp_dev *gxp,
phys_addr_t phys;
int ret = 0;
- lockdep_assert_held(&gxp->telemetry_mgr->lock);
+ lockdep_assert_held(&gxp->core_telemetry_mgr->lock);
/* mmap the buffers */
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
@@ -301,7 +296,7 @@ static int remap_telemetry_buffers(struct gxp_dev *gxp,
*/
phys = iommu_iova_to_phys(
iommu_get_domain_for_dev(gxp->dev),
- buff_data->buffer_daddrs[i] + offset);
+ buff_data->buffers[i].dma_addr + offset);
ret = remap_pfn_range(
vma,
vma->vm_start + buff_data->size * i + offset,
@@ -314,13 +309,13 @@ static int remap_telemetry_buffers(struct gxp_dev *gxp,
out:
vma->vm_pgoff = orig_pgoff;
- vma->vm_ops = &gxp_telemetry_vma_ops;
+ vma->vm_ops = &telemetry_vma_ops;
return ret;
}
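The loop above places core i's buffer at offset i * buff_data->size inside the
vma, so user space can locate each core's buffer with plain pointer
arithmetic. A sketch of that layout rule (function and parameter names are
illustrative):

#include <stddef.h>
#include <stdint.h>

/* Core i's telemetry buffer inside the region returned by mmap(). */
static inline void *telemetry_core_buffer(void *base, size_t per_core_size,
                                          unsigned int core)
{
        return (uint8_t *)base + (size_t)core * per_core_size;
}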
-int gxp_telemetry_mmap_buffers(struct gxp_dev *gxp, u8 type,
- struct vm_area_struct *vma)
+int gxp_core_telemetry_mmap_buffers(struct gxp_dev *gxp, u8 type,
+ struct vm_area_struct *vma)
{
int ret = 0;
struct telemetry_vma_data *vma_data;
@@ -329,14 +324,14 @@ int gxp_telemetry_mmap_buffers(struct gxp_dev *gxp, u8 type,
struct buffer_data *buff_data;
int i;
- if (!gxp->telemetry_mgr)
+ if (!gxp->core_telemetry_mgr)
return -ENODEV;
/* Total size must divide evenly into 1 page-aligned buffer per core */
if (!total_size || total_size % (PAGE_SIZE * GXP_NUM_CORES))
return -EINVAL;
- mutex_lock(&gxp->telemetry_mgr->lock);
+ mutex_lock(&gxp->core_telemetry_mgr->lock);
ret = check_telemetry_type_availability(gxp, type);
if (ret)
@@ -349,8 +344,8 @@ int gxp_telemetry_mmap_buffers(struct gxp_dev *gxp, u8 type,
}
buff_data = allocate_telemetry_buffers(gxp, size);
- if (!buff_data) {
- ret = -ENOMEM;
+ if (IS_ERR(buff_data)) {
+ ret = PTR_ERR(buff_data);
goto err_free_vma_data;
}
@@ -364,32 +359,30 @@ int gxp_telemetry_mmap_buffers(struct gxp_dev *gxp, u8 type,
refcount_set(&vma_data->ref_count, 1);
vma->vm_private_data = vma_data;
- /* Save book-keeping on the buffers in the telemetry manager */
+ /* Save book-keeping on the buffers in the core telemetry manager */
if (type == GXP_TELEMETRY_TYPE_LOGGING)
- gxp->telemetry_mgr->logging_buff_data = buff_data;
+ gxp->core_telemetry_mgr->logging_buff_data = buff_data;
else /* type == GXP_TELEMETRY_TYPE_TRACING */
- gxp->telemetry_mgr->tracing_buff_data = buff_data;
+ gxp->core_telemetry_mgr->tracing_buff_data = buff_data;
- mutex_unlock(&gxp->telemetry_mgr->lock);
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
return 0;
err_free_buffers:
for (i = 0; i < GXP_NUM_CORES; i++)
- dma_free_coherent(gxp->dev, buff_data->size,
- buff_data->buffers[i],
- buff_data->buffer_daddrs[i]);
+ gxp_dma_free_coherent_buf(gxp, NULL, &buff_data->buffers[i]);
kfree(buff_data);
err_free_vma_data:
kfree(vma_data);
err:
- mutex_unlock(&gxp->telemetry_mgr->lock);
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
return ret;
}
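From user space, the mapping length must be a non-zero multiple of
PAGE_SIZE * GXP_NUM_CORES or mmap() fails with EINVAL. A sketch, assuming
gxp_fd is the open device node and telemetry_offset is the driver-specific
mmap offset selecting logging vs. tracing (see gxp.h); num_cores must match
GXP_NUM_CORES:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

/* 16 pages per core is an arbitrary example size. */
size_t per_core = 16 * (size_t)sysconf(_SC_PAGESIZE);
void *base = mmap(NULL, per_core * num_cores, PROT_READ, MAP_SHARED,
                  gxp_fd, telemetry_offset);
if (base == MAP_FAILED)
        perror("mmap"); /* EINVAL: bad size; EBUSY: already mapped;
                         * ENODEV: telemetry not initialized */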
-int gxp_telemetry_enable(struct gxp_dev *gxp, u8 type)
+int gxp_core_telemetry_enable(struct gxp_dev *gxp, u8 type)
{
struct buffer_data *data;
int ret = 0;
@@ -397,18 +390,18 @@ int gxp_telemetry_enable(struct gxp_dev *gxp, u8 type)
struct gxp_virtual_device *vd;
/*
- * `vd_semaphore` cannot be acquired while holding the telemetry lock,
- * so acquire it here before locking the telemetry lock.
+ * `vd_semaphore` cannot be acquired while holding the core telemetry
+ * lock, so acquire it here before locking the core telemetry lock.
*/
down_read(&gxp->vd_semaphore);
- mutex_lock(&gxp->telemetry_mgr->lock);
+ mutex_lock(&gxp->core_telemetry_mgr->lock);
switch (type) {
case GXP_TELEMETRY_TYPE_LOGGING:
- data = gxp->telemetry_mgr->logging_buff_data;
+ data = gxp->core_telemetry_mgr->logging_buff_data;
break;
case GXP_TELEMETRY_TYPE_TRACING:
- data = gxp->telemetry_mgr->tracing_buff_data;
+ data = gxp->core_telemetry_mgr->tracing_buff_data;
break;
default:
ret = -EINVAL;
@@ -425,17 +418,16 @@ int gxp_telemetry_enable(struct gxp_dev *gxp, u8 type)
vd = gxp->core_to_vd[core];
if (vd != NULL) {
ret = gxp_dma_map_allocated_coherent_buffer(
- gxp, data->buffers[core], vd->domain,
- data->size, data->buffer_daddrs[core], 0);
+ gxp, &data->buffers[core], vd->domain, 0);
if (ret)
goto err;
}
}
/* Populate the buffer fields in firmware-data */
- data->host_status |= GXP_TELEMETRY_HOST_STATUS_ENABLED;
- gxp_fw_data_set_telemetry_descriptors(gxp, type, data->host_status,
- data->buffer_daddrs, data->size);
+ data->host_status |= GXP_CORE_TELEMETRY_HOST_STATUS_ENABLED;
+ gxp_fw_data_set_core_telemetry_descriptors(gxp, type, data->host_status,
+ data->buffers, data->size);
/* Notify any running cores that firmware-data was updated */
for (core = 0; core < GXP_NUM_CORES; core++) {
@@ -453,12 +445,11 @@ err:
vd = gxp->core_to_vd[core];
if (vd)
gxp_dma_unmap_allocated_coherent_buffer(
- gxp, vd->domain, data->size,
- data->buffer_daddrs[core]);
+ gxp, vd->domain, &data->buffers[core]);
}
out:
- mutex_unlock(&gxp->telemetry_mgr->lock);
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
up_read(&gxp->vd_semaphore);
return ret;
@@ -468,11 +459,11 @@ out:
* notify_core_and_wait_for_disable() - Notify a core that telemetry state has
* been changed by the host and wait for
* the core to stop using telemetry.
- * @gxp: The GXP device telemetry is changing for
+ * @gxp: The GXP device core telemetry is changing for
* @core: The core in @gxp to notify of the telemetry state change
* @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
*
- * Caller must hold `telemetry_mgr->lock`.
+ * Caller must hold `core_telemetry_mgr->lock`.
* Caller must hold `gxp->vd_semaphore` for reading only.
* It is not allowed to hold `gxp->vd_semaphore` for writing, since this
* function needs to release `gxp->vd_semaphore` at different points to sleep.
@@ -486,13 +477,13 @@ static int notify_core_and_wait_for_disable(struct gxp_dev *gxp, uint core,
{
uint retries_left = 50;
- lockdep_assert_held(&gxp->telemetry_mgr->lock);
+ lockdep_assert_held(&gxp->core_telemetry_mgr->lock);
lockdep_assert_held_read(&gxp->vd_semaphore);
gxp_notification_send(gxp, core, CORE_NOTIF_TELEMETRY_STATUS);
/* Wait for ACK from firmware */
- while (is_core_telemetry_enabled(gxp, core, type) &&
+ while (is_telemetry_enabled(gxp, core, type) &&
gxp_is_fw_running(gxp, core) && retries_left) {
/* Release vd_semaphore while waiting */
up_read(&gxp->vd_semaphore);
@@ -505,19 +496,19 @@ static int notify_core_and_wait_for_disable(struct gxp_dev *gxp, uint core,
* If a core does stop running firmware while this function is
* asleep, it will be seen at the next timeout.
*/
- wait_event_timeout(gxp->telemetry_mgr->waitq,
- !is_core_telemetry_enabled(gxp, core, type),
+ wait_event_timeout(gxp->core_telemetry_mgr->waitq,
+ !is_telemetry_enabled(gxp, core, type),
msecs_to_jiffies(10));
retries_left--;
/*
* No function may attempt to acquire the `vd_semaphore` while
- * holding the telemetry lock, so it must be released, then
+ * holding the core telemetry lock, so it must be released, then
* re-acquired once the `vd_semaphore` is held.
*/
- mutex_unlock(&gxp->telemetry_mgr->lock);
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
down_read(&gxp->vd_semaphore);
- mutex_lock(&gxp->telemetry_mgr->lock);
+ mutex_lock(&gxp->core_telemetry_mgr->lock);
}
/*
@@ -525,7 +516,7 @@ static int notify_core_and_wait_for_disable(struct gxp_dev *gxp, uint core,
* considered disabled. If firmware is started on this core again, it
* is responsible for clearing its status.
*/
- if (unlikely(is_core_telemetry_enabled(gxp, core, type) &&
+ if (unlikely(is_telemetry_enabled(gxp, core, type) &&
gxp_is_fw_running(gxp, core)))
return -ENXIO;
@@ -534,13 +525,13 @@ static int notify_core_and_wait_for_disable(struct gxp_dev *gxp, uint core,
/**
* telemetry_disable_locked() - Helper function to break out the actual
- * process of disabling telemetry so that it
+ * process of disabling core telemetry so that it
* can be invoked by internal functions that are
- * already holding the telemetry lock.
+ * already holding the core telemetry lock.
* @gxp: The GXP device to disable either logging or tracing for
* @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
*
- * Caller must hold `telemetry_mgr->lock`.
+ * Caller must hold `core_telemetry_mgr->lock`.
* Caller must hold `gxp->vd_semaphore` for reading only.
* It is not allowed to hold `gxp->vd_semaphore` for writing, since this
* function needs to release `gxp->vd_semaphore` at different points to sleep.
@@ -554,20 +545,19 @@ static int telemetry_disable_locked(struct gxp_dev *gxp, u8 type)
{
struct buffer_data *data;
int ret = 0;
- dma_addr_t null_daddrs[GXP_NUM_CORES] = {0};
uint core;
struct gxp_virtual_device *vd;
- lockdep_assert_held(&gxp->telemetry_mgr->lock);
+ lockdep_assert_held(&gxp->core_telemetry_mgr->lock);
lockdep_assert_held_read(&gxp->vd_semaphore);
- /* Cleanup telemetry manager's book-keeping */
+ /* Cleanup core telemetry manager's book-keeping */
switch (type) {
case GXP_TELEMETRY_TYPE_LOGGING:
- data = gxp->telemetry_mgr->logging_buff_data;
+ data = gxp->core_telemetry_mgr->logging_buff_data;
break;
case GXP_TELEMETRY_TYPE_TRACING:
- data = gxp->telemetry_mgr->tracing_buff_data;
+ data = gxp->core_telemetry_mgr->tracing_buff_data;
break;
default:
return -EINVAL;
@@ -576,15 +566,14 @@ static int telemetry_disable_locked(struct gxp_dev *gxp, u8 type)
if (!data)
return -ENXIO;
- if (!(data->host_status & GXP_TELEMETRY_HOST_STATUS_ENABLED))
+ if (!(data->host_status & GXP_CORE_TELEMETRY_HOST_STATUS_ENABLED))
return 0;
data->is_enabled = false;
/* Clear the log buffer fields in firmware-data */
- data->host_status &= ~GXP_TELEMETRY_HOST_STATUS_ENABLED;
- gxp_fw_data_set_telemetry_descriptors(gxp, type, data->host_status,
- null_daddrs, 0);
+ data->host_status &= ~GXP_CORE_TELEMETRY_HOST_STATUS_ENABLED;
+ gxp_fw_data_set_core_telemetry_descriptors(gxp, type, data->host_status, NULL, 0);
/* Notify any running cores that firmware-data was updated */
for (core = 0; core < GXP_NUM_CORES; core++) {
@@ -599,17 +588,16 @@ static int telemetry_disable_locked(struct gxp_dev *gxp, u8 type)
vd = gxp->core_to_vd[core];
if (vd)
gxp_dma_unmap_allocated_coherent_buffer(
- gxp, vd->domain, data->size,
- data->buffer_daddrs[core]);
+ gxp, vd->domain, &data->buffers[core]);
}
if (refcount_dec_and_test(&data->ref_count)) {
switch (type) {
case GXP_TELEMETRY_TYPE_LOGGING:
- gxp->telemetry_mgr->logging_buff_data = NULL;
+ gxp->core_telemetry_mgr->logging_buff_data = NULL;
break;
case GXP_TELEMETRY_TYPE_TRACING:
- gxp->telemetry_mgr->tracing_buff_data = NULL;
+ gxp->core_telemetry_mgr->tracing_buff_data = NULL;
break;
default:
/* NO-OP, we returned above if `type` was invalid */
@@ -621,26 +609,26 @@ static int telemetry_disable_locked(struct gxp_dev *gxp, u8 type)
return 0;
}
-int gxp_telemetry_disable(struct gxp_dev *gxp, u8 type)
+int gxp_core_telemetry_disable(struct gxp_dev *gxp, u8 type)
{
int ret;
/*
- * `vd_semaphore` cannot be acquired while holding the telemetry lock,
- * so acquire it here before locking the telemetry lock.
+ * `vd_semaphore` cannot be acquired while holding the core telemetry
+ * lock, so acquire it here before locking the core telemetry lock.
*/
down_read(&gxp->vd_semaphore);
- mutex_lock(&gxp->telemetry_mgr->lock);
+ mutex_lock(&gxp->core_telemetry_mgr->lock);
ret = telemetry_disable_locked(gxp, type);
- mutex_unlock(&gxp->telemetry_mgr->lock);
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
up_read(&gxp->vd_semaphore);
return ret;
}
-int gxp_telemetry_register_eventfd(struct gxp_dev *gxp, u8 type, int fd)
+int gxp_core_telemetry_register_eventfd(struct gxp_dev *gxp, u8 type, int fd)
{
struct eventfd_ctx *new_ctx;
struct eventfd_ctx **ctx_to_set = NULL;
@@ -650,14 +638,14 @@ int gxp_telemetry_register_eventfd(struct gxp_dev *gxp, u8 type, int fd)
if (IS_ERR(new_ctx))
return PTR_ERR(new_ctx);
- mutex_lock(&gxp->telemetry_mgr->lock);
+ mutex_lock(&gxp->core_telemetry_mgr->lock);
switch (type) {
case GXP_TELEMETRY_TYPE_LOGGING:
- ctx_to_set = &gxp->telemetry_mgr->logging_efd;
+ ctx_to_set = &gxp->core_telemetry_mgr->logging_efd;
break;
case GXP_TELEMETRY_TYPE_TRACING:
- ctx_to_set = &gxp->telemetry_mgr->tracing_efd;
+ ctx_to_set = &gxp->core_telemetry_mgr->tracing_efd;
break;
default:
ret = -EINVAL;
@@ -665,49 +653,50 @@ int gxp_telemetry_register_eventfd(struct gxp_dev *gxp, u8 type, int fd)
}
if (*ctx_to_set) {
- dev_warn(gxp->dev,
- "Replacing existing telemetry eventfd (type=%u)\n",
- type);
+ dev_warn(
+ gxp->dev,
+ "Replacing existing core telemetry eventfd (type=%u)\n",
+ type);
eventfd_ctx_put(*ctx_to_set);
}
*ctx_to_set = new_ctx;
out:
- mutex_unlock(&gxp->telemetry_mgr->lock);
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
return ret;
}
-int gxp_telemetry_unregister_eventfd(struct gxp_dev *gxp, u8 type)
+int gxp_core_telemetry_unregister_eventfd(struct gxp_dev *gxp, u8 type)
{
int ret = 0;
- mutex_lock(&gxp->telemetry_mgr->lock);
+ mutex_lock(&gxp->core_telemetry_mgr->lock);
switch (type) {
case GXP_TELEMETRY_TYPE_LOGGING:
- if (gxp->telemetry_mgr->logging_efd)
- eventfd_ctx_put(gxp->telemetry_mgr->logging_efd);
- gxp->telemetry_mgr->logging_efd = NULL;
+ if (gxp->core_telemetry_mgr->logging_efd)
+ eventfd_ctx_put(gxp->core_telemetry_mgr->logging_efd);
+ gxp->core_telemetry_mgr->logging_efd = NULL;
break;
case GXP_TELEMETRY_TYPE_TRACING:
- if (gxp->telemetry_mgr->tracing_efd)
- eventfd_ctx_put(gxp->telemetry_mgr->tracing_efd);
- gxp->telemetry_mgr->tracing_efd = NULL;
+ if (gxp->core_telemetry_mgr->tracing_efd)
+ eventfd_ctx_put(gxp->core_telemetry_mgr->tracing_efd);
+ gxp->core_telemetry_mgr->tracing_efd = NULL;
break;
default:
ret = -EINVAL;
}
- mutex_unlock(&gxp->telemetry_mgr->lock);
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
return ret;
}
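User space typically pairs these two calls with eventfd(2): create the fd,
pass it down through the driver's telemetry ioctl (not shown here), then block
on read(2) until the driver signals. A sketch:

#include <stdint.h>
#include <sys/eventfd.h>
#include <unistd.h>

int efd = eventfd(0, EFD_CLOEXEC);
/* ... hand efd to the driver, which ends up calling
 *     gxp_core_telemetry_register_eventfd(gxp, type, efd) ... */
uint64_t events;
if (read(efd, &events, sizeof(events)) == sizeof(events))
        ; /* new telemetry data is ready in the mmap()ed buffers */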
-struct work_struct *gxp_telemetry_get_notification_handler(struct gxp_dev *gxp,
- uint core)
+struct work_struct *
+gxp_core_telemetry_get_notification_handler(struct gxp_dev *gxp, uint core)
{
- struct gxp_telemetry_manager *mgr = gxp->telemetry_mgr;
+ struct gxp_core_telemetry_manager *mgr = gxp->core_telemetry_mgr;
if (!mgr || core >= GXP_NUM_CORES)
return NULL;
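A hypothetical interrupt-path sketch showing how the returned work item is
consumed; queueing it on the system workqueue mirrors what the notification
dispatch code is expected to do:

#include <linux/workqueue.h>

struct work_struct *work =
        gxp_core_telemetry_get_notification_handler(gxp, core);

/* NULL means core telemetry is uninitialized or @core is out of range. */
if (work)
        schedule_work(work);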
diff --git a/gxp-telemetry.h b/gxp-core-telemetry.h
index d2e63de..c512292 100644
--- a/gxp-telemetry.h
+++ b/gxp-core-telemetry.h
@@ -1,72 +1,74 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * GXP telemetry support
+ * GXP core telemetry support
*
- * Copyright (C) 2021 Google LLC
+ * Copyright (C) 2021-2022 Google LLC
*/
-#ifndef __GXP_TELEMETRY_H__
-#define __GXP_TELEMETRY_H__
+
+#ifndef __GXP_CORE_TELEMETRY_H__
+#define __GXP_CORE_TELEMETRY_H__
#include <linux/eventfd.h>
#include <linux/refcount.h>
#include <linux/types.h>
+#include "gxp-dma.h"
#include "gxp-internal.h"
#include "gxp.h"
-struct gxp_telemetry_work {
+struct gxp_core_telemetry_work {
struct work_struct work;
struct gxp_dev *gxp;
uint core;
};
-struct gxp_telemetry_manager {
+struct gxp_core_telemetry_manager {
struct buffer_data {
u32 host_status;
- void *buffers[GXP_NUM_CORES];
- dma_addr_t buffer_daddrs[GXP_NUM_CORES];
+ struct gxp_coherent_buf buffers[GXP_NUM_CORES];
u32 size;
refcount_t ref_count;
bool is_enabled;
} *logging_buff_data, *tracing_buff_data;
/* Protects logging_buff_data and tracing_buff_data */
struct mutex lock;
- struct gxp_telemetry_work notification_works[GXP_NUM_CORES];
+ struct gxp_core_telemetry_work notification_works[GXP_NUM_CORES];
wait_queue_head_t waitq;
struct eventfd_ctx *logging_efd;
struct eventfd_ctx *tracing_efd;
};
/**
- * gxp_telemetry_init() - Initialize telemetry support
- * @gxp: The GXP device to initialize telemetry support for
+ * gxp_core_telemetry_init() - Initialize core telemetry support
+ * @gxp: The GXP device to initialize core telemetry support for
*
* Return:
* * 0 - Success
* * -ENOMEM - Insufficient memory is available to initialize support
*/
-int gxp_telemetry_init(struct gxp_dev *gxp);
+int gxp_core_telemetry_init(struct gxp_dev *gxp);
/**
- * gxp_telemetry_mmap_buffers() - Allocate a telemetry buffer for each core and
- * map them to their core and the user-space vma
+ * gxp_core_telemetry_mmap_buffers() - Allocate a telemetry buffer for each core
+ * and map them to their core and the
+ * user-space vma
* @gxp: The GXP device to create the buffers for
 * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
* @vma: The vma from user-space which all cores' buffers will be mapped into
*
* Return:
* * 0 - Success
- * * -ENODEV - Telemetry support has not been initialized. Must explicitly
+ * * -ENODEV - Core telemetry support has not been initialized. Must explicitly
 * check this, since this function is called based on user input.
- * * -EBUSY - The requested telemetry @type is already in use
+ * * -EBUSY - The requested core telemetry @type is already in use
* * -EINVAL - Either the vma size is not aligned or @type is not valid
* * -ENOMEM - Insufficient memory is available to allocate and map the buffers
*/
-int gxp_telemetry_mmap_buffers(struct gxp_dev *gxp, u8 type,
- struct vm_area_struct *vma);
+int gxp_core_telemetry_mmap_buffers(struct gxp_dev *gxp, u8 type,
+ struct vm_area_struct *vma);
/**
- * gxp_telemetry_enable() - Enable logging or tracing for all DSP cores
+ * gxp_core_telemetry_enable() - Enable logging or tracing for all DSP cores
* @gxp: The GXP device to enable either logging or tracing for
* @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
*
@@ -75,10 +77,10 @@ int gxp_telemetry_mmap_buffers(struct gxp_dev *gxp, u8 type,
* * -EINVAL - The @type provided is not valid
* * -ENXIO - Buffers for @type have not been created/mapped yet
*/
-int gxp_telemetry_enable(struct gxp_dev *gxp, u8 type);
+int gxp_core_telemetry_enable(struct gxp_dev *gxp, u8 type);
/**
- * gxp_telemetry_disable() - Disable logging or tracing for all DSP cores
+ * gxp_core_telemetry_disable() - Disable logging or tracing for all DSP cores
* @gxp: The GXP device to disable either logging or tracing for
* @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
*
@@ -87,12 +89,13 @@ int gxp_telemetry_enable(struct gxp_dev *gxp, u8 type);
* * -EINVAL - The @type provided is not valid
* * -ENXIO - Buffers for @type have not been created/mapped yet
*/
-int gxp_telemetry_disable(struct gxp_dev *gxp, u8 type);
+int gxp_core_telemetry_disable(struct gxp_dev *gxp, u8 type);
/**
- * gxp_telemetry_register_eventfd() - Register an eventfd to be signaled when
- * telemetry notifications arrive while the
- * specified @type of telemetry is enabled
+ * gxp_core_telemetry_register_eventfd() - Register an eventfd to be signaled
+ * when core telemetry notifications
+ * arrive while the specified @type of
+ * core telemetry is enabled
* @gxp: The GXP device to register the eventfd for
* @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
* @fd: A file descriptor for an eventfd from user-space
@@ -105,11 +108,11 @@ int gxp_telemetry_disable(struct gxp_dev *gxp, u8 type);
* * -EBADF - @fd is not a valid file descriptor (via `eventfd_ctx_fdget()`)
* * -EINVAL - Invalid @type or @fd is not an eventfd
*/
-int gxp_telemetry_register_eventfd(struct gxp_dev *gxp, u8 type, int fd);
+int gxp_core_telemetry_register_eventfd(struct gxp_dev *gxp, u8 type, int fd);
/**
- * gxp_telemetry_unregister_eventfd() - Unregister and release a reference to
- * a previously registered eventfd
+ * gxp_core_telemetry_unregister_eventfd() - Unregister and release a reference
+ * to a previously registered eventfd
* @gxp: The GXP device to unregister the eventfd for
* @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
*
@@ -117,19 +120,19 @@ int gxp_telemetry_register_eventfd(struct gxp_dev *gxp, u8 type, int fd);
* * 0 - Success
* * -EINVAL - The @type provided is not valid
*/
-int gxp_telemetry_unregister_eventfd(struct gxp_dev *gxp, u8 type);
+int gxp_core_telemetry_unregister_eventfd(struct gxp_dev *gxp, u8 type);
/**
- * gxp_telemetry_get_notification_handler() - Get the notification handler work
- * for the specified core
+ * gxp_core_telemetry_get_notification_handler() - Get the notification handler
+ * work for the specified core
* @gxp: The GXP device to obtain the handler for
 * @core: The physical core number to obtain the handler for
*
* Return: A pointer to the work_struct for the @core's notification handler if
- * successful. NULL if telemetry has not been initialized or @core is
- * invalid.
+ * successful. NULL if core telemetry has not been initialized or @core
+ * is invalid.
*/
-struct work_struct *gxp_telemetry_get_notification_handler(struct gxp_dev *gxp,
- uint core);
+struct work_struct *
+gxp_core_telemetry_get_notification_handler(struct gxp_dev *gxp, uint core);
-#endif /* __GXP_TELEMETRY_H__ */
+#endif /* __GXP_CORE_TELEMETRY_H__ */
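Taken together, the intended life cycle of the API above is mmap, enable,
consume, disable. A condensed kernel-side sketch with error unwinding trimmed
(the surrounding ioctl plumbing lives elsewhere in the driver):

int ret;

ret = gxp_core_telemetry_mmap_buffers(gxp, GXP_TELEMETRY_TYPE_LOGGING, vma);
if (ret)
        return ret;

ret = gxp_core_telemetry_enable(gxp, GXP_TELEMETRY_TYPE_LOGGING);
if (ret)
        return ret;

/* ... cores write logs; registered eventfds fire ... */

ret = gxp_core_telemetry_disable(gxp, GXP_TELEMETRY_TYPE_LOGGING);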
diff --git a/gxp-dci.c b/gxp-dci.c
index 7f284bb..58a64c2 100644
--- a/gxp-dci.c
+++ b/gxp-dci.c
@@ -201,8 +201,7 @@ static void gxp_dci_mailbox_manager_set_ops(struct gxp_mailbox_manager *mgr)
/* Private data structure of DCI mailbox. */
struct gxp_dci {
- struct gxp_mailbox *gxp_mbx;
- struct gcip_mailbox *gcip_mbx;
+ struct gxp_mailbox *mbx;
};
static u64 gxp_dci_get_cmd_elem_seq(struct gcip_mailbox *mailbox, void *cmd)
@@ -262,12 +261,12 @@ static void gxp_dci_handle_async_resp_arrived(
struct gcip_mailbox *mailbox,
struct gcip_mailbox_async_response *gcip_async_resp)
{
- struct gxp_mailbox *gxp_mbx = mailbox->data;
+ struct gxp_mailbox *mbx = mailbox->data;
struct gxp_dci_async_response *async_resp = gcip_async_resp->data;
unsigned long flags;
gxp_pm_update_requested_power_states(
- gxp_mbx->gxp, async_resp->requested_states, off_states);
+ mbx->gxp, async_resp->requested_states, off_states);
spin_lock_irqsave(async_resp->dest_queue_lock, flags);
@@ -294,7 +293,7 @@ static void gxp_dci_handle_async_resp_timedout(
struct gcip_mailbox *mailbox,
struct gcip_mailbox_async_response *gcip_async_resp)
{
- struct gxp_mailbox *gxp_mbx = mailbox->data;
+ struct gxp_mailbox *mbx = mailbox->data;
struct gxp_dci_async_response *async_resp = gcip_async_resp->data;
struct gxp_dci_response *resp = &async_resp->resp;
unsigned long flags;
@@ -314,7 +313,7 @@ static void gxp_dci_handle_async_resp_timedout(
spin_unlock_irqrestore(async_resp->dest_queue_lock, flags);
gxp_pm_update_requested_power_states(
- gxp_mbx->gxp, async_resp->requested_states, off_states);
+ mbx->gxp, async_resp->requested_states, off_states);
if (async_resp->eventfd) {
gxp_eventfd_signal(async_resp->eventfd);
@@ -381,12 +380,14 @@ static int gxp_dci_allocate_resources(struct gxp_mailbox *mailbox,
struct gxp_virtual_device *vd,
uint virt_core)
{
+ int ret;
+
/* Allocate and initialize the command queue */
- mailbox->cmd_queue = (struct gxp_dci_command *)gxp_dma_alloc_coherent(
+ ret = gxp_dma_alloc_coherent_buf(
mailbox->gxp, vd->domain,
sizeof(struct gxp_dci_command) * MBOX_CMD_QUEUE_NUM_ENTRIES,
- &(mailbox->cmd_queue_device_addr), GFP_KERNEL, 0);
- if (!mailbox->cmd_queue)
+ GFP_KERNEL, 0, &mailbox->cmd_queue_buf);
+ if (ret)
goto err_cmd_queue;
mailbox->cmd_queue_size = MBOX_CMD_QUEUE_NUM_ENTRIES;
@@ -394,11 +395,11 @@ static int gxp_dci_allocate_resources(struct gxp_mailbox *mailbox,
mutex_init(&mailbox->cmd_queue_lock);
/* Allocate and initialize the response queue */
- mailbox->resp_queue = (struct gxp_dci_response *)gxp_dma_alloc_coherent(
+ ret = gxp_dma_alloc_coherent_buf(
mailbox->gxp, vd->domain,
sizeof(struct gxp_dci_response) * MBOX_RESP_QUEUE_NUM_ENTRIES,
- &(mailbox->resp_queue_device_addr), GFP_KERNEL, 0);
- if (!mailbox->resp_queue)
+ GFP_KERNEL, 0, &mailbox->resp_queue_buf);
+ if (ret)
goto err_resp_queue;
mailbox->resp_queue_size = MBOX_RESP_QUEUE_NUM_ENTRIES;
@@ -406,114 +407,50 @@ static int gxp_dci_allocate_resources(struct gxp_mailbox *mailbox,
mutex_init(&mailbox->resp_queue_lock);
/* Allocate and initialize the mailbox descriptor */
- mailbox->descriptor =
- (struct gxp_mailbox_descriptor *)gxp_dma_alloc_coherent(
- mailbox->gxp, vd->domain,
- sizeof(struct gxp_mailbox_descriptor),
- &(mailbox->descriptor_device_addr), GFP_KERNEL, 0);
- if (!mailbox->descriptor)
+ ret = gxp_dma_alloc_coherent_buf(mailbox->gxp, vd->domain,
+ sizeof(struct gxp_mailbox_descriptor),
+ GFP_KERNEL, 0,
+ &mailbox->descriptor_buf);
+ if (ret)
goto err_descriptor;
+ mailbox->descriptor =
+ (struct gxp_mailbox_descriptor *)mailbox->descriptor_buf.vaddr;
mailbox->descriptor->cmd_queue_device_addr =
- mailbox->cmd_queue_device_addr;
+ mailbox->cmd_queue_buf.dsp_addr;
mailbox->descriptor->resp_queue_device_addr =
- mailbox->resp_queue_device_addr;
+ mailbox->resp_queue_buf.dsp_addr;
mailbox->descriptor->cmd_queue_size = mailbox->cmd_queue_size;
mailbox->descriptor->resp_queue_size = mailbox->resp_queue_size;
return 0;
err_descriptor:
- gxp_dma_free_coherent(
- mailbox->gxp, vd->domain,
- sizeof(struct gxp_dci_response) * mailbox->resp_queue_size,
- mailbox->resp_queue, mailbox->resp_queue_device_addr);
+ gxp_dma_free_coherent_buf(mailbox->gxp, vd->domain,
+ &mailbox->resp_queue_buf);
err_resp_queue:
- gxp_dma_free_coherent(
- mailbox->gxp, vd->domain,
- sizeof(struct gxp_dci_command) * mailbox->cmd_queue_size,
- mailbox->cmd_queue, mailbox->cmd_queue_device_addr);
+ gxp_dma_free_coherent_buf(mailbox->gxp, vd->domain,
+ &mailbox->cmd_queue_buf);
err_cmd_queue:
- return -ENOMEM;
+ return ret;
}
static void gxp_dci_release_resources(struct gxp_mailbox *mailbox,
struct gxp_virtual_device *vd,
uint virt_core)
{
- gxp_dma_free_coherent(
- mailbox->gxp, vd->domain,
- sizeof(struct gxp_dci_command) * mailbox->cmd_queue_size,
- mailbox->cmd_queue, mailbox->cmd_queue_device_addr);
- gxp_dma_free_coherent(
- mailbox->gxp, vd->domain,
- sizeof(struct gxp_dci_response) * mailbox->resp_queue_size,
- mailbox->resp_queue, mailbox->resp_queue_device_addr);
- gxp_dma_free_coherent(mailbox->gxp, vd->domain,
- sizeof(struct gxp_mailbox_descriptor),
- mailbox->descriptor,
- mailbox->descriptor_device_addr);
-}
-
-static int gxp_dci_init_consume_responses_work(struct gxp_mailbox *gxp_mbx)
-{
- struct gxp_dci *dci = gxp_mbx->data;
- struct gcip_mailbox_args args = {
- .dev = gxp_mbx->gxp->dev,
- .queue_wrap_bit = CIRCULAR_QUEUE_WRAP_BIT,
- .cmd_queue = gxp_mbx->cmd_queue,
- .cmd_elem_size = sizeof(struct gxp_dci_command),
- .resp_queue = gxp_mbx->resp_queue,
- .resp_elem_size = sizeof(struct gxp_dci_response),
- .timeout = MAILBOX_TIMEOUT,
- .ops = &gxp_dci_gcip_mbx_ops,
- .data = gxp_mbx,
- };
- int ret;
-
- dci->gcip_mbx = kzalloc(sizeof(*dci->gcip_mbx), GFP_KERNEL);
- if (!dci->gcip_mbx)
- return -ENOMEM;
-
- /* Initialize gcip_mailbox */
- ret = gcip_mailbox_init(dci->gcip_mbx, &args);
- if (ret) {
- kfree(dci->gcip_mbx);
- return ret;
- }
-
- return 0;
-}
-
-static void gxp_dci_release_consume_responses_work(struct gxp_mailbox *gxp_mbx)
-{
- struct gxp_dci *dci = gxp_mbx->data;
-
- /* Release gcip_mailbox */
- gcip_mailbox_release(dci->gcip_mbx);
- kfree(dci->gcip_mbx);
-}
-
-static void gxp_dci_consume_responses_work(struct gxp_mailbox *gxp_mbx)
-{
- struct gxp_dci *dci = gxp_mbx->data;
-
- if (gxp_is_a0(gxp_mbx->gxp))
- dma_sync_single_for_cpu(gxp_mbx->gxp->dev,
- gxp_mbx->resp_queue_device_addr,
- gxp_mbx->resp_queue_size *
- sizeof(struct gxp_dci_response),
- DMA_BIDIRECTIONAL);
- gcip_mailbox_consume_responses_work(dci->gcip_mbx);
+ gxp_dma_free_coherent_buf(mailbox->gxp, vd->domain,
+ &mailbox->cmd_queue_buf);
+ gxp_dma_free_coherent_buf(mailbox->gxp, vd->domain,
+ &mailbox->resp_queue_buf);
+ gxp_dma_free_coherent_buf(mailbox->gxp, vd->domain,
+ &mailbox->descriptor_buf);
}
static struct gxp_mailbox_ops gxp_dci_gxp_mbx_ops = {
.allocate_resources = gxp_dci_allocate_resources,
.release_resources = gxp_dci_release_resources,
- .init_consume_responses_work = gxp_dci_init_consume_responses_work,
- .release_consume_responses_work =
- gxp_dci_release_consume_responses_work,
- .consume_responses_work = gxp_dci_consume_responses_work,
+ .gcip_ops.mbx = &gxp_dci_gcip_mbx_ops,
};
void gxp_dci_init(struct gxp_mailbox_manager *mgr)
@@ -527,7 +464,11 @@ struct gxp_mailbox *gxp_dci_alloc(struct gxp_mailbox_manager *mgr,
{
struct gxp_dci *dci;
struct gxp_mailbox_args mbx_args = {
+ .type = GXP_MBOX_TYPE_GENERAL,
.ops = &gxp_dci_gxp_mbx_ops,
+ .queue_wrap_bit = CIRCULAR_QUEUE_WRAP_BIT,
+ .cmd_elem_size = sizeof(struct gxp_dci_command),
+ .resp_elem_size = sizeof(struct gxp_dci_response),
};
dci = kzalloc(sizeof(*dci), GFP_KERNEL);
@@ -535,38 +476,35 @@ struct gxp_mailbox *gxp_dci_alloc(struct gxp_mailbox_manager *mgr,
return ERR_PTR(-ENOMEM);
mbx_args.data = dci;
- dci->gxp_mbx =
- gxp_mailbox_alloc(mgr, vd, virt_core, core_id, &mbx_args);
- if (IS_ERR(dci->gxp_mbx))
+ dci->mbx = gxp_mailbox_alloc(mgr, vd, virt_core, core_id, &mbx_args);
+ if (IS_ERR(dci->mbx))
kfree(dci);
else
- gxp_mailbox_generate_device_interrupt(dci->gxp_mbx, BIT(0));
+ gxp_mailbox_generate_device_interrupt(dci->mbx, BIT(0));
- return dci->gxp_mbx;
+ return dci->mbx;
}
void gxp_dci_release(struct gxp_mailbox_manager *mgr,
struct gxp_virtual_device *vd, uint virt_core,
- struct gxp_mailbox *gxp_mbx)
+ struct gxp_mailbox *mbx)
{
- gxp_mailbox_release(mgr, vd, virt_core, gxp_mbx);
+ gxp_mailbox_release(mgr, vd, virt_core, mbx);
}
-int gxp_dci_execute_cmd(struct gxp_mailbox *gxp_mbx,
- struct gxp_dci_command *cmd,
+int gxp_dci_execute_cmd(struct gxp_mailbox *mbx, struct gxp_dci_command *cmd,
struct gxp_dci_response *resp)
{
- struct gxp_dci *dci = gxp_mbx->data;
int ret;
- ret = gcip_mailbox_send_cmd(dci->gcip_mbx, cmd, resp);
+ ret = gxp_mailbox_send_cmd(mbx, cmd, resp);
if (ret || !resp)
return ret;
return resp->retval;
}
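A hypothetical synchronous caller of the simplified entry point; the opcode
and payload layout come from gxp-dci.h and are not spelled out here:

struct gxp_dci_command cmd = { /* opcode and payload set by the caller */ };
struct gxp_dci_response resp;
int ret;

ret = gxp_dci_execute_cmd(mbx, &cmd, &resp);
if (ret < 0)
        return ret; /* mailbox-level failure or timeout */
/* on success, ret carries resp.retval, the firmware's status code */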
-int gxp_dci_execute_cmd_async(struct gxp_mailbox *gxp_mbx,
+int gxp_dci_execute_cmd_async(struct gxp_mailbox *mbx,
struct gxp_dci_command *cmd,
struct list_head *resp_queue,
spinlock_t *queue_lock,
@@ -574,7 +512,6 @@ int gxp_dci_execute_cmd_async(struct gxp_mailbox *gxp_mbx,
struct gxp_power_states requested_states,
struct gxp_eventfd *eventfd)
{
- struct gxp_dci *dci = gxp_mbx->data;
struct gxp_dci_async_response *async_resp;
int ret;
@@ -591,10 +528,10 @@ int gxp_dci_execute_cmd_async(struct gxp_mailbox *gxp_mbx,
else
async_resp->eventfd = NULL;
- gxp_pm_update_requested_power_states(gxp_mbx->gxp, off_states,
+ gxp_pm_update_requested_power_states(mbx->gxp, off_states,
requested_states);
- async_resp->async_resp = gcip_mailbox_put_cmd(
- dci->gcip_mbx, cmd, &async_resp->resp, async_resp);
+ async_resp->async_resp =
+ gxp_mailbox_put_cmd(mbx, cmd, &async_resp->resp, async_resp);
if (IS_ERR(async_resp->async_resp)) {
ret = PTR_ERR(async_resp->async_resp);
goto err_free_resp;
@@ -603,7 +540,7 @@ int gxp_dci_execute_cmd_async(struct gxp_mailbox *gxp_mbx,
return 0;
err_free_resp:
- gxp_pm_update_requested_power_states(gxp_mbx->gxp, requested_states,
+ gxp_pm_update_requested_power_states(mbx->gxp, requested_states,
off_states);
kfree(async_resp);
return ret;
diff --git a/gxp-debug-dump.c b/gxp-debug-dump.c
index 9bcba1f..646804e 100644
--- a/gxp-debug-dump.c
+++ b/gxp-debug-dump.c
@@ -730,7 +730,7 @@ struct work_struct *gxp_debug_dump_get_notification_handler(struct gxp_dev *gxp,
int gxp_debug_dump_init(struct gxp_dev *gxp, void *sscd_dev, void *sscd_pdata)
{
struct gxp_debug_dump_manager *mgr;
- int core;
+ int core, ret;
/* Don't initialize the debug dump subsystem unless it's enabled. */
if (!gxp_debug_dump_enable)
@@ -742,12 +742,11 @@ int gxp_debug_dump_init(struct gxp_dev *gxp, void *sscd_dev, void *sscd_pdata)
gxp->debug_dump_mgr = mgr;
mgr->gxp = gxp;
- mgr->buf.vaddr =
- gxp_dma_alloc_coherent(gxp, NULL, DEBUG_DUMP_MEMORY_SIZE,
- &mgr->buf.daddr, GFP_KERNEL, 0);
- if (!mgr->buf.vaddr) {
+ ret = gxp_dma_alloc_coherent_buf(gxp, NULL, DEBUG_DUMP_MEMORY_SIZE,
+ GFP_KERNEL, 0, &mgr->buf);
+ if (ret) {
dev_err(gxp->dev, "Failed to allocate memory for debug dump\n");
- return -ENODEV;
+ return ret;
}
mgr->buf.size = DEBUG_DUMP_MEMORY_SIZE;
@@ -782,8 +781,7 @@ void gxp_debug_dump_exit(struct gxp_dev *gxp)
}
kfree(gxp->debug_dump_mgr->common_dump);
- gxp_dma_free_coherent(gxp, NULL, DEBUG_DUMP_MEMORY_SIZE, mgr->buf.vaddr,
- mgr->buf.daddr);
+ gxp_dma_free_coherent_buf(gxp, NULL, &mgr->buf);
mutex_destroy(&mgr->debug_dump_lock);
devm_kfree(mgr->gxp->dev, mgr);
diff --git a/gxp-debug-dump.h b/gxp-debug-dump.h
index a3d2f34..2a5d1ce 100644
--- a/gxp-debug-dump.h
+++ b/gxp-debug-dump.h
@@ -16,6 +16,7 @@
#include <linux/platform_data/sscoredump.h>
#endif
+#include "gxp-dma.h"
#include "gxp-internal.h"
#define GXP_NUM_COMMON_SEGMENTS 2
@@ -173,15 +174,9 @@ struct gxp_debug_dump_work {
uint core_id;
};
-struct gxp_debug_dump_buffer {
- void *vaddr;
- dma_addr_t daddr;
- u32 size;
-};
-
struct gxp_debug_dump_manager {
struct gxp_dev *gxp;
- struct gxp_debug_dump_buffer buf;
+ struct gxp_coherent_buf buf; /* Buffer holding debug dump data */
struct gxp_debug_dump_work debug_dump_works[GXP_NUM_CORES];
struct gxp_core_dump *core_dump; /* start of the core dump */
struct gxp_common_dump *common_dump;
diff --git a/gxp-debugfs.c b/gxp-debugfs.c
index 435bdb5..09d8d36 100644
--- a/gxp-debugfs.c
+++ b/gxp-debugfs.c
@@ -8,8 +8,10 @@
#include <linux/acpm_dvfs.h>
#include "gxp-client.h"
+#include "gxp-core-telemetry.h"
#include "gxp-debug-dump.h"
#include "gxp-debugfs.h"
+#include "gxp-dma.h"
#include "gxp-firmware-data.h"
#include "gxp-firmware.h"
#include "gxp-internal.h"
@@ -17,7 +19,6 @@
#include "gxp-lpm.h"
#include "gxp-mailbox.h"
#include "gxp-pm.h"
-#include "gxp-telemetry.h"
#include "gxp-vd.h"
#include "gxp-wakelock.h"
#include "gxp.h"
@@ -303,25 +304,24 @@ static int gxp_log_buff_set(void *data, u64 val)
{
struct gxp_dev *gxp = (struct gxp_dev *)data;
int i;
- u64 **buffers;
+ struct gxp_coherent_buf *buffers;
u64 *ptr;
- mutex_lock(&gxp->telemetry_mgr->lock);
+ mutex_lock(&gxp->core_telemetry_mgr->lock);
- if (!gxp->telemetry_mgr->logging_buff_data) {
- dev_err(gxp->dev, "%s: Logging buffer has not been created\n",
- __func__);
- mutex_unlock(&gxp->telemetry_mgr->lock);
+ if (!gxp->core_telemetry_mgr->logging_buff_data) {
+ dev_err(gxp->dev, "Logging buffer has not been created");
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
return -ENODEV;
}
- buffers = (u64 **)gxp->telemetry_mgr->logging_buff_data->buffers;
+ buffers = gxp->core_telemetry_mgr->logging_buff_data->buffers;
for (i = 0; i < GXP_NUM_CORES; i++) {
- ptr = buffers[i];
+ ptr = buffers[i].vaddr;
*ptr = val;
}
- mutex_unlock(&gxp->telemetry_mgr->lock);
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
return 0;
}
@@ -329,22 +329,21 @@ static int gxp_log_buff_set(void *data, u64 val)
static int gxp_log_buff_get(void *data, u64 *val)
{
struct gxp_dev *gxp = (struct gxp_dev *)data;
- u64 **buffers;
+ struct gxp_coherent_buf *buffers;
- mutex_lock(&gxp->telemetry_mgr->lock);
+ mutex_lock(&gxp->core_telemetry_mgr->lock);
- if (!gxp->telemetry_mgr->logging_buff_data) {
- dev_err(gxp->dev, "%s: Logging buffer has not been created\n",
- __func__);
- mutex_unlock(&gxp->telemetry_mgr->lock);
+ if (!gxp->core_telemetry_mgr->logging_buff_data) {
+ dev_err(gxp->dev, "Logging buffer has not been created");
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
return -ENODEV;
}
- buffers = (u64 **)gxp->telemetry_mgr->logging_buff_data->buffers;
+ buffers = gxp->core_telemetry_mgr->logging_buff_data->buffers;
- *val = *buffers[0];
+ *val = *(u64 *)(buffers[0].vaddr);
- mutex_unlock(&gxp->telemetry_mgr->lock);
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
return 0;
}
@@ -357,17 +356,17 @@ static int gxp_log_eventfd_signal_set(void *data, u64 val)
struct gxp_dev *gxp = (struct gxp_dev *)data;
int ret = 0;
- mutex_lock(&gxp->telemetry_mgr->lock);
+ mutex_lock(&gxp->core_telemetry_mgr->lock);
- if (!gxp->telemetry_mgr->logging_efd) {
+ if (!gxp->core_telemetry_mgr->logging_efd) {
ret = -ENODEV;
goto out;
}
- ret = eventfd_signal(gxp->telemetry_mgr->logging_efd, 1);
+ ret = eventfd_signal(gxp->core_telemetry_mgr->logging_efd, 1);
out:
- mutex_unlock(&gxp->telemetry_mgr->lock);
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
return ret;
}
@@ -471,6 +470,8 @@ void gxp_create_debugfs(struct gxp_dev *gxp)
void gxp_remove_debugfs(struct gxp_dev *gxp)
{
+ if (IS_GXP_TEST && !gxp->d_entry)
+ return;
debugfs_remove_recursive(gxp->d_entry);
/*
diff --git a/gxp-dma-iommu.c b/gxp-dma-iommu.c
index 877789d..3ce768f 100644
--- a/gxp-dma-iommu.c
+++ b/gxp-dma-iommu.c
@@ -6,6 +6,7 @@
*/
#include <linux/bits.h>
+#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
@@ -22,7 +23,7 @@
struct gxp_dma_iommu_manager {
struct gxp_dma_manager dma_mgr;
- struct iommu_domain *default_domain;
+ struct gxp_iommu_domain *default_domain;
struct gxp_ssmt ssmt;
};
@@ -138,7 +139,6 @@ static int gxp_map_core_shared_buffer(struct gxp_dev *gxp,
shared_size, IOMMU_READ | IOMMU_WRITE);
}
-
/* Reverts gxp_map_core_shared_buffer. */
static void gxp_unmap_core_shared_buffer(struct gxp_dev *gxp,
struct iommu_domain *domain)
@@ -152,6 +152,30 @@ static void gxp_unmap_core_shared_buffer(struct gxp_dev *gxp,
/* gxp-dma.h Interface */
+uint gxp_iommu_aux_get_pasid(struct gxp_dev *gxp,
+ struct gxp_iommu_domain *gdomain)
+{
+ return iommu_aux_get_pasid(gdomain->domain, gxp->dev);
+}
+
+struct gxp_iommu_domain *gxp_iommu_get_domain_for_dev(struct gxp_dev *gxp)
+{
+ struct gxp_iommu_domain *gdomain = gxp->default_domain;
+
+ if (IS_ERR_OR_NULL(gdomain)) {
+ gdomain = devm_kzalloc(gxp->dev, sizeof(*gdomain), GFP_KERNEL);
+ if (!gdomain)
+ return ERR_PTR(-ENOMEM);
+ gdomain->domain = iommu_get_domain_for_dev(gxp->dev);
+ if (!gdomain->domain) {
+ devm_kfree(gxp->dev, gdomain);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ return gdomain;
+}
+
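A probe-time sketch using the two helpers just introduced; the IS_ERR()
handling matches gxp_dma_init() below, and the PASID is only meaningful after
the domain has been aux-attached:

struct gxp_iommu_domain *gdomain;
uint pasid;

gdomain = gxp_iommu_get_domain_for_dev(gxp);
if (IS_ERR(gdomain))
        return PTR_ERR(gdomain);

pasid = gxp_iommu_aux_get_pasid(gxp, gdomain);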
int gxp_dma_init(struct gxp_dev *gxp)
{
struct gxp_dma_iommu_manager *mgr;
@@ -174,10 +198,10 @@ int gxp_dma_init(struct gxp_dev *gxp)
return ret;
}
- mgr->default_domain = iommu_get_domain_for_dev(gxp->dev);
- if (!mgr->default_domain) {
+ mgr->default_domain = gxp_iommu_get_domain_for_dev(gxp);
+ if (IS_ERR(mgr->default_domain)) {
dev_err(gxp->dev, "Failed to find default IOMMU domain\n");
- return -EIO;
+ return PTR_ERR(mgr->default_domain);
}
if (iommu_register_device_fault_handler(gxp->dev, sysmmu_fault_handler,
@@ -192,6 +216,15 @@ int gxp_dma_init(struct gxp_dev *gxp)
goto err_unreg_fault_handler;
}
+#if IS_ENABLED(CONFIG_ANDROID)
+ /* Enable best fit algorithm to minimize fragmentation */
+ ret = iommu_dma_enable_best_fit_algo(gxp->dev);
+ if (ret)
+ dev_warn(gxp->dev,
+ "Failed to enable best-fit IOVA allocator (%d)\n",
+ ret);
+#endif
+
gxp->dma_mgr = &(mgr->dma_mgr);
return 0;
@@ -211,9 +244,9 @@ void gxp_dma_exit(struct gxp_dev *gxp)
"Failed to unregister SysMMU fault handler\n");
}
-#define SYNC_BARRIERS_SIZE 0x100000
+#define SYNC_BARRIERS_SIZE 0x100000
#define SYNC_BARRIERS_TOP_OFFSET 0x100000
-#define EXT_TPU_MBX_SIZE 0x2000
+#define EXT_TPU_MBX_SIZE 0x2000
void gxp_dma_init_default_resources(struct gxp_dev *gxp)
{
@@ -229,29 +262,32 @@ void gxp_dma_init_default_resources(struct gxp_dev *gxp)
}
int gxp_dma_domain_attach_device(struct gxp_dev *gxp,
- struct iommu_domain *domain, uint core_list)
+ struct gxp_iommu_domain *gdomain,
+ uint core_list)
{
int ret;
- ret = iommu_aux_attach_device(domain, gxp->dev);
+ ret = iommu_aux_attach_device(gdomain->domain, gxp->dev);
if (ret)
goto out;
- gxp_dma_ssmt_program(gxp, domain, core_list);
+ gxp_dma_ssmt_program(gxp, gdomain->domain, core_list);
out:
return ret;
}
void gxp_dma_domain_detach_device(struct gxp_dev *gxp,
- struct iommu_domain *domain)
+ struct gxp_iommu_domain *gdomain)
{
- iommu_aux_detach_device(domain, gxp->dev);
+ iommu_aux_detach_device(gdomain->domain, gxp->dev);
}
-int gxp_dma_map_core_resources(struct gxp_dev *gxp, struct iommu_domain *domain,
- uint core_list, u8 slice_index)
+int gxp_dma_map_core_resources(struct gxp_dev *gxp,
+ struct gxp_iommu_domain *gdomain, uint core_list,
+ u8 slice_index)
{
int ret;
uint i;
+ struct iommu_domain *domain = gdomain->domain;
ret = iommu_map(domain, gxp->regs.daddr, gxp->regs.paddr,
gxp->regs.size, IOMMU_READ | IOMMU_WRITE);
@@ -314,14 +350,16 @@ err:
 * Any resource that hasn't been mapped yet will cause `iommu_unmap()`
 * to return immediately, so it's safe to try to unmap everything.
*/
- gxp_dma_unmap_core_resources(gxp, domain, core_list);
+ gxp_dma_unmap_core_resources(gxp, gdomain, core_list);
return ret;
}
void gxp_dma_unmap_core_resources(struct gxp_dev *gxp,
- struct iommu_domain *domain, uint core_list)
+ struct gxp_iommu_domain *gdomain,
+ uint core_list)
{
uint i;
+ struct iommu_domain *domain = gdomain->domain;
/* Only unmap the TPU mailboxes if they were found on probe */
if (gxp->tpu_dev.mbx_paddr) {
@@ -351,10 +389,9 @@ void gxp_dma_unmap_core_resources(struct gxp_dev *gxp,
iommu_unmap(domain, gxp->regs.daddr, gxp->regs.size);
}
-static inline struct sg_table *
-alloc_sgt_for_buffer(void *ptr, size_t size,
- struct iommu_domain *domain,
- dma_addr_t daddr)
+static inline struct sg_table *alloc_sgt_for_buffer(void *ptr, size_t size,
+ struct iommu_domain *domain,
+ dma_addr_t daddr)
{
struct sg_table *sgt;
ulong offset;
@@ -392,7 +429,7 @@ alloc_sgt_for_buffer(void *ptr, size_t size,
*/
size_in_page = size > (PAGE_SIZE - offset_in_page(ptr)) ?
PAGE_SIZE - offset_in_page(ptr) :
- size;
+ size;
page = phys_to_page(iommu_iova_to_phys(domain, daddr));
sg_set_page(next, page, size_in_page, offset_in_page(ptr));
size -= size_in_page;
@@ -420,9 +457,10 @@ alloc_sgt_for_buffer(void *ptr, size_t size,
return sgt;
}
-#if (IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_ANDROID)) && !IS_ENABLED(CONFIG_GXP_GEM5)
-int gxp_dma_map_tpu_buffer(struct gxp_dev *gxp, struct iommu_domain *domain,
- uint core_list,
+#if (IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_ANDROID)) && \
+ !IS_ENABLED(CONFIG_GXP_GEM5)
+int gxp_dma_map_tpu_buffer(struct gxp_dev *gxp,
+ struct gxp_iommu_domain *gdomain, uint core_list,
struct edgetpu_ext_mailbox_info *mbx_info)
{
uint orig_core_list = core_list;
@@ -430,6 +468,7 @@ int gxp_dma_map_tpu_buffer(struct gxp_dev *gxp, struct iommu_domain *domain,
int core;
int ret;
int i = 0;
+ struct iommu_domain *domain = gdomain->domain;
while (core_list) {
phys_addr_t cmdq_pa = mbx_info->mailboxes[i].cmdq_pa;
@@ -465,12 +504,13 @@ error:
}
void gxp_dma_unmap_tpu_buffer(struct gxp_dev *gxp,
- struct iommu_domain *domain,
+ struct gxp_iommu_domain *gdomain,
struct gxp_tpu_mbx_desc mbx_desc)
{
uint core_list = mbx_desc.phys_core_list;
u64 queue_iova;
int core;
+ struct iommu_domain *domain = gdomain->domain;
while (core_list) {
core = ffs(core_list) - 1;
@@ -481,11 +521,11 @@ void gxp_dma_unmap_tpu_buffer(struct gxp_dev *gxp,
mbx_desc.respq_size);
}
}
-#endif // (CONFIG_GXP_TEST || CONFIG_ANDROID) && !CONFIG_GXP_GEM5
+#endif // (CONFIG_GXP_TEST || CONFIG_ANDROID) && !CONFIG_GXP_GEM5
-int gxp_dma_map_allocated_coherent_buffer(struct gxp_dev *gxp, void *buf,
- struct iommu_domain *domain,
- size_t size, dma_addr_t dma_handle,
+int gxp_dma_map_allocated_coherent_buffer(struct gxp_dev *gxp,
+ struct gxp_coherent_buf *buf,
+ struct gxp_iommu_domain *gdomain,
uint gxp_dma_flags)
{
struct gxp_dma_iommu_manager *mgr = container_of(
@@ -493,9 +533,12 @@ int gxp_dma_map_allocated_coherent_buffer(struct gxp_dev *gxp, void *buf,
struct sg_table *sgt;
ssize_t size_mapped;
int ret = 0;
+ size_t size;
+ struct iommu_domain *domain = gdomain->domain;
- size = size < PAGE_SIZE ? PAGE_SIZE : size;
- sgt = alloc_sgt_for_buffer(buf, size, mgr->default_domain, dma_handle);
+ size = buf->size;
+ sgt = alloc_sgt_for_buffer(buf->vaddr, buf->size,
+ mgr->default_domain->domain, buf->dma_addr);
if (IS_ERR(sgt)) {
dev_err(gxp->dev,
"Failed to allocate sgt for coherent buffer\n");
@@ -507,10 +550,9 @@ int gxp_dma_map_allocated_coherent_buffer(struct gxp_dev *gxp, void *buf,
* `ssize_t` to encode errors that earlier versions throw out.
* Explicitly cast here for backwards compatibility.
*/
- size_mapped =
- (ssize_t)iommu_map_sg(domain, dma_handle,
- sgt->sgl, sgt->orig_nents,
- IOMMU_READ | IOMMU_WRITE);
+ size_mapped = (ssize_t)iommu_map_sg(domain, buf->dma_addr, sgt->sgl,
+ sgt->orig_nents,
+ IOMMU_READ | IOMMU_WRITE);
if (size_mapped != size)
ret = size_mapped < 0 ? -EINVAL : (int)size_mapped;
@@ -519,9 +561,10 @@ int gxp_dma_map_allocated_coherent_buffer(struct gxp_dev *gxp, void *buf,
return ret;
}
-void *gxp_dma_alloc_coherent(struct gxp_dev *gxp, struct iommu_domain *domain,
- size_t size, dma_addr_t *dma_handle, gfp_t flag,
- uint gxp_dma_flags)
+int gxp_dma_alloc_coherent_buf(struct gxp_dev *gxp,
+ struct gxp_iommu_domain *gdomain, size_t size,
+ gfp_t flag, uint gxp_dma_flags,
+ struct gxp_coherent_buf *buffer)
{
void *buf;
dma_addr_t daddr;
@@ -533,42 +576,47 @@ void *gxp_dma_alloc_coherent(struct gxp_dev *gxp, struct iommu_domain *domain,
buf = dma_alloc_coherent(gxp->dev, size, &daddr, flag);
if (!buf) {
dev_err(gxp->dev, "Failed to allocate coherent buffer\n");
- return NULL;
+ return -ENOMEM;
}
- if (domain != NULL) {
+
+ buffer->vaddr = buf;
+ buffer->size = size;
+ buffer->dma_addr = daddr;
+
+ if (gdomain != NULL) {
ret = gxp_dma_map_allocated_coherent_buffer(
- gxp, buf, domain, size, daddr, gxp_dma_flags);
+ gxp, buffer, gdomain, gxp_dma_flags);
if (ret) {
+ buffer->vaddr = NULL;
+ buffer->size = 0;
dma_free_coherent(gxp->dev, size, buf, daddr);
- return NULL;
+ return ret;
}
}
- if (dma_handle)
- *dma_handle = daddr;
+ buffer->dsp_addr = daddr;
- return buf;
+ return 0;
}
void gxp_dma_unmap_allocated_coherent_buffer(struct gxp_dev *gxp,
- struct iommu_domain *domain,
- size_t size, dma_addr_t dma_handle)
+ struct gxp_iommu_domain *gdomain,
+ struct gxp_coherent_buf *buf)
{
- size = size < PAGE_SIZE ? PAGE_SIZE : size;
- if (size != iommu_unmap(domain, dma_handle, size))
+ if (buf->size != iommu_unmap(gdomain->domain, buf->dma_addr, buf->size))
dev_warn(gxp->dev, "Failed to unmap coherent buffer\n");
}
-void gxp_dma_free_coherent(struct gxp_dev *gxp, struct iommu_domain *domain,
- size_t size, void *cpu_addr, dma_addr_t dma_handle)
+void gxp_dma_free_coherent_buf(struct gxp_dev *gxp,
+ struct gxp_iommu_domain *gdomain,
+ struct gxp_coherent_buf *buf)
{
- if (domain != NULL)
- gxp_dma_unmap_allocated_coherent_buffer(gxp, domain, size,
- dma_handle);
- dma_free_coherent(gxp->dev, size, cpu_addr, dma_handle);
+ if (gdomain != NULL)
+ gxp_dma_unmap_allocated_coherent_buffer(gxp, gdomain, buf);
+ dma_free_coherent(gxp->dev, buf->size, buf->vaddr, buf->dma_addr);
}
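A minimal alloc/use/free sketch of the buffer-centric replacement API; passing
a NULL domain allocates without mapping, exactly as the telemetry and
debug-dump call sites above do:

struct gxp_coherent_buf buf;
int ret;

ret = gxp_dma_alloc_coherent_buf(gxp, NULL, PAGE_SIZE, GFP_KERNEL, 0, &buf);
if (ret)
        return ret;

/* buf.vaddr is the kernel VA; buf.dsp_addr is the IOVA the cores use. */

gxp_dma_free_coherent_buf(gxp, NULL, &buf);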
-int gxp_dma_map_sg(struct gxp_dev *gxp, struct iommu_domain *domain,
+int gxp_dma_map_sg(struct gxp_dev *gxp, struct gxp_iommu_domain *gdomain,
struct scatterlist *sg, int nents,
enum dma_data_direction direction, unsigned long attrs,
uint gxp_dma_flags)
@@ -589,7 +637,8 @@ int gxp_dma_map_sg(struct gxp_dev *gxp, struct iommu_domain *domain,
* `ssize_t` to encode errors that earlier versions throw out.
* Explicitly cast here for backwards compatibility.
*/
- size_mapped = (ssize_t)iommu_map_sg(domain, daddr, sg, nents, prot);
+ size_mapped =
+ (ssize_t)iommu_map_sg(gdomain->domain, daddr, sg, nents, prot);
if (size_mapped <= 0)
goto err;
@@ -600,7 +649,7 @@ err:
return 0;
}
-void gxp_dma_unmap_sg(struct gxp_dev *gxp, struct iommu_domain *domain,
+void gxp_dma_unmap_sg(struct gxp_dev *gxp, struct gxp_iommu_domain *gdomain,
struct scatterlist *sg, int nents,
enum dma_data_direction direction, unsigned long attrs)
{
@@ -611,7 +660,7 @@ void gxp_dma_unmap_sg(struct gxp_dev *gxp, struct iommu_domain *domain,
for_each_sg (sg, s, nents, i)
size += sg_dma_len(s);
- if (!iommu_unmap(domain, sg_dma_address(sg), size))
+ if (!iommu_unmap(gdomain->domain, sg_dma_address(sg), size))
dev_warn(gxp->dev, "Failed to unmap sg\n");
dma_unmap_sg_attrs(gxp->dev, sg, nents, direction, attrs);
@@ -632,7 +681,8 @@ void gxp_dma_sync_sg_for_device(struct gxp_dev *gxp, struct scatterlist *sg,
}
struct sg_table *
-gxp_dma_map_dmabuf_attachment(struct gxp_dev *gxp, struct iommu_domain *domain,
+gxp_dma_map_dmabuf_attachment(struct gxp_dev *gxp,
+ struct gxp_iommu_domain *gdomain,
struct dma_buf_attachment *attachment,
enum dma_data_direction direction)
{
@@ -655,8 +705,9 @@ gxp_dma_map_dmabuf_attachment(struct gxp_dev *gxp, struct iommu_domain *domain,
* `ssize_t` to encode errors that earlier versions throw out.
* Explicitly cast here for backwards compatibility.
*/
- size_mapped = (ssize_t)iommu_map_sg(domain, sg_dma_address(sgt->sgl),
- sgt->sgl, sgt->orig_nents, prot);
+ size_mapped =
+ (ssize_t)iommu_map_sg(gdomain->domain, sg_dma_address(sgt->sgl),
+ sgt->sgl, sgt->orig_nents, prot);
if (size_mapped <= 0) {
dev_err(gxp->dev, "Failed to map dma-buf: %ld\n", size_mapped);
/*
@@ -676,7 +727,7 @@ err:
}
void gxp_dma_unmap_dmabuf_attachment(struct gxp_dev *gxp,
- struct iommu_domain *domain,
+ struct gxp_iommu_domain *gdomain,
struct dma_buf_attachment *attachment,
struct sg_table *sgt,
enum dma_data_direction direction)
@@ -689,7 +740,7 @@ void gxp_dma_unmap_dmabuf_attachment(struct gxp_dev *gxp,
for_each_sg (sgt->sgl, s, sgt->nents, i)
size += sg_dma_len(s);
- if (!iommu_unmap(domain, sg_dma_address(sgt->sgl), size))
+ if (!iommu_unmap(gdomain->domain, sg_dma_address(sgt->sgl), size))
dev_warn(gxp->dev, "Failed to unmap dma-buf\n");
/* Unmap the attachment from the default domain */
diff --git a/gxp-dma.h b/gxp-dma.h
index 11c8405..7b33121 100644
--- a/gxp-dma.h
+++ b/gxp-dma.h
@@ -12,12 +12,31 @@
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/types.h>
-#if (IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_ANDROID)) && !IS_ENABLED(CONFIG_GXP_GEM5)
+#if (IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_ANDROID)) && \
+ !IS_ENABLED(CONFIG_GXP_GEM5)
#include <soc/google/tpu-ext.h>
#endif
#include "gxp-internal.h"
+struct gxp_iommu_domain {
+ struct iommu_domain *domain;
+ uint ctx_id;
+};
+
+struct gxp_coherent_buf {
+ void *vaddr; /* kernel VA, no allocation if NULL */
+ /* TODO(b/249030390): Use standard DMA-IOMMU APIs returned address */
+ dma_addr_t dma_addr; /* DMA handle obtained from DMA-IOMMU APIs. */
+ /*
+ * IOVA to be accessed by the device. Equal to @dma_addr when there is
+ * no self-managed IOMMU.
+ */
+ dma_addr_t dsp_addr;
+ u64 phys_addr; /* physical address, if available */
+ size_t size;
+};
+
struct gxp_dma_manager {
struct rb_root mapping_tree;
};
@@ -55,23 +74,24 @@ void gxp_dma_exit(struct gxp_dev *gxp);
* gxp_dma_domain_attach_device() - Attach the page table to the device and
* perform necessary initialization.
* @gxp: The GXP device to attach
- * @domain: The IOMMU domain to be attached.
+ * @gdomain: The IOMMU domain to be attached.
* @core_list: The physical cores to attach.
*
 * Caller ensures a BLOCK wakelock is held while attaching the IOMMU.
*/
int gxp_dma_domain_attach_device(struct gxp_dev *gxp,
- struct iommu_domain *domain, uint core_list);
+ struct gxp_iommu_domain *gdomain,
+ uint core_list);
/**
* gxp_dma_domain_detach_device() - Detach the page table from the device.
* @gxp: The GXP device to detach
- * @domain: The IOMMU domain to be detached
+ * @gdomain: The IOMMU domain to be detached
*
 * Caller ensures a BLOCK wakelock is held while detaching the IOMMU.
*/
void gxp_dma_domain_detach_device(struct gxp_dev *gxp,
- struct iommu_domain *domain);
+ struct gxp_iommu_domain *gdomain);
/**
* gxp_dma_init_default_resources() - Set the various buffers/registers with
@@ -87,7 +107,7 @@ void gxp_dma_init_default_resources(struct gxp_dev *gxp);
* gxp_dma_map_core_resources() - Map the various buffers/registers with
* fixed IOVAs on the IOMMU domain.
* @gxp: The GXP device to set up the mappings for
- * @domain: The IOMMU domain to be mapped on
+ * @gdomain: The IOMMU domain to be mapped on
* @core_list: The physical cores that may use the domain
 * @slice_index: The index of the shared buffer slice to be mapped
*
@@ -103,27 +123,30 @@ void gxp_dma_init_default_resources(struct gxp_dev *gxp);
* * 0 - Mappings created successfully
* * -EIO - Failed to create one or more of the mappings
*/
-int gxp_dma_map_core_resources(struct gxp_dev *gxp, struct iommu_domain *domain,
- uint core_list, u8 slice_index);
+int gxp_dma_map_core_resources(struct gxp_dev *gxp,
+ struct gxp_iommu_domain *gdomain, uint core_list,
+ u8 slice_index);
/**
* gxp_dma_unmap_core_resources() - Unmap the IOVAs mapped by
* gxp_dma_map_core_resources()
* @gxp: The GXP device that was passed to gxp_dma_map_core_resources()
- * @domain: The IOMMU domain to be unmapped
+ * @gdomain: The IOMMU domain to be unmapped
* @core_list: The physical cores the IOVAs were mapped for
*
* GXP firmware expects several buffers and registers to be mapped to fixed
* locations in their IOVA space. This function releases all those mappings.
*/
void gxp_dma_unmap_core_resources(struct gxp_dev *gxp,
- struct iommu_domain *domain, uint core_list);
+ struct gxp_iommu_domain *gdomain,
+ uint core_list);
-#if (IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_ANDROID)) && !IS_ENABLED(CONFIG_GXP_GEM5)
+#if (IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_ANDROID)) && \
+ !IS_ENABLED(CONFIG_GXP_GEM5)
/**
* gxp_dma_map_tpu_buffer() - Map the tpu mbx queue buffers with fixed IOVAs
* @gxp: The GXP device to set up the mappings for
- * @domain: The IOMMU domain to be mapped on
+ * @gdomain: The IOMMU domain to be mapped on
* @core_list: A bitfield enumerating the physical cores the mapping is for
* @mbx_info: Structure holding TPU-DSP mailbox queue buffer information
*
@@ -131,69 +154,67 @@ void gxp_dma_unmap_core_resources(struct gxp_dev *gxp,
* * 0 - Mappings created successfully
* * -EIO - Failed to create the mappings
*/
-int gxp_dma_map_tpu_buffer(struct gxp_dev *gxp, struct iommu_domain *domain,
- uint core_list,
+int gxp_dma_map_tpu_buffer(struct gxp_dev *gxp,
+ struct gxp_iommu_domain *gdomain, uint core_list,
struct edgetpu_ext_mailbox_info *mbx_info);
/**
* gxp_dma_unmap_tpu_buffer() - Unmap IOVAs mapped by gxp_dma_map_tpu_buffer()
* @gxp: The GXP device that was passed to gxp_dma_map_tpu_buffer()
- * @domain: The IOMMU domain the mappings were mapped on
+ * @gdomain: The IOMMU domain the mappings were mapped on
* @mbx_desc: Structure holding info for already mapped TPU-DSP mailboxes.
*/
-void gxp_dma_unmap_tpu_buffer(struct gxp_dev *gxp, struct iommu_domain *domain,
+void gxp_dma_unmap_tpu_buffer(struct gxp_dev *gxp,
+ struct gxp_iommu_domain *gdomain,
struct gxp_tpu_mbx_desc mbx_desc);
-#endif // (CONFIG_GXP_TEST || CONFIG_ANDROID) && !CONFIG_GXP_GEM5
+#endif // (CONFIG_GXP_TEST || CONFIG_ANDROID) && !CONFIG_GXP_GEM5
/**
* gxp_dma_map_allocated_coherent_buffer() - Map a coherent buffer
* @gxp: The GXP device to map the allocated buffer for
- * @domain: The IOMMU domain to be mapped on
- * @size: The size of the allocated buffer, in bytes
- * @dma_handle: The allocated device IOVA
+ * @buf: The coherent buffer
+ * @gdomain: The IOMMU domain to be mapped on
* @gxp_dma_flags: The type of mapping to create; currently unused
*
- * Return: Kernel virtual address of the mapped buffer
+ * Return: 0 on success else error code
*/
-int gxp_dma_map_allocated_coherent_buffer(struct gxp_dev *gxp, void *buf,
- struct iommu_domain *domain,
- size_t size, dma_addr_t dma_handle,
+int gxp_dma_map_allocated_coherent_buffer(struct gxp_dev *gxp,
+ struct gxp_coherent_buf *buf,
+ struct gxp_iommu_domain *gdomain,
uint gxp_dma_flags);
/**
* gxp_dma_unmap_allocated_coherent_buffer() - Unmap a coherent buffer
* @gxp: The GXP device the buffer was allocated and mapped for
- * @domain: The IOMMU domain the mapping was mapped
- * @size: The size of the buffer, in bytes
- * @dma_handle: The device IOVA
+ * @gdomain: The IOMMU domain the mapping was mapped on
+ * @buf: The coherent buffer
*/
void gxp_dma_unmap_allocated_coherent_buffer(struct gxp_dev *gxp,
- struct iommu_domain *domain,
- size_t size,
- dma_addr_t dma_handle);
+ struct gxp_iommu_domain *gdomain,
+ struct gxp_coherent_buf *buf);
/**
* gxp_dma_alloc_coherent() - Allocate and map a coherent buffer for a GXP core
* @gxp: The GXP device to map the allocated buffer for
- * @domain: The IOMMU domain the mapping to be mapped on
+ * @gdomain: The IOMMU domain the buffer is to be mapped on
* @size: The size of the buffer to be allocated, in bytes
- * @dma_handle: Reference to a variable to be set to the allocated IOVA
* @flag: The type of memory to allocate (see kmalloc)
* @gxp_dma_flags: The type of mapping to create; Currently unused
+ * @buffer: The coherent buffer
*
- * Return: Kernel virtual address of the allocated/mapped buffer
+ * Return: 0 on success, or a negative error code on failure
*
- * If the passed @domain is a null pointer, this function will only allocate a
- * buffer but not map it to the domain.
+ * If the passed @gdomain is a null pointer, this function will only allocate a
+ * buffer but not map it to the domain.
+ * Note: the allocated buffer's size may be larger than the requested size.
*/
-void *gxp_dma_alloc_coherent(struct gxp_dev *gxp, struct iommu_domain *domain,
- size_t size, dma_addr_t *dma_handle, gfp_t flag,
- uint gxp_dma_flags);
+int gxp_dma_alloc_coherent_buf(struct gxp_dev *gxp,
+ struct gxp_iommu_domain *gdomain, size_t size,
+ gfp_t flag, uint gxp_dma_flags,
+ struct gxp_coherent_buf *buffer);
/**
- * gxp_dma_free_coherent() - Unmap and free a coherent buffer
+ * gxp_dma_free_coherent_buf() - Unmap and free a coherent buffer
* @gxp: The GXP device the buffer was allocated and mapped for
- * @domain: The IOMMU domain the mapping was mapped to
- * @size: The size of the buffer, in bytes, passed to `gxp_dma_alloc_coherent()`
- * @cpu_addr: The kernel virtual address returned by `gxp_dma_alloc_coherent()`
- * @dma_handle: The device IOVA, set by `gxp_dma_alloc_coherent()`
+ * @gdomain: The IOMMU domain the mapping was mapped to
+ * @buf: The coherent buffer
*
* If the buffer is mapped via `gxp_dma_map_allocated_coherent_buffer`, the
* caller must call `gxp_dma_unmap_allocated_coherent_buffer` to unmap before
@@ -202,13 +223,14 @@ void *gxp_dma_alloc_coherent(struct gxp_dev *gxp, struct iommu_domain *domain,
- * If the passed @domain is a null pointer, this function will only free the
- * buffer but not do any unmapping.
+ * If the passed @gdomain is a null pointer, this function will only free the
+ * buffer but not do any unmapping.
*/
-void gxp_dma_free_coherent(struct gxp_dev *gxp, struct iommu_domain *domain,
- size_t size, void *cpu_addr, dma_addr_t dma_handle);
+void gxp_dma_free_coherent_buf(struct gxp_dev *gxp,
+ struct gxp_iommu_domain *gdomain,
+ struct gxp_coherent_buf *buf);
/**
* gxp_dma_map_sg() - Create a mapping for a scatter-gather list
* @gxp: The GXP device to map the scatter-gather list for
- * @domain: The IOMMU domain to be mapped
+ * @gdomain: The IOMMU domain to be mapped
* @sg: The scatter-gather list of the buffer to be mapped
* @nents: The number of entries in @sg
* @direction: DMA direction
@@ -217,21 +239,21 @@ void gxp_dma_free_coherent(struct gxp_dev *gxp, struct iommu_domain *domain,
*
- * Return: The number of scatter-gather entries mapped to
+ * Return: The number of scatter-gather entries mapped
*/
-int gxp_dma_map_sg(struct gxp_dev *gxp, struct iommu_domain *domain,
+int gxp_dma_map_sg(struct gxp_dev *gxp, struct gxp_iommu_domain *gdomain,
struct scatterlist *sg, int nents,
enum dma_data_direction direction, unsigned long attrs,
uint gxp_dma_flags);
/**
* gxp_dma_unmap_sg() - Unmap a scatter-gather list
* @gxp: The GXP device the scatter-gather list was mapped for
- * @domain: The IOMMU domain mapping was mapped on
+ * @gdomain: The IOMMU domain the mapping was mapped on
* @sg: The scatter-gather list to unmap; The same one passed to
* `gxp_dma_map_sg()`
* @nents: The number of entries in @sg; Same value passed to `gxp_dma_map_sg()`
* @direction: DMA direction; Same as passed to `gxp_dma_map_sg()`
* @attrs: The same set of flags used by the base DMA API
*/
-void gxp_dma_unmap_sg(struct gxp_dev *gxp, struct iommu_domain *domain,
+void gxp_dma_unmap_sg(struct gxp_dev *gxp, struct gxp_iommu_domain *gdomain,
struct scatterlist *sg, int nents,
enum dma_data_direction direction, unsigned long attrs);
@@ -257,7 +279,7 @@ void gxp_dma_sync_sg_for_device(struct gxp_dev *gxp, struct scatterlist *sg,
/**
* gxp_dma_map_dmabuf_attachment() - Create a mapping for a dma-buf
* @gxp: The GXP device to map the dma-buf for
- * @domain: The IOMMU domain the dma-buf to be mapped on
+ * @gdomain: The IOMMU domain the dma-buf to be mapped on
* @attachment: An attachment, representing the dma-buf, obtained from
* `dma_buf_attach()`
* @direction: DMA direction
@@ -266,14 +288,15 @@ void gxp_dma_sync_sg_for_device(struct gxp_dev *gxp, struct scatterlist *sg,
* into the default IOMMU domain. Returns ERR_PTR on failure.
*/
struct sg_table *
-gxp_dma_map_dmabuf_attachment(struct gxp_dev *gxp, struct iommu_domain *domain,
+gxp_dma_map_dmabuf_attachment(struct gxp_dev *gxp,
+ struct gxp_iommu_domain *gdomain,
struct dma_buf_attachment *attachment,
enum dma_data_direction direction);
/**
* gxp_dma_unmap_dmabuf_attachment() - Unmap a dma-buf
* @gxp: The GXP device the dma-buf was mapped for
- * @domain: The IOMMU domain the buffer was mapped on
+ * @gdomain: The IOMMU domain the buffer was mapped on
* @attachment: The attachment, representing the dma-buf, that was passed to
* `gxp_dma_map_dmabuf_attachment()` to create the mapping
* @sgt: The scatter-gather table returned by `gxp_dma_map_dmabuf_attachment()`
@@ -281,9 +304,26 @@ gxp_dma_map_dmabuf_attachment(struct gxp_dev *gxp, struct iommu_domain *domain,
* @direction: DMA direction
*/
void gxp_dma_unmap_dmabuf_attachment(struct gxp_dev *gxp,
- struct iommu_domain *domain,
+ struct gxp_iommu_domain *gdomain,
struct dma_buf_attachment *attachment,
struct sg_table *sgt,
enum dma_data_direction direction);
+/**
+ * gxp_iommu_get_domain_for_dev() - Get default domain
+ * @gxp: The GXP device to get the default domain for
+ *
+ * Return: The gxp_iommu_domain wrapping the device's default IOMMU domain.
+ */
+struct gxp_iommu_domain *gxp_iommu_get_domain_for_dev(struct gxp_dev *gxp);
+
+/**
+ * gxp_iommu_aux_get_pasid() - Get the PASID corresponding to @gdomain
+ * @gxp: The GXP device attached to IOMMU
+ * @gdomain: The IOMMU domain to get the PASID for
+ *
+ * Return: PASID of the passed domain
+ */
+uint gxp_iommu_aux_get_pasid(struct gxp_dev *gxp,
+ struct gxp_iommu_domain *gdomain);
#endif /* __GXP_DMA_H__ */
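The reworked allocation API above returns an error code and fills a caller-provided `struct gxp_coherent_buf` instead of returning a kernel virtual address. A minimal usage sketch, assuming the struct exposes the `vaddr`/`dsp_addr`/`size` fields used elsewhere in this patch (the helper below is illustrative, not part of the patch):

static int example_coherent_buf_roundtrip(struct gxp_dev *gxp,
					  struct gxp_iommu_domain *gdomain)
{
	struct gxp_coherent_buf buf;
	int ret;

	/* Allocate one page and map it onto @gdomain in a single call. */
	ret = gxp_dma_alloc_coherent_buf(gxp, gdomain, PAGE_SIZE, GFP_KERNEL,
					 0, &buf);
	if (ret)
		return ret;

	/* buf.vaddr is the kernel VA; buf.dsp_addr is the device IOVA. */
	memset(buf.vaddr, 0, buf.size);

	/* Unmaps from @gdomain (when non-NULL) and frees the memory. */
	gxp_dma_free_coherent_buf(gxp, gdomain, &buf);
	return 0;
}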
diff --git a/gxp-dmabuf.c b/gxp-dmabuf.c
index b68df85..db20c07 100644
--- a/gxp-dmabuf.c
+++ b/gxp-dmabuf.c
@@ -50,7 +50,7 @@ static void destroy_dmabuf_mapping(struct gxp_mapping *mapping)
}
struct gxp_mapping *gxp_dmabuf_map(struct gxp_dev *gxp,
- struct iommu_domain *domain, int fd,
+ struct gxp_iommu_domain *domain, int fd,
u32 flags, enum dma_data_direction dir)
{
struct dma_buf *dmabuf;
diff --git a/gxp-dmabuf.h b/gxp-dmabuf.h
index 8600c0e..8e1e056 100644
--- a/gxp-dmabuf.h
+++ b/gxp-dmabuf.h
@@ -27,7 +27,7 @@
* mapping of the dma-buf. Returns ERR_PTR on failure.
*/
struct gxp_mapping *gxp_dmabuf_map(struct gxp_dev *gxp,
- struct iommu_domain *domain, int fd,
+ struct gxp_iommu_domain *domain, int fd,
u32 flags, enum dma_data_direction dir);
#endif /* __GXP_DMABUF_H__ */
diff --git a/gxp-domain-pool.c b/gxp-domain-pool.c
index 4fddd9b..ada1775 100644
--- a/gxp-domain-pool.c
+++ b/gxp-domain-pool.c
@@ -9,14 +9,40 @@
#include <linux/iommu.h>
#include <linux/slab.h>
+#include "gxp-dma.h"
#include "gxp-domain-pool.h"
#include "gxp-internal.h"
+static struct gxp_iommu_domain *gxp_domain_alloc(struct gxp_dev *gxp)
+{
+ struct iommu_domain *domain;
+ struct gxp_iommu_domain *gdomain;
+
+ gdomain = kmalloc(sizeof(*gdomain), GFP_KERNEL);
+ if (!gdomain)
+ return ERR_PTR(-ENOMEM);
+
+ domain = iommu_domain_alloc(gxp->dev->bus);
+ if (!domain) {
+ kfree(gdomain);
+ return ERR_PTR(-ENOMEM);
+ }
+ gdomain->domain = domain;
+
+ return gdomain;
+}
+
+static void gxp_domain_free(struct gxp_iommu_domain *gdomain)
+{
+ iommu_domain_free(gdomain->domain);
+ kfree(gdomain);
+}
+
int gxp_domain_pool_init(struct gxp_dev *gxp, struct gxp_domain_pool *pool,
unsigned int size)
{
unsigned int i;
- struct iommu_domain *domain;
+ struct gxp_iommu_domain *gdomain;
int __maybe_unused ret;
pool->size = size;
@@ -34,45 +60,44 @@ int gxp_domain_pool_init(struct gxp_dev *gxp, struct gxp_domain_pool *pool,
return -ENOMEM;
}
for (i = 0; i < size; i++) {
- domain = iommu_domain_alloc(pool->gxp->dev->bus);
- if (!domain) {
+ gdomain = gxp_domain_alloc(pool->gxp);
+ if (IS_ERR(gdomain)) {
dev_err(pool->gxp->dev,
- "Failed to allocate iommu domain %d of %u\n",
+ "Failed to allocate gxp iommu domain %d of %u\n",
i + 1, size);
gxp_domain_pool_destroy(pool);
return -ENOMEM;
}
-
#if IS_ENABLED(CONFIG_GXP_GEM5)
/*
* Gem5 uses arm-smmu-v3 which requires domain finalization to do iommu map. Calling
* iommu_aux_attach_device to finalize the allocated domain and detach the device
* right after that.
*/
- ret = iommu_aux_attach_device(domain, pool->gxp->dev);
+ ret = iommu_aux_attach_device(gdomain->domain, pool->gxp->dev);
if (ret) {
- dev_err(pool->gxp->dev,
+ dev_err(gxp->dev,
"Failed to attach device to iommu domain %d of %u, ret=%d\n",
i + 1, size, ret);
- iommu_domain_free(domain);
+ gxp_domain_free(gdomain);
gxp_domain_pool_destroy(pool);
return ret;
}
- iommu_aux_detach_device(domain, pool->gxp->dev);
+ iommu_aux_detach_device(gdomain->domain, pool->gxp->dev);
#endif /* CONFIG_GXP_GEM5 */
- pool->array[i] = domain;
+ pool->array[i] = gdomain;
}
return 0;
}
-struct iommu_domain *gxp_domain_pool_alloc(struct gxp_domain_pool *pool)
+struct gxp_iommu_domain *gxp_domain_pool_alloc(struct gxp_domain_pool *pool)
{
int id;
if (!pool->size)
- return iommu_domain_alloc(pool->gxp->dev->bus);
+ return gxp_domain_alloc(pool->gxp);
id = ida_alloc_max(&pool->idp, pool->size - 1, GFP_KERNEL);
@@ -88,16 +113,16 @@ struct iommu_domain *gxp_domain_pool_alloc(struct gxp_domain_pool *pool)
return pool->array[id];
}
-void gxp_domain_pool_free(struct gxp_domain_pool *pool, struct iommu_domain *domain)
+void gxp_domain_pool_free(struct gxp_domain_pool *pool, struct gxp_iommu_domain *gdomain)
{
int id;
if (!pool->size) {
- iommu_domain_free(domain);
+ gxp_domain_free(gdomain);
return;
}
for (id = 0; id < pool->size; id++) {
- if (pool->array[id] == domain) {
+ if (pool->array[id] == gdomain) {
dev_dbg(pool->gxp->dev, "Released domain from pool with id = %d\n", id);
ida_free(&pool->idp, id);
return;
@@ -117,7 +142,7 @@ void gxp_domain_pool_destroy(struct gxp_domain_pool *pool)
for (i = 0; i < pool->size; i++) {
if (pool->array[i])
- iommu_domain_free(pool->array[i]);
+ gxp_domain_free(pool->array[i]);
}
ida_destroy(&pool->idp);
diff --git a/gxp-domain-pool.h b/gxp-domain-pool.h
index ee95155..2a262ff 100644
--- a/gxp-domain-pool.h
+++ b/gxp-domain-pool.h
@@ -20,7 +20,7 @@ struct gxp_domain_pool {
* dynamic domain allocation using the IOMMU API directly.
*/
unsigned int size;
- struct iommu_domain **array; /* Array holding the pointers to pre-allocated domains. */
+ struct gxp_iommu_domain **array; /* Array holding the pointers to pre-allocated domains. */
struct gxp_dev *gxp; /* The gxp device used for logging warnings/errors. */
};
@@ -42,10 +42,10 @@ int gxp_domain_pool_init(struct gxp_dev *gxp, struct gxp_domain_pool *pool,
- * Allocates a domain from the pool
- * returns NULL on error.
+ * Allocates a domain from the pool.
+ * Returns NULL or an ERR_PTR on error.
*/
-struct iommu_domain *gxp_domain_pool_alloc(struct gxp_domain_pool *pool);
+struct gxp_iommu_domain *gxp_domain_pool_alloc(struct gxp_domain_pool *pool);
/* Releases a domain from the pool. */
-void gxp_domain_pool_free(struct gxp_domain_pool *pool, struct iommu_domain *domain);
+void gxp_domain_pool_free(struct gxp_domain_pool *pool, struct gxp_iommu_domain *domain);
/* Cleans up all resources used by the domain pool. */
void gxp_domain_pool_destroy(struct gxp_domain_pool *pool);
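Taken together, the pool now hands out `struct gxp_iommu_domain` wrappers end to end. A lifecycle sketch under those assumptions (error handling abbreviated; the helper name is illustrative):

static int example_domain_pool_lifecycle(struct gxp_dev *gxp)
{
	struct gxp_domain_pool pool;
	struct gxp_iommu_domain *gdomain;
	int ret;

	/* Pre-allocate 4 domains; a size of 0 falls back to dynamic allocation. */
	ret = gxp_domain_pool_init(gxp, &pool, 4);
	if (ret)
		return ret;

	gdomain = gxp_domain_pool_alloc(&pool);
	if (IS_ERR_OR_NULL(gdomain)) {
		ret = -ENOMEM;
		goto out;
	}

	/* ... map buffers onto gdomain via the gxp-dma API ... */

	gxp_domain_pool_free(&pool, gdomain);
out:
	gxp_domain_pool_destroy(&pool);
	return ret;
}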
diff --git a/gxp-firmware-data.c b/gxp-firmware-data.c
index bb13d89..6f22f8d 100644
--- a/gxp-firmware-data.c
+++ b/gxp-firmware-data.c
@@ -87,7 +87,7 @@ struct gxp_fw_data_manager {
struct fw_memory_allocator *allocator;
struct fw_memory sys_desc_mem;
struct fw_memory wdog_mem;
- struct fw_memory telemetry_mem;
+ struct fw_memory core_telemetry_mem;
struct fw_memory debug_dump_mem;
};
@@ -266,18 +266,18 @@ static struct fw_memory init_watchdog(struct gxp_fw_data_manager *mgr)
return mem;
}
-static struct fw_memory init_telemetry(struct gxp_fw_data_manager *mgr)
+static struct fw_memory init_core_telemetry(struct gxp_fw_data_manager *mgr)
{
- struct gxp_telemetry_descriptor *tel_region;
+ struct gxp_core_telemetry_descriptor *tel_region;
struct fw_memory mem;
mem_alloc_allocate(mgr->allocator, &mem, sizeof(*tel_region),
- __alignof__(struct gxp_telemetry_descriptor));
+ __alignof__(struct gxp_core_telemetry_descriptor));
tel_region = mem.host_addr;
/*
- * Telemetry is disabled for now.
+ * Core telemetry is disabled for now.
	 * Subsequent calls to the FW data module can be used to populate or
* depopulate the descriptor pointers on demand.
*/
@@ -292,7 +292,7 @@ static struct fw_memory init_debug_dump(struct gxp_dev *gxp)
if (gxp->debug_dump_mgr) {
mem.host_addr = gxp->debug_dump_mgr->buf.vaddr;
- mem.device_addr = gxp->debug_dump_mgr->buf.daddr;
+ mem.device_addr = gxp->debug_dump_mgr->buf.dsp_addr;
mem.sz = gxp->debug_dump_mgr->buf.size;
} else {
mem.host_addr = 0;
@@ -589,9 +589,10 @@ int gxp_fw_data_init(struct gxp_dev *gxp)
mgr->wdog_mem = init_watchdog(mgr);
mgr->system_desc->watchdog_dev_addr = mgr->wdog_mem.device_addr;
- /* Allocate the descriptor for device-side telemetry */
- mgr->telemetry_mem = init_telemetry(mgr);
- mgr->system_desc->telemetry_dev_addr = mgr->telemetry_mem.device_addr;
+ /* Allocate the descriptor for device-side core telemetry */
+ mgr->core_telemetry_mem = init_core_telemetry(mgr);
+ mgr->system_desc->core_telemetry_dev_addr =
+ mgr->core_telemetry_mem.device_addr;
/* Set the debug dump region parameters if available */
mgr->debug_dump_mem = init_debug_dump(gxp);
@@ -710,7 +711,7 @@ void gxp_fw_data_destroy(struct gxp_dev *gxp)
if (!mgr)
return;
- mem_alloc_free(mgr->allocator, &mgr->telemetry_mem);
+ mem_alloc_free(mgr->allocator, &mgr->core_telemetry_mem);
mem_alloc_free(mgr->allocator, &mgr->wdog_mem);
mem_alloc_free(mgr->allocator, &mgr->sys_desc_mem);
mem_alloc_destroy(mgr->allocator);
@@ -730,15 +731,16 @@ void gxp_fw_data_destroy(struct gxp_dev *gxp)
}
}
-int gxp_fw_data_set_telemetry_descriptors(struct gxp_dev *gxp, u8 type,
- u32 host_status,
- dma_addr_t *buffer_addrs,
- u32 per_buffer_size)
+int gxp_fw_data_set_core_telemetry_descriptors(struct gxp_dev *gxp, u8 type,
+ u32 host_status,
+ struct gxp_coherent_buf *buffers,
+ u32 per_buffer_size)
{
- struct gxp_telemetry_descriptor *descriptor =
- gxp->data_mgr->telemetry_mem.host_addr;
- struct telemetry_descriptor *core_descriptors;
+ struct gxp_core_telemetry_descriptor *descriptor =
+ gxp->data_mgr->core_telemetry_mem.host_addr;
+ struct core_telemetry_descriptor *core_descriptors;
uint core;
+ bool enable;
if (type == GXP_TELEMETRY_TYPE_LOGGING)
core_descriptors = descriptor->per_core_loggers;
@@ -747,26 +749,37 @@ int gxp_fw_data_set_telemetry_descriptors(struct gxp_dev *gxp, u8 type,
else
return -EINVAL;
- /* Validate that the provided IOVAs are addressable (i.e. 32-bit) */
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (buffer_addrs[core] > U32_MAX)
- return -EINVAL;
- }
+ enable = (host_status & GXP_CORE_TELEMETRY_HOST_STATUS_ENABLED);
+
+ if (enable) {
+		if (!buffers)
+			return -EINVAL;
+
+		/* Validate that the provided IOVAs are addressable (i.e. 32-bit) */
+		for (core = 0; core < GXP_NUM_CORES; core++) {
+			if (buffers[core].dsp_addr > U32_MAX ||
+			    buffers[core].size != per_buffer_size)
+				return -EINVAL;
+		}
- for (core = 0; core < GXP_NUM_CORES; core++) {
- core_descriptors[core].host_status = host_status;
- core_descriptors[core].buffer_addr = (u32)buffer_addrs[core];
- core_descriptors[core].buffer_size = per_buffer_size;
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ core_descriptors[core].host_status = host_status;
+ core_descriptors[core].buffer_addr = (u32)buffers[core].dsp_addr;
+ core_descriptors[core].buffer_size = per_buffer_size;
+ }
+ } else {
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ core_descriptors[core].host_status = host_status;
+ core_descriptors[core].buffer_addr = 0;
+ core_descriptors[core].buffer_size = 0;
+ }
}
return 0;
}
-u32 gxp_fw_data_get_telemetry_device_status(struct gxp_dev *gxp, uint core,
- u8 type)
+u32 gxp_fw_data_get_core_telemetry_device_status(struct gxp_dev *gxp, uint core,
+ u8 type)
{
- struct gxp_telemetry_descriptor *descriptor =
- gxp->data_mgr->telemetry_mem.host_addr;
+ struct gxp_core_telemetry_descriptor *descriptor =
+ gxp->data_mgr->core_telemetry_mem.host_addr;
if (core >= GXP_NUM_CORES)
return 0;
diff --git a/gxp-firmware-data.h b/gxp-firmware-data.h
index e9851ed..a947cb8 100644
--- a/gxp-firmware-data.h
+++ b/gxp-firmware-data.h
@@ -9,6 +9,7 @@
#ifndef __GXP_FIRMWARE_DATA_H__
#define __GXP_FIRMWARE_DATA_H__
+#include "gxp-dma.h"
#include "gxp-internal.h"
/**
@@ -54,42 +55,43 @@ void gxp_fw_data_destroy_app(struct gxp_dev *gxp, void *application);
void gxp_fw_data_destroy(struct gxp_dev *gxp);
/**
- * gxp_fw_data_set_telemetry_descriptors() - Set new logging or tracing buffers
- * for firmware to write to.
+ * gxp_fw_data_set_core_telemetry_descriptors() - Set new logging or tracing
+ * buffers for firmware to write
+ * to.
* @gxp: The GXP device to set buffer descriptors for
* @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
- * @host_status: Bitfield describing the host's telemetry status. See the
+ * @host_status: Bitfield describing the host's core telemetry status. See the
* bit definitions in gxp-host-device-structs.h.
- * @buffer_addrs: An array containing the IOVA each physical core can access
- * its logging or tracing buffer at
+ * @buffers: An array of coherent buffers for logging and tracing
* @per_buffer_size: The size of each core's logging or tracing buffer in bytes
*
* `gxp_fw_data_init()` must have been called before this function.
*
- * Caller must hold gxp->telemetry_mgr's lock.
+ * Caller must hold gxp->core_telemetry_mgr's lock.
*
* Return:
* 0 - Success
- * -EINVAL - Invalid @type provided or @buffer_addrs are not addressable by @gxp
+ * -EINVAL - Invalid @type provided or @buffers are not addressable by @gxp
*/
-int gxp_fw_data_set_telemetry_descriptors(struct gxp_dev *gxp, u8 type,
- u32 host_status,
- dma_addr_t *buffer_addrs,
- u32 per_buffer_size);
+int gxp_fw_data_set_core_telemetry_descriptors(struct gxp_dev *gxp, u8 type,
+ u32 host_status,
+ struct gxp_coherent_buf *buffers,
+ u32 per_buffer_size);
/**
- * gxp_fw_data_get_telemetry_device_status() - Returns a bitfield describing a
- * core's telemetry status.
- * @gxp: The GXP device to get device telemetry status for
- * @core: The core in @gxp to get the device telemetry status for
+ * gxp_fw_data_get_core_telemetry_device_status() - Returns a bitfield
+ * describing a core's
+ * telemetry status.
+ * @gxp: The GXP device to get core telemetry status for
+ * @core: The core in @gxp to get the core telemetry status for
* @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
*
- * Caller must hold gxp->telemetry_mgr's lock.
+ * Caller must hold gxp->core_telemetry_mgr's lock.
*
* Return: The bitfield describing @core's telemetry status. If @core or @type
* are invalid, the result will always be 0.
*/
-u32 gxp_fw_data_get_telemetry_device_status(struct gxp_dev *gxp, uint core,
- u8 type);
+u32 gxp_fw_data_get_core_telemetry_device_status(struct gxp_dev *gxp, uint core,
+ u8 type);
#endif /* __GXP_FIRMWARE_DATA_H__ */
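For context, here is roughly how a caller might enable core logging with the renamed API, assuming it already holds gxp->core_telemetry_mgr's lock and has prepared GXP_NUM_CORES coherent buffers of equal size (buffer setup elided; the helper name is illustrative):

static int example_enable_core_logging(struct gxp_dev *gxp,
				       struct gxp_coherent_buf *buffers,
				       u32 per_buffer_size)
{
	/* Mark the buffers as set up by the host; see gxp-host-device-structs.h. */
	u32 host_status = GXP_CORE_TELEMETRY_HOST_STATUS_ENABLED;

	/* Publishes one descriptor (32-bit IOVA + size) per physical core. */
	return gxp_fw_data_set_core_telemetry_descriptors(
		gxp, GXP_TELEMETRY_TYPE_LOGGING, host_status, buffers,
		per_buffer_size);
}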
diff --git a/gxp-firmware.c b/gxp-firmware.c
index e9790d2..2ceac9e 100644
--- a/gxp-firmware.c
+++ b/gxp-firmware.c
@@ -18,6 +18,7 @@
#include "gxp-bpm.h"
#include "gxp-config.h"
+#include "gxp-core-telemetry.h"
#include "gxp-debug-dump.h"
#include "gxp-doorbell.h"
#include "gxp-firmware.h"
@@ -27,7 +28,6 @@
#include "gxp-mailbox.h"
#include "gxp-notification.h"
#include "gxp-pm.h"
-#include "gxp-telemetry.h"
#include "gxp-vd.h"
#if IS_ENABLED(CONFIG_GXP_TEST)
@@ -642,6 +642,9 @@ void gxp_fw_destroy(struct gxp_dev *gxp)
uint core;
struct gxp_firmware_manager *mgr = gxp->firmware_mgr;
+ if (IS_GXP_TEST && !mgr)
+ return;
+
device_remove_group(gxp->dev, &gxp_firmware_attr_group);
for (core = 0; core < GXP_NUM_CORES; core++) {
@@ -699,6 +702,24 @@ err_authenticate_firmware:
return ret;
}
+/* TODO(b/253464747): Refactor these interrupt handlers and gxp-doorbell.c. */
+static void enable_core_interrupts(struct gxp_dev *gxp, uint core)
+{
+ /*
+	 * GXP_CORE_REG_COMMON_INT_MASK_0 is handled in the doorbell module,
+	 * so we don't need to enable it here.
+ */
+ gxp_write_32(gxp, GXP_CORE_REG_COMMON_INT_MASK_1(core), 0xffffffff);
+ gxp_write_32(gxp, GXP_CORE_REG_DEDICATED_INT_MASK(core), 0xffffffff);
+}
+
+static void disable_core_interrupts(struct gxp_dev *gxp, uint core)
+{
+ gxp_write_32(gxp, GXP_CORE_REG_COMMON_INT_MASK_0(core), 0);
+ gxp_write_32(gxp, GXP_CORE_REG_COMMON_INT_MASK_1(core), 0);
+ gxp_write_32(gxp, GXP_CORE_REG_DEDICATED_INT_MASK(core), 0);
+}
+
static int gxp_firmware_setup(struct gxp_dev *gxp, uint core)
{
int ret = 0;
@@ -724,8 +745,10 @@ static int gxp_firmware_setup(struct gxp_dev *gxp, uint core)
if (ret) {
dev_err(gxp->dev, "Failed to power up core %u\n", core);
gxp_firmware_unload(gxp, core);
+ return ret;
}
+ enable_core_interrupts(gxp, core);
return ret;
}
@@ -782,10 +805,10 @@ static int gxp_firmware_finish_startup(struct gxp_dev *gxp,
gxp_notification_register_handler(
gxp, core, HOST_NOTIF_DEBUG_DUMP_READY, work);
- work = gxp_telemetry_get_notification_handler(gxp, core);
+ work = gxp_core_telemetry_get_notification_handler(gxp, core);
if (work)
gxp_notification_register_handler(
- gxp, core, HOST_NOTIF_TELEMETRY_STATUS, work);
+ gxp, core, HOST_NOTIF_CORE_TELEMETRY_STATUS, work);
mgr->firmware_running |= BIT(core);
@@ -810,7 +833,7 @@ static void gxp_firmware_stop_core(struct gxp_dev *gxp,
gxp_notification_unregister_handler(gxp, core,
HOST_NOTIF_DEBUG_DUMP_READY);
gxp_notification_unregister_handler(gxp, core,
- HOST_NOTIF_TELEMETRY_STATUS);
+ HOST_NOTIF_CORE_TELEMETRY_STATUS);
if (gxp->mailbox_mgr->release_mailbox) {
gxp->mailbox_mgr->release_mailbox(
@@ -819,8 +842,14 @@ static void gxp_firmware_stop_core(struct gxp_dev *gxp,
dev_notice(gxp->dev, "Mailbox %u released\n", core);
}
- if (vd->state == GXP_VD_RUNNING)
+ if (vd->state == GXP_VD_RUNNING) {
+ /*
+ * Disable interrupts to prevent cores from being woken up
+ * unexpectedly.
+ */
+ disable_core_interrupts(gxp, core);
gxp_pm_core_off(gxp, core);
+ }
gxp_firmware_unload(gxp, core);
}
diff --git a/gxp-host-device-structs.h b/gxp-host-device-structs.h
index 8e4723c..1c993f1 100644
--- a/gxp-host-device-structs.h
+++ b/gxp-host-device-structs.h
@@ -17,13 +17,13 @@
#define MAX_NUM_CORES 4
#define NUM_SYSTEM_SEMAPHORES 64
-/* Bit masks for the status fields in the telemetry structures. */
-/* The telemetry buffers have been setup by the host. */
-#define GXP_TELEMETRY_HOST_STATUS_ENABLED (1 << 0)
-/* The telemetry buffers are being used by the device. */
-#define GXP_TELEMETRY_DEVICE_STATUS_ENABLED (1 << 0)
+/* Bit masks for the status fields in the core telemetry structures. */
+/* The core telemetry buffers have been setup by the host. */
+#define GXP_CORE_TELEMETRY_HOST_STATUS_ENABLED (1 << 0)
+/* The core telemetry buffers are being used by the device. */
+#define GXP_CORE_TELEMETRY_DEVICE_STATUS_ENABLED (1 << 0)
/* There was an attempt to use the buffers but their content was invalid. */
-#define GXP_TELEMETRY_DEVICE_STATUS_SANITY_CHECK_FAILED (1 << 1)
+#define GXP_CORE_TELEMETRY_DEVICE_STATUS_SANITY_CHECK_FAILED (1 << 1)
/* Definitions for host->device boot mode requests */
/*
@@ -130,20 +130,20 @@ struct gxp_watchdog_descriptor {
};
/*
- * A structure describing the telemetry (logging and tracing) parameters and
- * buffers.
+ * A structure describing the core telemetry (logging and tracing) parameters
+ * and buffers.
*/
-struct gxp_telemetry_descriptor {
- /* A struct for describing the parameters for telemetry buffers */
- struct telemetry_descriptor {
+struct gxp_core_telemetry_descriptor {
+ /* A struct for describing the parameters for core telemetry buffers. */
+ struct core_telemetry_descriptor {
/*
- * The telemetry status from the host's point of view. See the
- * top of the file for the appropriate flags.
+ * The core telemetry status from the host's point of view. See
+ * the top of the file for the appropriate flags.
*/
uint32_t host_status;
/*
- * The telemetry status from the device point of view. See the
- * top of the file for the appropriate flags.
+ * The core telemetry status from the device point of view. See
+ * the top of the file for the appropriate flags.
*/
uint32_t device_status;
/*
@@ -263,8 +263,8 @@ struct gxp_system_descriptor {
uint32_t app_descriptor_dev_addr[MAX_NUM_CORES];
/* A device address for the watchdog descriptor. */
uint32_t watchdog_dev_addr;
- /* A device address for the telemetry descriptor */
- uint32_t telemetry_dev_addr;
+ /* A device address for the core telemetry descriptor */
+ uint32_t core_telemetry_dev_addr;
/* A device address for the common debug dump region */
uint32_t debug_dump_dev_addr;
};
diff --git a/gxp-internal.h b/gxp-internal.h
index f7b40a6..0b18c1e 100644
--- a/gxp-internal.h
+++ b/gxp-internal.h
@@ -24,6 +24,15 @@
#include "gxp-config.h"
+#define IS_GXP_TEST IS_ENABLED(CONFIG_GXP_TEST)
+
+enum gxp_chip_revision {
+ GXP_CHIP_A0,
+ GXP_CHIP_B0,
+ /* used when the revision is not explicitly specified */
+ GXP_CHIP_ANY,
+};
+
/* Holds Client's TPU mailboxes info used during mapping */
struct gxp_tpu_mbx_desc {
uint phys_core_list;
@@ -52,11 +61,12 @@ struct gxp_domain_pool;
struct gxp_dma_manager;
struct gxp_fw_data_manager;
struct gxp_power_manager;
-struct gxp_telemetry_manager;
+struct gxp_core_telemetry_manager;
struct gxp_thermal_manager;
struct gxp_wakelock_manager;
struct gxp_usage_stats;
struct gxp_power_states;
+struct gxp_iommu_domain;
struct gxp_dev {
struct device *dev; /* platform bus device */
@@ -95,8 +105,9 @@ struct gxp_dev {
struct gxp_dma_manager *dma_mgr;
struct gxp_fw_data_manager *data_mgr;
struct gxp_tpu_dev tpu_dev;
- struct gxp_telemetry_manager *telemetry_mgr;
+ struct gxp_core_telemetry_manager *core_telemetry_mgr;
struct gxp_wakelock_manager *wakelock_mgr;
+ struct gxp_iommu_domain *default_domain;
/*
* Pointer to GSA device for firmware authentication.
* May be NULL if the chip does not support firmware authentication
@@ -164,6 +175,14 @@ struct gxp_dev {
*/
long (*handle_ioctl)(struct file *file, uint cmd, ulong arg);
/*
+ * Device mmap handler for chip-dependent mmap calls.
+	 * Should return -EOPNOTSUPP when the mmap should be handled by the
+	 * common device mmap handler.
+ *
+ * This callback is optional.
+ */
+ int (*handle_mmap)(struct file *file, struct vm_area_struct *vma);
+ /*
* Called for sending power states request.
*
* Return a non-zero value can fail the block wakelock acquisition.
@@ -206,6 +225,25 @@ struct gxp_dev {
* This callback is optional.
*/
void (*wakelock_before_blk_off)(struct gxp_dev *gxp);
+ /*
+ * Called in gxp_map_tpu_mbx_queue(), after the TPU mailbox buffers are mapped.
+ *
+	 * This function is called while holding the write lock of @client->semaphore and the read
+ * lock of @gxp->vd_semaphore.
+ *
+ * This callback is optional.
+ */
+ int (*after_map_tpu_mbx_queue)(struct gxp_dev *gxp,
+ struct gxp_client *client);
+ /*
+ * Called in gxp_unmap_tpu_mbx_queue(), before unmapping the TPU mailbox buffers.
+ *
+	 * This function is called while holding the write lock of @client->semaphore.
+ *
+ * This callback is optional.
+ */
+ void (*before_unmap_tpu_mbx_queue)(struct gxp_dev *gxp,
+ struct gxp_client *client);
};
/* GXP device IO functions */
@@ -246,8 +284,8 @@ static inline int gxp_acquire_rmem_resource(struct gxp_dev *gxp,
bool gxp_is_direct_mode(struct gxp_dev *gxp);
/*
- * Whether the target chip is A0.
+ * Returns the chip revision.
*/
-bool gxp_is_a0(struct gxp_dev *gxp);
+enum gxp_chip_revision gxp_get_chip_revision(struct gxp_dev *gxp);
#endif /* __GXP_INTERNAL_H__ */
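Call sites written against the old boolean helper migrate to the enum in the obvious way; a sketch of the shape of that change (the A0-only workaround is hypothetical):

static void example_apply_chip_quirks(struct gxp_dev *gxp)
{
	/* Before this patch: if (gxp_is_a0(gxp)) { ... } */
	switch (gxp_get_chip_revision(gxp)) {
	case GXP_CHIP_A0:
		/* Apply an A0-only workaround here. */
		break;
	case GXP_CHIP_B0:
	case GXP_CHIP_ANY:
		break;
	}
}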
diff --git a/gxp-kci.c b/gxp-kci.c
index 793716c..2aeacad 100644
--- a/gxp-kci.c
+++ b/gxp-kci.c
@@ -10,6 +10,8 @@
#include <linux/slab.h>
#include <linux/types.h>
+#include <gcip/gcip-telemetry.h>
+
#include "gxp-config.h"
#include "gxp-dma.h"
#include "gxp-kci.h"
@@ -20,19 +22,6 @@
#include "gxp-pm.h"
#include "gxp-usage-stats.h"
-/* Timeout for KCI responses from the firmware (milliseconds) */
-#ifdef GXP_KCI_TIMEOUT
-
-#define KCI_TIMEOUT GXP_KCI_TIMEOUT
-
-#elif IS_ENABLED(CONFIG_GXP_TEST)
-/* fake-firmware could respond in a short time */
-#define KCI_TIMEOUT (200)
-#else
-/* 5 secs. */
-#define KCI_TIMEOUT (5000)
-#endif
-
#define GXP_MCU_USAGE_BUFFER_SIZE 4096
#define CIRCULAR_QUEUE_WRAP_BIT BIT(15)
@@ -44,52 +33,52 @@
static u32 gxp_kci_get_cmd_queue_head(struct gcip_kci *kci)
{
- struct gxp_kci *gkci = gcip_kci_get_data(kci);
+ struct gxp_mailbox *mbx = gcip_kci_get_data(kci);
- return gxp_mailbox_read_cmd_queue_head(gkci->mailbox);
+ return gxp_mailbox_read_cmd_queue_head(mbx);
}
static u32 gxp_kci_get_cmd_queue_tail(struct gcip_kci *kci)
{
- struct gxp_kci *gkci = gcip_kci_get_data(kci);
+ struct gxp_mailbox *mbx = gcip_kci_get_data(kci);
- return gkci->mailbox->cmd_queue_tail;
+ return mbx->cmd_queue_tail;
}
static void gxp_kci_inc_cmd_queue_tail(struct gcip_kci *kci, u32 inc)
{
- struct gxp_kci *gkci = gcip_kci_get_data(kci);
+ struct gxp_mailbox *mbx = gcip_kci_get_data(kci);
- gxp_mailbox_inc_cmd_queue_tail_nolock(gkci->mailbox, inc,
+ gxp_mailbox_inc_cmd_queue_tail_nolock(mbx, inc,
CIRCULAR_QUEUE_WRAP_BIT);
}
static u32 gxp_kci_get_resp_queue_size(struct gcip_kci *kci)
{
- struct gxp_kci *gkci = gcip_kci_get_data(kci);
+ struct gxp_mailbox *mbx = gcip_kci_get_data(kci);
- return gkci->mailbox->resp_queue_size;
+ return mbx->resp_queue_size;
}
static u32 gxp_kci_get_resp_queue_head(struct gcip_kci *kci)
{
- struct gxp_kci *gkci = gcip_kci_get_data(kci);
+ struct gxp_mailbox *mbx = gcip_kci_get_data(kci);
- return gkci->mailbox->resp_queue_head;
+ return mbx->resp_queue_head;
}
static u32 gxp_kci_get_resp_queue_tail(struct gcip_kci *kci)
{
- struct gxp_kci *gkci = gcip_kci_get_data(kci);
+ struct gxp_mailbox *mbx = gcip_kci_get_data(kci);
- return gxp_mailbox_read_resp_queue_tail(gkci->mailbox);
+ return gxp_mailbox_read_resp_queue_tail(mbx);
}
static void gxp_kci_inc_resp_queue_head(struct gcip_kci *kci, u32 inc)
{
- struct gxp_kci *gkci = gcip_kci_get_data(kci);
+ struct gxp_mailbox *mbx = gcip_kci_get_data(kci);
- gxp_mailbox_inc_resp_queue_head_nolock(gkci->mailbox, inc,
+ gxp_mailbox_inc_resp_queue_head_nolock(mbx, inc,
CIRCULAR_QUEUE_WRAP_BIT);
}
@@ -98,8 +87,8 @@ static void
gxp_reverse_kci_handle_response(struct gcip_kci *kci,
struct gcip_kci_response_element *resp)
{
- struct gxp_kci *gkci = gcip_kci_get_data(kci);
- struct gxp_dev *gxp = gkci->mailbox->gxp;
+ struct gxp_mailbox *mbx = gcip_kci_get_data(kci);
+ struct gxp_dev *gxp = mbx->gxp;
if (resp->code <= GCIP_RKCI_CHIP_CODE_LAST) {
/* TODO(b/239638427): Handle reverse kci */
@@ -124,7 +113,8 @@ gxp_reverse_kci_handle_response(struct gcip_kci *kci,
static int gxp_kci_update_usage_wrapper(struct gcip_kci *kci)
{
- struct gxp_kci *gkci = gcip_kci_get_data(kci);
+ struct gxp_mailbox *mbx = gcip_kci_get_data(kci);
+ struct gxp_kci *gkci = mbx->data;
return gxp_kci_update_usage(gkci);
}
@@ -133,10 +123,10 @@ static inline void
gxp_kci_trigger_doorbell(struct gcip_kci *kci,
enum gcip_kci_doorbell_reason reason)
{
- struct gxp_kci *gkci = gcip_kci_get_data(kci);
+ struct gxp_mailbox *mbx = gcip_kci_get_data(kci);
/* triggers doorbell */
- gxp_mailbox_generate_device_interrupt(gkci->mailbox, BIT(0));
+ gxp_mailbox_generate_device_interrupt(mbx, BIT(0));
}
static const struct gcip_kci_ops kci_ops = {
@@ -167,8 +157,8 @@ static int gxp_kci_allocate_resources(struct gxp_mailbox *mailbox,
MBOX_CMD_QUEUE_NUM_ENTRIES);
if (ret)
goto err_cmd_queue;
- mailbox->cmd_queue = gkci->cmd_queue_mem.vaddr;
- mailbox->cmd_queue_device_addr = gkci->cmd_queue_mem.daddr;
+ mailbox->cmd_queue_buf.vaddr = gkci->cmd_queue_mem.vaddr;
+ mailbox->cmd_queue_buf.dsp_addr = gkci->cmd_queue_mem.daddr;
mailbox->cmd_queue_size = MBOX_CMD_QUEUE_NUM_ENTRIES;
mailbox->cmd_queue_tail = 0;
@@ -178,8 +168,8 @@ static int gxp_kci_allocate_resources(struct gxp_mailbox *mailbox,
MBOX_RESP_QUEUE_NUM_ENTRIES);
if (ret)
goto err_resp_queue;
- mailbox->resp_queue = gkci->resp_queue_mem.vaddr;
- mailbox->resp_queue_device_addr = gkci->resp_queue_mem.daddr;
+ mailbox->resp_queue_buf.vaddr = gkci->resp_queue_mem.vaddr;
+ mailbox->resp_queue_buf.dsp_addr = gkci->resp_queue_mem.daddr;
mailbox->resp_queue_size = MBOX_CMD_QUEUE_NUM_ENTRIES;
mailbox->resp_queue_head = 0;
@@ -189,12 +179,13 @@ static int gxp_kci_allocate_resources(struct gxp_mailbox *mailbox,
if (ret)
goto err_descriptor;
- mailbox->descriptor = gkci->descriptor_mem.vaddr;
- mailbox->descriptor_device_addr = gkci->descriptor_mem.daddr;
+ mailbox->descriptor_buf.vaddr = gkci->descriptor_mem.vaddr;
+ mailbox->descriptor_buf.dsp_addr = gkci->descriptor_mem.daddr;
+ mailbox->descriptor = (struct gxp_mailbox_descriptor *)mailbox->descriptor_buf.vaddr;
mailbox->descriptor->cmd_queue_device_addr =
- mailbox->cmd_queue_device_addr;
+ mailbox->cmd_queue_buf.dsp_addr;
mailbox->descriptor->resp_queue_device_addr =
- mailbox->resp_queue_device_addr;
+ mailbox->resp_queue_buf.dsp_addr;
mailbox->descriptor->cmd_queue_size = mailbox->cmd_queue_size;
mailbox->descriptor->resp_queue_size = mailbox->resp_queue_size;
@@ -219,76 +210,54 @@ static void gxp_kci_release_resources(struct gxp_mailbox *mailbox,
gxp_mcu_mem_free_data(gkci->mcu, &gkci->cmd_queue_mem);
}
-static int gxp_kci_init_consume_responses_work(struct gxp_mailbox *mailbox)
-{
- struct gxp_kci *gkci = mailbox->data;
- struct gcip_kci_args kci_args = {
- .dev = gkci->gxp->dev,
- .cmd_queue = mailbox->cmd_queue,
- .resp_queue = mailbox->resp_queue,
- .queue_wrap_bit = CIRCULAR_QUEUE_WRAP_BIT,
- .rkci_buffer_size = REVERSE_KCI_BUFFER_SIZE,
- .timeout = KCI_TIMEOUT,
- .ops = &kci_ops,
- .data = gkci,
- };
- int ret;
-
- gkci->kci = kzalloc(sizeof(*gkci->kci), GFP_KERNEL);
- if (!gkci->kci)
- return -ENOMEM;
-
- ret = gcip_kci_init(gkci->kci, &kci_args);
- if (ret) {
- kfree(gkci->kci);
- return ret;
- }
-
- return 0;
-}
-
-static void gxp_kci_release_consume_responses_work(struct gxp_mailbox *mailbox)
-{
- struct gxp_kci *gkci = mailbox->data;
-
- /* Release gcip_kci. */
- gxp_kci_cancel_work_queues(gkci);
- gcip_kci_release(gkci->kci);
- kfree(gkci->kci);
- gkci->kci = NULL;
-}
-
-static void gxp_kci_consume_responses_work(struct gxp_mailbox *mailbox)
-{
- struct gxp_kci *gkci = mailbox->data;
-
- gcip_kci_handle_irq(gkci->kci);
-}
-
static struct gxp_mailbox_ops mbx_ops = {
.allocate_resources = gxp_kci_allocate_resources,
.release_resources = gxp_kci_release_resources,
- .init_consume_responses_work = gxp_kci_init_consume_responses_work,
- .release_consume_responses_work =
- gxp_kci_release_consume_responses_work,
- .consume_responses_work = gxp_kci_consume_responses_work,
+ .gcip_ops.kci = &kci_ops,
};
+/*
+ * Wrapper around `gxp_mailbox_send_cmd` which passes NULL as @resp.
+ *
+ * All KCI commands are sent synchronously, but callers generally do not consume the response, so
+ * they never pass a pointer to a `struct gcip_kci_response_element` as the @resp argument of
+ * `gxp_mailbox_send_cmd`, which is itself a thin wrapper around `gcip_kci_send_cmd`.
+ *
+ * Even if a caller did pass such a pointer, it would be ignored: `gcip_kci_send_cmd` creates a
+ * temporary instance of that struct internally and returns the instance's @code as its return
+ * value.
+ *
+ * Callers that do need the full `struct gcip_kci_response_element` should call
+ * `gcip_kci_send_cmd_return_resp` directly. (See the implementation in `gcip-kci.c`.)
+ *
+ * For some commands, such as the `fw_info` KCI command, the firmware must return a response that
+ * does not fit into a `struct gcip_kci_response_element`. In that case the caller allocates a
+ * buffer, passes it via @cmd->dma, and the firmware writes the response to it.
+ */
+static inline int gxp_kci_send_cmd(struct gxp_mailbox *mailbox,
+ struct gcip_kci_command_element *cmd)
+{
+ return gxp_mailbox_send_cmd(mailbox, cmd, NULL);
+}
+
int gxp_kci_init(struct gxp_mcu *mcu)
{
struct gxp_dev *gxp = mcu->gxp;
struct gxp_kci *gkci = &mcu->kci;
struct gxp_mailbox_args mbx_args = {
+ .type = GXP_MBOX_TYPE_KCI,
.ops = &mbx_ops,
+ .queue_wrap_bit = CIRCULAR_QUEUE_WRAP_BIT,
.data = gkci,
};
gkci->gxp = gxp;
gkci->mcu = mcu;
- gkci->mailbox = gxp_mailbox_alloc(gxp->mailbox_mgr, NULL, 0,
- KCI_MAILBOX_ID, &mbx_args);
- if (IS_ERR(gkci->mailbox))
- return PTR_ERR(gkci->mailbox);
+ gkci->mbx = gxp_mailbox_alloc(gxp->mailbox_mgr, NULL, 0, KCI_MAILBOX_ID,
+ &mbx_args);
+ if (IS_ERR(gkci->mbx))
+ return PTR_ERR(gkci->mbx);
return 0;
}
@@ -301,13 +270,15 @@ int gxp_kci_reinit(struct gxp_kci *gkci)
void gxp_kci_cancel_work_queues(struct gxp_kci *gkci)
{
- gcip_kci_cancel_work_queues(gkci->kci);
+ gcip_kci_cancel_work_queues(gkci->mbx->mbx_impl.gcip_kci);
}
void gxp_kci_exit(struct gxp_kci *gkci)
{
- gxp_mailbox_release(gkci->gxp->mailbox_mgr, NULL, 0, gkci->mailbox);
- gkci->mailbox = NULL;
+ if (IS_GXP_TEST && (!gkci || !gkci->mbx))
+ return;
+ gxp_mailbox_release(gkci->gxp->mailbox_mgr, NULL, 0, gkci->mbx);
+ gkci->mbx = NULL;
}
enum gcip_fw_flavor gxp_kci_fw_info(struct gxp_kci *gkci,
@@ -338,7 +309,7 @@ enum gcip_fw_flavor gxp_kci_fw_info(struct gxp_kci *gkci,
cmd.dma.size = sizeof(*fw_info);
}
- ret = gcip_kci_send_cmd(gkci->kci, &cmd);
+ ret = gxp_kci_send_cmd(gkci->mbx, &cmd);
if (buf.paddr) {
memcpy(fw_info, buf.vaddr, sizeof(*fw_info));
gxp_mcu_mem_free_data(gkci->mcu, &buf);
@@ -408,6 +379,11 @@ fw_unlock:
return ret;
}
+void gxp_kci_update_usage_async(struct gxp_kci *gkci)
+{
+ gcip_kci_update_usage_async(gkci->mbx->mbx_impl.gcip_kci);
+}
+
int gxp_kci_update_usage_locked(struct gxp_kci *gkci)
{
struct gxp_dev *gxp = gkci->gxp;
@@ -421,7 +397,7 @@ int gxp_kci_update_usage_locked(struct gxp_kci *gkci)
struct gxp_mapped_resource buf;
int ret;
- if (!gkci || !gkci->kci)
+ if (!gkci || !gkci->mbx)
return -ENODEV;
ret = gxp_mcu_mem_alloc_data(gkci->mcu, &buf,
@@ -435,7 +411,7 @@ int gxp_kci_update_usage_locked(struct gxp_kci *gkci)
cmd.dma.address = buf.daddr;
cmd.dma.size = GXP_MCU_USAGE_BUFFER_SIZE;
memset(buf.vaddr, 0, sizeof(struct gxp_usage_header));
- ret = gcip_kci_send_cmd(gkci->kci, &cmd);
+ ret = gxp_kci_send_cmd(gkci->mbx, &cmd);
if (ret == GCIP_KCI_ERROR_UNIMPLEMENTED ||
ret == GCIP_KCI_ERROR_UNAVAILABLE)
@@ -450,36 +426,30 @@ int gxp_kci_update_usage_locked(struct gxp_kci *gkci)
return ret;
}
-int gxp_kci_map_log_buffer(struct gxp_kci *gkci, dma_addr_t daddr, u32 size)
+int gxp_kci_map_mcu_log_buffer(struct gcip_telemetry_kci_args *args)
{
struct gcip_kci_command_element cmd = {
.code = GCIP_KCI_CODE_MAP_LOG_BUFFER,
.dma = {
- .address = daddr,
- .size = size,
+ .address = args->addr,
+ .size = args->size,
},
};
- if (!gkci || !gkci->kci)
- return -ENODEV;
-
- return gcip_kci_send_cmd(gkci->kci, &cmd);
+ return gcip_kci_send_cmd(args->kci, &cmd);
}
-int gxp_kci_map_trace_buffer(struct gxp_kci *gkci, dma_addr_t daddr, u32 size)
+int gxp_kci_map_mcu_trace_buffer(struct gcip_telemetry_kci_args *args)
{
struct gcip_kci_command_element cmd = {
.code = GCIP_KCI_CODE_MAP_TRACE_BUFFER,
.dma = {
- .address = daddr,
- .size = size,
+ .address = args->addr,
+ .size = args->size,
},
};
- if (!gkci || !gkci->kci)
- return -ENODEV;
-
- return gcip_kci_send_cmd(gkci->kci, &cmd);
+ return gcip_kci_send_cmd(args->kci, &cmd);
}
int gxp_kci_shutdown(struct gxp_kci *gkci)
@@ -488,14 +458,14 @@ int gxp_kci_shutdown(struct gxp_kci *gkci)
.code = GCIP_KCI_CODE_SHUTDOWN,
};
- if (!gkci || !gkci->kci)
+ if (!gkci || !gkci->mbx)
return -ENODEV;
- return gcip_kci_send_cmd(gkci->kci, &cmd);
+ return gxp_kci_send_cmd(gkci->mbx, &cmd);
}
-int gxp_kci_allocate_vmbox(struct gxp_kci *gkci, u8 num_cores, u8 client_id,
- u8 slice_index)
+int gxp_kci_allocate_vmbox(struct gxp_kci *gkci, u8 client_id, u8 num_cores,
+ u8 slice_index, u8 tpu_client_id, u8 operation)
{
struct gcip_kci_command_element cmd = {
.code = GCIP_KCI_CODE_ALLOCATE_VMBOX,
@@ -504,7 +474,7 @@ int gxp_kci_allocate_vmbox(struct gxp_kci *gkci, u8 num_cores, u8 client_id,
struct gxp_mapped_resource buf;
int ret;
- if (!gkci || !gkci->kci)
+ if (!gkci || !gkci->mbx)
return -ENODEV;
ret = gxp_mcu_mem_alloc_data(gkci->mcu, &buf, sizeof(*detail));
@@ -512,14 +482,23 @@ int gxp_kci_allocate_vmbox(struct gxp_kci *gkci, u8 num_cores, u8 client_id,
return -ENOMEM;
detail = buf.vaddr;
- detail->num_cores = num_cores;
+ detail->operation = operation;
detail->client_id = client_id;
- detail->slice_index = slice_index;
+
+ if (detail->operation & KCI_ALLOCATE_VMBOX_OP_ALLOCATE_VMBOX) {
+ detail->num_cores = num_cores;
+ detail->slice_index = slice_index;
+ }
+
+ if (detail->operation & KCI_ALLOCATE_VMBOX_OP_LINK_OFFLOAD_VMBOX) {
+ detail->offload_client_id = tpu_client_id;
+ detail->offload_type = KCI_ALLOCATE_VMBOX_OFFLOAD_TYPE_TPU;
+ }
cmd.dma.address = buf.daddr;
cmd.dma.size = sizeof(*detail);
- ret = gcip_kci_send_cmd(gkci->kci, &cmd);
+ ret = gxp_kci_send_cmd(gkci->mbx, &cmd);
gxp_mcu_mem_free_data(gkci->mcu, &buf);
return ret;
@@ -534,7 +513,7 @@ int gxp_kci_release_vmbox(struct gxp_kci *gkci, u8 client_id)
struct gxp_mapped_resource buf;
int ret;
- if (!gkci || !gkci->kci)
+ if (!gkci || !gkci->mbx)
return -ENODEV;
ret = gxp_mcu_mem_alloc_data(gkci->mcu, &buf, sizeof(*detail));
@@ -547,7 +526,7 @@ int gxp_kci_release_vmbox(struct gxp_kci *gkci, u8 client_id)
cmd.dma.address = buf.daddr;
cmd.dma.size = sizeof(*detail);
- ret = gcip_kci_send_cmd(gkci->kci, &cmd);
+ ret = gxp_kci_send_cmd(gkci->mbx, &cmd);
gxp_mcu_mem_free_data(gkci->mcu, &buf);
return ret;
@@ -561,8 +540,8 @@ int gxp_kci_resp_rkci_ack(struct gxp_kci *gkci,
.code = GCIP_KCI_CODE_RKCI_ACK,
};
- if (!gkci || !gkci->kci)
+ if (!gkci || !gkci->mbx)
return -ENODEV;
- return gcip_kci_send_cmd(gkci->kci, &cmd);
+ return gxp_kci_send_cmd(gkci->mbx, &cmd);
}
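The @cmd->dma pattern described in the wrapper's comment looks roughly like this in practice, modeled on `gxp_kci_fw_info()` and `gxp_kci_update_usage_locked()` above (EXAMPLE_KCI_CODE is a hypothetical command code, standing in for any command that returns a DMA payload):

static int example_kci_cmd_with_payload(struct gxp_kci *gkci)
{
	struct gcip_kci_command_element cmd = {
		.code = EXAMPLE_KCI_CODE, /* hypothetical placeholder */
	};
	struct gxp_mapped_resource buf;
	int ret;

	if (!gkci || !gkci->mbx)
		return -ENODEV;

	/* The firmware writes its reply here rather than into the fixed-size
	 * response element.
	 */
	ret = gxp_mcu_mem_alloc_data(gkci->mcu, &buf, PAGE_SIZE);
	if (ret)
		return -ENOMEM;

	cmd.dma.address = buf.daddr;
	cmd.dma.size = PAGE_SIZE;

	/* Returns the KCI response code, or a negative errno. */
	ret = gxp_kci_send_cmd(gkci->mbx, &cmd);
	if (ret == GCIP_KCI_ERROR_OK) {
		/* ... consume the payload at buf.vaddr ... */
	}

	gxp_mcu_mem_free_data(gkci->mcu, &buf);
	return ret;
}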
diff --git a/gxp-kci.h b/gxp-kci.h
index cd91a85..ab8c753 100644
--- a/gxp-kci.h
+++ b/gxp-kci.h
@@ -8,7 +8,10 @@
#ifndef __GXP_KCI_H__
#define __GXP_KCI_H__
+#include <linux/bits.h>
+
#include <gcip/gcip-kci.h>
+#include <gcip/gcip-telemetry.h>
#include "gxp-internal.h"
#include "gxp-mailbox.h"
@@ -19,15 +22,36 @@
* Maximum number of outstanding KCI requests from firmware
* This is used to size a circular buffer, so it must be a power of 2
*/
-#define REVERSE_KCI_BUFFER_SIZE (32)
+#define GXP_REVERSE_KCI_BUFFER_SIZE (32)
+
+/* Timeout for KCI responses from the firmware (milliseconds) */
+#ifndef GXP_KCI_TIMEOUT
+#if IS_ENABLED(CONFIG_GXP_TEST)
+#define GXP_KCI_TIMEOUT (200) /* Fake firmware could respond in a short time. */
+#else
+#define GXP_KCI_TIMEOUT (5000) /* 5 secs. */
+#endif
+#endif /* GXP_KCI_TIMEOUT */
+
+/*
+ * Operations of `allocate_vmbox` KCI command.
+ * The bits of @operation of `struct gxp_kci_allocate_vmbox_detail` will be set with these.
+ */
+#define KCI_ALLOCATE_VMBOX_OP_ALLOCATE_VMBOX BIT(0)
+#define KCI_ALLOCATE_VMBOX_OP_LINK_OFFLOAD_VMBOX BIT(1)
+
+/*
+ * Type of chip to link offload virtual mailbox.
+ * @offload_type of `struct gxp_kci_allocate_vmbox_detail` will be set with these.
+ */
+#define KCI_ALLOCATE_VMBOX_OFFLOAD_TYPE_TPU 0
struct gxp_mcu;
struct gxp_kci {
struct gxp_dev *gxp;
struct gxp_mcu *mcu;
- struct gcip_kci *kci;
- struct gxp_mailbox *mailbox;
+ struct gxp_mailbox *mbx;
struct gxp_mapped_resource cmd_queue_mem;
struct gxp_mapped_resource resp_queue_mem;
@@ -36,6 +60,25 @@ struct gxp_kci {
/* Used when sending the details about allocate_vmbox KCI command. */
struct gxp_kci_allocate_vmbox_detail {
+ /*
+ * Operations of command.
+ * The operations below can be sent in one command, but also separately according to how
+ * the bits of this field are set.
+ *
+ * Bitfields:
+ * [0:0] - Virtual mailbox allocation.
+ * 0 = Ignore.
+ * 1 = Allocate a virtual mailbox.
+ * @client_id, @num_cores and @slice_index are mandatory.
+ * [1:1] - Offload virtual mailbox linkage.
+ * 0 = Ignore.
+ * 1 = Link an offload virtual mailbox.
+	 *              This operation cannot be requested before virtual mailboxes have been
+	 *              allocated for both the DSP and the offload chip.
+ * @client_id, @offload_client_id and @offload_type are mandatory.
+ * [7:2] - RESERVED
+ */
+ u8 operation;
/* Client ID. */
u8 client_id;
/* The number of required cores. */
@@ -45,8 +88,15 @@ struct gxp_kci_allocate_vmbox_detail {
* used for MCU<->core mailbox.
*/
u8 slice_index;
+ /* Client ID of offload chip. */
+ u8 offload_client_id;
+ /*
+ * Type of offload chip.
+ * 0: TPU
+ */
+ u8 offload_type;
/* Reserved */
- u8 reserved[61];
+ u8 reserved[58];
} __packed;
/* Used when sending the details about release_vmbox KCI command. */
@@ -106,6 +156,7 @@ enum gcip_fw_flavor gxp_kci_fw_info(struct gxp_kci *gkci,
* Returns KCI response code on success or < 0 on error (typically -ETIMEDOUT).
*/
int gxp_kci_update_usage(struct gxp_kci *gkci);
+void gxp_kci_update_usage_async(struct gxp_kci *gkci);
/*
* Works the same as gxp_kci_update_usage() except the caller of this
@@ -120,29 +171,42 @@ int gxp_kci_update_usage_locked(struct gxp_kci *gkci);
*
* Returns the code of response, or a negative errno on error.
*/
-int gxp_kci_map_log_buffer(struct gxp_kci *gkci, dma_addr_t daddr, u32 size);
+int gxp_kci_map_mcu_log_buffer(struct gcip_telemetry_kci_args *args);
/*
* Sends the "Map Trace Buffer" command and waits for remote response.
*
* Returns the code of response, or a negative errno on error.
*/
-int gxp_kci_map_trace_buffer(struct gxp_kci *gkci, dma_addr_t daddr, u32 size);
+int gxp_kci_map_mcu_trace_buffer(struct gcip_telemetry_kci_args *args);
/* Send shutdown request to firmware */
int gxp_kci_shutdown(struct gxp_kci *gkci);
/*
- * Allocates a virtual mailbox to communicate with MCU firmware.
+ * Allocates a virtual mailbox to communicate with MCU firmware. Depending on @operation, it can
+ * also link the TPU virtual mailbox of @tpu_client_id to the DSP client of @client_id so that
+ * the firmware can offload TPU commands.
*
 * When a new client wants to run a workload on the DSP, it needs to allocate a virtual mailbox.
 * Mailbox creation is initiated from the application by calling the GXP_ALLOCATE_VIRTUAL_DEVICE
 * ioctl.
- * Allocated virtual mailbox should be released by calling `gxp_kci_release_vmbox`.
+ * The allocated virtual mailbox should be released by calling `gxp_kci_release_vmbox`. To
+ * allocate a virtual mailbox, @client_id, @num_cores and @slice_index must be passed, and
+ * @operation must have `KCI_ALLOCATE_VMBOX_OP_ALLOCATE_VMBOX` set.
+ *
+ * To offload TPU commands, the virtual mailbox allocated on the TPU side must be linked to the
+ * DSP client. This is done by passing @client_id (the DSP client ID) and @tpu_client_id (which
+ * can be fetched from the TPU driver) to this function, with
+ * `KCI_ALLOCATE_VMBOX_OP_LINK_OFFLOAD_VMBOX` set in @operation.
+ *
+ * Allocating a virtual mailbox and linking a TPU virtual mailbox can be done in a single call or
+ * in separate calls, depending on how @operation is set.
*
* Returns the code of response, or a negative errno on error.
*/
-int gxp_kci_allocate_vmbox(struct gxp_kci *gkci, u8 num_cores, u8 client_id,
- u8 slice_index);
+int gxp_kci_allocate_vmbox(struct gxp_kci *gkci, u8 client_id, u8 num_cores,
+ u8 slice_index, u8 tpu_client_id, u8 operation);
/*
* Releases a virtual mailbox which is allocated by `gxp_kci_allocate_vmbox`.
@@ -161,9 +225,4 @@ int gxp_kci_release_vmbox(struct gxp_kci *gkci, u8 client_id);
int gxp_kci_resp_rkci_ack(struct gxp_kci *gkci,
struct gcip_kci_response_element *rkci_cmd);
-static inline void gxp_kci_update_usage_async(struct gxp_kci *gkci)
-{
- gcip_kci_update_usage_async(gkci->kci);
-}
-
#endif /* __GXP_KCI_H__ */
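Putting the @operation bits together, a caller holding both a DSP client ID and a TPU client ID can allocate and link in one round trip; a sketch under those assumptions (the helper name is illustrative):

static int example_allocate_and_link_vmbox(struct gxp_kci *gkci, u8 client_id,
					   u8 num_cores, u8 slice_index,
					   u8 tpu_client_id)
{
	u8 operation = KCI_ALLOCATE_VMBOX_OP_ALLOCATE_VMBOX |
		       KCI_ALLOCATE_VMBOX_OP_LINK_OFFLOAD_VMBOX;

	/*
	 * One KCI command performs both operations; pass only
	 * KCI_ALLOCATE_VMBOX_OP_ALLOCATE_VMBOX here to defer the TPU link to
	 * a second call.
	 */
	return gxp_kci_allocate_vmbox(gkci, client_id, num_cores, slice_index,
				      tpu_client_id, operation);
}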
diff --git a/gxp-lpm.h b/gxp-lpm.h
index bab76fc..8f8f108 100644
--- a/gxp-lpm.h
+++ b/gxp-lpm.h
@@ -38,9 +38,6 @@ enum lpm_state {
#define CORE_WAKEUP_DOORBELL(__core__) (0 + (__core__))
-#define AUR_DVFS_DEBUG_REQ (1 << 31)
-#define AUR_DEBUG_CORE_FREQ (AUR_DVFS_DEBUG_REQ | (3 << 27))
-
#define PSM_INIT_DONE_MASK 0x80
#define PSM_CURR_STATE_MASK 0x0F
#define PSM_STATE_VALID_MASK 0x10
diff --git a/gxp-mailbox-driver.c b/gxp-mailbox-driver.c
index 12820ab..23788f7 100644
--- a/gxp-mailbox-driver.c
+++ b/gxp-mailbox-driver.c
@@ -12,6 +12,7 @@
#include <linux/of_irq.h>
#include <linux/spinlock.h>
+#include "gxp-config.h" /* GXP_USE_LEGACY_MAILBOX */
#include "gxp-mailbox-driver.h"
#include "gxp-mailbox-regs.h"
#include "gxp-mailbox.h"
@@ -413,7 +414,7 @@ int gxp_mailbox_inc_resp_queue_head_locked(struct gxp_mailbox *mailbox, u32 inc,
return gxp_mailbox_inc_resp_queue_head_nolock(mailbox, inc, wrap_bit);
}
-#ifdef GXP_HAS_DCI
+#if !GXP_USE_LEGACY_MAILBOX
u32 gxp_mailbox_gcip_ops_get_cmd_queue_head(struct gcip_mailbox *mailbox)
{
struct gxp_mailbox *gxp_mbx = mailbox->data;
@@ -560,4 +561,4 @@ void gxp_mailbox_gcip_ops_after_fetch_resps(struct gcip_mailbox *mailbox,
if (num_resps == size)
gxp_mailbox_generate_device_interrupt(gxp_mbx, BIT(0));
}
-#endif /* GXP_HAS_DCI */
+#endif /* !GXP_USE_LEGACY_MAILBOX */
diff --git a/gxp-mailbox-driver.h b/gxp-mailbox-driver.h
index a79e32f..30292d2 100644
--- a/gxp-mailbox-driver.h
+++ b/gxp-mailbox-driver.h
@@ -10,7 +10,7 @@
#include "gxp-config.h"
#include "gxp-mailbox.h"
-#ifdef GXP_HAS_DCI
+#if !GXP_USE_LEGACY_MAILBOX
#include <gcip/gcip-mailbox.h>
#endif
@@ -133,7 +133,7 @@ int gxp_mailbox_inc_resp_queue_head_nolock(struct gxp_mailbox *mailbox, u32 inc,
int gxp_mailbox_inc_resp_queue_head_locked(struct gxp_mailbox *mailbox, u32 inc,
u32 wrap_bit);
-#ifdef GXP_HAS_DCI
+#if !GXP_USE_LEGACY_MAILBOX
/*
* Following functions are used when setting the operators of `struct gcip_mailbox_ops`.
* To use these functions, @mailbox->data should be set as an instance of `struct gxp_mailbox`.
@@ -168,6 +168,6 @@ int gxp_mailbox_gcip_ops_after_enqueue_cmd(struct gcip_mailbox *mailbox,
void *cmd);
void gxp_mailbox_gcip_ops_after_fetch_resps(struct gcip_mailbox *mailbox,
u32 num_resps);
-#endif /* GXP_HAS_DCI */
+#endif /* !GXP_USE_LEGACY_MAILBOX */
#endif /* __GXP_MAILBOX_DRIVER_H__ */
diff --git a/gxp-mailbox.c b/gxp-mailbox.c
index 375876f..b0661ac 100644
--- a/gxp-mailbox.c
+++ b/gxp-mailbox.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <uapi/linux/sched/types.h>
+#include "gxp-config.h" /* GXP_USE_LEGACY_MAILBOX */
#include "gxp-dma.h"
#include "gxp-internal.h"
#include "gxp-mailbox.h"
@@ -21,226 +22,20 @@
#include "gxp-pm.h"
#include "gxp.h"
-/*
- * TODO(b/237908672): gxp-mailbox must be able to be compiled without gcip-mailbox on the old
- * chip.
- */
+#if GXP_USE_LEGACY_MAILBOX
+#include "gxp-mailbox-impl.h"
+#else
+#include <gcip/gcip-mailbox.h>
+#include <gcip/gcip-kci.h>
+
+#include "gxp-kci.h"
+#include "gxp-mcu-telemetry.h"
+#endif
/* Timeout of 8s by default to account for slower emulation platforms */
int gxp_mbx_timeout = 8000;
module_param_named(mbx_timeout, gxp_mbx_timeout, int, 0660);
-#define CIRCULAR_QUEUE_WRAP_BIT BIT(15)
-
-#define MBOX_CMD_QUEUE_NUM_ENTRIES 1024
-#define MBOX_CMD_QUEUE_SIZE \
- (sizeof(struct gxp_command) * MBOX_CMD_QUEUE_NUM_ENTRIES)
-
-#define MBOX_RESP_QUEUE_NUM_ENTRIES 1024
-#define MBOX_RESP_QUEUE_SIZE \
- (sizeof(struct gxp_response) * MBOX_RESP_QUEUE_NUM_ENTRIES)
-
-/*
- * Following codes are the implementation of the mailbox manager for the legacy mailbox. They will
- * be compiled only when the `GXP_HAS_DCI` is not defined.
- */
-
-#ifndef GXP_HAS_DCI
-
-static struct gxp_mailbox *
-gxp_mailbox_manager_allocate_mailbox(struct gxp_mailbox_manager *mgr,
- struct gxp_virtual_device *vd,
- uint virt_core, u8 core_id)
-{
- struct gxp_mailbox *mailbox = gxp_mailbox_alloc(
- mgr, vd, virt_core, core_id, &gxp_mailbox_default_args);
-
- if (!IS_ERR(mailbox))
- gxp_mailbox_generate_device_interrupt(mailbox, BIT(0));
- return mailbox;
-}
-
-static int gxp_mailbox_manager_execute_cmd(struct gxp_mailbox *mailbox,
- u16 cmd_code, u8 cmd_priority,
- u64 cmd_daddr, u32 cmd_size,
- u32 cmd_flags, u64 *resp_seq,
- u16 *resp_status)
-{
- struct gxp_command cmd;
- struct gxp_response resp;
- struct buffer_descriptor buffer;
- int ret;
-
- /* Pack the command structure */
- buffer.address = cmd_daddr;
- buffer.size = cmd_size;
- buffer.flags = cmd_flags;
- /* cmd.seq is assigned by mailbox implementation */
- cmd.code = cmd_code; /* All IOCTL commands are dispatch */
- cmd.priority = cmd_priority; /* currently unused */
- cmd.buffer_descriptor = buffer;
-
- ret = gxp_mailbox_execute_cmd(mailbox, &cmd, &resp);
-
- /* resp.seq and resp.status can be updated even though it failed to process the command */
- if (resp_seq)
- *resp_seq = resp.seq;
- if (resp_status)
- *resp_status = resp.status;
-
- return ret;
-}
-
-static int gxp_mailbox_manager_execute_cmd_async(
- struct gxp_client *client, struct gxp_mailbox *mailbox, int virt_core,
- u16 cmd_code, u8 cmd_priority, u64 cmd_daddr, u32 cmd_size,
- u32 cmd_flags, struct gxp_power_states power_states, u64 *cmd_seq)
-{
- struct gxp_command cmd;
- struct buffer_descriptor buffer;
- struct mailbox_resp_queue *resp_queue =
- &client->vd->mailbox_resp_queues[virt_core];
- struct gxp_eventfd *eventfd = client->mb_eventfds[virt_core];
- int ret;
-
- /* Pack the command structure */
- buffer.address = cmd_daddr;
- buffer.size = cmd_size;
- buffer.flags = cmd_flags;
- /* cmd.seq is assigned by mailbox implementation */
- cmd.code = cmd_code; /* All IOCTL commands are dispatch */
- cmd.priority = cmd_priority; /* currently unused */
- cmd.buffer_descriptor = buffer;
-
- ret = gxp_mailbox_execute_cmd_async(mailbox, &cmd, &resp_queue->queue,
- &resp_queue->lock,
- &resp_queue->waitq, power_states,
- eventfd);
-
- if (cmd_seq)
- *cmd_seq = cmd.seq;
-
- return ret;
-}
-
-static int gxp_mailbox_manager_wait_async_resp(struct gxp_client *client,
- int virt_core, u64 *resp_seq,
- u16 *resp_status,
- u32 *resp_retval,
- u16 *error_code)
-{
- struct gxp_async_response *resp_ptr;
- struct mailbox_resp_queue *resp_queue =
- &client->vd->mailbox_resp_queues[virt_core];
- long timeout;
-
- spin_lock_irq(&resp_queue->lock);
-
- /*
- * The "exclusive" version of wait_event is used since each wake
- * corresponds to the addition of exactly one new response to be
- * consumed. Therefore, only one waiting response ioctl can ever
- * proceed per wake event.
- */
- timeout = wait_event_interruptible_lock_irq_timeout_exclusive(
- resp_queue->waitq, !list_empty(&resp_queue->queue),
- resp_queue->lock, msecs_to_jiffies(MAILBOX_TIMEOUT));
- if (timeout <= 0) {
- spin_unlock_irq(&resp_queue->lock);
- /* unusual case - this only happens when there is no command pushed */
- return timeout ? -ETIMEDOUT : timeout;
- }
- resp_ptr = list_first_entry(&resp_queue->queue,
- struct gxp_async_response, list_entry);
-
- /* Pop the front of the response list */
- list_del(&(resp_ptr->list_entry));
-
- spin_unlock_irq(&resp_queue->lock);
-
- if (resp_seq)
- *resp_seq = resp_ptr->resp.seq;
- if (resp_status)
- *resp_status = resp_ptr->resp.status;
-
- switch (resp_ptr->resp.status) {
- case GXP_RESP_OK:
- if (error_code)
- *error_code = GXP_RESPONSE_ERROR_NONE;
- /* retval is only valid if status == GXP_RESP_OK */
- if (resp_retval)
- *resp_retval = resp_ptr->resp.retval;
- break;
- case GXP_RESP_CANCELLED:
- if (error_code)
- *error_code = GXP_RESPONSE_ERROR_TIMEOUT;
- break;
- default:
- /* No other status values are valid at this point */
- WARN(true, "Completed response had invalid status %hu",
- resp_ptr->resp.status);
- if (error_code)
- *error_code = GXP_RESPONSE_ERROR_INTERNAL;
- break;
- }
-
- /*
- * We must be absolutely sure the timeout work has been cancelled
- * and/or completed before freeing the `gxp_async_response`.
- * There are 3 possible cases when we arrive at this point:
- * 1) The response arrived normally and the timeout was cancelled
- * 2) The response timedout and its timeout handler finished
- * 3) The response handler and timeout handler raced, and the response
- * handler "cancelled" the timeout handler while it was already in
- * progress.
- *
- * This call handles case #3, and ensures any in-process timeout
- * handler (which may reference the `gxp_async_response`) has
- * been able to exit cleanly.
- */
- cancel_delayed_work_sync(&resp_ptr->timeout_work);
- kfree(resp_ptr);
-
- return 0;
-}
-
-static void gxp_mailbox_manager_release_unconsumed_async_resps(
- struct gxp_virtual_device *vd)
-{
- struct gxp_async_response *cur, *nxt;
- int i;
- unsigned long flags;
-
- /* Cleanup any unconsumed responses */
- for (i = 0; i < vd->num_cores; i++) {
- /*
- * Since VD is releasing, it is not necessary to lock here.
- * Do it anyway for consistency.
- */
- spin_lock_irqsave(&vd->mailbox_resp_queues[i].lock, flags);
- list_for_each_entry_safe (cur, nxt,
- &vd->mailbox_resp_queues[i].queue,
- list_entry) {
- list_del(&cur->list_entry);
- kfree(cur);
- }
- spin_unlock_irqrestore(&vd->mailbox_resp_queues[i].lock, flags);
- }
-}
-
-static void gxp_mailbox_manager_set_ops(struct gxp_mailbox_manager *mgr)
-{
- mgr->allocate_mailbox = gxp_mailbox_manager_allocate_mailbox;
- mgr->release_mailbox = gxp_mailbox_release;
- mgr->reset_mailbox = gxp_mailbox_reset;
- mgr->execute_cmd = gxp_mailbox_manager_execute_cmd;
- mgr->execute_cmd_async = gxp_mailbox_manager_execute_cmd_async;
- mgr->wait_async_resp = gxp_mailbox_manager_wait_async_resp;
- mgr->release_unconsumed_async_resps =
- gxp_mailbox_manager_release_unconsumed_async_resps;
-}
-#endif /* !GXP_HAS_DCI */
-
/*
* Fetches and handles responses, then wakes up threads that are waiting for a
* response.
@@ -253,7 +48,21 @@ static void gxp_mailbox_consume_responses_work(struct kthread_work *work)
{
struct gxp_mailbox *mailbox =
container_of(work, struct gxp_mailbox, response_work);
- mailbox->ops->consume_responses_work(mailbox);
+
+#if GXP_USE_LEGACY_MAILBOX
+ gxp_mailbox_consume_responses(mailbox);
+#else
+ switch (mailbox->type) {
+ case GXP_MBOX_TYPE_GENERAL:
+ gcip_mailbox_consume_responses_work(mailbox->mbx_impl.gcip_mbx);
+ break;
+ case GXP_MBOX_TYPE_KCI:
+ gcip_kci_handle_irq(mailbox->mbx_impl.gcip_kci);
+ gxp_mcu_telemetry_irq_handler(
+ ((struct gxp_kci *)mailbox->data)->mcu);
+ break;
+ }
+#endif
}
/*
@@ -298,14 +107,6 @@ static int gxp_mailbox_set_ops(struct gxp_mailbox *mailbox,
return -EINVAL;
}
- if (!ops->init_consume_responses_work ||
- !ops->release_consume_responses_work ||
- !ops->consume_responses_work) {
- dev_err(mailbox->gxp->dev,
- "Incomplete gxp_mailbox consume_responses_work ops.\n");
- return -EINVAL;
- }
-
mailbox->ops = ops;
return 0;
@@ -340,6 +141,10 @@ static struct gxp_mailbox *create_mailbox(struct gxp_mailbox_manager *mgr,
mailbox->gxp = mgr->gxp;
mailbox->csr_reg_base = mgr->get_mailbox_csr_base(mgr->gxp, core_id);
mailbox->data_reg_base = mgr->get_mailbox_data_base(mgr->gxp, core_id);
+ mailbox->type = args->type;
+ mailbox->queue_wrap_bit = args->queue_wrap_bit;
+ mailbox->cmd_elem_size = args->cmd_elem_size;
+ mailbox->resp_elem_size = args->resp_elem_size;
gxp_mailbox_set_data(mailbox, args->data);
ret = gxp_mailbox_set_ops(mailbox, args->ops);
@@ -378,6 +183,8 @@ err_args:
static void release_mailbox(struct gxp_mailbox *mailbox,
struct gxp_virtual_device *vd, uint virt_core)
{
+ if (IS_GXP_TEST && !mailbox)
+ return;
mailbox->ops->release_resources(mailbox, vd, virt_core);
kthread_flush_worker(&mailbox->response_worker);
if (mailbox->response_thread)
@@ -385,17 +192,146 @@ static void release_mailbox(struct gxp_mailbox *mailbox,
kfree(mailbox);
}
+#if !GXP_USE_LEGACY_MAILBOX
+static int init_gcip_mailbox(struct gxp_mailbox *mailbox)
+{
+ const struct gcip_mailbox_args args = {
+ .dev = mailbox->gxp->dev,
+ .queue_wrap_bit = mailbox->queue_wrap_bit,
+ .cmd_queue = mailbox->cmd_queue_buf.vaddr,
+ .cmd_elem_size = mailbox->cmd_elem_size,
+ .resp_queue = mailbox->resp_queue_buf.vaddr,
+ .resp_elem_size = mailbox->resp_elem_size,
+ .timeout = MAILBOX_TIMEOUT,
+ .ops = mailbox->ops->gcip_ops.mbx,
+ .data = mailbox,
+ };
+ struct gcip_mailbox *gcip_mbx;
+ int ret;
+
+ gcip_mbx = kzalloc(sizeof(*gcip_mbx), GFP_KERNEL);
+ if (!gcip_mbx)
+ return -ENOMEM;
+
+ /* Initialize gcip_mailbox */
+ ret = gcip_mailbox_init(gcip_mbx, &args);
+ if (ret) {
+ kfree(gcip_mbx);
+ return ret;
+ }
+
+ mailbox->mbx_impl.gcip_mbx = gcip_mbx;
+
+ return 0;
+}
+
+static void release_gcip_mailbox(struct gxp_mailbox *mailbox)
+{
+ struct gcip_mailbox *gcip_mbx = mailbox->mbx_impl.gcip_mbx;
+
+ if (gcip_mbx == NULL)
+ return;
+
+ gcip_mailbox_release(gcip_mbx);
+ kfree(gcip_mbx);
+ mailbox->mbx_impl.gcip_mbx = NULL;
+}
+
+static int init_gcip_kci(struct gxp_mailbox *mailbox)
+{
+ const struct gcip_kci_args args = {
+ .dev = mailbox->gxp->dev,
+ .cmd_queue = mailbox->cmd_queue_buf.vaddr,
+ .resp_queue = mailbox->resp_queue_buf.vaddr,
+ .queue_wrap_bit = mailbox->queue_wrap_bit,
+ .rkci_buffer_size = GXP_REVERSE_KCI_BUFFER_SIZE,
+ .timeout = GXP_KCI_TIMEOUT,
+ .ops = mailbox->ops->gcip_ops.kci,
+ .data = mailbox,
+ };
+ struct gcip_kci *gcip_kci;
+ int ret;
+
+ gcip_kci = kzalloc(sizeof(*gcip_kci), GFP_KERNEL);
+ if (!gcip_kci)
+ return -ENOMEM;
+
+ ret = gcip_kci_init(gcip_kci, &args);
+ if (ret) {
+ kfree(gcip_kci);
+ return ret;
+ }
+
+ mailbox->mbx_impl.gcip_kci = gcip_kci;
+
+ return 0;
+}
+
+static void release_gcip_kci(struct gxp_mailbox *mailbox)
+{
+ struct gcip_kci *gcip_kci = mailbox->mbx_impl.gcip_kci;
+
+ if (gcip_kci == NULL)
+ return;
+
+ gcip_kci_cancel_work_queues(gcip_kci);
+ gcip_kci_release(gcip_kci);
+ kfree(gcip_kci);
+ mailbox->mbx_impl.gcip_kci = NULL;
+}
+#endif /* !GXP_USE_LEGACY_MAILBOX */
+
+/*
+ * Initializes @mailbox->mbx_impl to start waiting and consuming responses.
+ * This initializes the GCIP mailbox modules according to the type of @mailbox.
+ * - GENERAL: initializes @mailbox->mbx_impl.gcip_mbx
+ * - KCI: initializes @mailbox->mbx_impl.gcip_kci
+ *
+ * Note: When `GXP_USE_LEGACY_MAILBOX` is set, it initializes @mailbox itself,
+ * since its queuing logic is implemented in `gxp-mailbox-impl.c`.
+ */
+static int init_mailbox_impl(struct gxp_mailbox *mailbox)
+{
+ int ret;
+
+#if GXP_USE_LEGACY_MAILBOX
+ if (mailbox->type != GXP_MBOX_TYPE_GENERAL)
+ return -EOPNOTSUPP;
+
+ ret = gxp_mailbox_init_consume_responses(mailbox);
+ if (ret)
+ return ret;
+#else
+ switch (mailbox->type) {
+ case GXP_MBOX_TYPE_GENERAL:
+ ret = init_gcip_mailbox(mailbox);
+ if (ret)
+ return ret;
+ break;
+ case GXP_MBOX_TYPE_KCI:
+ ret = init_gcip_kci(mailbox);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+#endif /* GXP_USE_LEGACY_MAILBOX */
+
+ return 0;
+}
+
static int enable_mailbox(struct gxp_mailbox *mailbox)
{
int ret;
- gxp_mailbox_write_descriptor(mailbox, mailbox->descriptor_device_addr);
+ gxp_mailbox_write_descriptor(mailbox, mailbox->descriptor_buf.dsp_addr);
gxp_mailbox_write_cmd_queue_head(mailbox, 0);
gxp_mailbox_write_cmd_queue_tail(mailbox, 0);
gxp_mailbox_write_resp_queue_head(mailbox, 0);
gxp_mailbox_write_resp_queue_tail(mailbox, 0);
- ret = mailbox->ops->init_consume_responses_work(mailbox);
+ ret = init_mailbox_impl(mailbox);
if (ret)
return ret;
@@ -433,6 +369,32 @@ struct gxp_mailbox *gxp_mailbox_alloc(struct gxp_mailbox_manager *mgr,
return mailbox;
}
+/*
+ * Releases @mailbox->mbx_impl, flushing all pending responses in the wait
+ * list.
+ * This releases the GCIP mailbox modules according to the type of @mailbox.
+ * - GENERAL: releases @mailbox->mbx_impl.gcip_mbx
+ * - KCI: releases @mailbox->mbx_impl.gcip_kci
+ *
+ * Note: When `GXP_USE_LEGACY_MAILBOX` is set, it releases @mailbox itself,
+ * since its queuing logic is implemented in `gxp-mailbox-impl.c`.
+ */
+static void release_mailbox_impl(struct gxp_mailbox *mailbox)
+{
+#if GXP_USE_LEGACY_MAILBOX
+ gxp_mailbox_release_consume_responses(mailbox);
+#else
+ switch (mailbox->type) {
+ case GXP_MBOX_TYPE_GENERAL:
+ release_gcip_mailbox(mailbox);
+ break;
+ case GXP_MBOX_TYPE_KCI:
+ release_gcip_kci(mailbox);
+ break;
+ }
+#endif
+}
+
void gxp_mailbox_release(struct gxp_mailbox_manager *mgr,
struct gxp_virtual_device *vd, uint virt_core,
struct gxp_mailbox *mailbox)
@@ -460,7 +422,7 @@ void gxp_mailbox_release(struct gxp_mailbox_manager *mgr,
cancel_work_sync(mailbox->interrupt_handlers[i]);
}
- mailbox->ops->release_consume_responses_work(mailbox);
+ release_mailbox_impl(mailbox);
/* Reset the mailbox HW */
gxp_mailbox_reset_hw(mailbox);
@@ -510,604 +472,30 @@ int gxp_mailbox_unregister_interrupt_handler(struct gxp_mailbox *mailbox,
return 0;
}
-/*
- * Following codes are the implementation of legacy mailbox. They will be compiled only when the
- * `GXP_HAS_DCI` is not defined. The logic is identical with before refactoring but the only one
- * difference is that the interface of `gxp_mailbox_ops` and `gxp_mailbox_args` are applied.
- */
-
-#ifndef GXP_HAS_DCI
-static int gxp_mailbox_ops_allocate_resources(struct gxp_mailbox *mailbox,
- struct gxp_virtual_device *vd,
- uint virt_core)
-{
- /* Allocate and initialize the command queue */
- mailbox->cmd_queue = (struct gxp_command *)gxp_dma_alloc_coherent(
- mailbox->gxp, vd->domain,
- sizeof(struct gxp_command) * MBOX_CMD_QUEUE_NUM_ENTRIES,
- &(mailbox->cmd_queue_device_addr), GFP_KERNEL, 0);
- if (!mailbox->cmd_queue)
- goto err_cmd_queue;
-
- mailbox->cmd_queue_size = MBOX_CMD_QUEUE_NUM_ENTRIES;
- mailbox->cmd_queue_tail = 0;
-
- /* Allocate and initialize the response queue */
- mailbox->resp_queue = (struct gxp_response *)gxp_dma_alloc_coherent(
- mailbox->gxp, vd->domain,
- sizeof(struct gxp_response) * MBOX_RESP_QUEUE_NUM_ENTRIES,
- &(mailbox->resp_queue_device_addr), GFP_KERNEL, 0);
- if (!mailbox->resp_queue)
- goto err_resp_queue;
-
- mailbox->resp_queue_size = MBOX_RESP_QUEUE_NUM_ENTRIES;
- mailbox->resp_queue_head = 0;
-
- /* Allocate and initialize the mailbox descriptor */
- mailbox->descriptor =
- (struct gxp_mailbox_descriptor *)gxp_dma_alloc_coherent(
- mailbox->gxp, vd->domain,
- sizeof(struct gxp_mailbox_descriptor),
- &(mailbox->descriptor_device_addr), GFP_KERNEL, 0);
- if (!mailbox->descriptor)
- goto err_descriptor;
-
- mailbox->descriptor->cmd_queue_device_addr =
- mailbox->cmd_queue_device_addr;
- mailbox->descriptor->resp_queue_device_addr =
- mailbox->resp_queue_device_addr;
- mailbox->descriptor->cmd_queue_size = mailbox->cmd_queue_size;
- mailbox->descriptor->resp_queue_size = mailbox->resp_queue_size;
-
- return 0;
-
-err_descriptor:
- gxp_dma_free_coherent(
- mailbox->gxp, vd->domain,
- sizeof(struct gxp_response) * mailbox->resp_queue_size,
- mailbox->resp_queue, mailbox->resp_queue_device_addr);
-err_resp_queue:
- gxp_dma_free_coherent(
- mailbox->gxp, vd->domain,
- sizeof(struct gxp_command) * mailbox->cmd_queue_size,
- mailbox->cmd_queue, mailbox->cmd_queue_device_addr);
-err_cmd_queue:
- return -ENOMEM;
-}
-
-static void gxp_mailbox_ops_release_resources(struct gxp_mailbox *mailbox,
- struct gxp_virtual_device *vd,
- uint virt_core)
-{
- gxp_dma_free_coherent(
- mailbox->gxp, vd->domain,
- sizeof(struct gxp_command) * mailbox->cmd_queue_size,
- mailbox->cmd_queue, mailbox->cmd_queue_device_addr);
- gxp_dma_free_coherent(
- mailbox->gxp, vd->domain,
- sizeof(struct gxp_response) * mailbox->resp_queue_size,
- mailbox->resp_queue, mailbox->resp_queue_device_addr);
- gxp_dma_free_coherent(mailbox->gxp, vd->domain,
- sizeof(struct gxp_mailbox_descriptor),
- mailbox->descriptor,
- mailbox->descriptor_device_addr);
-}
-
-static int
-gxp_mailbox_ops_init_consume_responses_work(struct gxp_mailbox *mailbox)
-{
- mailbox->cur_seq = 0;
- init_waitqueue_head(&mailbox->wait_list_waitq);
- INIT_LIST_HEAD(&mailbox->wait_list);
-
- return 0;
-}
-
-static void
-gxp_mailbox_ops_release_consume_responses_work(struct gxp_mailbox *mailbox)
-{
- struct gxp_mailbox_wait_list *cur, *nxt;
- struct gxp_async_response *async_resp;
- struct list_head resps_to_flush;
- unsigned long flags;
-
- /*
- * At this point only async responses should be pending. Flush them all
- * from the `wait_list` at once so any remaining timeout workers
- * waiting on `wait_list_lock` will know their responses have been
- * handled already.
- */
- INIT_LIST_HEAD(&resps_to_flush);
- mutex_lock(&mailbox->wait_list_lock);
- list_for_each_entry_safe (cur, nxt, &mailbox->wait_list, list) {
- list_del(&cur->list);
- if (cur->is_async) {
- list_add_tail(&cur->list, &resps_to_flush);
- /*
- * Clear the response's destination queue so that if the
- * timeout worker is running, it won't try to process
- * this response after `wait_list_lock` is released.
- */
- async_resp = container_of(
- cur->resp, struct gxp_async_response, resp);
- spin_lock_irqsave(async_resp->dest_queue_lock, flags);
- async_resp->dest_queue = NULL;
- spin_unlock_irqrestore(async_resp->dest_queue_lock,
- flags);
-
- } else {
- dev_warn(
- mailbox->gxp->dev,
- "Unexpected synchronous command pending on mailbox release\n");
- kfree(cur);
- }
- }
- mutex_unlock(&mailbox->wait_list_lock);
-
- /*
- * Cancel the timeout timer of and free any responses that were still in
- * the `wait_list` above.
- */
- list_for_each_entry_safe (cur, nxt, &resps_to_flush, list) {
- list_del(&cur->list);
- async_resp = container_of(cur->resp, struct gxp_async_response,
- resp);
- cancel_delayed_work_sync(&async_resp->timeout_work);
- kfree(async_resp);
- kfree(cur);
- }
-}
-
-/*
- * Pops the wait_list until the sequence number of @resp is found, and copies
- * @resp to the found entry.
- *
- * Entries in wait_list should have sequence number in increasing order, but
- * the responses arriving and being handled may be out-of-order.
- *
- * Iterate over the wait_list, comparing #cur->resp->seq with @resp->seq:
- * 1. #cur->resp->seq > @resp->seq:
- * - Nothing to do, either @resp is invalid or its command timed out.
- * - We're done.
- * 2. #cur->resp->seq == @resp->seq:
- * - Copy @resp, pop the head.
- * - If #cur->resp has a destination queue, push it to that queue
- * - We're done.
- * 3. #cur->resp->seq < @resp->seq:
- * - @resp has arrived out of sequence order.
- * - Leave #cur->resp in the wait_list.
- * - Keep iterating unless the list is exhausted.
- */
-static void gxp_mailbox_handle_response(struct gxp_mailbox *mailbox,
- const struct gxp_response *resp)
-{
- struct gxp_mailbox_wait_list *cur, *nxt;
- struct gxp_async_response *async_resp;
- unsigned long flags;
-
- mutex_lock(&mailbox->wait_list_lock);
-
- list_for_each_entry_safe (cur, nxt, &mailbox->wait_list, list) {
- if (cur->resp->seq > resp->seq) {
- /*
- * This response has already timed out and been removed
- * from the wait list (or this is an invalid response).
- * Drop it.
- */
- break;
- }
- if (cur->resp->seq == resp->seq) {
- memcpy(cur->resp, resp, sizeof(*resp));
- list_del(&cur->list);
- if (cur->is_async) {
- async_resp =
- container_of(cur->resp,
- struct gxp_async_response,
- resp);
-
- cancel_delayed_work(&async_resp->timeout_work);
- gxp_pm_update_requested_power_states(
- async_resp->mailbox->gxp,
- async_resp->requested_states,
- off_states);
-
- spin_lock_irqsave(async_resp->dest_queue_lock,
- flags);
-
- list_add_tail(&async_resp->list_entry,
- async_resp->dest_queue);
- /*
- * Marking the dest_queue as NULL indicates the
- * response was handled in case its timeout
- * handler fired between acquiring the
- * wait_list_lock and cancelling the timeout.
- */
- async_resp->dest_queue = NULL;
-
- /*
- * Don't release the dest_queue_lock until both
- * any eventfd has been signaled and any waiting
- * thread has been woken. Otherwise one thread
- * might consume and free the response before
- * this function is done with it.
- */
- if (async_resp->eventfd) {
- gxp_eventfd_signal(async_resp->eventfd);
- gxp_eventfd_put(async_resp->eventfd);
- }
-
- wake_up(async_resp->dest_queue_waitq);
-
- spin_unlock_irqrestore(
- async_resp->dest_queue_lock, flags);
- }
- kfree(cur);
- break;
- }
- }
-
- mutex_unlock(&mailbox->wait_list_lock);
-}
-
-/*
- * Fetches elements in the response queue.
- *
- * Returns the pointer of fetched response elements.
- * @total_ptr will be the number of elements fetched.
- *
- * Returns -ENOMEM if failed on memory allocation.
- * Returns NULL if the response queue is empty.
- */
-static struct gxp_response *
-gxp_mailbox_fetch_responses(struct gxp_mailbox *mailbox, u32 *total_ptr)
+#if !GXP_USE_LEGACY_MAILBOX
+int gxp_mailbox_send_cmd(struct gxp_mailbox *mailbox, void *cmd, void *resp)
{
- u32 head;
- u32 tail;
- u32 count;
- u32 i;
- u32 j;
- u32 total = 0;
- const u32 size = mailbox->resp_queue_size;
- const struct gxp_response *queue = mailbox->resp_queue;
- struct gxp_response *ret = NULL;
- struct gxp_response *prev_ptr = NULL;
-
- mutex_lock(&mailbox->resp_queue_lock);
-
- head = mailbox->resp_queue_head;
- /* loop until our head equals to CSR tail */
- while (1) {
- tail = gxp_mailbox_read_resp_queue_tail(mailbox);
- count = gxp_circ_queue_cnt(head, tail, size,
- CIRCULAR_QUEUE_WRAP_BIT);
- if (count == 0)
- break;
-
- prev_ptr = ret;
- ret = krealloc(prev_ptr, (total + count) * sizeof(*queue),
- GFP_KERNEL);
- /*
- * Out-of-memory, we can return the previously fetched responses
- * if any, or ENOMEM otherwise.
- */
- if (!ret) {
- if (!prev_ptr)
- ret = ERR_PTR(-ENOMEM);
- else
- ret = prev_ptr;
- break;
- }
- /* copy responses */
- j = CIRCULAR_QUEUE_REAL_INDEX(head, CIRCULAR_QUEUE_WRAP_BIT);
- for (i = 0; i < count; i++) {
- memcpy(&ret[total], &queue[j], sizeof(*queue));
- ret[total].status = GXP_RESP_OK;
- j = (j + 1) % size;
- total++;
- }
- head = gxp_circ_queue_inc(head, count, size,
- CIRCULAR_QUEUE_WRAP_BIT);
+ switch (mailbox->type) {
+ case GXP_MBOX_TYPE_GENERAL:
+ return gcip_mailbox_send_cmd(mailbox->mbx_impl.gcip_mbx, cmd,
+ resp);
+ case GXP_MBOX_TYPE_KCI:
+ return gcip_kci_send_cmd(mailbox->mbx_impl.gcip_kci, cmd);
}
- gxp_mailbox_inc_resp_queue_head_locked(mailbox, total,
- CIRCULAR_QUEUE_WRAP_BIT);
-
- mutex_unlock(&mailbox->resp_queue_lock);
- /*
- * Now that the response queue has been drained, send an interrupt
- * to the device in case firmware was waiting for us to consume
- * responses.
- */
- if (total == size) {
- /* TODO(b/190868834) define interrupt bits */
- gxp_mailbox_generate_device_interrupt(mailbox, BIT(0));
- }
-
- *total_ptr = total;
- return ret;
+ return -EOPNOTSUPP;
}
-/*
- * Fetches and handles responses, then wakes up threads that are waiting for a
- * response.
- *
- * Note: this worker is scheduled in the IRQ handler, to prevent use-after-free
- * or race-condition bugs, gxp_mailbox_release() must be called before free the
- * mailbox.
- */
-static void gxp_mailbox_ops_consume_responses_work(struct gxp_mailbox *mailbox)
+struct gcip_mailbox_async_response *
+gxp_mailbox_put_cmd(struct gxp_mailbox *mailbox, void *cmd, void *resp,
+ void *data)
{
- struct gxp_response *responses;
- u32 i;
- u32 count = 0;
-
- /* fetch responses and bump RESP_QUEUE_HEAD */
- responses = gxp_mailbox_fetch_responses(mailbox, &count);
- if (IS_ERR(responses)) {
- dev_err(mailbox->gxp->dev,
- "GXP Mailbox failed on fetching responses: %ld",
- PTR_ERR(responses));
- return;
- }
-
- for (i = 0; i < count; i++)
- gxp_mailbox_handle_response(mailbox, &responses[i]);
- /*
- * Responses handled, wake up threads that are waiting for a response.
- */
- wake_up(&mailbox->wait_list_waitq);
- kfree(responses);
-}
-
-/* Default operators for the DSP mailbox */
-struct gxp_mailbox_ops gxp_mailbox_default_ops = {
- .allocate_resources = gxp_mailbox_ops_allocate_resources,
- .release_resources = gxp_mailbox_ops_release_resources,
- .init_consume_responses_work =
- gxp_mailbox_ops_init_consume_responses_work,
- .release_consume_responses_work =
- gxp_mailbox_ops_release_consume_responses_work,
- .consume_responses_work = gxp_mailbox_ops_consume_responses_work,
-};
-
-/* Default arguments for the DSP mailbox */
-const struct gxp_mailbox_args gxp_mailbox_default_args = {
- .ops = &gxp_mailbox_default_ops,
- .data = NULL,
-};
-
-/*
- * Adds @resp to @mailbox->wait_list.
- *
- * wait_list is a FIFO queue, with sequence number in increasing order.
- *
- * Returns 0 on success, or -ENOMEM if failed on allocation.
- */
-static int gxp_mailbox_push_wait_resp(struct gxp_mailbox *mailbox,
- struct gxp_response *resp, bool is_async)
-{
- struct gxp_mailbox_wait_list *entry =
- kzalloc(sizeof(*entry), GFP_KERNEL);
-
- if (!entry)
- return -ENOMEM;
- entry->resp = resp;
- entry->is_async = is_async;
- mutex_lock(&mailbox->wait_list_lock);
- list_add_tail(&entry->list, &mailbox->wait_list);
- mutex_unlock(&mailbox->wait_list_lock);
-
- return 0;
-}
-
-/*
- * Removes the response previously pushed with gxp_mailbox_push_wait_resp().
- *
- * This is used when the kernel gives up waiting for the response.
- */
-static void gxp_mailbox_del_wait_resp(struct gxp_mailbox *mailbox,
- struct gxp_response *resp)
-{
- struct gxp_mailbox_wait_list *cur;
-
- mutex_lock(&mailbox->wait_list_lock);
-
- list_for_each_entry (cur, &mailbox->wait_list, list) {
- if (cur->resp->seq > resp->seq) {
- /*
- * Sequence numbers in wait_list are in increasing
- * order. This case implies no entry in the list
- * matches @resp's sequence number.
- */
- break;
- }
- if (cur->resp->seq == resp->seq) {
- list_del(&cur->list);
- kfree(cur);
- break;
- }
- }
-
- mutex_unlock(&mailbox->wait_list_lock);
-}
-
-static int gxp_mailbox_enqueue_cmd(struct gxp_mailbox *mailbox,
- struct gxp_command *cmd,
- struct gxp_response *resp,
- bool resp_is_async)
-{
- int ret;
- u32 tail;
- struct gxp_command *cmd_queue = mailbox->cmd_queue;
-
- mutex_lock(&mailbox->cmd_queue_lock);
-
- cmd->seq = mailbox->cur_seq;
- /*
- * The lock ensures mailbox->cmd_queue_tail cannot be changed by
- * other processes (this method should be the only one to modify the
- * value of tail), therefore we can remember its value here and use it
- * in various places below.
- */
- tail = mailbox->cmd_queue_tail;
-
- /*
- * If the cmd queue is full, it's up to the caller to retry.
- */
- if (gxp_mailbox_read_cmd_queue_head(mailbox) ==
- (tail ^ CIRCULAR_QUEUE_WRAP_BIT)) {
- ret = -EAGAIN;
- goto out;
- }
-
- if (resp) {
- /*
- * Add @resp to the wait_list only if the cmd can be pushed
- * successfully.
- */
- resp->seq = cmd->seq;
- resp->status = GXP_RESP_WAITING;
- ret = gxp_mailbox_push_wait_resp(mailbox, resp, resp_is_async);
- if (ret)
- goto out;
- }
- /* size of cmd_queue is a multiple of sizeof(*cmd) */
- memcpy(cmd_queue +
- CIRCULAR_QUEUE_REAL_INDEX(tail, CIRCULAR_QUEUE_WRAP_BIT),
- cmd, sizeof(*cmd));
- gxp_mailbox_inc_cmd_queue_tail_locked(mailbox, 1,
- CIRCULAR_QUEUE_WRAP_BIT);
- /* triggers doorbell */
- /* TODO(b/190868834) define interrupt bits */
- gxp_mailbox_generate_device_interrupt(mailbox, BIT(0));
- /* bumps sequence number after the command is sent */
- mailbox->cur_seq++;
- ret = 0;
-out:
- mutex_unlock(&mailbox->cmd_queue_lock);
- if (ret)
- dev_err(mailbox->gxp->dev, "%s: ret=%d", __func__, ret);
-
- return ret;
-}
-
-void gxp_mailbox_init(struct gxp_mailbox_manager *mgr)
-{
- gxp_mailbox_manager_set_ops(mgr);
-}
-
-int gxp_mailbox_execute_cmd(struct gxp_mailbox *mailbox,
- struct gxp_command *cmd, struct gxp_response *resp)
-{
- int ret;
-
- ret = gxp_mailbox_enqueue_cmd(mailbox, cmd, resp,
- /* resp_is_async = */ false);
- if (ret)
- return ret;
- ret = wait_event_timeout(mailbox->wait_list_waitq,
- resp->status != GXP_RESP_WAITING,
- msecs_to_jiffies(MAILBOX_TIMEOUT));
- if (!ret) {
- dev_notice(mailbox->gxp->dev, "%s: event wait timeout",
- __func__);
- gxp_mailbox_del_wait_resp(mailbox, resp);
- return -ETIMEDOUT;
- }
- if (resp->status != GXP_RESP_OK) {
- dev_notice(mailbox->gxp->dev, "%s: resp status=%u", __func__,
- resp->status);
- return -ENOMSG;
- }
-
- return resp->retval;
-}
-
-static void async_cmd_timeout_work(struct work_struct *work)
-{
- struct gxp_async_response *async_resp = container_of(
- work, struct gxp_async_response, timeout_work.work);
- unsigned long flags;
-
- /*
- * This function will acquire the mailbox wait_list_lock. This means if
- * response processing is in progress, it will complete before this
- * response can be removed from the wait list.
- *
- * Once this function has the wait_list_lock, no future response
- * processing will begin until this response has been removed.
- */
- gxp_mailbox_del_wait_resp(async_resp->mailbox, &async_resp->resp);
-
- /*
- * Check if this response still has a valid destination queue, in case
- * an in-progress call to `gxp_mailbox_handle_response()` completed
- * the response while `gxp_mailbox_del_wait_resp()` was waiting for
- * the wait_list_lock.
- */
- spin_lock_irqsave(async_resp->dest_queue_lock, flags);
- if (async_resp->dest_queue) {
- async_resp->resp.status = GXP_RESP_CANCELLED;
- list_add_tail(&async_resp->list_entry, async_resp->dest_queue);
- spin_unlock_irqrestore(async_resp->dest_queue_lock, flags);
-
- gxp_pm_update_requested_power_states(
- async_resp->mailbox->gxp, async_resp->requested_states,
- off_states);
-
- if (async_resp->eventfd) {
- gxp_eventfd_signal(async_resp->eventfd);
- gxp_eventfd_put(async_resp->eventfd);
- }
-
- wake_up(async_resp->dest_queue_waitq);
- } else {
- spin_unlock_irqrestore(async_resp->dest_queue_lock, flags);
+ switch (mailbox->type) {
+ case GXP_MBOX_TYPE_GENERAL:
+ return gcip_mailbox_put_cmd(mailbox->mbx_impl.gcip_mbx, cmd,
+ resp, data);
+ default:
+ break;
}
+ return ERR_PTR(-EOPNOTSUPP);
}
-
-int gxp_mailbox_execute_cmd_async(struct gxp_mailbox *mailbox,
- struct gxp_command *cmd,
- struct list_head *resp_queue,
- spinlock_t *queue_lock,
- wait_queue_head_t *queue_waitq,
- struct gxp_power_states power_states,
- struct gxp_eventfd *eventfd)
-{
- struct gxp_async_response *async_resp;
- int ret;
-
- async_resp = kzalloc(sizeof(*async_resp), GFP_KERNEL);
- if (!async_resp)
- return -ENOMEM;
-
- async_resp->mailbox = mailbox;
- async_resp->dest_queue = resp_queue;
- async_resp->dest_queue_lock = queue_lock;
- async_resp->dest_queue_waitq = queue_waitq;
- async_resp->requested_states = power_states;
- if (eventfd && gxp_eventfd_get(eventfd))
- async_resp->eventfd = eventfd;
- else
- async_resp->eventfd = NULL;
-
- INIT_DELAYED_WORK(&async_resp->timeout_work, async_cmd_timeout_work);
- schedule_delayed_work(&async_resp->timeout_work,
- msecs_to_jiffies(MAILBOX_TIMEOUT));
-
- gxp_pm_update_requested_power_states(mailbox->gxp, off_states,
- power_states);
- ret = gxp_mailbox_enqueue_cmd(mailbox, cmd, &async_resp->resp,
- /* resp_is_async = */ true);
- if (ret)
- goto err_free_resp;
-
- return 0;
-
-err_free_resp:
- gxp_pm_update_requested_power_states(mailbox->gxp, power_states,
- off_states);
- cancel_delayed_work_sync(&async_resp->timeout_work);
- kfree(async_resp);
- return ret;
-}
-#endif /* !GXP_HAS_DCI */
+#endif /* !GXP_USE_LEGACY_MAILBOX */
diff --git a/gxp-mailbox.h b/gxp-mailbox.h
index 8760ccf..2576c55 100644
--- a/gxp-mailbox.h
+++ b/gxp-mailbox.h
@@ -10,9 +10,16 @@
#include <linux/kthread.h>
#include "gxp-client.h"
+#include "gxp-config.h" /* GXP_USE_LEGACY_MAILBOX */
+#include "gxp-dma.h"
#include "gxp-internal.h"
#include "gxp-mailbox-manager.h"
+#if !GXP_USE_LEGACY_MAILBOX
+#include <gcip/gcip-kci.h>
+#include <gcip/gcip-mailbox.h>
+#endif
+
/*
* Offset from the host mailbox interface to the device interface that needs to
* be mapped.
@@ -52,90 +59,19 @@ enum gxp_mailbox_command_code {
GXP_MBOX_CODE_SUSPEND_REQUEST = 1,
};
-/*
- * Basic Buffer descriptor struct for message payloads.
- * TODO(b/237908534): this will be used only in the old gxp.
- */
-struct buffer_descriptor {
- /* Address in the device's virtual address space. */
- u64 address;
- /* Size in bytes. */
- u32 size;
- /* Flags can be used to indicate message type, etc. */
- u32 flags;
-};
-
-/*
- * Format used for mailbox command queues.
- * TODO(b/237908534): this will be used only in the old gxp.
- */
-struct gxp_command {
- /* Sequence number. Should match the corresponding response. */
- u64 seq;
+enum gxp_mailbox_type {
/*
- * Identifies the type of command.
- * Should be a value from `gxp_mailbox_command_code`
+	 * The mailbox utilizes `gcip-mailbox.h` internally.
+	 * (Note: When `GXP_USE_LEGACY_MAILBOX` is set, it utilizes
+	 * `gxp-mailbox-impl.h` instead.)
+	 * Mostly used for handling user commands.
*/
- u16 code;
+ GXP_MBOX_TYPE_GENERAL = 0,
/*
- * Priority level from 0 to 99, with 0 being the highest. Pending
- * commands with higher priorities will be executed before lower
- * priority ones.
+	 * The mailbox utilizes `gcip-kci.h` internally.
+	 * Used for handling kernel commands.
*/
- u8 priority;
- /*
- * Insert spaces to make padding explicit. This does not affect
- * alignment.
- */
- u8 reserved[5];
- /* Struct describing the buffer containing the message payload */
- struct buffer_descriptor buffer_descriptor;
-};
-
-/*
- * Format used for mailbox response queues from kernel.
- * TODO(b/237908534): this will be used only in the old gxp.
- */
-struct gxp_response {
- /* Sequence number. Should match the corresponding command. */
- u64 seq;
- /* The status code. Either SUCCESS or an error. */
- u16 status;
- /* Padding. */
- u16 reserved;
- /* Return value, dependent on the command this responds to. */
- u32 retval;
-};
-
-/*
- * Wrapper struct for responses consumed by a thread other than the one which
- * sent the command.
- * TODO(b/237908534): this will be used only in the old gxp.
- */
-struct gxp_async_response {
- struct list_head list_entry;
- struct gxp_response resp;
- /* TODO(b/237908534): this will be used only in the old gxp. */
- struct delayed_work timeout_work;
- /*
- * If this response times out, this pointer to the owning mailbox is
- * needed to delete this response from the list of pending responses.
- */
- struct gxp_mailbox *mailbox;
- /* Queue to add the response to once it is complete or timed out */
- struct list_head *dest_queue;
- /*
- * The lock that protects queue pointed to by `dest_queue`.
- * The mailbox code also uses this lock to protect changes to the
- * `dest_queue` pointer itself when processing this response.
- */
- spinlock_t *dest_queue_lock;
- /* Queue of clients to notify when this response is processed */
- wait_queue_head_t *dest_queue_waitq;
- /* Specified power states vote during the command execution */
- struct gxp_power_states requested_states;
- /* gxp_eventfd to signal when the response completes. May be NULL */
- struct gxp_eventfd *eventfd;
+ GXP_MBOX_TYPE_KCI = 1,
};
enum gxp_response_status {
@@ -144,13 +80,6 @@ enum gxp_response_status {
GXP_RESP_CANCELLED = 2,
};
-/* TODO(b/237908534): this will be used only in the old gxp. */
-struct gxp_mailbox_wait_list {
- struct list_head list;
- struct gxp_response *resp;
- bool is_async;
-};
-
/* Mailbox Structures */
struct gxp_mailbox_descriptor {
u64 cmd_queue_device_addr;
@@ -163,15 +92,11 @@ struct gxp_mailbox;
/*
* Defines the callback functions which are used by the mailbox.
- *
- * These callbacks will be called in this order:
- * `gxp_mailbox_alloc` -> `allocate_resources` -> `init_consume_responses_work`
- * -> ... -> `consume_responses_work` -> ...
- * `gxp_mailbox_release` -> `release_consume_responses_work` -> `release_resources`
*/
struct gxp_mailbox_ops {
/*
* Allocates resources such as cmd_queue and resp_queue which are used by the mailbox.
+	 * This callback is called internally by `gxp_mailbox_alloc`.
* Following variables should be set in this callback.
* - @mailbox->cmd_queue : the pointer of the command queue.
* - @mailbox->cmd_queue_size : the size of @mailbox->cmd_queue. (the maximum number of
@@ -200,47 +125,34 @@ struct gxp_mailbox_ops {
uint virt_core);
/*
* Releases resources which are allocated by `allocate_resources`.
+	 * This callback is called internally by `gxp_mailbox_release`.
* Context: normal.
*/
void (*release_resources)(struct gxp_mailbox *mailbox,
struct gxp_virtual_device *vd,
uint virt_core);
+#if !GXP_USE_LEGACY_MAILBOX
/*
- * Initializes consuming the resp_queue of the mailbox. This prepares the data which is
- * needed in the `consume_responses_work` callback before starting consuming. That data
- * should be released in the `release_consume_responses_work` callback. This function will
- * be called when the `gxp_mailbox_alloc` is called. (After the information of cmd_queue,
- * resp_queue and descriptor are written to the CSRs and before the mailbox registers to
- * the IRQ.)
- * Returns 0 if succeed.
- * Context: normal.
- */
- int (*init_consume_responses_work)(struct gxp_mailbox *mailbox);
- /*
- * Cleans up consuming the resp_queue of the mailbox. This releases the data which is
- * allocated by the `init_consume_responses_work` callback. This function will be called
- * when the `gxp_mailbox_release` is called. (After the mailbox unregisters from the IRQ
- * and before reset the mailbox hw.)
- * Context: normal.
- */
- void (*release_consume_responses_work)(struct gxp_mailbox *mailbox);
- /*
- * Consumes the resp_queue of the mailbox. This function will be called when the mailbox
- * IRQ is fired.
- * Context: in_interrupt().
+	 * Operators that depend on GCIP, selected according to the mailbox type.
+ * - GXP_MBOX_TYPE_GENERAL: @gcip_ops.mbx must be defined.
+ * - GXP_MBOX_TYPE_KCI: @gcip_ops.kci must be defined.
*/
- void (*consume_responses_work)(struct gxp_mailbox *mailbox);
+ union {
+ const struct gcip_mailbox_ops *mbx;
+ const struct gcip_kci_ops *kci;
+ } gcip_ops;
+#endif
};
struct gxp_mailbox_args {
+ enum gxp_mailbox_type type;
struct gxp_mailbox_ops *ops;
+ u64 queue_wrap_bit;
+ u32 cmd_elem_size;
+ u32 resp_elem_size;
void *data;
};
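+
+/*
+ * For illustration, a GENERAL mailbox built on GCIP might be allocated with
+ * arguments along these lines. This is a minimal sketch: `my_mbx_ops`,
+ * `my_gcip_mbx_ops`, the element structs and the wrap-bit value are all
+ * hypothetical.
+ *
+ *	static struct gxp_mailbox_ops my_mbx_ops = {
+ *		.allocate_resources = my_allocate_resources,
+ *		.release_resources = my_release_resources,
+ *		.gcip_ops.mbx = &my_gcip_mbx_ops,	// required for GENERAL
+ *	};
+ *
+ *	const struct gxp_mailbox_args args = {
+ *		.type = GXP_MBOX_TYPE_GENERAL,
+ *		.ops = &my_mbx_ops,
+ *		.queue_wrap_bit = BIT(10),	// illustrative; must match queue depth
+ *		.cmd_elem_size = sizeof(struct my_cmd),
+ *		.resp_elem_size = sizeof(struct my_resp),
+ *		.data = my_private_data,
+ *	};
+ *
+ *	mbx = gxp_mailbox_alloc(mgr, vd, virt_core, core_id, &args);
+ */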
-#ifndef GXP_HAS_DCI
-extern const struct gxp_mailbox_args gxp_mailbox_default_args;
-#endif
-
#define GXP_MAILBOX_INT_BIT_COUNT 16
struct gxp_mailbox {
@@ -258,43 +170,52 @@ struct gxp_mailbox {
/* Protects to_host_poll_task while it holds a sync barrier */
struct mutex polling_lock;
- /* TODO(b/237908534): this will be used only in the old gxp. */
- u64 cur_seq;
+	u64 queue_wrap_bit; /* wrap bit for both cmd and resp queues */
+
+	u32 cmd_elem_size; /* size of a cmd queue element */
+ struct gxp_coherent_buf descriptor_buf;
struct gxp_mailbox_descriptor *descriptor;
- dma_addr_t descriptor_device_addr;
- void *cmd_queue;
+ struct gxp_coherent_buf cmd_queue_buf;
u32 cmd_queue_size; /* size of cmd queue */
u32 cmd_queue_tail; /* offset within the cmd queue */
- dma_addr_t cmd_queue_device_addr; /* device address for cmd queue */
struct mutex cmd_queue_lock; /* protects cmd_queue */
- void *resp_queue;
+	u32 resp_elem_size; /* size of a resp queue element */
+ struct gxp_coherent_buf resp_queue_buf;
u32 resp_queue_size; /* size of resp queue */
u32 resp_queue_head; /* offset within the resp queue */
- dma_addr_t resp_queue_device_addr; /* device address for resp queue */
struct mutex resp_queue_lock; /* protects resp_queue */
- /*
- * add to this list if a command needs to wait for a response
- * TODO(b/237908534): this will be used only in the old gxp.
- */
- struct list_head wait_list;
/* commands which need to wait for responses will be added to the wait_list */
struct mutex wait_list_lock; /* protects wait_list */
- /*
- * queue for waiting for the wait_list to be consumed
- * TODO(b/237908534): this will be used only in the old gxp.
- */
- wait_queue_head_t wait_list_waitq;
/* to create our own realtime worker for handling responses */
struct kthread_worker response_worker;
struct task_struct *response_thread;
struct kthread_work response_work;
+ enum gxp_mailbox_type type;
struct gxp_mailbox_ops *ops;
void *data; /* private data */
+
+#if GXP_USE_LEGACY_MAILBOX
+ u64 cur_seq;
+ /* add to this list if a command needs to wait for a response */
+ struct list_head wait_list;
+ /* queue for waiting for the wait_list to be consumed */
+ wait_queue_head_t wait_list_waitq;
+#else /* !GXP_USE_LEGACY_MAILBOX */
+ /*
+ * Implementation of the mailbox according to the type.
+ * - GXP_MBOX_TYPE_GENERAL: @gcip_mbx will be allocated.
+ * - GXP_MBOX_TYPE_KCI: @gcip_kci will be allocated.
+ */
+ union {
+ struct gcip_mailbox *gcip_mbx;
+ struct gcip_kci *gcip_kci;
+ } mbx_impl;
+#endif /* GXP_USE_LEGACY_MAILBOX */
};
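+
+/*
+ * A note on queue_wrap_bit for readers new to the scheme: the head and tail
+ * indices carry one extra "wrap" bit above the queue index range, which
+ * distinguishes a full queue from an empty one without sacrificing a slot.
+ * A minimal sketch of the invariant, mirroring the legacy CIRCULAR_QUEUE_*
+ * helpers (names here are illustrative):
+ *
+ *	// Same index and same wrap phase: nothing queued.
+ *	bool queue_empty(u32 head, u32 tail)
+ *	{
+ *		return head == tail;
+ *	}
+ *
+ *	// Same index but opposite wrap phase: every slot is in use.
+ *	bool queue_full(u32 head, u32 tail, u32 wrap_bit)
+ *	{
+ *		return head == (tail ^ wrap_bit);
+ *	}
+ */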
/* Mailbox APIs */
@@ -303,39 +224,26 @@ extern int gxp_mbx_timeout;
#define MAILBOX_TIMEOUT (gxp_mbx_timeout * GXP_TIME_DELAY_FACTOR)
/*
- * The following functions all require their caller have locked gxp->vd_semaphore for reading. To
- * communicate with the firmware, the platform device should use the callbacks of the mailbox
- * manager instead of calling these functions directly. The purpose of it is to abstract the usage
- * of the mailbox and avoid effort of fixing the codes outside of the mailbox when the interface of
- * these functions are updated. (Except `gxp_mailbox_{register,unregister}_interrupt_handler`
- * functions.)
+ * The following functions are the low-level interfaces of the mailbox. The actual work is
+ * implemented by the high-level interfaces such as DCI, UCI, and KCI via the callbacks defined
+ * above. Therefore, do not call these functions directly
+ * (except the `gxp_mailbox_{register,unregister}_interrupt_handler` functions).
+ *
+ * If the mailbox implementation interacts with virtual cores, the caller must
+ * hold gxp->vd_semaphore for reading.
*/
struct gxp_mailbox *gxp_mailbox_alloc(struct gxp_mailbox_manager *mgr,
struct gxp_virtual_device *vd,
uint virt_core, u8 core_id,
const struct gxp_mailbox_args *args);
+
void gxp_mailbox_release(struct gxp_mailbox_manager *mgr,
struct gxp_virtual_device *vd, uint virt_core,
struct gxp_mailbox *mailbox);
void gxp_mailbox_reset(struct gxp_mailbox *mailbox);
-#ifndef GXP_HAS_DCI
-void gxp_mailbox_init(struct gxp_mailbox_manager *mgr);
-
-int gxp_mailbox_execute_cmd(struct gxp_mailbox *mailbox,
- struct gxp_command *cmd, struct gxp_response *resp);
-
-int gxp_mailbox_execute_cmd_async(struct gxp_mailbox *mailbox,
- struct gxp_command *cmd,
- struct list_head *resp_queue,
- spinlock_t *queue_lock,
- wait_queue_head_t *queue_waitq,
- struct gxp_power_states power_states,
- struct gxp_eventfd *eventfd);
-#endif
-
int gxp_mailbox_register_interrupt_handler(struct gxp_mailbox *mailbox,
u32 int_bit,
struct work_struct *handler);
@@ -343,4 +251,23 @@ int gxp_mailbox_register_interrupt_handler(struct gxp_mailbox *mailbox,
int gxp_mailbox_unregister_interrupt_handler(struct gxp_mailbox *mailbox,
u32 int_bit);
+#if !GXP_USE_LEGACY_MAILBOX
+/*
+ * Executes a command synchronously. If @resp is not NULL, the response is copied to it.
+ * See `gcip_mailbox_send_cmd` in `gcip-mailbox.h` or `gcip_kci_send_cmd` in `gcip-kci.h`
+ * for details.
+ */
+int gxp_mailbox_send_cmd(struct gxp_mailbox *mailbox, void *cmd, void *resp);
+
+/*
+ * Executes a command asynchronously. The response will be written to @resp.
+ * See the `gcip_mailbox_put_cmd` function in `gcip-mailbox.h` for details.
+ *
+ * Note: KCI doesn't support asynchronous requests.
+ */
+struct gcip_mailbox_async_response *
+gxp_mailbox_put_cmd(struct gxp_mailbox *mailbox, void *cmd, void *resp,
+ void *data);
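+
+/*
+ * To make the intended call pattern concrete, a caller holding a GENERAL
+ * mailbox might drive these entry points roughly as follows. This is a
+ * sketch only: the command/response structs are hypothetical, and the
+ * returned awaiter must eventually be released per the GCIP conventions
+ * (see `gcip_mailbox_release_async_resp`).
+ *
+ *	struct my_cmd cmd = { ... };
+ *	struct my_resp resp;
+ *	struct gcip_mailbox_async_response *awaiter;
+ *
+ *	// Synchronous: blocks until the response arrives or times out.
+ *	ret = gxp_mailbox_send_cmd(mbx, &cmd, &resp);
+ *
+ *	// Asynchronous: returns immediately; @resp is filled in later and
+ *	// the arrived/timedout callbacks of the gcip_mailbox_ops fire.
+ *	awaiter = gxp_mailbox_put_cmd(mbx, &cmd, &resp, my_private_data);
+ *	if (IS_ERR(awaiter))
+ *		ret = PTR_ERR(awaiter);
+ */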
+#endif /* !GXP_USE_LEGACY_MAILBOX */
+
#endif /* __GXP_MAILBOX_H__ */
diff --git a/gxp-mapping.c b/gxp-mapping.c
index c81f282..0188fad 100644
--- a/gxp-mapping.c
+++ b/gxp-mapping.c
@@ -56,7 +56,7 @@ static void destroy_mapping(struct gxp_mapping *mapping)
}
struct gxp_mapping *gxp_mapping_create(struct gxp_dev *gxp,
- struct iommu_domain *domain,
+ struct gxp_iommu_domain *domain,
u64 user_address, size_t size, u32 flags,
enum dma_data_direction dir)
{
diff --git a/gxp-mapping.h b/gxp-mapping.h
index 2ff4e44..18454e6 100644
--- a/gxp-mapping.h
+++ b/gxp-mapping.h
@@ -27,7 +27,7 @@ struct gxp_mapping {
*/
u64 host_address;
struct gxp_dev *gxp;
- struct iommu_domain *domain;
+ struct gxp_iommu_domain *domain;
/*
* `device_address` and `size` are the base address and size of the
* user buffer a mapping represents.
@@ -74,7 +74,7 @@ struct gxp_mapping {
* to map the buffer for the device.
*/
struct gxp_mapping *gxp_mapping_create(struct gxp_dev *gxp,
- struct iommu_domain *domain,
+ struct gxp_iommu_domain *domain,
u64 user_address, size_t size, u32 flags,
enum dma_data_direction dir);
diff --git a/gxp-mcu-firmware.c b/gxp-mcu-firmware.c
index d2bcd99..f1f2d91 100644
--- a/gxp-mcu-firmware.c
+++ b/gxp-mcu-firmware.c
@@ -141,6 +141,7 @@ static int gxp_mcu_firmware_handshake(struct gxp_mcu_firmware *mcu_fw)
struct gxp_dev *gxp = mcu_fw->gxp;
struct gxp_mcu *mcu = container_of(mcu_fw, struct gxp_mcu, fw);
enum gcip_fw_flavor fw_flavor;
+ int ret;
dev_dbg(gxp->dev, "Detecting MCU firmware info...");
mcu_fw->fw_info.fw_build_time = 0;
@@ -162,6 +163,11 @@ static int gxp_mcu_firmware_handshake(struct gxp_mcu_firmware *mcu_fw)
gxp_bpm_stop(gxp, GXP_MCU_CORE_ID);
dev_notice(gxp->dev, "MCU Instruction read transactions: 0x%x\n",
gxp_bpm_read_counter(gxp, GXP_MCU_CORE_ID, INST_BPM_OFFSET));
+
+ ret = gxp_mcu_telemetry_kci(mcu);
+ if (ret)
+ dev_warn(gxp->dev, "telemetry KCI error: %d", ret);
+
return 0;
}
@@ -172,10 +178,21 @@ static void gxp_mcu_firmware_stop_locked(struct gxp_mcu_firmware *mcu_fw)
int ret;
lockdep_assert_held(&mcu_fw->lock);
+
+ gxp_lpm_enable_state(gxp, GXP_MCU_CORE_ID, LPM_PG_STATE);
+
+ /* Clear doorbell to refuse non-expected interrupts */
+ gxp_doorbell_clear(gxp, CORE_WAKEUP_DOORBELL(GXP_MCU_CORE_ID));
+
ret = gxp_kci_shutdown(&mcu->kci);
if (ret)
dev_warn(gxp->dev, "KCI shutdown failed: %d", ret);
- gxp_lpm_down(gxp, GXP_MCU_CORE_ID);
+
+ if (!gxp_lpm_wait_state_eq(gxp, GXP_MCU_CORE_ID, LPM_PG_STATE))
+ dev_warn(gxp->dev,
+			 "MCU PSM transition to PS3 failed, current state: %u\n",
+ gxp_lpm_get_state(gxp, GXP_MCU_CORE_ID));
+
gxp_mcu_firmware_unload_locked(mcu_fw);
}
@@ -400,6 +417,8 @@ int gxp_mcu_firmware_init(struct gxp_dev *gxp, struct gxp_mcu_firmware *mcu_fw)
void gxp_mcu_firmware_exit(struct gxp_mcu_firmware *mcu_fw)
{
+ if (IS_GXP_TEST && (!mcu_fw || !mcu_fw->gxp))
+ return;
device_remove_group(mcu_fw->gxp->dev, &firmware_attr_group);
}
diff --git a/gxp-mcu-telemetry.c b/gxp-mcu-telemetry.c
new file mode 100644
index 0000000..6dcdae6
--- /dev/null
+++ b/gxp-mcu-telemetry.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * GXP MCU telemetry support
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <gcip/gcip-telemetry.h>
+
+#include "gxp-internal.h"
+#include "gxp-kci.h"
+#include "gxp-mcu-telemetry.h"
+#include "gxp-mcu.h"
+#include "gxp-notification.h"
+
+static struct gcip_telemetry *
+select_telemetry(struct gxp_mcu_telemetry_ctx *ctx,
+ enum gcip_telemetry_type type)
+{
+ switch (type) {
+ case GCIP_TELEMETRY_TRACE:
+ return &ctx->trace;
+ case GCIP_TELEMETRY_LOG:
+ return &ctx->log;
+ default:
+ WARN_ONCE(1, "Unrecognized GCIP telemetry type: %d", type);
+ /* return a valid object, don't crash the kernel */
+ return &ctx->log;
+ }
+}
+
+static struct gxp_mapped_resource *
+select_telemetry_mem(struct gxp_mcu_telemetry_ctx *ctx,
+ enum gcip_telemetry_type type)
+{
+ switch (type) {
+ case GCIP_TELEMETRY_TRACE:
+ return &ctx->trace_mem;
+ case GCIP_TELEMETRY_LOG:
+ return &ctx->log_mem;
+ default:
+ WARN_ONCE(1, "Unrecognized GCIP telemetry type: %d", type);
+ /* return a valid object, don't crash the kernel */
+ return &ctx->log_mem;
+ }
+}
+
+int gxp_mcu_telemetry_init(struct gxp_mcu *mcu)
+{
+ struct gxp_mcu_telemetry_ctx *tel = &mcu->telemetry;
+ int ret;
+
+ ret = gxp_mcu_mem_alloc_data(mcu, &tel->log_mem,
+ GXP_MCU_TELEMETRY_LOG_BUFFER_SIZE);
+ if (ret)
+ return ret;
+
+ ret = gcip_telemetry_init(mcu->gxp->dev, &tel->log, "telemetry_log",
+ tel->log_mem.vaddr,
+ GXP_MCU_TELEMETRY_LOG_BUFFER_SIZE,
+ gcip_telemetry_fw_log);
+ if (ret)
+ goto free_log_mem;
+
+ ret = gxp_mcu_mem_alloc_data(mcu, &tel->trace_mem,
+ GXP_MCU_TELEMETRY_TRACE_BUFFER_SIZE);
+ if (ret)
+ goto uninit_log;
+
+ ret = gcip_telemetry_init(mcu->gxp->dev, &tel->trace, "telemetry_trace",
+ tel->trace_mem.vaddr,
+ GXP_MCU_TELEMETRY_TRACE_BUFFER_SIZE,
+ gcip_telemetry_fw_trace);
+ if (ret)
+ goto free_trace_mem;
+
+ return 0;
+
+free_trace_mem:
+ gxp_mcu_mem_free_data(mcu, &tel->trace_mem);
+
+uninit_log:
+ gcip_telemetry_exit(&tel->log);
+
+free_log_mem:
+ gxp_mcu_mem_free_data(mcu, &tel->log_mem);
+
+ return ret;
+}
+
+void gxp_mcu_telemetry_exit(struct gxp_mcu *mcu)
+{
+ gxp_mcu_mem_free_data(mcu, &mcu->telemetry.trace_mem);
+ gcip_telemetry_exit(&mcu->telemetry.trace);
+ gxp_mcu_mem_free_data(mcu, &mcu->telemetry.log_mem);
+ gcip_telemetry_exit(&mcu->telemetry.log);
+}
+
+void gxp_mcu_telemetry_irq_handler(struct gxp_mcu *mcu)
+{
+ struct gxp_mcu_telemetry_ctx *tel = &mcu->telemetry;
+
+ gcip_telemetry_irq_handler(&tel->log);
+ gcip_telemetry_irq_handler(&tel->trace);
+}
+
+int gxp_mcu_telemetry_kci(struct gxp_mcu *mcu)
+{
+ struct gcip_telemetry_kci_args args = {
+ .kci = mcu->kci.mbx->mbx_impl.gcip_kci,
+ .addr = mcu->telemetry.log_mem.daddr,
+ .size = mcu->telemetry.log_mem.size,
+ };
+ int ret;
+
+ ret = gcip_telemetry_kci(&mcu->telemetry.log,
+ gxp_kci_map_mcu_log_buffer, &args);
+ if (ret)
+ return ret;
+
+	args.addr = mcu->telemetry.trace_mem.daddr;
+	args.size = mcu->telemetry.trace_mem.size;
+ ret = gcip_telemetry_kci(&mcu->telemetry.trace,
+ gxp_kci_map_mcu_trace_buffer, &args);
+
+ return ret;
+}
+
+int gxp_mcu_telemetry_register_eventfd(struct gxp_mcu *mcu,
+ enum gcip_telemetry_type type,
+ u32 eventfd)
+{
+ int ret;
+
+ ret = gcip_telemetry_set_event(select_telemetry(&mcu->telemetry, type),
+ eventfd);
+ if (ret)
+ gxp_mcu_telemetry_unregister_eventfd(mcu, type);
+
+ return ret;
+}
+
+int gxp_mcu_telemetry_unregister_eventfd(struct gxp_mcu *mcu,
+ enum gcip_telemetry_type type)
+{
+ gcip_telemetry_unset_event(select_telemetry(&mcu->telemetry, type));
+
+ return 0;
+}
+
+struct telemetry_vma_data {
+ struct gcip_telemetry *tel;
+ refcount_t ref_count;
+};
+
+static void telemetry_vma_open(struct vm_area_struct *vma)
+{
+ struct telemetry_vma_data *vma_data =
+ (struct telemetry_vma_data *)vma->vm_private_data;
+ struct gcip_telemetry *tel = vma_data->tel;
+
+ WARN_ON_ONCE(!refcount_inc_not_zero(&vma_data->ref_count));
+ gcip_telemetry_inc_mmap_count(tel, 1);
+}
+
+static void telemetry_vma_close(struct vm_area_struct *vma)
+{
+ struct telemetry_vma_data *vma_data =
+ (struct telemetry_vma_data *)vma->vm_private_data;
+ struct gcip_telemetry *tel = vma_data->tel;
+
+ gcip_telemetry_inc_mmap_count(tel, -1);
+ if (refcount_dec_and_test(&vma_data->ref_count))
+ kfree(vma_data);
+}
+
+static const struct vm_operations_struct telemetry_vma_ops = {
+ .open = telemetry_vma_open,
+ .close = telemetry_vma_close,
+};
+
+struct gxp_mcu_telemetry_mmap_args {
+ struct gcip_telemetry *tel;
+ struct gxp_mapped_resource *mem;
+ struct vm_area_struct *vma;
+};
+
+static int telemetry_mmap_buffer(void *args)
+{
+ struct gxp_mcu_telemetry_mmap_args *data = args;
+ struct gcip_telemetry *tel = data->tel;
+ struct gxp_mapped_resource *mem = data->mem;
+ struct vm_area_struct *vma = data->vma;
+ struct telemetry_vma_data *vma_data =
+ kmalloc(sizeof(*vma_data), GFP_KERNEL);
+ unsigned long orig_pgoff = vma->vm_pgoff;
+ int ret;
+
+
+	/* kmalloc() may fail; bail out before dereferencing vma_data. */
+	if (!vma_data)
+		return -ENOMEM;
+
+	vma_data->tel = tel;
+
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_pgoff = 0;
+
+ if (mem->size > vma->vm_end - vma->vm_start) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = remap_pfn_range(vma, vma->vm_start, mem->paddr >> PAGE_SHIFT,
+ mem->size, vma->vm_page_prot);
+ vma->vm_pgoff = orig_pgoff;
+
+out:
+ if (ret) {
+ kfree(vma_data);
+ } else {
+ vma->vm_ops = &telemetry_vma_ops;
+ vma->vm_private_data = vma_data;
+ }
+
+ return ret;
+}
+
+int gxp_mcu_telemetry_mmap_buffer(struct gxp_mcu *mcu,
+ enum gcip_telemetry_type type,
+ struct vm_area_struct *vma)
+{
+ struct gxp_mcu_telemetry_mmap_args args = {
+ .mem = select_telemetry_mem(&mcu->telemetry, type),
+ .tel = select_telemetry(&mcu->telemetry, type),
+ .vma = vma,
+ };
+
+ return gcip_telemetry_mmap_buffer(select_telemetry(&mcu->telemetry,
+ type),
+ telemetry_mmap_buffer, &args);
+}
diff --git a/gxp-mcu-telemetry.h b/gxp-mcu-telemetry.h
new file mode 100644
index 0000000..4658fee
--- /dev/null
+++ b/gxp-mcu-telemetry.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * GXP MCU telemetry support
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __GXP_MCU_TELEMETRY_H__
+#define __GXP_MCU_TELEMETRY_H__
+
+#include <linux/workqueue.h>
+
+#include <gcip/gcip-telemetry.h>
+
+#include "gxp-internal.h"
+
+/* Buffer size must be a power of 2 */
+#define GXP_MCU_TELEMETRY_LOG_BUFFER_SIZE (16 * 4096)
+#define GXP_MCU_TELEMETRY_TRACE_BUFFER_SIZE (64 * 4096)
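+
+/*
+ * One way to enforce the power-of-2 requirement at build time is a static
+ * check in the init path; a minimal sketch using standard kernel helpers
+ * (placing it in gxp_mcu_telemetry_init() is an assumption):
+ *
+ *	BUILD_BUG_ON(!is_power_of_2(GXP_MCU_TELEMETRY_LOG_BUFFER_SIZE));
+ *	BUILD_BUG_ON(!is_power_of_2(GXP_MCU_TELEMETRY_TRACE_BUFFER_SIZE));
+ */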
+
+struct gxp_mcu;
+
+struct gxp_mcu_telemetry_ctx {
+ struct gcip_telemetry log;
+ struct gxp_mapped_resource log_mem;
+ struct gcip_telemetry trace;
+ struct gxp_mapped_resource trace_mem;
+};
+
+/*
+ * Allocates resources needed for @mcu->telemetry.
+ *
+ * Returns 0 on success, or a negative errno on error.
+ */
+int gxp_mcu_telemetry_init(struct gxp_mcu *mcu);
+
+/*
+ * Disables the MCU telemetry if enabled and releases resources allocated in init().
+ */
+void gxp_mcu_telemetry_exit(struct gxp_mcu *mcu);
+
+/* Interrupt handler. */
+void gxp_mcu_telemetry_irq_handler(struct gxp_mcu *mcu);
+
+/*
+ * Sends the KCI commands about MCU telemetry buffers to the device.
+ *
+ * Returns the code of KCI response, or a negative errno on error.
+ */
+int gxp_mcu_telemetry_kci(struct gxp_mcu *mcu);
+
+/*
+ * Sets the eventfd to notify the runtime when an IRQ is sent from the device.
+ *
+ * Returns 0 on success, or a negative errno on error.
+ */
+int gxp_mcu_telemetry_register_eventfd(struct gxp_mcu *mcu,
+ enum gcip_telemetry_type type,
+ u32 eventfd);
+/* Removes previously set event. */
+int gxp_mcu_telemetry_unregister_eventfd(struct gxp_mcu *mcu,
+ enum gcip_telemetry_type type);
+
+/* Maps MCU telemetry buffer into user space. */
+int gxp_mcu_telemetry_mmap_buffer(struct gxp_mcu *mcu,
+ enum gcip_telemetry_type type,
+ struct vm_area_struct *vma);
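+
+/*
+ * Tying the pieces together, a runtime-facing handler might drive this API
+ * roughly as follows (a sketch only; the ioctl plumbing and the @mcu lookup
+ * are assumed):
+ *
+ *	// Notify the runtime via eventfd when the MCU pushes new log data.
+ *	ret = gxp_mcu_telemetry_register_eventfd(mcu, GCIP_TELEMETRY_LOG,
+ *						 eventfd);
+ *
+ *	// From the driver's mmap handler, expose the log buffer to the
+ *	// runtime.
+ *	ret = gxp_mcu_telemetry_mmap_buffer(mcu, GCIP_TELEMETRY_LOG, vma);
+ */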
+
+#endif /* __GXP_MCU_TELEMETRY_H__ */
diff --git a/gxp-mcu.c b/gxp-mcu.c
index 1204a6b..e59f865 100644
--- a/gxp-mcu.c
+++ b/gxp-mcu.c
@@ -11,6 +11,7 @@
#include <gcip/gcip-mem-pool.h>
#include "gxp-config.h"
+#include "gxp-dma.h"
#include "gxp-internal.h"
#include "gxp-mailbox.h"
#include "gxp-mcu-firmware.h"
@@ -80,21 +81,21 @@ static int gxp_mcu_mem_pools_init(struct gxp_dev *gxp, struct gxp_mcu *mcu)
static void gxp_mcu_unmap_resources(struct gxp_mcu *mcu)
{
struct gxp_dev *gxp = mcu->gxp;
- struct iommu_domain *domain = iommu_get_domain_for_dev(gxp->dev);
+ struct gxp_iommu_domain *gdomain = gxp_iommu_get_domain_for_dev(gxp);
int i;
for (i = GXP_NUM_CORES; i < GXP_NUM_MAILBOXES; i++)
- iommu_unmap(domain, gxp->mbx[i].daddr, gxp->mbx[i].size);
+ iommu_unmap(gdomain->domain, gxp->mbx[i].daddr, gxp->mbx[i].size);
}
static int gxp_mcu_map_resources(struct gxp_dev *gxp, struct gxp_mcu *mcu)
{
- struct iommu_domain *domain = iommu_get_domain_for_dev(gxp->dev);
+ struct gxp_iommu_domain *gdomain = gxp_iommu_get_domain_for_dev(gxp);
int i, ret;
for (i = GXP_NUM_CORES; i < GXP_NUM_MAILBOXES; i++) {
gxp->mbx[i].daddr = GXP_MCU_NS_MAILBOX(i - GXP_NUM_CORES);
- ret = iommu_map(domain, gxp->mbx[i].daddr,
+ ret = iommu_map(gdomain->domain, gxp->mbx[i].daddr,
gxp->mbx[i].paddr +
MAILBOX_DEVICE_INTERFACE_OFFSET,
gxp->mbx[i].size, IOMMU_READ | IOMMU_WRITE);
@@ -162,9 +163,16 @@ int gxp_mcu_init(struct gxp_dev *gxp, struct gxp_mcu *mcu)
ret = gxp_mcu_map_resources(gxp, mcu);
if (ret)
goto err_free_shared_buffer;
- ret = gxp_uci_init(mcu);
+ /*
+ * MCU telemetry must be initialized before UCI and KCI to match the
+ * .log_buffer address in the firmware linker.ld.
+ */
+ ret = gxp_mcu_telemetry_init(mcu);
if (ret)
goto err_mcu_unmap_resources;
+ ret = gxp_uci_init(mcu);
+ if (ret)
+ goto err_telemetry_exit;
ret = gxp_kci_init(mcu);
if (ret)
goto err_uci_exit;
@@ -172,6 +180,8 @@ int gxp_mcu_init(struct gxp_dev *gxp, struct gxp_mcu *mcu)
err_uci_exit:
gxp_uci_exit(&mcu->uci);
+err_telemetry_exit:
+ gxp_mcu_telemetry_exit(mcu);
err_mcu_unmap_resources:
gxp_mcu_unmap_resources(mcu);
err_free_shared_buffer:
@@ -187,6 +197,7 @@ void gxp_mcu_exit(struct gxp_mcu *mcu)
{
gxp_kci_exit(&mcu->kci);
gxp_uci_exit(&mcu->uci);
+ gxp_mcu_telemetry_exit(mcu);
gxp_mcu_unmap_resources(mcu);
gxp_free_shared_buffer(mcu);
gxp_mcu_mem_pools_exit(mcu);
diff --git a/gxp-mcu.h b/gxp-mcu.h
index ae4d6f7..d8c8fbe 100644
--- a/gxp-mcu.h
+++ b/gxp-mcu.h
@@ -12,6 +12,7 @@
#include "gxp-kci.h"
#include "gxp-mcu-firmware.h"
+#include "gxp-mcu-telemetry.h"
#include "gxp-uci.h"
struct gxp_dev;
@@ -26,6 +27,7 @@ struct gxp_mcu {
struct gcip_mem_pool remap_secure_pool;
struct gxp_uci uci;
struct gxp_kci kci;
+ struct gxp_mcu_telemetry_ctx telemetry;
};
/*
diff --git a/gxp-notification.h b/gxp-notification.h
index a4e4fd3..6f43b70 100644
--- a/gxp-notification.h
+++ b/gxp-notification.h
@@ -14,7 +14,7 @@
enum gxp_notification_to_host_type {
HOST_NOTIF_MAILBOX_RESPONSE = 0,
HOST_NOTIF_DEBUG_DUMP_READY = 1,
- HOST_NOTIF_TELEMETRY_STATUS = 2,
+ HOST_NOTIF_CORE_TELEMETRY_STATUS = 2,
HOST_NOTIF_MAX
};
diff --git a/gxp-pm.c b/gxp-pm.c
index 787ba32..cde555d 100644
--- a/gxp-pm.c
+++ b/gxp-pm.c
@@ -6,6 +6,7 @@
*/
#include <linux/acpm_dvfs.h>
+#include <linux/bits.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
@@ -63,8 +64,9 @@ static int gxp_pm_blkpwr_up(struct gxp_dev *gxp)
*/
ret = pm_runtime_resume_and_get(gxp->dev);
if (ret)
- dev_err(gxp->dev, "%s: pm_runtime_resume_and_get returned %d\n",
- __func__, ret);
+ dev_err(gxp->dev,
+ "pm_runtime_resume_and_get returned %d during blk up\n",
+ ret);
return ret;
}
@@ -78,6 +80,12 @@ static int gxp_pm_blkpwr_down(struct gxp_dev *gxp)
*/
lpm_write_32_psm(gxp, LPM_TOP_PSM, LPM_REG_ENABLE_STATE_1, 0x0);
lpm_write_32_psm(gxp, LPM_TOP_PSM, LPM_REG_ENABLE_STATE_2, 0x0);
+ if (!gxp_lpm_wait_state_eq(gxp, LPM_TOP_PSM, LPM_ACTIVE_STATE)) {
+ dev_err(gxp->dev,
+ "failed to force TOP LPM to PS0 during blk down\n");
+ return -EAGAIN;
+ }
+
ret = pm_runtime_put_sync(gxp->dev);
if (ret)
/*
@@ -86,8 +94,9 @@ static int gxp_pm_blkpwr_down(struct gxp_dev *gxp)
* indicate the device is still in use somewhere. The only
* expected value here is 0, indicating no remaining users.
*/
- dev_err(gxp->dev, "%s: pm_runtime_put_sync returned %d\n",
- __func__, ret);
+ dev_err(gxp->dev,
+ "pm_runtime_put_sync returned %d during blk down\n",
+ ret);
/* Remove our vote for INT/MIF state (if any) */
exynos_pm_qos_update_request(&gxp->power_mgr->int_min, 0);
exynos_pm_qos_update_request(&gxp->power_mgr->mif_min, 0);
@@ -112,7 +121,7 @@ int gxp_pm_blk_set_rate_acpm(struct gxp_dev *gxp, unsigned long rate)
{
int ret = exynos_acpm_set_rate(AUR_DVFS_DOMAIN, rate);
- dev_dbg(gxp->dev, "%s: rate %lu, ret %d\n", __func__, rate, ret);
+ dev_dbg(gxp->dev, "set blk rate %lu, ret %d\n", rate, ret);
return ret;
}
@@ -194,11 +203,14 @@ out:
mutex_unlock(&set_acpm_state_work->gxp->power_mgr->pm_lock);
}
+#define AUR_DVFS_DEBUG_REQ BIT(31)
+#define AUR_DEBUG_CORE_FREQ (AUR_DVFS_DEBUG_REQ | (3 << 27))
+
int gxp_pm_blk_get_state_acpm(struct gxp_dev *gxp)
{
int ret = exynos_acpm_get_rate(AUR_DVFS_DOMAIN, AUR_DEBUG_CORE_FREQ);
- dev_dbg(gxp->dev, "%s: state %d\n", __func__, ret);
+ dev_dbg(gxp->dev, "current blk state %d\n", ret);
return ret;
}
@@ -284,7 +296,7 @@ int gxp_pm_core_on(struct gxp_dev *gxp, uint core, bool verbose)
mutex_lock(&gxp->power_mgr->pm_lock);
ret = gxp_lpm_up(gxp, core);
if (ret) {
- dev_err(gxp->dev, "%s: Core %d on fail\n", __func__, core);
+ dev_err(gxp->dev, "Core %d on fail\n", core);
mutex_unlock(&gxp->power_mgr->pm_lock);
return ret;
}
@@ -292,7 +304,7 @@ int gxp_pm_core_on(struct gxp_dev *gxp, uint core, bool verbose)
mutex_unlock(&gxp->power_mgr->pm_lock);
if (verbose)
- dev_notice(gxp->dev, "%s: Core %d up\n", __func__, core);
+ dev_notice(gxp->dev, "Core %d powered up\n", core);
return ret;
}
@@ -304,7 +316,7 @@ void gxp_pm_core_off(struct gxp_dev *gxp, uint core)
mutex_lock(&gxp->power_mgr->pm_lock);
gxp_lpm_down(gxp, core);
mutex_unlock(&gxp->power_mgr->pm_lock);
- dev_notice(gxp->dev, "%s: Core %d down\n", __func__, core);
+ dev_notice(gxp->dev, "Core %d powered down\n", core);
}
static int gxp_pm_req_state_locked(struct gxp_dev *gxp,
@@ -701,9 +713,11 @@ int gxp_pm_init(struct gxp_dev *gxp)
int gxp_pm_destroy(struct gxp_dev *gxp)
{
- struct gxp_power_manager *mgr;
+ struct gxp_power_manager *mgr = gxp->power_mgr;
+
+ if (IS_GXP_TEST && !mgr)
+ return 0;
- mgr = gxp->power_mgr;
exynos_pm_qos_remove_request(&mgr->mif_min);
exynos_pm_qos_remove_request(&mgr->int_min);
pm_runtime_disable(gxp->dev);
diff --git a/gxp-uci.c b/gxp-uci.c
index 56d52cc..c1e0f29 100644
--- a/gxp-uci.c
+++ b/gxp-uci.c
@@ -26,8 +26,7 @@
static void gxp_uci_mailbox_manager_release_unconsumed_async_resps(
struct gxp_virtual_device *vd)
{
- struct gxp_async_response *cur, *nxt;
- struct gxp_uci_async_response *uci_async_resp;
+ struct gxp_uci_async_response *cur, *nxt;
unsigned long flags;
/*
@@ -41,9 +40,7 @@ static void gxp_uci_mailbox_manager_release_unconsumed_async_resps(
cur, nxt, &vd->mailbox_resp_queues[UCI_RESOURCE_ID].queue,
list_entry) {
list_del(&cur->list_entry);
- uci_async_resp = container_of(
- cur, struct gxp_uci_async_response, async_response);
- kfree(uci_async_resp);
+ gcip_mailbox_release_async_resp(cur->awaiter);
}
spin_unlock_irqrestore(&vd->mailbox_resp_queues[UCI_RESOURCE_ID].lock,
flags);
@@ -116,10 +113,9 @@ static void gxp_uci_set_resp_elem_status(struct gcip_mailbox *mailbox,
static void gxp_uci_handle_async_resp_arrived(
struct gcip_mailbox *mailbox,
- struct gcip_mailbox_async_response *async_gcip_resp)
+ struct gcip_mailbox_async_response *gcip_async_resp)
{
- struct gxp_uci_async_response *async_uci_resp = async_gcip_resp->data;
- struct gxp_async_response *async_resp = &async_uci_resp->async_response;
+ struct gxp_uci_async_response *async_resp = gcip_async_resp->data;
unsigned long flags;
/*
@@ -152,10 +148,9 @@ static void gxp_uci_handle_async_resp_arrived(
static void gxp_uci_handle_async_resp_timedout(
struct gcip_mailbox *mailbox,
- struct gcip_mailbox_async_response *async_gcip_resp)
+ struct gcip_mailbox_async_response *gcip_async_resp)
{
- struct gxp_uci_async_response *async_uci_resp = async_gcip_resp->data;
- struct gxp_async_response *async_resp = &async_uci_resp->async_response;
+ struct gxp_uci_async_response *async_resp = gcip_async_resp->data;
unsigned long flags;
/*
@@ -168,7 +163,7 @@ static void gxp_uci_handle_async_resp_timedout(
*/
spin_lock_irqsave(async_resp->dest_queue_lock, flags);
if (async_resp->dest_queue) {
- async_uci_resp->resp.code = GXP_RESP_CANCELLED;
+ async_resp->resp.code = GXP_RESP_CANCELLED;
list_add_tail(&async_resp->list_entry, async_resp->dest_queue);
spin_unlock_irqrestore(async_resp->dest_queue_lock, flags);
@@ -185,10 +180,9 @@ static void gxp_uci_handle_async_resp_timedout(
static void
gxp_uci_flush_async_resp(struct gcip_mailbox *mailbox,
- struct gcip_mailbox_async_response *async_gcip_resp)
+ struct gcip_mailbox_async_response *gcip_async_resp)
{
- struct gxp_uci_async_response *async_uci_resp = async_gcip_resp->data;
- struct gxp_async_response *async_resp = &async_uci_resp->async_response;
+ struct gxp_uci_async_response *async_resp = gcip_async_resp->data;
unsigned long flags;
spin_lock_irqsave(async_resp->dest_queue_lock, flags);
@@ -198,9 +192,9 @@ gxp_uci_flush_async_resp(struct gcip_mailbox *mailbox,
static void gxp_uci_release_async_resp_data(void *data)
{
- struct gxp_uci_async_response *async_uci_resp = data;
+ struct gxp_uci_async_response *async_resp = data;
- kfree(async_uci_resp);
+ kfree(async_resp);
}
static const struct gcip_mailbox_ops gxp_uci_gcip_mbx_ops = {
@@ -248,8 +242,8 @@ static int gxp_uci_allocate_resources(struct gxp_mailbox *mailbox,
MBOX_CMD_QUEUE_NUM_ENTRIES);
if (ret)
goto err_cmd_queue;
- mailbox->cmd_queue = uci->cmd_queue_mem.vaddr;
- mailbox->cmd_queue_device_addr = uci->cmd_queue_mem.daddr;
+ mailbox->cmd_queue_buf.vaddr = uci->cmd_queue_mem.vaddr;
+ mailbox->cmd_queue_buf.dsp_addr = uci->cmd_queue_mem.daddr;
mailbox->cmd_queue_size = MBOX_CMD_QUEUE_NUM_ENTRIES;
mailbox->cmd_queue_tail = 0;
@@ -259,8 +253,8 @@ static int gxp_uci_allocate_resources(struct gxp_mailbox *mailbox,
MBOX_RESP_QUEUE_NUM_ENTRIES);
if (ret)
goto err_resp_queue;
- mailbox->resp_queue = uci->resp_queue_mem.vaddr;
- mailbox->resp_queue_device_addr = uci->resp_queue_mem.daddr;
+ mailbox->resp_queue_buf.vaddr = uci->resp_queue_mem.vaddr;
+ mailbox->resp_queue_buf.dsp_addr = uci->resp_queue_mem.daddr;
mailbox->resp_queue_size = MBOX_RESP_QUEUE_NUM_ENTRIES;
mailbox->resp_queue_head = 0;
@@ -270,8 +264,10 @@ static int gxp_uci_allocate_resources(struct gxp_mailbox *mailbox,
if (ret)
goto err_descriptor;
- mailbox->descriptor = uci->descriptor_mem.vaddr;
- mailbox->descriptor_device_addr = uci->descriptor_mem.daddr;
+ mailbox->descriptor_buf.vaddr = uci->descriptor_mem.vaddr;
+ mailbox->descriptor_buf.dsp_addr = uci->descriptor_mem.daddr;
+ mailbox->descriptor =
+ (struct gxp_mailbox_descriptor *)mailbox->descriptor_buf.vaddr;
mailbox->descriptor->cmd_queue_device_addr = uci->cmd_queue_mem.daddr;
mailbox->descriptor->resp_queue_device_addr = uci->resp_queue_mem.daddr;
mailbox->descriptor->cmd_queue_size = mailbox->cmd_queue_size;
@@ -284,7 +280,7 @@ err_descriptor:
err_resp_queue:
gxp_mcu_mem_free_data(mcu, &uci->cmd_queue_mem);
err_cmd_queue:
- return -ENOMEM;
+ return ret;
}
static void gxp_uci_release_resources(struct gxp_mailbox *mailbox,
@@ -298,59 +294,10 @@ static void gxp_uci_release_resources(struct gxp_mailbox *mailbox,
gxp_mcu_mem_free_data(uci->mcu, &uci->cmd_queue_mem);
}
-static int gxp_uci_init_consume_responses_work(struct gxp_mailbox *mailbox)
-{
- struct gxp_uci *uci = mailbox->data;
- struct gcip_mailbox_args args = {
- .dev = mailbox->gxp->dev,
- .queue_wrap_bit = CIRCULAR_QUEUE_WRAP_BIT,
- .cmd_queue = mailbox->cmd_queue,
- .cmd_elem_size = sizeof(struct gxp_uci_command),
- .resp_queue = mailbox->resp_queue,
- .resp_elem_size = sizeof(struct gxp_uci_response),
- .timeout = MAILBOX_TIMEOUT,
- .ops = &gxp_uci_gcip_mbx_ops,
- .data = mailbox,
- };
- int ret;
-
- uci->gcip_mbx = kzalloc(sizeof(*uci->gcip_mbx), GFP_KERNEL);
- if (!uci->gcip_mbx)
- return -ENOMEM;
-
- /* Initialize gcip_mailbox */
- ret = gcip_mailbox_init(uci->gcip_mbx, &args);
- if (ret) {
- kfree(uci->gcip_mbx);
- return ret;
- }
-
- return 0;
-}
-
-static void gxp_uci_release_consume_responses_work(struct gxp_mailbox *mailbox)
-{
- struct gxp_uci *uci = mailbox->data;
-
- /* Release gcip_mailbox */
- gcip_mailbox_release(uci->gcip_mbx);
- kfree(uci->gcip_mbx);
-}
-
-static void gxp_uci_consume_responses_work(struct gxp_mailbox *mailbox)
-{
- struct gxp_uci *uci = mailbox->data;
-
- gcip_mailbox_consume_responses_work(uci->gcip_mbx);
-}
-
static struct gxp_mailbox_ops gxp_uci_gxp_mbx_ops = {
.allocate_resources = gxp_uci_allocate_resources,
.release_resources = gxp_uci_release_resources,
- .init_consume_responses_work = gxp_uci_init_consume_responses_work,
- .release_consume_responses_work =
- gxp_uci_release_consume_responses_work,
- .consume_responses_work = gxp_uci_consume_responses_work,
+ .gcip_ops.mbx = &gxp_uci_gcip_mbx_ops,
};
int gxp_uci_init(struct gxp_mcu *mcu)
@@ -358,16 +305,20 @@ int gxp_uci_init(struct gxp_mcu *mcu)
struct gxp_dev *gxp = mcu->gxp;
struct gxp_uci *uci = &mcu->uci;
struct gxp_mailbox_args mbx_args = {
+ .type = GXP_MBOX_TYPE_GENERAL,
.ops = &gxp_uci_gxp_mbx_ops,
+ .queue_wrap_bit = CIRCULAR_QUEUE_WRAP_BIT,
+ .cmd_elem_size = sizeof(struct gxp_uci_command),
+ .resp_elem_size = sizeof(struct gxp_uci_response),
.data = uci,
};
uci->gxp = gxp;
uci->mcu = mcu;
- uci->gxp_mbx = gxp_mailbox_alloc(gxp->mailbox_mgr, NULL, 0,
- UCI_MAILBOX_ID, &mbx_args);
- if (IS_ERR(uci->gxp_mbx))
- return PTR_ERR(uci->gxp_mbx);
+ uci->mbx = gxp_mailbox_alloc(gxp->mailbox_mgr, NULL, 0, UCI_MAILBOX_ID,
+ &mbx_args);
+ if (IS_ERR(uci->mbx))
+ return PTR_ERR(uci->mbx);
gxp_uci_mailbox_manager_set_ops(gxp->mailbox_mgr);
return 0;
@@ -375,8 +326,10 @@ int gxp_uci_init(struct gxp_mcu *mcu)
void gxp_uci_exit(struct gxp_uci *uci)
{
- gxp_mailbox_release(uci->gxp->mailbox_mgr, NULL, 0, uci->gxp_mbx);
- uci->gxp_mbx = NULL;
+ if (IS_GXP_TEST && (!uci || !uci->mbx))
+ return;
+ gxp_mailbox_release(uci->gxp->mailbox_mgr, NULL, 0, uci->mbx);
+ uci->mbx = NULL;
}
int gxp_uci_send_command(struct gxp_uci *uci, struct gxp_uci_command *cmd,
@@ -384,17 +337,14 @@ int gxp_uci_send_command(struct gxp_uci *uci, struct gxp_uci_command *cmd,
wait_queue_head_t *queue_waitq,
struct gxp_eventfd *eventfd)
{
- struct gxp_uci_async_response *async_uci_resp;
- struct gxp_async_response *async_resp;
+ struct gxp_uci_async_response *async_resp;
int ret;
- async_uci_resp = kzalloc(sizeof(*async_uci_resp), GFP_KERNEL);
- if (!async_uci_resp)
+ async_resp = kzalloc(sizeof(*async_resp), GFP_KERNEL);
+ if (!async_resp)
return -ENOMEM;
- async_resp = &async_uci_resp->async_response;
- async_uci_resp->uci = uci;
-
+ async_resp->uci = uci;
/*
* If the command is a wakelock command, keep dest_queue as a null
* pointer to indicate that we will not expose the response to the
@@ -409,17 +359,17 @@ int gxp_uci_send_command(struct gxp_uci *uci, struct gxp_uci_command *cmd,
else
async_resp->eventfd = NULL;
- async_uci_resp->async_gcip_resp = gcip_mailbox_put_cmd(
- uci->gcip_mbx, cmd, &async_uci_resp->resp, async_uci_resp);
- if (IS_ERR(async_uci_resp->async_gcip_resp)) {
- ret = PTR_ERR(async_uci_resp->async_gcip_resp);
+ async_resp->awaiter = gxp_mailbox_put_cmd(
+ uci->mbx, cmd, &async_resp->resp, async_resp);
+ if (IS_ERR(async_resp->awaiter)) {
+ ret = PTR_ERR(async_resp->awaiter);
goto err_free_resp;
}
return 0;
err_free_resp:
- kfree(async_uci_resp);
+ kfree(async_resp);
return ret;
}
@@ -428,8 +378,7 @@ int gxp_uci_wait_async_response(struct mailbox_resp_queue *uci_resp_queue,
u16 *error_code)
{
long timeout;
- struct gxp_async_response *async_resp;
- struct gxp_uci_async_response *uci_async_resp;
+ struct gxp_uci_async_response *async_resp;
spin_lock_irq(&uci_resp_queue->lock);
@@ -447,31 +396,30 @@ int gxp_uci_wait_async_response(struct mailbox_resp_queue *uci_resp_queue,
/* unusual case - this only happens when there is no command pushed */
return timeout ? -ETIMEDOUT : timeout;
}
- async_resp = list_first_entry(&uci_resp_queue->queue,
- struct gxp_async_response, list_entry);
- uci_async_resp = container_of(async_resp, struct gxp_uci_async_response,
- async_response);
+ async_resp =
+ list_first_entry(&uci_resp_queue->queue,
+ struct gxp_uci_async_response, list_entry);
/* Pop the front of the response list */
list_del(&(async_resp->list_entry));
spin_unlock_irq(&uci_resp_queue->lock);
- *resp_seq = uci_async_resp->resp.seq;
- switch (uci_async_resp->resp.code) {
+ *resp_seq = async_resp->resp.seq;
+ switch (async_resp->resp.code) {
case GXP_RESP_OK:
*error_code = GXP_RESPONSE_ERROR_NONE;
/* payload is only valid if code == GXP_RESP_OK */
- *resp_retval = uci_async_resp->resp.payload;
+ *resp_retval = async_resp->resp.payload;
break;
case GXP_RESP_CANCELLED:
*error_code = GXP_RESPONSE_ERROR_TIMEOUT;
break;
default:
/* No other code values are valid at this point */
- dev_err(uci_async_resp->uci->gxp->dev,
+ dev_err(async_resp->uci->gxp->dev,
"Completed response had invalid code %hu\n",
- uci_async_resp->resp.code);
+ async_resp->resp.code);
*error_code = GXP_RESPONSE_ERROR_INTERNAL;
break;
}
@@ -490,8 +438,8 @@ int gxp_uci_wait_async_response(struct mailbox_resp_queue *uci_resp_queue,
* handler (which may reference the `gxp_async_response`) has
* been able to exit cleanly.
*/
- gcip_mailbox_cancel_async_resp_timeout(uci_async_resp->async_gcip_resp);
- gcip_mailbox_release_async_resp(uci_async_resp->async_gcip_resp);
+ gcip_mailbox_cancel_async_resp_timeout(async_resp->awaiter);
+ gcip_mailbox_release_async_resp(async_resp->awaiter);
return 0;
}
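(For reference) After this refactor a single struct gxp_uci_async_response carries a command through its whole lifetime, and the gcip awaiter returned by gxp_mailbox_put_cmd() replaces the old nested gxp_async_response plus container_of() bookkeeping. A condensed sketch of the happy path using only calls visible in this diff; the locals and the trimmed wait step are illustrative, and the snippet is not buildable outside the driver tree:

/* Sketch: allocate the response, arm the command, and on completion drop
 * both the timeout and the awaiter's reference. Releasing the awaiter
 * calls back into gxp_uci_release_async_resp_data(), which kfree()s
 * async_resp, so the happy path needs no separate kfree(). */
static int uci_send_and_wait_sketch(struct gxp_uci *uci,
				    struct gxp_uci_command *cmd,
				    struct mailbox_resp_queue *q)
{
	struct gxp_uci_async_response *async_resp;

	async_resp = kzalloc(sizeof(*async_resp), GFP_KERNEL);
	if (!async_resp)
		return -ENOMEM;
	async_resp->uci = uci;
	async_resp->dest_queue = &q->queue;
	async_resp->dest_queue_lock = &q->lock;

	async_resp->awaiter = gxp_mailbox_put_cmd(uci->mbx, cmd,
						  &async_resp->resp, async_resp);
	if (IS_ERR(async_resp->awaiter)) {
		int ret = PTR_ERR(async_resp->awaiter);

		kfree(async_resp);
		return ret;
	}

	/* ... wait for the arrived/timedout handler to move async_resp onto
	 * q->queue, then pop it as gxp_uci_wait_async_response() does ... */

	gcip_mailbox_cancel_async_resp_timeout(async_resp->awaiter);
	gcip_mailbox_release_async_resp(async_resp->awaiter);
	return 0;
}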
diff --git a/gxp-uci.h b/gxp-uci.h
index 9d71365..c267b1f 100644
--- a/gxp-uci.h
+++ b/gxp-uci.h
@@ -85,10 +85,24 @@ struct gxp_uci_response {
* sent the command.
*/
struct gxp_uci_async_response {
+ struct list_head list_entry;
+ /* Stores the response. */
struct gxp_uci_response resp;
struct gxp_uci *uci;
- struct gxp_async_response async_response;
- struct gcip_mailbox_async_response *async_gcip_resp;
+ /* Queue to add the response to once it is complete or timed out. */
+ struct list_head *dest_queue;
+ /*
+	 * The lock that protects the queue pointed to by `dest_queue`.
+ * The mailbox code also uses this lock to protect changes to the
+ * `dest_queue` pointer itself when processing this response.
+ */
+ spinlock_t *dest_queue_lock;
+ /* Queue of clients to notify when this response is processed. */
+ wait_queue_head_t *dest_queue_waitq;
+ /* gxp_eventfd to signal when the response completes. May be NULL. */
+ struct gxp_eventfd *eventfd;
+	/* Handles the arrival or timeout of this async response. */
+ struct gcip_mailbox_async_response *awaiter;
};
struct gxp_uci_wait_list {
@@ -100,8 +114,7 @@ struct gxp_uci_wait_list {
struct gxp_uci {
struct gxp_dev *gxp;
struct gxp_mcu *mcu;
- struct gxp_mailbox *gxp_mbx;
- struct gcip_mailbox *gcip_mbx;
+ struct gxp_mailbox *mbx;
struct gxp_mapped_resource cmd_queue_mem;
struct gxp_mapped_resource resp_queue_mem;
struct gxp_mapped_resource descriptor_mem;
diff --git a/gxp-vd.c b/gxp-vd.c
index f7cff44..e40cad6 100644
--- a/gxp-vd.c
+++ b/gxp-vd.c
@@ -10,6 +10,7 @@
#include <linux/slab.h>
#include "gxp-config.h"
+#include "gxp-core-telemetry.h"
#include "gxp-debug-dump.h"
#include "gxp-dma.h"
#include "gxp-domain-pool.h"
@@ -21,14 +22,13 @@
#include "gxp-mailbox.h"
#include "gxp-notification.h"
#include "gxp-pm.h"
-#include "gxp-telemetry.h"
#include "gxp-vd.h"
#include "gxp-wakelock.h"
static inline void hold_core_in_reset(struct gxp_dev *gxp, uint core)
{
gxp_write_32(gxp, GXP_CORE_REG_ETM_PWRCTL(core),
- 1 << GXP_REG_ETM_PWRCTL_CORE_RESET_SHIFT);
+ BIT(GXP_REG_ETM_PWRCTL_CORE_RESET_SHIFT));
}
void gxp_vd_init(struct gxp_dev *gxp)
@@ -47,18 +47,19 @@ void gxp_vd_destroy(struct gxp_dev *gxp)
/* NO-OP for now. */
}
-static int map_telemetry_buffers(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd, uint core_list)
+static int map_core_telemetry_buffers(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd,
+ uint core_list)
{
struct buffer_data *data[2];
int i, core, ret;
- if (!gxp->telemetry_mgr)
+ if (!gxp->core_telemetry_mgr)
return 0;
- mutex_lock(&gxp->telemetry_mgr->lock);
- data[0] = gxp->telemetry_mgr->logging_buff_data;
- data[1] = gxp->telemetry_mgr->tracing_buff_data;
+ mutex_lock(&gxp->core_telemetry_mgr->lock);
+ data[0] = gxp->core_telemetry_mgr->logging_buff_data;
+ data[1] = gxp->core_telemetry_mgr->tracing_buff_data;
for (i = 0; i < ARRAY_SIZE(data); i++) {
if (!data[i] || !data[i]->is_enabled)
@@ -67,25 +68,23 @@ static int map_telemetry_buffers(struct gxp_dev *gxp,
if (!(BIT(core) & core_list))
continue;
ret = gxp_dma_map_allocated_coherent_buffer(
- gxp, data[i]->buffers[core], vd->domain,
- data[i]->size, data[i]->buffer_daddrs[core], 0);
+ gxp, &data[i]->buffers[core], vd->domain, 0);
if (ret) {
dev_err(gxp->dev,
- "Mapping telemetry buffer to core %d failed",
+ "Mapping core telemetry buffer to core %d failed",
core);
goto error;
}
}
}
- mutex_unlock(&gxp->telemetry_mgr->lock);
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
return 0;
error:
while (core--) {
if (!(BIT(core) & core_list))
continue;
gxp_dma_unmap_allocated_coherent_buffer(
- gxp, vd->domain, data[i]->size,
- data[i]->buffer_daddrs[core]);
+ gxp, vd->domain, &data[i]->buffers[core]);
}
while (i--) {
if (!data[i] || !data[i]->is_enabled)
@@ -94,26 +93,25 @@ error:
if (!(BIT(core) & core_list))
continue;
gxp_dma_unmap_allocated_coherent_buffer(
- gxp, vd->domain, data[i]->size,
- data[i]->buffer_daddrs[core]);
+ gxp, vd->domain, &data[i]->buffers[core]);
}
}
- mutex_unlock(&gxp->telemetry_mgr->lock);
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
return ret;
}
-static void unmap_telemetry_buffers(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd,
- uint core_list)
+static void unmap_core_telemetry_buffers(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd,
+ uint core_list)
{
struct buffer_data *data[2];
int i, core;
- if (!gxp->telemetry_mgr)
+ if (!gxp->core_telemetry_mgr)
return;
- mutex_lock(&gxp->telemetry_mgr->lock);
- data[0] = gxp->telemetry_mgr->logging_buff_data;
- data[1] = gxp->telemetry_mgr->tracing_buff_data;
+ mutex_lock(&gxp->core_telemetry_mgr->lock);
+ data[0] = gxp->core_telemetry_mgr->logging_buff_data;
+ data[1] = gxp->core_telemetry_mgr->tracing_buff_data;
for (i = 0; i < ARRAY_SIZE(data); i++) {
if (!data[i] || !data[i]->is_enabled)
@@ -122,11 +120,10 @@ static void unmap_telemetry_buffers(struct gxp_dev *gxp,
if (!(BIT(core) & core_list))
continue;
gxp_dma_unmap_allocated_coherent_buffer(
- gxp, vd->domain, data[i]->size,
- data[i]->buffer_daddrs[core]);
+ gxp, vd->domain, &data[i]->buffers[core]);
}
}
- mutex_unlock(&gxp->telemetry_mgr->lock);
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
}
static int map_debug_dump_buffer(struct gxp_dev *gxp,
@@ -136,9 +133,7 @@ static int map_debug_dump_buffer(struct gxp_dev *gxp,
return 0;
return gxp_dma_map_allocated_coherent_buffer(
- gxp, gxp->debug_dump_mgr->buf.vaddr, vd->domain,
- gxp->debug_dump_mgr->buf.size, gxp->debug_dump_mgr->buf.daddr,
- 0);
+ gxp, &gxp->debug_dump_mgr->buf, vd->domain, 0);
}
static void unmap_debug_dump_buffer(struct gxp_dev *gxp,
@@ -148,8 +143,7 @@ static void unmap_debug_dump_buffer(struct gxp_dev *gxp,
return;
gxp_dma_unmap_allocated_coherent_buffer(gxp, vd->domain,
- gxp->debug_dump_mgr->buf.size,
- gxp->debug_dump_mgr->buf.daddr);
+ &gxp->debug_dump_mgr->buf);
}
static int assign_cores(struct gxp_virtual_device *vd)
@@ -208,6 +202,8 @@ struct gxp_virtual_device *gxp_vd_allocate(struct gxp_dev *gxp,
vd->num_cores = requested_cores;
vd->state = GXP_VD_OFF;
vd->slice_index = -1;
+ vd->client_id = -1;
+ vd->tpu_client_id = -1;
vd->domain = gxp_domain_pool_alloc(gxp->domain_pool);
if (!vd->domain) {
@@ -243,34 +239,38 @@ struct gxp_virtual_device *gxp_vd_allocate(struct gxp_dev *gxp,
err = assign_cores(vd);
if (err)
- goto error_free_slice_index;
+ goto error_free_resp_queues;
if (gxp->data_mgr) {
vd->fw_app = gxp_fw_data_create_app(gxp, vd->core_list);
- if (IS_ERR_OR_NULL(vd->fw_app))
+ if (IS_ERR(vd->fw_app)) {
+ err = PTR_ERR(vd->fw_app);
goto error_unassign_cores;
+ }
}
err = gxp_dma_map_core_resources(gxp, vd->domain, vd->core_list,
vd->slice_index);
if (err)
goto error_destroy_fw_data;
- err = map_telemetry_buffers(gxp, vd, vd->core_list);
+ err = map_core_telemetry_buffers(gxp, vd, vd->core_list);
if (err)
goto error_unmap_core_resources;
err = map_debug_dump_buffer(gxp, vd);
if (err)
- goto error_unmap_telemetry_buffer;
+ goto error_unmap_core_telemetry_buffer;
return vd;
-error_unmap_telemetry_buffer:
- unmap_telemetry_buffers(gxp, vd, vd->core_list);
+error_unmap_core_telemetry_buffer:
+ unmap_core_telemetry_buffers(gxp, vd, vd->core_list);
error_unmap_core_resources:
gxp_dma_unmap_core_resources(gxp, vd->domain, vd->core_list);
error_destroy_fw_data:
gxp_fw_data_destroy_app(gxp, vd->fw_app);
error_unassign_cores:
unassign_cores(vd);
+error_free_resp_queues:
+ kfree(vd->mailbox_resp_queues);
error_free_slice_index:
if (vd->slice_index >= 0)
ida_free(&gxp->shared_slice_idp, vd->slice_index);
@@ -292,7 +292,7 @@ void gxp_vd_release(struct gxp_virtual_device *vd)
lockdep_assert_held_write(&gxp->vd_semaphore);
unassign_cores(vd);
unmap_debug_dump_buffer(gxp, vd);
- unmap_telemetry_buffers(gxp, vd, core_list);
+ unmap_core_telemetry_buffers(gxp, vd, core_list);
gxp_dma_unmap_core_resources(gxp, vd->domain, core_list);
if (!IS_ERR_OR_NULL(vd->fw_app)) {
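(For reference) map_core_telemetry_buffers() unwinds in two stages on failure: first the cores of the buffer that was being mapped when the error hit (`while (core--)`), then every core of each earlier, fully mapped buffer (`while (i--)`). A runnable userspace miniature of that control flow; the map/unmap stubs and the counts are illustrative, only the loop structure mirrors the driver:

#include <stdio.h>

#define NUM_BUFFERS 2
#define NUM_CORES   4
#define BIT(n) (1u << (n))

static int map_buf(int buf, int core)
{
	if (buf == 1 && core == 2)	/* force one failure */
		return -1;
	printf("map   buf %d core %d\n", buf, core);
	return 0;
}

static void unmap_buf(int buf, int core)
{
	printf("unmap buf %d core %d\n", buf, core);
}

int main(void)
{
	unsigned int core_list = BIT(0) | BIT(2) | BIT(3);
	int i, core;

	for (i = 0; i < NUM_BUFFERS; i++)
		for (core = 0; core < NUM_CORES; core++) {
			if (!(BIT(core) & core_list))
				continue;
			if (map_buf(i, core))
				goto error;
		}
	return 0;

error:
	/* Stage 1: cores of buffer i that were mapped before the failure. */
	while (core--) {
		if (!(BIT(core) & core_list))
			continue;
		unmap_buf(i, core);
	}
	/* Stage 2: all cores of every earlier, fully mapped buffer. */
	while (i--)
		for (core = 0; core < NUM_CORES; core++) {
			if (!(BIT(core) & core_list))
				continue;
			unmap_buf(i, core);
		}
	return 1;
}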
diff --git a/gxp-vd.h b/gxp-vd.h
index 02c8be3..c84e527 100644
--- a/gxp-vd.h
+++ b/gxp-vd.h
@@ -20,7 +20,7 @@
#include "gxp-mapping.h"
struct mailbox_resp_queue {
- /* Queue of `struct gxp_async_response`s */
+ /* Queue of async responses */
struct list_head queue;
/* Lock protecting access to the `queue` */
spinlock_t lock;
@@ -45,7 +45,7 @@ struct gxp_virtual_device {
struct gxp_dev *gxp;
uint num_cores;
void *fw_app;
- struct iommu_domain *domain;
+ struct gxp_iommu_domain *domain;
struct mailbox_resp_queue *mailbox_resp_queues;
struct rb_root mappings_root;
struct rw_semaphore mappings_semaphore;
@@ -63,6 +63,19 @@ struct gxp_virtual_device {
*/
int slice_index;
uint core_list;
+ /*
+	 * The ID of the DSP client. -1 if it is not allocated.
+	 * The ID is allocated by the DSP kernel driver, but is only stored here once the client
+	 * of this vd has successfully acquired the block wakelock (i.e., after the kernel driver
+	 * has successfully allocated a virtual mailbox on the firmware side by sending the
+	 * `allocate_vmbox` KCI command).
+	 */
+ int client_id;
+ /*
+	 * The ID of the TPU client. -1 if it is not allocated.
+	 * This ID is fetched from the TPU kernel driver.
+ */
+ int tpu_client_id;
};
/*
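(For reference) Both new IDs use -1 as a "not allocated" sentinel, which keeps teardown order-independent. A hedged sketch of the guard pattern this enables; release_vmbox() is a hypothetical stand-in for whatever KCI call undoes `allocate_vmbox`:

/* Hypothetical: skip teardown when the ID was never handed out. */
static void vd_release_client_id_sketch(struct gxp_virtual_device *vd)
{
	if (vd->client_id < 0)
		return;			/* never allocated, nothing to undo */
	release_vmbox(vd->gxp, vd->client_id);
	vd->client_id = -1;		/* restore the sentinel */
}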
diff --git a/gxp-wakelock.h b/gxp-wakelock.h
index ff76325..6babf27 100644
--- a/gxp-wakelock.h
+++ b/gxp-wakelock.h
@@ -18,7 +18,7 @@ struct gxp_wakelock_manager {
};
/**
- * gxp_telemetry_init() - Initialize wakelock support
+ * gxp_wakelock_init() - Initialize wakelock support
* @gxp: The GXP device to initialize wakelock support for
*
* Return:
diff --git a/gxp.h b/gxp.h
index 6610273..3953cd0 100644
--- a/gxp.h
+++ b/gxp.h
@@ -2,8 +2,9 @@
/*
* GXP kernel-userspace interface definitions.
*
- * Copyright (C) 2020 Google LLC
+ * Copyright (C) 2020-2022 Google LLC
*/
+
#ifndef __GXP_H__
#define __GXP_H__
@@ -11,280 +12,38 @@
#include <linux/types.h>
/* Interface Version */
-#define GXP_INTERFACE_VERSION_MAJOR 1
-#define GXP_INTERFACE_VERSION_MINOR 4
-#define GXP_INTERFACE_VERSION_BUILD 0
+#define GXP_INTERFACE_VERSION_MAJOR 1
+#define GXP_INTERFACE_VERSION_MINOR 5
+#define GXP_INTERFACE_VERSION_BUILD 0
/*
- * mmap offsets for logging and tracing buffers
+ * mmap offsets for core logging and tracing buffers
* Requested size will be divided evenly among all cores. The whole buffer
* must be page-aligned, and the size of each core's buffer must be a multiple
* of PAGE_SIZE.
*/
-#define GXP_MMAP_LOG_BUFFER_OFFSET 0x10000
-#define GXP_MMAP_TRACE_BUFFER_OFFSET 0x20000
-
-#define GXP_IOCTL_BASE 0xEE
-
-#define GXP_INTERFACE_VERSION_BUILD_BUFFER_SIZE 64
-struct gxp_interface_version_ioctl {
- /*
- * Driver major version number.
- * Increments whenever a non-backwards compatible change to the
- * interface defined in this file changes.
- */
- __u16 version_major;
- /*
- * Driver minor version number.
- * Increments whenever a backwards compatible change, such as the
- * addition of a new IOCTL, is made to the interface defined in this
- * file.
- */
- __u16 version_minor;
- /*
- * Driver build identifier.
- * NULL-terminated string of the git hash of the commit the driver was
- * built from. If the driver had uncommitted changes the string will
- * end with "-dirty".
- */
- char version_build[GXP_INTERFACE_VERSION_BUILD_BUFFER_SIZE];
-};
-
-/* Query the driver's interface version. */
-#define GXP_GET_INTERFACE_VERSION \
- _IOR(GXP_IOCTL_BASE, 26, struct gxp_interface_version_ioctl)
-
-struct gxp_specs_ioctl {
- /* Maximum number of cores that can be allocated to a virtual device */
- __u8 core_count;
- /* Deprecated fields that should be ignored */
- __u16 reserved_0;
- __u16 reserved_1;
- __u16 reserved_2;
- __u8 reserved_3;
- /*
- * Amount of "tightly-coupled memory" or TCM available to each core.
- * The value returned will be in kB, or 0 if the value was not
- * specified in the device-tree.
- */
- __u32 memory_per_core;
-};
-
-/* Query system specs. */
-#define GXP_GET_SPECS \
- _IOR(GXP_IOCTL_BASE, 5, struct gxp_specs_ioctl)
-
-struct gxp_virtual_device_ioctl {
- /*
- * Input:
- * The number of cores requested for the virtual device.
- */
- __u8 core_count;
- /*
- * Input:
- * The number of threads requested per core.
- */
- __u16 threads_per_core;
- /*
- * Input:
- * The amount of memory requested per core, in kB.
- */
- __u32 memory_per_core;
- /*
- * Output:
- * The ID assigned to the virtual device and shared with its cores.
- */
- __u32 vdid;
-};
-
-/* Allocate virtual device. */
-#define GXP_ALLOCATE_VIRTUAL_DEVICE \
- _IOWR(GXP_IOCTL_BASE, 6, struct gxp_virtual_device_ioctl)
-
-/*
- * Components for which a client may hold a wakelock.
- * Acquired by passing these values as `components_to_wake` in
- * `struct gxp_acquire_wakelock_ioctl` to GXP_ACQUIRE_WAKELOCK and released by
- * passing these values directly as the argument to GXP_RELEASE_WAKELOCK.
- *
- * Multiple wakelocks can be acquired or released at once by passing multiple
- * components, ORed together.
- */
-#define WAKELOCK_BLOCK (1 << 0)
-#define WAKELOCK_VIRTUAL_DEVICE (1 << 1)
+#define GXP_MMAP_CORE_LOG_BUFFER_OFFSET 0x10000
+#define GXP_MMAP_CORE_TRACE_BUFFER_OFFSET 0x20000
-/*
- * DSP subsystem Power state values for use as `gxp_power_state` in
- * `struct gxp_acquire_wakelock_ioctl`.
- * Note: GXP_POWER_STATE_READY is a deprecated state. The way to achieve
- * original state is to request GXP_POWER_STATE_UUD with setting
- * GXP_POWER_LOW_FREQ_CLKMUX flag. Requesting GXP_POWER_STATE_READY is treated
- * as identical to GXP_POWER_STATE_UUD.
- */
-#define GXP_POWER_STATE_OFF 0
-#define GXP_POWER_STATE_UUD 1
-#define GXP_POWER_STATE_SUD 2
-#define GXP_POWER_STATE_UD 3
-#define GXP_POWER_STATE_NOM 4
-#define GXP_POWER_STATE_READY 5
-#define GXP_POWER_STATE_UUD_PLUS 6
-#define GXP_POWER_STATE_SUD_PLUS 7
-#define GXP_POWER_STATE_UD_PLUS 8
-#define GXP_NUM_POWER_STATES (GXP_POWER_STATE_UD_PLUS + 1)
+/* mmap offsets for MCU logging and tracing buffers */
+#define GXP_MMAP_MCU_LOG_BUFFER_OFFSET 0x30000
+#define GXP_MMAP_MCU_TRACE_BUFFER_OFFSET 0x40000
-/*
- * Memory interface power state values for use as `memory_power_state` in
- * `struct gxp_acquire_wakelock_ioctl`.
- */
-#define MEMORY_POWER_STATE_UNDEFINED 0
-#define MEMORY_POWER_STATE_MIN 1
-#define MEMORY_POWER_STATE_VERY_LOW 2
-#define MEMORY_POWER_STATE_LOW 3
-#define MEMORY_POWER_STATE_HIGH 4
-#define MEMORY_POWER_STATE_VERY_HIGH 5
-#define MEMORY_POWER_STATE_MAX 6
+/* For backward compatibility. */
+#define GXP_MMAP_LOG_BUFFER_OFFSET GXP_MMAP_CORE_LOG_BUFFER_OFFSET
+#define GXP_MMAP_TRACE_BUFFER_OFFSET GXP_MMAP_CORE_TRACE_BUFFER_OFFSET
-/*
- * GXP power flag macros, supported by `flags` in `gxp_acquire_wakelock_ioctl`
- * and `power_flags in `gxp_mailbox_command_ioctl`.
- *
- * Non-aggressor flag is deprecated. Setting this flag is a no-op since
- * non-aggressor support is defeatured.
- */
-#define GXP_POWER_NON_AGGRESSOR (1 << 0)
-/*
- * The client can request low frequency clkmux vote by this flag, which means
- * the kernel driver will switch the CLKMUX clocks to save more power.
- *
- * Note: The kernel driver keep separate track of low frequency clkmux votes
- * and normal votes, and the low frequency clkmux votes will have lower priority
- * than all normal votes.
- * For example, if the kerenl driver has two votes, one is GXP_POWER_STATE_UUD
- * without GXP_POWER_LOW_FREQ_CLKMUX, and the other one is GXP_POWER_STATE_NOM
- * with GXP_POWER_LOW_FREQ_CLKMUX. The voting result is GXP_POWER_STATE_UUD
- * without GXP_POWER_LOW_FREQ_CLKMUX.
- */
-#define GXP_POWER_LOW_FREQ_CLKMUX (1 << 1)
-
-struct gxp_acquire_wakelock_ioctl {
- /*
- * The components for which a wakelock will be acquired.
- * Should be one of WAKELOCK_BLOCK or WAKELOCK_VIRTUAL_DEVICE, or a
- * bitwise OR of both.
- *
- * A VIRTUAL_DEVICE wakelock cannot be acquired until the client has
- * allocated a virtual device. To acquire a VIRTUAL_DEVICE wakelock, a
- * client must already have acquired a BLOCK wakelock or acquire both
- * in the same call.
- */
- __u32 components_to_wake;
- /*
- * Minimum power state to operate the entire DSP subsystem at until
- * the BLOCK wakelock is released. One of the GXP_POWER_STATE_* defines
- * from above. Note that the requested power state will not be cleared
- * if only the VIRTUAL_DEVICE wakelock is released.
- *
- * `GXP_POWER_STATE_OFF` is not a valid value when acquiring a
- * wakelock.
- */
- __u32 gxp_power_state;
- /*
- * Memory interface power state to request from the system so long as
- * the BLOCK wakelock is held. One of the MEMORY_POWER_STATE* defines
- * from above. The requested memory power state will not be cleared if
- * only the VIRTUAL_DEVICE wakelock is released.
- *
- * If `MEMORY_POWER_STATE_UNDEFINED` is passed, no request to change
- * the memory interface power state will be made.
- */
- __u32 memory_power_state;
- /*
- * How long to wait, in microseconds, before returning if insufficient
- * physical cores are available when attempting to acquire a
- * VIRTUAL_DEVICE wakelock. A value of 0 indicates that the IOCTL
- * should not wait at all if cores are not available.
- */
- __u32 vd_timeout_us;
- /*
- * Flags indicating power attribute requests from the runtime.
- * Set RESERVED bits to 0 to ensure backwards compatibility.
- *
- * Bitfields:
- * [0:0] - Deprecated, do not use
- * [1:1] - LOW_FREQ_CLKMUX setting for power management
- * 0 = Don't switch CLKMUX clocks, default value
- * 1 = Switch CLKMUX clocks
- * [31:2] - RESERVED
- */
- __u32 flags;
-};
-
-/*
- * Acquire a wakelock and request minimum power states for the DSP subsystem
- * and the memory interface.
- *
- * Upon a successful return, the specified components will be powered on and if
- * they were not already running at the specified or higher power states,
- * requests will have been sent to transition both the DSP subsystem and
- * memory interface to the specified states.
- *
- * If the same client invokes this IOCTL for the same component more than once
- * without a corresponding call to `GXP_RELEASE_WAKE_LOCK` in between, the
- * second call will update requested power states, but have no other effects.
- * No additional call to `GXP_RELEASE_WAKE_LOCK` will be required.
- *
- * If a client attempts to acquire a VIRTUAL_DEVICE wakelock and there are
- * insufficient physical cores available, the driver will wait up to
- * `vd_timeout_us` microseconds, then return -EBUSY if sufficient cores were
- * never made available. In this case, if both BLOCK and VIRTUAL_DEVICE
- * wakelocks were being requested, neither will have been acquired.
- */
-#define GXP_ACQUIRE_WAKE_LOCK \
- _IOW(GXP_IOCTL_BASE, 25, struct gxp_acquire_wakelock_ioctl)
-
-/*
- * Legacy "acquire wakelock" IOCTL that does not support power flags.
- * This IOCTL exists for backwards compatibility with older runtimes. All other
- * fields are the same as in `struct gxp_acquire_wakelock_ioctl`.
- */
-struct gxp_acquire_wakelock_compat_ioctl {
- __u32 components_to_wake;
- __u32 gxp_power_state;
- __u32 memory_power_state;
- __u32 vd_timeout_us;
-};
-
-#define GXP_ACQUIRE_WAKE_LOCK_COMPAT \
- _IOW(GXP_IOCTL_BASE, 18, struct gxp_acquire_wakelock_compat_ioctl)
-
-/*
- * Release a wakelock acquired via `GXP_ACQUIRE_WAKE_LOCK`.
- *
- * The argument should be one of WAKELOCK_BLOCK or WAKELOCK_VIRTUAL_DEVICE, or a
- * bitwise OR of both.
- *
- * Upon releasing a VIRTUAL_DEVICE wakelock, a client's virtual device will be
- * removed from physical cores. At that point the cores may be reallocated to
- * another client or powered down.
- *
- * If no clients hold a BLOCK wakelock, the entire DSP subsytem may be powered
- * down. If a client attempts to release a BLOCK wakelock while still holding
- * a VIRTUAL_DEVICE wakelock, this IOCTL will return -EBUSY.
- *
- * If a client attempts to release a wakelock it does not hold, this IOCTL will
- * return -ENODEV.
- */
-#define GXP_RELEASE_WAKE_LOCK _IOW(GXP_IOCTL_BASE, 19, __u32)
+#define GXP_IOCTL_BASE 0xEE
/* GXP map flag macros */
/* The mask for specifying DMA direction in GXP map flag */
-#define GXP_MAP_DIR_MASK 3
+#define GXP_MAP_DIR_MASK 3
/* The targeted DMA direction for the buffer */
-#define GXP_MAP_DMA_BIDIRECTIONAL 0
-#define GXP_MAP_DMA_TO_DEVICE 1
-#define GXP_MAP_DMA_FROM_DEVICE 2
+#define GXP_MAP_DMA_BIDIRECTIONAL 0
+#define GXP_MAP_DMA_TO_DEVICE 1
+#define GXP_MAP_DMA_FROM_DEVICE 2
/* Create coherent mappings of the buffer. */
-#define GXP_MAP_COHERENT (1 << 2)
+#define GXP_MAP_COHERENT (1 << 2)
struct gxp_map_ioctl {
/*
@@ -297,8 +56,8 @@ struct gxp_map_ioctl {
* buffer for all cores it had been mapped for.
*/
__u16 virtual_core_list;
- __u64 host_address; /* virtual address in the process space */
- __u32 size; /* size of mapping in bytes */
+ __u64 host_address; /* virtual address in the process space */
+ __u32 size; /* size of mapping in bytes */
/*
* Flags indicating mapping attribute requests from the runtime.
* Set RESERVED bits to 0 to ensure backwards compatibility.
@@ -318,7 +77,7 @@ struct gxp_map_ioctl {
* [31:3] - RESERVED
*/
__u32 flags;
- __u64 device_address; /* returned device address */
+ __u64 device_address; /* returned device address */
};
/*
@@ -326,8 +85,7 @@ struct gxp_map_ioctl {
*
* The client must have allocated a virtual device.
*/
-#define GXP_MAP_BUFFER \
- _IOWR(GXP_IOCTL_BASE, 0, struct gxp_map_ioctl)
+#define GXP_MAP_BUFFER _IOWR(GXP_IOCTL_BASE, 0, struct gxp_map_ioctl)
/*
* Un-map host buffer previously mapped by GXP_MAP_BUFFER.
@@ -338,12 +96,11 @@ struct gxp_map_ioctl {
*
* The client must have allocated a virtual device.
*/
-#define GXP_UNMAP_BUFFER \
- _IOW(GXP_IOCTL_BASE, 1, struct gxp_map_ioctl)
+#define GXP_UNMAP_BUFFER _IOW(GXP_IOCTL_BASE, 1, struct gxp_map_ioctl)
/* GXP sync flag macros */
-#define GXP_SYNC_FOR_DEVICE (0)
-#define GXP_SYNC_FOR_CPU (1)
+#define GXP_SYNC_FOR_DEVICE (0)
+#define GXP_SYNC_FOR_CPU (1)
struct gxp_sync_ioctl {
/*
@@ -380,167 +137,12 @@ struct gxp_sync_ioctl {
* EINVAL: If @size equals 0.
* EINVAL: If @offset plus @size exceeds the mapping size.
*/
-#define GXP_SYNC_BUFFER \
- _IOW(GXP_IOCTL_BASE, 2, struct gxp_sync_ioctl)
-
-struct gxp_map_dmabuf_ioctl {
- /*
- * Deprecated. All virtual cores will be mapped.
- *
- * Bitfield indicating which virtual cores to map the dma-buf for.
- * To map for virtual core X, set bit X in this field, i.e. `1 << X`.
- *
- * This field is not used by the unmap dma-buf IOCTL, which always
- * unmaps a dma-buf for all cores it had been mapped for.
- */
- __u16 virtual_core_list;
- __s32 dmabuf_fd; /* File descriptor of the dma-buf to map. */
- /*
- * Flags indicating mapping attribute requests from the runtime.
- * Set RESERVED bits to 0 to ensure backwards compatibility.
- *
- * Bitfields:
- * [1:0] - DMA_DIRECTION:
- * 00 = DMA_BIDIRECTIONAL (host/device can write buffer)
- * 01 = DMA_TO_DEVICE (host can write buffer)
- * 10 = DMA_FROM_DEVICE (device can write buffer)
- * Note: DMA_DIRECTION is the direction in which data moves
- * from the host's perspective.
- * [31:2] - RESERVED
- */
- __u32 flags;
- /*
- * Device address the dmabuf is mapped to.
- * - GXP_MAP_DMABUF uses this field to return the address the dma-buf
- * can be accessed from by the device.
- * - GXP_UNMAP_DMABUF expects this field to contain the value from the
- * mapping call, and uses it to determine which dma-buf to unmap.
- */
- __u64 device_address;
-};
-
-/*
- * Map host buffer via its dma-buf FD.
- *
- * The client must have allocated a virtual device.
- */
-#define GXP_MAP_DMABUF _IOWR(GXP_IOCTL_BASE, 20, struct gxp_map_dmabuf_ioctl)
-
-/*
- * Un-map host buffer previously mapped by GXP_MAP_DMABUF.
- *
- * Only the @device_address field is used. Other fields are fetched from the
- * kernel's internal records. It is recommended to use the argument that was
- * passed in GXP_MAP_DMABUF to un-map the dma-buf.
- *
- * The client must have allocated a virtual device.
- */
-#define GXP_UNMAP_DMABUF _IOW(GXP_IOCTL_BASE, 21, struct gxp_map_dmabuf_ioctl)
-
-struct gxp_mailbox_command_ioctl {
- /*
- * Input:
- * The virtual core to dispatch the command to.
- * Only used in direct mode.
- */
- __u16 virtual_core_id;
- /*
- * Input:
- * The number of cores to dispatch the command to.
- * Only used in non-direct mode.
- */
- __u16 num_cores;
- /*
- * Output:
- * The sequence number assigned to this command. The caller can use
- * this value to match responses fetched via `GXP_MAILBOX_RESPONSE`
- * with this command.
- */
- __u64 sequence_number;
- /*
- * Input:
- * Device address to the buffer containing a GXP command. The user
- * should have obtained this address from the GXP_MAP_BUFFER ioctl.
- */
- __u64 device_address;
- /*
- * Input:
- * Size of the buffer at `device_address` in bytes.
- */
- __u32 size;
- /*
- * Input:
- * Minimum power state to operate the entire DSP subsystem at until
- * the mailbox command is finished(executed or timeout). One of the
- * GXP_POWER_STATE_* defines from below.
- *
- * `GXP_POWER_STATE_OFF` is not a valid value when executing a
- * mailbox command. The caller should pass GXP_POWER_STATE_UUD if the
- * command is expected to run at the power state the wakelock has
- * specified.
- */
- __u32 gxp_power_state;
- /*
- * Input:
- * Memory interface power state to request from the system so long as
- * the mailbox command is executing. One of the MEMORY_POWER_STATE*
- * defines from below.
- *
- * If `MEMORY_POWER_STATE_UNDEFINED` is passed, no request to change
- * the memory interface power state will be made.
- */
- __u32 memory_power_state;
- /*
- * Input:
- * Flags describing the command, for use by the GXP device.
- */
- __u32 flags;
- /*
- * Input:
- * Flags indicating power attribute requests from the runtime.
- * Set RESERVED bits to 0 to ensure backwards compatibility.
- *
- * Bitfields:
- * [0:0] - Deprecated, do not use
- * [1:1] - LOW_FREQ_CLKMUX setting for power management
- * 0 = Don't switch CLKMUX clocks, default value
- * 1 = Switch CLKMUX clocks
- * [31:2] - RESERVED
- */
- __u32 power_flags;
-};
-
-/*
- * Push element to the mailbox commmand queue.
- *
- * The client must hold a VIRTUAL_DEVICE wakelock.
- */
-#define GXP_MAILBOX_COMMAND \
- _IOWR(GXP_IOCTL_BASE, 23, struct gxp_mailbox_command_ioctl)
-
-/*
- * Legacy "mailbox command" IOCTL that does not support power requests.
- * This IOCTL exists for backwards compatibility with older runtimes. All
- * fields, other than the unsupported `gxp_power_state`, `memory_power_state`,
- * and `power_flags`, are the same as in `struct gxp_mailbox_command_ioctl`.
- */
-struct gxp_mailbox_command_compat_ioctl {
- __u16 virtual_core_id;
- __u16 num_cores;
- __u64 sequence_number;
- __u64 device_address;
- __u32 size;
- __u32 flags;
-};
-
-/* The client must hold a VIRTUAL_DEVICE wakelock. */
-#define GXP_MAILBOX_COMMAND_COMPAT \
- _IOW(GXP_IOCTL_BASE, 3, struct gxp_mailbox_command_compat_ioctl)
+#define GXP_SYNC_BUFFER _IOW(GXP_IOCTL_BASE, 2, struct gxp_sync_ioctl)
/* GXP mailbox response error code values */
-#define GXP_RESPONSE_ERROR_NONE (0)
-#define GXP_RESPONSE_ERROR_INTERNAL (1)
-#define GXP_RESPONSE_ERROR_TIMEOUT (2)
+#define GXP_RESPONSE_ERROR_NONE (0)
+#define GXP_RESPONSE_ERROR_INTERNAL (1)
+#define GXP_RESPONSE_ERROR_TIMEOUT (2)
struct gxp_mailbox_response_ioctl {
/*
@@ -576,51 +178,54 @@ struct gxp_mailbox_response_ioctl {
*
* The client must hold a VIRTUAL_DEVICE wakelock.
*/
-#define GXP_MAILBOX_RESPONSE \
+#define GXP_MAILBOX_RESPONSE \
_IOWR(GXP_IOCTL_BASE, 4, struct gxp_mailbox_response_ioctl)
-struct gxp_register_mailbox_eventfd_ioctl {
+struct gxp_specs_ioctl {
+ /* Maximum number of cores that can be allocated to a virtual device */
+ __u8 core_count;
+ /* Deprecated fields that should be ignored */
+ __u16 reserved_0;
+ __u16 reserved_1;
+ __u16 reserved_2;
+ __u8 reserved_3;
/*
- * This eventfd will be signaled whenever a mailbox response arrives
- * for the core specified by `virtual_core_id`.
- *
- * When registering, if an eventfd has already been registered for the
- * specified core, the old eventfd will be unregistered and replaced.
- *
- * Not used during the unregister call, which clears any existing
- * eventfd.
+ * Amount of "tightly-coupled memory" or TCM available to each core.
+ * The value returned will be in kB, or 0 if the value was not
+ * specified in the device-tree.
*/
- __u32 eventfd;
+ __u32 memory_per_core;
+};
+
+/* Query system specs. */
+#define GXP_GET_SPECS _IOR(GXP_IOCTL_BASE, 5, struct gxp_specs_ioctl)
+
+struct gxp_virtual_device_ioctl {
/*
- * Reserved.
- * Pass 0 for backwards compatibility.
+ * Input:
+ * The number of cores requested for the virtual device.
*/
- __u32 flags;
+ __u8 core_count;
/*
- * The virtual core to register or unregister an eventfd from.
- * While an eventfd is registered, it will be signaled exactly once
- * any time a command to this virtual core receives a response or times
- * out.
+ * Input:
+ * The number of threads requested per core.
*/
- __u16 virtual_core_id;
+ __u16 threads_per_core;
+ /*
+ * Input:
+ * The amount of memory requested per core, in kB.
+ */
+ __u32 memory_per_core;
+ /*
+ * Output:
+ * The ID assigned to the virtual device and shared with its cores.
+ */
+ __u32 vdid;
};
-/*
- * Register an eventfd to be signaled whenever the specified virtual core
- * sends a mailbox response.
- *
- * The client must have allocated a virtual device.
- */
-#define GXP_REGISTER_MAILBOX_EVENTFD \
- _IOW(GXP_IOCTL_BASE, 22, struct gxp_register_mailbox_eventfd_ioctl)
-
-/*
- * Clear a previously registered mailbox response eventfd.
- *
- * The client must have allocated a virtual device.
- */
-#define GXP_UNREGISTER_MAILBOX_EVENTFD \
- _IOW(GXP_IOCTL_BASE, 24, struct gxp_register_mailbox_eventfd_ioctl)
+/* Allocate virtual device. */
+#define GXP_ALLOCATE_VIRTUAL_DEVICE \
+ _IOWR(GXP_IOCTL_BASE, 6, struct gxp_virtual_device_ioctl)
#define ETM_TRACE_LSB_MASK 0x1
#define ETM_TRACE_SYNC_MSG_PERIOD_MIN 8
@@ -673,7 +278,7 @@ struct gxp_etm_trace_start_ioctl {
*
* The client must hold a VIRTUAL_DEVICE wakelock.
*/
-#define GXP_ETM_TRACE_START_COMMAND \
+#define GXP_ETM_TRACE_START_COMMAND \
_IOW(GXP_IOCTL_BASE, 7, struct gxp_etm_trace_start_ioctl)
/*
@@ -682,8 +287,7 @@ struct gxp_etm_trace_start_ioctl {
*
* The client must hold a VIRTUAL_DEVICE wakelock.
*/
-#define GXP_ETM_TRACE_SW_STOP_COMMAND \
- _IOW(GXP_IOCTL_BASE, 8, __u16)
+#define GXP_ETM_TRACE_SW_STOP_COMMAND _IOW(GXP_IOCTL_BASE, 8, __u16)
/*
* Users should call this IOCTL after tracing has been stopped for the last
@@ -694,8 +298,7 @@ struct gxp_etm_trace_start_ioctl {
*
* The client must hold a VIRTUAL_DEVICE wakelock.
*/
-#define GXP_ETM_TRACE_CLEANUP_COMMAND \
- _IOW(GXP_IOCTL_BASE, 9, __u16)
+#define GXP_ETM_TRACE_CLEANUP_COMMAND _IOW(GXP_IOCTL_BASE, 9, __u16)
#define GXP_TRACE_HEADER_SIZE 256
#define GXP_TRACE_RAM_SIZE 4096
@@ -731,11 +334,11 @@ struct gxp_etm_get_trace_info_ioctl {
*
* The client must hold a VIRTUAL_DEVICE wakelock.
*/
-#define GXP_ETM_GET_TRACE_INFO_COMMAND \
+#define GXP_ETM_GET_TRACE_INFO_COMMAND \
_IOWR(GXP_IOCTL_BASE, 10, struct gxp_etm_get_trace_info_ioctl)
-#define GXP_TELEMETRY_TYPE_LOGGING (0)
-#define GXP_TELEMETRY_TYPE_TRACING (1)
+#define GXP_TELEMETRY_TYPE_LOGGING (0)
+#define GXP_TELEMETRY_TYPE_TRACING (1)
/*
* Enable either logging or software tracing for all cores.
@@ -750,7 +353,7 @@ struct gxp_etm_get_trace_info_ioctl {
* logging/tracing to their buffers. Any cores booting after this call will
* begin logging/tracing as soon as their firmware is able to.
*/
-#define GXP_ENABLE_TELEMETRY _IOWR(GXP_IOCTL_BASE, 11, __u8)
+#define GXP_ENABLE_CORE_TELEMETRY _IOWR(GXP_IOCTL_BASE, 11, __u8)
/*
* Disable either logging or software tracing for all cores.
@@ -760,7 +363,53 @@ struct gxp_etm_get_trace_info_ioctl {
* This call will block until any running cores have been notified and ACKed
* that they have disabled the specified telemetry type.
*/
-#define GXP_DISABLE_TELEMETRY _IOWR(GXP_IOCTL_BASE, 12, __u8)
+#define GXP_DISABLE_CORE_TELEMETRY _IOWR(GXP_IOCTL_BASE, 12, __u8)
+
+/* For backward compatibility. */
+#define GXP_ENABLE_TELEMETRY GXP_ENABLE_CORE_TELEMETRY
+#define GXP_DISABLE_TELEMETRY GXP_DISABLE_CORE_TELEMETRY
+
+struct gxp_tpu_mbx_queue_ioctl {
+ __u32 tpu_fd; /* TPU virtual device group fd */
+ /*
+ * Deprecated. All virtual cores will be mapped.
+ *
+ * Bitfield indicating which virtual cores to allocate and map the
+ * buffers for.
+ * To map for virtual core X, set bit X in this field, i.e. `1 << X`.
+ *
+ * This field is not used by the unmap IOCTL, which always unmaps the
+ * buffers for all cores it had been mapped for.
+ */
+ __u32 virtual_core_list;
+ /*
+ * The user address of an edgetpu_mailbox_attr struct, containing
+ * cmd/rsp queue size, mailbox priority and other relevant info.
+ * This structure is defined in edgetpu.h in the TPU driver.
+ */
+ __u64 attr_ptr;
+};
+
+/*
+ * Map TPU-DSP mailbox cmd/rsp queue buffers.
+ *
+ * The client must have allocated a virtual device.
+ */
+#define GXP_MAP_TPU_MBX_QUEUE \
+ _IOW(GXP_IOCTL_BASE, 13, struct gxp_tpu_mbx_queue_ioctl)
+
+/*
+ * Un-map TPU-DSP mailbox cmd/rsp queue buffers previously mapped by
+ * GXP_MAP_TPU_MBX_QUEUE.
+ *
+ * Only the @tpu_fd field will be used. Other fields will be fetched
+ * from the kernel's internal records. It is recommended to use the argument
+ * that was passed in GXP_MAP_TPU_MBX_QUEUE to un-map the buffers.
+ *
+ * The client must have allocated a virtual device.
+ */
+#define GXP_UNMAP_TPU_MBX_QUEUE \
+ _IOW(GXP_IOCTL_BASE, 14, struct gxp_tpu_mbx_queue_ioctl)
struct gxp_register_telemetry_eventfd_ioctl {
/*
@@ -778,12 +427,16 @@ struct gxp_register_telemetry_eventfd_ioctl {
__u8 type;
};
-#define GXP_REGISTER_TELEMETRY_EVENTFD \
+#define GXP_REGISTER_CORE_TELEMETRY_EVENTFD \
_IOW(GXP_IOCTL_BASE, 15, struct gxp_register_telemetry_eventfd_ioctl)
-#define GXP_UNREGISTER_TELEMETRY_EVENTFD \
+#define GXP_UNREGISTER_CORE_TELEMETRY_EVENTFD \
_IOW(GXP_IOCTL_BASE, 16, struct gxp_register_telemetry_eventfd_ioctl)
+/* For backward compatibility. */
+#define GXP_REGISTER_TELEMETRY_EVENTFD GXP_REGISTER_CORE_TELEMETRY_EVENTFD
+#define GXP_UNREGISTER_TELEMETRY_EVENTFD GXP_UNREGISTER_CORE_TELEMETRY_EVENTFD
+
/*
* Reads the 2 global counter registers in AURORA_TOP and combines them to
* return the full 64-bit value of the counter.
@@ -792,47 +445,371 @@ struct gxp_register_telemetry_eventfd_ioctl {
*/
#define GXP_READ_GLOBAL_COUNTER _IOR(GXP_IOCTL_BASE, 17, __u64)
-struct gxp_tpu_mbx_queue_ioctl {
- __u32 tpu_fd; /* TPU virtual device group fd */
+/*
+ * Release a wakelock acquired via `GXP_ACQUIRE_WAKE_LOCK`.
+ *
+ * The argument should be one of WAKELOCK_BLOCK or WAKELOCK_VIRTUAL_DEVICE, or a
+ * bitwise OR of both.
+ *
+ * Upon releasing a VIRTUAL_DEVICE wakelock, a client's virtual device will be
+ * removed from physical cores. At that point the cores may be reallocated to
+ * another client or powered down.
+ *
+ * If no clients hold a BLOCK wakelock, the entire DSP subsystem may be powered
+ * down. If a client attempts to release a BLOCK wakelock while still holding
+ * a VIRTUAL_DEVICE wakelock, this IOCTL will return -EBUSY.
+ *
+ * If a client attempts to release a wakelock it does not hold, this IOCTL will
+ * return -ENODEV.
+ */
+#define GXP_RELEASE_WAKE_LOCK _IOW(GXP_IOCTL_BASE, 19, __u32)
+
+struct gxp_map_dmabuf_ioctl {
/*
* Deprecated. All virtual cores will be mapped.
*
- * Bitfield indicating which virtual cores to allocate and map the
- * buffers for.
+ * Bitfield indicating which virtual cores to map the dma-buf for.
* To map for virtual core X, set bit X in this field, i.e. `1 << X`.
*
- * This field is not used by the unmap IOCTL, which always unmaps the
- * buffers for all cores it had been mapped for.
+ * This field is not used by the unmap dma-buf IOCTL, which always
+ * unmaps a dma-buf for all cores it had been mapped for.
*/
- __u32 virtual_core_list;
+ __u16 virtual_core_list;
+ __s32 dmabuf_fd; /* File descriptor of the dma-buf to map. */
/*
- * The user address of an edgetpu_mailbox_attr struct, containing
- * cmd/rsp queue size, mailbox priority and other relevant info.
- * This structure is defined in edgetpu.h in the TPU driver.
+ * Flags indicating mapping attribute requests from the runtime.
+ * Set RESERVED bits to 0 to ensure backwards compatibility.
+ *
+ * Bitfields:
+ * [1:0] - DMA_DIRECTION:
+ * 00 = DMA_BIDIRECTIONAL (host/device can write buffer)
+ * 01 = DMA_TO_DEVICE (host can write buffer)
+ * 10 = DMA_FROM_DEVICE (device can write buffer)
+ * Note: DMA_DIRECTION is the direction in which data moves
+ * from the host's perspective.
+ * [31:2] - RESERVED
*/
- __u64 attr_ptr;
+ __u32 flags;
+ /*
+ * Device address the dmabuf is mapped to.
+ * - GXP_MAP_DMABUF uses this field to return the address the dma-buf
+ * can be accessed from by the device.
+ * - GXP_UNMAP_DMABUF expects this field to contain the value from the
+ * mapping call, and uses it to determine which dma-buf to unmap.
+ */
+ __u64 device_address;
};
/*
- * Map TPU-DSP mailbox cmd/rsp queue buffers.
+ * Map host buffer via its dma-buf FD.
*
* The client must have allocated a virtual device.
*/
-#define GXP_MAP_TPU_MBX_QUEUE \
- _IOW(GXP_IOCTL_BASE, 13, struct gxp_tpu_mbx_queue_ioctl)
+#define GXP_MAP_DMABUF _IOWR(GXP_IOCTL_BASE, 20, struct gxp_map_dmabuf_ioctl)
/*
- * Un-map TPU-DSP mailbox cmd/rsp queue buffers previously mapped by
- * GXP_MAP_TPU_MBX_QUEUE.
+ * Un-map host buffer previously mapped by GXP_MAP_DMABUF.
*
- * Only the @tpu_fd field will be used. Other fields will be fetched
- * from the kernel's internal records. It is recommended to use the argument
- * that was passed in GXP_MAP_TPU_MBX_QUEUE to un-map the buffers.
+ * Only the @device_address field is used. Other fields are fetched from the
+ * kernel's internal records. It is recommended to use the argument that was
+ * passed in GXP_MAP_DMABUF to un-map the dma-buf.
*
* The client must have allocated a virtual device.
*/
-#define GXP_UNMAP_TPU_MBX_QUEUE \
- _IOW(GXP_IOCTL_BASE, 14, struct gxp_tpu_mbx_queue_ioctl)
+#define GXP_UNMAP_DMABUF _IOW(GXP_IOCTL_BASE, 21, struct gxp_map_dmabuf_ioctl)
+
+struct gxp_register_mailbox_eventfd_ioctl {
+ /*
+ * This eventfd will be signaled whenever a mailbox response arrives
+ * for the core specified by `virtual_core_id`.
+ *
+ * When registering, if an eventfd has already been registered for the
+ * specified core, the old eventfd will be unregistered and replaced.
+ *
+ * Not used during the unregister call, which clears any existing
+ * eventfd.
+ */
+ __u32 eventfd;
+ /*
+ * Reserved.
+ * Pass 0 for backwards compatibility.
+ */
+ __u32 flags;
+ /*
+ * The virtual core to register or unregister an eventfd from.
+ * While an eventfd is registered, it will be signaled exactly once
+ * any time a command to this virtual core receives a response or times
+ * out.
+ */
+ __u16 virtual_core_id;
+};
+
+/*
+ * Register an eventfd to be signaled whenever the specified virtual core
+ * sends a mailbox response.
+ *
+ * The client must have allocated a virtual device.
+ */
+#define GXP_REGISTER_MAILBOX_EVENTFD \
+ _IOW(GXP_IOCTL_BASE, 22, struct gxp_register_mailbox_eventfd_ioctl)
+
+struct gxp_mailbox_command_ioctl {
+ /*
+ * Input:
+ * The virtual core to dispatch the command to.
+ * Only used in direct mode.
+ */
+ __u16 virtual_core_id;
+ /*
+ * Input:
+ * The number of cores to dispatch the command to.
+ * Only used in non-direct mode.
+ */
+ __u16 num_cores;
+ /*
+ * Output:
+ * The sequence number assigned to this command. The caller can use
+ * this value to match responses fetched via `GXP_MAILBOX_RESPONSE`
+ * with this command.
+ */
+ __u64 sequence_number;
+ /*
+ * Input:
+	 * Device address of the buffer containing a GXP command. The user
+ * should have obtained this address from the GXP_MAP_BUFFER ioctl.
+ */
+ __u64 device_address;
+ /*
+ * Input:
+ * Size of the buffer at `device_address` in bytes.
+ */
+ __u32 size;
+ /*
+ * Input:
+ * Minimum power state to operate the entire DSP subsystem at until
+	 * the mailbox command is finished (executed or timed out). One of the
+ * GXP_POWER_STATE_* defines from below.
+ *
+ * `GXP_POWER_STATE_OFF` is not a valid value when executing a
+ * mailbox command. The caller should pass GXP_POWER_STATE_UUD if the
+ * command is expected to run at the power state the wakelock has
+ * specified.
+ */
+ __u32 gxp_power_state;
+ /*
+ * Input:
+ * Memory interface power state to request from the system so long as
+ * the mailbox command is executing. One of the MEMORY_POWER_STATE*
+ * defines from below.
+ *
+ * If `MEMORY_POWER_STATE_UNDEFINED` is passed, no request to change
+ * the memory interface power state will be made.
+ */
+ __u32 memory_power_state;
+ /*
+ * Input:
+ * Flags describing the command, for use by the GXP device.
+ */
+ __u32 flags;
+ /*
+ * Input:
+ * Flags indicating power attribute requests from the runtime.
+ * Set RESERVED bits to 0 to ensure backwards compatibility.
+ *
+ * Bitfields:
+ * [0:0] - Deprecated, do not use
+ * [1:1] - LOW_FREQ_CLKMUX setting for power management
+ * 0 = Don't switch CLKMUX clocks, default value
+ * 1 = Switch CLKMUX clocks
+ * [31:2] - RESERVED
+ */
+ __u32 power_flags;
+};
+
+/*
+ * Push an element to the mailbox command queue.
+ *
+ * The client must hold a VIRTUAL_DEVICE wakelock.
+ */
+#define GXP_MAILBOX_COMMAND \
+ _IOWR(GXP_IOCTL_BASE, 23, struct gxp_mailbox_command_ioctl)
+
+/*
+ * Clear a previously registered mailbox response eventfd.
+ *
+ * The client must have allocated a virtual device.
+ */
+#define GXP_UNREGISTER_MAILBOX_EVENTFD \
+ _IOW(GXP_IOCTL_BASE, 24, struct gxp_register_mailbox_eventfd_ioctl)
+
+/*
+ * Components for which a client may hold a wakelock.
+ * Acquired by passing these values as `components_to_wake` in
+ * `struct gxp_acquire_wakelock_ioctl` to GXP_ACQUIRE_WAKELOCK and released by
+ * passing these values directly as the argument to GXP_RELEASE_WAKELOCK.
+ *
+ * Multiple wakelocks can be acquired or released at once by passing multiple
+ * components, ORed together.
+ */
+#define WAKELOCK_BLOCK (1 << 0)
+#define WAKELOCK_VIRTUAL_DEVICE (1 << 1)
+
+/*
+ * DSP subsystem power state values for use as `gxp_power_state` in
+ * `struct gxp_acquire_wakelock_ioctl`.
+ * Note: GXP_POWER_STATE_READY is a deprecated state. The way to achieve the
+ * original behavior is to request GXP_POWER_STATE_UUD with the
+ * GXP_POWER_LOW_FREQ_CLKMUX flag set. Requesting GXP_POWER_STATE_READY is
+ * treated as identical to GXP_POWER_STATE_UUD.
+ */
+#define GXP_POWER_STATE_OFF 0
+#define GXP_POWER_STATE_UUD 1
+#define GXP_POWER_STATE_SUD 2
+#define GXP_POWER_STATE_UD 3
+#define GXP_POWER_STATE_NOM 4
+#define GXP_POWER_STATE_READY 5
+#define GXP_POWER_STATE_UUD_PLUS 6
+#define GXP_POWER_STATE_SUD_PLUS 7
+#define GXP_POWER_STATE_UD_PLUS 8
+#define GXP_NUM_POWER_STATES (GXP_POWER_STATE_UD_PLUS + 1)
+
+/*
+ * Memory interface power state values for use as `memory_power_state` in
+ * `struct gxp_acquire_wakelock_ioctl`.
+ */
+#define MEMORY_POWER_STATE_UNDEFINED 0
+#define MEMORY_POWER_STATE_MIN 1
+#define MEMORY_POWER_STATE_VERY_LOW 2
+#define MEMORY_POWER_STATE_LOW 3
+#define MEMORY_POWER_STATE_HIGH 4
+#define MEMORY_POWER_STATE_VERY_HIGH 5
+#define MEMORY_POWER_STATE_MAX 6
+
+/*
+ * GXP power flag macros, supported by `flags` in `gxp_acquire_wakelock_ioctl`
+ * and `power_flags` in `gxp_mailbox_command_ioctl`.
+ *
+ * Non-aggressor flag is deprecated. Setting this flag is a no-op since
+ * non-aggressor support is defeatured.
+ */
+#define GXP_POWER_NON_AGGRESSOR (1 << 0)
+/*
+ * The client can request low frequency clkmux vote by this flag, which means
+ * the kernel driver will switch the CLKMUX clocks to save more power.
+ *
+ * Note: The kernel driver keeps separate track of low frequency clkmux votes
+ * and normal votes, and the low frequency clkmux votes have lower priority
+ * than all normal votes.
+ * For example, if the kernel driver has two votes, one for GXP_POWER_STATE_UUD
+ * without GXP_POWER_LOW_FREQ_CLKMUX and the other for GXP_POWER_STATE_NOM
+ * with GXP_POWER_LOW_FREQ_CLKMUX, the voting result is GXP_POWER_STATE_UUD
+ * without GXP_POWER_LOW_FREQ_CLKMUX.
+ */
+#define GXP_POWER_LOW_FREQ_CLKMUX (1 << 1)
+
+struct gxp_acquire_wakelock_ioctl {
+ /*
+ * The components for which a wakelock will be acquired.
+ * Should be one of WAKELOCK_BLOCK or WAKELOCK_VIRTUAL_DEVICE, or a
+ * bitwise OR of both.
+ *
+ * A VIRTUAL_DEVICE wakelock cannot be acquired until the client has
+ * allocated a virtual device. To acquire a VIRTUAL_DEVICE wakelock, a
+ * client must already have acquired a BLOCK wakelock or acquire both
+ * in the same call.
+ */
+ __u32 components_to_wake;
+ /*
+ * Minimum power state to operate the entire DSP subsystem at until
+ * the BLOCK wakelock is released. One of the GXP_POWER_STATE_* defines
+ * from above. Note that the requested power state will not be cleared
+ * if only the VIRTUAL_DEVICE wakelock is released.
+ *
+ * `GXP_POWER_STATE_OFF` is not a valid value when acquiring a
+ * wakelock.
+ */
+ __u32 gxp_power_state;
+ /*
+ * Memory interface power state to request from the system so long as
+ * the BLOCK wakelock is held. One of the MEMORY_POWER_STATE* defines
+ * from above. The requested memory power state will not be cleared if
+ * only the VIRTUAL_DEVICE wakelock is released.
+ *
+ * If `MEMORY_POWER_STATE_UNDEFINED` is passed, no request to change
+ * the memory interface power state will be made.
+ */
+ __u32 memory_power_state;
+ /*
+ * How long to wait, in microseconds, before returning if insufficient
+ * physical cores are available when attempting to acquire a
+ * VIRTUAL_DEVICE wakelock. A value of 0 indicates that the IOCTL
+ * should not wait at all if cores are not available.
+ */
+ __u32 vd_timeout_us;
+ /*
+ * Flags indicating power attribute requests from the runtime.
+ * Set RESERVED bits to 0 to ensure backwards compatibility.
+ *
+ * Bitfields:
+ * [0:0] - Deprecated, do not use
+ * [1:1] - LOW_FREQ_CLKMUX setting for power management
+ * 0 = Don't switch CLKMUX clocks, default value
+ * 1 = Switch CLKMUX clocks
+ * [31:2] - RESERVED
+ */
+ __u32 flags;
+};
+
+/*
+ * Acquire a wakelock and request minimum power states for the DSP subsystem
+ * and the memory interface.
+ *
+ * Upon a successful return, the specified components will be powered on and if
+ * they were not already running at the specified or higher power states,
+ * requests will have been sent to transition both the DSP subsystem and
+ * memory interface to the specified states.
+ *
+ * If the same client invokes this IOCTL for the same component more than once
+ * without a corresponding call to `GXP_RELEASE_WAKE_LOCK` in between, the
+ * second call will update requested power states, but have no other effects.
+ * No additional call to `GXP_RELEASE_WAKE_LOCK` will be required.
+ *
+ * If a client attempts to acquire a VIRTUAL_DEVICE wakelock and there are
+ * insufficient physical cores available, the driver will wait up to
+ * `vd_timeout_us` microseconds, then return -EBUSY if sufficient cores were
+ * never made available. In this case, if both BLOCK and VIRTUAL_DEVICE
+ * wakelocks were being requested, neither will have been acquired.
+ */
+#define GXP_ACQUIRE_WAKE_LOCK \
+ _IOW(GXP_IOCTL_BASE, 25, struct gxp_acquire_wakelock_ioctl)
+
+#define GXP_INTERFACE_VERSION_BUILD_BUFFER_SIZE 64
+struct gxp_interface_version_ioctl {
+ /*
+ * Driver major version number.
+ * Increments whenever a non-backwards compatible change to the
+ * interface defined in this file changes.
+ */
+ __u16 version_major;
+ /*
+ * Driver minor version number.
+ * Increments whenever a backwards compatible change, such as the
+ * addition of a new IOCTL, is made to the interface defined in this
+ * file.
+ */
+ __u16 version_minor;
+ /*
+ * Driver build identifier.
+ * NULL-terminated string of the git hash of the commit the driver was
+ * built from. If the driver had uncommitted changes the string will
+ * end with "-dirty".
+ */
+ char version_build[GXP_INTERFACE_VERSION_BUILD_BUFFER_SIZE];
+};
+
+/* Query the driver's interface version. */
+#define GXP_GET_INTERFACE_VERSION \
+ _IOR(GXP_IOCTL_BASE, 26, struct gxp_interface_version_ioctl)
/*
* Triggers a debug dump to be generated for cores.
@@ -851,4 +828,10 @@ struct gxp_tpu_mbx_queue_ioctl {
*/
#define GXP_TRIGGER_DEBUG_DUMP _IOW(GXP_IOCTL_BASE, 27, __u32)
+#define GXP_REGISTER_MCU_TELEMETRY_EVENTFD \
+ _IOW(GXP_IOCTL_BASE, 28, struct gxp_register_telemetry_eventfd_ioctl)
+
+#define GXP_UNREGISTER_MCU_TELEMETRY_EVENTFD \
+ _IOW(GXP_IOCTL_BASE, 29, struct gxp_register_telemetry_eventfd_ioctl)
+
#endif /* __GXP_H__ */
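(For reference) A userspace sketch exercising the reorganized UAPI: query the bumped 1.5 interface version, then register an eventfd for the new MCU logging telemetry (ioctls 28/29 reuse the core-telemetry argument struct). "/dev/gxp" is an assumed device node name:

#include <fcntl.h>
#include <stdio.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "gxp.h"

int main(void)
{
	struct gxp_interface_version_ioctl ver = {0};
	struct gxp_register_telemetry_eventfd_ioctl reg = {0};
	int dev = open("/dev/gxp", O_RDWR);	/* assumed node name */
	int efd = eventfd(0, 0);

	if (dev < 0 || efd < 0)
		return 1;

	if (ioctl(dev, GXP_GET_INTERFACE_VERSION, &ver) == 0)
		printf("driver interface %u.%u (%s)\n", ver.version_major,
		       ver.version_minor, ver.version_build);

	reg.eventfd = efd;
	reg.type = GXP_TELEMETRY_TYPE_LOGGING;
	if (ioctl(dev, GXP_REGISTER_MCU_TELEMETRY_EVENTFD, &reg))
		perror("register MCU telemetry eventfd");

	/* ... poll(efd), then mmap() the buffer at
	 * GXP_MMAP_MCU_LOG_BUFFER_OFFSET to read the logs ... */

	ioctl(dev, GXP_UNREGISTER_MCU_TELEMETRY_EVENTFD, &reg);
	close(efd);
	close(dev);
	return 0;
}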