author		Aurora zuma automerger <aurora-zuma-automerger@google.com>	2023-02-09 08:28:05 +0000
committer	Copybara-Service <copybara-worker@google.com>	2023-02-09 02:41:06 -0800
commit		557924e23d1d1bb4a5e0242f2af42f3083c6a00c (patch)
tree		e21608a979ac1212e48b312cb034a7ce5647000f
parent		34333c35377d6625f6019644c2bbb31fa704ea52 (diff)
download	zuma-557924e23d1d1bb4a5e0242f2af42f3083c6a00c.tar.gz
gxp: [Copybara Auto Merge] Merge branch 'zuma' into 'android14-gs-pixel-5.15'
gxp: fix missing up_read in ioctl UCI command
Bug: 268449263

gxp: introduce debug_dump_lock per vd
Bug: 234172464

gxp: gxp_vd_{run,stop} holds vd_sema for writing
Bug: 234172464 (repeat)

gxp: pass vd to the static functions of debug dump
Bug: 234172464 (repeat)

gcip: call before_enqueue_wait_list
Bug: 267713283

GCIP_MAIN_REV_ID: 442aa3a49affc15c9db72240cd2d0692b242a82a

gxp: fix up HAS_TPU_EXT

gxp: telemetry refactor for config_version 2
Bug: 266886853

gxp: fix deleting async_resp before putting
Bug: 267713283 (repeat)

gcip: introduce before_enqueue_wait_list
Bug: 267713283 (repeat)

GCIP_HEADERS_REV_ID: 16235f58e3f50f260c7b13f939857a414fc7eb21
GitOrigin-RevId: 093323949cc38e8af2e9951cf116df936c9b13c1
Change-Id: Iba85dcaa5af22bd89d2f2cd5646268cfd25e2e66
-rw-r--r--	gcip-kernel-driver/drivers/gcip/gcip-mailbox.c	9
-rw-r--r--	gcip-kernel-driver/include/gcip/gcip-mailbox.h	16
-rw-r--r--	gxp-client.c	4
-rw-r--r--	gxp-common-platform.c	2
-rw-r--r--	gxp-config.h	14
-rw-r--r--	gxp-debug-dump.c	109
-rw-r--r--	gxp-debug-dump.h	8
-rw-r--r--	gxp-firmware-data.c	82
-rw-r--r--	gxp-mcu-fs.c	5
-rw-r--r--	gxp-uci.c	50
-rw-r--r--	gxp-vd.c	71
-rw-r--r--	gxp-vd.h	11
12 files changed, 261 insertions(+), 120 deletions(-)
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-mailbox.c b/gcip-kernel-driver/drivers/gcip/gcip-mailbox.c
index cbb3c80..334a51d 100644
--- a/gcip-kernel-driver/drivers/gcip/gcip-mailbox.c
+++ b/gcip-kernel-driver/drivers/gcip/gcip-mailbox.c
@@ -111,10 +111,19 @@ static int gcip_mailbox_push_wait_resp(struct gcip_mailbox *mailbox, void *resp,
{
struct gcip_mailbox_wait_list_elem *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
unsigned long flags;
+ int ret;
if (!entry)
return -ENOMEM;
+ if (mailbox->ops->before_enqueue_wait_list) {
+ ret = mailbox->ops->before_enqueue_wait_list(mailbox, resp, awaiter);
+ if (ret) {
+ kfree(entry);
+ return ret;
+ }
+ }
+
/* Increase a reference of arrived handler. */
if (awaiter)
refcount_inc(&awaiter->refs);
diff --git a/gcip-kernel-driver/include/gcip/gcip-mailbox.h b/gcip-kernel-driver/include/gcip/gcip-mailbox.h
index e81cfb9..649b574 100644
--- a/gcip-kernel-driver/include/gcip/gcip-mailbox.h
+++ b/gcip-kernel-driver/include/gcip/gcip-mailbox.h
@@ -260,6 +260,22 @@ struct gcip_mailbox_ops {
*/
int (*wait_for_cmd_queue_not_full)(struct gcip_mailbox *mailbox);
/*
+ * This callback will be called before putting @resp into @mailbox->wait_list and
+ * putting the @cmd of @resp into the command queue. Once this callback returns, the
+ * consumer may start processing the command and the mailbox will wait for its
+ * response. It is therefore the final checkpoint for deciding whether to wait for
+ * the response at all; to refuse, return a non-zero error.
+ *
+ * If the implementing side has its own wait queue, this callback is a suitable
+ * place to put @resp or @awaiter into it.
+ *
+ * If @resp is synchronous, @awaiter will be NULL.
+ *
+ * Context: normal.
+ */
+ int (*before_enqueue_wait_list)(struct gcip_mailbox *mailbox, void *resp,
+ struct gcip_mailbox_resp_awaiter *awaiter);
+ /*
* This callback will be called after putting the @cmd to the command queue. It can be used
* for triggering the doorbell. Also, @mailbox->cur_seq will be increased by the return
* value. If error occurs, returns negative value and @mailbox->cur_seq will not be changed
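A minimal sketch of how a driver might wire up the new callback; the names my_before_enqueue_wait_list, my_queue_is_closed, and my_mailbox_ops below are hypothetical and not part of this change:

static int my_before_enqueue_wait_list(struct gcip_mailbox *mailbox, void *resp,
				       struct gcip_mailbox_resp_awaiter *awaiter)
{
	/* Synchronous responses carry no awaiter; nothing to queue. */
	if (!awaiter)
		return 0;
	/* Final checkpoint: returning a non-zero error aborts the enqueue. */
	if (my_queue_is_closed(mailbox))
		return -EIO;
	/* Otherwise put @awaiter into the driver's own wait queue here. */
	return 0;
}

static const struct gcip_mailbox_ops my_mailbox_ops = {
	.before_enqueue_wait_list = my_before_enqueue_wait_list,
	/* ... the other callbacks required by gcip-mailbox.h ... */
};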
diff --git a/gxp-client.c b/gxp-client.c
index 813de48..9686818 100644
--- a/gxp-client.c
+++ b/gxp-client.c
@@ -44,9 +44,9 @@ void gxp_client_destroy(struct gxp_client *client)
gxp_vd_block_unready(client->vd);
if (client->vd && client->vd->state != GXP_VD_OFF) {
- down_read(&gxp->vd_semaphore);
+ down_write(&gxp->vd_semaphore);
gxp_vd_stop(client->vd);
- up_read(&gxp->vd_semaphore);
+ up_write(&gxp->vd_semaphore);
}
for (core = 0; core < GXP_NUM_CORES; core++) {
diff --git a/gxp-common-platform.c b/gxp-common-platform.c
index 8d56391..66be9c0 100644
--- a/gxp-common-platform.c
+++ b/gxp-common-platform.c
@@ -870,7 +870,7 @@ static int gxp_disable_core_telemetry(struct gxp_client *client,
return ret;
}
-#ifdef HAS_TPU_EXT
+#if HAS_TPU_EXT
/*
* Map TPU mailboxes to IOVA.
diff --git a/gxp-config.h b/gxp-config.h
index 6a35d90..2987a8f 100644
--- a/gxp-config.h
+++ b/gxp-config.h
@@ -40,11 +40,13 @@
#define GXP_HAS_MCU 1
#endif
-#if (IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_ANDROID)) && \
- !IS_ENABLED(CONFIG_GXP_GEM5)
-#define HAS_TPU_EXT 1
-#else
-#define HAS_TPU_EXT 0
-#endif
+/*
+ * Interop with the TPU is supported only when
+ * 1. Unit testing, or
+ * 2. In production on Android (to exclude vanilla Linux for bringup), but not on GEM5.
+ */
+#define HAS_TPU_EXT \
+ ((IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_ANDROID)) && \
+ !IS_ENABLED(CONFIG_GXP_GEM5))
#endif /* __GXP_CONFIG_H__ */
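For reference, why the companion change in gxp-common-platform.c switched from #ifdef to #if: HAS_TPU_EXT is always defined and expands to 0 or 1, so an #ifdef test succeeds even when the value is 0. A minimal illustration:

#define HAS_TPU_EXT 0

#ifdef HAS_TPU_EXT	/* wrong: the macro is defined, so this branch compiles */
	/* TPU-interop code would be compiled here unconditionally. */
#endif

#if HAS_TPU_EXT		/* right: compiled only when the value is non-zero */
	/* TPU-interop code is compiled only when enabled. */
#endif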
diff --git a/gxp-debug-dump.c b/gxp-debug-dump.c
index 5589ea3..4c2483b 100644
--- a/gxp-debug-dump.c
+++ b/gxp-debug-dump.c
@@ -352,33 +352,21 @@ static int gxp_add_user_buffer_to_segments(struct gxp_dev *gxp,
* Caller must have locked `gxp->vd_semaphore` for reading.
*/
static void gxp_user_buffers_vunmap(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd,
struct gxp_core_header *core_header)
{
- struct gxp_virtual_device *vd;
struct gxp_user_buffer user_buf;
int i;
struct gxp_mapping *mapping;
- lockdep_assert_held(&gxp->vd_semaphore);
-
- /*
- * TODO (b/234172464): When implementing per-core debug dump locks,
- * down_read(&gxp->vd_semaphore) must be re-added before accessing
- * gxp->core_to_vd[], and up_read(&gxp->vd_semaphore) must be re-added
- * after.
- */
- if (gxp_is_direct_mode(gxp)) {
- vd = gxp->core_to_vd[core_header->core_id];
- } else {
- vd = gxp->debug_dump_mgr
- ->crashed_core_to_vd[core_header->core_id];
- }
- if (!vd) {
+ if (!vd || vd->state == GXP_VD_RELEASED) {
dev_err(gxp->dev,
"Virtual device is not available for vunmap\n");
return;
}
+ lockdep_assert_held(&vd->debug_dump_lock);
+
for (i = 0; i < GXP_NUM_BUFFER_MAPPINGS; i++) {
user_buf = core_header->user_bufs[i];
if (user_buf.size == 0)
@@ -402,35 +390,23 @@ static void gxp_user_buffers_vunmap(struct gxp_dev *gxp,
* Caller must have locked `gxp->vd_semaphore` for reading.
*/
static int gxp_user_buffers_vmap(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd,
struct gxp_core_header *core_header,
void *user_buf_vaddrs[])
{
- struct gxp_virtual_device *vd;
struct gxp_user_buffer *user_buf;
int i, cnt = 0;
dma_addr_t daddr;
struct gxp_mapping *mapping;
void *vaddr;
- lockdep_assert_held(&gxp->vd_semaphore);
-
- /*
- * TODO (b/234172464): When implementing per-core debug dump locks,
- * down_read(&gxp->vd_semaphore) must be re-added before accessing
- * gxp->core_to_vd[], and up_read(&gxp->vd_semaphore) must be re-added
- * after.
- */
- if (gxp_is_direct_mode(gxp)) {
- vd = gxp->core_to_vd[core_header->core_id];
- } else {
- vd = gxp->debug_dump_mgr
- ->crashed_core_to_vd[core_header->core_id];
- }
- if (!vd) {
+ if (!vd || vd->state == GXP_VD_RELEASED) {
dev_err(gxp->dev, "Virtual device is not available for vmap\n");
goto out;
}
+ lockdep_assert_held(&vd->debug_dump_lock);
+
for (i = 0; i < GXP_NUM_BUFFER_MAPPINGS; i++) {
user_buf = &core_header->user_bufs[i];
if (user_buf->size == 0)
@@ -455,7 +431,7 @@ static int gxp_user_buffers_vmap(struct gxp_dev *gxp,
gxp_mapping_put(mapping);
if (IS_ERR(vaddr)) {
- gxp_user_buffers_vunmap(gxp, core_header);
+ gxp_user_buffers_vunmap(gxp, vd, core_header);
return 0;
}
@@ -467,7 +443,7 @@ static int gxp_user_buffers_vmap(struct gxp_dev *gxp,
/* Check that the entire user buffer is mapped */
if ((user_buf_vaddrs[i] + user_buf->size) >
(vaddr + mapping->size)) {
- gxp_user_buffers_vunmap(gxp, core_header);
+ gxp_user_buffers_vunmap(gxp, vd, core_header);
return 0;
}
@@ -518,7 +494,9 @@ void gxp_debug_dump_invalidate_segments(struct gxp_dev *gxp, uint32_t core_id)
* Caller must make sure that gxp->debug_dump_mgr->common_dump and
* gxp->debug_dump_mgr->core_dump are not NULL.
*/
-static int gxp_handle_debug_dump(struct gxp_dev *gxp, uint32_t core_id)
+static int gxp_handle_debug_dump(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd,
+ uint32_t core_id)
{
struct gxp_debug_dump_manager *mgr = gxp->debug_dump_mgr;
struct gxp_core_dump *core_dump = mgr->core_dump;
@@ -601,11 +579,12 @@ static int gxp_handle_debug_dump(struct gxp_dev *gxp, uint32_t core_id)
seg_idx++;
/* User Buffers */
- user_buf_cnt = gxp_user_buffers_vmap(gxp, core_header, user_buf_vaddrs);
+ user_buf_cnt =
+ gxp_user_buffers_vmap(gxp, vd, core_header, user_buf_vaddrs);
if (user_buf_cnt > 0) {
if (gxp_add_user_buffer_to_segments(gxp, core_header, core_id,
seg_idx, user_buf_vaddrs)) {
- gxp_user_buffers_vunmap(gxp, core_header);
+ gxp_user_buffers_vunmap(gxp, vd, core_header);
ret = -EFAULT;
goto out_efault;
}
@@ -623,7 +602,7 @@ out_efault:
gxp_send_to_sscd(gxp, mgr->segs[core_id],
seg_idx + user_buf_cnt, sscd_msg);
- gxp_user_buffers_vunmap(gxp, core_header);
+ gxp_user_buffers_vunmap(gxp, vd, core_header);
}
#endif
@@ -652,7 +631,9 @@ static int gxp_init_segments(struct gxp_dev *gxp)
* Caller must have locked `gxp->debug_dump_mgr->debug_dump_lock` before calling
* `gxp_generate_coredump`.
*/
-static int gxp_generate_coredump(struct gxp_dev *gxp, uint32_t core_id)
+static int gxp_generate_coredump(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd,
+ uint32_t core_id)
{
int ret = 0;
@@ -668,7 +649,7 @@ static int gxp_generate_coredump(struct gxp_dev *gxp, uint32_t core_id)
if (ret)
goto out;
- ret = gxp_handle_debug_dump(gxp, core_id);
+ ret = gxp_handle_debug_dump(gxp, vd, core_id);
if (ret)
goto out;
@@ -679,43 +660,30 @@ out:
}
static void gxp_generate_debug_dump(struct gxp_dev *gxp, uint core_id,
- struct gxp_virtual_device *crashed_vd)
+ struct gxp_virtual_device *vd)
{
u32 boot_mode;
bool gxp_generate_coredump_called = false;
mutex_lock(&gxp->debug_dump_mgr->debug_dump_lock);
- /* crashed_core_to_vd[] is only relevant in case of mcu mode.*/
- gxp->debug_dump_mgr->crashed_core_to_vd[core_id] = crashed_vd;
- /*
- * Lock the VD semaphore to ensure no suspend/resume/start/stop requests
- * can be made on core `core_id` while generating debug dump.
- * However, since VD semaphore is used by other VDs as well, it can
- * potentially block device creation and destruction for other cores.
- * TODO (b/234172464): Implement per-core debug dump locks and
- * lock/unlock vd_semaphore before/after accessing gxp->core_to_vd[].
- */
- down_read(&gxp->vd_semaphore);
/*
* TODO(b/265105909): Checks below to be verified after implementation for
* firmware loading for mcu mode are completed.
*/
- boot_mode = gxp_firmware_get_boot_mode(gxp, crashed_vd, core_id);
+ boot_mode = gxp_firmware_get_boot_mode(gxp, vd, core_id);
if (gxp_is_fw_running(gxp, core_id) &&
(boot_mode == GXP_BOOT_MODE_STATUS_COLD_BOOT_COMPLETED ||
boot_mode == GXP_BOOT_MODE_STATUS_RESUME_COMPLETED)) {
gxp_generate_coredump_called = true;
- if (gxp_generate_coredump(gxp, core_id))
+ if (gxp_generate_coredump(gxp, vd, core_id))
dev_err(gxp->dev, "Failed to generate coredump\n");
}
/* Invalidate segments to prepare for the next debug dump trigger */
gxp_debug_dump_invalidate_segments(gxp, core_id);
- up_read(&gxp->vd_semaphore);
-
/*
* This delay is needed to ensure there's sufficient time
* in between sscd_report() being called, as the file name of
@@ -725,19 +693,36 @@ static void gxp_generate_debug_dump(struct gxp_dev *gxp, uint core_id,
if (gxp_generate_coredump_called)
msleep(1000);
- /* crashed_core_to_vd[] is only relevant in case of mcu mode.*/
- gxp->debug_dump_mgr->crashed_core_to_vd[core_id] = NULL;
mutex_unlock(&gxp->debug_dump_mgr->debug_dump_lock);
}
-static void gxp_debug_dump_process_dump(struct work_struct *work)
+static void gxp_debug_dump_process_dump_direct_mode(struct work_struct *work)
{
struct gxp_debug_dump_work *debug_dump_work =
container_of(work, struct gxp_debug_dump_work, work);
uint core_id = debug_dump_work->core_id;
struct gxp_dev *gxp = debug_dump_work->gxp;
+ struct gxp_virtual_device *vd = NULL;
+
+ down_read(&gxp->vd_semaphore);
+ if (gxp->core_to_vd[core_id])
+ vd = gxp_vd_get(gxp->core_to_vd[core_id]);
+ up_read(&gxp->vd_semaphore);
- gxp_generate_debug_dump(gxp, core_id, NULL /*Not used*/);
+ /*
+	 * Hold @vd->debug_dump_lock instead of @gxp->vd_semaphore to prevent the state of
+	 * @vd from changing while the debug dump is generated, without blocking the other
+	 * virtual devices from proceeding with their jobs.
+ */
+ if (vd)
+ mutex_lock(&vd->debug_dump_lock);
+
+ gxp_generate_debug_dump(gxp, core_id, vd);
+
+ if (vd) {
+ mutex_unlock(&vd->debug_dump_lock);
+ gxp_vd_put(vd);
+ }
}
int gxp_debug_dump_process_dump_mcu_mode(struct gxp_dev *gxp, uint core_list,
@@ -747,6 +732,8 @@ int gxp_debug_dump_process_dump_mcu_mode(struct gxp_dev *gxp, uint core_list,
struct gxp_core_dump_header *core_dump_header;
struct gxp_debug_dump_manager *mgr = gxp->debug_dump_mgr;
+ lockdep_assert_held(&crashed_vd->debug_dump_lock);
+
if (crashed_vd->state != GXP_VD_UNAVAILABLE) {
dev_dbg(gxp->dev, "Invalid vd state=%u for processing dumps.\n",
crashed_vd->state);
@@ -815,7 +802,7 @@ int gxp_debug_dump_init(struct gxp_dev *gxp, void *sscd_dev, void *sscd_pdata)
mgr->debug_dump_works[core].gxp = gxp;
mgr->debug_dump_works[core].core_id = core;
INIT_WORK(&mgr->debug_dump_works[core].work,
- gxp_debug_dump_process_dump);
+ gxp_debug_dump_process_dump_direct_mode);
}
/* No need for a DMA handle since the carveout is coherent */
diff --git a/gxp-debug-dump.h b/gxp-debug-dump.h
index 66ab782..1cf9219 100644
--- a/gxp-debug-dump.h
+++ b/gxp-debug-dump.h
@@ -188,12 +188,6 @@ struct gxp_debug_dump_manager {
* time
*/
struct mutex debug_dump_lock;
- /*
- * Array index maps to dsp cores. Array stores the pointer to the
- * crashed VD that was running on the respective core. This is used
- * only in mcu mode.
- */
- struct gxp_virtual_device *crashed_core_to_vd[GXP_NUM_CORES];
#if IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
struct sscd_segment segs[GXP_NUM_CORES][GXP_NUM_SEGMENTS_PER_CORE];
#endif
@@ -225,6 +219,8 @@ void gxp_debug_dump_invalidate_segments(struct gxp_dev *gxp, uint32_t core_id);
* reported from firmware.
* @crashed_vd: vd that has crashed.
*
+ * The caller must hold @crashed_vd->debug_dump_lock.
+ *
* Return:
* * 0 - Success.
* * -EINVAL - If vd state is not GXP_VD_UNAVAILABLE.
diff --git a/gxp-firmware-data.c b/gxp-firmware-data.c
index f716f1b..4a35ccc 100644
--- a/gxp-firmware-data.c
+++ b/gxp-firmware-data.c
@@ -22,29 +22,29 @@
* The minimum alignment order (power of 2) of allocations in the firmware data
* region.
*/
-#define FW_DATA_STORAGE_ORDER 3
+#define FW_DATA_STORAGE_ORDER 3
/* A byte pattern to pre-populate the FW region with */
-#define FW_DATA_DEBUG_PATTERN 0x66
+#define FW_DATA_DEBUG_PATTERN 0x66
/* IDs for dedicated doorbells used by some system components */
#define DOORBELL_ID_CORE_WAKEUP(__core__) (0 + __core__)
/* IDs for dedicated sync barriers used by some system components */
-#define SYNC_BARRIER_ID_UART 1
+#define SYNC_BARRIER_ID_UART 1
/* Default application parameters */
-#define DEFAULT_APP_ID 1
-#define DEFAULT_APP_USER_MEM_SIZE (120 * 1024)
-#define DEFAULT_APP_USER_MEM_ALIGNMENT 8
-#define DEFAULT_APP_THREAD_COUNT 2
-#define DEFAULT_APP_TCM_PER_BANK (100 * 1024)
-#define DEFAULT_APP_USER_DOORBELL_COUNT 2
-#define DEFAULT_APP_USER_BARRIER_COUNT 2
+#define DEFAULT_APP_ID 1
+#define DEFAULT_APP_USER_MEM_SIZE (120 * 1024)
+#define DEFAULT_APP_USER_MEM_ALIGNMENT 8
+#define DEFAULT_APP_THREAD_COUNT 2
+#define DEFAULT_APP_TCM_PER_BANK (100 * 1024)
+#define DEFAULT_APP_USER_DOORBELL_COUNT 2
+#define DEFAULT_APP_USER_BARRIER_COUNT 2
/* Core-to-core mailbox communication constants */
-#define CORE_TO_CORE_MBX_CMD_COUNT 10
-#define CORE_TO_CORE_MBX_RSP_COUNT 10
+#define CORE_TO_CORE_MBX_CMD_COUNT 10
+#define CORE_TO_CORE_MBX_RSP_COUNT 10
/* A block allocator managing and partitioning a memory region for device use */
struct fw_memory_allocator {
@@ -614,6 +614,10 @@ static void set_system_cfg_region(struct gxp_dev *gxp, void *sys_cfg)
COPY_FIELDS(tel_des, tel_ro, tel_rw);
}
#undef COPY_FIELDS
+
+ /* Update the global descriptors. */
+ gxp->data_mgr->sys_desc_ro = des_ro;
+ gxp->data_mgr->sys_desc_rw = des_rw;
}
static struct app_metadata *
@@ -632,7 +636,7 @@ _gxp_fw_data_create_app(struct gxp_dev *gxp, struct gxp_virtual_device *vd)
* initialization of legacy mode, and have here copy the values to the
* config region.
*/
- if (vd->vdid == 0)
+ if (vd->vdid == 1)
set_system_cfg_region(gxp, vd->sys_cfg.vaddr);
app = kzalloc(sizeof(*app), GFP_KERNEL);
if (!app)
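Context for the vdid check above: vd->vdid is assigned with atomic_inc_return() in gxp_vd_allocate() (see gxp-vd.c below), which returns the post-increment value, so assuming gxp->next_vdid starts at 0, the first VD ever allocated gets vdid == 1 and the old `vdid == 0` test could never match. In outline:

	/* In gxp_vd_allocate(): the first caller observes 1, not 0. */
	vd->vdid = atomic_inc_return(&gxp->next_vdid);

	/* In _gxp_fw_data_create_app(): only the first VD initializes the region. */
	if (vd->vdid == 1)
		set_system_cfg_region(gxp, vd->sys_cfg.vaddr);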
@@ -782,9 +786,9 @@ int gxp_fw_data_init(struct gxp_dev *gxp)
goto err;
/* Shared firmware data memory region */
- mgr->allocator = mem_alloc_create(gxp, mgr->fw_data_virt,
- gxp->fwdatabuf.daddr,
- gxp->fwdatabuf.size);
+ mgr->allocator =
+ mem_alloc_create(gxp, mgr->fw_data_virt, gxp->fwdatabuf.daddr,
+ gxp->fwdatabuf.size);
if (IS_ERR(mgr->allocator)) {
dev_err(gxp->dev,
"Failed to create the FW data memory allocator\n");
@@ -902,13 +906,14 @@ int gxp_fw_data_set_core_telemetry_descriptors(struct gxp_dev *gxp, u8 type,
/* Validate that the provided IOVAs are addressable (i.e. 32-bit) */
for (core = 0; core < GXP_NUM_CORES; core++) {
if (buffers && buffers[core].dsp_addr > U32_MAX &&
- buffers[core].size == per_buffer_size)
+ buffers[core].size == per_buffer_size)
return -EINVAL;
}
for (core = 0; core < GXP_NUM_CORES; core++) {
core_descriptors[core].host_status = host_status;
- core_descriptors[core].buffer_addr = (u32)buffers[core].dsp_addr;
+ core_descriptors[core].buffer_addr =
+ (u32)buffers[core].dsp_addr;
core_descriptors[core].buffer_size = per_buffer_size;
}
} else {
@@ -922,15 +927,13 @@ int gxp_fw_data_set_core_telemetry_descriptors(struct gxp_dev *gxp, u8 type,
return 0;
}
-u32 gxp_fw_data_get_core_telemetry_device_status(struct gxp_dev *gxp, uint core,
- u8 type)
+static u32
+gxp_fw_data_get_core_telemetry_device_status_legacy(struct gxp_dev *gxp,
+ uint core, u8 type)
{
struct gxp_core_telemetry_descriptor *descriptor =
gxp->data_mgr->core_telemetry_mem.host_addr;
- if (core >= GXP_NUM_CORES)
- return 0;
-
switch (type) {
case GXP_TELEMETRY_TYPE_LOGGING:
return descriptor->per_core_loggers[core].device_status;
@@ -940,3 +943,36 @@ u32 gxp_fw_data_get_core_telemetry_device_status(struct gxp_dev *gxp, uint core,
return 0;
}
}
+
+static u32 _gxp_fw_data_get_core_telemetry_device_status(struct gxp_dev *gxp,
+ uint core, u8 type)
+{
+ struct gxp_system_descriptor_rw *des_rw = gxp->data_mgr->sys_desc_rw;
+
+ switch (type) {
+ case GXP_TELEMETRY_TYPE_LOGGING:
+ return des_rw->telemetry_desc.per_core_loggers[core]
+ .device_status;
+ case GXP_TELEMETRY_TYPE_TRACING:
+ return des_rw->telemetry_desc.per_core_tracers[core]
+ .device_status;
+ default:
+ return 0;
+ }
+}
+
+u32 gxp_fw_data_get_core_telemetry_device_status(struct gxp_dev *gxp, uint core,
+ u8 type)
+{
+ if (core >= GXP_NUM_CORES)
+ return 0;
+
+ if (gxp->firmware_mgr->img_cfg.config_version >=
+ FW_DATA_PROTOCOL_PER_VD_CONFIG) {
+ return _gxp_fw_data_get_core_telemetry_device_status(gxp, core,
+ type);
+ } else {
+ return gxp_fw_data_get_core_telemetry_device_status_legacy(
+ gxp, core, type);
+ }
+}
diff --git a/gxp-mcu-fs.c b/gxp-mcu-fs.c
index 0a5fe7d..0bca897 100644
--- a/gxp-mcu-fs.c
+++ b/gxp-mcu-fs.c
@@ -58,10 +58,13 @@ gxp_ioctl_uci_command(struct gxp_client *client,
&client->vd->mailbox_resp_queues[UCI_RESOURCE_ID].lock,
&client->vd->mailbox_resp_queues[UCI_RESOURCE_ID].waitq,
client->mb_eventfds[UCI_RESOURCE_ID]);
+
+ up_read(&client->semaphore);
+
if (ret) {
dev_err(gxp->dev,
"Failed to enqueue mailbox command (ret=%d)\n", ret);
- goto out;
+ return ret;
}
ibuf.sequence_number = cmd.seq;
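The shape of the up_read fix above, in outline: the read lock taken on @client->semaphore earlier in the ioctl is now dropped before the error check, so both the success and the failure path release it, whereas the old `goto out` could leave the failure path without a matching up_read(). A sketch, with the hypothetical enqueue_uci_command() standing in for the real call chain:

	down_read(&client->semaphore);
	ret = enqueue_uci_command(client);	/* stands in for gxp_uci_send_command(...) */
	up_read(&client->semaphore);		/* now released on every path */
	if (ret)
		return ret;			/* the old error path skipped the release */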
diff --git a/gxp-uci.c b/gxp-uci.c
index eecc29f..e4f81a3 100644
--- a/gxp-uci.c
+++ b/gxp-uci.c
@@ -100,13 +100,14 @@ static void gxp_uci_mailbox_manager_release_unconsumed_async_resps(
wait_list_entry) {
cur->wait_queue = NULL;
}
+ vd->mailbox_resp_queues[UCI_RESOURCE_ID].wait_queue_closed = true;
spin_unlock_irqrestore(&vd->mailbox_resp_queues[UCI_RESOURCE_ID].lock,
flags);
/*
- * From here it is guaranteed that @wait_queue will not be manipulated by the arrived
- * callback.
+	 * From here it is guaranteed that @wait_queue will not be manipulated by the
+	 * arrived or timed-out callbacks, or by `gxp_uci_send_command`.
*/
/*
@@ -210,6 +211,34 @@ static void gxp_uci_set_resp_elem_status(struct gcip_mailbox *mailbox,
elem->code = status;
}
+static int
+gxp_uci_before_enqueue_wait_list(struct gcip_mailbox *mailbox, void *resp,
+ struct gcip_mailbox_resp_awaiter *awaiter)
+{
+ struct gxp_uci_async_response *async_resp;
+ struct mailbox_resp_queue *mailbox_resp_queue;
+ int ret = 0;
+
+ if (!awaiter)
+ return 0;
+
+ async_resp = awaiter->data;
+ mailbox_resp_queue = container_of(
+ async_resp->wait_queue, struct mailbox_resp_queue, wait_queue);
+
+ spin_lock(async_resp->queue_lock);
+ if (mailbox_resp_queue->wait_queue_closed) {
+ ret = -EIO;
+ } else {
+ async_resp->awaiter = awaiter;
+ list_add_tail(&async_resp->wait_list_entry,
+ async_resp->wait_queue);
+ }
+ spin_unlock(async_resp->queue_lock);
+
+ return ret;
+}
+
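Taken together with the release path above, wait_queue_closed forms a close-then-refuse gate under the per-queue lock; an interleaving sketch of the race it closes (Bug: 267713283):

/*
 * teardown (release_unconsumed_async_resps)    sender (before_enqueue_wait_list)
 * -------------------------------------------  ----------------------------------
 * spin_lock(&queue->lock);
 * detach entries; wait_queue_closed = true;
 * spin_unlock(&queue->lock);
 *                                              spin_lock(async_resp->queue_lock);
 *                                              wait_queue_closed is set -> -EIO
 *                                              spin_unlock(async_resp->queue_lock);
 *
 * Without the gate, a command racing with teardown could be appended to the
 * wait queue after it was drained, leaving a response nobody would consume.
 */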
static void
gxp_uci_handle_awaiter_arrived(struct gcip_mailbox *mailbox,
struct gcip_mailbox_resp_awaiter *awaiter)
@@ -318,6 +347,7 @@ static const struct gcip_mailbox_ops gxp_uci_gcip_mbx_ops = {
.release_wait_list_lock = gxp_mailbox_gcip_ops_release_wait_list_lock,
.wait_for_cmd_queue_not_full =
gxp_mailbox_gcip_ops_wait_for_cmd_queue_not_full,
+ .before_enqueue_wait_list = gxp_uci_before_enqueue_wait_list,
.after_enqueue_cmd = gxp_mailbox_gcip_ops_after_enqueue_cmd,
.after_fetch_resps = gxp_mailbox_gcip_ops_after_fetch_resps,
.handle_awaiter_arrived = gxp_uci_handle_awaiter_arrived,
@@ -438,6 +468,7 @@ int gxp_uci_send_command(struct gxp_uci *uci, struct gxp_virtual_device *vd,
struct gxp_eventfd *eventfd)
{
struct gxp_uci_async_response *async_resp;
+ struct gcip_mailbox_resp_awaiter *awaiter;
int ret;
if (!gxp_vd_has_and_use_credit(vd))
@@ -465,16 +496,17 @@ int gxp_uci_send_command(struct gxp_uci *uci, struct gxp_virtual_device *vd,
else
async_resp->eventfd = NULL;
- async_resp->awaiter = gxp_mailbox_put_cmd(
- uci->mbx, cmd, &async_resp->resp, async_resp);
- if (IS_ERR(async_resp->awaiter)) {
- ret = PTR_ERR(async_resp->awaiter);
+ /*
+	 * @async_resp->awaiter will be set by the `gxp_uci_before_enqueue_wait_list`
+	 * callback.
+ */
+ awaiter = gxp_mailbox_put_cmd(uci->mbx, cmd, &async_resp->resp,
+ async_resp);
+ if (IS_ERR(awaiter)) {
+ ret = PTR_ERR(awaiter);
goto err_free_resp;
}
- /* Put async_resp into the waiting queue. */
- list_add_tail(&async_resp->wait_list_entry, wait_queue);
-
return 0;
err_free_resp:
diff --git a/gxp-vd.c b/gxp-vd.c
index 2468ca9..b4dcad4 100644
--- a/gxp-vd.c
+++ b/gxp-vd.c
@@ -654,6 +654,24 @@ static void set_config_version(struct gxp_dev *gxp,
gxp->fwdatabuf.daddr = 0;
}
+static void debug_dump_lock(struct gxp_dev *gxp, struct gxp_virtual_device *vd)
+{
+ if (!mutex_trylock(&vd->debug_dump_lock)) {
+ /*
+		 * Release @gxp->vd_semaphore to let the other virtual devices proceed
+		 * with their work while we wait for the debug dump to finish.
+ */
+ up_write(&gxp->vd_semaphore);
+ mutex_lock(&vd->debug_dump_lock);
+ down_write(&gxp->vd_semaphore);
+ }
+}
+
+static inline void debug_dump_unlock(struct gxp_virtual_device *vd)
+{
+ mutex_unlock(&vd->debug_dump_lock);
+}
+
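The trylock dance in debug_dump_lock() generalizes to any two locks where a long-running holder of the inner lock (here, the debug dump worker holding debug_dump_lock) must not stall everything queued behind the outer one (here, the write side of vd_semaphore). A hypothetical sketch of the pattern; note the caller must tolerate the state protected by the outer lock changing while it was dropped:

/* Caller holds @outer for writing and needs @inner, possibly held by a slow worker. */
static void lock_inner_dropping_outer(struct rw_semaphore *outer, struct mutex *inner)
{
	if (!mutex_trylock(inner)) {
		up_write(outer);	/* let work queued behind @outer proceed */
		mutex_lock(inner);	/* may sleep for as long as a dump takes */
		down_write(outer);	/* restore the caller's locking state */
	}
}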
struct gxp_virtual_device *gxp_vd_allocate(struct gxp_dev *gxp,
u16 requested_cores)
{
@@ -683,6 +701,7 @@ struct gxp_virtual_device *gxp_vd_allocate(struct gxp_dev *gxp,
vd->vdid = atomic_inc_return(&gxp->next_vdid);
mutex_init(&vd->fence_list_lock);
INIT_LIST_HEAD(&vd->gxp_fence_list);
+ mutex_init(&vd->debug_dump_lock);
vd->domain = gxp_domain_pool_alloc(gxp->domain_pool);
if (!vd->domain) {
@@ -785,6 +804,7 @@ void gxp_vd_release(struct gxp_virtual_device *vd)
uint core_list = vd->core_list;
lockdep_assert_held_write(&gxp->vd_semaphore);
+ debug_dump_lock(gxp, vd);
if (vd->is_secure) {
mutex_lock(&gxp->secure_vd_lock);
@@ -820,6 +840,7 @@ void gxp_vd_release(struct gxp_virtual_device *vd)
ida_free(&vd->gxp->shared_slice_idp, vd->slice_index);
gxp_domain_pool_free(vd->gxp->domain_pool, vd->domain);
vd->state = GXP_VD_RELEASED;
+ debug_dump_unlock(vd);
gxp_vd_put(vd);
}
@@ -860,9 +881,9 @@ void gxp_vd_block_unready(struct gxp_virtual_device *vd)
int gxp_vd_run(struct gxp_virtual_device *vd)
{
struct gxp_dev *gxp = vd->gxp;
- int ret = 0;
+ int ret;
- lockdep_assert_held(&gxp->vd_semaphore);
+ lockdep_assert_held_write(&gxp->vd_semaphore);
if (vd->state != GXP_VD_READY && vd->state != GXP_VD_OFF)
return -EINVAL;
if (vd->state == GXP_VD_OFF) {
@@ -886,15 +907,20 @@ int gxp_vd_run(struct gxp_virtual_device *vd)
if (ret)
goto err_vd_unavailable;
}
+
+ debug_dump_lock(gxp, vd);
/* Clear all doorbells */
vd_restore_doorbells(vd);
ret = gxp_firmware_run(gxp, vd, vd->core_list);
if (ret)
goto err_vd_block_unready;
vd->state = GXP_VD_RUNNING;
- return ret;
+ debug_dump_unlock(vd);
+
+ return 0;
err_vd_block_unready:
+ debug_dump_unlock(vd);
gxp_vd_block_unready(vd);
err_vd_unavailable:
vd->state = GXP_VD_UNAVAILABLE;
@@ -927,7 +953,9 @@ void gxp_vd_stop(struct gxp_virtual_device *vd)
uint core_list = vd->core_list;
uint lpm_state;
- lockdep_assert_held(&gxp->vd_semaphore);
+ lockdep_assert_held_write(&gxp->vd_semaphore);
+ debug_dump_lock(gxp, vd);
+
if (gxp_core_boot &&
(vd->state == GXP_VD_OFF || vd->state == GXP_VD_READY ||
vd->state == GXP_VD_RUNNING) &&
@@ -950,6 +978,8 @@ void gxp_vd_stop(struct gxp_virtual_device *vd)
vd->state == GXP_VD_UNAVAILABLE)
gxp_dma_domain_detach_device(gxp, vd->domain);
vd->state = GXP_VD_OFF;
+
+ debug_dump_unlock(vd);
}
static inline uint select_core(struct gxp_virtual_device *vd, uint virt_core,
@@ -1005,11 +1035,13 @@ void gxp_vd_suspend(struct gxp_virtual_device *vd)
return gxp_vd_stop(vd);
#endif
lockdep_assert_held_write(&gxp->vd_semaphore);
+ debug_dump_lock(gxp, vd);
+
dev_info(gxp->dev, "Suspending VD ...\n");
if (vd->state == GXP_VD_SUSPENDED) {
dev_err(gxp->dev,
"Attempt to suspend a virtual device twice\n");
- return;
+ goto out;
}
gxp_pm_force_clkmux_normal(gxp);
/*
@@ -1090,6 +1122,8 @@ void gxp_vd_suspend(struct gxp_virtual_device *vd)
vd->state = GXP_VD_SUSPENDED;
}
gxp_pm_resume_clkmux(gxp);
+out:
+ debug_dump_unlock(vd);
}
/*
@@ -1107,11 +1141,13 @@ int gxp_vd_resume(struct gxp_virtual_device *vd)
uint failed_cores = 0;
lockdep_assert_held_write(&gxp->vd_semaphore);
+ debug_dump_lock(gxp, vd);
dev_info(gxp->dev, "Resuming VD ...\n");
if (vd->state != GXP_VD_SUSPENDED) {
dev_err(gxp->dev,
"Attempt to resume a virtual device which was not suspended\n");
- return -EBUSY;
+ ret = -EBUSY;
+ goto out;
}
gxp_pm_force_clkmux_normal(gxp);
curr_blk_switch_count = gxp_pm_get_blk_switch_count(gxp);
@@ -1206,6 +1242,8 @@ int gxp_vd_resume(struct gxp_virtual_device *vd)
vd->state = GXP_VD_RUNNING;
}
gxp_pm_resume_clkmux(gxp);
+out:
+ debug_dump_unlock(vd);
return ret;
}
@@ -1407,6 +1445,7 @@ void gxp_vd_put(struct gxp_virtual_device *vd)
void gxp_vd_invalidate(struct gxp_dev *gxp, int client_id, uint core_list)
{
struct gxp_client *client = NULL, *c;
+ struct gxp_virtual_device *vd;
release_unconsumed_async_resps_t release_unconsumed_async_resps =
gxp->mailbox_mgr->release_unconsumed_async_resps;
int ret;
@@ -1472,7 +1511,20 @@ void gxp_vd_invalidate(struct gxp_dev *gxp, int client_id, uint core_list)
dev_dbg(gxp->dev, "This VD is already invalidated");
}
+ /*
+	 * Take a reference on @vd because @gxp->vd_semaphore is released below and
+	 * the client may release @vd asynchronously in the meantime.
+ */
+ vd = gxp_vd_get(client->vd);
+
+ /*
+	 * Release @gxp->vd_semaphore before generating a debug dump so that other
+	 * virtual devices are not blocked from proceeding with their work.
+ */
up_write(&gxp->vd_semaphore);
+ up_write(&client->semaphore);
+ mutex_lock(&vd->debug_dump_lock);
+
/*
* Process debug dump if it's enabled and core_list is not empty.
* Keep on hold the client lock while processing the dumps. vd
@@ -1480,12 +1532,13 @@ void gxp_vd_invalidate(struct gxp_dev *gxp, int client_id, uint core_list)
* implementation logic ahead.
*/
if (gxp_debug_dump_is_enabled() && core_list != 0) {
- ret = gxp_debug_dump_process_dump_mcu_mode(gxp, core_list,
- client->vd);
+ ret = gxp_debug_dump_process_dump_mcu_mode(gxp, core_list, vd);
if (ret)
dev_err(gxp->dev,
"debug dump processing failed (ret=%d).\n",
ret);
}
- up_write(&client->semaphore);
+
+ mutex_unlock(&vd->debug_dump_lock);
+ gxp_vd_put(vd);
}
diff --git a/gxp-vd.h b/gxp-vd.h
index 2a0b78c..c7fd4e4 100644
--- a/gxp-vd.h
+++ b/gxp-vd.h
@@ -40,6 +40,11 @@ struct mailbox_resp_queue {
spinlock_t lock;
/* Waitqueue to wait on if the queue is empty */
wait_queue_head_t waitq;
+ /*
+ * If true, the user cannot send requests anymore.
+ * This must be protected by @lock.
+ */
+ bool wait_queue_closed;
};
enum gxp_virtual_device_state {
@@ -144,6 +149,8 @@ struct gxp_virtual_device {
struct mutex fence_list_lock;
/* List of GXP DMA fences owned by this VD. */
struct list_head gxp_fence_list;
+	/* Prevents the state of the vd from changing while a debug dump is generated. */
+ struct mutex debug_dump_lock;
};
/*
@@ -198,7 +205,7 @@ void gxp_vd_release(struct gxp_virtual_device *vd);
* function. If this function runs successfully, the state becomes
* GXP_VD_RUNNING. Otherwise, it would be GXP_VD_UNAVAILABLE.
*
- * The caller must have locked gxp->vd_semaphore.
+ * The caller must have locked gxp->vd_semaphore for writing.
*
* Return:
* * 0 - Success
@@ -213,7 +220,7 @@ int gxp_vd_run(struct gxp_virtual_device *vd);
*
* The state of @vd will be GXP_VD_OFF.
*
- * The caller must have locked gxp->vd_semaphore.
+ * The caller must have locked gxp->vd_semaphore for writing.
*/
void gxp_vd_stop(struct gxp_virtual_device *vd);
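Both doc-comment updates above match the gxp-client.c change at the top of this patch: callers now take the semaphore with down_write() rather than down_read(), which the lockdep_assert_held_write() calls in gxp-vd.c verify at runtime when lockdep is enabled. A usage sketch:

	down_write(&gxp->vd_semaphore);
	gxp_vd_stop(client->vd);	/* lockdep_assert_held_write() is satisfied */
	up_write(&gxp->vd_semaphore);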