author     Aurora zuma automerger <aurora-zuma-automerger@google.com>  2022-11-24 09:30:39 +0000
committer  Copybara-Service <copybara-worker@google.com>  2022-11-24 01:55:08 -0800
commit     70e76ccdc0547f3e8f70c3238174ba0da695bb16 (patch)
tree       45c720210edd267e1d8585d299294c67d75c4482
parent     33da351523d0ff6f10305d4a97405c9074f51258 (diff)
download   zuma-70e76ccdc0547f3e8f70c3238174ba0da695bb16.tar.gz
gxp: [Copybara Auto Merge] Merge branch 'zuma' into 'android13-gs-pixel-5.15'
gxp: check virtual_core_id before using
Bug: 260201871
gxp: map tpu mbx queue even in MCU mode
Bug: 237624453
gxp: fetch segment boundaries from ELF header
Bug: 259197130
gxp: respond with an ack to the core telemetry RKCI
Bug: 229143599
gxp: add ifdef guard for edgetpu_ext interface
Bug: 259357745
gxp: UCI consider credits of VD
Bug: 257137038
gxp: set VD credit to 256
Bug: 257137038 (repeat)
gxp: Introduce config-pwr-state.h
Bug: 258154981
gxp: release awaiter when UCI wakelock responded
gxp: add cmd credit to virtual device
Bug: 257137038 (repeat)
gxp: fix the default domain cache
gxp: revert ignore_seq_order settings
gxp: Map TPU mbx only in the direct mode
Bug: 249440369
gcip: image header use unsigned fields
GCIP_HEADERS_REV_ID: 6510a92061a4c3ac576dae396b5784698ed37db4
GitOrigin-RevId: 585ac74c28de99eee826bc4f6f22ba95231cb8f3
Change-Id: Ifa9f3651fe4cd824ec98f8acb52cbd5d264c4216
-rw-r--r--callisto-platform.c30
-rw-r--r--callisto/config-pwr-state.h23
-rw-r--r--callisto/config.h1
-rw-r--r--gcip-kernel-driver/include/gcip/gcip-common-image-header.h28
-rw-r--r--gxp-client.c2
-rw-r--r--gxp-common-platform.c206
-rw-r--r--gxp-dci.c1
-rw-r--r--gxp-dma-iommu.c1
-rw-r--r--gxp-firmware.c169
-rw-r--r--gxp-firmware.h10
-rw-r--r--gxp-internal.h1
-rw-r--r--gxp-kci.c16
-rw-r--r--gxp-kci.h4
-rw-r--r--gxp-mailbox.c2
-rw-r--r--gxp-mailbox.h3
-rw-r--r--gxp-pm.h25
-rw-r--r--gxp-thermal.c14
-rw-r--r--gxp-uci.c23
-rw-r--r--gxp-uci.h6
-rw-r--r--gxp-vd.c30
-rw-r--r--gxp-vd.h29
21 files changed, 435 insertions, 189 deletions
diff --git a/callisto-platform.c b/callisto-platform.c
index 6143ad7..26ea3cc 100644
--- a/callisto-platform.c
+++ b/callisto-platform.c
@@ -88,6 +88,8 @@ static int gxp_ioctl_uci_command_helper(struct gxp_client *client,
struct gxp_uci_command cmd;
int ret;
+ if (ibuf->virtual_core_id >= GXP_NUM_CORES)
+ return -EINVAL;
down_read(&client->semaphore);
if (!check_client_has_available_vd(client, "GXP_MAILBOX_COMMAND")) {
@@ -118,15 +120,21 @@ static int gxp_ioctl_uci_command_helper(struct gxp_client *client,
cmd.type = CORE_COMMAND;
/* TODO(b/248179414): Remove core assignment when MCU fw re-enables sticky core scheduler. */
- down_read(&gxp->vd_semaphore);
- cmd.priority = gxp_vd_virt_core_to_phys_core(client->vd, ibuf->virtual_core_id);
- up_read(&gxp->vd_semaphore);
- if (cmd.priority < 0) {
- dev_err(gxp->dev,
- "Mailbox command failed: Invalid virtual core id (%u)\n",
- ibuf->virtual_core_id);
- ret = -EINVAL;
- goto out;
+ {
+ int core;
+
+ down_read(&gxp->vd_semaphore);
+ core = gxp_vd_virt_core_to_phys_core(client->vd,
+ ibuf->virtual_core_id);
+ up_read(&gxp->vd_semaphore);
+ if (core < 0) {
+ dev_err(gxp->dev,
+ "Mailbox command failed: Invalid virtual core id (%u)\n",
+ ibuf->virtual_core_id);
+ ret = -EINVAL;
+ goto out;
+ }
+ cmd.priority = core;
}
cmd.client_id = client->vd->client_id;
@@ -136,7 +144,7 @@ static int gxp_ioctl_uci_command_helper(struct gxp_client *client,
* when MCU fw re-enables sticky core scheduler.
*/
ret = gxp_uci_send_command(
- &callisto->mcu.uci, &cmd,
+ &callisto->mcu.uci, client->vd, &cmd,
&client->vd->mailbox_resp_queues[UCI_RESOURCE_ID].queue,
&client->vd->mailbox_resp_queues[UCI_RESOURCE_ID].lock,
&client->vd->mailbox_resp_queues[UCI_RESOURCE_ID].waitq,
@@ -320,7 +328,7 @@ static int callisto_request_power_states(struct gxp_client *client,
cmd.client_id = client->vd->client_id;
ret = gxp_uci_send_command(
- &callisto->mcu.uci, &cmd,
+ &callisto->mcu.uci, client->vd, &cmd,
&client->vd->mailbox_resp_queues[UCI_RESOURCE_ID].queue,
&client->vd->mailbox_resp_queues[UCI_RESOURCE_ID].lock,
&client->vd->mailbox_resp_queues[UCI_RESOURCE_ID].waitq,
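This hunk does two things: it rejects an out-of-range ibuf->virtual_core_id before any lookup, and it threads the issuing virtual device into gxp_uci_send_command() so command credits (added later in this patch) can be charged to it. A minimal standalone sketch of the validate-then-translate pattern; the table contents and all names below are illustrative, not the driver's:

#include <errno.h>
#include <stdio.h>

#define NUM_CORES 4

/* Hypothetical virtual->physical core table; -1 marks an unmapped slot. */
static const int virt_to_phys[NUM_CORES] = { 2, 3, -1, -1 };

static int translate_core(unsigned int virt_id)
{
	/* Bounds-check before any table access, as the helper now does. */
	if (virt_id >= NUM_CORES)
		return -EINVAL;
	if (virt_to_phys[virt_id] < 0)
		return -EINVAL;
	return virt_to_phys[virt_id];
}

int main(void)
{
	printf("virt 1 -> phys %d\n", translate_core(1));	/* 3 */
	printf("virt 9 -> %d (rejected)\n", translate_core(9));
	return 0;
}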
diff --git a/callisto/config-pwr-state.h b/callisto/config-pwr-state.h
new file mode 100644
index 0000000..1a51874
--- /dev/null
+++ b/callisto/config-pwr-state.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Chip-dependent power configuration and states.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __CALLISTO_CONFIG_PWR_STATE_H__
+#define __CALLISTO_CONFIG_PWR_STATE_H__
+
+enum aur_power_rate {
+ AUR_OFF_RATE = 0,
+ AUR_UUD_RATE = 178000,
+ AUR_SUD_RATE = 355000,
+ AUR_UD_RATE = 713000,
+ AUR_NOM_RATE = 1065000,
+ AUR_READY_RATE = 178000,
+ AUR_UUD_PLUS_RATE = 256000,
+ AUR_SUD_PLUS_RATE = 560000,
+ AUR_UD_PLUS_RATE = 861000,
+};
+
+#endif /* __CALLISTO_CONFIG_PWR_STATE_H__ */
diff --git a/callisto/config.h b/callisto/config.h
index cd47e1d..253e266 100644
--- a/callisto/config.h
+++ b/callisto/config.h
@@ -39,6 +39,7 @@
*/
#define GXP_IS_DMA_COHERENT
+#include "config-pwr-state.h"
#include "context.h"
#include "csrs.h"
#include "iova.h"
diff --git a/gcip-kernel-driver/include/gcip/gcip-common-image-header.h b/gcip-kernel-driver/include/gcip/gcip-common-image-header.h
index d376be1..d986fbc 100644
--- a/gcip-kernel-driver/include/gcip/gcip-common-image-header.h
+++ b/gcip-kernel-driver/include/gcip/gcip-common-image-header.h
@@ -8,35 +8,37 @@
#ifndef __GCIP_COMMON_IMAGE_HEADER_H__
#define __GCIP_COMMON_IMAGE_HEADER_H__
+#include <linux/types.h>
+
#include "gcip-image-config.h"
#define GCIP_FW_HEADER_SIZE (0x1000)
struct gcip_common_image_sub_header_common {
- int magic;
- int generation;
- int rollback_info;
- int length;
- char flags[16];
+ uint32_t magic;
+ uint32_t generation;
+ uint32_t rollback_info;
+ uint32_t length;
+ uint8_t flags[16];
};
struct gcip_common_image_sub_header_gen1 {
- char body_hash[32];
- char chip_id[32];
- char auth_config[256];
+ uint8_t body_hash[32];
+ uint8_t chip_id[32];
+ uint8_t auth_config[256];
struct gcip_image_config image_config;
};
struct gcip_common_image_sub_header_gen2 {
- char body_hash[64];
- char chip_id[32];
- char auth_config[256];
+ uint8_t body_hash[64];
+ uint8_t chip_id[32];
+ uint8_t auth_config[256];
struct gcip_image_config image_config;
};
struct gcip_common_image_header {
- char sig[512];
- char pub[512];
+ uint8_t sig[512];
+ uint8_t pub[512];
struct {
struct gcip_common_image_sub_header_common common;
union {
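The header change above replaces int/char fields with uint32_t/uint8_t: sign extension and implementation-defined widths are undesirable when overlaying a struct on firmware image bytes, and fixed-width unsigned types pin the layout. A small sketch of why this matters when parsing such a header, assuming a little-endian host; the struct and values are invented for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct hdr {
	uint32_t magic;
	uint32_t length;
	uint8_t flags[16];
};

int main(void)
{
	const uint8_t blob[24] = { 0xDD, 0xCC, 0xBB, 0xAA, 0x00, 0x10 };
	struct hdr h;

	memcpy(&h, blob, sizeof(h));	/* copy to avoid aliasing issues */
	if (h.magic == 0xAABBCCDDu)	/* exact unsigned comparison */
		printf("magic ok, length %u\n", h.length);	/* 4096 */
	return 0;
}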
diff --git a/gxp-client.c b/gxp-client.c
index dfcaed5..3c6fb80 100644
--- a/gxp-client.c
+++ b/gxp-client.c
@@ -30,6 +30,8 @@ struct gxp_client *gxp_client_create(struct gxp_dev *gxp)
client->has_vd_wakelock = false;
client->requested_states = off_states;
client->vd = NULL;
+ client->mbx_desc.mapped = false;
+
return client;
}
diff --git a/gxp-common-platform.c b/gxp-common-platform.c
index 615da3d..2c32d3f 100644
--- a/gxp-common-platform.c
+++ b/gxp-common-platform.c
@@ -21,9 +21,6 @@
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/uidgid.h>
-#if (IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_ANDROID)) && !IS_ENABLED(CONFIG_GXP_GEM5)
-#include <soc/google/tpu-ext.h>
-#endif
#include "gxp-client.h"
#include "gxp-config.h"
@@ -46,6 +43,11 @@
#include "gxp-wakelock.h"
#include "gxp.h"
+#if (IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_ANDROID)) && !IS_ENABLED(CONFIG_GXP_GEM5)
+#define HAS_TPU_EXT
+#include <soc/google/tpu-ext.h>
+#endif
+
#if GXP_USE_LEGACY_MAILBOX
#include "gxp-mailbox-impl.h"
#else
@@ -857,38 +859,25 @@ static int gxp_disable_core_telemetry(struct gxp_client *client,
return ret;
}
+#ifdef HAS_TPU_EXT
+
/*
- * TODO(b/249440369): As the DSP KD will not get involved in the mapping the TPU mailbox buffer
- * from Zuma, remove the corresponding logic from this function. Note that, we still have to do
- * it from here in the direct mode. Also, we have to investigate whether it will be still proper
- * to call the `ALLOCATE_EXTERNAL_MAILBOX` TPU external command from here in MCU mode.
+ * Map TPU mailboxes to IOVA.
+ * This function is called only when the device is in direct mode.
*/
-static int gxp_map_tpu_mbx_queue(struct gxp_client *client,
- struct gxp_tpu_mbx_queue_ioctl __user *argp)
+static int map_tpu_mbx_queue(struct gxp_client *client,
+ struct gxp_tpu_mbx_queue_ioctl *ibuf)
{
-#if (IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_ANDROID)) && !IS_ENABLED(CONFIG_GXP_GEM5)
struct gxp_dev *gxp = client->gxp;
struct edgetpu_ext_mailbox_info *mbx_info;
- struct gxp_tpu_mbx_queue_ioctl ibuf;
struct edgetpu_ext_client_info gxp_tpu_info;
u32 phys_core_list = 0;
u32 core_count;
int ret = 0;
- if (!gxp->tpu_dev.mbx_paddr) {
- dev_err(gxp->dev, "%s: TPU is not available for interop\n",
- __func__);
- return -EINVAL;
- }
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
- return -EFAULT;
-
- down_write(&client->semaphore);
-
- if (!check_client_has_available_vd(client, "GXP_MAP_TPU_MBX_QUEUE")) {
- ret = -ENODEV;
- goto out_unlock_client_semaphore;
+ if (client->mbx_desc.mapped) {
+ dev_err(gxp->dev, "Mappings already exist for TPU mailboxes");
+ return -EBUSY;
}
down_read(&gxp->vd_semaphore);
@@ -896,24 +885,24 @@ static int gxp_map_tpu_mbx_queue(struct gxp_client *client,
core_count = client->vd->num_cores;
phys_core_list = gxp_vd_phys_core_list(client->vd);
- mbx_info =
- kmalloc(sizeof(struct edgetpu_ext_mailbox_info) + core_count *
- sizeof(struct edgetpu_ext_mailbox_descriptor),
- GFP_KERNEL);
+ mbx_info = kmalloc(
+ sizeof(struct edgetpu_ext_mailbox_info) +
+ core_count *
+ sizeof(struct edgetpu_ext_mailbox_descriptor),
+ GFP_KERNEL);
if (!mbx_info) {
ret = -ENOMEM;
goto out;
}
- if (client->tpu_file) {
- dev_err(gxp->dev, "Mappings already exist for TPU mailboxes");
- ret = -EBUSY;
- goto out_free;
- }
-
- gxp_tpu_info.tpu_fd = ibuf.tpu_fd;
+ /*
+	 * TODO(b/249440369): Pass the @client->tpu_file file pointer. For backward
+	 * compatibility, keep sending @ibuf->tpu_fd here.
+ */
+ gxp_tpu_info.tpu_fd = ibuf->tpu_fd;
gxp_tpu_info.mbox_map = phys_core_list;
- gxp_tpu_info.attr = (struct edgetpu_mailbox_attr __user *)ibuf.attr_ptr;
+ gxp_tpu_info.attr =
+ (struct edgetpu_mailbox_attr __user *)ibuf->attr_ptr;
ret = edgetpu_ext_driver_cmd(gxp->tpu_dev.dev,
EDGETPU_EXTERNAL_CLIENT_TYPE_DSP,
ALLOCATE_EXTERNAL_MAILBOX, &gxp_tpu_info,
@@ -923,78 +912,121 @@ static int gxp_map_tpu_mbx_queue(struct gxp_client *client,
ret);
goto out_free;
}
- /*
- * If someone is attacking us through this interface -
- * it's possible that ibuf.tpu_fd here is already a different file from
- * the one passed to edgetpu_ext_driver_cmd() (if the runtime closes the
- * FD and opens another file exactly between the TPU driver call above
- * and the fget below).
- * But the worst consequence of this attack is we fget() ourselves (GXP
- * FD), which only leads to memory leak (because the file object has a
- * reference to itself). The race is also hard to hit so we don't insist
- * on preventing it.
- */
- client->tpu_file = fget(ibuf.tpu_fd);
- if (!client->tpu_file) {
- edgetpu_ext_driver_cmd(gxp->tpu_dev.dev,
- EDGETPU_EXTERNAL_CLIENT_TYPE_DSP,
- FREE_EXTERNAL_MAILBOX, &gxp_tpu_info,
- NULL);
- ret = -EINVAL;
- goto out_free;
- }
+
/* Align queue size to page size for iommu map. */
mbx_info->cmdq_size = ALIGN(mbx_info->cmdq_size, PAGE_SIZE);
mbx_info->respq_size = ALIGN(mbx_info->respq_size, PAGE_SIZE);
- ret = gxp_dma_map_tpu_buffer(gxp, client->vd->domain,
- phys_core_list, mbx_info);
+ ret = gxp_dma_map_tpu_buffer(gxp, client->vd->domain, phys_core_list,
+ mbx_info);
if (ret) {
dev_err(gxp->dev, "Failed to map TPU mailbox buffer %d", ret);
- goto err_fput;
+ goto err_free_tpu_mbx;
}
client->mbx_desc.phys_core_list = phys_core_list;
client->mbx_desc.cmdq_size = mbx_info->cmdq_size;
client->mbx_desc.respq_size = mbx_info->respq_size;
-
- if (gxp->after_map_tpu_mbx_queue) {
- ret = gxp->after_map_tpu_mbx_queue(gxp, client);
- if (ret)
- goto err_unmap;
- }
+ client->mbx_desc.mapped = true;
goto out_free;
-err_unmap:
- gxp_dma_unmap_tpu_buffer(gxp, client->vd->domain, client->mbx_desc);
-err_fput:
- fput(client->tpu_file);
- client->tpu_file = NULL;
+err_free_tpu_mbx:
edgetpu_ext_driver_cmd(gxp->tpu_dev.dev,
EDGETPU_EXTERNAL_CLIENT_TYPE_DSP,
FREE_EXTERNAL_MAILBOX, &gxp_tpu_info, NULL);
out_free:
kfree(mbx_info);
-
out:
up_read(&gxp->vd_semaphore);
+
+ return ret;
+}
+
+/*
+ * Unmap TPU mailboxes from IOVA.
+ * This function is called only when the device is in direct mode.
+ */
+static void unmap_tpu_mbx_queue(struct gxp_client *client,
+ struct gxp_tpu_mbx_queue_ioctl *ibuf)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct edgetpu_ext_client_info gxp_tpu_info;
+
+ gxp_dma_unmap_tpu_buffer(gxp, client->vd->domain, client->mbx_desc);
+ gxp_tpu_info.tpu_fd = ibuf->tpu_fd;
+ edgetpu_ext_driver_cmd(gxp->tpu_dev.dev,
+ EDGETPU_EXTERNAL_CLIENT_TYPE_DSP,
+ FREE_EXTERNAL_MAILBOX, &gxp_tpu_info, NULL);
+ client->mbx_desc.mapped = false;
+}
+
+static int gxp_map_tpu_mbx_queue(struct gxp_client *client,
+ struct gxp_tpu_mbx_queue_ioctl __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_tpu_mbx_queue_ioctl ibuf;
+ int ret = 0;
+
+ if (!gxp->tpu_dev.mbx_paddr) {
+ dev_err(gxp->dev, "%s: TPU is not available for interop\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ down_write(&client->semaphore);
+
+ if (!check_client_has_available_vd(client, "GXP_MAP_TPU_MBX_QUEUE")) {
+ ret = -ENODEV;
+ goto out_unlock_client_semaphore;
+ }
+
+ /*
+	 * If someone is attacking us through this interface, it's possible that ibuf.tpu_fd here
+	 * is already a different file from the one passed to edgetpu_ext_driver_cmd() (if the
+	 * runtime closes the FD and opens another file exactly between the fget() below and the
+	 * TPU driver call).
+	 *
+	 * However, from Zuma onward, we pass the file pointer directly to the TPU KD, which
+	 * checks whether that file is a genuine TPU device file. Therefore, our code is safe
+	 * from this fd-swapping attack.
+ */
+ client->tpu_file = fget(ibuf.tpu_fd);
+ if (!client->tpu_file) {
+ ret = -EINVAL;
+ goto out_unlock_client_semaphore;
+ }
+
+ /* TODO(b/237624453): remove '|| 1' once the MCU supports DSP->TPU interop */
+ if (gxp_is_direct_mode(gxp) || 1) {
+ ret = map_tpu_mbx_queue(client, &ibuf);
+ if (ret)
+ goto out_unlock_client_semaphore;
+ }
+
+ if (gxp->after_map_tpu_mbx_queue) {
+ ret = gxp->after_map_tpu_mbx_queue(gxp, client);
+ if (ret)
+ goto err_unmap_tpu_mbx_queue;
+ }
+
+ goto out_unlock_client_semaphore;
+
+err_unmap_tpu_mbx_queue:
+ unmap_tpu_mbx_queue(client, &ibuf);
out_unlock_client_semaphore:
up_write(&client->semaphore);
return ret;
-#else
- return -ENODEV;
-#endif
}
-/* TODO(b/249440369): The same as the `gxp_map_tpu_mbx_queue` function. */
static int gxp_unmap_tpu_mbx_queue(struct gxp_client *client,
struct gxp_tpu_mbx_queue_ioctl __user *argp)
{
-#if (IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_ANDROID)) && !IS_ENABLED(CONFIG_GXP_GEM5)
struct gxp_dev *gxp = client->gxp;
struct gxp_tpu_mbx_queue_ioctl ibuf;
- struct edgetpu_ext_client_info gxp_tpu_info;
int ret = 0;
if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
@@ -1018,12 +1050,10 @@ static int gxp_unmap_tpu_mbx_queue(struct gxp_client *client,
if (gxp->before_unmap_tpu_mbx_queue)
gxp->before_unmap_tpu_mbx_queue(gxp, client);
- gxp_dma_unmap_tpu_buffer(gxp, client->vd->domain, client->mbx_desc);
+ /* TODO(b/237624453): remove '|| 1' once the MCU supports DSP->TPU interop */
+ if (gxp_is_direct_mode(gxp) || 1)
+ unmap_tpu_mbx_queue(client, &ibuf);
- gxp_tpu_info.tpu_fd = ibuf.tpu_fd;
- edgetpu_ext_driver_cmd(gxp->tpu_dev.dev,
- EDGETPU_EXTERNAL_CLIENT_TYPE_DSP,
- FREE_EXTERNAL_MAILBOX, &gxp_tpu_info, NULL);
fput(client->tpu_file);
client->tpu_file = NULL;
@@ -1031,11 +1061,15 @@ out:
up_write(&client->semaphore);
return ret;
-#else
- return -ENODEV;
-#endif
}
+#else /* HAS_TPU_EXT */
+
+#define gxp_map_tpu_mbx_queue(...) (-ENODEV)
+#define gxp_unmap_tpu_mbx_queue(...) (-ENODEV)
+
+#endif /* HAS_TPU_EXT */
+
static int gxp_register_core_telemetry_eventfd(
struct gxp_client *client,
struct gxp_register_telemetry_eventfd_ioctl __user *argp)
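Rather than wrapping each ioctl body in the kconfig check, the driver now defines HAS_TPU_EXT once next to the optional include, guards the real implementations with it, and stubs the entry points out to -ENODEV otherwise. A compilable sketch of the same pattern, using an invented config macro in place of the driver's kconfig condition:

#include <errno.h>
#include <stdio.h>

#ifdef CONFIG_HAS_EXT	/* stand-in for the kconfig condition */
#define HAS_EXT
#endif

#ifdef HAS_EXT
static int map_ext_queue(int fd)
{
	(void)fd;
	/* a real build would talk to the external driver here */
	return 0;
}
#else /* !HAS_EXT */
/* Variadic stubs keep every call site compiling, at zero runtime cost. */
#define map_ext_queue(...) (-ENODEV)
#endif /* HAS_EXT */

int main(void)
{
	printf("map_ext_queue: %d\n", map_ext_queue(3));
	return 0;
}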
diff --git a/gxp-dci.c b/gxp-dci.c
index 26544be..b742a6e 100644
--- a/gxp-dci.c
+++ b/gxp-dci.c
@@ -468,6 +468,7 @@ struct gxp_mailbox *gxp_dci_alloc(struct gxp_mailbox_manager *mgr,
.queue_wrap_bit = CIRCULAR_QUEUE_WRAP_BIT,
.cmd_elem_size = sizeof(struct gxp_dci_command),
.resp_elem_size = sizeof(struct gxp_dci_response),
+ .ignore_seq_order = false,
};
dci = kzalloc(sizeof(*dci), GFP_KERNEL);
diff --git a/gxp-dma-iommu.c b/gxp-dma-iommu.c
index c87e032..7ca60ea 100644
--- a/gxp-dma-iommu.c
+++ b/gxp-dma-iommu.c
@@ -213,6 +213,7 @@ struct gxp_iommu_domain *gxp_iommu_get_domain_for_dev(struct gxp_dev *gxp)
devm_kfree(gxp->dev, gdomain);
return ERR_PTR(-ENOMEM);
}
+ gxp->default_domain = gdomain;
}
return gdomain;
diff --git a/gxp-firmware.c b/gxp-firmware.c
index ea29837..57b3583 100644
--- a/gxp-firmware.c
+++ b/gxp-firmware.c
@@ -94,60 +94,20 @@ static int elf_load_segments(struct gxp_dev *gxp, const u8 *elf_data,
ehdr = (struct elf32_hdr *)elf_data;
phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff);
- if ((ehdr->e_ident[EI_MAG0] != ELFMAG0) ||
- (ehdr->e_ident[EI_MAG1] != ELFMAG1) ||
- (ehdr->e_ident[EI_MAG2] != ELFMAG2) ||
- (ehdr->e_ident[EI_MAG3] != ELFMAG3)) {
- dev_err(gxp->dev, "Cannot load FW! Invalid ELF format.\n");
- return -EINVAL;
- }
-
/* go through the available ELF segments */
for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
- u64 da = phdr->p_paddr;
- u32 memsz = phdr->p_memsz;
- u32 filesz = phdr->p_filesz;
- u32 offset = phdr->p_offset;
+ const u64 da = phdr->p_paddr;
+ const u32 memsz = phdr->p_memsz;
+ const u32 filesz = phdr->p_filesz;
void *ptr;
- if (phdr->p_type != PT_LOAD)
+ if (phdr->p_type != PT_LOAD || !phdr->p_flags || !memsz)
continue;
- if (!phdr->p_flags)
+ if (!(da >= buffer->daddr &&
+ da + memsz <= buffer->daddr + buffer->size))
continue;
- if (!memsz)
- continue;
-
- if (!((da >= (u32)buffer->daddr) &&
- ((da + memsz) <= ((u32)buffer->daddr +
- (u32)buffer->size)))) {
- /*
- * Some BSS data may be referenced from TCM, and can be
- * skipped while loading
- */
- dev_err(gxp->dev, "Segment out of bounds: da 0x%llx mem 0x%x. Skipping...\n",
- da, memsz);
- continue;
- }
-
- dev_notice(gxp->dev, "phdr: type %d da 0x%llx memsz 0x%x filesz 0x%x\n",
- phdr->p_type, da, memsz, filesz);
-
- if (filesz > memsz) {
- dev_err(gxp->dev, "Bad phdr filesz 0x%x memsz 0x%x\n",
- filesz, memsz);
- ret = -EINVAL;
- break;
- }
-
- if (offset + filesz > size) {
- dev_err(gxp->dev, "Truncated fw: need 0x%x avail 0x%zx\n",
- offset + filesz, size);
- ret = -EINVAL;
- break;
- }
-
/* grab the kernel address for this device address */
ptr = buffer->vaddr + (da - buffer->daddr);
if (!ptr) {
@@ -260,6 +220,112 @@ error:
return ret;
}
+static int gxp_firmware_fetch_boundary(struct gxp_dev *gxp, const u8 *elf_data,
+ size_t size,
+ const struct gxp_mapped_resource *buffer,
+ dma_addr_t *boundary_ptr)
+{
+ struct elf32_hdr *ehdr = (struct elf32_hdr *)elf_data;
+ struct elf32_phdr *phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff);
+ int i, ret = 0;
+ dma_addr_t boundary = 0;
+
+ if ((ehdr->e_ident[EI_MAG0] != ELFMAG0) ||
+ (ehdr->e_ident[EI_MAG1] != ELFMAG1) ||
+ (ehdr->e_ident[EI_MAG2] != ELFMAG2) ||
+ (ehdr->e_ident[EI_MAG3] != ELFMAG3)) {
+ dev_err(gxp->dev, "Invalid ELF format.");
+ return -EINVAL;
+ }
+
+ /* go through the available ELF segments */
+ for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
+ const u64 da = phdr->p_paddr;
+ const u32 memsz = phdr->p_memsz;
+ const u32 filesz = phdr->p_filesz;
+ const u32 offset = phdr->p_offset;
+ const u32 p_flags = phdr->p_flags;
+
+ if (phdr->p_type != PT_LOAD || !p_flags || !memsz)
+ continue;
+
+ if (!(da >= buffer->daddr &&
+ da + memsz <= buffer->daddr + buffer->size)) {
+ /*
+ * Some BSS data may be referenced from TCM, and can be
+ * skipped while loading
+ */
+ dev_err(gxp->dev, "Segment out of bounds: da 0x%llx mem 0x%x. Skipping...",
+ da, memsz);
+ continue;
+ }
+
+ dev_info(gxp->dev,
+ "phdr: da %#llx memsz %#x filesz %#x perm %d", da,
+ memsz, filesz, p_flags);
+
+ if (filesz > memsz) {
+ dev_err(gxp->dev, "Bad phdr filesz %#x memsz %#x",
+ filesz, memsz);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (offset + filesz > size) {
+ dev_err(gxp->dev, "Truncated fw: need %#x avail %#zx",
+ offset + filesz, size);
+ ret = -EINVAL;
+ break;
+ }
+ if (p_flags & PF_W) {
+ if (!boundary)
+ boundary = da;
+ } else if (boundary) {
+ dev_err(gxp->dev,
+ "Found RO region after a writable segment");
+ ret = -EINVAL;
+ break;
+ }
+ }
+ /* no boundary has been found - assume the whole image is RO */
+ if (!boundary)
+ boundary = buffer->daddr + buffer->size;
+ if (!ret)
+ *boundary_ptr = boundary;
+
+ return ret;
+}
+
+/*
+ * Sets @rw_boundaries by analyzing LOAD segments in ELF headers.
+ *
+ * Assumes the LOAD segments are arranged with RO first then RW. Returns -EINVAL
+ * if this is not true.
+ */
+static int gxp_firmware_fetch_boundaries(struct gxp_dev *gxp,
+ struct gxp_firmware_manager *mgr)
+{
+ int core, ret;
+
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ ret = gxp_firmware_fetch_boundary(
+ gxp, mgr->firmwares[core]->data + FW_HEADER_SIZE,
+ mgr->firmwares[core]->size - FW_HEADER_SIZE,
+ &gxp->fwbufs[core], &mgr->rw_boundaries[core]);
+ if (ret) {
+ dev_err(gxp->dev,
+ "failed to fetch boundary of core %d: %d", core,
+ ret);
+ goto error;
+ }
+ }
+ return 0;
+
+error:
+ memset(mgr->rw_boundaries, 0, sizeof(mgr->rw_boundaries));
+ return ret;
+}
+
/* Forward declaration for usage inside gxp_firmware_load(..). */
static void gxp_firmware_unload(struct gxp_dev *gxp, uint core);
@@ -515,6 +581,10 @@ static ssize_t load_dsp_firmware_store(struct device *dev,
mgr->firmwares[core] = firmwares[core];
}
+ ret = gxp_firmware_fetch_boundaries(gxp, mgr);
+ if (ret)
+ goto err_fetch_boundaries;
+
kfree(mgr->firmware_name);
mgr->firmware_name = name_buf;
@@ -522,6 +592,9 @@ static ssize_t load_dsp_firmware_store(struct device *dev,
up_read(&gxp->vd_semaphore);
return count;
+err_fetch_boundaries:
+ for (core = 0; core < GXP_NUM_CORES; core++)
+ mgr->firmwares[core] = NULL;
err_authenticate_firmware:
for (core = 0; core < GXP_NUM_CORES; core++)
release_firmware(firmwares[core]);
@@ -687,6 +760,10 @@ int gxp_firmware_request_if_needed(struct gxp_dev *gxp)
if (ret)
goto err_authenticate_firmware;
+ ret = gxp_firmware_fetch_boundaries(gxp, mgr);
+ if (ret)
+ goto err_authenticate_firmware;
+
mgr->is_firmware_requested = true;
out:
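The ELF validation and bounds logging moved out of elf_load_segments() into gxp_firmware_fetch_boundary(), which additionally records the device address of the first writable PT_LOAD segment: everything below that boundary is later mapped read-only, everything above it read-write, and a read-only segment following a writable one is rejected. A condensed sketch of that scan over a toy segment list (names and values are illustrative):

#include <errno.h>
#include <stdio.h>

#define SEG_W 0x2	/* mirrors ELF PF_W */

struct seg { unsigned long da; unsigned int flags; };

/* Hypothetical layout: two read-only segments, then one writable one. */
static const struct seg segs[] = {
	{ 0x1000, 0x5 }, { 0x3000, 0x5 }, { 0x8000, 0x6 },
};

static int fetch_boundary(const struct seg *s, int n, unsigned long *out)
{
	unsigned long boundary = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (s[i].flags & SEG_W) {
			if (!boundary)		/* first writable segment */
				boundary = s[i].da;
		} else if (boundary) {		/* RO after RW: invalid */
			return -EINVAL;
		}
	}
	*out = boundary;	/* 0 would mean the whole image is RO */
	return 0;
}

int main(void)
{
	unsigned long b;

	if (!fetch_boundary(segs, 3, &b))
		printf("rw boundary at 0x%lx\n", b);	/* 0x8000 */
	return 0;
}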
diff --git a/gxp-firmware.h b/gxp-firmware.h
index ed4fa8d..1985717 100644
--- a/gxp-firmware.h
+++ b/gxp-firmware.h
@@ -45,6 +45,16 @@ struct gxp_firmware_manager {
struct mutex dsp_firmware_lock;
/* Firmware status bitmap. Accessors must hold `vd_semaphore`. */
u32 firmware_running;
+ /*
+	 * The boundary between read-only and writable segments.
+	 * The mappings are programmed as
+	 * [fwbufs[i].daddr, rw_boundaries[i]): RO
+	 * [rw_boundaries[i], fwbufs[i].daddr + fwbufs[i].size): RW
+ *
+ * The boundary information is collected by parsing the ELF
+ * header after @firmwares have been fetched.
+ */
+ dma_addr_t rw_boundaries[GXP_NUM_CORES];
};
enum aurora_msg {
diff --git a/gxp-internal.h b/gxp-internal.h
index 0b18c1e..5dd6e53 100644
--- a/gxp-internal.h
+++ b/gxp-internal.h
@@ -37,6 +37,7 @@ enum gxp_chip_revision {
struct gxp_tpu_mbx_desc {
uint phys_core_list;
size_t cmdq_size, respq_size;
+ bool mapped;
};
/* ioremapped resource */
diff --git a/gxp-kci.c b/gxp-kci.c
index 8216706..8191887 100644
--- a/gxp-kci.c
+++ b/gxp-kci.c
@@ -90,6 +90,7 @@ gxp_reverse_kci_handle_response(struct gcip_kci *kci,
{
struct gxp_mailbox *mbx = gcip_kci_get_data(kci);
struct gxp_dev *gxp = mbx->gxp;
+ struct gxp_kci *gxp_kci = mbx->data;
if (resp->code <= GCIP_RKCI_CHIP_CODE_LAST) {
/* TODO(b/239638427): Handle reverse kci */
@@ -104,6 +105,7 @@ gxp_reverse_kci_handle_response(struct gcip_kci *kci,
core);
}
}
+ gxp_kci_resp_rkci_ack(gxp_kci, resp);
break;
}
default:
@@ -564,16 +566,18 @@ int gxp_kci_notify_throttling(struct gxp_kci *gkci, u32 rate)
return gxp_kci_send_cmd(gkci->mbx, &cmd);
}
-int gxp_kci_resp_rkci_ack(struct gxp_kci *gkci,
- struct gcip_kci_response_element *rkci_cmd)
+void gxp_kci_resp_rkci_ack(struct gxp_kci *gkci,
+ struct gcip_kci_response_element *rkci_cmd)
{
struct gcip_kci_command_element cmd = {
.seq = rkci_cmd->seq,
.code = GCIP_KCI_CODE_RKCI_ACK,
};
+ struct gxp_dev *gxp = gkci->gxp;
+ int ret;
- if (!gkci || !gkci->mbx)
- return -ENODEV;
-
- return gxp_kci_send_cmd(gkci->mbx, &cmd);
+ ret = gxp_kci_send_cmd(gkci->mbx, &cmd);
+ if (ret)
+ dev_err(gxp->dev, "failed to send rkci resp %llu (%d)",
+ rkci_cmd->seq, ret);
}
diff --git a/gxp-kci.h b/gxp-kci.h
index aa210c5..0589f0c 100644
--- a/gxp-kci.h
+++ b/gxp-kci.h
@@ -236,7 +236,7 @@ int gxp_kci_release_vmbox(struct gxp_kci *gkci, u8 client_id);
* The FW may wait for a response from the kernel for an RKCI request so a
* response could be sent as an ack.
*/
-int gxp_kci_resp_rkci_ack(struct gxp_kci *gkci,
- struct gcip_kci_response_element *rkci_cmd);
+void gxp_kci_resp_rkci_ack(struct gxp_kci *gkci,
+ struct gcip_kci_response_element *rkci_cmd);
#endif /* __GXP_KCI_H__ */
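gxp_kci_resp_rkci_ack() now returns void: its only caller, the reverse-KCI handler, has no way to recover from a failed send, so the helper logs the error internally instead of propagating it. A small sketch of this fire-and-forget shape, with stand-in types:

#include <errno.h>
#include <stdio.h>

static int send_cmd(unsigned long long seq)
{
	return (seq & 1) ? -EIO : 0;	/* pretend odd seqs fail to send */
}

static void ack(unsigned long long seq)
{
	int ret = send_cmd(seq);

	/* The caller has no recovery path, so log and move on. */
	if (ret)
		fprintf(stderr, "failed to send rkci resp %llu (%d)\n",
			seq, ret);
}

int main(void)
{
	ack(42);	/* succeeds silently */
	ack(43);	/* logs the failure */
	return 0;
}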
diff --git a/gxp-mailbox.c b/gxp-mailbox.c
index 09c3e11..0e0b365 100644
--- a/gxp-mailbox.c
+++ b/gxp-mailbox.c
@@ -145,6 +145,7 @@ static struct gxp_mailbox *create_mailbox(struct gxp_mailbox_manager *mgr,
mailbox->queue_wrap_bit = args->queue_wrap_bit;
mailbox->cmd_elem_size = args->cmd_elem_size;
mailbox->resp_elem_size = args->resp_elem_size;
+ mailbox->ignore_seq_order = args->ignore_seq_order;
gxp_mailbox_set_data(mailbox, args->data);
ret = gxp_mailbox_set_ops(mailbox, args->ops);
@@ -205,6 +206,7 @@ static int init_gcip_mailbox(struct gxp_mailbox *mailbox)
.timeout = MAILBOX_TIMEOUT,
.ops = mailbox->ops->gcip_ops.mbx,
.data = mailbox,
+ .ignore_seq_order = mailbox->ignore_seq_order,
};
struct gcip_mailbox *gcip_mbx;
int ret;
diff --git a/gxp-mailbox.h b/gxp-mailbox.h
index 12bc1bf..cf72fbe 100644
--- a/gxp-mailbox.h
+++ b/gxp-mailbox.h
@@ -150,6 +150,7 @@ struct gxp_mailbox_args {
u64 queue_wrap_bit;
u32 cmd_elem_size;
u32 resp_elem_size;
+ bool ignore_seq_order;
void *data;
};
@@ -198,6 +199,8 @@ struct gxp_mailbox {
struct gxp_mailbox_ops *ops;
void *data; /* private data */
+ bool ignore_seq_order; /* allow out-of-order responses if true (always false in KCI) */
+
#if GXP_USE_LEGACY_MAILBOX
u64 cur_seq;
/* add to this list if a command needs to wait for a response */
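ignore_seq_order is plumbed from gxp_mailbox_args through gxp_mailbox into the gcip mailbox, letting UCI accept responses out of order while KCI (which leaves the flag false) keeps strict sequencing. A toy sketch of what the relaxed mode permits; the pending-list representation here is invented:

#include <stdbool.h>
#include <stdio.h>

#define PENDING 4

static unsigned long long pending[PENDING] = { 10, 11, 12, 13 };

static bool complete(unsigned long long seq, bool ignore_seq_order)
{
	int i;

	for (i = 0; i < PENDING; i++) {
		if (pending[i] != seq)
			continue;
		if (!ignore_seq_order && i != 0)
			return false;	/* strict mode: head of queue only */
		pending[i] = 0;		/* relaxed: match anywhere */
		return true;
	}
	return false;
}

int main(void)
{
	/* seq 12 arrives early: rejected strictly, accepted when relaxed */
	printf("strict: %d relaxed: %d\n",
	       complete(12, false), complete(12, true));
	return 0;
}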
diff --git a/gxp-pm.h b/gxp-pm.h
index 9526e0c..b1425f5 100644
--- a/gxp-pm.h
+++ b/gxp-pm.h
@@ -11,18 +11,7 @@
#include "gxp-internal.h"
-#define AUR_DVFS_MIN_RATE 178000
-static const uint aur_power_state2rate[] = {
- 0, /* AUR_OFF */
- 178000, /* AUR_UUD */
- 373000, /* AUR_SUD */
- 750000, /* AUR_UD */
- 1155000, /* AUR_NOM */
- 178000, /* AUR_READY */
- 268000, /* AUR_UUD_PLUS */
- 560000, /* AUR_SUD_PLUS */
- 975000, /* AUR_UD_PLUS */
-};
+#define AUR_DVFS_MIN_RATE AUR_UUD_RATE
enum aur_power_state {
AUR_OFF = 0,
@@ -36,6 +25,18 @@ enum aur_power_state {
AUR_UD_PLUS = 8,
};
+static const uint aur_power_state2rate[] = {
+ AUR_OFF_RATE,
+ AUR_UUD_RATE,
+ AUR_SUD_RATE,
+ AUR_UD_RATE,
+ AUR_NOM_RATE,
+ AUR_READY_RATE,
+ AUR_UUD_PLUS_RATE,
+ AUR_SUD_PLUS_RATE,
+ AUR_UD_PLUS_RATE,
+};
+
enum aur_memory_power_state {
AUR_MEM_UNDEFINED = 0,
AUR_MEM_MIN = 1,
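The state-to-rate table above is indexed by enum aur_power_state, so every rate must sit at the index of its state; the driver relies on positional initializers to keep the two in sync. A sketch of how designated initializers plus a C11 static assertion could make that coupling explicit (not part of the patch; names and values abbreviated):

#include <assert.h>
#include <stdio.h>

enum state { S_OFF, S_UUD, S_NOM, S_COUNT };

/* Designated initializers pin each rate to its state's index. */
static const unsigned int state2rate[] = {
	[S_OFF] = 0,
	[S_UUD] = 178000,
	[S_NOM] = 1065000,
};

/* Fails to compile if a state is added without a matching rate. */
static_assert(sizeof(state2rate) / sizeof(state2rate[0]) == S_COUNT,
	      "state2rate out of sync with enum state");

int main(void)
{
	printf("UUD rate: %u\n", state2rate[S_UUD]);
	return 0;
}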
diff --git a/gxp-thermal.c b/gxp-thermal.c
index 6451e76..812f466 100644
--- a/gxp-thermal.c
+++ b/gxp-thermal.c
@@ -43,13 +43,13 @@ void thermal_cdev_update(struct thermal_cooling_device *cdev);
* b/229623553
*/
static struct gxp_state_pwr state_pwr_map[] = {
- {1155000, 78},
- {975000, 58},
- {750000, 40},
- {560000, 27},
- {373000, 20},
- {268000, 16},
- {178000, 13},
+ { AUR_NOM_RATE, 78 },
+ { AUR_UD_PLUS_RATE, 58 },
+ { AUR_UD_RATE, 40 },
+ { AUR_SUD_PLUS_RATE, 27 },
+ { AUR_SUD_RATE, 20 },
+ { AUR_UUD_PLUS_RATE, 16 },
+ { AUR_UUD_RATE, 13 },
};
static int gxp_get_max_state(struct thermal_cooling_device *cdev,
diff --git a/gxp-uci.c b/gxp-uci.c
index cdd06b5..d9bf21f 100644
--- a/gxp-uci.c
+++ b/gxp-uci.c
@@ -12,10 +12,11 @@
#include "gxp-config.h"
#include "gxp-internal.h"
-#include "gxp-mailbox.h"
#include "gxp-mailbox-driver.h"
+#include "gxp-mailbox.h"
#include "gxp-mcu.h"
#include "gxp-uci.h"
+#include "gxp-vd.h"
#include "gxp.h"
#define CIRCULAR_QUEUE_WRAP_BIT BIT(15)
@@ -122,8 +123,10 @@ gxp_uci_handle_awaiter_arrived(struct gcip_mailbox *mailbox,
* If dest_queue is a null pointer, it means we don't care the response
* of the command. Skip it.
*/
- if (!async_resp->dest_queue)
+ if (!async_resp->dest_queue) {
+ gcip_mailbox_release_awaiter(awaiter);
return;
+ }
spin_lock_irqsave(async_resp->dest_queue_lock, flags);
@@ -193,6 +196,7 @@ static void gxp_uci_release_awaiter_data(void *data)
{
struct gxp_uci_async_response *async_resp = data;
+ gxp_vd_release_credit(async_resp->vd);
kfree(async_resp);
}
@@ -309,6 +313,7 @@ int gxp_uci_init(struct gxp_mcu *mcu)
.queue_wrap_bit = CIRCULAR_QUEUE_WRAP_BIT,
.cmd_elem_size = sizeof(struct gxp_uci_command),
.resp_elem_size = sizeof(struct gxp_uci_response),
+ .ignore_seq_order = true,
.data = uci,
};
@@ -331,7 +336,8 @@ void gxp_uci_exit(struct gxp_uci *uci)
uci->mbx = NULL;
}
-int gxp_uci_send_command(struct gxp_uci *uci, struct gxp_uci_command *cmd,
+int gxp_uci_send_command(struct gxp_uci *uci, struct gxp_virtual_device *vd,
+ struct gxp_uci_command *cmd,
struct list_head *resp_queue, spinlock_t *queue_lock,
wait_queue_head_t *queue_waitq,
struct gxp_eventfd *eventfd)
@@ -339,11 +345,16 @@ int gxp_uci_send_command(struct gxp_uci *uci, struct gxp_uci_command *cmd,
struct gxp_uci_async_response *async_resp;
int ret;
+ if (!gxp_vd_has_and_use_credit(vd))
+ return -EBUSY;
async_resp = kzalloc(sizeof(*async_resp), GFP_KERNEL);
- if (!async_resp)
- return -ENOMEM;
+ if (!async_resp) {
+ ret = -ENOMEM;
+ goto err_release_credit;
+ }
async_resp->uci = uci;
+ async_resp->vd = vd;
/*
* If the command is a wakelock command, keep dest_queue as a null
* pointer to indicate that we will not expose the response to the
@@ -369,6 +380,8 @@ int gxp_uci_send_command(struct gxp_uci *uci, struct gxp_uci_command *cmd,
err_free_resp:
kfree(async_resp);
+err_release_credit:
+ gxp_vd_release_credit(vd);
return ret;
}
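The first hunk above fixes a leak: wakelock commands pass a NULL dest_queue because no consumer will ever read the response, so the arrival handler itself must release the awaiter reference. A toy refcount sketch of that rule, with invented types:

#include <stdio.h>
#include <stdlib.h>

struct awaiter { int refs; };

static void put(struct awaiter *a)
{
	if (--a->refs == 0) {
		printf("awaiter freed\n");
		free(a);
	}
}

static void on_response(struct awaiter *a, void *dest_queue)
{
	if (!dest_queue) {	/* wakelock case: no consumer exists... */
		put(a);		/* ...so drop the reference here, or leak */
		return;
	}
	/* otherwise the reference is handed to the queue's consumer */
}

int main(void)
{
	struct awaiter *a = malloc(sizeof(*a));

	if (!a)
		return 1;
	a->refs = 1;
	on_response(a, NULL);	/* prints "awaiter freed" */
	return 0;
}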
diff --git a/gxp-uci.h b/gxp-uci.h
index c05504f..31a0aaa 100644
--- a/gxp-uci.h
+++ b/gxp-uci.h
@@ -15,6 +15,7 @@
#include "gxp-client.h"
#include "gxp-internal.h"
#include "gxp-mailbox.h"
+#include "gxp-vd.h"
#define UCI_RESOURCE_ID 0
@@ -101,6 +102,8 @@ struct gxp_uci_async_response {
wait_queue_head_t *dest_queue_waitq;
/* gxp_eventfd to signal when the response completes. May be NULL. */
struct gxp_eventfd *eventfd;
+ /* The request was sent from this virtual device. */
+ struct gxp_virtual_device *vd;
/* Handles arrival, timeout of async response. */
struct gcip_mailbox_resp_awaiter *awaiter;
};
@@ -146,7 +149,8 @@ void gxp_uci_exit(struct gxp_uci *uci);
*
* Returns 0 on success, a negative errno on failure.
*/
-int gxp_uci_send_command(struct gxp_uci *uci, struct gxp_uci_command *cmd,
+int gxp_uci_send_command(struct gxp_uci *uci, struct gxp_virtual_device *vd,
+ struct gxp_uci_command *cmd,
struct list_head *resp_queue, spinlock_t *queue_lock,
wait_queue_head_t *queue_waitq,
struct gxp_eventfd *eventfd);
diff --git a/gxp-vd.c b/gxp-vd.c
index a7d2d6a..7c9d489 100644
--- a/gxp-vd.c
+++ b/gxp-vd.c
@@ -8,6 +8,7 @@
#include <linux/bitops.h>
#include <linux/idr.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include "gxp-config.h"
#include "gxp-core-telemetry.h"
@@ -204,6 +205,8 @@ struct gxp_virtual_device *gxp_vd_allocate(struct gxp_dev *gxp,
vd->slice_index = -1;
vd->client_id = -1;
vd->tpu_client_id = -1;
+ spin_lock_init(&vd->credit_lock);
+ vd->credit = GXP_COMMAND_CREDIT_PER_VD;
vd->domain = gxp_domain_pool_alloc(gxp->domain_pool);
if (!vd->domain) {
@@ -767,3 +770,30 @@ struct gxp_mapping *gxp_vd_mapping_search_host(struct gxp_virtual_device *vd,
return NULL;
}
+
+bool gxp_vd_has_and_use_credit(struct gxp_virtual_device *vd)
+{
+ bool ret = true;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vd->credit_lock, flags);
+ if (vd->credit == 0)
+ ret = false;
+ else
+ vd->credit--;
+ spin_unlock_irqrestore(&vd->credit_lock, flags);
+
+ return ret;
+}
+
+void gxp_vd_release_credit(struct gxp_virtual_device *vd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vd->credit_lock, flags);
+ if (unlikely(vd->credit >= GXP_COMMAND_CREDIT_PER_VD))
+ dev_err(vd->gxp->dev, "unbalanced VD credit");
+ else
+ vd->credit++;
+ spin_unlock_irqrestore(&vd->credit_lock, flags);
+}
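These two functions implement the command-credit scheme: gxp_uci_send_command() takes a credit before enqueueing and the awaiter release callback returns it, so at most GXP_COMMAND_CREDIT_PER_VD commands per VD are in flight. A standalone sketch of the bracket (locking omitted; names and the 2-credit limit are illustrative):

#include <errno.h>
#include <stdio.h>

#define CREDITS 2

struct vd { int credit; };

static int take(struct vd *v)
{
	if (v->credit == 0)
		return 0;
	v->credit--;
	return 1;
}

static void give(struct vd *v)
{
	v->credit++;
}

static int send_cmd(struct vd *v)
{
	if (!take(v))
		return -EBUSY;	/* too many commands in flight */
	/* ...enqueue; the matching give() runs when the response is freed */
	return 0;
}

int main(void)
{
	struct vd v = { CREDITS };
	int a = send_cmd(&v);
	int b = send_cmd(&v);
	int c = send_cmd(&v);	/* third command is rejected */

	printf("%d %d %d\n", a, b, c);	/* 0 0 -16 */
	give(&v);			/* a response was consumed */
	printf("%d\n", send_cmd(&v));	/* 0: a slot opened again */
	return 0;
}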
diff --git a/gxp-vd.h b/gxp-vd.h
index c84e527..22ef800 100644
--- a/gxp-vd.h
+++ b/gxp-vd.h
@@ -19,6 +19,9 @@
#include "gxp-internal.h"
#include "gxp-mapping.h"
+/* TODO(b/259192112): set to 8 once the runtime has added the credit limit. */
+#define GXP_COMMAND_CREDIT_PER_VD 256
+
struct mailbox_resp_queue {
/* Queue of async responses */
struct list_head queue;
@@ -76,6 +79,20 @@ struct gxp_virtual_device {
* This ID will be fetched from the TPU kernel driver.
*/
int tpu_client_id;
+ /*
+	 * Protects @credit. A spin lock is used because the critical
+	 * section around @credit is short.
+ */
+ spinlock_t credit_lock;
+ /*
+ * Credits for sending mailbox commands. It's initialized as
+ * GXP_COMMAND_CREDIT_PER_VD. The value is decreased on sending
+ * mailbox commands; increased on receiving mailbox responses.
+ * Mailbox command requests are rejected when this value reaches 0.
+ *
+ * Only used in MCU mode.
+ */
+ uint credit;
};
/*
@@ -287,4 +304,16 @@ int gxp_vd_block_ready(struct gxp_virtual_device *vd);
*/
void gxp_vd_block_unready(struct gxp_virtual_device *vd);
+/*
+ * Checks whether the virtual device has a positive credit and, if so,
+ * consumes one credit.
+ *
+ * Returns true when there is enough credit, false otherwise.
+ */
+bool gxp_vd_has_and_use_credit(struct gxp_virtual_device *vd);
+/*
+ * Releases one credit taken by gxp_vd_has_and_use_credit().
+ */
+void gxp_vd_release_credit(struct gxp_virtual_device *vd);
+
#endif /* __GXP_VD_H__ */