summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAurora zuma automerger <aurora-zuma-automerger@google.com>2023-03-02 11:25:24 +0000
committerCopybara-Service <copybara-worker@google.com>2023-03-02 04:33:22 -0800
commit7fd75e876ae0bcbd3d47f777b0de8774a165238f (patch)
treeee3cfafe39100a97edb2675469ddd6c421a46d6d
parent1f96601aed9b8837e5155eb38f80d05a2220302e (diff)
downloadzuma-7fd75e876ae0bcbd3d47f777b0de8774a165238f.tar.gz
gxp: [Copybara Auto Merge] Merge branch 'zuma' into 'android14-gs-pixel-5.15'
gxp: mcu use image config for mailbox mappings Bug: 268151565 Bug: 268151565 (repeat) gxp: remove TODOs of VMBox KCIs Bug: 241057541 gcip: Add gcip_thermal_destroy Bug; 264729080 gcip: Add thermal votes Bug: 271194361 Bug: 264729080 (repeat) gcip: Cleanup abandoned domains on domain-pool destroy gcip: Prefix MAX_NUM_THERMAL_STATES Bug: 264729080 (repeat) gcip: Add const to thermal_cooling_device_ops Bug: 264729080 (repeat) gcip: Add thermal support Bug: 264729080 (repeat) GCIP_MAIN_REV_ID: 5cd0d883f53d9414aef73f9b78a48e7cd45e97b6 gxp: remove legacy UCI interface gxp: log a warning on failing to map CMU reg gxp: use HW watchdog to monitor MCU Bug: 255416846 gxp: disable out-dated auth mechanism in MCU mode Bug: 260533620 gxp: refactor core firmware loading gcip: Add gcip_thermal_destroy Bug: 264729080 (repeat) gcip: Add thermal votes Bug: 271194361 (repeat) Bug: 264729080 (repeat) gcip: Add missing includes to gcip-domain-pool.h gcip: Add list of dynamic domains to domain-pool gcip: Prefix MAX_NUM_THERMAL_STATES Bug: 264729080 (repeat) gcip: add watchdog timeout crash type Bug:255416846 gcip: Add thermal header Bug: 264729080 (repeat) GCIP_HEADERS_REV_ID: 6abe7aefe5f6fbc752e3f5307af6a3e08e1f5917 GitOrigin-RevId: eac0a9b25a25665e34ccf94eac855e4f594bc9ba Change-Id: I17890be7fdf5633ed7ec114532a4d4fa360641db
-rw-r--r--callisto/config.h5
-rw-r--r--gcip-kernel-driver/drivers/gcip/Makefile3
-rw-r--r--gcip-kernel-driver/drivers/gcip/gcip-domain-pool.c54
-rw-r--r--gcip-kernel-driver/drivers/gcip/gcip-thermal.c517
-rw-r--r--gcip-kernel-driver/include/gcip/gcip-domain-pool.h5
-rw-r--r--gcip-kernel-driver/include/gcip/gcip-firmware.h8
-rw-r--r--gcip-kernel-driver/include/gcip/gcip-thermal.h118
-rw-r--r--gxp-common-platform.c2
-rw-r--r--gxp-firmware.c103
-rw-r--r--gxp-firmware.h2
-rw-r--r--gxp-mcu-firmware.c26
-rw-r--r--gxp-mcu-firmware.h7
-rw-r--r--gxp-mcu-fs.c148
-rw-r--r--gxp-mcu-platform.c116
-rw-r--r--gxp-mcu.c48
15 files changed, 836 insertions, 326 deletions
diff --git a/callisto/config.h b/callisto/config.h
index 6b9aa0d..9be1ef5 100644
--- a/callisto/config.h
+++ b/callisto/config.h
@@ -46,6 +46,11 @@
*/
#define GXP_IS_DMA_COHERENT
+/* HW watchdog */
+#define GXP_WDG_DT_IRQ_INDEX 5
+#define GXP_WDG_ENABLE_BIT 0
+#define GXP_WDG_INT_CLEAR_BIT 5
+
#include "config-pwr-state.h"
#include "context.h"
#include "csrs.h"
diff --git a/gcip-kernel-driver/drivers/gcip/Makefile b/gcip-kernel-driver/drivers/gcip/Makefile
index bc370e5..c47f1c5 100644
--- a/gcip-kernel-driver/drivers/gcip/Makefile
+++ b/gcip-kernel-driver/drivers/gcip/Makefile
@@ -15,7 +15,8 @@ gcip-objs := gcip-alloc-helper.o \
gcip-mailbox.o \
gcip-mem-pool.o \
gcip-pm.o \
- gcip-telemetry.o
+ gcip-telemetry.o \
+ gcip-thermal.o
CURRENT_DIR=$(dir $(abspath $(lastword $(MAKEFILE_LIST))))
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-domain-pool.c b/gcip-kernel-driver/drivers/gcip/gcip-domain-pool.c
index 2341b52..c3c41ea 100644
--- a/gcip-kernel-driver/drivers/gcip/gcip-domain-pool.c
+++ b/gcip-kernel-driver/drivers/gcip/gcip-domain-pool.c
@@ -12,6 +12,11 @@
#include <gcip/gcip-domain-pool.h>
+struct dynamic_domain {
+ struct list_head list_entry;
+ struct iommu_domain *domain;
+};
+
int gcip_domain_pool_init(struct device *dev, struct gcip_domain_pool *pool, unsigned int size)
{
unsigned int i;
@@ -19,6 +24,8 @@ int gcip_domain_pool_init(struct device *dev, struct gcip_domain_pool *pool, uns
pool->size = size;
pool->dev = dev;
+ INIT_LIST_HEAD(&pool->dynamic_domains);
+ mutex_init(&pool->lock);
if (!size)
return 0;
@@ -48,9 +55,23 @@ int gcip_domain_pool_init(struct device *dev, struct gcip_domain_pool *pool, uns
struct iommu_domain *gcip_domain_pool_alloc(struct gcip_domain_pool *pool)
{
int id;
+ struct dynamic_domain *ddomain;
- if (!pool->size)
- return iommu_domain_alloc(pool->dev->bus);
+ if (!pool->size) {
+ ddomain = vzalloc(sizeof(*ddomain));
+ if (!ddomain)
+ return ERR_PTR(-ENOMEM);
+
+ ddomain->domain = iommu_domain_alloc(pool->dev->bus);
+ if (!ddomain->domain) {
+ vfree(ddomain);
+ return NULL;
+ }
+ mutex_lock(&pool->lock);
+ list_add_tail(&ddomain->list_entry, &pool->dynamic_domains);
+ mutex_unlock(&pool->lock);
+ return ddomain->domain;
+ }
id = ida_alloc_max(&pool->idp, pool->size - 1, GFP_KERNEL);
@@ -67,11 +88,25 @@ struct iommu_domain *gcip_domain_pool_alloc(struct gcip_domain_pool *pool)
void gcip_domain_pool_free(struct gcip_domain_pool *pool, struct iommu_domain *domain)
{
int id;
+ struct dynamic_domain *ddomain;
+ struct list_head *cur, *nxt;
if (!pool->size) {
- iommu_domain_free(domain);
+ mutex_lock(&pool->lock);
+ list_for_each_safe(cur, nxt, &pool->dynamic_domains) {
+ ddomain = container_of(cur, struct dynamic_domain, list_entry);
+ if (ddomain->domain == domain) {
+ list_del(&ddomain->list_entry);
+ mutex_unlock(&pool->lock);
+ iommu_domain_free(domain);
+ vfree(ddomain);
+ return;
+ }
+ }
+ mutex_unlock(&pool->lock);
return;
}
+
for (id = 0; id < pool->size; id++) {
if (pool->array[id] == domain) {
dev_dbg(pool->dev, "Released domain from pool with id = %d\n", id);
@@ -85,9 +120,20 @@ void gcip_domain_pool_free(struct gcip_domain_pool *pool, struct iommu_domain *d
void gcip_domain_pool_destroy(struct gcip_domain_pool *pool)
{
int i;
+ struct dynamic_domain *ddomain;
+ struct list_head *cur, *nxt;
- if (!pool->size)
+ if (!pool->size) {
+ mutex_lock(&pool->lock);
+ list_for_each_safe(cur, nxt, &pool->dynamic_domains) {
+ ddomain = container_of(cur, struct dynamic_domain, list_entry);
+ list_del(&ddomain->list_entry);
+ iommu_domain_free(ddomain->domain);
+ vfree(ddomain);
+ }
+ mutex_unlock(&pool->lock);
return;
+ }
dev_dbg(pool->dev, "Destroying domain pool with %u domains\n", pool->size);
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-thermal.c b/gcip-kernel-driver/drivers/gcip/gcip-thermal.c
new file mode 100644
index 0000000..5afa65e
--- /dev/null
+++ b/gcip-kernel-driver/drivers/gcip/gcip-thermal.c
@@ -0,0 +1,517 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Thermal management support for GCIP devices.
+ *
+ * Copyright (C) 2023 Google LLC
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/minmax.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/thermal.h>
+#include <linux/version.h>
+
+#include <gcip/gcip-pm.h>
+#include <gcip/gcip-thermal.h>
+
+#define OF_DATA_NUM_MAX (GCIP_THERMAL_MAX_NUM_STATES * 2)
+
+#define to_cdev(dev) container_of(dev, struct thermal_cooling_device, device)
+#define to_gcip_thermal(dev) ((struct gcip_thermal *)to_cdev(dev)->devdata)
+
+/* Struct for state to rate and state to power mappings. */
+struct gcip_rate_pwr {
+ unsigned long rate;
+ u32 power;
+};
+
+static struct gcip_rate_pwr state_map[GCIP_THERMAL_MAX_NUM_STATES] = { 0 };
+
+static int gcip_thermal_get_max_state(struct thermal_cooling_device *cdev, unsigned long *state)
+{
+ struct gcip_thermal *thermal = cdev->devdata;
+
+ if (!thermal->num_states)
+ return -ENODEV;
+
+ *state = thermal->num_states - 1;
+
+ return 0;
+}
+
+static int gcip_thermal_get_cur_state(struct thermal_cooling_device *cdev, unsigned long *state)
+{
+ struct gcip_thermal *thermal = cdev->devdata;
+
+ mutex_lock(&thermal->lock);
+ *state = thermal->state;
+ mutex_unlock(&thermal->lock);
+
+ return 0;
+}
+
+static int gcip_thermal_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state)
+{
+ struct gcip_thermal *thermal = cdev->devdata;
+ int i, ret = 0;
+
+ if (state >= thermal->num_states) {
+ dev_err(thermal->dev, "Invalid thermal cooling state %lu\n", state);
+ return -EINVAL;
+ }
+
+ mutex_lock(&thermal->lock);
+
+ thermal->vote[GCIP_THERMAL_COOLING_DEVICE] = state;
+ for (i = 0; i < GCIP_THERMAL_MAX_NUM_VOTERS; i++)
+ state = max(state, thermal->vote[i]);
+
+ if (state == thermal->state)
+ goto out;
+
+ if (!gcip_pm_get_if_powered(thermal->pm, false)) {
+ ret = thermal->set_rate(thermal->data, state_map[state].rate);
+ gcip_pm_put(thermal->pm);
+ }
+
+ if (ret)
+ dev_err(thermal->dev, "Failed to set thermal cooling state: %d\n", ret);
+ else
+ thermal->state = state;
+out:
+ mutex_unlock(&thermal->lock);
+
+ return ret;
+}
+
+static int gcip_thermal_rate2power_internal(struct gcip_thermal *thermal, unsigned long rate,
+ u32 *power)
+{
+ int i;
+
+ for (i = 0; i < thermal->num_states; i++) {
+ if (rate == state_map[i].rate) {
+ *power = state_map[i].power;
+ return 0;
+ }
+ }
+
+ dev_err(thermal->dev, "Unknown rate for: %lu\n", rate);
+ *power = 0;
+
+ return -EINVAL;
+}
+
+static int gcip_thermal_get_requested_power(struct thermal_cooling_device *cdev, u32 *power)
+{
+ struct gcip_thermal *thermal = cdev->devdata;
+ unsigned long rate;
+ int ret;
+
+ if (gcip_pm_get_if_powered(thermal->pm, false)) {
+ *power = 0;
+ return 0;
+ }
+
+ mutex_lock(&thermal->lock);
+
+ ret = thermal->get_rate(thermal->data, &rate);
+
+ mutex_unlock(&thermal->lock);
+ gcip_pm_put(thermal->pm);
+
+ if (ret)
+ return ret;
+
+ return gcip_thermal_rate2power_internal(thermal, rate, power);
+}
+
+static int gcip_thermal_state2power(struct thermal_cooling_device *cdev, unsigned long state,
+ u32 *power)
+{
+ struct gcip_thermal *thermal = cdev->devdata;
+
+ if (state >= thermal->num_states) {
+ dev_err(thermal->dev, "Invalid state: %lu\n", state);
+ return -EINVAL;
+ }
+
+ return gcip_thermal_rate2power_internal(thermal, state_map[state].rate, power);
+}
+
+static int gcip_thermal_power2state(struct thermal_cooling_device *cdev, u32 power,
+ unsigned long *state)
+{
+ struct gcip_thermal *thermal = cdev->devdata;
+
+ if (!thermal->num_states)
+ return -ENODEV;
+
+ /*
+ * Argument "power" is the maximum allowed power consumption in mW as defined by the PID
+ * control loop. Checks for the first state that is less than or equal to the current
+ * allowed power. state_map is descending, so lowest power consumption is last value in the
+ * array. Returns lowest state even if it consumes more power than allowed as not all
+ * platforms can handle throttling below an active state.
+ */
+ for (*state = 0; *state < thermal->num_states; (*state)++)
+ if (power >= state_map[*state].power)
+ return 0;
+
+ *state = thermal->num_states - 1;
+
+ return 0;
+}
+
+static const struct thermal_cooling_device_ops gcip_thermal_ops = {
+ .get_max_state = gcip_thermal_get_max_state,
+ .get_cur_state = gcip_thermal_get_cur_state,
+ .set_cur_state = gcip_thermal_set_cur_state,
+ .get_requested_power = gcip_thermal_get_requested_power,
+ .state2power = gcip_thermal_state2power,
+ .power2state = gcip_thermal_power2state,
+};
+
+/* This API was removed, but Android still uses it to update thermal request. */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 12, 0) && IS_ENABLED(CONFIG_ANDROID)
+void thermal_cdev_update(struct thermal_cooling_device *cdev);
+#endif
+
+/* Propagates new vote state to the thermal core so it re-evaluates cooling. */
+static void gcip_thermal_update(struct gcip_thermal *thermal)
+{
+ struct thermal_cooling_device *cdev = thermal->cdev;
+
+ cdev->updated = false;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 12, 0) || IS_ENABLED(CONFIG_ANDROID)
+ thermal_cdev_update(cdev);
+#elif IS_ENABLED(CONFIG_THERMAL)
+ dev_err_once(thermal->dev, "Thermal update not implemented");
+#endif
+}
+
+static ssize_t user_vote_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct gcip_thermal *thermal = to_gcip_thermal(dev);
+ ssize_t ret;
+
+ if (!thermal)
+ return -ENODEV;
+
+ mutex_lock(&thermal->lock);
+ ret = sysfs_emit(buf, "%lu\n", thermal->vote[GCIP_THERMAL_SYSFS]);
+ mutex_unlock(&thermal->lock);
+
+ return ret;
+}
+
+static ssize_t user_vote_store(struct device *dev, struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct gcip_thermal *thermal = to_gcip_thermal(dev);
+ unsigned long state;
+ int ret;
+
+ if (!thermal)
+ return -ENODEV;
+
+ ret = kstrtoul(buf, 0, &state);
+ if (ret)
+ return ret;
+
+ if (state >= thermal->num_states)
+ return -EINVAL;
+
+ mutex_lock(&thermal->lock);
+ thermal->vote[GCIP_THERMAL_SYSFS] = state;
+ mutex_unlock(&thermal->lock);
+
+ gcip_thermal_update(thermal);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(user_vote);
+
+static int gcip_thermal_rate2state(struct gcip_thermal *thermal, unsigned long rate)
+{
+ int i;
+
+ for (i = 0; i < thermal->num_states; i++) {
+ if (state_map[i].rate <= rate)
+ return i;
+ }
+
+ /* Returns lowest state on an invalid input. */
+ return thermal->num_states - 1;
+}
+
+static int gcip_thermal_notifier(struct notifier_block *nb, unsigned long rate, void *nb_data)
+{
+ struct gcip_thermal *thermal = container_of(nb, struct gcip_thermal, nb);
+ unsigned long state = gcip_thermal_rate2state(thermal, rate);
+
+ dev_dbg(thermal->dev, "Thermal notifier req original: %lu, state: %lu\n", rate, state);
+
+ mutex_lock(&thermal->lock);
+ thermal->vote[GCIP_THERMAL_NOTIFIER_BLOCK] = state;
+ mutex_unlock(&thermal->lock);
+
+ gcip_thermal_update(thermal);
+
+ return NOTIFY_OK;
+}
+
+struct notifier_block *gcip_thermal_get_notifier_block(struct gcip_thermal *thermal)
+{
+ if (IS_ERR_OR_NULL(thermal))
+ return NULL;
+
+ return &thermal->nb;
+}
+
+void gcip_thermal_destroy(struct gcip_thermal *thermal)
+{
+ if (IS_ERR_OR_NULL(thermal))
+ return;
+
+ debugfs_remove_recursive(thermal->dentry);
+ thermal_cooling_device_unregister(thermal->cdev);
+ devm_kfree(thermal->dev, thermal);
+}
+
+static int gcip_thermal_enable_get(void *data, u64 *val)
+{
+ struct gcip_thermal *thermal = (struct gcip_thermal *)data;
+
+ mutex_lock(&thermal->lock);
+ *val = thermal->enabled;
+ mutex_unlock(&thermal->lock);
+
+ return 0;
+}
+
+static int gcip_thermal_enable_set(void *data, u64 val)
+{
+ struct gcip_thermal *thermal = (struct gcip_thermal *)data;
+ int ret = 0;
+
+ mutex_lock(&thermal->lock);
+
+ if (thermal->enabled != (bool)val) {
+ /*
+ * If the device is not powered, the value will be restored by
+ * gcip_thermal_restore_on_powering in next fw boot.
+ */
+ if (!gcip_pm_get_if_powered(thermal->pm, false)) {
+ ret = thermal->control(thermal->data, val);
+ gcip_pm_put(thermal->pm);
+ }
+
+ if (!ret) {
+ thermal->enabled = val;
+ dev_info_ratelimited(thermal->dev, "%s thermal control",
+ thermal->enabled ? "Enable" : "Disable");
+ } else {
+ dev_err(thermal->dev, "Failed to %s thermal control: %d ",
+ val ? "enable" : "disable", ret);
+ }
+ }
+
+ mutex_unlock(&thermal->lock);
+
+ return ret;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_gcip_thermal_enable, gcip_thermal_enable_get, gcip_thermal_enable_set,
+ "%llu\n");
+
+static int gcip_thermal_parse_dvfs_table(struct gcip_thermal *thermal)
+{
+ int row_size, col_size, tbl_size, i;
+ int of_data_int_array[OF_DATA_NUM_MAX];
+
+ if (of_property_read_u32_array(thermal->dev->of_node, GCIP_THERMAL_TABLE_SIZE_NAME,
+ of_data_int_array, 2))
+ goto error;
+
+ row_size = of_data_int_array[0];
+ col_size = of_data_int_array[1];
+ tbl_size = row_size * col_size;
+ if (row_size > GCIP_THERMAL_MAX_NUM_STATES) {
+ dev_err(thermal->dev, "Too many states\n");
+ goto error;
+ }
+
+ if (tbl_size > OF_DATA_NUM_MAX)
+ goto error;
+
+ if (of_property_read_u32_array(thermal->dev->of_node, GCIP_THERMAL_TABLE_NAME,
+ of_data_int_array, tbl_size))
+ goto error;
+
+ thermal->num_states = row_size;
+ for (i = 0; i < row_size; ++i) {
+ int idx = col_size * i;
+
+ state_map[i].rate = of_data_int_array[idx];
+ state_map[i].power = of_data_int_array[idx + 1];
+ }
+
+ return 0;
+
+error:
+ dev_err(thermal->dev, "Failed to parse DVFS table\n");
+
+ return -EINVAL;
+}
+
+static int gcip_thermal_cooling_register(struct gcip_thermal *thermal, const char *type,
+ const char *node_name)
+{
+ struct device_node *node = NULL;
+ int ret;
+
+ ret = gcip_thermal_parse_dvfs_table(thermal);
+ if (ret)
+ return ret;
+
+ if (node_name)
+ node = of_find_node_by_name(NULL, node_name);
+ if (!node)
+ dev_warn(thermal->dev, "Failed to find thermal cooling node\n");
+
+ thermal->cdev = thermal_of_cooling_device_register(node, type, thermal, &gcip_thermal_ops);
+ if (IS_ERR(thermal->cdev))
+ return PTR_ERR(thermal->cdev);
+
+ ret = device_create_file(&thermal->cdev->device, &dev_attr_user_vote);
+ if (ret)
+ thermal_cooling_device_unregister(thermal->cdev);
+
+ return ret;
+}
+
+struct gcip_thermal *gcip_thermal_create(const struct gcip_thermal_args *args)
+{
+ struct gcip_thermal *thermal;
+ int ret;
+
+ if (!args->dev || !args->get_rate || !args->set_rate || !args->control)
+ return ERR_PTR(-EINVAL);
+
+ thermal = devm_kzalloc(args->dev, sizeof(*thermal), GFP_KERNEL);
+ if (!thermal)
+ return ERR_PTR(-ENOMEM);
+
+ thermal->dev = args->dev;
+ thermal->nb.notifier_call = gcip_thermal_notifier;
+ thermal->pm = args->pm;
+ thermal->enabled = true;
+ thermal->data = args->data;
+ thermal->get_rate = args->get_rate;
+ thermal->set_rate = args->set_rate;
+ thermal->control = args->control;
+
+ mutex_init(&thermal->lock);
+
+ ret = gcip_thermal_cooling_register(thermal, args->type, args->node_name);
+ if (ret) {
+ dev_err(args->dev, "Failed to initialize external thermal cooling\n");
+ devm_kfree(args->dev, thermal);
+ return ERR_PTR(ret);
+ }
+
+ thermal->dentry = debugfs_create_dir("cooling", args->dentry);
+ /* Don't let debugfs creation failure abort the init procedure. */
+ if (IS_ERR_OR_NULL(thermal->dentry))
+ dev_warn(args->dev, "Failed to create debugfs for thermal cooling");
+ else
+ debugfs_create_file("enable", 0660, thermal->dentry, thermal,
+ &fops_gcip_thermal_enable);
+
+ return thermal;
+}
+
+int gcip_thermal_suspend_device(struct gcip_thermal *thermal)
+{
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(thermal))
+ return 0;
+
+ mutex_lock(&thermal->lock);
+
+ /*
+ * Always sets as suspended even when the request cannot be handled for unknown reasons
+ * because we still want to prevent the client from using device.
+ */
+ thermal->device_suspended = true;
+ if (!gcip_pm_get_if_powered(thermal->pm, false)) {
+ ret = thermal->set_rate(thermal->data, 0);
+ gcip_pm_put(thermal->pm);
+ }
+
+ mutex_unlock(&thermal->lock);
+
+ return ret;
+}
+
+int gcip_thermal_resume_device(struct gcip_thermal *thermal)
+{
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(thermal))
+ return 0;
+
+ mutex_lock(&thermal->lock);
+
+ if (!gcip_pm_get_if_powered(thermal->pm, false)) {
+ ret = thermal->set_rate(thermal->data, state_map[thermal->state].rate);
+ gcip_pm_put(thermal->pm);
+ }
+
+ /*
+ * Unlike gcip_thermal_suspend_device(), only sets the device as resumed if the request is
+ * fulfilled.
+ */
+ if (!ret)
+ thermal->device_suspended = false;
+
+ mutex_unlock(&thermal->lock);
+
+ return ret;
+}
+
+bool gcip_thermal_is_device_suspended(struct gcip_thermal *thermal)
+{
+ if (IS_ERR_OR_NULL(thermal))
+ return false;
+
+ return thermal->device_suspended;
+}
+
+int gcip_thermal_restore_on_powering(struct gcip_thermal *thermal)
+{
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(thermal))
+ return 0;
+
+ gcip_pm_lockdep_assert_held(thermal->pm);
+ mutex_lock(&thermal->lock);
+
+ if (!thermal->enabled)
+ ret = thermal->control(thermal->data, thermal->enabled);
+ else if (thermal->device_suspended)
+ ret = thermal->set_rate(thermal->data, 0);
+ else if (thermal->state)
+ /* Skips state 0 since it's the default thermal state. */
+ ret = thermal->set_rate(thermal->data, state_map[thermal->state].rate);
+
+ mutex_unlock(&thermal->lock);
+
+ return ret;
+}
diff --git a/gcip-kernel-driver/include/gcip/gcip-domain-pool.h b/gcip-kernel-driver/include/gcip/gcip-domain-pool.h
index b740bf9..a5441a9 100644
--- a/gcip-kernel-driver/include/gcip/gcip-domain-pool.h
+++ b/gcip-kernel-driver/include/gcip/gcip-domain-pool.h
@@ -8,8 +8,11 @@
#ifndef __GCIP_DOMAIN_POOL_H__
#define __GCIP_DOMAIN_POOL_H__
+#include <linux/device.h>
#include <linux/idr.h>
#include <linux/iommu.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
struct gcip_domain_pool {
struct ida idp; /* ID allocator to keep track of used domains. */
@@ -20,6 +23,8 @@ struct gcip_domain_pool {
unsigned int size;
struct iommu_domain **array; /* Array holding the pointers to pre-allocated domains. */
struct device *dev; /* The device used for logging warnings/errors. */
+ struct list_head dynamic_domains; /* Tracks dynamically allocated domains for cleanup. */
+ struct mutex lock; /* Protects dynamic_domains. */
};
/*
diff --git a/gcip-kernel-driver/include/gcip/gcip-firmware.h b/gcip-kernel-driver/include/gcip/gcip-firmware.h
index 8cf4353..b48317b 100644
--- a/gcip-kernel-driver/include/gcip/gcip-firmware.h
+++ b/gcip-kernel-driver/include/gcip/gcip-firmware.h
@@ -44,9 +44,10 @@ enum gcip_fw_flavor {
GCIP_FW_FLAVOR_CUSTOM = 4,
};
-/* Type of firmware crash which will be sent by GCIP_RKCI_FIRMWARE_CRASH RKCI command. */
+/* Type of firmware crash. */
enum gcip_fw_crash_type {
- /* Assert happened. */
+ /* Type which will be sent by GCIP_RKCI_FIRMWARE_CRASH reverse KCI. */
+ /* Assert happened. */
GCIP_FW_CRASH_ASSERT_FAIL = 0,
/* Data abort exception. */
GCIP_FW_CRASH_DATA_ABORT = 1,
@@ -58,6 +59,9 @@ enum gcip_fw_crash_type {
GCIP_FW_CRASH_UNRECOVERABLE_FAULT = 4,
/* Used in debug dump. */
GCIP_FW_CRASH_DUMMY_CRASH_TYPE = 0xFF,
+
+ /* HW watchdog timeout. */
+ GCIP_FW_CRASH_HW_WDG_TIMEOUT = 0x100,
};
/* Firmware info filled out via KCI FIRMWARE_INFO command. */
diff --git a/gcip-kernel-driver/include/gcip/gcip-thermal.h b/gcip-kernel-driver/include/gcip/gcip-thermal.h
new file mode 100644
index 0000000..f742705
--- /dev/null
+++ b/gcip-kernel-driver/include/gcip/gcip-thermal.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Thermal management support for GCIP devices.
+ *
+ * Copyright (C) 2023 Google LLC
+ */
+
+#ifndef __GCIP_THERMAL_H__
+#define __GCIP_THERMAL_H__
+
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/thermal.h>
+
+#define GCIP_THERMAL_TABLE_SIZE_NAME "gcip-dvfs-table-size"
+#define GCIP_THERMAL_TABLE_NAME "gcip-dvfs-table"
+#define GCIP_THERMAL_MAX_NUM_STATES 10
+
+enum gcip_thermal_voter {
+ GCIP_THERMAL_COOLING_DEVICE,
+ GCIP_THERMAL_SYSFS,
+ GCIP_THERMAL_NOTIFIER_BLOCK,
+
+ /* Keeps as the last entry for the total number of voters. */
+ GCIP_THERMAL_MAX_NUM_VOTERS,
+};
+
+struct gcip_thermal {
+ struct device *dev;
+ struct thermal_cooling_device *cdev;
+ struct notifier_block nb;
+ struct dentry *dentry;
+ struct gcip_pm *pm;
+
+ /*
+ * Lock to protect the struct members listed below.
+ *
+ * Note that since the request of thermal state adjusting might happen during power state
+ * transitions (i.e., another thread calling gcip_thermal_restore() with pm lock held), one
+ * must either use the non-blocking gcip_pm_get_if_powered() or make sure there won't be any
+ * new power transition after holding this thermal lock to prevent deadlock.
+ */
+ struct mutex lock;
+ unsigned long num_states;
+ unsigned long state;
+ unsigned long vote[GCIP_THERMAL_MAX_NUM_VOTERS];
+ bool device_suspended;
+ bool enabled;
+
+ /* Private data. See struct gcip_thermal_args.*/
+ void *data;
+
+ /* Callbacks. See struct gcip_thermal_args. */
+ int (*get_rate)(void *data, unsigned long *rate);
+ int (*set_rate)(void *data, unsigned long rate);
+ int (*control)(void *data, bool enable);
+};
+
+/* Arguments for devm_gcip_thermal_create. */
+struct gcip_thermal_args {
+ /* Device struct of GCIP device. */
+ struct device *dev;
+ /* GCIP power management. */
+ struct gcip_pm *pm;
+ /* Top-level debugfs directory for the device. */
+ struct dentry *dentry;
+ /* Name of the thermal cooling-device node in device tree. */
+ const char *node_name;
+ /* Thermal cooling device type for thermal_of_cooling_device_register() . */
+ const char *type;
+ /* Private data for callbacks listed below. */
+ void *data;
+ /*
+ * Callbacks listed below are called only if the device is powered and with the guarantee
+ * that there won't be any new power transition during the call (i.e., after
+ * gcip_pm_get_if_powered() succeeds or during the power up triggered by gcip_pm_get())
+ * to prevent deadlock since they are called with thermal lock held. See the note about
+ * thermal lock in struct gcip_thermal.
+ */
+ /* Callback to get the device clock rate. */
+ int (*get_rate)(void *data, unsigned long *rate);
+ /*
+ * Callback to set the device clock rate.
+ * Might be called with pm lock held in gcip_thermal_restore_on_powering().
+ */
+ int (*set_rate)(void *data, unsigned long rate);
+ /*
+ * Callback to enable/disable the thermal control.
+ * Might be called with pm lock held in gcip_thermal_restore_on_powering().
+ */
+ int (*control)(void *data, bool enable);
+};
+
+/* Gets the notifier_block struct for thermal throttling requests. */
+struct notifier_block *gcip_thermal_get_notifier_block(struct gcip_thermal *thermal);
+/* Allocates and initializes GCIP thermal struct. */
+struct gcip_thermal *gcip_thermal_create(const struct gcip_thermal_args *args);
+/* Destroys and frees GCIP thermal struct. */
+void gcip_thermal_destroy(struct gcip_thermal *thermal);
+/* Suspends the device due to thermal request. */
+int gcip_thermal_suspend_device(struct gcip_thermal *thermal);
+/* Resumes the device and restores previous thermal state. */
+int gcip_thermal_resume_device(struct gcip_thermal *thermal);
+/*
+ * Checks whether the device is suspended by thermal.
+ * Note that it's checked without thermal lock and state might change subsequently.
+ */
+bool gcip_thermal_is_device_suspended(struct gcip_thermal *thermal);
+/*
+ * Restores the previous thermal state.
+ *
+ * This function is designed to restore the thermal state during power management calls and thus it
+ * assumes the caller holds the pm lock.
+ */
+int gcip_thermal_restore_on_powering(struct gcip_thermal *thermal);
+
+#endif /* __GCIP_THERMAL_H__ */
diff --git a/gxp-common-platform.c b/gxp-common-platform.c
index 2a3515f..9a54a6d 100644
--- a/gxp-common-platform.c
+++ b/gxp-common-platform.c
@@ -1836,6 +1836,8 @@ static int gxp_set_reg_resources(struct platform_device *pdev, struct gxp_dev *g
gxp->cmu.paddr = r->start;
gxp->cmu.size = resource_size(r);
gxp->cmu.vaddr = devm_ioremap_resource(dev, r);
+ if (IS_ERR_OR_NULL(gxp->cmu.vaddr))
+ dev_warn(dev, "Failed to map CMU registers\n");
}
/*
* TODO (b/224685748): Remove this block after CMU CSR is supported
diff --git a/gxp-firmware.c b/gxp-firmware.c
index b0453d5..804ea1a 100644
--- a/gxp-firmware.c
+++ b/gxp-firmware.c
@@ -238,6 +238,9 @@ gxp_firmware_authenticate(struct gxp_dev *gxp,
return 0;
}
+ if (!gxp_is_direct_mode(gxp))
+ return 0;
+
for (core = 0; core < GXP_NUM_CORES; core++) {
data = firmwares[core]->data;
size = firmwares[core]->size;
@@ -302,9 +305,6 @@ error:
return ret;
}
-/* Forward declaration for usage inside gxp_firmware_load(..). */
-static void gxp_firmware_unload(struct gxp_dev *gxp, uint core);
-
static void gxp_program_reset_vector(struct gxp_dev *gxp, uint core,
uint phys_core, bool verbose)
{
@@ -383,40 +383,6 @@ static void reset_core_config_region(struct gxp_dev *gxp,
}
}
-static int gxp_firmware_load(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
- uint core)
-{
- struct gxp_firmware_manager *mgr = gxp->firmware_mgr;
- int ret;
-
- if (!mgr->firmwares[core])
- return -ENODEV;
- if (mgr->loaded[core])
- return 0;
-
- /* Load firmware to System RAM */
- ret = elf_load_segments(gxp,
- mgr->firmwares[core]->data + FW_HEADER_SIZE,
- mgr->firmwares[core]->size - FW_HEADER_SIZE,
- &gxp->fwbufs[core]);
- if (ret) {
- dev_err(gxp->dev, "Unable to load elf file\n");
- goto out_firmware_unload;
- }
-
- /* TODO(b/188970444): Cleanup logging of addresses */
- dev_notice(gxp->dev,
- "ELF loaded at virtual: %pK and physical: %#llx\n",
- gxp->fwbufs[core].vaddr, gxp->fwbufs[core].paddr);
- mgr->loaded[core] = true;
-
- return 0;
-
-out_firmware_unload:
- gxp_firmware_unload(gxp, core);
- return ret;
-}
-
static int gxp_firmware_handshake(struct gxp_dev *gxp,
struct gxp_virtual_device *vd, uint core,
uint phys_core)
@@ -515,9 +481,40 @@ static int gxp_firmware_handshake(struct gxp_dev *gxp,
return 0;
}
-static void gxp_firmware_unload(struct gxp_dev *gxp, uint core)
+static void gxp_firmware_load(struct gxp_dev *gxp,
+ const struct firmware *firmwares[GXP_NUM_CORES])
+{
+ uint core;
+
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ /* Load firmware to System RAM */
+ memcpy_toio(gxp->fwbufs[core].vaddr,
+ firmwares[core]->data + FW_HEADER_SIZE,
+ firmwares[core]->size - FW_HEADER_SIZE);
+ }
+}
+
+static int gxp_firmware_rearrange_elf(struct gxp_dev *gxp)
{
- /* NO-OP for now. */
+ struct gxp_firmware_manager *mgr = gxp->firmware_mgr;
+ int ret = 0;
+ uint core;
+
+ lockdep_assert_held(&mgr->dsp_firmware_lock);
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ /* Re-arrange ELF firmware in System RAM */
+ ret = elf_load_segments(
+ gxp, mgr->firmwares[core]->data + FW_HEADER_SIZE,
+ mgr->firmwares[core]->size - FW_HEADER_SIZE,
+ &gxp->fwbufs[core]);
+ if (ret) {
+ dev_err(gxp->dev,
+ "Failed to parse ELF firmware on core %u\n",
+ core);
+ return ret;
+ }
+ }
+ return ret;
}
/* Helper function to parse name written to sysfs "load_dsp_firmware" node */
@@ -601,6 +598,7 @@ static ssize_t load_dsp_firmware_store(struct device *dev,
goto err_request_firmware;
}
+ gxp_firmware_load(gxp, firmwares);
ret = gxp_firmware_authenticate(gxp, firmwares);
if (ret)
goto err_authenticate_firmware;
@@ -609,9 +607,12 @@ static ssize_t load_dsp_firmware_store(struct device *dev,
if (mgr->firmwares[core])
release_firmware(mgr->firmwares[core]);
mgr->firmwares[core] = firmwares[core];
- mgr->loaded[core] = false;
}
+ ret = gxp_firmware_rearrange_elf(gxp);
+ if (ret)
+ goto err_rearrange_elf;
+
kfree(mgr->firmware_name);
mgr->firmware_name = name_buf;
gxp_firmware_has_requested(gxp, mgr);
@@ -620,6 +621,9 @@ static ssize_t load_dsp_firmware_store(struct device *dev,
up_read(&gxp->vd_semaphore);
return count;
+err_rearrange_elf:
+ for (core = 0; core < GXP_NUM_CORES; core++)
+ mgr->firmwares[core] = NULL;
err_authenticate_firmware:
for (core = 0; core < GXP_NUM_CORES; core++)
release_firmware(firmwares[core]);
@@ -781,16 +785,22 @@ int gxp_firmware_request_if_needed(struct gxp_dev *gxp)
if (ret)
goto out;
+ gxp_firmware_load(gxp, mgr->firmwares);
ret = gxp_firmware_authenticate(gxp, mgr->firmwares);
if (ret)
goto err_authenticate_firmware;
+ ret = gxp_firmware_rearrange_elf(gxp);
+ if (ret)
+ goto err_rearrange_elf;
+
gxp_firmware_has_requested(gxp, mgr);
out:
mutex_unlock(&mgr->dsp_firmware_lock);
return ret;
+err_rearrange_elf:
err_authenticate_firmware:
for (core = 0; core < GXP_NUM_CORES; core++) {
release_firmware(mgr->firmwares[core]);
@@ -837,12 +847,6 @@ static int gxp_firmware_setup(struct gxp_dev *gxp,
return -EBUSY;
}
- ret = gxp_firmware_load(gxp, vd, core);
- if (ret) {
- dev_err(gxp->dev, "Failed to load firmware on core %u\n",
- phys_core);
- return ret;
- }
/* Configure bus performance monitors */
gxp_bpm_configure(gxp, phys_core, INST_BPM_OFFSET, BPM_EVENT_READ_XFER);
gxp_bpm_configure(gxp, phys_core, DATA_BPM_OFFSET, BPM_EVENT_WRITE_XFER);
@@ -855,7 +859,6 @@ static int gxp_firmware_setup(struct gxp_dev *gxp,
/*verbose=*/true);
if (ret) {
dev_err(gxp->dev, "Failed to power up core %u\n", core);
- gxp_firmware_unload(gxp, core);
return ret;
}
enable_core_interrupts(gxp, phys_core);
@@ -933,7 +936,6 @@ static int gxp_firmware_finish_startup(struct gxp_dev *gxp,
err_firmware_off:
if (gxp_core_boot)
gxp_pm_core_off(gxp, phys_core);
- gxp_firmware_unload(gxp, core);
return ret;
}
@@ -972,8 +974,6 @@ static void gxp_firmware_stop_core(struct gxp_dev *gxp,
gxp_pm_core_off(gxp, phys_core);
}
}
-
- gxp_firmware_unload(gxp, select_core(vd, virt_core, phys_core));
}
int gxp_firmware_run(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
@@ -1007,14 +1007,11 @@ int gxp_firmware_run(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
*/
virt_core = 0;
for (phys_core = 0; phys_core < GXP_NUM_CORES; phys_core++) {
- uint core = select_core(vd, virt_core, phys_core);
-
if (!(core_list & BIT(phys_core)))
continue;
if (!(failed_cores & BIT(phys_core))) {
if (gxp_core_boot)
gxp_pm_core_off(gxp, phys_core);
- gxp_firmware_unload(gxp, core);
}
virt_core++;
}
diff --git a/gxp-firmware.h b/gxp-firmware.h
index fdaff99..9a02570 100644
--- a/gxp-firmware.h
+++ b/gxp-firmware.h
@@ -58,8 +58,6 @@ struct gxp_firmware_manager {
bool is_firmware_requested;
/* Protects `firmwares` and `firmware_name` */
struct mutex dsp_firmware_lock;
- /* FW is readonly, we only need to load it once per image. */
- bool loaded[GXP_NUM_CORES];
/* Firmware status bitmap. Accessors must hold `vd_semaphore`. */
u32 firmware_running;
/*
diff --git a/gxp-mcu-firmware.c b/gxp-mcu-firmware.c
index 6e1031a..898ffd6 100644
--- a/gxp-mcu-firmware.c
+++ b/gxp-mcu-firmware.c
@@ -423,20 +423,6 @@ static int image_config_map(void *data, dma_addr_t daddr, phys_addr_t paddr,
return -EINVAL;
}
- /* TODO(b/268150335): remove this block once MCU FW changes land */
- {
- int i;
-
- for (i = GXP_NUM_CORES; i < GXP_NUM_MAILBOXES; i++) {
- if (daddr == gxp->mbx[i].daddr) {
- dev_warn(
- gxp->dev,
- "Skip mapping in MCU image config: %pad",
- &daddr);
- return 0;
- }
- }
- }
return gxp_iommu_map(gxp, gxp_iommu_get_domain_for_dev(gxp), daddr,
paddr, size, IOMMU_READ | IOMMU_WRITE);
}
@@ -446,15 +432,6 @@ static void image_config_unmap(void *data, dma_addr_t daddr, size_t size,
{
struct gxp_dev *gxp = data;
- /* TODO(b/268150335): remove this block once MCU FW changes land */
- {
- int i;
-
- for (i = GXP_NUM_CORES; i < GXP_NUM_MAILBOXES; i++) {
- if (daddr == gxp->mbx[i].daddr)
- return;
- }
- }
gxp_iommu_unmap(gxp, gxp_iommu_get_domain_for_dev(gxp), daddr, size);
}
@@ -530,7 +507,8 @@ void gxp_mcu_firmware_crash_handler(struct gxp_dev *gxp,
dev_err(gxp->dev, "MCU firmware is crashed, crash_type=%d", crash_type);
- if (crash_type != GCIP_FW_CRASH_UNRECOVERABLE_FAULT)
+ if (crash_type != GCIP_FW_CRASH_UNRECOVERABLE_FAULT &&
+ crash_type != GCIP_FW_CRASH_HW_WDG_TIMEOUT)
return;
dev_err(gxp->dev, "Unrecoverable MCU firmware fault, handle it");
diff --git a/gxp-mcu-firmware.h b/gxp-mcu-firmware.h
index 12b0995..1026988 100644
--- a/gxp-mcu-firmware.h
+++ b/gxp-mcu-firmware.h
@@ -63,10 +63,11 @@ struct gxp_mcu_firmware *gxp_mcu_firmware_of(struct gxp_dev *gxp);
/*
* Handles the MCU firmware crash. It will handle the crash only when the @crash_type is
- * GCIP_FW_CRASH_UNRECOVERABLE_FAULT. Otherwise, it will ignore that crash.
+ * GCIP_FW_CRASH_UNRECOVERABLE_FAULT or GCIP_FW_CRASH_HW_WDG_TIMEOUT. Otherwise, it will ignore
+ * that crash.
*
- * This function will be called from the `gxp-kci.c` when GCIP_RKCI_FIRMWARE_CRASH RKCI is arrived
- * from the MCU firmware side.
+ * This function will be called from the `gxp-kci.c` when GCIP_RKCI_FIRMWARE_CRASH RKCI is received
+ * from the MCU firmware side or from the HW watchdog IRQ handler.
*/
void gxp_mcu_firmware_crash_handler(struct gxp_dev *gxp,
enum gcip_fw_crash_type crash_type);
diff --git a/gxp-mcu-fs.c b/gxp-mcu-fs.c
index 0bca897..f3a59ca 100644
--- a/gxp-mcu-fs.c
+++ b/gxp-mcu-fs.c
@@ -117,150 +117,6 @@ out:
return ret;
}
-static int gxp_ioctl_uci_command_helper(struct gxp_client *client,
- struct gxp_mailbox_command_ioctl *ibuf)
-{
- struct gxp_dev *gxp = client->gxp;
- struct gxp_mcu *mcu = gxp_mcu_of(gxp);
- struct gxp_uci_command cmd;
- int ret;
-
- if (ibuf->virtual_core_id >= GXP_NUM_CORES)
- return -EINVAL;
- down_read(&client->semaphore);
-
- if (!gxp_client_has_available_vd(client, "GXP_MAILBOX_COMMAND")) {
- ret = -ENODEV;
- goto out;
- }
-
- /* Caller must hold BLOCK wakelock */
- if (!client->has_block_wakelock) {
- dev_err(gxp->dev,
- "GXP_MAILBOX_COMMAND requires the client hold a BLOCK wakelock\n");
- ret = -ENODEV;
- goto out;
- }
-
- /* Use at least one core for the command */
- if (ibuf->num_cores == 0)
- ibuf->num_cores = 1;
-
- /* Pack the command structure */
- cmd.core_command_params.address = ibuf->device_address;
- cmd.core_command_params.size = ibuf->size;
- cmd.core_command_params.num_cores = ibuf->num_cores;
- /* Plus 1 to align with power states in MCU firmware. */
- cmd.core_command_params.dsp_operating_point = ibuf->gxp_power_state + 1;
- cmd.core_command_params.memory_operating_point =
- ibuf->memory_power_state;
- /* cmd.seq is assigned by mailbox implementation */
- cmd.type = CORE_COMMAND;
-
- /* TODO(b/248179414): Remove core assignment when MCU fw re-enable sticky core scheduler. */
- {
- int core;
-
- down_read(&gxp->vd_semaphore);
- core = gxp_vd_virt_core_to_phys_core(client->vd,
- ibuf->virtual_core_id);
- up_read(&gxp->vd_semaphore);
- if (core < 0) {
- dev_err(gxp->dev,
- "Mailbox command failed: Invalid virtual core id (%u)\n",
- ibuf->virtual_core_id);
- ret = -EINVAL;
- goto out;
- }
- cmd.core_id = core;
- }
-
- cmd.client_id = client->vd->client_id;
-
- /*
- * TODO(b/248196344): Use the only one permitted eventfd for the virtual device
- * when MCU fw re-enable sticky core scheduler.
- */
- ret = gxp_uci_send_command(
- &mcu->uci, client->vd, &cmd,
- &client->vd->mailbox_resp_queues[UCI_RESOURCE_ID].wait_queue,
- &client->vd->mailbox_resp_queues[UCI_RESOURCE_ID].dest_queue,
- &client->vd->mailbox_resp_queues[UCI_RESOURCE_ID].lock,
- &client->vd->mailbox_resp_queues[UCI_RESOURCE_ID].waitq,
- client->mb_eventfds[ibuf->virtual_core_id]);
- if (ret) {
- dev_err(gxp->dev,
- "Failed to enqueue mailbox command (ret=%d)\n", ret);
- goto out;
- }
- ibuf->sequence_number = cmd.seq;
-
-out:
- up_read(&client->semaphore);
- return ret;
-}
-
-static int
-gxp_ioctl_uci_command_legacy(struct gxp_client *client,
- struct gxp_mailbox_command_ioctl __user *argp)
-{
- struct gxp_mailbox_command_ioctl ibuf;
- int ret;
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
- return -EFAULT;
-
- ret = gxp_ioctl_uci_command_helper(client, &ibuf);
- if (ret)
- return ret;
-
- if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
- return -EFAULT;
-
- return 0;
-}
-
-static int
-gxp_ioctl_uci_response_legacy(struct gxp_client *client,
- struct gxp_mailbox_response_ioctl __user *argp)
-{
- struct gxp_mailbox_response_ioctl ibuf;
- int ret = 0;
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
- return -EFAULT;
-
- down_read(&client->semaphore);
-
- if (!gxp_client_has_available_vd(client, "GXP_MAILBOX_RESPONSE")) {
- ret = -ENODEV;
- goto out;
- }
-
- /* Caller must hold BLOCK wakelock */
- if (!client->has_block_wakelock) {
- dev_err(client->gxp->dev,
- "GXP_MAILBOX_RESPONSE requires the client hold a BLOCK wakelock\n");
- ret = -ENODEV;
- goto out;
- }
-
- ret = gxp_uci_wait_async_response(
- &client->vd->mailbox_resp_queues[UCI_RESOURCE_ID],
- &ibuf.sequence_number, &ibuf.error_code, NULL);
- if (ret)
- goto out;
-
- ibuf.cmd_retval = 0;
- if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
- ret = -EFAULT;
-
-out:
- up_read(&client->semaphore);
-
- return ret;
-}
-
static inline enum gcip_telemetry_type to_gcip_telemetry_type(u8 type)
{
if (type == GXP_TELEMETRY_TYPE_LOGGING)
@@ -307,10 +163,10 @@ long gxp_mcu_ioctl(struct file *file, uint cmd, ulong arg)
return -ENOTTY;
switch (cmd) {
case GXP_MAILBOX_COMMAND:
- ret = gxp_ioctl_uci_command_legacy(client, argp);
+ ret = -EOPNOTSUPP;
break;
case GXP_MAILBOX_RESPONSE:
- ret = gxp_ioctl_uci_response_legacy(client, argp);
+ ret = -EOPNOTSUPP;
break;
case GXP_REGISTER_MCU_TELEMETRY_EVENTFD:
ret = gxp_register_mcu_telemetry_eventfd(client, argp);
diff --git a/gxp-mcu-platform.c b/gxp-mcu-platform.c
index 3a7c7f4..5aca0b6 100644
--- a/gxp-mcu-platform.c
+++ b/gxp-mcu-platform.c
@@ -5,7 +5,9 @@
* Copyright (C) 2022 Google LLC
*/
+#include <linux/interrupt.h>
#include <linux/moduleparam.h>
+#include <linux/of_irq.h>
#include "gxp-config.h"
#include "gxp-internal.h"
@@ -74,20 +76,10 @@ static int allocate_vmbox(struct gxp_dev *gxp, struct gxp_virtual_device *vd)
ret = gxp_kci_allocate_vmbox(kci, client_id, vd->num_cores,
vd->slice_index, vd->first_open);
if (ret) {
- if (ret != GCIP_KCI_ERROR_UNIMPLEMENTED) {
- dev_err(gxp->dev,
- "Failed to allocate VMBox for client %d, TPU client %d: %d",
- client_id, vd->tpu_client_id, ret);
- return ret;
- }
-
- /*
- * TODO(241057541): Remove this conditional branch after the firmware side
- * implements handling allocate_vmbox command.
- */
- dev_info(
- gxp->dev,
- "Allocating VMBox is not implemented from the firmware side");
+ dev_err(gxp->dev,
+ "Failed to allocate VMBox for client %d, TPU client %d: %d",
+ client_id, vd->tpu_client_id, ret);
+ return ret;
}
vd->client_id = client_id;
@@ -136,21 +128,11 @@ static int gxp_mcu_link_offload_vmbox(struct gxp_dev *gxp,
ret = gxp_kci_link_unlink_offload_vmbox(
kci, vd->client_id, offload_client_id, offload_chip_type, true);
if (ret) {
- if (ret != GCIP_KCI_ERROR_UNIMPLEMENTED) {
- dev_err(gxp->dev,
- "Failed to link offload VMBox for client %d, offload client %u, offload chip type %d: %d",
- vd->client_id, offload_client_id,
- offload_chip_type, ret);
- return ret;
- }
-
- /*
- * TODO(241057541): Remove this conditional branch after the firmware side
- * implements handling link_offload_vmbox command.
- */
- dev_info(
- gxp->dev,
- "Linking offload VMBox is not implemented from the firmware side");
+ dev_err(gxp->dev,
+ "Failed to link offload VMBox for client %d, offload client %u, offload chip type %d: %d",
+ vd->client_id, offload_client_id, offload_chip_type,
+ ret);
+ return ret;
}
return 0;
@@ -167,21 +149,11 @@ static void gxp_mcu_unlink_offload_vmbox(struct gxp_dev *gxp,
ret = gxp_kci_link_unlink_offload_vmbox(kci, vd->client_id,
offload_client_id,
offload_chip_type, false);
- if (ret) {
- /*
- * TODO(241057541): Remove this conditional branch after the firmware side
- * implements handling allocate_vmbox command.
- */
- if (ret == GCIP_KCI_ERROR_UNIMPLEMENTED)
- dev_info(
- gxp->dev,
- "Unlinking offload VMBox is not implemented from the firmware side");
- else
- dev_err(gxp->dev,
- "Failed to unlink offload VMBox for client %d, offload client %u, offload chip type %d: %d",
- vd->client_id, offload_client_id,
- offload_chip_type, ret);
- }
+ if (ret)
+ dev_err(gxp->dev,
+ "Failed to unlink offload VMBox for client %d, offload client %u, offload chip type %d: %d",
+ vd->client_id, offload_client_id, offload_chip_type,
+ ret);
}
static int gxp_mcu_platform_after_vd_block_ready(struct gxp_dev *gxp,
@@ -303,6 +275,56 @@ static void gxp_mcu_before_unmap_tpu_mbx_queue(struct gxp_dev *gxp,
#endif /* HAS_TPU_EXT */
+static irqreturn_t mcu_wdg_irq_handler(int irq, void *arg)
+{
+ struct gxp_dev *gxp = (struct gxp_dev *)arg;
+ u32 wdg_control_val;
+
+ /* Clear the interrupt and disable the WDG. */
+ wdg_control_val = gxp_read_32(gxp, GXP_REG_WDOG_CONTROL);
+ wdg_control_val |= (1 << GXP_WDG_INT_CLEAR_BIT);
+ wdg_control_val &= ~(1 << GXP_WDG_ENABLE_BIT);
+ gxp_write_32(gxp, GXP_REG_WDOG_CONTROL, wdg_control_val);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t mcu_wdg_threaded_handler(int irq, void *arg)
+{
+ struct gxp_dev *gxp = (struct gxp_dev *)arg;
+
+ gxp_mcu_firmware_crash_handler(gxp, GCIP_FW_CRASH_HW_WDG_TIMEOUT);
+
+ return IRQ_HANDLED;
+}
+
+static int gxp_mcu_register_wdg_irq(struct gxp_dev *gxp)
+{
+ struct device *dev = gxp->dev;
+ unsigned int wdg_virq;
+ int ret;
+
+ wdg_virq = irq_of_parse_and_map(dev->of_node, GXP_WDG_DT_IRQ_INDEX);
+ if (!wdg_virq) {
+ dev_warn(dev,
+ "Unable to parse interrupt for HW WDG from the DT");
+ } else {
+ ret = devm_request_threaded_irq(dev, wdg_virq,
+ mcu_wdg_irq_handler,
+ mcu_wdg_threaded_handler,
+ /*flags=*/0, "aurora_mcu_wdg",
+ (void *)gxp);
+ if (ret) {
+ dev_err(dev,
+ "Unable to register MCU WDG IRQ; error=%d\n",
+ ret);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
struct gxp_mcu *gxp_mcu_of(struct gxp_dev *gxp)
{
return &(to_mcu_dev(gxp)->mcu);
@@ -331,9 +353,15 @@ enum gxp_chip_revision gxp_get_chip_revision(struct gxp_dev *gxp)
int gxp_mcu_platform_after_probe(struct gxp_dev *gxp)
{
+ int ret;
+
if (gxp_is_direct_mode(gxp))
return 0;
+ ret = gxp_mcu_register_wdg_irq(gxp);
+ if (ret)
+ return ret;
+
gxp_usage_stats_init(gxp);
return gxp_mcu_init(gxp, gxp_mcu_of(gxp));
}
diff --git a/gxp-mcu.c b/gxp-mcu.c
index 0aab0a2..7d6d2c4 100644
--- a/gxp-mcu.c
+++ b/gxp-mcu.c
@@ -5,15 +5,12 @@
* Copyright (C) 2022 Google LLC
*/
-#include <linux/iommu.h>
#include <linux/sizes.h>
#include <gcip/gcip-mem-pool.h>
#include "gxp-config.h"
-#include "gxp-dma.h"
#include "gxp-internal.h"
-#include "gxp-mailbox.h"
#include "gxp-mcu-firmware.h"
#include "gxp-mcu.h"
#include "gxp-uci.h"
@@ -74,43 +71,6 @@ static int gxp_mcu_mem_pools_init(struct gxp_dev *gxp, struct gxp_mcu *mcu)
return 0;
}
-static void gxp_mcu_unmap_resources(struct gxp_mcu *mcu)
-{
- struct gxp_dev *gxp = mcu->gxp;
- struct gxp_iommu_domain *gdomain = gxp_iommu_get_domain_for_dev(gxp);
- int i;
-
- for (i = GXP_NUM_CORES; i < GXP_NUM_MAILBOXES; i++)
- gxp_iommu_unmap(gxp, gdomain, gxp->mbx[i].daddr, gxp->mbx[i].size);
-}
-
-/* TODO(b/268150335): remove this function once MCU FW change lands */
-static int gxp_mcu_map_resources(struct gxp_dev *gxp, struct gxp_mcu *mcu)
-{
- struct gxp_iommu_domain *gdomain = gxp_iommu_get_domain_for_dev(gxp);
- int i, ret;
-
- for (i = GXP_NUM_CORES; i < GXP_NUM_MAILBOXES; i++) {
- gxp->mbx[i].daddr = GXP_MCU_NS_MAILBOX(i - GXP_NUM_CORES);
- ret = gxp_iommu_map(gxp, gdomain, gxp->mbx[i].daddr,
- gxp->mbx[i].paddr + MAILBOX_DEVICE_INTERFACE_OFFSET,
- gxp->mbx[i].size, IOMMU_READ | IOMMU_WRITE);
- if (ret)
- goto err;
- }
-
- return ret;
-
-err:
- /*
- * Attempt to unmap all resources.
- * Any resource that hadn't been mapped yet will cause `iommu_unmap()`
- * to return immediately, so its safe to try to unmap everything.
- */
- gxp_mcu_unmap_resources(mcu);
- return ret;
-}
-
static void gxp_mcu_mem_pools_exit(struct gxp_mcu *mcu)
{
gcip_mem_pool_exit(&mcu->remap_secure_pool);
@@ -156,16 +116,13 @@ int gxp_mcu_init(struct gxp_dev *gxp, struct gxp_mcu *mcu)
ret = gxp_alloc_shared_buffer(gxp, mcu);
if (ret)
goto err_pools_exit;
- ret = gxp_mcu_map_resources(gxp, mcu);
- if (ret)
- goto err_free_shared_buffer;
/*
* MCU telemetry must be initialized before UCI and KCI to match the
* .log_buffer address in the firmware linker.ld.
*/
ret = gxp_mcu_telemetry_init(mcu);
if (ret)
- goto err_mcu_unmap_resources;
+ goto err_free_shared_buffer;
ret = gxp_uci_init(mcu);
if (ret)
goto err_telemetry_exit;
@@ -178,8 +135,6 @@ err_uci_exit:
gxp_uci_exit(&mcu->uci);
err_telemetry_exit:
gxp_mcu_telemetry_exit(mcu);
-err_mcu_unmap_resources:
- gxp_mcu_unmap_resources(mcu);
err_free_shared_buffer:
gxp_free_shared_buffer(mcu);
err_pools_exit:
@@ -194,7 +149,6 @@ void gxp_mcu_exit(struct gxp_mcu *mcu)
gxp_kci_exit(&mcu->kci);
gxp_uci_exit(&mcu->uci);
gxp_mcu_telemetry_exit(mcu);
- gxp_mcu_unmap_resources(mcu);
gxp_free_shared_buffer(mcu);
gxp_mcu_mem_pools_exit(mcu);
gxp_mcu_firmware_exit(&mcu->fw);