summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorRobin Peng <robinpeng@google.com>2022-10-22 13:42:43 +0000
committerRobin Peng <robinpeng@google.com>2022-10-22 13:42:43 +0000
commit570942d2b2be572cedb9c24e54a4b130d660f8da (patch)
tree6c2ed6cf1ef11c351ffb8ece9b388ac1f5d3099a
parent70a30ff938243a3d71de2a4801ffc2df03a12948 (diff)
parentc99afef64f5492b8fa1a393471a7a7195f7952fa (diff)
downloadrio-570942d2b2be572cedb9c24e54a4b130d660f8da.tar.gz
Merge android13-gs-pixel-5.15 into android14-gs-pixel-5.15
Bug: 236259002 Signed-off-by: Robin Peng <robinpeng@google.com> Change-Id: I781d4a4a7f3935d2f67a0508f5071008ffe633e1
-rw-r--r--drivers/edgetpu/edgetpu-mobile-platform.c22
-rw-r--r--drivers/edgetpu/edgetpu-mobile-platform.h6
-rw-r--r--drivers/edgetpu/edgetpu-pm.c13
-rw-r--r--drivers/edgetpu/edgetpu-pm.h12
-rw-r--r--drivers/edgetpu/edgetpu-soc.h5
-rw-r--r--drivers/edgetpu/edgetpu-telemetry.c12
-rw-r--r--drivers/edgetpu/gcip-kernel-driver/drivers/gcip/gcip-kci.c7
-rw-r--r--drivers/edgetpu/gcip-kernel-driver/drivers/gcip/gcip-mailbox.c109
-rw-r--r--drivers/edgetpu/gcip-kernel-driver/drivers/gcip/gcip-telemetry.c2
-rw-r--r--drivers/edgetpu/gcip-kernel-driver/include/gcip/gcip-kci.h2
-rw-r--r--drivers/edgetpu/gcip-kernel-driver/include/gcip/gcip-mailbox.h78
-rw-r--r--drivers/edgetpu/include/linux/acpm_dvfs.h45
-rw-r--r--drivers/edgetpu/mobile-pm.c105
-rw-r--r--drivers/edgetpu/mobile-soc-gsx01.c178
-rw-r--r--drivers/edgetpu/mobile-soc-gsx01.h2
-rw-r--r--drivers/edgetpu/mobile-thermal.c2
-rw-r--r--drivers/edgetpu/rio-platform.c2
-rw-r--r--drivers/edgetpu/rio-pm.c17
18 files changed, 327 insertions, 292 deletions
diff --git a/drivers/edgetpu/edgetpu-mobile-platform.c b/drivers/edgetpu/edgetpu-mobile-platform.c
index c56990a..f9fe1fa 100644
--- a/drivers/edgetpu/edgetpu-mobile-platform.c
+++ b/drivers/edgetpu/edgetpu-mobile-platform.c
@@ -221,6 +221,22 @@ void edgetpu_chip_remove_mmu(struct edgetpu_dev *etdev)
edgetpu_mmu_detach(etdev);
}
+static void edgetpu_platform_parse_pmu(struct edgetpu_mobile_platform_dev *etmdev)
+{
+ struct edgetpu_dev *etdev = &etmdev->edgetpu_dev;
+ struct device *dev = etdev->dev;
+ u32 reg;
+
+ if (of_find_property(dev->of_node, "pmu-status-base", NULL) &&
+ !of_property_read_u32_index(dev->of_node, "pmu-status-base", 0, &reg)) {
+ etmdev->pmu_status = devm_ioremap(dev, reg, 0x4);
+ if (!etmdev->pmu_status)
+ etdev_err(etdev, "Using ACPM for blk status query\n");
+ } else {
+ etdev_warn(etdev, "Failed to find PMU register base\n");
+ }
+}
+
static int edgetpu_platform_setup_irq(struct edgetpu_mobile_platform_dev *etmdev)
{
struct edgetpu_dev *etdev = &etmdev->edgetpu_dev;
@@ -349,6 +365,12 @@ static int edgetpu_mobile_platform_probe(struct platform_device *pdev,
goto out_cleanup_fw;
}
+ /*
+ * Parses PMU before edgetpu_device_add so edgetpu_chip_pm_create can know whether to set
+ * the is_block_down op.
+ */
+ edgetpu_platform_parse_pmu(etmdev);
+
ret = edgetpu_device_add(etdev, &regs, iface_params, ARRAY_SIZE(iface_params));
if (ret) {
dev_err(dev, "edgetpu setup failed: %d", ret);
diff --git a/drivers/edgetpu/edgetpu-mobile-platform.h b/drivers/edgetpu/edgetpu-mobile-platform.h
index 73e37c6..55f465f 100644
--- a/drivers/edgetpu/edgetpu-mobile-platform.h
+++ b/drivers/edgetpu/edgetpu-mobile-platform.h
@@ -33,8 +33,8 @@ struct edgetpu_mobile_platform_pwr {
int (*lpm_up)(struct edgetpu_dev *etdev);
void (*lpm_down)(struct edgetpu_dev *etdev);
- /* Block shutdown callback, may be NULL */
- void (*block_down)(struct edgetpu_dev *etdev);
+ /* Block shutdown status callback, may be NULL */
+ bool (*is_block_down)(struct edgetpu_dev *etdev);
/* After firmware is started on power up */
void (*post_fw_start)(struct edgetpu_dev *etdev);
@@ -79,6 +79,8 @@ struct edgetpu_mobile_platform_dev {
int n_irq;
/* Array of IRQ numbers */
int *irq;
+ /* PMU status base address for block status, maybe NULL */
+ void __iomem *pmu_status;
/* callbacks for chip-dependent implementations */
diff --git a/drivers/edgetpu/edgetpu-pm.c b/drivers/edgetpu/edgetpu-pm.c
index b84febb..b6a4542 100644
--- a/drivers/edgetpu/edgetpu-pm.c
+++ b/drivers/edgetpu/edgetpu-pm.c
@@ -7,6 +7,7 @@
#include <linux/iopoll.h>
#include <linux/mutex.h>
+#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
@@ -253,10 +254,9 @@ bool edgetpu_is_powered(struct edgetpu_dev *etdev)
return etpm->p->power_up_count;
}
-#if IS_ENABLED(CONFIG_PM_SLEEP)
-
-int edgetpu_pm_suspend(struct edgetpu_dev *etdev)
+static int __maybe_unused edgetpu_pm_suspend(struct device *dev)
{
+ struct edgetpu_dev *etdev = dev_get_drvdata(dev);
struct edgetpu_pm *etpm = etdev->pm;
struct edgetpu_list_device_client *lc;
@@ -282,8 +282,9 @@ int edgetpu_pm_suspend(struct edgetpu_dev *etdev)
return -EAGAIN;
}
-int edgetpu_pm_resume(struct edgetpu_dev *etdev)
+static int __maybe_unused edgetpu_pm_resume(struct device *dev)
{
+ struct edgetpu_dev *etdev = dev_get_drvdata(dev);
struct edgetpu_pm *etpm = etdev->pm;
if (etpm && etpm->p->power_up_count)
@@ -294,4 +295,6 @@ int edgetpu_pm_resume(struct edgetpu_dev *etdev)
return 0;
}
-#endif /* IS_ENABLED(CONFIG_PM_SLEEP) */
+const struct dev_pm_ops edgetpu_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(edgetpu_pm_suspend, edgetpu_pm_resume)
+};
diff --git a/drivers/edgetpu/edgetpu-pm.h b/drivers/edgetpu/edgetpu-pm.h
index 4acdb45..1838c75 100644
--- a/drivers/edgetpu/edgetpu-pm.h
+++ b/drivers/edgetpu/edgetpu-pm.h
@@ -8,6 +8,8 @@
#ifndef __EDGETPU_PM_H__
#define __EDGETPU_PM_H__
+#include <linux/pm.h>
+
#include "edgetpu-internal.h"
struct edgetpu_pm_private;
@@ -29,6 +31,8 @@ struct edgetpu_pm {
struct edgetpu_pm_private *p;
};
+extern const struct dev_pm_ops edgetpu_pm_ops;
+
/*
* These mimic the pm_runtime_{get|put} functions to keep a reference count
* of requests in order to keep the device up and turn it off if the platform
@@ -92,12 +96,4 @@ void edgetpu_pm_shutdown(struct edgetpu_dev *etdev, bool force);
/* Check if device is powered on. power_up_count is not protected by a lock */
bool edgetpu_is_powered(struct edgetpu_dev *etdev);
-#if IS_ENABLED(CONFIG_PM_SLEEP)
-
-int edgetpu_pm_suspend(struct edgetpu_dev *etdev);
-
-int edgetpu_pm_resume(struct edgetpu_dev *etdev);
-
-#endif /* IS_ENABLED(CONFIG_PM_SLEEP) */
-
#endif /* __EDGETPU_PM_H__ */
diff --git a/drivers/edgetpu/edgetpu-soc.h b/drivers/edgetpu/edgetpu-soc.h
index bba617a..53d06d2 100644
--- a/drivers/edgetpu/edgetpu-soc.h
+++ b/drivers/edgetpu/edgetpu-soc.h
@@ -27,14 +27,11 @@ int edgetpu_soc_prepare_firmware(struct edgetpu_dev *etdev);
* @flags can be used by platform-specific code to pass additional flags to the SoC
* handler; for calls from generic code this value must be zero.
*/
-long edgetpu_soc_pm_get_rate(int flags);
+long edgetpu_soc_pm_get_rate(struct edgetpu_dev *etdev, int flags);
/* Power management set TPU clock rate */
int edgetpu_soc_pm_set_rate(unsigned long rate);
-/* Set initial TPU freq */
-int edgetpu_soc_pm_set_init_freq(unsigned long freq);
-
/* Set PM policy */
int edgetpu_soc_pm_set_policy(u64 val);
diff --git a/drivers/edgetpu/edgetpu-telemetry.c b/drivers/edgetpu/edgetpu-telemetry.c
index 9376fba..f172e23 100644
--- a/drivers/edgetpu/edgetpu-telemetry.c
+++ b/drivers/edgetpu/edgetpu-telemetry.c
@@ -64,15 +64,23 @@ int edgetpu_telemetry_init(struct edgetpu_dev *etdev,
ret = gcip_telemetry_init(etdev->dev, &etdev->telemetry[i].trace, "telemetry_trace",
trace_mem[i].vaddr, EDGETPU_TELEMETRY_TRACE_BUFFER_SIZE,
gcip_telemetry_fw_trace);
- if (ret)
+ if (ret) {
+ gcip_telemetry_exit(&etdev->telemetry[i].log);
break;
+ }
etdev->telemetry[i].trace_mem = trace_mem[i];
#endif
}
if (ret)
- edgetpu_telemetry_exit(etdev);
+ while (i--) {
+#if IS_ENABLED(CONFIG_EDGETPU_TELEMETRY_TRACE)
+ gcip_telemetry_exit(&etdev->telemetry[i].trace);
+#endif
+ gcip_telemetry_exit(&etdev->telemetry[i].log);
+
+ }
return ret;
}
diff --git a/drivers/edgetpu/gcip-kernel-driver/drivers/gcip/gcip-kci.c b/drivers/edgetpu/gcip-kernel-driver/drivers/gcip/gcip-kci.c
index 215c533..e11d2a1 100644
--- a/drivers/edgetpu/gcip-kernel-driver/drivers/gcip/gcip-kci.c
+++ b/drivers/edgetpu/gcip-kernel-driver/drivers/gcip/gcip-kci.c
@@ -245,8 +245,9 @@ static bool gcip_kci_before_handle_resp(struct gcip_mailbox *mailbox, const void
int ret = gcip_reverse_kci_add_resp(kci, elem);
if (ret)
- dev_warn(kci->dev, "Failed to handle reverse KCI code %u (%d)\n",
- elem->code, ret);
+ dev_warn_ratelimited(kci->dev,
+ "Failed to handle reverse KCI code %u (%d)\n",
+ elem->code, ret);
return false;
}
@@ -448,7 +449,7 @@ static inline void gcip_kci_set_data(struct gcip_kci *kci, void *data)
kci->data = data;
}
-int gcip_kci_init(struct gcip_kci *kci, struct gcip_kci_args *args)
+int gcip_kci_init(struct gcip_kci *kci, const struct gcip_kci_args *args)
{
int ret;
struct gcip_mailbox_args mailbox_args;
diff --git a/drivers/edgetpu/gcip-kernel-driver/drivers/gcip/gcip-mailbox.c b/drivers/edgetpu/gcip-kernel-driver/drivers/gcip/gcip-mailbox.c
index 305407d..fc89d4d 100644
--- a/drivers/edgetpu/gcip-kernel-driver/drivers/gcip/gcip-mailbox.c
+++ b/drivers/edgetpu/gcip-kernel-driver/drivers/gcip/gcip-mailbox.c
@@ -43,7 +43,7 @@
struct gcip_mailbox_wait_list_elem {
struct list_head list;
void *resp;
- struct gcip_mailbox_async_response *async_resp;
+ struct gcip_mailbox_resp_awaiter *awaiter;
};
/*
@@ -74,7 +74,7 @@ static void gcip_mailbox_del_wait_resp(struct gcip_mailbox *mailbox, void *resp)
}
/*
- * Adds @resp to @mailbox->wait_list. If @async_resp is not NULL, the @resp is asynchronous.
+ * Adds @resp to @mailbox->wait_list. If @awaiter is not NULL, the @resp is asynchronous.
* Otherwise, the @resp is synchronous.
*
* wait_list is a FIFO queue, with sequence number in increasing order.
@@ -82,7 +82,7 @@ static void gcip_mailbox_del_wait_resp(struct gcip_mailbox *mailbox, void *resp)
* Returns 0 on success, or -ENOMEM if failed on allocation.
*/
static int gcip_mailbox_push_wait_resp(struct gcip_mailbox *mailbox, void *resp,
- struct gcip_mailbox_async_response *async_resp)
+ struct gcip_mailbox_resp_awaiter *awaiter)
{
struct gcip_mailbox_wait_list_elem *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
unsigned long flags;
@@ -91,7 +91,7 @@ static int gcip_mailbox_push_wait_resp(struct gcip_mailbox *mailbox, void *resp,
return -ENOMEM;
entry->resp = resp;
- entry->async_resp = async_resp;
+ entry->awaiter = awaiter;
ACQUIRE_WAIT_LIST_LOCK(true, &flags);
list_add_tail(&entry->list, &mailbox->wait_list);
RELEASE_WAIT_LIST_LOCK(true, flags);
@@ -103,10 +103,10 @@ static int gcip_mailbox_push_wait_resp(struct gcip_mailbox *mailbox, void *resp,
* Pushes @cmd to the command queue of mailbox and returns. @resp should be passed if the request
* is synchronous and want to get the response. If @resp is NULL even though the request is
* synchronous, the @cmd will be put into the queue, but the caller may not wait the response and
- * ignore it. If the request is async, @async_resp should be passed too.
+ * ignore it. If the request is async, @awaiter should be passed too.
*/
static int gcip_mailbox_enqueue_cmd(struct gcip_mailbox *mailbox, void *cmd, void *resp,
- struct gcip_mailbox_async_response *async_resp)
+ struct gcip_mailbox_resp_awaiter *awaiter)
{
int ret = 0;
u32 tail;
@@ -139,7 +139,7 @@ static int gcip_mailbox_enqueue_cmd(struct gcip_mailbox *mailbox, void *cmd, voi
/* Adds @resp to the wait_list only if the cmd can be pushed successfully. */
SET_RESP_ELEM_SEQ(resp, GET_CMD_ELEM_SEQ(cmd));
SET_RESP_ELEM_STATUS(resp, GCIP_MAILBOX_STATUS_WAITING_RESPONSE);
- ret = gcip_mailbox_push_wait_resp(mailbox, resp, async_resp);
+ ret = gcip_mailbox_push_wait_resp(mailbox, resp, awaiter);
if (ret)
goto out;
}
@@ -195,7 +195,7 @@ out:
static void gcip_mailbox_handle_response(struct gcip_mailbox *mailbox, void *resp)
{
struct gcip_mailbox_wait_list_elem *cur, *nxt;
- struct gcip_mailbox_async_response *async_resp;
+ struct gcip_mailbox_resp_awaiter *awaiter;
unsigned long flags;
u64 cur_seq, seq = GET_RESP_ELEM_SEQ(resp);
@@ -219,19 +219,18 @@ static void gcip_mailbox_handle_response(struct gcip_mailbox *mailbox, void *res
if (cur_seq == seq) {
memcpy(cur->resp, resp, mailbox->resp_elem_size);
list_del(&cur->list);
- if (cur->async_resp) {
- async_resp = cur->async_resp;
- cancel_delayed_work(&async_resp->timeout_work);
+ if (cur->awaiter) {
+ awaiter = cur->awaiter;
+ cancel_delayed_work(&awaiter->timeout_work);
/*
- * If `handle_async_resp_arrived` callback is defined, @async_resp
+ * If `handle_awaiter_arrived` callback is defined, @awaiter
* will be released from the implementation side. Otherwise, it
* should be freed from here.
*/
- if (mailbox->ops->handle_async_resp_arrived)
- mailbox->ops->handle_async_resp_arrived(mailbox,
- async_resp);
+ if (mailbox->ops->handle_awaiter_arrived)
+ mailbox->ops->handle_awaiter_arrived(mailbox, awaiter);
else
- gcip_mailbox_release_async_resp(cur->async_resp);
+ gcip_mailbox_release_awaiter(cur->awaiter);
}
kfree(cur);
break;
@@ -360,9 +359,9 @@ static int gcip_mailbox_fetch_one_response(struct gcip_mailbox *mailbox, void *r
/* Handles the timed out asynchronous commands. */
static void gcip_mailbox_async_cmd_timeout_work(struct work_struct *work)
{
- struct gcip_mailbox_async_response *async_resp =
- container_of(work, struct gcip_mailbox_async_response, timeout_work.work);
- struct gcip_mailbox *mailbox = async_resp->mailbox;
+ struct gcip_mailbox_resp_awaiter *awaiter =
+ container_of(work, struct gcip_mailbox_resp_awaiter, timeout_work.work);
+ struct gcip_mailbox *mailbox = awaiter->mailbox;
/*
* This function will acquire the mailbox wait_list_lock. This means if
@@ -372,23 +371,23 @@ static void gcip_mailbox_async_cmd_timeout_work(struct work_struct *work)
* Once this function has the wait_list_lock, no future response
* processing will begin until this response has been removed.
*/
- gcip_mailbox_del_wait_resp(mailbox, async_resp->resp);
+ gcip_mailbox_del_wait_resp(mailbox, awaiter->resp);
/*
- * Handle timed out async_resp. If `handle_async_resp_timedout` is defined, @async_resp
+ * Handle timed out awaiter. If `handle_awaiter_timedout` is defined, @awaiter
* will be released from the implementation side. Otherwise, it should be freed from here.
*/
- if (mailbox->ops->handle_async_resp_timedout)
- mailbox->ops->handle_async_resp_timedout(mailbox, async_resp);
+ if (mailbox->ops->handle_awaiter_timedout)
+ mailbox->ops->handle_awaiter_timedout(mailbox, awaiter);
else
- gcip_mailbox_release_async_resp(async_resp);
+ gcip_mailbox_release_awaiter(awaiter);
}
/* Cleans up all the asynchronous responses which are not responded yet. */
-static void gcip_mailbox_flush_async_resp(struct gcip_mailbox *mailbox)
+static void gcip_mailbox_flush_awaiter(struct gcip_mailbox *mailbox)
{
struct gcip_mailbox_wait_list_elem *cur, *nxt;
- struct gcip_mailbox_async_response *async_resp;
+ struct gcip_mailbox_resp_awaiter *awaiter;
struct list_head resps_to_flush;
/* If mailbox->ops is NULL, the mailbox is already released. */
@@ -405,16 +404,16 @@ static void gcip_mailbox_flush_async_resp(struct gcip_mailbox *mailbox)
ACQUIRE_WAIT_LIST_LOCK(false, NULL);
list_for_each_entry_safe (cur, nxt, &mailbox->wait_list, list) {
list_del(&cur->list);
- if (cur->async_resp) {
+ if (cur->awaiter) {
list_add_tail(&cur->list, &resps_to_flush);
/*
* Clear the response's destination queue so that if the
* timeout worker is running, it won't try to process
* this response after `wait_list_lock` is released.
*/
- async_resp = cur->async_resp;
- if (mailbox->ops->flush_async_resp)
- mailbox->ops->flush_async_resp(mailbox, async_resp);
+ awaiter = cur->awaiter;
+ if (mailbox->ops->flush_awaiter)
+ mailbox->ops->flush_awaiter(mailbox, awaiter);
} else {
dev_warn(mailbox->dev,
"Unexpected synchronous command pending on mailbox release\n");
@@ -429,9 +428,9 @@ static void gcip_mailbox_flush_async_resp(struct gcip_mailbox *mailbox)
*/
list_for_each_entry_safe (cur, nxt, &resps_to_flush, list) {
list_del(&cur->list);
- async_resp = cur->async_resp;
- gcip_mailbox_cancel_async_resp_timeout(async_resp);
- gcip_mailbox_release_async_resp(async_resp);
+ awaiter = cur->awaiter;
+ gcip_mailbox_cancel_awaiter_timeout(awaiter);
+ gcip_mailbox_release_awaiter(awaiter);
kfree(cur);
}
}
@@ -507,7 +506,7 @@ err_unset_data:
void gcip_mailbox_release(struct gcip_mailbox *mailbox)
{
- gcip_mailbox_flush_async_resp(mailbox);
+ gcip_mailbox_flush_awaiter(mailbox);
gcip_mailbox_set_ops(mailbox, NULL);
gcip_mailbox_set_data(mailbox, NULL);
}
@@ -563,46 +562,46 @@ int gcip_mailbox_send_cmd(struct gcip_mailbox *mailbox, void *cmd, void *resp)
return 0;
}
-struct gcip_mailbox_async_response *gcip_mailbox_put_cmd(struct gcip_mailbox *mailbox, void *cmd,
- void *resp, void *data)
+struct gcip_mailbox_resp_awaiter *gcip_mailbox_put_cmd(struct gcip_mailbox *mailbox, void *cmd,
+ void *resp, void *data)
{
- struct gcip_mailbox_async_response *async_resp;
+ struct gcip_mailbox_resp_awaiter *awaiter;
int ret;
- async_resp = kzalloc(sizeof(*async_resp), GFP_KERNEL);
- if (!async_resp)
+ awaiter = kzalloc(sizeof(*awaiter), GFP_KERNEL);
+ if (!awaiter)
return ERR_PTR(-ENOMEM);
- async_resp->resp = resp;
- async_resp->mailbox = mailbox;
- async_resp->data = data;
- async_resp->release_data = mailbox->ops->release_async_resp_data;
+ awaiter->resp = resp;
+ awaiter->mailbox = mailbox;
+ awaiter->data = data;
+ awaiter->release_data = mailbox->ops->release_awaiter_data;
- INIT_DELAYED_WORK(&async_resp->timeout_work, gcip_mailbox_async_cmd_timeout_work);
- schedule_delayed_work(&async_resp->timeout_work, msecs_to_jiffies(mailbox->timeout));
+ INIT_DELAYED_WORK(&awaiter->timeout_work, gcip_mailbox_async_cmd_timeout_work);
+ schedule_delayed_work(&awaiter->timeout_work, msecs_to_jiffies(mailbox->timeout));
- ret = gcip_mailbox_enqueue_cmd(mailbox, cmd, async_resp->resp, async_resp);
+ ret = gcip_mailbox_enqueue_cmd(mailbox, cmd, awaiter->resp, awaiter);
if (ret)
goto err_free_resp;
- return async_resp;
+ return awaiter;
err_free_resp:
- gcip_mailbox_cancel_async_resp_timeout(async_resp);
- kfree(async_resp);
+ gcip_mailbox_cancel_awaiter_timeout(awaiter);
+ kfree(awaiter);
return ERR_PTR(ret);
}
-void gcip_mailbox_cancel_async_resp_timeout(struct gcip_mailbox_async_response *async_resp)
+void gcip_mailbox_cancel_awaiter_timeout(struct gcip_mailbox_resp_awaiter *awaiter)
{
- cancel_delayed_work_sync(&async_resp->timeout_work);
+ cancel_delayed_work_sync(&awaiter->timeout_work);
}
-void gcip_mailbox_release_async_resp(struct gcip_mailbox_async_response *async_resp)
+void gcip_mailbox_release_awaiter(struct gcip_mailbox_resp_awaiter *awaiter)
{
- if (async_resp->release_data)
- async_resp->release_data(async_resp->data);
- kfree(async_resp);
+ if (awaiter->release_data)
+ awaiter->release_data(awaiter->data);
+ kfree(awaiter);
}
void gcip_mailbox_consume_one_response(struct gcip_mailbox *mailbox, void *resp)
diff --git a/drivers/edgetpu/gcip-kernel-driver/drivers/gcip/gcip-telemetry.c b/drivers/edgetpu/gcip-kernel-driver/drivers/gcip/gcip-telemetry.c
index 3f4a8aa..f557c24 100644
--- a/drivers/edgetpu/gcip-kernel-driver/drivers/gcip/gcip-telemetry.c
+++ b/drivers/edgetpu/gcip-kernel-driver/drivers/gcip/gcip-telemetry.c
@@ -233,7 +233,7 @@ int gcip_telemetry_init(struct device *dev, struct gcip_telemetry *tel, const ch
tel->name = name;
tel->dev = dev;
- tel->header = (struct gcip_telemetry_header *)vaddr;
+ tel->header = vaddr;
tel->header->head = 0;
tel->header->tail = 0;
tel->header->size = size;
diff --git a/drivers/edgetpu/gcip-kernel-driver/include/gcip/gcip-kci.h b/drivers/edgetpu/gcip-kernel-driver/include/gcip/gcip-kci.h
index c4b78be..74e44ae 100644
--- a/drivers/edgetpu/gcip-kernel-driver/include/gcip/gcip-kci.h
+++ b/drivers/edgetpu/gcip-kernel-driver/include/gcip/gcip-kci.h
@@ -281,7 +281,7 @@ struct gcip_kci_args {
};
/* Initializes a KCI object. */
-int gcip_kci_init(struct gcip_kci *kci, struct gcip_kci_args *args);
+int gcip_kci_init(struct gcip_kci *kci, const struct gcip_kci_args *args);
/* Cancels KCI and reverse KCI workers and workers that may send KCIs. */
void gcip_kci_cancel_work_queues(struct gcip_kci *kci);
diff --git a/drivers/edgetpu/gcip-kernel-driver/include/gcip/gcip-mailbox.h b/drivers/edgetpu/gcip-kernel-driver/include/gcip/gcip-mailbox.h
index 02af65e..9ea7876 100644
--- a/drivers/edgetpu/gcip-kernel-driver/include/gcip/gcip-mailbox.h
+++ b/drivers/edgetpu/gcip-kernel-driver/include/gcip/gcip-mailbox.h
@@ -88,7 +88,7 @@ static inline bool gcip_valid_circ_queue_size(u32 size, u32 wrap_bit)
struct gcip_mailbox;
/* Wrapper struct for responses consumed by a thread other than the one which sent the command. */
-struct gcip_mailbox_async_response {
+struct gcip_mailbox_resp_awaiter {
/* Response. */
void *resp;
/* The work which will be executed when the timeout occurs. */
@@ -102,7 +102,7 @@ struct gcip_mailbox_async_response {
void *data;
/*
* The callback for releasing the @data.
- * It will be set as @release_async_resp_data of struct gcip_mailbox_ops.
+ * It will be set as @release_awaiter_data of struct gcip_mailbox_ops.
*/
void (*release_data)(void *data);
};
@@ -282,39 +282,39 @@ struct gcip_mailbox_ops {
bool (*before_handle_resp)(struct gcip_mailbox *mailbox, const void *resp);
/*
* Handles the asynchronous response which arrives well. How to handle it depends on the
- * chip implementation. However, @async_resp should be released by calling the
- * `gcip_mailbox_release_async_resp` function when the kernel driver doesn't need
- * @async_resp anymore. This is called with the `wait_list_lock` being held.
+ * chip implementation. However, @awaiter should be released by calling the
+ * `gcip_mailbox_release_awaiter` function when the kernel driver doesn't need
+ * @awaiter anymore. This is called with the `wait_list_lock` being held.
* Context: normal and in_interrupt().
*/
- void (*handle_async_resp_arrived)(struct gcip_mailbox *mailbox,
- struct gcip_mailbox_async_response *async_resp);
+ void (*handle_awaiter_arrived)(struct gcip_mailbox *mailbox,
+ struct gcip_mailbox_resp_awaiter *awaiter);
/*
* Handles the timed out asynchronous response. How to handle it depends on the chip
- * implementation. However, @async_resp should be released by calling the
- * `gcip_mailbox_release_async_resp` function when the kernel driver doesn't need
- * @async_resp anymore. This is called without holding any locks.
+ * implementation. However, @awaiter should be released by calling the
+ * `gcip_mailbox_release_awaiter` function when the kernel driver doesn't need
+ * @awaiter anymore. This is called without holding any locks.
* Context: normal and in_interrupt().
*/
- void (*handle_async_resp_timedout)(struct gcip_mailbox *mailbox,
- struct gcip_mailbox_async_response *async_resp);
+ void (*handle_awaiter_timedout)(struct gcip_mailbox *mailbox,
+ struct gcip_mailbox_resp_awaiter *awaiter);
/*
* Cleans up asynchronous response which is not arrived yet, but also not timed out.
- * The @async_resp should be marked as unprocessable to make it not to be processed by
- * the `handle_async_resp_arrived` or `handle_async_resp_timedout` callbacks in race
- * conditions. Don't have to release @async_resp of this function by calling the
- * `gcip_mailbox_release_async_resp` function. It will be released internally. This is
+ * The @awaiter should be marked as unprocessable to make it not to be processed by
+ * the `handle_awaiter_arrived` or `handle_awaiter_timedout` callbacks in race
+ * conditions. Don't have to release @awaiter of this function by calling the
+ * `gcip_mailbox_release_awaiter` function. It will be released internally. This is
* called with the `wait_list_lock` being held.
* Context: normal.
*/
- void (*flush_async_resp)(struct gcip_mailbox *mailbox,
- struct gcip_mailbox_async_response *async_resp);
+ void (*flush_awaiter)(struct gcip_mailbox *mailbox,
+ struct gcip_mailbox_resp_awaiter *awaiter);
/*
* Releases the @data which was passed to the `gcip_mailbox_put_cmd` function. This is
* called without holding any locks.
* Context: normal and in_interrupt().
*/
- void (*release_async_resp_data)(void *data);
+ void (*release_awaiter_data)(void *data);
};
struct gcip_mailbox {
@@ -399,35 +399,35 @@ int gcip_mailbox_send_cmd(struct gcip_mailbox *mailbox, void *cmd, void *resp);
/*
* Executes @cmd command asynchronously. This function returns an instance of
- * `struct gcip_mailbox_async_response` which handles the arrival and time-out of the response.
+ * `struct gcip_mailbox_resp_awaiter` which handles the arrival and time-out of the response.
* The implementation side can cancel the asynchronous response by calling the
- * `gcip_mailbox_cancel_async_resp_timeout` function with it.
+ * `gcip_mailbox_cancel_awaiter_timeout` function with it.
*
- * Arrived asynchronous response will be handled by `handle_async_resp` callback and timed out
- * asynchronous response will be handled by `handle_async_resp_timedout` callback. Those callbacks
- * will pass the @async_resp as a parameter which is the same with the return of this function.
+ * Arrived asynchronous response will be handled by `handle_awaiter_arrived` callback and timed out
+ * asynchronous response will be handled by `handle_awaiter_timedout` callback. Those callbacks
+ * will pass the @awaiter as a parameter which is the same with the return of this function.
* The response can be accessed from `resp` member of it. Also, the @data passed to this function
- * can be accessed from `data` member variable of it. The @async_resp must be released by calling
- * the `gcip_mailbox_release_async_resp` function when it is not needed anymore.
+ * can be accessed from `data` member variable of it. The @awaiter must be released by calling
+ * the `gcip_mailbox_release_awaiter` function when it is not needed anymore.
*
* If the mailbox is released before the response arrives, all the waiting asynchronous responses
- * will be flushed. In this case, the `flush_async_resp` callback will be called for that response
- * and @async_resp don't have to be released by the implementation side.
- * (i.e, the `gcip_mailbox_release_async_resp` function will be called internally.)
+ * will be flushed. In this case, the `flush_awaiter` callback will be called for that response
+ * and @awaiter don't have to be released by the implementation side.
+ * (i.e, the `gcip_mailbox_release_awaiter` function will be called internally.)
*
- * The caller defines the way of cleaning up the @data to the `release_async_resp_data` callback.
- * This callback will be called when the `gcip_mailbox_release_async_resp` function is called or
+ * The caller defines the way of cleaning up the @data to the `release_awaiter_data` callback.
+ * This callback will be called when the `gcip_mailbox_release_awaiter` function is called or
* the response is flushed.
*
* If this function fails to request the command, it will return the error pointer. In this case,
- * the caller should free @data explicitly. (i.e, the callback `release_async_resp_data` will not
+ * the caller should free @data explicitly. (i.e, the callback `release_awaiter_data` will not
* be.)
*
* Note: the asynchronous responses fetched from @resp_queue should be released by calling the
- * `gcip_mailbox_release_async_resp` function.
+ * `gcip_mailbox_release_awaiter` function.
*/
-struct gcip_mailbox_async_response *gcip_mailbox_put_cmd(struct gcip_mailbox *mailbox, void *cmd,
- void *resp, void *data);
+struct gcip_mailbox_resp_awaiter *gcip_mailbox_put_cmd(struct gcip_mailbox *mailbox, void *cmd,
+ void *resp, void *data);
/*
* Cancels the timeout work of the asynchronous response. In normally, the response arrives and
@@ -439,13 +439,13 @@ struct gcip_mailbox_async_response *gcip_mailbox_put_cmd(struct gcip_mailbox *ma
*
* Note: this function will cancel the timeout work synchronously.
*/
-void gcip_mailbox_cancel_async_resp_timeout(struct gcip_mailbox_async_response *async_resp);
+void gcip_mailbox_cancel_awaiter_timeout(struct gcip_mailbox_resp_awaiter *awaiter);
/*
- * Releases @async_resp. Every fetched (arrived or timed out) asynchronous responses should be
- * released by calling this. It will call the `release_async_resp_data` callback internally.
+ * Releases @awaiter. Every fetched (arrived or timed out) asynchronous responses should be
+ * released by calling this. It will call the `release_awaiter_data` callback internally.
*/
-void gcip_mailbox_release_async_resp(struct gcip_mailbox_async_response *async_resp);
+void gcip_mailbox_release_awaiter(struct gcip_mailbox_resp_awaiter *awaiter);
/*
* Consume one response and handle it. This can be used for consuming one response quickly and then
diff --git a/drivers/edgetpu/include/linux/acpm_dvfs.h b/drivers/edgetpu/include/linux/acpm_dvfs.h
deleted file mode 100644
index 8b49a7c..0000000
--- a/drivers/edgetpu/include/linux/acpm_dvfs.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Fallback header for systems without Exynos ACPM support.
- * TODO(b/240363978): Remove once ACPM is ready.
- *
- * Copyright (C) 2021 Google, Inc.
- */
-
-#ifndef __ACPM_DVFS_H__
-#define __ACPM_DVFS_H__
-
-static inline int exynos_acpm_set_init_freq(unsigned int dfs_id, unsigned long freq)
-{
- return 0;
-}
-
-static inline int exynos_acpm_set_policy(unsigned int id, unsigned long policy)
-{
- return 0;
-}
-
-#if IS_ENABLED(CONFIG_EDGETPU_TEST)
-
-int exynos_acpm_set_rate(unsigned int id, unsigned long rate);
-unsigned long exynos_acpm_get_rate(unsigned int id, unsigned long dbg_val);
-
-#else /* IS_ENABLED(CONFIG_EDGETPU_TEST) */
-
-extern unsigned long exynos_acpm_rate;
-
-static inline int exynos_acpm_set_rate(unsigned int id, unsigned long rate)
-{
- exynos_acpm_rate = rate;
- return 0;
-}
-
-static inline unsigned long exynos_acpm_get_rate(unsigned int id,
- unsigned long dbg_val)
-{
- return exynos_acpm_rate;
-}
-
-#endif /* IS_ENABLED(CONFIG_EDGETPU_TEST) */
-
-#endif /* __ACPM_DVFS_H__ */
diff --git a/drivers/edgetpu/mobile-pm.c b/drivers/edgetpu/mobile-pm.c
index 04caa01..8ad78e8 100644
--- a/drivers/edgetpu/mobile-pm.c
+++ b/drivers/edgetpu/mobile-pm.c
@@ -24,10 +24,6 @@
#include "edgetpu-pm.c"
#include "edgetpu-soc.h"
-static int power_state = TPU_DEFAULT_POWER_STATE;
-
-module_param(power_state, int, 0660);
-
enum edgetpu_pwr_state edgetpu_active_states[EDGETPU_NUM_STATES] = {
TPU_ACTIVE_UUD,
TPU_ACTIVE_SUD,
@@ -37,47 +33,16 @@ enum edgetpu_pwr_state edgetpu_active_states[EDGETPU_NUM_STATES] = {
uint32_t *edgetpu_states_display = edgetpu_active_states;
-static int mobile_pwr_state_init(struct device *dev)
-{
- int ret;
- int curr_state;
-
- pm_runtime_enable(dev);
- curr_state = edgetpu_soc_pm_get_rate(0);
-
- if (curr_state > TPU_OFF) {
- ret = pm_runtime_get_sync(dev);
- if (ret) {
- pm_runtime_put_noidle(dev);
- dev_err(dev, "pm_runtime_get_sync returned %d\n", ret);
- return ret;
- }
- }
-
- ret = edgetpu_soc_pm_set_init_freq(curr_state);
- if (ret) {
- dev_err(dev, "error initializing tpu state: %d\n", ret);
- if (curr_state > TPU_OFF)
- pm_runtime_put_sync(dev);
- return ret;
- }
-
- return ret;
-}
-
static int mobile_pwr_state_set_locked(struct edgetpu_mobile_platform_dev *etmdev, u64 val)
{
int ret;
- int curr_state;
- struct edgetpu_dev *etdev = &etmdev->edgetpu_dev;
struct edgetpu_mobile_platform_pwr *platform_pwr = &etmdev->platform_pwr;
+ struct edgetpu_dev *etdev = &etmdev->edgetpu_dev;
struct device *dev = etdev->dev;
- curr_state = edgetpu_soc_pm_get_rate(0);
-
- dev_dbg(dev, "Power state %d -> %llu\n", curr_state, val);
+ dev_dbg(dev, "Power state to %llu\n", val);
- if (curr_state == TPU_OFF && val > TPU_OFF) {
+ if (val > TPU_OFF && (!platform_pwr->is_block_down || platform_pwr->is_block_down(etdev))) {
ret = pm_runtime_get_sync(dev);
if (ret) {
pm_runtime_put_noidle(dev);
@@ -93,14 +58,13 @@ static int mobile_pwr_state_set_locked(struct edgetpu_mobile_platform_dev *etmde
return ret;
}
- if (curr_state != TPU_OFF && val == TPU_OFF) {
+ if (val == TPU_OFF &&
+ (!platform_pwr->is_block_down || !platform_pwr->is_block_down(etdev))) {
ret = pm_runtime_put_sync(dev);
if (ret) {
dev_err(dev, "%s: pm_runtime_put_sync returned %d\n", __func__, ret);
return ret;
}
- if (platform_pwr->block_down)
- platform_pwr->block_down(etdev);
}
return ret;
@@ -111,7 +75,7 @@ static int mobile_pwr_state_get_locked(void *data, u64 *val)
struct edgetpu_dev *etdev = (typeof(etdev))data;
struct device *dev = etdev->dev;
- *val = edgetpu_soc_pm_get_rate(0);
+ *val = edgetpu_soc_pm_get_rate(etdev, 0);
dev_dbg(dev, "current tpu state: %llu\n", *val);
return 0;
@@ -215,27 +179,6 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_tpu_pwr_state, mobile_pwr_state_get, mobile_pwr_st
DEFINE_DEBUGFS_ATTRIBUTE(fops_tpu_min_pwr_state, mobile_min_pwr_state_get, mobile_min_pwr_state_set,
"%llu\n");
-static int mobile_get_initial_pwr_state(struct device *dev)
-{
- switch (power_state) {
- case TPU_ACTIVE_UUD:
- case TPU_ACTIVE_SUD:
- case TPU_ACTIVE_UD:
- case TPU_ACTIVE_NOM:
- dev_info(dev, "Initial power state: %d\n", power_state);
- break;
- case TPU_OFF:
- dev_warn(dev, "Power state %d prevents control core booting", power_state);
- fallthrough;
- default:
- dev_warn(dev, "Power state %d is invalid\n", power_state);
- dev_warn(dev, "defaulting to active nominal\n");
- power_state = TPU_ACTIVE_NOM;
- break;
- }
- return power_state;
-}
-
static int mobile_power_down(struct edgetpu_pm *etpm);
static int mobile_power_up(struct edgetpu_pm *etpm)
@@ -243,12 +186,19 @@ static int mobile_power_up(struct edgetpu_pm *etpm)
struct edgetpu_dev *etdev = etpm->etdev;
struct edgetpu_mobile_platform_dev *etmdev = to_mobile_dev(etdev);
struct edgetpu_mobile_platform_pwr *platform_pwr = &etmdev->platform_pwr;
- int ret = mobile_pwr_state_set(etpm->etdev, mobile_get_initial_pwr_state(etdev->dev));
+ int ret;
+
+ if (platform_pwr->is_block_down && !platform_pwr->is_block_down(etdev))
+ return -EAGAIN;
etdev_info(etpm->etdev, "Powering up\n");
- if (ret)
+ ret = pm_runtime_get_sync(etdev->dev);
+	if (ret < 0) {
+ pm_runtime_put_noidle(etdev->dev);
+ etdev_err(etdev, "pm_runtime_get_sync returned %d\n", ret);
return ret;
+ }
if (platform_pwr->lpm_up)
platform_pwr->lpm_up(etdev);
@@ -318,7 +268,6 @@ static int mobile_power_down(struct edgetpu_pm *etpm)
struct edgetpu_dev *etdev = etpm->etdev;
struct edgetpu_mobile_platform_dev *etmdev = to_mobile_dev(etdev);
struct edgetpu_mobile_platform_pwr *platform_pwr = &etmdev->platform_pwr;
- u64 val;
int res = 0;
int min_state = platform_pwr->min_state;
@@ -329,11 +278,7 @@ static int mobile_power_down(struct edgetpu_pm *etpm)
return 0;
}
- if (mobile_pwr_state_get(etdev, &val)) {
- etdev_warn(etdev, "Failed to read current power state\n");
- val = TPU_ACTIVE_NOM;
- }
- if (val == TPU_OFF) {
+ if (platform_pwr->is_block_down && platform_pwr->is_block_down(etdev)) {
etdev_dbg(etdev, "Device already off, skipping shutdown\n");
return 0;
}
@@ -364,7 +309,11 @@ static int mobile_power_down(struct edgetpu_pm *etpm)
etdev_warn(etdev, "CPU reset request failed (%d)\n", res);
}
- mobile_pwr_state_set(etdev, TPU_OFF);
+ res = pm_runtime_put_sync(etdev->dev);
+ if (res) {
+ etdev_err(etdev, "pm_runtime_put_sync returned %d\n", res);
+ return res;
+ }
edgetpu_soc_pm_power_down(etdev);
@@ -387,16 +336,18 @@ static int mobile_pm_after_create(struct edgetpu_pm *etpm)
struct device *dev = etdev->dev;
struct edgetpu_mobile_platform_pwr *platform_pwr = &etmdev->platform_pwr;
- ret = mobile_pwr_state_init(dev);
- if (ret)
+ pm_runtime_enable(dev);
+
+ ret = pm_runtime_get_sync(dev);
+	if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ dev_err(dev, "pm_runtime_get_sync returned %d\n", ret);
return ret;
+ }
mutex_init(&platform_pwr->policy_lock);
mutex_init(&platform_pwr->state_lock);
- ret = mobile_pwr_state_set(etdev, mobile_get_initial_pwr_state(dev));
- if (ret)
- return ret;
platform_pwr->debugfs_dir = debugfs_create_dir("power", edgetpu_fs_debugfs_dir());
if (IS_ERR_OR_NULL(platform_pwr->debugfs_dir)) {
dev_warn(etdev->dev, "Failed to create debug FS power");
diff --git a/drivers/edgetpu/mobile-soc-gsx01.c b/drivers/edgetpu/mobile-soc-gsx01.c
index d8a2535..f194b77 100644
--- a/drivers/edgetpu/mobile-soc-gsx01.c
+++ b/drivers/edgetpu/mobile-soc-gsx01.c
@@ -6,6 +6,7 @@
*/
#include <linux/device.h>
+#include <linux/errno.h>
#include <linux/gsa/gsa_tpu.h>
#include <linux/platform_device.h>
#include <linux/thermal.h>
@@ -14,9 +15,6 @@
#include <soc/google/exynos_pm_qos.h>
#include <soc/google/gs_tmu.h>
-/* TODO(b/240363978): Use system ACPM header once it's ready. */
-#include "include/linux/acpm_dvfs.h"
-
#include "edgetpu-internal.h"
#include "edgetpu-firmware.h"
#include "edgetpu-kci.h"
@@ -26,8 +24,6 @@
#include "mobile-firmware.h"
#include "mobile-soc-gsx01.h"
-#define TPU_ACPM_DOMAIN 9
-
#define MAX_VOLTAGE_VAL 1250000
#define TPU_DEBUG_REQ (1 << 31)
@@ -60,10 +56,10 @@
#define SSMT_BYPASS (1 << 31)
-/* TODO(b/240363978): Remove once ACPM is ready. */
-#if !IS_ENABLED(CONFIG_EDGETPU_TEST)
-unsigned long exynos_acpm_rate = 0;
-#endif /* IS_ENABLED(CONFIG_EDGETPU_TEST) */
+#define PLL_CON3_OFFSET 0x10c
+#define PLL_DIV_M_POS 16
+#define PLL_DIV_M_WIDTH 10
+#define TO_PLL_DIV_M(val) (((val) >> PLL_DIV_M_POS) & (BIT(PLL_DIV_M_WIDTH) - 1))
static int gsx01_parse_ssmt(struct edgetpu_mobile_platform_dev *etmdev)
{
@@ -104,6 +100,31 @@ static int gsx01_parse_ssmt(struct edgetpu_mobile_platform_dev *etmdev)
return 0;
}
+static int gsx01_parse_cmu(struct edgetpu_mobile_platform_dev *etmdev)
+{
+ struct edgetpu_dev *etdev = &etmdev->edgetpu_dev;
+ struct platform_device *pdev = to_platform_device(etdev->dev);
+ struct edgetpu_soc_data *soc_data = etdev->soc_data;
+ struct resource *res;
+ void __iomem *cmu_base;
+ int ret;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cmu");
+ if (!res) {
+ etdev_warn(etdev, "Failed to find CMU register base");
+ return -EINVAL;
+ }
+ cmu_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(cmu_base)) {
+ ret = PTR_ERR(cmu_base);
+ etdev_warn(etdev, "Failed to map CMU register base: %d", ret);
+ return ret;
+ }
+ soc_data->cmu_base = cmu_base;
+
+ return 0;
+}
+
int edgetpu_soc_init(struct edgetpu_dev *etdev)
{
struct platform_device *pdev = to_platform_device(etdev->dev);
@@ -118,6 +139,11 @@ int edgetpu_soc_init(struct edgetpu_dev *etdev)
ret = gsx01_parse_ssmt(etmdev);
if (ret)
dev_warn(etdev->dev, "SSMT setup failed (%d). Context isolation not enforced", ret);
+
+ ret = gsx01_parse_cmu(etmdev);
+ if (ret)
+ dev_warn(etdev->dev, "CMU setup failed (%d). Can't query TPU core frequency.", ret);
+
return 0;
}
@@ -269,29 +295,77 @@ void edgetpu_soc_handle_reverse_kci(struct edgetpu_dev *etdev,
}
}
-long edgetpu_soc_pm_get_rate(int flags)
+static unsigned long edgetpu_pm_rate;
+
+long edgetpu_soc_pm_get_rate(struct edgetpu_dev *etdev, int flags)
{
- return exynos_acpm_get_rate(TPU_ACPM_DOMAIN, flags);
+ struct edgetpu_mobile_platform_dev *etmdev = to_mobile_dev(etdev);
+ struct edgetpu_mobile_platform_pwr *platform_pwr = &etmdev->platform_pwr;
+ void __iomem *cmu_base = etdev->soc_data->cmu_base;
+ long curr_state;
+ u32 pll_con3;
+
+ if (IS_ENABLED(CONFIG_EDGETPU_TEST))
+ return edgetpu_pm_rate;
+
+ if (!cmu_base)
+ return -EINVAL;
+
+ if (!platform_pwr->is_block_down)
+ etdev_warn(etdev,
+			   "Querying the CMU PLL register when TPU_OFF might lead to a crash.");
+ else if (platform_pwr->is_block_down(etdev))
+ return 0;
+
+ pll_con3 = readl(cmu_base + PLL_CON3_OFFSET);
+
+ /*
+ * Below values must match the CMU PLL (pll_con3_pll_tpu) values in the spec and firmware.
+ * See https://drive.google.com/file/d/16S9yxmGwkOltdO2w4dC8tpAt99chn-aq/view and
+ * power_manager.cc for more details.
+ */
+ switch (TO_PLL_DIV_M(pll_con3)) {
+ case 221:
+ curr_state = TPU_ACTIVE_UUD;
+ break;
+ case 153:
+ curr_state = TPU_ACTIVE_SUD;
+ break;
+ case 206:
+ curr_state = TPU_ACTIVE_UD;
+ break;
+ case 182:
+ curr_state = TPU_ACTIVE_NOM;
+ break;
+ default:
+ etdev_err(etdev, "Invalid DIV_M read from PLL: %lu\n", TO_PLL_DIV_M(pll_con3));
+ curr_state = -EINVAL;
+ }
+
+ etdev_dbg(etdev, "current tpu state: %ld\n", curr_state);
+
+ return curr_state;
}
int edgetpu_soc_pm_set_rate(unsigned long rate)
{
- return exynos_acpm_set_rate(TPU_ACPM_DOMAIN, rate);
-}
+ if (IS_ENABLED(CONFIG_EDGETPU_TEST))
+ edgetpu_pm_rate = rate;
-int edgetpu_soc_pm_set_init_freq(unsigned long freq)
-{
- return exynos_acpm_set_init_freq(TPU_ACPM_DOMAIN, freq);
+ return -EOPNOTSUPP;
}
int edgetpu_soc_pm_set_policy(u64 val)
{
- return exynos_acpm_set_policy(TPU_ACPM_DOMAIN, val);
+ return 0;
}
static int edgetpu_core_rate_get(void *data, u64 *val)
{
- *val = edgetpu_soc_pm_get_rate(TPU_DEBUG_REQ | TPU_CLK_CORE_DEBUG);
+ struct edgetpu_dev *etdev = (typeof(etdev))data;
+
+ *val = edgetpu_soc_pm_get_rate(etdev, TPU_DEBUG_REQ | TPU_CLK_CORE_DEBUG);
+
return 0;
}
@@ -307,7 +381,10 @@ static int edgetpu_core_rate_set(void *data, u64 val)
static int edgetpu_ctl_rate_get(void *data, u64 *val)
{
- *val = edgetpu_soc_pm_get_rate(TPU_DEBUG_REQ | TPU_CLK_CTL_DEBUG);
+ struct edgetpu_dev *etdev = (typeof(etdev))data;
+
+ *val = edgetpu_soc_pm_get_rate(etdev, TPU_DEBUG_REQ | TPU_CLK_CTL_DEBUG);
+
return 0;
}
@@ -323,7 +400,10 @@ static int edgetpu_ctl_rate_set(void *data, u64 val)
static int edgetpu_axi_rate_get(void *data, u64 *val)
{
- *val = edgetpu_soc_pm_get_rate(TPU_DEBUG_REQ | TPU_CLK_AXI_DEBUG);
+ struct edgetpu_dev *etdev = (typeof(etdev))data;
+
+ *val = edgetpu_soc_pm_get_rate(etdev, TPU_DEBUG_REQ | TPU_CLK_AXI_DEBUG);
+
return 0;
}
@@ -339,23 +419,29 @@ static int edgetpu_axi_rate_set(void *data, u64 val)
static int edgetpu_apb_rate_get(void *data, u64 *val)
{
- *val = edgetpu_soc_pm_get_rate(TPU_DEBUG_REQ | TPU_CLK_APB_DEBUG);
+ struct edgetpu_dev *etdev = (typeof(etdev))data;
+
+ *val = edgetpu_soc_pm_get_rate(etdev, TPU_DEBUG_REQ | TPU_CLK_APB_DEBUG);
+
return 0;
}
static int edgetpu_uart_rate_get(void *data, u64 *val)
{
- *val = edgetpu_soc_pm_get_rate(TPU_DEBUG_REQ | TPU_CLK_UART_DEBUG);
+ struct edgetpu_dev *etdev = (typeof(etdev))data;
+
+ *val = edgetpu_soc_pm_get_rate(etdev, TPU_DEBUG_REQ | TPU_CLK_UART_DEBUG);
+
return 0;
}
static int edgetpu_vdd_int_m_set(void *data, u64 val)
{
- struct device *dev = (struct device *)data;
+ struct edgetpu_dev *etdev = (typeof(etdev))data;
unsigned long dbg_rate_req;
if (val > MAX_VOLTAGE_VAL) {
- dev_err(dev, "Preventing INT_M voltage > %duV", MAX_VOLTAGE_VAL);
+ etdev_err(etdev, "Preventing INT_M voltage > %duV", MAX_VOLTAGE_VAL);
return -EINVAL;
}
@@ -367,18 +453,21 @@ static int edgetpu_vdd_int_m_set(void *data, u64 val)
static int edgetpu_vdd_int_m_get(void *data, u64 *val)
{
- *val = edgetpu_soc_pm_get_rate(TPU_DEBUG_REQ | TPU_VDD_INT_M_DEBUG);
+ struct edgetpu_dev *etdev = (typeof(etdev))data;
+
+ *val = edgetpu_soc_pm_get_rate(etdev, TPU_DEBUG_REQ | TPU_VDD_INT_M_DEBUG);
+
return 0;
}
static int edgetpu_vdd_tpu_set(void *data, u64 val)
{
int ret;
- struct device *dev = (struct device *)data;
+ struct edgetpu_dev *etdev = (typeof(etdev))data;
unsigned long dbg_rate_req;
if (val > MAX_VOLTAGE_VAL) {
- dev_err(dev, "Preventing VDD_TPU voltage > %duV", MAX_VOLTAGE_VAL);
+ etdev_err(etdev, "Preventing VDD_TPU voltage > %duV", MAX_VOLTAGE_VAL);
return -EINVAL;
}
@@ -391,18 +480,21 @@ static int edgetpu_vdd_tpu_set(void *data, u64 val)
static int edgetpu_vdd_tpu_get(void *data, u64 *val)
{
- *val = edgetpu_soc_pm_get_rate(TPU_DEBUG_REQ | TPU_VDD_TPU_DEBUG);
+ struct edgetpu_dev *etdev = (typeof(etdev))data;
+
+ *val = edgetpu_soc_pm_get_rate(etdev, TPU_DEBUG_REQ | TPU_VDD_TPU_DEBUG);
+
return 0;
}
static int edgetpu_vdd_tpu_m_set(void *data, u64 val)
{
int ret;
- struct device *dev = (struct device *)data;
+ struct edgetpu_dev *etdev = (typeof(etdev))data;
unsigned long dbg_rate_req;
if (val > MAX_VOLTAGE_VAL) {
- dev_err(dev, "Preventing VDD_TPU voltage > %duV", MAX_VOLTAGE_VAL);
+ etdev_err(etdev, "Preventing VDD_TPU voltage > %duV", MAX_VOLTAGE_VAL);
return -EINVAL;
}
@@ -415,7 +507,10 @@ static int edgetpu_vdd_tpu_m_set(void *data, u64 val)
static int edgetpu_vdd_tpu_m_get(void *data, u64 *val)
{
- *val = edgetpu_soc_pm_get_rate(TPU_DEBUG_REQ | TPU_VDD_TPU_M_DEBUG);
+ struct edgetpu_dev *etdev = (typeof(etdev))data;
+
+ *val = edgetpu_soc_pm_get_rate(etdev, TPU_DEBUG_REQ | TPU_VDD_TPU_M_DEBUG);
+
return 0;
}
@@ -442,7 +537,6 @@ void edgetpu_soc_pm_power_down(struct edgetpu_dev *etdev)
int edgetpu_soc_pm_init(struct edgetpu_dev *etdev)
{
- struct device *dev = etdev->dev;
struct edgetpu_mobile_platform_dev *etmdev = to_mobile_dev(etdev);
struct edgetpu_mobile_platform_pwr *platform_pwr = &etmdev->platform_pwr;
@@ -454,14 +548,18 @@ int edgetpu_soc_pm_init(struct edgetpu_dev *etdev)
dev_warn(etdev->dev, "tpu_performance BTS scenario not found\n");
etdev->soc_data->scenario_count = 0;
- debugfs_create_file("vdd_tpu", 0660, platform_pwr->debugfs_dir, dev, &fops_tpu_vdd_tpu);
- debugfs_create_file("vdd_tpu_m", 0660, platform_pwr->debugfs_dir, dev, &fops_tpu_vdd_tpu_m);
- debugfs_create_file("vdd_int_m", 0660, platform_pwr->debugfs_dir, dev, &fops_tpu_vdd_int_m);
- debugfs_create_file("core_rate", 0660, platform_pwr->debugfs_dir, dev, &fops_tpu_core_rate);
- debugfs_create_file("ctl_rate", 0660, platform_pwr->debugfs_dir, dev, &fops_tpu_ctl_rate);
- debugfs_create_file("axi_rate", 0660, platform_pwr->debugfs_dir, dev, &fops_tpu_axi_rate);
- debugfs_create_file("apb_rate", 0440, platform_pwr->debugfs_dir, dev, &fops_tpu_apb_rate);
- debugfs_create_file("uart_rate", 0440, platform_pwr->debugfs_dir, dev, &fops_tpu_uart_rate);
+ debugfs_create_file("vdd_tpu", 0660, platform_pwr->debugfs_dir, etdev, &fops_tpu_vdd_tpu);
+ debugfs_create_file("vdd_tpu_m", 0660, platform_pwr->debugfs_dir, etdev,
+ &fops_tpu_vdd_tpu_m);
+ debugfs_create_file("vdd_int_m", 0660, platform_pwr->debugfs_dir, etdev,
+ &fops_tpu_vdd_int_m);
+ debugfs_create_file("core_rate", 0660, platform_pwr->debugfs_dir, etdev,
+ &fops_tpu_core_rate);
+ debugfs_create_file("ctl_rate", 0660, platform_pwr->debugfs_dir, etdev, &fops_tpu_ctl_rate);
+ debugfs_create_file("axi_rate", 0660, platform_pwr->debugfs_dir, etdev, &fops_tpu_axi_rate);
+ debugfs_create_file("apb_rate", 0440, platform_pwr->debugfs_dir, etdev, &fops_tpu_apb_rate);
+ debugfs_create_file("uart_rate", 0440, platform_pwr->debugfs_dir, etdev,
+ &fops_tpu_uart_rate);
return 0;
}
diff --git a/drivers/edgetpu/mobile-soc-gsx01.h b/drivers/edgetpu/mobile-soc-gsx01.h
index d07d1d4..9d32884 100644
--- a/drivers/edgetpu/mobile-soc-gsx01.h
+++ b/drivers/edgetpu/mobile-soc-gsx01.h
@@ -20,6 +20,8 @@ struct edgetpu_soc_data {
void __iomem **ssmt_base;
/* Number of SSMTs */
uint num_ssmts;
+ /* Virtual address of the CMU block for this chip. */
+ void __iomem *cmu_base;
/* INT/MIF requests for memory bandwidth */
struct exynos_pm_qos_request int_min;
struct exynos_pm_qos_request mif_min;
diff --git a/drivers/edgetpu/mobile-thermal.c b/drivers/edgetpu/mobile-thermal.c
index 457cf9f..c8fb0a2 100644
--- a/drivers/edgetpu/mobile-thermal.c
+++ b/drivers/edgetpu/mobile-thermal.c
@@ -179,7 +179,7 @@ static int edgetpu_get_requested_power(struct thermal_cooling_device *cdev,
unsigned long state_original;
struct edgetpu_thermal *cooling = cdev->devdata;
- state_original = edgetpu_soc_pm_get_rate(0);
+ state_original = edgetpu_soc_pm_get_rate(cooling->etdev, 0);
return edgetpu_state2power_internal(state_original, power, cooling);
}
diff --git a/drivers/edgetpu/rio-platform.c b/drivers/edgetpu/rio-platform.c
index 7c65d5d..ddc3968 100644
--- a/drivers/edgetpu/rio-platform.c
+++ b/drivers/edgetpu/rio-platform.c
@@ -15,6 +15,7 @@
#include "edgetpu-config.h"
#include "edgetpu-internal.h"
#include "edgetpu-mobile-platform.h"
+#include "edgetpu-pm.h"
#include "rio-platform.h"
#include "edgetpu-mobile-platform.c"
@@ -144,6 +145,7 @@ static struct platform_driver edgetpu_platform_driver = {
.driver = {
.name = "edgetpu_platform",
.of_match_table = edgetpu_of_match,
+ .pm = &edgetpu_pm_ops,
},
};
diff --git a/drivers/edgetpu/rio-pm.c b/drivers/edgetpu/rio-pm.c
index 29e5dbf..0b6aa45 100644
--- a/drivers/edgetpu/rio-pm.c
+++ b/drivers/edgetpu/rio-pm.c
@@ -16,8 +16,6 @@
#include "mobile-soc-gsx01.h"
#include "mobile-pm.h"
-#define TPU_DEFAULT_POWER_STATE TPU_ACTIVE_NOM
-
#include "mobile-pm.c"
#define SHUTDOWN_DELAY_US_MIN 20
@@ -143,22 +141,22 @@ static int rio_lpm_up(struct edgetpu_dev *etdev)
return 0;
}
-static void rio_block_down(struct edgetpu_dev *etdev)
+static bool rio_is_block_down(struct edgetpu_dev *etdev)
{
+ struct edgetpu_mobile_platform_dev *etmdev = to_mobile_dev(etdev);
int timeout_cnt = 0;
int curr_state;
do {
/* Delay 20us per retry till blk shutdown finished */
usleep_range(SHUTDOWN_DELAY_US_MIN, SHUTDOWN_DELAY_US_MAX);
- /* Only poll for BLK status instead of CLK rate */
- curr_state = edgetpu_soc_pm_get_rate(1);
+ curr_state = readl(etmdev->pmu_status);
if (!curr_state)
- break;
+ return true;
timeout_cnt++;
} while (timeout_cnt < SHUTDOWN_MAX_DELAY_COUNT);
- if (timeout_cnt == SHUTDOWN_MAX_DELAY_COUNT)
- etdev_warn(etdev, "blk_shutdown timeout\n");
+
+ return false;
}
static void rio_post_fw_start(struct edgetpu_dev *etdev)
@@ -176,7 +174,8 @@ int edgetpu_chip_pm_create(struct edgetpu_dev *etdev)
platform_pwr->lpm_up = rio_lpm_up;
platform_pwr->lpm_down = rio_lpm_down;
- platform_pwr->block_down = rio_block_down;
+ if (etmdev->pmu_status)
+ platform_pwr->is_block_down = rio_is_block_down;
platform_pwr->post_fw_start = rio_post_fw_start;
return edgetpu_mobile_pm_create(etdev);