Diffstat (limited to 'mali_kbase/backend/gpu/mali_kbase_pm_internal.h')
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_pm_internal.h  103
1 file changed, 96 insertions(+), 7 deletions(-)
diff --git a/mali_kbase/backend/gpu/mali_kbase_pm_internal.h b/mali_kbase/backend/gpu/mali_kbase_pm_internal.h
index 68ded7d..d7f19fb 100644
--- a/mali_kbase/backend/gpu/mali_kbase_pm_internal.h
+++ b/mali_kbase/backend/gpu/mali_kbase_pm_internal.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
*
- * (C) COPYRIGHT 2010-2022 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2023 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -224,7 +224,7 @@ void kbase_pm_reset_done(struct kbase_device *kbdev);
* power off in progress and kbase_pm_context_active() was called instead of
* kbase_csf_scheduler_pm_active().
*
- * Return: 0 on success, error code on error
+ * Return: 0 on success, or -ETIMEDOUT on timeout.
*/
int kbase_pm_wait_for_desired_state(struct kbase_device *kbdev);
#else
@@ -247,12 +247,27 @@ int kbase_pm_wait_for_desired_state(struct kbase_device *kbdev);
* must ensure that this is not the case by, for example, calling
* kbase_pm_wait_for_poweroff_work_complete()
*
- * Return: 0 on success, error code on error
+ * Return: 0 on success, or -ETIMEDOUT on timeout.
*/
int kbase_pm_wait_for_desired_state(struct kbase_device *kbdev);
#endif
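The comment above prescribes a call ordering for the JM (non-CSF) path; a minimal sketch follows. Only the two kbase_pm_*() calls are existing driver API here; the wrapper function and its error handling are hypothetical.

/* Illustrative pre-wait sequence based on the comment above; the wrapper
 * itself is hypothetical. */
static int example_wait_powered_up(struct kbase_device *kbdev)
{
	int err;

	/* Ensure no poweroff triggered by pm.active_count reaching 0 is
	 * still in flight, as required by the documentation above. */
	err = kbase_pm_wait_for_poweroff_work_complete(kbdev);
	if (err)
		return err;

	return kbase_pm_wait_for_desired_state(kbdev);
}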
/**
+ * kbase_pm_killable_wait_for_desired_state - Wait for the desired power state to be
+ * reached, in a killable fashion.
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * This function is the same as kbase_pm_wait_for_desired_state(), except that it
+ * allows the SIGKILL signal to interrupt the wait.
+ * It is meant to be called from code that executes in ioctl or other userspace
+ * context, wherever it is safe to do so.
+ *
+ * Return: 0 on success, -ETIMEDOUT on timeout, or -ERESTARTSYS if the wait was
+ * interrupted.
+ */
+int kbase_pm_killable_wait_for_desired_state(struct kbase_device *kbdev);
+
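As a rough illustration of the intended ioctl-context usage, here is a minimal sketch; the handler name and error-handling shape are assumptions, while kbase_pm_context_active()/kbase_pm_context_idle() are the existing PM reference-count API mentioned in the comments above.

/* Hypothetical ioctl-path caller; everything except the kbase_pm_*()
 * calls is illustrative. */
static int example_ioctl_handler(struct kbase_device *kbdev)
{
	int err;

	kbase_pm_context_active(kbdev);

	/* Killable variant: a SIGKILL targeting the calling task interrupts
	 * the wait (-ERESTARTSYS) instead of blocking it until timeout. */
	err = kbase_pm_killable_wait_for_desired_state(kbdev);
	if (err) {
		kbase_pm_context_idle(kbdev);
		return err; /* -ERESTARTSYS or -ETIMEDOUT */
	}

	/* ... safe to access the powered-up GPU here ... */

	kbase_pm_context_idle(kbdev);
	return 0;
}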
+/**
* kbase_pm_wait_for_l2_powered - Wait for the L2 cache to be powered on
*
* @kbdev: The kbase device structure for the device (must be a valid pointer)
@@ -269,6 +284,37 @@ int kbase_pm_wait_for_desired_state(struct kbase_device *kbdev);
*/
int kbase_pm_wait_for_l2_powered(struct kbase_device *kbdev);
+#if MALI_USE_CSF
+/**
+ * kbase_pm_wait_for_cores_down_scale - Wait for the downscaling of shader cores
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * This function can be called to ensure that the downscaling of cores is
+ * effectively complete and that it is safe to lower the voltage.
+ * The function assumes that the caller has already exercised the MCU state
+ * machine for the downscale request through the kbase_pm_update_state() function.
+ *
+ * The caller must use this function, rather than kbase_pm_wait_for_desired_state(),
+ * to safely wait for the completion of the downscale request.
+ * The downscale request triggers a state change in the MCU state machine, so once
+ * the MCU reaches the stable ON state it can be inferred that downscaling is
+ * complete. However, it has been observed that the wake-up of the waiting thread
+ * can be delayed by a few milliseconds, and by the time the thread wakes up a
+ * power-down transition may already have started (after the completion of the
+ * downscale request).
+ * On completion of the power-down transition another wake-up signal is sent, but
+ * again the thread may not wake up before a power-up transition begins. That
+ * power-up transition could then block inside the platform-specific
+ * callback_power_on() function, waiting on the very thread that called into Kbase
+ * (from the platform-specific code) to perform the downscaling and is itself still
+ * waiting for the downscale request to complete.
+ *
+ * Return: 0 on success, an error code on error, or the remaining jiffies on timeout.
+ */
+int kbase_pm_wait_for_cores_down_scale(struct kbase_device *kbdev);
+#endif
+
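To make the intended call sequence concrete, here is a minimal sketch of a platform-side DVFS hook; the hook and platform_set_gpu_voltage() are hypothetical, and the downscale request is assumed to have already been driven through kbase_pm_update_state() as the comment requires.

/* Hypothetical platform DVFS hook; only kbase_pm_wait_for_cores_down_scale()
 * is real driver API. */
static int example_lower_gpu_voltage(struct kbase_device *kbdev,
				     unsigned long microvolts)
{
	/* Wait for the MCU to settle after the downscale request, using the
	 * dedicated helper to avoid the wake-up race described above. */
	int err = kbase_pm_wait_for_cores_down_scale(kbdev);

	if (err)
		return err;

	/* Cores are idle at the lower count; now safe to drop the voltage. */
	return platform_set_gpu_voltage(microvolts); /* hypothetical */
}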
/**
* kbase_pm_update_dynamic_cores_onoff - Update the L2 and shader power state
* machines after changing shader core
@@ -436,8 +482,26 @@ void kbase_pm_release_gpu_cycle_counter_nolock(struct kbase_device *kbdev);
* This function effectively just waits for the @gpu_poweroff_wait_work work
* item to complete, if it was enqueued. The GPU may not have been powered down
* before this function returns.
+ *
+ * Return: 0 on success, or an error code on error.
+ */
+int kbase_pm_wait_for_poweroff_work_complete(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_killable_wait_for_poweroff_work_complete - Wait for the poweroff workqueue to
+ * complete, in a killable fashion.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * This function is the same as kbase_pm_wait_for_poweroff_work_complete(), except
+ * that it allows the SIGKILL signal to interrupt the wait.
+ * It is meant to be called from code that executes in ioctl or other userspace
+ * context, wherever it is safe to do so.
+ *
+ * Return: 0 on success, -ETIMEDOUT on timeout, or -ERESTARTSYS if the wait was
+ * interrupted.
*/
-void kbase_pm_wait_for_poweroff_work_complete(struct kbase_device *kbdev);
+int kbase_pm_killable_wait_for_poweroff_work_complete(struct kbase_device *kbdev);
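Since the non-killable variant now returns int rather than void, existing callers need to start checking the result; below is a hedged sketch of an updated caller, with every name outside the kbase_pm_*() call invented for illustration.

/* Illustrative caller updated for the int return type; the wrapper is
 * hypothetical. */
static int example_flush_poweroff(struct kbase_device *kbdev)
{
	int err = kbase_pm_killable_wait_for_poweroff_work_complete(kbdev);

	if (err) /* -ERESTARTSYS (fatal signal) or -ETIMEDOUT */
		return err;

	/* Note: per the comment above, the GPU may still not be fully
	 * powered down at this point. */
	return 0;
}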
/**
* kbase_pm_wait_for_gpu_power_down - Wait for the GPU power down to complete
@@ -800,7 +864,7 @@ bool kbase_pm_no_runnables_sched_suspendable(struct kbase_device *kbdev)
/**
* kbase_pm_no_mcu_core_pwroff - Check whether the PM is required to keep the
- * MCU core powered in accordance to the active
+ * MCU shader core powered in accordance with the active
* power management policy
*
* @kbdev: Device pointer
@@ -826,6 +890,8 @@ static inline bool kbase_pm_mcu_is_in_desired_state(struct kbase_device *kbdev)
{
bool in_desired_state = true;
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
if (kbase_pm_is_mcu_desired(kbdev) && kbdev->pm.backend.mcu_state != KBASE_MCU_ON)
in_desired_state = false;
else if (!kbase_pm_is_mcu_desired(kbdev) &&
@@ -869,7 +935,7 @@ static inline void kbase_pm_lock(struct kbase_device *kbdev)
#if !MALI_USE_CSF
mutex_lock(&kbdev->js_data.runpool_mutex);
#endif /* !MALI_USE_CSF */
- mutex_lock(&kbdev->pm.lock);
+ rt_mutex_lock(&kbdev->pm.lock);
}
/**
@@ -879,7 +945,7 @@ static inline void kbase_pm_lock(struct kbase_device *kbdev)
*/
static inline void kbase_pm_unlock(struct kbase_device *kbdev)
{
- mutex_unlock(&kbdev->pm.lock);
+ rt_mutex_unlock(&kbdev->pm.lock);
#if !MALI_USE_CSF
mutex_unlock(&kbdev->js_data.runpool_mutex);
#endif /* !MALI_USE_CSF */
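The switch from mutex_lock() to rt_mutex_lock() implies that pm.lock is now an rt_mutex, presumably so a high-priority waiter can priority-boost a low-priority holder on preempt-RT/Android kernels. Callers are unaffected as long as they keep using the wrappers in a balanced way, e.g. in this sketch where do_pm_reconfig() is a hypothetical stand-in:

/* Minimal sketch of a PM-lock-protected section. */
static void example_pm_critical_section(struct kbase_device *kbdev)
{
	kbase_pm_lock(kbdev);   /* runpool_mutex (JM only) + pm.lock */
	do_pm_reconfig(kbdev);  /* hypothetical */
	kbase_pm_unlock(kbdev); /* released in reverse order */
}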
@@ -964,4 +1030,27 @@ static inline void kbase_pm_disable_db_mirror_interrupt(struct kbase_device *kbd
}
#endif
+/**
+ * kbase_pm_l2_allow_mmu_page_migration - Check whether the L2 state allows MMU page migration
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * Check whether the L2 is in a power transition phase; if it is, MMU page
+ * migration should be deferred. The caller must hold hwaccess_lock and, if MMU
+ * page migration is intended, must start the migration immediately without
+ * dropping the lock. When page migration begins, a flag is set in kbdev that
+ * prevents the L2 state machine from entering a power transition phase until
+ * the migration ends.
+ *
+ * Return: true if MMU page migration is allowed
+ */
+static inline bool kbase_pm_l2_allow_mmu_page_migration(struct kbase_device *kbdev)
+{
+ struct kbase_pm_backend_data *backend = &kbdev->pm.backend;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ return (backend->l2_state != KBASE_L2_PEND_ON && backend->l2_state != KBASE_L2_PEND_OFF);
+}
+
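A sketch of the locking protocol the comment describes, with start_mmu_page_migration() standing in for the real migration entry point (a hypothetical name):

/* Hypothetical migration path honouring the documented protocol. */
static bool example_try_migrate(struct kbase_device *kbdev)
{
	unsigned long flags;
	bool started = false;

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	if (kbase_pm_l2_allow_mmu_page_migration(kbdev)) {
		/* Start without dropping hwaccess_lock so the L2 state
		 * machine cannot enter a transition in the meantime. */
		start_mmu_page_migration(kbdev); /* hypothetical */
		started = true;
	}
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	return started;
}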
#endif /* _KBASE_BACKEND_PM_INTERNAL_H_ */