diff options
author | Aurora zuma automerger <aurora-zuma-automerger@google.com> | 2022-10-27 11:05:51 +0000 |
---|---|---|
committer | Copybara-Service <copybara-worker@google.com> | 2022-10-31 22:34:29 -0700 |
commit | 205a8ff3884b375d5a25c2655eed06ba3d104cc9 (patch) | |
tree | 82ebe7bba619aa10c4146e23b4200f57d3bb710c | |
parent | 46ba9d8075b5ea2e9a480e69abc399f0802fc7ad (diff) | |
download | zuma-205a8ff3884b375d5a25c2655eed06ba3d104cc9.tar.gz |
gxp: [Copybara Auto Merge] Merge branch 'zuma' into 'android13-gs-pixel-5.15'
gxp: Handle Core Telemetry Read RKCI
Bug: 249096610
gxp: Add GXP_HAS_LAP to config
Bug: 249227451
gxp: remove explicit values of LPM PSM enum
gxp: temporarily set slice_index to 0
Bug: 255706432
Bug: 242011394
gxp: do power votes in VD wakelock acquisition
Bug: 253990922, 253555787
GitOrigin-RevId: b0244e60187f1960a915129b6a5fbde9c87e10b8
Change-Id: If54605069117d87e2baa2d1adb23b35b9c5a4d37
-rw-r--r-- | callisto-platform.c | 6 | ||||
-rw-r--r-- | callisto/iova.h | 2 | ||||
-rw-r--r-- | callisto/lpm.h | 6 | ||||
-rw-r--r-- | gxp-client.c | 38 | ||||
-rw-r--r-- | gxp-client.h | 16 | ||||
-rw-r--r-- | gxp-common-platform.c | 10 | ||||
-rw-r--r-- | gxp-config.h | 4 | ||||
-rw-r--r-- | gxp-core-telemetry.c | 16 | ||||
-rw-r--r-- | gxp-core-telemetry.h | 9 | ||||
-rw-r--r-- | gxp-dma-iommu.c | 61 | ||||
-rw-r--r-- | gxp-kci.c | 19 | ||||
-rw-r--r-- | gxp-kci.h | 7 | ||||
-rw-r--r-- | gxp-vd.c | 3 | ||||
-rw-r--r-- | gxp.h | 11 |
14 files changed, 148 insertions, 60 deletions
diff --git a/callisto-platform.c b/callisto-platform.c index 65b40ca..5221f62 100644 --- a/callisto-platform.c +++ b/callisto-platform.c @@ -342,8 +342,10 @@ static int callisto_platform_after_vd_block_ready(struct gxp_dev *gxp, operation |= KCI_ALLOCATE_VMBOX_OP_LINK_OFFLOAD_VMBOX; pasid = gxp_iommu_aux_get_pasid(gxp, vd->domain); - ret = gxp_kci_allocate_vmbox(kci, pasid, vd->num_cores, vd->slice_index, - vd->tpu_client_id, operation); + /* TODO(b/255706432): Adopt vd->slice_index after the firmware supports this. */ + ret = gxp_kci_allocate_vmbox(kci, pasid, vd->num_cores, + /*slice_index=*/0, vd->tpu_client_id, + operation); if (ret) { if (ret != GCIP_KCI_ERROR_UNIMPLEMENTED) { dev_err(gxp->dev, diff --git a/callisto/iova.h b/callisto/iova.h index 29bf901..95f71e6 100644 --- a/callisto/iova.h +++ b/callisto/iova.h @@ -9,10 +9,8 @@ #define __CALLISTO_IOVA_H__ /* IOVAs from system firmware's view */ -#define GXP_IOVA_SYNC_BARRIERS (0x100000) #define GXP_IOVA_MAILBOX(_x_) (0x18390000 + (_x_) * 0x00020000) #define GXP_IOVA_EXT_TPU_MBX (0x1A050000) -#define GXP_IOVA_AURORA_TOP (0x25C00000) #define GXP_IOVA_FIRMWARE(_x_) (0xFA000000 + (_x_) * 0x00100000) #define GXP_IOVA_SHARED_BUFFER (0xFA3A8000) #define GXP_SHARED_BUFFER_SIZE (0x00010000) /* 64K, per core */ diff --git a/callisto/lpm.h b/callisto/lpm.h index 4f3189b..eb9e98d 100644 --- a/callisto/lpm.h +++ b/callisto/lpm.h @@ -9,9 +9,9 @@ #define __CALLISTO_LPM_H__ enum gxp_lpm_psm { - LPM_PSM_CORE0 = 0, - LPM_PSM_CORE1 = LPM_PSM_CORE0 + 1, - LPM_PSM_CORE2 = LPM_PSM_CORE0 + 2, + LPM_PSM_CORE0, + LPM_PSM_CORE1, + LPM_PSM_CORE2, LPM_PSM_MCU, LPM_PSM_TOP, LPM_NUM_PSMS, diff --git a/gxp-client.c b/gxp-client.c index 148435b..431e3ea 100644 --- a/gxp-client.c +++ b/gxp-client.c @@ -135,8 +135,7 @@ static int gxp_client_request_power_states(struct gxp_client *client, } int gxp_client_acquire_block_wakelock(struct gxp_client *client, - bool *acquired_wakelock, - struct gxp_power_states requested_states) + bool 
*acquired_wakelock) { struct gxp_dev *gxp = client->gxp; int ret; @@ -164,15 +163,8 @@ int gxp_client_acquire_block_wakelock(struct gxp_client *client, client->tgid = current->tgid; client->pid = current->pid; - ret = gxp_client_request_power_states(client, requested_states); - if (ret) - goto err_vd_block_unready; - return 0; -err_vd_block_unready: - if (client->vd && *acquired_wakelock) - gxp_vd_block_unready(client->vd); err_wakelock_release: if (*acquired_wakelock) { gxp_wakelock_release(gxp); @@ -195,15 +187,16 @@ void gxp_client_release_block_wakelock(struct gxp_client *client) if (client->has_vd_wakelock) gxp_client_release_vd_wakelock(client); - gxp_client_request_power_states(client, off_states); gxp_wakelock_release(gxp); client->has_block_wakelock = false; } -int gxp_client_acquire_vd_wakelock(struct gxp_client *client) +int gxp_client_acquire_vd_wakelock(struct gxp_client *client, + struct gxp_power_states requested_states) { struct gxp_dev *gxp = client->gxp; int ret = 0; + enum gxp_virtual_device_state orig_state; lockdep_assert_held(&client->semaphore); if (!client->has_block_wakelock) { @@ -220,6 +213,7 @@ int gxp_client_acquire_vd_wakelock(struct gxp_client *client) if (!client->has_vd_wakelock) { down_write(&gxp->vd_semaphore); + orig_state = client->vd->state; if (client->vd->state == GXP_VD_READY || client->vd->state == GXP_VD_OFF) ret = gxp_vd_run(client->vd); else @@ -227,9 +221,26 @@ int gxp_client_acquire_vd_wakelock(struct gxp_client *client) up_write(&gxp->vd_semaphore); } - if (!ret) - client->has_vd_wakelock = true; + if (ret) + goto out; + + ret = gxp_client_request_power_states(client, requested_states); + if (ret) + goto out_release_vd_wakelock; + client->has_vd_wakelock = true; + return 0; + +out_release_vd_wakelock: + if (!client->has_vd_wakelock) { + down_write(&gxp->vd_semaphore); + if (orig_state == GXP_VD_READY || orig_state == GXP_VD_OFF) + gxp_vd_stop(client->vd); + else + gxp_vd_suspend(client->vd); + 
up_write(&gxp->vd_semaphore); + } +out: return ret; } @@ -254,5 +265,6 @@ void gxp_client_release_vd_wakelock(struct gxp_client *client) gxp_vd_suspend(client->vd); up_write(&gxp->vd_semaphore); + gxp_client_request_power_states(client, off_states); client->has_vd_wakelock = false; } diff --git a/gxp-client.h b/gxp-client.h index c47de42..735d10a 100644 --- a/gxp-client.h +++ b/gxp-client.h @@ -80,12 +80,10 @@ void gxp_client_destroy(struct gxp_client *client); */ int gxp_client_allocate_virtual_device(struct gxp_client *client, uint core_count); /** - * gxp_client_acquire_block_wakelock() - Acquires a block wakelock and requests - * power votes. + * gxp_client_acquire_block_wakelock() - Acquires a block wakelock. * - * @client: The client to acquire wakelock and request power votes. + * @client: The client to acquire wakelock. * @acquired_wakelock: True if block wakelock has been acquired by this client. - * @requested_states: The requested power states. * * The caller must have locked client->semaphore. * @@ -94,8 +92,7 @@ int gxp_client_allocate_virtual_device(struct gxp_client *client, uint core_coun * * Otherwise - Errno returned by block wakelock acquisition */ int gxp_client_acquire_block_wakelock(struct gxp_client *client, - bool *acquired_wakelock, - struct gxp_power_states requested_states); + bool *acquired_wakelock); /** * gxp_client_release_block_wakelock() - Releases the holded block wakelock and * revokes the power votes. @@ -106,6 +103,10 @@ void gxp_client_release_block_wakelock(struct gxp_client *client); /** * gxp_client_acquire_vd_wakelock() - Acquires a VD wakelock for the current * virtual device to start the virtual device or resume it if it's suspended. + * Also the client can request the power votes tied with the acquired wakelock. + * + * @client: The client to acquire wakelock and request power votes. + * @requested_states: The requested power states. * * The caller must have locked client->semaphore. 
* @@ -114,7 +115,8 @@ void gxp_client_release_block_wakelock(struct gxp_client *client); * * -EINVAL - No holded block wakelock * * -ENODEV - VD state is unavailable */ -int gxp_client_acquire_vd_wakelock(struct gxp_client *client); +int gxp_client_acquire_vd_wakelock(struct gxp_client *client, + struct gxp_power_states requested_states); /** * gxp_client_release_vd_wakelock() - Releases the holded VD wakelock to suspend * the current virtual device. diff --git a/gxp-common-platform.c b/gxp-common-platform.c index e7d5a27..42c4195 100644 --- a/gxp-common-platform.c +++ b/gxp-common-platform.c @@ -1164,11 +1164,8 @@ static int gxp_acquire_wake_lock(struct gxp_client *client, /* Acquire a BLOCK wakelock if requested */ if (ibuf.components_to_wake & WAKELOCK_BLOCK) { - power_states.power = aur_state_array[ibuf.gxp_power_state]; - power_states.memory = aur_memory_state_array[ibuf.memory_power_state]; - power_states.low_clkmux = requested_low_clkmux; ret = gxp_client_acquire_block_wakelock( - client, &acquired_block_wakelock, power_states); + client, &acquired_block_wakelock); if (ret) { dev_err(gxp->dev, "Failed to acquire BLOCK wakelock for client (ret=%d)\n", @@ -1179,7 +1176,10 @@ static int gxp_acquire_wake_lock(struct gxp_client *client, /* Acquire a VIRTUAL_DEVICE wakelock if requested */ if (ibuf.components_to_wake & WAKELOCK_VIRTUAL_DEVICE) { - ret = gxp_client_acquire_vd_wakelock(client); + power_states.power = aur_state_array[ibuf.gxp_power_state]; + power_states.memory = aur_memory_state_array[ibuf.memory_power_state]; + power_states.low_clkmux = requested_low_clkmux; + ret = gxp_client_acquire_vd_wakelock(client, power_states); if (ret) { dev_err(gxp->dev, "Failed to acquire VIRTUAL_DEVICE wakelock for client (ret=%d)\n", diff --git a/gxp-config.h b/gxp-config.h index 2f50bd8..55b6d4b 100644 --- a/gxp-config.h +++ b/gxp-config.h @@ -34,6 +34,10 @@ #define GXP_USE_LEGACY_MAILBOX 0 #endif +#ifndef GXP_HAS_LAP +#define GXP_HAS_LAP 1 +#endif + /* LPM address 
space starts at lpm_version register */ #define GXP_LPM_BASE GXP_REG_LPM_VERSION #define GXP_LPM_PSM_0_BASE GXP_REG_LPM_PSM_0 diff --git a/gxp-core-telemetry.c b/gxp-core-telemetry.c index 0572648..48e333a 100644 --- a/gxp-core-telemetry.c +++ b/gxp-core-telemetry.c @@ -25,12 +25,8 @@ static inline bool is_telemetry_enabled(struct gxp_dev *gxp, uint core, u8 type) return device_status & GXP_CORE_TELEMETRY_DEVICE_STATUS_ENABLED; } -static void telemetry_status_notification_work(struct work_struct *work) +void gxp_core_telemetry_status_notify(struct gxp_dev *gxp, uint core) { - struct gxp_core_telemetry_work *telem_work = - container_of(work, struct gxp_core_telemetry_work, work); - struct gxp_dev *gxp = telem_work->gxp; - uint core = telem_work->core; struct gxp_core_telemetry_manager *mgr = gxp->core_telemetry_mgr; /* Wake any threads waiting on a core telemetry disable ACK */ @@ -50,6 +46,16 @@ static void telemetry_status_notification_work(struct work_struct *work) mutex_unlock(&mgr->lock); } +static void telemetry_status_notification_work(struct work_struct *work) +{ + struct gxp_core_telemetry_work *telem_work = + container_of(work, struct gxp_core_telemetry_work, work); + struct gxp_dev *gxp = telem_work->gxp; + uint core = telem_work->core; + + gxp_core_telemetry_status_notify(gxp, core); +} + int gxp_core_telemetry_init(struct gxp_dev *gxp) { struct gxp_core_telemetry_manager *mgr; diff --git a/gxp-core-telemetry.h b/gxp-core-telemetry.h index c512292..432be71 100644 --- a/gxp-core-telemetry.h +++ b/gxp-core-telemetry.h @@ -135,4 +135,13 @@ int gxp_core_telemetry_unregister_eventfd(struct gxp_dev *gxp, u8 type); struct work_struct * gxp_core_telemetry_get_notification_handler(struct gxp_dev *gxp, uint core); +/** + * gxp_core_telemetry_status_notify() - Checks the telemetry status of the + * specified core and signals the eventfd. 
+ * @gxp: The GXP device to obtain the handler for + * @core: The physical core number to obtain the handler + * + */ +void gxp_core_telemetry_status_notify(struct gxp_dev *gxp, uint core); + #endif /* __GXP_CORE_TELEMETRY_H__ */ diff --git a/gxp-dma-iommu.c b/gxp-dma-iommu.c index 3ce768f..c87e032 100644 --- a/gxp-dma-iommu.c +++ b/gxp-dma-iommu.c @@ -125,6 +125,48 @@ static int sysmmu_fault_handler(struct iommu_fault *fault, void *token) return -EAGAIN; } +#if GXP_HAS_LAP + +/* No need to map CSRs when local access path exists. */ + +#define gxp_map_csrs(...) 0 +#define gxp_unmap_csrs(...) + +#else /* !GXP_HAS_LAP */ + +#define SYNC_BARRIERS_SIZE 0x100000 + +static int gxp_map_csrs(struct gxp_dev *gxp, struct iommu_domain *domain, + struct gxp_mapped_resource *regs) +{ + int ret = iommu_map(domain, GXP_IOVA_AURORA_TOP, gxp->regs.paddr, + gxp->regs.size, IOMMU_READ | IOMMU_WRITE); + if (ret) + return ret; + /* + * Firmware expects to access the sync barriers at a separate + * address, lower than the rest of the AURORA_TOP registers. + */ + ret = iommu_map(domain, GXP_IOVA_SYNC_BARRIERS, + gxp->regs.paddr + GXP_IOVA_SYNC_BARRIERS, + SYNC_BARRIERS_SIZE, IOMMU_READ | IOMMU_WRITE); + if (ret) { + iommu_unmap(domain, GXP_IOVA_AURORA_TOP, gxp->regs.size); + return ret; + } + + return 0; +} + +static void gxp_unmap_csrs(struct gxp_dev *gxp, struct iommu_domain *domain, + struct gxp_mapped_resource *regs) +{ + iommu_unmap(domain, GXP_IOVA_SYNC_BARRIERS, SYNC_BARRIERS_SIZE); + iommu_unmap(domain, GXP_IOVA_AURORA_TOP, gxp->regs.size); +} + +#endif /* GXP_HAS_LAP */ + /* Maps the shared buffer region to @domain. 
*/ static int gxp_map_core_shared_buffer(struct gxp_dev *gxp, struct iommu_domain *domain, @@ -244,8 +286,6 @@ void gxp_dma_exit(struct gxp_dev *gxp) "Failed to unregister SysMMU fault handler\n"); } -#define SYNC_BARRIERS_SIZE 0x100000 -#define SYNC_BARRIERS_TOP_OFFSET 0x100000 #define EXT_TPU_MBX_SIZE 0x2000 void gxp_dma_init_default_resources(struct gxp_dev *gxp) @@ -257,7 +297,6 @@ void gxp_dma_init_default_resources(struct gxp_dev *gxp) gxp->mbx[i].daddr = GXP_IOVA_MAILBOX(i); for (core = 0; core < GXP_NUM_CORES; core++) gxp->fwbufs[core].daddr = GXP_IOVA_FIRMWARE(core); - gxp->regs.daddr = GXP_IOVA_AURORA_TOP; gxp->fwdatabuf.daddr = GXP_IOVA_FW_DATA; } @@ -289,19 +328,10 @@ int gxp_dma_map_core_resources(struct gxp_dev *gxp, uint i; struct iommu_domain *domain = gdomain->domain; - ret = iommu_map(domain, gxp->regs.daddr, gxp->regs.paddr, - gxp->regs.size, IOMMU_READ | IOMMU_WRITE); - if (ret) - goto err; - /* - * Firmware expects to access the sync barriers at a separate - * address, lower than the rest of the AURORA_TOP registers. 
- */ - ret = iommu_map(domain, GXP_IOVA_SYNC_BARRIERS, - gxp->regs.paddr + SYNC_BARRIERS_TOP_OFFSET, - SYNC_BARRIERS_SIZE, IOMMU_READ | IOMMU_WRITE); + ret = gxp_map_csrs(gxp, domain, &gxp->regs); if (ret) goto err; + for (i = 0; i < GXP_NUM_CORES; i++) { if (!(BIT(i) & core_list)) continue; @@ -385,8 +415,7 @@ void gxp_dma_unmap_core_resources(struct gxp_dev *gxp, continue; iommu_unmap(domain, gxp->mbx[i].daddr, gxp->mbx[i].size); } - iommu_unmap(domain, GXP_IOVA_SYNC_BARRIERS, SYNC_BARRIERS_SIZE); - iommu_unmap(domain, gxp->regs.daddr, gxp->regs.size); + gxp_unmap_csrs(gxp, domain, &gxp->regs); } static inline struct sg_table *alloc_sgt_for_buffer(void *ptr, size_t size, @@ -13,6 +13,7 @@ #include <gcip/gcip-telemetry.h> #include "gxp-config.h" +#include "gxp-core-telemetry.h" #include "gxp-dma.h" #include "gxp-kci.h" #include "gxp-lpm.h" @@ -92,7 +93,23 @@ gxp_reverse_kci_handle_response(struct gcip_kci *kci, if (resp->code <= GCIP_RKCI_CHIP_CODE_LAST) { /* TODO(b/239638427): Handle reverse kci */ - dev_dbg(gxp->dev, "Reverse KCI received: %#x", resp->code); + switch (resp->code) { + case GXP_RKCI_CODE_CORE_TELEMETRY_READ: { + uint core; + uint core_list = (uint)(resp->status); + + for (core = 0; core < GXP_NUM_CORES; core++) { + if (BIT(core) & core_list) { + gxp_core_telemetry_status_notify(gxp, + core); + } + } + break; + } + default: + dev_dbg(gxp->dev, "Reverse KCI received: %#x", + resp->code); + } return; } @@ -46,6 +46,13 @@ */ #define KCI_ALLOCATE_VMBOX_OFFLOAD_TYPE_TPU 0 +/* + * Chip specific reverse KCI request codes. + */ +enum gxp_reverse_rkci_code { + GXP_RKCI_CODE_CORE_TELEMETRY_READ = GCIP_RKCI_CHIP_CODE_FIRST + 4, +}; + struct gxp_mcu; struct gxp_kci { @@ -248,8 +248,9 @@ struct gxp_virtual_device *gxp_vd_allocate(struct gxp_dev *gxp, goto error_unassign_cores; } } + /* TODO(b/255706432): Adopt vd->slice_index after the firmware supports this. 
*/ err = gxp_dma_map_core_resources(gxp, vd->domain, vd->core_list, - vd->slice_index); + /*slice_index=*/0); if (err) goto error_destroy_fw_data; err = map_core_telemetry_buffers(gxp, vd, vd->core_list); @@ -764,14 +764,15 @@ struct gxp_acquire_wakelock_ioctl { * Acquire a wakelock and request minimum power states for the DSP subsystem * and the memory interface. * - * Upon a successful return, the specified components will be powered on and if - * they were not already running at the specified or higher power states, - * requests will have been sent to transition both the DSP subsystem and - * memory interface to the specified states. + * Upon a successful return, the specified components will be powered on. + * If the specified components contain VIRTUAL_DEVICE, and they were not + * already running at the specified or higher power states, requests will + * have been sent to transition both the DSP subsystem and memory interface + * to the specified states. * * If the same client invokes this IOCTL for the same component more than once * without a corresponding call to `GXP_RELEASE_WAKE_LOCK` in between, the - * second call will update requested power states, but have no other effects. + * second call may update requested power states, but have no other effects. * No additional call to `GXP_RELEASE_WAKE_LOCK` will be required. * * If a client attempts to acquire a VIRTUAL_DEVICE wakelock and there are |