author		Greg Kroah-Hartman <gregkh@google.com>	2024-04-17 15:55:09 +0200
committer	Greg Kroah-Hartman <gregkh@google.com>	2024-04-19 08:24:21 +0200
commit		8d3bb9cb3b7d4ec0fcee785c3986ee7da4e8092b
tree		5d116e881949e5bf8cfe9295fde2461b03189098
parent		c449d2a618efd20b33191781eba453b86a6fedb2
parent		f726690397df8ebfe2242192e30c4f61eebde06b
Merge f726690397df ("arm64: dts: qcom: sdm845: fix USB DP/DM HS PHY interrupts") into android13-5.10-lts
Steps on the way to 5.10.210

Change-Id: I79784c14ae0a4c8258eafb39de747de46aa12a79
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
-rw-r--r--	arch/arm64/boot/dts/qcom/sdm845.dtsi	16
-rw-r--r--	arch/parisc/kernel/firmware.c		4
-rw-r--r--	drivers/bus/mhi/host/main.c		4
-rw-r--r--	include/linux/async.h			2
-rw-r--r--	kernel/async.c				85
5 files changed, 79 insertions(+), 32 deletions(-)
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 5c696ebf5c20..e3c6b05869e7 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -3565,10 +3565,10 @@
<&gcc GCC_USB30_PRIM_MASTER_CLK>;
assigned-clock-rates = <19200000>, <150000000>;
- interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 486 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 488 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 489 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts-extended = <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 486 IRQ_TYPE_LEVEL_HIGH>,
+ <&pdc_intc 8 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc_intc 9 IRQ_TYPE_EDGE_BOTH>;
interrupt-names = "hs_phy_irq", "ss_phy_irq",
"dm_hs_phy_irq", "dp_hs_phy_irq";
@@ -3613,10 +3613,10 @@
<&gcc GCC_USB30_SEC_MASTER_CLK>;
assigned-clock-rates = <19200000>, <150000000>;
- interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 487 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 490 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 491 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts-extended = <&intc GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 487 IRQ_TYPE_LEVEL_HIGH>,
+ <&pdc_intc 10 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc_intc 11 IRQ_TYPE_EDGE_BOTH>;
interrupt-names = "hs_phy_irq", "ss_phy_irq",
"dm_hs_phy_irq", "dp_hs_phy_irq";
diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c
index 7ed28ddcaba7..25050b0ab6fd 100644
--- a/arch/parisc/kernel/firmware.c
+++ b/arch/parisc/kernel/firmware.c
@@ -123,10 +123,10 @@ static unsigned long f_extend(unsigned long address)
#ifdef CONFIG_64BIT
if(unlikely(parisc_narrow_firmware)) {
if((address & 0xff000000) == 0xf0000000)
- return 0xf0f0f0f000000000UL | (u32)address;
+ return (0xfffffff0UL << 32) | (u32)address;
if((address & 0xf0000000) == 0xf0000000)
- return 0xffffffff00000000UL | (u32)address;
+ return (0xffffffffUL << 32) | (u32)address;
}
#endif
return address;
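
For orientation only, not part of the patch: with narrow (32-bit) firmware, PDC addresses in the 0xf0000000 range have to be extended into the 64-bit firmware space with an 0xfffffff0 upper word, which the old 0xf0f0f0f0 constant did not produce. A standalone sketch of the corrected mapping, with the parisc_narrow_firmware guard omitted and names chosen for illustration:

/*
 * Illustrative, standalone re-implementation of the corrected f_extend()
 * logic above; not part of the patch.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t f_extend_example(uint64_t address)
{
	/* Narrow-firmware PDC addresses: extend into the 64-bit I/O space. */
	if ((address & 0xff000000) == 0xf0000000)
		return (0xfffffff0ULL << 32) | (uint32_t)address;
	if ((address & 0xf0000000) == 0xf0000000)
		return (0xffffffffULL << 32) | (uint32_t)address;
	return address;
}

int main(void)
{
	/* 0xf0001234 extends to 0xfffffff0f0001234 with the new constant. */
	printf("%#" PRIx64 "\n", f_extend_example(0xf0001234));
	return 0;
}

Running this prints 0xfffffff0f0001234 for the first case; the old 0xf0f0f0f000000000UL constant would have produced 0xf0f0f0f0f0001234 instead.
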
diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c
index 614dd287cb4f..49c0f5ad0b73 100644
--- a/drivers/bus/mhi/host/main.c
+++ b/drivers/bus/mhi/host/main.c
@@ -569,6 +569,8 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
mhi_del_ring_element(mhi_cntrl, tre_ring);
local_rp = tre_ring->rp;
+ read_unlock_bh(&mhi_chan->lock);
+
/* notify client */
mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
@@ -591,6 +593,8 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
kfree(buf_info->cb_buf);
}
}
+
+ read_lock_bh(&mhi_chan->lock);
}
break;
} /* CC_EOT */
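
For orientation only, not part of the patch: the two added lines release mhi_chan->lock before the client xfer_cb runs and take it again afterwards, so the callback is never invoked with the channel lock held. A hedged sketch of that pattern with hypothetical names (demo_chan, demo_handle_event), not real MHI code:

#include <linux/spinlock.h>

struct demo_chan {
	rwlock_t lock;			/* stands in for mhi_chan->lock */
	void (*xfer_cb)(void *arg);	/* client callback, may take other locks */
	void *cb_arg;
};

static void demo_handle_event(struct demo_chan *chan)
{
	read_lock_bh(&chan->lock);
	/* ... ring bookkeeping done under the channel lock ... */
	read_unlock_bh(&chan->lock);	/* drop the lock before calling out */

	chan->xfer_cb(chan->cb_arg);	/* client callback runs unlocked */

	read_lock_bh(&chan->lock);	/* re-acquire for the remaining cleanup */
	/* ... free consumed buffers, advance ring pointers ... */
	read_unlock_bh(&chan->lock);
}
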
diff --git a/include/linux/async.h b/include/linux/async.h
index 0a17cd27f348..d5496a520a38 100644
--- a/include/linux/async.h
+++ b/include/linux/async.h
@@ -90,6 +90,8 @@ async_schedule_dev(async_func_t func, struct device *dev)
return async_schedule_node(func, dev, dev_to_node(dev));
}
+bool async_schedule_dev_nocall(async_func_t func, struct device *dev);
+
/**
* async_schedule_dev_domain - A device specific version of async_schedule_domain
* @func: function to execute asynchronously
diff --git a/kernel/async.c b/kernel/async.c
index 1746cd65e271..5dba7461fc75 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -145,6 +145,39 @@ static void async_run_entry_fn(struct work_struct *work)
wake_up(&async_done);
}
+static async_cookie_t __async_schedule_node_domain(async_func_t func,
+ void *data, int node,
+ struct async_domain *domain,
+ struct async_entry *entry)
+{
+ async_cookie_t newcookie;
+ unsigned long flags;
+
+ INIT_LIST_HEAD(&entry->domain_list);
+ INIT_LIST_HEAD(&entry->global_list);
+ INIT_WORK(&entry->work, async_run_entry_fn);
+ entry->func = func;
+ entry->data = data;
+ entry->domain = domain;
+
+ spin_lock_irqsave(&async_lock, flags);
+
+ /* allocate cookie and queue */
+ newcookie = entry->cookie = next_cookie++;
+
+ list_add_tail(&entry->domain_list, &domain->pending);
+ if (domain->registered)
+ list_add_tail(&entry->global_list, &async_global_pending);
+
+ atomic_inc(&entry_count);
+ spin_unlock_irqrestore(&async_lock, flags);
+
+ /* schedule for execution */
+ queue_work_node(node, system_unbound_wq, &entry->work);
+
+ return newcookie;
+}
+
/**
* async_schedule_node_domain - NUMA specific version of async_schedule_domain
* @func: function to execute asynchronously
@@ -186,29 +219,8 @@ async_cookie_t async_schedule_node_domain(async_func_t func, void *data,
func(data, newcookie);
return newcookie;
}
- INIT_LIST_HEAD(&entry->domain_list);
- INIT_LIST_HEAD(&entry->global_list);
- INIT_WORK(&entry->work, async_run_entry_fn);
- entry->func = func;
- entry->data = data;
- entry->domain = domain;
-
- spin_lock_irqsave(&async_lock, flags);
-
- /* allocate cookie and queue */
- newcookie = entry->cookie = next_cookie++;
-
- list_add_tail(&entry->domain_list, &domain->pending);
- if (domain->registered)
- list_add_tail(&entry->global_list, &async_global_pending);
-
- atomic_inc(&entry_count);
- spin_unlock_irqrestore(&async_lock, flags);
-
- /* schedule for execution */
- queue_work_node(node, system_unbound_wq, &entry->work);
- return newcookie;
+ return __async_schedule_node_domain(func, data, node, domain, entry);
}
EXPORT_SYMBOL_GPL(async_schedule_node_domain);
@@ -232,6 +244,35 @@ async_cookie_t async_schedule_node(async_func_t func, void *data, int node)
EXPORT_SYMBOL_GPL(async_schedule_node);
/**
+ * async_schedule_dev_nocall - A simplified variant of async_schedule_dev()
+ * @func: function to execute asynchronously
+ * @dev: device argument to be passed to function
+ *
+ * @dev is used as both the argument for the function and to provide NUMA
+ * context for where to run the function.
+ *
+ * If the asynchronous execution of @func is scheduled successfully, return
+ * true. Otherwise, do nothing and return false, unlike async_schedule_dev()
+ * that will run the function synchronously then.
+ */
+bool async_schedule_dev_nocall(async_func_t func, struct device *dev)
+{
+ struct async_entry *entry;
+
+ entry = kzalloc(sizeof(struct async_entry), GFP_KERNEL);
+
+ /* Give up if there is no memory or too much work. */
+ if (!entry || atomic_read(&entry_count) > MAX_WORK) {
+ kfree(entry);
+ return false;
+ }
+
+ __async_schedule_node_domain(func, dev, dev_to_node(dev),
+ &async_dfl_domain, entry);
+ return true;
+}
+
+/**
* async_synchronize_full - synchronize all asynchronous function calls
*
* This function waits until all asynchronous function calls have been done.
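
For orientation only, not part of the patch: per the kernel-doc above, async_schedule_dev_nocall() returns false instead of falling back to a synchronous call, so the caller decides what to do when scheduling fails. A hypothetical caller sketch (demo_resume_fn and demo_resume are illustrative names):

#include <linux/async.h>
#include <linux/device.h>

/* Hypothetical async work item; the body is illustrative only. */
static void demo_resume_fn(void *data, async_cookie_t cookie)
{
	struct device *dev = data;

	dev_dbg(dev, "async work ran\n");
	/* ... device-specific work, e.g. a resume step ... */
}

static void demo_resume(struct device *dev)
{
	/*
	 * async_schedule_dev() runs the function synchronously when it
	 * cannot schedule it; the _nocall variant returns false instead,
	 * so the fallback below is the caller's own choice.
	 */
	if (!async_schedule_dev_nocall(demo_resume_fn, dev))
		demo_resume_fn(dev, 0);	/* run it directly as a fallback */
}
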