Mirror of https://github.com/raspberrypi/linux.git
net: macb: remove illusion about TBQPH/RBQPH being per-queue
The MACB driver acts as if TBQPH/RBQPH were configurable on a per-queue basis; this is a lie. A single register configures the upper 32 bits of the DMA descriptor buffers for all queues.

Concrete actions:
- Drop the GEM_TBQPH/GEM_RBQPH macros, which take a queue index argument. Only use the MACB_TBQPH/MACB_RBQPH constants.
- Drop the struct macb_queue->TBQPH/RBQPH fields.
- In macb_init_buffers(): do a single write to TBQPH and RBQPH for all queues instead of a write per queue.
- In macb_tx_error_task(): drop the write to TBQPH.
- In macb_alloc_consistent(): if allocations give different upper 32 bits, fail. Previously, this would have led to silent memory corruption, as queues would have used the upper 32 bits of the queue 0 allocation combined with their own low 32 bits.
- In macb_suspend(): if we use the tie-off descriptor for suspend, do the write once for all queues instead of once per queue.

Fixes: fff8019a08 ("net: macb: Add 64 bit addressing support for GEM")
Fixes: ae1f2a56d2 ("net: macb: Added support for many RX queues")
Reviewed-by: Sean Anderson <sean.anderson@linux.dev>
Acked-by: Nicolas Ferre <nicolas.ferre@microchip.com>
Signed-off-by: Théo Lebrun <theo.lebrun@bootlin.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Link: https://patch.msgid.link/20250923-macb-fixes-v6-2-772d655cdeb6@bootlin.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Commit: fca3dc859b
Parent: 9665aa15ef
Committed by: Jakub Kicinski
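For context on the macb_alloc_consistent() change below: since a single TBQPH/RBQPH register serves every queue, all per-queue ring allocations must share queue 0's upper 32 bits, or the addresses programmed into the hardware are wrong. The following is a minimal, standalone sketch of that consistency check, not driver code; the helper names (upper_32, rings_share_upper_bits) and the main() harness are made up for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Upper 32 bits of a 64-bit DMA address, i.e. what the single
 * TBQPH/RBQPH register would be programmed with. */
static uint32_t upper_32(uint64_t addr)
{
	return (uint32_t)(addr >> 32);
}

/* Accept the ring layout only if every queue shares queue 0's upper
 * 32 bits; otherwise the shared high-bits register would point some
 * queues into the wrong 4 GiB window (silent corruption). */
static bool rings_share_upper_bits(const uint64_t *ring_dma, unsigned int nqueues)
{
	for (unsigned int q = 1; q < nqueues; q++)
		if (upper_32(ring_dma[q]) != upper_32(ring_dma[0]))
			return false;
	return true;
}

int main(void)
{
	uint64_t ok[]  = { 0x100000000ULL, 0x100010000ULL }; /* same upper bits */
	uint64_t bad[] = { 0x100000000ULL, 0x200000000ULL }; /* mismatched upper bits */

	printf("ok:  %d\n", rings_share_upper_bits(ok, 2));  /* prints 1 */
	printf("bad: %d\n", rings_share_upper_bits(bad, 2)); /* prints 0 */
	return 0;
}

In the driver itself the equivalent comparison is done per allocation against bp->queues[0], failing the whole allocation path instead of returning a boolean.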
drivers/net/ethernet/cadence/macb.h
@@ -213,10 +213,8 @@
 
 #define GEM_ISR(hw_q)		(0x0400 + ((hw_q) << 2))
 #define GEM_TBQP(hw_q)		(0x0440 + ((hw_q) << 2))
-#define GEM_TBQPH(hw_q)		(0x04C8)
 #define GEM_RBQP(hw_q)		(0x0480 + ((hw_q) << 2))
 #define GEM_RBQS(hw_q)		(0x04A0 + ((hw_q) << 2))
-#define GEM_RBQPH(hw_q)		(0x04D4)
 #define GEM_IER(hw_q)		(0x0600 + ((hw_q) << 2))
 #define GEM_IDR(hw_q)		(0x0620 + ((hw_q) << 2))
 #define GEM_IMR(hw_q)		(0x0640 + ((hw_q) << 2))
@@ -1214,10 +1212,8 @@ struct macb_queue {
 	unsigned int		IDR;
 	unsigned int		IMR;
 	unsigned int		TBQP;
-	unsigned int		TBQPH;
 	unsigned int		RBQS;
 	unsigned int		RBQP;
-	unsigned int		RBQPH;
 
 	/* Lock to protect tx_head and tx_tail */
 	spinlock_t		tx_ptr_lock;
drivers/net/ethernet/cadence/macb_main.c
@@ -495,19 +495,19 @@ static void macb_init_buffers(struct macb *bp)
 	struct macb_queue *queue;
 	unsigned int q;
 
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+	/* Single register for all queues' high 32 bits. */
+	if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
+		macb_writel(bp, RBQPH,
+			    upper_32_bits(bp->queues[0].rx_ring_dma));
+		macb_writel(bp, TBQPH,
+			    upper_32_bits(bp->queues[0].tx_ring_dma));
+	}
+#endif
+
 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
 		queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
-			queue_writel(queue, RBQPH,
-				     upper_32_bits(queue->rx_ring_dma));
-#endif
 		queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
-			queue_writel(queue, TBQPH,
-				     upper_32_bits(queue->tx_ring_dma));
-#endif
 	}
 }
 
@@ -1166,10 +1166,6 @@ static void macb_tx_error_task(struct work_struct *work)
 
 	/* Reinitialize the TX desc queue */
 	queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-	if (bp->hw_dma_cap & HW_DMA_CAP_64B)
-		queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
-#endif
 	/* Make TX ring reflect state of hardware */
 	queue->tx_head = 0;
 	queue->tx_tail = 0;
@@ -2546,6 +2542,7 @@ static int macb_alloc_consistent(struct macb *bp)
 {
 	struct macb_queue *queue;
 	unsigned int q;
+	u32 upper;
 	int size;
 
 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
@@ -2553,7 +2550,9 @@ static int macb_alloc_consistent(struct macb *bp)
 		queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
 						    &queue->tx_ring_dma,
 						    GFP_KERNEL);
-		if (!queue->tx_ring)
+		upper = upper_32_bits(queue->tx_ring_dma);
+		if (!queue->tx_ring ||
+		    upper != upper_32_bits(bp->queues[0].tx_ring_dma))
 			goto out_err;
 		netdev_dbg(bp->dev,
 			   "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
@@ -2567,8 +2566,11 @@ static int macb_alloc_consistent(struct macb *bp)
 
 		size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
 		queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
-						    &queue->rx_ring_dma, GFP_KERNEL);
-		if (!queue->rx_ring)
+						    &queue->rx_ring_dma,
+						    GFP_KERNEL);
+		upper = upper_32_bits(queue->rx_ring_dma);
+		if (!queue->rx_ring ||
+		    upper != upper_32_bits(bp->queues[0].rx_ring_dma))
 			goto out_err;
 		netdev_dbg(bp->dev,
 			   "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
@@ -4309,12 +4311,6 @@ static int macb_init(struct platform_device *pdev)
 			queue->TBQP = GEM_TBQP(hw_q - 1);
 			queue->RBQP = GEM_RBQP(hw_q - 1);
 			queue->RBQS = GEM_RBQS(hw_q - 1);
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-			if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
-				queue->TBQPH = GEM_TBQPH(hw_q - 1);
-				queue->RBQPH = GEM_RBQPH(hw_q - 1);
-			}
-#endif
 		} else {
 			/* queue0 uses legacy registers */
 			queue->ISR = MACB_ISR;
@@ -4323,12 +4319,6 @@ static int macb_init(struct platform_device *pdev)
 			queue->IMR = MACB_IMR;
 			queue->TBQP = MACB_TBQP;
 			queue->RBQP = MACB_RBQP;
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-			if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
-				queue->TBQPH = MACB_TBQPH;
-				queue->RBQPH = MACB_RBQPH;
-			}
-#endif
 		}
 
 		/* get irq: here we use the linux queue index, not the hardware
@@ -5452,6 +5442,11 @@ static int __maybe_unused macb_suspend(struct device *dev)
 		 */
 		tmp = macb_readl(bp, NCR);
 		macb_writel(bp, NCR, tmp & ~(MACB_BIT(TE) | MACB_BIT(RE)));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+		if (!(bp->caps & MACB_CAPS_QUEUE_DISABLE))
+			macb_writel(bp, RBQPH,
+				    upper_32_bits(bp->rx_ring_tieoff_dma));
+#endif
 		for (q = 0, queue = bp->queues; q < bp->num_queues;
 		     ++q, ++queue) {
 			/* Disable RX queues */
@@ -5461,10 +5456,6 @@ static int __maybe_unused macb_suspend(struct device *dev)
 			/* Tie off RX queues */
 			queue_writel(queue, RBQP,
 				     lower_32_bits(bp->rx_ring_tieoff_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-			queue_writel(queue, RBQPH,
-				     upper_32_bits(bp->rx_ring_tieoff_dma));
-#endif
 		}
 		/* Disable all interrupts */
 		queue_writel(queue, IDR, -1);