author    Ben Hutchings  2012-12-20 12:48:20 -0600
committer Ben Hutchings  2013-02-26 08:55:49 -0600
commit    3a68f19d7afb80f548d016effbc6ed52643a8085 (patch)
tree      0d087eac21de23332dddb6b27406d9c9919b0bc1
parent    eb970ff07c15f13eb474f643fd165ebe3e4e24b2 (diff)
sfc: Properly sync RX DMA buffer when it is not the last in the page
We may currently allocate two RX DMA buffers to a page, and only unmap
the page when the second is completed.  We do not sync the first RX
buffer to be completed; this can result in packet loss or corruption
if the last RX buffer completed in a NAPI poll is the first in a page
and is not DMA-coherent.  (In the middle of a NAPI poll, we will
handle the following RX completion and unmap the page *before*
looking at the content of the first buffer.)

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
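For illustration, a minimal sketch of the unmap-or-sync pattern this change adopts, assuming hypothetical names (my_page_state, my_rx_buf, my_unmap_or_sync) rather than the driver's real structures:

#include <linux/dma-mapping.h>

/* Hypothetical per-page state: several RX buffers may share one page. */
struct my_page_state {
	unsigned int refcnt;	/* RX buffers still sharing this page */
	dma_addr_t dma_addr;	/* DMA address of the whole page */
};

/* Hypothetical RX buffer descriptor. */
struct my_rx_buf {
	dma_addr_t dma_addr;		/* DMA address of this buffer's region */
	struct my_page_state *state;	/* page this buffer lives in */
};

static void my_unmap_or_sync(struct device *dev, struct my_rx_buf *buf,
			     size_t mapped_len, unsigned int used_len)
{
	if (--buf->state->refcnt == 0) {
		/* Last buffer in the page: the whole mapping can be torn down. */
		dma_unmap_page(dev, buf->state->dma_addr, mapped_len,
			       DMA_FROM_DEVICE);
	} else if (used_len) {
		/* Page still shared with an outstanding buffer: only sync the
		 * bytes the device actually wrote so the CPU sees fresh data.
		 */
		dma_sync_single_for_cpu(dev, buf->dma_addr, used_len,
					DMA_FROM_DEVICE);
	}
}

Passing used_len == 0 (as efx_fini_rx_buffer() does in the diff below) skips the sync when the buffer contents will never be read.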
-rw-r--r--  drivers/net/ethernet/sfc/rx.c   15
1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index d780a0d096b..a5754916478 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -236,7 +236,8 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 }
 
 static void efx_unmap_rx_buffer(struct efx_nic *efx,
-				struct efx_rx_buffer *rx_buf)
+				struct efx_rx_buffer *rx_buf,
+				unsigned int used_len)
 {
 	if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
 		struct efx_rx_page_state *state;
@@ -247,6 +248,10 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
 				       state->dma_addr,
 				       efx_rx_buf_size(efx),
 				       DMA_FROM_DEVICE);
+		} else if (used_len) {
+			dma_sync_single_for_cpu(&efx->pci_dev->dev,
+						rx_buf->dma_addr, used_len,
+						DMA_FROM_DEVICE);
 		}
 	} else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
 		dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr,
@@ -269,7 +274,7 @@ static void efx_free_rx_buffer(struct efx_nic *efx,
 static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
 			       struct efx_rx_buffer *rx_buf)
 {
-	efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
+	efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0);
 	efx_free_rx_buffer(rx_queue->efx, rx_buf);
 }
 
@@ -535,10 +540,10 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
 		goto out;
 	}
 
-	/* Release card resources - assumes all RX buffers consumed in-order
-	 * per RX queue
+	/* Release and/or sync DMA mapping - assumes all RX buffers
+	 * consumed in-order per RX queue
 	 */
-	efx_unmap_rx_buffer(efx, rx_buf);
+	efx_unmap_rx_buffer(efx, rx_buf, len);
 
 	/* Prefetch nice and early so data will (hopefully) be in cache by
 	 * the time we look at it.