Diffstat (limited to 'drivers/net/ethernet/sfc/rx.c')
 drivers/net/ethernet/sfc/rx.c | 25 +++++++++++++++----------
 1 file changed, 15 insertions(+), 10 deletions(-)
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index d780a0d096b..879ff5849bb 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -90,11 +90,7 @@ static unsigned int rx_refill_threshold;
 static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
                                              struct efx_rx_buffer *buf)
 {
-        /* Offset is always within one page, so we don't need to consider
-         * the page order.
-         */
-        return ((unsigned int) buf->dma_addr & (PAGE_SIZE - 1)) +
-                efx->type->rx_buffer_hash_size;
+        return buf->page_offset + efx->type->rx_buffer_hash_size;
 }
 static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
 {
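The hunk above stops deriving the buffer's offset within its page from the low bits of the DMA address and instead reads an explicit page_offset field recorded when the buffer is set up. The low-bits trick is only safe when the DMA mapping preserves the CPU page offset; with a bounce buffer (e.g. swiotlb) the mapped address can sit at a different offset. A minimal stand-alone sketch of that difference follows; the types, addresses and the bounce-buffer scenario are illustrative assumptions, not taken from this patch.

/* Hypothetical, self-contained illustration (not driver code). */
#include <stdio.h>

#define PAGE_SIZE 4096UL
typedef unsigned long dma_addr_t;

struct rx_buf {
        dma_addr_t dma_addr;       /* address the device DMAs into            */
        unsigned int page_offset;  /* offset recorded when the buffer was set up */
};

int main(void)
{
        /* Assume the packet really sits at offset 64 in its page, but the
         * DMA mapping went through a bounce buffer at page offset 1088. */
        struct rx_buf buf = { .dma_addr = 0x12340440UL, .page_offset = 64 };

        /* Old approach: derive the offset from the DMA address. */
        printf("from dma_addr:    %lu\n", buf.dma_addr & (PAGE_SIZE - 1)); /* 1088 */
        /* New approach: use the recorded offset. */
        printf("from page_offset: %u\n", buf.page_offset);                 /* 64 */
        return 0;
}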
@@ -187,6 +183,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
         struct efx_nic *efx = rx_queue->efx;
         struct efx_rx_buffer *rx_buf;
         struct page *page;
+        unsigned int page_offset;
         struct efx_rx_page_state *state;
         dma_addr_t dma_addr;
         unsigned index, count;
@@ -211,12 +208,14 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
                 state->dma_addr = dma_addr;
 
                 dma_addr += sizeof(struct efx_rx_page_state);
+                page_offset = sizeof(struct efx_rx_page_state);
 
         split:
                 index = rx_queue->added_count & rx_queue->ptr_mask;
                 rx_buf = efx_rx_buffer(rx_queue, index);
                 rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
                 rx_buf->u.page = page;
+                rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
                 rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
                 rx_buf->flags = EFX_RX_BUF_PAGE;
                 ++rx_queue->added_count;
@@ -227,6 +226,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
                         /* Use the second half of the page */
                         get_page(page);
                         dma_addr += (PAGE_SIZE >> 1);
+                        page_offset += (PAGE_SIZE >> 1);
                         ++count;
                         goto split;
                 }
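The two hunks above keep page_offset in lockstep with dma_addr while a page is split into two RX buffers: both start just past the struct efx_rx_page_state header, the second buffer is pushed up by half a page, and EFX_PAGE_IP_ALIGN is added on top when the buffer is filled in. A worked example of that arithmetic, assuming a 16-byte header, 2-byte IP alignment and a 4 KiB page (the real values depend on the architecture and driver configuration):

/* Hypothetical numbers only; run it to see the two buffer offsets. */
#include <stdio.h>

#define PAGE_SIZE  4096u
#define STATE_SIZE   16u   /* stand-in for sizeof(struct efx_rx_page_state) */
#define IP_ALIGN      2u   /* stand-in for EFX_PAGE_IP_ALIGN */

int main(void)
{
        unsigned int page_offset = STATE_SIZE;

        printf("buffer 0 offset: %u\n", page_offset + IP_ALIGN);   /* 18   */
        page_offset += PAGE_SIZE >> 1;   /* second half of the page */
        printf("buffer 1 offset: %u\n", page_offset + IP_ALIGN);   /* 2066 */
        return 0;
}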
@@ -236,7 +236,8 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 }
 
 static void efx_unmap_rx_buffer(struct efx_nic *efx,
-                                struct efx_rx_buffer *rx_buf)
+                                struct efx_rx_buffer *rx_buf,
+                                unsigned int used_len)
 {
         if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
                 struct efx_rx_page_state *state;
@@ -247,6 +248,10 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
                                        state->dma_addr,
                                        efx_rx_buf_size(efx),
                                        DMA_FROM_DEVICE);
+                } else if (used_len) {
+                        dma_sync_single_for_cpu(&efx->pci_dev->dev,
+                                                rx_buf->dma_addr, used_len,
+                                                DMA_FROM_DEVICE);
                 }
         } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
                 dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr,
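The hunk above is the core of the fix: when the page is still shared with another RX buffer the mapping cannot be torn down yet, so the bytes that were actually used are made visible to the CPU with dma_sync_single_for_cpu() instead. A hedged sketch of that general streaming-DMA pattern follows; it is not the driver's code, and the helper name and parameters are illustrative:

#include <linux/types.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Illustrative helper, not part of the sfc driver. */
static void rx_dma_complete(struct device *dev, dma_addr_t dma_addr,
                            size_t map_len, size_t used_len, bool shared)
{
        if (!shared)
                /* Last user of the mapping: tear it down completely. */
                dma_unmap_single(dev, dma_addr, map_len, DMA_FROM_DEVICE);
        else if (used_len)
                /* Mapping must stay live for the buffer sharing the page,
                 * so only sync the bytes the device actually wrote. */
                dma_sync_single_for_cpu(dev, dma_addr, used_len,
                                        DMA_FROM_DEVICE);
}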
@@ -269,7 +274,7 @@ static void efx_free_rx_buffer(struct efx_nic *efx,
 static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
                                struct efx_rx_buffer *rx_buf)
 {
-        efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
+        efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0);
         efx_free_rx_buffer(rx_queue->efx, rx_buf);
 }
 
@@ -535,10 +540,10 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
                 goto out;
         }
 
-        /* Release card resources - assumes all RX buffers consumed in-order
-         * per RX queue
+        /* Release and/or sync DMA mapping - assumes all RX buffers
+         * consumed in-order per RX queue
          */
-        efx_unmap_rx_buffer(efx, rx_buf);
+        efx_unmap_rx_buffer(efx, rx_buf, len);
 
         /* Prefetch nice and early so data will (hopefully) be in cache by
          * the time we look at it.