Diffstat (limited to 'drivers/net/ethernet/renesas/ravb_main.c')
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 149
1 file changed, 89 insertions(+), 60 deletions(-)
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 467d41698fd5..585e90f8341d 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -171,34 +171,67 @@ static struct mdiobb_ops bb_ops = {
 	.get_mdio_data = ravb_get_mdio_data,
 };
 
-/* Free skb's and DMA buffers for Ethernet AVB */
-static void ravb_ring_free(struct net_device *ndev, int q)
+/* Free TX skb function for AVB-IP */
+static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
-	int ring_size;
-	int i;
+	struct net_device_stats *stats = &priv->stats[q];
+	struct ravb_tx_desc *desc;
+	int free_num = 0;
+	int entry;
+	u32 size;
 
-	/* Free RX skb ringbuffer */
-	if (priv->rx_skb[q]) {
-		for (i = 0; i < priv->num_rx_ring[q]; i++)
-			dev_kfree_skb(priv->rx_skb[q][i]);
-	}
-	kfree(priv->rx_skb[q]);
-	priv->rx_skb[q] = NULL;
+	for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
+		bool txed;
 
-	/* Free TX skb ringbuffer */
-	if (priv->tx_skb[q]) {
-		for (i = 0; i < priv->num_tx_ring[q]; i++)
-			dev_kfree_skb(priv->tx_skb[q][i]);
+		entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
+					     NUM_TX_DESC);
+		desc = &priv->tx_ring[q][entry];
+		txed = desc->die_dt == DT_FEMPTY;
+		if (free_txed_only && !txed)
+			break;
+		/* Descriptor type must be checked before all other reads */
+		dma_rmb();
+		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
+		/* Free the original skb. */
+		if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
+			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
+					 size, DMA_TO_DEVICE);
+			/* Last packet descriptor? */
+			if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
+				entry /= NUM_TX_DESC;
+				dev_kfree_skb_any(priv->tx_skb[q][entry]);
+				priv->tx_skb[q][entry] = NULL;
+				if (txed)
+					stats->tx_packets++;
+			}
+			free_num++;
+		}
+		if (txed)
+			stats->tx_bytes += size;
+		desc->die_dt = DT_EEMPTY;
 	}
-	kfree(priv->tx_skb[q]);
-	priv->tx_skb[q] = NULL;
+	return free_num;
+}
 
-	/* Free aligned TX buffers */
-	kfree(priv->tx_align[q]);
-	priv->tx_align[q] = NULL;
+/* Free skb's and DMA buffers for Ethernet AVB */
+static void ravb_ring_free(struct net_device *ndev, int q)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	int ring_size;
+	int i;
 
 	if (priv->rx_ring[q]) {
+		for (i = 0; i < priv->num_rx_ring[q]; i++) {
+			struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
+
+			if (!dma_mapping_error(ndev->dev.parent,
+					       le32_to_cpu(desc->dptr)))
+				dma_unmap_single(ndev->dev.parent,
+						 le32_to_cpu(desc->dptr),
+						 PKT_BUF_SZ,
+						 DMA_FROM_DEVICE);
+		}
 		ring_size = sizeof(struct ravb_ex_rx_desc) *
 			    (priv->num_rx_ring[q] + 1);
 		dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
@@ -207,12 +240,32 @@ static void ravb_ring_free(struct net_device *ndev, int q)
 	}
 
 	if (priv->tx_ring[q]) {
+		ravb_tx_free(ndev, q, false);
+
 		ring_size = sizeof(struct ravb_tx_desc) *
 			    (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
 		dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
 				  priv->tx_desc_dma[q]);
 		priv->tx_ring[q] = NULL;
 	}
+
+	/* Free RX skb ringbuffer */
+	if (priv->rx_skb[q]) {
+		for (i = 0; i < priv->num_rx_ring[q]; i++)
+			dev_kfree_skb(priv->rx_skb[q][i]);
+	}
+	kfree(priv->rx_skb[q]);
+	priv->rx_skb[q] = NULL;
+
+	/* Free aligned TX buffers */
+	kfree(priv->tx_align[q]);
+	priv->tx_align[q] = NULL;
+
+	/* Free TX skb ringbuffer.
+	 * SKBs are freed by ravb_tx_free() call above.
+	 */
+	kfree(priv->tx_skb[q]);
+	priv->tx_skb[q] = NULL;
 }
 
 /* Format skb and descriptor buffer for Ethernet AVB */
@@ -420,44 +473,6 @@ static int ravb_dmac_init(struct net_device *ndev)
 	return 0;
 }
 
-/* Free TX skb function for AVB-IP */
-static int ravb_tx_free(struct net_device *ndev, int q)
-{
-	struct ravb_private *priv = netdev_priv(ndev);
-	struct net_device_stats *stats = &priv->stats[q];
-	struct ravb_tx_desc *desc;
-	int free_num = 0;
-	int entry;
-	u32 size;
-
-	for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
-		entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
-					     NUM_TX_DESC);
-		desc = &priv->tx_ring[q][entry];
-		if (desc->die_dt != DT_FEMPTY)
-			break;
-		/* Descriptor type must be checked before all other reads */
-		dma_rmb();
-		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
-		/* Free the original skb. */
-		if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
-			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
-					 size, DMA_TO_DEVICE);
-			/* Last packet descriptor? */
-			if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
-				entry /= NUM_TX_DESC;
-				dev_kfree_skb_any(priv->tx_skb[q][entry]);
-				priv->tx_skb[q][entry] = NULL;
-				stats->tx_packets++;
-			}
-			free_num++;
-		}
-		stats->tx_bytes += size;
-		desc->die_dt = DT_EEMPTY;
-	}
-	return free_num;
-}
-
 static void ravb_get_tx_tstamp(struct net_device *ndev)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
@@ -797,7 +812,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
 			spin_lock_irqsave(&priv->lock, flags);
 			/* Clear TX interrupt */
 			ravb_write(ndev, ~mask, TIS);
-			ravb_tx_free(ndev, q);
+			ravb_tx_free(ndev, q, true);
 			netif_wake_subqueue(ndev, q);
 			mmiowb();
 			spin_unlock_irqrestore(&priv->lock, flags);
@@ -1330,6 +1345,19 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
 		 entry / NUM_TX_DESC * DPTR_ALIGN;
 	len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
+	/* Zero length DMA descriptors are problematic as they seem to
+	 * terminate DMA transfers. Avoid them by simply using a length of
+	 * DPTR_ALIGN (4) when skb data is aligned to DPTR_ALIGN.
+	 *
+	 * As skb is guaranteed to have at least ETH_ZLEN (60) bytes of
+	 * data by the call to skb_put_padto() above this is safe with
+	 * respect to both the length of the first DMA descriptor (len)
+	 * overflowing the available data and the length of the second DMA
+	 * descriptor (skb->len - len) being negative.
+	 */
+	if (len == 0)
+		len = DPTR_ALIGN;
+
 	memcpy(buffer, skb->data, len);
 	dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
 	if (dma_mapping_error(ndev->dev.parent, dma_addr))
@@ -1380,7 +1408,8 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 
 	priv->cur_tx[q] += NUM_TX_DESC;
 	if (priv->cur_tx[q] - priv->dirty_tx[q] >
-	    (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && !ravb_tx_free(ndev, q))
+	    (priv->num_tx_ring[q] - 1) * NUM_TX_DESC &&
+	    !ravb_tx_free(ndev, q, true))
 		netif_stop_subqueue(ndev, q);
 
 exit:
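
For readers wanting to see the effect of the new length clamp in the ravb_start_xmit() hunk in isolation, here is a minimal standalone sketch. It is not part of the patch: DPTR_ALIGN (4) is taken from the patch comment, PTR_ALIGN is re-created locally so the example builds outside the kernel tree, and first_desc_len() is a hypothetical helper name used only for illustration.

/*
 * Sketch of the first-descriptor length calculation: how many bytes of
 * skb->data are copied into the aligned bounce buffer, and why a zero
 * result is bumped to DPTR_ALIGN by the patch.
 */
#include <stdalign.h>
#include <stdint.h>
#include <stdio.h>

#define DPTR_ALIGN 4	/* descriptor pointer alignment, per the patch comment */

/* Minimal stand-in for the kernel's PTR_ALIGN() macro */
#define PTR_ALIGN(p, a) \
	((void *)(((uintptr_t)(p) + ((uintptr_t)(a) - 1)) & ~((uintptr_t)(a) - 1)))

/* Bytes placed in the first TX descriptor (hypothetical helper name) */
static unsigned int first_desc_len(const unsigned char *skb_data)
{
	unsigned int len =
		(const unsigned char *)PTR_ALIGN(skb_data, DPTR_ALIGN) - skb_data;

	/* A zero-length descriptor would terminate the DMA transfer, so the
	 * patch uses DPTR_ALIGN when skb->data is already aligned.
	 */
	if (len == 0)
		len = DPTR_ALIGN;
	return len;
}

int main(void)
{
	alignas(DPTR_ALIGN) static unsigned char data[64];

	/* Aligned start: before the patch len would be 0, now it is 4 */
	printf("aligned start: len = %u\n", first_desc_len(data));
	/* Start one byte past alignment: 3 bytes reach the next boundary */
	printf("offset +1:     len = %u\n", first_desc_len(data + 1));
	return 0;
}

Because skb_put_padto() has already padded the skb to at least ETH_ZLEN (60) bytes, clamping len to 4 can neither read past the available data nor make the second descriptor's length (skb->len - len) negative, which is exactly the argument made in the patch comment.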