author     Frank Li                                    2013-03-03 11:34:25 -0600
committer  David S. Miller                             2013-03-04 13:12:07 -0600
commit     de5fb0a053482d89262c3284b67884cd2c621adc
tree       818812a3b5c064fcf09d2e7da817d416c8db44f6
parent     3f736868b47687d1336fe88185560b22bb92021e
net: fec: move tx handling into the napi poll function to fix a deadlock
The upper layers already hold a lock when calling ndo_start_xmit, so
fec_enet_start_xmit does not need to take its own spin lock.

fec_enet_start_xmit only updates fep->cur_tx, and fec_enet_tx only
updates fep->dirty_tx. Reserve one empty buffer descriptor so that a
full ring can be distinguished from an empty one:

  cur_tx == dirty_tx      means the ring is full
  cur_tx == dirty_tx + 1  means the ring is empty

so the tx_full flag is no longer needed.

This fixes the following spin lock deadlock:

=================================
[ INFO: inconsistent lock state ]
3.8.0-rc5+ #107 Not tainted
---------------------------------
inconsistent {HARDIRQ-ON-W} -> {IN-HARDIRQ-W} usage.
ptp4l/615 [HC1[1]:SC0[0]:HE0:SE1] takes:
 (&(&list->lock)->rlock#3){?.-...}, at: [<8042c3c4>] skb_queue_tail+0x20/0x50
{HARDIRQ-ON-W} state was registered at:
  [<80067250>] mark_lock+0x154/0x4e8
  [<800676f4>] mark_irqflags+0x110/0x1a4
  [<80069208>] __lock_acquire+0x494/0x9c0
  [<80069ce8>] lock_acquire+0x90/0xa4
  [<80527ad0>] _raw_spin_lock_bh+0x44/0x54
  [<804877e0>] first_packet_length+0x38/0x1f0
  [<804879e4>] udp_poll+0x4c/0x5c
  [<804231f8>] sock_poll+0x24/0x28
  [<800d27f0>] do_poll.isra.10+0x120/0x254
  [<800d36e4>] do_sys_poll+0x15c/0x1e8
  [<800d3828>] sys_poll+0x60/0xc8
  [<8000e780>] ret_fast_syscall+0x0/0x3c

*** DEADLOCK ***

1 lock held by ptp4l/615:
 #0:  (&(&fep->hw_lock)->rlock){-.-...}, at: [<80355f9c>] fec_enet_tx+0x24/0x268

stack backtrace:
Backtrace:
[<800121e0>] (dump_backtrace+0x0/0x10c) from [<80516210>] (dump_stack+0x18/0x1c)
 r6:8063b1fc r5:bf38b2f8 r4:bf38b000 r3:bf38b000
[<805161f8>] (dump_stack+0x0/0x1c) from [<805189d0>] (print_usage_bug.part.34+0x164/0x1a4)
[<8051886c>] (print_usage_bug.part.34+0x0/0x1a4) from [<80518a88>] (print_usage_bug+0x78/0x88)
 r8:80065664 r7:bf38b2f8 r6:00000002 r5:00000000 r4:bf38b000
[<80518a10>] (print_usage_bug+0x0/0x88) from [<80518b58>] (mark_lock_irq+0xc0/0x270)
 r7:bf38b000 r6:00000002 r5:bf38b2f8 r4:00000000
[<80518a98>] (mark_lock_irq+0x0/0x270) from [<80067270>] (mark_lock+0x174/0x4e8)
[<800670fc>] (mark_lock+0x0/0x4e8) from [<80067744>] (mark_irqflags+0x160/0x1a4)
[<800675e4>] (mark_irqflags+0x0/0x1a4) from [<80069208>] (__lock_acquire+0x494/0x9c0)
 r5:00000002 r4:bf38b2f8
[<80068d74>] (__lock_acquire+0x0/0x9c0) from [<80069ce8>] (lock_acquire+0x90/0xa4)
[<80069c58>] (lock_acquire+0x0/0xa4) from [<805278d8>] (_raw_spin_lock_irqsave+0x4c/0x60)
[<8052788c>] (_raw_spin_lock_irqsave+0x0/0x60) from [<8042c3c4>] (skb_queue_tail+0x20/0x50)
 r6:bfbb2180 r5:bf1d0190 r4:bf1d0184
[<8042c3a4>] (skb_queue_tail+0x0/0x50) from [<8042c4cc>] (sock_queue_err_skb+0xd8/0x188)
 r6:00000056 r5:bfbb2180 r4:bf1d0000 r3:00000000
[<8042c3f4>] (sock_queue_err_skb+0x0/0x188) from [<8042d15c>] (skb_tstamp_tx+0x70/0xa0)
 r6:bf0dddb0 r5:bf1d0000 r4:bfbb2180 r3:00000004
[<8042d0ec>] (skb_tstamp_tx+0x0/0xa0) from [<803561d0>] (fec_enet_tx+0x258/0x268)
 r6:c089d260 r5:00001c00 r4:bfbd0000
[<80355f78>] (fec_enet_tx+0x0/0x268) from [<803562cc>] (fec_enet_interrupt+0xec/0xf8)
[<803561e0>] (fec_enet_interrupt+0x0/0xf8) from [<8007d5b0>] (handle_irq_event_percpu+0x54/0x1a0)
[<8007d55c>] (handle_irq_event_percpu+0x0/0x1a0) from [<8007d740>] (handle_irq_event+0x44/0x64)
[<8007d6fc>] (handle_irq_event+0x0/0x64) from [<80080690>] (handle_fasteoi_irq+0xc4/0x15c)
 r6:bf0dc000 r5:bf811290 r4:bf811240 r3:00000000
[<800805cc>] (handle_fasteoi_irq+0x0/0x15c) from [<8007ceec>] (generic_handle_irq+0x28/0x38)
 r5:807130c8 r4:00000096
[<8007cec4>] (generic_handle_irq+0x0/0x38) from [<8000f16c>] (handle_IRQ+0x54/0xb4)
 r4:8071d280 r3:00000180
[<8000f118>] (handle_IRQ+0x0/0xb4) from [<80008544>] (gic_handle_irq+0x30/0x64)
 r8:8000e924 r7:f4000100 r6:bf0ddef8 r5:8071c974 r4:f400010c r3:00000000
[<80008514>] (gic_handle_irq+0x0/0x64) from [<8000e2e4>] (__irq_svc+0x44/0x5c)
Exception stack(0xbf0ddef8 to 0xbf0ddf40)

Signed-off-by: Frank Li <Frank.Li@freescale.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
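To make the cur_tx/dirty_tx convention above concrete, here is a small
standalone sketch (illustration only, not code from this driver: the ring
size, struct name, and helpers are invented for the demo) of how two cursors
plus one deliberately unused descriptor let the transmit path and the
NAPI-side completion path tell a full ring from an empty one without a
shared flag:

/* Illustration of the reserved-descriptor full/empty convention described
 * in the commit message above. Hypothetical names; not the FEC driver's
 * actual structures.
 */
#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 8 /* hypothetical ring size for the demo */

struct demo_ring {
        unsigned int cur_tx;   /* next descriptor the transmit path will use */
        unsigned int dirty_tx; /* last descriptor the completion path reclaimed */
};

/* One descriptor is always kept unused, so cur_tx == dirty_tx means "full". */
static bool ring_full(const struct demo_ring *r)
{
        return r->cur_tx == r->dirty_tx;
}

/* cur_tx == dirty_tx + 1 (modulo the ring size) means "empty". */
static bool ring_empty(const struct demo_ring *r)
{
        return r->cur_tx == (r->dirty_tx + 1) % RING_SIZE;
}

int main(void)
{
        struct demo_ring r = { .cur_tx = 1, .dirty_tx = 0 }; /* starts empty */
        int queued = 0;

        /* Transmit path (start_xmit): only advances cur_tx, stopping once
         * the reserved slot is reached. */
        while (!ring_full(&r)) {
                r.cur_tx = (r.cur_tx + 1) % RING_SIZE;
                queued++;
        }
        printf("queue stopped after %d packets\n", queued); /* 7 of 8 slots */

        /* Completion path (NAPI poll): only advances dirty_tx while
         * reclaiming finished descriptors. */
        while (!ring_empty(&r))
                r.dirty_tx = (r.dirty_tx + 1) % RING_SIZE;
        printf("ring drained: cur_tx=%u dirty_tx=%u\n", r.cur_tx, r.dirty_tx);

        return 0;
}

Because each side touches only its own cursor, no lock is needed between
start_xmit and tx reclaim once the latter runs from the NAPI poll rather
than from hard-IRQ context, which is what lets the patch drop hw_lock from
both paths.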
-rw-r--r--  drivers/net/ethernet/freescale/fec.c | 85
-rw-r--r--  drivers/net/ethernet/freescale/fec.h |  3
2 files changed, 41 insertions(+), 47 deletions(-)
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index fccc3bf2141..069a155d16e 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -246,14 +246,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
         struct bufdesc *bdp;
         void *bufaddr;
         unsigned short status;
-        unsigned long flags;
+        unsigned int index;
 
         if (!fep->link) {
                 /* Link is down or autonegotiation is in progress. */
                 return NETDEV_TX_BUSY;
         }
 
-        spin_lock_irqsave(&fep->hw_lock, flags);
         /* Fill in a Tx ring entry */
         bdp = fep->cur_tx;
 
@@ -264,7 +263,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                  * This should not happen, since ndev->tbusy should be set.
                  */
                 printk("%s: tx queue full!.\n", ndev->name);
-                spin_unlock_irqrestore(&fep->hw_lock, flags);
                 return NETDEV_TX_BUSY;
         }
 
@@ -280,13 +278,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
          * 4-byte boundaries. Use bounce buffers to copy data
          * and get it aligned. Ugh.
          */
+        if (fep->bufdesc_ex)
+                index = (struct bufdesc_ex *)bdp -
+                        (struct bufdesc_ex *)fep->tx_bd_base;
+        else
+                index = bdp - fep->tx_bd_base;
+
         if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
-                unsigned int index;
-                if (fep->bufdesc_ex)
-                        index = (struct bufdesc_ex *)bdp -
-                                (struct bufdesc_ex *)fep->tx_bd_base;
-                else
-                        index = bdp - fep->tx_bd_base;
                 memcpy(fep->tx_bounce[index], skb->data, skb->len);
                 bufaddr = fep->tx_bounce[index];
         }
@@ -300,10 +298,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                 swap_buffer(bufaddr, skb->len);
 
         /* Save skb pointer */
-        fep->tx_skbuff[fep->skb_cur] = skb;
-
-        ndev->stats.tx_bytes += skb->len;
-        fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;
+        fep->tx_skbuff[index] = skb;
 
         /* Push the data cache so the CPM does not get stale memory
          * data.
@@ -331,26 +326,22 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                         ebdp->cbd_esc = BD_ENET_TX_INT;
                 }
         }
-        /* Trigger transmission start */
-        writel(0, fep->hwp + FEC_X_DES_ACTIVE);
-
         /* If this was the last BD in the ring, start at the beginning again. */
         if (status & BD_ENET_TX_WRAP)
                 bdp = fep->tx_bd_base;
         else
                 bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
 
-        if (bdp == fep->dirty_tx) {
-                fep->tx_full = 1;
+        fep->cur_tx = bdp;
+
+        if (fep->cur_tx == fep->dirty_tx)
                 netif_stop_queue(ndev);
-        }
 
-        fep->cur_tx = bdp;
+        /* Trigger transmission start */
+        writel(0, fep->hwp + FEC_X_DES_ACTIVE);
 
         skb_tx_timestamp(skb);
 
-        spin_unlock_irqrestore(&fep->hw_lock, flags);
-
         return NETDEV_TX_OK;
 }
 
@@ -406,11 +397,8 @@ fec_restart(struct net_device *ndev, int duplex)
         writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
                         * RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
 
-        fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
         fep->cur_rx = fep->rx_bd_base;
 
-        /* Reset SKB transmit buffers. */
-        fep->skb_cur = fep->skb_dirty = 0;
         for (i = 0; i <= TX_RING_MOD_MASK; i++) {
                 if (fep->tx_skbuff[i]) {
                         dev_kfree_skb_any(fep->tx_skbuff[i]);
@@ -573,20 +561,35 @@ fec_enet_tx(struct net_device *ndev)
         struct bufdesc *bdp;
         unsigned short status;
         struct sk_buff *skb;
+        int index = 0;
 
         fep = netdev_priv(ndev);
-        spin_lock(&fep->hw_lock);
         bdp = fep->dirty_tx;
 
+        /* get next bdp of dirty_tx */
+        if (bdp->cbd_sc & BD_ENET_TX_WRAP)
+                bdp = fep->tx_bd_base;
+        else
+                bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+
         while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
-                if (bdp == fep->cur_tx && fep->tx_full == 0)
+
+                /* current queue is empty */
+                if (bdp == fep->cur_tx)
                         break;
 
+                if (fep->bufdesc_ex)
+                        index = (struct bufdesc_ex *)bdp -
+                                (struct bufdesc_ex *)fep->tx_bd_base;
+                else
+                        index = bdp - fep->tx_bd_base;
+
                 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
                                 FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
                 bdp->cbd_bufaddr = 0;
 
-                skb = fep->tx_skbuff[fep->skb_dirty];
+                skb = fep->tx_skbuff[index];
+
                 /* Check for errors. */
                 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
                                 BD_ENET_TX_RL | BD_ENET_TX_UN |
@@ -631,8 +634,9 @@ fec_enet_tx(struct net_device *ndev)
 
                 /* Free the sk buffer associated with this last transmit */
                 dev_kfree_skb_any(skb);
-                fep->tx_skbuff[fep->skb_dirty] = NULL;
-                fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;
+                fep->tx_skbuff[index] = NULL;
+
+                fep->dirty_tx = bdp;
 
                 /* Update pointer to next buffer descriptor to be transmitted */
                 if (status & BD_ENET_TX_WRAP)
@@ -642,14 +646,12 @@ fec_enet_tx(struct net_device *ndev)
 
                 /* Since we have freed up a buffer, the ring is no longer full
                  */
-                if (fep->tx_full) {
-                        fep->tx_full = 0;
+                if (fep->dirty_tx != fep->cur_tx) {
                         if (netif_queue_stopped(ndev))
                                 netif_wake_queue(ndev);
                 }
         }
-        fep->dirty_tx = bdp;
-        spin_unlock(&fep->hw_lock);
+        return;
 }
 
 
@@ -816,7 +818,7 @@ fec_enet_interrupt(int irq, void *dev_id)
         int_events = readl(fep->hwp + FEC_IEVENT);
         writel(int_events, fep->hwp + FEC_IEVENT);
 
-        if (int_events & FEC_ENET_RXF) {
+        if (int_events & (FEC_ENET_RXF | FEC_ENET_TXF)) {
                 ret = IRQ_HANDLED;
 
                 /* Disable the RX interrupt */
@@ -827,15 +829,6 @@ fec_enet_interrupt(int irq, void *dev_id)
                 }
         }
 
-        /* Transmit OK, or non-fatal error. Update the buffer
-         * descriptors. FEC handles all errors, we just discover
-         * them as part of the transmit process.
-         */
-        if (int_events & FEC_ENET_TXF) {
-                ret = IRQ_HANDLED;
-                fec_enet_tx(ndev);
-        }
-
         if (int_events & FEC_ENET_MII) {
                 ret = IRQ_HANDLED;
                 complete(&fep->mdio_done);
@@ -851,6 +844,8 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
         int pkts = fec_enet_rx(ndev, budget);
         struct fec_enet_private *fep = netdev_priv(ndev);
 
+        fec_enet_tx(ndev);
+
         if (pkts < budget) {
                 napi_complete(napi);
                 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
@@ -1646,6 +1641,7 @@ static int fec_enet_init(struct net_device *ndev)
 
         /* ...and the same for transmit */
         bdp = fep->tx_bd_base;
+        fep->cur_tx = bdp;
         for (i = 0; i < TX_RING_SIZE; i++) {
 
                 /* Initialize the BD for every fragment in the page. */
@@ -1657,6 +1653,7 @@ static int fec_enet_init(struct net_device *ndev)
         /* Set the last buffer to wrap */
         bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
         bdp->cbd_sc |= BD_SC_WRAP;
+        fep->dirty_tx = bdp;
 
         fec_restart(ndev, 0);
 
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 01579b8e37c..c0f63be91ff 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -214,8 +214,6 @@ struct fec_enet_private {
         unsigned char *tx_bounce[TX_RING_SIZE];
         struct sk_buff *tx_skbuff[TX_RING_SIZE];
         struct sk_buff *rx_skbuff[RX_RING_SIZE];
-        ushort skb_cur;
-        ushort skb_dirty;
 
         /* CPM dual port RAM relative addresses */
         dma_addr_t bd_dma;
@@ -227,7 +225,6 @@ struct fec_enet_private {
         /* The ring entries to be free()ed */
         struct bufdesc *dirty_tx;
 
-        uint tx_full;
         /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
         spinlock_t hw_lock;
 