Diffstat (limited to 'drivers/net/xen-netback/rx.c')
 drivers/net/xen-netback/rx.c | 70 +++++++++++++++++++++++++++++++++++++++++++++++++---------------------
 1 file changed, 49 insertions(+), 21 deletions(-)
diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c
index f152246c7dfb..29c7645f5780 100644
--- a/drivers/net/xen-netback/rx.c
+++ b/drivers/net/xen-netback/rx.c
@@ -33,22 +33,37 @@
 #include <xen/xen.h>
 #include <xen/events.h>
 
+/*
+ * Update the needed ring page slots for the first SKB queued.
+ * Note that any call sequence outside the RX thread calling this function
+ * needs to wake up the RX thread via a call of xenvif_kick_thread()
+ * afterwards in order to avoid a race with putting the thread to sleep.
+ */
+static void xenvif_update_needed_slots(struct xenvif_queue *queue,
+				       const struct sk_buff *skb)
+{
+	unsigned int needed = 0;
+
+	if (skb) {
+		needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
+		if (skb_is_gso(skb))
+			needed++;
+		if (skb->sw_hash)
+			needed++;
+	}
+
+	WRITE_ONCE(queue->rx_slots_needed, needed);
+}
+
 static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
 {
 	RING_IDX prod, cons;
-	struct sk_buff *skb;
-	int needed;
+	unsigned int needed;
 
-	skb = skb_peek(&queue->rx_queue);
-	if (!skb)
+	needed = READ_ONCE(queue->rx_slots_needed);
+	if (!needed)
 		return false;
 
-	needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
-	if (skb_is_gso(skb))
-		needed++;
-	if (skb->sw_hash)
-		needed++;
-
 	do {
 		prod = queue->rx.sring->req_prod;
 		cons = queue->rx.req_cons;
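Note: the new helper caches, in rx_slots_needed, how many ring slots the SKB at the head of rx_queue will require, so xenvif_rx_ring_slots_available() no longer has to peek the queue from the RX thread. Below is a minimal, self-contained userspace sketch of the same publish/consume pattern; the READ_ONCE()/WRITE_ONCE() stand-ins are built on C11 atomics, and all names (fake_queue, update_needed_slots, ring_slots_available) are illustrative only, not the driver's API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Userspace stand-ins for the kernel's WRITE_ONCE()/READ_ONCE(). */
#define WRITE_ONCE(var, val) atomic_store_explicit(&(var), (val), memory_order_relaxed)
#define READ_ONCE(var)       atomic_load_explicit(&(var), memory_order_relaxed)

struct fake_queue {
	_Atomic unsigned int rx_slots_needed;	/* slots needed by the head packet */
	unsigned int ring_slots_free;		/* slots currently free in the ring */
};

/* Producer side: called whenever the head of the queue changes. */
static void update_needed_slots(struct fake_queue *q, unsigned int pkt_len,
				unsigned int page_size)
{
	unsigned int needed = pkt_len ? (pkt_len + page_size - 1) / page_size : 0;

	WRITE_ONCE(q->rx_slots_needed, needed);
}

/* Consumer side (RX thread analogue): no queue lock, no skb_peek() needed. */
static bool ring_slots_available(struct fake_queue *q)
{
	unsigned int needed = READ_ONCE(q->rx_slots_needed);

	return needed && q->ring_slots_free >= needed;
}

int main(void)
{
	struct fake_queue q = { .ring_slots_free = 4 };

	update_needed_slots(&q, 10000, 4096);	/* head packet spans 3 pages */
	printf("available: %d\n", ring_slots_available(&q));	/* prints 1 */
	return 0;
}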
@@ -73,13 +88,19 @@ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
 
 	spin_lock_irqsave(&queue->rx_queue.lock, flags);
 
-	__skb_queue_tail(&queue->rx_queue, skb);
-
-	queue->rx_queue_len += skb->len;
-	if (queue->rx_queue_len > queue->rx_queue_max) {
+	if (queue->rx_queue_len >= queue->rx_queue_max) {
 		struct net_device *dev = queue->vif->dev;
 
 		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
+		kfree_skb(skb);
+		queue->vif->dev->stats.rx_dropped++;
+	} else {
+		if (skb_queue_empty(&queue->rx_queue))
+			xenvif_update_needed_slots(queue, skb);
+
+		__skb_queue_tail(&queue->rx_queue, skb);
+
+		queue->rx_queue_len += skb->len;
 	}
 
 	spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
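Note: xenvif_rx_queue_tail() now drops the packet outright (and accounts it as rx_dropped) once the queued bytes have already reached rx_queue_max, instead of queueing without bound; when the packet is accepted and the queue was empty, the cached slot count is refreshed so it describes the new head. A rough, self-contained sketch of that bounded-enqueue policy follows; the structures and helpers here are illustrative, not the driver's.

#include <stdbool.h>
#include <stdlib.h>

struct pkt {
	struct pkt *next;
	unsigned int len;
};

struct bounded_queue {
	struct pkt *head, *tail;
	unsigned long queued_bytes;	/* analogue of rx_queue_len */
	unsigned long max_bytes;	/* analogue of rx_queue_max */
	unsigned long dropped;		/* analogue of dev->stats.rx_dropped */
	unsigned int head_slots_needed;	/* analogue of rx_slots_needed */
};

/* Illustrative analogue of xenvif_update_needed_slots(). */
static void update_needed_slots(struct bounded_queue *q, const struct pkt *p)
{
	q->head_slots_needed = p ? (p->len + 4095) / 4096 : 0;
}

/* Enqueue if there is room, otherwise drop and account the loss. */
static bool queue_tail(struct bounded_queue *q, struct pkt *p)
{
	if (q->queued_bytes >= q->max_bytes) {
		free(p);
		q->dropped++;
		return false;
	}

	if (!q->head)			/* queue was empty: p becomes the head */
		update_needed_slots(q, p);

	p->next = NULL;
	if (q->tail)
		q->tail->next = p;
	else
		q->head = p;
	q->tail = p;
	q->queued_bytes += p->len;
	return true;
}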
@@ -93,6 +114,8 @@ static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
 
 	skb = __skb_dequeue(&queue->rx_queue);
 	if (skb) {
+		xenvif_update_needed_slots(queue, skb_peek(&queue->rx_queue));
+
 		queue->rx_queue_len -= skb->len;
 		if (queue->rx_queue_len < queue->rx_queue_max) {
 			struct netdev_queue *txq;
@@ -127,6 +150,7 @@ static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
 			break;
 		xenvif_rx_dequeue(queue);
 		kfree_skb(skb);
+		queue->vif->dev->stats.rx_dropped++;
 	}
 }
 
@@ -467,27 +491,31 @@ void xenvif_rx_action(struct xenvif_queue *queue)
 	xenvif_rx_copy_flush(queue);
 }
 
-static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
+static RING_IDX xenvif_rx_queue_slots(const struct xenvif_queue *queue)
 {
 	RING_IDX prod, cons;
 
 	prod = queue->rx.sring->req_prod;
 	cons = queue->rx.req_cons;
 
+	return prod - cons;
+}
+
+static bool xenvif_rx_queue_stalled(const struct xenvif_queue *queue)
+{
+	unsigned int needed = READ_ONCE(queue->rx_slots_needed);
+
 	return !queue->stalled &&
-	       prod - cons < 1 &&
+	       xenvif_rx_queue_slots(queue) < needed &&
 	       time_after(jiffies,
 			  queue->last_rx_time + queue->vif->stall_timeout);
 }
 
 static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
 {
-	RING_IDX prod, cons;
-
-	prod = queue->rx.sring->req_prod;
-	cons = queue->rx.req_cons;
+	unsigned int needed = READ_ONCE(queue->rx_slots_needed);
 
-	return queue->stalled && prod - cons >= 1;
+	return queue->stalled && xenvif_rx_queue_slots(queue) >= needed;
 }
 
 bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread)
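Note: the stall and readiness checks now compare the free request slots (prod - cons) against the cached requirement of the head packet rather than against a fixed threshold of one slot, so a queue holding a large packet is not reported as ready before it can actually make progress. A compact sketch of the two predicates, under the same illustrative naming as the sketches above (not the driver's types):

#include <stdbool.h>

struct ring_state {
	unsigned int req_prod;		/* frontend-produced request index */
	unsigned int req_cons;		/* backend-consumed request index */
	unsigned int slots_needed;	/* cached need of the head packet */
	bool stalled;
	unsigned long last_rx_time;
	unsigned long stall_timeout;
	unsigned long now;
};

static unsigned int queue_slots(const struct ring_state *r)
{
	return r->req_prod - r->req_cons;	/* unsigned wrap-around is intended */
}

/* Stalled: not yet flagged, too few slots for the head packet, quiet too long. */
static bool queue_stalled(const struct ring_state *r)
{
	return !r->stalled &&
	       queue_slots(r) < r->slots_needed &&
	       r->now > r->last_rx_time + r->stall_timeout;
}

/* Ready again: flagged as stalled, but the frontend has posted enough slots. */
static bool queue_ready(const struct ring_state *r)
{
	return r->stalled && queue_slots(r) >= r->slots_needed;
}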