Diffstat (limited to 'drivers/infiniband/hw/qib/qib_qp.c')
-rw-r--r--  drivers/infiniband/hw/qib/qib_qp.c  18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 3eff35c2d453..2684605fe67f 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -41,13 +41,13 @@
 
 #include "qib.h"
 
-#define BITS_PER_PAGE		(PAGE_SIZE*BITS_PER_BYTE)
-#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)
+#define RVT_BITS_PER_PAGE	(PAGE_SIZE*BITS_PER_BYTE)
+#define RVT_BITS_PER_PAGE_MASK	(RVT_BITS_PER_PAGE-1)
 
 static inline unsigned mk_qpn(struct qib_qpn_table *qpt,
 			      struct qpn_map *map, unsigned off)
 {
-	return (map - qpt->map) * BITS_PER_PAGE + off;
+	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
 }
 
 static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
@@ -59,7 +59,7 @@ static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
 		if (((off & qpt->mask) >> 1) >= n)
 			off = (off | qpt->mask) + 2;
 	} else
-		off = find_next_zero_bit(map->page, BITS_PER_PAGE, off);
+		off = find_next_zero_bit(map->page, RVT_BITS_PER_PAGE, off);
 	return off;
 }
 
@@ -147,8 +147,8 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
 		qpn = 2;
 	if (qpt->mask && ((qpn & qpt->mask) >> 1) >= dd->n_krcv_queues)
 		qpn = (qpn | qpt->mask) + 2;
-	offset = qpn & BITS_PER_PAGE_MASK;
-	map = &qpt->map[qpn / BITS_PER_PAGE];
+	offset = qpn & RVT_BITS_PER_PAGE_MASK;
+	map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
 	max_scan = qpt->nmaps - !offset;
 	for (i = 0;;) {
 		if (unlikely(!map->page)) {
@@ -173,7 +173,7 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
 			 * We just need to be sure we don't loop
 			 * forever.
 			 */
-		} while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
+		} while (offset < RVT_BITS_PER_PAGE && qpn < QPN_MAX);
 		/*
 		 * In order to keep the number of pages allocated to a
 		 * minimum, we scan the all existing pages before increasing
@@ -204,9 +204,9 @@ static void free_qpn(struct qib_qpn_table *qpt, u32 qpn)
 {
 	struct qpn_map *map;
 
-	map = qpt->map + qpn / BITS_PER_PAGE;
+	map = qpt->map + qpn / RVT_BITS_PER_PAGE;
 	if (map->page)
-		clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
+		clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
 }
 
 static inline unsigned qpn_hash(struct qib_ibdev *dev, u32 qpn)
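The rename is mechanical and the QPN bitmap arithmetic is unchanged: a queue pair number still splits into a bitmap-page index and a bit offset within that page, as in free_qpn() above. A standalone sketch of that split, not part of the patch, with PAGE_SIZE assumed to be 4096 and an arbitrary example QPN for illustration:

#include <stdio.h>

#define PAGE_SIZE               4096    /* assumed page size for this sketch */
#define BITS_PER_BYTE           8
#define RVT_BITS_PER_PAGE       (PAGE_SIZE*BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE_MASK  (RVT_BITS_PER_PAGE-1)

int main(void)
{
	unsigned qpn = 70000;                           /* example queue pair number */
	unsigned page = qpn / RVT_BITS_PER_PAGE;        /* index of the bitmap page */
	unsigned bit = qpn & RVT_BITS_PER_PAGE_MASK;    /* bit offset within that page */

	printf("qpn %u -> map[%u], bit %u\n", qpn, page, bit);
	return 0;
}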