drivers/nvme/target/rdma.c
1 /*
2  * NVMe over Fabrics RDMA target.
3  * Copyright (c) 2015-2016 HGST, a Western Digital Company.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  */
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 #include <linux/atomic.h>
16 #include <linux/ctype.h>
17 #include <linux/delay.h>
18 #include <linux/err.h>
19 #include <linux/init.h>
20 #include <linux/module.h>
21 #include <linux/nvme.h>
22 #include <linux/slab.h>
23 #include <linux/string.h>
24 #include <linux/wait.h>
25 #include <linux/inet.h>
26 #include <asm/unaligned.h>
28 #include <rdma/ib_verbs.h>
29 #include <rdma/rdma_cm.h>
30 #include <rdma/rw.h>
32 #include <linux/nvme-rdma.h>
33 #include "nvmet.h"
35 /*
36  * We allow at least 1 page, up to 4 SGEs, and up to 16KB of inline data
37  */
38 #define NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE     PAGE_SIZE
39 #define NVMET_RDMA_MAX_INLINE_SGE               4
40 #define NVMET_RDMA_MAX_INLINE_DATA_SIZE         max_t(int, SZ_16K, PAGE_SIZE)
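/*
 * With 4 KiB pages the maximum inline payload, max_t(int, SZ_16K, PAGE_SIZE)
 * = 16 KiB, spans exactly NVMET_RDMA_MAX_INLINE_SGE (4) pages, i.e. one page
 * per inline SGE.
 */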
42 struct nvmet_rdma_cmd {
43         struct ib_sge           sge[NVMET_RDMA_MAX_INLINE_SGE + 1];
44         struct ib_cqe           cqe;
45         struct ib_recv_wr       wr;
46         struct scatterlist      inline_sg[NVMET_RDMA_MAX_INLINE_SGE];
47         struct nvme_command     *nvme_cmd;
48         struct nvmet_rdma_queue *queue;
49 };
51 enum {
52         NVMET_RDMA_REQ_INLINE_DATA      = (1 << 0),
53         NVMET_RDMA_REQ_INVALIDATE_RKEY  = (1 << 1),
54 };
56 struct nvmet_rdma_rsp {
57         struct ib_sge           send_sge;
58         struct ib_cqe           send_cqe;
59         struct ib_send_wr       send_wr;
61         struct nvmet_rdma_cmd   *cmd;
62         struct nvmet_rdma_queue *queue;
64         struct ib_cqe           read_cqe;
65         struct rdma_rw_ctx      rw;
67         struct nvmet_req        req;
69         bool                    allocated;
70         u8                      n_rdma;
71         u32                     flags;
72         u32                     invalidate_rkey;
74         struct list_head        wait_list;
75         struct list_head        free_list;
76 };
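/*
 * free_list links an idle response on queue->free_rsps; wait_list is reused
 * both for commands that arrive while the queue is still connecting
 * (rsp_wait_list) and for commands waiting on send WR credits
 * (rsp_wr_wait_list).
 */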
78 enum nvmet_rdma_queue_state {
79         NVMET_RDMA_Q_CONNECTING,
80         NVMET_RDMA_Q_LIVE,
81         NVMET_RDMA_Q_DISCONNECTING,
82 };
84 struct nvmet_rdma_queue {
85         struct rdma_cm_id       *cm_id;
86         struct nvmet_port       *port;
87         struct ib_cq            *cq;
88         atomic_t                sq_wr_avail;
89         struct nvmet_rdma_device *dev;
90         spinlock_t              state_lock;
91         enum nvmet_rdma_queue_state state;
92         struct nvmet_cq         nvme_cq;
93         struct nvmet_sq         nvme_sq;
95         struct nvmet_rdma_rsp   *rsps;
96         struct list_head        free_rsps;
97         spinlock_t              rsps_lock;
98         struct nvmet_rdma_cmd   *cmds;
100         struct work_struct      release_work;
101         struct list_head        rsp_wait_list;
102         struct list_head        rsp_wr_wait_list;
103         spinlock_t              rsp_wr_wait_lock;
105         int                     idx;
106         int                     host_qid;
107         int                     recv_queue_size;
108         int                     send_queue_size;
110         struct list_head        queue_list;
111 };
113 struct nvmet_rdma_device {
114         struct ib_device        *device;
115         struct ib_pd            *pd;
116         struct ib_srq           *srq;
117         struct nvmet_rdma_cmd   *srq_cmds;
118         size_t                  srq_size;
119         struct kref             ref;
120         struct list_head        entry;
121         int                     inline_data_size;
122         int                     inline_page_count;
123 };
125 static bool nvmet_rdma_use_srq;
126 module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
127 MODULE_PARM_DESC(use_srq, "Use shared receive queue.");
129 static DEFINE_IDA(nvmet_rdma_queue_ida);
130 static LIST_HEAD(nvmet_rdma_queue_list);
131 static DEFINE_MUTEX(nvmet_rdma_queue_mutex);
133 static LIST_HEAD(device_list);
134 static DEFINE_MUTEX(device_list_mutex);
136 static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp);
137 static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc);
138 static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
139 static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
140 static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
141 static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
143 static const struct nvmet_fabrics_ops nvmet_rdma_ops;
145 static int num_pages(int len)
147         return 1 + (((len - 1) & PAGE_MASK) >> PAGE_SHIFT);
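/* e.g. with 4 KiB pages: num_pages(1) == 1, num_pages(4096) == 1, num_pages(4097) == 2 */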
150 /* XXX: really should move to a generic header sooner or later.. */
151 static inline u32 get_unaligned_le24(const u8 *p)
153         return (u32)p[0] | (u32)p[1] << 8 | (u32)p[2] << 16;
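/* e.g. the little-endian byte sequence { 0x34, 0x12, 0x00 } decodes to 0x001234 */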
156 static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
158         return nvme_is_write(rsp->req.cmd) &&
159                 rsp->req.transfer_len &&
160                 !(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
163 static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
165         return !nvme_is_write(rsp->req.cmd) &&
166                 rsp->req.transfer_len &&
167                 !rsp->req.rsp->status &&
168                 !(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
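/*
 * "Data in" means the target must RDMA READ the payload from the host (a
 * write command whose data was not carried inline); "data out" means the
 * target must RDMA WRITE the payload back (a read command that completed
 * without error and has no inline data).
 */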
171 static inline struct nvmet_rdma_rsp *
172 nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
174         struct nvmet_rdma_rsp *rsp;
175         unsigned long flags;
177         spin_lock_irqsave(&queue->rsps_lock, flags);
178         rsp = list_first_entry_or_null(&queue->free_rsps,
179                                 struct nvmet_rdma_rsp, free_list);
180         if (likely(rsp))
181                 list_del(&rsp->free_list);
182         spin_unlock_irqrestore(&queue->rsps_lock, flags);
184         if (unlikely(!rsp)) {
185                 rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
186                 if (unlikely(!rsp))
187                         return NULL;
188                 rsp->allocated = true;
189         }
191         return rsp;
194 static inline void
195 nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
197         unsigned long flags;
199         if (rsp->allocated) {
200                 kfree(rsp);
201                 return;
202         }
204         spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
205         list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
206         spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
209 static void nvmet_rdma_free_inline_pages(struct nvmet_rdma_device *ndev,
210                                 struct nvmet_rdma_cmd *c)
212         struct scatterlist *sg;
213         struct ib_sge *sge;
214         int i;
216         if (!ndev->inline_data_size)
217                 return;
219         sg = c->inline_sg;
220         sge = &c->sge[1];
222         for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) {
223                 if (sge->length)
224                         ib_dma_unmap_page(ndev->device, sge->addr,
225                                         sge->length, DMA_FROM_DEVICE);
226                 if (sg_page(sg))
227                         __free_page(sg_page(sg));
228         }
231 static int nvmet_rdma_alloc_inline_pages(struct nvmet_rdma_device *ndev,
232                                 struct nvmet_rdma_cmd *c)
234         struct scatterlist *sg;
235         struct ib_sge *sge;
236         struct page *pg;
237         int len;
238         int i;
240         if (!ndev->inline_data_size)
241                 return 0;
243         sg = c->inline_sg;
244         sg_init_table(sg, ndev->inline_page_count);
245         sge = &c->sge[1];
246         len = ndev->inline_data_size;
248         for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) {
249                 pg = alloc_page(GFP_KERNEL);
250                 if (!pg)
251                         goto out_err;
252                 sg_assign_page(sg, pg);
253                 sge->addr = ib_dma_map_page(ndev->device,
254                         pg, 0, PAGE_SIZE, DMA_FROM_DEVICE);
255                 if (ib_dma_mapping_error(ndev->device, sge->addr))
256                         goto out_err;
257                 sge->length = min_t(int, len, PAGE_SIZE);
258                 sge->lkey = ndev->pd->local_dma_lkey;
259                 len -= sge->length;
260         }
262         return 0;
263 out_err:
264         for (; i >= 0; i--, sg--, sge--) {
265                 if (sge->length)
266                         ib_dma_unmap_page(ndev->device, sge->addr,
267                                         sge->length, DMA_FROM_DEVICE);
268                 if (sg_page(sg))
269                         __free_page(sg_page(sg));
270         }
271         return -ENOMEM;
274 static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
275                         struct nvmet_rdma_cmd *c, bool admin)
277         /* NVMe command / RDMA RECV */
278         c->nvme_cmd = kmalloc(sizeof(*c->nvme_cmd), GFP_KERNEL);
279         if (!c->nvme_cmd)
280                 goto out;
282         c->sge[0].addr = ib_dma_map_single(ndev->device, c->nvme_cmd,
283                         sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
284         if (ib_dma_mapping_error(ndev->device, c->sge[0].addr))
285                 goto out_free_cmd;
287         c->sge[0].length = sizeof(*c->nvme_cmd);
288         c->sge[0].lkey = ndev->pd->local_dma_lkey;
290         if (!admin && nvmet_rdma_alloc_inline_pages(ndev, c))
291                 goto out_unmap_cmd;
293         c->cqe.done = nvmet_rdma_recv_done;
295         c->wr.wr_cqe = &c->cqe;
296         c->wr.sg_list = c->sge;
297         c->wr.num_sge = admin ? 1 : ndev->inline_page_count + 1;
299         return 0;
301 out_unmap_cmd:
302         ib_dma_unmap_single(ndev->device, c->sge[0].addr,
303                         sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
304 out_free_cmd:
305         kfree(c->nvme_cmd);
307 out:
308         return -ENOMEM;
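/*
 * The resulting RECV WR has sge[0] pointing at the 64-byte NVMe command
 * capsule and, for I/O queues, sge[1..inline_page_count] pointing at the
 * preallocated inline data pages, so a single RECV delivers both the command
 * and any inline payload.
 */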
311 static void nvmet_rdma_free_cmd(struct nvmet_rdma_device *ndev,
312                 struct nvmet_rdma_cmd *c, bool admin)
314         if (!admin)
315                 nvmet_rdma_free_inline_pages(ndev, c);
316         ib_dma_unmap_single(ndev->device, c->sge[0].addr,
317                                 sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
318         kfree(c->nvme_cmd);
321 static struct nvmet_rdma_cmd *
322 nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev,
323                 int nr_cmds, bool admin)
325         struct nvmet_rdma_cmd *cmds;
326         int ret = -EINVAL, i;
328         cmds = kcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL);
329         if (!cmds)
330                 goto out;
332         for (i = 0; i < nr_cmds; i++) {
333                 ret = nvmet_rdma_alloc_cmd(ndev, cmds + i, admin);
334                 if (ret)
335                         goto out_free;
336         }
338         return cmds;
340 out_free:
341         while (--i >= 0)
342                 nvmet_rdma_free_cmd(ndev, cmds + i, admin);
343         kfree(cmds);
344 out:
345         return ERR_PTR(ret);
348 static void nvmet_rdma_free_cmds(struct nvmet_rdma_device *ndev,
349                 struct nvmet_rdma_cmd *cmds, int nr_cmds, bool admin)
351         int i;
353         for (i = 0; i < nr_cmds; i++)
354                 nvmet_rdma_free_cmd(ndev, cmds + i, admin);
355         kfree(cmds);
358 static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
359                 struct nvmet_rdma_rsp *r)
361         /* NVMe CQE / RDMA SEND */
362         r->req.rsp = kmalloc(sizeof(*r->req.rsp), GFP_KERNEL);
363         if (!r->req.rsp)
364                 goto out;
366         r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.rsp,
367                         sizeof(*r->req.rsp), DMA_TO_DEVICE);
368         if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))
369                 goto out_free_rsp;
371         r->send_sge.length = sizeof(*r->req.rsp);
372         r->send_sge.lkey = ndev->pd->local_dma_lkey;
374         r->send_cqe.done = nvmet_rdma_send_done;
376         r->send_wr.wr_cqe = &r->send_cqe;
377         r->send_wr.sg_list = &r->send_sge;
378         r->send_wr.num_sge = 1;
379         r->send_wr.send_flags = IB_SEND_SIGNALED;
381         /* Data In / RDMA READ */
382         r->read_cqe.done = nvmet_rdma_read_data_done;
383         return 0;
385 out_free_rsp:
386         kfree(r->req.rsp);
387 out:
388         return -ENOMEM;
391 static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
392                 struct nvmet_rdma_rsp *r)
394         ib_dma_unmap_single(ndev->device, r->send_sge.addr,
395                                 sizeof(*r->req.rsp), DMA_TO_DEVICE);
396         kfree(r->req.rsp);
399 static int
400 nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
402         struct nvmet_rdma_device *ndev = queue->dev;
403         int nr_rsps = queue->recv_queue_size * 2;
404         int ret = -EINVAL, i;
406         queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp),
407                         GFP_KERNEL);
408         if (!queue->rsps)
409                 goto out;
411         for (i = 0; i < nr_rsps; i++) {
412                 struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
414                 ret = nvmet_rdma_alloc_rsp(ndev, rsp);
415                 if (ret)
416                         goto out_free;
418                 list_add_tail(&rsp->free_list, &queue->free_rsps);
419         }
421         return 0;
423 out_free:
424         while (--i >= 0) {
425                 struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
427                 list_del(&rsp->free_list);
428                 nvmet_rdma_free_rsp(ndev, rsp);
429         }
430         kfree(queue->rsps);
431 out:
432         return ret;
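/*
 * Twice recv_queue_size responses are preallocated, presumably so responses
 * still being sent do not starve newly received commands; if the pool runs
 * dry anyway, nvmet_rdma_get_rsp() falls back to kmalloc.
 */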
435 static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
437         struct nvmet_rdma_device *ndev = queue->dev;
438         int i, nr_rsps = queue->recv_queue_size * 2;
440         for (i = 0; i < nr_rsps; i++) {
441                 struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
443                 list_del(&rsp->free_list);
444                 nvmet_rdma_free_rsp(ndev, rsp);
445         }
446         kfree(queue->rsps);
449 static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
450                 struct nvmet_rdma_cmd *cmd)
452         int ret;
454         ib_dma_sync_single_for_device(ndev->device,
455                 cmd->sge[0].addr, cmd->sge[0].length,
456                 DMA_FROM_DEVICE);
458         if (ndev->srq)
459                 ret = ib_post_srq_recv(ndev->srq, &cmd->wr, NULL);
460         else
461                 ret = ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, NULL);
463         if (unlikely(ret))
464                 pr_err("post_recv cmd failed\n");
466         return ret;
469 static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)
471         spin_lock(&queue->rsp_wr_wait_lock);
472         while (!list_empty(&queue->rsp_wr_wait_list)) {
473                 struct nvmet_rdma_rsp *rsp;
474                 bool ret;
476                 rsp = list_entry(queue->rsp_wr_wait_list.next,
477                                 struct nvmet_rdma_rsp, wait_list);
478                 list_del(&rsp->wait_list);
480                 spin_unlock(&queue->rsp_wr_wait_lock);
481                 ret = nvmet_rdma_execute_command(rsp);
482                 spin_lock(&queue->rsp_wr_wait_lock);
484                 if (!ret) {
485                         list_add(&rsp->wait_list, &queue->rsp_wr_wait_list);
486                         break;
487                 }
488         }
489         spin_unlock(&queue->rsp_wr_wait_lock);
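/*
 * Commands parked on rsp_wr_wait_list ran out of send-queue WR credits in
 * nvmet_rdma_execute_command(); they are retried here, in order, whenever a
 * completed response returns its credits via nvmet_rdma_release_rsp().
 */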
493 static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
495         struct nvmet_rdma_queue *queue = rsp->queue;
497         atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
499         if (rsp->n_rdma) {
500                 rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
501                                 queue->cm_id->port_num, rsp->req.sg,
502                                 rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
503         }
505         if (rsp->req.sg != rsp->cmd->inline_sg)
506                 sgl_free(rsp->req.sg);
508         if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
509                 nvmet_rdma_process_wr_wait_list(queue);
511         nvmet_rdma_put_rsp(rsp);
514 static void nvmet_rdma_error_comp(struct nvmet_rdma_queue *queue)
516         if (queue->nvme_sq.ctrl) {
517                 nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
518         } else {
519                 /*
 520                  * we didn't set up the controller yet, so in case
 521                  * of an admin connect error just disconnect and
 522                  * clean up the queue
523                  */
524                 nvmet_rdma_queue_disconnect(queue);
525         }
528 static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
530         struct nvmet_rdma_rsp *rsp =
531                 container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
533         nvmet_rdma_release_rsp(rsp);
535         if (unlikely(wc->status != IB_WC_SUCCESS &&
536                      wc->status != IB_WC_WR_FLUSH_ERR)) {
537                 pr_err("SEND for CQE 0x%p failed with status %s (%d).\n",
538                         wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
539                 nvmet_rdma_error_comp(rsp->queue);
540         }
543 static void nvmet_rdma_queue_response(struct nvmet_req *req)
545         struct nvmet_rdma_rsp *rsp =
546                 container_of(req, struct nvmet_rdma_rsp, req);
547         struct rdma_cm_id *cm_id = rsp->queue->cm_id;
548         struct ib_send_wr *first_wr;
550         if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) {
551                 rsp->send_wr.opcode = IB_WR_SEND_WITH_INV;
552                 rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey;
553         } else {
554                 rsp->send_wr.opcode = IB_WR_SEND;
555         }
557         if (nvmet_rdma_need_data_out(rsp))
558                 first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
559                                 cm_id->port_num, NULL, &rsp->send_wr);
560         else
561                 first_wr = &rsp->send_wr;
563         nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);
565         ib_dma_sync_single_for_device(rsp->queue->dev->device,
566                 rsp->send_sge.addr, rsp->send_sge.length,
567                 DMA_TO_DEVICE);
569         if (unlikely(ib_post_send(cm_id->qp, first_wr, NULL))) {
570                 pr_err("sending cmd response failed\n");
571                 nvmet_rdma_release_rsp(rsp);
572         }
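/*
 * For commands with data to return, rdma_rw_ctx_wrs() chains the RDMA WRITE
 * WRs in front of the response SEND, so a single ib_post_send() pushes both
 * the data and the completion; remote invalidation piggybacks on the SEND as
 * IB_WR_SEND_WITH_INV when the host asked for it.
 */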
575 static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
577         struct nvmet_rdma_rsp *rsp =
578                 container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);
579         struct nvmet_rdma_queue *queue = cq->cq_context;
581         WARN_ON(rsp->n_rdma <= 0);
582         atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
583         rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
584                         queue->cm_id->port_num, rsp->req.sg,
585                         rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
586         rsp->n_rdma = 0;
588         if (unlikely(wc->status != IB_WC_SUCCESS)) {
589                 nvmet_req_uninit(&rsp->req);
590                 nvmet_rdma_release_rsp(rsp);
591                 if (wc->status != IB_WC_WR_FLUSH_ERR) {
592                         pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n",
593                                 wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
594                         nvmet_rdma_error_comp(queue);
595                 }
596                 return;
597         }
599         nvmet_req_execute(&rsp->req);
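/*
 * Runs when the RDMA READ of the host's write data completes: the rw context
 * and its send-queue credits are released first, and only on success is the
 * command handed to the core for execution.
 */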
602 static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
603                 u64 off)
605         int sg_count = num_pages(len);
606         struct scatterlist *sg;
607         int i;
609         sg = rsp->cmd->inline_sg;
610         for (i = 0; i < sg_count; i++, sg++) {
611                 if (i < sg_count - 1)
612                         sg_unmark_end(sg);
613                 else
614                         sg_mark_end(sg);
615                 sg->offset = off;
616                 sg->length = min_t(int, len, PAGE_SIZE - off);
617                 len -= sg->length;
618                 if (!i)
619                         off = 0;
620         }
622         rsp->req.sg = rsp->cmd->inline_sg;
623         rsp->req.sg_cnt = sg_count;
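/*
 * The inline SGL reuses the command's preallocated receive pages; the byte
 * offset from the SGL descriptor applies only to the first page, every
 * following page starts at offset 0.
 */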
626 static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
628         struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl;
629         u64 off = le64_to_cpu(sgl->addr);
630         u32 len = le32_to_cpu(sgl->length);
632         if (!nvme_is_write(rsp->req.cmd))
633                 return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
635         if (off + len > rsp->queue->dev->inline_data_size) {
636                 pr_err("invalid inline data offset!\n");
637                 return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
638         }
640         /* no data command? */
641         if (!len)
642                 return 0;
644         nvmet_rdma_use_inline_sg(rsp, len, off);
645         rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA;
646         rsp->req.transfer_len += len;
647         return 0;
650 static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
651                 struct nvme_keyed_sgl_desc *sgl, bool invalidate)
653         struct rdma_cm_id *cm_id = rsp->queue->cm_id;
654         u64 addr = le64_to_cpu(sgl->addr);
655         u32 len = get_unaligned_le24(sgl->length);
656         u32 key = get_unaligned_le32(sgl->key);
657         int ret;
659         /* no data command? */
660         if (!len)
661                 return 0;
663         rsp->req.sg = sgl_alloc(len, GFP_KERNEL, &rsp->req.sg_cnt);
664         if (!rsp->req.sg)
665                 return NVME_SC_INTERNAL;
667         ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
668                         rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
669                         nvmet_data_dir(&rsp->req));
670         if (ret < 0)
671                 return NVME_SC_INTERNAL;
672         rsp->req.transfer_len += len;
673         rsp->n_rdma += ret;
675         if (invalidate) {
676                 rsp->invalidate_rkey = key;
677                 rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY;
678         }
680         return 0;
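/*
 * A keyed SGL describes host memory registered under 'key'; rdma_rw_ctx_init()
 * builds the RDMA READ or WRITE WRs for it and returns how many were needed,
 * which is charged against the send queue as n_rdma.  If requested, the rkey
 * is invalidated remotely when the response SEND goes out.
 */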
683 static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
685         struct nvme_keyed_sgl_desc *sgl = &rsp->req.cmd->common.dptr.ksgl;
687         switch (sgl->type >> 4) {
688         case NVME_SGL_FMT_DATA_DESC:
689                 switch (sgl->type & 0xf) {
690                 case NVME_SGL_FMT_OFFSET:
691                         return nvmet_rdma_map_sgl_inline(rsp);
692                 default:
693                         pr_err("invalid SGL subtype: %#x\n", sgl->type);
694                         return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
695                 }
696         case NVME_KEY_SGL_FMT_DATA_DESC:
697                 switch (sgl->type & 0xf) {
698                 case NVME_SGL_FMT_ADDRESS | NVME_SGL_FMT_INVALIDATE:
699                         return nvmet_rdma_map_sgl_keyed(rsp, sgl, true);
700                 case NVME_SGL_FMT_ADDRESS:
701                         return nvmet_rdma_map_sgl_keyed(rsp, sgl, false);
702                 default:
703                         pr_err("invalid SGL subtype: %#x\n", sgl->type);
704                         return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
705                 }
706         default:
707                 pr_err("invalid SGL type: %#x\n", sgl->type);
708                 return NVME_SC_SGL_INVALID_TYPE | NVME_SC_DNR;
709         }
712 static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
714         struct nvmet_rdma_queue *queue = rsp->queue;
716         if (unlikely(atomic_sub_return(1 + rsp->n_rdma,
717                         &queue->sq_wr_avail) < 0)) {
718                 pr_debug("IB send queue full (needed %d): queue %u cntlid %u\n",
719                                 1 + rsp->n_rdma, queue->idx,
720                                 queue->nvme_sq.ctrl->cntlid);
721                 atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
722                 return false;
723         }
725         if (nvmet_rdma_need_data_in(rsp)) {
726                 if (rdma_rw_ctx_post(&rsp->rw, queue->cm_id->qp,
727                                 queue->cm_id->port_num, &rsp->read_cqe, NULL))
728                         nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
729         } else {
730                 nvmet_req_execute(&rsp->req);
731         }
733         return true;
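/*
 * Each command consumes one send WR for the response plus n_rdma WRs for any
 * RDMA READ/WRITE; if that would overdraw sq_wr_avail, the command is left to
 * the caller to park on rsp_wr_wait_list and is retried once credits return.
 */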
736 static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
737                 struct nvmet_rdma_rsp *cmd)
739         u16 status;
741         ib_dma_sync_single_for_cpu(queue->dev->device,
742                 cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
743                 DMA_FROM_DEVICE);
744         ib_dma_sync_single_for_cpu(queue->dev->device,
745                 cmd->send_sge.addr, cmd->send_sge.length,
746                 DMA_TO_DEVICE);
748         if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
749                         &queue->nvme_sq, &nvmet_rdma_ops))
750                 return;
752         status = nvmet_rdma_map_sgl(cmd);
753         if (status)
754                 goto out_err;
756         if (unlikely(!nvmet_rdma_execute_command(cmd))) {
757                 spin_lock(&queue->rsp_wr_wait_lock);
758                 list_add_tail(&cmd->wait_list, &queue->rsp_wr_wait_list);
759                 spin_unlock(&queue->rsp_wr_wait_lock);
760         }
762         return;
764 out_err:
765         nvmet_req_complete(&cmd->req, status);
768 static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
770         struct nvmet_rdma_cmd *cmd =
771                 container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe);
772         struct nvmet_rdma_queue *queue = cq->cq_context;
773         struct nvmet_rdma_rsp *rsp;
775         if (unlikely(wc->status != IB_WC_SUCCESS)) {
776                 if (wc->status != IB_WC_WR_FLUSH_ERR) {
777                         pr_err("RECV for CQE 0x%p failed with status %s (%d)\n",
778                                 wc->wr_cqe, ib_wc_status_msg(wc->status),
779                                 wc->status);
780                         nvmet_rdma_error_comp(queue);
781                 }
782                 return;
783         }
785         if (unlikely(wc->byte_len < sizeof(struct nvme_command))) {
786                 pr_err("Ctrl Fatal Error: capsule size less than 64 bytes\n");
787                 nvmet_rdma_error_comp(queue);
788                 return;
789         }
791         cmd->queue = queue;
792         rsp = nvmet_rdma_get_rsp(queue);
793         if (unlikely(!rsp)) {
794                 /*
 795                  * We get here only under memory pressure;
 796                  * silently drop and have the host retry,
 797                  * as we can't even fail the command.
798                  */
799                 nvmet_rdma_post_recv(queue->dev, cmd);
800                 return;
801         }
802         rsp->queue = queue;
803         rsp->cmd = cmd;
804         rsp->flags = 0;
805         rsp->req.cmd = cmd->nvme_cmd;
806         rsp->req.port = queue->port;
807         rsp->n_rdma = 0;
809         if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
810                 unsigned long flags;
812                 spin_lock_irqsave(&queue->state_lock, flags);
813                 if (queue->state == NVMET_RDMA_Q_CONNECTING)
814                         list_add_tail(&rsp->wait_list, &queue->rsp_wait_list);
815                 else
816                         nvmet_rdma_put_rsp(rsp);
817                 spin_unlock_irqrestore(&queue->state_lock, flags);
818                 return;
819         }
821         nvmet_rdma_handle_command(queue, rsp);
824 static void nvmet_rdma_destroy_srq(struct nvmet_rdma_device *ndev)
826         if (!ndev->srq)
827                 return;
829         nvmet_rdma_free_cmds(ndev, ndev->srq_cmds, ndev->srq_size, false);
830         ib_destroy_srq(ndev->srq);
833 static int nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
835         struct ib_srq_init_attr srq_attr = { NULL, };
836         struct ib_srq *srq;
837         size_t srq_size;
838         int ret, i;
840         srq_size = 4095;        /* XXX: tune */
842         srq_attr.attr.max_wr = srq_size;
843         srq_attr.attr.max_sge = 1 + ndev->inline_page_count;
844         srq_attr.attr.srq_limit = 0;
845         srq_attr.srq_type = IB_SRQT_BASIC;
846         srq = ib_create_srq(ndev->pd, &srq_attr);
847         if (IS_ERR(srq)) {
848                 /*
849                  * If SRQs aren't supported we just go ahead and use normal
850                  * non-shared receive queues.
851                  */
852                 pr_info("SRQ requested but not supported.\n");
853                 return 0;
854         }
856         ndev->srq_cmds = nvmet_rdma_alloc_cmds(ndev, srq_size, false);
857         if (IS_ERR(ndev->srq_cmds)) {
858                 ret = PTR_ERR(ndev->srq_cmds);
859                 goto out_destroy_srq;
860         }
862         ndev->srq = srq;
863         ndev->srq_size = srq_size;
865         for (i = 0; i < srq_size; i++) {
866                 ret = nvmet_rdma_post_recv(ndev, &ndev->srq_cmds[i]);
867                 if (ret)
868                         goto out_free_cmds;
869         }
871         return 0;
873 out_free_cmds:
874         nvmet_rdma_free_cmds(ndev, ndev->srq_cmds, ndev->srq_size, false);
875 out_destroy_srq:
876         ib_destroy_srq(srq);
877         return ret;
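/*
 * SRQ setup is best-effort: if ib_create_srq() fails, the function returns 0
 * and each queue simply posts to its own receive queue instead.  The SRQ
 * depth of 4095 is a fixed value (marked "XXX: tune" above).
 */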
880 static void nvmet_rdma_free_dev(struct kref *ref)
882         struct nvmet_rdma_device *ndev =
883                 container_of(ref, struct nvmet_rdma_device, ref);
885         mutex_lock(&device_list_mutex);
886         list_del(&ndev->entry);
887         mutex_unlock(&device_list_mutex);
889         nvmet_rdma_destroy_srq(ndev);
890         ib_dealloc_pd(ndev->pd);
892         kfree(ndev);
895 static struct nvmet_rdma_device *
896 nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
898         struct nvmet_port *port = cm_id->context;
899         struct nvmet_rdma_device *ndev;
900         int inline_page_count;
901         int inline_sge_count;
902         int ret;
904         mutex_lock(&device_list_mutex);
905         list_for_each_entry(ndev, &device_list, entry) {
906                 if (ndev->device->node_guid == cm_id->device->node_guid &&
907                     kref_get_unless_zero(&ndev->ref))
908                         goto out_unlock;
909         }
911         ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
912         if (!ndev)
913                 goto out_err;
915         inline_page_count = num_pages(port->inline_data_size);
916         inline_sge_count = max(cm_id->device->attrs.max_sge_rd,
917                                 cm_id->device->attrs.max_recv_sge) - 1;
918         if (inline_page_count > inline_sge_count) {
919                 pr_warn("inline_data_size %d cannot be supported by device %s. Reducing to %lu.\n",
920                         port->inline_data_size, cm_id->device->name,
921                         inline_sge_count * PAGE_SIZE);
922                 port->inline_data_size = inline_sge_count * PAGE_SIZE;
923                 inline_page_count = inline_sge_count;
924         }
925         ndev->inline_data_size = port->inline_data_size;
926         ndev->inline_page_count = inline_page_count;
927         ndev->device = cm_id->device;
928         kref_init(&ndev->ref);
930         ndev->pd = ib_alloc_pd(ndev->device, 0);
931         if (IS_ERR(ndev->pd))
932                 goto out_free_dev;
934         if (nvmet_rdma_use_srq) {
935                 ret = nvmet_rdma_init_srq(ndev);
936                 if (ret)
937                         goto out_free_pd;
938         }
940         list_add(&ndev->entry, &device_list);
941 out_unlock:
942         mutex_unlock(&device_list_mutex);
943         pr_debug("added %s.\n", ndev->device->name);
944         return ndev;
946 out_free_pd:
947         ib_dealloc_pd(ndev->pd);
948 out_free_dev:
949         kfree(ndev);
950 out_err:
951         mutex_unlock(&device_list_mutex);
952         return NULL;
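/*
 * Devices are cached on device_list keyed by node GUID and reference counted,
 * so every queue on the same HCA shares one PD (and one SRQ when use_srq is
 * set).  The port's inline_data_size is clamped so the inline pages fit the
 * device's per-RECV SGE limit, with one SGE reserved for the command itself.
 */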
955 static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
957         struct ib_qp_init_attr qp_attr;
958         struct nvmet_rdma_device *ndev = queue->dev;
959         int comp_vector, nr_cqe, ret, i;
961         /*
 962          * Spread the I/O queues across completion vectors,
963          * but still keep all admin queues on vector 0.
964          */
965         comp_vector = !queue->host_qid ? 0 :
966                 queue->idx % ndev->device->num_comp_vectors;
968         /*
969          * Reserve CQ slots for RECV + RDMA_READ/RDMA_WRITE + RDMA_SEND.
970          */
971         nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size;
973         queue->cq = ib_alloc_cq(ndev->device, queue,
974                         nr_cqe + 1, comp_vector,
975                         IB_POLL_WORKQUEUE);
976         if (IS_ERR(queue->cq)) {
977                 ret = PTR_ERR(queue->cq);
978                 pr_err("failed to create CQ cqe= %d ret= %d\n",
979                        nr_cqe + 1, ret);
980                 goto out;
981         }
983         memset(&qp_attr, 0, sizeof(qp_attr));
984         qp_attr.qp_context = queue;
985         qp_attr.event_handler = nvmet_rdma_qp_event;
986         qp_attr.send_cq = queue->cq;
987         qp_attr.recv_cq = queue->cq;
988         qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
989         qp_attr.qp_type = IB_QPT_RC;
990         /* +1 for drain */
991         qp_attr.cap.max_send_wr = queue->send_queue_size + 1;
992         qp_attr.cap.max_rdma_ctxs = queue->send_queue_size;
993         qp_attr.cap.max_send_sge = max(ndev->device->attrs.max_sge_rd,
994                                         ndev->device->attrs.max_send_sge);
996         if (ndev->srq) {
997                 qp_attr.srq = ndev->srq;
998         } else {
999                 /* +1 for drain */
1000                 qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size;
1001                 qp_attr.cap.max_recv_sge = 1 + ndev->inline_page_count;
1002         }
1004         ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr);
1005         if (ret) {
1006                 pr_err("failed to create_qp ret= %d\n", ret);
1007                 goto err_destroy_cq;
1008         }
1010         atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr);
1012         pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
1013                  __func__, queue->cq->cqe, qp_attr.cap.max_send_sge,
1014                  qp_attr.cap.max_send_wr, queue->cm_id);
1016         if (!ndev->srq) {
1017                 for (i = 0; i < queue->recv_queue_size; i++) {
1018                         queue->cmds[i].queue = queue;
1019                         ret = nvmet_rdma_post_recv(ndev, &queue->cmds[i]);
1020                         if (ret)
1021                                 goto err_destroy_qp;
1022                 }
1023         }
1025 out:
1026         return ret;
1028 err_destroy_qp:
1029         rdma_destroy_qp(queue->cm_id);
1030 err_destroy_cq:
1031         ib_free_cq(queue->cq);
1032         goto out;
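/*
 * CQ sizing follows the comment above: one slot per RECV plus two per send
 * queue entry (an RDMA READ/WRITE completion and the SEND completion); e.g.
 * recv_queue_size 128 and send_queue_size 128 give nr_cqe = 128 + 2 * 128 =
 * 384, and the CQ is allocated with nr_cqe + 1 entries.
 */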
1035 static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
1037         struct ib_qp *qp = queue->cm_id->qp;
1039         ib_drain_qp(qp);
1040         rdma_destroy_id(queue->cm_id);
1041         ib_destroy_qp(qp);
1042         ib_free_cq(queue->cq);
1045 static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
1047         pr_debug("freeing queue %d\n", queue->idx);
1049         nvmet_sq_destroy(&queue->nvme_sq);
1051         nvmet_rdma_destroy_queue_ib(queue);
1052         if (!queue->dev->srq) {
1053                 nvmet_rdma_free_cmds(queue->dev, queue->cmds,
1054                                 queue->recv_queue_size,
1055                                 !queue->host_qid);
1056         }
1057         nvmet_rdma_free_rsps(queue);
1058         ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
1059         kfree(queue);
1062 static void nvmet_rdma_release_queue_work(struct work_struct *w)
1064         struct nvmet_rdma_queue *queue =
1065                 container_of(w, struct nvmet_rdma_queue, release_work);
1066         struct nvmet_rdma_device *dev = queue->dev;
1068         nvmet_rdma_free_queue(queue);
1070         kref_put(&dev->ref, nvmet_rdma_free_dev);
1073 static int
1074 nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn,
1075                                 struct nvmet_rdma_queue *queue)
1077         struct nvme_rdma_cm_req *req;
1079         req = (struct nvme_rdma_cm_req *)conn->private_data;
1080         if (!req || conn->private_data_len == 0)
1081                 return NVME_RDMA_CM_INVALID_LEN;
1083         if (le16_to_cpu(req->recfmt) != NVME_RDMA_CM_FMT_1_0)
1084                 return NVME_RDMA_CM_INVALID_RECFMT;
1086         queue->host_qid = le16_to_cpu(req->qid);
1088         /*
 1089          * our recv queue size corresponds to req->hsqsize plus 1,
 1090          * our send queue size corresponds to req->hrqsize
1091          */
1092         queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1;
1093         queue->send_queue_size = le16_to_cpu(req->hrqsize);
1095         if (!queue->host_qid && queue->recv_queue_size > NVME_AQ_DEPTH)
1096                 return NVME_RDMA_CM_INVALID_HSQSIZE;
1098         /* XXX: Should we enforce some kind of max for IO queues? */
1100         return 0;
1103 static int nvmet_rdma_cm_reject(struct rdma_cm_id *cm_id,
1104                                 enum nvme_rdma_cm_status status)
1106         struct nvme_rdma_cm_rej rej;
1108         pr_debug("rejecting connect request: status %d (%s)\n",
1109                  status, nvme_rdma_cm_msg(status));
1111         rej.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
1112         rej.sts = cpu_to_le16(status);
1114         return rdma_reject(cm_id, (void *)&rej, sizeof(rej));
1117 static struct nvmet_rdma_queue *
1118 nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
1119                 struct rdma_cm_id *cm_id,
1120                 struct rdma_cm_event *event)
1122         struct nvmet_rdma_queue *queue;
1123         int ret;
1125         queue = kzalloc(sizeof(*queue), GFP_KERNEL);
1126         if (!queue) {
1127                 ret = NVME_RDMA_CM_NO_RSC;
1128                 goto out_reject;
1129         }
1131         ret = nvmet_sq_init(&queue->nvme_sq);
1132         if (ret) {
1133                 ret = NVME_RDMA_CM_NO_RSC;
1134                 goto out_free_queue;
1135         }
1137         ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue);
1138         if (ret)
1139                 goto out_destroy_sq;
1141         /*
1142          * Schedules the actual release because calling rdma_destroy_id from
1143          * inside a CM callback would trigger a deadlock. (great API design..)
1144          */
1145         INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work);
1146         queue->dev = ndev;
1147         queue->cm_id = cm_id;
1149         spin_lock_init(&queue->state_lock);
1150         queue->state = NVMET_RDMA_Q_CONNECTING;
1151         INIT_LIST_HEAD(&queue->rsp_wait_list);
1152         INIT_LIST_HEAD(&queue->rsp_wr_wait_list);
1153         spin_lock_init(&queue->rsp_wr_wait_lock);
1154         INIT_LIST_HEAD(&queue->free_rsps);
1155         spin_lock_init(&queue->rsps_lock);
1156         INIT_LIST_HEAD(&queue->queue_list);
1158         queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL);
1159         if (queue->idx < 0) {
1160                 ret = NVME_RDMA_CM_NO_RSC;
1161                 goto out_destroy_sq;
1162         }
1164         ret = nvmet_rdma_alloc_rsps(queue);
1165         if (ret) {
1166                 ret = NVME_RDMA_CM_NO_RSC;
1167                 goto out_ida_remove;
1168         }
1170         if (!ndev->srq) {
1171                 queue->cmds = nvmet_rdma_alloc_cmds(ndev,
1172                                 queue->recv_queue_size,
1173                                 !queue->host_qid);
1174                 if (IS_ERR(queue->cmds)) {
1175                         ret = NVME_RDMA_CM_NO_RSC;
1176                         goto out_free_responses;
1177                 }
1178         }
1180         ret = nvmet_rdma_create_queue_ib(queue);
1181         if (ret) {
1182                 pr_err("%s: creating RDMA queue failed (%d).\n",
1183                         __func__, ret);
1184                 ret = NVME_RDMA_CM_NO_RSC;
1185                 goto out_free_cmds;
1186         }
1188         return queue;
1190 out_free_cmds:
1191         if (!ndev->srq) {
1192                 nvmet_rdma_free_cmds(queue->dev, queue->cmds,
1193                                 queue->recv_queue_size,
1194                                 !queue->host_qid);
1195         }
1196 out_free_responses:
1197         nvmet_rdma_free_rsps(queue);
1198 out_ida_remove:
1199         ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
1200 out_destroy_sq:
1201         nvmet_sq_destroy(&queue->nvme_sq);
1202 out_free_queue:
1203         kfree(queue);
1204 out_reject:
1205         nvmet_rdma_cm_reject(cm_id, ret);
1206         return NULL;
1209 static void nvmet_rdma_qp_event(struct ib_event *event, void *priv)
1211         struct nvmet_rdma_queue *queue = priv;
1213         switch (event->event) {
1214         case IB_EVENT_COMM_EST:
1215                 rdma_notify(queue->cm_id, event->event);
1216                 break;
1217         default:
1218                 pr_err("received IB QP event: %s (%d)\n",
1219                        ib_event_msg(event->event), event->event);
1220                 break;
1221         }
1224 static int nvmet_rdma_cm_accept(struct rdma_cm_id *cm_id,
1225                 struct nvmet_rdma_queue *queue,
1226                 struct rdma_conn_param *p)
1228         struct rdma_conn_param  param = { };
1229         struct nvme_rdma_cm_rep priv = { };
1230         int ret = -ENOMEM;
1232         param.rnr_retry_count = 7;
1233         param.flow_control = 1;
1234         param.initiator_depth = min_t(u8, p->initiator_depth,
1235                 queue->dev->device->attrs.max_qp_init_rd_atom);
1236         param.private_data = &priv;
1237         param.private_data_len = sizeof(priv);
1238         priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
1239         priv.crqsize = cpu_to_le16(queue->recv_queue_size);
1241         ret = rdma_accept(cm_id, &param);
1242         if (ret)
1243                 pr_err("rdma_accept failed (error code = %d)\n", ret);
1245         return ret;
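/*
 * The accept reply carries NVMe/RDMA CM private data: record format 1.0 and
 * a crqsize equal to the queue's receive depth.  The host's requested
 * initiator_depth is clamped to the device's max_qp_init_rd_atom.
 */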
1248 static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
1249                 struct rdma_cm_event *event)
1251         struct nvmet_rdma_device *ndev;
1252         struct nvmet_rdma_queue *queue;
1253         int ret = -EINVAL;
1255         ndev = nvmet_rdma_find_get_device(cm_id);
1256         if (!ndev) {
1257                 nvmet_rdma_cm_reject(cm_id, NVME_RDMA_CM_NO_RSC);
1258                 return -ECONNREFUSED;
1259         }
1261         queue = nvmet_rdma_alloc_queue(ndev, cm_id, event);
1262         if (!queue) {
1263                 ret = -ENOMEM;
1264                 goto put_device;
1265         }
1266         queue->port = cm_id->context;
1268         if (queue->host_qid == 0) {
1269                 /* Let inflight controller teardown complete */
1270                 flush_scheduled_work();
1271         }
1273         ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
1274         if (ret) {
1275                 schedule_work(&queue->release_work);
1276                 /* Destroying rdma_cm id is not needed here */
1277                 return 0;
1278         }
1280         mutex_lock(&nvmet_rdma_queue_mutex);
1281         list_add_tail(&queue->queue_list, &nvmet_rdma_queue_list);
1282         mutex_unlock(&nvmet_rdma_queue_mutex);
1284         return 0;
1286 put_device:
1287         kref_put(&ndev->ref, nvmet_rdma_free_dev);
1289         return ret;
1292 static void nvmet_rdma_queue_established(struct nvmet_rdma_queue *queue)
1294         unsigned long flags;
1296         spin_lock_irqsave(&queue->state_lock, flags);
1297         if (queue->state != NVMET_RDMA_Q_CONNECTING) {
1298                 pr_warn("trying to establish a connected queue\n");
1299                 goto out_unlock;
1300         }
1301         queue->state = NVMET_RDMA_Q_LIVE;
1303         while (!list_empty(&queue->rsp_wait_list)) {
1304                 struct nvmet_rdma_rsp *cmd;
1306                 cmd = list_first_entry(&queue->rsp_wait_list,
1307                                         struct nvmet_rdma_rsp, wait_list);
1308                 list_del(&cmd->wait_list);
1310                 spin_unlock_irqrestore(&queue->state_lock, flags);
1311                 nvmet_rdma_handle_command(queue, cmd);
1312                 spin_lock_irqsave(&queue->state_lock, flags);
1313         }
1315 out_unlock:
1316         spin_unlock_irqrestore(&queue->state_lock, flags);
1319 static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
1321         bool disconnect = false;
1322         unsigned long flags;
1324         pr_debug("cm_id= %p queue->state= %d\n", queue->cm_id, queue->state);
1326         spin_lock_irqsave(&queue->state_lock, flags);
1327         switch (queue->state) {
1328         case NVMET_RDMA_Q_CONNECTING:
1329         case NVMET_RDMA_Q_LIVE:
1330                 queue->state = NVMET_RDMA_Q_DISCONNECTING;
1331                 disconnect = true;
1332                 break;
1333         case NVMET_RDMA_Q_DISCONNECTING:
1334                 break;
1335         }
1336         spin_unlock_irqrestore(&queue->state_lock, flags);
1338         if (disconnect) {
1339                 rdma_disconnect(queue->cm_id);
1340                 schedule_work(&queue->release_work);
1341         }
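/*
 * The state machine collapses repeated disconnects: only the first caller to
 * move the queue out of CONNECTING/LIVE issues rdma_disconnect() and schedules
 * the release work; later callers see DISCONNECTING and do nothing.
 */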
1344 static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
1346         bool disconnect = false;
1348         mutex_lock(&nvmet_rdma_queue_mutex);
1349         if (!list_empty(&queue->queue_list)) {
1350                 list_del_init(&queue->queue_list);
1351                 disconnect = true;
1352         }
1353         mutex_unlock(&nvmet_rdma_queue_mutex);
1355         if (disconnect)
1356                 __nvmet_rdma_queue_disconnect(queue);
1359 static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
1360                 struct nvmet_rdma_queue *queue)
1362         WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING);
1364         mutex_lock(&nvmet_rdma_queue_mutex);
1365         if (!list_empty(&queue->queue_list))
1366                 list_del_init(&queue->queue_list);
1367         mutex_unlock(&nvmet_rdma_queue_mutex);
1369         pr_err("failed to connect queue %d\n", queue->idx);
1370         schedule_work(&queue->release_work);
1373 /**
 1374  * nvmet_rdma_device_removal() - Handle RDMA device removal
1375  * @cm_id:      rdma_cm id, used for nvmet port
1376  * @queue:      nvmet rdma queue (cm id qp_context)
1377  *
1378  * DEVICE_REMOVAL event notifies us that the RDMA device is about
1379  * to unplug. Note that this event can be generated on a normal
 1380  * queue cm_id and/or a device-bound listener cm_id (in which case
 1381  * queue will be null).
1382  *
1383  * We registered an ib_client to handle device removal for queues,
1384  * so we only need to handle the listening port cm_ids. In this case
 1385  * we nullify the priv to prevent double cm_id destruction and destroy
 1386  * the cm_id implicitly by returning a non-zero rc to the callout.
1387  */
1388 static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
1389                 struct nvmet_rdma_queue *queue)
1391         struct nvmet_port *port;
1393         if (queue) {
1394                 /*
 1395                  * This is a queue cm_id. We have registered
 1396                  * an ib_client to handle queue removal,
 1397                  * so don't interfere and just return.
1398                  */
1399                 return 0;
1400         }
1402         port = cm_id->context;
1404         /*
1405          * This is a listener cm_id. Make sure that
1406          * future remove_port won't invoke a double
1407          * cm_id destroy. use atomic xchg to make sure
1408          * we don't compete with remove_port.
1409          */
1410         if (xchg(&port->priv, NULL) != cm_id)
1411                 return 0;
1413         /*
1414          * We need to return 1 so that the core will destroy
 1415          * its own ID.  What a great API design..
1416          */
1417         return 1;
1420 static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
1421                 struct rdma_cm_event *event)
1423         struct nvmet_rdma_queue *queue = NULL;
1424         int ret = 0;
1426         if (cm_id->qp)
1427                 queue = cm_id->qp->qp_context;
1429         pr_debug("%s (%d): status %d id %p\n",
1430                 rdma_event_msg(event->event), event->event,
1431                 event->status, cm_id);
1433         switch (event->event) {
1434         case RDMA_CM_EVENT_CONNECT_REQUEST:
1435                 ret = nvmet_rdma_queue_connect(cm_id, event);
1436                 break;
1437         case RDMA_CM_EVENT_ESTABLISHED:
1438                 nvmet_rdma_queue_established(queue);
1439                 break;
1440         case RDMA_CM_EVENT_ADDR_CHANGE:
1441         case RDMA_CM_EVENT_DISCONNECTED:
1442         case RDMA_CM_EVENT_TIMEWAIT_EXIT:
1443                 nvmet_rdma_queue_disconnect(queue);
1444                 break;
1445         case RDMA_CM_EVENT_DEVICE_REMOVAL:
1446                 ret = nvmet_rdma_device_removal(cm_id, queue);
1447                 break;
1448         case RDMA_CM_EVENT_REJECTED:
1449                 pr_debug("Connection rejected: %s\n",
1450                          rdma_reject_msg(cm_id, event->status));
1451                 /* FALLTHROUGH */
1452         case RDMA_CM_EVENT_UNREACHABLE:
1453         case RDMA_CM_EVENT_CONNECT_ERROR:
1454                 nvmet_rdma_queue_connect_fail(cm_id, queue);
1455                 break;
1456         default:
1457                 pr_err("received unrecognized RDMA CM event %d\n",
1458                         event->event);
1459                 break;
1460         }
1462         return ret;
1465 static void nvmet_rdma_delete_ctrl(struct nvmet_ctrl *ctrl)
1467         struct nvmet_rdma_queue *queue;
1469 restart:
1470         mutex_lock(&nvmet_rdma_queue_mutex);
1471         list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) {
1472                 if (queue->nvme_sq.ctrl == ctrl) {
1473                         list_del_init(&queue->queue_list);
1474                         mutex_unlock(&nvmet_rdma_queue_mutex);
1476                         __nvmet_rdma_queue_disconnect(queue);
1477                         goto restart;
1478                 }
1479         }
1480         mutex_unlock(&nvmet_rdma_queue_mutex);
1483 static int nvmet_rdma_add_port(struct nvmet_port *port)
1485         struct rdma_cm_id *cm_id;
1486         struct sockaddr_storage addr = { };
1487         __kernel_sa_family_t af;
1488         int ret;
1490         switch (port->disc_addr.adrfam) {
1491         case NVMF_ADDR_FAMILY_IP4:
1492                 af = AF_INET;
1493                 break;
1494         case NVMF_ADDR_FAMILY_IP6:
1495                 af = AF_INET6;
1496                 break;
1497         default:
1498                 pr_err("address family %d not supported\n",
1499                                 port->disc_addr.adrfam);
1500                 return -EINVAL;
1501         }
1503         if (port->inline_data_size < 0) {
1504                 port->inline_data_size = NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE;
1505         } else if (port->inline_data_size > NVMET_RDMA_MAX_INLINE_DATA_SIZE) {
1506                 pr_warn("inline_data_size %u is too large, reducing to %u\n",
1507                         port->inline_data_size,
1508                         NVMET_RDMA_MAX_INLINE_DATA_SIZE);
1509                 port->inline_data_size = NVMET_RDMA_MAX_INLINE_DATA_SIZE;
1510         }
1512         ret = inet_pton_with_scope(&init_net, af, port->disc_addr.traddr,
1513                         port->disc_addr.trsvcid, &addr);
1514         if (ret) {
1515                 pr_err("malformed ip/port passed: %s:%s\n",
1516                         port->disc_addr.traddr, port->disc_addr.trsvcid);
1517                 return ret;
1518         }
1520         cm_id = rdma_create_id(&init_net, nvmet_rdma_cm_handler, port,
1521                         RDMA_PS_TCP, IB_QPT_RC);
1522         if (IS_ERR(cm_id)) {
1523                 pr_err("CM ID creation failed\n");
1524                 return PTR_ERR(cm_id);
1525         }
1527         /*
1528          * Allow both IPv4 and IPv6 sockets to bind a single port
1529          * at the same time.
1530          */
1531         ret = rdma_set_afonly(cm_id, 1);
1532         if (ret) {
1533                 pr_err("rdma_set_afonly failed (%d)\n", ret);
1534                 goto out_destroy_id;
1535         }
1537         ret = rdma_bind_addr(cm_id, (struct sockaddr *)&addr);
1538         if (ret) {
1539                 pr_err("binding CM ID to %pISpcs failed (%d)\n",
1540                         (struct sockaddr *)&addr, ret);
1541                 goto out_destroy_id;
1542         }
1544         ret = rdma_listen(cm_id, 128);
1545         if (ret) {
1546                 pr_err("listening to %pISpcs failed (%d)\n",
1547                         (struct sockaddr *)&addr, ret);
1548                 goto out_destroy_id;
1549         }
1551         pr_info("enabling port %d (%pISpcs)\n",
1552                 le16_to_cpu(port->disc_addr.portid), (struct sockaddr *)&addr);
1553         port->priv = cm_id;
1554         return 0;
1556 out_destroy_id:
1557         rdma_destroy_id(cm_id);
1558         return ret;
1561 static void nvmet_rdma_remove_port(struct nvmet_port *port)
1563         struct rdma_cm_id *cm_id = xchg(&port->priv, NULL);
1565         if (cm_id)
1566                 rdma_destroy_id(cm_id);
1569 static void nvmet_rdma_disc_port_addr(struct nvmet_req *req,
1570                 struct nvmet_port *port, char *traddr)
1572         struct rdma_cm_id *cm_id = port->priv;
1574         if (inet_addr_is_any((struct sockaddr *)&cm_id->route.addr.src_addr)) {
1575                 struct nvmet_rdma_rsp *rsp =
1576                         container_of(req, struct nvmet_rdma_rsp, req);
1577                 struct rdma_cm_id *req_cm_id = rsp->queue->cm_id;
1578                 struct sockaddr *addr = (void *)&req_cm_id->route.addr.src_addr;
1580                 sprintf(traddr, "%pISc", addr);
1581         } else {
1582                 memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
1583         }
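/*
 * If the port listens on a wildcard address, the discovery log cannot report
 * it verbatim, so the local address of the connection carrying this request
 * is reported instead.
 */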
1586 static const struct nvmet_fabrics_ops nvmet_rdma_ops = {
1587         .owner                  = THIS_MODULE,
1588         .type                   = NVMF_TRTYPE_RDMA,
1589         .msdbd                  = 1,
1590         .has_keyed_sgls         = 1,
1591         .add_port               = nvmet_rdma_add_port,
1592         .remove_port            = nvmet_rdma_remove_port,
1593         .queue_response         = nvmet_rdma_queue_response,
1594         .delete_ctrl            = nvmet_rdma_delete_ctrl,
1595         .disc_traddr            = nvmet_rdma_disc_port_addr,
1596 };
1598 static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data)
1600         struct nvmet_rdma_queue *queue, *tmp;
1601         struct nvmet_rdma_device *ndev;
1602         bool found = false;
1604         mutex_lock(&device_list_mutex);
1605         list_for_each_entry(ndev, &device_list, entry) {
1606                 if (ndev->device == ib_device) {
1607                         found = true;
1608                         break;
1609                 }
1610         }
1611         mutex_unlock(&device_list_mutex);
1613         if (!found)
1614                 return;
1616         /*
 1617          * The IB device used by nvmet controllers is being removed;
 1618          * delete all queues using this device.
1619          */
1620         mutex_lock(&nvmet_rdma_queue_mutex);
1621         list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list,
1622                                  queue_list) {
1623                 if (queue->dev->device != ib_device)
1624                         continue;
1626                 pr_info("Removing queue %d\n", queue->idx);
1627                 list_del_init(&queue->queue_list);
1628                 __nvmet_rdma_queue_disconnect(queue);
1629         }
1630         mutex_unlock(&nvmet_rdma_queue_mutex);
1632         flush_scheduled_work();
1635 static struct ib_client nvmet_rdma_ib_client = {
1636         .name   = "nvmet_rdma",
1637         .remove = nvmet_rdma_remove_one
1638 };
1640 static int __init nvmet_rdma_init(void)
1642         int ret;
1644         ret = ib_register_client(&nvmet_rdma_ib_client);
1645         if (ret)
1646                 return ret;
1648         ret = nvmet_register_transport(&nvmet_rdma_ops);
1649         if (ret)
1650                 goto err_ib_client;
1652         return 0;
1654 err_ib_client:
1655         ib_unregister_client(&nvmet_rdma_ib_client);
1656         return ret;
1659 static void __exit nvmet_rdma_exit(void)
1661         nvmet_unregister_transport(&nvmet_rdma_ops);
1662         ib_unregister_client(&nvmet_rdma_ib_client);
1663         WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list));
1664         ida_destroy(&nvmet_rdma_queue_ida);
1667 module_init(nvmet_rdma_init);
1668 module_exit(nvmet_rdma_exit);
1670 MODULE_LICENSE("GPL v2");
1671 MODULE_ALIAS("nvmet-transport-1"); /* 1 == NVMF_TRTYPE_RDMA */