1 /*******************************************************************************
2  * IBM Virtual SCSI Target Driver
3  * Copyright (C) 2003-2005 Dave Boutcher (boutcher@us.ibm.com) IBM Corp.
4  *                         Santiago Leon (santil@us.ibm.com) IBM Corp.
5  *                         Linda Xie (lxie@us.ibm.com) IBM Corp.
6  *
7  * Copyright (C) 2005-2011 FUJITA Tomonori <tomof@acm.org>
8  * Copyright (C) 2010 Nicholas A. Bellinger <nab@kernel.org>
9  *
10  * Authors: Bryant G. Ly <bryantly@linux.vnet.ibm.com>
11  * Authors: Michael Cyr <mikecyr@linux.vnet.ibm.com>
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License as published by
15  * the Free Software Foundation; either version 2 of the License, or
16  * (at your option) any later version.
17  *
18  * This program is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21  * GNU General Public License for more details.
22  *
23  ****************************************************************************/
25 #define pr_fmt(fmt)     KBUILD_MODNAME ": " fmt
27 #include <linux/module.h>
28 #include <linux/kernel.h>
29 #include <linux/slab.h>
30 #include <linux/types.h>
31 #include <linux/list.h>
32 #include <linux/string.h>
33 #include <linux/delay.h>
35 #include <target/target_core_base.h>
36 #include <target/target_core_fabric.h>
38 #include <asm/hvcall.h>
39 #include <asm/vio.h>
41 #include <scsi/viosrp.h>
43 #include "ibmvscsi_tgt.h"
45 #define IBMVSCSIS_VERSION       "v0.2"
47 #define INITIAL_SRP_LIMIT       800
48 #define DEFAULT_MAX_SECTORS     256
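/* 1 MB (1024 * 1024 bytes) maximum transfer unit */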
49 #define MAX_TXU                 1024 * 1024
51 static uint max_vdma_size = MAX_H_COPY_RDMA;
53 static char system_id[SYS_ID_NAME_LEN] = "";
54 static char partition_name[PARTITION_NAMELEN] = "UNKNOWN";
55 static uint partition_number = -1;
57 /* Adapter list and lock to control it */
58 static DEFINE_SPINLOCK(ibmvscsis_dev_lock);
59 static LIST_HEAD(ibmvscsis_dev_list);
61 static long ibmvscsis_parse_command(struct scsi_info *vscsi,
62                                     struct viosrp_crq *crq);
64 static void ibmvscsis_adapter_idle(struct scsi_info *vscsi);
66 static void ibmvscsis_determine_resid(struct se_cmd *se_cmd,
67                                       struct srp_rsp *rsp)
68 {
69         u32 residual_count = se_cmd->residual_count;
71         if (!residual_count)
72                 return;
74         if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
75                 if (se_cmd->data_direction == DMA_TO_DEVICE) {
76                         /* residual data from an underflow write */
77                         rsp->flags = SRP_RSP_FLAG_DOUNDER;
78                         rsp->data_out_res_cnt = cpu_to_be32(residual_count);
79                 } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
80                         /* residual data from an underflow read */
81                         rsp->flags = SRP_RSP_FLAG_DIUNDER;
82                         rsp->data_in_res_cnt = cpu_to_be32(residual_count);
83                 }
84         } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
85                 if (se_cmd->data_direction == DMA_TO_DEVICE) {
86                         /* residual data from an overflow write */
87                         rsp->flags = SRP_RSP_FLAG_DOOVER;
88                         rsp->data_out_res_cnt = cpu_to_be32(residual_count);
89                 } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
90                         /* residual data from an overflow read */
91                         rsp->flags = SRP_RSP_FLAG_DIOVER;
92                         rsp->data_in_res_cnt = cpu_to_be32(residual_count);
93                 }
94         }
95 }
97 /**
98  * connection_broken() - Determine if the connection to the client is good
99  * @vscsi:      Pointer to our adapter structure
100  *
101  * This function attempts to send a ping MAD to the client. If the call to
102  * queue the request returns H_CLOSED then the connection has been broken
103  * and the function returns TRUE.
104  *
105  * EXECUTION ENVIRONMENT:
106  *      Interrupt or Process environment
107  */
108 static bool connection_broken(struct scsi_info *vscsi)
109 {
110         struct viosrp_crq *crq;
111         u64 buffer[2] = { 0, 0 };
112         long h_return_code;
113         bool rc = false;
115         /* create a PING crq */
116         crq = (struct viosrp_crq *)&buffer;
117         crq->valid = VALID_CMD_RESP_EL;
118         crq->format = MESSAGE_IN_CRQ;
119         crq->status = PING;
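        /*
         * Queue the ping on the client's CRQ; as described above, an
         * H_CLOSED return means the client's queue is gone and the
         * connection is broken.
         */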
121         h_return_code = h_send_crq(vscsi->dds.unit_id,
122                                    cpu_to_be64(buffer[MSG_HI]),
123                                    cpu_to_be64(buffer[MSG_LOW]));
125         dev_dbg(&vscsi->dev, "Connection_broken: rc %ld\n", h_return_code);
127         if (h_return_code == H_CLOSED)
128                 rc = true;
130         return rc;
131 }
133 /**
134  * ibmvscsis_unregister_command_q() - Helper Function-Unregister Command Queue
135  * @vscsi:      Pointer to our adapter structure
136  *
137  * This function calls h_free_crq, then frees the interrupt bit, etc.
138  * It must release the lock before doing so because of the time it can take
139  * for h_free_crq in PHYP
140  * NOTE: the caller must make sure that state and or flags will prevent
141  *       interrupt handler from scheduling work.
142  * NOTE: anyone calling this function may need to set the CRQ_CLOSED flag;
143  *       we can't do it here because we don't have the lock
144  *
145  * EXECUTION ENVIRONMENT:
146  *      Process level
147  */
148 static long ibmvscsis_unregister_command_q(struct scsi_info *vscsi)
149 {
150         long qrc;
151         long rc = ADAPT_SUCCESS;
152         int ticks = 0;
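        /*
         * Retry h_free_crq while the hypervisor reports busy; 'ticks'
         * roughly tracks the time slept in milliseconds so the loop can
         * give up after about 300 seconds.
         */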
154         do {
155                 qrc = h_free_crq(vscsi->dds.unit_id);
156                 switch (qrc) {
157                 case H_SUCCESS:
158                         spin_lock_bh(&vscsi->intr_lock);
159                         vscsi->flags &= ~PREP_FOR_SUSPEND_FLAGS;
160                         spin_unlock_bh(&vscsi->intr_lock);
161                         break;
163                 case H_HARDWARE:
164                 case H_PARAMETER:
165                         dev_err(&vscsi->dev, "unregister_command_q: error from h_free_crq %ld\n",
166                                 qrc);
167                         rc = ERROR;
168                         break;
170                 case H_BUSY:
171                 case H_LONG_BUSY_ORDER_1_MSEC:
172                         /* msleep not good for small values */
173                         usleep_range(1000, 2000);
174                         ticks += 1;
175                         break;
176                 case H_LONG_BUSY_ORDER_10_MSEC:
177                         usleep_range(10000, 20000);
178                         ticks += 10;
179                         break;
180                 case H_LONG_BUSY_ORDER_100_MSEC:
181                         msleep(100);
182                         ticks += 100;
183                         break;
184                 case H_LONG_BUSY_ORDER_1_SEC:
185                         ssleep(1);
186                         ticks += 1000;
187                         break;
188                 case H_LONG_BUSY_ORDER_10_SEC:
189                         ssleep(10);
190                         ticks += 10000;
191                         break;
192                 case H_LONG_BUSY_ORDER_100_SEC:
193                         ssleep(100);
194                         ticks += 100000;
195                         break;
196                 default:
197                         dev_err(&vscsi->dev, "unregister_command_q: unknown error %ld from h_free_crq\n",
198                                 qrc);
199                         rc = ERROR;
200                         break;
201                 }
203                 /*
204                  * don't wait more than 300 seconds;
205                  * ticks are roughly in milliseconds
206                  */
207                 if (ticks > 300000 && qrc != H_SUCCESS) {
208                         rc = ERROR;
209                         dev_err(&vscsi->dev, "Excessive wait for h_free_crq\n");
210                 }
211         } while (qrc != H_SUCCESS && rc == ADAPT_SUCCESS);
213         dev_dbg(&vscsi->dev, "Freeing CRQ: phyp rc %ld, rc %ld\n", qrc, rc);
215         return rc;
216 }
218 /**
219  * ibmvscsis_delete_client_info() - Helper function to Delete Client Info
220  * @vscsi:      Pointer to our adapter structure
221  * @client_closed:      True if client closed its queue
222  *
223  * Deletes information specific to the client when the client goes away
224  *
225  * EXECUTION ENVIRONMENT:
226  *      Interrupt or Process
227  */
228 static void ibmvscsis_delete_client_info(struct scsi_info *vscsi,
229                                          bool client_closed)
230 {
231         vscsi->client_cap = 0;
233         /*
234          * Some things we don't want to clear if we're closing the queue,
235          * because some clients don't resend the host handshake when they
236          * get a transport event.
237          */
238         if (client_closed)
239                 vscsi->client_data.os_type = 0;
240 }
242 /**
243  * ibmvscsis_free_command_q() - Free Command Queue
244  * @vscsi:      Pointer to our adapter structure
245  *
246  * This function calls unregister_command_q, then clears interrupts and
247  * any pending interrupt acknowledgments associated with the command q.
248  * It also clears memory if there is no error.
249  *
250  * PHYP did not follow the PAPR architecture, so we must give up the
251  * lock. This opens a timing hole with respect to state changes.  To close
252  * the hole, this routine accounts for any change that occurred while
253  * the lock was not held.
254  * NOTE: must give up and then acquire the interrupt lock, the caller must
255  *       make sure that state and or flags will prevent interrupt handler from
256  *       scheduling work.
257  *
258  * EXECUTION ENVIRONMENT:
259  *      Process level, interrupt lock is held
260  */
261 static long ibmvscsis_free_command_q(struct scsi_info *vscsi)
262 {
263         int bytes;
264         u32 flags_under_lock;
265         u16 state_under_lock;
266         long rc = ADAPT_SUCCESS;
268         if (!(vscsi->flags & CRQ_CLOSED)) {
269                 vio_disable_interrupts(vscsi->dma_dev);
271                 state_under_lock = vscsi->new_state;
272                 flags_under_lock = vscsi->flags;
273                 vscsi->phyp_acr_state = 0;
274                 vscsi->phyp_acr_flags = 0;
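                /*
                 * Unregistering the queue can take a long time in PHYP, so
                 * drop the interrupt lock; any state or flag change made
                 * while it is released is accounted for in the phyp_acr_*
                 * fields below.
                 */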
276                 spin_unlock_bh(&vscsi->intr_lock);
277                 rc = ibmvscsis_unregister_command_q(vscsi);
278                 spin_lock_bh(&vscsi->intr_lock);
280                 if (state_under_lock != vscsi->new_state)
281                         vscsi->phyp_acr_state = vscsi->new_state;
283                 vscsi->phyp_acr_flags = ((~flags_under_lock) & vscsi->flags);
285                 if (rc == ADAPT_SUCCESS) {
286                         bytes = vscsi->cmd_q.size * PAGE_SIZE;
287                         memset(vscsi->cmd_q.base_addr, 0, bytes);
288                         vscsi->cmd_q.index = 0;
289                         vscsi->flags |= CRQ_CLOSED;
291                         ibmvscsis_delete_client_info(vscsi, false);
292                 }
294                 dev_dbg(&vscsi->dev, "free_command_q: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
295                         vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
296                         vscsi->phyp_acr_state);
297         }
298         return rc;
299 }
301 /**
302  * ibmvscsis_cmd_q_dequeue() - Get valid Command element
303  * @mask:       Mask to use in case index wraps
304  * @current_index:      Current index into command queue
305  * @base_addr:  Pointer to start of command queue
306  *
307  * Returns a pointer to a valid command element or NULL, if the command
308  * queue is empty
309  *
310  * EXECUTION ENVIRONMENT:
311  *      Interrupt environment, interrupt lock held
312  */
313 static struct viosrp_crq *ibmvscsis_cmd_q_dequeue(uint mask,
314                                                   uint *current_index,
315                                                   struct viosrp_crq *base_addr)
316 {
317         struct viosrp_crq *ptr;
319         ptr = base_addr + *current_index;
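        /*
         * A non-zero valid byte marks a usable element; the dma_rmb()
         * orders the read of the valid byte before any later reads of the
         * element's contents.
         */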
321         if (ptr->valid) {
322                 *current_index = (*current_index + 1) & mask;
323                 dma_rmb();
324         } else {
325                 ptr = NULL;
326         }
328         return ptr;
329 }
331 /**
332  * ibmvscsis_send_init_message() - send initialize message to the client
333  * @vscsi:      Pointer to our adapter structure
334  * @format:     Which Init Message format to send
335  *
336  * EXECUTION ENVIRONMENT:
337  *      Interrupt environment interrupt lock held
338  */
339 static long ibmvscsis_send_init_message(struct scsi_info *vscsi, u8 format)
340 {
341         struct viosrp_crq *crq;
342         u64 buffer[2] = { 0, 0 };
343         long rc;
345         crq = (struct viosrp_crq *)&buffer;
346         crq->valid = VALID_INIT_MSG;
347         crq->format = format;
348         rc = h_send_crq(vscsi->dds.unit_id, cpu_to_be64(buffer[MSG_HI]),
349                         cpu_to_be64(buffer[MSG_LOW]));
351         return rc;
352 }
354 /**
355  * ibmvscsis_check_init_msg() - Check init message valid
356  * @vscsi:      Pointer to our adapter structure
357  * @format:     Pointer to return format of Init Message, if any.
358  *              Set to UNUSED_FORMAT if no Init Message in queue.
359  *
360  * Checks whether an initialize message was queued by the initiator
361  * after the queue was created and before the interrupt was enabled.
362  *
363  * EXECUTION ENVIRONMENT:
364  *      Process level only, interrupt lock held
365  */
366 static long ibmvscsis_check_init_msg(struct scsi_info *vscsi, uint *format)
367 {
368         struct viosrp_crq *crq;
369         long rc = ADAPT_SUCCESS;
371         crq = ibmvscsis_cmd_q_dequeue(vscsi->cmd_q.mask, &vscsi->cmd_q.index,
372                                       vscsi->cmd_q.base_addr);
373         if (!crq) {
374                 *format = (uint)UNUSED_FORMAT;
375         } else if (crq->valid == VALID_INIT_MSG && crq->format == INIT_MSG) {
376                 *format = (uint)INIT_MSG;
377                 crq->valid = INVALIDATE_CMD_RESP_EL;
378                 dma_rmb();
380                 /*
381                  * The caller has ensured no initialize message was sent
382                  * after the queue was created, so there should be no other
383                  * message on the queue.
384                  */
385                 crq = ibmvscsis_cmd_q_dequeue(vscsi->cmd_q.mask,
386                                               &vscsi->cmd_q.index,
387                                               vscsi->cmd_q.base_addr);
388                 if (crq) {
389                         *format = (uint)(crq->format);
390                         rc = ERROR;
391                         crq->valid = INVALIDATE_CMD_RESP_EL;
392                         dma_rmb();
393                 }
394         } else {
395                 *format = (uint)(crq->format);
396                 rc = ERROR;
397                 crq->valid = INVALIDATE_CMD_RESP_EL;
398                 dma_rmb();
399         }
401         return rc;
402 }
404 /**
405  * ibmvscsis_disconnect() - Helper function to disconnect
406  * @work:       Pointer to work_struct, gives access to our adapter structure
407  *
408  * An error has occurred or the driver received a Transport event,
409  * and the driver is requesting that the command queue be de-registered
410  * in a safe manner. If there is no outstanding I/O then we can stop the
411  * queue. If we are restarting the queue, it will be reflected in
412  * the state of the adapter.
413  *
414  * EXECUTION ENVIRONMENT:
415  *      Process environment
416  */
417 static void ibmvscsis_disconnect(struct work_struct *work)
418 {
419         struct scsi_info *vscsi = container_of(work, struct scsi_info,
420                                                proc_work);
421         u16 new_state;
422         bool wait_idle = false;
424         spin_lock_bh(&vscsi->intr_lock);
425         new_state = vscsi->new_state;
426         vscsi->new_state = 0;
428         vscsi->flags |= DISCONNECT_SCHEDULED;
429         vscsi->flags &= ~SCHEDULE_DISCONNECT;
431         dev_dbg(&vscsi->dev, "disconnect: flags 0x%x, state 0x%hx\n",
432                 vscsi->flags, vscsi->state);
434         /*
435          * check which state we are in and see if we
436          * should transition to the new state
437          */
438         switch (vscsi->state) {
439         /* Should never be called while in this state. */
440         case NO_QUEUE:
441         /*
442          * Can never transition from this state;
443          * ignore errors and log out.
444          */
445         case UNCONFIGURING:
446                 break;
448         /* can transition from this state to UNCONFIGURING */
449         case ERR_DISCONNECT:
450                 if (new_state == UNCONFIGURING)
451                         vscsi->state = new_state;
452                 break;
454         /*
455          * Can transition from this state to UNCONFIGURING
456          * or ERR_DISCONNECT.
457          */
458         case ERR_DISCONNECT_RECONNECT:
459                 switch (new_state) {
460                 case UNCONFIGURING:
461                 case ERR_DISCONNECT:
462                         vscsi->state = new_state;
463                         break;
465                 case WAIT_IDLE:
466                         break;
467                 default:
468                         break;
469                 }
470                 break;
472         /* can transition from this state to UNCONFIGURING */
473         case ERR_DISCONNECTED:
474                 if (new_state == UNCONFIGURING)
475                         vscsi->state = new_state;
476                 break;
478         case WAIT_ENABLED:
479                 switch (new_state) {
480                 case UNCONFIGURING:
481                         vscsi->state = new_state;
482                         vscsi->flags |= RESPONSE_Q_DOWN;
483                         vscsi->flags &= ~(SCHEDULE_DISCONNECT |
484                                           DISCONNECT_SCHEDULED);
485                         dma_rmb();
486                         if (vscsi->flags & CFG_SLEEPING) {
487                                 vscsi->flags &= ~CFG_SLEEPING;
488                                 complete(&vscsi->unconfig);
489                         }
490                         break;
492                 /* should never happen */
493                 case ERR_DISCONNECT:
494                 case ERR_DISCONNECT_RECONNECT:
495                 case WAIT_IDLE:
496                         dev_err(&vscsi->dev, "disconnect: invalid state %d for WAIT_IDLE\n",
497                                 vscsi->state);
498                         break;
499                 }
500                 break;
502         case WAIT_IDLE:
503                 switch (new_state) {
504                 case UNCONFIGURING:
505                         vscsi->flags |= RESPONSE_Q_DOWN;
506                         vscsi->state = new_state;
507                         vscsi->flags &= ~(SCHEDULE_DISCONNECT |
508                                           DISCONNECT_SCHEDULED);
509                         ibmvscsis_free_command_q(vscsi);
510                         break;
511                 case ERR_DISCONNECT:
512                 case ERR_DISCONNECT_RECONNECT:
513                         vscsi->state = new_state;
514                         break;
515                 }
516                 break;
518         /*
519          * The initiator has not done a successful SRP login, or has done
520          * a successful SRP logout (the adapter was not busy). In the first
521          * case there can be responses queued waiting for space on the
522          * initiator's response queue (MAD); in the second case the adapter
523          * is idle. Assume the worst case, i.e. the first case.
525          */
526         case WAIT_CONNECTION:
527         case CONNECTED:
528         case SRP_PROCESSING:
529                 wait_idle = true;
530                 vscsi->state = new_state;
531                 break;
533         /* can transition from this state to UNCONFIGURING */
534         case UNDEFINED:
535                 if (new_state == UNCONFIGURING)
536                         vscsi->state = new_state;
537                 break;
538         default:
539                 break;
540         }
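        /*
         * If the adapter was waiting for a connection, connected, or
         * processing SRP requests, wait for any outstanding commands to
         * finish before idling the adapter.
         */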
542         if (wait_idle) {
543                 dev_dbg(&vscsi->dev, "disconnect start wait, active %d, sched %d\n",
544                         (int)list_empty(&vscsi->active_q),
545                         (int)list_empty(&vscsi->schedule_q));
546                 if (!list_empty(&vscsi->active_q) ||
547                     !list_empty(&vscsi->schedule_q)) {
548                         vscsi->flags |= WAIT_FOR_IDLE;
549                         dev_dbg(&vscsi->dev, "disconnect flags 0x%x\n",
550                                 vscsi->flags);
551                         /*
552                          * This routine cannot be called with the interrupt
553                          * lock held.
554                          */
555                         spin_unlock_bh(&vscsi->intr_lock);
556                         wait_for_completion(&vscsi->wait_idle);
557                         spin_lock_bh(&vscsi->intr_lock);
558                 }
559                 dev_dbg(&vscsi->dev, "disconnect stop wait\n");
561                 ibmvscsis_adapter_idle(vscsi);
562         }
564         spin_unlock_bh(&vscsi->intr_lock);
565 }
567 /**
568  * ibmvscsis_post_disconnect() - Schedule the disconnect
569  * @vscsi:      Pointer to our adapter structure
570  * @new_state:  State to move to after disconnecting
571  * @flag_bits:  Flags to turn on in adapter structure
572  *
573  * If it's already been scheduled, then see if we need to "upgrade"
574  * the new state (if the one passed in is more "severe" than the
575  * previous one).
576  *
577  * PRECONDITION:
578  *      interrupt lock is held
579  */
580 static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state,
581                                       uint flag_bits)
582 {
583         uint state;
585         /* check the validity of the new state */
586         switch (new_state) {
587         case UNCONFIGURING:
588         case ERR_DISCONNECT:
589         case ERR_DISCONNECT_RECONNECT:
590         case WAIT_IDLE:
591                 break;
593         default:
594                 dev_err(&vscsi->dev, "post_disconnect: Invalid new state %d\n",
595                         new_state);
596                 return;
597         }
599         vscsi->flags |= flag_bits;
601         dev_dbg(&vscsi->dev, "post_disconnect: new_state 0x%x, flag_bits 0x%x, vscsi->flags 0x%x, state %hx\n",
602                 new_state, flag_bits, vscsi->flags, vscsi->state);
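        /*
         * If no disconnect is scheduled yet, queue the work item; otherwise
         * just record the new target state, upgrading it if the new request
         * is more severe than the pending one.
         */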
604         if (!(vscsi->flags & (DISCONNECT_SCHEDULED | SCHEDULE_DISCONNECT))) {
605                 vscsi->flags |= SCHEDULE_DISCONNECT;
606                 vscsi->new_state = new_state;
608                 INIT_WORK(&vscsi->proc_work, ibmvscsis_disconnect);
609                 (void)queue_work(vscsi->work_q, &vscsi->proc_work);
610         } else {
611                 if (vscsi->new_state)
612                         state = vscsi->new_state;
613                 else
614                         state = vscsi->state;
616                 switch (state) {
617                 case NO_QUEUE:
618                 case UNCONFIGURING:
619                         break;
621                 case ERR_DISCONNECTED:
622                 case ERR_DISCONNECT:
623                 case UNDEFINED:
624                         if (new_state == UNCONFIGURING)
625                                 vscsi->new_state = new_state;
626                         break;
628                 case ERR_DISCONNECT_RECONNECT:
629                         switch (new_state) {
630                         case UNCONFIGURING:
631                         case ERR_DISCONNECT:
632                                 vscsi->new_state = new_state;
633                                 break;
634                         default:
635                                 break;
636                         }
637                         break;
639                 case WAIT_ENABLED:
640                 case WAIT_IDLE:
641                 case WAIT_CONNECTION:
642                 case CONNECTED:
643                 case SRP_PROCESSING:
644                         vscsi->new_state = new_state;
645                         break;
647                 default:
648                         break;
649                 }
650         }
652         dev_dbg(&vscsi->dev, "Leaving post_disconnect: flags 0x%x, new_state 0x%x\n",
653                 vscsi->flags, vscsi->new_state);
654 }
656 /**
657  * ibmvscsis_handle_init_compl_msg() - Respond to an Init Complete Message
658  * @vscsi:      Pointer to our adapter structure
659  *
660  * Must be called with interrupt lock held.
661  */
662 static long ibmvscsis_handle_init_compl_msg(struct scsi_info *vscsi)
663 {
664         long rc = ADAPT_SUCCESS;
666         switch (vscsi->state) {
667         case NO_QUEUE:
668         case ERR_DISCONNECT:
669         case ERR_DISCONNECT_RECONNECT:
670         case ERR_DISCONNECTED:
671         case UNCONFIGURING:
672         case UNDEFINED:
673                 rc = ERROR;
674                 break;
676         case WAIT_CONNECTION:
677                 vscsi->state = CONNECTED;
678                 break;
680         case WAIT_IDLE:
681         case SRP_PROCESSING:
682         case CONNECTED:
683         case WAIT_ENABLED:
684         default:
685                 rc = ERROR;
686                 dev_err(&vscsi->dev, "init_msg: invalid state %d to get init compl msg\n",
687                         vscsi->state);
688                 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
689                 break;
690         }
692         return rc;
693 }
695 /**
696  * ibmvscsis_handle_init_msg() - Respond to an Init Message
697  * @vscsi:      Pointer to our adapter structure
698  *
699  * Must be called with interrupt lock held.
700  */
701 static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi)
702 {
703         long rc = ADAPT_SUCCESS;
705         switch (vscsi->state) {
706         case WAIT_CONNECTION:
707                 rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
708                 switch (rc) {
709                 case H_SUCCESS:
710                         vscsi->state = CONNECTED;
711                         break;
713                 case H_PARAMETER:
714                         dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
715                                 rc);
716                         ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
717                         break;
719                 case H_DROPPED:
720                         dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
721                                 rc);
722                         rc = ERROR;
723                         ibmvscsis_post_disconnect(vscsi,
724                                                   ERR_DISCONNECT_RECONNECT, 0);
725                         break;
727                 case H_CLOSED:
728                         dev_warn(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
729                                  rc);
730                         rc = 0;
731                         break;
732                 }
733                 break;
735         case UNDEFINED:
736                 rc = ERROR;
737                 break;
739         case UNCONFIGURING:
740                 break;
742         case WAIT_ENABLED:
743         case CONNECTED:
744         case SRP_PROCESSING:
745         case WAIT_IDLE:
746         case NO_QUEUE:
747         case ERR_DISCONNECT:
748         case ERR_DISCONNECT_RECONNECT:
749         case ERR_DISCONNECTED:
750         default:
751                 rc = ERROR;
752                 dev_err(&vscsi->dev, "init_msg: invalid state %d to get init msg\n",
753                         vscsi->state);
754                 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
755                 break;
756         }
758         return rc;
759 }
761 /**
762  * ibmvscsis_init_msg() - Respond to an init message
763  * @vscsi:      Pointer to our adapter structure
764  * @crq:        Pointer to CRQ element containing the Init Message
765  *
766  * EXECUTION ENVIRONMENT:
767  *      Interrupt, interrupt lock held
768  */
769 static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq)
770 {
771         long rc = ADAPT_SUCCESS;
773         dev_dbg(&vscsi->dev, "init_msg: state 0x%hx\n", vscsi->state);
775         rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
776                       (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
777                       0);
778         if (rc == H_SUCCESS) {
779                 vscsi->client_data.partition_number =
780                         be64_to_cpu(*(u64 *)vscsi->map_buf);
781                 dev_dbg(&vscsi->dev, "init_msg, part num %d\n",
782                         vscsi->client_data.partition_number);
783         } else {
784                 dev_dbg(&vscsi->dev, "init_msg h_vioctl rc %ld\n", rc);
785                 rc = ADAPT_SUCCESS;
786         }
788         if (crq->format == INIT_MSG) {
789                 rc = ibmvscsis_handle_init_msg(vscsi);
790         } else if (crq->format == INIT_COMPLETE_MSG) {
791                 rc = ibmvscsis_handle_init_compl_msg(vscsi);
792         } else {
793                 rc = ERROR;
794                 dev_err(&vscsi->dev, "init_msg: invalid format %d\n",
795                         (uint)crq->format);
796                 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
797         }
799         return rc;
800 }
802 /**
803  * ibmvscsis_establish_new_q() - Establish new CRQ queue
804  * @vscsi:      Pointer to our adapter structure
805  *
806  * Must be called with interrupt lock held.
807  */
808 static long ibmvscsis_establish_new_q(struct scsi_info *vscsi)
809 {
810         long rc = ADAPT_SUCCESS;
811         uint format;
813         rc = h_vioctl(vscsi->dds.unit_id, H_ENABLE_PREPARE_FOR_SUSPEND, 30000,
814                       0, 0, 0, 0);
815         if (rc == H_SUCCESS)
816                 vscsi->flags |= PREP_FOR_SUSPEND_ENABLED;
817         else if (rc != H_NOT_FOUND)
818                 dev_err(&vscsi->dev, "Error from Enable Prepare for Suspend: %ld\n",
819                         rc);
821         vscsi->flags &= PRESERVE_FLAG_FIELDS;
822         vscsi->rsp_q_timer.timer_pops = 0;
823         vscsi->debit = 0;
824         vscsi->credit = 0;
826         rc = vio_enable_interrupts(vscsi->dma_dev);
827         if (rc) {
828                 dev_warn(&vscsi->dev, "establish_new_q: failed to enable interrupts, rc %ld\n",
829                          rc);
830                 return rc;
831         }
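        /*
         * An init message may already have been queued between registering
         * the CRQ and enabling interrupts; handle it here, otherwise send
         * our own INIT message to the client.
         */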
833         rc = ibmvscsis_check_init_msg(vscsi, &format);
834         if (rc) {
835                 dev_err(&vscsi->dev, "establish_new_q: check_init_msg failed, rc %ld\n",
836                         rc);
837                 return rc;
838         }
840         if (format == UNUSED_FORMAT) {
841                 rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
842                 switch (rc) {
843                 case H_SUCCESS:
844                 case H_DROPPED:
845                 case H_CLOSED:
846                         rc = ADAPT_SUCCESS;
847                         break;
849                 case H_PARAMETER:
850                 case H_HARDWARE:
851                         break;
853                 default:
854                         vscsi->state = UNDEFINED;
855                         rc = H_HARDWARE;
856                         break;
857                 }
858         } else if (format == INIT_MSG) {
859                 rc = ibmvscsis_handle_init_msg(vscsi);
860         }
862         return rc;
863 }
865 /**
866  * ibmvscsis_reset_queue() - Reset CRQ Queue
867  * @vscsi:      Pointer to our adapter structure
868  *
869  * This function calls h_free_q and then calls h_reg_q and does all
870  * of the bookkeeping to get us back to where we can communicate.
871  *
872  * Actually, we don't always call h_free_crq.  A problem was discovered
873  * where one partition would close and reopen his queue, which would
874  * cause his partner to get a transport event, which would cause him to
875  * close and reopen his queue, which would cause the original partition
876  * to get a transport event, etc., etc.  To prevent this, we don't
877  * actually close our queue if the client initiated the reset (i.e.
878  * either we got a transport event or we have detected that the client's
879  * queue is gone)
880  *
881  * EXECUTION ENVIRONMENT:
882  *      Process environment, called with interrupt lock held
883  */
884 static void ibmvscsis_reset_queue(struct scsi_info *vscsi)
885 {
886         int bytes;
887         long rc = ADAPT_SUCCESS;
889         dev_dbg(&vscsi->dev, "reset_queue: flags 0x%x\n", vscsi->flags);
891         /* don't reset, the client did it for us */
892         if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) {
893                 vscsi->flags &= PRESERVE_FLAG_FIELDS;
894                 vscsi->rsp_q_timer.timer_pops = 0;
895                 vscsi->debit = 0;
896                 vscsi->credit = 0;
897                 vscsi->state = WAIT_CONNECTION;
898                 vio_enable_interrupts(vscsi->dma_dev);
899         } else {
900                 rc = ibmvscsis_free_command_q(vscsi);
901                 if (rc == ADAPT_SUCCESS) {
902                         vscsi->state = WAIT_CONNECTION;
904                         bytes = vscsi->cmd_q.size * PAGE_SIZE;
905                         rc = h_reg_crq(vscsi->dds.unit_id,
906                                        vscsi->cmd_q.crq_token, bytes);
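                        /*
                         * H_CLOSED here is treated like success: the client
                         * has presumably not yet registered its end of the
                         * queue, so continue and wait for its init message.
                         */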
907                         if (rc == H_CLOSED || rc == H_SUCCESS) {
908                                 rc = ibmvscsis_establish_new_q(vscsi);
909                         }
911                         if (rc != ADAPT_SUCCESS) {
912                                 dev_dbg(&vscsi->dev, "reset_queue: reg_crq rc %ld\n",
913                                         rc);
915                                 vscsi->state = ERR_DISCONNECTED;
916                                 vscsi->flags |= RESPONSE_Q_DOWN;
917                                 ibmvscsis_free_command_q(vscsi);
918                         }
919                 } else {
920                         vscsi->state = ERR_DISCONNECTED;
921                         vscsi->flags |= RESPONSE_Q_DOWN;
922                 }
923         }
924 }
926 /**
927  * ibmvscsis_free_cmd_resources() - Free command resources
928  * @vscsi:      Pointer to our adapter structure
929  * @cmd:        Command which is no longer in use
930  *
931  * Must be called with interrupt lock held.
932  */
933 static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi,
934                                          struct ibmvscsis_cmd *cmd)
935 {
936         struct iu_entry *iue = cmd->iue;
938         switch (cmd->type) {
939         case TASK_MANAGEMENT:
940         case SCSI_CDB:
941                 /*
942                  * When the queue goes down this value is cleared, so it
943                  * cannot be cleared in this general purpose function.
944                  */
945                 if (vscsi->debit)
946                         vscsi->debit -= 1;
947                 break;
948         case ADAPTER_MAD:
949                 vscsi->flags &= ~PROCESSING_MAD;
950                 break;
951         case UNSET_TYPE:
952                 break;
953         default:
954                 dev_err(&vscsi->dev, "free_cmd_resources unknown type %d\n",
955                         cmd->type);
956                 break;
957         }
959         cmd->iue = NULL;
960         list_add_tail(&cmd->list, &vscsi->free_cmd);
961         srp_iu_put(iue);
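        /*
         * If this was the last outstanding command and a disconnect is
         * waiting for the adapter to go idle, wake the waiter.
         */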
963         if (list_empty(&vscsi->active_q) && list_empty(&vscsi->schedule_q) &&
964             list_empty(&vscsi->waiting_rsp) && (vscsi->flags & WAIT_FOR_IDLE)) {
965                 vscsi->flags &= ~WAIT_FOR_IDLE;
966                 complete(&vscsi->wait_idle);
967         }
968 }
970 /**
971  * ibmvscsis_ready_for_suspend() - Helper function to call VIOCTL
972  * @vscsi:      Pointer to our adapter structure
973  * @idle:       Indicates whether we were called from adapter_idle.  This
974  *              is important to know if we need to do a disconnect, since if
975  *              we're called from adapter_idle, we're still processing the
976  *              current disconnect, so we can't just call post_disconnect.
977  *
978  * This function is called when the adapter is idle after phyp has sent
979  * us a Prepare for Suspend Transport Event.
980  *
981  * EXECUTION ENVIRONMENT:
982  *      Process or interrupt environment called with interrupt lock held
983  */
984 static long ibmvscsis_ready_for_suspend(struct scsi_info *vscsi, bool idle)
985 {
986         long rc = 0;
987         struct viosrp_crq *crq;
989         /* See if there is a Resume event in the queue */
990         crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
992         dev_dbg(&vscsi->dev, "ready_suspend: flags 0x%x, state 0x%hx crq_valid:%x\n",
993                 vscsi->flags, vscsi->state, (int)crq->valid);
995         if (!(vscsi->flags & PREP_FOR_SUSPEND_ABORTED) && !(crq->valid)) {
996                 rc = h_vioctl(vscsi->dds.unit_id, H_READY_FOR_SUSPEND, 0, 0, 0,
997                               0, 0);
998                 if (rc) {
999                         dev_err(&vscsi->dev, "Ready for Suspend Vioctl failed: %ld\n",
1000                                 rc);
1001                         rc = 0;
1002                 }
1003         } else if (((vscsi->flags & PREP_FOR_SUSPEND_OVERWRITE) &&
1004                     (vscsi->flags & PREP_FOR_SUSPEND_ABORTED)) ||
1005                    ((crq->valid) && ((crq->valid != VALID_TRANS_EVENT) ||
1006                                      (crq->format != RESUME_FROM_SUSP)))) {
1007                 if (idle) {
1008                         vscsi->state = ERR_DISCONNECT_RECONNECT;
1009                         ibmvscsis_reset_queue(vscsi);
1010                         rc = -1;
1011                 } else if (vscsi->state == CONNECTED) {
1012                         ibmvscsis_post_disconnect(vscsi,
1013                                                   ERR_DISCONNECT_RECONNECT, 0);
1014                 }
1016                 vscsi->flags &= ~PREP_FOR_SUSPEND_OVERWRITE;
1018                 if ((crq->valid) && ((crq->valid != VALID_TRANS_EVENT) ||
1019                                      (crq->format != RESUME_FROM_SUSP)))
1020                         dev_err(&vscsi->dev, "Invalid element in CRQ after Prepare for Suspend");
1021         }
1023         vscsi->flags &= ~(PREP_FOR_SUSPEND_PENDING | PREP_FOR_SUSPEND_ABORTED);
1025         return rc;
1026 }
1028 /**
1029  * ibmvscsis_trans_event() - Handle a Transport Event
1030  * @vscsi:      Pointer to our adapter structure
1031  * @crq:        Pointer to CRQ entry containing the Transport Event
1032  *
1033  * Do the logic to close the I_T nexus.  This function may not
1034  * behave to specification.
1035  *
1036  * EXECUTION ENVIRONMENT:
1037  *      Interrupt, interrupt lock held
1038  */
1039 static long ibmvscsis_trans_event(struct scsi_info *vscsi,
1040                                   struct viosrp_crq *crq)
1041 {
1042         long rc = ADAPT_SUCCESS;
1044         dev_dbg(&vscsi->dev, "trans_event: format %d, flags 0x%x, state 0x%hx\n",
1045                 (int)crq->format, vscsi->flags, vscsi->state);
1047         switch (crq->format) {
1048         case MIGRATED:
1049         case PARTNER_FAILED:
1050         case PARTNER_DEREGISTER:
1051                 ibmvscsis_delete_client_info(vscsi, true);
1052                 if (crq->format == MIGRATED)
1053                         vscsi->flags &= ~PREP_FOR_SUSPEND_OVERWRITE;
1054                 switch (vscsi->state) {
1055                 case NO_QUEUE:
1056                 case ERR_DISCONNECTED:
1057                 case UNDEFINED:
1058                         break;
1060                 case UNCONFIGURING:
1061                         vscsi->flags |= (RESPONSE_Q_DOWN | TRANS_EVENT);
1062                         break;
1064                 case WAIT_ENABLED:
1065                         break;
1067                 case WAIT_CONNECTION:
1068                         break;
1070                 case CONNECTED:
1071                         ibmvscsis_post_disconnect(vscsi, WAIT_IDLE,
1072                                                   (RESPONSE_Q_DOWN |
1073                                                    TRANS_EVENT));
1074                         break;
1076                 case SRP_PROCESSING:
1077                         if ((vscsi->debit > 0) ||
1078                             !list_empty(&vscsi->schedule_q) ||
1079                             !list_empty(&vscsi->waiting_rsp) ||
1080                             !list_empty(&vscsi->active_q)) {
1081                                 dev_dbg(&vscsi->dev, "debit %d, sched %d, wait %d, active %d\n",
1082                                         vscsi->debit,
1083                                         (int)list_empty(&vscsi->schedule_q),
1084                                         (int)list_empty(&vscsi->waiting_rsp),
1085                                         (int)list_empty(&vscsi->active_q));
1086                                 dev_warn(&vscsi->dev, "connection lost with outstanding work\n");
1087                         } else {
1088                                 dev_dbg(&vscsi->dev, "trans_event: SRP Processing, but no outstanding work\n");
1089                         }
1091                         ibmvscsis_post_disconnect(vscsi, WAIT_IDLE,
1092                                                   (RESPONSE_Q_DOWN |
1093                                                    TRANS_EVENT));
1094                         break;
1096                 case ERR_DISCONNECT:
1097                 case ERR_DISCONNECT_RECONNECT:
1098                 case WAIT_IDLE:
1099                         vscsi->flags |= (RESPONSE_Q_DOWN | TRANS_EVENT);
1100                         break;
1101                 }
1102                 break;
1104         case PREPARE_FOR_SUSPEND:
1105                 dev_dbg(&vscsi->dev, "Prep for Suspend, crq status = 0x%x\n",
1106                         (int)crq->status);
1107                 switch (vscsi->state) {
1108                 case ERR_DISCONNECTED:
1109                 case WAIT_CONNECTION:
1110                 case CONNECTED:
1111                         ibmvscsis_ready_for_suspend(vscsi, false);
1112                         break;
1113                 case SRP_PROCESSING:
1114                         vscsi->resume_state = vscsi->state;
1115                         vscsi->flags |= PREP_FOR_SUSPEND_PENDING;
1116                         if (crq->status == CRQ_ENTRY_OVERWRITTEN)
1117                                 vscsi->flags |= PREP_FOR_SUSPEND_OVERWRITE;
1118                         ibmvscsis_post_disconnect(vscsi, WAIT_IDLE, 0);
1119                         break;
1120                 case NO_QUEUE:
1121                 case UNDEFINED:
1122                 case UNCONFIGURING:
1123                 case WAIT_ENABLED:
1124                 case ERR_DISCONNECT:
1125                 case ERR_DISCONNECT_RECONNECT:
1126                 case WAIT_IDLE:
1127                         dev_err(&vscsi->dev, "Invalid state for Prepare for Suspend Trans Event: 0x%x\n",
1128                                 vscsi->state);
1129                         break;
1130                 }
1131                 break;
1133         case RESUME_FROM_SUSP:
1134                 dev_dbg(&vscsi->dev, "Resume from Suspend, crq status = 0x%x\n",
1135                         (int)crq->status);
1136                 if (vscsi->flags & PREP_FOR_SUSPEND_PENDING) {
1137                         vscsi->flags |= PREP_FOR_SUSPEND_ABORTED;
1138                 } else {
1139                         if ((crq->status == CRQ_ENTRY_OVERWRITTEN) ||
1140                             (vscsi->flags & PREP_FOR_SUSPEND_OVERWRITE)) {
1141                                 ibmvscsis_post_disconnect(vscsi,
1142                                                           ERR_DISCONNECT_RECONNECT,
1143                                                           0);
1144                                 vscsi->flags &= ~PREP_FOR_SUSPEND_OVERWRITE;
1145                         }
1146                 }
1147                 break;
1149         default:
1150                 rc = ERROR;
1151                 dev_err(&vscsi->dev, "trans_event: invalid format %d\n",
1152                         (uint)crq->format);
1153                 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT,
1154                                           RESPONSE_Q_DOWN);
1155                 break;
1156         }
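        /*
         * Return non-zero if a disconnect has been scheduled so the caller
         * stops normal command processing.
         */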
1158         rc = vscsi->flags & SCHEDULE_DISCONNECT;
1160         dev_dbg(&vscsi->dev, "Leaving trans_event: flags 0x%x, state 0x%hx, rc %ld\n",
1161                 vscsi->flags, vscsi->state, rc);
1163         return rc;
1164 }
1166 /**
1167  * ibmvscsis_poll_cmd_q() - Poll Command Queue
1168  * @vscsi:      Pointer to our adapter structure
1169  *
1170  * Called to handle command elements that may have arrived while
1171  * interrupts were disabled.
1172  *
1173  * EXECUTION ENVIRONMENT:
1174  *      intr_lock must be held
1175  */
1176 static void ibmvscsis_poll_cmd_q(struct scsi_info *vscsi)
1177 {
1178         struct viosrp_crq *crq;
1179         long rc;
1180         bool ack = true;
1181         volatile u8 valid;
1183         dev_dbg(&vscsi->dev, "poll_cmd_q: flags 0x%x, state 0x%hx, q index %u\n",
1184                 vscsi->flags, vscsi->state, vscsi->cmd_q.index);
1186         rc = vscsi->flags & SCHEDULE_DISCONNECT;
1187         crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
1188         valid = crq->valid;
1189         dma_rmb();
1191         while (valid) {
1192 poll_work:
1193                 vscsi->cmd_q.index =
1194                         (vscsi->cmd_q.index + 1) & vscsi->cmd_q.mask;
1196                 if (!rc) {
1197                         rc = ibmvscsis_parse_command(vscsi, crq);
1198                 } else {
1199                         if ((uint)crq->valid == VALID_TRANS_EVENT) {
1200                                 /*
1201                                  * must service the transport layer events even
1202                                  * in an error state; don't break out until all
1203                                  * the consecutive transport events have been
1204                                  * processed
1205                                  */
1206                                 rc = ibmvscsis_trans_event(vscsi, crq);
1207                         } else if (vscsi->flags & TRANS_EVENT) {
1208                                 /*
1209                                  * if a transport event has occurred, leave
1210                                  * everything but transport events on the queue
1211                                  */
1212                                 dev_dbg(&vscsi->dev, "poll_cmd_q, ignoring\n");
1214                                 /*
1215                                  * need to decrement the queue index so we can
1216                                  * look at the element again
1217                                  */
1218                                 if (vscsi->cmd_q.index)
1219                                         vscsi->cmd_q.index -= 1;
1220                                 else
1221                                         /*
1222                                          * index is at 0, so it just wrapped;
1223                                          * have it index the last element in the queue
1224                                          */
1225                                         vscsi->cmd_q.index = vscsi->cmd_q.mask;
1226                                 break;
1227                         }
1228                 }
1230                 crq->valid = INVALIDATE_CMD_RESP_EL;
1232                 crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
1233                 valid = crq->valid;
1234                 dma_rmb();
1235         }
1237         if (!rc) {
1238                 if (ack) {
1239                         vio_enable_interrupts(vscsi->dma_dev);
1240                         ack = false;
1241                         dev_dbg(&vscsi->dev, "poll_cmd_q, reenabling interrupts\n");
1242                 }
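                /*
                 * Re-check the queue after re-enabling interrupts to catch
                 * an element that arrived before the enable took effect.
                 */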
1243                 valid = crq->valid;
1244                 dma_rmb();
1245                 if (valid)
1246                         goto poll_work;
1247         }
1249         dev_dbg(&vscsi->dev, "Leaving poll_cmd_q: rc %ld\n", rc);
1250 }
1252 /**
1253  * ibmvscsis_free_cmd_qs() - Free elements in queue
1254  * @vscsi:      Pointer to our adapter structure
1255  *
1256  * Free all of the elements on all queues that are waiting for
1257  * whatever reason.
1258  *
1259  * PRECONDITION:
1260  *      Called with interrupt lock held
1261  */
1262 static void ibmvscsis_free_cmd_qs(struct scsi_info *vscsi)
1263 {
1264         struct ibmvscsis_cmd *cmd, *nxt;
1266         dev_dbg(&vscsi->dev, "free_cmd_qs: waiting_rsp empty %d, timer started %d\n",
1267                 (int)list_empty(&vscsi->waiting_rsp),
1268                 vscsi->rsp_q_timer.started);
1270         list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) {
1271                 list_del(&cmd->list);
1272                 ibmvscsis_free_cmd_resources(vscsi, cmd);
1273         }
1274 }
1276 /**
1277  * ibmvscsis_get_free_cmd() - Get free command from list
1278  * @vscsi:      Pointer to our adapter structure
1279  *
1280  * Must be called with interrupt lock held.
1281  */
1282 static struct ibmvscsis_cmd *ibmvscsis_get_free_cmd(struct scsi_info *vscsi)
1283 {
1284         struct ibmvscsis_cmd *cmd = NULL;
1285         struct iu_entry *iue;
1287         iue = srp_iu_get(&vscsi->target);
1288         if (iue) {
1289                 cmd = list_first_entry_or_null(&vscsi->free_cmd,
1290                                                struct ibmvscsis_cmd, list);
1291                 if (cmd) {
1292                         if (cmd->abort_cmd)
1293                                 cmd->abort_cmd = NULL;
1294                         cmd->flags &= ~(DELAY_SEND);
1295                         list_del(&cmd->list);
1296                         cmd->iue = iue;
1297                         cmd->type = UNSET_TYPE;
1298                         memset(&cmd->se_cmd, 0, sizeof(cmd->se_cmd));
1299                 } else {
1300                         srp_iu_put(iue);
1301                 }
1302         }
1304         return cmd;
1305 }
1307 /**
1308  * ibmvscsis_adapter_idle() - Helper function to handle idle adapter
1309  * @vscsi:      Pointer to our adapter structure
1310  *
1311  * This function is called when the adapter is idle while the driver
1312  * is attempting to clear an error condition.
1313  * The adapter is considered busy if any of its cmd queues
1314  * are non-empty. This function can be invoked
1315  * from the off level disconnect function.
1316  *
1317  * EXECUTION ENVIRONMENT:
1318  *      Process environment called with interrupt lock held
1319  */
1320 static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
1321 {
1322         int free_qs = false;
1323         long rc = 0;
1325         dev_dbg(&vscsi->dev, "adapter_idle: flags 0x%x, state 0x%hx\n",
1326                 vscsi->flags, vscsi->state);
1328         /* Only need to free qs if we're disconnecting from client */
1329         if (vscsi->state != WAIT_CONNECTION || vscsi->flags & TRANS_EVENT)
1330                 free_qs = true;
1332         switch (vscsi->state) {
1333         case UNCONFIGURING:
1334                 ibmvscsis_free_command_q(vscsi);
1335                 dma_rmb();
1336                 isync();
1337                 if (vscsi->flags & CFG_SLEEPING) {
1338                         vscsi->flags &= ~CFG_SLEEPING;
1339                         complete(&vscsi->unconfig);
1340                 }
1341                 break;
1342         case ERR_DISCONNECT_RECONNECT:
1343                 ibmvscsis_reset_queue(vscsi);
1344                 dev_dbg(&vscsi->dev, "adapter_idle, disc_rec: flags 0x%x\n",
1345                         vscsi->flags);
1346                 break;
1348         case ERR_DISCONNECT:
1349                 ibmvscsis_free_command_q(vscsi);
1350                 vscsi->flags &= ~(SCHEDULE_DISCONNECT | DISCONNECT_SCHEDULED);
1351                 vscsi->flags |= RESPONSE_Q_DOWN;
1352                 if (vscsi->tport.enabled)
1353                         vscsi->state = ERR_DISCONNECTED;
1354                 else
1355                         vscsi->state = WAIT_ENABLED;
1356                 dev_dbg(&vscsi->dev, "adapter_idle, disc: flags 0x%x, state 0x%hx\n",
1357                         vscsi->flags, vscsi->state);
1358                 break;
1360         case WAIT_IDLE:
1361                 vscsi->rsp_q_timer.timer_pops = 0;
1362                 vscsi->debit = 0;
1363                 vscsi->credit = 0;
1364                 if (vscsi->flags & PREP_FOR_SUSPEND_PENDING) {
1365                         vscsi->state = vscsi->resume_state;
1366                         vscsi->resume_state = 0;
1367                         rc = ibmvscsis_ready_for_suspend(vscsi, true);
1368                         vscsi->flags &= ~DISCONNECT_SCHEDULED;
1369                         if (rc)
1370                                 break;
1371                 } else if (vscsi->flags & TRANS_EVENT) {
1372                         vscsi->state = WAIT_CONNECTION;
1373                         vscsi->flags &= PRESERVE_FLAG_FIELDS;
1374                 } else {
1375                         vscsi->state = CONNECTED;
1376                         vscsi->flags &= ~DISCONNECT_SCHEDULED;
1377                 }
1379                 dev_dbg(&vscsi->dev, "adapter_idle, wait: flags 0x%x, state 0x%hx\n",
1380                         vscsi->flags, vscsi->state);
1381                 ibmvscsis_poll_cmd_q(vscsi);
1382                 break;
1384         case ERR_DISCONNECTED:
1385                 vscsi->flags &= ~DISCONNECT_SCHEDULED;
1386                 dev_dbg(&vscsi->dev, "adapter_idle, disconnected: flags 0x%x, state 0x%hx\n",
1387                         vscsi->flags, vscsi->state);
1388                 break;
1390         default:
1391                 dev_err(&vscsi->dev, "adapter_idle: in invalid state %d\n",
1392                         vscsi->state);
1393                 break;
1394         }
1396         if (free_qs)
1397                 ibmvscsis_free_cmd_qs(vscsi);
1399         /*
1400          * There is a timing window where we could lose a disconnect request.
1401          * The known path to this window occurs during the DISCONNECT_RECONNECT
1402          * case above: reset_queue calls free_command_q, which will release the
1403          * interrupt lock.  During that time, a new post_disconnect call can be
1404          * made with a "more severe" state (DISCONNECT or UNCONFIGURING).
1405          * Because the DISCONNECT_SCHEDULED flag is already set, post_disconnect
1406          * will only set the new_state.  Now free_command_q reacquires the intr
1407          * lock and clears the DISCONNECT_SCHEDULED flag (using PRESERVE_FLAG_
1408          * FIELDS), and the disconnect is lost.  This is particularly bad when
1409          * the new disconnect was for UNCONFIGURING, since the unconfigure hangs
1410          * forever.
1411          * The fix is that free_command_q sets the acr state and acr flags if
1412          * there was a change while the lock was released.
1413          * Note: free_command_q writes to this state and clears it before
1414          * releasing the lock; different paths call free_command_q at
1415          * different times, so don't initialize these fields above.
1416          */
1417         if (vscsi->phyp_acr_state != 0) {
1418                 /*
1419                  * set any bits in flags that may have been cleared by
1420                  * a call to free command queue in switch statement
1421                  * or reset queue
1422                  */
1423                 vscsi->flags |= vscsi->phyp_acr_flags;
1424                 ibmvscsis_post_disconnect(vscsi, vscsi->phyp_acr_state, 0);
1425                 vscsi->phyp_acr_state = 0;
1426                 vscsi->phyp_acr_flags = 0;
1428                 dev_dbg(&vscsi->dev, "adapter_idle: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
1429                         vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
1430                         vscsi->phyp_acr_state);
1431         }
1433         dev_dbg(&vscsi->dev, "Leaving adapter_idle: flags 0x%x, state 0x%hx, new_state 0x%x\n",
1434                 vscsi->flags, vscsi->state, vscsi->new_state);
1437 /**
1438  * ibmvscsis_copy_crq_packet() - Copy CRQ Packet
1439  * @vscsi:      Pointer to our adapter structure
1440  * @cmd:        Pointer to command element to use to process the request
1441  * @crq:        Pointer to CRQ entry containing the request
1442  *
1443  * Copy the srp information unit from the hosted
1444  * partition using remote dma
1445  *
1446  * EXECUTION ENVIRONMENT:
1447  *      Interrupt, interrupt lock held
1448  */
1449 static long ibmvscsis_copy_crq_packet(struct scsi_info *vscsi,
1450                                       struct ibmvscsis_cmd *cmd,
1451                                       struct viosrp_crq *crq)
1453         struct iu_entry *iue = cmd->iue;
1454         long rc = 0;
1455         u16 len;
1457         len = be16_to_cpu(crq->IU_length);
1458         if ((len > SRP_MAX_IU_LEN) || (len == 0)) {
1459                 dev_err(&vscsi->dev, "copy_crq: Invalid len %d passed", len);
1460                 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
1461                 return SRP_VIOLATION;
1462         }
1464         rc = h_copy_rdma(len, vscsi->dds.window[REMOTE].liobn,
1465                          be64_to_cpu(crq->IU_data_ptr),
1466                          vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma);
1468         switch (rc) {
1469         case H_SUCCESS:
1470                 cmd->init_time = mftb();
1471                 iue->remote_token = crq->IU_data_ptr;
1472                 iue->iu_len = len;
1473                 dev_dbg(&vscsi->dev, "copy_crq: ioba 0x%llx, init_time 0x%llx\n",
1474                         be64_to_cpu(crq->IU_data_ptr), cmd->init_time);
1475                 break;
1476         case H_PERMISSION:
1477                 if (connection_broken(vscsi))
1478                         ibmvscsis_post_disconnect(vscsi,
1479                                                   ERR_DISCONNECT_RECONNECT,
1480                                                   (RESPONSE_Q_DOWN |
1481                                                    CLIENT_FAILED));
1482                 else
1483                         ibmvscsis_post_disconnect(vscsi,
1484                                                   ERR_DISCONNECT_RECONNECT, 0);
1486                 dev_err(&vscsi->dev, "copy_crq: h_copy_rdma failed, rc %ld\n",
1487                         rc);
1488                 break;
1489         case H_DEST_PARM:
1490         case H_SOURCE_PARM:
1491         default:
1492                 dev_err(&vscsi->dev, "copy_crq: h_copy_rdma failed, rc %ld\n",
1493                         rc);
1494                 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
1495                 break;
1496         }
1498         return rc;
1501 /**
1502  * ibmvscsis_adapter_info - Service an Adapter Info MAnagement Data gram
1503  * @vscsi:      Pointer to our adapter structure
1504  * @iue:        Information Unit containing the Adapter Info MAD request
1505  *
1506  * EXECUTION ENVIRONMENT:
1507  *      Interrupt adapter lock is held
1508  */
1509 static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
1510                                    struct iu_entry *iue)
1512         struct viosrp_adapter_info *mad = &vio_iu(iue)->mad.adapter_info;
1513         struct mad_adapter_info_data *info;
1514         uint flag_bits = 0;
1515         dma_addr_t token;
1516         long rc;
1518         mad->common.status = cpu_to_be16(VIOSRP_MAD_SUCCESS);
1520         if (be16_to_cpu(mad->common.length) > sizeof(*info)) {
1521                 mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
1522                 return 0;
1523         }
1525         info = dma_alloc_coherent(&vscsi->dma_dev->dev, sizeof(*info), &token,
1526                                   GFP_ATOMIC);
1527         if (!info) {
1528                 dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
1529                         iue->target);
1530                 mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
1531                 return 0;
1532         }
1534         /* Get remote info */
1535         rc = h_copy_rdma(be16_to_cpu(mad->common.length),
1536                          vscsi->dds.window[REMOTE].liobn,
1537                          be64_to_cpu(mad->buffer),
1538                          vscsi->dds.window[LOCAL].liobn, token);
1540         if (rc != H_SUCCESS) {
1541                 if (rc == H_PERMISSION) {
1542                         if (connection_broken(vscsi))
1543                                 flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
1544                 }
1545                 dev_warn(&vscsi->dev, "adapter_info: h_copy_rdma from client failed, rc %ld\n",
1546                          rc);
1547                 dev_dbg(&vscsi->dev, "adapter_info: ioba 0x%llx, flags 0x%x, flag_bits 0x%x\n",
1548                         be64_to_cpu(mad->buffer), vscsi->flags, flag_bits);
1549                 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
1550                                           flag_bits);
1551                 goto free_dma;
1552         }
1554         /*
1555          * Copy client info, but ignore partition number, which we
1556          * already got from phyp - unless we failed to get it from
1557          * phyp (e.g. if we're running on a p5 system).
1558          */
1559         if (vscsi->client_data.partition_number == 0)
1560                 vscsi->client_data.partition_number =
1561                         be32_to_cpu(info->partition_number);
1562         strncpy(vscsi->client_data.srp_version, info->srp_version,
1563                 sizeof(vscsi->client_data.srp_version));
1564         strncpy(vscsi->client_data.partition_name, info->partition_name,
1565                 sizeof(vscsi->client_data.partition_name));
1566         vscsi->client_data.mad_version = be32_to_cpu(info->mad_version);
1567         vscsi->client_data.os_type = be32_to_cpu(info->os_type);
1569         /* Copy our info */
1570         strncpy(info->srp_version, SRP_VERSION,
1571                 sizeof(info->srp_version));
1572         strncpy(info->partition_name, vscsi->dds.partition_name,
1573                 sizeof(info->partition_name));
1574         info->partition_number = cpu_to_be32(vscsi->dds.partition_num);
1575         info->mad_version = cpu_to_be32(MAD_VERSION_1);
1576         info->os_type = cpu_to_be32(LINUX);
1577         memset(&info->port_max_txu[0], 0, sizeof(info->port_max_txu));
1578         info->port_max_txu[0] = cpu_to_be32(MAX_TXU);
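        /* Make sure the adapter info fields written above are visible before the h_copy_rdma to the client */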
1580         dma_wmb();
1581         rc = h_copy_rdma(sizeof(*info), vscsi->dds.window[LOCAL].liobn,
1582                          token, vscsi->dds.window[REMOTE].liobn,
1583                          be64_to_cpu(mad->buffer));
1584         switch (rc) {
1585         case H_SUCCESS:
1586                 break;
1588         case H_SOURCE_PARM:
1589         case H_DEST_PARM:
1590         case H_PERMISSION:
1591                 if (connection_broken(vscsi))
1592                         flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
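                /* Fall through - log the error and disconnect below */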
1593         default:
1594                 dev_err(&vscsi->dev, "adapter_info: h_copy_rdma to client failed, rc %ld\n",
1595                         rc);
1596                 ibmvscsis_post_disconnect(vscsi,
1597                                           ERR_DISCONNECT_RECONNECT,
1598                                           flag_bits);
1599                 break;
1600         }
1602 free_dma:
1603         dma_free_coherent(&vscsi->dma_dev->dev, sizeof(*info), info, token);
1604         dev_dbg(&vscsi->dev, "Leaving adapter_info, rc %ld\n", rc);
1606         return rc;
1609 /**
1610  * ibmvscsis_cap_mad() - Service a Capabilities MAnagement Data gram
1611  * @vscsi:      Pointer to our adapter structure
1612  * @iue:        Information Unit containing the Capabilities MAD request
1613  *
1614  * NOTE: if you return an error from this routine you must be
1615  * disconnecting or you will cause a hang
1616  *
1617  * EXECUTION ENVIRONMENT:
1618  *      Interrupt called with adapter lock held
1619  */
1620 static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
1622         struct viosrp_capabilities *mad = &vio_iu(iue)->mad.capabilities;
1623         struct capabilities *cap;
1624         struct mad_capability_common *common;
1625         dma_addr_t token;
1626         u16 olen, len, status, min_len, cap_len;
1627         u32 flag;
1628         uint flag_bits = 0;
1629         long rc = 0;
1631         olen = be16_to_cpu(mad->common.length);
1632         /*
1633          * struct capabilities hardcodes a couple capabilities after the
1634          * header, but the capabilities can actually be in any order.
1635          */
1636         min_len = offsetof(struct capabilities, migration);
1637         if ((olen < min_len) || (olen > PAGE_SIZE)) {
1638                 dev_warn(&vscsi->dev, "cap_mad: invalid len %d\n", olen);
1639                 mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
1640                 return 0;
1641         }
1643         cap = dma_alloc_coherent(&vscsi->dma_dev->dev, olen, &token,
1644                                  GFP_ATOMIC);
1645         if (!cap) {
1646                 dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
1647                         iue->target);
1648                 mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
1649                 return 0;
1650         }
1651         rc = h_copy_rdma(olen, vscsi->dds.window[REMOTE].liobn,
1652                          be64_to_cpu(mad->buffer),
1653                          vscsi->dds.window[LOCAL].liobn, token);
1654         if (rc == H_SUCCESS) {
1655                 strncpy(cap->name, dev_name(&vscsi->dma_dev->dev),
1656                         SRP_MAX_LOC_LEN);
1658                 len = olen - min_len;
1659                 status = VIOSRP_MAD_SUCCESS;
1660                 common = (struct mad_capability_common *)&cap->migration;
1662                 while ((len > 0) && (status == VIOSRP_MAD_SUCCESS) && !rc) {
1663                         dev_dbg(&vscsi->dev, "cap_mad: len left %hd, cap type %d, cap len %hd\n",
1664                                 len, be32_to_cpu(common->cap_type),
1665                                 be16_to_cpu(common->length));
1667                         cap_len = be16_to_cpu(common->length);
1668                         if (cap_len > len) {
1669                                 dev_err(&vscsi->dev, "cap_mad: cap len mismatch with total len\n");
1670                                 status = VIOSRP_MAD_FAILED;
1671                                 break;
1672                         }
1674                         if (cap_len == 0) {
1675                                 dev_err(&vscsi->dev, "cap_mad: cap len is 0\n");
1676                                 status = VIOSRP_MAD_FAILED;
1677                                 break;
1678                         }
1680                         switch (common->cap_type) {
1681                         default:
1682                                 dev_dbg(&vscsi->dev, "cap_mad: unsupported capability\n");
1683                                 common->server_support = 0;
1684                                 flag = cpu_to_be32((u32)CAP_LIST_SUPPORTED);
1685                                 cap->flags &= ~flag;
1686                                 break;
1687                         }
1689                         len = len - cap_len;
1690                         common = (struct mad_capability_common *)
1691                                 ((char *)common + cap_len);
1692                 }
1694                 mad->common.status = cpu_to_be16(status);
1696                 dma_wmb();
1697                 rc = h_copy_rdma(olen, vscsi->dds.window[LOCAL].liobn, token,
1698                                  vscsi->dds.window[REMOTE].liobn,
1699                                  be64_to_cpu(mad->buffer));
1701                 if (rc != H_SUCCESS) {
1702                         dev_dbg(&vscsi->dev, "cap_mad: failed to copy to client, rc %ld\n",
1703                                 rc);
1705                         if (rc == H_PERMISSION) {
1706                                 if (connection_broken(vscsi))
1707                                         flag_bits = (RESPONSE_Q_DOWN |
1708                                                      CLIENT_FAILED);
1709                         }
1711                         dev_warn(&vscsi->dev, "cap_mad: error copying data to client, rc %ld\n",
1712                                  rc);
1713                         ibmvscsis_post_disconnect(vscsi,
1714                                                   ERR_DISCONNECT_RECONNECT,
1715                                                   flag_bits);
1716                 }
1717         }
1719         dma_free_coherent(&vscsi->dma_dev->dev, olen, cap, token);
1721         dev_dbg(&vscsi->dev, "Leaving cap_mad, rc %ld, client_cap 0x%x\n",
1722                 rc, vscsi->client_cap);
1724         return rc;
1727 /**
1728  * ibmvscsis_process_mad() - Service a MAnagement Data gram
1729  * @vscsi:      Pointer to our adapter structure
1730  * @iue:        Information Unit containing the MAD request
1731  *
1732  * Must be called with interrupt lock held.
1733  */
1734 static long ibmvscsis_process_mad(struct scsi_info *vscsi, struct iu_entry *iue)
1736         struct mad_common *mad = (struct mad_common *)&vio_iu(iue)->mad;
1737         struct viosrp_empty_iu *empty;
1738         long rc = ADAPT_SUCCESS;
1740         switch (be32_to_cpu(mad->type)) {
1741         case VIOSRP_EMPTY_IU_TYPE:
1742                 empty = &vio_iu(iue)->mad.empty_iu;
1743                 vscsi->empty_iu_id = be64_to_cpu(empty->buffer);
1744                 vscsi->empty_iu_tag = be64_to_cpu(empty->common.tag);
1745                 mad->status = cpu_to_be16(VIOSRP_MAD_SUCCESS);
1746                 break;
1747         case VIOSRP_ADAPTER_INFO_TYPE:
1748                 rc = ibmvscsis_adapter_info(vscsi, iue);
1749                 break;
1750         case VIOSRP_CAPABILITIES_TYPE:
1751                 rc = ibmvscsis_cap_mad(vscsi, iue);
1752                 break;
1753         case VIOSRP_ENABLE_FAST_FAIL:
1754                 if (vscsi->state == CONNECTED) {
1755                         vscsi->fast_fail = true;
1756                         mad->status = cpu_to_be16(VIOSRP_MAD_SUCCESS);
1757                 } else {
1758                         dev_warn(&vscsi->dev, "fast fail mad sent after login\n");
1759                         mad->status = cpu_to_be16(VIOSRP_MAD_FAILED);
1760                 }
1761                 break;
1762         default:
1763                 mad->status = cpu_to_be16(VIOSRP_MAD_NOT_SUPPORTED);
1764                 break;
1765         }
1767         return rc;
1770 /**
1771  * srp_snd_msg_failed() - Handle an error when sending a response
1772  * @vscsi:      Pointer to our adapter structure
1773  * @rc:         The return code from the h_send_crq command
1774  *
1775  * Must be called with interrupt lock held.
1776  */
1777 static void srp_snd_msg_failed(struct scsi_info *vscsi, long rc)
1779         ktime_t kt;
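        /* Any failure other than H_DROPPED (client response queue full) is treated as fatal and triggers a disconnect */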
1781         if (rc != H_DROPPED) {
1782                 ibmvscsis_free_cmd_qs(vscsi);
1784                 if (rc == H_CLOSED)
1785                         vscsi->flags |= CLIENT_FAILED;
1787                 /* don't flag the same problem multiple times */
1788                 if (!(vscsi->flags & RESPONSE_Q_DOWN)) {
1789                         vscsi->flags |= RESPONSE_Q_DOWN;
1790                         if (!(vscsi->state & (ERR_DISCONNECT |
1791                                               ERR_DISCONNECT_RECONNECT |
1792                                               ERR_DISCONNECTED | UNDEFINED))) {
1793                                 dev_err(&vscsi->dev, "snd_msg_failed: setting RESPONSE_Q_DOWN, state 0x%hx, flags 0x%x, rc %ld\n",
1794                                         vscsi->state, vscsi->flags, rc);
1795                         }
1796                         ibmvscsis_post_disconnect(vscsi,
1797                                                   ERR_DISCONNECT_RECONNECT, 0);
1798                 }
1799                 return;
1800         }
1802         /*
1803          * The response queue is full.
1804          * If the server is processing SRP requests, i.e.
1805          * the client has successfully done an
1806          * SRP_LOGIN, then it will wait forever for room in
1807          * the queue.  However if the system admin
1808          * is attempting to unconfigure the server then one
1809          * or more children will be in a state where
1810          * they are being removed. So if there is even one
1811          * child being removed then the driver assumes
1812          * the system admin is attempting to break the
1813          * connection with the client and MAX_TIMER_POPS
1814          * is honored.
1815          */
1816         if ((vscsi->rsp_q_timer.timer_pops < MAX_TIMER_POPS) ||
1817             (vscsi->state == SRP_PROCESSING)) {
1818                 dev_dbg(&vscsi->dev, "snd_msg_failed: response queue full, flags 0x%x, timer started %d, pops %d\n",
1819                         vscsi->flags, (int)vscsi->rsp_q_timer.started,
1820                         vscsi->rsp_q_timer.timer_pops);
1822                 /*
1823                  * Check if the timer is running; if it
1824                  * is not then start it up.
1825                  */
1826                 if (!vscsi->rsp_q_timer.started) {
1827                         if (vscsi->rsp_q_timer.timer_pops <
1828                             MAX_TIMER_POPS) {
1829                                 kt = WAIT_NANO_SECONDS;
1830                         } else {
1831                                 /*
1832                                  * slide the timeslice if the maximum
1833                                  * timer pops have already happened
1834                                  */
1835                                 kt = ktime_set(WAIT_SECONDS, 0);
1836                         }
1838                         vscsi->rsp_q_timer.started = true;
1839                         hrtimer_start(&vscsi->rsp_q_timer.timer, kt,
1840                                       HRTIMER_MODE_REL);
1841                 }
1842         } else {
1843                 /*
1844                  * TBD: Do we need to worry about this? Need to get
1845                  *      remove working.
1846                  */
1847                 /*
1848                  * We waited a long time and it appears the system admin
1849                  * is bringing this driver down.
1850                  */
1851                 vscsi->flags |= RESPONSE_Q_DOWN;
1852                 ibmvscsis_free_cmd_qs(vscsi);
1853                 /*
1854                  * If the driver is already attempting to disconnect
1855                  * from the client and has already logged an error,
1856                  * trace this event but don't put it in the error log.
1857                  */
1858                 if (!(vscsi->state & (ERR_DISCONNECT |
1859                                       ERR_DISCONNECT_RECONNECT |
1860                                       ERR_DISCONNECTED | UNDEFINED))) {
1861                         dev_err(&vscsi->dev, "client crq full too long\n");
1862                         ibmvscsis_post_disconnect(vscsi,
1863                                                   ERR_DISCONNECT_RECONNECT,
1864                                                   0);
1865                 }
1866         }
1869 /**
1870  * ibmvscsis_send_messages() - Send a Response
1871  * @vscsi:      Pointer to our adapter structure
1872  *
1873  * Send a response, first checking the waiting queue. Responses are
1874  * sent in order they are received. If the response cannot be sent,
1875  * because the client queue is full, it stays on the waiting queue.
1876  *
1877  * PRECONDITION:
1878  *      Called with interrupt lock held
1879  */
1880 static void ibmvscsis_send_messages(struct scsi_info *vscsi)
1882         u64 msg_hi = 0;
1883         /* Note: do not attempt to access IU_data_ptr through this pointer;
1884          * it is not valid.
1885          */
1886         struct viosrp_crq *crq = (struct viosrp_crq *)&msg_hi;
1887         struct ibmvscsis_cmd *cmd, *nxt;
1888         struct iu_entry *iue;
1889         long rc = ADAPT_SUCCESS;
1890         bool retry = false;
1892         if (!(vscsi->flags & RESPONSE_Q_DOWN)) {
1893                 do {
1894                         retry = false;
1895                         list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp,
1896                                                  list) {
1897                                 /*
1898                                  * Check to make sure abort cmd gets processed
1899                                  * prior to the abort tmr cmd
1900                                  */
1901                                 if (cmd->flags & DELAY_SEND)
1902                                         continue;
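                                /*
                                 * This cmd was aborted: release its abort TM
                                 * response (held back with DELAY_SEND) and
                                 * rescan the list so it is sent after this one.
                                 */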
1904                                 if (cmd->abort_cmd) {
1905                                         retry = true;
1906                                         cmd->abort_cmd->flags &= ~(DELAY_SEND);
1907                                         cmd->abort_cmd = NULL;
1908                                 }
1910                                 /*
1911                                  * In CMD_T_ABORTED without CMD_T_TAS
1912                                  * scenarios, and in the case where LIO
1913                                  * issued an ABORT_TASK with
1914                                  * TMR_TASK_DOES_NOT_EXIST, we do not send
1915                                  * a response, since one was already sent.
1916                                  */
1917                                 if (cmd->se_cmd.transport_state & CMD_T_ABORTED &&
1918                                     !(cmd->se_cmd.transport_state & CMD_T_TAS)) {
1919                                         list_del(&cmd->list);
1920                                         ibmvscsis_free_cmd_resources(vscsi,
1921                                                                      cmd);
1922                                         /*
1923                                          * With an op successfully aborted
1924                                          * through LIO, we want to increment
1925                                          * the vscsi credit so that when we do
1926                                          * not send a rsp to the original scsi
1927                                          * abort op (h_send_crq), but the tm
1928                                          * rsp to the abort is sent, the credit
1929                                          * is correctly sent with the abort tm
1930                                          * rsp.  We need 1 credit for the abort
1931                                          * tm rsp and 1 for the aborted scsi
1932                                          * op, so we increment here.
1933                                          * We also increment the credit here
1934                                          * to make sure cmd is actually
1935                                          * released first; otherwise the
1936                                          * client will think it can send a
1937                                          * new cmd, and we could find
1938                                          * ourselves short of cmd elements.
1939                                          */
1940                                         vscsi->credit += 1;
1941                                 } else {
1942                                         iue = cmd->iue;
1944                                         crq->valid = VALID_CMD_RESP_EL;
1945                                         crq->format = cmd->rsp.format;
1947                                         if (cmd->flags & CMD_FAST_FAIL)
1948                                                 crq->status = VIOSRP_ADAPTER_FAIL;
1950                                         crq->IU_length = cpu_to_be16(cmd->rsp.len);
1952                                         rc = h_send_crq(vscsi->dma_dev->unit_address,
1953                                                         be64_to_cpu(msg_hi),
1954                                                         be64_to_cpu(cmd->rsp.tag));
1956                                         dev_dbg(&vscsi->dev, "send_messages: cmd %p, tag 0x%llx, rc %ld\n",
1957                                                 cmd, be64_to_cpu(cmd->rsp.tag),
1958                                                 rc);
1960                                         /* if all ok free up the command
1961                                          * element resources
1962                                          */
1963                                         if (rc == H_SUCCESS) {
1964                                                 /* some movement has occurred */
1965                                                 vscsi->rsp_q_timer.timer_pops = 0;
1966                                                 list_del(&cmd->list);
1968                                                 ibmvscsis_free_cmd_resources(vscsi,
1969                                                                              cmd);
1970                                         } else {
1971                                                 srp_snd_msg_failed(vscsi, rc);
1972                                                 break;
1973                                         }
1974                                 }
1975                         }
1976                 } while (retry);
1978                 if (!rc) {
1979                         /*
1980                          * The timer could pop with the queue empty.  If
1981                          * this happens, rc will always indicate a
1982                          * success; clear the pop count.
1983                          */
1984                         vscsi->rsp_q_timer.timer_pops = 0;
1985                 }
1986         } else {
1987                 ibmvscsis_free_cmd_qs(vscsi);
1988         }
1991 /* Called with intr lock held */
1992 static void ibmvscsis_send_mad_resp(struct scsi_info *vscsi,
1993                                     struct ibmvscsis_cmd *cmd,
1994                                     struct viosrp_crq *crq)
1996         struct iu_entry *iue = cmd->iue;
1997         struct mad_common *mad = (struct mad_common *)&vio_iu(iue)->mad;
1998         uint flag_bits = 0;
1999         long rc;
2001         dma_wmb();
2002         rc = h_copy_rdma(sizeof(struct mad_common),
2003                          vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma,
2004                          vscsi->dds.window[REMOTE].liobn,
2005                          be64_to_cpu(crq->IU_data_ptr));
2006         if (!rc) {
2007                 cmd->rsp.format = VIOSRP_MAD_FORMAT;
2008                 cmd->rsp.len = sizeof(struct mad_common);
2009                 cmd->rsp.tag = mad->tag;
2010                 list_add_tail(&cmd->list, &vscsi->waiting_rsp);
2011                 ibmvscsis_send_messages(vscsi);
2012         } else {
2013                 dev_dbg(&vscsi->dev, "Error sending mad response, rc %ld\n",
2014                         rc);
2015                 if (rc == H_PERMISSION) {
2016                         if (connection_broken(vscsi))
2017                                 flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
2018                 }
2019                 dev_err(&vscsi->dev, "mad: failed to copy to client, rc %ld\n",
2020                         rc);
2022                 ibmvscsis_free_cmd_resources(vscsi, cmd);
2023                 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
2024                                           flag_bits);
2025         }
2028 /**
2029  * ibmvscsis_mad() - Service a MAnagement Data gram.
2030  * @vscsi:      Pointer to our adapter structure
2031  * @crq:        Pointer to the CRQ entry containing the MAD request
2032  *
2033  * EXECUTION ENVIRONMENT:
2034  *      Interrupt, called with adapter lock held
2035  */
2036 static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
2038         struct iu_entry *iue;
2039         struct ibmvscsis_cmd *cmd;
2040         struct mad_common *mad;
2041         long rc = ADAPT_SUCCESS;
2043         switch (vscsi->state) {
2044                 /*
2045                  * We have not exchanged Init Msgs yet, so this MAD was sent
2046                  * before the last Transport Event; client will not be
2047                  * expecting a response.
2048                  */
2049         case WAIT_CONNECTION:
2050                 dev_dbg(&vscsi->dev, "mad: in Wait Connection state, ignoring MAD, flags %d\n",
2051                         vscsi->flags);
2052                 return ADAPT_SUCCESS;
2054         case SRP_PROCESSING:
2055         case CONNECTED:
2056                 break;
2058                 /*
2059                  * We should never get here while we're in these states.
2060                  * Just log an error and get out.
2061                  */
2062         case UNCONFIGURING:
2063         case WAIT_IDLE:
2064         case ERR_DISCONNECT:
2065         case ERR_DISCONNECT_RECONNECT:
2066         default:
2067                 dev_err(&vscsi->dev, "mad: invalid adapter state %d for mad\n",
2068                         vscsi->state);
2069                 return ADAPT_SUCCESS;
2070         }
2072         cmd = ibmvscsis_get_free_cmd(vscsi);
2073         if (!cmd) {
2074                 dev_err(&vscsi->dev, "mad: failed to get cmd, debit %d\n",
2075                         vscsi->debit);
2076                 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2077                 return ERROR;
2078         }
2079         iue = cmd->iue;
2080         cmd->type = ADAPTER_MAD;
2082         rc = ibmvscsis_copy_crq_packet(vscsi, cmd, crq);
2083         if (!rc) {
2084                 mad = (struct mad_common *)&vio_iu(iue)->mad;
2086                 dev_dbg(&vscsi->dev, "mad: type %d\n", be32_to_cpu(mad->type));
2088                 rc = ibmvscsis_process_mad(vscsi, iue);
2090                 dev_dbg(&vscsi->dev, "mad: status %hd, rc %ld\n",
2091                         be16_to_cpu(mad->status), rc);
2093                 if (!rc)
2094                         ibmvscsis_send_mad_resp(vscsi, cmd, crq);
2095         } else {
2096                 ibmvscsis_free_cmd_resources(vscsi, cmd);
2097         }
2099         dev_dbg(&vscsi->dev, "Leaving mad, rc %ld\n", rc);
2100         return rc;
2103 /**
2104  * ibmvscsis_login_rsp() - Create/copy a login response notice to the client
2105  * @vscsi:      Pointer to our adapter structure
2106  * @cmd:        Pointer to the command for the SRP Login request
2107  *
2108  * EXECUTION ENVIRONMENT:
2109  *      Interrupt, interrupt lock held
2110  */
2111 static long ibmvscsis_login_rsp(struct scsi_info *vscsi,
2112                                 struct ibmvscsis_cmd *cmd)
2114         struct iu_entry *iue = cmd->iue;
2115         struct srp_login_rsp *rsp = &vio_iu(iue)->srp.login_rsp;
2116         struct format_code *fmt;
2117         uint flag_bits = 0;
2118         long rc = ADAPT_SUCCESS;
2120         memset(rsp, 0, sizeof(struct srp_login_rsp));
2122         rsp->opcode = SRP_LOGIN_RSP;
2123         rsp->req_lim_delta = cpu_to_be32(vscsi->request_limit);
2124         rsp->tag = cmd->rsp.tag;
2125         rsp->max_it_iu_len = cpu_to_be32(SRP_MAX_IU_LEN);
2126         rsp->max_ti_iu_len = cpu_to_be32(SRP_MAX_IU_LEN);
2127         fmt = (struct format_code *)&rsp->buf_fmt;
2128         fmt->buffers = SUPPORTED_FORMATS;
2129         vscsi->credit = 0;
2131         cmd->rsp.len = sizeof(struct srp_login_rsp);
2133         dma_wmb();
2134         rc = h_copy_rdma(cmd->rsp.len, vscsi->dds.window[LOCAL].liobn,
2135                          iue->sbuf->dma, vscsi->dds.window[REMOTE].liobn,
2136                          be64_to_cpu(iue->remote_token));
2138         switch (rc) {
2139         case H_SUCCESS:
2140                 break;
2142         case H_PERMISSION:
2143                 if (connection_broken(vscsi))
2144                         flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED;
2145                 dev_err(&vscsi->dev, "login_rsp: error copying to client, rc %ld\n",
2146                         rc);
2147                 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
2148                                           flag_bits);
2149                 break;
2150         case H_SOURCE_PARM:
2151         case H_DEST_PARM:
2152         default:
2153                 dev_err(&vscsi->dev, "login_rsp: error copying to client, rc %ld\n",
2154                         rc);
2155                 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2156                 break;
2157         }
2159         return rc;
2162 /**
2163  * ibmvscsis_srp_login_rej() - Create/copy a login rejection notice to client
2164  * @vscsi:      Pointer to our adapter structure
2165  * @cmd:        Pointer to the command for the SRP Login request
2166  * @reason:     The reason the SRP Login is being rejected, per SRP protocol
2167  *
2168  * EXECUTION ENVIRONMENT:
2169  *      Interrupt, interrupt lock held
2170  */
2171 static long ibmvscsis_srp_login_rej(struct scsi_info *vscsi,
2172                                     struct ibmvscsis_cmd *cmd, u32 reason)
2174         struct iu_entry *iue = cmd->iue;
2175         struct srp_login_rej *rej = &vio_iu(iue)->srp.login_rej;
2176         struct format_code *fmt;
2177         uint flag_bits = 0;
2178         long rc = ADAPT_SUCCESS;
2180         memset(rej, 0, sizeof(*rej));
2182         rej->opcode = SRP_LOGIN_REJ;
2183         rej->reason = cpu_to_be32(reason);
2184         rej->tag = cmd->rsp.tag;
2185         fmt = (struct format_code *)&rej->buf_fmt;
2186         fmt->buffers = SUPPORTED_FORMATS;
2188         cmd->rsp.len = sizeof(*rej);
2190         dma_wmb();
2191         rc = h_copy_rdma(cmd->rsp.len, vscsi->dds.window[LOCAL].liobn,
2192                          iue->sbuf->dma, vscsi->dds.window[REMOTE].liobn,
2193                          be64_to_cpu(iue->remote_token));
2195         switch (rc) {
2196         case H_SUCCESS:
2197                 break;
2198         case H_PERMISSION:
2199                 if (connection_broken(vscsi))
2200                         flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED;
2201                 dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n",
2202                         rc);
2203                 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
2204                                           flag_bits);
2205                 break;
2206         case H_SOURCE_PARM:
2207         case H_DEST_PARM:
2208         default:
2209                 dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n",
2210                         rc);
2211                 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2212                 break;
2213         }
2215         return rc;
2218 static int ibmvscsis_make_nexus(struct ibmvscsis_tport *tport)
2220         char *name = tport->tport_name;
2221         struct ibmvscsis_nexus *nexus;
2222         struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport);
2223         int rc;
2225         if (tport->ibmv_nexus) {
2226                 dev_dbg(&vscsi->dev, "tport->ibmv_nexus already exists\n");
2227                 return 0;
2228         }
2230         nexus = kzalloc(sizeof(*nexus), GFP_KERNEL);
2231         if (!nexus) {
2232                 dev_err(&vscsi->dev, "Unable to allocate struct ibmvscsis_nexus\n");
2233                 return -ENOMEM;
2234         }
2236         nexus->se_sess = target_setup_session(&tport->se_tpg, 0, 0,
2237                                               TARGET_PROT_NORMAL, name, nexus,
2238                                               NULL);
2239         if (IS_ERR(nexus->se_sess)) {
2240                 rc = PTR_ERR(nexus->se_sess);
2241                 goto transport_init_fail;
2242         }
2244         tport->ibmv_nexus = nexus;
2246         return 0;
2248 transport_init_fail:
2249         kfree(nexus);
2250         return rc;
2253 static int ibmvscsis_drop_nexus(struct ibmvscsis_tport *tport)
2255         struct se_session *se_sess;
2256         struct ibmvscsis_nexus *nexus;
2258         nexus = tport->ibmv_nexus;
2259         if (!nexus)
2260                 return -ENODEV;
2262         se_sess = nexus->se_sess;
2263         if (!se_sess)
2264                 return -ENODEV;
2266         /*
2267          * Release the SCSI I_T Nexus to the emulated ibmvscsis Target Port
2268          */
2269         target_wait_for_sess_cmds(se_sess);
2270         target_remove_session(se_sess);
2271         tport->ibmv_nexus = NULL;
2272         kfree(nexus);
2274         return 0;
2277 /**
2278  * ibmvscsis_srp_login() - Process an SRP Login Request
2279  * @vscsi:      Pointer to our adapter structure
2280  * @cmd:        Command element to use to process the SRP Login request
2281  * @crq:        Pointer to CRQ entry containing the SRP Login request
2282  *
2283  * EXECUTION ENVIRONMENT:
2284  *      Interrupt, called with interrupt lock held
2285  */
2286 static long ibmvscsis_srp_login(struct scsi_info *vscsi,
2287                                 struct ibmvscsis_cmd *cmd,
2288                                 struct viosrp_crq *crq)
2290         struct iu_entry *iue = cmd->iue;
2291         struct srp_login_req *req = &vio_iu(iue)->srp.login_req;
2292         struct port_id {
2293                 __be64 id_extension;
2294                 __be64 io_guid;
2295         } *iport, *tport;
2296         struct format_code *fmt;
2297         u32 reason = 0x0;
2298         long rc = ADAPT_SUCCESS;
2300         iport = (struct port_id *)req->initiator_port_id;
2301         tport = (struct port_id *)req->target_port_id;
2302         fmt = (struct format_code *)&req->req_buf_fmt;
2303         if (be32_to_cpu(req->req_it_iu_len) > SRP_MAX_IU_LEN)
2304                 reason = SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE;
2305         else if (be32_to_cpu(req->req_it_iu_len) < 64)
2306                 reason = SRP_LOGIN_REJ_UNABLE_ESTABLISH_CHANNEL;
2307         else if ((be64_to_cpu(iport->id_extension) > (MAX_NUM_PORTS - 1)) ||
2308                  (be64_to_cpu(tport->id_extension) > (MAX_NUM_PORTS - 1)))
2309                 reason = SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL;
2310         else if (req->req_flags & SRP_MULTICHAN_MULTI)
2311                 reason = SRP_LOGIN_REJ_MULTI_CHANNEL_UNSUPPORTED;
2312         else if (fmt->buffers & (~SUPPORTED_FORMATS))
2313                 reason = SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT;
2314         else if ((fmt->buffers & SUPPORTED_FORMATS) == 0)
2315                 reason = SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT;
2317         if (vscsi->state == SRP_PROCESSING)
2318                 reason = SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED;
2320         rc = ibmvscsis_make_nexus(&vscsi->tport);
2321         if (rc)
2322                 reason = SRP_LOGIN_REJ_UNABLE_ESTABLISH_CHANNEL;
2324         cmd->rsp.format = VIOSRP_SRP_FORMAT;
2325         cmd->rsp.tag = req->tag;
2327         dev_dbg(&vscsi->dev, "srp_login: reason 0x%x\n", reason);
2329         if (reason)
2330                 rc = ibmvscsis_srp_login_rej(vscsi, cmd, reason);
2331         else
2332                 rc = ibmvscsis_login_rsp(vscsi, cmd);
2334         if (!rc) {
2335                 if (!reason)
2336                         vscsi->state = SRP_PROCESSING;
2338                 list_add_tail(&cmd->list, &vscsi->waiting_rsp);
2339                 ibmvscsis_send_messages(vscsi);
2340         } else {
2341                 ibmvscsis_free_cmd_resources(vscsi, cmd);
2342         }
2344         dev_dbg(&vscsi->dev, "Leaving srp_login, rc %ld\n", rc);
2345         return rc;
2348 /**
2349  * ibmvscsis_srp_i_logout() - Helper Function to close I_T Nexus
2350  * @vscsi:      Pointer to our adapter structure
2351  * @cmd:        Command element to use to process the Implicit Logout request
2352  * @crq:        Pointer to CRQ entry containing the Implicit Logout request
2353  *
2354  * Do the logic to close the I_T nexus.  This function may not
2355  * behave to specification.
2356  *
2357  * EXECUTION ENVIRONMENT:
2358  *      Interrupt, interrupt lock held
2359  */
2360 static long ibmvscsis_srp_i_logout(struct scsi_info *vscsi,
2361                                    struct ibmvscsis_cmd *cmd,
2362                                    struct viosrp_crq *crq)
2364         struct iu_entry *iue = cmd->iue;
2365         struct srp_i_logout *log_out = &vio_iu(iue)->srp.i_logout;
2366         long rc = ADAPT_SUCCESS;
2368         if ((vscsi->debit > 0) || !list_empty(&vscsi->schedule_q) ||
2369             !list_empty(&vscsi->waiting_rsp)) {
2370                 dev_err(&vscsi->dev, "i_logout: outstanding work\n");
2371                 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
2372         } else {
2373                 cmd->rsp.format = SRP_FORMAT;
2374                 cmd->rsp.tag = log_out->tag;
2375                 cmd->rsp.len = sizeof(struct mad_common);
2376                 list_add_tail(&cmd->list, &vscsi->waiting_rsp);
2377                 ibmvscsis_send_messages(vscsi);
2379                 ibmvscsis_post_disconnect(vscsi, WAIT_IDLE, 0);
2380         }
2382         return rc;
2385 /* Called with intr lock held */
2386 static void ibmvscsis_srp_cmd(struct scsi_info *vscsi, struct viosrp_crq *crq)
2388         struct ibmvscsis_cmd *cmd;
2389         struct iu_entry *iue;
2390         struct srp_cmd *srp;
2391         struct srp_tsk_mgmt *tsk;
2392         long rc;
2394         if (vscsi->request_limit - vscsi->debit <= 0) {
2395                 /* Client has exceeded request limit */
2396                 dev_err(&vscsi->dev, "Client exceeded the request limit (%d), debit %d\n",
2397                         vscsi->request_limit, vscsi->debit);
2398                 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2399                 return;
2400         }
2402         cmd = ibmvscsis_get_free_cmd(vscsi);
2403         if (!cmd) {
2404                 dev_err(&vscsi->dev, "srp_cmd failed to get cmd, debit %d\n",
2405                         vscsi->debit);
2406                 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2407                 return;
2408         }
2409         iue = cmd->iue;
2410         srp = &vio_iu(iue)->srp.cmd;
2412         rc = ibmvscsis_copy_crq_packet(vscsi, cmd, crq);
2413         if (rc) {
2414                 ibmvscsis_free_cmd_resources(vscsi, cmd);
2415                 return;
2416         }
2418         if (vscsi->state == SRP_PROCESSING) {
2419                 switch (srp->opcode) {
2420                 case SRP_LOGIN_REQ:
2421                         rc = ibmvscsis_srp_login(vscsi, cmd, crq);
2422                         break;
2424                 case SRP_TSK_MGMT:
2425                         tsk = &vio_iu(iue)->srp.tsk_mgmt;
2426                         dev_dbg(&vscsi->dev, "tsk_mgmt tag: %llu (0x%llx)\n",
2427                                 tsk->tag, tsk->tag);
2428                         cmd->rsp.tag = tsk->tag;
2429                         vscsi->debit += 1;
2430                         cmd->type = TASK_MANAGEMENT;
2431                         list_add_tail(&cmd->list, &vscsi->schedule_q);
2432                         queue_work(vscsi->work_q, &cmd->work);
2433                         break;
2435                 case SRP_CMD:
2436                         dev_dbg(&vscsi->dev, "srp_cmd tag: %llu (0x%llx)\n",
2437                                 srp->tag, srp->tag);
2438                         cmd->rsp.tag = srp->tag;
2439                         vscsi->debit += 1;
2440                         cmd->type = SCSI_CDB;
2441                         /*
2442                          * We want to keep track of work waiting for
2443                          * the workqueue.
2444                          */
2445                         list_add_tail(&cmd->list, &vscsi->schedule_q);
2446                         queue_work(vscsi->work_q, &cmd->work);
2447                         break;
2449                 case SRP_I_LOGOUT:
2450                         rc = ibmvscsis_srp_i_logout(vscsi, cmd, crq);
2451                         break;
2453                 case SRP_CRED_RSP:
2454                 case SRP_AER_RSP:
2455                 default:
2456                         ibmvscsis_free_cmd_resources(vscsi, cmd);
2457                         dev_err(&vscsi->dev, "invalid srp cmd, opcode %d\n",
2458                                 (uint)srp->opcode);
2459                         ibmvscsis_post_disconnect(vscsi,
2460                                                   ERR_DISCONNECT_RECONNECT, 0);
2461                         break;
2462                 }
2463         } else if (srp->opcode == SRP_LOGIN_REQ && vscsi->state == CONNECTED) {
2464                 rc = ibmvscsis_srp_login(vscsi, cmd, crq);
2465         } else {
2466                 ibmvscsis_free_cmd_resources(vscsi, cmd);
2467                 dev_err(&vscsi->dev, "Invalid state %d to handle srp cmd\n",
2468                         vscsi->state);
2469                 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2470         }
2473 /**
2474  * ibmvscsis_ping_response() - Respond to a ping request
2475  * @vscsi:      Pointer to our adapter structure
2476  *
2477  * Let the client know that the server is alive and waiting on
2478  * its native I/O stack.
2479  * If any type of error occurs from the call to queue a ping
2480  * response then the client is either not accepting or receiving
2481  * interrupts.  Disconnect with an error.
2482  *
2483  * EXECUTION ENVIRONMENT:
2484  *      Interrupt, interrupt lock held
2485  */
2486 static long ibmvscsis_ping_response(struct scsi_info *vscsi)
2488         struct viosrp_crq *crq;
2489         u64 buffer[2] = { 0, 0 };
2490         long rc;
2492         crq = (struct viosrp_crq *)&buffer;
2493         crq->valid = VALID_CMD_RESP_EL;
2494         crq->format = (u8)MESSAGE_IN_CRQ;
2495         crq->status = PING_RESPONSE;
2497         rc = h_send_crq(vscsi->dds.unit_id, cpu_to_be64(buffer[MSG_HI]),
2498                         cpu_to_be64(buffer[MSG_LOW]));
2500         switch (rc) {
2501         case H_SUCCESS:
2502                 break;
2503         case H_CLOSED:
2504                 vscsi->flags |= CLIENT_FAILED;
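                /* Fall through - a closed connection also means the response queue is down */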
2505         case H_DROPPED:
2506                 vscsi->flags |= RESPONSE_Q_DOWN;
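                /* Fall through - log the failure and disconnect below */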
2507         case H_REMOTE_PARM:
2508                 dev_err(&vscsi->dev, "ping_response: h_send_crq failed, rc %ld\n",
2509                         rc);
2510                 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2511                 break;
2512         default:
2513                 dev_err(&vscsi->dev, "ping_response: h_send_crq returned unknown rc %ld\n",
2514                         rc);
2515                 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
2516                 break;
2517         }
2519         return rc;
2522 /**
2523  * ibmvscsis_parse_command() - Parse an element taken from the cmd rsp queue.
2524  * @vscsi:      Pointer to our adapter structure
2525  * @crq:        Pointer to CRQ element containing the SRP request
2526  *
2527  * This function will return success if the command queue element is valid
2528  * and the srp iu or MAD request it pointed to was also valid.  Even on
2529  * success, an error may still have been returned to the client.
2530  *
2531  * EXECUTION ENVIRONMENT:
2532  *      Interrupt, intr lock held
2533  */
2534 static long ibmvscsis_parse_command(struct scsi_info *vscsi,
2535                                     struct viosrp_crq *crq)
2537         long rc = ADAPT_SUCCESS;
2539         switch (crq->valid) {
2540         case VALID_CMD_RESP_EL:
2541                 switch (crq->format) {
2542                 case OS400_FORMAT:
2543                 case AIX_FORMAT:
2544                 case LINUX_FORMAT:
2545                 case MAD_FORMAT:
2546                         if (vscsi->flags & PROCESSING_MAD) {
2547                                 rc = ERROR;
2548                                 dev_err(&vscsi->dev, "parse_command: already processing mad\n");
2549                                 ibmvscsis_post_disconnect(vscsi,
2550                                                        ERR_DISCONNECT_RECONNECT,
2551                                                        0);
2552                         } else {
2553                                 vscsi->flags |= PROCESSING_MAD;
2554                                 rc = ibmvscsis_mad(vscsi, crq);
2555                         }
2556                         break;
2558                 case SRP_FORMAT:
2559                         ibmvscsis_srp_cmd(vscsi, crq);
2560                         break;
2562                 case MESSAGE_IN_CRQ:
2563                         if (crq->status == PING)
2564                                 ibmvscsis_ping_response(vscsi);
2565                         break;
2567                 default:
2568                         dev_err(&vscsi->dev, "parse_command: invalid format %d\n",
2569                                 (uint)crq->format);
2570                         ibmvscsis_post_disconnect(vscsi,
2571                                                   ERR_DISCONNECT_RECONNECT, 0);
2572                         break;
2573                 }
2574                 break;
2576         case VALID_TRANS_EVENT:
2577                 rc = ibmvscsis_trans_event(vscsi, crq);
2578                 break;
2580         case VALID_INIT_MSG:
2581                 rc = ibmvscsis_init_msg(vscsi, crq);
2582                 break;
2584         default:
2585                 dev_err(&vscsi->dev, "parse_command: invalid valid field %d\n",
2586                         (uint)crq->valid);
2587                 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2588                 break;
2589         }
2591         /*
2592          * Return only what the interrupt handler cares
2593          * about. Most errors we keep right on trucking.
2594          */
2595         rc = vscsi->flags & SCHEDULE_DISCONNECT;
2597         return rc;
2600 static int read_dma_window(struct scsi_info *vscsi)
2602         struct vio_dev *vdev = vscsi->dma_dev;
2603         const __be32 *dma_window;
2604         const __be32 *prop;
2606         /* TODO Using of_parse_dma_window would be better, but it doesn't give
2607          * a way to read multiple windows without already knowing the size of
2608          * a window or the number of windows.
2609          */
2610         dma_window = (const __be32 *)vio_get_attribute(vdev,
2611                                                        "ibm,my-dma-window",
2612                                                        NULL);
2613         if (!dma_window) {
2614                 dev_err(&vscsi->dev, "Couldn't find ibm,my-dma-window property\n");
2615                 return -1;
2616         }
2618         vscsi->dds.window[LOCAL].liobn = be32_to_cpu(*dma_window);
2619         dma_window++;
2621         prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-address-cells",
2622                                                  NULL);
2623         if (!prop) {
2624                 dev_warn(&vscsi->dev, "Couldn't find ibm,#dma-address-cells property\n");
2625                 dma_window++;
2626         } else {
2627                 dma_window += be32_to_cpu(*prop);
2628         }
2630         prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-size-cells",
2631                                                  NULL);
2632         if (!prop) {
2633                 dev_warn(&vscsi->dev, "Couldn't find ibm,#dma-size-cells property\n");
2634                 dma_window++;
2635         } else {
2636                 dma_window += be32_to_cpu(*prop);
2637         }
2639         /* dma_window should point to the second window now */
2640         vscsi->dds.window[REMOTE].liobn = be32_to_cpu(*dma_window);
2642         return 0;
2645 static struct ibmvscsis_tport *ibmvscsis_lookup_port(const char *name)
2647         struct ibmvscsis_tport *tport = NULL;
2648         struct vio_dev *vdev;
2649         struct scsi_info *vscsi;
2651         spin_lock_bh(&ibmvscsis_dev_lock);
2652         list_for_each_entry(vscsi, &ibmvscsis_dev_list, list) {
2653                 vdev = vscsi->dma_dev;
2654                 if (!strcmp(dev_name(&vdev->dev), name)) {
2655                         tport = &vscsi->tport;
2656                         break;
2657                 }
2658         }
2659         spin_unlock_bh(&ibmvscsis_dev_lock);
2661         return tport;
2664 /**
2665  * ibmvscsis_parse_cmd() - Parse SRP Command
2666  * @vscsi:      Pointer to our adapter structure
2667  * @cmd:        Pointer to command element with SRP command
2668  *
2669  * Parse the srp command; if it is valid then submit it to tcm.
2670  * Note: The return code does not reflect the status of the SCSI CDB.
2671  *
2672  * EXECUTION ENVIRONMENT:
2673  *      Process level
2674  */
2675 static void ibmvscsis_parse_cmd(struct scsi_info *vscsi,
2676                                 struct ibmvscsis_cmd *cmd)
2678         struct iu_entry *iue = cmd->iue;
2679         struct srp_cmd *srp = (struct srp_cmd *)iue->sbuf->buf;
2680         struct ibmvscsis_nexus *nexus;
2681         u64 data_len = 0;
2682         enum dma_data_direction dir;
2683         int attr = 0;
2684         int rc = 0;
2686         nexus = vscsi->tport.ibmv_nexus;
2687         /*
2688          * additional length in bytes.  Note that the SRP spec says that
2689          * additional length is in 4-byte words, but technically the
2690          * additional length field is only the upper 6 bits of the byte.
2691          * The lower 2 bits are reserved.  If the lower 2 bits are 0 (as
2692          * all reserved fields should be), then interpreting the byte as
2693          * an int will yield the length in bytes.
2694          */
2695         if (srp->add_cdb_len & 0x03) {
2696                 dev_err(&vscsi->dev, "parse_cmd: reserved bits set in IU\n");
2697                 spin_lock_bh(&vscsi->intr_lock);
2698                 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2699                 ibmvscsis_free_cmd_resources(vscsi, cmd);
2700                 spin_unlock_bh(&vscsi->intr_lock);
2701                 return;
2702         }
2704         if (srp_get_desc_table(srp, &dir, &data_len)) {
2705                 dev_err(&vscsi->dev, "0x%llx: parsing SRP descriptor table failed.\n",
2706                         srp->tag);
2707                 goto fail;
2708         }
2710         cmd->rsp.sol_not = srp->sol_not;
2712         switch (srp->task_attr) {
2713         case SRP_SIMPLE_TASK:
2714                 attr = TCM_SIMPLE_TAG;
2715                 break;
2716         case SRP_ORDERED_TASK:
2717                 attr = TCM_ORDERED_TAG;
2718                 break;
2719         case SRP_HEAD_TASK:
2720                 attr = TCM_HEAD_TAG;
2721                 break;
2722         case SRP_ACA_TASK:
2723                 attr = TCM_ACA_TAG;
2724                 break;
2725         default:
2726                 dev_err(&vscsi->dev, "Invalid task attribute %d\n",
2727                         srp->task_attr);
2728                 goto fail;
2729         }
2731         cmd->se_cmd.tag = be64_to_cpu(srp->tag);
2733         spin_lock_bh(&vscsi->intr_lock);
2734         list_add_tail(&cmd->list, &vscsi->active_q);
2735         spin_unlock_bh(&vscsi->intr_lock);
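        /*
         * Clear the top two bits of LUN byte 0 (the SAM address method
         * field) before scsilun_to_int(), presumably so the LUN is handed
         * to TCM as a simple flat number.
         */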
2737         srp->lun.scsi_lun[0] &= 0x3f;
2739         rc = target_submit_cmd(&cmd->se_cmd, nexus->se_sess, srp->cdb,
2740                                cmd->sense_buf, scsilun_to_int(&srp->lun),
2741                                data_len, attr, dir, 0);
2742         if (rc) {
2743                 dev_err(&vscsi->dev, "target_submit_cmd failed, rc %d\n", rc);
2744                 spin_lock_bh(&vscsi->intr_lock);
2745                 list_del(&cmd->list);
2746                 ibmvscsis_free_cmd_resources(vscsi, cmd);
2747                 spin_unlock_bh(&vscsi->intr_lock);
2748                 goto fail;
2749         }
2750         return;
2752 fail:
2753         spin_lock_bh(&vscsi->intr_lock);
2754         ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2755         spin_unlock_bh(&vscsi->intr_lock);
2758 /**
2759  * ibmvscsis_parse_task() - Parse SRP Task Management Request
2760  * @vscsi:      Pointer to our adapter structure
2761  * @cmd:        Pointer to command element with SRP task management request
2762  *
2763  * Parse the SRP task management request; if it is valid, submit it to TCM.
2764  * Note: The return code does not reflect the status of the task management
2765  * request.
2766  *
2767  * EXECUTION ENVIRONMENT:
2768  *      Process level
2769  */
2770 static void ibmvscsis_parse_task(struct scsi_info *vscsi,
2771                                  struct ibmvscsis_cmd *cmd)
2773         struct iu_entry *iue = cmd->iue;
2774         struct srp_tsk_mgmt *srp_tsk = &vio_iu(iue)->srp.tsk_mgmt;
2775         int tcm_type;
2776         u64 tag_to_abort = 0;
2777         int rc = 0;
2778         struct ibmvscsis_nexus *nexus;
2780         nexus = vscsi->tport.ibmv_nexus;
2782         cmd->rsp.sol_not = srp_tsk->sol_not;
2784         switch (srp_tsk->tsk_mgmt_func) {
2785         case SRP_TSK_ABORT_TASK:
2786                 tcm_type = TMR_ABORT_TASK;
2787                 tag_to_abort = be64_to_cpu(srp_tsk->task_tag);
2788                 break;
2789         case SRP_TSK_ABORT_TASK_SET:
2790                 tcm_type = TMR_ABORT_TASK_SET;
2791                 break;
2792         case SRP_TSK_CLEAR_TASK_SET:
2793                 tcm_type = TMR_CLEAR_TASK_SET;
2794                 break;
2795         case SRP_TSK_LUN_RESET:
2796                 tcm_type = TMR_LUN_RESET;
2797                 break;
2798         case SRP_TSK_CLEAR_ACA:
2799                 tcm_type = TMR_CLEAR_ACA;
2800                 break;
2801         default:
2802                 dev_err(&vscsi->dev, "unknown task mgmt func %d\n",
2803                         srp_tsk->tsk_mgmt_func);
2804                 cmd->se_cmd.se_tmr_req->response =
2805                         TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
2806                 rc = -1;
2807                 break;
2808         }
2810         if (!rc) {
2811                 cmd->se_cmd.tag = be64_to_cpu(srp_tsk->tag);
2813                 spin_lock_bh(&vscsi->intr_lock);
2814                 list_add_tail(&cmd->list, &vscsi->active_q);
2815                 spin_unlock_bh(&vscsi->intr_lock);
2817                 srp_tsk->lun.scsi_lun[0] &= 0x3f;
2819                 dev_dbg(&vscsi->dev, "calling submit_tmr, func %d\n",
2820                         srp_tsk->tsk_mgmt_func);
2821                 rc = target_submit_tmr(&cmd->se_cmd, nexus->se_sess, NULL,
2822                                        scsilun_to_int(&srp_tsk->lun), srp_tsk,
2823                                        tcm_type, GFP_KERNEL, tag_to_abort, 0);
2824                 if (rc) {
2825                         dev_err(&vscsi->dev, "target_submit_tmr failed, rc %d\n",
2826                                 rc);
2827                         spin_lock_bh(&vscsi->intr_lock);
2828                         list_del(&cmd->list);
2829                         spin_unlock_bh(&vscsi->intr_lock);
2830                         cmd->se_cmd.se_tmr_req->response =
2831                                 TMR_FUNCTION_REJECTED;
2832                 }
2833         }
2835         if (rc)
2836                 transport_send_check_condition_and_sense(&cmd->se_cmd, 0, 0);
2839 static void ibmvscsis_scheduler(struct work_struct *work)
2841         struct ibmvscsis_cmd *cmd = container_of(work, struct ibmvscsis_cmd,
2842                                                  work);
2843         struct scsi_info *vscsi = cmd->adapter;
2845         spin_lock_bh(&vscsi->intr_lock);
2847         /* Remove from schedule_q */
2848         list_del(&cmd->list);
2850         /* Don't submit cmd if we're disconnecting */
2851         if (vscsi->flags & (SCHEDULE_DISCONNECT | DISCONNECT_SCHEDULED)) {
2852                 ibmvscsis_free_cmd_resources(vscsi, cmd);
2854                 /* ibmvscsis_disconnect might be waiting for us */
2855                 if (list_empty(&vscsi->active_q) &&
2856                     list_empty(&vscsi->schedule_q) &&
2857                     (vscsi->flags & WAIT_FOR_IDLE)) {
2858                         vscsi->flags &= ~WAIT_FOR_IDLE;
2859                         complete(&vscsi->wait_idle);
2860                 }
2862                 spin_unlock_bh(&vscsi->intr_lock);
2863                 return;
2864         }
2866         spin_unlock_bh(&vscsi->intr_lock);
2868         switch (cmd->type) {
2869         case SCSI_CDB:
2870                 ibmvscsis_parse_cmd(vscsi, cmd);
2871                 break;
2872         case TASK_MANAGEMENT:
2873                 ibmvscsis_parse_task(vscsi, cmd);
2874                 break;
2875         default:
2876                 dev_err(&vscsi->dev, "scheduler, invalid cmd type %d\n",
2877                         cmd->type);
2878                 spin_lock_bh(&vscsi->intr_lock);
2879                 ibmvscsis_free_cmd_resources(vscsi, cmd);
2880                 spin_unlock_bh(&vscsi->intr_lock);
2881                 break;
2882         }
2885 static int ibmvscsis_alloc_cmds(struct scsi_info *vscsi, int num)
2887         struct ibmvscsis_cmd *cmd;
2888         int i;
2890         INIT_LIST_HEAD(&vscsi->free_cmd);
2891         vscsi->cmd_pool = kcalloc(num, sizeof(struct ibmvscsis_cmd),
2892                                   GFP_KERNEL);
2893         if (!vscsi->cmd_pool)
2894                 return -ENOMEM;
2896         for (i = 0, cmd = (struct ibmvscsis_cmd *)vscsi->cmd_pool; i < num;
2897              i++, cmd++) {
2898                 cmd->abort_cmd = NULL;
2899                 cmd->adapter = vscsi;
2900                 INIT_WORK(&cmd->work, ibmvscsis_scheduler);
2901                 list_add_tail(&cmd->list, &vscsi->free_cmd);
2902         }
2904         return 0;
2907 static void ibmvscsis_free_cmds(struct scsi_info *vscsi)
2909         kfree(vscsi->cmd_pool);
2910         vscsi->cmd_pool = NULL;
2911         INIT_LIST_HEAD(&vscsi->free_cmd);
2914 /**
2915  * ibmvscsis_service_wait_q() - Service Waiting Queue
2916  * @timer:      Pointer to timer which has expired
2917  *
2918  * This routine is called when the timer pops to service the waiting
2919  * queue. Elements on the queue have completed, their responses have been
2920  * copied to the client, but the client's response queue was full so
2921  * the queue message could not be sent. The routine grabs the proper locks
2922  * and calls send messages.
2923  *
2924  * EXECUTION ENVIRONMENT:
2925  *      called at interrupt level
2926  */
2927 static enum hrtimer_restart ibmvscsis_service_wait_q(struct hrtimer *timer)
2929         struct timer_cb *p_timer = container_of(timer, struct timer_cb, timer);
2930         struct scsi_info *vscsi = container_of(p_timer, struct scsi_info,
2931                                                rsp_q_timer);
2933         spin_lock_bh(&vscsi->intr_lock);
2934         p_timer->timer_pops += 1;
2935         p_timer->started = false;
2936         ibmvscsis_send_messages(vscsi);
2937         spin_unlock_bh(&vscsi->intr_lock);
2939         return HRTIMER_NORESTART;
2942 static long ibmvscsis_alloctimer(struct scsi_info *vscsi)
2944         struct timer_cb *p_timer;
2946         p_timer = &vscsi->rsp_q_timer;
2947         hrtimer_init(&p_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2949         p_timer->timer.function = ibmvscsis_service_wait_q;
2950         p_timer->started = false;
2951         p_timer->timer_pops = 0;
2953         return ADAPT_SUCCESS;
2956 static void ibmvscsis_freetimer(struct scsi_info *vscsi)
2958         struct timer_cb *p_timer;
2960         p_timer = &vscsi->rsp_q_timer;
2962         (void)hrtimer_cancel(&p_timer->timer);
2964         p_timer->started = false;
2965         p_timer->timer_pops = 0;
2968 static irqreturn_t ibmvscsis_interrupt(int dummy, void *data)
2970         struct scsi_info *vscsi = data;
2972         vio_disable_interrupts(vscsi->dma_dev);
2973         tasklet_schedule(&vscsi->work_task);
2975         return IRQ_HANDLED;
2978 /**
2979  * ibmvscsis_enable_change_state() - Set new state based on enabled status
2980  * @vscsi:      Pointer to our adapter structure
2981  *
2982  * This function determines our new state now that we are enabled.  This
2983  * may involve sending an Init Complete message to the client.
2984  *
2985  * Must be called with interrupt lock held.
2986  */
2987 static long ibmvscsis_enable_change_state(struct scsi_info *vscsi)
2989         int bytes;
2990         long rc = ADAPT_SUCCESS;
2992         bytes = vscsi->cmd_q.size * PAGE_SIZE;
2993         rc = h_reg_crq(vscsi->dds.unit_id, vscsi->cmd_q.crq_token, bytes);
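        /*
         * H_CLOSED is assumed to mean the client has not yet registered its
         * side of the CRQ, so it is treated the same as H_SUCCESS while we
         * wait for a connection.
         */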
2994         if (rc == H_CLOSED || rc == H_SUCCESS) {
2995                 vscsi->state = WAIT_CONNECTION;
2996                 rc = ibmvscsis_establish_new_q(vscsi);
2997         }
2999         if (rc != ADAPT_SUCCESS) {
3000                 vscsi->state = ERR_DISCONNECTED;
3001                 vscsi->flags |= RESPONSE_Q_DOWN;
3002         }
3004         return rc;
3007 /**
3008  * ibmvscsis_create_command_q() - Create Command Queue
3009  * @vscsi:      Pointer to our adapter structure
3010  * @num_cmds:   Currently unused.  In the future, may be used to determine
3011  *              the size of the CRQ.
3012  *
3013  * Allocates memory for the command queue, maps it for DMA to obtain an
3014  * ioba, and initializes the command/response queue.
3015  *
3016  * EXECUTION ENVIRONMENT:
3017  *      Process level only
3018  */
3019 static long ibmvscsis_create_command_q(struct scsi_info *vscsi, int num_cmds)
3021         int pages;
3022         struct vio_dev *vdev = vscsi->dma_dev;
3024         /* We might support multiple pages in the future, but just 1 for now */
3025         pages = 1;
3027         vscsi->cmd_q.size = pages;
3029         vscsi->cmd_q.base_addr =
3030                 (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);
3031         if (!vscsi->cmd_q.base_addr)
3032                 return -ENOMEM;
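        /*
         * Example sizing (assuming CRQ_PER_PAGE is PAGE_SIZE divided by the
         * 16-byte CRQ element size): a 4K page holds 256 elements, giving a
         * mask of 0xff; the queue index wraps with (index + 1) & mask.
         */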
3034         vscsi->cmd_q.mask = ((uint)pages * CRQ_PER_PAGE) - 1;
3036         vscsi->cmd_q.crq_token = dma_map_single(&vdev->dev,
3037                                                 vscsi->cmd_q.base_addr,
3038                                                 PAGE_SIZE, DMA_BIDIRECTIONAL);
3039         if (dma_mapping_error(&vdev->dev, vscsi->cmd_q.crq_token)) {
3040                 free_page((unsigned long)vscsi->cmd_q.base_addr);
3041                 return -ENOMEM;
3042         }
3044         return 0;
3047 /**
3048  * ibmvscsis_destroy_command_q() - Destroy Command Queue
3049  * @vscsi:      Pointer to our adapter structure
3050  *
3051  * Releases memory for command queue and unmaps mapped remote memory.
3052  *
3053  * EXECUTION ENVIRONMENT:
3054  *      Process level only
3055  */
3056 static void ibmvscsis_destroy_command_q(struct scsi_info *vscsi)
3058         dma_unmap_single(&vscsi->dma_dev->dev, vscsi->cmd_q.crq_token,
3059                          PAGE_SIZE, DMA_BIDIRECTIONAL);
3060         free_page((unsigned long)vscsi->cmd_q.base_addr);
3061         vscsi->cmd_q.base_addr = NULL;
3062         vscsi->state = NO_QUEUE;
3065 static u8 ibmvscsis_fast_fail(struct scsi_info *vscsi,
3066                               struct ibmvscsis_cmd *cmd)
3068         struct iu_entry *iue = cmd->iue;
3069         struct se_cmd *se_cmd = &cmd->se_cmd;
3070         struct srp_cmd *srp = (struct srp_cmd *)iue->sbuf->buf;
3071         struct scsi_sense_hdr sshdr;
3072         u8 rc = se_cmd->scsi_status;
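        /*
         * "Fast fail" handling, as implemented below: when fast_fail is
         * enabled and a READ or WRITE completed with a HARDWARE_ERROR sense
         * key while either none or all of the data moved, report a clean
         * status instead and set CMD_FAST_FAIL so srp_build_response() picks
         * the unsuccessful-completion notification bits for this command.
         */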
3074         if (vscsi->fast_fail && (READ_CMD(srp->cdb) || WRITE_CMD(srp->cdb)))
3075                 if (scsi_normalize_sense(se_cmd->sense_buffer,
3076                                          se_cmd->scsi_sense_length, &sshdr))
3077                         if (sshdr.sense_key == HARDWARE_ERROR &&
3078                             (se_cmd->residual_count == 0 ||
3079                              se_cmd->residual_count == se_cmd->data_length)) {
3080                                 rc = NO_SENSE;
3081                                 cmd->flags |= CMD_FAST_FAIL;
3082                         }
3084         return rc;
3087 /**
3088  * srp_build_response() - Build an SRP response buffer
3089  * @vscsi:      Pointer to our adapter structure
3090  * @cmd:        Pointer to command for which to send the response
3091  * @len_p:      Where to return the length of the IU response sent.  This
3092  *              is needed to construct the CRQ response.
3093  *
3094  * Build the SRP response buffer and copy it to the client's memory space.
3095  */
3096 static long srp_build_response(struct scsi_info *vscsi,
3097                                struct ibmvscsis_cmd *cmd, uint *len_p)
3099         struct iu_entry *iue = cmd->iue;
3100         struct se_cmd *se_cmd = &cmd->se_cmd;
3101         struct srp_rsp *rsp;
3102         uint len;
3103         u32 rsp_code;
3104         char *data;
3105         u32 *tsk_status;
3106         long rc = ADAPT_SUCCESS;
3108         spin_lock_bh(&vscsi->intr_lock);
3110         rsp = &vio_iu(iue)->srp.rsp;
3111         len = sizeof(*rsp);
3112         memset(rsp, 0, len);
3113         data = rsp->data;
3115         rsp->opcode = SRP_RSP;
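        /*
         * REQUEST LIMIT DELTA returns flow-control credits to the initiator:
         * one for this response plus whatever credit the target has
         * accumulated (vscsi->credit is cleared once the copy below
         * succeeds).
         */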
3117         rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit);
3118         rsp->tag = cmd->rsp.tag;
3119         rsp->flags = 0;
3121         if (cmd->type == SCSI_CDB) {
3122                 rsp->status = ibmvscsis_fast_fail(vscsi, cmd);
3123                 if (rsp->status) {
3124                         dev_dbg(&vscsi->dev, "build_resp: cmd %p, scsi status %d\n",
3125                                 cmd, (int)rsp->status);
3126                         ibmvscsis_determine_resid(se_cmd, rsp);
3127                         if (se_cmd->scsi_sense_length && se_cmd->sense_buffer) {
3128                                 rsp->sense_data_len =
3129                                         cpu_to_be32(se_cmd->scsi_sense_length);
3130                                 rsp->flags |= SRP_RSP_FLAG_SNSVALID;
3131                                 len += se_cmd->scsi_sense_length;
3132                                 memcpy(data, se_cmd->sense_buffer,
3133                                        se_cmd->scsi_sense_length);
3134                         }
3135                         rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
3136                                 UCSOLNT_RESP_SHIFT;
3137                 } else if (cmd->flags & CMD_FAST_FAIL) {
3138                         dev_dbg(&vscsi->dev, "build_resp: cmd %p, fast fail\n",
3139                                 cmd);
3140                         rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
3141                                 UCSOLNT_RESP_SHIFT;
3142                 } else {
3143                         rsp->sol_not = (cmd->rsp.sol_not & SCSOLNT) >>
3144                                 SCSOLNT_RESP_SHIFT;
3145                 }
3146         } else {
3147                 /* this is task management */
3148                 rsp->status = 0;
3149                 rsp->resp_data_len = cpu_to_be32(4);
3150                 rsp->flags |= SRP_RSP_FLAG_RSPVALID;
3152                 switch (se_cmd->se_tmr_req->response) {
3153                 case TMR_FUNCTION_COMPLETE:
3154                 case TMR_TASK_DOES_NOT_EXIST:
3155                         rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_COMPLETE;
3156                         rsp->sol_not = (cmd->rsp.sol_not & SCSOLNT) >>
3157                                 SCSOLNT_RESP_SHIFT;
3158                         break;
3159                 case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
3160                 case TMR_LUN_DOES_NOT_EXIST:
3161                         rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_NOT_SUPPORTED;
3162                         rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
3163                                 UCSOLNT_RESP_SHIFT;
3164                         break;
3165                 case TMR_FUNCTION_FAILED:
3166                 case TMR_FUNCTION_REJECTED:
3167                 default:
3168                         rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_FAILED;
3169                         rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
3170                                 UCSOLNT_RESP_SHIFT;
3171                         break;
3172                 }
3174                 tsk_status = (u32 *)data;
3175                 *tsk_status = cpu_to_be32(rsp_code);
3176                 data = (char *)(tsk_status + 1);
3177                 len += 4;
3178         }
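        /*
         * Make sure the response IU built above is visible in memory before
         * the hypervisor copies it to the client; same reasoning as the
         * barriers in ibmvscsis_rdma().
         */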
3180         dma_wmb();
3181         rc = h_copy_rdma(len, vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma,
3182                          vscsi->dds.window[REMOTE].liobn,
3183                          be64_to_cpu(iue->remote_token));
3185         switch (rc) {
3186         case H_SUCCESS:
3187                 vscsi->credit = 0;
3188                 *len_p = len;
3189                 break;
3190         case H_PERMISSION:
3191                 if (connection_broken(vscsi))
3192                         vscsi->flags |= RESPONSE_Q_DOWN | CLIENT_FAILED;
3194                 dev_err(&vscsi->dev, "build_response: error copying to client, rc %ld, flags 0x%x, state 0x%hx\n",
3195                         rc, vscsi->flags, vscsi->state);
3196                 break;
3197         case H_SOURCE_PARM:
3198         case H_DEST_PARM:
3199         default:
3200                 dev_err(&vscsi->dev, "build_response: error copying to client, rc %ld\n",
3201                         rc);
3202                 break;
3203         }
3205         spin_unlock_bh(&vscsi->intr_lock);
3207         return rc;
3210 static int ibmvscsis_rdma(struct ibmvscsis_cmd *cmd, struct scatterlist *sg,
3211                           int nsg, struct srp_direct_buf *md, int nmd,
3212                           enum dma_data_direction dir, unsigned int bytes)
3214         struct iu_entry *iue = cmd->iue;
3215         struct srp_target *target = iue->target;
3216         struct scsi_info *vscsi = target->ldata;
3217         struct scatterlist *sgp;
3218         dma_addr_t client_ioba, server_ioba;
3219         ulong buf_len;
3220         ulong client_len, server_len;
3221         int md_idx;
3222         long tx_len;
3223         long rc = 0;
3225         if (bytes == 0)
3226                 return 0;
3228         sgp = sg;
3229         client_len = 0;
3230         server_len = 0;
3231         md_idx = 0;
3232         tx_len = bytes;
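        /*
         * Copy loop: walk the client's memory descriptors md[0..nmd) and the
         * local scatterlist in parallel, transferring at most
         * min(client_len, server_len, max_vdma_size) bytes per h_copy_rdma
         * call until tx_len bytes have moved or an error occurs.
         */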
3234         do {
3235                 if (client_len == 0) {
3236                         if (md_idx >= nmd) {
3237                                 dev_err(&vscsi->dev, "rdma: ran out of client memory descriptors\n");
3238                                 rc = -EIO;
3239                                 break;
3240                         }
3241                         client_ioba = be64_to_cpu(md[md_idx].va);
3242                         client_len = be32_to_cpu(md[md_idx].len);
3243                 }
3244                 if (server_len == 0) {
3245                         if (!sgp) {
3246                                 dev_err(&vscsi->dev, "rdma: ran out of scatter/gather list\n");
3247                                 rc = -EIO;
3248                                 break;
3249                         }
3250                         server_ioba = sg_dma_address(sgp);
3251                         server_len = sg_dma_len(sgp);
3252                 }
3254                 buf_len = tx_len;
3256                 if (buf_len > client_len)
3257                         buf_len = client_len;
3259                 if (buf_len > server_len)
3260                         buf_len = server_len;
3262                 if (buf_len > max_vdma_size)
3263                         buf_len = max_vdma_size;
3265                 if (dir == DMA_TO_DEVICE) {
3266                         /* read from client */
3267                         rc = h_copy_rdma(buf_len,
3268                                          vscsi->dds.window[REMOTE].liobn,
3269                                          client_ioba,
3270                                          vscsi->dds.window[LOCAL].liobn,
3271                                          server_ioba);
3272                 } else {
3273                         /* The h_copy_rdma will cause phyp, running in another
3274                          * partition, to read memory, so we need to make sure
3275                          * the data has been written out, hence these syncs.
3276                          */
3277                         /* ensure that everything is in memory */
3278                         isync();
3279                         /* ensure that memory has been made visible */
3280                         dma_wmb();
3281                         rc = h_copy_rdma(buf_len,
3282                                          vscsi->dds.window[LOCAL].liobn,
3283                                          server_ioba,
3284                                          vscsi->dds.window[REMOTE].liobn,
3285                                          client_ioba);
3286                 }
3287                 switch (rc) {
3288                 case H_SUCCESS:
3289                         break;
3290                 case H_PERMISSION:
3291                 case H_SOURCE_PARM:
3292                 case H_DEST_PARM:
3293                         if (connection_broken(vscsi)) {
3294                                 spin_lock_bh(&vscsi->intr_lock);
3295                                 vscsi->flags |=
3296                                         (RESPONSE_Q_DOWN | CLIENT_FAILED);
3297                                 spin_unlock_bh(&vscsi->intr_lock);
3298                         }
3299                         dev_err(&vscsi->dev, "rdma: h_copy_rdma failed, rc %ld\n",
3300                                 rc);
3301                         break;
3303                 default:
3304                         dev_err(&vscsi->dev, "rdma: unknown error %ld from h_copy_rdma\n",
3305                                 rc);
3306                         break;
3307                 }
3309                 if (!rc) {
3310                         tx_len -= buf_len;
3311                         if (tx_len) {
3312                                 client_len -= buf_len;
3313                                 if (client_len == 0)
3314                                         md_idx++;
3315                                 else
3316                                         client_ioba += buf_len;
3318                                 server_len -= buf_len;
3319                                 if (server_len == 0)
3320                                         sgp = sg_next(sgp);
3321                                 else
3322                                         server_ioba += buf_len;
3323                         } else {
3324                                 break;
3325                         }
3326                 }
3327         } while (!rc);
3329         return rc;
3332 /**
3333  * ibmvscsis_handle_crq() - Handle CRQ
3334  * @data:       Pointer to our adapter structure
3335  *
3336  * Read the command elements from the command queue and copy the payloads
3337  * associated with the command elements to local memory and execute the
3338  * SRP requests.
3339  *
3340  * Note: this is an edge triggered interrupt. It cannot be shared.
3341  */
3342 static void ibmvscsis_handle_crq(unsigned long data)
3344         struct scsi_info *vscsi = (struct scsi_info *)data;
3345         struct viosrp_crq *crq;
3346         long rc;
3347         bool ack = true;
3348         volatile u8 valid;
3350         spin_lock_bh(&vscsi->intr_lock);
3352         dev_dbg(&vscsi->dev, "got interrupt\n");
3354         /*
3355          * if we are in a path where we are waiting for all pending commands
3356          * to complete because we received a transport event and anything in
3357          * the command queue is for a new connection, do nothing
3358          */
3359         if (TARGET_STOP(vscsi)) {
3360                 vio_enable_interrupts(vscsi->dma_dev);
3362                 dev_dbg(&vscsi->dev, "handle_crq, don't process: flags 0x%x, state 0x%hx\n",
3363                         vscsi->flags, vscsi->state);
3364                 spin_unlock_bh(&vscsi->intr_lock);
3365                 return;
3366         }
3368         rc = vscsi->flags & SCHEDULE_DISCONNECT;
3369         crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
3370         valid = crq->valid;
3371         dma_rmb();
3373         while (valid) {
3374                 /*
3375                  * These are edge triggered interrupts. After dropping out of
3376                  * the while loop, the code must check for work since an
3377                  * interrupt could be lost, and an element be left on the queue,
3378                  * hence the label.
3379                  */
3380 cmd_work:
3381                 vscsi->cmd_q.index =
3382                         (vscsi->cmd_q.index + 1) & vscsi->cmd_q.mask;
3384                 if (!rc) {
3385                         rc = ibmvscsis_parse_command(vscsi, crq);
3386                 } else {
3387                         if ((uint)crq->valid == VALID_TRANS_EVENT) {
3388                                 /*
3389                                  * must service the transport layer events even
3390                  * in an error state, don't break out until all
3391                                  * the consecutive transport events have been
3392                                  * processed
3393                                  */
3394                                 rc = ibmvscsis_trans_event(vscsi, crq);
3395                         } else if (vscsi->flags & TRANS_EVENT) {
3396                                 /*
3397                                  * if a transport event has occurred leave
3398                                  * everything but transport events on the queue
3399                                  *
3400                                  * need to decrement the queue index so we can
3401                                  * look at the element again
3402                                  */
3403                                 if (vscsi->cmd_q.index)
3404                                         vscsi->cmd_q.index -= 1;
3405                                 else
3406                                         /*
3407                                          * index is at 0, it just wrapped;
3408                                          * have it index the last element in the queue
3409                                          */
3410                                         vscsi->cmd_q.index = vscsi->cmd_q.mask;
3411                                 break;
3412                         }
3413                 }
3415                 crq->valid = INVALIDATE_CMD_RESP_EL;
3417                 crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
3418                 valid = crq->valid;
3419                 dma_rmb();
3420         }
3422         if (!rc) {
3423                 if (ack) {
3424                         vio_enable_interrupts(vscsi->dma_dev);
3425                         ack = false;
3426                         dev_dbg(&vscsi->dev, "handle_crq, reenabling interrupts\n");
3427                 }
3428                 valid = crq->valid;
3429                 dma_rmb();
3430                 if (valid)
3431                         goto cmd_work;
3432         } else {
3433                 dev_dbg(&vscsi->dev, "handle_crq, error: flags 0x%x, state 0x%hx, crq index 0x%x\n",
3434                         vscsi->flags, vscsi->state, vscsi->cmd_q.index);
3435         }
3437         dev_dbg(&vscsi->dev, "Leaving handle_crq: schedule_q empty %d, flags 0x%x, state 0x%hx\n",
3438                 (int)list_empty(&vscsi->schedule_q), vscsi->flags,
3439                 vscsi->state);
3441         spin_unlock_bh(&vscsi->intr_lock);
3444 static int ibmvscsis_probe(struct vio_dev *vdev,
3445                            const struct vio_device_id *id)
3447         struct scsi_info *vscsi;
3448         int rc = 0;
3449         long hrc = 0;
3450         char wq_name[24];
3452         vscsi = kzalloc(sizeof(*vscsi), GFP_KERNEL);
3453         if (!vscsi) {
3454                 rc = -ENOMEM;
3455                 dev_err(&vdev->dev, "probe: allocation of adapter failed\n");
3456                 return rc;
3457         }
3459         vscsi->dma_dev = vdev;
3460         vscsi->dev = vdev->dev;
3461         INIT_LIST_HEAD(&vscsi->schedule_q);
3462         INIT_LIST_HEAD(&vscsi->waiting_rsp);
3463         INIT_LIST_HEAD(&vscsi->active_q);
3465         snprintf(vscsi->tport.tport_name, IBMVSCSIS_NAMELEN, "%s",
3466                  dev_name(&vdev->dev));
3468         dev_dbg(&vscsi->dev, "probe tport_name: %s\n", vscsi->tport.tport_name);
3470         rc = read_dma_window(vscsi);
3471         if (rc)
3472                 goto free_adapter;
3473         dev_dbg(&vscsi->dev, "Probe: liobn 0x%x, riobn 0x%x\n",
3474                 vscsi->dds.window[LOCAL].liobn,
3475                 vscsi->dds.window[REMOTE].liobn);
3477         snprintf(vscsi->eye, sizeof(vscsi->eye), "VSCSI %s", vdev->name);
3479         vscsi->dds.unit_id = vdev->unit_address;
3480         strscpy(vscsi->dds.partition_name, partition_name,
3481                 sizeof(vscsi->dds.partition_name));
3482         vscsi->dds.partition_num = partition_number;
3484         spin_lock_bh(&ibmvscsis_dev_lock);
3485         list_add_tail(&vscsi->list, &ibmvscsis_dev_list);
3486         spin_unlock_bh(&ibmvscsis_dev_lock);
3488         /*
3489          * TBD: How do we determine # of cmds to request?  Do we know how
3490          * many "children" we have?
3491          */
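        /*
         * INITIAL_SRP_LIMIT sizes both the srp_target IU pool allocated
         * below and the command pool created by ibmvscsis_alloc_cmds().
         */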
3492         vscsi->request_limit = INITIAL_SRP_LIMIT;
3493         rc = srp_target_alloc(&vscsi->target, &vdev->dev, vscsi->request_limit,
3494                               SRP_MAX_IU_LEN);
3495         if (rc)
3496                 goto rem_list;
3498         vscsi->target.ldata = vscsi;
3500         rc = ibmvscsis_alloc_cmds(vscsi, vscsi->request_limit);
3501         if (rc) {
3502                 dev_err(&vscsi->dev, "alloc_cmds failed, rc %d, num %d\n",
3503                         rc, vscsi->request_limit);
3504                 goto free_target;
3505         }
3507         /*
3508          * Note: the lock is used in freeing timers, so must initialize
3509          * first so that ordering in case of error is correct.
3510          */
3511         spin_lock_init(&vscsi->intr_lock);
3513         rc = ibmvscsis_alloctimer(vscsi);
3514         if (rc) {
3515                 dev_err(&vscsi->dev, "probe: alloctimer failed, rc %d\n", rc);
3516                 goto free_cmds;
3517         }
3519         rc = ibmvscsis_create_command_q(vscsi, 256);
3520         if (rc) {
3521                 dev_err(&vscsi->dev, "probe: create_command_q failed, rc %d\n",
3522                         rc);
3523                 goto free_timer;
3524         }
3526         vscsi->map_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
3527         if (!vscsi->map_buf) {
3528                 rc = -ENOMEM;
3529                 dev_err(&vscsi->dev, "probe: allocating cmd buffer failed\n");
3530                 goto destroy_queue;
3531         }
3533         vscsi->map_ioba = dma_map_single(&vdev->dev, vscsi->map_buf, PAGE_SIZE,
3534                                          DMA_BIDIRECTIONAL);
3535         if (dma_mapping_error(&vdev->dev, vscsi->map_ioba)) {
3536                 rc = -ENOMEM;
3537                 dev_err(&vscsi->dev, "probe: error mapping command buffer\n");
3538                 goto free_buf;
3539         }
3541         hrc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
3542                        (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
3543                        0);
3544         if (hrc == H_SUCCESS)
3545                 vscsi->client_data.partition_number =
3546                         be64_to_cpu(*(u64 *)vscsi->map_buf);
3547         /*
3548          * We expect the VIOCTL to fail if we're configured as "any
3549          * client can connect" and the client isn't activated yet.
3550          * We'll make the call again when the client sends an init msg.
3551          */
3552         dev_dbg(&vscsi->dev, "probe hrc %ld, client partition num %d\n",
3553                 hrc, vscsi->client_data.partition_number);
3555         tasklet_init(&vscsi->work_task, ibmvscsis_handle_crq,
3556                      (unsigned long)vscsi);
3558         init_completion(&vscsi->wait_idle);
3559         init_completion(&vscsi->unconfig);
3561         snprintf(wq_name, 24, "ibmvscsis%s", dev_name(&vdev->dev));
3562         vscsi->work_q = create_workqueue(wq_name);
3563         if (!vscsi->work_q) {
3564                 rc = -ENOMEM;
3565                 dev_err(&vscsi->dev, "create_workqueue failed\n");
3566                 goto unmap_buf;
3567         }
3569         rc = request_irq(vdev->irq, ibmvscsis_interrupt, 0, "ibmvscsis", vscsi);
3570         if (rc) {
3571                 rc = -EPERM;
3572                 dev_err(&vscsi->dev, "probe: request_irq failed, rc %d\n", rc);
3573                 goto destroy_WQ;
3574         }
3576         vscsi->state = WAIT_ENABLED;
3578         dev_set_drvdata(&vdev->dev, vscsi);
3580         return 0;
3582 destroy_WQ:
3583         destroy_workqueue(vscsi->work_q);
3584 unmap_buf:
3585         dma_unmap_single(&vdev->dev, vscsi->map_ioba, PAGE_SIZE,
3586                          DMA_BIDIRECTIONAL);
3587 free_buf:
3588         kfree(vscsi->map_buf);
3589 destroy_queue:
3590         tasklet_kill(&vscsi->work_task);
3591         ibmvscsis_unregister_command_q(vscsi);
3592         ibmvscsis_destroy_command_q(vscsi);
3593 free_timer:
3594         ibmvscsis_freetimer(vscsi);
3595 free_cmds:
3596         ibmvscsis_free_cmds(vscsi);
3597 free_target:
3598         srp_target_free(&vscsi->target);
3599 rem_list:
3600         spin_lock_bh(&ibmvscsis_dev_lock);
3601         list_del(&vscsi->list);
3602         spin_unlock_bh(&ibmvscsis_dev_lock);
3603 free_adapter:
3604         kfree(vscsi);
3606         return rc;
3609 static int ibmvscsis_remove(struct vio_dev *vdev)
3611         struct scsi_info *vscsi = dev_get_drvdata(&vdev->dev);
3613         dev_dbg(&vscsi->dev, "remove (%s)\n", dev_name(&vscsi->dma_dev->dev));
3615         spin_lock_bh(&vscsi->intr_lock);
3616         ibmvscsis_post_disconnect(vscsi, UNCONFIGURING, 0);
3617         vscsi->flags |= CFG_SLEEPING;
3618         spin_unlock_bh(&vscsi->intr_lock);
3619         wait_for_completion(&vscsi->unconfig);
3621         vio_disable_interrupts(vdev);
3622         free_irq(vdev->irq, vscsi);
3623         destroy_workqueue(vscsi->work_q);
3624         dma_unmap_single(&vdev->dev, vscsi->map_ioba, PAGE_SIZE,
3625                          DMA_BIDIRECTIONAL);
3626         kfree(vscsi->map_buf);
3627         tasklet_kill(&vscsi->work_task);
3628         ibmvscsis_destroy_command_q(vscsi);
3629         ibmvscsis_freetimer(vscsi);
3630         ibmvscsis_free_cmds(vscsi);
3631         srp_target_free(&vscsi->target);
3632         spin_lock_bh(&ibmvscsis_dev_lock);
3633         list_del(&vscsi->list);
3634         spin_unlock_bh(&ibmvscsis_dev_lock);
3635         kfree(vscsi);
3637         return 0;
3640 static ssize_t system_id_show(struct device *dev,
3641                               struct device_attribute *attr, char *buf)
3643         return snprintf(buf, PAGE_SIZE, "%s\n", system_id);
3646 static ssize_t partition_number_show(struct device *dev,
3647                                      struct device_attribute *attr, char *buf)
3649         return snprintf(buf, PAGE_SIZE, "%x\n", partition_number);
3652 static ssize_t unit_address_show(struct device *dev,
3653                                  struct device_attribute *attr, char *buf)
3655         struct scsi_info *vscsi = container_of(dev, struct scsi_info, dev);
3657         return snprintf(buf, PAGE_SIZE, "%x\n", vscsi->dma_dev->unit_address);
3660 static int ibmvscsis_get_system_info(void)
3662         struct device_node *rootdn, *vdevdn;
3663         const char *id, *model, *name;
3664         const uint *num;
3666         rootdn = of_find_node_by_path("/");
3667         if (!rootdn)
3668                 return -ENOENT;
3670         model = of_get_property(rootdn, "model", NULL);
3671         id = of_get_property(rootdn, "system-id", NULL);
3672         if (model && id)
3673                 snprintf(system_id, sizeof(system_id), "%s-%s", model, id);
3675         name = of_get_property(rootdn, "ibm,partition-name", NULL);
3676         if (name)
3677                 strscpy(partition_name, name, sizeof(partition_name));
3679         num = of_get_property(rootdn, "ibm,partition-no", NULL);
3680         if (num)
3681                 partition_number = of_read_number(num, 1);
3683         of_node_put(rootdn);
3685         vdevdn = of_find_node_by_path("/vdevice");
3686         if (vdevdn) {
3687                 const uint *mvds;
3689                 mvds = of_get_property(vdevdn, "ibm,max-virtual-dma-size",
3690                                        NULL);
3691                 if (mvds)
3692                         max_vdma_size = *mvds;
3693                 of_node_put(vdevdn);
3694         }
3696         return 0;
3699 static char *ibmvscsis_get_fabric_name(void)
3701         return "ibmvscsis";
3704 static char *ibmvscsis_get_fabric_wwn(struct se_portal_group *se_tpg)
3706         struct ibmvscsis_tport *tport =
3707                 container_of(se_tpg, struct ibmvscsis_tport, se_tpg);
3709         return tport->tport_name;
3712 static u16 ibmvscsis_get_tag(struct se_portal_group *se_tpg)
3714         struct ibmvscsis_tport *tport =
3715                 container_of(se_tpg, struct ibmvscsis_tport, se_tpg);
3717         return tport->tport_tpgt;
3720 static u32 ibmvscsis_get_default_depth(struct se_portal_group *se_tpg)
3722         return 1;
3725 static int ibmvscsis_check_true(struct se_portal_group *se_tpg)
3727         return 1;
3730 static int ibmvscsis_check_false(struct se_portal_group *se_tpg)
3732         return 0;
3735 static u32 ibmvscsis_tpg_get_inst_index(struct se_portal_group *se_tpg)
3737         return 1;
3740 static int ibmvscsis_check_stop_free(struct se_cmd *se_cmd)
3742         return target_put_sess_cmd(se_cmd);
3745 static void ibmvscsis_release_cmd(struct se_cmd *se_cmd)
3747         struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
3748                                                  se_cmd);
3749         struct scsi_info *vscsi = cmd->adapter;
3751         spin_lock_bh(&vscsi->intr_lock);
3752         /* Remove from active_q */
3753         list_move_tail(&cmd->list, &vscsi->waiting_rsp);
3754         ibmvscsis_send_messages(vscsi);
3755         spin_unlock_bh(&vscsi->intr_lock);
3758 static u32 ibmvscsis_sess_get_index(struct se_session *se_sess)
3760         return 0;
3763 static int ibmvscsis_write_pending(struct se_cmd *se_cmd)
3765         struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
3766                                                  se_cmd);
3767         struct scsi_info *vscsi = cmd->adapter;
3768         struct iu_entry *iue = cmd->iue;
3769         int rc;
3771         /*
3772          * If CLIENT_FAILED OR RESPONSE_Q_DOWN, then just return an error,
3773          * since LIO can't do anything about it, and we don't want to
3774          * attempt an srp_transfer_data.
3775          */
3776         if ((vscsi->flags & (CLIENT_FAILED | RESPONSE_Q_DOWN))) {
3777                 dev_err(&vscsi->dev, "write_pending failed since: %d\n",
3778                         vscsi->flags);
3779                 return -EIO;
3781         }
3783         rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma,
3784                                1, 1);
3785         if (rc) {
3786                 dev_err(&vscsi->dev, "srp_transfer_data() failed: %d\n", rc);
3787                 return -EIO;
3788         }
3789         /*
3790          * We now tell TCM to add this WRITE CDB directly into the TCM storage
3791          * object execution queue.
3792          */
3793         target_execute_cmd(se_cmd);
3794         return 0;
3797 static int ibmvscsis_write_pending_status(struct se_cmd *se_cmd)
3799         return 0;
3802 static void ibmvscsis_set_default_node_attrs(struct se_node_acl *nacl)
3806 static int ibmvscsis_get_cmd_state(struct se_cmd *se_cmd)
3808         return 0;
3811 static int ibmvscsis_queue_data_in(struct se_cmd *se_cmd)
3813         struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
3814                                                  se_cmd);
3815         struct iu_entry *iue = cmd->iue;
3816         struct scsi_info *vscsi = cmd->adapter;
3817         char *sd;
3818         uint len = 0;
3819         int rc;
3821         rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma, 1,
3822                                1);
3823         if (rc) {
3824                 dev_err(&vscsi->dev, "srp_transfer_data failed: %d\n", rc);
3825                 sd = se_cmd->sense_buffer;
3826                 se_cmd->scsi_sense_length = 18;
3827                 memset(se_cmd->sense_buffer, 0, se_cmd->scsi_sense_length);
3828                 /* Logical Unit Communication Time-out asc/ascq = 0x0801 */
3829                 scsi_build_sense_buffer(0, se_cmd->sense_buffer, MEDIUM_ERROR,
3830                                         0x08, 0x01);
3831         }
3833         srp_build_response(vscsi, cmd, &len);
3834         cmd->rsp.format = SRP_FORMAT;
3835         cmd->rsp.len = len;
3837         return 0;
3840 static int ibmvscsis_queue_status(struct se_cmd *se_cmd)
3842         struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
3843                                                  se_cmd);
3844         struct scsi_info *vscsi = cmd->adapter;
3845         uint len;
3847         dev_dbg(&vscsi->dev, "queue_status %p\n", se_cmd);
3849         srp_build_response(vscsi, cmd, &len);
3850         cmd->rsp.format = SRP_FORMAT;
3851         cmd->rsp.len = len;
3853         return 0;
3856 static void ibmvscsis_queue_tm_rsp(struct se_cmd *se_cmd)
3858         struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
3859                                                  se_cmd);
3860         struct scsi_info *vscsi = cmd->adapter;
3861         struct ibmvscsis_cmd *cmd_itr;
3862         struct iu_entry *iue = cmd->iue;
3863         struct srp_tsk_mgmt *srp_tsk = &vio_iu(iue)->srp.tsk_mgmt;
3864         u64 tag_to_abort = be64_to_cpu(srp_tsk->task_tag);
3865         uint len;
3867         dev_dbg(&vscsi->dev, "queue_tm_rsp %p, status %d\n",
3868                 se_cmd, (int)se_cmd->se_tmr_req->response);
3870         if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK &&
3871             cmd->se_cmd.se_tmr_req->response == TMR_TASK_DOES_NOT_EXIST) {
3872                 spin_lock_bh(&vscsi->intr_lock);
3873                 list_for_each_entry(cmd_itr, &vscsi->active_q, list) {
3874                         if (tag_to_abort == cmd_itr->se_cmd.tag) {
3875                                 cmd_itr->abort_cmd = cmd;
3876                                 cmd->flags |= DELAY_SEND;
3877                                 break;
3878                         }
3879                 }
3880                 spin_unlock_bh(&vscsi->intr_lock);
3881         }
3883         srp_build_response(vscsi, cmd, &len);
3884         cmd->rsp.format = SRP_FORMAT;
3885         cmd->rsp.len = len;
3888 static void ibmvscsis_aborted_task(struct se_cmd *se_cmd)
3890         struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
3891                                                  se_cmd);
3892         struct scsi_info *vscsi = cmd->adapter;
3894         dev_dbg(&vscsi->dev, "ibmvscsis_aborted_task %p task_tag: %llu\n",
3895                 se_cmd, se_cmd->tag);
3898 static struct se_wwn *ibmvscsis_make_tport(struct target_fabric_configfs *tf,
3899                                            struct config_group *group,
3900                                            const char *name)
3902         struct ibmvscsis_tport *tport;
3903         struct scsi_info *vscsi;
3905         tport = ibmvscsis_lookup_port(name);
3906         if (tport) {
3907                 vscsi = container_of(tport, struct scsi_info, tport);
3908                 tport->tport_proto_id = SCSI_PROTOCOL_SRP;
3909                 dev_dbg(&vscsi->dev, "make_tport(%s), pointer:%p, tport_id:%x\n",
3910                         name, tport, tport->tport_proto_id);
3911                 return &tport->tport_wwn;
3912         }
3914         return ERR_PTR(-EINVAL);
3917 static void ibmvscsis_drop_tport(struct se_wwn *wwn)
3919         struct ibmvscsis_tport *tport = container_of(wwn,
3920                                                      struct ibmvscsis_tport,
3921                                                      tport_wwn);
3922         struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport);
3924         dev_dbg(&vscsi->dev, "drop_tport(%s)\n",
3925                 config_item_name(&tport->tport_wwn.wwn_group.cg_item));
3928 static struct se_portal_group *ibmvscsis_make_tpg(struct se_wwn *wwn,
3929                                                   const char *name)
3931         struct ibmvscsis_tport *tport =
3932                 container_of(wwn, struct ibmvscsis_tport, tport_wwn);
3933         u16 tpgt;
3934         int rc;
3936         if (strstr(name, "tpgt_") != name)
3937                 return ERR_PTR(-EINVAL);
3938         rc = kstrtou16(name + 5, 0, &tpgt);
3939         if (rc)
3940                 return ERR_PTR(rc);
3941         tport->tport_tpgt = tpgt;
3943         tport->releasing = false;
3945         rc = core_tpg_register(&tport->tport_wwn, &tport->se_tpg,
3946                                tport->tport_proto_id);
3947         if (rc)
3948                 return ERR_PTR(rc);
3950         return &tport->se_tpg;
3953 static void ibmvscsis_drop_tpg(struct se_portal_group *se_tpg)
3955         struct ibmvscsis_tport *tport = container_of(se_tpg,
3956                                                      struct ibmvscsis_tport,
3957                                                      se_tpg);
3959         tport->releasing = true;
3960         tport->enabled = false;
3962         /*
3963          * Release the virtual I_T Nexus for this ibmvscsis TPG
3964          */
3965         ibmvscsis_drop_nexus(tport);
3966         /*
3967          * Deregister the se_tpg from TCM..
3968          */
3969         core_tpg_deregister(se_tpg);
3972 static ssize_t ibmvscsis_wwn_version_show(struct config_item *item,
3973                                           char *page)
3975         return scnprintf(page, PAGE_SIZE, "%s\n", IBMVSCSIS_VERSION);
3977 CONFIGFS_ATTR_RO(ibmvscsis_wwn_, version);
3979 static struct configfs_attribute *ibmvscsis_wwn_attrs[] = {
3980         &ibmvscsis_wwn_attr_version,
3981         NULL,
3982 };
3984 static ssize_t ibmvscsis_tpg_enable_show(struct config_item *item,
3985                                          char *page)
3987         struct se_portal_group *se_tpg = to_tpg(item);
3988         struct ibmvscsis_tport *tport = container_of(se_tpg,
3989                                                      struct ibmvscsis_tport,
3990                                                      se_tpg);
3992         return snprintf(page, PAGE_SIZE, "%d\n", (tport->enabled) ? 1 : 0);
3995 static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,
3996                                           const char *page, size_t count)
3998         struct se_portal_group *se_tpg = to_tpg(item);
3999         struct ibmvscsis_tport *tport = container_of(se_tpg,
4000                                                      struct ibmvscsis_tport,
4001                                                      se_tpg);
4002         struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport);
4003         unsigned long tmp;
4004         int rc;
4005         long lrc;
4007         rc = kstrtoul(page, 0, &tmp);
4008         if (rc < 0) {
4009                 dev_err(&vscsi->dev, "Unable to extract srpt_tpg_store_enable\n");
4010                 return -EINVAL;
4011         }
4013         if ((tmp != 0) && (tmp != 1)) {
4014                 dev_err(&vscsi->dev, "Illegal value for srpt_tpg_store_enable\n");
4015                 return -EINVAL;
4016         }
4018         if (tmp) {
4019                 spin_lock_bh(&vscsi->intr_lock);
4020                 tport->enabled = true;
4021                 lrc = ibmvscsis_enable_change_state(vscsi);
4022                 if (lrc)
4023                         dev_err(&vscsi->dev, "enable_change_state failed, rc %ld state %d\n",
4024                                 lrc, vscsi->state);
4025                 spin_unlock_bh(&vscsi->intr_lock);
4026         } else {
4027                 spin_lock_bh(&vscsi->intr_lock);
4028                 tport->enabled = false;
4029                 /* This simulates the server going down */
4030                 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
4031                 spin_unlock_bh(&vscsi->intr_lock);
4032         }
4034         dev_dbg(&vscsi->dev, "tpg_enable_store, tmp %ld, state %d\n", tmp,
4035                 vscsi->state);
4037         return count;
4039 CONFIGFS_ATTR(ibmvscsis_tpg_, enable);
4041 static struct configfs_attribute *ibmvscsis_tpg_attrs[] = {
4042         &ibmvscsis_tpg_attr_enable,
4043         NULL,
4044 };
4046 static const struct target_core_fabric_ops ibmvscsis_ops = {
4047         .module                         = THIS_MODULE,
4048         .name                           = "ibmvscsis",
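        /*
         * With MAX_TXU of 1 MiB this works out to 256 scatter/gather entries
         * on 4K pages (16 on 64K pages), capping a single data transfer at
         * 1 MiB.
         */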
4049         .max_data_sg_nents              = MAX_TXU / PAGE_SIZE,
4050         .get_fabric_name                = ibmvscsis_get_fabric_name,
4051         .tpg_get_wwn                    = ibmvscsis_get_fabric_wwn,
4052         .tpg_get_tag                    = ibmvscsis_get_tag,
4053         .tpg_get_default_depth          = ibmvscsis_get_default_depth,
4054         .tpg_check_demo_mode            = ibmvscsis_check_true,
4055         .tpg_check_demo_mode_cache      = ibmvscsis_check_true,
4056         .tpg_check_demo_mode_write_protect = ibmvscsis_check_false,
4057         .tpg_check_prod_mode_write_protect = ibmvscsis_check_false,
4058         .tpg_get_inst_index             = ibmvscsis_tpg_get_inst_index,
4059         .check_stop_free                = ibmvscsis_check_stop_free,
4060         .release_cmd                    = ibmvscsis_release_cmd,
4061         .sess_get_index                 = ibmvscsis_sess_get_index,
4062         .write_pending                  = ibmvscsis_write_pending,
4063         .write_pending_status           = ibmvscsis_write_pending_status,
4064         .set_default_node_attributes    = ibmvscsis_set_default_node_attrs,
4065         .get_cmd_state                  = ibmvscsis_get_cmd_state,
4066         .queue_data_in                  = ibmvscsis_queue_data_in,
4067         .queue_status                   = ibmvscsis_queue_status,
4068         .queue_tm_rsp                   = ibmvscsis_queue_tm_rsp,
4069         .aborted_task                   = ibmvscsis_aborted_task,
4070         /*
4071          * Setup function pointers for logic in target_core_fabric_configfs.c
4072          */
4073         .fabric_make_wwn                = ibmvscsis_make_tport,
4074         .fabric_drop_wwn                = ibmvscsis_drop_tport,
4075         .fabric_make_tpg                = ibmvscsis_make_tpg,
4076         .fabric_drop_tpg                = ibmvscsis_drop_tpg,
4078         .tfc_wwn_attrs                  = ibmvscsis_wwn_attrs,
4079         .tfc_tpg_base_attrs             = ibmvscsis_tpg_attrs,
4080 };
4082 static void ibmvscsis_dev_release(struct device *dev) {};
4084 static struct device_attribute dev_attr_system_id =
4085         __ATTR(system_id, S_IRUGO, system_id_show, NULL);
4087 static struct device_attribute dev_attr_partition_number =
4088         __ATTR(partition_number, S_IRUGO, partition_number_show, NULL);
4090 static struct device_attribute dev_attr_unit_address =
4091         __ATTR(unit_address, S_IRUGO, unit_address_show, NULL);
4093 static struct attribute *ibmvscsis_dev_attrs[] = {
4094         &dev_attr_system_id.attr,
4095         &dev_attr_partition_number.attr,
4096         &dev_attr_unit_address.attr,
         NULL,
4097 };
4098 ATTRIBUTE_GROUPS(ibmvscsis_dev);
4100 static struct class ibmvscsis_class = {
4101         .name           = "ibmvscsis",
4102         .dev_release    = ibmvscsis_dev_release,
4103         .dev_groups     = ibmvscsis_dev_groups,
4104 };
4106 static const struct vio_device_id ibmvscsis_device_table[] = {
4107         { "v-scsi-host", "IBM,v-scsi-host" },
4108         { "", "" }
4109 };
4110 MODULE_DEVICE_TABLE(vio, ibmvscsis_device_table);
4112 static struct vio_driver ibmvscsis_driver = {
4113         .name = "ibmvscsis",
4114         .id_table = ibmvscsis_device_table,
4115         .probe = ibmvscsis_probe,
4116         .remove = ibmvscsis_remove,
4117 };
4119 /*
4120  * ibmvscsis_init() - Kernel Module initialization
4121  *
4122  * Note: vio_register_driver() registers callback functions, and at least one
4123  * of those callback functions calls TCM - Linux IO Target Subsystem, thus
4124  * the SCSI Target template must be registered before vio_register_driver()
4125  * is called.
4126  */
4127 static int __init ibmvscsis_init(void)
4129         int rc = 0;
4131         rc = ibmvscsis_get_system_info();
4132         if (rc) {
4133                 pr_err("rc %d from get_system_info\n", rc);
4134                 goto out;
4135         }
4137         rc = class_register(&ibmvscsis_class);
4138         if (rc) {
4139                 pr_err("failed class register\n");
4140                 goto out;
4141         }
4143         rc = target_register_template(&ibmvscsis_ops);
4144         if (rc) {
4145                 pr_err("rc %d from target_register_template\n", rc);
4146                 goto unregister_class;
4147         }
4149         rc = vio_register_driver(&ibmvscsis_driver);
4150         if (rc) {
4151                 pr_err("rc %d from vio_register_driver\n", rc);
4152                 goto unregister_target;
4153         }
4155         return 0;
4157 unregister_target:
4158         target_unregister_template(&ibmvscsis_ops);
4159 unregister_class:
4160         class_unregister(&ibmvscsis_class);
4161 out:
4162         return rc;
4165 static void __exit ibmvscsis_exit(void)
4167         pr_info("Unregister IBM virtual SCSI host driver\n");
4168         vio_unregister_driver(&ibmvscsis_driver);
4169         target_unregister_template(&ibmvscsis_ops);
4170         class_unregister(&ibmvscsis_class);
4173 MODULE_DESCRIPTION("IBMVSCSIS fabric driver");
4174 MODULE_AUTHOR("Bryant G. Ly and Michael Cyr");
4175 MODULE_LICENSE("GPL");
4176 MODULE_VERSION(IBMVSCSIS_VERSION);
4177 module_init(ibmvscsis_init);
4178 module_exit(ibmvscsis_exit);