drivers/scsi/lpfc/lpfc_nportdisc.c
1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
5  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
6  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
7  * EMULEX and SLI are trademarks of Emulex.                        *
8  * www.broadcom.com                                                *
9  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
10  *                                                                 *
11  * This program is free software; you can redistribute it and/or   *
12  * modify it under the terms of version 2 of the GNU General       *
13  * Public License as published by the Free Software Foundation.    *
14  * This program is distributed in the hope that it will be useful. *
15  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
16  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
17  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
18  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
20  * more details, a copy of which can be found in the file COPYING  *
21  * included with this package.                                     *
22  *******************************************************************/
23
24 #include <linux/blkdev.h>
25 #include <linux/pci.h>
26 #include <linux/slab.h>
27 #include <linux/interrupt.h>
28
29 #include <scsi/scsi.h>
30 #include <scsi/scsi_device.h>
31 #include <scsi/scsi_host.h>
32 #include <scsi/scsi_transport_fc.h>
33 #include <scsi/fc/fc_fs.h>
34
35 #include "lpfc_hw4.h"
36 #include "lpfc_hw.h"
37 #include "lpfc_sli.h"
38 #include "lpfc_sli4.h"
39 #include "lpfc_nl.h"
40 #include "lpfc_disc.h"
41 #include "lpfc.h"
42 #include "lpfc_scsi.h"
43 #include "lpfc_nvme.h"
44 #include "lpfc_logmsg.h"
45 #include "lpfc_crtn.h"
46 #include "lpfc_vport.h"
47 #include "lpfc_debugfs.h"
48
49
50 /* Called to verify a rcv'ed ADISC was intended for us. */
51 static int
52 lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
53                  struct lpfc_name *nn, struct lpfc_name *pn)
54 {
55         /* First, we MUST have a RPI registered */
56         if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED))
57                 return 0;
58
59         /* Check that the ADISC rsp WWNN / WWPN matches our internal node
60          * table entry for that node.
61          */
62         if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name)))
63                 return 0;
64
65         if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name)))
66                 return 0;
67
68         /* we match, return success */
69         return 1;
70 }
71
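/**
 * lpfc_check_sparm - Validate service parameters from a PLOGI/FLOGI payload
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to the node the parameters were received from.
 * @sp: service parameters from the incoming ELS payload.
 * @class: class of service required by the caller (CLASS1/CLASS2/CLASS3).
 * @flogi: non-zero if the payload came from a FLOGI rather than a PLOGI.
 *
 * Checks that the required class of service is valid and, for a PLOGI,
 * clamps the per-class receive data field sizes to our own limits.  The
 * buffer-to-buffer receive size is clamped as well while preserving the
 * remote port's BB SCN bits.  On success the remote WWNN / WWPN are copied
 * into the node entry.
 *
 * Returns 1 if the service parameters are acceptable, 0 otherwise.
 **/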
72 int
73 lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
74                  struct serv_parm *sp, uint32_t class, int flogi)
75 {
76         volatile struct serv_parm *hsp = &vport->fc_sparam;
77         uint16_t hsp_value, ssp_value = 0;
78
79         /*
80          * The receive data field size and buffer-to-buffer receive data field
81          * size entries are 16 bits but are represented as two 8-bit fields in
82          * the driver data structure to account for rsvd bits and other control
83          * bits.  Reconstruct and compare the fields as 16-bit values before
84          * correcting the byte values.
85          */
86         if (sp->cls1.classValid) {
87                 if (!flogi) {
88                         hsp_value = ((hsp->cls1.rcvDataSizeMsb << 8) |
89                                      hsp->cls1.rcvDataSizeLsb);
90                         ssp_value = ((sp->cls1.rcvDataSizeMsb << 8) |
91                                      sp->cls1.rcvDataSizeLsb);
92                         if (!ssp_value)
93                                 goto bad_service_param;
94                         if (ssp_value > hsp_value) {
95                                 sp->cls1.rcvDataSizeLsb =
96                                         hsp->cls1.rcvDataSizeLsb;
97                                 sp->cls1.rcvDataSizeMsb =
98                                         hsp->cls1.rcvDataSizeMsb;
99                         }
100                 }
101         } else if (class == CLASS1)
102                 goto bad_service_param;
103         if (sp->cls2.classValid) {
104                 if (!flogi) {
105                         hsp_value = ((hsp->cls2.rcvDataSizeMsb << 8) |
106                                      hsp->cls2.rcvDataSizeLsb);
107                         ssp_value = ((sp->cls2.rcvDataSizeMsb << 8) |
108                                      sp->cls2.rcvDataSizeLsb);
109                         if (!ssp_value)
110                                 goto bad_service_param;
111                         if (ssp_value > hsp_value) {
112                                 sp->cls2.rcvDataSizeLsb =
113                                         hsp->cls2.rcvDataSizeLsb;
114                                 sp->cls2.rcvDataSizeMsb =
115                                         hsp->cls2.rcvDataSizeMsb;
116                         }
117                 }
118         } else if (class == CLASS2)
119                 goto bad_service_param;
120         if (sp->cls3.classValid) {
121                 if (!flogi) {
122                         hsp_value = ((hsp->cls3.rcvDataSizeMsb << 8) |
123                                      hsp->cls3.rcvDataSizeLsb);
124                         ssp_value = ((sp->cls3.rcvDataSizeMsb << 8) |
125                                      sp->cls3.rcvDataSizeLsb);
126                         if (!ssp_value)
127                                 goto bad_service_param;
128                         if (ssp_value > hsp_value) {
129                                 sp->cls3.rcvDataSizeLsb =
130                                         hsp->cls3.rcvDataSizeLsb;
131                                 sp->cls3.rcvDataSizeMsb =
132                                         hsp->cls3.rcvDataSizeMsb;
133                         }
134                 }
135         } else if (class == CLASS3)
136                 goto bad_service_param;
137
138         /*
139          * Preserve the upper four bits of the MSB from the PLOGI response.
140          * These bits contain the Buffer-to-Buffer State Change Number
141          * from the target and need to be passed to the FW.
142          */
143         hsp_value = (hsp->cmn.bbRcvSizeMsb << 8) | hsp->cmn.bbRcvSizeLsb;
144         ssp_value = (sp->cmn.bbRcvSizeMsb << 8) | sp->cmn.bbRcvSizeLsb;
145         if (ssp_value > hsp_value) {
146                 sp->cmn.bbRcvSizeLsb = hsp->cmn.bbRcvSizeLsb;
147                 sp->cmn.bbRcvSizeMsb = (sp->cmn.bbRcvSizeMsb & 0xF0) |
148                                        (hsp->cmn.bbRcvSizeMsb & 0x0F);
149         }
150
151         memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
152         memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name));
153         return 1;
154 bad_service_param:
155         lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
156                          "0207 Device %x "
157                          "(%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x) sent "
158                          "invalid service parameters.  Ignoring device.\n",
159                          ndlp->nlp_DID,
160                          sp->nodeName.u.wwn[0], sp->nodeName.u.wwn[1],
161                          sp->nodeName.u.wwn[2], sp->nodeName.u.wwn[3],
162                          sp->nodeName.u.wwn[4], sp->nodeName.u.wwn[5],
163                          sp->nodeName.u.wwn[6], sp->nodeName.u.wwn[7]);
164         return 0;
165 }
166
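/**
 * lpfc_check_elscmpl_iocb - Get the ELS response payload for a completion
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to the completed ELS command iocb.
 * @rspiocb: pointer to the ELS response iocb.
 *
 * Returns a pointer to the response payload just past the ELS command word,
 * taken from the first response buffer chained to the command's cmd_dmabuf.
 * If cmd_dmabuf was already cleared by lpfc_els_abort, a LOCAL_REJECT /
 * SLI_ABORTED status is forced into the response and NULL is returned.
 **/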
167 static void *
168 lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
169                         struct lpfc_iocbq *rspiocb)
170 {
171         struct lpfc_dmabuf *pcmd, *prsp;
172         uint32_t *lp;
173         void     *ptr = NULL;
174         u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
175
176         pcmd = cmdiocb->cmd_dmabuf;
177
178         /* For lpfc_els_abort, cmd_dmabuf could be zeroed to delay
179          * freeing the associated memory until after the ABTS completes.
180          */
181         if (pcmd) {
182                 prsp =  list_get_first(&pcmd->list, struct lpfc_dmabuf,
183                                        list);
184                 if (prsp) {
185                         lp = (uint32_t *) prsp->virt;
186                         ptr = (void *)((uint8_t *)lp + sizeof(uint32_t));
187                 }
188         } else {
189                 /* Force ulp_status error since we are returning NULL ptr */
190                 if (!(ulp_status)) {
191                         if (phba->sli_rev == LPFC_SLI_REV4) {
192                                 bf_set(lpfc_wcqe_c_status, &rspiocb->wcqe_cmpl,
193                                        IOSTAT_LOCAL_REJECT);
194                                 rspiocb->wcqe_cmpl.parameter = IOERR_SLI_ABORTED;
195                         } else {
196                                 rspiocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
197                                 rspiocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED;
198                         }
199                 }
200                 ptr = NULL;
201         }
202         return ptr;
203 }
204
205
206
207 /*
208  * Free resources / clean up outstanding I/Os
209  * associated with an LPFC_NODELIST entry. This
210  * routine effectively results in a "software abort".
211  */
212 void
213 lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
214 {
215         LIST_HEAD(abort_list);
216         struct lpfc_sli_ring *pring;
217         struct lpfc_iocbq *iocb, *next_iocb;
218
219         pring = lpfc_phba_elsring(phba);
220
221         /* In case of error recovery path, we might have a NULL pring here */
222         if (unlikely(!pring))
223                 return;
224
225         /* Abort outstanding I/O on NPort <nlp_DID> */
226         lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY,
227                          "2819 Abort outstanding I/O on NPort x%x "
228                          "Data: x%x x%x x%x\n",
229                          ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
230                          ndlp->nlp_rpi);
231         /* Clean up all fabric IOs first. */
232         lpfc_fabric_abort_nport(ndlp);
233
234         /*
235          * Lock the ELS ring txcmplq for SLI3/SLI4 and build a local list
236          * of all ELS IOs that need an ABTS.  The IOs need to stay on the
237          * txcmplq so that the abort operation completes them successfully.
238          */
239         spin_lock_irq(&phba->hbalock);
240         if (phba->sli_rev == LPFC_SLI_REV4)
241                 spin_lock(&pring->ring_lock);
242         list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
243                 /* Add to abort_list on NDLP match. */
244                 if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
245                         list_add_tail(&iocb->dlist, &abort_list);
246         }
247         if (phba->sli_rev == LPFC_SLI_REV4)
248                 spin_unlock(&pring->ring_lock);
249         spin_unlock_irq(&phba->hbalock);
250
251         /* Abort the targeted IOs and remove them from the abort list. */
252         list_for_each_entry_safe(iocb, next_iocb, &abort_list, dlist) {
253                 spin_lock_irq(&phba->hbalock);
254                 list_del_init(&iocb->dlist);
255                 lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
256                 spin_unlock_irq(&phba->hbalock);
257         }
258         /* Make sure HBA is alive */
259         lpfc_issue_hb_tmo(phba);
260
261         INIT_LIST_HEAD(&abort_list);
262
263         /* Now process the txq */
264         spin_lock_irq(&phba->hbalock);
265         if (phba->sli_rev == LPFC_SLI_REV4)
266                 spin_lock(&pring->ring_lock);
267
268         list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
269                 /* Check to see if iocb matches the nport we are looking for */
270                 if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
271                         list_del_init(&iocb->list);
272                         list_add_tail(&iocb->list, &abort_list);
273                 }
274         }
275
276         if (phba->sli_rev == LPFC_SLI_REV4)
277                 spin_unlock(&pring->ring_lock);
278         spin_unlock_irq(&phba->hbalock);
279
280         /* Cancel all the IOCBs from the completions list */
281         lpfc_sli_cancel_iocbs(phba, &abort_list,
282                               IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
283
284         lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
285 }
286
287 /* lpfc_defer_plogi_acc - Issue PLOGI ACC after reg_login completes
288  * @phba: pointer to lpfc hba data structure.
289  * @login_mbox: pointer to REG_RPI mailbox object
290  *
291  * The ACC for a rcv'ed PLOGI is deferred until AFTER the REG_RPI completes
292  */
293 static void
294 lpfc_defer_plogi_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *login_mbox)
295 {
296         struct lpfc_iocbq *save_iocb;
297         struct lpfc_nodelist *ndlp;
298         MAILBOX_t *mb = &login_mbox->u.mb;
299
300         int rc;
301
302         ndlp = login_mbox->ctx_ndlp;
303         save_iocb = login_mbox->context3;
304
305         if (mb->mbxStatus == MBX_SUCCESS) {
306                 /* Now that REG_RPI completed successfully,
307                  * we can proceed with sending the PLOGI ACC.
308                  */
309                 rc = lpfc_els_rsp_acc(login_mbox->vport, ELS_CMD_PLOGI,
310                                       save_iocb, ndlp, NULL);
311                 if (rc) {
312                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
313                                         "4576 PLOGI ACC fails pt2pt discovery: "
314                                         "DID %x Data: %x\n", ndlp->nlp_DID, rc);
315                 }
316         }
317
318         /* Now process the REG_RPI cmpl */
319         lpfc_mbx_cmpl_reg_login(phba, login_mbox);
320         ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
321         kfree(save_iocb);
322 }
323
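/**
 * lpfc_rcv_plogi - Handle an unsolicited PLOGI received on a vport
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to the node that sent the PLOGI.
 * @cmdiocb: pointer to the received PLOGI iocb.
 *
 * Validates the WWPN / WWNN and service parameters, performs an implicit
 * logout if the node is already logged in, and handles pt2pt NPortId
 * assignment and link parameter updates.  On the normal path a REG_RPI
 * mailbox is issued with lpfc_defer_plogi_acc as its completion handler so
 * the PLOGI ACC is only sent after the RPI registration completes.
 *
 * Returns 0 if the PLOGI was rejected for invalid parameters or a resource
 * failure, 1 otherwise.
 **/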
324 static int
325 lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
326                struct lpfc_iocbq *cmdiocb)
327 {
328         struct lpfc_hba    *phba = vport->phba;
329         struct lpfc_dmabuf *pcmd;
330         uint64_t nlp_portwwn = 0;
331         uint32_t *lp;
332         union lpfc_wqe128 *wqe;
333         IOCB_t *icmd;
334         struct serv_parm *sp;
335         uint32_t ed_tov;
336         LPFC_MBOXQ_t *link_mbox;
337         LPFC_MBOXQ_t *login_mbox;
338         struct lpfc_iocbq *save_iocb;
339         struct ls_rjt stat;
340         uint32_t vid, flag;
341         int rc;
342         u32 remote_did;
343
344         memset(&stat, 0, sizeof (struct ls_rjt));
345         pcmd = cmdiocb->cmd_dmabuf;
346         lp = (uint32_t *) pcmd->virt;
347         sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
348         if (wwn_to_u64(sp->portName.u.wwn) == 0) {
349                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
350                                  "0140 PLOGI Reject: invalid pname\n");
351                 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
352                 stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_PNAME;
353                 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
354                         NULL);
355                 return 0;
356         }
357         if (wwn_to_u64(sp->nodeName.u.wwn) == 0) {
358                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
359                                  "0141 PLOGI Reject: invalid nname\n");
360                 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
361                 stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_NNAME;
362                 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
363                         NULL);
364                 return 0;
365         }
366
367         nlp_portwwn = wwn_to_u64(ndlp->nlp_portname.u.wwn);
368         if (lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0) == 0) {
369                 /* Reject this request because of invalid parameters */
370                 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
371                 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
372                 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
373                         NULL);
374                 return 0;
375         }
376
377         if (phba->sli_rev == LPFC_SLI_REV4)
378                 wqe = &cmdiocb->wqe;
379         else
380                 icmd = &cmdiocb->iocb;
381
382         /* PLOGI chkparm OK */
383         lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
384                          "0114 PLOGI chkparm OK Data: x%x x%x x%x "
385                          "x%x x%x x%lx\n",
386                          ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
387                          ndlp->nlp_rpi, vport->port_state,
388                          vport->fc_flag);
389
390         if (vport->cfg_fcp_class == 2 && sp->cls2.classValid)
391                 ndlp->nlp_fcp_info |= CLASS2;
392         else
393                 ndlp->nlp_fcp_info |= CLASS3;
394
395         ndlp->nlp_class_sup = 0;
396         if (sp->cls1.classValid)
397                 ndlp->nlp_class_sup |= FC_COS_CLASS1;
398         if (sp->cls2.classValid)
399                 ndlp->nlp_class_sup |= FC_COS_CLASS2;
400         if (sp->cls3.classValid)
401                 ndlp->nlp_class_sup |= FC_COS_CLASS3;
402         if (sp->cls4.classValid)
403                 ndlp->nlp_class_sup |= FC_COS_CLASS4;
404         ndlp->nlp_maxframe =
405                 ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
406         /* if already logged in, do implicit logout */
407         switch (ndlp->nlp_state) {
408         case  NLP_STE_NPR_NODE:
409                 if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
410                         break;
411                 fallthrough;
412         case  NLP_STE_REG_LOGIN_ISSUE:
413         case  NLP_STE_PRLI_ISSUE:
414         case  NLP_STE_UNMAPPED_NODE:
415         case  NLP_STE_MAPPED_NODE:
416                 /* For initiators, lpfc_plogi_confirm_nport skips fabric did.
417                  * For target mode, execute implicit logo.
418                  * Fabric nodes go into NPR.
419                  */
420                 if (!(ndlp->nlp_type & NLP_FABRIC) &&
421                     !(phba->nvmet_support)) {
422                         /* Clear ndlp info, since a follow-up PRLI may have
423                          * updated ndlp information
424                          */
425                         ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
426                         ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
427                         ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
428                         ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER;
429                         ndlp->nlp_flag &= ~NLP_FIRSTBURST;
430
431                         lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb,
432                                          ndlp, NULL);
433                         return 1;
434                 }
435                 if (nlp_portwwn != 0 &&
436                     nlp_portwwn != wwn_to_u64(sp->portName.u.wwn))
437                         lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
438                                          "0143 PLOGI recv'd from DID: x%x "
439                                          "WWPN changed: old %llx new %llx\n",
440                                          ndlp->nlp_DID,
441                                          (unsigned long long)nlp_portwwn,
442                                          (unsigned long long)
443                                          wwn_to_u64(sp->portName.u.wwn));
444
445                 /* Notify transport of connectivity loss to trigger cleanup. */
446                 if (phba->nvmet_support &&
447                     ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
448                         lpfc_nvmet_invalidate_host(phba, ndlp);
449
450                 ndlp->nlp_prev_state = ndlp->nlp_state;
451                 /* rport needs to be unregistered first */
452                 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
453                 break;
454         }
455
456         ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
457         ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
458         ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
459         ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER;
460         ndlp->nlp_flag &= ~NLP_FIRSTBURST;
461
462         login_mbox = NULL;
463         link_mbox = NULL;
464         save_iocb = NULL;
465
466         /* Check for NPort to NPort pt2pt protocol */
467         if (test_bit(FC_PT2PT, &vport->fc_flag) &&
468             !test_bit(FC_PT2PT_PLOGI, &vport->fc_flag)) {
469                 /* rcv'ed PLOGI decides what our NPortId will be */
470                 if (phba->sli_rev == LPFC_SLI_REV4) {
471                         vport->fc_myDID = bf_get(els_rsp64_sid,
472                                                  &cmdiocb->wqe.xmit_els_rsp);
473                 } else {
474                         vport->fc_myDID = icmd->un.rcvels.parmRo;
475                 }
476
477                 /* If there is an outstanding FLOGI, abort it now.
478                  * The remote NPort is not going to ACC our FLOGI
479                  * if it's already issuing a PLOGI for pt2pt mode.
480                  * This indicates our FLOGI was dropped; however, we
481                  * must have ACCed the remote NPort's FLOGI to us
482                  * to make it here.
483                  */
484                 if (phba->hba_flag & HBA_FLOGI_OUTSTANDING)
485                         lpfc_els_abort_flogi(phba);
486
487                 ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
488                 if (sp->cmn.edtovResolution) {
489                         /* E_D_TOV ticks are in nanoseconds */
490                         ed_tov = (phba->fc_edtov + 999999) / 1000000;
491                 }
492
493                 /*
494                  * For pt-to-pt, use the larger EDTOV
495                  * RATOV = 2 * EDTOV
496                  */
497                 if (ed_tov > phba->fc_edtov)
498                         phba->fc_edtov = ed_tov;
499                 phba->fc_ratov = (2 * phba->fc_edtov) / 1000;
500
501                 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
502
503                 /* Issue CONFIG_LINK for SLI3 or REG_VFI for SLI4,
504                  * to account for the updated TOVs / parameters.
505                  */
506                 if (phba->sli_rev == LPFC_SLI_REV4)
507                         lpfc_issue_reg_vfi(vport);
508                 else {
509                         link_mbox = mempool_alloc(phba->mbox_mem_pool,
510                                                   GFP_KERNEL);
511                         if (!link_mbox)
512                                 goto out;
513                         lpfc_config_link(phba, link_mbox);
514                         link_mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
515                         link_mbox->vport = vport;
516
517                         /* The default completion handling for CONFIG_LINK
518                          * does not require the ndlp so no reference is needed.
519                          */
520                         link_mbox->ctx_ndlp = ndlp;
521
522                         rc = lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT);
523                         if (rc == MBX_NOT_FINISHED) {
524                                 mempool_free(link_mbox, phba->mbox_mem_pool);
525                                 goto out;
526                         }
527                 }
528
529                 lpfc_can_disctmo(vport);
530         }
531
532         ndlp->nlp_flag &= ~NLP_SUPPRESS_RSP;
533         if ((phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) &&
534             sp->cmn.valid_vendor_ver_level) {
535                 vid = be32_to_cpu(sp->un.vv.vid);
536                 flag = be32_to_cpu(sp->un.vv.flags);
537                 if ((vid == LPFC_VV_EMLX_ID) && (flag & LPFC_VV_SUPPRESS_RSP))
538                         ndlp->nlp_flag |= NLP_SUPPRESS_RSP;
539         }
540
541         login_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
542         if (!login_mbox)
543                 goto out;
544
545         save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
546         if (!save_iocb)
547                 goto out;
548
549         /* Save info from cmd IOCB to be used in rsp after all mboxes complete */
550         memcpy((uint8_t *)save_iocb, (uint8_t *)cmdiocb,
551                sizeof(struct lpfc_iocbq));
552
553         /* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
554         if (phba->sli_rev == LPFC_SLI_REV4)
555                 lpfc_unreg_rpi(vport, ndlp);
556
557         /* Issue REG_LOGIN first, before ACCing the PLOGI, so we will
558          * always be deferring the ACC.
559          */
560         if (phba->sli_rev == LPFC_SLI_REV4)
561                 remote_did = bf_get(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest);
562         else
563                 remote_did = icmd->un.rcvels.remoteID;
564         rc = lpfc_reg_rpi(phba, vport->vpi, remote_did,
565                             (uint8_t *)sp, login_mbox, ndlp->nlp_rpi);
566         if (rc)
567                 goto out;
568
569         login_mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
570         login_mbox->vport = vport;
571
572         /*
573          * If there is an outstanding PLOGI issued, abort it before
574          * sending the ACC rsp for the received PLOGI. If the pending
575          * PLOGI is not canceled here, it will be rejected by the
576          * remote port and retried. On a configuration with a single
577          * discovery thread this causes a huge delay in discovery and
578          * also causes multiple state machines to run in parallel for
579          * this node.
580          * This only applies to a fabric environment.
581          */
582         if ((ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) &&
583             test_bit(FC_FABRIC, &vport->fc_flag)) {
584                 /* software abort outstanding PLOGI */
585                 lpfc_els_abort(phba, ndlp);
586         }
587
588         if ((vport->port_type == LPFC_NPIV_PORT &&
589              vport->cfg_restrict_login)) {
590
591                 /* no deferred ACC */
592                 kfree(save_iocb);
593
594                 /* This is an NPIV SLI4 instance that does not need to register
595                  * a default RPI.
596                  */
597                 if (phba->sli_rev == LPFC_SLI_REV4) {
598                         lpfc_mbox_rsrc_cleanup(phba, login_mbox,
599                                                MBOX_THD_UNLOCKED);
600                         login_mbox = NULL;
601                 } else {
602                         /* In order to preserve RPIs, we want to clean up
603                          * the default RPI the firmware created to rcv
604                          * this ELS request. The only way to do this is
605                          * to register, then unregister the RPI.
606                          */
607                         spin_lock_irq(&ndlp->lock);
608                         ndlp->nlp_flag |= (NLP_RM_DFLT_RPI | NLP_ACC_REGLOGIN |
609                                            NLP_RCV_PLOGI);
610                         spin_unlock_irq(&ndlp->lock);
611                 }
612
613                 stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
614                 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
615                 rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
616                                          ndlp, login_mbox);
617                 if (rc && login_mbox)
618                         lpfc_mbox_rsrc_cleanup(phba, login_mbox,
619                                                MBOX_THD_UNLOCKED);
620                 return 1;
621         }
622
623         /* So the order here should be:
624          * SLI3 pt2pt
625          *   Issue CONFIG_LINK mbox
626          *   CONFIG_LINK cmpl
627          * SLI4 pt2pt
628          *   Issue REG_VFI mbox
629          *   REG_VFI cmpl
630          * SLI4
631          *   Issue UNREG RPI mbx
632          *   UNREG RPI cmpl
633          * Issue REG_RPI mbox
634          * REG RPI cmpl
635          * Issue PLOGI ACC
636          * PLOGI ACC cmpl
637          */
638         login_mbox->mbox_cmpl = lpfc_defer_plogi_acc;
639         login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
640         if (!login_mbox->ctx_ndlp)
641                 goto out;
642
643         login_mbox->context3 = save_iocb; /* For PLOGI ACC */
644
645         spin_lock_irq(&ndlp->lock);
646         ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
647         spin_unlock_irq(&ndlp->lock);
648
649         /* Start the ball rolling by issuing REG_LOGIN here */
650         rc = lpfc_sli_issue_mbox(phba, login_mbox, MBX_NOWAIT);
651         if (rc == MBX_NOT_FINISHED) {
652                 lpfc_nlp_put(ndlp);
653                 goto out;
654         }
655         lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
656
657         return 1;
658 out:
659         kfree(save_iocb);
660         if (login_mbox)
661                 mempool_free(login_mbox, phba->mbox_mem_pool);
662
663         stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
664         stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
665         lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
666         return 0;
667 }
668
669 /**
670  * lpfc_mbx_cmpl_resume_rpi - Resume RPI completion routine
671  * @phba: pointer to lpfc hba data structure.
672  * @mboxq: pointer to mailbox object
673  *
674  * This routine is invoked to issue a completion to a rcv'ed
675  * ADISC or PDISC after the paused RPI has been resumed.
676  **/
677 static void
678 lpfc_mbx_cmpl_resume_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
679 {
680         struct lpfc_vport *vport;
681         struct lpfc_iocbq *elsiocb;
682         struct lpfc_nodelist *ndlp;
683         uint32_t cmd;
684
685         elsiocb = (struct lpfc_iocbq *)mboxq->ctx_buf;
686         ndlp = (struct lpfc_nodelist *)mboxq->ctx_ndlp;
687         vport = mboxq->vport;
688         cmd = elsiocb->drvrTimeout;
689
690         if (cmd == ELS_CMD_ADISC) {
691                 lpfc_els_rsp_adisc_acc(vport, elsiocb, ndlp);
692         } else {
693                 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, elsiocb,
694                         ndlp, NULL);
695         }
696
697         /* This nlp_put pairs with lpfc_sli4_resume_rpi */
698         lpfc_nlp_put(ndlp);
699
700         kfree(elsiocb);
701         mempool_free(mboxq, phba->mbox_mem_pool);
702 }
703
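/**
 * lpfc_rcv_padisc - Handle a received ADISC or PDISC
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to the node that sent the request.
 * @cmdiocb: pointer to the received ADISC/PDISC iocb.
 *
 * Accepts the request when the WWNN / WWPN match our node table entry.  On
 * SLI4 the paused RPI is resumed first and the ACC is sent from the
 * resume_rpi completion so the port is ready to receive data.  Otherwise
 * the request is rejected and the node is placed in NPR state with a
 * 1 second delayed PLOGI retry.
 *
 * Returns 1 if the request was accepted, 0 if it was rejected.
 **/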
704 static int
705 lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
706                 struct lpfc_iocbq *cmdiocb)
707 {
708         struct lpfc_hba *phba = vport->phba;
709         struct lpfc_iocbq  *elsiocb;
710         struct lpfc_dmabuf *pcmd;
711         struct serv_parm   *sp;
712         struct lpfc_name   *pnn, *ppn;
713         struct ls_rjt stat;
714         ADISC *ap;
715         uint32_t *lp;
716         uint32_t cmd;
717
718         pcmd = cmdiocb->cmd_dmabuf;
719         lp = (uint32_t *) pcmd->virt;
720
721         cmd = *lp++;
722         if (cmd == ELS_CMD_ADISC) {
723                 ap = (ADISC *) lp;
724                 pnn = (struct lpfc_name *) & ap->nodeName;
725                 ppn = (struct lpfc_name *) & ap->portName;
726         } else {
727                 sp = (struct serv_parm *) lp;
728                 pnn = (struct lpfc_name *) & sp->nodeName;
729                 ppn = (struct lpfc_name *) & sp->portName;
730         }
731
732         if (get_job_ulpstatus(phba, cmdiocb) == 0 &&
733             lpfc_check_adisc(vport, ndlp, pnn, ppn)) {
734
735                 /*
736                  * As soon as we send the ACC, the remote NPort can
737                  * start sending us data. Thus, for SLI4 we must
738                  * resume the RPI before the ACC goes out.
739                  */
740                 if (vport->phba->sli_rev == LPFC_SLI_REV4) {
741                         elsiocb = kmalloc(sizeof(struct lpfc_iocbq),
742                                 GFP_KERNEL);
743                         if (elsiocb) {
744                                 /* Save info from cmd IOCB used in rsp */
745                                 memcpy((uint8_t *)elsiocb, (uint8_t *)cmdiocb,
746                                         sizeof(struct lpfc_iocbq));
747
748                                 /* Save the ELS cmd */
749                                 elsiocb->drvrTimeout = cmd;
750
751                                 if (lpfc_sli4_resume_rpi(ndlp,
752                                                 lpfc_mbx_cmpl_resume_rpi,
753                                                 elsiocb))
754                                         kfree(elsiocb);
755                                 goto out;
756                         }
757                 }
758
759                 if (cmd == ELS_CMD_ADISC) {
760                         lpfc_els_rsp_adisc_acc(vport, cmdiocb, ndlp);
761                 } else {
762                         lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb,
763                                 ndlp, NULL);
764                 }
765 out:
766                 /* If we are authenticated, move to the proper state.
767                  * It is possible an ADISC arrived and the remote nport
768                  * is already in MAPPED or UNMAPPED state.  Catch this
769                  * condition and don't set the nlp_state again because
770                  * it causes an unnecessary transport unregister/register.
771                  *
772                  * Nodes marked for ADISC will move to MAPPED or UNMAPPED
773                  * state after issuing ADISC.
774                  */
775                 if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET)) {
776                         if ((ndlp->nlp_state != NLP_STE_MAPPED_NODE) &&
777                             !(ndlp->nlp_flag & NLP_NPR_ADISC))
778                                 lpfc_nlp_set_state(vport, ndlp,
779                                                    NLP_STE_MAPPED_NODE);
780                 }
781
782                 return 1;
783         }
784         /* Reject this request because of invalid parameters */
785         stat.un.b.lsRjtRsvd0 = 0;
786         stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
787         stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
788         stat.un.b.vendorUnique = 0;
789         lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
790
791         /* 1 sec timeout */
792         mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
793
794         spin_lock_irq(&ndlp->lock);
795         ndlp->nlp_flag |= NLP_DELAY_TMO;
796         spin_unlock_irq(&ndlp->lock);
797         ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
798         ndlp->nlp_prev_state = ndlp->nlp_state;
799         lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
800         return 0;
801 }
802
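/**
 * lpfc_rcv_logo - Handle a received LOGO or PRLO
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to the node that sent the request.
 * @cmdiocb: pointer to the received iocb.
 * @els_cmd: ELS_CMD_LOGO or ELS_CMD_PRLO.
 *
 * ACCs the request and then handles recovery.  A LOGO from the fabric DID
 * normally takes the port link down and either re-instantiates the VLink
 * via FDISC (when other VLinks are still active) or restarts physical port
 * discovery.  Other nodes that are expected to recover are given a 1 second
 * delayed PLOGI.  Finally the node is unregistered from the transport and
 * moved to NPR state.
 *
 * Always returns 0; LOGO processing resumes when the ACC completes in
 * lpfc_cmpl_els_logo_acc.
 **/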
803 static int
804 lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
805               struct lpfc_iocbq *cmdiocb, uint32_t els_cmd)
806 {
807         struct lpfc_hba    *phba = vport->phba;
808         struct lpfc_vport **vports;
809         int i, active_vlink_present = 0;
810
811         /* Put ndlp in NPR state with 1 sec timeout for PLOGI; ACC the LOGO. */
812         /* Only call LOGO ACC for the first LOGO; this avoids sending unnecessary
813          * PLOGIs during LOGO storms from a device.
814          */
815         spin_lock_irq(&ndlp->lock);
816         ndlp->nlp_flag |= NLP_LOGO_ACC;
817         spin_unlock_irq(&ndlp->lock);
818         if (els_cmd == ELS_CMD_PRLO)
819                 lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
820         else
821                 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
822
823         /* This clause allows the initiator to ACC the LOGO back to the
824          * Fabric Domain Controller.  It deliberately skips all other
825          * steps because some fabrics send RDP requests after logging out
826          * from the initiator.
827          */
828         if (ndlp->nlp_type & NLP_FABRIC &&
829             ((ndlp->nlp_DID & WELL_KNOWN_DID_MASK) != WELL_KNOWN_DID_MASK))
830                 return 0;
831
832         /* Notify transport of connectivity loss to trigger cleanup. */
833         if (phba->nvmet_support &&
834             ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
835                 lpfc_nvmet_invalidate_host(phba, ndlp);
836
837         if (ndlp->nlp_DID == Fabric_DID) {
838                 if (vport->port_state <= LPFC_FDISC ||
839                     test_bit(FC_PT2PT, &vport->fc_flag))
840                         goto out;
841                 lpfc_linkdown_port(vport);
842                 set_bit(FC_VPORT_LOGO_RCVD, &vport->fc_flag);
843                 vports = lpfc_create_vport_work_array(phba);
844                 if (vports) {
845                         for (i = 0; i <= phba->max_vports && vports[i] != NULL;
846                                         i++) {
847                                 if (!test_bit(FC_VPORT_LOGO_RCVD,
848                                               &vports[i]->fc_flag) &&
849                                     vports[i]->port_state > LPFC_FDISC) {
850                                         active_vlink_present = 1;
851                                         break;
852                                 }
853                         }
854                         lpfc_destroy_vport_work_array(phba, vports);
855                 }
856
857                 /*
858                  * Don't re-instantiate if vport is marked for deletion.
859                  * If we are here first then vport_delete is going to wait
860                  * for discovery to complete.
861                  */
862                 if (!test_bit(FC_UNLOADING, &vport->load_flag) &&
863                     active_vlink_present) {
864                         /*
865                          * If there are other active VLinks present,
866                          * re-instantiate the Vlink using FDISC.
867                          */
868                         mod_timer(&ndlp->nlp_delayfunc,
869                                   jiffies + msecs_to_jiffies(1000));
870                         spin_lock_irq(&ndlp->lock);
871                         ndlp->nlp_flag |= NLP_DELAY_TMO;
872                         spin_unlock_irq(&ndlp->lock);
873                         ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
874                         vport->port_state = LPFC_FDISC;
875                 } else {
876                         clear_bit(FC_LOGO_RCVD_DID_CHNG, &phba->pport->fc_flag);
877                         lpfc_retry_pport_discovery(phba);
878                 }
879         } else {
880                 lpfc_printf_vlog(vport, KERN_INFO,
881                                  LOG_NODE | LOG_ELS | LOG_DISCOVERY,
882                                  "3203 LOGO recover nport x%06x state x%x "
883                                  "ntype x%x fc_flag x%lx\n",
884                                  ndlp->nlp_DID, ndlp->nlp_state,
885                                  ndlp->nlp_type, vport->fc_flag);
886
887                 /* Special cases for rports that recover post LOGO. */
888                 if ((!(ndlp->nlp_type == NLP_FABRIC) &&
889                      (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) ||
890                       test_bit(FC_PT2PT, &vport->fc_flag))) ||
891                     (ndlp->nlp_state >= NLP_STE_ADISC_ISSUE ||
892                      ndlp->nlp_state <= NLP_STE_PRLI_ISSUE)) {
893                         mod_timer(&ndlp->nlp_delayfunc,
894                                   jiffies + msecs_to_jiffies(1000 * 1));
895                         spin_lock_irq(&ndlp->lock);
896                         ndlp->nlp_flag |= NLP_DELAY_TMO;
897                         spin_unlock_irq(&ndlp->lock);
898                         ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
899                         lpfc_printf_vlog(vport, KERN_INFO,
900                                          LOG_NODE | LOG_ELS | LOG_DISCOVERY,
901                                          "3204 Start nlpdelay on DID x%06x "
902                                          "nflag x%x lastels x%x ref cnt %u\n",
903                                          ndlp->nlp_DID, ndlp->nlp_flag,
904                                          ndlp->nlp_last_elscmd,
905                                          kref_read(&ndlp->kref));
906                 }
907         }
908 out:
909         /* Unregister from backend, could have been skipped due to ADISC */
910         lpfc_nlp_unreg_node(vport, ndlp);
911
912         ndlp->nlp_prev_state = ndlp->nlp_state;
913         lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
914
915         spin_lock_irq(&ndlp->lock);
916         ndlp->nlp_flag &= ~NLP_NPR_ADISC;
917         spin_unlock_irq(&ndlp->lock);
918         /* The driver has to wait until the ACC completes before it continues
919          * processing the LOGO.  The action will resume in
920          * lpfc_cmpl_els_logo_acc routine. Since part of processing includes an
921          * unreg_login, the driver waits so the ACC does not get aborted.
922          */
923         return 0;
924 }
925
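/**
 * lpfc_rcv_prli_support_check - Check if a received PRLI can be supported
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to the node that sent the PRLI.
 * @cmdiocb: pointer to the received PRLI iocb.
 *
 * NVMET ports only accept NVME PRLIs, initiator ports without NVME support
 * reject NVME PRLIs, and NPIV ports reject initiator-only PRLIs.  When the
 * check fails an LS_RJT with reason CMD_UNSUPPORTED is sent.
 *
 * Returns 1 if the PRLI can be supported, 0 if it was rejected.
 **/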
926 static uint32_t
927 lpfc_rcv_prli_support_check(struct lpfc_vport *vport,
928                             struct lpfc_nodelist *ndlp,
929                             struct lpfc_iocbq *cmdiocb)
930 {
931         struct ls_rjt stat;
932         uint32_t *payload;
933         uint32_t cmd;
934         PRLI *npr;
935
936         payload = cmdiocb->cmd_dmabuf->virt;
937         cmd = *payload;
938         npr = (PRLI *)((uint8_t *)payload + sizeof(uint32_t));
939
940         if (vport->phba->nvmet_support) {
941                 /* Must be an NVME PRLI */
942                 if (cmd == ELS_CMD_PRLI)
943                         goto out;
944         } else {
945                 /* Initiator mode. */
946                 if (!vport->nvmei_support && (cmd == ELS_CMD_NVMEPRLI))
947                         goto out;
948
949                 /* NPIV ports will RJT initiator-only functions */
950                 if (vport->port_type == LPFC_NPIV_PORT &&
951                     npr->initiatorFunc && !npr->targetFunc)
952                         goto out;
953         }
954         return 1;
955 out:
956         lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
957                          "6115 Rcv PRLI (%x) check failed: ndlp rpi %d "
958                          "state x%x flags x%x port_type: x%x "
959                          "npr->initfcn: x%x npr->tgtfcn: x%x\n",
960                          cmd, ndlp->nlp_rpi, ndlp->nlp_state,
961                          ndlp->nlp_flag, vport->port_type,
962                          npr->initiatorFunc, npr->targetFunc);
963         memset(&stat, 0, sizeof(struct ls_rjt));
964         stat.un.b.lsRjtRsnCode = LSRJT_CMD_UNSUPPORTED;
965         stat.un.b.lsRjtRsnCodeExp = LSEXP_REQ_UNSUPPORTED;
966         lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
967                             ndlp, NULL);
968         return 0;
969 }
970
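/**
 * lpfc_rcv_prli - Record capabilities from a received PRLI
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to the node that sent the PRLI.
 * @cmdiocb: pointer to the received PRLI iocb.
 *
 * Parses the PRLI service parameter page and records the remote port's
 * FCP / NVME initiator and target roles, first burst support and retry
 * capability (FCP-2 for SCSI, SLER for NVME) in the node, then updates the
 * FC transport rport roles if an rport is registered.
 **/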
971 static void
972 lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
973               struct lpfc_iocbq *cmdiocb)
974 {
975         struct lpfc_hba  *phba = vport->phba;
976         struct lpfc_dmabuf *pcmd;
977         uint32_t *lp;
978         PRLI *npr;
979         struct fc_rport *rport = ndlp->rport;
980         u32 roles;
981
982         pcmd = cmdiocb->cmd_dmabuf;
983         lp = (uint32_t *)pcmd->virt;
984         npr = (PRLI *)((uint8_t *)lp + sizeof(uint32_t));
985
986         if ((npr->prliType == PRLI_FCP_TYPE) ||
987             (npr->prliType == PRLI_NVME_TYPE)) {
988                 if (npr->initiatorFunc) {
989                         if (npr->prliType == PRLI_FCP_TYPE)
990                                 ndlp->nlp_type |= NLP_FCP_INITIATOR;
991                         if (npr->prliType == PRLI_NVME_TYPE)
992                                 ndlp->nlp_type |= NLP_NVME_INITIATOR;
993                 }
994                 if (npr->targetFunc) {
995                         if (npr->prliType == PRLI_FCP_TYPE)
996                                 ndlp->nlp_type |= NLP_FCP_TARGET;
997                         if (npr->prliType == PRLI_NVME_TYPE)
998                                 ndlp->nlp_type |= NLP_NVME_TARGET;
999                         if (npr->writeXferRdyDis)
1000                                 ndlp->nlp_flag |= NLP_FIRSTBURST;
1001                 }
1002                 if (npr->Retry && ndlp->nlp_type &
1003                                         (NLP_FCP_INITIATOR | NLP_FCP_TARGET))
1004                         ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
1005
1006                 if (npr->Retry && phba->nsler &&
1007                     ndlp->nlp_type & (NLP_NVME_INITIATOR | NLP_NVME_TARGET))
1008                         ndlp->nlp_nvme_info |= NLP_NVME_NSLER;
1009
1010
1011                 /* If this driver is in nvme target mode, set the ndlp's fc4
1012                  * type to NVME provided the received PRLI claims NVME FC4
1013                  * type.  Target mode does not issue gft_id so doesn't get
1014                  * the fc4 type set until now.
1015                  */
1016                 if (phba->nvmet_support && (npr->prliType == PRLI_NVME_TYPE)) {
1017                         ndlp->nlp_fc4_type |= NLP_FC4_NVME;
1018                         lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1019                 }
1020
1021                 /* Fabric Controllers send FCP PRLI as an initiator but should
1022                  * not get recognized as FCP type and registered with transport.
1023                  */
1024                 if (npr->prliType == PRLI_FCP_TYPE &&
1025                     !(ndlp->nlp_type & NLP_FABRIC))
1026                         ndlp->nlp_fc4_type |= NLP_FC4_FCP;
1027         }
1028         if (rport) {
1029                 /* We need to update the rport role values */
1030                 roles = FC_RPORT_ROLE_UNKNOWN;
1031                 if (ndlp->nlp_type & NLP_FCP_INITIATOR)
1032                         roles |= FC_RPORT_ROLE_FCP_INITIATOR;
1033                 if (ndlp->nlp_type & NLP_FCP_TARGET)
1034                         roles |= FC_RPORT_ROLE_FCP_TARGET;
1035
1036                 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
1037                         "rport rolechg:   role:x%x did:x%x flg:x%x",
1038                         roles, ndlp->nlp_DID, ndlp->nlp_flag);
1039
1040                 if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
1041                         fc_remote_port_rolechg(rport, roles);
1042         }
1043 }
1044
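/**
 * lpfc_disc_set_adisc - Decide whether ADISC should be used for this node
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to the node being evaluated.
 *
 * ADISC is only used when the node still has a registered RPI and, on a
 * non-pt2pt port, when cfg_use_adisc is set and either RSCN processing is
 * active or the node is an FCP-2 target.  When ADISC will not be used and
 * an RPI is registered, the RPI is unregistered so the next discovery
 * cycle uses a full PLOGI.
 *
 * Returns 1 if NLP_NPR_ADISC was set, 0 otherwise.
 **/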
1045 static uint32_t
1046 lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1047 {
1048         if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
1049                 spin_lock_irq(&ndlp->lock);
1050                 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
1051                 spin_unlock_irq(&ndlp->lock);
1052                 return 0;
1053         }
1054
1055         if (!test_bit(FC_PT2PT, &vport->fc_flag)) {
1056                 /* Check config parameter use-adisc or FCP-2 */
1057                 if (vport->cfg_use_adisc &&
1058                     (test_bit(FC_RSCN_MODE, &vport->fc_flag) ||
1059                     ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
1060                      (ndlp->nlp_type & NLP_FCP_TARGET)))) {
1061                         spin_lock_irq(&ndlp->lock);
1062                         ndlp->nlp_flag |= NLP_NPR_ADISC;
1063                         spin_unlock_irq(&ndlp->lock);
1064                         return 1;
1065                 }
1066         }
1067
1068         spin_lock_irq(&ndlp->lock);
1069         ndlp->nlp_flag &= ~NLP_NPR_ADISC;
1070         spin_unlock_irq(&ndlp->lock);
1071         lpfc_unreg_rpi(vport, ndlp);
1072         return 0;
1073 }
1074
1075 /**
1076  * lpfc_release_rpi - Release an RPI by issuing an unreg_login mailbox cmd.
1077  * @phba : Pointer to lpfc_hba structure.
1078  * @vport: Pointer to lpfc_vport structure.
1079  * @ndlp: Pointer to lpfc_nodelist structure.
1080  * @rpi  : RPI to be released.
1081  *
1082  * This function sends an unreg_login mailbox command to the firmware
1083  * to release an RPI.
1084  **/
1085 static void
1086 lpfc_release_rpi(struct lpfc_hba *phba, struct lpfc_vport *vport,
1087                  struct lpfc_nodelist *ndlp, uint16_t rpi)
1088 {
1089         LPFC_MBOXQ_t *pmb;
1090         int rc;
1091
1092         /* If there is already an UNREG in progress for this ndlp,
1093          * no need to queue up another one.
1094          */
1095         if (ndlp->nlp_flag & NLP_UNREG_INP) {
1096                 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1097                                  "1435 release_rpi SKIP UNREG x%x on "
1098                                  "NPort x%x deferred x%x  flg x%x "
1099                                  "Data: x%px\n",
1100                                  ndlp->nlp_rpi, ndlp->nlp_DID,
1101                                  ndlp->nlp_defer_did,
1102                                  ndlp->nlp_flag, ndlp);
1103                 return;
1104         }
1105
1106         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
1107                         GFP_KERNEL);
1108         if (!pmb)
1109                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1110                                  "2796 mailbox memory allocation failed\n");
1111         else {
1112                 lpfc_unreg_login(phba, vport->vpi, rpi, pmb);
1113                 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1114                 pmb->vport = vport;
1115                 pmb->ctx_ndlp = lpfc_nlp_get(ndlp);
1116                 if (!pmb->ctx_ndlp) {
1117                         mempool_free(pmb, phba->mbox_mem_pool);
1118                         return;
1119                 }
1120
1121                 if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
1122                     (!test_bit(FC_OFFLINE_MODE, &vport->fc_flag)))
1123                         ndlp->nlp_flag |= NLP_UNREG_INP;
1124
1125                 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1126                                  "1437 release_rpi UNREG x%x "
1127                                  "on NPort x%x flg x%x\n",
1128                                  ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag);
1129
1130                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
1131                 if (rc == MBX_NOT_FINISHED) {
1132                         lpfc_nlp_put(ndlp);
1133                         mempool_free(pmb, phba->mbox_mem_pool);
1134                 }
1135         }
1136 }
1137
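/**
 * lpfc_disc_illegal - Handle an illegal discovery state machine transition
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to the node the event was delivered for.
 * @arg: event argument (mailbox pointer for REG_LOGIN completions).
 * @evt: the discovery state machine event.
 *
 * Default handler for state / event combinations the discovery state
 * machine does not allow.  If the event is a successful REG_LOGIN
 * completion, the just-registered RPI is released again.  The illegal
 * transition is logged and the node state is left unchanged.
 **/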
1138 static uint32_t
1139 lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1140                   void *arg, uint32_t evt)
1141 {
1142         struct lpfc_hba *phba;
1143         LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1144         uint16_t rpi;
1145
1146         phba = vport->phba;
1147         /* Release the RPI if a reglogin is completing */
1148         if (!test_bit(FC_UNLOADING, &phba->pport->load_flag) &&
1149             evt == NLP_EVT_CMPL_REG_LOGIN && !pmb->u.mb.mbxStatus) {
1150                 rpi = pmb->u.mb.un.varWords[0];
1151                 lpfc_release_rpi(phba, vport, ndlp, rpi);
1152         }
1153         lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1154                          "0271 Illegal State Transition: node x%x "
1155                          "event x%x, state x%x Data: x%x x%x\n",
1156                          ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
1157                          ndlp->nlp_flag);
1158         return ndlp->nlp_state;
1159 }
1160
1161 static uint32_t
1162 lpfc_cmpl_plogi_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1163                   void *arg, uint32_t evt)
1164 {
1165         /* This transition is only legal if we previously
1166          * rcv'ed a PLOGI. Since we don't want 2 discovery threads
1167          * working on the same NPortID, do nothing here so that this
1168          * discovery thread stops.
1169          */
1170         if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) {
1171                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1172                                  "0272 Illegal State Transition: node x%x "
1173                                  "event x%x, state x%x Data: x%x x%x\n",
1174                                   ndlp->nlp_DID, evt, ndlp->nlp_state,
1175                                   ndlp->nlp_rpi, ndlp->nlp_flag);
1176         }
1177         return ndlp->nlp_state;
1178 }
1179
1180 /* Start of Discovery State Machine routines */
1181
1182 static uint32_t
1183 lpfc_rcv_plogi_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1184                            void *arg, uint32_t evt)
1185 {
1186         struct lpfc_iocbq *cmdiocb;
1187
1188         cmdiocb = (struct lpfc_iocbq *) arg;
1189
1190         if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
1191                 return ndlp->nlp_state;
1192         }
1193         return NLP_STE_FREED_NODE;
1194 }
1195
1196 static uint32_t
1197 lpfc_rcv_els_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1198                          void *arg, uint32_t evt)
1199 {
1200         lpfc_issue_els_logo(vport, ndlp, 0);
1201         return ndlp->nlp_state;
1202 }
1203
1204 static uint32_t
1205 lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1206                           void *arg, uint32_t evt)
1207 {
1208         struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1209
1210         spin_lock_irq(&ndlp->lock);
1211         ndlp->nlp_flag |= NLP_LOGO_ACC;
1212         spin_unlock_irq(&ndlp->lock);
1213         lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
1214
1215         return ndlp->nlp_state;
1216 }
1217
1218 static uint32_t
1219 lpfc_cmpl_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1220                            void *arg, uint32_t evt)
1221 {
1222         return NLP_STE_FREED_NODE;
1223 }
1224
1225 static uint32_t
1226 lpfc_device_rm_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1227                            void *arg, uint32_t evt)
1228 {
1229         return NLP_STE_FREED_NODE;
1230 }
1231
1232 static uint32_t
1233 lpfc_device_recov_unused_node(struct lpfc_vport *vport,
1234                         struct lpfc_nodelist *ndlp,
1235                            void *arg, uint32_t evt)
1236 {
1237         return ndlp->nlp_state;
1238 }
1239
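/**
 * lpfc_rcv_plogi_plogi_issue - PLOGI received while our PLOGI is outstanding
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to the node that sent the PLOGI.
 * @arg: pointer to the received PLOGI iocb.
 * @evt: the discovery state machine event.
 *
 * Resolves a PLOGI collision.  If our portname is greater than or equal to
 * the remote portname, the request is rejected with LS_RJT CMD_IN_PROGRESS
 * since the remote port is expected to accept our outstanding PLOGI.
 * Otherwise the received PLOGI is processed and, if this node was waiting
 * to be discovered, the remaining PLOGIs for discovery are driven forward.
 **/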
1240 static uint32_t
1241 lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1242                            void *arg, uint32_t evt)
1243 {
1244         struct lpfc_hba   *phba = vport->phba;
1245         struct lpfc_iocbq *cmdiocb = arg;
1246         struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
1247         uint32_t *lp = (uint32_t *) pcmd->virt;
1248         struct serv_parm *sp = (struct serv_parm *) (lp + 1);
1249         struct ls_rjt stat;
1250         int port_cmp;
1251
1252         memset(&stat, 0, sizeof (struct ls_rjt));
1253
1254         /* For a PLOGI, we only accept if our portname is less
1255          * than the remote portname.
1256          */
1257         phba->fc_stat.elsLogiCol++;
1258         port_cmp = memcmp(&vport->fc_portname, &sp->portName,
1259                           sizeof(struct lpfc_name));
1260
1261         if (port_cmp >= 0) {
1262                 /* Reject this request because the remote node will
1263                  * accept ours */
1264                 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
1265                 stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
1266                 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
1267                         NULL);
1268         } else {
1269                 if (lpfc_rcv_plogi(vport, ndlp, cmdiocb) &&
1270                     (ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
1271                     (vport->num_disc_nodes)) {
1272                         spin_lock_irq(&ndlp->lock);
1273                         ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1274                         spin_unlock_irq(&ndlp->lock);
1275                         /* Check if there are more PLOGIs to be sent */
1276                         lpfc_more_plogi(vport);
1277                         if (vport->num_disc_nodes == 0) {
1278                                 clear_bit(FC_NDISC_ACTIVE, &vport->fc_flag);
1279                                 lpfc_can_disctmo(vport);
1280                                 lpfc_end_rscn(vport);
1281                         }
1282                 }
1283         } /* If our portname was less */
1284
1285         return ndlp->nlp_state;
1286 }
1287
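/* PRLI received before our PLOGI has completed; reject it as logically busy. */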
1288 static uint32_t
1289 lpfc_rcv_prli_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1290                           void *arg, uint32_t evt)
1291 {
1292         struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1293         struct ls_rjt     stat;
1294
1295         memset(&stat, 0, sizeof (struct ls_rjt));
1296         stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
1297         stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
1298         lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
1299         return ndlp->nlp_state;
1300 }
1301
1302 static uint32_t
1303 lpfc_rcv_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1304                           void *arg, uint32_t evt)
1305 {
1306         struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1307
1308         /* Retrieve RPI from LOGO IOCB. RPI is used for CMD_ABORT_XRI_CN */
1309         if (vport->phba->sli_rev == LPFC_SLI_REV3)
1310                 ndlp->nlp_rpi = cmdiocb->iocb.ulpIoTag;
1311         /* software abort outstanding PLOGI */
1312         lpfc_els_abort(vport->phba, ndlp);
1313
1314         lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1315         return ndlp->nlp_state;
1316 }
1317
1318 static uint32_t
1319 lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1320                          void *arg, uint32_t evt)
1321 {
1322         struct lpfc_hba   *phba = vport->phba;
1323         struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1324
1325         /* software abort outstanding PLOGI */
1326         lpfc_els_abort(phba, ndlp);
1327
1328         if (evt == NLP_EVT_RCV_LOGO) {
1329                 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
1330         } else {
1331                 lpfc_issue_els_logo(vport, ndlp, 0);
1332         }
1333
1334         /* Put ndlp in NPR state and set the PLOGI timer for 1 sec */
1335         mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1));
1336         spin_lock_irq(&ndlp->lock);
1337         ndlp->nlp_flag |= NLP_DELAY_TMO;
1338         spin_unlock_irq(&ndlp->lock);
1339         ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
1340         ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
1341         lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1342
1343         return ndlp->nlp_state;
1344 }
1345
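/* Completion handler for a PLOGI issued from the PLOGI_ISSUE state.  On
 * success, validate the returned service parameters, record the remote
 * port's class support and max frame size, handle pt2pt EDTOV/RATOV
 * updates, and issue REG_LOGIN for the RPI.  Any failure sends the node
 * to NPR state.
 */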
1346 static uint32_t
1347 lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
1348                             struct lpfc_nodelist *ndlp,
1349                             void *arg,
1350                             uint32_t evt)
1351 {
1352         struct lpfc_hba    *phba = vport->phba;
1353         struct lpfc_iocbq  *cmdiocb, *rspiocb;
1354         struct lpfc_dmabuf *pcmd, *prsp;
1355         uint32_t *lp;
1356         uint32_t vid, flag;
1357         struct serv_parm *sp;
1358         uint32_t ed_tov;
1359         LPFC_MBOXQ_t *mbox;
1360         int rc;
1361         u32 ulp_status;
1362         u32 did;
1363
1364         cmdiocb = (struct lpfc_iocbq *) arg;
1365         rspiocb = cmdiocb->rsp_iocb;
1366
1367         ulp_status = get_job_ulpstatus(phba, rspiocb);
1368
1369         if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
1370                 /* Recovery from PLOGI collision logic */
1371                 return ndlp->nlp_state;
1372         }
1373
1374         if (ulp_status)
1375                 goto out;
1376
1377         pcmd = cmdiocb->cmd_dmabuf;
1378
1379         prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
1380         if (!prsp)
1381                 goto out;
1382
1383         lp = (uint32_t *) prsp->virt;
1384         sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
1385
1386         /* Some switches have FDMI servers returning 0 for WWN */
1387         if ((ndlp->nlp_DID != FDMI_DID) &&
1388                 (wwn_to_u64(sp->portName.u.wwn) == 0 ||
1389                 wwn_to_u64(sp->nodeName.u.wwn) == 0)) {
1390                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1391                                  "0142 PLOGI RSP: Invalid WWN.\n");
1392                 goto out;
1393         }
1394         if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0))
1395                 goto out;
1396         /* PLOGI chkparm OK */
1397         lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1398                          "0121 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
1399                          ndlp->nlp_DID, ndlp->nlp_state,
1400                          ndlp->nlp_flag, ndlp->nlp_rpi);
1401         if (vport->cfg_fcp_class == 2 && (sp->cls2.classValid))
1402                 ndlp->nlp_fcp_info |= CLASS2;
1403         else
1404                 ndlp->nlp_fcp_info |= CLASS3;
1405
1406         ndlp->nlp_class_sup = 0;
1407         if (sp->cls1.classValid)
1408                 ndlp->nlp_class_sup |= FC_COS_CLASS1;
1409         if (sp->cls2.classValid)
1410                 ndlp->nlp_class_sup |= FC_COS_CLASS2;
1411         if (sp->cls3.classValid)
1412                 ndlp->nlp_class_sup |= FC_COS_CLASS3;
1413         if (sp->cls4.classValid)
1414                 ndlp->nlp_class_sup |= FC_COS_CLASS4;
1415         ndlp->nlp_maxframe =
1416                 ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
1417
1418         if (test_bit(FC_PT2PT, &vport->fc_flag) &&
1419             test_bit(FC_PT2PT_PLOGI, &vport->fc_flag)) {
1420                 ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
1421                 if (sp->cmn.edtovResolution) {
1422                         /* E_D_TOV ticks are in nanoseconds; convert to ms */
1423                         ed_tov = (ed_tov + 999999) / 1000000;
1424                 }
1425
1426                 ndlp->nlp_flag &= ~NLP_SUPPRESS_RSP;
1427                 if ((phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) &&
1428                     sp->cmn.valid_vendor_ver_level) {
1429                         vid = be32_to_cpu(sp->un.vv.vid);
1430                         flag = be32_to_cpu(sp->un.vv.flags);
1431                         if ((vid == LPFC_VV_EMLX_ID) &&
1432                             (flag & LPFC_VV_SUPPRESS_RSP))
1433                                 ndlp->nlp_flag |= NLP_SUPPRESS_RSP;
1434                 }
1435
1436                 /*
1437                  * Use the larger EDTOV
1438                  * RATOV = 2 * EDTOV for pt-to-pt
1439                  */
1440                 if (ed_tov > phba->fc_edtov)
1441                         phba->fc_edtov = ed_tov;
1442                 phba->fc_ratov = (2 * phba->fc_edtov) / 1000;
1443
1444                 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
1445
1446                 /* Issue config_link / reg_vfi to account for updated TOV's */
1447                 if (phba->sli_rev == LPFC_SLI_REV4) {
1448                         lpfc_issue_reg_vfi(vport);
1449                 } else {
1450                         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1451                         if (!mbox) {
1452                                 lpfc_printf_vlog(vport, KERN_ERR,
1453                                                  LOG_TRACE_EVENT,
1454                                                  "0133 PLOGI: no memory "
1455                                                  "for config_link "
1456                                                  "Data: x%x x%x x%x x%x\n",
1457                                                  ndlp->nlp_DID, ndlp->nlp_state,
1458                                                  ndlp->nlp_flag, ndlp->nlp_rpi);
1459                                 goto out;
1460                         }
1461
1462                         lpfc_config_link(phba, mbox);
1463
1464                         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1465                         mbox->vport = vport;
1466                         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
1467                         if (rc == MBX_NOT_FINISHED) {
1468                                 mempool_free(mbox, phba->mbox_mem_pool);
1469                                 goto out;
1470                         }
1471                 }
1472         }
1473
1474         lpfc_unreg_rpi(vport, ndlp);
1475
1476         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1477         if (!mbox) {
1478                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1479                                  "0018 PLOGI: no memory for reg_login "
1480                                  "Data: x%x x%x x%x x%x\n",
1481                                  ndlp->nlp_DID, ndlp->nlp_state,
1482                                  ndlp->nlp_flag, ndlp->nlp_rpi);
1483                 goto out;
1484         }
1485
1486         did = get_job_els_rsp64_did(phba, cmdiocb);
1487
1488         if (lpfc_reg_rpi(phba, vport->vpi, did,
1489                          (uint8_t *) sp, mbox, ndlp->nlp_rpi) == 0) {
1490                 switch (ndlp->nlp_DID) {
1491                 case NameServer_DID:
1492                         mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login;
1493                         /* Fabric Controller Node needs these parameters. */
1494                         memcpy(&ndlp->fc_sparam, sp, sizeof(struct serv_parm));
1495                         break;
1496                 case FDMI_DID:
1497                         mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login;
1498                         break;
1499                 default:
1500                         ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
1501                         mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
1502                 }
1503
1504                 mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
1505                 if (!mbox->ctx_ndlp)
1506                         goto out;
1507
1508                 mbox->vport = vport;
1509                 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
1510                     != MBX_NOT_FINISHED) {
1511                         lpfc_nlp_set_state(vport, ndlp,
1512                                            NLP_STE_REG_LOGIN_ISSUE);
1513                         return ndlp->nlp_state;
1514                 }
1515                 if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
1516                         ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
1517                 /* Decrement the node reference count taken for the failed
1518                  * mbox command.
1519                  */
1520                 lpfc_nlp_put(ndlp);
1521                 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
1522                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1523                                  "0134 PLOGI: cannot issue reg_login "
1524                                  "Data: x%x x%x x%x x%x\n",
1525                                  ndlp->nlp_DID, ndlp->nlp_state,
1526                                  ndlp->nlp_flag, ndlp->nlp_rpi);
1527         } else {
1528                 mempool_free(mbox, phba->mbox_mem_pool);
1529
1530                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1531                                  "0135 PLOGI: cannot format reg_login "
1532                                  "Data: x%x x%x x%x x%x\n",
1533                                  ndlp->nlp_DID, ndlp->nlp_state,
1534                                  ndlp->nlp_flag, ndlp->nlp_rpi);
1535         }
1536
1537
1538 out:
1539         if (ndlp->nlp_DID == NameServer_DID) {
1540                 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1541                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1542                                  "0261 Cannot Register NameServer login\n");
1543         }
1544
1545         /*
1546          * In case the node reference counter does not go to zero, ensure that
1547          * the stale state for the node is not processed.
1548          */
1549
1550         ndlp->nlp_prev_state = ndlp->nlp_state;
1551         lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1552         return NLP_STE_FREED_NODE;
1553 }
1554
1555 static uint32_t
1556 lpfc_cmpl_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1557                            void *arg, uint32_t evt)
1558 {
1559         return ndlp->nlp_state;
1560 }
1561
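/* A REG_LOGIN completed while the node is back in PLOGI_ISSUE state;
 * release the RPI it obtained unless the mailbox failed or the port is
 * unloading.
 */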
1562 static uint32_t
1563 lpfc_cmpl_reglogin_plogi_issue(struct lpfc_vport *vport,
1564         struct lpfc_nodelist *ndlp, void *arg, uint32_t evt)
1565 {
1566         struct lpfc_hba *phba;
1567         LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1568         MAILBOX_t *mb = &pmb->u.mb;
1569         uint16_t rpi;
1570
1571         phba = vport->phba;
1572         /* Release the RPI */
1573         if (!test_bit(FC_UNLOADING, &phba->pport->load_flag) &&
1574             !mb->mbxStatus) {
1575                 rpi = pmb->u.mb.un.varWords[0];
1576                 lpfc_release_rpi(phba, vport, ndlp, rpi);
1577         }
1578         return ndlp->nlp_state;
1579 }
1580
1581 static uint32_t
1582 lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1583                            void *arg, uint32_t evt)
1584 {
1585         if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1586                 spin_lock_irq(&ndlp->lock);
1587                 ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1588                 spin_unlock_irq(&ndlp->lock);
1589                 return ndlp->nlp_state;
1590         } else {
1591                 /* software abort outstanding PLOGI */
1592                 lpfc_els_abort(vport->phba, ndlp);
1593
1594                 lpfc_drop_node(vport, ndlp);
1595                 return NLP_STE_FREED_NODE;
1596         }
1597 }
1598
1599 static uint32_t
1600 lpfc_device_recov_plogi_issue(struct lpfc_vport *vport,
1601                               struct lpfc_nodelist *ndlp,
1602                               void *arg,
1603                               uint32_t evt)
1604 {
1605         struct lpfc_hba  *phba = vport->phba;
1606
1607         /* Don't do anything that will mess up processing of the
1608          * previous RSCN.
1609          */
1610         if (test_bit(FC_RSCN_DEFERRED, &vport->fc_flag))
1611                 return ndlp->nlp_state;
1612
1613         /* software abort outstanding PLOGI */
1614         lpfc_els_abort(phba, ndlp);
1615
1616         ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
1617         lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1618         spin_lock_irq(&ndlp->lock);
1619         ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1620         spin_unlock_irq(&ndlp->lock);
1621
1622         return ndlp->nlp_state;
1623 }
1624
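/* PLOGI received while an ADISC is outstanding: abort the ADISC and
 * process the PLOGI.  If the PLOGI is not accepted, issue our own PLOGI
 * instead.
 */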
1625 static uint32_t
1626 lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1627                            void *arg, uint32_t evt)
1628 {
1629         struct lpfc_hba   *phba = vport->phba;
1630         struct lpfc_iocbq *cmdiocb;
1631
1632         /* software abort outstanding ADISC */
1633         lpfc_els_abort(phba, ndlp);
1634
1635         cmdiocb = (struct lpfc_iocbq *) arg;
1636
1637         if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
1638                 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1639                         spin_lock_irq(&ndlp->lock);
1640                         ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1641                         spin_unlock_irq(&ndlp->lock);
1642                         if (vport->num_disc_nodes)
1643                                 lpfc_more_adisc(vport);
1644                 }
1645                 return ndlp->nlp_state;
1646         }
1647         ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1648         lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
1649         lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
1650
1651         return ndlp->nlp_state;
1652 }
1653
1654 static uint32_t
1655 lpfc_rcv_prli_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1656                           void *arg, uint32_t evt)
1657 {
1658         struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1659
1660         if (lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
1661                 lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1662         return ndlp->nlp_state;
1663 }
1664
1665 static uint32_t
1666 lpfc_rcv_logo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1667                           void *arg, uint32_t evt)
1668 {
1669         struct lpfc_hba *phba = vport->phba;
1670         struct lpfc_iocbq *cmdiocb;
1671
1672         cmdiocb = (struct lpfc_iocbq *) arg;
1673
1674         /* software abort outstanding ADISC */
1675         lpfc_els_abort(phba, ndlp);
1676
1677         lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1678         return ndlp->nlp_state;
1679 }
1680
1681 static uint32_t
1682 lpfc_rcv_padisc_adisc_issue(struct lpfc_vport *vport,
1683                             struct lpfc_nodelist *ndlp,
1684                             void *arg, uint32_t evt)
1685 {
1686         struct lpfc_iocbq *cmdiocb;
1687
1688         cmdiocb = (struct lpfc_iocbq *) arg;
1689
1690         lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1691         return ndlp->nlp_state;
1692 }
1693
1694 static uint32_t
1695 lpfc_rcv_prlo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1696                           void *arg, uint32_t evt)
1697 {
1698         struct lpfc_iocbq *cmdiocb;
1699
1700         cmdiocb = (struct lpfc_iocbq *) arg;
1701
1702         /* Treat like rcv logo */
1703         lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
1704         return ndlp->nlp_state;
1705 }
1706
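/* ADISC completion.  A failed or mismatched ADISC unregisters the RPI
 * and parks the node in NPR with a delayed PLOGI retry; a good ADISC
 * moves FCP/NVME targets to MAPPED and all other nodes to UNMAPPED.
 */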
1707 static uint32_t
1708 lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
1709                             struct lpfc_nodelist *ndlp,
1710                             void *arg, uint32_t evt)
1711 {
1712         struct lpfc_hba   *phba = vport->phba;
1713         struct lpfc_iocbq *cmdiocb, *rspiocb;
1714         ADISC *ap;
1715         int rc;
1716         u32 ulp_status;
1717
1718         cmdiocb = (struct lpfc_iocbq *) arg;
1719         rspiocb = cmdiocb->rsp_iocb;
1720
1721         ulp_status = get_job_ulpstatus(phba, rspiocb);
1722
1723         ap = (ADISC *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
1724
1725         if ((ulp_status) ||
1726             (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) {
1727                 /* 1 sec timeout */
1728                 mod_timer(&ndlp->nlp_delayfunc,
1729                           jiffies + msecs_to_jiffies(1000));
1730                 spin_lock_irq(&ndlp->lock);
1731                 ndlp->nlp_flag |= NLP_DELAY_TMO;
1732                 spin_unlock_irq(&ndlp->lock);
1733                 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
1734
1735                 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1736                 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1737                 lpfc_unreg_rpi(vport, ndlp);
1738                 return ndlp->nlp_state;
1739         }
1740
1741         if (phba->sli_rev == LPFC_SLI_REV4) {
1742                 rc = lpfc_sli4_resume_rpi(ndlp, NULL, NULL);
1743                 if (rc) {
1744                         /* Stay in state and retry. */
1745                         ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1746                         return ndlp->nlp_state;
1747                 }
1748         }
1749
1750         if (ndlp->nlp_type & NLP_FCP_TARGET)
1751                 ndlp->nlp_fc4_type |= NLP_FC4_FCP;
1752
1753         if (ndlp->nlp_type & NLP_NVME_TARGET)
1754                 ndlp->nlp_fc4_type |= NLP_FC4_NVME;
1755
1756         if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET)) {
1757                 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1758                 lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
1759         } else {
1760                 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1761                 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1762         }
1763
1764         return ndlp->nlp_state;
1765 }
1766
1767 static uint32_t
1768 lpfc_device_rm_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1769                            void *arg, uint32_t evt)
1770 {
1771         if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1772                 spin_lock_irq(&ndlp->lock);
1773                 ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1774                 spin_unlock_irq(&ndlp->lock);
1775                 return ndlp->nlp_state;
1776         } else {
1777                 /* software abort outstanding ADISC */
1778                 lpfc_els_abort(vport->phba, ndlp);
1779
1780                 lpfc_drop_node(vport, ndlp);
1781                 return NLP_STE_FREED_NODE;
1782         }
1783 }
1784
1785 static uint32_t
1786 lpfc_device_recov_adisc_issue(struct lpfc_vport *vport,
1787                               struct lpfc_nodelist *ndlp,
1788                               void *arg,
1789                               uint32_t evt)
1790 {
1791         struct lpfc_hba  *phba = vport->phba;
1792
1793         /* Don't do anything that will mess up processing of the
1794          * previous RSCN.
1795          */
1796         if (test_bit(FC_RSCN_DEFERRED, &vport->fc_flag))
1797                 return ndlp->nlp_state;
1798
1799         /* software abort outstanding ADISC */
1800         lpfc_els_abort(phba, ndlp);
1801
1802         ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1803         lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1804         spin_lock_irq(&ndlp->lock);
1805         ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1806         spin_unlock_irq(&ndlp->lock);
1807         lpfc_disc_set_adisc(vport, ndlp);
1808         return ndlp->nlp_state;
1809 }
1810
1811 static uint32_t
1812 lpfc_rcv_plogi_reglogin_issue(struct lpfc_vport *vport,
1813                               struct lpfc_nodelist *ndlp,
1814                               void *arg,
1815                               uint32_t evt)
1816 {
1817         struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1818
1819         lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1820         return ndlp->nlp_state;
1821 }
1822
1823 static uint32_t
1824 lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport,
1825                              struct lpfc_nodelist *ndlp,
1826                              void *arg,
1827                              uint32_t evt)
1828 {
1829         struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1830         struct ls_rjt     stat;
1831
1832         if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb)) {
1833                 return ndlp->nlp_state;
1834         }
1835         if (vport->phba->nvmet_support) {
1836                 /* NVME Target mode.  Handle and respond to the PRLI and
1837                  * transition to UNMAPPED provided the RPI has completed
1838                  * registration.
1839                  */
1840                 if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
1841                         lpfc_rcv_prli(vport, ndlp, cmdiocb);
1842                         lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1843                 } else {
1844                         /* RPI registration has not completed. Reject the PRLI
1845                          * to prevent an illegal state transition when the
1846                          * rpi registration does complete.
1847                          */
1848                         memset(&stat, 0, sizeof(struct ls_rjt));
1849                         stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
1850                         stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
1851                         lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
1852                                             ndlp, NULL);
1853                         return ndlp->nlp_state;
1854                 }
1855         } else {
1856                 /* Initiator mode. */
1857                 lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1858         }
1859         return ndlp->nlp_state;
1860 }
1861
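/* LOGO received while REG_LOGIN is pending.  Clean up any REG_LOGIN64
 * mailbox commands queued for this node, then process the LOGO.
 */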
1862 static uint32_t
1863 lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
1864                              struct lpfc_nodelist *ndlp,
1865                              void *arg,
1866                              uint32_t evt)
1867 {
1868         struct lpfc_hba   *phba = vport->phba;
1869         struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1870         LPFC_MBOXQ_t      *mb;
1871         LPFC_MBOXQ_t      *nextmb;
1872
1875         /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
1876         if ((mb = phba->sli.mbox_active)) {
1877                 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
1878                    (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
1879                         ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
1880                         lpfc_nlp_put(ndlp);
1881                         mb->ctx_ndlp = NULL;
1882                         mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1883                 }
1884         }
1885
1886         spin_lock_irq(&phba->hbalock);
1887         list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1888                 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
1889                    (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
1890                         ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
1891                         lpfc_nlp_put(ndlp);
1892                         list_del(&mb->list);
1893                         phba->sli.mboxq_cnt--;
1894                         lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_LOCKED);
1895                 }
1896         }
1897         spin_unlock_irq(&phba->hbalock);
1898
1899         lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1900         return ndlp->nlp_state;
1901 }
1902
1903 static uint32_t
1904 lpfc_rcv_padisc_reglogin_issue(struct lpfc_vport *vport,
1905                                struct lpfc_nodelist *ndlp,
1906                                void *arg,
1907                                uint32_t evt)
1908 {
1909         struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1910
1911         lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1912         return ndlp->nlp_state;
1913 }
1914
1915 static uint32_t
1916 lpfc_rcv_prlo_reglogin_issue(struct lpfc_vport *vport,
1917                              struct lpfc_nodelist *ndlp,
1918                              void *arg,
1919                              uint32_t evt)
1920 {
1921         struct lpfc_iocbq *cmdiocb;
1922
1923         cmdiocb = (struct lpfc_iocbq *) arg;
1924         lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
1925         return ndlp->nlp_state;
1926 }
1927
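/* REG_LOGIN completion.  On mailbox failure, retry discovery with a
 * delayed PLOGI unless the HBA has run out of RPIs.  On success, record
 * the RPI and either issue PRLI for non-fabric nports (querying GFT_ID
 * first when the FC4 type is still unknown) or move fabric nodes to
 * UNMAPPED.
 */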
1928 static uint32_t
1929 lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
1930                                   struct lpfc_nodelist *ndlp,
1931                                   void *arg,
1932                                   uint32_t evt)
1933 {
1934         struct lpfc_hba *phba = vport->phba;
1935         LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1936         MAILBOX_t *mb = &pmb->u.mb;
1937         uint32_t did  = mb->un.varWords[1];
1938
1939         if (mb->mbxStatus) {
1940                 /* RegLogin failed */
1941                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1942                                  "0246 RegLogin failed Data: x%x x%x x%x x%x "
1943                                  "x%x\n",
1944                                  did, mb->mbxStatus, vport->port_state,
1945                                  mb->un.varRegLogin.vpi,
1946                                  mb->un.varRegLogin.rpi);
1947                 /*
1948                  * If RegLogin failed due to lack of HBA resources do not
1949                  * retry discovery.
1950                  */
1951                 if (mb->mbxStatus == MBXERR_RPI_FULL) {
1952                         ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1953                         lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1954                         return ndlp->nlp_state;
1955                 }
1956
1957                 /* Put ndlp in NPR state and set the PLOGI timer for 1 sec */
1958                 mod_timer(&ndlp->nlp_delayfunc,
1959                           jiffies + msecs_to_jiffies(1000 * 1));
1960                 spin_lock_irq(&ndlp->lock);
1961                 ndlp->nlp_flag |= NLP_DELAY_TMO;
1962                 spin_unlock_irq(&ndlp->lock);
1963                 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
1964
1965                 lpfc_issue_els_logo(vport, ndlp, 0);
1966                 return ndlp->nlp_state;
1967         }
1968
1969         /* SLI4 ports have preallocated logical rpis. */
1970         if (phba->sli_rev < LPFC_SLI_REV4)
1971                 ndlp->nlp_rpi = mb->un.varWords[0];
1972
1973         ndlp->nlp_flag |= NLP_RPI_REGISTERED;
1974
1975         /* Only if we are not a fabric nport do we issue PRLI */
1976         lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1977                          "3066 RegLogin Complete on x%x x%x x%x\n",
1978                          did, ndlp->nlp_type, ndlp->nlp_fc4_type);
1979         if (!(ndlp->nlp_type & NLP_FABRIC) &&
1980             (phba->nvmet_support == 0)) {
1981                 /* The driver supports FCP and NVME concurrently.  If the
1982                  * ndlp's nlp_fc4_type is still zero, the driver doesn't
1983                  * know what PRLI to send yet.  Figure that out now and
1984                  * call PRLI depending on the outcome.
1985                  */
1986                 if (test_bit(FC_PT2PT, &vport->fc_flag)) {
1987                         /* If we are pt2pt, there is no Fabric to determine
1988                          * the FC4 type of the remote nport. So if NVME
1989                          * is configured, try it.
1990                          */
1991                         ndlp->nlp_fc4_type |= NLP_FC4_FCP;
1992                         if ((!test_bit(FC_PT2PT_NO_NVME, &vport->fc_flag)) &&
1993                             (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH ||
1994                             vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
1995                                 ndlp->nlp_fc4_type |= NLP_FC4_NVME;
1996                                 /* We need to update the localport also */
1997                                 lpfc_nvme_update_localport(vport);
1998                         }
1999
2000                 } else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
2001                         ndlp->nlp_fc4_type |= NLP_FC4_FCP;
2002
2003                 } else if (ndlp->nlp_fc4_type == 0) {
2004                         /* If we are only configured for FCP, the driver
2005                          * should just issue PRLI for FCP. Otherwise issue
2006                          * GFT_ID to determine if remote port supports NVME.
2007                          */
2008                         if (vport->cfg_enable_fc4_type != LPFC_ENABLE_FCP) {
2009                                 lpfc_ns_cmd(vport, SLI_CTNS_GFT_ID, 0,
2010                                             ndlp->nlp_DID);
2011                                 return ndlp->nlp_state;
2012                         }
2013                         ndlp->nlp_fc4_type = NLP_FC4_FCP;
2014                 }
2015
2016                 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
2017                 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
2018                 if (lpfc_issue_els_prli(vport, ndlp, 0)) {
2019                         lpfc_issue_els_logo(vport, ndlp, 0);
2020                         ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
2021                         lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2022                 }
2023         } else {
2024                 if (test_bit(FC_PT2PT, &vport->fc_flag) && phba->nvmet_support)
2025                         phba->targetport->port_id = vport->fc_myDID;
2026
2027                 /* Only Fabric ports should transition. NVME target
2028                  * must complete PRLI.
2029                  */
2030                 if (ndlp->nlp_type & NLP_FABRIC) {
2031                         ndlp->nlp_fc4_type &= ~NLP_FC4_FCP;
2032                         ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
2033                         lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
2034                 }
2035         }
2036         return ndlp->nlp_state;
2037 }
2038
2039 static uint32_t
2040 lpfc_device_rm_reglogin_issue(struct lpfc_vport *vport,
2041                               struct lpfc_nodelist *ndlp,
2042                               void *arg,
2043                               uint32_t evt)
2044 {
2045         if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
2046                 spin_lock_irq(&ndlp->lock);
2047                 ndlp->nlp_flag |= NLP_NODEV_REMOVE;
2048                 spin_unlock_irq(&ndlp->lock);
2049                 return ndlp->nlp_state;
2050         } else {
2051                 lpfc_drop_node(vport, ndlp);
2052                 return NLP_STE_FREED_NODE;
2053         }
2054 }
2055
2056 static uint32_t
2057 lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
2058                                  struct lpfc_nodelist *ndlp,
2059                                  void *arg,
2060                                  uint32_t evt)
2061 {
2062         /* Don't do anything that will mess up processing of the
2063          * previous RSCN.
2064          */
2065         if (test_bit(FC_RSCN_DEFERRED, &vport->fc_flag))
2066                 return ndlp->nlp_state;
2067
2068         ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
2069         lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2070         spin_lock_irq(&ndlp->lock);
2071
2072         /* If we are a target we won't immediately transition into PRLI,
2073          * so if REG_LOGIN already completed we don't need to ignore it.
2074          */
2075         if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED) ||
2076             !vport->phba->nvmet_support)
2077                 ndlp->nlp_flag |= NLP_IGNR_REG_CMPL;
2078
2079         ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
2080         spin_unlock_irq(&ndlp->lock);
2081         lpfc_disc_set_adisc(vport, ndlp);
2082         return ndlp->nlp_state;
2083 }
2084
2085 static uint32_t
2086 lpfc_rcv_plogi_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2087                           void *arg, uint32_t evt)
2088 {
2089         struct lpfc_iocbq *cmdiocb;
2090
2091         cmdiocb = (struct lpfc_iocbq *) arg;
2092
2093         lpfc_rcv_plogi(vport, ndlp, cmdiocb);
2094         return ndlp->nlp_state;
2095 }
2096
2097 static uint32_t
2098 lpfc_rcv_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2099                          void *arg, uint32_t evt)
2100 {
2101         struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2102
2103         if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
2104                 return ndlp->nlp_state;
2105         lpfc_rcv_prli(vport, ndlp, cmdiocb);
2106         lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
2107         return ndlp->nlp_state;
2108 }
2109
2110 static uint32_t
2111 lpfc_rcv_logo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2112                          void *arg, uint32_t evt)
2113 {
2114         struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2115
2116         /* Software abort outstanding PRLI before sending acc */
2117         lpfc_els_abort(vport->phba, ndlp);
2118
2119         lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
2120         return ndlp->nlp_state;
2121 }
2122
2123 static uint32_t
2124 lpfc_rcv_padisc_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2125                            void *arg, uint32_t evt)
2126 {
2127         struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2128
2129         lpfc_rcv_padisc(vport, ndlp, cmdiocb);
2130         return ndlp->nlp_state;
2131 }
2132
2133 /* This routine is invoked when we receive a PRLO request from a nport
2134  * we are logged into.  We should send back a PRLO rsp setting the
2135  * appropriate bits.
2136  * NEXT STATE = PRLI_ISSUE
2137  */
2138 static uint32_t
2139 lpfc_rcv_prlo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2140                          void *arg, uint32_t evt)
2141 {
2142         struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2143
2144         lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
2145         return ndlp->nlp_state;
2146 }
2147
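/* PRLI completion.  Decode either the FCP or the NVME PRLI response,
 * update the node's roles and first-burst settings, and move the node
 * to MAPPED or UNMAPPED once all outstanding PRLIs have completed.
 */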
2148 static uint32_t
2149 lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2150                           void *arg, uint32_t evt)
2151 {
2152         struct lpfc_iocbq *cmdiocb, *rspiocb;
2153         struct lpfc_hba   *phba = vport->phba;
2154         PRLI *npr;
2155         struct lpfc_nvme_prli *nvpr;
2156         void *temp_ptr;
2157         u32 ulp_status;
2158         bool acc_imode_sps = false;
2159
2160         cmdiocb = (struct lpfc_iocbq *) arg;
2161         rspiocb = cmdiocb->rsp_iocb;
2162
2163         ulp_status = get_job_ulpstatus(phba, rspiocb);
2164
2165         /* A solicited PRLI is either FCP or NVME.  The PRLI cmd/rsp
2166          * format is different so NULL the two PRLI types so that the
2167          * driver correctly gets the correct context.
2168          */
2169         npr = NULL;
2170         nvpr = NULL;
2171         temp_ptr = lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
2172         if (cmdiocb->cmd_flag & LPFC_PRLI_FCP_REQ)
2173                 npr = (PRLI *) temp_ptr;
2174         else if (cmdiocb->cmd_flag & LPFC_PRLI_NVME_REQ)
2175                 nvpr = (struct lpfc_nvme_prli *) temp_ptr;
2176
2177         if (ulp_status) {
2178                 if ((vport->port_type == LPFC_NPIV_PORT) &&
2179                     vport->cfg_restrict_login) {
2180                         goto out;
2181                 }
2182
2183                 /* Adjust the nlp_type accordingly if the PRLI failed */
2184                 if (npr)
2185                         ndlp->nlp_fc4_type &= ~NLP_FC4_FCP;
2186                 if (nvpr)
2187                         ndlp->nlp_fc4_type &= ~NLP_FC4_NVME;
2188
2189                 /* We can't set the DSM state till BOTH PRLIs complete */
2190                 goto out_err;
2191         }
2192
2193         if (npr && npr->prliType == PRLI_FCP_TYPE) {
2194                 lpfc_printf_vlog(vport, KERN_INFO,
2195                                  LOG_ELS | LOG_NODE | LOG_DISCOVERY,
2196                                  "6028 FCP NPR PRLI Cmpl Init %d Target %d "
2197                                  "EIP %d AccCode x%x\n",
2198                                  npr->initiatorFunc, npr->targetFunc,
2199                                  npr->estabImagePair, npr->acceptRspCode);
2200
2201                 if (npr->acceptRspCode == PRLI_INV_SRV_PARM) {
2202                         /* Strict initiators don't establish an image pair. */
2203                         if (npr->initiatorFunc && !npr->targetFunc &&
2204                             !npr->estabImagePair)
2205                                 acc_imode_sps = true;
2206                 }
2207
2208                 if (npr->acceptRspCode == PRLI_REQ_EXECUTED || acc_imode_sps) {
2209                         if (npr->initiatorFunc)
2210                                 ndlp->nlp_type |= NLP_FCP_INITIATOR;
2211                         if (npr->targetFunc) {
2212                                 ndlp->nlp_type |= NLP_FCP_TARGET;
2213                                 if (npr->writeXferRdyDis)
2214                                         ndlp->nlp_flag |= NLP_FIRSTBURST;
2215                         }
2216                         if (npr->Retry)
2217                                 ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
2218                 }
2219         } else if (nvpr &&
2220                    (bf_get_be32(prli_acc_rsp_code, nvpr) ==
2221                     PRLI_REQ_EXECUTED) &&
2222                    (bf_get_be32(prli_type_code, nvpr) ==
2223                     PRLI_NVME_TYPE)) {
2224
2225                 /* Complete setting up the remote ndlp personality. */
2226                 if (bf_get_be32(prli_init, nvpr))
2227                         ndlp->nlp_type |= NLP_NVME_INITIATOR;
2228
2229                 if (phba->nsler && bf_get_be32(prli_nsler, nvpr) &&
2230                     bf_get_be32(prli_conf, nvpr))
2232                         ndlp->nlp_nvme_info |= NLP_NVME_NSLER;
2233                 else
2234                         ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER;
2235
2236                 /* Target driver cannot solicit NVME FB. */
2237                 if (bf_get_be32(prli_tgt, nvpr)) {
2238                         /* Complete the nvme target roles.  The transport
2239                          * needs to know if the rport is capable of
2240                          * discovery in addition to its role.
2241                          */
2242                         ndlp->nlp_type |= NLP_NVME_TARGET;
2243                         if (bf_get_be32(prli_disc, nvpr))
2244                                 ndlp->nlp_type |= NLP_NVME_DISCOVERY;
2245
2246                         /*
2247                          * If prli_fba is set, the Target supports FirstBurst.
2248                          * If prli_fb_sz is 0, the FirstBurst size is unlimited,
2249                          * otherwise it defines the actual size supported by
2250                          * the NVME Target.
2251                          */
2252                         if ((bf_get_be32(prli_fba, nvpr) == 1) &&
2253                             (phba->cfg_nvme_enable_fb) &&
2254                             (!phba->nvmet_support)) {
2255                                 /* Both sides support FB. The target's first
2256                                  * burst size is a 512 byte encoded value.
2257                                  */
2258                                 ndlp->nlp_flag |= NLP_FIRSTBURST;
2259                                 ndlp->nvme_fb_size = bf_get_be32(prli_fb_sz,
2260                                                                  nvpr);
2261
2262                                 /* Expressed in units of 512 bytes */
2263                                 if (ndlp->nvme_fb_size)
2264                                         ndlp->nvme_fb_size <<=
2265                                                 LPFC_NVME_FB_SHIFT;
2266                                 else
2267                                         ndlp->nvme_fb_size = LPFC_NVME_MAX_FB;
2268                         }
2269                 }
2270
2271                 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2272                                  "6029 NVME PRLI Cmpl w1 x%08x "
2273                                  "w4 x%08x w5 x%08x flag x%x, "
2274                                  "fcp_info x%x nlp_type x%x\n",
2275                                  be32_to_cpu(nvpr->word1),
2276                                  be32_to_cpu(nvpr->word4),
2277                                  be32_to_cpu(nvpr->word5),
2278                                  ndlp->nlp_flag, ndlp->nlp_fcp_info,
2279                                  ndlp->nlp_type);
2280         }
2281         if (!(ndlp->nlp_type & NLP_FCP_TARGET) &&
2282             (vport->port_type == LPFC_NPIV_PORT) &&
2283              vport->cfg_restrict_login) {
2284 out:
2285                 spin_lock_irq(&ndlp->lock);
2286                 ndlp->nlp_flag |= NLP_TARGET_REMOVE;
2287                 spin_unlock_irq(&ndlp->lock);
2288                 lpfc_issue_els_logo(vport, ndlp, 0);
2289
2290                 ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
2291                 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2292                 return ndlp->nlp_state;
2293         }
2294
2295 out_err:
2296         /* The ndlp state cannot move to MAPPED or UNMAPPED before all PRLIs
2297          * are complete.
2298          */
2299         if (ndlp->fc4_prli_sent == 0) {
2300                 ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
2301                 if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET))
2302                         lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
2303                 else if (ndlp->nlp_type &
2304                          (NLP_FCP_INITIATOR | NLP_NVME_INITIATOR))
2305                         lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
2306         } else
2307                 lpfc_printf_vlog(vport,
2308                                  KERN_INFO, LOG_ELS,
2309                                  "3067 PRLI's still outstanding "
2310                                  "on x%06x - count %d, Pend Node Mode "
2311                                  "transition...\n",
2312                                  ndlp->nlp_DID, ndlp->fc4_prli_sent);
2313
2314         return ndlp->nlp_state;
2315 }
2316
2317 /*! lpfc_device_rm_prli_issue
2318  *
2319  * \pre
2320  * \post
2321  * \param   phba
2322  * \param   ndlp
2323  * \param   arg
2324  * \param   evt
2325  * \return  uint32_t
2326  *
2327  * \b Description:
2328  *    This routine is invoked when we receive a request to remove an nport
2329  *    we are in the process of PRLIing.  Software abort the outstanding
2330  *    PRLI and drop the node, unless it is still marked for discovery
2331  *    (NLP_NPR_2B_DISC), in which case only flag it for removal.
2332  *
2333  */
2334
2335 static uint32_t
2336 lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2337                           void *arg, uint32_t evt)
2338 {
2339         if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
2340                 spin_lock_irq(&ndlp->lock);
2341                 ndlp->nlp_flag |= NLP_NODEV_REMOVE;
2342                 spin_unlock_irq(&ndlp->lock);
2343                 return ndlp->nlp_state;
2344         } else {
2345                 /* software abort outstanding PLOGI */
2346                 lpfc_els_abort(vport->phba, ndlp);
2347
2348                 lpfc_drop_node(vport, ndlp);
2349                 return NLP_STE_FREED_NODE;
2350         }
2351 }
2352
2353
2354 /*! lpfc_device_recov_prli_issue
2355  *
2356  * \pre
2357  * \post
2358  * \param   phba
2359  * \param   ndlp
2360  * \param   arg
2361  * \param   evt
2362  * \return  uint32_t
2363  *
2364  * \b Description:
2365  *    This routine is invoked when the state of a device is unknown, for
2366  *    example during a link down.  Unless an RSCN is being deferred,
2367  *    software abort the outstanding PRLI command and park the node in
2368  *    NPR state so that discovery can recover it later.
2369  */
2370 static uint32_t
2371 lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
2372                              struct lpfc_nodelist *ndlp,
2373                              void *arg,
2374                              uint32_t evt)
2375 {
2376         struct lpfc_hba  *phba = vport->phba;
2377
2378         /* Don't do anything that will mess up processing of the
2379          * previous RSCN.
2380          */
2381         if (test_bit(FC_RSCN_DEFERRED, &vport->fc_flag))
2382                 return ndlp->nlp_state;
2383
2384         /* software abort outstanding PRLI */
2385         lpfc_els_abort(phba, ndlp);
2386
2387         ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
2388         lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2389         spin_lock_irq(&ndlp->lock);
2390         ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
2391         spin_unlock_irq(&ndlp->lock);
2392         lpfc_disc_set_adisc(vport, ndlp);
2393         return ndlp->nlp_state;
2394 }
2395
2396 static uint32_t
2397 lpfc_rcv_plogi_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2398                           void *arg, uint32_t evt)
2399 {
2400         struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
2401         struct ls_rjt     stat;
2402
2403         memset(&stat, 0, sizeof(struct ls_rjt));
2404         stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
2405         stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
2406         lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
2407         return ndlp->nlp_state;
2408 }
2409
2410 static uint32_t
2411 lpfc_rcv_prli_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2412                          void *arg, uint32_t evt)
2413 {
2414         struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
2415         struct ls_rjt     stat;
2416
2417         memset(&stat, 0, sizeof(struct ls_rjt));
2418         stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
2419         stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
2420         lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
2421         return ndlp->nlp_state;
2422 }
2423
2424 static uint32_t
2425 lpfc_rcv_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2426                          void *arg, uint32_t evt)
2427 {
2428         struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
2429
2430         spin_lock_irq(&ndlp->lock);
2431         ndlp->nlp_flag |= NLP_LOGO_ACC;
2432         spin_unlock_irq(&ndlp->lock);
2433         lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
2434         return ndlp->nlp_state;
2435 }
2436
2437 static uint32_t
2438 lpfc_rcv_padisc_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2439                            void *arg, uint32_t evt)
2440 {
2441         struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
2442         struct ls_rjt     stat;
2443
2444         memset(&stat, 0, sizeof(struct ls_rjt));
2445         stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
2446         stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
2447         lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
2448         return ndlp->nlp_state;
2449 }
2450
2451 static uint32_t
2452 lpfc_rcv_prlo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2453                          void *arg, uint32_t evt)
2454 {
2455         struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
2456         struct ls_rjt     stat;
2457
2458         memset(&stat, 0, sizeof(struct ls_rjt));
2459         stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
2460         stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
2461         lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
2462         return ndlp->nlp_state;
2463 }
2464
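/* Our LOGO completed: park the node in NPR and let lpfc_disc_set_adisc()
 * decide how it is recovered on the next discovery.
 */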
2465 static uint32_t
2466 lpfc_cmpl_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2467                           void *arg, uint32_t evt)
2468 {
2469         ndlp->nlp_prev_state = NLP_STE_LOGO_ISSUE;
2470         lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2471         spin_lock_irq(&ndlp->lock);
2472         ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
2473         spin_unlock_irq(&ndlp->lock);
2474         lpfc_disc_set_adisc(vport, ndlp);
2475         return ndlp->nlp_state;
2476 }
2477
2478 static uint32_t
2479 lpfc_device_rm_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2480                           void *arg, uint32_t evt)
2481 {
2482         /*
2483          * DevLoss has timed out and is calling for Device Remove.
2484          * In this case, abort the LOGO and cleanup the ndlp
2485          */
2486
2487         lpfc_unreg_rpi(vport, ndlp);
2488         /* software abort outstanding PLOGI */
2489         lpfc_els_abort(vport->phba, ndlp);
2490         lpfc_drop_node(vport, ndlp);
2491         return NLP_STE_FREED_NODE;
2492 }
2493
2494 static uint32_t
2495 lpfc_device_recov_logo_issue(struct lpfc_vport *vport,
2496                              struct lpfc_nodelist *ndlp,
2497                              void *arg, uint32_t evt)
2498 {
2499         /*
2500          * Device Recovery events have no meaning for a node with a LOGO
2501          * outstanding.  The LOGO has to complete first and handle the
2502          * node from that point.
2503          */
2504         return ndlp->nlp_state;
2505 }
2506
2507 static uint32_t
2508 lpfc_rcv_plogi_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2509                           void *arg, uint32_t evt)
2510 {
2511         struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2512
2513         lpfc_rcv_plogi(vport, ndlp, cmdiocb);
2514         return ndlp->nlp_state;
2515 }
2516
2517 static uint32_t
2518 lpfc_rcv_prli_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2519                          void *arg, uint32_t evt)
2520 {
2521         struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2522
2523         if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
2524                 return ndlp->nlp_state;
2525
2526         lpfc_rcv_prli(vport, ndlp, cmdiocb);
2527         lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
2528         return ndlp->nlp_state;
2529 }
2530
2531 static uint32_t
2532 lpfc_rcv_logo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2533                          void *arg, uint32_t evt)
2534 {
2535         struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2536
2537         lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
2538         return ndlp->nlp_state;
2539 }
2540
2541 static uint32_t
2542 lpfc_rcv_padisc_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2543                            void *arg, uint32_t evt)
2544 {
2545         struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2546
2547         lpfc_rcv_padisc(vport, ndlp, cmdiocb);
2548         return ndlp->nlp_state;
2549 }
2550
2551 static uint32_t
2552 lpfc_rcv_prlo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2553                          void *arg, uint32_t evt)
2554 {
2555         struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2556
2557         lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
2558         return ndlp->nlp_state;
2559 }
2560
2561 static uint32_t
2562 lpfc_device_rm_unmap_node(struct lpfc_vport *vport,
2563                           struct lpfc_nodelist *ndlp,
2564                           void *arg,
2565                           uint32_t evt)
2566 {
2567         lpfc_drop_node(vport, ndlp);
2568         return NLP_STE_FREED_NODE;
2569 }
2570
2571 static uint32_t
2572 lpfc_device_recov_unmap_node(struct lpfc_vport *vport,
2573                              struct lpfc_nodelist *ndlp,
2574                              void *arg,
2575                              uint32_t evt)
2576 {
2577         ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE;
2578         lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2579         spin_lock_irq(&ndlp->lock);
2580         ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
2581         ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
2582         spin_unlock_irq(&ndlp->lock);
2583         lpfc_disc_set_adisc(vport, ndlp);
2584
2585         return ndlp->nlp_state;
2586 }
2587
2588 static uint32_t
2589 lpfc_rcv_plogi_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2590                            void *arg, uint32_t evt)
2591 {
2592         struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2593
2594         lpfc_rcv_plogi(vport, ndlp, cmdiocb);
2595         return ndlp->nlp_state;
2596 }
2597
2598 static uint32_t
2599 lpfc_rcv_prli_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2600                           void *arg, uint32_t evt)
2601 {
2602         struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2603
2604         if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
2605                 return ndlp->nlp_state;
2606         lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
2607         return ndlp->nlp_state;
2608 }
2609
2610 static uint32_t
2611 lpfc_rcv_logo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2612                           void *arg, uint32_t evt)
2613 {
2614         struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2615
2616         lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
2617         return ndlp->nlp_state;
2618 }
2619
2620 static uint32_t
2621 lpfc_rcv_padisc_mapped_node(struct lpfc_vport *vport,
2622                             struct lpfc_nodelist *ndlp,
2623                             void *arg, uint32_t evt)
2624 {
2625         struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2626
2627         lpfc_rcv_padisc(vport, ndlp, cmdiocb);
2628         return ndlp->nlp_state;
2629 }
2630
2631 static uint32_t
2632 lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2633                           void *arg, uint32_t evt)
2634 {
2635         struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2636
2637         /* flush the target */
2638         lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
2639
2640         /* Treat like rcv logo */
2641         lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
2642         return ndlp->nlp_state;
2643 }
2644
2645 static uint32_t
2646 lpfc_device_recov_mapped_node(struct lpfc_vport *vport,
2647                               struct lpfc_nodelist *ndlp,
2648                               void *arg,
2649                               uint32_t evt)
2650 {
2651         lpfc_disc_set_adisc(vport, ndlp);
2652
2653         ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE;
2654         lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2655         spin_lock_irq(&ndlp->lock);
2656         ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
2657         ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
2658         spin_unlock_irq(&ndlp->lock);
2659         return ndlp->nlp_state;
2660 }
2661
2662 static uint32_t
2663 lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2664                         void *arg, uint32_t evt)
2665 {
2666         struct lpfc_iocbq *cmdiocb  = (struct lpfc_iocbq *) arg;
2667
2668         /* Ignore PLOGI if we have an outstanding LOGO */
2669         if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC))
2670                 return ndlp->nlp_state;
2671         if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
2672                 lpfc_cancel_retry_delay_tmo(vport, ndlp);
2673                 spin_lock_irq(&ndlp->lock);
2674                 ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC);
2675                 spin_unlock_irq(&ndlp->lock);
2676         } else if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
2677                 /* send PLOGI immediately, move to PLOGI issue state */
2678                 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
2679                         ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
2680                         lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
2681                         lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2682                 }
2683         }
2684         return ndlp->nlp_state;
2685 }
2686
2687 static uint32_t
2688 lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2689                        void *arg, uint32_t evt)
2690 {
2691         struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2692         struct ls_rjt     stat;
2693
2694         memset(&stat, 0, sizeof (struct ls_rjt));
2695         stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
2696         stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
2697         lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
2698
2699         if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
2700                 /*
2701                  * ADISC nodes will be handled in regular discovery path after
2702                  * receiving response from NS.
2703                  *
2704                  * For other nodes, Send PLOGI to trigger an implicit LOGO.
2705                  */
2706                 if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
2707                         ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
2708                         lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
2709                         lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2710                 }
2711         }
2712         return ndlp->nlp_state;
2713 }
2714
2715 static uint32_t
2716 lpfc_rcv_logo_npr_node(struct lpfc_vport *vport,  struct lpfc_nodelist *ndlp,
2717                        void *arg, uint32_t evt)
2718 {
2719         struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2720
2721         lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
2722         return ndlp->nlp_state;
2723 }
2724
2725 static uint32_t
2726 lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2727                          void *arg, uint32_t evt)
2728 {
2729         struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2730
2731         lpfc_rcv_padisc(vport, ndlp, cmdiocb);
2732         /*
2733          * Do not start discovery if discovery is about to start
2734          * or discovery in progress for this node. Starting discovery
2735          * here will affect the counting of discovery threads.
2736          */
2737         if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
2738             !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
2739                 /*
2740                  * ADISC nodes will be handled in regular discovery path after
2741                  * receiving response from NS.
2742                  *
2743                  * For other nodes, Send PLOGI to trigger an implicit LOGO.
2744                  */
2745                 if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
2746                         ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
2747                         lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
2748                         lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2749                 }
2750         }
2751         return ndlp->nlp_state;
2752 }
2753
2754 static uint32_t
2755 lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2756                        void *arg, uint32_t evt)
2757 {
2758         struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2759
2760         spin_lock_irq(&ndlp->lock);
2761         ndlp->nlp_flag |= NLP_LOGO_ACC;
2762         spin_unlock_irq(&ndlp->lock);
2763
2764         lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
2765
2766         if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) {
2767                 mod_timer(&ndlp->nlp_delayfunc,
2768                           jiffies + msecs_to_jiffies(1000 * 1));
2769                 spin_lock_irq(&ndlp->lock);
2770                 ndlp->nlp_flag |= NLP_DELAY_TMO;
2771                 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2772                 spin_unlock_irq(&ndlp->lock);
2773                 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
2774         } else {
2775                 spin_lock_irq(&ndlp->lock);
2776                 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2777                 spin_unlock_irq(&ndlp->lock);
2778         }
2779         return ndlp->nlp_state;
2780 }
2781
2782 static uint32_t
2783 lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2784                          void *arg, uint32_t evt)
2785 {
2786         struct lpfc_hba *phba = vport->phba;
2787         struct lpfc_iocbq *cmdiocb, *rspiocb;
2788         u32 ulp_status;
2789
2790         cmdiocb = (struct lpfc_iocbq *) arg;
2791         rspiocb = cmdiocb->rsp_iocb;
2792
2793         ulp_status = get_job_ulpstatus(phba, rspiocb);
2794
2795         if (ulp_status)
2796                 return NLP_STE_FREED_NODE;
2797
2798         return ndlp->nlp_state;
2799 }
2800
2801 static uint32_t
2802 lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2803                         void *arg, uint32_t evt)
2804 {
2805         struct lpfc_hba *phba = vport->phba;
2806         struct lpfc_iocbq *cmdiocb, *rspiocb;
2807         u32 ulp_status;
2808
2809         cmdiocb = (struct lpfc_iocbq *) arg;
2810         rspiocb = cmdiocb->rsp_iocb;
2811
2812         ulp_status = get_job_ulpstatus(phba, rspiocb);
2813
2814         if (ulp_status && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
2815                 lpfc_drop_node(vport, ndlp);
2816                 return NLP_STE_FREED_NODE;
2817         }
2818         return ndlp->nlp_state;
2819 }
2820
2821 static uint32_t
2822 lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2823                         void *arg, uint32_t evt)
2824 {
2825         /* For the fabric port just clear the fc flags. */
2826         if (ndlp->nlp_DID == Fabric_DID) {
2827                 clear_bit(FC_FABRIC, &vport->fc_flag);
2828                 clear_bit(FC_PUBLIC_LOOP, &vport->fc_flag);
2829         }
2830         lpfc_unreg_rpi(vport, ndlp);
2831         return ndlp->nlp_state;
2832 }
2833
2834 static uint32_t
2835 lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2836                          void *arg, uint32_t evt)
2837 {
2838         struct lpfc_hba *phba = vport->phba;
2839         struct lpfc_iocbq *cmdiocb, *rspiocb;
2840         u32 ulp_status;
2841
2842         cmdiocb = (struct lpfc_iocbq *) arg;
2843         rspiocb = cmdiocb->rsp_iocb;
2844
2845         ulp_status = get_job_ulpstatus(phba, rspiocb);
2846
2847         if (ulp_status && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
2848                 lpfc_drop_node(vport, ndlp);
2849                 return NLP_STE_FREED_NODE;
2850         }
2851         return ndlp->nlp_state;
2852 }
2853
2854 static uint32_t
2855 lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
2856                             struct lpfc_nodelist *ndlp,
2857                             void *arg, uint32_t evt)
2858 {
2859         LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
2860         MAILBOX_t    *mb = &pmb->u.mb;
2861
2862         if (!mb->mbxStatus) {
2863                 /* SLI4 ports have preallocated logical rpis. */
2864                 if (vport->phba->sli_rev < LPFC_SLI_REV4)
2865                         ndlp->nlp_rpi = mb->un.varWords[0];
2866                 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
2867                 if (ndlp->nlp_flag & NLP_LOGO_ACC) {
2868                         lpfc_unreg_rpi(vport, ndlp);
2869                 }
2870         } else {
2871                 if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
2872                         lpfc_drop_node(vport, ndlp);
2873                         return NLP_STE_FREED_NODE;
2874                 }
2875         }
2876         return ndlp->nlp_state;
2877 }
2878
2879 static uint32_t
2880 lpfc_device_rm_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2881                         void *arg, uint32_t evt)
2882 {
2883         if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
2884                 spin_lock_irq(&ndlp->lock);
2885                 ndlp->nlp_flag |= NLP_NODEV_REMOVE;
2886                 spin_unlock_irq(&ndlp->lock);
2887                 return ndlp->nlp_state;
2888         }
2889         lpfc_drop_node(vport, ndlp);
2890         return NLP_STE_FREED_NODE;
2891 }
2892
2893 static uint32_t
2894 lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2895                            void *arg, uint32_t evt)
2896 {
2897         /* Don't do anything that will mess up processing of the
2898          * previous RSCN.
2899          */
2900         if (test_bit(FC_RSCN_DEFERRED, &vport->fc_flag))
2901                 return ndlp->nlp_state;
2902
2903         lpfc_cancel_retry_delay_tmo(vport, ndlp);
2904         spin_lock_irq(&ndlp->lock);
2905         ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
2906         ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
2907         spin_unlock_irq(&ndlp->lock);
2908         return ndlp->nlp_state;
2909 }
2910
2911
2912 /* This next section defines the NPort Discovery State Machine */
2913
2914 /* There are 4 different doubly linked lists that nodelist entries can reside on.
2915  * The plogi list and adisc list are used when Link Up discovery or RSCN
2916  * processing is needed. Each list holds the nodes that we will send PLOGI
2917  * or ADISC on. These lists keep track of which nodes will be affected
2918  * by an RSCN or a Link Up (typically, all nodes are affected on Link Up).
2919  * The unmapped_list will contain all nodes that we have successfully logged
2920  * into at the Fibre Channel level. The mapped_list will contain all nodes
2921  * that are mapped FCP targets.
2922  */
2923 /*
2924  * The bind list is a list of undiscovered (potentially non-existent) nodes
2925  * that we have saved binding information on. This information is used when
2926  * nodes transition from the unmapped to the mapped list.
2927  */
2928 /* For UNUSED_NODE state, the node has just been allocated.
2929  * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on
2930  * the PLOGI list. For REG_LOGIN_COMPL, the node is taken off the PLOGI list
2931  * and put on the unmapped list. For ADISC processing, the node is taken off
2932  * the ADISC list and placed on either the mapped or unmapped list (depending
2933  * on its previous state). Once on the unmapped list, a PRLI is issued and the
2934  * state changed to PRLI_ISSUE. When the PRLI completion occurs, the state is
2935  * changed to UNMAPPED_NODE. If the completion indicates a mapped
2936  * node, the node is taken off the unmapped list. The binding list is checked
2937  * for a valid binding, or a binding is automatically assigned. If binding
2938  * assignment is unsuccessful, the node is left on the unmapped list. If
2939  * binding assignment is successful, the associated binding list entry (if
2940  * any) is removed, and the node is placed on the mapped list.
2941  */
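/* Illustrative sketch of the lifecycle described above (state names assumed
 * from lpfc_disc.h): a typical FCP target that logs in cleanly walks the
 * states roughly as
 *
 *   NLP_STE_PLOGI_ISSUE -> NLP_STE_REG_LOGIN_ISSUE -> NLP_STE_UNMAPPED_NODE
 *       -> NLP_STE_PRLI_ISSUE -> NLP_STE_MAPPED_NODE
 *
 * with NPR_NODE reached when the device later drops out of discovery.
 */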
2942 /*
2943  * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
2944  * lists will receive a DEVICE_RECOVERY event. If the linkdown or devloss timers
2945  * expire, all affected nodes will receive a DEVICE_RM event.
2946  */
2947 /*
2948  * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
2949  * to either the ADISC or PLOGI list.  After a Nameserver query or ALPA loopmap
2950  * check, additional nodes may be added or removed (via DEVICE_RM) to / from
2951  * the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are populated,
2952  * we will first process the ADISC list.  32 entries are processed initially and
2953  * ADISC is initiated for each one.  Completions / Events for each node are
2954  * funnelled through the state machine.  As each node finishes ADISC processing, it
2955  * starts ADISC for any nodes waiting for ADISC processing. If no nodes are
2956  * waiting, and the ADISC list count is identically 0, then we are done. For
2957  * Link Up discovery, since all nodes on the PLOGI list are UNREG_LOGIN'ed, we
2958  * can issue a CLEAR_LA and reenable Link Events. Next we will process the PLOGI
2959  * list.  32 entries are processed initially and PLOGI is initiated for each one.
2960  * Completions / Events for each node are funnelled through the state machine.  As
2961  * each node finishes PLOGI processing, it starts PLOGI for any nodes waiting
2962  * for PLOGI processing. If no nodes are waiting, and the PLOGI list count is
2963  * identically 0, then we are done. We have now completed discovery / RSCN
2964  * handling. Upon completion, ALL nodes should be on either the mapped or
2965  * unmapped lists.
2966  */
2967
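/* A minimal sketch of how the flat action table below is consumed, assuming
 * the NLP_STE_* / NLP_EVT_* values from lpfc_disc.h.  The helper name
 * lpfc_disc_action_index() is hypothetical and exists only to illustrate the
 * row-major (state, event) indexing; lpfc_disc_state_machine() further down
 * performs the same computation inline.
 */
static inline uint32_t
lpfc_disc_action_index(uint32_t cur_state, uint32_t evt)
{
        /* One action routine per (state, event) pair: row = node state,
         * column = event, NLP_EVT_MAX_EVENT entries per row.
         */
        return (cur_state * NLP_EVT_MAX_EVENT) + evt;
}

/* For example, a PRLI received while a node is unmapped resolves to
 * lpfc_disc_action[lpfc_disc_action_index(NLP_STE_UNMAPPED_NODE,
 * NLP_EVT_RCV_PRLI)], i.e. lpfc_rcv_prli_unmap_node() in the table below.
 */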
2968 static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
2969      (struct lpfc_vport *, struct lpfc_nodelist *, void *, uint32_t) = {
2970         /* Action routine                  Event       Current State  */
2971         lpfc_rcv_plogi_unused_node,     /* RCV_PLOGI   UNUSED_NODE    */
2972         lpfc_rcv_els_unused_node,       /* RCV_PRLI        */
2973         lpfc_rcv_logo_unused_node,      /* RCV_LOGO        */
2974         lpfc_rcv_els_unused_node,       /* RCV_ADISC       */
2975         lpfc_rcv_els_unused_node,       /* RCV_PDISC       */
2976         lpfc_rcv_els_unused_node,       /* RCV_PRLO        */
2977         lpfc_disc_illegal,              /* CMPL_PLOGI      */
2978         lpfc_disc_illegal,              /* CMPL_PRLI       */
2979         lpfc_cmpl_logo_unused_node,     /* CMPL_LOGO       */
2980         lpfc_disc_illegal,              /* CMPL_ADISC      */
2981         lpfc_disc_illegal,              /* CMPL_REG_LOGIN  */
2982         lpfc_device_rm_unused_node,     /* DEVICE_RM       */
2983         lpfc_device_recov_unused_node,  /* DEVICE_RECOVERY */
2984
2985         lpfc_rcv_plogi_plogi_issue,     /* RCV_PLOGI   PLOGI_ISSUE    */
2986         lpfc_rcv_prli_plogi_issue,      /* RCV_PRLI        */
2987         lpfc_rcv_logo_plogi_issue,      /* RCV_LOGO        */
2988         lpfc_rcv_els_plogi_issue,       /* RCV_ADISC       */
2989         lpfc_rcv_els_plogi_issue,       /* RCV_PDISC       */
2990         lpfc_rcv_els_plogi_issue,       /* RCV_PRLO        */
2991         lpfc_cmpl_plogi_plogi_issue,    /* CMPL_PLOGI      */
2992         lpfc_disc_illegal,              /* CMPL_PRLI       */
2993         lpfc_cmpl_logo_plogi_issue,     /* CMPL_LOGO       */
2994         lpfc_disc_illegal,              /* CMPL_ADISC      */
2995         lpfc_cmpl_reglogin_plogi_issue,/* CMPL_REG_LOGIN  */
2996         lpfc_device_rm_plogi_issue,     /* DEVICE_RM       */
2997         lpfc_device_recov_plogi_issue,  /* DEVICE_RECOVERY */
2998
2999         lpfc_rcv_plogi_adisc_issue,     /* RCV_PLOGI   ADISC_ISSUE    */
3000         lpfc_rcv_prli_adisc_issue,      /* RCV_PRLI        */
3001         lpfc_rcv_logo_adisc_issue,      /* RCV_LOGO        */
3002         lpfc_rcv_padisc_adisc_issue,    /* RCV_ADISC       */
3003         lpfc_rcv_padisc_adisc_issue,    /* RCV_PDISC       */
3004         lpfc_rcv_prlo_adisc_issue,      /* RCV_PRLO        */
3005         lpfc_disc_illegal,              /* CMPL_PLOGI      */
3006         lpfc_disc_illegal,              /* CMPL_PRLI       */
3007         lpfc_disc_illegal,              /* CMPL_LOGO       */
3008         lpfc_cmpl_adisc_adisc_issue,    /* CMPL_ADISC      */
3009         lpfc_disc_illegal,              /* CMPL_REG_LOGIN  */
3010         lpfc_device_rm_adisc_issue,     /* DEVICE_RM       */
3011         lpfc_device_recov_adisc_issue,  /* DEVICE_RECOVERY */
3012
3013         lpfc_rcv_plogi_reglogin_issue,  /* RCV_PLOGI  REG_LOGIN_ISSUE */
3014         lpfc_rcv_prli_reglogin_issue,   /* RCV_PRLI        */
3015         lpfc_rcv_logo_reglogin_issue,   /* RCV_LOGO        */
3016         lpfc_rcv_padisc_reglogin_issue, /* RCV_ADISC       */
3017         lpfc_rcv_padisc_reglogin_issue, /* RCV_PDISC       */
3018         lpfc_rcv_prlo_reglogin_issue,   /* RCV_PRLO        */
3019         lpfc_cmpl_plogi_illegal,        /* CMPL_PLOGI      */
3020         lpfc_disc_illegal,              /* CMPL_PRLI       */
3021         lpfc_disc_illegal,              /* CMPL_LOGO       */
3022         lpfc_disc_illegal,              /* CMPL_ADISC      */
3023         lpfc_cmpl_reglogin_reglogin_issue,/* CMPL_REG_LOGIN  */
3024         lpfc_device_rm_reglogin_issue,  /* DEVICE_RM       */
3025         lpfc_device_recov_reglogin_issue,/* DEVICE_RECOVERY */
3026
3027         lpfc_rcv_plogi_prli_issue,      /* RCV_PLOGI   PRLI_ISSUE     */
3028         lpfc_rcv_prli_prli_issue,       /* RCV_PRLI        */
3029         lpfc_rcv_logo_prli_issue,       /* RCV_LOGO        */
3030         lpfc_rcv_padisc_prli_issue,     /* RCV_ADISC       */
3031         lpfc_rcv_padisc_prli_issue,     /* RCV_PDISC       */
3032         lpfc_rcv_prlo_prli_issue,       /* RCV_PRLO        */
3033         lpfc_cmpl_plogi_illegal,        /* CMPL_PLOGI      */
3034         lpfc_cmpl_prli_prli_issue,      /* CMPL_PRLI       */
3035         lpfc_disc_illegal,              /* CMPL_LOGO       */
3036         lpfc_disc_illegal,              /* CMPL_ADISC      */
3037         lpfc_disc_illegal,              /* CMPL_REG_LOGIN  */
3038         lpfc_device_rm_prli_issue,      /* DEVICE_RM       */
3039         lpfc_device_recov_prli_issue,   /* DEVICE_RECOVERY */
3040
3041         lpfc_rcv_plogi_logo_issue,      /* RCV_PLOGI   LOGO_ISSUE     */
3042         lpfc_rcv_prli_logo_issue,       /* RCV_PRLI        */
3043         lpfc_rcv_logo_logo_issue,       /* RCV_LOGO        */
3044         lpfc_rcv_padisc_logo_issue,     /* RCV_ADISC       */
3045         lpfc_rcv_padisc_logo_issue,     /* RCV_PDISC       */
3046         lpfc_rcv_prlo_logo_issue,       /* RCV_PRLO        */
3047         lpfc_cmpl_plogi_illegal,        /* CMPL_PLOGI      */
3048         lpfc_disc_illegal,              /* CMPL_PRLI       */
3049         lpfc_cmpl_logo_logo_issue,      /* CMPL_LOGO       */
3050         lpfc_disc_illegal,              /* CMPL_ADISC      */
3051         lpfc_disc_illegal,              /* CMPL_REG_LOGIN  */
3052         lpfc_device_rm_logo_issue,      /* DEVICE_RM       */
3053         lpfc_device_recov_logo_issue,   /* DEVICE_RECOVERY */
3054
3055         lpfc_rcv_plogi_unmap_node,      /* RCV_PLOGI   UNMAPPED_NODE  */
3056         lpfc_rcv_prli_unmap_node,       /* RCV_PRLI        */
3057         lpfc_rcv_logo_unmap_node,       /* RCV_LOGO        */
3058         lpfc_rcv_padisc_unmap_node,     /* RCV_ADISC       */
3059         lpfc_rcv_padisc_unmap_node,     /* RCV_PDISC       */
3060         lpfc_rcv_prlo_unmap_node,       /* RCV_PRLO        */
3061         lpfc_disc_illegal,              /* CMPL_PLOGI      */
3062         lpfc_disc_illegal,              /* CMPL_PRLI       */
3063         lpfc_disc_illegal,              /* CMPL_LOGO       */
3064         lpfc_disc_illegal,              /* CMPL_ADISC      */
3065         lpfc_disc_illegal,              /* CMPL_REG_LOGIN  */
3066         lpfc_device_rm_unmap_node,      /* DEVICE_RM       */
3067         lpfc_device_recov_unmap_node,   /* DEVICE_RECOVERY */
3068
3069         lpfc_rcv_plogi_mapped_node,     /* RCV_PLOGI   MAPPED_NODE    */
3070         lpfc_rcv_prli_mapped_node,      /* RCV_PRLI        */
3071         lpfc_rcv_logo_mapped_node,      /* RCV_LOGO        */
3072         lpfc_rcv_padisc_mapped_node,    /* RCV_ADISC       */
3073         lpfc_rcv_padisc_mapped_node,    /* RCV_PDISC       */
3074         lpfc_rcv_prlo_mapped_node,      /* RCV_PRLO        */
3075         lpfc_disc_illegal,              /* CMPL_PLOGI      */
3076         lpfc_disc_illegal,              /* CMPL_PRLI       */
3077         lpfc_disc_illegal,              /* CMPL_LOGO       */
3078         lpfc_disc_illegal,              /* CMPL_ADISC      */
3079         lpfc_disc_illegal,              /* CMPL_REG_LOGIN  */
3080         lpfc_disc_illegal,              /* DEVICE_RM       */
3081         lpfc_device_recov_mapped_node,  /* DEVICE_RECOVERY */
3082
3083         lpfc_rcv_plogi_npr_node,        /* RCV_PLOGI   NPR_NODE    */
3084         lpfc_rcv_prli_npr_node,         /* RCV_PRLI        */
3085         lpfc_rcv_logo_npr_node,         /* RCV_LOGO        */
3086         lpfc_rcv_padisc_npr_node,       /* RCV_ADISC       */
3087         lpfc_rcv_padisc_npr_node,       /* RCV_PDISC       */
3088         lpfc_rcv_prlo_npr_node,         /* RCV_PRLO        */
3089         lpfc_cmpl_plogi_npr_node,       /* CMPL_PLOGI      */
3090         lpfc_cmpl_prli_npr_node,        /* CMPL_PRLI       */
3091         lpfc_cmpl_logo_npr_node,        /* CMPL_LOGO       */
3092         lpfc_cmpl_adisc_npr_node,       /* CMPL_ADISC      */
3093         lpfc_cmpl_reglogin_npr_node,    /* CMPL_REG_LOGIN  */
3094         lpfc_device_rm_npr_node,        /* DEVICE_RM       */
3095         lpfc_device_recov_npr_node,     /* DEVICE_RECOVERY */
3096 };
3097
3098 int
3099 lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
3100                         void *arg, uint32_t evt)
3101 {
3102         uint32_t cur_state, rc;
3103         uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *,
3104                          uint32_t);
3105         uint32_t got_ndlp = 0;
3106         uint32_t data1;
3107
3108         if (lpfc_nlp_get(ndlp))
3109                 got_ndlp = 1;
3110
3111         cur_state = ndlp->nlp_state;
3112
3113         data1 = (((uint32_t)ndlp->nlp_fc4_type << 16) |
3114                 ((uint32_t)ndlp->nlp_type));
3115         /* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
3116         lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
3117                          "0211 DSM in event x%x on NPort x%x in "
3118                          "state %d rpi x%x Data: x%x x%x\n",
3119                          evt, ndlp->nlp_DID, cur_state, ndlp->nlp_rpi,
3120                          ndlp->nlp_flag, data1);
3121
3122         lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
3123                  "DSM in:          evt:%d ste:%d did:x%x",
3124                 evt, cur_state, ndlp->nlp_DID);
3125
3126         func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt];
3127         rc = (func) (vport, ndlp, arg, evt);
3128
3129         /* DSM out state <rc> on NPort <nlp_DID> */
3130         if (got_ndlp) {
3131                 data1 = (((uint32_t)ndlp->nlp_fc4_type << 16) |
3132                         ((uint32_t)ndlp->nlp_type));
3133                 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
3134                          "0212 DSM out state %d on NPort x%x "
3135                          "rpi x%x Data: x%x x%x\n",
3136                          rc, ndlp->nlp_DID, ndlp->nlp_rpi, ndlp->nlp_flag,
3137                          data1);
3138
3139                 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
3140                         "DSM out:         ste:%d did:x%x flg:x%x",
3141                         rc, ndlp->nlp_DID, ndlp->nlp_flag);
3142                 /* Decrement the ndlp reference count held for this function */
3143                 lpfc_nlp_put(ndlp);
3144         } else {
3145                 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
3146                         "0213 DSM out state %d on NPort free\n", rc);
3147
3148                 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
3149                         "DSM out:         ste:%d did:x%x flg:x%x",
3150                         rc, 0, 0);
3151         }
3152
3153         return rc;
3154 }
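
/* Usage sketch (illustrative only): elsewhere in the driver, unsolicited ELS
 * traffic and command completions are funnelled into this routine, e.g.
 * roughly
 *
 *      lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
 *
 * The return value is the node's resulting state, or NLP_STE_FREED_NODE when
 * the selected action routine dropped the node.
 */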