Diffstat (limited to 'drivers/scsi/lpfc/lpfc_hbadisc.c')
 drivers/scsi/lpfc/lpfc_hbadisc.c | 220
 1 file changed, 192 insertions(+), 28 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 1f87b4fb8b50..0639c994349c 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -275,7 +275,9 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
if (!(vport->load_flag & FC_UNLOADING) &&
!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
!(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
- (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE))
+ (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
+ (ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) &&
+ (ndlp->nlp_state != NLP_STE_PRLI_ISSUE))
lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
lpfc_unregister_unused_fcf(phba);
@@ -586,6 +588,8 @@ lpfc_work_done(struct lpfc_hba *phba)
(status &
HA_RXMASK));
}
+ if (pring->txq_cnt)
+ lpfc_drain_txq(phba);
/*
* Turn on Ring interrupts
*/
@@ -1297,7 +1301,7 @@ lpfc_register_fcf(struct lpfc_hba *phba)
* used for this FCF when the function returns.
* If the FCF record needs to be used with a particular vlan id, the vlan is
* set in the vlan_id on return of the function. If no VLAN tagging needs to
- * be used with the FCF vlan_id will be set to 0xFFFF;
+ * be used with the FCF, vlan_id will be set to LPFC_FCOE_NULL_VID.
**/
static int
lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
@@ -1333,7 +1337,7 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
if (phba->valid_vlan)
*vlan_id = phba->vlan_id;
else
- *vlan_id = 0xFFFF;
+ *vlan_id = LPFC_FCOE_NULL_VID;
return 1;
}
@@ -1357,7 +1361,7 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
if (fcf_vlan_id)
*vlan_id = fcf_vlan_id;
else
- *vlan_id = 0xFFFF;
+ *vlan_id = LPFC_FCOE_NULL_VID;
return 1;
}
@@ -1466,7 +1470,7 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
else if (fcf_vlan_id)
*vlan_id = fcf_vlan_id;
else
- *vlan_id = 0xFFFF;
+ *vlan_id = LPFC_FCOE_NULL_VID;
return 1;
}
@@ -1518,6 +1522,9 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
* Do not continue FCF discovery and clear FCF_DISC_INPROGRESS
* flag
*/
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+ "2833 Stop FCF discovery process due to link "
+ "state change (x%x)\n", phba->link_state);
spin_lock_irq(&phba->hbalock);
phba->hba_flag &= ~FCF_DISC_INPROGRESS;
phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
@@ -1565,7 +1572,7 @@ lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
}
/**
- * lpfc_mbx_cmpl_read_fcf_record - Completion handler for read_fcf mbox.
+ * lpfc_sli4_fcf_rec_mbox_parse - Parse read_fcf mbox command.
* @phba: pointer to lpfc hba data structure.
* @mboxq: pointer to mailbox object.
* @next_fcf_index: pointer to holder of next fcf index.
@@ -1693,6 +1700,37 @@ lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
}
/**
+ * lpfc_sli4_fcf_record_match - test a new FCF record for a match with an existing FCF
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_rec: pointer to an existing FCF record.
+ * @new_fcf_record: pointer to a new FCF record.
+ * @new_vlan_id: vlan id from the new FCF record.
+ *
+ * This function performs a matching test of a new FCF record against an existing
+ * FCF record. If the new_vlan_id passed in is LPFC_FCOE_IGNORE_VID, vlan id
+ * will not be used as part of the FCF record matching criteria.
+ *
+ * Returns true if all the fields match, otherwise returns false.
+ */
+static bool
+lpfc_sli4_fcf_record_match(struct lpfc_hba *phba,
+ struct lpfc_fcf_rec *fcf_rec,
+ struct fcf_record *new_fcf_record,
+ uint16_t new_vlan_id)
+{
+ if (new_vlan_id != LPFC_FCOE_IGNORE_VID &&
+     !lpfc_vlan_id_match(fcf_rec->vlan_id, new_vlan_id))
+ return false;
+ if (!lpfc_mac_addr_match(fcf_rec->mac_addr, new_fcf_record))
+ return false;
+ if (!lpfc_sw_name_match(fcf_rec->switch_name, new_fcf_record))
+ return false;
+ if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record))
+ return false;
+ return true;
+}
+
+/**
* lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
* @phba: pointer to lpfc hba data structure.
* @mboxq: pointer to mailbox object.
@@ -1755,7 +1793,7 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
*/
if (!rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
- "2781 FCF record fcf_index:x%x failed FCF "
+ "2781 FCF record (x%x) failed FCF "
"connection list check, fcf_avail:x%x, "
"fcf_valid:x%x\n",
bf_get(lpfc_fcf_record_fcf_index,
@@ -1764,6 +1802,32 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
new_fcf_record),
bf_get(lpfc_fcf_record_fcf_valid,
new_fcf_record));
+ if ((phba->fcf.fcf_flag & FCF_IN_USE) &&
+ lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
+ new_fcf_record, LPFC_FCOE_IGNORE_VID)) {
+ /*
+ * In case the current in-use FCF record becomes
+ * invalid/unavailable during FCF discovery that
+ * was not triggered by fast FCF failover process,
+ * treat it as fast FCF failover.
+ */
+ if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND) &&
+ !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+ "2835 Invalid in-use FCF "
+ "record (x%x) reported, "
+ "entering fast FCF failover "
+ "mode scanning.\n",
+ phba->fcf.current_rec.fcf_indx);
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag |= FCF_REDISC_FOV;
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ lpfc_sli4_fcf_scan_read_fcf_rec(phba,
+ LPFC_FCOE_FCF_GET_FIRST);
+ return;
+ }
+ }
goto read_next_fcf;
} else {
fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
@@ -1780,14 +1844,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
*/
spin_lock_irq(&phba->hbalock);
if (phba->fcf.fcf_flag & FCF_IN_USE) {
- if (lpfc_fab_name_match(phba->fcf.current_rec.fabric_name,
- new_fcf_record) &&
- lpfc_sw_name_match(phba->fcf.current_rec.switch_name,
- new_fcf_record) &&
- lpfc_mac_addr_match(phba->fcf.current_rec.mac_addr,
- new_fcf_record) &&
- lpfc_vlan_id_match(phba->fcf.current_rec.vlan_id,
- vlan_id)) {
+ if (lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
+ new_fcf_record, vlan_id)) {
phba->fcf.fcf_flag |= FCF_AVAILABLE;
if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
/* Stop FCF redisc wait timer if pending */
@@ -1797,6 +1855,13 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV |
FCF_DISCOVERY);
spin_unlock_irq(&phba->hbalock);
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2836 The new FCF record (x%x) "
+ "matches the in-use FCF record "
+ "(x%x)\n",
+ phba->fcf.current_rec.fcf_indx,
+ bf_get(lpfc_fcf_record_fcf_index,
+ new_fcf_record));
goto out;
}
/*
@@ -1828,6 +1893,12 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
*/
if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) {
/* Choose this FCF record */
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2837 Update current FCF record "
+ "(x%x) with new FCF record (x%x)\n",
+ fcf_rec->fcf_indx,
+ bf_get(lpfc_fcf_record_fcf_index,
+ new_fcf_record));
__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
addr_mode, vlan_id, BOOT_ENABLE);
spin_unlock_irq(&phba->hbalock);
@@ -1848,6 +1919,12 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
*/
if (new_fcf_record->fip_priority < fcf_rec->priority) {
/* Choose the new FCF record with lower priority */
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2838 Update current FCF record "
+ "(x%x) with new FCF record (x%x)\n",
+ fcf_rec->fcf_indx,
+ bf_get(lpfc_fcf_record_fcf_index,
+ new_fcf_record));
__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
addr_mode, vlan_id, 0);
/* Reset running random FCF selection count */
@@ -1857,11 +1934,18 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
phba->fcf.eligible_fcf_cnt++;
select_new_fcf = lpfc_sli4_new_fcf_random_select(phba,
phba->fcf.eligible_fcf_cnt);
- if (select_new_fcf)
+ if (select_new_fcf) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2839 Update current FCF record "
+ "(x%x) with new FCF record (x%x)\n",
+ fcf_rec->fcf_indx,
+ bf_get(lpfc_fcf_record_fcf_index,
+ new_fcf_record));
/* Choose the new FCF by random selection */
__lpfc_update_fcf_record(phba, fcf_rec,
new_fcf_record,
addr_mode, vlan_id, 0);
+ }
}
spin_unlock_irq(&phba->hbalock);
goto read_next_fcf;
@@ -1871,6 +1955,11 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
* initial best-fit FCF.
*/
if (fcf_rec) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2840 Update current FCF record "
+ "with initial FCF record (x%x)\n",
+ bf_get(lpfc_fcf_record_fcf_index,
+ new_fcf_record));
__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
addr_mode, vlan_id, (boot_flag ?
BOOT_ENABLE : 0));
@@ -1928,12 +2017,23 @@ read_next_fcf:
lpfc_unregister_fcf(phba);
/* Replace in-use record with the new record */
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2842 Replace the current in-use "
+ "FCF record (x%x) with failover FCF "
+ "record (x%x)\n",
+ phba->fcf.current_rec.fcf_indx,
+ phba->fcf.failover_rec.fcf_indx);
memcpy(&phba->fcf.current_rec,
&phba->fcf.failover_rec,
sizeof(struct lpfc_fcf_rec));
- /* mark the FCF fast failover completed */
+ /*
+ * Mark the fast FCF failover rediscovery completed
+ * and the start of the first round of the round-robin
+ * FCF failover.
+ */
spin_lock_irq(&phba->hbalock);
- phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
+ phba->fcf.fcf_flag &=
+ ~(FCF_REDISC_FOV | FCF_REDISC_RRU);
spin_unlock_irq(&phba->hbalock);
/*
* Set up the initial registered FCF index for FLOGI
@@ -1951,15 +2051,42 @@ read_next_fcf:
if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) ||
(phba->fcf.fcf_flag & FCF_REDISC_PEND))
return;
+
+ if (phba->fcf.fcf_flag & FCF_IN_USE) {
+ /*
+ * In case the current in-use FCF record no
+ * longer existed during FCF discovery that
+ * was not triggered by fast FCF failover
+ * process, treat it as fast FCF failover.
+ */
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2841 In-use FCF record (x%x) "
+ "not reported, entering fast "
+ "FCF failover mode scanning.\n",
+ phba->fcf.current_rec.fcf_indx);
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag |= FCF_REDISC_FOV;
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ lpfc_sli4_fcf_scan_read_fcf_rec(phba,
+ LPFC_FCOE_FCF_GET_FIRST);
+ return;
+ }
+
/*
* Otherwise, initial scan or post linkdown rescan,
* register with the best FCF record found so far
* through the FCF scanning process.
*/
- /* mark the initial FCF discovery completed */
+ /*
+ * Mark the initial FCF discovery completed and
+ * the start of the first round of the round-robin
+ * FCF failover.
+ */
spin_lock_irq(&phba->hbalock);
- phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
+ phba->fcf.fcf_flag &=
+ ~(FCF_INIT_DISC | FCF_REDISC_RRU);
spin_unlock_irq(&phba->hbalock);
/*
* Set up the initial registered FCF index for FLOGI
@@ -2033,6 +2160,11 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
next_fcf_index);
/* Upload new FCF record to the failover FCF record */
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2834 Update the current FCF record (x%x) "
+ "with the next FCF record (x%x)\n",
+ phba->fcf.failover_rec.fcf_indx,
+ bf_get(lpfc_fcf_record_fcf_index, new_fcf_record));
spin_lock_irq(&phba->hbalock);
__lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
new_fcf_record, addr_mode, vlan_id,
@@ -2050,7 +2182,7 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
"2783 FLOGI round robin FCF failover from FCF "
- "(index:x%x) to FCF (index:x%x).\n",
+ "(x%x) to FCF (x%x).\n",
current_fcf_index,
bf_get(lpfc_fcf_record_fcf_index, new_fcf_record));
@@ -2084,7 +2216,7 @@ lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
goto out;
/* If FCF discovery period is over, no need to proceed */
- if (phba->fcf.fcf_flag & FCF_DISCOVERY)
+ if (!(phba->fcf.fcf_flag & FCF_DISCOVERY))
goto out;
/* Parse the FCF record from the non-embedded mailbox command */
@@ -2715,11 +2847,35 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
struct lpfc_vport *vport = pmb->vport;
struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
pmb->context1 = NULL;
- /* Good status, call state machine */
- lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
+ if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
+ ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
+
+ if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL ||
+ ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
+ /* We received an RSCN after issuing this
+ * mbox reg login. We may have cycled
+ * back through the state machine and be
+ * back at reg login state, so this
+ * mbox needs to be ignored because
+ * there is another reg login in
+ * progress.
+ */
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
+ spin_unlock_irq(shost->host_lock);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_sli4_free_rpi(phba,
+ pmb->u.mb.un.varRegLogin.rpi);
+
+ } else {
+ /* Good status, call state machine */
+ lpfc_disc_state_machine(vport, ndlp, pmb,
+ NLP_EVT_CMPL_REG_LOGIN);
+ }
+
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
mempool_free(pmb, phba->mbox_mem_pool);
@@ -3427,7 +3583,7 @@ lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
kref_init(&ndlp->kref);
NLP_INT_NODE_ACT(ndlp);
atomic_set(&ndlp->cmd_pending, 0);
- ndlp->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
+ ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
}
struct lpfc_nodelist *
@@ -3700,6 +3856,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
mempool_free(mbox, phba->mbox_mem_pool);
}
lpfc_no_rpi(phba, ndlp);
+
ndlp->nlp_rpi = 0;
ndlp->nlp_flag &= ~NLP_RPI_VALID;
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
@@ -3842,6 +3999,9 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
kfree(mp);
}
list_del(&mb->list);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_sli4_free_rpi(phba,
+ mb->u.mb.un.varRegLogin.rpi);
mempool_free(mb, phba->mbox_mem_pool);
/* We shall not invoke the lpfc_nlp_put to decrement
* the ndlp reference count as we are in the process
@@ -3883,6 +4043,7 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_cancel_retry_delay_tmo(vport, ndlp);
if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
+ !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
!(ndlp->nlp_flag & NLP_RPI_VALID)) {
/* For this case we need to cleanup the default rpi
* allocated by the firmware.
@@ -5180,13 +5341,16 @@ void
lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
{
/*
- * If HBA is not running in FIP mode or if HBA does not support
- * FCoE or if FCF is not registered, do nothing.
+ * If HBA is not running in FIP mode, if HBA does not support
+ * FCoE, if FCF discovery is ongoing, or if FCF has not been
+ * registered, do nothing.
*/
spin_lock_irq(&phba->hbalock);
if (!(phba->hba_flag & HBA_FCOE_SUPPORT) ||
!(phba->fcf.fcf_flag & FCF_REGISTERED) ||
- !(phba->hba_flag & HBA_FIP_SUPPORT)) {
+ !(phba->hba_flag & HBA_FIP_SUPPORT) ||
+ (phba->fcf.fcf_flag & FCF_DISCOVERY) ||
+ (phba->pport->port_state == LPFC_FLOGI)) {
spin_unlock_irq(&phba->hbalock);
return;
}