Diffstat (limited to 'drivers/scsi/qla2xxx/qla_os.c')
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c  543
1 file changed, 498 insertions(+), 45 deletions(-)
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 48c37e38ed01..be1a8fcbb1fb 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -24,11 +24,18 @@
*/
char qla2x00_version_str[40];
+static int apidev_major;
+
/*
* SRB allocation cache
*/
static struct kmem_cache *srb_cachep;
+/*
+ * CT6 CTX allocation cache
+ */
+static struct kmem_cache *ctx_cachep;
+
int ql2xlogintimeout = 20;
module_param(ql2xlogintimeout, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xlogintimeout,
@@ -65,13 +72,19 @@ MODULE_PARM_DESC(ql2xextended_error_logging,
"Option to enable extended error logging, "
"Default is 0 - no logging. 1 - log errors.");
+int ql2xshiftctondsd = 6;
+module_param(ql2xshiftctondsd, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(ql2xshiftctondsd,
+ "Set to control shifting of command type processing "
+ "based on total number of SG elements.");
+
static void qla2x00_free_device(scsi_qla_host_t *);
int ql2xfdmienable=1;
module_param(ql2xfdmienable, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xfdmienable,
- "Enables FDMI registratons "
- "Default is 0 - no FDMI. 1 - perfom FDMI.");
+ "Enables FDMI registrations. "
+ "0 - no FDMI. Default is 1 - perform FDMI.");
#define MAX_Q_DEPTH 32
static int ql2xmaxqdepth = MAX_Q_DEPTH;
@@ -79,6 +92,19 @@ module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xmaxqdepth,
"Maximum queue depth to report for target devices.");
+/* Do not change the value of this after module load */
+int ql2xenabledif = 1;
+module_param(ql2xenabledif, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql2xenabledif,
+		"Enable T10-CRC-DIF. "
+		"0 - No DIF support. Default is 1 - DIF support enabled.");
+
+int ql2xenablehba_err_chk;
+module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql2xenablehba_err_chk,
+		"Enable T10-CRC-DIF error isolation by the HBA. "
+		"Default is 0 - error isolation disabled. 1 - enable it.");
+
int ql2xiidmaenable=1;
module_param(ql2xiidmaenable, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xiidmaenable,
@@ -114,6 +140,32 @@ MODULE_PARM_DESC(ql2xetsenable,
"Enables firmware ETS burst."
"Default is 0 - skip ETS enablement.");
+int ql2xdbwr;
+module_param(ql2xdbwr, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(ql2xdbwr,
+ "Option to specify scheme for request queue posting\n"
+ " 0 -- Regular doorbell.\n"
+ " 1 -- CAMRAM doorbell (faster).\n");
+
+int ql2xdontresethba;
+module_param(ql2xdontresethba, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(ql2xdontresethba,
+ "Option to specify reset behaviour\n"
+ " 0 (Default) -- Reset on failure.\n"
+ " 1 -- Do not reset on failure.\n");
+
+int ql2xtargetreset = 1;
+module_param(ql2xtargetreset, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(ql2xtargetreset,
+		 "Enable target reset. "
+		 "Default is 1 - use hw defaults.");
+
+
+int ql2xasynctmfenable;
+module_param(ql2xasynctmfenable, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(ql2xasynctmfenable,
+		"Enables issue of TM IOCBs asynchronously via IOCB mechanism. "
+		"Default is 0 - Issue TM IOCBs via mailbox mechanism.");
/*
* SCSI host template entry points
*/
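
The ql2xdontresethba knob added above is not consumed in this file's hunks; its users live elsewhere in the driver. As a rough illustration only (the helper and its need_reset argument are hypothetical, not taken from this patch), such a flag typically gates escalation from a detected firmware problem to a full ISP abort on the DPC thread:

	/* Hypothetical sketch, not part of this patch. */
	static void sketch_maybe_schedule_reset(scsi_qla_host_t *base_vha, int need_reset)
	{
		if (need_reset && !ql2xdontresethba) {
			set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
			qla2xxx_wake_dpc(base_vha);
		}
	}
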
@@ -183,6 +235,10 @@ qla2x00_start_timer(scsi_qla_host_t *vha, void *func, unsigned long interval)
static inline void
qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval)
{
+ /* Currently used for 82XX only. */
+ if (vha->device_flags & DFLG_DEV_FAILED)
+ return;
+
mod_timer(&vha->timer, jiffies + interval * HZ);
}
@@ -500,6 +556,14 @@ qla2xxx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
if (fcport->drport)
goto qc24_target_busy;
+ if (!vha->flags.difdix_supported &&
+ scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
+		DEBUG2(qla_printk(KERN_ERR, ha,
+		    "DIF Cap Not Reg, fail DIF capable cmds:%x\n",
+		    cmd->cmnd[0]));
+ cmd->result = DID_NO_CONNECT << 16;
+ goto qc24_fail_command;
+ }
if (atomic_read(&fcport->state) != FCS_ONLINE) {
if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
@@ -618,6 +682,50 @@ qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
return (return_status);
}
+/*
+ * qla2x00_wait_for_reset_ready
+ *    Wait until the HBA is online after going through
+ *    <= MAX_RETRIES_OF_ISP_ABORT, or until the HBA is
+ *    disabled (i.e. marked offline) or flash operations
+ *    are in progress.
+ *
+ * Input:
+ *     ha - pointer to host adapter structure
+ *
+ * Note:
+ *    Does context switching - release any SPIN_LOCK held
+ *    before calling this routine.
+ *
+ * Return:
+ *    Success (adapter online, no flash ops in progress) : QLA_SUCCESS
+ *    Failed (adapter offline/disabled or flash ops in progress) : QLA_FUNCTION_FAILED
+ */
+int
+qla2x00_wait_for_reset_ready(scsi_qla_host_t *vha)
+{
+ int return_status;
+ unsigned long wait_online;
+ struct qla_hw_data *ha = vha->hw;
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+
+ wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
+ while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
+ test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
+ test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
+ ha->optrom_state != QLA_SWAITING ||
+ ha->dpc_active) && time_before(jiffies, wait_online))
+ msleep(1000);
+
+ if (base_vha->flags.online && ha->optrom_state == QLA_SWAITING)
+ return_status = QLA_SUCCESS;
+ else
+ return_status = QLA_FUNCTION_FAILED;
+
+ DEBUG2(printk("%s return_status=%d\n", __func__, return_status));
+
+ return return_status;
+}
+
int
qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
{
@@ -739,7 +847,8 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
if (sp == NULL)
continue;
- if (sp->ctx)
+ if ((sp->ctx) && !(sp->flags & SRB_FCP_CMND_DMA_VALID) &&
+ !IS_PROT_IO(sp))
continue;
if (sp->cmd != cmd)
continue;
@@ -805,7 +914,7 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
sp = req->outstanding_cmds[cnt];
if (!sp)
continue;
- if (sp->ctx)
+ if ((sp->ctx) && !IS_PROT_IO(sp))
continue;
if (vha->vp_idx != sp->fcport->vha->vp_idx)
continue;
@@ -834,6 +943,24 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
return status;
}
+void qla82xx_wait_for_pending_commands(scsi_qla_host_t *vha)
+{
+ int cnt;
+ srb_t *sp;
+ struct req_que *req = vha->req;
+
+ DEBUG2(qla_printk(KERN_INFO, vha->hw,
+ "Waiting for pending commands\n"));
+ for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+ sp = req->outstanding_cmds[cnt];
+ if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
+ sp, WAIT_HOST) == QLA_SUCCESS) {
+ DEBUG2(qla_printk(KERN_INFO, vha->hw,
+ "Done wait for pending commands\n"));
+ }
+ }
+}
+
static char *reset_errors[] = {
"HBA not online",
"HBA not ready",
@@ -1004,7 +1131,7 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
qla_printk(KERN_INFO, ha,
"scsi(%ld:%d:%d): ADAPTER RESET ISSUED.\n", vha->host_no, id, lun);
- if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
+ if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS)
goto eh_host_reset_lock;
/*
@@ -1020,11 +1147,19 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
if (qla2x00_vp_abort_isp(vha))
goto eh_host_reset_lock;
} else {
+ if (IS_QLA82XX(vha->hw)) {
+ if (!qla82xx_fcoe_ctx_reset(vha)) {
+ /* Ctx reset success */
+ ret = SUCCESS;
+ goto eh_host_reset_lock;
+ }
+ /* fall thru if ctx reset failed */
+ }
if (ha->wq)
flush_workqueue(ha->wq);
set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
- if (qla2x00_abort_isp(base_vha)) {
+ if (ha->isp_ops->abort_isp(base_vha)) {
clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
/* failed. schedule dpc to try */
set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
@@ -1064,7 +1199,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
struct fc_port *fcport;
struct qla_hw_data *ha = vha->hw;
- if (ha->flags.enable_target_reset) {
+ if (ql2xtargetreset == 1 && ha->flags.enable_target_reset) {
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (fcport->port_type != FCT_TARGET)
continue;
@@ -1078,7 +1213,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
}
}
- if (ha->flags.enable_lip_full_login && !IS_QLA81XX(ha)) {
+ if (ha->flags.enable_lip_full_login && !IS_QLA8XXX_TYPE(ha)) {
ret = qla2x00_full_login_lip(vha);
if (ret != QLA_SUCCESS) {
DEBUG2_3(printk("%s(%ld): failed: "
@@ -1125,23 +1260,28 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
sp = req->outstanding_cmds[cnt];
if (sp) {
req->outstanding_cmds[cnt] = NULL;
- if (!sp->ctx) {
+ if (!sp->ctx ||
+ (sp->flags & SRB_FCP_CMND_DMA_VALID) ||
+ IS_PROT_IO(sp)) {
sp->cmd->result = res;
qla2x00_sp_compl(ha, sp);
} else {
ctx = sp->ctx;
- if (ctx->type == SRB_LOGIN_CMD || ctx->type == SRB_LOGOUT_CMD) {
- del_timer_sync(&ctx->timer);
- ctx->free(sp);
+ if (ctx->type == SRB_LOGIN_CMD ||
+ ctx->type == SRB_LOGOUT_CMD) {
+ ctx->u.iocb_cmd->free(sp);
} else {
- struct srb_bsg* sp_bsg = (struct srb_bsg*)sp->ctx;
- if (sp_bsg->bsg_job->request->msgcode == FC_BSG_HST_CT)
+ struct fc_bsg_job *bsg_job =
+ ctx->u.bsg_job;
+ if (bsg_job->request->msgcode
+ == FC_BSG_HST_CT)
kfree(sp->fcport);
- sp_bsg->bsg_job->req->errors = 0;
- sp_bsg->bsg_job->reply->result = res;
- sp_bsg->bsg_job->job_done(sp_bsg->bsg_job);
+ bsg_job->req->errors = 0;
+ bsg_job->reply->result = res;
+ bsg_job->job_done(bsg_job);
kfree(sp->ctx);
- mempool_free(sp, ha->srb_mempool);
+ mempool_free(sp,
+ ha->srb_mempool);
}
}
}
@@ -1379,6 +1519,7 @@ static struct isp_operations qla2100_isp_ops = {
.write_optrom = qla2x00_write_optrom_data,
.get_flash_version = qla2x00_get_flash_version,
.start_scsi = qla2x00_start_scsi,
+ .abort_isp = qla2x00_abort_isp,
};
static struct isp_operations qla2300_isp_ops = {
@@ -1414,6 +1555,7 @@ static struct isp_operations qla2300_isp_ops = {
.write_optrom = qla2x00_write_optrom_data,
.get_flash_version = qla2x00_get_flash_version,
.start_scsi = qla2x00_start_scsi,
+ .abort_isp = qla2x00_abort_isp,
};
static struct isp_operations qla24xx_isp_ops = {
@@ -1449,6 +1591,7 @@ static struct isp_operations qla24xx_isp_ops = {
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_start_scsi,
+ .abort_isp = qla2x00_abort_isp,
};
static struct isp_operations qla25xx_isp_ops = {
@@ -1483,7 +1626,8 @@ static struct isp_operations qla25xx_isp_ops = {
.read_optrom = qla25xx_read_optrom_data,
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
- .start_scsi = qla24xx_start_scsi,
+ .start_scsi = qla24xx_dif_start_scsi,
+ .abort_isp = qla2x00_abort_isp,
};
static struct isp_operations qla81xx_isp_ops = {
@@ -1519,6 +1663,43 @@ static struct isp_operations qla81xx_isp_ops = {
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_start_scsi,
+ .abort_isp = qla2x00_abort_isp,
+};
+
+static struct isp_operations qla82xx_isp_ops = {
+ .pci_config = qla82xx_pci_config,
+ .reset_chip = qla82xx_reset_chip,
+ .chip_diag = qla24xx_chip_diag,
+ .config_rings = qla82xx_config_rings,
+ .reset_adapter = qla24xx_reset_adapter,
+ .nvram_config = qla81xx_nvram_config,
+ .update_fw_options = qla24xx_update_fw_options,
+ .load_risc = qla82xx_load_risc,
+ .pci_info_str = qla82xx_pci_info_str,
+ .fw_version_str = qla24xx_fw_version_str,
+ .intr_handler = qla82xx_intr_handler,
+ .enable_intrs = qla82xx_enable_intrs,
+ .disable_intrs = qla82xx_disable_intrs,
+ .abort_command = qla24xx_abort_command,
+ .target_reset = qla24xx_abort_target,
+ .lun_reset = qla24xx_lun_reset,
+ .fabric_login = qla24xx_login_fabric,
+ .fabric_logout = qla24xx_fabric_logout,
+ .calc_req_entries = NULL,
+ .build_iocbs = NULL,
+ .prep_ms_iocb = qla24xx_prep_ms_iocb,
+ .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
+ .read_nvram = qla24xx_read_nvram_data,
+ .write_nvram = qla24xx_write_nvram_data,
+ .fw_dump = qla24xx_fw_dump,
+ .beacon_on = qla24xx_beacon_on,
+ .beacon_off = qla24xx_beacon_off,
+ .beacon_blink = qla24xx_beacon_blink,
+ .read_optrom = qla82xx_read_optrom_data,
+ .write_optrom = qla82xx_write_optrom_data,
+ .get_flash_version = qla24xx_get_flash_version,
+ .start_scsi = qla82xx_start_scsi,
+ .abort_isp = qla82xx_abort_isp,
};
static inline void
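
Each ops table now carries an .abort_isp hook; the eh_host_reset, DPC, and PCI slot-reset hunks below switch from calling qla2x00_abort_isp() directly to ha->isp_ops->abort_isp(base_vha), so ISP82xx can plug in qla82xx_abort_isp. A simplified sketch of the dispatch (the types and names here are stand-ins, not the driver's real declarations):

	/* Sketch only: per-ISP reset hook behind a common call site. */
	struct sketch_host;				/* stands in for scsi_qla_host_t */

	struct sketch_isp_ops {
		int (*abort_isp)(struct sketch_host *);	/* e.g. qla2x00_abort_isp or qla82xx_abort_isp */
	};

	static int sketch_recover(struct sketch_host *vha, const struct sketch_isp_ops *ops)
	{
		return ops->abort_isp(vha);	/* callers never name a specific ISP routine */
	}
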
@@ -1607,10 +1788,22 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
ha->device_type |= DT_IIDMA;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
+ case PCI_DEVICE_ID_QLOGIC_ISP8021:
+ ha->device_type |= DT_ISP8021;
+ ha->device_type |= DT_ZIO_SUPPORTED;
+ ha->device_type |= DT_FWI2;
+ ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+ /* Initialize 82XX ISP flags */
+ qla82xx_init_flags(ha);
+ break;
}
- /* Get adapter physical port no from interrupt pin register. */
- pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
+ if (IS_QLA82XX(ha))
+ ha->port_no = !(ha->portnum & 1);
+ else
+ /* Get adapter physical port no from interrupt pin register. */
+ pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
+
if (ha->port_no & 1)
ha->flags.port0 = 1;
else
@@ -1624,6 +1817,9 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
uint16_t msix;
int cpus;
+ if (IS_QLA82XX(ha))
+ return qla82xx_iospace_config(ha);
+
if (pci_request_selected_regions(ha->pdev, ha->bars,
QLA2XXX_DRIVER_NAME)) {
qla_printk(KERN_WARNING, ha,
@@ -1767,7 +1963,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 ||
- pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001) {
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 ||
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021) {
bars = pci_select_bars(pdev, IORESOURCE_MEM);
mem_only = 1;
}
@@ -1897,6 +2094,19 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
ha->nvram_conf_off = ~0;
ha->nvram_data_off = ~0;
+ } else if (IS_QLA82XX(ha)) {
+ ha->mbx_count = MAILBOX_REGISTER_COUNT;
+ req_length = REQUEST_ENTRY_CNT_82XX;
+ rsp_length = RESPONSE_ENTRY_CNT_82XX;
+ ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
+ ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
+ ha->gid_list_info_size = 8;
+ ha->optrom_size = OPTROM_SIZE_82XX;
+ ha->isp_ops = &qla82xx_isp_ops;
+ ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
+ ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
+ ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
+ ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
}
mutex_init(&ha->vport_lock);
@@ -1969,6 +2179,7 @@ que_init:
" pointers\n");
goto probe_init_failed;
}
+
ha->rsp_q_map[0] = rsp;
ha->req_q_map[0] = req;
rsp->req = req;
@@ -1987,6 +2198,12 @@ que_init:
rsp->rsp_q_out = &ha->mqiobase->isp25mq.rsp_q_out;
}
+ if (IS_QLA82XX(ha)) {
+ req->req_q_out = &ha->iobase->isp82.req_q_out[0];
+ rsp->rsp_q_in = &ha->iobase->isp82.rsp_q_in[0];
+ rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0];
+ }
+
if (qla2x00_initialize_adapter(base_vha)) {
qla_printk(KERN_WARNING, ha,
"Failed to initialize adapter\n");
@@ -1995,6 +2212,14 @@ que_init:
"Adapter flags %x.\n",
base_vha->host_no, base_vha->device_flags));
+ if (IS_QLA82XX(ha)) {
+ qla82xx_idc_lock(ha);
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+ QLA82XX_DEV_FAILED);
+ qla82xx_idc_unlock(ha);
+ qla_printk(KERN_INFO, ha, "HW State: FAILED\n");
+ }
+
ret = -ENODEV;
goto probe_failed;
}
@@ -2033,6 +2258,24 @@ skip_dpc:
DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
base_vha->host_no, ha));
+ if (IS_QLA25XX(ha) && ql2xenabledif) {
+ if (ha->fw_attributes & BIT_4) {
+ base_vha->flags.difdix_supported = 1;
+ DEBUG18(qla_printk(KERN_INFO, ha,
+ "Registering for DIF/DIX type 1 and 3"
+ " protection.\n"));
+ scsi_host_set_prot(host,
+ SHOST_DIF_TYPE1_PROTECTION
+ | SHOST_DIF_TYPE3_PROTECTION
+ | SHOST_DIX_TYPE1_PROTECTION
+ | SHOST_DIX_TYPE3_PROTECTION);
+ scsi_host_set_guard(host, SHOST_DIX_GUARD_CRC);
+ } else
+ base_vha->flags.difdix_supported = 0;
+ }
+
+ ha->isp_ops->enable_intrs(ha);
+
ret = scsi_add_host(host, &pdev->dev);
if (ret)
goto probe_failed;
@@ -2040,8 +2283,6 @@ skip_dpc:
base_vha->flags.init_done = 1;
base_vha->flags.online = 1;
- ha->isp_ops->enable_intrs(ha);
-
scsi_scan_host(host);
qla2x00_alloc_sysfs_attr(base_vha);
@@ -2083,9 +2324,17 @@ probe_failed:
scsi_host_put(base_vha->host);
probe_hw_failed:
- if (ha->iobase)
- iounmap(ha->iobase);
-
+ if (IS_QLA82XX(ha)) {
+ qla82xx_idc_lock(ha);
+ qla82xx_clear_drv_active(ha);
+ qla82xx_idc_unlock(ha);
+ iounmap((device_reg_t __iomem *)ha->nx_pcibase);
+ if (!ql2xdbwr)
+ iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr);
+ } else {
+ if (ha->iobase)
+ iounmap(ha->iobase);
+ }
pci_release_selected_regions(ha->pdev, ha->bars);
kfree(ha);
ha = NULL;
@@ -2152,11 +2401,17 @@ qla2x00_remove_one(struct pci_dev *pdev)
scsi_host_put(base_vha->host);
- if (ha->iobase)
- iounmap(ha->iobase);
+ if (IS_QLA82XX(ha)) {
+ iounmap((device_reg_t __iomem *)ha->nx_pcibase);
+ if (!ql2xdbwr)
+ iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr);
+ } else {
+ if (ha->iobase)
+ iounmap(ha->iobase);
- if (ha->mqiobase)
- iounmap(ha->mqiobase);
+ if (ha->mqiobase)
+ iounmap(ha->mqiobase);
+ }
pci_release_selected_regions(ha->pdev, ha->bars);
kfree(ha);
@@ -2205,8 +2460,10 @@ qla2x00_free_device(scsi_qla_host_t *vha)
vha->flags.online = 0;
/* turn-off interrupts on the card */
- if (ha->interrupts_on)
+ if (ha->interrupts_on) {
+ vha->flags.init_done = 0;
ha->isp_ops->disable_intrs(ha);
+ }
qla2x00_free_irqs(vha);
@@ -2351,10 +2608,25 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
if (!ha->srb_mempool)
goto fail_free_gid_list;
+ if (IS_QLA82XX(ha)) {
+ /* Allocate cache for CT6 Ctx. */
+ if (!ctx_cachep) {
+ ctx_cachep = kmem_cache_create("qla2xxx_ctx",
+ sizeof(struct ct6_dsd), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!ctx_cachep)
+ goto fail_free_gid_list;
+ }
+ ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ,
+ ctx_cachep);
+ if (!ha->ctx_mempool)
+ goto fail_free_srb_mempool;
+ }
+
/* Get memory for cached NVRAM */
ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL);
if (!ha->nvram)
- goto fail_free_srb_mempool;
+ goto fail_free_ctx_mempool;
snprintf(name, sizeof(name), "%s_%d", QLA2XXX_DRIVER_NAME,
ha->pdev->device);
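
The ctx_cachep slab cache and ha->ctx_mempool created above back the CT6 command-context allocations made on the I/O path; only the free side appears later in this file (qla2x00_sp_compl). A minimal sketch of how such a slab-backed mempool is typically consumed (the helper name and GFP flag are assumptions, not the driver's actual allocation path):

	/* Hypothetical sketch: obtain a CT6 context from the slab-backed mempool. */
	static int sketch_get_ct6_ctx(struct qla_hw_data *ha, srb_t *sp)
	{
		struct ct6_dsd *ctx;

		/* GFP_ATOMIC: command-build paths generally cannot sleep. */
		ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
		if (!ctx)
			return QLA_FUNCTION_FAILED;	/* pool exhausted */

		sp->ctx = ctx;	/* returned with mempool_free() at completion */
		return QLA_SUCCESS;
	}
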
@@ -2363,6 +2635,24 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
if (!ha->s_dma_pool)
goto fail_free_nvram;
+ if (IS_QLA82XX(ha) || ql2xenabledif) {
+ ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev,
+ DSD_LIST_DMA_POOL_SIZE, 8, 0);
+ if (!ha->dl_dma_pool) {
+ qla_printk(KERN_WARNING, ha,
+ "Memory Allocation failed - dl_dma_pool\n");
+ goto fail_s_dma_pool;
+ }
+
+ ha->fcp_cmnd_dma_pool = dma_pool_create(name, &ha->pdev->dev,
+ FCP_CMND_DMA_POOL_SIZE, 8, 0);
+ if (!ha->fcp_cmnd_dma_pool) {
+ qla_printk(KERN_WARNING, ha,
+ "Memory Allocation failed - fcp_cmnd_dma_pool\n");
+ goto fail_dl_dma_pool;
+ }
+ }
+
/* Allocate memory for SNS commands */
if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
/* Get consistent memory allocated for SNS commands */
@@ -2429,16 +2719,28 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
ha->npiv_info = NULL;
/* Get consistent memory allocated for EX-INIT-CB. */
- if (IS_QLA81XX(ha)) {
+ if (IS_QLA8XXX_TYPE(ha)) {
ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
&ha->ex_init_cb_dma);
if (!ha->ex_init_cb)
goto fail_ex_init_cb;
}
+ INIT_LIST_HEAD(&ha->gbl_dsd_list);
+
+ /* Get consistent memory allocated for Async Port-Database. */
+ if (!IS_FWI2_CAPABLE(ha)) {
+ ha->async_pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
+ &ha->async_pd_dma);
+ if (!ha->async_pd)
+ goto fail_async_pd;
+ }
+
INIT_LIST_HEAD(&ha->vp_list);
return 1;
+fail_async_pd:
+ dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
fail_ex_init_cb:
kfree(ha->npiv_info);
fail_npiv_info:
@@ -2465,11 +2767,24 @@ fail_free_ms_iocb:
ha->ms_iocb = NULL;
ha->ms_iocb_dma = 0;
fail_dma_pool:
+ if (IS_QLA82XX(ha) || ql2xenabledif) {
+ dma_pool_destroy(ha->fcp_cmnd_dma_pool);
+ ha->fcp_cmnd_dma_pool = NULL;
+ }
+fail_dl_dma_pool:
+ if (IS_QLA82XX(ha) || ql2xenabledif) {
+ dma_pool_destroy(ha->dl_dma_pool);
+ ha->dl_dma_pool = NULL;
+ }
+fail_s_dma_pool:
dma_pool_destroy(ha->s_dma_pool);
ha->s_dma_pool = NULL;
fail_free_nvram:
kfree(ha->nvram);
ha->nvram = NULL;
+fail_free_ctx_mempool:
+ mempool_destroy(ha->ctx_mempool);
+ ha->ctx_mempool = NULL;
fail_free_srb_mempool:
mempool_destroy(ha->srb_mempool);
ha->srb_mempool = NULL;
@@ -2538,7 +2853,11 @@ qla2x00_mem_free(struct qla_hw_data *ha)
dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
if (ha->ex_init_cb)
- dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
+ dma_pool_free(ha->s_dma_pool,
+ ha->ex_init_cb, ha->ex_init_cb_dma);
+
+ if (ha->async_pd)
+ dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
if (ha->s_dma_pool)
dma_pool_destroy(ha->s_dma_pool);
@@ -2547,14 +2866,39 @@ qla2x00_mem_free(struct qla_hw_data *ha)
dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
ha->gid_list_dma);
+ if (IS_QLA82XX(ha)) {
+ if (!list_empty(&ha->gbl_dsd_list)) {
+ struct dsd_dma *dsd_ptr, *tdsd_ptr;
+
+ /* clean up allocated prev pool */
+ list_for_each_entry_safe(dsd_ptr,
+ tdsd_ptr, &ha->gbl_dsd_list, list) {
+ dma_pool_free(ha->dl_dma_pool,
+ dsd_ptr->dsd_addr, dsd_ptr->dsd_list_dma);
+ list_del(&dsd_ptr->list);
+ kfree(dsd_ptr);
+ }
+ }
+ }
+
+ if (ha->dl_dma_pool)
+ dma_pool_destroy(ha->dl_dma_pool);
+
+ if (ha->fcp_cmnd_dma_pool)
+ dma_pool_destroy(ha->fcp_cmnd_dma_pool);
+
+ if (ha->ctx_mempool)
+ mempool_destroy(ha->ctx_mempool);
+
if (ha->init_cb)
dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
- ha->init_cb, ha->init_cb_dma);
+ ha->init_cb, ha->init_cb_dma);
vfree(ha->optrom_buffer);
kfree(ha->nvram);
kfree(ha->npiv_info);
ha->srb_mempool = NULL;
+ ha->ctx_mempool = NULL;
ha->eft = NULL;
ha->eft_dma = 0;
ha->sns_cmd = NULL;
@@ -2567,8 +2911,12 @@ qla2x00_mem_free(struct qla_hw_data *ha)
ha->init_cb_dma = 0;
ha->ex_init_cb = NULL;
ha->ex_init_cb_dma = 0;
+ ha->async_pd = NULL;
+ ha->async_pd_dma = 0;
ha->s_dma_pool = NULL;
+ ha->dl_dma_pool = NULL;
+ ha->fcp_cmnd_dma_pool = NULL;
ha->gid_list = NULL;
ha->gid_list_dma = 0;
@@ -2691,6 +3039,8 @@ qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN);
qla2x00_post_async_work(login_done, QLA_EVT_ASYNC_LOGIN_DONE);
qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE);
+qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC);
+qla2x00_post_async_work(adisc_done, QLA_EVT_ASYNC_ADISC_DONE);
int
qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code)
@@ -2760,6 +3110,14 @@ qla2x00_do_work(struct scsi_qla_host *vha)
qla2x00_async_logout_done(vha, e->u.logio.fcport,
e->u.logio.data);
break;
+ case QLA_EVT_ASYNC_ADISC:
+ qla2x00_async_adisc(vha, e->u.logio.fcport,
+ e->u.logio.data);
+ break;
+ case QLA_EVT_ASYNC_ADISC_DONE:
+ qla2x00_async_adisc_done(vha, e->u.logio.fcport,
+ e->u.logio.data);
+ break;
case QLA_EVT_UEVENT:
qla2x00_uevent_emit(vha, e->u.uevent.code);
break;
@@ -2785,9 +3143,8 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
* If the port is not ONLINE then try to login
* to it if we haven't run out of retries.
*/
- if (atomic_read(&fcport->state) !=
- FCS_ONLINE && fcport->login_retry) {
-
+ if (atomic_read(&fcport->state) != FCS_ONLINE &&
+ fcport->login_retry && !(fcport->flags & FCF_ASYNC_SENT)) {
fcport->login_retry--;
if (fcport->flags & FCF_FABRIC_DEVICE) {
if (fcport->flags & FCF_FCP2_DEVICE)
@@ -2798,6 +3155,7 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
fcport->d_id.b.al_pa);
if (IS_ALOGIO_CAPABLE(ha)) {
+ fcport->flags |= FCF_ASYNC_SENT;
data[0] = 0;
data[1] = QLA_LOGIO_LOGIN_RETRIED;
status = qla2x00_post_async_login_work(
@@ -2896,6 +3254,45 @@ qla2x00_do_dpc(void *data)
qla2x00_do_work(base_vha);
+ if (IS_QLA82XX(ha)) {
+ if (test_and_clear_bit(ISP_UNRECOVERABLE,
+ &base_vha->dpc_flags)) {
+ qla82xx_idc_lock(ha);
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+ QLA82XX_DEV_FAILED);
+ qla82xx_idc_unlock(ha);
+ qla_printk(KERN_INFO, ha,
+ "HW State: FAILED\n");
+ qla82xx_device_state_handler(base_vha);
+ continue;
+ }
+
+ if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED,
+ &base_vha->dpc_flags)) {
+
+ DEBUG(printk(KERN_INFO
+ "scsi(%ld): dpc: sched "
+ "qla82xx_fcoe_ctx_reset ha = %p\n",
+ base_vha->host_no, ha));
+ if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
+ &base_vha->dpc_flags))) {
+ if (qla82xx_fcoe_ctx_reset(base_vha)) {
+ /* FCoE-ctx reset failed.
+ * Escalate to chip-reset
+ */
+ set_bit(ISP_ABORT_NEEDED,
+ &base_vha->dpc_flags);
+ }
+ clear_bit(ABORT_ISP_ACTIVE,
+ &base_vha->dpc_flags);
+ }
+
+ DEBUG(printk("scsi(%ld): dpc:"
+ " qla82xx_fcoe_ctx_reset end\n",
+ base_vha->host_no));
+ }
+ }
+
if (test_and_clear_bit(ISP_ABORT_NEEDED,
&base_vha->dpc_flags)) {
@@ -2905,7 +3302,7 @@ qla2x00_do_dpc(void *data)
if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
&base_vha->dpc_flags))) {
- if (qla2x00_abort_isp(base_vha)) {
+ if (ha->isp_ops->abort_isp(base_vha)) {
/* failed. retry later */
set_bit(ISP_ABORT_NEEDED,
&base_vha->dpc_flags);
@@ -3038,11 +3435,31 @@ static void
qla2x00_sp_free_dma(srb_t *sp)
{
struct scsi_cmnd *cmd = sp->cmd;
+ struct qla_hw_data *ha = sp->fcport->vha->hw;
if (sp->flags & SRB_DMA_VALID) {
scsi_dma_unmap(cmd);
sp->flags &= ~SRB_DMA_VALID;
}
+
+ if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
+ dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
+ scsi_prot_sg_count(cmd), cmd->sc_data_direction);
+ sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
+ }
+
+ if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
+		/* List is assured to have elements */
+ qla2x00_clean_dsd_pool(ha, sp);
+ sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
+ }
+
+ if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
+ dma_pool_free(ha->dl_dma_pool, sp->ctx,
+ ((struct crc_context *)sp->ctx)->crc_ctx_dma);
+ sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
+ }
+
CMD_SP(cmd) = NULL;
}
@@ -3053,8 +3470,18 @@ qla2x00_sp_compl(struct qla_hw_data *ha, srb_t *sp)
qla2x00_sp_free_dma(sp);
- mempool_free(sp, ha->srb_mempool);
+ if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
+ struct ct6_dsd *ctx = sp->ctx;
+ dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd,
+ ctx->fcp_cmnd_dma);
+ list_splice(&ctx->dsd_list, &ha->gbl_dsd_list);
+ ha->gbl_dsd_inuse -= ctx->dsd_use_cnt;
+ ha->gbl_dsd_avail += ctx->dsd_use_cnt;
+ mempool_free(sp->ctx, ha->ctx_mempool);
+ sp->ctx = NULL;
+ }
+ mempool_free(sp, ha->srb_mempool);
cmd->scsi_done(cmd);
}
@@ -3079,6 +3506,9 @@ qla2x00_timer(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct req_que *req;
+ if (IS_QLA82XX(ha))
+ qla82xx_watchdog(vha);
+
/* Hardware read to raise pending EEH errors during mailbox waits. */
if (!pci_channel_offline(ha->pdev))
pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
@@ -3143,7 +3573,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
sp = req->outstanding_cmds[index];
if (!sp)
continue;
- if (sp->ctx)
+ if (sp->ctx && !IS_PROT_IO(sp))
continue;
sfcp = sp->fcport;
if (!(sfcp->flags & FCF_FCP2_DEVICE))
@@ -3193,6 +3623,8 @@ qla2x00_timer(scsi_qla_host_t *vha)
start_dpc ||
test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) ||
test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags) ||
+ test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) ||
+ test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
test_bit(RELOGIN_NEEDED, &vha->dpc_flags)))
qla2xxx_wake_dpc(vha);
@@ -3202,7 +3634,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
/* Firmware interface routines. */
-#define FW_BLOBS 7
+#define FW_BLOBS 8
#define FW_ISP21XX 0
#define FW_ISP22XX 1
#define FW_ISP2300 2
@@ -3210,6 +3642,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
#define FW_ISP24XX 4
#define FW_ISP25XX 5
#define FW_ISP81XX 6
+#define FW_ISP82XX 7
#define FW_FILE_ISP21XX "ql2100_fw.bin"
#define FW_FILE_ISP22XX "ql2200_fw.bin"
@@ -3218,6 +3651,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
#define FW_FILE_ISP24XX "ql2400_fw.bin"
#define FW_FILE_ISP25XX "ql2500_fw.bin"
#define FW_FILE_ISP81XX "ql8100_fw.bin"
+#define FW_FILE_ISP82XX "ql8200_fw.bin"
static DEFINE_MUTEX(qla_fw_lock);
@@ -3229,6 +3663,7 @@ static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
{ .name = FW_FILE_ISP24XX, },
{ .name = FW_FILE_ISP25XX, },
{ .name = FW_FILE_ISP81XX, },
+ { .name = FW_FILE_ISP82XX, },
};
struct fw_blob *
@@ -3252,6 +3687,8 @@ qla2x00_request_firmware(scsi_qla_host_t *vha)
blob = &qla_fw_blobs[FW_ISP25XX];
} else if (IS_QLA81XX(ha)) {
blob = &qla_fw_blobs[FW_ISP81XX];
+ } else if (IS_QLA82XX(ha)) {
+ blob = &qla_fw_blobs[FW_ISP82XX];
}
mutex_lock(&qla_fw_lock);
@@ -3392,11 +3829,10 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
msleep(1000);
set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
- if (qla2x00_abort_isp(base_vha) == QLA_SUCCESS)
+ if (ha->isp_ops->abort_isp(base_vha) == QLA_SUCCESS)
ret = PCI_ERS_RESULT_RECOVERED;
clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
- pci_cleanup_aer_uncorrect_error_status(pdev);
DEBUG17(qla_printk(KERN_WARNING, ha,
"slot_reset-return:ret=%x\n", ret));
@@ -3420,6 +3856,8 @@ qla2xxx_pci_resume(struct pci_dev *pdev)
"from slot/link_reset");
}
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+
ha->flags.eeh_busy = 0;
}
@@ -3445,6 +3883,7 @@ static struct pci_device_id qla2xxx_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) },
{ 0 },
};
MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
@@ -3460,6 +3899,10 @@ static struct pci_driver qla2xxx_pci_driver = {
.err_handler = &qla2xxx_err_handler,
};
+static struct file_operations apidev_fops = {
+ .owner = THIS_MODULE,
+};
+
/**
* qla2x00_module_init - Module initialization.
**/
@@ -3488,6 +3931,13 @@ qla2x00_module_init(void)
kmem_cache_destroy(srb_cachep);
return -ENODEV;
}
+
+ apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops);
+ if (apidev_major < 0) {
+ printk(KERN_WARNING "qla2xxx: Unable to register char device "
+ "%s\n", QLA2XXX_APIDEV);
+ }
+
qla2xxx_transport_vport_template =
fc_attach_transport(&qla2xxx_transport_vport_functions);
if (!qla2xxx_transport_vport_template) {
@@ -3513,9 +3963,12 @@ qla2x00_module_init(void)
static void __exit
qla2x00_module_exit(void)
{
+ unregister_chrdev(apidev_major, QLA2XXX_APIDEV);
pci_unregister_driver(&qla2xxx_pci_driver);
qla2x00_release_firmware();
kmem_cache_destroy(srb_cachep);
+ if (ctx_cachep)
+ kmem_cache_destroy(ctx_cachep);
fc_release_transport(qla2xxx_transport_template);
fc_release_transport(qla2xxx_transport_vport_template);
}