From 9af05d90e91026838330a2bf86a6692c86f3c4e7 Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Tue, 4 Jan 2011 11:35:41 +0530 Subject: [SCSI] mpt2sas: Revision P MPI Header Update Revision P MPI Header Update: a) Added enable/disable SATA NCQ operations to SAS IO Unit Control Request. b) Modified Host Based Discovery Action Request message format. c) Removed Device Path bit from IO Unit Page 1 Flags field. d) Added description of ChainOffset field for Diagnostic Data Upload Tool.Chaining is not allowed. Removed mpi2_history.txt file Signed-off-by: Kashyap Desai Signed-off-by: James Bottomley --- drivers/scsi/mpt2sas/mpi/mpi2.h | 5 +- drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h | 6 +- drivers/scsi/mpt2sas/mpi/mpi2_history.txt | 384 ------------------------------ drivers/scsi/mpt2sas/mpi/mpi2_sas.h | 7 +- drivers/scsi/mpt2sas/mpi/mpi2_tool.h | 8 +- 5 files changed, 18 insertions(+), 392 deletions(-) delete mode 100644 drivers/scsi/mpt2sas/mpi/mpi2_history.txt (limited to 'drivers') diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h index 8be75e65f763..a3e60385787f 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2.h @@ -8,7 +8,7 @@ * scatter/gather formats. * Creation Date: June 21, 2006 * - * mpi2.h Version: 02.00.16 + * mpi2.h Version: 02.00.17 * * Version History * --------------- @@ -63,6 +63,7 @@ * function codes, 0xF0 to 0xFF. * 05-12-10 02.00.16 Bumped MPI2_HEADER_VERSION_UNIT. * Added alternative defines for the SGE Direction bit. + * 08-11-10 02.00.17 Bumped MPI2_HEADER_VERSION_UNIT. * -------------------------------------------------------------------------- */ @@ -88,7 +89,7 @@ #define MPI2_VERSION_02_00 (0x0200) /* versioning for this MPI header set */ -#define MPI2_HEADER_VERSION_UNIT (0x10) +#define MPI2_HEADER_VERSION_UNIT (0x11) #define MPI2_HEADER_VERSION_DEV (0x00) #define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) #define MPI2_HEADER_VERSION_UNIT_SHIFT (8) diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h index d76a65847603..f5b9c766e28f 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h @@ -6,7 +6,7 @@ * Title: MPI Configuration messages and pages * Creation Date: November 10, 2006 * - * mpi2_cnfg.h Version: 02.00.15 + * mpi2_cnfg.h Version: 02.00.16 * * Version History * --------------- @@ -125,6 +125,8 @@ * define. * Added MPI2_PHYSDISK0_INCOMPATIBLE_MEDIA_TYPE define. * Added MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY define. + * 08-11-10 02.00.16 Removed IO Unit Page 1 device path (multi-pathing) + * defines. * -------------------------------------------------------------------------- */ @@ -745,8 +747,6 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1 #define MPI2_IOUNITPAGE1_DISABLE_IR (0x00000040) #define MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING (0x00000020) #define MPI2_IOUNITPAGE1_IR_USE_STATIC_VOLUME_ID (0x00000004) -#define MPI2_IOUNITPAGE1_MULTI_PATHING (0x00000002) -#define MPI2_IOUNITPAGE1_SINGLE_PATHING (0x00000000) /* IO Unit Page 3 */ diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_history.txt b/drivers/scsi/mpt2sas/mpi/mpi2_history.txt deleted file mode 100644 index b1e88f26b748..000000000000 --- a/drivers/scsi/mpt2sas/mpi/mpi2_history.txt +++ /dev/null @@ -1,384 +0,0 @@ - ============================== - Fusion-MPT MPI 2.0 Header File Change History - ============================== - - Copyright (c) 2000-2010 LSI Corporation. 
- - --------------------------------------- - Header Set Release Version: 02.00.14 - Header Set Release Date: 10-28-09 - --------------------------------------- - - Filename Current version Prior version - ---------- --------------- ------------- - mpi2.h 02.00.14 02.00.13 - mpi2_cnfg.h 02.00.13 02.00.12 - mpi2_init.h 02.00.08 02.00.07 - mpi2_ioc.h 02.00.13 02.00.12 - mpi2_raid.h 02.00.04 02.00.04 - mpi2_sas.h 02.00.03 02.00.02 - mpi2_targ.h 02.00.03 02.00.03 - mpi2_tool.h 02.00.04 02.00.04 - mpi2_type.h 02.00.00 02.00.00 - mpi2_ra.h 02.00.00 02.00.00 - mpi2_hbd.h 02.00.00 - mpi2_history.txt 02.00.14 02.00.13 - - - * Date Version Description - * -------- -------- ------------------------------------------------------ - -mpi2.h - * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. - * 06-04-07 02.00.01 Bumped MPI2_HEADER_VERSION_UNIT. - * 06-26-07 02.00.02 Bumped MPI2_HEADER_VERSION_UNIT. - * 08-31-07 02.00.03 Bumped MPI2_HEADER_VERSION_UNIT. - * Moved ReplyPostHostIndex register to offset 0x6C of the - * MPI2_SYSTEM_INTERFACE_REGS and modified the define for - * MPI2_REPLY_POST_HOST_INDEX_OFFSET. - * Added union of request descriptors. - * Added union of reply descriptors. - * 10-31-07 02.00.04 Bumped MPI2_HEADER_VERSION_UNIT. - * Added define for MPI2_VERSION_02_00. - * Fixed the size of the FunctionDependent5 field in the - * MPI2_DEFAULT_REPLY structure. - * 12-18-07 02.00.05 Bumped MPI2_HEADER_VERSION_UNIT. - * Removed the MPI-defined Fault Codes and extended the - * product specific codes up to 0xEFFF. - * Added a sixth key value for the WriteSequence register - * and changed the flush value to 0x0. - * Added message function codes for Diagnostic Buffer Post - * and Diagnsotic Release. - * New IOCStatus define: MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED - * Moved MPI2_VERSION_UNION from mpi2_ioc.h. - * 02-29-08 02.00.06 Bumped MPI2_HEADER_VERSION_UNIT. - * 03-03-08 02.00.07 Bumped MPI2_HEADER_VERSION_UNIT. - * 05-21-08 02.00.08 Bumped MPI2_HEADER_VERSION_UNIT. - * Added #defines for marking a reply descriptor as unused. - * 06-27-08 02.00.09 Bumped MPI2_HEADER_VERSION_UNIT. - * 10-02-08 02.00.10 Bumped MPI2_HEADER_VERSION_UNIT. - * Moved LUN field defines from mpi2_init.h. - * 01-19-09 02.00.11 Bumped MPI2_HEADER_VERSION_UNIT. - * 05-06-09 02.00.12 Bumped MPI2_HEADER_VERSION_UNIT. - * In all request and reply descriptors, replaced VF_ID - * field with MSIxIndex field. - * Removed DevHandle field from - * MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR and made those - * bytes reserved. - * Added RAID Accelerator functionality. - * 07-30-09 02.00.13 Bumped MPI2_HEADER_VERSION_UNIT. - * 10-28-09 02.00.14 Bumped MPI2_HEADER_VERSION_UNIT. - * Added MSI-x index mask and shift for Reply Post Host - * Index register. - * Added function code for Host Based Discovery Action. - * -------------------------------------------------------------------------- - -mpi2_cnfg.h - * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. - * 06-04-07 02.00.01 Added defines for SAS IO Unit Page 2 PhyFlags. - * Added Manufacturing Page 11. - * Added MPI2_SAS_EXPANDER0_FLAGS_CONNECTOR_END_DEVICE - * define. - * 06-26-07 02.00.02 Adding generic structure for product-specific - * Manufacturing pages: MPI2_CONFIG_PAGE_MANUFACTURING_PS. - * Rework of BIOS Page 2 configuration page. - * Fixed MPI2_BIOSPAGE2_BOOT_DEVICE to be a union of the - * forms. - * Added configuration pages IOC Page 8 and Driver - * Persistent Mapping Page 0. 
- * 08-31-07 02.00.03 Modified configuration pages dealing with Integrated - * RAID (Manufacturing Page 4, RAID Volume Pages 0 and 1, - * RAID Physical Disk Pages 0 and 1, RAID Configuration - * Page 0). - * Added new value for AccessStatus field of SAS Device - * Page 0 (_SATA_NEEDS_INITIALIZATION). - * 10-31-07 02.00.04 Added missing SEPDevHandle field to - * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0. - * 12-18-07 02.00.05 Modified IO Unit Page 0 to use 32-bit version fields for - * NVDATA. - * Modified IOC Page 7 to use masks and added field for - * SASBroadcastPrimitiveMasks. - * Added MPI2_CONFIG_PAGE_BIOS_4. - * Added MPI2_CONFIG_PAGE_LOG_0. - * 02-29-08 02.00.06 Modified various names to make them 32-character unique. - * Added SAS Device IDs. - * Updated Integrated RAID configuration pages including - * Manufacturing Page 4, IOC Page 6, and RAID Configuration - * Page 0. - * 05-21-08 02.00.07 Added define MPI2_MANPAGE4_MIX_SSD_SAS_SATA. - * Added define MPI2_MANPAGE4_PHYSDISK_128MB_COERCION. - * Fixed define MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING. - * Added missing MaxNumRoutedSasAddresses field to - * MPI2_CONFIG_PAGE_EXPANDER_0. - * Added SAS Port Page 0. - * Modified structure layout for - * MPI2_CONFIG_PAGE_DRIVER_MAPPING_0. - * 06-27-08 02.00.08 Changed MPI2_CONFIG_PAGE_RD_PDISK_1 to use - * MPI2_RAID_PHYS_DISK1_PATH_MAX to size the array. - * 10-02-08 02.00.09 Changed MPI2_RAID_PGAD_CONFIGNUM_MASK from 0x0000FFFF - * to 0x000000FF. - * Added two new values for the Physical Disk Coercion Size - * bits in the Flags field of Manufacturing Page 4. - * Added product-specific Manufacturing pages 16 to 31. - * Modified Flags bits for controlling write cache on SATA - * drives in IO Unit Page 1. - * Added new bit to AdditionalControlFlags of SAS IO Unit - * Page 1 to control Invalid Topology Correction. - * Added SupportedPhysDisks field to RAID Volume Page 1 and - * added related defines. - * Added additional defines for RAID Volume Page 0 - * VolumeStatusFlags field. - * Modified meaning of RAID Volume Page 0 VolumeSettings - * define for auto-configure of hot-swap drives. - * Added PhysDiskAttributes field (and related defines) to - * RAID Physical Disk Page 0. - * Added MPI2_SAS_PHYINFO_PHY_VACANT define. - * Added three new DiscoveryStatus bits for SAS IO Unit - * Page 0 and SAS Expander Page 0. - * Removed multiplexing information from SAS IO Unit pages. - * Added BootDeviceWaitTime field to SAS IO Unit Page 4. - * Removed Zone Address Resolved bit from PhyInfo and from - * Expander Page 0 Flags field. - * Added two new AccessStatus values to SAS Device Page 0 - * for indicating routing problems. Added 3 reserved words - * to this page. - * 01-19-09 02.00.10 Fixed defines for GPIOVal field of IO Unit Page 3. - * Inserted missing reserved field into structure for IOC - * Page 6. - * Added more pending task bits to RAID Volume Page 0 - * VolumeStatusFlags defines. - * Added MPI2_PHYSDISK0_STATUS_FLAG_NOT_CERTIFIED define. - * Added a new DiscoveryStatus bit for SAS IO Unit Page 0 - * and SAS Expander Page 0 to flag a downstream initiator - * when in simplified routing mode. - * Removed SATA Init Failure defines for DiscoveryStatus - * fields of SAS IO Unit Page 0 and SAS Expander Page 0. - * Added MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED define. - * Added PortGroups, DmaGroup, and ControlGroup fields to - * SAS Device Page 0. - * 05-06-09 02.00.11 Added structures and defines for IO Unit Page 5 and IO - * Unit Page 6. 
- * Added expander reduced functionality data to SAS - * Expander Page 0. - * Added SAS PHY Page 2 and SAS PHY Page 3. - * 07-30-09 02.00.12 Added IO Unit Page 7. - * Added new device ids. - * Added SAS IO Unit Page 5. - * Added partial and slumber power management capable flags - * to SAS Device Page 0 Flags field. - * Added PhyInfo defines for power condition. - * Added Ethernet configuration pages. - * 10-28-09 02.00.13 Added MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY. - * Added SAS PHY Page 4 structure and defines. - * -------------------------------------------------------------------------- - -mpi2_init.h - * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. - * 10-31-07 02.00.01 Fixed name for pMpi2SCSITaskManagementRequest_t. - * 12-18-07 02.00.02 Modified Task Management Target Reset Method defines. - * 02-29-08 02.00.03 Added Query Task Set and Query Unit Attention. - * 03-03-08 02.00.04 Fixed name of struct _MPI2_SCSI_TASK_MANAGE_REPLY. - * 05-21-08 02.00.05 Fixed typo in name of Mpi2SepRequest_t. - * 10-02-08 02.00.06 Removed Untagged and No Disconnect values from SCSI IO - * Control field Task Attribute flags. - * Moved LUN field defines to mpi2.h becasue they are - * common to many structures. - * 05-06-09 02.00.07 Changed task management type of Query Unit Attention to - * Query Asynchronous Event. - * Defined two new bits in the SlotStatus field of the SCSI - * Enclosure Processor Request and Reply. - * 10-28-09 02.00.08 Added defines for decoding the ResponseInfo bytes for - * both SCSI IO Error Reply and SCSI Task Management Reply. - * Added ResponseInfo field to MPI2_SCSI_TASK_MANAGE_REPLY. - * Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define. - * -------------------------------------------------------------------------- - -mpi2_ioc.h - * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. - * 06-04-07 02.00.01 In IOCFacts Reply structure, renamed MaxDevices to - * MaxTargets. - * Added TotalImageSize field to FWDownload Request. - * Added reserved words to FWUpload Request. - * 06-26-07 02.00.02 Added IR Configuration Change List Event. - * 08-31-07 02.00.03 Removed SystemReplyQueueDepth field from the IOCInit - * request and replaced it with - * ReplyDescriptorPostQueueDepth and ReplyFreeQueueDepth. - * Replaced the MinReplyQueueDepth field of the IOCFacts - * reply with MaxReplyDescriptorPostQueueDepth. - * Added MPI2_RDPQ_DEPTH_MIN define to specify the minimum - * depth for the Reply Descriptor Post Queue. - * Added SASAddress field to Initiator Device Table - * Overflow Event data. - * 10-31-07 02.00.04 Added ReasonCode MPI2_EVENT_SAS_INIT_RC_NOT_RESPONDING - * for SAS Initiator Device Status Change Event data. - * Modified Reason Code defines for SAS Topology Change - * List Event data, including adding a bit for PHY Vacant - * status, and adding a mask for the Reason Code. - * Added define for - * MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING. - * Added define for MPI2_EXT_IMAGE_TYPE_MEGARAID. - * 12-18-07 02.00.05 Added Boot Status defines for the IOCExceptions field of - * the IOCFacts Reply. - * Removed MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define. - * Moved MPI2_VERSION_UNION to mpi2.h. - * Changed MPI2_EVENT_NOTIFICATION_REQUEST to use masks - * instead of enables, and added SASBroadcastPrimitiveMasks - * field. - * Added Log Entry Added Event and related structure. - * 02-29-08 02.00.06 Added define MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID. - * Removed define MPI2_IOCFACTS_PROTOCOL_SMP_TARGET. 
- * Added MaxVolumes and MaxPersistentEntries fields to - * IOCFacts reply. - * Added ProtocalFlags and IOCCapabilities fields to - * MPI2_FW_IMAGE_HEADER. - * Removed MPI2_PORTENABLE_FLAGS_ENABLE_SINGLE_PORT. - * 03-03-08 02.00.07 Fixed MPI2_FW_IMAGE_HEADER by changing Reserved26 to - * a U16 (from a U32). - * Removed extra 's' from EventMasks name. - * 06-27-08 02.00.08 Fixed an offset in a comment. - * 10-02-08 02.00.09 Removed SystemReplyFrameSize from MPI2_IOC_INIT_REQUEST. - * Removed CurReplyFrameSize from MPI2_IOC_FACTS_REPLY and - * renamed MinReplyFrameSize to ReplyFrameSize. - * Added MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX. - * Added two new RAIDOperation values for Integrated RAID - * Operations Status Event data. - * Added four new IR Configuration Change List Event data - * ReasonCode values. - * Added two new ReasonCode defines for SAS Device Status - * Change Event data. - * Added three new DiscoveryStatus bits for the SAS - * Discovery event data. - * Added Multiplexing Status Change bit to the PhyStatus - * field of the SAS Topology Change List event data. - * Removed define for MPI2_INIT_IMAGE_BOOTFLAGS_XMEMCOPY. - * BootFlags are now product-specific. - * Added defines for the indivdual signature bytes - * for MPI2_INIT_IMAGE_FOOTER. - * 01-19-09 02.00.10 Added MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY define. - * Added MPI2_EVENT_SAS_DISC_DS_DOWNSTREAM_INITIATOR - * define. - * Added MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE - * define. - * Removed MPI2_EVENT_SAS_DISC_DS_SATA_INIT_FAILURE define. - * 05-06-09 02.00.11 Added MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR define. - * Added MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX define. - * Added two new reason codes for SAS Device Status Change - * Event. - * Added new event: SAS PHY Counter. - * 07-30-09 02.00.12 Added GPIO Interrupt event define and structure. - * Added MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define. - * Added new product id family for 2208. - * 10-28-09 02.00.13 Added HostMSIxVectors field to MPI2_IOC_INIT_REQUEST. - * Added MaxMSIxVectors field to MPI2_IOC_FACTS_REPLY. - * Added MinDevHandle field to MPI2_IOC_FACTS_REPLY. - * Added MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY. - * Added MPI2_EVENT_HOST_BASED_DISCOVERY_PHY define. - * Added MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER define. - * Added Host Based Discovery Phy Event data. - * Added defines for ProductID Product field - * (MPI2_FW_HEADER_PID_). - * Modified values for SAS ProductID Family - * (MPI2_FW_HEADER_PID_FAMILY_). - * -------------------------------------------------------------------------- - -mpi2_raid.h - * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. - * 08-31-07 02.00.01 Modifications to RAID Action request and reply, - * including the Actions and ActionData. - * 02-29-08 02.00.02 Added MPI2_RAID_ACTION_ADATA_DISABL_FULL_REBUILD. - * 05-21-08 02.00.03 Added MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS so that - * the PhysDisk array in MPI2_RAID_VOLUME_CREATION_STRUCT - * can be sized by the build environment. - * 07-30-09 02.00.04 Added proper define for the Use Default Settings bit of - * VolumeCreationFlags and marked the old one as obsolete. - * 05-12-10 02.00.05 Added MPI2_RAID_VOL_FLAGS_OP_MDC define. - * -------------------------------------------------------------------------- - -mpi2_sas.h - * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. - * 06-26-07 02.00.01 Added Clear All Persistent Operation to SAS IO Unit - * Control Request. 
- * 10-02-08 02.00.02 Added Set IOC Parameter Operation to SAS IO Unit Control - * Request. - * 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST - * to MPI2_SGE_IO_UNION since it supports chained SGLs. - * 05-12-10 02.00.04 Modified some comments. - * -------------------------------------------------------------------------- - -mpi2_targ.h - * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. - * 08-31-07 02.00.01 Added Command Buffer Data Location Address Space bits to - * BufferPostFlags field of CommandBufferPostBase Request. - * 02-29-08 02.00.02 Modified various names to make them 32-character unique. - * 10-02-08 02.00.03 Removed NextCmdBufferOffset from - * MPI2_TARGET_CMD_BUF_POST_BASE_REQUEST. - * Target Status Send Request only takes a single SGE for - * response data. - * -------------------------------------------------------------------------- - -mpi2_tool.h - * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. - * 12-18-07 02.00.01 Added Diagnostic Buffer Post and Diagnostic Release - * structures and defines. - * 02-29-08 02.00.02 Modified various names to make them 32-character unique. - * 05-06-09 02.00.03 Added ISTWI Read Write Tool and Diagnostic CLI Tool. - * 07-30-09 02.00.04 Added ExtendedType field to DiagnosticBufferPost request - * and reply messages. - * Added MPI2_DIAG_BUF_TYPE_EXTENDED. - * Incremented MPI2_DIAG_BUF_TYPE_COUNT. - * 05-12-10 02.00.05 Added Diagnostic Data Upload tool. - * -------------------------------------------------------------------------- - -mpi2_type.h - * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. - * -------------------------------------------------------------------------- - -mpi2_ra.h - * 05-06-09 02.00.00 Initial version. - * -------------------------------------------------------------------------- - -mpi2_hbd.h - * 10-28-09 02.00.00 Initial version. 
- * -------------------------------------------------------------------------- - - -mpi2_history.txt Parts list history - -Filename 02.00.14 02.00.13 02.00.12 ----------- -------- -------- -------- -mpi2.h 02.00.14 02.00.13 02.00.12 -mpi2_cnfg.h 02.00.13 02.00.12 02.00.11 -mpi2_init.h 02.00.08 02.00.07 02.00.07 -mpi2_ioc.h 02.00.13 02.00.12 02.00.11 -mpi2_raid.h 02.00.04 02.00.04 02.00.03 -mpi2_sas.h 02.00.03 02.00.02 02.00.02 -mpi2_targ.h 02.00.03 02.00.03 02.00.03 -mpi2_tool.h 02.00.04 02.00.04 02.00.03 -mpi2_type.h 02.00.00 02.00.00 02.00.00 -mpi2_ra.h 02.00.00 02.00.00 02.00.00 -mpi2_hbd.h 02.00.00 - -Filename 02.00.11 02.00.10 02.00.09 02.00.08 02.00.07 02.00.06 ----------- -------- -------- -------- -------- -------- -------- -mpi2.h 02.00.11 02.00.10 02.00.09 02.00.08 02.00.07 02.00.06 -mpi2_cnfg.h 02.00.10 02.00.09 02.00.08 02.00.07 02.00.06 02.00.06 -mpi2_init.h 02.00.06 02.00.06 02.00.05 02.00.05 02.00.04 02.00.03 -mpi2_ioc.h 02.00.10 02.00.09 02.00.08 02.00.07 02.00.07 02.00.06 -mpi2_raid.h 02.00.03 02.00.03 02.00.03 02.00.03 02.00.02 02.00.02 -mpi2_sas.h 02.00.02 02.00.02 02.00.01 02.00.01 02.00.01 02.00.01 -mpi2_targ.h 02.00.03 02.00.03 02.00.02 02.00.02 02.00.02 02.00.02 -mpi2_tool.h 02.00.02 02.00.02 02.00.02 02.00.02 02.00.02 02.00.02 -mpi2_type.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 - -Filename 02.00.05 02.00.04 02.00.03 02.00.02 02.00.01 02.00.00 ----------- -------- -------- -------- -------- -------- -------- -mpi2.h 02.00.05 02.00.04 02.00.03 02.00.02 02.00.01 02.00.00 -mpi2_cnfg.h 02.00.05 02.00.04 02.00.03 02.00.02 02.00.01 02.00.00 -mpi2_init.h 02.00.02 02.00.01 02.00.00 02.00.00 02.00.00 02.00.00 -mpi2_ioc.h 02.00.05 02.00.04 02.00.03 02.00.02 02.00.01 02.00.00 -mpi2_raid.h 02.00.01 02.00.01 02.00.01 02.00.00 02.00.00 02.00.00 -mpi2_sas.h 02.00.01 02.00.01 02.00.01 02.00.01 02.00.00 02.00.00 -mpi2_targ.h 02.00.01 02.00.01 02.00.01 02.00.00 02.00.00 02.00.00 -mpi2_tool.h 02.00.01 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 -mpi2_type.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 - diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h index 608f6d6e6fca..fdffde1ebc0f 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h @@ -6,7 +6,7 @@ * Title: MPI Serial Attached SCSI structures and definitions * Creation Date: February 9, 2007 * - * mpi2_sas.h Version: 02.00.04 + * mpi2_sas.h Version: 02.00.05 * * Version History * --------------- @@ -21,6 +21,7 @@ * 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST * to MPI2_SGE_IO_UNION since it supports chained SGLs. * 05-12-10 02.00.04 Modified some comments. + * 08-11-10 02.00.05 Added NCQ operations to SAS IO Unit Control. 
* -------------------------------------------------------------------------- */ @@ -163,7 +164,7 @@ typedef struct _MPI2_SATA_PASSTHROUGH_REQUEST U32 Reserved4; /* 0x14 */ U32 DataLength; /* 0x18 */ U8 CommandFIS[20]; /* 0x1C */ - MPI2_SGE_IO_UNION SGL; /* 0x20 */ + MPI2_SGE_IO_UNION SGL; /* 0x30 */ } MPI2_SATA_PASSTHROUGH_REQUEST, MPI2_POINTER PTR_MPI2_SATA_PASSTHROUGH_REQUEST, Mpi2SataPassthroughRequest_t, MPI2_POINTER pMpi2SataPassthroughRequest_t; @@ -246,6 +247,8 @@ typedef struct _MPI2_SAS_IOUNIT_CONTROL_REQUEST #define MPI2_SAS_OP_REMOVE_DEVICE (0x0D) #define MPI2_SAS_OP_LOOKUP_MAPPING (0x0E) #define MPI2_SAS_OP_SET_IOC_PARAMETER (0x0F) +#define MPI2_SAS_OP_DEV_ENABLE_NCQ (0x14) +#define MPI2_SAS_OP_DEV_DISABLE_NCQ (0x15) #define MPI2_SAS_OP_PRODUCT_SPECIFIC_MIN (0x80) /* values for the PrimFlags field */ diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h index 5c6e3a67bb94..2a4bceda364b 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h @@ -6,7 +6,7 @@ * Title: MPI diagnostic tool structures and definitions * Creation Date: March 26, 2007 * - * mpi2_tool.h Version: 02.00.05 + * mpi2_tool.h Version: 02.00.06 * * Version History * --------------- @@ -23,6 +23,8 @@ * Added MPI2_DIAG_BUF_TYPE_EXTENDED. * Incremented MPI2_DIAG_BUF_TYPE_COUNT. * 05-12-10 02.00.05 Added Diagnostic Data Upload tool. + * 08-11-10 02.00.06 Added defines that were missing for Diagnostic Buffer + * Post Request. * -------------------------------------------------------------------------- */ @@ -354,6 +356,10 @@ typedef struct _MPI2_DIAG_BUFFER_POST_REQUEST /* count of the number of buffer types */ #define MPI2_DIAG_BUF_TYPE_COUNT (0x03) +/* values for the Flags field */ +#define MPI2_DIAG_BUF_FLAG_RELEASE_ON_FULL (0x00000002) +#define MPI2_DIAG_BUF_FLAG_IMMEDIATE_RELEASE (0x00000001) + /**************************************************************************** * Diagnostic Buffer Post reply -- cgit v1.2.3 From fb396bec76187aae1a0eaf5b7f21327b8cb34fec Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Tue, 4 Jan 2011 11:36:37 +0530 Subject: [SCSI] mpt2sas: Add support for Customer specific branding messages Add support for Customer specific branding messages when device driver loads, based on specific customer subsystem vendor and device Ids Signed-off-by: Kashyap Desai Signed-off-by: James Bottomley --- drivers/scsi/mpt2sas/mpt2sas_base.c | 26 ++++++++++++++++++++++++++ drivers/scsi/mpt2sas/mpt2sas_base.h | 14 ++++++++++++++ 2 files changed, 40 insertions(+) (limited to 'drivers') diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index b2a817055b8b..a08e1beaa03c 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c @@ -1724,6 +1724,31 @@ _base_display_dell_branding(struct MPT2SAS_ADAPTER *ioc) ioc->pdev->subsystem_device); } +/** + * _base_display_intel_branding - Display branding string + * @ioc: per adapter object + * + * Return nothing. 
+ */ +static void +_base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc) +{ + if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_INTEL && + ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008) { + + switch (ioc->pdev->subsystem_device) { + case MPT2SAS_INTEL_RMS2LL080_SSDID: + printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, + MPT2SAS_INTEL_RMS2LL080_BRANDING); + break; + case MPT2SAS_INTEL_RMS2LL040_SSDID: + printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, + MPT2SAS_INTEL_RMS2LL040_BRANDING); + break; + } + } +} + /** * _base_display_ioc_capabilities - Disply IOC's capabilities. * @ioc: per adapter object @@ -1754,6 +1779,7 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc) ioc->bios_pg3.BiosVersion & 0x000000FF); _base_display_dell_branding(ioc); + _base_display_intel_branding(ioc); printk(MPT2SAS_INFO_FMT "Protocol=(", ioc->name); diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index 283568c6fb04..82579fbca9e3 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h @@ -153,6 +153,20 @@ #define MPT2SAS_DELL_PERC_H200_SSDID 0x1F21 #define MPT2SAS_DELL_6GBPS_SAS_SSDID 0x1F22 +/* + * Intel HBA branding + */ +#define MPT2SAS_INTEL_RMS2LL080_BRANDING \ + "Intel Integrated RAID Module RMS2LL080" +#define MPT2SAS_INTEL_RMS2LL040_BRANDING \ + "Intel Integrated RAID Module RMS2LL040" + +/* + * Intel HBA SSDIDs + */ +#define MPT2SAS_INTEL_RMS2LL080_SSDID 0x350E +#define MPT2SAS_INTEL_RMS2LL040_SSDID 0x350F + /* * per target private data */ -- cgit v1.2.3 From d5bd3491c8c30b98f642cab4361ff1a43955ccd4 Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Tue, 4 Jan 2011 11:39:20 +0530 Subject: [SCSI] mpt2sas: Basic Code Cleanup in mpt2sas_base Basic Code Cleanup: (1) _base_get_cb_idx and mpt2sas_base_free_smid were reorganized in similar fashion so the order of obtaining the cbx and smid are scsiio, hi_priority, and internal. (2) The hi_priority and internal request queue struct was made smaller by removing the scmd and chain_tracker, thus saving memory allocation. (3) For scsiio request, a new structure was created having the same elements from the former request tracker struct. 
Signed-off-by: Kashyap Desai Signed-off-by: James Bottomley --- drivers/scsi/mpt2sas/mpt2sas_base.c | 102 ++++++++++++++++++------------------ drivers/scsi/mpt2sas/mpt2sas_base.h | 22 ++++++-- 2 files changed, 68 insertions(+), 56 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index a08e1beaa03c..36008abab259 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c @@ -752,20 +752,19 @@ static u8 _base_get_cb_idx(struct MPT2SAS_ADAPTER *ioc, u16 smid) { int i; - u8 cb_idx = 0xFF; - - if (smid >= ioc->hi_priority_smid) { - if (smid < ioc->internal_smid) { - i = smid - ioc->hi_priority_smid; - cb_idx = ioc->hpr_lookup[i].cb_idx; - } else if (smid <= ioc->hba_queue_depth) { - i = smid - ioc->internal_smid; - cb_idx = ioc->internal_lookup[i].cb_idx; - } - } else { + u8 cb_idx; + + if (smid < ioc->hi_priority_smid) { i = smid - 1; cb_idx = ioc->scsi_lookup[i].cb_idx; - } + } else if (smid < ioc->internal_smid) { + i = smid - ioc->hi_priority_smid; + cb_idx = ioc->hpr_lookup[i].cb_idx; + } else if (smid <= ioc->hba_queue_depth) { + i = smid - ioc->internal_smid; + cb_idx = ioc->internal_lookup[i].cb_idx; + } else + cb_idx = 0xFF; return cb_idx; } @@ -1430,7 +1429,7 @@ mpt2sas_base_get_smid_scsiio(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx, struct scsi_cmnd *scmd) { unsigned long flags; - struct request_tracker *request; + struct scsiio_tracker *request; u16 smid; spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); @@ -1442,7 +1441,7 @@ mpt2sas_base_get_smid_scsiio(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx, } request = list_entry(ioc->free_list.next, - struct request_tracker, tracker_list); + struct scsiio_tracker, tracker_list); request->scmd = scmd; request->cb_idx = cb_idx; smid = request->smid; @@ -1496,48 +1495,47 @@ mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid) struct chain_tracker *chain_req, *next; spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); - if (smid >= ioc->hi_priority_smid) { - if (smid < ioc->internal_smid) { - /* hi-priority */ - i = smid - ioc->hi_priority_smid; - ioc->hpr_lookup[i].cb_idx = 0xFF; - list_add_tail(&ioc->hpr_lookup[i].tracker_list, - &ioc->hpr_free_list); - } else { - /* internal queue */ - i = smid - ioc->internal_smid; - ioc->internal_lookup[i].cb_idx = 0xFF; - list_add_tail(&ioc->internal_lookup[i].tracker_list, - &ioc->internal_free_list); + if (smid < ioc->hi_priority_smid) { + /* scsiio queue */ + i = smid - 1; + if (!list_empty(&ioc->scsi_lookup[i].chain_list)) { + list_for_each_entry_safe(chain_req, next, + &ioc->scsi_lookup[i].chain_list, tracker_list) { + list_del_init(&chain_req->tracker_list); + list_add_tail(&chain_req->tracker_list, + &ioc->free_chain_list); + } } + ioc->scsi_lookup[i].cb_idx = 0xFF; + ioc->scsi_lookup[i].scmd = NULL; + list_add_tail(&ioc->scsi_lookup[i].tracker_list, + &ioc->free_list); spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); - return; - } - /* scsiio queue */ - i = smid - 1; - if (!list_empty(&ioc->scsi_lookup[i].chain_list)) { - list_for_each_entry_safe(chain_req, next, - &ioc->scsi_lookup[i].chain_list, tracker_list) { - list_del_init(&chain_req->tracker_list); - list_add_tail(&chain_req->tracker_list, - &ioc->free_chain_list); + /* + * See _wait_for_commands_to_complete() call with regards + * to this code. 
+ */ + if (ioc->shost_recovery && ioc->pending_io_count) { + if (ioc->pending_io_count == 1) + wake_up(&ioc->reset_wq); + ioc->pending_io_count--; } + return; + } else if (smid < ioc->internal_smid) { + /* hi-priority */ + i = smid - ioc->hi_priority_smid; + ioc->hpr_lookup[i].cb_idx = 0xFF; + list_add_tail(&ioc->hpr_lookup[i].tracker_list, + &ioc->hpr_free_list); + } else if (smid <= ioc->hba_queue_depth) { + /* internal queue */ + i = smid - ioc->internal_smid; + ioc->internal_lookup[i].cb_idx = 0xFF; + list_add_tail(&ioc->internal_lookup[i].tracker_list, + &ioc->internal_free_list); } - ioc->scsi_lookup[i].cb_idx = 0xFF; - ioc->scsi_lookup[i].scmd = NULL; - list_add_tail(&ioc->scsi_lookup[i].tracker_list, - &ioc->free_list); spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); - - /* - * See _wait_for_commands_to_complete() call with regards to this code. - */ - if (ioc->shost_recovery && ioc->pending_io_count) { - if (ioc->pending_io_count == 1) - wake_up(&ioc->reset_wq); - ioc->pending_io_count--; - } } /** @@ -2278,9 +2276,9 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) ioc->name, (unsigned long long) ioc->request_dma)); total_sz += sz; - sz = ioc->scsiio_depth * sizeof(struct request_tracker); + sz = ioc->scsiio_depth * sizeof(struct scsiio_tracker); ioc->scsi_lookup_pages = get_order(sz); - ioc->scsi_lookup = (struct request_tracker *)__get_free_pages( + ioc->scsi_lookup = (struct scsiio_tracker *)__get_free_pages( GFP_KERNEL, ioc->scsi_lookup_pages); if (!ioc->scsi_lookup) { printk(MPT2SAS_ERR_FMT "scsi_lookup: get_free_pages failed, " diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index 82579fbca9e3..445f1cf9f68f 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h @@ -101,7 +101,8 @@ #define MPT_NAME_LENGTH 32 /* generic length of strings */ #define MPT_STRING_LENGTH 64 -#define MPT_MAX_CALLBACKS 16 +#define MPT_MAX_CALLBACKS 16 + #define CAN_SLEEP 1 #define NO_SLEEP 0 @@ -445,14 +446,14 @@ struct chain_tracker { }; /** - * struct request_tracker - firmware request tracker + * struct scsiio_tracker - scsi mf request tracker * @smid: system message id * @scmd: scsi request pointer * @cb_idx: callback index * @chain_list: list of chains associated to this IO * @tracker_list: list of free request (ioc->free_list) */ -struct request_tracker { +struct scsiio_tracker { u16 smid; struct scsi_cmnd *scmd; u8 cb_idx; @@ -460,6 +461,19 @@ struct request_tracker { struct list_head tracker_list; }; +/** + * struct request_tracker - misc mf request tracker + * @smid: system message id + * @scmd: scsi request pointer + * @cb_idx: callback index + * @tracker_list: list of free request (ioc->free_list) + */ +struct request_tracker { + u16 smid; + u8 cb_idx; + struct list_head tracker_list; +}; + /** * struct _tr_list - target reset list * @handle: device handle @@ -723,7 +737,7 @@ struct MPT2SAS_ADAPTER { u8 *request; dma_addr_t request_dma; u32 request_dma_sz; - struct request_tracker *scsi_lookup; + struct scsiio_tracker *scsi_lookup; ulong scsi_lookup_pages; spinlock_t scsi_lookup_lock; struct list_head free_list; -- cgit v1.2.3 From aa023b8b2fc09baeeb27af959ab38a8d684946bf Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Tue, 4 Jan 2011 11:40:59 +0530 Subject: [SCSI] mpt2sas: Bump version 08.100.00.00 Upgrade driver version from 7.100.00.00 to 8.100.00.00 Signed-off-by: Kashyap Desai Signed-off-by: James Bottomley --- drivers/scsi/mpt2sas/mpt2sas_base.h | 4 ++-- 1 file changed, 
2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index 445f1cf9f68f..a3f8aa9baea4 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h @@ -69,8 +69,8 @@ #define MPT2SAS_DRIVER_NAME "mpt2sas" #define MPT2SAS_AUTHOR "LSI Corporation " #define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" -#define MPT2SAS_DRIVER_VERSION "07.100.00.00" -#define MPT2SAS_MAJOR_VERSION 07 +#define MPT2SAS_DRIVER_VERSION "08.100.00.00" +#define MPT2SAS_MAJOR_VERSION 08 #define MPT2SAS_MINOR_VERSION 100 #define MPT2SAS_BUILD_VERSION 00 #define MPT2SAS_RELEASE_VERSION 00 -- cgit v1.2.3 From 7c66e9a5e6b0459c619562c5ad321071e2767faa Mon Sep 17 00:00:00 2001 From: Joseph Gruher Date: Wed, 5 Jan 2011 16:00:20 -0500 Subject: [SCSI] scsi_dh_alua: fix submit_stpg return submit_stpg() will always return failure so alua_activate() will report failure via dm-multipath callback function. Even though the stpg fired successfuly dm-multipath does not know and always fails to change the valid path. By returning SCSI_DH_OK we're now skipping alua_activate()'s call to activate_complete 'fn'. But this is fine because stpg_endio() will call it via h->callback_fn(). Signed-off-by: Joseph Gruher Signed-off-by: Ilgu Hong Signed-off-by: Mike Snitzer Signed-off-by: James Bottomley --- drivers/scsi/device_handler/scsi_dh_alua.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index 6b729324b8d3..afb1d058edbd 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -303,7 +303,6 @@ done: static unsigned submit_stpg(struct alua_dh_data *h) { struct request *rq; - int err = SCSI_DH_RES_TEMP_UNAVAIL; int stpg_len = 8; struct scsi_device *sdev = h->sdev; @@ -332,7 +331,7 @@ static unsigned submit_stpg(struct alua_dh_data *h) rq->end_io_data = h; blk_execute_rq_nowait(rq->q, NULL, rq, 1, stpg_endio); - return err; + return SCSI_DH_OK; } /* -- cgit v1.2.3 From ed0f36bc5719b25659b637f80ceea85494b84502 Mon Sep 17 00:00:00 2001 From: Joseph Gruher Date: Wed, 5 Jan 2011 16:00:21 -0500 Subject: [SCSI] scsi_dh_alua: fix deadlock in stpg_endio The use of blk_execute_rq_nowait() implies __blk_put_request() is needed in stpg_endio() rather than blk_put_request() -- blk_finish_request() is called with queue lock already held. 
Signed-off-by: Joseph Gruher Signed-off-by: Ilgu Hong Signed-off-by: Mike Snitzer Cc: stable@kernel.org Signed-off-by: James Bottomley --- drivers/scsi/device_handler/scsi_dh_alua.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index afb1d058edbd..ba3c6491929f 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -285,7 +285,8 @@ static void stpg_endio(struct request *req, int error) print_alua_state(h->state)); } done: - blk_put_request(req); + req->end_io_data = NULL; + __blk_put_request(req->q, req); if (h->callback_fn) { h->callback_fn(h->callback_data, err); h->callback_fn = h->callback_data = NULL; -- cgit v1.2.3 From 9349923d3f83bea1ba287a851c3f6737c6513039 Mon Sep 17 00:00:00 2001 From: Joseph Gruher Date: Wed, 5 Jan 2011 16:00:22 -0500 Subject: [SCSI] scsi_dh_alua: fix stpg_endio group state reporting Initialize stpg_endio() 'err' to SCSI_DH_OK and only change it to SCSI_DH_IO accordingly. This allows the switching of target group state to be properly reported when no error has occurred. Signed-off-by: Joseph Gruher Signed-off-by: Ilgu Hong Signed-off-by: Mike Snitzer Signed-off-by: James Bottomley --- drivers/scsi/device_handler/scsi_dh_alua.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index ba3c6491929f..9bd2c8a69a41 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -253,13 +253,15 @@ static void stpg_endio(struct request *req, int error) { struct alua_dh_data *h = req->end_io_data; struct scsi_sense_hdr sense_hdr; - unsigned err = SCSI_DH_IO; + unsigned err = SCSI_DH_OK; if (error || host_byte(req->errors) != DID_OK || - msg_byte(req->errors) != COMMAND_COMPLETE) + msg_byte(req->errors) != COMMAND_COMPLETE) { + err = SCSI_DH_IO; goto done; + } - if (err == SCSI_DH_IO && h->senselen > 0) { + if (h->senselen > 0) { err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE, &sense_hdr); if (!err) { -- cgit v1.2.3 From a3b1eff70b35a8e3679ae8cd928a222fa38022c0 Mon Sep 17 00:00:00 2001 From: Ilgu Hong Date: Wed, 5 Jan 2011 16:00:23 -0500 Subject: [SCSI] scsi_dh_alua: Add Promise VTrak to dev list Adds Promise VTrak devices to the ALUA device handler. Signed-off-by: Ilgu Hong Signed-off-by: Joseph Gruher Signed-off-by: Mike Snitzer Signed-off-by: James Bottomley --- drivers/scsi/device_handler/scsi_dh_alua.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index 9bd2c8a69a41..c1fd09d693f4 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -733,6 +733,7 @@ static const struct scsi_dh_devlist alua_dev_list[] = { {"Intel", "Multi-Flex"}, {"NETAPP", "LUN"}, {"AIX", "NVDISK"}, + {"Promise", "VTrak"}, {NULL, NULL} }; -- cgit v1.2.3 From 5fd1062fdfde39bbc0092ce91e7eedee1007eb96 Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Wed, 5 Jan 2011 16:00:24 -0500 Subject: [SCSI] scsi_dh_alua: add scalable ONTAP lun to dev list Currently NetApp's VID/PID details in the INQUIRY response shows up as 'NETAPP' and 'LUN'. 
With upcoming scalable SAN ONTAP version on NetApp controllers, the PID entry alone is being modified to 'LUN C-Mode' (to distinguish current ONTAP LUNs from scalable ONTAP LUNs). 'LUN' would still suffice for matching 'LUN C-Mode' but best to explicitly add these new NetApp LUNs to the device list. Reported-by: Martin George Acked-by: Mike Christie Signed-off-by: Mike Snitzer Signed-off-by: James Bottomley --- drivers/scsi/device_handler/scsi_dh_alua.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index c1fd09d693f4..5b6f9ab10d7a 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -732,6 +732,7 @@ static const struct scsi_dh_devlist alua_dev_list[] = { {"Pillar", "Axiom" }, {"Intel", "Multi-Flex"}, {"NETAPP", "LUN"}, + {"NETAPP", "LUN C-Mode"}, {"AIX", "NVDISK"}, {"Promise", "VTrak"}, {NULL, NULL} -- cgit v1.2.3 From b03a7771c81a0d5f026250c8cd4091d9ee767fdc Mon Sep 17 00:00:00 2001 From: "Stephen M. Cameron" Date: Thu, 6 Jan 2011 14:47:48 -0600 Subject: [SCSI] hpsa: defend against zero sized buffers in passthru ioctls Signed-off-by: Stephen M. Cameron Signed-off-by: James Bottomley --- drivers/scsi/hpsa.c | 36 ++++++++++++++++-------------------- 1 file changed, 16 insertions(+), 20 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 12deffccb8da..e4b5f3cda82e 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -2433,15 +2433,17 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) buff = kmalloc(iocommand.buf_size, GFP_KERNEL); if (buff == NULL) return -EFAULT; - } - if (iocommand.Request.Type.Direction == XFER_WRITE) { - /* Copy the data into the buffer we created */ - if (copy_from_user(buff, iocommand.buf, iocommand.buf_size)) { - kfree(buff); - return -EFAULT; + if (iocommand.Request.Type.Direction == XFER_WRITE) { + /* Copy the data into the buffer we created */ + if (copy_from_user(buff, iocommand.buf, + iocommand.buf_size)) { + kfree(buff); + return -EFAULT; + } + } else { + memset(buff, 0, iocommand.buf_size); } - } else - memset(buff, 0, iocommand.buf_size); + } c = cmd_special_alloc(h); if (c == NULL) { kfree(buff); @@ -2487,8 +2489,8 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) cmd_special_free(h, c); return -EFAULT; } - - if (iocommand.Request.Type.Direction == XFER_READ) { + if (iocommand.Request.Type.Direction == XFER_READ && + iocommand.buf_size > 0) { /* Copy the data out of the buffer we created */ if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) { kfree(buff); @@ -2581,14 +2583,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) } c->cmd_type = CMD_IOCTL_PEND; c->Header.ReplyQueue = 0; - - if (ioc->buf_size > 0) { - c->Header.SGList = sg_used; - c->Header.SGTotal = sg_used; - } else { - c->Header.SGList = 0; - c->Header.SGTotal = 0; - } + c->Header.SGList = c->Header.SGTotal = sg_used; memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN)); c->Header.Tag.lower = c->busaddr; memcpy(&c->Request, &ioc->Request, sizeof(c->Request)); @@ -2605,7 +2600,8 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) } } hpsa_scsi_do_simple_cmd_core(h, c); - hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL); + if (sg_used) + hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL); check_ioctl_unit_attention(h, c); /* Copy the error 
information out */ memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info)); @@ -2614,7 +2610,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) status = -EFAULT; goto cleanup1; } - if (ioc->Request.Type.Direction == XFER_READ) { + if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) { /* Copy the data out of the buffer we created */ BYTE __user *ptr = ioc->buf; for (i = 0; i < sg_used; i++) { -- cgit v1.2.3 From d896f3f3d129f1e2fbb4e3824242bc0dc2fb1a07 Mon Sep 17 00:00:00 2001 From: "Stephen M. Cameron" Date: Thu, 6 Jan 2011 14:47:53 -0600 Subject: [SCSI] hpsa: fixup DMA address before freeing. Some low bits might have been set by the driver, causing a message like this to come out: [ 13.288062] ------------[ cut here ]------------ [ 13.293211] WARNING: at lib/dma-debug.c:803 check_unmap+0x1a1/0x654() [ 13.300387] Hardware name: ProLiant DL180 G6 [ 13.305335] hpsa 0000:06:00.0: DMA-API: device driver tries to free DMA memory it has not allocated [device address=0x000000007f81e001] [size=640 bytes] Signed-off-by: Stephen M. Cameron Signed-off-by: James Bottomley --- drivers/scsi/hpsa.c | 2 +- drivers/scsi/hpsa_cmd.h | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index e4b5f3cda82e..e2089a3b5061 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -2267,7 +2267,7 @@ static void cmd_special_free(struct ctlr_info *h, struct CommandList *c) pci_free_consistent(h->pdev, sizeof(*c->err_info), c->err_info, (dma_addr_t) temp64.val); pci_free_consistent(h->pdev, sizeof(*c), - c, (dma_addr_t) c->busaddr); + c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK)); } #ifdef CONFIG_COMPAT diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h index f5c4c3cc0530..7910c1411156 100644 --- a/drivers/scsi/hpsa_cmd.h +++ b/drivers/scsi/hpsa_cmd.h @@ -265,6 +265,7 @@ struct ErrorInfo { #define DIRECT_LOOKUP_SHIFT 5 #define DIRECT_LOOKUP_BIT 0x10 +#define DIRECT_LOOKUP_MASK (~((1 << DIRECT_LOOKUP_SHIFT) - 1)) #define HPSA_ERROR_BIT 0x02 struct ctlr_info; /* defined in hpsa.h */ -- cgit v1.2.3 From 922a9e4da34270d81e216728f929c6e11e169794 Mon Sep 17 00:00:00 2001 From: "Stephen M. Cameron" Date: Thu, 6 Jan 2011 14:47:58 -0600 Subject: [SCSI] hpsa: Remove duplicate defines of DIRECT_LOOKUP_ constants They are defined in hpsa_cmd.h Signed-off-by: Stephen M. Cameron Signed-off-by: James Bottomley --- drivers/scsi/hpsa.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index e2089a3b5061..b345cc476940 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -2863,13 +2863,11 @@ static inline void finish_cmd(struct CommandList *c, u32 raw_tag) static inline u32 hpsa_tag_contains_index(u32 tag) { -#define DIRECT_LOOKUP_BIT 0x10 return tag & DIRECT_LOOKUP_BIT; } static inline u32 hpsa_tag_to_index(u32 tag) { -#define DIRECT_LOOKUP_SHIFT 5 return tag >> DIRECT_LOOKUP_SHIFT; } -- cgit v1.2.3 From fe5389c87f13c16cd77d976801c93422d0c05a49 Mon Sep 17 00:00:00 2001 From: "Stephen M. Cameron" Date: Thu, 6 Jan 2011 14:48:03 -0600 Subject: [SCSI] hpsa: fix board status waiting code After a reset, we should first wait for the board to become "not ready", and then wait for it to become "ready", instead of immediately waiting for it to become "ready", and do this waiting *after* restoring PCI config space registers. 
Also, only wait 10 secs for board to become "not ready" after a reset (it should quickly become not ready.) Signed-off-by: Stephen M. Cameron Signed-off-by: James Bottomley --- drivers/scsi/hpsa.c | 44 ++++++++++++++++++++++++++++++++++++-------- drivers/scsi/hpsa.h | 4 ++++ 2 files changed, 40 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index b345cc476940..cfd30adc3f18 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -173,6 +173,10 @@ static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev, static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev, unsigned long *memory_bar); static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id); +static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev, + void __iomem *vaddr, int wait_for_ready); +#define BOARD_NOT_READY 0 +#define BOARD_READY 1 static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL); static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL); @@ -3237,6 +3241,20 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) need a little pause here */ msleep(HPSA_POST_RESET_PAUSE_MSECS); + /* Wait for board to become not ready, then ready. */ + dev_info(&pdev->dev, "Waiting for board to become ready.\n"); + rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY); + if (rc) + dev_warn(&pdev->dev, + "failed waiting for board to become not ready\n"); + rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY); + if (rc) { + dev_warn(&pdev->dev, + "failed waiting for board to become ready\n"); + goto unmap_cfgtable; + } + dev_info(&pdev->dev, "board ready.\n"); + /* Controller should be in simple mode at this point. If it's not, * It means we're on one of those controllers which doesn't support * the doorbell reset method and on which the PCI power management reset @@ -3432,18 +3450,28 @@ static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev, return -ENODEV; } -static int __devinit hpsa_wait_for_board_ready(struct ctlr_info *h) +static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev, + void __iomem *vaddr, int wait_for_ready) { - int i; + int i, iterations; u32 scratchpad; + if (wait_for_ready) + iterations = HPSA_BOARD_READY_ITERATIONS; + else + iterations = HPSA_BOARD_NOT_READY_ITERATIONS; - for (i = 0; i < HPSA_BOARD_READY_ITERATIONS; i++) { - scratchpad = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); - if (scratchpad == HPSA_FIRMWARE_READY) - return 0; + for (i = 0; i < iterations; i++) { + scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET); + if (wait_for_ready) { + if (scratchpad == HPSA_FIRMWARE_READY) + return 0; + } else { + if (scratchpad != HPSA_FIRMWARE_READY) + return 0; + } msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS); } - dev_warn(&h->pdev->dev, "board not ready, timed out.\n"); + dev_warn(&pdev->dev, "board not ready, timed out.\n"); return -ENODEV; } @@ -3635,7 +3663,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h) err = -ENOMEM; goto err_out_free_res; } - err = hpsa_wait_for_board_ready(h); + err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY); if (err) goto err_out_free_res; err = hpsa_find_cfgtables(h); diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h index 19586e189f0f..074d237f4497 100644 --- a/drivers/scsi/hpsa.h +++ b/drivers/scsi/hpsa.h @@ -154,12 +154,16 @@ struct ctlr_info { * HPSA_BOARD_READY_ITERATIONS are derived from those. 
*/ #define HPSA_BOARD_READY_WAIT_SECS (120) +#define HPSA_BOARD_NOT_READY_WAIT_SECS (10) #define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100) #define HPSA_BOARD_READY_POLL_INTERVAL \ ((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000) #define HPSA_BOARD_READY_ITERATIONS \ ((HPSA_BOARD_READY_WAIT_SECS * 1000) / \ HPSA_BOARD_READY_POLL_INTERVAL_MSECS) +#define HPSA_BOARD_NOT_READY_ITERATIONS \ + ((HPSA_BOARD_NOT_READY_WAIT_SECS * 1000) / \ + HPSA_BOARD_READY_POLL_INTERVAL_MSECS) #define HPSA_POST_RESET_PAUSE_MSECS (3000) #define HPSA_POST_RESET_NOOP_RETRIES (12) -- cgit v1.2.3 From 270d05de2b8d82df4ed19955f6c0c7400f6ffbf5 Mon Sep 17 00:00:00 2001 From: "Stephen M. Cameron" Date: Thu, 6 Jan 2011 14:48:08 -0600 Subject: [SCSI] hpsa: Use kernel provided PCI state save and restore functions and use the doorbell reset method if available (which doesn't lock up the controller if you properly save and restore all the PCI registers that you're supposed to.) Signed-off-by: Stephen M. Cameron Signed-off-by: James Bottomley --- drivers/scsi/hpsa.c | 81 ++++++++++------------------------------------------- 1 file changed, 15 insertions(+), 66 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index cfd30adc3f18..82b94e2c745b 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -3053,38 +3053,6 @@ static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode, #define hpsa_soft_reset_controller(p) hpsa_message(p, 1, 0) #define hpsa_noop(p) hpsa_message(p, 3, 0) -static __devinit int hpsa_reset_msi(struct pci_dev *pdev) -{ -/* the #defines are stolen from drivers/pci/msi.h. */ -#define msi_control_reg(base) (base + PCI_MSI_FLAGS) -#define PCI_MSIX_FLAGS_ENABLE (1 << 15) - - int pos; - u16 control = 0; - - pos = pci_find_capability(pdev, PCI_CAP_ID_MSI); - if (pos) { - pci_read_config_word(pdev, msi_control_reg(pos), &control); - if (control & PCI_MSI_FLAGS_ENABLE) { - dev_info(&pdev->dev, "resetting MSI\n"); - pci_write_config_word(pdev, msi_control_reg(pos), - control & ~PCI_MSI_FLAGS_ENABLE); - } - } - - pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); - if (pos) { - pci_read_config_word(pdev, msi_control_reg(pos), &control); - if (control & PCI_MSIX_FLAGS_ENABLE) { - dev_info(&pdev->dev, "resetting MSI-X\n"); - pci_write_config_word(pdev, msi_control_reg(pos), - control & ~PCI_MSIX_FLAGS_ENABLE); - } - } - - return 0; -} - static int hpsa_controller_hard_reset(struct pci_dev *pdev, void * __iomem vaddr, bool use_doorbell) { @@ -3140,17 +3108,17 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev, */ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) { - u16 saved_config_space[32]; u64 cfg_offset; u32 cfg_base_addr; u64 cfg_base_addr_index; void __iomem *vaddr; unsigned long paddr; u32 misc_fw_support, active_transport; - int rc, i; + int rc; struct CfgTable __iomem *cfgtable; bool use_doorbell; u32 board_id; + u16 command_register; /* For controllers as old as the P600, this is very nearly * the same thing as @@ -3160,14 +3128,6 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) * pci_set_power_state(pci_dev, PCI_D0); * pci_restore_state(pci_dev); * - * but we can't use these nice canned kernel routines on - * kexec, because they also check the MSI/MSI-X state in PCI - * configuration space and do the wrong thing when it is - * set/cleared. 
Also, the pci_save/restore_state functions - * violate the ordering requirements for restoring the - * configuration space from the CCISS document (see the - * comment below). So we roll our own .... - * * For controllers newer than the P600, the pci power state * method of resetting doesn't work so we have another way * using the doorbell register. @@ -3184,9 +3144,13 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) if (board_id == 0x409C0E11 || board_id == 0x409D0E11) return -ENOTSUPP; - for (i = 0; i < 32; i++) - pci_read_config_word(pdev, 2*i, &saved_config_space[i]); - + /* Save the PCI command register */ + pci_read_config_word(pdev, 4, &command_register); + /* Turn the board off. This is so that later pci_restore_state() + * won't turn the board on before the rest of config space is ready. + */ + pci_disable_device(pdev); + pci_save_state(pdev); /* find the first memory BAR, so we can find the cfg table */ rc = hpsa_pci_find_memory_BAR(pdev, &paddr); @@ -3212,30 +3176,17 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) misc_fw_support = readl(&cfgtable->misc_fw_support); use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; - /* The doorbell reset seems to cause lockups on some Smart - * Arrays (e.g. P410, P410i, maybe others). Until this is - * fixed or at least isolated, avoid the doorbell reset. - */ - use_doorbell = 0; - rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell); if (rc) goto unmap_cfgtable; - /* Restore the PCI configuration space. The Open CISS - * Specification says, "Restore the PCI Configuration - * Registers, offsets 00h through 60h. It is important to - * restore the command register, 16-bits at offset 04h, - * last. Do not restore the configuration status register, - * 16-bits at offset 06h." Note that the offset is 2*i. - */ - for (i = 0; i < 32; i++) { - if (i == 2 || i == 3) - continue; - pci_write_config_word(pdev, 2*i, saved_config_space[i]); + pci_restore_state(pdev); + rc = pci_enable_device(pdev); + if (rc) { + dev_warn(&pdev->dev, "failed to enable device.\n"); + goto unmap_cfgtable; } - wmb(); - pci_write_config_word(pdev, 4, saved_config_space[2]); + pci_write_config_word(pdev, 4, command_register); /* Some devices (notably the HP Smart Array 5i Controller) need a little pause here */ @@ -3732,8 +3683,6 @@ static __devinit int hpsa_init_reset_devices(struct pci_dev *pdev) return 0; /* just try to do the kdump anyhow. */ if (rc) return -ENODEV; - if (hpsa_reset_msi(pdev)) - return -ENODEV; /* Now try to get the controller to respond to a no-op */ for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) { -- cgit v1.2.3 From 72ceeaecb748dff3d35b10d7518bed651b895f3e Mon Sep 17 00:00:00 2001 From: "Stephen M. Cameron" Date: Thu, 6 Jan 2011 14:48:13 -0600 Subject: [SCSI] hpsa: limit commands allocated on reset_devices This is to conserve memory in a memory-limited kdump scenario Signed-off-by: Stephen M. Cameron Signed-off-by: James Bottomley --- drivers/scsi/hpsa.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers') diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 82b94e2c745b..688b24333a6f 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -3470,6 +3470,11 @@ static int __devinit hpsa_find_cfgtables(struct ctlr_info *h) static void __devinit hpsa_get_max_perf_mode_cmds(struct ctlr_info *h) { h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands)); + + /* Limit commands in memory limited kdump scenario. 
*/ + if (reset_devices && h->max_commands > 32) + h->max_commands = 32; + if (h->max_commands < 16) { dev_warn(&h->pdev->dev, "Controller reports " "max supported commands of %d, an obvious lie. " -- cgit v1.2.3 From 25c1e56a04e60af2414f8d81eda0fd10b8e7b961 Mon Sep 17 00:00:00 2001 From: "Stephen M. Cameron" Date: Thu, 6 Jan 2011 14:48:18 -0600 Subject: [SCSI] hpsa: do not reset unknown boards on reset_devices This is to prevent hpsa from resetting older boards which the cciss driver may be controlling. Signed-off-by: Stephen M. Cameron Signed-off-by: James Bottomley --- drivers/scsi/hpsa.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 688b24333a6f..5b9cd41d9320 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -3140,7 +3140,11 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) * likely not be happy. Just forbid resetting this conjoined mess. * The 640x isn't really supported by hpsa anyway. */ - hpsa_lookup_board_id(pdev, &board_id); + rc = hpsa_lookup_board_id(pdev, &board_id); + if (rc < 0) { + dev_warn(&pdev->dev, "Not resetting device.\n"); + return -ENODEV; + } if (board_id == 0x409C0E11 || board_id == 0x409D0E11) return -ENOTSUPP; -- cgit v1.2.3 From 6eaf46fdc719991a3ccda1e14b274e9adb515978 Mon Sep 17 00:00:00 2001 From: "Stephen M. Cameron" Date: Thu, 6 Jan 2011 14:48:24 -0600 Subject: [SCSI] hpsa: take the adapter lock in hpsa_wait_for_mode_change_ack Need to take the lock while accessing the register to check to see if config table changes have taken effect. Signed-off-by: Stephen M. Cameron Signed-off-by: James Bottomley --- drivers/scsi/hpsa.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 5b9cd41d9320..4fb62c2aac05 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -3553,13 +3553,18 @@ static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h) static void __devinit hpsa_wait_for_mode_change_ack(struct ctlr_info *h) { int i; + u32 doorbell_value; + unsigned long flags; /* under certain very rare conditions, this can take awhile. * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right * as we enter this code.) */ for (i = 0; i < MAX_CONFIG_WAIT; i++) { - if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq)) + spin_lock_irqsave(&h->lock, flags); + doorbell_value = readl(h->vaddr + SA5_DOORBELL); + spin_unlock_irqrestore(&h->lock, flags); + if (!doorbell_value & CFGTBL_ChangeReq) break; /* delay and try again */ msleep(10); @@ -3731,6 +3736,8 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev, h->busy_initializing = 1; INIT_HLIST_HEAD(&h->cmpQ); INIT_HLIST_HEAD(&h->reqQ); + spin_lock_init(&h->lock); + spin_lock_init(&h->scan_lock); rc = hpsa_pci_init(h); if (rc != 0) goto clean1; @@ -3790,8 +3797,6 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev, } if (hpsa_allocate_sg_chain_blocks(h)) goto clean4; - spin_lock_init(&h->lock); - spin_lock_init(&h->scan_lock); init_waitqueue_head(&h->scan_wait_queue); h->scan_finished = 1; /* no scan currently in progress */ -- cgit v1.2.3 From 02ec19c82e87e3748d326ca5e15e7ddb18c73476 Mon Sep 17 00:00:00 2001 From: "Stephen M. Cameron" Date: Thu, 6 Jan 2011 14:48:29 -0600 Subject: [SCSI] hpsa: allow driver to put controller in either simple or performant mode Signed-off-by: Stephen M. 
Cameron Signed-off-by: James Bottomley --- drivers/scsi/hpsa.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers') diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 4fb62c2aac05..10076f1868e0 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -74,6 +74,10 @@ static int hpsa_allow_any; module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(hpsa_allow_any, "Allow hpsa driver to access unknown HP Smart Array hardware"); +static int hpsa_simple_mode; +module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(hpsa_simple_mode, + "Use 'simple mode' rather than 'performant mode'"); /* define the PCI info for the cards we can control */ static const struct pci_device_id hpsa_pci_device_id[] = { @@ -4038,6 +4042,9 @@ static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h) { u32 trans_support; + if (hpsa_simple_mode) + return; + trans_support = readl(&(h->cfgtable->TransportSupport)); if (!(trans_support & PERFORMANT_MODE)) return; -- cgit v1.2.3 From 60d3f5b068e65d424f3cf5d108fb0747dd156d00 Mon Sep 17 00:00:00 2001 From: "Stephen M. Cameron" Date: Thu, 6 Jan 2011 14:48:34 -0600 Subject: [SCSI] hpsa: use usleep_range not msleep for small sleeps Signed-off-by: Stephen M. Cameron Signed-off-by: James Bottomley --- drivers/scsi/hpsa.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 10076f1868e0..ddd729e2ea89 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -3571,7 +3571,7 @@ static void __devinit hpsa_wait_for_mode_change_ack(struct ctlr_info *h) if (!doorbell_value & CFGTBL_ChangeReq) break; /* delay and try again */ - msleep(10); + usleep_range(10000, 20000); } } -- cgit v1.2.3 From 94a136495a3fbe59b960c46fba3574b1159e8489 Mon Sep 17 00:00:00 2001 From: "Stephen M. Cameron" Date: Thu, 6 Jan 2011 14:48:39 -0600 Subject: [SCSI] hpsa: Add a per controller commands_outstanding entry in /sys Signed-off-by: Stephen M. 
Cameron Signed-off-by: James Bottomley --- drivers/scsi/hpsa.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'drivers') diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index ddd729e2ea89..c255f46e6405 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -159,6 +159,8 @@ static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr, char *buf); static ssize_t host_show_firmware_revision(struct device *dev, struct device_attribute *attr, char *buf); +static ssize_t host_show_commands_outstanding(struct device *dev, + struct device_attribute *attr, char *buf); static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno); static ssize_t host_store_rescan(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); @@ -188,6 +190,8 @@ static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL); static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan); static DEVICE_ATTR(firmware_revision, S_IRUGO, host_show_firmware_revision, NULL); +static DEVICE_ATTR(commands_outstanding, S_IRUGO, + host_show_commands_outstanding, NULL); static struct device_attribute *hpsa_sdev_attrs[] = { &dev_attr_raid_level, @@ -199,6 +203,7 @@ static struct device_attribute *hpsa_sdev_attrs[] = { static struct device_attribute *hpsa_shost_attrs[] = { &dev_attr_rescan, &dev_attr_firmware_revision, + &dev_attr_commands_outstanding, NULL, }; @@ -299,6 +304,15 @@ static ssize_t host_show_firmware_revision(struct device *dev, fwrev[0], fwrev[1], fwrev[2], fwrev[3]); } +static ssize_t host_show_commands_outstanding(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ctlr_info *h = shost_to_hba(shost); + + return snprintf(buf, 20, "%d\n", h->commands_outstanding); +} + /* Enqueuing and dequeuing functions for cmdlists. */ static inline void addQ(struct hlist_head *list, struct CommandList *c) { -- cgit v1.2.3 From c4f8a299d04bd083643ba93e982ab910219dd1f0 Mon Sep 17 00:00:00 2001 From: "Stephen M. Cameron" Date: Fri, 7 Jan 2011 10:55:43 -0600 Subject: [SCSI] hpsa: fix use of uninitialized variable in hpsa_add_msa2xxx_enclosure_device() Thanks to Scott Teel for noticing this. Signed-off-by: Stephen M. Cameron Signed-off-by: James Bottomley --- drivers/scsi/hpsa.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index c255f46e6405..c6c13b0c68eb 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -1617,6 +1617,8 @@ static int add_msa2xxx_enclosure_device(struct ctlr_info *h, if (lun == 0) /* if lun is 0, then obviously we have a lun 0. */ return 0; + memset(scsi3addr, 0, 8); + scsi3addr[3] = target; if (is_hba_lunid(scsi3addr)) return 0; /* Don't add the RAID controller here. */ @@ -1631,8 +1633,6 @@ static int add_msa2xxx_enclosure_device(struct ctlr_info *h, return 0; } - memset(scsi3addr, 0, 8); - scsi3addr[3] = target; if (hpsa_update_device_info(h, scsi3addr, this_device)) return 0; (*nmsa2xxx_enclosures)++; -- cgit v1.2.3 From 1d5e2ed0805bf426226b7daa54b3e60af78baa10 Mon Sep 17 00:00:00 2001 From: "Stephen M. Cameron" Date: Fri, 7 Jan 2011 10:55:48 -0600 Subject: [SCSI] hpsa: Fix problem that CMD_UNABORTABLE command status was treated as unknown Signed-off-by: Stephen M. 
Cameron Signed-off-by: James Bottomley --- drivers/scsi/hpsa.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers') diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index c6c13b0c68eb..5828bcb82964 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -1152,6 +1152,10 @@ static void complete_scsi_command(struct CommandList *cp, cmd->result = DID_TIME_OUT << 16; dev_warn(&h->pdev->dev, "cp %p timedout\n", cp); break; + case CMD_UNABORTABLE: + cmd->result = DID_ERROR << 16; + dev_warn(&h->pdev->dev, "Command unabortable\n"); + break; default: cmd->result = DID_ERROR << 16; dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n", @@ -1317,6 +1321,9 @@ static void hpsa_scsi_interpret_error(struct CommandList *cp) case CMD_TIMEOUT: dev_warn(d, "cp %p timed out\n", cp); break; + case CMD_UNABORTABLE: + dev_warn(d, "Command unabortable\n"); + break; default: dev_warn(d, "cp %p returned unknown status %x\n", cp, ei->CommandStatus); -- cgit v1.2.3 From 938abd8449c27fc67203e1a7c350199cea1158da Mon Sep 17 00:00:00 2001 From: Vasiliy Kulikov Date: Fri, 7 Jan 2011 10:55:53 -0600 Subject: [SCSI] hpsa: avoid leaking stack contents to userland memset arg64 to zero in the passthrough ioctls to avoid leaking contents of kernel stack memory to userland via uninitialized padding fields inserted by the compiler for alignment reasons. Signed-off-by: Stephen M. Cameron Signed-off-by: James Bottomley --- drivers/scsi/hpsa.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 5828bcb82964..959eeb202d92 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -2310,6 +2310,7 @@ static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg) int err; u32 cp; + memset(&arg64, 0, sizeof(arg64)); err = 0; err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, sizeof(arg64.LUN_info)); @@ -2346,6 +2347,7 @@ static int hpsa_ioctl32_big_passthru(struct scsi_device *dev, int err; u32 cp; + memset(&arg64, 0, sizeof(arg64)); err = 0; err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, sizeof(arg64.LUN_info)); -- cgit v1.2.3 From c343a01cca2a4182b89d976e203a54ec0ea5446b Mon Sep 17 00:00:00 2001 From: "kxie@chelsio.com" Date: Fri, 7 Jan 2011 14:45:39 -0800 Subject: [SCSI] cxgbi: set ulpmode only if digest is on There is no need to set ulpmode on the tx skbs if no digest is enabled. 
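In other words, the helper should leave the skb ULP mode zeroed for plain PDUs and only encode the iSCSI ULP submode when at least one digest is negotiated. A condensed sketch of that intent (the actual hunk follows in the patch below; bit 0 carries the header digest, bit 1 the data digest):

	u8 mode = 0;

	if (hcrc || dcrc)
		mode = (ULP2_MODE_ISCSI << 4) |
		       (hcrc ? 1 : 0) | (dcrc ? 2 : 0);
	cxgbi_skcb_ulp_mode(skb) = mode;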
Signed-off-by: Karen Xie Reviewed-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/cxgbi/libcxgbi.c | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index d2ad3d676724..6de6a6f3f1e8 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -1908,13 +1908,16 @@ EXPORT_SYMBOL_GPL(cxgbi_conn_alloc_pdu); static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc) { - u8 submode = 0; - - if (hcrc) - submode |= 1; - if (dcrc) - submode |= 2; - cxgbi_skcb_ulp_mode(skb) = (ULP2_MODE_ISCSI << 4) | submode; + if (hcrc || dcrc) { + u8 submode = 0; + + if (hcrc) + submode |= 1; + if (dcrc) + submode |= 2; + cxgbi_skcb_ulp_mode(skb) = (ULP2_MODE_ISCSI << 4) | submode; + } else + cxgbi_skcb_ulp_mode(skb) = 0; } int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset, -- cgit v1.2.3 From b8ce8b59b8d64ee0d864706b9d3cca2a9b314bf5 Mon Sep 17 00:00:00 2001 From: "kxie@chelsio.com" Date: Fri, 7 Jan 2011 14:45:39 -0800 Subject: [SCSI] cxgbi: get rid of gl_skb in cxgbi_ddp_info Remove gl_skb from cxgbi_ddp_info as it is only used by cxgb3i. Signed-off-by: Karen Xie Reviewed-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/cxgbi/cxgb3i/cxgb3i.c | 51 ++++++-------------------------------- drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 2 -- drivers/scsi/cxgbi/libcxgbi.c | 15 +---------- drivers/scsi/cxgbi/libcxgbi.h | 3 --- 4 files changed, 8 insertions(+), 63 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c index a129a170b47b..e2362b97f329 100644 --- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c @@ -1108,10 +1108,11 @@ static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr, csk, idx, npods, gl); for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) { - struct sk_buff *skb = ddp->gl_skb[idx]; + struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) + + PPOD_SIZE, 0, GFP_ATOMIC); - /* hold on to the skb until we clear the ddp mapping */ - skb_get(skb); + if (!skb) + return -ENOMEM; ulp_mem_io_set_hdr(skb, pm_addr); cxgbi_ddp_ppod_set((struct cxgbi_pagepod *)(skb->head + @@ -1136,56 +1137,20 @@ static void ddp_clear_map(struct cxgbi_hba *chba, unsigned int tag, cdev, idx, npods, tag); for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) { - struct sk_buff *skb = ddp->gl_skb[idx]; + struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) + + PPOD_SIZE, 0, GFP_ATOMIC); if (!skb) { - pr_err("tag 0x%x, 0x%x, %d/%u, skb NULL.\n", + pr_err("tag 0x%x, 0x%x, %d/%u, skb OOM.\n", tag, idx, i, npods); continue; } - ddp->gl_skb[idx] = NULL; - memset(skb->head + sizeof(struct ulp_mem_io), 0, PPOD_SIZE); ulp_mem_io_set_hdr(skb, pm_addr); skb->priority = CPL_PRIORITY_CONTROL; cxgb3_ofld_send(cdev->lldev, skb); } } -static void ddp_free_gl_skb(struct cxgbi_ddp_info *ddp, int idx, int cnt) -{ - int i; - - log_debug(1 << CXGBI_DBG_DDP, - "ddp 0x%p, idx %d, cnt %d.\n", ddp, idx, cnt); - - for (i = 0; i < cnt; i++, idx++) - if (ddp->gl_skb[idx]) { - kfree_skb(ddp->gl_skb[idx]); - ddp->gl_skb[idx] = NULL; - } -} - -static int ddp_alloc_gl_skb(struct cxgbi_ddp_info *ddp, int idx, - int cnt, gfp_t gfp) -{ - int i; - - log_debug(1 << CXGBI_DBG_DDP, - "ddp 0x%p, idx %d, cnt %d.\n", ddp, idx, cnt); - - for (i = 0; i < cnt; i++) { - struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) + - PPOD_SIZE, 0, gfp); - 
if (skb) - ddp->gl_skb[idx + i] = skb; - else { - ddp_free_gl_skb(ddp, idx, i); - return -ENOMEM; - } - } - return 0; -} - static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid, int pg_idx, bool reply) { @@ -1316,8 +1281,6 @@ static int cxgb3i_ddp_init(struct cxgbi_device *cdev) } tdev->ulp_iscsi = ddp; - cdev->csk_ddp_free_gl_skb = ddp_free_gl_skb; - cdev->csk_ddp_alloc_gl_skb = ddp_alloc_gl_skb; cdev->csk_ddp_setup_digest = ddp_setup_conn_digest; cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx; cdev->csk_ddp_set = ddp_set_map; diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index 8c04fada710b..5b1f0785dafd 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -1425,8 +1425,6 @@ static int cxgb4i_ddp_init(struct cxgbi_device *cdev) cxgbi_ddp_page_size_factor(pgsz_factor); cxgb4_iscsi_init(lldi->ports[0], tagmask, pgsz_factor); - cdev->csk_ddp_free_gl_skb = NULL; - cdev->csk_ddp_alloc_gl_skb = NULL; cdev->csk_ddp_setup_digest = ddp_setup_conn_digest; cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx; cdev->csk_ddp_set = ddp_set_map; diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index 6de6a6f3f1e8..b2acdef3dcb7 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -1277,12 +1277,6 @@ static int ddp_tag_reserve(struct cxgbi_sock *csk, unsigned int tid, return idx; } - if (cdev->csk_ddp_alloc_gl_skb) { - err = cdev->csk_ddp_alloc_gl_skb(ddp, idx, npods, gfp); - if (err < 0) - goto unmark_entries; - } - tag = cxgbi_ddp_tag_base(tformat, sw_tag); tag |= idx << PPOD_IDX_SHIFT; @@ -1293,11 +1287,8 @@ static int ddp_tag_reserve(struct cxgbi_sock *csk, unsigned int tid, hdr.page_offset = htonl(gl->offset); err = cdev->csk_ddp_set(csk, &hdr, idx, npods, gl); - if (err < 0) { - if (cdev->csk_ddp_free_gl_skb) - cdev->csk_ddp_free_gl_skb(ddp, idx, npods); + if (err < 0) goto unmark_entries; - } ddp->idx_last = idx; log_debug(1 << CXGBI_DBG_DDP, @@ -1363,8 +1354,6 @@ static void ddp_destroy(struct kref *kref) >> PPOD_PAGES_SHIFT; pr_info("cdev 0x%p, ddp %d + %d.\n", cdev, i, npods); kfree(gl); - if (cdev->csk_ddp_free_gl_skb) - cdev->csk_ddp_free_gl_skb(ddp, i, npods); i += npods; } else i++; @@ -1407,8 +1396,6 @@ int cxgbi_ddp_init(struct cxgbi_device *cdev, return -ENOMEM; } ddp->gl_map = (struct cxgbi_gather_list **)(ddp + 1); - ddp->gl_skb = (struct sk_buff **)(((char *)ddp->gl_map) + - ppmax * sizeof(struct cxgbi_gather_list *)); cdev->ddp = ddp; spin_lock_init(&ddp->map_lock); diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h index c57d59db000c..23cbc5854503 100644 --- a/drivers/scsi/cxgbi/libcxgbi.h +++ b/drivers/scsi/cxgbi/libcxgbi.h @@ -131,7 +131,6 @@ struct cxgbi_ddp_info { unsigned int rsvd_tag_mask; spinlock_t map_lock; struct cxgbi_gather_list **gl_map; - struct sk_buff **gl_skb; }; #define DDP_PGIDX_MAX 4 @@ -536,8 +535,6 @@ struct cxgbi_device { struct cxgbi_ddp_info *ddp; void (*dev_ddp_cleanup)(struct cxgbi_device *); - void (*csk_ddp_free_gl_skb)(struct cxgbi_ddp_info *, int, int); - int (*csk_ddp_alloc_gl_skb)(struct cxgbi_ddp_info *, int, int, gfp_t); int (*csk_ddp_set)(struct cxgbi_sock *, struct cxgbi_pagepod_hdr *, unsigned int, unsigned int, struct cxgbi_gather_list *); -- cgit v1.2.3 From 70e14722718ea3fc66400924f7a99c7d8c62b8ff Mon Sep 17 00:00:00 2001 From: Eddie Wai Date: Sat, 8 Jan 2011 18:00:24 -0800 Subject: [SCSI] bnx2i: Added reconnect fix connecting against Lefthand targets The nopout's reserved 
field was not being initialized to zero before being reused. Stale CDB values from previous SCSI cmds of the same BHS offset was the cause of the disconnection initiated by the Lefthand target. Signed-off-by: Eddie Wai Reviewed-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/bnx2i/bnx2i_hwi.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c index 96505e3ab986..603db9d64956 100644 --- a/drivers/scsi/bnx2i/bnx2i_hwi.c +++ b/drivers/scsi/bnx2i/bnx2i_hwi.c @@ -490,6 +490,9 @@ int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn, bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data; nopout_hdr = (struct iscsi_nopout *)task->hdr; nopout_wqe = (struct bnx2i_nop_out_request *)ep->qp.sq_prod_qe; + + memset(nopout_wqe, 0x00, sizeof(struct bnx2i_nop_out_request)); + nopout_wqe->op_code = nopout_hdr->opcode; nopout_wqe->op_attr = ISCSI_FLAG_CMD_FINAL; memcpy(nopout_wqe->lun, nopout_hdr->lun, 8); -- cgit v1.2.3 From efee0bd9b272b0dae1c7fb1bb0be75606c8fa6f8 Mon Sep 17 00:00:00 2001 From: Jesper Juhl Date: Sun, 26 Dec 2010 22:14:01 +0100 Subject: [SCSI] mptfusion: Fix memory leak in mptctl_getiocinfo() A 'kfree(karg)' is missing in a failure path in mptctl.c::mptctl_getiocinfo() which can cause a memory leak. Signed-off-by: Jesper Juhl Acked-by: "Desai, Kashyap" Signed-off-by: James Bottomley --- drivers/message/fusion/mptctl.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c index a3856ed90aef..1dcc13a52c7e 100644 --- a/drivers/message/fusion/mptctl.c +++ b/drivers/message/fusion/mptctl.c @@ -1307,8 +1307,10 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size) else karg->adapterType = MPT_IOCTL_INTERFACE_SCSI; - if (karg->hdr.port > 1) + if (karg->hdr.port > 1) { + kfree(karg); return -EINVAL; + } port = karg->hdr.port; karg->port = port; -- cgit v1.2.3 From 716163ff908be72f6b74752ad0796cc7c00b047a Mon Sep 17 00:00:00 2001 From: "kxie@chelsio.com" Date: Mon, 10 Jan 2011 16:45:07 -0800 Subject: [SCSI] cxgb3i: fixed connection problem with iscsi private ip fixed the connection problem when the private iscsi ipv4 address is provisioned on the interface. Signed-off-by: Karen Xie Reviewed-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/cxgbi/cxgb3i/cxgb3i.h | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h index 5f5e3394b594..20593fd69d8f 100644 --- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h @@ -24,10 +24,21 @@ extern cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS]; -#define cxgb3i_get_private_ipv4addr(ndev) \ - (((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr) -#define cxgb3i_set_private_ipv4addr(ndev, addr) \ - (((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr) = addr +static inline unsigned int cxgb3i_get_private_ipv4addr(struct net_device *ndev) +{ + return ((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr; +} + +static inline void cxgb3i_set_private_ipv4addr(struct net_device *ndev, + unsigned int addr) +{ + struct port_info *pi = (struct port_info *)netdev_priv(ndev); + + pi->iscsic.flags = addr ? 
1 : 0; + pi->iscsi_ipv4addr = addr; + if (addr) + memcpy(pi->iscsic.mac_addr, ndev->dev_addr, ETH_ALEN); +} struct cpl_iscsi_hdr_norss { union opcode_tid ot; -- cgit v1.2.3 From 38a039be2e7bda32517642ecfce54c9317292a9c Mon Sep 17 00:00:00 2001 From: Peter Jones Date: Thu, 6 Jan 2011 15:31:29 -0500 Subject: [SCSI] Add scsi_dev_info_list_del_keyed() For scsi_dh.c to use devinfo lists, we have to be able to remove entries before rmmod. Signed-off-by: James Bottomley --- drivers/scsi/scsi_devinfo.c | 85 +++++++++++++++++++++++++++++++++++++++++++++ drivers/scsi/scsi_priv.h | 1 + 2 files changed, 86 insertions(+) (limited to 'drivers') diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index 43fad4c09beb..82e9e5c0476e 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c @@ -381,6 +381,91 @@ int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model, } EXPORT_SYMBOL(scsi_dev_info_list_add_keyed); +/** + * scsi_dev_info_list_del_keyed - remove one dev_info list entry. + * @vendor: vendor string + * @model: model (product) string + * @key: specify list to use + * + * Description: + * Remove and destroy one dev_info entry for @vendor, @model + * in list specified by @key. + * + * Returns: 0 OK, -error on failure. + **/ +int scsi_dev_info_list_del_keyed(char *vendor, char *model, int key) +{ + struct scsi_dev_info_list *devinfo, *found = NULL; + struct scsi_dev_info_list_table *devinfo_table = + scsi_devinfo_lookup_by_key(key); + + if (IS_ERR(devinfo_table)) + return PTR_ERR(devinfo_table); + + list_for_each_entry(devinfo, &devinfo_table->scsi_dev_info_list, + dev_info_list) { + if (devinfo->compatible) { + /* + * Behave like the older version of get_device_flags. + */ + size_t max; + /* + * XXX why skip leading spaces? If an odd INQUIRY + * value, that should have been part of the + * scsi_static_device_list[] entry, such as " FOO" + * rather than "FOO". Since this code is already + * here, and we don't know what device it is + * trying to work with, leave it as-is. + */ + max = 8; /* max length of vendor */ + while ((max > 0) && *vendor == ' ') { + max--; + vendor++; + } + /* + * XXX removing the following strlen() would be + * good, using it means that for a an entry not in + * the list, we scan every byte of every vendor + * listed in scsi_static_device_list[], and never match + * a single one (and still have to compare at + * least the first byte of each vendor). + */ + if (memcmp(devinfo->vendor, vendor, + min(max, strlen(devinfo->vendor)))) + continue; + /* + * Skip spaces again. + */ + max = 16; /* max length of model */ + while ((max > 0) && *model == ' ') { + max--; + model++; + } + if (memcmp(devinfo->model, model, + min(max, strlen(devinfo->model)))) + continue; + found = devinfo; + } else { + if (!memcmp(devinfo->vendor, vendor, + sizeof(devinfo->vendor)) && + !memcmp(devinfo->model, model, + sizeof(devinfo->model))) + found = devinfo; + } + if (found) + break; + } + + if (found) { + list_del(&found->dev_info_list); + kfree(found); + return 0; + } + + return -ENOENT; +} +EXPORT_SYMBOL(scsi_dev_info_list_del_keyed); + /** * scsi_dev_info_list_add_str - parse dev_list and add to the scsi_dev_info_list. 
* @dev_list: string of device flags to add diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h index b4056d14f812..ce9e0adc8df8 100644 --- a/drivers/scsi/scsi_priv.h +++ b/drivers/scsi/scsi_priv.h @@ -56,6 +56,7 @@ extern int scsi_get_device_flags_keyed(struct scsi_device *sdev, extern int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model, char *strflags, int flags, int key); +extern int scsi_dev_info_list_del_keyed(char *vendor, char *model, int key); extern int scsi_dev_info_add_list(int key, const char *name); extern int scsi_dev_info_remove_list(int key); -- cgit v1.2.3 From 940d7faa4818f386fcdf1b7266ec7b62bf07a7d0 Mon Sep 17 00:00:00 2001 From: Peter Jones Date: Thu, 6 Jan 2011 15:38:24 -0500 Subject: [SCSI] scsi_dh: Use scsi_devinfo functions to do matching of device_handler tables. Previously we were using strncmp in order to avoid having to include whitespace in the devlist, but this means "HSV1000" matches a device list entry that says "HSV100", which is wrong. This patch changes scsi_dh.c to use scsi_devinfo's matching functions instead, since they handle these cases correctly. Signed-off-by: James Bottomley --- drivers/scsi/device_handler/scsi_dh.c | 112 +++++++++++----------------------- drivers/scsi/scsi_priv.h | 1 + 2 files changed, 38 insertions(+), 75 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c index b837c5b3c8f9..564e6ecd17c2 100644 --- a/drivers/scsi/device_handler/scsi_dh.c +++ b/drivers/scsi/device_handler/scsi_dh.c @@ -25,16 +25,9 @@ #include #include "../scsi_priv.h" -struct scsi_dh_devinfo_list { - struct list_head node; - char vendor[9]; - char model[17]; - struct scsi_device_handler *handler; -}; - static DEFINE_SPINLOCK(list_lock); static LIST_HEAD(scsi_dh_list); -static LIST_HEAD(scsi_dh_dev_list); +static int scsi_dh_list_idx = 1; static struct scsi_device_handler *get_device_handler(const char *name) { @@ -51,40 +44,18 @@ static struct scsi_device_handler *get_device_handler(const char *name) return found; } - -static struct scsi_device_handler * -scsi_dh_cache_lookup(struct scsi_device *sdev) +static struct scsi_device_handler *get_device_handler_by_idx(int idx) { - struct scsi_dh_devinfo_list *tmp; - struct scsi_device_handler *found_dh = NULL; + struct scsi_device_handler *tmp, *found = NULL; spin_lock(&list_lock); - list_for_each_entry(tmp, &scsi_dh_dev_list, node) { - if (!strncmp(sdev->vendor, tmp->vendor, strlen(tmp->vendor)) && - !strncmp(sdev->model, tmp->model, strlen(tmp->model))) { - found_dh = tmp->handler; + list_for_each_entry(tmp, &scsi_dh_list, list) { + if (tmp->idx == idx) { + found = tmp; break; } } spin_unlock(&list_lock); - - return found_dh; -} - -static int scsi_dh_handler_lookup(struct scsi_device_handler *scsi_dh, - struct scsi_device *sdev) -{ - int i, found = 0; - - for(i = 0; scsi_dh->devlist[i].vendor; i++) { - if (!strncmp(sdev->vendor, scsi_dh->devlist[i].vendor, - strlen(scsi_dh->devlist[i].vendor)) && - !strncmp(sdev->model, scsi_dh->devlist[i].model, - strlen(scsi_dh->devlist[i].model))) { - found = 1; - break; - } - } return found; } @@ -102,41 +73,14 @@ device_handler_match(struct scsi_device_handler *scsi_dh, struct scsi_device *sdev) { struct scsi_device_handler *found_dh = NULL; - struct scsi_dh_devinfo_list *tmp; + int idx; - found_dh = scsi_dh_cache_lookup(sdev); - if (found_dh) - return found_dh; + idx = scsi_get_device_flags_keyed(sdev, sdev->vendor, sdev->model, + SCSI_DEVINFO_DH); + found_dh = 
get_device_handler_by_idx(idx); - if (scsi_dh) { - if (scsi_dh_handler_lookup(scsi_dh, sdev)) - found_dh = scsi_dh; - } else { - struct scsi_device_handler *tmp_dh; - - spin_lock(&list_lock); - list_for_each_entry(tmp_dh, &scsi_dh_list, list) { - if (scsi_dh_handler_lookup(tmp_dh, sdev)) - found_dh = tmp_dh; - } - spin_unlock(&list_lock); - } - - if (found_dh) { /* If device is found, add it to the cache */ - tmp = kmalloc(sizeof(*tmp), GFP_KERNEL); - if (tmp) { - strncpy(tmp->vendor, sdev->vendor, 8); - strncpy(tmp->model, sdev->model, 16); - tmp->vendor[8] = '\0'; - tmp->model[16] = '\0'; - tmp->handler = found_dh; - spin_lock(&list_lock); - list_add(&tmp->node, &scsi_dh_dev_list); - spin_unlock(&list_lock); - } else { - found_dh = NULL; - } - } + if (scsi_dh && found_dh != scsi_dh) + found_dh = NULL; return found_dh; } @@ -373,12 +317,25 @@ static int scsi_dh_notifier_remove(struct device *dev, void *data) */ int scsi_register_device_handler(struct scsi_device_handler *scsi_dh) { + int i; + if (get_device_handler(scsi_dh->name)) return -EBUSY; spin_lock(&list_lock); + scsi_dh->idx = scsi_dh_list_idx++; list_add(&scsi_dh->list, &scsi_dh_list); spin_unlock(&list_lock); + + for (i = 0; scsi_dh->devlist[i].vendor; i++) { + scsi_dev_info_list_add_keyed(0, + scsi_dh->devlist[i].vendor, + scsi_dh->devlist[i].model, + NULL, + scsi_dh->idx, + SCSI_DEVINFO_DH); + } + bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh, scsi_dh_notifier_add); printk(KERN_INFO "%s: device handler registered\n", scsi_dh->name); @@ -395,7 +352,7 @@ EXPORT_SYMBOL_GPL(scsi_register_device_handler); */ int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh) { - struct scsi_dh_devinfo_list *tmp, *pos; + int i; if (!get_device_handler(scsi_dh->name)) return -ENODEV; @@ -403,14 +360,14 @@ int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh) bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh, scsi_dh_notifier_remove); + for (i = 0; scsi_dh->devlist[i].vendor; i++) { + scsi_dev_info_list_del_keyed(scsi_dh->devlist[i].vendor, + scsi_dh->devlist[i].model, + SCSI_DEVINFO_DH); + } + spin_lock(&list_lock); list_del(&scsi_dh->list); - list_for_each_entry_safe(pos, tmp, &scsi_dh_dev_list, node) { - if (pos->handler == scsi_dh) { - list_del(&pos->node); - kfree(pos); - } - } spin_unlock(&list_lock); printk(KERN_INFO "%s: device handler unregistered\n", scsi_dh->name); @@ -576,6 +533,10 @@ static int __init scsi_dh_init(void) { int r; + r = scsi_dev_info_add_list(SCSI_DEVINFO_DH, "SCSI Device Handler"); + if (r) + return r; + r = bus_register_notifier(&scsi_bus_type, &scsi_dh_nb); if (!r) @@ -590,6 +551,7 @@ static void __exit scsi_dh_exit(void) bus_for_each_dev(&scsi_bus_type, NULL, NULL, scsi_dh_sysfs_attr_remove); bus_unregister_notifier(&scsi_bus_type, &scsi_dh_nb); + scsi_dev_info_remove_list(SCSI_DEVINFO_DH); } module_init(scsi_dh_init); diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h index ce9e0adc8df8..9868afdb24e2 100644 --- a/drivers/scsi/scsi_priv.h +++ b/drivers/scsi/scsi_priv.h @@ -45,6 +45,7 @@ static inline void scsi_log_completion(struct scsi_cmnd *cmd, int disposition) enum { SCSI_DEVINFO_GLOBAL = 0, SCSI_DEVINFO_SPI, + SCSI_DEVINFO_DH, }; extern int scsi_get_device_flags(struct scsi_device *sdev, -- cgit v1.2.3 From 75c0b3867b3d3ed17142888cd4f334fbee20e3f7 Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Sun, 23 Jan 2011 08:16:24 -0600 Subject: [SCSI] libsas: fix ATAPI check condition termination ATAPI check condition needs to be treated the same as a success or 
protocol return. The register returns from the PACKET command are all correctly positioned in the device to host register FIS and so we should collect them properly. Right at the moment this doesn't matter because libata sends a request sense always for ATAPI errors, but if it ever checked the registers, we should have the correct contents just in case. Signed-off-by: James Bottomley --- drivers/scsi/libsas/sas_ata.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index e1a395b438ee..4a6e203861d2 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -71,13 +71,13 @@ static enum ata_completion_errors sas_to_ata_err(struct task_status_struct *ts) case SAS_SG_ERR: return AC_ERR_INVALID; - case SAM_STAT_CHECK_CONDITION: case SAS_OPEN_TO: case SAS_OPEN_REJECT: SAS_DPRINTK("%s: Saw error %d. What to do?\n", __func__, ts->stat); return AC_ERR_OTHER; + case SAM_STAT_CHECK_CONDITION: case SAS_ABORTED_TASK: return AC_ERR_DEV; @@ -107,13 +107,15 @@ static void sas_ata_task_done(struct sas_task *task) sas_ha = dev->port->ha; spin_lock_irqsave(dev->sata_dev.ap->lock, flags); - if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD) { + if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD || + ((stat->stat == SAM_STAT_CHECK_CONDITION && + dev->sata_dev.command_set == ATAPI_COMMAND_SET))) { ata_tf_from_fis(resp->ending_fis, &dev->sata_dev.tf); qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command); dev->sata_dev.sstatus = resp->sstatus; dev->sata_dev.serror = resp->serror; dev->sata_dev.scontrol = resp->scontrol; - } else if (stat->stat != SAM_STAT_GOOD) { + } else { ac = sas_to_ata_err(stat); if (ac) { SAS_DPRINTK("%s: SAS error %x\n", __func__, -- cgit v1.2.3 From 96db6fa992570bfa46da8428fa466ee6b18e39d6 Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Sun, 23 Jan 2011 08:19:00 -0600 Subject: [SCSI] libsas: convert to standard kernel debugging Instead of using a config option for debugging, just dump the messages with KERN_DEBUG. Signed-off-by: James Bottomley --- drivers/scsi/libsas/Kconfig | 8 -------- drivers/scsi/libsas/Makefile | 4 ---- drivers/scsi/libsas/sas_dump.c | 4 ---- drivers/scsi/libsas/sas_dump.h | 12 ------------ drivers/scsi/libsas/sas_internal.h | 6 +----- 5 files changed, 1 insertion(+), 33 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/libsas/Kconfig b/drivers/scsi/libsas/Kconfig index 18f33cd54411..9dafe64e7c7a 100644 --- a/drivers/scsi/libsas/Kconfig +++ b/drivers/scsi/libsas/Kconfig @@ -46,11 +46,3 @@ config SCSI_SAS_HOST_SMP Allows sas hosts to receive SMP frames. Selecting this option builds an SMP interpreter into libsas. Say N here if you want to save the few kb this consumes. - -config SCSI_SAS_LIBSAS_DEBUG - bool "Compile the SAS Domain Transport Attributes in debug mode" - default y - depends on SCSI_SAS_LIBSAS - help - Compiles the SAS Layer in debug mode. In debug mode, the - SAS Layer prints diagnostic and debug messages. 
diff --git a/drivers/scsi/libsas/Makefile b/drivers/scsi/libsas/Makefile index 1ad1323c60fa..566a10024598 100644 --- a/drivers/scsi/libsas/Makefile +++ b/drivers/scsi/libsas/Makefile @@ -21,10 +21,6 @@ # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 # USA -ifeq ($(CONFIG_SCSI_SAS_LIBSAS_DEBUG),y) - EXTRA_CFLAGS += -DSAS_DEBUG -endif - obj-$(CONFIG_SCSI_SAS_LIBSAS) += libsas.o libsas-y += sas_init.o \ sas_phy.o \ diff --git a/drivers/scsi/libsas/sas_dump.c b/drivers/scsi/libsas/sas_dump.c index c17c25030f1c..fc460933575c 100644 --- a/drivers/scsi/libsas/sas_dump.c +++ b/drivers/scsi/libsas/sas_dump.c @@ -24,8 +24,6 @@ #include "sas_dump.h" -#ifdef SAS_DEBUG - static const char *sas_hae_str[] = { [0] = "HAE_RESET", }; @@ -72,5 +70,3 @@ void sas_dump_port(struct asd_sas_port *port) SAS_DPRINTK("port%d: oob_mode:0x%x\n", port->id, port->oob_mode); SAS_DPRINTK("port%d: num_phys:%d\n", port->id, port->num_phys); } - -#endif /* SAS_DEBUG */ diff --git a/drivers/scsi/libsas/sas_dump.h b/drivers/scsi/libsas/sas_dump.h index 47b45d4f5258..800e4c69093f 100644 --- a/drivers/scsi/libsas/sas_dump.h +++ b/drivers/scsi/libsas/sas_dump.h @@ -24,19 +24,7 @@ #include "sas_internal.h" -#ifdef SAS_DEBUG - void sas_dprint_porte(int phyid, enum port_event pe); void sas_dprint_phye(int phyid, enum phy_event pe); void sas_dprint_hae(struct sas_ha_struct *sas_ha, enum ha_event he); void sas_dump_port(struct asd_sas_port *port); - -#else /* SAS_DEBUG */ - -static inline void sas_dprint_porte(int phyid, enum port_event pe) { } -static inline void sas_dprint_phye(int phyid, enum phy_event pe) { } -static inline void sas_dprint_hae(struct sas_ha_struct *sas_ha, - enum ha_event he) { } -static inline void sas_dump_port(struct asd_sas_port *port) { } - -#endif /* SAS_DEBUG */ diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h index 0001374bd6b2..8b538bd1ff2b 100644 --- a/drivers/scsi/libsas/sas_internal.h +++ b/drivers/scsi/libsas/sas_internal.h @@ -33,11 +33,7 @@ #define sas_printk(fmt, ...) printk(KERN_NOTICE "sas: " fmt, ## __VA_ARGS__) -#ifdef SAS_DEBUG -#define SAS_DPRINTK(fmt, ...) printk(KERN_NOTICE "sas: " fmt, ## __VA_ARGS__) -#else -#define SAS_DPRINTK(fmt, ...) -#endif +#define SAS_DPRINTK(fmt, ...) printk(KERN_DEBUG "sas: " fmt, ## __VA_ARGS__) #define TO_SAS_TASK(_scsi_cmd) ((void *)(_scsi_cmd)->host_scribble) #define ASSIGN_SAS_TASK(_sc, _t) do { (_sc)->host_scribble = (void *) _t; } while (0) -- cgit v1.2.3 From 1e34c8387380269b9d7707d625aeb9e9e92233f0 Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Sun, 23 Jan 2011 08:34:25 -0600 Subject: [SCSI] libsas: remove spurious sata control register read/write Originally, libata required the illusion that it could access the sata control register. Now, however, it can run perfectly well without them, so remove the dummy routines from libsas which tried to emulate them (but only ended up causing confusion). 
Signed-off-by: James Bottomley --- drivers/scsi/libsas/sas_ata.c | 51 ------------------------------------------- 1 file changed, 51 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index 4a6e203861d2..c2a5cc75b407 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -301,55 +301,6 @@ static void sas_ata_post_internal(struct ata_queued_cmd *qc) } } -static int sas_ata_scr_write(struct ata_link *link, unsigned int sc_reg_in, - u32 val) -{ - struct domain_device *dev = link->ap->private_data; - - SAS_DPRINTK("STUB %s\n", __func__); - switch (sc_reg_in) { - case SCR_STATUS: - dev->sata_dev.sstatus = val; - break; - case SCR_CONTROL: - dev->sata_dev.scontrol = val; - break; - case SCR_ERROR: - dev->sata_dev.serror = val; - break; - case SCR_ACTIVE: - dev->sata_dev.ap->link.sactive = val; - break; - default: - return -EINVAL; - } - return 0; -} - -static int sas_ata_scr_read(struct ata_link *link, unsigned int sc_reg_in, - u32 *val) -{ - struct domain_device *dev = link->ap->private_data; - - SAS_DPRINTK("STUB %s\n", __func__); - switch (sc_reg_in) { - case SCR_STATUS: - *val = dev->sata_dev.sstatus; - return 0; - case SCR_CONTROL: - *val = dev->sata_dev.scontrol; - return 0; - case SCR_ERROR: - *val = dev->sata_dev.serror; - return 0; - case SCR_ACTIVE: - *val = dev->sata_dev.ap->link.sactive; - return 0; - default: - return -EINVAL; - } -} - static struct ata_port_operations sas_sata_ops = { .phy_reset = sas_ata_phy_reset, .post_internal_cmd = sas_ata_post_internal, @@ -359,8 +310,6 @@ static struct ata_port_operations sas_sata_ops = { .qc_fill_rtf = sas_ata_qc_fill_rtf, .port_start = ata_sas_port_start, .port_stop = ata_sas_port_stop, - .scr_read = sas_ata_scr_read, - .scr_write = sas_ata_scr_write }; static struct ata_port_info sata_port_info = { -- cgit v1.2.3 From 4977c82504f58d7579acabd21688183eaa8768fb Mon Sep 17 00:00:00 2001 From: Boaz Harrosh Date: Sun, 23 Jan 2011 17:53:24 +0200 Subject: [SCSI] libosd: osd_req_read_sg, optimize the single entry case Since sg-read is a bidi operation, it is a gain to convert a single sg entry into a regular read. Better do this in the generic layer then force each caller to do so. Signed-off-by: Boaz Harrosh Signed-off-by: James Bottomley --- drivers/scsi/osd/osd_initiator.c | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c index b37c8a3c1bb0..86afb13f1e79 100644 --- a/drivers/scsi/osd/osd_initiator.c +++ b/drivers/scsi/osd/osd_initiator.c @@ -1005,11 +1005,23 @@ int osd_req_read_sg(struct osd_request *or, const struct osd_sg_entry *sglist, unsigned numentries) { u64 len; - int ret = _add_sg_continuation_descriptor(or, sglist, numentries, &len); + u64 off; + int ret; - if (ret) - return ret; - osd_req_read(or, obj, 0, bio, len); + if (numentries > 1) { + off = 0; + ret = _add_sg_continuation_descriptor(or, sglist, numentries, + &len); + if (ret) + return ret; + } else { + /* Optimize the case of single segment, read_sg is a + * bidi operation. + */ + len = sglist->len; + off = sglist->offset; + } + osd_req_read(or, obj, off, bio, len); return 0; } -- cgit v1.2.3 From a684b8da35a429a246ec2a91e2742bdff5209709 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 24 Jan 2011 14:57:28 +0100 Subject: [SCSI] remove flush_scheduled_work() usages Simple conversions to drop flush_scheduled_work() usages in drivers/scsi. 
More involved ones will be done in separate patches. * NCR5380, megaraid_sas: cancel_delayed_work() + flush_scheduled_work() -> cancel_delayed_work_sync(). * mpt2sas_scsih: drop unnecessary flush_scheduled_work(). * arcmsr_hba, ipr, pmcraid: flush the used work explicitly instead of using flush_scheduled_work(). Signed-off-by: Tejun Heo Signed-off-by: James Bottomley --- drivers/scsi/NCR5380.c | 3 +-- drivers/scsi/arcmsr/arcmsr_hba.c | 4 ++-- drivers/scsi/ipr.c | 2 +- drivers/scsi/megaraid/megaraid_sas_base.c | 6 ++---- drivers/scsi/mpt2sas/mpt2sas_scsih.c | 1 - drivers/scsi/pmcraid.c | 2 +- 6 files changed, 7 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c index 9a5629f94f95..e7cd2fcbe036 100644 --- a/drivers/scsi/NCR5380.c +++ b/drivers/scsi/NCR5380.c @@ -936,8 +936,7 @@ static void NCR5380_exit(struct Scsi_Host *instance) { struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata; - cancel_delayed_work(&hostdata->coroutine); - flush_scheduled_work(); + cancel_delayed_work_sync(&hostdata->coroutine); } /** diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index 1cadcd6b7da6..d6d17a1a41df 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -1022,7 +1022,7 @@ static void arcmsr_remove(struct pci_dev *pdev) int poll_count = 0; arcmsr_free_sysfs_attr(acb); scsi_remove_host(host); - flush_scheduled_work(); + flush_work_sync(&acb->arcmsr_do_message_isr_bh); del_timer_sync(&acb->eternal_timer); arcmsr_disable_outbound_ints(acb); arcmsr_stop_adapter_bgrb(acb); @@ -1068,7 +1068,7 @@ static void arcmsr_shutdown(struct pci_dev *pdev) (struct AdapterControlBlock *)host->hostdata; del_timer_sync(&acb->eternal_timer); arcmsr_disable_outbound_ints(acb); - flush_scheduled_work(); + flush_work_sync(&acb->arcmsr_do_message_isr_bh); arcmsr_stop_adapter_bgrb(acb); arcmsr_flush_adapter_cache(acb); } diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 9c5c8be72231..a8e24f06b8d8 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -8865,7 +8865,7 @@ static void __ipr_remove(struct pci_dev *pdev) spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); - flush_scheduled_work(); + flush_work_sync(&ioa_cfg->work_q); spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); spin_lock(&ipr_driver_lock); diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 5d6d07bd1cd0..e7c9b41def7b 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -4242,9 +4242,8 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state) /* cancel the delayed work if this work still in queue */ if (instance->ev != NULL) { struct megasas_aen_event *ev = instance->ev; - cancel_delayed_work( + cancel_delayed_work_sync( (struct delayed_work *)&ev->hotplug_work); - flush_scheduled_work(); instance->ev = NULL; } @@ -4417,9 +4416,8 @@ static void __devexit megasas_detach_one(struct pci_dev *pdev) /* cancel the delayed work if this work still in queue*/ if (instance->ev != NULL) { struct megasas_aen_event *ev = instance->ev; - cancel_delayed_work( + cancel_delayed_work_sync( (struct delayed_work *)&ev->hotplug_work); - flush_scheduled_work(); instance->ev = NULL; } diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index eda347c57979..b375150a379b 100644 --- 
a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -6935,7 +6935,6 @@ _scsih_suspend(struct pci_dev *pdev, pm_message_t state) u32 device_state; mpt2sas_base_stop_watchdog(ioc); - flush_scheduled_work(); scsi_block_requests(shost); device_state = pci_choose_state(pdev, state); printk(MPT2SAS_INFO_FMT "pdev=0x%p, slot=%s, entering " diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index 321cf3ae8630..bcf858e88c64 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c @@ -5454,7 +5454,7 @@ static void __devexit pmcraid_remove(struct pci_dev *pdev) pmcraid_shutdown(pdev); pmcraid_disable_interrupts(pinstance, ~0); - flush_scheduled_work(); + flush_work_sync(&pinstance->worker_q); pmcraid_kill_tasklets(pinstance); pmcraid_unregister_interrupt_handler(pinstance); -- cgit v1.2.3 From 429305e4650c5d3395c21ca183455a3f3e3568af Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 24 Jan 2011 14:57:29 +0100 Subject: [SCSI] pm8001: simplify workqueue usage pm8001 manages its own list of pending works and cancel them on device free. It is unnecessarily complex and has a race condition - the works are canceled but not synced, so the work could still be running during and after the data structures are freed. This patch simplifies workqueue usage. * A driver specific workqueue pm8001_wq is created to serve these work items. * To avoid confusion, the "queue" suffixes are dropped from work items and functions. * Delayed queueing was never used. pm8001_work now uses work_struct instead. * The driver no longer keeps track of pending works. All pm8001_works are queued to pm8001_wq and the workqueue is flushed as necessary. flush_scheduled_work() usage is removed during conversion. Signed-off-by: Tejun Heo Acked-by: Jack Wang Signed-off-by: James Bottomley --- drivers/scsi/pm8001/pm8001_hwi.c | 37 +++++++++++++++++-------------------- drivers/scsi/pm8001/pm8001_init.c | 27 ++++++++++++++++++--------- drivers/scsi/pm8001/pm8001_sas.h | 10 ++++++---- 3 files changed, 41 insertions(+), 33 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index d8db0137c0c7..18b6c55cd08c 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -1382,53 +1382,50 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha, return MPI_IO_STATUS_BUSY; } -static void pm8001_work_queue(struct work_struct *work) +static void pm8001_work_fn(struct work_struct *work) { - struct delayed_work *dw = container_of(work, struct delayed_work, work); - struct pm8001_wq *wq = container_of(dw, struct pm8001_wq, work_q); + struct pm8001_work *pw = container_of(work, struct pm8001_work, work); struct pm8001_device *pm8001_dev; - struct domain_device *dev; + struct domain_device *dev; - switch (wq->handler) { + switch (pw->handler) { case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: - pm8001_dev = wq->data; + pm8001_dev = pw->data; dev = pm8001_dev->sas_device; pm8001_I_T_nexus_reset(dev); break; case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY: - pm8001_dev = wq->data; + pm8001_dev = pw->data; dev = pm8001_dev->sas_device; pm8001_I_T_nexus_reset(dev); break; case IO_DS_IN_ERROR: - pm8001_dev = wq->data; + pm8001_dev = pw->data; dev = pm8001_dev->sas_device; pm8001_I_T_nexus_reset(dev); break; case IO_DS_NON_OPERATIONAL: - pm8001_dev = wq->data; + pm8001_dev = pw->data; dev = pm8001_dev->sas_device; pm8001_I_T_nexus_reset(dev); break; } - list_del(&wq->entry); - kfree(wq); + kfree(pw); } static int 
pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data, int handler) { - struct pm8001_wq *wq; + struct pm8001_work *pw; int ret = 0; - wq = kmalloc(sizeof(struct pm8001_wq), GFP_ATOMIC); - if (wq) { - wq->pm8001_ha = pm8001_ha; - wq->data = data; - wq->handler = handler; - INIT_DELAYED_WORK(&wq->work_q, pm8001_work_queue); - list_add_tail(&wq->entry, &pm8001_ha->wq_list); - schedule_delayed_work(&wq->work_q, 0); + pw = kmalloc(sizeof(struct pm8001_work), GFP_ATOMIC); + if (pw) { + pw->pm8001_ha = pm8001_ha; + pw->data = data; + pw->handler = handler; + INIT_WORK(&pw->work, pm8001_work_fn); + queue_work(pm8001_wq, &pw->work); } else ret = -ENOMEM; diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index b95285f3383f..002360da01e3 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -51,6 +51,8 @@ static int pm8001_id; LIST_HEAD(hba_list); +struct workqueue_struct *pm8001_wq; + /** * The main structure which LLDD must register for scsi core. */ @@ -134,7 +136,6 @@ static void __devinit pm8001_phy_init(struct pm8001_hba_info *pm8001_ha, static void pm8001_free(struct pm8001_hba_info *pm8001_ha) { int i; - struct pm8001_wq *wq; if (!pm8001_ha) return; @@ -150,8 +151,7 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha) PM8001_CHIP_DISP->chip_iounmap(pm8001_ha); if (pm8001_ha->shost) scsi_host_put(pm8001_ha->shost); - list_for_each_entry(wq, &pm8001_ha->wq_list, entry) - cancel_delayed_work(&wq->work_q); + flush_workqueue(pm8001_wq); kfree(pm8001_ha->tags); kfree(pm8001_ha); } @@ -381,7 +381,6 @@ pm8001_pci_alloc(struct pci_dev *pdev, u32 chip_id, struct Scsi_Host *shost) pm8001_ha->sas = sha; pm8001_ha->shost = shost; pm8001_ha->id = pm8001_id++; - INIT_LIST_HEAD(&pm8001_ha->wq_list); pm8001_ha->logging_level = 0x01; sprintf(pm8001_ha->name, "%s%d", DRV_NAME, pm8001_ha->id); #ifdef PM8001_USE_TASKLET @@ -758,7 +757,7 @@ static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state) int i , pos; u32 device_state; pm8001_ha = sha->lldd_ha; - flush_scheduled_work(); + flush_workqueue(pm8001_wq); scsi_block_requests(pm8001_ha->shost); pos = pci_find_capability(pdev, PCI_CAP_ID_PM); if (pos == 0) { @@ -870,17 +869,26 @@ static struct pci_driver pm8001_pci_driver = { */ static int __init pm8001_init(void) { - int rc; + int rc = -ENOMEM; + + pm8001_wq = alloc_workqueue("pm8001", 0, 0); + if (!pm8001_wq) + goto err; + pm8001_id = 0; pm8001_stt = sas_domain_attach_transport(&pm8001_transport_ops); if (!pm8001_stt) - return -ENOMEM; + goto err_wq; rc = pci_register_driver(&pm8001_pci_driver); if (rc) - goto err_out; + goto err_tp; return 0; -err_out: + +err_tp: sas_release_transport(pm8001_stt); +err_wq: + destroy_workqueue(pm8001_wq); +err: return rc; } @@ -888,6 +896,7 @@ static void __exit pm8001_exit(void) { pci_unregister_driver(&pm8001_pci_driver); sas_release_transport(pm8001_stt); + destroy_workqueue(pm8001_wq); } module_init(pm8001_init); diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h index 7f064f9ca828..bdb6b27dedd6 100644 --- a/drivers/scsi/pm8001/pm8001_sas.h +++ b/drivers/scsi/pm8001/pm8001_sas.h @@ -50,6 +50,7 @@ #include #include #include +#include #include #include #include @@ -379,18 +380,16 @@ struct pm8001_hba_info { #ifdef PM8001_USE_TASKLET struct tasklet_struct tasklet; #endif - struct list_head wq_list; u32 logging_level; u32 fw_status; const struct firmware *fw_image; }; -struct pm8001_wq { - struct delayed_work work_q; +struct pm8001_work { + 
struct work_struct work; struct pm8001_hba_info *pm8001_ha; void *data; int handler; - struct list_head entry; }; struct pm8001_fw_image_header { @@ -460,6 +459,9 @@ struct fw_control_ex { void *param3; }; +/* pm8001 workqueue */ +extern struct workqueue_struct *pm8001_wq; + /******************** function prototype *********************/ int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out); void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha); -- cgit v1.2.3 From 98cb7e4413d189cd2b54daf993a4667d9788c0bb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B8rn=20Mork?= Date: Wed, 19 Jan 2011 10:01:14 +0100 Subject: [SCSI] megaraid_sas: Sanity check user supplied length before passing it to dma_alloc_coherent() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The ioc->sgl[i].iov_len value is supplied by the ioctl caller, and can be zero in some cases. Assume that's valid and continue without error. Fixes (multiple individual reports of the same problem for quite a while): http://marc.info/?l=linux-ide&m=128941801715301 http://bugs.debian.org/604627 http://www.mail-archive.com/linux-poweredge@dell.com/msg02575.html megasas: Failed to alloc kernel SGL buffer for IOCTL and [ 69.162538] ------------[ cut here ]------------ [ 69.162806] kernel BUG at /build/buildd/linux-2.6.32/lib/swiotlb.c:368! [ 69.163134] invalid opcode: 0000 [#1] SMP [ 69.163570] last sysfs file: /sys/devices/system/cpu/cpu3/cache/index2/shared_cpu_map [ 69.163975] CPU 0 [ 69.164227] Modules linked in: fbcon tileblit font bitblit softcursor vga16fb vgastate ioatdma radeon ttm drm_kms_helper shpchp drm i2c_algo_bit lp parport floppy pata_jmicron megaraid_sas igb dca [ 69.167419] Pid: 1206, comm: smartctl Tainted: G W 2.6.32-25-server #45-Ubuntu X8DTN [ 69.167843] RIP: 0010:[] [] map_single+0x255/0x260 [ 69.168370] RSP: 0018:ffff88081c0ebc58 EFLAGS: 00010246 [ 69.168655] RAX: 000000000003bffc RBX: 00000000ffffffff RCX: 0000000000000002 [ 69.169000] RDX: 0000000000000000 RSI: 0000000000000000 RDI: ffff88001dffe000 [ 69.169346] RBP: ffff88081c0ebcb8 R08: 0000000000000000 R09: ffff880000030840 [ 69.169691] R10: 0000000000100000 R11: 0000000000000000 R12: 0000000000000000 [ 69.170036] R13: 00000000ffffffff R14: 0000000000000001 R15: 0000000000200000 [ 69.170382] FS: 00007fb8de189720(0000) GS:ffff88001de00000(0000) knlGS:0000000000000000 [ 69.170794] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 69.171094] CR2: 00007fb8dd59237c CR3: 000000081a790000 CR4: 00000000000006f0 [ 69.171439] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [ 69.171784] DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400 [ 69.172130] Process smartctl (pid: 1206, threadinfo ffff88081c0ea000, task ffff88081a760000) [ 69.194513] Stack: [ 69.205788] 0000000000000034 00000002817e3390 0000000000000000 ffff88081c0ebe00 [ 69.217739] <0> 0000000000000000 000000000003bffc 0000000000000000 0000000000000000 [ 69.241250] <0> 0000000000000000 00000000ffffffff ffff88081c5b4080 ffff88081c0ebe00 [ 69.277310] Call Trace: [ 69.289278] [] swiotlb_alloc_coherent+0xec/0x130 [ 69.301118] [] x86_swiotlb_alloc_coherent+0x61/0x70 [ 69.313045] [] megasas_mgmt_fw_ioctl+0x1ae/0x690 [megaraid_sas] [ 69.336399] [] megasas_mgmt_ioctl_fw+0x198/0x240 [megaraid_sas] [ 69.359346] [] megasas_mgmt_ioctl+0x35/0x50 [megaraid_sas] [ 69.370902] [] vfs_ioctl+0x22/0xa0 [ 69.382322] [] ? alloc_fd+0x10a/0x150 [ 69.393622] [] do_vfs_ioctl+0x81/0x410 [ 69.404696] [] ? 
do_page_fault+0x153/0x3b0 [ 69.415761] [] sys_ioctl+0x81/0xa0 [ 69.426640] [] system_call_fastpath+0x16/0x1b [ 69.437491] Code: fe ff ff 48 8b 3d 74 38 76 00 41 bf 00 00 20 00 e8 51 f5 d7 ff 83 e0 ff 48 05 ff 07 00 00 48 c1 e8 0b 48 89 45 c8 e9 13 fe ff ff <0f> 0b eb fe 0f 1f 80 00 00 00 00 55 48 89 e5 48 83 ec 20 4c 89 [ 69.478216] RIP [] map_single+0x255/0x260 [ 69.489668] RSP [ 69.500975] ---[ end trace 6a2181b634e2abc7 ]--- Reported-by: Bokhan Artem Reported by: Marc-Christian Petersen Signed-off-by: Bjørn Mork Cc: "Benz, Michael" Signed-off-by: James Bottomley --- drivers/scsi/megaraid/megaraid_sas_base.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index e7c9b41def7b..b71caa3f44ea 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -4609,6 +4609,9 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, * For each user buffer, create a mirror buffer and copy in */ for (i = 0; i < ioc->sge_count; i++) { + if (!ioc->sgl[i].iov_len) + continue; + kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev, ioc->sgl[i].iov_len, &buf_handle, GFP_KERNEL); -- cgit v1.2.3 From 7a1e9d829f8bd821466c5ea834ad6f378740d2be Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Tue, 25 Jan 2011 11:52:17 -0500 Subject: [SCSI] scsi_dh_hp_sw: fix deadlock in start_stop_endio The use of blk_execute_rq_nowait() implies __blk_put_request() is needed in start_stop_endio() rather than blk_put_request() -- blk_finish_request() is called with queue lock already held. Signed-off-by: Mike Snitzer Signed-off-by: James Bottomley --- drivers/scsi/device_handler/scsi_dh_hp_sw.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c index e3916641e627..d0363c8fa348 100644 --- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c +++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c @@ -225,7 +225,8 @@ static void start_stop_endio(struct request *req, int error) } } done: - blk_put_request(req); + req->end_io_data = NULL; + __blk_put_request(req->q, req); if (h->callback_fn) { h->callback_fn(h->callback_data, err); h->callback_fn = h->callback_data = NULL; -- cgit v1.2.3 From 63583cca745f440167bf27877182dc13e19d4bcf Mon Sep 17 00:00:00 2001 From: Hannes Reinecke Date: Tue, 18 Jan 2011 10:13:11 +0100 Subject: [SCSI] Add detailed SCSI I/O errors Instead of just passing 'EIO' for any I/O error we should be notifying the upper layers with more details about the cause of this error. Update the possible I/O errors to: - ENOLINK: Link failure between host and target - EIO: Retryable I/O error - EREMOTEIO: Non-retryable I/O error - EBADE: I/O error restricted to the I_T_L nexus 'Retryable' in this context means that an I/O error _might_ be restricted to the I_T_L nexus (vulgo: path), so retrying on another nexus / path might succeed. 'Non-retryable' in general refers to a target failure, so this error will always be generated regardless of the I_T_L nexus it was send on. I/O errors restricted to the I_T_L nexus might be retried on another nexus / path, but they should _not_ be queued if no paths are available. 
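Concretely, the new behaviour boils down to translating the SCSI host byte into one of those errno values; a condensed sketch of the mapping (the full scsi_lib.c hunk appears in the patch below):

	switch (host_byte(result)) {
	case DID_TRANSPORT_FAILFAST:	/* link failure between host and target */
		return -ENOLINK;
	case DID_TARGET_FAILURE:	/* permanent target failure, not path specific */
		return -EREMOTEIO;
	case DID_NEXUS_FAILURE:		/* failure restricted to this I_T_L nexus */
		return -EBADE;
	default:			/* retryable I/O error */
		return -EIO;
	}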
Signed-off-by: Hannes Reinecke Signed-off-by: Mike Snitzer Signed-off-by: James Bottomley --- drivers/scsi/scsi_error.c | 24 +++++++++++++++++------- drivers/scsi/scsi_lib.c | 28 ++++++++++++++++++++++++++-- 2 files changed, 43 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 45c75649b9e0..991de3c15cfc 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -223,7 +223,7 @@ static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost, * @scmd: Cmd to have sense checked. * * Return value: - * SUCCESS or FAILED or NEEDS_RETRY + * SUCCESS or FAILED or NEEDS_RETRY or TARGET_ERROR * * Notes: * When a deferred error is detected the current command has @@ -326,17 +326,19 @@ static int scsi_check_sense(struct scsi_cmnd *scmd) */ return SUCCESS; - /* these three are not supported */ + /* these are not supported */ case COPY_ABORTED: case VOLUME_OVERFLOW: case MISCOMPARE: - return SUCCESS; + case BLANK_CHECK: + case DATA_PROTECT: + return TARGET_ERROR; case MEDIUM_ERROR: if (sshdr.asc == 0x11 || /* UNRECOVERED READ ERR */ sshdr.asc == 0x13 || /* AMNF DATA FIELD */ sshdr.asc == 0x14) { /* RECORD NOT FOUND */ - return SUCCESS; + return TARGET_ERROR; } return NEEDS_RETRY; @@ -344,11 +346,9 @@ static int scsi_check_sense(struct scsi_cmnd *scmd) if (scmd->device->retry_hwerror) return ADD_TO_MLQUEUE; else - return SUCCESS; + return TARGET_ERROR; case ILLEGAL_REQUEST: - case BLANK_CHECK: - case DATA_PROTECT: default: return SUCCESS; } @@ -787,6 +787,7 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd, case SUCCESS: case NEEDS_RETRY: case FAILED: + case TARGET_ERROR: break; case ADD_TO_MLQUEUE: rtn = NEEDS_RETRY; @@ -1469,6 +1470,14 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd) rtn = scsi_check_sense(scmd); if (rtn == NEEDS_RETRY) goto maybe_retry; + else if (rtn == TARGET_ERROR) { + /* + * Need to modify host byte to signal a + * permanent target failure + */ + scmd->result |= (DID_TARGET_FAILURE << 16); + rtn = SUCCESS; + } /* if rtn == FAILED, we have no sense information; * returning FAILED will wake the error handler thread * to collect the sense and redo the decide @@ -1486,6 +1495,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd) case RESERVATION_CONFLICT: sdev_printk(KERN_INFO, scmd->device, "reservation conflict\n"); + scmd->result |= (DID_NEXUS_FAILURE << 16); return SUCCESS; /* causes immediate i/o error */ default: return FAILED; diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 9045c52abd25..8d4ef8efa3cd 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -667,6 +667,30 @@ void scsi_release_buffers(struct scsi_cmnd *cmd) } EXPORT_SYMBOL(scsi_release_buffers); +static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result) +{ + int error = 0; + + switch(host_byte(result)) { + case DID_TRANSPORT_FAILFAST: + error = -ENOLINK; + break; + case DID_TARGET_FAILURE: + cmd->result |= (DID_OK << 16); + error = -EREMOTEIO; + break; + case DID_NEXUS_FAILURE: + cmd->result |= (DID_OK << 16); + error = -EBADE; + break; + default: + error = -EIO; + break; + } + + return error; +} + /* * Function: scsi_io_completion() * @@ -737,7 +761,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) req->sense_len = len; } if (!sense_deferred) - error = -EIO; + error = __scsi_error_from_host_byte(cmd, result); } req->resid_len = scsi_get_resid(cmd); @@ -796,7 +820,7 @@ void scsi_io_completion(struct scsi_cmnd 
*cmd, unsigned int good_bytes) if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL) return; - error = -EIO; + error = __scsi_error_from_host_byte(cmd, result); if (host_byte(result) == DID_RESET) { /* Third party bus reset or reset for error recovery -- cgit v1.2.3 From 751b2a7d623ead9e55f751a6087efeab454b5659 Mon Sep 17 00:00:00 2001 From: Hannes Reinecke Date: Tue, 18 Jan 2011 10:13:12 +0100 Subject: [SCSI] dm mpath: propagate target errors immediately DM now has more information about the nature of the underlying storage failure. Path failure is avoided if a request failed due to a target error. Instead the target error is immediately passed up the stack. Discard requests that fail due to non-target errors may now be retried. Errors restricted to the path will be retried or returned if no paths are available, irregarding the no_path_retry setting. Signed-off-by: Mike Snitzer Signed-off-by: Hannes Reinecke Acked-by: Alasdair G Kergon Signed-off-by: James Bottomley --- drivers/md/dm-mpath.c | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index b82d28819e2a..4b0b63c290a6 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -1283,24 +1283,22 @@ static int do_end_io(struct multipath *m, struct request *clone, if (!error && !clone->errors) return 0; /* I/O complete */ - if (error == -EOPNOTSUPP) - return error; - - if (clone->cmd_flags & REQ_DISCARD) - /* - * Pass all discard request failures up. - * FIXME: only fail_path if the discard failed due to a - * transport problem. This requires precise understanding - * of the underlying failure (e.g. the SCSI sense). - */ + if (error == -EOPNOTSUPP || error == -EREMOTEIO) return error; if (mpio->pgpath) fail_path(mpio->pgpath); spin_lock_irqsave(&m->lock, flags); - if (!m->nr_valid_paths && !m->queue_if_no_path && !__must_push_back(m)) - r = -EIO; + if (!m->nr_valid_paths) { + if (!m->queue_if_no_path) { + if (!__must_push_back(m)) + r = -EIO; + } else { + if (error == -EBADE) + r = error; + } + } spin_unlock_irqrestore(&m->lock, flags); return r; -- cgit v1.2.3 From f56635a8695f6b4fccf546cb2d5cf246d1892a0f Mon Sep 17 00:00:00 2001 From: Yi Zou Date: Fri, 28 Jan 2011 16:03:15 -0800 Subject: [SCSI] libfc: always initialize the FCoE DDP exchange id for fsp as FC_XID_UNKNOWN The fsp's xfer_ddp is used as indication of the exchange id for the DDPed I/O. We should always initialize it as FC_XID_UNKNOWN for a newly allocated fsp, otherwise the fsp allocated in fc_fcp, i.e., not from queuecommand like LUN RESET that is not doing DDP may still think DDP is setup for it since xid 0 is valid and goes on to call fc_fcp_ddp_done() in fc_fcp_resp() from fc_tm_done(). So, set xfer_ddp as FC_XID_UNKNOWN in fc_fcp_pkt_alloc() now. Also removes the setting of fsp->lp as it's already done when fsp is allocated. 
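The underlying pattern, sketched below with hypothetical names: when 0 is a valid identifier, zero-initialized memory cannot double as "unassigned", so the allocator must install an explicit sentinel -- the same idea as setting xfer_ddp to FC_XID_UNKNOWN in fc_fcp_pkt_alloc().

#include <stdint.h>
#include <stdlib.h>

#define EXAMPLE_XID_UNKNOWN 0xffff	/* sentinel; 0x0 is a legal exchange id */

struct example_pkt {
	uint16_t ddp_xid;		/* exchange id used for DDP, if any */
};

static struct example_pkt *example_pkt_alloc(void)
{
	struct example_pkt *pkt = calloc(1, sizeof(*pkt));

	if (pkt)
		pkt->ddp_xid = EXAMPLE_XID_UNKNOWN;	/* zeroed memory would look like xid 0 */
	return pkt;
}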
Signed-off-by: Yi Zou Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_fcp.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index 5962d1a5a674..21a64d4e2edb 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -155,6 +155,7 @@ static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp) if (fsp) { memset(fsp, 0, sizeof(*fsp)); fsp->lp = lport; + fsp->xfer_ddp = FC_XID_UNKNOWN; atomic_set(&fsp->ref_cnt, 1); init_timer(&fsp->timer); INIT_LIST_HEAD(&fsp->list); @@ -1842,9 +1843,7 @@ static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scs * build the libfc request pkt */ fsp->cmd = sc_cmd; /* save the cmd */ - fsp->lp = lport; /* save the softc ptr */ fsp->rport = rport; /* set the remote port ptr */ - fsp->xfer_ddp = FC_XID_UNKNOWN; sc_cmd->scsi_done = done; /* @@ -2112,7 +2111,6 @@ int fc_eh_device_reset(struct scsi_cmnd *sc_cmd) * the sc passed in is not setup for execution like when sent * through the queuecommand callout. */ - fsp->lp = lport; /* save the softc ptr */ fsp->rport = rport; /* set the remote port ptr */ /* -- cgit v1.2.3 From fa79dbdbdd166025b5835bddaff152bb57c4440a Mon Sep 17 00:00:00 2001 From: Hillf Danton Date: Fri, 28 Jan 2011 16:03:21 -0800 Subject: [SCSI] libfc: Return a valid return code in fc_fcp_pkt_abort() Here ticks_left is added to record the result of wait_for_completion_timeout(). [ Patch title and description edited by Robert Love to make it more descriptive ] Signed-off-by: Hillf Danton Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_fcp.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index 21a64d4e2edb..ba639fad0a1c 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -1202,6 +1202,7 @@ unlock: static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp) { int rc = FAILED; + unsigned long ticks_left; if (fc_fcp_send_abort(fsp)) return FAILED; @@ -1210,13 +1211,13 @@ static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp) fsp->wait_for_comp = 1; spin_unlock_bh(&fsp->scsi_pkt_lock); - rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV); + ticks_left = wait_for_completion_timeout(&fsp->tm_done, + FC_SCSI_TM_TOV); spin_lock_bh(&fsp->scsi_pkt_lock); fsp->wait_for_comp = 0; - if (!rc) { + if (!ticks_left) { FC_FCP_DBG(fsp, "target abort cmd failed\n"); - rc = FAILED; } else if (fsp->state & FC_SRB_ABORTED) { FC_FCP_DBG(fsp, "target abort cmd passed\n"); rc = SUCCESS; -- cgit v1.2.3 From 28a4af1e43047531ab612564e32ab9969c9dd965 Mon Sep 17 00:00:00 2001 From: Hillf Danton Date: Fri, 28 Jan 2011 16:03:26 -0800 Subject: [SCSI] libfc: Cleanup return paths in fc_rport_error_retry This patch makes it so that we only have one call to fc_rport_error. This patch does not completely consolidate return statements, there is still one return used when not calling fc_rport_error, but alternative solutions made the code more confusing. 
[ Patch modified by Robert Love ] [ Patch title and commit message edited by Robert Love to make it more relevant ] Signed-off-by: Hillf Danton Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_rport.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index a7175adab32d..325bc429dda4 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -575,7 +575,7 @@ static void fc_rport_error_retry(struct fc_rport_priv *rdata, /* make sure this isn't an FC_EX_CLOSED error, never retry those */ if (PTR_ERR(fp) == -FC_EX_CLOSED) - return fc_rport_error(rdata, fp); + goto out; if (rdata->retries < rdata->local_port->max_rport_retry_count) { FC_RPORT_DBG(rdata, "Error %ld in state %s, retrying\n", @@ -588,7 +588,8 @@ static void fc_rport_error_retry(struct fc_rport_priv *rdata, return; } - return fc_rport_error(rdata, fp); +out: + fc_rport_error(rdata, fp); } /** -- cgit v1.2.3 From e4a9a98289606392f2b1b24b4ca4e29154ec4d15 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 28 Jan 2011 16:03:31 -0800 Subject: [SCSI] libfc:prevent dereferencing ERR_PTR in fc_tm_done() If we goto out, then it tries to call kfree_skb() on an ERR_PTR which will oops. Just return directly. Signed-off-by: Dan Carpenter Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_fcp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index ba639fad0a1c..f4eb1ab7b288 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -1323,7 +1323,7 @@ static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg) * * scsi-eh will escalate for when either happens. */ - goto out; + return; } if (fc_fcp_lock_pkt(fsp)) -- cgit v1.2.3 From c954f8aed44686569347087a57c75c590ce2782d Mon Sep 17 00:00:00 2001 From: Venkata Siva Vijayendra Bhamidipati Date: Fri, 28 Jan 2011 16:03:36 -0800 Subject: [SCSI] fnic: fix memory leak Fix memory leak arising due to incorrect freeing of allocated memory for vnic stats when unregistering a vnic. Signed-off-by: Abhijeet Joglekar Signed-off-by: Venkata Siva Vijayendra Bhamidipati Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fnic/vnic_dev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c index db710148d156..b576be734e2e 100644 --- a/drivers/scsi/fnic/vnic_dev.c +++ b/drivers/scsi/fnic/vnic_dev.c @@ -654,7 +654,7 @@ void vnic_dev_unregister(struct vnic_dev *vdev) vdev->linkstatus_pa); if (vdev->stats) pci_free_consistent(vdev->pdev, - sizeof(struct vnic_dev), + sizeof(struct vnic_stats), vdev->stats, vdev->stats_pa); if (vdev->fw_info) pci_free_consistent(vdev->pdev, -- cgit v1.2.3 From 6da92d3463cd60f5b804475f70f659dc07331929 Mon Sep 17 00:00:00 2001 From: Venkata Siva Vijayendra Bhamidipati Date: Fri, 28 Jan 2011 16:03:42 -0800 Subject: [SCSI] fnic: Bumping up fnic version from 1.4.0.145 to 1.5.0.1. 
Signed-off-by: Venkata Siva Vijayendra Bhamidipati Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fnic/fnic.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h index 92f185081e62..671cde9d4060 100644 --- a/drivers/scsi/fnic/fnic.h +++ b/drivers/scsi/fnic/fnic.h @@ -37,7 +37,7 @@ #define DRV_NAME "fnic" #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" -#define DRV_VERSION "1.4.0.145" +#define DRV_VERSION "1.5.0.1" #define PFX DRV_NAME ": " #define DFX DRV_NAME "%d: " -- cgit v1.2.3 From 7287fb9114096503eddfffb7b2ed691c809a6106 Mon Sep 17 00:00:00 2001 From: Robert Love Date: Fri, 28 Jan 2011 16:03:47 -0800 Subject: [SCSI] fcoe: Fix module reference count for vports vports are not grabbing module references but are releasing them. This causes the module reference count to decrement too many times and it wraps around past 0. The solution is to do a module_put() in fcoe_interface_release() so that the reference is only released when the fcoe_interface is released. There is a one-to-one relationship between the N_Port and the fcoe_interface, so the module reference will only be dropped when the N_Port is destroyed To create symetry in the code this patch moves the try_module_get() call into fcoe_interface_create(). This means that only the N_Port will grab a reference to the module when its corresponding fcoe_interface is created. This patch also makes it so that the fcoe_interface_create() routine encodes any error codes in the fcoe_interface pointer returned. This way its caller, fcoe_create(), can return an accurate error code. Signed-off-by: Robert Love Tested-by: Ross Brattain Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 33 ++++++++++++++++++++------------- 1 file changed, 20 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 9f9600b67001..452b42169c48 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -356,10 +356,18 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev, struct fcoe_interface *fcoe; int err; + if (!try_module_get(THIS_MODULE)) { + FCOE_NETDEV_DBG(netdev, + "Could not get a reference to the module\n"); + fcoe = ERR_PTR(-EBUSY); + goto out; + } + fcoe = kzalloc(sizeof(*fcoe), GFP_KERNEL); if (!fcoe) { FCOE_NETDEV_DBG(netdev, "Could not allocate fcoe structure\n"); - return NULL; + fcoe = ERR_PTR(-ENOMEM); + goto out_nomod; } dev_hold(netdev); @@ -378,9 +386,15 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev, fcoe_ctlr_destroy(&fcoe->ctlr); kfree(fcoe); dev_put(netdev); - return NULL; + fcoe = ERR_PTR(err); + goto out_nomod; } + goto out; + +out_nomod: + module_put(THIS_MODULE); +out: return fcoe; } @@ -442,6 +456,7 @@ static void fcoe_interface_release(struct kref *kref) fcoe_ctlr_destroy(&fcoe->ctlr); kfree(fcoe); dev_put(netdev); + module_put(THIS_MODULE); } /** @@ -886,7 +901,6 @@ static void fcoe_if_destroy(struct fc_lport *lport) /* Release the Scsi_Host */ scsi_host_put(lport->host); - module_put(THIS_MODULE); } /** @@ -2135,15 +2149,10 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp) */ if (THIS_MODULE->state != MODULE_STATE_LIVE) { rc = -ENODEV; - goto out_nomod; + goto out_nodev; } #endif - if (!try_module_get(THIS_MODULE)) { - rc = -EINVAL; - goto out_nomod; - } - netdev = fcoe_if_to_netdev(buffer); if (!netdev) { rc = -ENODEV; @@ -2157,8 +2166,8 @@ static 
int fcoe_create(const char *buffer, struct kernel_param *kp) } fcoe = fcoe_interface_create(netdev, fip_mode); - if (!fcoe) { - rc = -ENOMEM; + if (IS_ERR(fcoe)) { + rc = PTR_ERR(fcoe); goto out_putdev; } @@ -2198,8 +2207,6 @@ out_free: out_putdev: dev_put(netdev); out_nodev: - module_put(THIS_MODULE); -out_nomod: rtnl_unlock(); mutex_unlock(&fcoe_config_mutex); return rc; -- cgit v1.2.3 From 52ee832195b0ae33f12e334e61cf43d1087f24d6 Mon Sep 17 00:00:00 2001 From: Vasu Dev Date: Fri, 28 Jan 2011 16:03:52 -0800 Subject: [SCSI] fcoe: drop FCoE LOGO in FIP mode Allowing FCoE LOGO followed by CVL in this case prevents FIP login back to the FCF and then keeps lport offline, only FIP LOGO and CLV needs to be processed while in FIP mode, therefore this patch drops FCoE LOGO in FIP mode. Added fcoe_filter_frames() to filter out above mentioned LOGO in fcoe rx path along with other existing filtering in code for bad CRC frames. Adding separate fcoe_filter_frames function helped with better code indentations and if needed then same will allow adding more filters at one place in future. This LOGO drop is added after FCP frames passed up to avoid any additional checks on fast path for this. Signed-off-by: Vasu Dev Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 84 +++++++++++++++++++++++++++++++----------------- 1 file changed, 54 insertions(+), 30 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 452b42169c48..d1146994a1b5 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -1619,6 +1619,56 @@ static void fcoe_percpu_flush_done(struct sk_buff *skb) complete(&fcoe_flush_completion); } +/** + * fcoe_filter_frames() - filter out bad fcoe frames, i.e. bad CRC + * @lport: The local port the frame was received on + * @fp: The received frame + * + * Return: 0 on passing filtering checks + */ +static inline int fcoe_filter_frames(struct fc_lport *lport, + struct fc_frame *fp) +{ + struct fcoe_interface *fcoe; + struct fc_frame_header *fh; + struct sk_buff *skb = (struct sk_buff *)fp; + struct fcoe_dev_stats *stats; + + /* + * We only check CRC if no offload is available and if it is + * it's solicited data, in which case, the FCP layer would + * check it during the copy. 
+ */ + if (lport->crc_offload && skb->ip_summed == CHECKSUM_UNNECESSARY) + fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED; + else + fr_flags(fp) |= FCPHF_CRC_UNCHECKED; + + fh = (struct fc_frame_header *) skb_transport_header(skb); + fh = fc_frame_header_get(fp); + if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && fh->fh_type == FC_TYPE_FCP) + return 0; + + fcoe = ((struct fcoe_port *)lport_priv(lport))->fcoe; + if (is_fip_mode(&fcoe->ctlr) && fc_frame_payload_op(fp) == ELS_LOGO && + ntoh24(fh->fh_s_id) == FC_FID_FLOGI) { + FCOE_DBG("fcoe: dropping FCoE lport LOGO in fip mode\n"); + return -EINVAL; + } + + if (!fr_flags(fp) & FCPHF_CRC_UNCHECKED || + le32_to_cpu(fr_crc(fp)) == ~crc32(~0, skb->data, skb->len)) { + fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED; + return 0; + } + + stats = per_cpu_ptr(lport->dev_stats, get_cpu()); + stats->InvalidCRCCount++; + if (stats->InvalidCRCCount < 5) + printk(KERN_WARNING "fcoe: dropping frame with CRC error\n"); + return -EINVAL; +} + /** * fcoe_recv_frame() - process a single received frame * @skb: frame to process @@ -1629,7 +1679,6 @@ static void fcoe_recv_frame(struct sk_buff *skb) struct fc_lport *lport; struct fcoe_rcv_info *fr; struct fcoe_dev_stats *stats; - struct fc_frame_header *fh; struct fcoe_crc_eof crc_eof; struct fc_frame *fp; struct fcoe_port *port; @@ -1660,7 +1709,6 @@ static void fcoe_recv_frame(struct sk_buff *skb) * was done in fcoe_rcv already. */ hp = (struct fcoe_hdr *) skb_network_header(skb); - fh = (struct fc_frame_header *) skb_transport_header(skb); stats = per_cpu_ptr(lport->dev_stats, get_cpu()); if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) { @@ -1693,35 +1741,11 @@ static void fcoe_recv_frame(struct sk_buff *skb) if (pskb_trim(skb, fr_len)) goto drop; - /* - * We only check CRC if no offload is available and if it is - * it's solicited data, in which case, the FCP layer would - * check it during the copy. - */ - if (lport->crc_offload && - skb->ip_summed == CHECKSUM_UNNECESSARY) - fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED; - else - fr_flags(fp) |= FCPHF_CRC_UNCHECKED; - - fh = fc_frame_header_get(fp); - if ((fh->fh_r_ctl != FC_RCTL_DD_SOL_DATA || - fh->fh_type != FC_TYPE_FCP) && - (fr_flags(fp) & FCPHF_CRC_UNCHECKED)) { - if (le32_to_cpu(fr_crc(fp)) != - ~crc32(~0, skb->data, fr_len)) { - if (stats->InvalidCRCCount < 5) - printk(KERN_WARNING "fcoe: dropping " - "frame with CRC error\n"); - stats->InvalidCRCCount++; - goto drop; - } - fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED; + if (!fcoe_filter_frames(lport, fp)) { + put_cpu(); + fc_exch_recv(lport, fp); + return; } - put_cpu(); - fc_exch_recv(lport, fp); - return; - drop: stats->ErrorFrames++; put_cpu(); -- cgit v1.2.3 From 55204909bb687c997d5601e9f24a25cf9e915d78 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Fri, 28 Jan 2011 16:03:57 -0800 Subject: [SCSI] libfc: fix sparse static and non-ANSI warnings Fix sparse warning for non-ANSI function declaration. Declare workqueue structs as static. 
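For reference, a two-line illustration (hypothetical names, not from the patch) of the constructs sparse objects to and their fixed forms:

/* "int f()" leaves the parameter list unspecified; "int f(void)" is the ANSI prototype. */
int fc_setup_example(void);			/* was: int fc_setup_example(); */

/* An object used by a single file should be static rather than having external linkage. */
static struct workqueue_struct *example_wq;	/* was: struct workqueue_struct *example_wq; */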
Signed-off-by: Randy Dunlap Cc: Robert Love Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_exch.c | 6 +++--- drivers/scsi/libfc/fc_fcp.c | 6 +++--- drivers/scsi/libfc/fc_rport.c | 6 +++--- 3 files changed, 9 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index d21367d3305f..e0b5b15c355b 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -38,7 +38,7 @@ u16 fc_cpu_mask; /* cpu mask for possible cpus */ EXPORT_SYMBOL(fc_cpu_mask); static u16 fc_cpu_order; /* 2's power to represent total possible cpus */ static struct kmem_cache *fc_em_cachep; /* cache for exchanges */ -struct workqueue_struct *fc_exch_workqueue; +static struct workqueue_struct *fc_exch_workqueue; /* * Structure and function definitions for managing Fibre Channel Exchanges @@ -2357,7 +2357,7 @@ EXPORT_SYMBOL(fc_exch_init); /** * fc_setup_exch_mgr() - Setup an exchange manager */ -int fc_setup_exch_mgr() +int fc_setup_exch_mgr(void) { fc_em_cachep = kmem_cache_create("libfc_em", sizeof(struct fc_exch), 0, SLAB_HWCACHE_ALIGN, NULL); @@ -2395,7 +2395,7 @@ int fc_setup_exch_mgr() /** * fc_destroy_exch_mgr() - Destroy an exchange manager */ -void fc_destroy_exch_mgr() +void fc_destroy_exch_mgr(void) { destroy_workqueue(fc_exch_workqueue); kmem_cache_destroy(fc_em_cachep); diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index f4eb1ab7b288..383a98288d2e 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -42,7 +42,7 @@ #include "fc_libfc.h" -struct kmem_cache *scsi_pkt_cachep; +static struct kmem_cache *scsi_pkt_cachep; /* SRB state definitions */ #define FC_SRB_FREE 0 /* cmd is free */ @@ -2244,7 +2244,7 @@ void fc_fcp_destroy(struct fc_lport *lport) } EXPORT_SYMBOL(fc_fcp_destroy); -int fc_setup_fcp() +int fc_setup_fcp(void) { int rc = 0; @@ -2260,7 +2260,7 @@ int fc_setup_fcp() return rc; } -void fc_destroy_fcp() +void fc_destroy_fcp(void) { if (scsi_pkt_cachep) kmem_cache_destroy(scsi_pkt_cachep); diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 325bc429dda4..309e3e713ea1 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -58,7 +58,7 @@ #include "fc_libfc.h" -struct workqueue_struct *rport_event_queue; +static struct workqueue_struct *rport_event_queue; static void fc_rport_enter_flogi(struct fc_rport_priv *); static void fc_rport_enter_plogi(struct fc_rport_priv *); @@ -1890,7 +1890,7 @@ EXPORT_SYMBOL(fc_rport_init); /** * fc_setup_rport() - Initialize the rport_event_queue */ -int fc_setup_rport() +int fc_setup_rport(void) { rport_event_queue = create_singlethread_workqueue("fc_rport_eq"); if (!rport_event_queue) @@ -1901,7 +1901,7 @@ int fc_setup_rport() /** * fc_destroy_rport() - Destroy the rport_event_queue */ -void fc_destroy_rport() +void fc_destroy_rport(void) { destroy_workqueue(rport_event_queue); } -- cgit v1.2.3 From 96ad846445ae33dcae1805b68752e3d5c840e3ed Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Fri, 28 Jan 2011 16:04:02 -0800 Subject: [SCSI] libfc: add hook for FC-4 provider registration Allow FC-4 provider modules to hook into libfc, mostly for targets. This should allow any FC-4 module to handle PRLI requests and maintain process-association states. Each provider registers its ops with libfc and then will be called for any incoming PRLI for that FC-4 type on any instance. 
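A hedged sketch of what a provider module might look like against this interface (see struct fc4_prov's ops and fc_fc4_register_provider() in the diff below); the example names and the trivial op bodies are illustrative only, not part of the patch.

#include <linux/module.h>
#include <scsi/libfc.h>

static void example_prov_recv(struct fc_lport *lport, struct fc_frame *fp)
{
	/* incoming request of our FC-4 type; a real provider would parse
	 * the frame and send a response before releasing it */
	fc_frame_free(fp);
}

static int example_prov_prli(struct fc_rport_priv *rdata, u32 spp_len,
			     const struct fc_els_spp *rspp,
			     struct fc_els_spp *spp)
{
	return FC_SPP_RESP_ACK;		/* accept the process login */
}

static void example_prov_prlo(struct fc_rport_priv *rdata)
{
	/* remote port is going away: tear down any session state */
}

static struct fc4_prov example_prov = {
	.module	= THIS_MODULE,
	.recv	= example_prov_recv,
	.prli	= example_prov_prli,
	.prlo	= example_prov_prlo,
};

static int __init example_prov_init(void)
{
	return fc_fc4_register_provider(FC_TYPE_FCP, &example_prov);
}

static void __exit example_prov_exit(void)
{
	fc_fc4_deregister_provider(FC_TYPE_FCP, &example_prov);
}

Because the example sets .recv, registration lands in the passive table, which is what a target-side provider would want; a purely initiating provider would omit .recv and be placed in the active table.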
The provider can decide whether to handle that particular instance using any method it likes, such as ACLs or other configuration information. A count is kept of the number of successful PRLIs from the remote port. Providers are called back with an implicit PRLO when the remote port is about to be deleted or has been reset. fc_lport_recv_req() now sends incoming FC-4 requests to FC-4 providers, and there is a built-in provider always registered for handling incoming ELS requests. The call to provider recv() routines uses rcu_read_lock() so that providers aren't removed during the call. That lock is very cheap and shouldn't affect any performance on ELS requests. Providers can rely on the RCU lock to protect a session lookup as well. Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_libfc.c | 60 +++++++++++++++++++ drivers/scsi/libfc/fc_libfc.h | 11 ++++ drivers/scsi/libfc/fc_lport.c | 65 ++++++++++++++++++--- drivers/scsi/libfc/fc_rport.c | 133 +++++++++++++++++++++++++++++++++--------- 4 files changed, 233 insertions(+), 36 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/libfc/fc_libfc.c b/drivers/scsi/libfc/fc_libfc.c index 6a48c28e4420..ae3abef6523e 100644 --- a/drivers/scsi/libfc/fc_libfc.c +++ b/drivers/scsi/libfc/fc_libfc.c @@ -35,6 +35,23 @@ unsigned int fc_debug_logging; module_param_named(debug_logging, fc_debug_logging, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); +DEFINE_MUTEX(fc_prov_mutex); + +/* + * Providers which primarily send requests and PRLIs. + */ +struct fc4_prov *fc_active_prov[FC_FC4_PROV_SIZE] = { + [0] = &fc_rport_t0_prov, + [FC_TYPE_FCP] = &fc_rport_fcp_init, +}; + +/* + * Providers which receive requests. + */ +struct fc4_prov *fc_passive_prov[FC_FC4_PROV_SIZE] = { + [FC_TYPE_ELS] = &fc_lport_els_prov, +}; + /** * libfc_init() - Initialize libfc.ko */ @@ -210,3 +227,46 @@ void fc_fill_reply_hdr(struct fc_frame *fp, const struct fc_frame *in_fp, fc_fill_hdr(fp, in_fp, r_ctl, FC_FCTL_RESP, 0, parm_offset); } EXPORT_SYMBOL(fc_fill_reply_hdr); + +/** + * fc_fc4_register_provider() - register FC-4 upper-level provider. + * @type: FC-4 type, such as FC_TYPE_FCP + * @prov: structure describing provider including ops vector. + * + * Returns 0 on success, negative error otherwise. + */ +int fc_fc4_register_provider(enum fc_fh_type type, struct fc4_prov *prov) +{ + struct fc4_prov **prov_entry; + int ret = 0; + + if (type >= FC_FC4_PROV_SIZE) + return -EINVAL; + mutex_lock(&fc_prov_mutex); + prov_entry = (prov->recv ? fc_passive_prov : fc_active_prov) + type; + if (*prov_entry) + ret = -EBUSY; + else + *prov_entry = prov; + mutex_unlock(&fc_prov_mutex); + return ret; +} +EXPORT_SYMBOL(fc_fc4_register_provider); + +/** + * fc_fc4_deregister_provider() - deregister FC-4 upper-level provider. + * @type: FC-4 type, such as FC_TYPE_FCP + * @prov: structure describing provider including ops vector. 
+ */ +void fc_fc4_deregister_provider(enum fc_fh_type type, struct fc4_prov *prov) +{ + BUG_ON(type >= FC_FC4_PROV_SIZE); + mutex_lock(&fc_prov_mutex); + if (prov->recv) + rcu_assign_pointer(fc_passive_prov[type], NULL); + else + rcu_assign_pointer(fc_active_prov[type], NULL); + mutex_unlock(&fc_prov_mutex); + synchronize_rcu(); +} +EXPORT_SYMBOL(fc_fc4_deregister_provider); diff --git a/drivers/scsi/libfc/fc_libfc.h b/drivers/scsi/libfc/fc_libfc.h index eea0c3541b71..205de285e456 100644 --- a/drivers/scsi/libfc/fc_libfc.h +++ b/drivers/scsi/libfc/fc_libfc.h @@ -93,6 +93,17 @@ extern unsigned int fc_debug_logging; printk(KERN_INFO "host%u: scsi: " fmt, \ (lport)->host->host_no, ##args)) +/* + * FC-4 Providers. + */ +extern struct fc4_prov *fc_active_prov[]; /* providers without recv */ +extern struct fc4_prov *fc_passive_prov[]; /* providers with recv */ +extern struct mutex fc_prov_mutex; /* lock over table changes */ + +extern struct fc4_prov fc_rport_t0_prov; /* type 0 provider */ +extern struct fc4_prov fc_lport_els_prov; /* ELS provider */ +extern struct fc4_prov fc_rport_fcp_init; /* FCP initiator provider */ + /* * Set up direct-data placement for this I/O request */ diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index c5a10f94f845..e2cd087e71b2 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -849,7 +849,7 @@ out: } /** - * fc_lport_recv_req() - The generic lport request handler + * fc_lport_recv_els_req() - The generic lport ELS request handler * @lport: The local port that received the request * @fp: The request frame * @@ -859,9 +859,9 @@ out: * Locking Note: This function should not be called with the lport * lock held becuase it will grab the lock. */ -static void fc_lport_recv_req(struct fc_lport *lport, struct fc_frame *fp) +static void fc_lport_recv_els_req(struct fc_lport *lport, + struct fc_frame *fp) { - struct fc_frame_header *fh = fc_frame_header_get(fp); void (*recv)(struct fc_lport *, struct fc_frame *); mutex_lock(&lport->lp_mutex); @@ -873,8 +873,7 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_frame *fp) */ if (!lport->link_up) fc_frame_free(fp); - else if (fh->fh_type == FC_TYPE_ELS && - fh->fh_r_ctl == FC_RCTL_ELS_REQ) { + else { /* * Check opcode. */ @@ -903,14 +902,62 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_frame *fp) } recv(lport, fp); - } else { - FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)\n", - fr_eof(fp)); - fc_frame_free(fp); } mutex_unlock(&lport->lp_mutex); } +static int fc_lport_els_prli(struct fc_rport_priv *rdata, u32 spp_len, + const struct fc_els_spp *spp_in, + struct fc_els_spp *spp_out) +{ + return FC_SPP_RESP_INVL; +} + +struct fc4_prov fc_lport_els_prov = { + .prli = fc_lport_els_prli, + .recv = fc_lport_recv_els_req, +}; + +/** + * fc_lport_recv_req() - The generic lport request handler + * @lport: The lport that received the request + * @fp: The frame the request is in + * + * Locking Note: This function should not be called with the lport + * lock held becuase it may grab the lock. + */ +static void fc_lport_recv_req(struct fc_lport *lport, + struct fc_frame *fp) +{ + struct fc_frame_header *fh = fc_frame_header_get(fp); + struct fc_seq *sp = fr_seq(fp); + struct fc4_prov *prov; + + /* + * Use RCU read lock and module_lock to be sure module doesn't + * deregister and get unloaded while we're calling it. + * try_module_get() is inlined and accepts a NULL parameter. + * Only ELSes and FCP target ops should come through here. 
+ * The locking is unfortunate, and a better scheme is being sought. + */ + + rcu_read_lock(); + if (fh->fh_type >= FC_FC4_PROV_SIZE) + goto drop; + prov = rcu_dereference(fc_passive_prov[fh->fh_type]); + if (!prov || !try_module_get(prov->module)) + goto drop; + rcu_read_unlock(); + prov->recv(lport, fp); + module_put(prov->module); + return; +drop: + rcu_read_unlock(); + FC_LPORT_DBG(lport, "dropping unexpected frame type %x\n", fh->fh_type); + fc_frame_free(fp); + lport->tt.exch_done(sp); +} + /** * fc_lport_reset() - Reset a local port * @lport: The local port which should be reset diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 309e3e713ea1..a92954c1f42f 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -257,6 +257,8 @@ static void fc_rport_work(struct work_struct *work) struct fc_rport_operations *rport_ops; struct fc_rport_identifiers ids; struct fc_rport *rport; + struct fc4_prov *prov; + u8 type; mutex_lock(&rdata->rp_mutex); event = rdata->event; @@ -306,6 +308,15 @@ static void fc_rport_work(struct work_struct *work) case RPORT_EV_FAILED: case RPORT_EV_LOGO: case RPORT_EV_STOP: + if (rdata->prli_count) { + mutex_lock(&fc_prov_mutex); + for (type = 1; type < FC_FC4_PROV_SIZE; type++) { + prov = fc_passive_prov[type]; + if (prov && prov->prlo) + prov->prlo(rdata); + } + mutex_unlock(&fc_prov_mutex); + } port_id = rdata->ids.port_id; mutex_unlock(&rdata->rp_mutex); @@ -1643,9 +1654,9 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata, unsigned int len; unsigned int plen; enum fc_els_spp_resp resp; + enum fc_els_spp_resp passive; struct fc_seq_els_data rjt_data; - u32 fcp_parm; - u32 roles = FC_RPORT_ROLE_UNKNOWN; + struct fc4_prov *prov; FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n", fc_rport_state(rdata)); @@ -1679,46 +1690,41 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata, pp->prli.prli_len = htons(len); len -= sizeof(struct fc_els_prli); - /* reinitialize remote port roles */ - rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN; - /* * Go through all the service parameter pages and build * response. If plen indicates longer SPP than standard, * use that. The entire response has been pre-cleared above. 
*/ spp = &pp->spp; + mutex_lock(&fc_prov_mutex); while (len >= plen) { spp->spp_type = rspp->spp_type; spp->spp_type_ext = rspp->spp_type_ext; - spp->spp_flags = rspp->spp_flags & FC_SPP_EST_IMG_PAIR; - resp = FC_SPP_RESP_ACK; - - switch (rspp->spp_type) { - case 0: /* common to all FC-4 types */ - break; - case FC_TYPE_FCP: - fcp_parm = ntohl(rspp->spp_params); - if (fcp_parm & FCP_SPPF_RETRY) - rdata->flags |= FC_RP_FLAGS_RETRY; - rdata->supported_classes = FC_COS_CLASS3; - if (fcp_parm & FCP_SPPF_INIT_FCN) - roles |= FC_RPORT_ROLE_FCP_INITIATOR; - if (fcp_parm & FCP_SPPF_TARG_FCN) - roles |= FC_RPORT_ROLE_FCP_TARGET; - rdata->ids.roles = roles; - - spp->spp_params = htonl(lport->service_params); - break; - default: - resp = FC_SPP_RESP_INVL; - break; + resp = 0; + + if (rspp->spp_type < FC_FC4_PROV_SIZE) { + prov = fc_active_prov[rspp->spp_type]; + if (prov) + resp = prov->prli(rdata, plen, rspp, spp); + prov = fc_passive_prov[rspp->spp_type]; + if (prov) { + passive = prov->prli(rdata, plen, rspp, spp); + if (!resp || passive == FC_SPP_RESP_ACK) + resp = passive; + } + } + if (!resp) { + if (spp->spp_flags & FC_SPP_EST_IMG_PAIR) + resp |= FC_SPP_RESP_CONF; + else + resp |= FC_SPP_RESP_INVL; } spp->spp_flags |= resp; len -= plen; rspp = (struct fc_els_spp *)((char *)rspp + plen); spp = (struct fc_els_spp *)((char *)spp + plen); } + mutex_unlock(&fc_prov_mutex); /* * Send LS_ACC. If this fails, the originator should retry. @@ -1887,6 +1893,79 @@ int fc_rport_init(struct fc_lport *lport) } EXPORT_SYMBOL(fc_rport_init); +/** + * fc_rport_fcp_prli() - Handle incoming PRLI for the FCP initiator. + * @rdata: remote port private + * @spp_len: service parameter page length + * @rspp: received service parameter page + * @spp: response service parameter page + * + * Returns the value for the response code to be placed in spp_flags; + * Returns 0 if not an initiator. + */ +static int fc_rport_fcp_prli(struct fc_rport_priv *rdata, u32 spp_len, + const struct fc_els_spp *rspp, + struct fc_els_spp *spp) +{ + struct fc_lport *lport = rdata->local_port; + u32 fcp_parm; + + fcp_parm = ntohl(rspp->spp_params); + rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN; + if (fcp_parm & FCP_SPPF_INIT_FCN) + rdata->ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR; + if (fcp_parm & FCP_SPPF_TARG_FCN) + rdata->ids.roles |= FC_RPORT_ROLE_FCP_TARGET; + if (fcp_parm & FCP_SPPF_RETRY) + rdata->flags |= FC_RP_FLAGS_RETRY; + rdata->supported_classes = FC_COS_CLASS3; + + if (!(lport->service_params & FC_RPORT_ROLE_FCP_INITIATOR)) + return 0; + + spp->spp_flags |= rspp->spp_flags & FC_SPP_EST_IMG_PAIR; + + /* + * OR in our service parameters with other providers (target), if any. + */ + fcp_parm = ntohl(spp->spp_params); + spp->spp_params = htonl(fcp_parm | lport->service_params); + return FC_SPP_RESP_ACK; +} + +/* + * FC-4 provider ops for FCP initiator. + */ +struct fc4_prov fc_rport_fcp_init = { + .prli = fc_rport_fcp_prli, +}; + +/** + * fc_rport_t0_prli() - Handle incoming PRLI parameters for type 0 + * @rdata: remote port private + * @spp_len: service parameter page length + * @rspp: received service parameter page + * @spp: response service parameter page + */ +static int fc_rport_t0_prli(struct fc_rport_priv *rdata, u32 spp_len, + const struct fc_els_spp *rspp, + struct fc_els_spp *spp) +{ + if (rspp->spp_flags & FC_SPP_EST_IMG_PAIR) + return FC_SPP_RESP_INVL; + return FC_SPP_RESP_ACK; +} + +/* + * FC-4 provider ops for type 0 service parameters. 
+ * + * This handles the special case of type 0 which is always successful + * but doesn't do anything otherwise. + */ +struct fc4_prov fc_rport_t0_prov = { + .prli = fc_rport_t0_prli, +}; + /** * fc_setup_rport() - Initialize the rport_event_queue */ -- cgit v1.2.3 From 1a5c2d7e5c8ef239804cb08b68363e0cd2f74a3d Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Fri, 28 Jan 2011 16:04:08 -0800 Subject: [SCSI] libfc: add method for setting handler for incoming exchange Add a method for setting handler for incoming exchange. For multi-sequence exchanges, this allows the target driver to add a response handler for handling subsequent sequences, and exchange manager resets. The new function is called fc_seq_set_resp(). Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_exch.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) (limited to 'drivers') diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index e0b5b15c355b..1f124c0351b4 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -558,6 +558,22 @@ static struct fc_seq *fc_seq_start_next(struct fc_seq *sp) return sp; } +/* + * Set the response handler for the exchange associated with a sequence. + */ +static void fc_seq_set_resp(struct fc_seq *sp, + void (*resp)(struct fc_seq *, struct fc_frame *, + void *), + void *arg) +{ + struct fc_exch *ep = fc_seq_exch(sp); + + spin_lock_bh(&ep->ex_lock); + ep->resp = resp; + ep->arg = arg; + spin_unlock_bh(&ep->ex_lock); +} + /** * fc_seq_exch_abort() - Abort an exchange and sequence * @req_sp: The sequence to be aborted @@ -2329,6 +2345,9 @@ int fc_exch_init(struct fc_lport *lport) if (!lport->tt.seq_start_next) lport->tt.seq_start_next = fc_seq_start_next; + if (!lport->tt.seq_set_resp) + lport->tt.seq_set_resp = fc_seq_set_resp; + if (!lport->tt.exch_seq_send) lport->tt.exch_seq_send = fc_exch_seq_send; -- cgit v1.2.3 From 70d53b046a6221e3ceb3bd8eaa807ef6a1c53762 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Fri, 28 Jan 2011 16:04:18 -0800 Subject: [SCSI] libfc: add hook to notify providers of local port changes When an SCST provider is registered, it needs to know what local ports are available for configuration as targets. Add a notifier chain that is invoked when any local port that is added or deleted. Maintain a global list of local ports and add an interator function that calls a given function for every existing local port. This is used when first loading a provider. Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_libfc.c | 41 +++++++++++++++++++++++++++++++++++++++++ drivers/scsi/libfc/fc_libfc.h | 2 ++ drivers/scsi/libfc/fc_lport.c | 2 ++ 3 files changed, 45 insertions(+) (limited to 'drivers') diff --git a/drivers/scsi/libfc/fc_libfc.c b/drivers/scsi/libfc/fc_libfc.c index ae3abef6523e..5e40dab8f919 100644 --- a/drivers/scsi/libfc/fc_libfc.c +++ b/drivers/scsi/libfc/fc_libfc.c @@ -36,6 +36,10 @@ module_param_named(debug_logging, fc_debug_logging, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); DEFINE_MUTEX(fc_prov_mutex); +static LIST_HEAD(fc_local_ports); +struct blocking_notifier_head fc_lport_notifier_head = + BLOCKING_NOTIFIER_INIT(fc_lport_notifier_head); +EXPORT_SYMBOL(fc_lport_notifier_head); /* * Providers which primarily send requests and PRLIs. 
@@ -228,6 +232,17 @@ void fc_fill_reply_hdr(struct fc_frame *fp, const struct fc_frame *in_fp, } EXPORT_SYMBOL(fc_fill_reply_hdr); +void fc_lport_iterate(void (*notify)(struct fc_lport *, void *), void *arg) +{ + struct fc_lport *lport; + + mutex_lock(&fc_prov_mutex); + list_for_each_entry(lport, &fc_local_ports, lport_list) + notify(lport, arg); + mutex_unlock(&fc_prov_mutex); +} +EXPORT_SYMBOL(fc_lport_iterate); + /** * fc_fc4_register_provider() - register FC-4 upper-level provider. * @type: FC-4 type, such as FC_TYPE_FCP @@ -270,3 +285,29 @@ void fc_fc4_deregister_provider(enum fc_fh_type type, struct fc4_prov *prov) synchronize_rcu(); } EXPORT_SYMBOL(fc_fc4_deregister_provider); + +/** + * fc_fc4_add_lport() - add new local port to list and run notifiers. + * @lport: The new local port. + */ +void fc_fc4_add_lport(struct fc_lport *lport) +{ + mutex_lock(&fc_prov_mutex); + list_add_tail(&lport->lport_list, &fc_local_ports); + blocking_notifier_call_chain(&fc_lport_notifier_head, + FC_LPORT_EV_ADD, lport); + mutex_unlock(&fc_prov_mutex); +} + +/** + * fc_fc4_del_lport() - remove local port from list and run notifiers. + * @lport: The new local port. + */ +void fc_fc4_del_lport(struct fc_lport *lport) +{ + mutex_lock(&fc_prov_mutex); + list_del(&lport->lport_list); + blocking_notifier_call_chain(&fc_lport_notifier_head, + FC_LPORT_EV_DEL, lport); + mutex_unlock(&fc_prov_mutex); +} diff --git a/drivers/scsi/libfc/fc_libfc.h b/drivers/scsi/libfc/fc_libfc.h index 205de285e456..8496f7020b97 100644 --- a/drivers/scsi/libfc/fc_libfc.h +++ b/drivers/scsi/libfc/fc_libfc.h @@ -123,6 +123,8 @@ void fc_destroy_fcp(void); * Internal libfc functions */ const char *fc_els_resp_type(struct fc_frame *); +extern void fc_fc4_add_lport(struct fc_lport *); +extern void fc_fc4_del_lport(struct fc_lport *); /* * Copies a buffer into an sg list diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index e2cd087e71b2..e0ef81426c33 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -633,6 +633,7 @@ int fc_lport_destroy(struct fc_lport *lport) lport->tt.fcp_abort_io(lport); lport->tt.disc_stop_final(lport); lport->tt.exch_mgr_reset(lport, 0, 0); + fc_fc4_del_lport(lport); return 0; } EXPORT_SYMBOL(fc_lport_destroy); @@ -1633,6 +1634,7 @@ int fc_lport_init(struct fc_lport *lport) fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_1GBIT; if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT) fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT; + fc_fc4_add_lport(lport); return 0; } -- cgit v1.2.3 From 925cedae2b223d44d59a02df1b35902fc8bdd6d2 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Fri, 28 Jan 2011 16:04:23 -0800 Subject: [SCSI] libfc: use PRLI hook to get parameters when sending outgoing PRLI When sending an outgoing PRLI as an initiator, get the parameters from registered providers so that they all get a chance to decide on roles. The passive provider is called last, and could override the initiator role. 
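A hedged sketch of a passive provider's prli op under this scheme: for the outgoing PRLI it is invoked with a NULL received-SPP pointer (see fc_rport_enter_prli() in the diff below), so it can OR its own service parameters into the page before it is sent. The provider name is hypothetical; FCP_SPPF_TARG_FCN is the standard FCP SPP target-function bit.

static int example_tgt_prli(struct fc_rport_priv *rdata, u32 spp_len,
			    const struct fc_els_spp *rspp,
			    struct fc_els_spp *spp)
{
	if (!rspp) {
		/* outgoing PRLI: advertise the target function as well */
		spp->spp_params |= htonl(FCP_SPPF_TARG_FCN);
		return FC_SPP_RESP_ACK;
	}

	/* incoming PRLI: examine ntohl(rspp->spp_params) and decide */
	return FC_SPP_RESP_ACK;
}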
Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_rport.c | 27 ++++++++++++++++++++++++--- 1 file changed, 24 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index a92954c1f42f..9ded6123ff6d 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -961,6 +961,8 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, struct fc_els_prli prli; struct fc_els_spp spp; } *pp; + struct fc_els_spp temp_spp; + struct fc4_prov *prov; u32 roles = FC_RPORT_ROLE_UNKNOWN; u32 fcp_parm = 0; u8 op; @@ -1009,6 +1011,13 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, if (fcp_parm & FCP_SPPF_RETRY) rdata->flags |= FC_RP_FLAGS_RETRY; + prov = fc_passive_prov[FC_TYPE_FCP]; + if (prov) { + memset(&temp_spp, 0, sizeof(temp_spp)); + prov->prli(rdata, pp->prli.prli_spp_len, + &pp->spp, &temp_spp); + } + rdata->supported_classes = FC_COS_CLASS3; if (fcp_parm & FCP_SPPF_INIT_FCN) roles |= FC_RPORT_ROLE_FCP_INITIATOR; @@ -1045,6 +1054,7 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata) struct fc_els_spp spp; } *pp; struct fc_frame *fp; + struct fc4_prov *prov; /* * If the rport is one of the well known addresses @@ -1066,9 +1076,20 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata) return; } - if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PRLI, - fc_rport_prli_resp, rdata, - 2 * lport->r_a_tov)) + fc_prli_fill(lport, fp); + + prov = fc_passive_prov[FC_TYPE_FCP]; + if (prov) { + pp = fc_frame_payload_get(fp, sizeof(*pp)); + prov->prli(rdata, sizeof(pp->spp), NULL, &pp->spp); + } + + fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rdata->ids.port_id, + fc_host_port_id(lport->host), FC_TYPE_ELS, + FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); + + if (!lport->tt.exch_seq_send(lport, fp, fc_rport_prli_resp, + NULL, rdata, 2 * lport->r_a_tov)) fc_rport_error_retry(rdata, NULL); else kref_get(&rdata->kref); -- cgit v1.2.3 From 04885b16a1ec86b4670702b99a81805e66bf9f30 Mon Sep 17 00:00:00 2001 From: Robert Love Date: Fri, 28 Jan 2011 16:04:29 -0800 Subject: [SCSI] libfc: Remove usage of the Scsi_Host's host_lock This patch removes the use of the Scsi_Host's host_lock within fc_queuecommand. It also removes the DEF_SCSI_QCMD usage so that libfc has fully moved on to the new queuecommand interface. Signed-off-by: Robert Love Tested-by: Ross Brattain Reviewed-by: Nicholas A. Bellinger Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_fcp.c | 20 ++++++-------------- 1 file changed, 6 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index 383a98288d2e..b1b03af158bf 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -1789,15 +1789,14 @@ static inline int fc_fcp_lport_queue_ready(struct fc_lport *lport) /** * fc_queuecommand() - The queuecommand function of the SCSI template + * @shost: The Scsi_Host that the command was issued to * @cmd: The scsi_cmnd to be executed - * @done: The callback function to be called when the scsi_cmnd is complete * - * This is the i/o strategy routine, called by the SCSI layer. This routine - * is called with the host_lock held. + * This is the i/o strategy routine, called by the SCSI layer. 
*/ -static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)) +int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd) { - struct fc_lport *lport; + struct fc_lport *lport = shost_priv(shost); struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); struct fc_fcp_pkt *fsp; struct fc_rport_libfc_priv *rpriv; @@ -1805,15 +1804,12 @@ static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scs int rc = 0; struct fcoe_dev_stats *stats; - lport = shost_priv(sc_cmd->device->host); - rval = fc_remote_port_chkready(rport); if (rval) { sc_cmd->result = rval; - done(sc_cmd); + sc_cmd->scsi_done(sc_cmd); return 0; } - spin_unlock_irq(lport->host->host_lock); if (!*(struct fc_remote_port **)rport->dd_data) { /* @@ -1821,7 +1817,7 @@ static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scs * online */ sc_cmd->result = DID_IMM_RETRY << 16; - done(sc_cmd); + sc_cmd->scsi_done(sc_cmd); goto out; } @@ -1845,7 +1841,6 @@ static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scs */ fsp->cmd = sc_cmd; /* save the cmd */ fsp->rport = rport; /* set the remote port ptr */ - sc_cmd->scsi_done = done; /* * set up the transfer length @@ -1886,11 +1881,8 @@ static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scs rc = SCSI_MLQUEUE_HOST_BUSY; } out: - spin_lock_irq(lport->host->host_lock); return rc; } - -DEF_SCSI_QCMD(fc_queuecommand) EXPORT_SYMBOL(fc_queuecommand); /** -- cgit v1.2.3 From 62bdb6455e8326f864ae1b43b4c4db7f630edc1c Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Fri, 28 Jan 2011 16:04:34 -0800 Subject: [SCSI] libfc: export seq_release() for users of seq_assign() Target modules using lport->tt.seq_assign() get a hold on the exchange but have no way of releasing it. Add that. Signed-off-by: Joe Eykholt Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_exch.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'drivers') diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 1f124c0351b4..a3d640289dcc 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -1282,6 +1282,8 @@ free: * @fp: The request frame * * On success, the sequence pointer will be returned and also in fr_seq(@fp). + * A reference will be held on the exchange/sequence for the caller, which + * must call fc_seq_release(). */ static struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp) { @@ -1298,6 +1300,15 @@ static struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp) return fr_seq(fp); } +/** + * fc_seq_release() - Release the hold + * @sp: The sequence. + */ +static void fc_seq_release(struct fc_seq *sp) +{ + fc_exch_release(fc_seq_exch(sp)); +} + /** * fc_exch_recv_req() - Handler for an incoming request * @lport: The local port that received the request @@ -2369,6 +2380,9 @@ int fc_exch_init(struct fc_lport *lport) if (!lport->tt.seq_assign) lport->tt.seq_assign = fc_seq_assign; + if (!lport->tt.seq_release) + lport->tt.seq_release = fc_seq_release; + return 0; } EXPORT_SYMBOL(fc_exch_init); -- cgit v1.2.3 From 6c8cc1c003cee1c4290f5d8c684912d60354056a Mon Sep 17 00:00:00 2001 From: Kiran Patil Date: Fri, 28 Jan 2011 16:04:39 -0800 Subject: [SCSI] libfc: Enhanced exchange ID selection mechanism and fix related EMA selection logic. Problem: In case of exchange responder case, EMA selection was defaulted to the last EMA from EMA list (lport.ema_list). 
If exchange ID is selected from offload pool and not setup DDP, resulting into incorrect selection of EMA, and eventually dropping the packet because unable to find exchange. Fix: Enhanced the exchange ID selection (depending upon request type and exchange responder) Made necessary enhancement in EMA selection algorithm. Signed-off-by: Kiran Patil Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_exch.c | 64 +++++++++++++++++++++++++++++--------------- 1 file changed, 43 insertions(+), 21 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index a3d640289dcc..08bf5fa6afd4 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -2280,17 +2280,46 @@ void fc_exch_mgr_free(struct fc_lport *lport) } EXPORT_SYMBOL(fc_exch_mgr_free); +/** + * fc_find_ema() - Lookup and return appropriate Exchange Manager Anchor depending + * upon 'xid'. + * @f_ctl: f_ctl + * @lport: The local port the frame was received on + * @fh: The received frame header + */ +static struct fc_exch_mgr_anchor *fc_find_ema(u32 f_ctl, + struct fc_lport *lport, + struct fc_frame_header *fh) +{ + struct fc_exch_mgr_anchor *ema; + u16 xid; + + if (f_ctl & FC_FC_EX_CTX) + xid = ntohs(fh->fh_ox_id); + else { + xid = ntohs(fh->fh_rx_id); + if (xid == FC_XID_UNKNOWN) + return list_entry(lport->ema_list.prev, + typeof(*ema), ema_list); + } + + list_for_each_entry(ema, &lport->ema_list, ema_list) { + if ((xid >= ema->mp->min_xid) && + (xid <= ema->mp->max_xid)) + return ema; + } + return NULL; +} /** * fc_exch_recv() - Handler for received frames * @lport: The local port the frame was received on - * @fp: The received frame + * @fp: The received frame */ void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp) { struct fc_frame_header *fh = fc_frame_header_get(fp); struct fc_exch_mgr_anchor *ema; - u32 f_ctl, found = 0; - u16 oxid; + u32 f_ctl; /* lport lock ? */ if (!lport || lport->state == LPORT_ST_DISABLED) { @@ -2301,24 +2330,17 @@ void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp) } f_ctl = ntoh24(fh->fh_f_ctl); - oxid = ntohs(fh->fh_ox_id); - if (f_ctl & FC_FC_EX_CTX) { - list_for_each_entry(ema, &lport->ema_list, ema_list) { - if ((oxid >= ema->mp->min_xid) && - (oxid <= ema->mp->max_xid)) { - found = 1; - break; - } - } - - if (!found) { - FC_LPORT_DBG(lport, "Received response for out " - "of range oxid:%hx\n", oxid); - fc_frame_free(fp); - return; - } - } else - ema = list_entry(lport->ema_list.prev, typeof(*ema), ema_list); + ema = fc_find_ema(f_ctl, lport, fh); + if (!ema) { + FC_LPORT_DBG(lport, "Unable to find Exchange Manager Anchor," + "fc_ctl <0x%x>, xid <0x%x>\n", + f_ctl, + (f_ctl & FC_FC_EX_CTX) ? + ntohs(fh->fh_ox_id) : + ntohs(fh->fh_rx_id)); + fc_frame_free(fp); + return; + } /* * If frame is marked invalid, just drop it. -- cgit v1.2.3 From 21b7b2f557f4b105a4b7d739671d1ce6b867d3e6 Mon Sep 17 00:00:00 2001 From: Yi Zou Date: Fri, 28 Jan 2011 16:04:45 -0800 Subject: [SCSI] libfcoe: move logging macros into the local libfcoe.h header file libfcoe kernel module debug macros will used by the fcoe transport code as well when later it gets added. 
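A usage sketch, with a hypothetical caller, of the shared macros once the new local header below is included. The messages are always compiled in but only printed when the libfcoe debug_logging module parameter has the matching bit set (0x01 general, 0x02 FIP).

#include <linux/netdevice.h>
#include "libfcoe.h"	/* shared debug macros added by this patch */

static void example_transport_probe(struct net_device *netdev)
{
	/* general (0x01) debug message, gated by debug_logging */
	LIBFCOE_DBG("probing FCoE support on %s\n", netdev->name);
}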
Signed-off-by: Yi Zou Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/libfcoe.c | 22 ++-------------------- drivers/scsi/fcoe/libfcoe.h | 25 +++++++++++++++++++++++++ 2 files changed, 27 insertions(+), 20 deletions(-) create mode 100644 drivers/scsi/fcoe/libfcoe.h (limited to 'drivers') diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c index 625c6be25396..a4757ca2a4ed 100644 --- a/drivers/scsi/fcoe/libfcoe.c +++ b/drivers/scsi/fcoe/libfcoe.c @@ -44,6 +44,8 @@ #include #include +#include "libfcoe.h" + MODULE_AUTHOR("Open-FCoE.org"); MODULE_DESCRIPTION("FIP discovery protocol support for FCoE HBAs"); MODULE_LICENSE("GPL v2"); @@ -70,26 +72,6 @@ unsigned int libfcoe_debug_logging; module_param_named(debug_logging, libfcoe_debug_logging, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); -#define LIBFCOE_LOGGING 0x01 /* General logging, not categorized */ -#define LIBFCOE_FIP_LOGGING 0x02 /* FIP logging */ - -#define LIBFCOE_CHECK_LOGGING(LEVEL, CMD) \ -do { \ - if (unlikely(libfcoe_debug_logging & LEVEL)) \ - do { \ - CMD; \ - } while (0); \ -} while (0) - -#define LIBFCOE_DBG(fmt, args...) \ - LIBFCOE_CHECK_LOGGING(LIBFCOE_LOGGING, \ - printk(KERN_INFO "libfcoe: " fmt, ##args);) - -#define LIBFCOE_FIP_DBG(fip, fmt, args...) \ - LIBFCOE_CHECK_LOGGING(LIBFCOE_FIP_LOGGING, \ - printk(KERN_INFO "host%d: fip: " fmt, \ - (fip)->lp->host->host_no, ##args);) - static const char *fcoe_ctlr_states[] = { [FIP_ST_DISABLED] = "DISABLED", [FIP_ST_LINK_WAIT] = "LINK_WAIT", diff --git a/drivers/scsi/fcoe/libfcoe.h b/drivers/scsi/fcoe/libfcoe.h new file mode 100644 index 000000000000..c3fe3167ff71 --- /dev/null +++ b/drivers/scsi/fcoe/libfcoe.h @@ -0,0 +1,25 @@ +#ifndef _FCOE_LIBFCOE_H_ +#define _FCOE_LIBFCOE_H_ + +extern unsigned int libfcoe_debug_logging; +#define LIBFCOE_LOGGING 0x01 /* General logging, not categorized */ +#define LIBFCOE_FIP_LOGGING 0x02 /* FIP logging */ + +#define LIBFCOE_CHECK_LOGGING(LEVEL, CMD) \ +do { \ + if (unlikely(libfcoe_debug_logging & LEVEL)) \ + do { \ + CMD; \ + } while (0); \ +} while (0) + +#define LIBFCOE_DBG(fmt, args...) \ + LIBFCOE_CHECK_LOGGING(LIBFCOE_LOGGING, \ + printk(KERN_INFO "libfcoe: " fmt, ##args);) + +#define LIBFCOE_FIP_DBG(fip, fmt, args...) \ + LIBFCOE_CHECK_LOGGING(LIBFCOE_FIP_LOGGING, \ + printk(KERN_INFO "host%d: fip: " fmt, \ + (fip)->lp->host->host_no, ##args);) + +#endif /* _FCOE_LIBFCOE_H_ */ -- cgit v1.2.3 From fdecf31b7c7b9bb79516344884388f660fd5d5a7 Mon Sep 17 00:00:00 2001 From: Yi Zou Date: Fri, 28 Jan 2011 16:04:55 -0800 Subject: [SCSI] libfcoe: add implementation to support fcoe transport Add the new fcoe_transport.c file that implements basic fcoe transport interface. Eventually, the sysfs entries to create/destroy/enable/disable an FCoE instance will be coming to the fcoe transport layer, who does a look-up to find the corresponding transport provide and pass the corresponding action over to the identified provider. The fcoe.ko will become the default fcoe transport provider that can support FCoE on any given netdev interfaces, as the Open-FCoE.org's default software FCoE HBA solution. Any vendor specific FCoE HBA driver that is built on top of Open-FCoE's kernel stack of libfc & libfcoe as well as the user land tool of fcoe-utils can easily plug-in and start running FCoE on their network interfaces. 
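A hedged sketch of how such a vendor transport might plug in, using only the attach/detach calls and the .name/.match members visible in the diff below; the vendor names and the match policy are made up, and the includes assume struct fcoe_transport and the prototypes are exposed through the libfcoe headers.

#include <linux/module.h>
#include <linux/netdevice.h>
#include <scsi/libfcoe.h>

static bool example_vendor_match(struct net_device *netdev)
{
	/* claim only interfaces backed by this vendor's HBA, e.g. by
	 * inspecting netdev->dev.parent or the bound driver */
	return false;
}

static struct fcoe_transport example_vendor_transport = {
	.name	= "example_vendor",
	.match	= example_vendor_match,
};

static int __init example_vendor_init(void)
{
	return fcoe_transport_attach(&example_vendor_transport);
}

static void __exit example_vendor_exit(void)
{
	fcoe_transport_detach(&example_vendor_transport);
}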
The fcoe.ko will be converted to act as the default provider if no vendor specific transport provider is found, as it is always added to the very end of the list of attached transports. Signed-off-by: Yi Zou Signed-off-by: Bhanu Prakash Gollapudi Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe_transport.c | 518 +++++++++++++++++++++++++++++++++++++ drivers/scsi/fcoe/libfcoe.h | 6 + 2 files changed, 524 insertions(+) create mode 100644 drivers/scsi/fcoe/fcoe_transport.c (limited to 'drivers') diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c new file mode 100644 index 000000000000..41d69bedd4b4 --- /dev/null +++ b/drivers/scsi/fcoe/fcoe_transport.c @@ -0,0 +1,518 @@ +/* + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + * Maintained at www.Open-FCoE.org + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "libfcoe.h" + +static int fcoe_transport_create(const char *, struct kernel_param *); +static int fcoe_transport_destroy(const char *, struct kernel_param *); +static int fcoe_transport_show(char *buffer, const struct kernel_param *kp); +static struct fcoe_transport *fcoe_transport_lookup(struct net_device *device); +static struct fcoe_transport *fcoe_netdev_map_lookup(struct net_device *device); +static int fcoe_transport_enable(const char *, struct kernel_param *); +static int fcoe_transport_disable(const char *, struct kernel_param *); + +static LIST_HEAD(fcoe_transports); +static LIST_HEAD(fcoe_netdevs); +static DEFINE_MUTEX(ft_mutex); + +module_param_call(show, NULL, fcoe_transport_show, NULL, S_IRUSR); +__MODULE_PARM_TYPE(show, "string"); +MODULE_PARM_DESC(show, " Show attached FCoE transports"); + +module_param_call(create, fcoe_transport_create, NULL, + (void *)FIP_MODE_FABRIC, S_IWUSR); +__MODULE_PARM_TYPE(create, "string"); +MODULE_PARM_DESC(create, " Creates fcoe instance on a ethernet interface"); + +module_param_call(create_vn2vn, fcoe_transport_create, NULL, + (void *)FIP_MODE_VN2VN, S_IWUSR); +__MODULE_PARM_TYPE(create_vn2vn, "string"); +MODULE_PARM_DESC(create_vn2vn, " Creates a VN_node to VN_node FCoE instance " + "on an Ethernet interface"); + +module_param_call(destroy, fcoe_transport_destroy, NULL, NULL, S_IWUSR); +__MODULE_PARM_TYPE(destroy, "string"); +MODULE_PARM_DESC(destroy, " Destroys fcoe instance on a ethernet interface"); + +module_param_call(enable, fcoe_transport_enable, NULL, NULL, S_IWUSR); +__MODULE_PARM_TYPE(enable, "string"); +MODULE_PARM_DESC(enable, " Enables fcoe on a ethernet interface."); + +module_param_call(disable, fcoe_transport_disable, NULL, NULL, S_IWUSR); +__MODULE_PARM_TYPE(disable, "string"); +MODULE_PARM_DESC(disable, " Disables fcoe on a ethernet interface."); + +/** + * fcoe_transport_lookup - find an fcoe transport that matches a netdev + * 
@netdev: The netdev to look for from all attached transports + * + * Returns : ptr to the fcoe transport that supports this netdev or NULL + * if not found. + * + * The ft_mutex should be held when this is called + */ +static struct fcoe_transport *fcoe_transport_lookup(struct net_device *netdev) +{ + struct fcoe_transport *ft = NULL; + + list_for_each_entry(ft, &fcoe_transports, list) + if (ft->match && ft->match(netdev)) + return ft; + return NULL; +} + +/** + * fcoe_transport_attach - Attaches an FCoE transport + * @ft: The fcoe transport to be attached + * + * Returns : 0 for success + */ +int fcoe_transport_attach(struct fcoe_transport *ft) +{ + int rc = 0; + + mutex_lock(&ft_mutex); + if (ft->attached) { + LIBFCOE_TRANSPORT_DBG("transport %s already attached\n", + ft->name); + rc = -EEXIST; + goto out_attach; + } + + /* Add default transport to the tail */ + if (strcmp(ft->name, FCOE_TRANSPORT_DEFAULT)) + list_add(&ft->list, &fcoe_transports); + else + list_add_tail(&ft->list, &fcoe_transports); + + ft->attached = true; + LIBFCOE_TRANSPORT_DBG("attaching transport %s\n", ft->name); + +out_attach: + mutex_unlock(&ft_mutex); + return rc; +} +EXPORT_SYMBOL(fcoe_transport_attach); + +/** + * fcoe_transport_attach - Detaches an FCoE transport + * @ft: The fcoe transport to be attached + * + * Returns : 0 for success + */ +int fcoe_transport_detach(struct fcoe_transport *ft) +{ + int rc = 0; + + mutex_lock(&ft_mutex); + if (!ft->attached) { + LIBFCOE_TRANSPORT_DBG("transport %s already detached\n", + ft->name); + rc = -ENODEV; + goto out_attach; + } + + list_del(&ft->list); + ft->attached = false; + LIBFCOE_TRANSPORT_DBG("detaching transport %s\n", ft->name); + +out_attach: + mutex_unlock(&ft_mutex); + return rc; + +} +EXPORT_SYMBOL(fcoe_transport_detach); + +static int fcoe_transport_show(char *buffer, const struct kernel_param *kp) +{ + int i, j; + struct fcoe_transport *ft = NULL; + + i = j = sprintf(buffer, "Attached FCoE transports:"); + mutex_lock(&ft_mutex); + list_for_each_entry(ft, &fcoe_transports, list) { + i += snprintf(&buffer[i], IFNAMSIZ, "%s ", ft->name); + if (i >= PAGE_SIZE) + break; + } + mutex_unlock(&ft_mutex); + if (i == j) + i += snprintf(&buffer[i], IFNAMSIZ, "none"); + return i; +} + +static int __init fcoe_transport_init(void) +{ + return 0; +} + +static int __exit fcoe_transport_exit(void) +{ + struct fcoe_transport *ft; + + mutex_lock(&ft_mutex); + list_for_each_entry(ft, &fcoe_transports, list) + printk(KERN_ERR "FCoE transport %s is still attached!\n", + ft->name); + mutex_unlock(&ft_mutex); + return 0; +} + + +static int fcoe_add_netdev_mapping(struct net_device *netdev, + struct fcoe_transport *ft) +{ + struct fcoe_netdev_mapping *nm; + + nm = kmalloc(sizeof(*nm), GFP_KERNEL); + if (!nm) { + printk(KERN_ERR "Unable to allocate netdev_mapping"); + return -ENOMEM; + } + + nm->netdev = netdev; + nm->ft = ft; + + list_add(&nm->list, &fcoe_netdevs); + return 0; +} + + +static void fcoe_del_netdev_mapping(struct net_device *netdev) +{ + struct fcoe_netdev_mapping *nm = NULL, *tmp; + + list_for_each_entry_safe(nm, tmp, &fcoe_netdevs, list) { + if (nm->netdev == netdev) { + list_del(&nm->list); + kfree(nm); + return; + } + } +} + + +/** + * fcoe_netdev_map_lookup - find the fcoe transport that matches the netdev on which + * it was created + * + * Returns : ptr to the fcoe transport that supports this netdev or NULL + * if not found. 
+ * + * The ft_mutex should be held when this is called + */ +static struct fcoe_transport *fcoe_netdev_map_lookup(struct net_device *netdev) +{ + struct fcoe_transport *ft = NULL; + struct fcoe_netdev_mapping *nm; + + list_for_each_entry(nm, &fcoe_netdevs, list) { + if (netdev == nm->netdev) { + ft = nm->ft; + return ft; + } + } + + return NULL; +} + +/** + * fcoe_if_to_netdev() - Parse a name buffer to get a net device + * @buffer: The name of the net device + * + * Returns: NULL or a ptr to net_device + */ +static struct net_device *fcoe_if_to_netdev(const char *buffer) +{ + char *cp; + char ifname[IFNAMSIZ + 2]; + + if (buffer) { + strlcpy(ifname, buffer, IFNAMSIZ); + cp = ifname + strlen(ifname); + while (--cp >= ifname && *cp == '\n') + *cp = '\0'; + return dev_get_by_name(&init_net, ifname); + } + return NULL; +} + +/** + * fcoe_transport_create() - Create a fcoe interface + * @buffer: The name of the Ethernet interface to create on + * @kp: The associated kernel param + * + * Called from sysfs. This holds the ft_mutex while calling the + * registered fcoe transport's create function. + * + * Returns: 0 for success + */ +static int fcoe_transport_create(const char *buffer, struct kernel_param *kp) +{ + int rc = -ENODEV; + struct net_device *netdev = NULL; + struct fcoe_transport *ft = NULL; + enum fip_state fip_mode = (enum fip_state)(long)kp->arg; + + if (!mutex_trylock(&ft_mutex)) + return restart_syscall(); + +#ifdef CONFIG_LIBFCOE_MODULE + /* + * Make sure the module has been initialized, and is not about to be + * removed. Module parameter sysfs files are writable before the + * module_init function is called and after module_exit. + */ + if (THIS_MODULE->state != MODULE_STATE_LIVE) + goto out_nodev; +#endif + + netdev = fcoe_if_to_netdev(buffer); + if (!netdev) { + LIBFCOE_TRANSPORT_DBG("Invalid device %s.\n", buffer); + goto out_nodev; + } + + ft = fcoe_netdev_map_lookup(netdev); + if (ft) { + LIBFCOE_TRANSPORT_DBG("transport %s already has existing " + "FCoE instance on %s.\n", + ft->name, netdev->name); + rc = -EEXIST; + goto out_putdev; + } + + ft = fcoe_transport_lookup(netdev); + if (!ft) { + LIBFCOE_TRANSPORT_DBG("no FCoE transport found for %s.\n", + netdev->name); + goto out_putdev; + } + + rc = fcoe_add_netdev_mapping(netdev, ft); + if (rc) { + LIBFCOE_TRANSPORT_DBG("failed to add new netdev mapping " + "for FCoE transport %s for %s.\n", + ft->name, netdev->name); + goto out_putdev; + } + + /* pass to transport create */ + rc = ft->create ? ft->create(netdev, fip_mode) : -ENODEV; + if (rc) + fcoe_del_netdev_mapping(netdev); + + LIBFCOE_TRANSPORT_DBG("transport %s %s to create fcoe on %s.\n", + ft->name, (rc) ? "failed" : "succeeded", + netdev->name); + +out_putdev: + dev_put(netdev); +out_nodev: + mutex_unlock(&ft_mutex); + if (rc == -ERESTARTSYS) + return restart_syscall(); + else + return rc; +} + +/** + * fcoe_transport_destroy() - Destroy a FCoE interface + * @buffer: The name of the Ethernet interface to be destroyed + * @kp: The associated kernel parameter + * + * Called from sysfs. This holds the ft_mutex while calling the + * registered fcoe transport's destroy function. + * + * Returns: 0 for success + */ +static int fcoe_transport_destroy(const char *buffer, struct kernel_param *kp) +{ + int rc = -ENODEV; + struct net_device *netdev = NULL; + struct fcoe_transport *ft = NULL; + + if (!mutex_trylock(&ft_mutex)) + return restart_syscall(); + +#ifdef CONFIG_LIBFCOE_MODULE + /* + * Make sure the module has been initialized, and is not about to be + * removed. 
Module parameter sysfs files are writable before the + * module_init function is called and after module_exit. + */ + if (THIS_MODULE->state != MODULE_STATE_LIVE) + goto out_nodev; +#endif + + netdev = fcoe_if_to_netdev(buffer); + if (!netdev) { + LIBFCOE_TRANSPORT_DBG("invalid device %s.\n", buffer); + goto out_nodev; + } + + ft = fcoe_netdev_map_lookup(netdev); + if (!ft) { + LIBFCOE_TRANSPORT_DBG("no FCoE transport found for %s.\n", + netdev->name); + goto out_putdev; + } + + /* pass to transport destroy */ + rc = ft->destroy ? ft->destroy(netdev) : -ENODEV; + fcoe_del_netdev_mapping(netdev); + LIBFCOE_TRANSPORT_DBG("transport %s %s to destroy fcoe on %s.\n", + ft->name, (rc) ? "failed" : "succeeded", + netdev->name); + +out_putdev: + dev_put(netdev); +out_nodev: + mutex_unlock(&ft_mutex); + + if (rc == -ERESTARTSYS) + return restart_syscall(); + else + return rc; +} + +/** + * fcoe_transport_disable() - Disables a FCoE interface + * @buffer: The name of the Ethernet interface to be disabled + * @kp: The associated kernel parameter + * + * Called from sysfs. + * + * Returns: 0 for success + */ +static int fcoe_transport_disable(const char *buffer, struct kernel_param *kp) +{ + int rc = -ENODEV; + struct net_device *netdev = NULL; + struct fcoe_transport *ft = NULL; + + if (!mutex_trylock(&ft_mutex)) + return restart_syscall(); + +#ifdef CONFIG_LIBFCOE_MODULE + /* + * Make sure the module has been initialized, and is not about to be + * removed. Module parameter sysfs files are writable before the + * module_init function is called and after module_exit. + */ + if (THIS_MODULE->state != MODULE_STATE_LIVE) + goto out_nodev; +#endif + + netdev = fcoe_if_to_netdev(buffer); + if (!netdev) + goto out_nodev; + + ft = fcoe_netdev_map_lookup(netdev); + if (!ft) + goto out_putdev; + + rc = ft->disable ? ft->disable(netdev) : -ENODEV; + +out_putdev: + dev_put(netdev); +out_nodev: + mutex_unlock(&ft_mutex); + + if (rc == -ERESTARTSYS) + return restart_syscall(); + else + return rc; +} + +/** + * fcoe_transport_enable() - Enables a FCoE interface + * @buffer: The name of the Ethernet interface to be enabled + * @kp: The associated kernel parameter + * + * Called from sysfs. + * + * Returns: 0 for success + */ +static int fcoe_transport_enable(const char *buffer, struct kernel_param *kp) +{ + int rc = -ENODEV; + struct net_device *netdev = NULL; + struct fcoe_transport *ft = NULL; + + if (!mutex_trylock(&ft_mutex)) + return restart_syscall(); + +#ifdef CONFIG_LIBFCOE_MODULE + /* + * Make sure the module has been initialized, and is not about to be + * removed. Module parameter sysfs files are writable before the + * module_init function is called and after module_exit. + */ + if (THIS_MODULE->state != MODULE_STATE_LIVE) + goto out_nodev; +#endif + + netdev = fcoe_if_to_netdev(buffer); + if (!netdev) + goto out_nodev; + + ft = fcoe_netdev_map_lookup(netdev); + if (!ft) + goto out_putdev; + + rc = ft->enable ? 
ft->enable(netdev) : -ENODEV; + +out_putdev: + dev_put(netdev); +out_nodev: + mutex_unlock(&ft_mutex); + if (rc == -ERESTARTSYS) + return restart_syscall(); + else + return rc; +} + +/** + * libfcoe_init() - Initialization routine for libfcoe.ko + */ +static int __init libfcoe_init(void) +{ + fcoe_transport_init(); + + return 0; +} +module_init(libfcoe_init); + +/** + * libfcoe_exit() - Tear down libfcoe.ko + */ +static void __exit libfcoe_exit(void) +{ + fcoe_transport_exit(); +} +module_exit(libfcoe_exit); diff --git a/drivers/scsi/fcoe/libfcoe.h b/drivers/scsi/fcoe/libfcoe.h index c3fe3167ff71..6af5fc3a17d8 100644 --- a/drivers/scsi/fcoe/libfcoe.h +++ b/drivers/scsi/fcoe/libfcoe.h @@ -4,6 +4,7 @@ extern unsigned int libfcoe_debug_logging; #define LIBFCOE_LOGGING 0x01 /* General logging, not categorized */ #define LIBFCOE_FIP_LOGGING 0x02 /* FIP logging */ +#define LIBFCOE_TRANSPORT_LOGGING 0x04 /* FCoE transport logging */ #define LIBFCOE_CHECK_LOGGING(LEVEL, CMD) \ do { \ @@ -22,4 +23,9 @@ do { \ printk(KERN_INFO "host%d: fip: " fmt, \ (fip)->lp->host->host_no, ##args);) +#define LIBFCOE_TRANSPORT_DBG(fmt, args...) \ + LIBFCOE_CHECK_LOGGING(LIBFCOE_TRANSPORT_LOGGING, \ + printk(KERN_INFO "%s: " fmt, \ + __func__, ##args);) + #endif /* _FCOE_LIBFCOE_H_ */ -- cgit v1.2.3 From 0095a9213324a4466c10de98837a27ab1b7e72be Mon Sep 17 00:00:00 2001 From: Yi Zou Date: Fri, 28 Jan 2011 16:05:00 -0800 Subject: [SCSI] libfcoe: rename libfcoe.c to fcoe_cltr.c for the coming fcoe_transport.c The existing libfcoe.c is mostly for FIP support, rename it to reflect that fact and so we can add fcoe_transport.c to the make file to include both into the libfcoe kernel module. [ Minor modifications by Robert Love converting a few "__attribute__((packed))" modifiers to "__packed" to remove new checkpatch.pl WARNINGS ] Signed-off-by: Yi Zou Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/Makefile | 2 + drivers/scsi/fcoe/fcoe_ctlr.c | 2690 +++++++++++++++++++++++++++++++++++++++++ drivers/scsi/fcoe/libfcoe.c | 2690 ----------------------------------------- 3 files changed, 2692 insertions(+), 2690 deletions(-) create mode 100644 drivers/scsi/fcoe/fcoe_ctlr.c delete mode 100644 drivers/scsi/fcoe/libfcoe.c (limited to 'drivers') diff --git a/drivers/scsi/fcoe/Makefile b/drivers/scsi/fcoe/Makefile index 950f27615c76..b122b7abddc6 100644 --- a/drivers/scsi/fcoe/Makefile +++ b/drivers/scsi/fcoe/Makefile @@ -1,2 +1,4 @@ obj-$(CONFIG_FCOE) += fcoe.o obj-$(CONFIG_LIBFCOE) += libfcoe.o + +libfcoe-objs := fcoe_ctlr.o diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c new file mode 100644 index 000000000000..c12d0a7e0d3d --- /dev/null +++ b/drivers/scsi/fcoe/fcoe_ctlr.c @@ -0,0 +1,2690 @@ +/* + * Copyright (c) 2008-2009 Cisco Systems, Inc. All rights reserved. + * Copyright (c) 2009 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + * Maintained at www.Open-FCoE.org + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "libfcoe.h" + +MODULE_AUTHOR("Open-FCoE.org"); +MODULE_DESCRIPTION("FIP discovery protocol support for FCoE HBAs"); +MODULE_LICENSE("GPL v2"); + +#define FCOE_CTLR_MIN_FKA 500 /* min keep alive (mS) */ +#define FCOE_CTLR_DEF_FKA FIP_DEF_FKA /* default keep alive (mS) */ + +static void fcoe_ctlr_timeout(unsigned long); +static void fcoe_ctlr_timer_work(struct work_struct *); +static void fcoe_ctlr_recv_work(struct work_struct *); +static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *); + +static void fcoe_ctlr_vn_start(struct fcoe_ctlr *); +static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *, struct sk_buff *); +static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *); +static int fcoe_ctlr_vn_lookup(struct fcoe_ctlr *, u32, u8 *); + +static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS; +static u8 fcoe_all_enode[ETH_ALEN] = FIP_ALL_ENODE_MACS; +static u8 fcoe_all_vn2vn[ETH_ALEN] = FIP_ALL_VN2VN_MACS; +static u8 fcoe_all_p2p[ETH_ALEN] = FIP_ALL_P2P_MACS; + +unsigned int libfcoe_debug_logging; +module_param_named(debug_logging, libfcoe_debug_logging, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); + +static const char * const fcoe_ctlr_states[] = { + [FIP_ST_DISABLED] = "DISABLED", + [FIP_ST_LINK_WAIT] = "LINK_WAIT", + [FIP_ST_AUTO] = "AUTO", + [FIP_ST_NON_FIP] = "NON_FIP", + [FIP_ST_ENABLED] = "ENABLED", + [FIP_ST_VNMP_START] = "VNMP_START", + [FIP_ST_VNMP_PROBE1] = "VNMP_PROBE1", + [FIP_ST_VNMP_PROBE2] = "VNMP_PROBE2", + [FIP_ST_VNMP_CLAIM] = "VNMP_CLAIM", + [FIP_ST_VNMP_UP] = "VNMP_UP", +}; + +static const char *fcoe_ctlr_state(enum fip_state state) +{ + const char *cp = "unknown"; + + if (state < ARRAY_SIZE(fcoe_ctlr_states)) + cp = fcoe_ctlr_states[state]; + if (!cp) + cp = "unknown"; + return cp; +} + +/** + * fcoe_ctlr_set_state() - Set and do debug printing for the new FIP state. + * @fip: The FCoE controller + * @state: The new state + */ +static void fcoe_ctlr_set_state(struct fcoe_ctlr *fip, enum fip_state state) +{ + if (state == fip->state) + return; + if (fip->lp) + LIBFCOE_FIP_DBG(fip, "state %s -> %s\n", + fcoe_ctlr_state(fip->state), fcoe_ctlr_state(state)); + fip->state = state; +} + +/** + * fcoe_ctlr_mtu_valid() - Check if a FCF's MTU is valid + * @fcf: The FCF to check + * + * Return non-zero if FCF fcoe_size has been validated. + */ +static inline int fcoe_ctlr_mtu_valid(const struct fcoe_fcf *fcf) +{ + return (fcf->flags & FIP_FL_SOL) != 0; +} + +/** + * fcoe_ctlr_fcf_usable() - Check if a FCF is usable + * @fcf: The FCF to check + * + * Return non-zero if the FCF is usable. 
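 *
 * Both flags are copied straight from the advertisement's FIP header when
 * the advertisement is parsed: FIP_FL_SOL marks a solicited advertisement
 * (which is also what validates our advertised FCoE size, see
 * fcoe_ctlr_mtu_valid() above) and FIP_FL_AVAIL marks the FCF as available
 * for login.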
+ */ +static inline int fcoe_ctlr_fcf_usable(struct fcoe_fcf *fcf) +{ + u16 flags = FIP_FL_SOL | FIP_FL_AVAIL; + + return (fcf->flags & flags) == flags; +} + +/** + * fcoe_ctlr_map_dest() - Set flag and OUI for mapping destination addresses + * @fip: The FCoE controller + */ +static void fcoe_ctlr_map_dest(struct fcoe_ctlr *fip) +{ + if (fip->mode == FIP_MODE_VN2VN) + hton24(fip->dest_addr, FIP_VN_FC_MAP); + else + hton24(fip->dest_addr, FIP_DEF_FC_MAP); + hton24(fip->dest_addr + 3, 0); + fip->map_dest = 1; +} + +/** + * fcoe_ctlr_init() - Initialize the FCoE Controller instance + * @fip: The FCoE controller to initialize + */ +void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_state mode) +{ + fcoe_ctlr_set_state(fip, FIP_ST_LINK_WAIT); + fip->mode = mode; + INIT_LIST_HEAD(&fip->fcfs); + mutex_init(&fip->ctlr_mutex); + spin_lock_init(&fip->ctlr_lock); + fip->flogi_oxid = FC_XID_UNKNOWN; + setup_timer(&fip->timer, fcoe_ctlr_timeout, (unsigned long)fip); + INIT_WORK(&fip->timer_work, fcoe_ctlr_timer_work); + INIT_WORK(&fip->recv_work, fcoe_ctlr_recv_work); + skb_queue_head_init(&fip->fip_recv_list); +} +EXPORT_SYMBOL(fcoe_ctlr_init); + +/** + * fcoe_ctlr_reset_fcfs() - Reset and free all FCFs for a controller + * @fip: The FCoE controller whose FCFs are to be reset + * + * Called with &fcoe_ctlr lock held. + */ +static void fcoe_ctlr_reset_fcfs(struct fcoe_ctlr *fip) +{ + struct fcoe_fcf *fcf; + struct fcoe_fcf *next; + + fip->sel_fcf = NULL; + list_for_each_entry_safe(fcf, next, &fip->fcfs, list) { + list_del(&fcf->list); + kfree(fcf); + } + fip->fcf_count = 0; + fip->sel_time = 0; +} + +/** + * fcoe_ctlr_destroy() - Disable and tear down a FCoE controller + * @fip: The FCoE controller to tear down + * + * This is called by FCoE drivers before freeing the &fcoe_ctlr. + * + * The receive handler will have been deleted before this to guarantee + * that no more recv_work will be scheduled. + * + * The timer routine will simply return once we set FIP_ST_DISABLED. + * This guarantees that no further timeouts or work will be scheduled. + */ +void fcoe_ctlr_destroy(struct fcoe_ctlr *fip) +{ + cancel_work_sync(&fip->recv_work); + skb_queue_purge(&fip->fip_recv_list); + + mutex_lock(&fip->ctlr_mutex); + fcoe_ctlr_set_state(fip, FIP_ST_DISABLED); + fcoe_ctlr_reset_fcfs(fip); + mutex_unlock(&fip->ctlr_mutex); + del_timer_sync(&fip->timer); + cancel_work_sync(&fip->timer_work); +} +EXPORT_SYMBOL(fcoe_ctlr_destroy); + +/** + * fcoe_ctlr_announce() - announce new FCF selection + * @fip: The FCoE controller + * + * Also sets the destination MAC for FCoE and control packets + * + * Called with neither ctlr_mutex nor ctlr_lock held. 
+ */ +static void fcoe_ctlr_announce(struct fcoe_ctlr *fip) +{ + struct fcoe_fcf *sel; + struct fcoe_fcf *fcf; + + mutex_lock(&fip->ctlr_mutex); + spin_lock_bh(&fip->ctlr_lock); + + kfree_skb(fip->flogi_req); + fip->flogi_req = NULL; + list_for_each_entry(fcf, &fip->fcfs, list) + fcf->flogi_sent = 0; + + spin_unlock_bh(&fip->ctlr_lock); + sel = fip->sel_fcf; + + if (sel && !compare_ether_addr(sel->fcf_mac, fip->dest_addr)) + goto unlock; + if (!is_zero_ether_addr(fip->dest_addr)) { + printk(KERN_NOTICE "libfcoe: host%d: " + "FIP Fibre-Channel Forwarder MAC %pM deselected\n", + fip->lp->host->host_no, fip->dest_addr); + memset(fip->dest_addr, 0, ETH_ALEN); + } + if (sel) { + printk(KERN_INFO "libfcoe: host%d: FIP selected " + "Fibre-Channel Forwarder MAC %pM\n", + fip->lp->host->host_no, sel->fcf_mac); + memcpy(fip->dest_addr, sel->fcf_mac, ETH_ALEN); + fip->map_dest = 0; + } +unlock: + mutex_unlock(&fip->ctlr_mutex); +} + +/** + * fcoe_ctlr_fcoe_size() - Return the maximum FCoE size required for VN_Port + * @fip: The FCoE controller to get the maximum FCoE size from + * + * Returns the maximum packet size including the FCoE header and trailer, + * but not including any Ethernet or VLAN headers. + */ +static inline u32 fcoe_ctlr_fcoe_size(struct fcoe_ctlr *fip) +{ + /* + * Determine the max FCoE frame size allowed, including + * FCoE header and trailer. + * Note: lp->mfs is currently the payload size, not the frame size. + */ + return fip->lp->mfs + sizeof(struct fc_frame_header) + + sizeof(struct fcoe_hdr) + sizeof(struct fcoe_crc_eof); +} + +/** + * fcoe_ctlr_solicit() - Send a FIP solicitation + * @fip: The FCoE controller to send the solicitation on + * @fcf: The destination FCF (if NULL, a multicast solicitation is sent) + */ +static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf) +{ + struct sk_buff *skb; + struct fip_sol { + struct ethhdr eth; + struct fip_header fip; + struct { + struct fip_mac_desc mac; + struct fip_wwn_desc wwnn; + struct fip_size_desc size; + } __packed desc; + } __packed * sol; + u32 fcoe_size; + + skb = dev_alloc_skb(sizeof(*sol)); + if (!skb) + return; + + sol = (struct fip_sol *)skb->data; + + memset(sol, 0, sizeof(*sol)); + memcpy(sol->eth.h_dest, fcf ? 
fcf->fcf_mac : fcoe_all_fcfs, ETH_ALEN); + memcpy(sol->eth.h_source, fip->ctl_src_addr, ETH_ALEN); + sol->eth.h_proto = htons(ETH_P_FIP); + + sol->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER); + sol->fip.fip_op = htons(FIP_OP_DISC); + sol->fip.fip_subcode = FIP_SC_SOL; + sol->fip.fip_dl_len = htons(sizeof(sol->desc) / FIP_BPW); + sol->fip.fip_flags = htons(FIP_FL_FPMA); + if (fip->spma) + sol->fip.fip_flags |= htons(FIP_FL_SPMA); + + sol->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC; + sol->desc.mac.fd_desc.fip_dlen = sizeof(sol->desc.mac) / FIP_BPW; + memcpy(sol->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN); + + sol->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME; + sol->desc.wwnn.fd_desc.fip_dlen = sizeof(sol->desc.wwnn) / FIP_BPW; + put_unaligned_be64(fip->lp->wwnn, &sol->desc.wwnn.fd_wwn); + + fcoe_size = fcoe_ctlr_fcoe_size(fip); + sol->desc.size.fd_desc.fip_dtype = FIP_DT_FCOE_SIZE; + sol->desc.size.fd_desc.fip_dlen = sizeof(sol->desc.size) / FIP_BPW; + sol->desc.size.fd_size = htons(fcoe_size); + + skb_put(skb, sizeof(*sol)); + skb->protocol = htons(ETH_P_FIP); + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + fip->send(fip, skb); + + if (!fcf) + fip->sol_time = jiffies; +} + +/** + * fcoe_ctlr_link_up() - Start FCoE controller + * @fip: The FCoE controller to start + * + * Called from the LLD when the network link is ready. + */ +void fcoe_ctlr_link_up(struct fcoe_ctlr *fip) +{ + mutex_lock(&fip->ctlr_mutex); + if (fip->state == FIP_ST_NON_FIP || fip->state == FIP_ST_AUTO) { + mutex_unlock(&fip->ctlr_mutex); + fc_linkup(fip->lp); + } else if (fip->state == FIP_ST_LINK_WAIT) { + fcoe_ctlr_set_state(fip, fip->mode); + switch (fip->mode) { + default: + LIBFCOE_FIP_DBG(fip, "invalid mode %d\n", fip->mode); + /* fall-through */ + case FIP_MODE_AUTO: + LIBFCOE_FIP_DBG(fip, "%s", "setting AUTO mode.\n"); + /* fall-through */ + case FIP_MODE_FABRIC: + case FIP_MODE_NON_FIP: + mutex_unlock(&fip->ctlr_mutex); + fc_linkup(fip->lp); + fcoe_ctlr_solicit(fip, NULL); + break; + case FIP_MODE_VN2VN: + fcoe_ctlr_vn_start(fip); + mutex_unlock(&fip->ctlr_mutex); + fc_linkup(fip->lp); + break; + } + } else + mutex_unlock(&fip->ctlr_mutex); +} +EXPORT_SYMBOL(fcoe_ctlr_link_up); + +/** + * fcoe_ctlr_reset() - Reset a FCoE controller + * @fip: The FCoE controller to reset + */ +static void fcoe_ctlr_reset(struct fcoe_ctlr *fip) +{ + fcoe_ctlr_reset_fcfs(fip); + del_timer(&fip->timer); + fip->ctlr_ka_time = 0; + fip->port_ka_time = 0; + fip->sol_time = 0; + fip->flogi_oxid = FC_XID_UNKNOWN; + fcoe_ctlr_map_dest(fip); +} + +/** + * fcoe_ctlr_link_down() - Stop a FCoE controller + * @fip: The FCoE controller to be stopped + * + * Returns non-zero if the link was up and now isn't. + * + * Called from the LLD when the network link is not ready. + * There may be multiple calls while the link is down. 
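 *
 * As a purely illustrative example (the variable names are hypothetical),
 * an LLD typically drives this pair from its netdev event handling:
 *
 *	case NETDEV_DOWN:
 *		fcoe_ctlr_link_down(&fcoe->ctlr);
 *		break;
 *	case NETDEV_UP:
 *		if (netif_carrier_ok(netdev))
 *			fcoe_ctlr_link_up(&fcoe->ctlr);
 *		break;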
+ */ +int fcoe_ctlr_link_down(struct fcoe_ctlr *fip) +{ + int link_dropped; + + LIBFCOE_FIP_DBG(fip, "link down.\n"); + mutex_lock(&fip->ctlr_mutex); + fcoe_ctlr_reset(fip); + link_dropped = fip->state != FIP_ST_LINK_WAIT; + fcoe_ctlr_set_state(fip, FIP_ST_LINK_WAIT); + mutex_unlock(&fip->ctlr_mutex); + + if (link_dropped) + fc_linkdown(fip->lp); + return link_dropped; +} +EXPORT_SYMBOL(fcoe_ctlr_link_down); + +/** + * fcoe_ctlr_send_keep_alive() - Send a keep-alive to the selected FCF + * @fip: The FCoE controller to send the FKA on + * @lport: libfc fc_lport to send from + * @ports: 0 for controller keep-alive, 1 for port keep-alive + * @sa: The source MAC address + * + * A controller keep-alive is sent every fka_period (typically 8 seconds). + * The source MAC is the native MAC address. + * + * A port keep-alive is sent every 90 seconds while logged in. + * The source MAC is the assigned mapped source address. + * The destination is the FCF's F-port. + */ +static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, + struct fc_lport *lport, + int ports, u8 *sa) +{ + struct sk_buff *skb; + struct fip_kal { + struct ethhdr eth; + struct fip_header fip; + struct fip_mac_desc mac; + } __packed * kal; + struct fip_vn_desc *vn; + u32 len; + struct fc_lport *lp; + struct fcoe_fcf *fcf; + + fcf = fip->sel_fcf; + lp = fip->lp; + if (!fcf || (ports && !lp->port_id)) + return; + + len = sizeof(*kal) + ports * sizeof(*vn); + skb = dev_alloc_skb(len); + if (!skb) + return; + + kal = (struct fip_kal *)skb->data; + memset(kal, 0, len); + memcpy(kal->eth.h_dest, fcf->fcf_mac, ETH_ALEN); + memcpy(kal->eth.h_source, sa, ETH_ALEN); + kal->eth.h_proto = htons(ETH_P_FIP); + + kal->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER); + kal->fip.fip_op = htons(FIP_OP_CTRL); + kal->fip.fip_subcode = FIP_SC_KEEP_ALIVE; + kal->fip.fip_dl_len = htons((sizeof(kal->mac) + + ports * sizeof(*vn)) / FIP_BPW); + kal->fip.fip_flags = htons(FIP_FL_FPMA); + if (fip->spma) + kal->fip.fip_flags |= htons(FIP_FL_SPMA); + + kal->mac.fd_desc.fip_dtype = FIP_DT_MAC; + kal->mac.fd_desc.fip_dlen = sizeof(kal->mac) / FIP_BPW; + memcpy(kal->mac.fd_mac, fip->ctl_src_addr, ETH_ALEN); + if (ports) { + vn = (struct fip_vn_desc *)(kal + 1); + vn->fd_desc.fip_dtype = FIP_DT_VN_ID; + vn->fd_desc.fip_dlen = sizeof(*vn) / FIP_BPW; + memcpy(vn->fd_mac, fip->get_src_addr(lport), ETH_ALEN); + hton24(vn->fd_fc_id, lport->port_id); + put_unaligned_be64(lport->wwpn, &vn->fd_wwpn); + } + skb_put(skb, len); + skb->protocol = htons(ETH_P_FIP); + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + fip->send(fip, skb); +} + +/** + * fcoe_ctlr_encaps() - Encapsulate an ELS frame for FIP, without sending it + * @fip: The FCoE controller for the ELS frame + * @dtype: The FIP descriptor type for the frame + * @skb: The FCoE ELS frame including FC header but no FCoE headers + * @d_id: The destination port ID. + * + * Returns non-zero error code on failure. + * + * The caller must check that the length is a multiple of 4. + * + * The @skb must have enough headroom (28 bytes) and tailroom (8 bytes). + * Headroom includes the FIP encapsulation description, FIP header, and + * Ethernet header. The tailroom is for the FIP MAC descriptor. 
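 *
 * Roughly, the frame assembled here is laid out as
 *
 *	ethhdr (14) | fip_header (10) | fip_encaps (4) | FC frame ...
 *	                         ... | fip_mac_desc (8, omitted for LS_RJT)
 *
 * which is where the 28 bytes of headroom and 8 bytes of tailroom above
 * come from (byte counts assume an untagged Ethernet header).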
+ */ +static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, struct fc_lport *lport, + u8 dtype, struct sk_buff *skb, u32 d_id) +{ + struct fip_encaps_head { + struct ethhdr eth; + struct fip_header fip; + struct fip_encaps encaps; + } __packed * cap; + struct fc_frame_header *fh; + struct fip_mac_desc *mac; + struct fcoe_fcf *fcf; + size_t dlen; + u16 fip_flags; + u8 op; + + fh = (struct fc_frame_header *)skb->data; + op = *(u8 *)(fh + 1); + dlen = sizeof(struct fip_encaps) + skb->len; /* len before push */ + cap = (struct fip_encaps_head *)skb_push(skb, sizeof(*cap)); + memset(cap, 0, sizeof(*cap)); + + if (lport->point_to_multipoint) { + if (fcoe_ctlr_vn_lookup(fip, d_id, cap->eth.h_dest)) + return -ENODEV; + fip_flags = 0; + } else { + fcf = fip->sel_fcf; + if (!fcf) + return -ENODEV; + fip_flags = fcf->flags; + fip_flags &= fip->spma ? FIP_FL_SPMA | FIP_FL_FPMA : + FIP_FL_FPMA; + if (!fip_flags) + return -ENODEV; + memcpy(cap->eth.h_dest, fcf->fcf_mac, ETH_ALEN); + } + memcpy(cap->eth.h_source, fip->ctl_src_addr, ETH_ALEN); + cap->eth.h_proto = htons(ETH_P_FIP); + + cap->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER); + cap->fip.fip_op = htons(FIP_OP_LS); + if (op == ELS_LS_ACC || op == ELS_LS_RJT) + cap->fip.fip_subcode = FIP_SC_REP; + else + cap->fip.fip_subcode = FIP_SC_REQ; + cap->fip.fip_flags = htons(fip_flags); + + cap->encaps.fd_desc.fip_dtype = dtype; + cap->encaps.fd_desc.fip_dlen = dlen / FIP_BPW; + + if (op != ELS_LS_RJT) { + dlen += sizeof(*mac); + mac = (struct fip_mac_desc *)skb_put(skb, sizeof(*mac)); + memset(mac, 0, sizeof(*mac)); + mac->fd_desc.fip_dtype = FIP_DT_MAC; + mac->fd_desc.fip_dlen = sizeof(*mac) / FIP_BPW; + if (dtype != FIP_DT_FLOGI && dtype != FIP_DT_FDISC) { + memcpy(mac->fd_mac, fip->get_src_addr(lport), ETH_ALEN); + } else if (fip->mode == FIP_MODE_VN2VN) { + hton24(mac->fd_mac, FIP_VN_FC_MAP); + hton24(mac->fd_mac + 3, fip->port_id); + } else if (fip_flags & FIP_FL_SPMA) { + LIBFCOE_FIP_DBG(fip, "FLOGI/FDISC sent with SPMA\n"); + memcpy(mac->fd_mac, fip->ctl_src_addr, ETH_ALEN); + } else { + LIBFCOE_FIP_DBG(fip, "FLOGI/FDISC sent with FPMA\n"); + /* FPMA only FLOGI. Must leave the MAC desc zeroed. */ + } + } + cap->fip.fip_dl_len = htons(dlen / FIP_BPW); + + skb->protocol = htons(ETH_P_FIP); + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + return 0; +} + +/** + * fcoe_ctlr_els_send() - Send an ELS frame encapsulated by FIP if appropriate. + * @fip: FCoE controller. + * @lport: libfc fc_lport to send from + * @skb: FCoE ELS frame including FC header but no FCoE headers. + * + * Returns a non-zero error code if the frame should not be sent. + * Returns zero if the caller should send the frame with FCoE encapsulation. + * + * The caller must check that the length is a multiple of 4. + * The SKB must have enough headroom (28 bytes) and tailroom (8 bytes). + * The the skb must also be an fc_frame. + * + * This is called from the lower-level driver with spinlocks held, + * so we must not take a mutex here. 
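 *
 * An illustrative caller pattern in an LLD transmit path: a non-zero
 * return (for example -EINPROGRESS once FIP has taken over a FLOGI)
 * means the frame was consumed here, zero means the caller still owns it:
 *
 *	if (fcoe_ctlr_els_send(fip, lport, skb))
 *		return 0;	(frame handled or freed by FIP)
 *	(otherwise encapsulate skb as a normal FCoE frame and transmit)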
+ */ +int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport, + struct sk_buff *skb) +{ + struct fc_frame *fp; + struct fc_frame_header *fh; + u16 old_xid; + u8 op; + u8 mac[ETH_ALEN]; + + fp = container_of(skb, struct fc_frame, skb); + fh = (struct fc_frame_header *)skb->data; + op = *(u8 *)(fh + 1); + + if (op == ELS_FLOGI && fip->mode != FIP_MODE_VN2VN) { + old_xid = fip->flogi_oxid; + fip->flogi_oxid = ntohs(fh->fh_ox_id); + if (fip->state == FIP_ST_AUTO) { + if (old_xid == FC_XID_UNKNOWN) + fip->flogi_count = 0; + fip->flogi_count++; + if (fip->flogi_count < 3) + goto drop; + fcoe_ctlr_map_dest(fip); + return 0; + } + if (fip->state == FIP_ST_NON_FIP) + fcoe_ctlr_map_dest(fip); + } + + if (fip->state == FIP_ST_NON_FIP) + return 0; + if (!fip->sel_fcf && fip->mode != FIP_MODE_VN2VN) + goto drop; + switch (op) { + case ELS_FLOGI: + op = FIP_DT_FLOGI; + if (fip->mode == FIP_MODE_VN2VN) + break; + spin_lock_bh(&fip->ctlr_lock); + kfree_skb(fip->flogi_req); + fip->flogi_req = skb; + fip->flogi_req_send = 1; + spin_unlock_bh(&fip->ctlr_lock); + schedule_work(&fip->timer_work); + return -EINPROGRESS; + case ELS_FDISC: + if (ntoh24(fh->fh_s_id)) + return 0; + op = FIP_DT_FDISC; + break; + case ELS_LOGO: + if (fip->mode == FIP_MODE_VN2VN) { + if (fip->state != FIP_ST_VNMP_UP) + return -EINVAL; + if (ntoh24(fh->fh_d_id) == FC_FID_FLOGI) + return -EINVAL; + } else { + if (fip->state != FIP_ST_ENABLED) + return 0; + if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI) + return 0; + } + op = FIP_DT_LOGO; + break; + case ELS_LS_ACC: + /* + * If non-FIP, we may have gotten an SID by accepting an FLOGI + * from a point-to-point connection. Switch to using + * the source mac based on the SID. The destination + * MAC in this case would have been set by receving the + * FLOGI. + */ + if (fip->state == FIP_ST_NON_FIP) { + if (fip->flogi_oxid == FC_XID_UNKNOWN) + return 0; + fip->flogi_oxid = FC_XID_UNKNOWN; + fc_fcoe_set_mac(mac, fh->fh_d_id); + fip->update_mac(lport, mac); + } + /* fall through */ + case ELS_LS_RJT: + op = fr_encaps(fp); + if (op) + break; + return 0; + default: + if (fip->state != FIP_ST_ENABLED && + fip->state != FIP_ST_VNMP_UP) + goto drop; + return 0; + } + LIBFCOE_FIP_DBG(fip, "els_send op %u d_id %x\n", + op, ntoh24(fh->fh_d_id)); + if (fcoe_ctlr_encaps(fip, lport, op, skb, ntoh24(fh->fh_d_id))) + goto drop; + fip->send(fip, skb); + return -EINPROGRESS; +drop: + kfree_skb(skb); + return -EINVAL; +} +EXPORT_SYMBOL(fcoe_ctlr_els_send); + +/** + * fcoe_ctlr_age_fcfs() - Reset and free all old FCFs for a controller + * @fip: The FCoE controller to free FCFs on + * + * Called with lock held and preemption disabled. + * + * An FCF is considered old if we have missed two advertisements. + * That is, there have been no valid advertisement from it for 2.5 + * times its keep-alive period. + * + * In addition, determine the time when an FCF selection can occur. + * + * Also, increment the MissDiscAdvCount when no advertisement is received + * for the corresponding FCF for 1.5 * FKA_ADV_PERIOD (FC-BB-5 LESB). + * + * Returns the time in jiffies for the next call. 
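 *
 * With the usual 8 second keep-alive period this works out to roughly:
 *
 *	last adv + 12s (1.5 * period): MissDiscAdvCount is bumped for the
 *	                               currently selected FCF
 *	last adv + 20s (2.5 * period): the FCF is freed and
 *	                               VLinkFailureCount is bumped
 *
 * (illustrative numbers; the real deadlines use the fka_period that the
 * FCF itself advertised).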
+ */ +static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip) +{ + struct fcoe_fcf *fcf; + struct fcoe_fcf *next; + unsigned long next_timer = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD); + unsigned long deadline; + unsigned long sel_time = 0; + struct fcoe_dev_stats *stats; + + stats = per_cpu_ptr(fip->lp->dev_stats, get_cpu()); + + list_for_each_entry_safe(fcf, next, &fip->fcfs, list) { + deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2; + if (fip->sel_fcf == fcf) { + if (time_after(jiffies, deadline)) { + stats->MissDiscAdvCount++; + printk(KERN_INFO "libfcoe: host%d: " + "Missing Discovery Advertisement " + "for fab %16.16llx count %lld\n", + fip->lp->host->host_no, fcf->fabric_name, + stats->MissDiscAdvCount); + } else if (time_after(next_timer, deadline)) + next_timer = deadline; + } + + deadline += fcf->fka_period; + if (time_after_eq(jiffies, deadline)) { + if (fip->sel_fcf == fcf) + fip->sel_fcf = NULL; + list_del(&fcf->list); + WARN_ON(!fip->fcf_count); + fip->fcf_count--; + kfree(fcf); + stats->VLinkFailureCount++; + } else { + if (time_after(next_timer, deadline)) + next_timer = deadline; + if (fcoe_ctlr_mtu_valid(fcf) && + (!sel_time || time_before(sel_time, fcf->time))) + sel_time = fcf->time; + } + } + put_cpu(); + if (sel_time && !fip->sel_fcf && !fip->sel_time) { + sel_time += msecs_to_jiffies(FCOE_CTLR_START_DELAY); + fip->sel_time = sel_time; + } + + return next_timer; +} + +/** + * fcoe_ctlr_parse_adv() - Decode a FIP advertisement into a new FCF entry + * @fip: The FCoE controller receiving the advertisement + * @skb: The received FIP advertisement frame + * @fcf: The resulting FCF entry + * + * Returns zero on a valid parsed advertisement, + * otherwise returns non zero value. + */ +static int fcoe_ctlr_parse_adv(struct fcoe_ctlr *fip, + struct sk_buff *skb, struct fcoe_fcf *fcf) +{ + struct fip_header *fiph; + struct fip_desc *desc = NULL; + struct fip_wwn_desc *wwn; + struct fip_fab_desc *fab; + struct fip_fka_desc *fka; + unsigned long t; + size_t rlen; + size_t dlen; + u32 desc_mask; + + memset(fcf, 0, sizeof(*fcf)); + fcf->fka_period = msecs_to_jiffies(FCOE_CTLR_DEF_FKA); + + fiph = (struct fip_header *)skb->data; + fcf->flags = ntohs(fiph->fip_flags); + + /* + * mask of required descriptors. validating each one clears its bit. 
+ */ + desc_mask = BIT(FIP_DT_PRI) | BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME) | + BIT(FIP_DT_FAB) | BIT(FIP_DT_FKA); + + rlen = ntohs(fiph->fip_dl_len) * 4; + if (rlen + sizeof(*fiph) > skb->len) + return -EINVAL; + + desc = (struct fip_desc *)(fiph + 1); + while (rlen > 0) { + dlen = desc->fip_dlen * FIP_BPW; + if (dlen < sizeof(*desc) || dlen > rlen) + return -EINVAL; + /* Drop Adv if there are duplicate critical descriptors */ + if ((desc->fip_dtype < 32) && + !(desc_mask & 1U << desc->fip_dtype)) { + LIBFCOE_FIP_DBG(fip, "Duplicate Critical " + "Descriptors in FIP adv\n"); + return -EINVAL; + } + switch (desc->fip_dtype) { + case FIP_DT_PRI: + if (dlen != sizeof(struct fip_pri_desc)) + goto len_err; + fcf->pri = ((struct fip_pri_desc *)desc)->fd_pri; + desc_mask &= ~BIT(FIP_DT_PRI); + break; + case FIP_DT_MAC: + if (dlen != sizeof(struct fip_mac_desc)) + goto len_err; + memcpy(fcf->fcf_mac, + ((struct fip_mac_desc *)desc)->fd_mac, + ETH_ALEN); + if (!is_valid_ether_addr(fcf->fcf_mac)) { + LIBFCOE_FIP_DBG(fip, + "Invalid MAC addr %pM in FIP adv\n", + fcf->fcf_mac); + return -EINVAL; + } + desc_mask &= ~BIT(FIP_DT_MAC); + break; + case FIP_DT_NAME: + if (dlen != sizeof(struct fip_wwn_desc)) + goto len_err; + wwn = (struct fip_wwn_desc *)desc; + fcf->switch_name = get_unaligned_be64(&wwn->fd_wwn); + desc_mask &= ~BIT(FIP_DT_NAME); + break; + case FIP_DT_FAB: + if (dlen != sizeof(struct fip_fab_desc)) + goto len_err; + fab = (struct fip_fab_desc *)desc; + fcf->fabric_name = get_unaligned_be64(&fab->fd_wwn); + fcf->vfid = ntohs(fab->fd_vfid); + fcf->fc_map = ntoh24(fab->fd_map); + desc_mask &= ~BIT(FIP_DT_FAB); + break; + case FIP_DT_FKA: + if (dlen != sizeof(struct fip_fka_desc)) + goto len_err; + fka = (struct fip_fka_desc *)desc; + if (fka->fd_flags & FIP_FKA_ADV_D) + fcf->fd_flags = 1; + t = ntohl(fka->fd_fka_period); + if (t >= FCOE_CTLR_MIN_FKA) + fcf->fka_period = msecs_to_jiffies(t); + desc_mask &= ~BIT(FIP_DT_FKA); + break; + case FIP_DT_MAP_OUI: + case FIP_DT_FCOE_SIZE: + case FIP_DT_FLOGI: + case FIP_DT_FDISC: + case FIP_DT_LOGO: + case FIP_DT_ELP: + default: + LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x " + "in FIP adv\n", desc->fip_dtype); + /* standard says ignore unknown descriptors >= 128 */ + if (desc->fip_dtype < FIP_DT_VENDOR_BASE) + return -EINVAL; + break; + } + desc = (struct fip_desc *)((char *)desc + dlen); + rlen -= dlen; + } + if (!fcf->fc_map || (fcf->fc_map & 0x10000)) + return -EINVAL; + if (!fcf->switch_name) + return -EINVAL; + if (desc_mask) { + LIBFCOE_FIP_DBG(fip, "adv missing descriptors mask %x\n", + desc_mask); + return -EINVAL; + } + return 0; + +len_err: + LIBFCOE_FIP_DBG(fip, "FIP length error in descriptor type %x len %zu\n", + desc->fip_dtype, dlen); + return -EINVAL; +} + +/** + * fcoe_ctlr_recv_adv() - Handle an incoming advertisement + * @fip: The FCoE controller receiving the advertisement + * @skb: The received FIP packet + */ +static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb) +{ + struct fcoe_fcf *fcf; + struct fcoe_fcf new; + struct fcoe_fcf *found; + unsigned long sol_tov = msecs_to_jiffies(FCOE_CTRL_SOL_TOV); + int first = 0; + int mtu_valid; + + if (fcoe_ctlr_parse_adv(fip, skb, &new)) + return; + + mutex_lock(&fip->ctlr_mutex); + first = list_empty(&fip->fcfs); + found = NULL; + list_for_each_entry(fcf, &fip->fcfs, list) { + if (fcf->switch_name == new.switch_name && + fcf->fabric_name == new.fabric_name && + fcf->fc_map == new.fc_map && + compare_ether_addr(fcf->fcf_mac, new.fcf_mac) == 0) { + found = fcf; + 
break; + } + } + if (!found) { + if (fip->fcf_count >= FCOE_CTLR_FCF_LIMIT) + goto out; + + fcf = kmalloc(sizeof(*fcf), GFP_ATOMIC); + if (!fcf) + goto out; + + fip->fcf_count++; + memcpy(fcf, &new, sizeof(new)); + list_add(&fcf->list, &fip->fcfs); + } else { + /* + * Update the FCF's keep-alive descriptor flags. + * Other flag changes from new advertisements are + * ignored after a solicited advertisement is + * received and the FCF is selectable (usable). + */ + fcf->fd_flags = new.fd_flags; + if (!fcoe_ctlr_fcf_usable(fcf)) + fcf->flags = new.flags; + + if (fcf == fip->sel_fcf && !fcf->fd_flags) { + fip->ctlr_ka_time -= fcf->fka_period; + fip->ctlr_ka_time += new.fka_period; + if (time_before(fip->ctlr_ka_time, fip->timer.expires)) + mod_timer(&fip->timer, fip->ctlr_ka_time); + } + fcf->fka_period = new.fka_period; + memcpy(fcf->fcf_mac, new.fcf_mac, ETH_ALEN); + } + mtu_valid = fcoe_ctlr_mtu_valid(fcf); + fcf->time = jiffies; + if (!found) + LIBFCOE_FIP_DBG(fip, "New FCF fab %16.16llx mac %pM\n", + fcf->fabric_name, fcf->fcf_mac); + + /* + * If this advertisement is not solicited and our max receive size + * hasn't been verified, send a solicited advertisement. + */ + if (!mtu_valid) + fcoe_ctlr_solicit(fip, fcf); + + /* + * If its been a while since we did a solicit, and this is + * the first advertisement we've received, do a multicast + * solicitation to gather as many advertisements as we can + * before selection occurs. + */ + if (first && time_after(jiffies, fip->sol_time + sol_tov)) + fcoe_ctlr_solicit(fip, NULL); + + /* + * Put this FCF at the head of the list for priority among equals. + * This helps in the case of an NPV switch which insists we use + * the FCF that answers multicast solicitations, not the others that + * are sending periodic multicast advertisements. + */ + if (mtu_valid) { + list_del(&fcf->list); + list_add(&fcf->list, &fip->fcfs); + } + + /* + * If this is the first validated FCF, note the time and + * set a timer to trigger selection. 
+ */ + if (mtu_valid && !fip->sel_fcf && fcoe_ctlr_fcf_usable(fcf)) { + fip->sel_time = jiffies + + msecs_to_jiffies(FCOE_CTLR_START_DELAY); + if (!timer_pending(&fip->timer) || + time_before(fip->sel_time, fip->timer.expires)) + mod_timer(&fip->timer, fip->sel_time); + } +out: + mutex_unlock(&fip->ctlr_mutex); +} + +/** + * fcoe_ctlr_recv_els() - Handle an incoming FIP encapsulated ELS frame + * @fip: The FCoE controller which received the packet + * @skb: The received FIP packet + */ +static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb) +{ + struct fc_lport *lport = fip->lp; + struct fip_header *fiph; + struct fc_frame *fp = (struct fc_frame *)skb; + struct fc_frame_header *fh = NULL; + struct fip_desc *desc; + struct fip_encaps *els; + struct fcoe_dev_stats *stats; + enum fip_desc_type els_dtype = 0; + u8 els_op; + u8 sub; + u8 granted_mac[ETH_ALEN] = { 0 }; + size_t els_len = 0; + size_t rlen; + size_t dlen; + u32 desc_mask = 0; + u32 desc_cnt = 0; + + fiph = (struct fip_header *)skb->data; + sub = fiph->fip_subcode; + if (sub != FIP_SC_REQ && sub != FIP_SC_REP) + goto drop; + + rlen = ntohs(fiph->fip_dl_len) * 4; + if (rlen + sizeof(*fiph) > skb->len) + goto drop; + + desc = (struct fip_desc *)(fiph + 1); + while (rlen > 0) { + desc_cnt++; + dlen = desc->fip_dlen * FIP_BPW; + if (dlen < sizeof(*desc) || dlen > rlen) + goto drop; + /* Drop ELS if there are duplicate critical descriptors */ + if (desc->fip_dtype < 32) { + if (desc_mask & 1U << desc->fip_dtype) { + LIBFCOE_FIP_DBG(fip, "Duplicate Critical " + "Descriptors in FIP ELS\n"); + goto drop; + } + desc_mask |= (1 << desc->fip_dtype); + } + switch (desc->fip_dtype) { + case FIP_DT_MAC: + if (desc_cnt == 1) { + LIBFCOE_FIP_DBG(fip, "FIP descriptors " + "received out of order\n"); + goto drop; + } + + if (dlen != sizeof(struct fip_mac_desc)) + goto len_err; + memcpy(granted_mac, + ((struct fip_mac_desc *)desc)->fd_mac, + ETH_ALEN); + break; + case FIP_DT_FLOGI: + case FIP_DT_FDISC: + case FIP_DT_LOGO: + case FIP_DT_ELP: + if (desc_cnt != 1) { + LIBFCOE_FIP_DBG(fip, "FIP descriptors " + "received out of order\n"); + goto drop; + } + if (fh) + goto drop; + if (dlen < sizeof(*els) + sizeof(*fh) + 1) + goto len_err; + els_len = dlen - sizeof(*els); + els = (struct fip_encaps *)desc; + fh = (struct fc_frame_header *)(els + 1); + els_dtype = desc->fip_dtype; + break; + default: + LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x " + "in FIP adv\n", desc->fip_dtype); + /* standard says ignore unknown descriptors >= 128 */ + if (desc->fip_dtype < FIP_DT_VENDOR_BASE) + goto drop; + if (desc_cnt <= 2) { + LIBFCOE_FIP_DBG(fip, "FIP descriptors " + "received out of order\n"); + goto drop; + } + break; + } + desc = (struct fip_desc *)((char *)desc + dlen); + rlen -= dlen; + } + + if (!fh) + goto drop; + els_op = *(u8 *)(fh + 1); + + if ((els_dtype == FIP_DT_FLOGI || els_dtype == FIP_DT_FDISC) && + sub == FIP_SC_REP && fip->mode != FIP_MODE_VN2VN) { + if (els_op == ELS_LS_ACC) { + if (!is_valid_ether_addr(granted_mac)) { + LIBFCOE_FIP_DBG(fip, + "Invalid MAC address %pM in FIP ELS\n", + granted_mac); + goto drop; + } + memcpy(fr_cb(fp)->granted_mac, granted_mac, ETH_ALEN); + + if (fip->flogi_oxid == ntohs(fh->fh_ox_id)) { + fip->flogi_oxid = FC_XID_UNKNOWN; + if (els_dtype == FIP_DT_FLOGI) + fcoe_ctlr_announce(fip); + } + } else if (els_dtype == FIP_DT_FLOGI && + !fcoe_ctlr_flogi_retry(fip)) + goto drop; /* retrying FLOGI so drop reject */ + } + + if ((desc_cnt == 0) || ((els_op != ELS_LS_RJT) && + (!(1U << FIP_DT_MAC & 
desc_mask)))) { + LIBFCOE_FIP_DBG(fip, "Missing critical descriptors " + "in FIP ELS\n"); + goto drop; + } + + /* + * Convert skb into an fc_frame containing only the ELS. + */ + skb_pull(skb, (u8 *)fh - skb->data); + skb_trim(skb, els_len); + fp = (struct fc_frame *)skb; + fc_frame_init(fp); + fr_sof(fp) = FC_SOF_I3; + fr_eof(fp) = FC_EOF_T; + fr_dev(fp) = lport; + fr_encaps(fp) = els_dtype; + + stats = per_cpu_ptr(lport->dev_stats, get_cpu()); + stats->RxFrames++; + stats->RxWords += skb->len / FIP_BPW; + put_cpu(); + + fc_exch_recv(lport, fp); + return; + +len_err: + LIBFCOE_FIP_DBG(fip, "FIP length error in descriptor type %x len %zu\n", + desc->fip_dtype, dlen); +drop: + kfree_skb(skb); +} + +/** + * fcoe_ctlr_recv_els() - Handle an incoming link reset frame + * @fip: The FCoE controller that received the frame + * @fh: The received FIP header + * + * There may be multiple VN_Port descriptors. + * The overall length has already been checked. + */ +static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip, + struct fip_header *fh) +{ + struct fip_desc *desc; + struct fip_mac_desc *mp; + struct fip_wwn_desc *wp; + struct fip_vn_desc *vp; + size_t rlen; + size_t dlen; + struct fcoe_fcf *fcf = fip->sel_fcf; + struct fc_lport *lport = fip->lp; + struct fc_lport *vn_port = NULL; + u32 desc_mask; + int is_vn_port = 0; + + LIBFCOE_FIP_DBG(fip, "Clear Virtual Link received\n"); + + if (!fcf || !lport->port_id) + return; + + /* + * mask of required descriptors. Validating each one clears its bit. + */ + desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME) | BIT(FIP_DT_VN_ID); + + rlen = ntohs(fh->fip_dl_len) * FIP_BPW; + desc = (struct fip_desc *)(fh + 1); + while (rlen >= sizeof(*desc)) { + dlen = desc->fip_dlen * FIP_BPW; + if (dlen > rlen) + return; + /* Drop CVL if there are duplicate critical descriptors */ + if ((desc->fip_dtype < 32) && + !(desc_mask & 1U << desc->fip_dtype)) { + LIBFCOE_FIP_DBG(fip, "Duplicate Critical " + "Descriptors in FIP CVL\n"); + return; + } + switch (desc->fip_dtype) { + case FIP_DT_MAC: + mp = (struct fip_mac_desc *)desc; + if (dlen < sizeof(*mp)) + return; + if (compare_ether_addr(mp->fd_mac, fcf->fcf_mac)) + return; + desc_mask &= ~BIT(FIP_DT_MAC); + break; + case FIP_DT_NAME: + wp = (struct fip_wwn_desc *)desc; + if (dlen < sizeof(*wp)) + return; + if (get_unaligned_be64(&wp->fd_wwn) != fcf->switch_name) + return; + desc_mask &= ~BIT(FIP_DT_NAME); + break; + case FIP_DT_VN_ID: + vp = (struct fip_vn_desc *)desc; + if (dlen < sizeof(*vp)) + return; + if (compare_ether_addr(vp->fd_mac, + fip->get_src_addr(lport)) == 0 && + get_unaligned_be64(&vp->fd_wwpn) == lport->wwpn && + ntoh24(vp->fd_fc_id) == lport->port_id) { + desc_mask &= ~BIT(FIP_DT_VN_ID); + break; + } + /* check if clr_vlink is for NPIV port */ + mutex_lock(&lport->lp_mutex); + list_for_each_entry(vn_port, &lport->vports, list) { + if (compare_ether_addr(vp->fd_mac, + fip->get_src_addr(vn_port)) == 0 && + (get_unaligned_be64(&vp->fd_wwpn) + == vn_port->wwpn) && + (ntoh24(vp->fd_fc_id) == + fc_host_port_id(vn_port->host))) { + desc_mask &= ~BIT(FIP_DT_VN_ID); + is_vn_port = 1; + break; + } + } + mutex_unlock(&lport->lp_mutex); + + break; + default: + /* standard says ignore unknown descriptors >= 128 */ + if (desc->fip_dtype < FIP_DT_VENDOR_BASE) + return; + break; + } + desc = (struct fip_desc *)((char *)desc + dlen); + rlen -= dlen; + } + + /* + * reset only if all required descriptors were present and valid. 
+ */ + if (desc_mask) { + LIBFCOE_FIP_DBG(fip, "missing descriptors mask %x\n", + desc_mask); + } else { + LIBFCOE_FIP_DBG(fip, "performing Clear Virtual Link\n"); + + if (is_vn_port) + fc_lport_reset(vn_port); + else { + mutex_lock(&fip->ctlr_mutex); + per_cpu_ptr(lport->dev_stats, + get_cpu())->VLinkFailureCount++; + put_cpu(); + fcoe_ctlr_reset(fip); + mutex_unlock(&fip->ctlr_mutex); + + fc_lport_reset(fip->lp); + fcoe_ctlr_solicit(fip, NULL); + } + } +} + +/** + * fcoe_ctlr_recv() - Receive a FIP packet + * @fip: The FCoE controller that received the packet + * @skb: The received FIP packet + * + * This may be called from either NET_RX_SOFTIRQ or IRQ. + */ +void fcoe_ctlr_recv(struct fcoe_ctlr *fip, struct sk_buff *skb) +{ + skb_queue_tail(&fip->fip_recv_list, skb); + schedule_work(&fip->recv_work); +} +EXPORT_SYMBOL(fcoe_ctlr_recv); + +/** + * fcoe_ctlr_recv_handler() - Receive a FIP frame + * @fip: The FCoE controller that received the frame + * @skb: The received FIP frame + * + * Returns non-zero if the frame is dropped. + */ +static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb) +{ + struct fip_header *fiph; + struct ethhdr *eh; + enum fip_state state; + u16 op; + u8 sub; + + if (skb_linearize(skb)) + goto drop; + if (skb->len < sizeof(*fiph)) + goto drop; + eh = eth_hdr(skb); + if (fip->mode == FIP_MODE_VN2VN) { + if (compare_ether_addr(eh->h_dest, fip->ctl_src_addr) && + compare_ether_addr(eh->h_dest, fcoe_all_vn2vn) && + compare_ether_addr(eh->h_dest, fcoe_all_p2p)) + goto drop; + } else if (compare_ether_addr(eh->h_dest, fip->ctl_src_addr) && + compare_ether_addr(eh->h_dest, fcoe_all_enode)) + goto drop; + fiph = (struct fip_header *)skb->data; + op = ntohs(fiph->fip_op); + sub = fiph->fip_subcode; + + if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER) + goto drop; + if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len) + goto drop; + + mutex_lock(&fip->ctlr_mutex); + state = fip->state; + if (state == FIP_ST_AUTO) { + fip->map_dest = 0; + fcoe_ctlr_set_state(fip, FIP_ST_ENABLED); + state = FIP_ST_ENABLED; + LIBFCOE_FIP_DBG(fip, "Using FIP mode\n"); + } + mutex_unlock(&fip->ctlr_mutex); + + if (fip->mode == FIP_MODE_VN2VN && op == FIP_OP_VN2VN) + return fcoe_ctlr_vn_recv(fip, skb); + + if (state != FIP_ST_ENABLED && state != FIP_ST_VNMP_UP && + state != FIP_ST_VNMP_CLAIM) + goto drop; + + if (op == FIP_OP_LS) { + fcoe_ctlr_recv_els(fip, skb); /* consumes skb */ + return 0; + } + + if (state != FIP_ST_ENABLED) + goto drop; + + if (op == FIP_OP_DISC && sub == FIP_SC_ADV) + fcoe_ctlr_recv_adv(fip, skb); + else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) + fcoe_ctlr_recv_clr_vlink(fip, fiph); + kfree_skb(skb); + return 0; +drop: + kfree_skb(skb); + return -1; +} + +/** + * fcoe_ctlr_select() - Select the best FCF (if possible) + * @fip: The FCoE controller + * + * Returns the selected FCF, or NULL if none are usable. + * + * If there are conflicting advertisements, no FCF can be chosen. + * + * If there is already a selected FCF, this will choose a better one or + * an equivalent one that hasn't already been sent a FLOGI. + * + * Called with lock held. 
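 *
 * In short, mirroring the loop below: every advertised FCF must agree on
 * fabric_name, VFID and FC-MAP or nothing is selected at all; FCFs that
 * are unusable or were already sent a FLOGI are skipped; among the rest
 * the lowest pri value wins.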
+ */ +static struct fcoe_fcf *fcoe_ctlr_select(struct fcoe_ctlr *fip) +{ + struct fcoe_fcf *fcf; + struct fcoe_fcf *best = fip->sel_fcf; + struct fcoe_fcf *first; + + first = list_first_entry(&fip->fcfs, struct fcoe_fcf, list); + + list_for_each_entry(fcf, &fip->fcfs, list) { + LIBFCOE_FIP_DBG(fip, "consider FCF fab %16.16llx " + "VFID %d mac %pM map %x val %d " + "sent %u pri %u\n", + fcf->fabric_name, fcf->vfid, fcf->fcf_mac, + fcf->fc_map, fcoe_ctlr_mtu_valid(fcf), + fcf->flogi_sent, fcf->pri); + if (fcf->fabric_name != first->fabric_name || + fcf->vfid != first->vfid || + fcf->fc_map != first->fc_map) { + LIBFCOE_FIP_DBG(fip, "Conflicting fabric, VFID, " + "or FC-MAP\n"); + return NULL; + } + if (fcf->flogi_sent) + continue; + if (!fcoe_ctlr_fcf_usable(fcf)) { + LIBFCOE_FIP_DBG(fip, "FCF for fab %16.16llx " + "map %x %svalid %savailable\n", + fcf->fabric_name, fcf->fc_map, + (fcf->flags & FIP_FL_SOL) ? "" : "in", + (fcf->flags & FIP_FL_AVAIL) ? + "" : "un"); + continue; + } + if (!best || fcf->pri < best->pri || best->flogi_sent) + best = fcf; + } + fip->sel_fcf = best; + if (best) { + LIBFCOE_FIP_DBG(fip, "using FCF mac %pM\n", best->fcf_mac); + fip->port_ka_time = jiffies + + msecs_to_jiffies(FIP_VN_KA_PERIOD); + fip->ctlr_ka_time = jiffies + best->fka_period; + if (time_before(fip->ctlr_ka_time, fip->timer.expires)) + mod_timer(&fip->timer, fip->ctlr_ka_time); + } + return best; +} + +/** + * fcoe_ctlr_flogi_send_locked() - send FIP-encapsulated FLOGI to current FCF + * @fip: The FCoE controller + * + * Returns non-zero error if it could not be sent. + * + * Called with ctlr_mutex and ctlr_lock held. + * Caller must verify that fip->sel_fcf is not NULL. + */ +static int fcoe_ctlr_flogi_send_locked(struct fcoe_ctlr *fip) +{ + struct sk_buff *skb; + struct sk_buff *skb_orig; + struct fc_frame_header *fh; + int error; + + skb_orig = fip->flogi_req; + if (!skb_orig) + return -EINVAL; + + /* + * Clone and send the FLOGI request. If clone fails, use original. + */ + skb = skb_clone(skb_orig, GFP_ATOMIC); + if (!skb) { + skb = skb_orig; + fip->flogi_req = NULL; + } + fh = (struct fc_frame_header *)skb->data; + error = fcoe_ctlr_encaps(fip, fip->lp, FIP_DT_FLOGI, skb, + ntoh24(fh->fh_d_id)); + if (error) { + kfree_skb(skb); + return error; + } + fip->send(fip, skb); + fip->sel_fcf->flogi_sent = 1; + return 0; +} + +/** + * fcoe_ctlr_flogi_retry() - resend FLOGI request to a new FCF if possible + * @fip: The FCoE controller + * + * Returns non-zero error code if there's no FLOGI request to retry or + * no alternate FCF available. + */ +static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip) +{ + struct fcoe_fcf *fcf; + int error; + + mutex_lock(&fip->ctlr_mutex); + spin_lock_bh(&fip->ctlr_lock); + LIBFCOE_FIP_DBG(fip, "re-sending FLOGI - reselect\n"); + fcf = fcoe_ctlr_select(fip); + if (!fcf || fcf->flogi_sent) { + kfree_skb(fip->flogi_req); + fip->flogi_req = NULL; + error = -ENOENT; + } else { + fcoe_ctlr_solicit(fip, NULL); + error = fcoe_ctlr_flogi_send_locked(fip); + } + spin_unlock_bh(&fip->ctlr_lock); + mutex_unlock(&fip->ctlr_mutex); + return error; +} + + +/** + * fcoe_ctlr_flogi_send() - Handle sending of FIP FLOGI. + * @fip: The FCoE controller that timed out + * + * Done here because fcoe_ctlr_els_send() can't get mutex. + * + * Called with ctlr_mutex held. The caller must not hold ctlr_lock. 
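 *
 * If the currently selected FCF has already been sent a FLOGI (i.e. this
 * is a retry), a different usable FCF is selected first; once every
 * candidate has been tried, the flogi_sent markers are cleared and
 * selection starts over.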
+ */ +static void fcoe_ctlr_flogi_send(struct fcoe_ctlr *fip) +{ + struct fcoe_fcf *fcf; + + spin_lock_bh(&fip->ctlr_lock); + fcf = fip->sel_fcf; + if (!fcf || !fip->flogi_req_send) + goto unlock; + + LIBFCOE_FIP_DBG(fip, "sending FLOGI\n"); + + /* + * If this FLOGI is being sent due to a timeout retry + * to the same FCF as before, select a different FCF if possible. + */ + if (fcf->flogi_sent) { + LIBFCOE_FIP_DBG(fip, "sending FLOGI - reselect\n"); + fcf = fcoe_ctlr_select(fip); + if (!fcf || fcf->flogi_sent) { + LIBFCOE_FIP_DBG(fip, "sending FLOGI - clearing\n"); + list_for_each_entry(fcf, &fip->fcfs, list) + fcf->flogi_sent = 0; + fcf = fcoe_ctlr_select(fip); + } + } + if (fcf) { + fcoe_ctlr_flogi_send_locked(fip); + fip->flogi_req_send = 0; + } else /* XXX */ + LIBFCOE_FIP_DBG(fip, "No FCF selected - defer send\n"); +unlock: + spin_unlock_bh(&fip->ctlr_lock); +} + +/** + * fcoe_ctlr_timeout() - FIP timeout handler + * @arg: The FCoE controller that timed out + */ +static void fcoe_ctlr_timeout(unsigned long arg) +{ + struct fcoe_ctlr *fip = (struct fcoe_ctlr *)arg; + + schedule_work(&fip->timer_work); +} + +/** + * fcoe_ctlr_timer_work() - Worker thread function for timer work + * @work: Handle to a FCoE controller + * + * Ages FCFs. Triggers FCF selection if possible. + * Sends keep-alives and resets. + */ +static void fcoe_ctlr_timer_work(struct work_struct *work) +{ + struct fcoe_ctlr *fip; + struct fc_lport *vport; + u8 *mac; + u8 reset = 0; + u8 send_ctlr_ka = 0; + u8 send_port_ka = 0; + struct fcoe_fcf *sel; + struct fcoe_fcf *fcf; + unsigned long next_timer; + + fip = container_of(work, struct fcoe_ctlr, timer_work); + if (fip->mode == FIP_MODE_VN2VN) + return fcoe_ctlr_vn_timeout(fip); + mutex_lock(&fip->ctlr_mutex); + if (fip->state == FIP_ST_DISABLED) { + mutex_unlock(&fip->ctlr_mutex); + return; + } + + fcf = fip->sel_fcf; + next_timer = fcoe_ctlr_age_fcfs(fip); + + sel = fip->sel_fcf; + if (!sel && fip->sel_time) { + if (time_after_eq(jiffies, fip->sel_time)) { + sel = fcoe_ctlr_select(fip); + fip->sel_time = 0; + } else if (time_after(next_timer, fip->sel_time)) + next_timer = fip->sel_time; + } + + if (sel && fip->flogi_req_send) + fcoe_ctlr_flogi_send(fip); + else if (!sel && fcf) + reset = 1; + + if (sel && !sel->fd_flags) { + if (time_after_eq(jiffies, fip->ctlr_ka_time)) { + fip->ctlr_ka_time = jiffies + sel->fka_period; + send_ctlr_ka = 1; + } + if (time_after(next_timer, fip->ctlr_ka_time)) + next_timer = fip->ctlr_ka_time; + + if (time_after_eq(jiffies, fip->port_ka_time)) { + fip->port_ka_time = jiffies + + msecs_to_jiffies(FIP_VN_KA_PERIOD); + send_port_ka = 1; + } + if (time_after(next_timer, fip->port_ka_time)) + next_timer = fip->port_ka_time; + } + if (!list_empty(&fip->fcfs)) + mod_timer(&fip->timer, next_timer); + mutex_unlock(&fip->ctlr_mutex); + + if (reset) { + fc_lport_reset(fip->lp); + /* restart things with a solicitation */ + fcoe_ctlr_solicit(fip, NULL); + } + + if (send_ctlr_ka) + fcoe_ctlr_send_keep_alive(fip, NULL, 0, fip->ctl_src_addr); + + if (send_port_ka) { + mutex_lock(&fip->lp->lp_mutex); + mac = fip->get_src_addr(fip->lp); + fcoe_ctlr_send_keep_alive(fip, fip->lp, 1, mac); + list_for_each_entry(vport, &fip->lp->vports, list) { + mac = fip->get_src_addr(vport); + fcoe_ctlr_send_keep_alive(fip, vport, 1, mac); + } + mutex_unlock(&fip->lp->lp_mutex); + } +} + +/** + * fcoe_ctlr_recv_work() - Worker thread function for receiving FIP frames + * @recv_work: Handle to a FCoE controller + */ +static void fcoe_ctlr_recv_work(struct work_struct 
*recv_work) +{ + struct fcoe_ctlr *fip; + struct sk_buff *skb; + + fip = container_of(recv_work, struct fcoe_ctlr, recv_work); + while ((skb = skb_dequeue(&fip->fip_recv_list))) + fcoe_ctlr_recv_handler(fip, skb); +} + +/** + * fcoe_ctlr_recv_flogi() - Snoop pre-FIP receipt of FLOGI response + * @fip: The FCoE controller + * @fp: The FC frame to snoop + * + * Snoop potential response to FLOGI or even incoming FLOGI. + * + * The caller has checked that we are waiting for login as indicated + * by fip->flogi_oxid != FC_XID_UNKNOWN. + * + * The caller is responsible for freeing the frame. + * Fill in the granted_mac address. + * + * Return non-zero if the frame should not be delivered to libfc. + */ +int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_lport *lport, + struct fc_frame *fp) +{ + struct fc_frame_header *fh; + u8 op; + u8 *sa; + + sa = eth_hdr(&fp->skb)->h_source; + fh = fc_frame_header_get(fp); + if (fh->fh_type != FC_TYPE_ELS) + return 0; + + op = fc_frame_payload_op(fp); + if (op == ELS_LS_ACC && fh->fh_r_ctl == FC_RCTL_ELS_REP && + fip->flogi_oxid == ntohs(fh->fh_ox_id)) { + + mutex_lock(&fip->ctlr_mutex); + if (fip->state != FIP_ST_AUTO && fip->state != FIP_ST_NON_FIP) { + mutex_unlock(&fip->ctlr_mutex); + return -EINVAL; + } + fcoe_ctlr_set_state(fip, FIP_ST_NON_FIP); + LIBFCOE_FIP_DBG(fip, + "received FLOGI LS_ACC using non-FIP mode\n"); + + /* + * FLOGI accepted. + * If the src mac addr is FC_OUI-based, then we mark the + * address_mode flag to use FC_OUI-based Ethernet DA. + * Otherwise we use the FCoE gateway addr + */ + if (!compare_ether_addr(sa, (u8[6])FC_FCOE_FLOGI_MAC)) { + fcoe_ctlr_map_dest(fip); + } else { + memcpy(fip->dest_addr, sa, ETH_ALEN); + fip->map_dest = 0; + } + fip->flogi_oxid = FC_XID_UNKNOWN; + mutex_unlock(&fip->ctlr_mutex); + fc_fcoe_set_mac(fr_cb(fp)->granted_mac, fh->fh_d_id); + } else if (op == ELS_FLOGI && fh->fh_r_ctl == FC_RCTL_ELS_REQ && sa) { + /* + * Save source MAC for point-to-point responses. + */ + mutex_lock(&fip->ctlr_mutex); + if (fip->state == FIP_ST_AUTO || fip->state == FIP_ST_NON_FIP) { + memcpy(fip->dest_addr, sa, ETH_ALEN); + fip->map_dest = 0; + if (fip->state == FIP_ST_AUTO) + LIBFCOE_FIP_DBG(fip, "received non-FIP FLOGI. 
" + "Setting non-FIP mode\n"); + fcoe_ctlr_set_state(fip, FIP_ST_NON_FIP); + } + mutex_unlock(&fip->ctlr_mutex); + } + return 0; +} +EXPORT_SYMBOL(fcoe_ctlr_recv_flogi); + +/** + * fcoe_wwn_from_mac() - Converts a 48-bit IEEE MAC address to a 64-bit FC WWN + * @mac: The MAC address to convert + * @scheme: The scheme to use when converting + * @port: The port indicator for converting + * + * Returns: u64 fc world wide name + */ +u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN], + unsigned int scheme, unsigned int port) +{ + u64 wwn; + u64 host_mac; + + /* The MAC is in NO, so flip only the low 48 bits */ + host_mac = ((u64) mac[0] << 40) | + ((u64) mac[1] << 32) | + ((u64) mac[2] << 24) | + ((u64) mac[3] << 16) | + ((u64) mac[4] << 8) | + (u64) mac[5]; + + WARN_ON(host_mac >= (1ULL << 48)); + wwn = host_mac | ((u64) scheme << 60); + switch (scheme) { + case 1: + WARN_ON(port != 0); + break; + case 2: + WARN_ON(port >= 0xfff); + wwn |= (u64) port << 48; + break; + default: + WARN_ON(1); + break; + } + + return wwn; +} +EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac); + +/** + * fcoe_ctlr_rport() - return the fcoe_rport for a given fc_rport_priv + * @rdata: libfc remote port + */ +static inline struct fcoe_rport *fcoe_ctlr_rport(struct fc_rport_priv *rdata) +{ + return (struct fcoe_rport *)(rdata + 1); +} + +/** + * fcoe_ctlr_vn_send() - Send a FIP VN2VN Probe Request or Reply. + * @fip: The FCoE controller + * @sub: sub-opcode for probe request, reply, or advertisement. + * @dest: The destination Ethernet MAC address + * @min_len: minimum size of the Ethernet payload to be sent + */ +static void fcoe_ctlr_vn_send(struct fcoe_ctlr *fip, + enum fip_vn2vn_subcode sub, + const u8 *dest, size_t min_len) +{ + struct sk_buff *skb; + struct fip_frame { + struct ethhdr eth; + struct fip_header fip; + struct fip_mac_desc mac; + struct fip_wwn_desc wwnn; + struct fip_vn_desc vn; + } __packed * frame; + struct fip_fc4_feat *ff; + struct fip_size_desc *size; + u32 fcp_feat; + size_t len; + size_t dlen; + + len = sizeof(*frame); + dlen = 0; + if (sub == FIP_SC_VN_CLAIM_NOTIFY || sub == FIP_SC_VN_CLAIM_REP) { + dlen = sizeof(struct fip_fc4_feat) + + sizeof(struct fip_size_desc); + len += dlen; + } + dlen += sizeof(frame->mac) + sizeof(frame->wwnn) + sizeof(frame->vn); + len = max(len, min_len + sizeof(struct ethhdr)); + + skb = dev_alloc_skb(len); + if (!skb) + return; + + frame = (struct fip_frame *)skb->data; + memset(frame, 0, len); + memcpy(frame->eth.h_dest, dest, ETH_ALEN); + memcpy(frame->eth.h_source, fip->ctl_src_addr, ETH_ALEN); + frame->eth.h_proto = htons(ETH_P_FIP); + + frame->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER); + frame->fip.fip_op = htons(FIP_OP_VN2VN); + frame->fip.fip_subcode = sub; + frame->fip.fip_dl_len = htons(dlen / FIP_BPW); + + frame->mac.fd_desc.fip_dtype = FIP_DT_MAC; + frame->mac.fd_desc.fip_dlen = sizeof(frame->mac) / FIP_BPW; + memcpy(frame->mac.fd_mac, fip->ctl_src_addr, ETH_ALEN); + + frame->wwnn.fd_desc.fip_dtype = FIP_DT_NAME; + frame->wwnn.fd_desc.fip_dlen = sizeof(frame->wwnn) / FIP_BPW; + put_unaligned_be64(fip->lp->wwnn, &frame->wwnn.fd_wwn); + + frame->vn.fd_desc.fip_dtype = FIP_DT_VN_ID; + frame->vn.fd_desc.fip_dlen = sizeof(frame->vn) / FIP_BPW; + hton24(frame->vn.fd_mac, FIP_VN_FC_MAP); + hton24(frame->vn.fd_mac + 3, fip->port_id); + hton24(frame->vn.fd_fc_id, fip->port_id); + put_unaligned_be64(fip->lp->wwpn, &frame->vn.fd_wwpn); + + /* + * For claims, add FC-4 features. + * TBD: Add interface to get fc-4 types and features from libfc. 
+ */ + if (sub == FIP_SC_VN_CLAIM_NOTIFY || sub == FIP_SC_VN_CLAIM_REP) { + ff = (struct fip_fc4_feat *)(frame + 1); + ff->fd_desc.fip_dtype = FIP_DT_FC4F; + ff->fd_desc.fip_dlen = sizeof(*ff) / FIP_BPW; + ff->fd_fts = fip->lp->fcts; + + fcp_feat = 0; + if (fip->lp->service_params & FCP_SPPF_INIT_FCN) + fcp_feat |= FCP_FEAT_INIT; + if (fip->lp->service_params & FCP_SPPF_TARG_FCN) + fcp_feat |= FCP_FEAT_TARG; + fcp_feat <<= (FC_TYPE_FCP * 4) % 32; + ff->fd_ff.fd_feat[FC_TYPE_FCP * 4 / 32] = htonl(fcp_feat); + + size = (struct fip_size_desc *)(ff + 1); + size->fd_desc.fip_dtype = FIP_DT_FCOE_SIZE; + size->fd_desc.fip_dlen = sizeof(*size) / FIP_BPW; + size->fd_size = htons(fcoe_ctlr_fcoe_size(fip)); + } + + skb_put(skb, len); + skb->protocol = htons(ETH_P_FIP); + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + + fip->send(fip, skb); +} + +/** + * fcoe_ctlr_vn_rport_callback - Event handler for rport events. + * @lport: The lport which is receiving the event + * @rdata: remote port private data + * @event: The event that occured + * + * Locking Note: The rport lock must not be held when calling this function. + */ +static void fcoe_ctlr_vn_rport_callback(struct fc_lport *lport, + struct fc_rport_priv *rdata, + enum fc_rport_event event) +{ + struct fcoe_ctlr *fip = lport->disc.priv; + struct fcoe_rport *frport = fcoe_ctlr_rport(rdata); + + LIBFCOE_FIP_DBG(fip, "vn_rport_callback %x event %d\n", + rdata->ids.port_id, event); + + mutex_lock(&fip->ctlr_mutex); + switch (event) { + case RPORT_EV_READY: + frport->login_count = 0; + break; + case RPORT_EV_LOGO: + case RPORT_EV_FAILED: + case RPORT_EV_STOP: + frport->login_count++; + if (frport->login_count > FCOE_CTLR_VN2VN_LOGIN_LIMIT) { + LIBFCOE_FIP_DBG(fip, + "rport FLOGI limited port_id %6.6x\n", + rdata->ids.port_id); + lport->tt.rport_logoff(rdata); + } + break; + default: + break; + } + mutex_unlock(&fip->ctlr_mutex); +} + +static struct fc_rport_operations fcoe_ctlr_vn_rport_ops = { + .event_callback = fcoe_ctlr_vn_rport_callback, +}; + +/** + * fcoe_ctlr_disc_stop_locked() - stop discovery in VN2VN mode + * @fip: The FCoE controller + * + * Called with ctlr_mutex held. + */ +static void fcoe_ctlr_disc_stop_locked(struct fc_lport *lport) +{ + mutex_lock(&lport->disc.disc_mutex); + lport->disc.disc_callback = NULL; + mutex_unlock(&lport->disc.disc_mutex); +} + +/** + * fcoe_ctlr_disc_stop() - stop discovery in VN2VN mode + * @fip: The FCoE controller + * + * Called through the local port template for discovery. + * Called without the ctlr_mutex held. + */ +static void fcoe_ctlr_disc_stop(struct fc_lport *lport) +{ + struct fcoe_ctlr *fip = lport->disc.priv; + + mutex_lock(&fip->ctlr_mutex); + fcoe_ctlr_disc_stop_locked(lport); + mutex_unlock(&fip->ctlr_mutex); +} + +/** + * fcoe_ctlr_disc_stop_final() - stop discovery for shutdown in VN2VN mode + * @fip: The FCoE controller + * + * Called through the local port template for discovery. + * Called without the ctlr_mutex held. + */ +static void fcoe_ctlr_disc_stop_final(struct fc_lport *lport) +{ + fcoe_ctlr_disc_stop(lport); + lport->tt.rport_flush_queue(); + synchronize_rcu(); +} + +/** + * fcoe_ctlr_vn_restart() - VN2VN probe restart with new port_id + * @fip: The FCoE controller + * + * Called with fcoe_ctlr lock held. + */ +static void fcoe_ctlr_vn_restart(struct fcoe_ctlr *fip) +{ + unsigned long wait; + u32 port_id; + + fcoe_ctlr_disc_stop_locked(fip->lp); + + /* + * Get proposed port ID. + * If this is the first try after link up, use any previous port_id. 
+ * If there was none, use the low bits of the port_name. + * On subsequent tries, get the next random one. + * Don't use reserved IDs, use another non-zero value, just as random. + */ + port_id = fip->port_id; + if (fip->probe_tries) + port_id = prandom32(&fip->rnd_state) & 0xffff; + else if (!port_id) + port_id = fip->lp->wwpn & 0xffff; + if (!port_id || port_id == 0xffff) + port_id = 1; + fip->port_id = port_id; + + if (fip->probe_tries < FIP_VN_RLIM_COUNT) { + fip->probe_tries++; + wait = random32() % FIP_VN_PROBE_WAIT; + } else + wait = FIP_VN_RLIM_INT; + mod_timer(&fip->timer, jiffies + msecs_to_jiffies(wait)); + fcoe_ctlr_set_state(fip, FIP_ST_VNMP_START); +} + +/** + * fcoe_ctlr_vn_start() - Start in VN2VN mode + * @fip: The FCoE controller + * + * Called with fcoe_ctlr lock held. + */ +static void fcoe_ctlr_vn_start(struct fcoe_ctlr *fip) +{ + fip->probe_tries = 0; + prandom32_seed(&fip->rnd_state, fip->lp->wwpn); + fcoe_ctlr_vn_restart(fip); +} + +/** + * fcoe_ctlr_vn_parse - parse probe request or response + * @fip: The FCoE controller + * @skb: incoming packet + * @rdata: buffer for resulting parsed VN entry plus fcoe_rport + * + * Returns non-zero error number on error. + * Does not consume the packet. + */ +static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip, + struct sk_buff *skb, + struct fc_rport_priv *rdata) +{ + struct fip_header *fiph; + struct fip_desc *desc = NULL; + struct fip_mac_desc *macd = NULL; + struct fip_wwn_desc *wwn = NULL; + struct fip_vn_desc *vn = NULL; + struct fip_size_desc *size = NULL; + struct fcoe_rport *frport; + size_t rlen; + size_t dlen; + u32 desc_mask = 0; + u32 dtype; + u8 sub; + + memset(rdata, 0, sizeof(*rdata) + sizeof(*frport)); + frport = fcoe_ctlr_rport(rdata); + + fiph = (struct fip_header *)skb->data; + frport->flags = ntohs(fiph->fip_flags); + + sub = fiph->fip_subcode; + switch (sub) { + case FIP_SC_VN_PROBE_REQ: + case FIP_SC_VN_PROBE_REP: + case FIP_SC_VN_BEACON: + desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME) | + BIT(FIP_DT_VN_ID); + break; + case FIP_SC_VN_CLAIM_NOTIFY: + case FIP_SC_VN_CLAIM_REP: + desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME) | + BIT(FIP_DT_VN_ID) | BIT(FIP_DT_FC4F) | + BIT(FIP_DT_FCOE_SIZE); + break; + default: + LIBFCOE_FIP_DBG(fip, "vn_parse unknown subcode %u\n", sub); + return -EINVAL; + } + + rlen = ntohs(fiph->fip_dl_len) * 4; + if (rlen + sizeof(*fiph) > skb->len) + return -EINVAL; + + desc = (struct fip_desc *)(fiph + 1); + while (rlen > 0) { + dlen = desc->fip_dlen * FIP_BPW; + if (dlen < sizeof(*desc) || dlen > rlen) + return -EINVAL; + + dtype = desc->fip_dtype; + if (dtype < 32) { + if (!(desc_mask & BIT(dtype))) { + LIBFCOE_FIP_DBG(fip, + "unexpected or duplicated desc " + "desc type %u in " + "FIP VN2VN subtype %u\n", + dtype, sub); + return -EINVAL; + } + desc_mask &= ~BIT(dtype); + } + + switch (dtype) { + case FIP_DT_MAC: + if (dlen != sizeof(struct fip_mac_desc)) + goto len_err; + macd = (struct fip_mac_desc *)desc; + if (!is_valid_ether_addr(macd->fd_mac)) { + LIBFCOE_FIP_DBG(fip, + "Invalid MAC addr %pM in FIP VN2VN\n", + macd->fd_mac); + return -EINVAL; + } + memcpy(frport->enode_mac, macd->fd_mac, ETH_ALEN); + break; + case FIP_DT_NAME: + if (dlen != sizeof(struct fip_wwn_desc)) + goto len_err; + wwn = (struct fip_wwn_desc *)desc; + rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn); + break; + case FIP_DT_VN_ID: + if (dlen != sizeof(struct fip_vn_desc)) + goto len_err; + vn = (struct fip_vn_desc *)desc; + memcpy(frport->vn_mac, vn->fd_mac, ETH_ALEN); + rdata->ids.port_id = 
ntoh24(vn->fd_fc_id); + rdata->ids.port_name = get_unaligned_be64(&vn->fd_wwpn); + break; + case FIP_DT_FC4F: + if (dlen != sizeof(struct fip_fc4_feat)) + goto len_err; + break; + case FIP_DT_FCOE_SIZE: + if (dlen != sizeof(struct fip_size_desc)) + goto len_err; + size = (struct fip_size_desc *)desc; + frport->fcoe_len = ntohs(size->fd_size); + break; + default: + LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x " + "in FIP probe\n", dtype); + /* standard says ignore unknown descriptors >= 128 */ + if (dtype < FIP_DT_VENDOR_BASE) + return -EINVAL; + break; + } + desc = (struct fip_desc *)((char *)desc + dlen); + rlen -= dlen; + } + return 0; + +len_err: + LIBFCOE_FIP_DBG(fip, "FIP length error in descriptor type %x len %zu\n", + dtype, dlen); + return -EINVAL; +} + +/** + * fcoe_ctlr_vn_send_claim() - send multicast FIP VN2VN Claim Notification. + * @fip: The FCoE controller + * + * Called with ctlr_mutex held. + */ +static void fcoe_ctlr_vn_send_claim(struct fcoe_ctlr *fip) +{ + fcoe_ctlr_vn_send(fip, FIP_SC_VN_CLAIM_NOTIFY, fcoe_all_vn2vn, 0); + fip->sol_time = jiffies; +} + +/** + * fcoe_ctlr_vn_probe_req() - handle incoming VN2VN probe request. + * @fip: The FCoE controller + * @rdata: parsed remote port with frport from the probe request + * + * Called with ctlr_mutex held. + */ +static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip, + struct fc_rport_priv *rdata) +{ + struct fcoe_rport *frport = fcoe_ctlr_rport(rdata); + + if (rdata->ids.port_id != fip->port_id) + return; + + switch (fip->state) { + case FIP_ST_VNMP_CLAIM: + case FIP_ST_VNMP_UP: + fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REP, + frport->enode_mac, 0); + break; + case FIP_ST_VNMP_PROBE1: + case FIP_ST_VNMP_PROBE2: + /* + * Decide whether to reply to the Probe. + * Our selected address is never a "recorded" one, so + * only reply if our WWPN is greater and the + * Probe's REC bit is not set. + * If we don't reply, we will change our address. + */ + if (fip->lp->wwpn > rdata->ids.port_name && + !(frport->flags & FIP_FL_REC_OR_P2P)) { + fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REP, + frport->enode_mac, 0); + break; + } + /* fall through */ + case FIP_ST_VNMP_START: + fcoe_ctlr_vn_restart(fip); + break; + default: + break; + } +} + +/** + * fcoe_ctlr_vn_probe_reply() - handle incoming VN2VN probe reply. + * @fip: The FCoE controller + * @rdata: parsed remote port with frport from the probe request + * + * Called with ctlr_mutex held. + */ +static void fcoe_ctlr_vn_probe_reply(struct fcoe_ctlr *fip, + struct fc_rport_priv *rdata) +{ + if (rdata->ids.port_id != fip->port_id) + return; + switch (fip->state) { + case FIP_ST_VNMP_START: + case FIP_ST_VNMP_PROBE1: + case FIP_ST_VNMP_PROBE2: + case FIP_ST_VNMP_CLAIM: + fcoe_ctlr_vn_restart(fip); + break; + case FIP_ST_VNMP_UP: + fcoe_ctlr_vn_send_claim(fip); + break; + default: + break; + } +} + +/** + * fcoe_ctlr_vn_add() - Add a VN2VN entry to the list, based on a claim reply. + * @fip: The FCoE controller + * @new: newly-parsed remote port with frport as a template for new rdata + * + * Called with ctlr_mutex held. 
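For reference, the VN_Port descriptor built by fcoe_ctlr_vn_send() above carries a MAC address composed of the VN2VN FC-MAP prefix followed by the 24-bit port ID (the hton24() pair on fd_mac). A minimal user-space sketch of that composition; the FC-MAP value and port ID below are placeholders, not the FIP_VN_FC_MAP constant.

#include <stdint.h>
#include <stdio.h>

/* Sketch: VN_Port MAC = <3-byte FC-MAP><3-byte port_id>, mirroring
 * hton24(fd_mac, FIP_VN_FC_MAP); hton24(fd_mac + 3, port_id) above.
 */
static void vn_port_mac(uint8_t mac[6], uint32_t fc_map, uint32_t port_id)
{
	mac[0] = fc_map >> 16;
	mac[1] = fc_map >> 8;
	mac[2] = fc_map;
	mac[3] = port_id >> 16;
	mac[4] = port_id >> 8;
	mac[5] = port_id;
}

int main(void)
{
	uint8_t mac[6];

	vn_port_mac(mac, 0x0e0f00 /* placeholder FC-MAP */, 0x123456 /* hypothetical port ID */);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}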
+ */ +static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fc_rport_priv *new) +{ + struct fc_lport *lport = fip->lp; + struct fc_rport_priv *rdata; + struct fc_rport_identifiers *ids; + struct fcoe_rport *frport; + u32 port_id; + + port_id = new->ids.port_id; + if (port_id == fip->port_id) + return; + + mutex_lock(&lport->disc.disc_mutex); + rdata = lport->tt.rport_create(lport, port_id); + if (!rdata) { + mutex_unlock(&lport->disc.disc_mutex); + return; + } + + rdata->ops = &fcoe_ctlr_vn_rport_ops; + rdata->disc_id = lport->disc.disc_id; + + ids = &rdata->ids; + if ((ids->port_name != -1 && ids->port_name != new->ids.port_name) || + (ids->node_name != -1 && ids->node_name != new->ids.node_name)) + lport->tt.rport_logoff(rdata); + ids->port_name = new->ids.port_name; + ids->node_name = new->ids.node_name; + mutex_unlock(&lport->disc.disc_mutex); + + frport = fcoe_ctlr_rport(rdata); + LIBFCOE_FIP_DBG(fip, "vn_add rport %6.6x %s\n", + port_id, frport->fcoe_len ? "old" : "new"); + *frport = *fcoe_ctlr_rport(new); + frport->time = 0; +} + +/** + * fcoe_ctlr_vn_lookup() - Find VN remote port's MAC address + * @fip: The FCoE controller + * @port_id: The port_id of the remote VN_node + * @mac: buffer which will hold the VN_NODE destination MAC address, if found. + * + * Returns non-zero error if no remote port found. + */ +static int fcoe_ctlr_vn_lookup(struct fcoe_ctlr *fip, u32 port_id, u8 *mac) +{ + struct fc_lport *lport = fip->lp; + struct fc_rport_priv *rdata; + struct fcoe_rport *frport; + int ret = -1; + + rcu_read_lock(); + rdata = lport->tt.rport_lookup(lport, port_id); + if (rdata) { + frport = fcoe_ctlr_rport(rdata); + memcpy(mac, frport->enode_mac, ETH_ALEN); + ret = 0; + } + rcu_read_unlock(); + return ret; +} + +/** + * fcoe_ctlr_vn_claim_notify() - handle received FIP VN2VN Claim Notification + * @fip: The FCoE controller + * @new: newly-parsed remote port with frport as a template for new rdata + * + * Called with ctlr_mutex held. + */ +static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip, + struct fc_rport_priv *new) +{ + struct fcoe_rport *frport = fcoe_ctlr_rport(new); + + if (frport->flags & FIP_FL_REC_OR_P2P) { + fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0); + return; + } + switch (fip->state) { + case FIP_ST_VNMP_START: + case FIP_ST_VNMP_PROBE1: + case FIP_ST_VNMP_PROBE2: + if (new->ids.port_id == fip->port_id) + fcoe_ctlr_vn_restart(fip); + break; + case FIP_ST_VNMP_CLAIM: + case FIP_ST_VNMP_UP: + if (new->ids.port_id == fip->port_id) { + if (new->ids.port_name > fip->lp->wwpn) { + fcoe_ctlr_vn_restart(fip); + break; + } + fcoe_ctlr_vn_send_claim(fip); + break; + } + fcoe_ctlr_vn_send(fip, FIP_SC_VN_CLAIM_REP, frport->enode_mac, + min((u32)frport->fcoe_len, + fcoe_ctlr_fcoe_size(fip))); + fcoe_ctlr_vn_add(fip, new); + break; + default: + break; + } +} + +/** + * fcoe_ctlr_vn_claim_resp() - handle received Claim Response + * @fip: The FCoE controller that received the frame + * @new: newly-parsed remote port with frport from the Claim Response + * + * Called with ctlr_mutex held. + */ +static void fcoe_ctlr_vn_claim_resp(struct fcoe_ctlr *fip, + struct fc_rport_priv *new) +{ + LIBFCOE_FIP_DBG(fip, "claim resp from from rport %x - state %s\n", + new->ids.port_id, fcoe_ctlr_state(fip->state)); + if (fip->state == FIP_ST_VNMP_UP || fip->state == FIP_ST_VNMP_CLAIM) + fcoe_ctlr_vn_add(fip, new); +} + +/** + * fcoe_ctlr_vn_beacon() - handle received beacon. 
+ * @fip: The FCoE controller that received the frame + * @new: newly-parsed remote port with frport from the Beacon + * + * Called with ctlr_mutex held. + */ +static void fcoe_ctlr_vn_beacon(struct fcoe_ctlr *fip, + struct fc_rport_priv *new) +{ + struct fc_lport *lport = fip->lp; + struct fc_rport_priv *rdata; + struct fcoe_rport *frport; + + frport = fcoe_ctlr_rport(new); + if (frport->flags & FIP_FL_REC_OR_P2P) { + fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0); + return; + } + mutex_lock(&lport->disc.disc_mutex); + rdata = lport->tt.rport_lookup(lport, new->ids.port_id); + if (rdata) + kref_get(&rdata->kref); + mutex_unlock(&lport->disc.disc_mutex); + if (rdata) { + if (rdata->ids.node_name == new->ids.node_name && + rdata->ids.port_name == new->ids.port_name) { + frport = fcoe_ctlr_rport(rdata); + if (!frport->time && fip->state == FIP_ST_VNMP_UP) + lport->tt.rport_login(rdata); + frport->time = jiffies; + } + kref_put(&rdata->kref, lport->tt.rport_destroy); + return; + } + if (fip->state != FIP_ST_VNMP_UP) + return; + + /* + * Beacon from a new neighbor. + * Send a claim notify if one hasn't been sent recently. + * Don't add the neighbor yet. + */ + LIBFCOE_FIP_DBG(fip, "beacon from new rport %x. sending claim notify\n", + new->ids.port_id); + if (time_after(jiffies, + fip->sol_time + msecs_to_jiffies(FIP_VN_ANN_WAIT))) + fcoe_ctlr_vn_send_claim(fip); +} + +/** + * fcoe_ctlr_vn_age() - Check for VN_ports without recent beacons + * @fip: The FCoE controller + * + * Called with ctlr_mutex held. + * Called only in state FIP_ST_VNMP_UP. + * Returns the soonest time for next age-out or a time far in the future. + */ +static unsigned long fcoe_ctlr_vn_age(struct fcoe_ctlr *fip) +{ + struct fc_lport *lport = fip->lp; + struct fc_rport_priv *rdata; + struct fcoe_rport *frport; + unsigned long next_time; + unsigned long deadline; + + next_time = jiffies + msecs_to_jiffies(FIP_VN_BEACON_INT * 10); + mutex_lock(&lport->disc.disc_mutex); + list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) { + frport = fcoe_ctlr_rport(rdata); + if (!frport->time) + continue; + deadline = frport->time + + msecs_to_jiffies(FIP_VN_BEACON_INT * 25 / 10); + if (time_after_eq(jiffies, deadline)) { + frport->time = 0; + LIBFCOE_FIP_DBG(fip, + "port %16.16llx fc_id %6.6x beacon expired\n", + rdata->ids.port_name, rdata->ids.port_id); + lport->tt.rport_logoff(rdata); + } else if (time_before(deadline, next_time)) + next_time = deadline; + } + mutex_unlock(&lport->disc.disc_mutex); + return next_time; +} + +/** + * fcoe_ctlr_vn_recv() - Receive a FIP frame + * @fip: The FCoE controller that received the frame + * @skb: The received FIP frame + * + * Returns non-zero if the frame is dropped. + * Always consumes the frame. 
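For reference, the aging rule in fcoe_ctlr_vn_age() above logs a peer off once no beacon has arrived for 2.5 beacon intervals (the "* 25 / 10" factor in the deadline). A minimal sketch of that arithmetic with hypothetical times in milliseconds:

#include <stdio.h>

/* Sketch only: a VN2VN peer is logged off when its last beacon is older than
 * 2.5 * beacon_interval, matching the "* 25 / 10" deadline computed above.
 */
int main(void)
{
	unsigned long beacon_int_ms = 8000;     /* hypothetical beacon interval */
	unsigned long last_beacon_ms = 100000;  /* hypothetical time of last beacon */
	unsigned long now_ms = 121000;
	unsigned long deadline = last_beacon_ms + beacon_int_ms * 25 / 10;

	printf("deadline %lu ms, now %lu ms -> %s\n", deadline, now_ms,
	       now_ms >= deadline ? "log off peer" : "still alive");
	return 0;
}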
+ */ +static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb) +{ + struct fip_header *fiph; + enum fip_vn2vn_subcode sub; + struct { + struct fc_rport_priv rdata; + struct fcoe_rport frport; + } buf; + int rc; + + fiph = (struct fip_header *)skb->data; + sub = fiph->fip_subcode; + + rc = fcoe_ctlr_vn_parse(fip, skb, &buf.rdata); + if (rc) { + LIBFCOE_FIP_DBG(fip, "vn_recv vn_parse error %d\n", rc); + goto drop; + } + + mutex_lock(&fip->ctlr_mutex); + switch (sub) { + case FIP_SC_VN_PROBE_REQ: + fcoe_ctlr_vn_probe_req(fip, &buf.rdata); + break; + case FIP_SC_VN_PROBE_REP: + fcoe_ctlr_vn_probe_reply(fip, &buf.rdata); + break; + case FIP_SC_VN_CLAIM_NOTIFY: + fcoe_ctlr_vn_claim_notify(fip, &buf.rdata); + break; + case FIP_SC_VN_CLAIM_REP: + fcoe_ctlr_vn_claim_resp(fip, &buf.rdata); + break; + case FIP_SC_VN_BEACON: + fcoe_ctlr_vn_beacon(fip, &buf.rdata); + break; + default: + LIBFCOE_FIP_DBG(fip, "vn_recv unknown subcode %d\n", sub); + rc = -1; + break; + } + mutex_unlock(&fip->ctlr_mutex); +drop: + kfree_skb(skb); + return rc; +} + +/** + * fcoe_ctlr_disc_recv - discovery receive handler for VN2VN mode. + * @lport: The local port + * @fp: The received frame + * + * This should never be called since we don't see RSCNs or other + * fabric-generated ELSes. + */ +static void fcoe_ctlr_disc_recv(struct fc_lport *lport, struct fc_frame *fp) +{ + struct fc_seq_els_data rjt_data; + + rjt_data.reason = ELS_RJT_UNSUP; + rjt_data.explan = ELS_EXPL_NONE; + lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data); + fc_frame_free(fp); +} + +/** + * fcoe_ctlr_disc_recv - start discovery for VN2VN mode. + * @fip: The FCoE controller + * + * This sets a flag indicating that remote ports should be created + * and started for the peers we discover. We use the disc_callback + * pointer as that flag. Peers already discovered are created here. + * + * The lport lock is held during this call. The callback must be done + * later, without holding either the lport or discovery locks. + * The fcoe_ctlr lock may also be held during this call. + */ +static void fcoe_ctlr_disc_start(void (*callback)(struct fc_lport *, + enum fc_disc_event), + struct fc_lport *lport) +{ + struct fc_disc *disc = &lport->disc; + struct fcoe_ctlr *fip = disc->priv; + + mutex_lock(&disc->disc_mutex); + disc->disc_callback = callback; + disc->disc_id = (disc->disc_id + 2) | 1; + disc->pending = 1; + schedule_work(&fip->timer_work); + mutex_unlock(&disc->disc_mutex); +} + +/** + * fcoe_ctlr_vn_disc() - report FIP VN_port discovery results after claim state. + * @fip: The FCoE controller + * + * Starts the FLOGI and PLOGI login process to each discovered rport for which + * we've received at least one beacon. + * Performs the discovery complete callback. + */ +static void fcoe_ctlr_vn_disc(struct fcoe_ctlr *fip) +{ + struct fc_lport *lport = fip->lp; + struct fc_disc *disc = &lport->disc; + struct fc_rport_priv *rdata; + struct fcoe_rport *frport; + void (*callback)(struct fc_lport *, enum fc_disc_event); + + mutex_lock(&disc->disc_mutex); + callback = disc->pending ? disc->disc_callback : NULL; + disc->pending = 0; + list_for_each_entry_rcu(rdata, &disc->rports, peers) { + frport = fcoe_ctlr_rport(rdata); + if (frport->time) + lport->tt.rport_login(rdata); + } + mutex_unlock(&disc->disc_mutex); + if (callback) + callback(lport, DISC_EV_SUCCESS); +} + +/** + * fcoe_ctlr_vn_timeout - timer work function for VN2VN mode. 
+ * @fip: The FCoE controller + */ +static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *fip) +{ + unsigned long next_time; + u8 mac[ETH_ALEN]; + u32 new_port_id = 0; + + mutex_lock(&fip->ctlr_mutex); + switch (fip->state) { + case FIP_ST_VNMP_START: + fcoe_ctlr_set_state(fip, FIP_ST_VNMP_PROBE1); + fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0); + next_time = jiffies + msecs_to_jiffies(FIP_VN_PROBE_WAIT); + break; + case FIP_ST_VNMP_PROBE1: + fcoe_ctlr_set_state(fip, FIP_ST_VNMP_PROBE2); + fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0); + next_time = jiffies + msecs_to_jiffies(FIP_VN_ANN_WAIT); + break; + case FIP_ST_VNMP_PROBE2: + fcoe_ctlr_set_state(fip, FIP_ST_VNMP_CLAIM); + new_port_id = fip->port_id; + hton24(mac, FIP_VN_FC_MAP); + hton24(mac + 3, new_port_id); + fcoe_ctlr_map_dest(fip); + fip->update_mac(fip->lp, mac); + fcoe_ctlr_vn_send_claim(fip); + next_time = jiffies + msecs_to_jiffies(FIP_VN_ANN_WAIT); + break; + case FIP_ST_VNMP_CLAIM: + /* + * This may be invoked either by starting discovery so don't + * go to the next state unless it's been long enough. + */ + next_time = fip->sol_time + msecs_to_jiffies(FIP_VN_ANN_WAIT); + if (time_after_eq(jiffies, next_time)) { + fcoe_ctlr_set_state(fip, FIP_ST_VNMP_UP); + fcoe_ctlr_vn_send(fip, FIP_SC_VN_BEACON, + fcoe_all_vn2vn, 0); + next_time = jiffies + msecs_to_jiffies(FIP_VN_ANN_WAIT); + fip->port_ka_time = next_time; + } + fcoe_ctlr_vn_disc(fip); + break; + case FIP_ST_VNMP_UP: + next_time = fcoe_ctlr_vn_age(fip); + if (time_after_eq(jiffies, fip->port_ka_time)) { + fcoe_ctlr_vn_send(fip, FIP_SC_VN_BEACON, + fcoe_all_vn2vn, 0); + fip->port_ka_time = jiffies + + msecs_to_jiffies(FIP_VN_BEACON_INT + + (random32() % FIP_VN_BEACON_FUZZ)); + } + if (time_before(fip->port_ka_time, next_time)) + next_time = fip->port_ka_time; + break; + case FIP_ST_LINK_WAIT: + goto unlock; + default: + WARN(1, "unexpected state %d\n", fip->state); + goto unlock; + } + mod_timer(&fip->timer, next_time); +unlock: + mutex_unlock(&fip->ctlr_mutex); + + /* If port ID is new, notify local port after dropping ctlr_mutex */ + if (new_port_id) + fc_lport_set_local_id(fip->lp, new_port_id); +} + +/** + * fcoe_libfc_config() - Sets up libfc related properties for local port + * @lp: The local port to configure libfc for + * @fip: The FCoE controller in use by the local port + * @tt: The libfc function template + * @init_fcp: If non-zero, the FCP portion of libfc should be initialized + * + * Returns : 0 for success + */ +int fcoe_libfc_config(struct fc_lport *lport, struct fcoe_ctlr *fip, + const struct libfc_function_template *tt, int init_fcp) +{ + /* Set the function pointers set by the LLDD */ + memcpy(&lport->tt, tt, sizeof(*tt)); + if (init_fcp && fc_fcp_init(lport)) + return -ENOMEM; + fc_exch_init(lport); + fc_elsct_init(lport); + fc_lport_init(lport); + if (fip->mode == FIP_MODE_VN2VN) + lport->rport_priv_size = sizeof(struct fcoe_rport); + fc_rport_init(lport); + if (fip->mode == FIP_MODE_VN2VN) { + lport->point_to_multipoint = 1; + lport->tt.disc_recv_req = fcoe_ctlr_disc_recv; + lport->tt.disc_start = fcoe_ctlr_disc_start; + lport->tt.disc_stop = fcoe_ctlr_disc_stop; + lport->tt.disc_stop_final = fcoe_ctlr_disc_stop_final; + mutex_init(&lport->disc.disc_mutex); + INIT_LIST_HEAD(&lport->disc.rports); + lport->disc.priv = fip; + } else { + fc_disc_init(lport); + } + return 0; +} +EXPORT_SYMBOL_GPL(fcoe_libfc_config); diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c deleted file mode 100644 index 
a4757ca2a4ed..000000000000 --- a/drivers/scsi/fcoe/libfcoe.c +++ /dev/null @@ -1,2690 +0,0 @@ -/* - * Copyright (c) 2008-2009 Cisco Systems, Inc. All rights reserved. - * Copyright (c) 2009 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - * - * Maintained at www.Open-FCoE.org - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include -#include - -#include "libfcoe.h" - -MODULE_AUTHOR("Open-FCoE.org"); -MODULE_DESCRIPTION("FIP discovery protocol support for FCoE HBAs"); -MODULE_LICENSE("GPL v2"); - -#define FCOE_CTLR_MIN_FKA 500 /* min keep alive (mS) */ -#define FCOE_CTLR_DEF_FKA FIP_DEF_FKA /* default keep alive (mS) */ - -static void fcoe_ctlr_timeout(unsigned long); -static void fcoe_ctlr_timer_work(struct work_struct *); -static void fcoe_ctlr_recv_work(struct work_struct *); -static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *); - -static void fcoe_ctlr_vn_start(struct fcoe_ctlr *); -static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *, struct sk_buff *); -static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *); -static int fcoe_ctlr_vn_lookup(struct fcoe_ctlr *, u32, u8 *); - -static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS; -static u8 fcoe_all_enode[ETH_ALEN] = FIP_ALL_ENODE_MACS; -static u8 fcoe_all_vn2vn[ETH_ALEN] = FIP_ALL_VN2VN_MACS; -static u8 fcoe_all_p2p[ETH_ALEN] = FIP_ALL_P2P_MACS; - -unsigned int libfcoe_debug_logging; -module_param_named(debug_logging, libfcoe_debug_logging, int, S_IRUGO|S_IWUSR); -MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); - -static const char *fcoe_ctlr_states[] = { - [FIP_ST_DISABLED] = "DISABLED", - [FIP_ST_LINK_WAIT] = "LINK_WAIT", - [FIP_ST_AUTO] = "AUTO", - [FIP_ST_NON_FIP] = "NON_FIP", - [FIP_ST_ENABLED] = "ENABLED", - [FIP_ST_VNMP_START] = "VNMP_START", - [FIP_ST_VNMP_PROBE1] = "VNMP_PROBE1", - [FIP_ST_VNMP_PROBE2] = "VNMP_PROBE2", - [FIP_ST_VNMP_CLAIM] = "VNMP_CLAIM", - [FIP_ST_VNMP_UP] = "VNMP_UP", -}; - -static const char *fcoe_ctlr_state(enum fip_state state) -{ - const char *cp = "unknown"; - - if (state < ARRAY_SIZE(fcoe_ctlr_states)) - cp = fcoe_ctlr_states[state]; - if (!cp) - cp = "unknown"; - return cp; -} - -/** - * fcoe_ctlr_set_state() - Set and do debug printing for the new FIP state. - * @fip: The FCoE controller - * @state: The new state - */ -static void fcoe_ctlr_set_state(struct fcoe_ctlr *fip, enum fip_state state) -{ - if (state == fip->state) - return; - if (fip->lp) - LIBFCOE_FIP_DBG(fip, "state %s -> %s\n", - fcoe_ctlr_state(fip->state), fcoe_ctlr_state(state)); - fip->state = state; -} - -/** - * fcoe_ctlr_mtu_valid() - Check if a FCF's MTU is valid - * @fcf: The FCF to check - * - * Return non-zero if FCF fcoe_size has been validated. 
- */ -static inline int fcoe_ctlr_mtu_valid(const struct fcoe_fcf *fcf) -{ - return (fcf->flags & FIP_FL_SOL) != 0; -} - -/** - * fcoe_ctlr_fcf_usable() - Check if a FCF is usable - * @fcf: The FCF to check - * - * Return non-zero if the FCF is usable. - */ -static inline int fcoe_ctlr_fcf_usable(struct fcoe_fcf *fcf) -{ - u16 flags = FIP_FL_SOL | FIP_FL_AVAIL; - - return (fcf->flags & flags) == flags; -} - -/** - * fcoe_ctlr_map_dest() - Set flag and OUI for mapping destination addresses - * @fip: The FCoE controller - */ -static void fcoe_ctlr_map_dest(struct fcoe_ctlr *fip) -{ - if (fip->mode == FIP_MODE_VN2VN) - hton24(fip->dest_addr, FIP_VN_FC_MAP); - else - hton24(fip->dest_addr, FIP_DEF_FC_MAP); - hton24(fip->dest_addr + 3, 0); - fip->map_dest = 1; -} - -/** - * fcoe_ctlr_init() - Initialize the FCoE Controller instance - * @fip: The FCoE controller to initialize - */ -void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_state mode) -{ - fcoe_ctlr_set_state(fip, FIP_ST_LINK_WAIT); - fip->mode = mode; - INIT_LIST_HEAD(&fip->fcfs); - mutex_init(&fip->ctlr_mutex); - spin_lock_init(&fip->ctlr_lock); - fip->flogi_oxid = FC_XID_UNKNOWN; - setup_timer(&fip->timer, fcoe_ctlr_timeout, (unsigned long)fip); - INIT_WORK(&fip->timer_work, fcoe_ctlr_timer_work); - INIT_WORK(&fip->recv_work, fcoe_ctlr_recv_work); - skb_queue_head_init(&fip->fip_recv_list); -} -EXPORT_SYMBOL(fcoe_ctlr_init); - -/** - * fcoe_ctlr_reset_fcfs() - Reset and free all FCFs for a controller - * @fip: The FCoE controller whose FCFs are to be reset - * - * Called with &fcoe_ctlr lock held. - */ -static void fcoe_ctlr_reset_fcfs(struct fcoe_ctlr *fip) -{ - struct fcoe_fcf *fcf; - struct fcoe_fcf *next; - - fip->sel_fcf = NULL; - list_for_each_entry_safe(fcf, next, &fip->fcfs, list) { - list_del(&fcf->list); - kfree(fcf); - } - fip->fcf_count = 0; - fip->sel_time = 0; -} - -/** - * fcoe_ctlr_destroy() - Disable and tear down a FCoE controller - * @fip: The FCoE controller to tear down - * - * This is called by FCoE drivers before freeing the &fcoe_ctlr. - * - * The receive handler will have been deleted before this to guarantee - * that no more recv_work will be scheduled. - * - * The timer routine will simply return once we set FIP_ST_DISABLED. - * This guarantees that no further timeouts or work will be scheduled. - */ -void fcoe_ctlr_destroy(struct fcoe_ctlr *fip) -{ - cancel_work_sync(&fip->recv_work); - skb_queue_purge(&fip->fip_recv_list); - - mutex_lock(&fip->ctlr_mutex); - fcoe_ctlr_set_state(fip, FIP_ST_DISABLED); - fcoe_ctlr_reset_fcfs(fip); - mutex_unlock(&fip->ctlr_mutex); - del_timer_sync(&fip->timer); - cancel_work_sync(&fip->timer_work); -} -EXPORT_SYMBOL(fcoe_ctlr_destroy); - -/** - * fcoe_ctlr_announce() - announce new FCF selection - * @fip: The FCoE controller - * - * Also sets the destination MAC for FCoE and control packets - * - * Called with neither ctlr_mutex nor ctlr_lock held. 
- */ -static void fcoe_ctlr_announce(struct fcoe_ctlr *fip) -{ - struct fcoe_fcf *sel; - struct fcoe_fcf *fcf; - - mutex_lock(&fip->ctlr_mutex); - spin_lock_bh(&fip->ctlr_lock); - - kfree_skb(fip->flogi_req); - fip->flogi_req = NULL; - list_for_each_entry(fcf, &fip->fcfs, list) - fcf->flogi_sent = 0; - - spin_unlock_bh(&fip->ctlr_lock); - sel = fip->sel_fcf; - - if (sel && !compare_ether_addr(sel->fcf_mac, fip->dest_addr)) - goto unlock; - if (!is_zero_ether_addr(fip->dest_addr)) { - printk(KERN_NOTICE "libfcoe: host%d: " - "FIP Fibre-Channel Forwarder MAC %pM deselected\n", - fip->lp->host->host_no, fip->dest_addr); - memset(fip->dest_addr, 0, ETH_ALEN); - } - if (sel) { - printk(KERN_INFO "libfcoe: host%d: FIP selected " - "Fibre-Channel Forwarder MAC %pM\n", - fip->lp->host->host_no, sel->fcf_mac); - memcpy(fip->dest_addr, sel->fcf_mac, ETH_ALEN); - fip->map_dest = 0; - } -unlock: - mutex_unlock(&fip->ctlr_mutex); -} - -/** - * fcoe_ctlr_fcoe_size() - Return the maximum FCoE size required for VN_Port - * @fip: The FCoE controller to get the maximum FCoE size from - * - * Returns the maximum packet size including the FCoE header and trailer, - * but not including any Ethernet or VLAN headers. - */ -static inline u32 fcoe_ctlr_fcoe_size(struct fcoe_ctlr *fip) -{ - /* - * Determine the max FCoE frame size allowed, including - * FCoE header and trailer. - * Note: lp->mfs is currently the payload size, not the frame size. - */ - return fip->lp->mfs + sizeof(struct fc_frame_header) + - sizeof(struct fcoe_hdr) + sizeof(struct fcoe_crc_eof); -} - -/** - * fcoe_ctlr_solicit() - Send a FIP solicitation - * @fip: The FCoE controller to send the solicitation on - * @fcf: The destination FCF (if NULL, a multicast solicitation is sent) - */ -static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf) -{ - struct sk_buff *skb; - struct fip_sol { - struct ethhdr eth; - struct fip_header fip; - struct { - struct fip_mac_desc mac; - struct fip_wwn_desc wwnn; - struct fip_size_desc size; - } __attribute__((packed)) desc; - } __attribute__((packed)) *sol; - u32 fcoe_size; - - skb = dev_alloc_skb(sizeof(*sol)); - if (!skb) - return; - - sol = (struct fip_sol *)skb->data; - - memset(sol, 0, sizeof(*sol)); - memcpy(sol->eth.h_dest, fcf ? 
fcf->fcf_mac : fcoe_all_fcfs, ETH_ALEN); - memcpy(sol->eth.h_source, fip->ctl_src_addr, ETH_ALEN); - sol->eth.h_proto = htons(ETH_P_FIP); - - sol->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER); - sol->fip.fip_op = htons(FIP_OP_DISC); - sol->fip.fip_subcode = FIP_SC_SOL; - sol->fip.fip_dl_len = htons(sizeof(sol->desc) / FIP_BPW); - sol->fip.fip_flags = htons(FIP_FL_FPMA); - if (fip->spma) - sol->fip.fip_flags |= htons(FIP_FL_SPMA); - - sol->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC; - sol->desc.mac.fd_desc.fip_dlen = sizeof(sol->desc.mac) / FIP_BPW; - memcpy(sol->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN); - - sol->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME; - sol->desc.wwnn.fd_desc.fip_dlen = sizeof(sol->desc.wwnn) / FIP_BPW; - put_unaligned_be64(fip->lp->wwnn, &sol->desc.wwnn.fd_wwn); - - fcoe_size = fcoe_ctlr_fcoe_size(fip); - sol->desc.size.fd_desc.fip_dtype = FIP_DT_FCOE_SIZE; - sol->desc.size.fd_desc.fip_dlen = sizeof(sol->desc.size) / FIP_BPW; - sol->desc.size.fd_size = htons(fcoe_size); - - skb_put(skb, sizeof(*sol)); - skb->protocol = htons(ETH_P_FIP); - skb_reset_mac_header(skb); - skb_reset_network_header(skb); - fip->send(fip, skb); - - if (!fcf) - fip->sol_time = jiffies; -} - -/** - * fcoe_ctlr_link_up() - Start FCoE controller - * @fip: The FCoE controller to start - * - * Called from the LLD when the network link is ready. - */ -void fcoe_ctlr_link_up(struct fcoe_ctlr *fip) -{ - mutex_lock(&fip->ctlr_mutex); - if (fip->state == FIP_ST_NON_FIP || fip->state == FIP_ST_AUTO) { - mutex_unlock(&fip->ctlr_mutex); - fc_linkup(fip->lp); - } else if (fip->state == FIP_ST_LINK_WAIT) { - fcoe_ctlr_set_state(fip, fip->mode); - switch (fip->mode) { - default: - LIBFCOE_FIP_DBG(fip, "invalid mode %d\n", fip->mode); - /* fall-through */ - case FIP_MODE_AUTO: - LIBFCOE_FIP_DBG(fip, "%s", "setting AUTO mode.\n"); - /* fall-through */ - case FIP_MODE_FABRIC: - case FIP_MODE_NON_FIP: - mutex_unlock(&fip->ctlr_mutex); - fc_linkup(fip->lp); - fcoe_ctlr_solicit(fip, NULL); - break; - case FIP_MODE_VN2VN: - fcoe_ctlr_vn_start(fip); - mutex_unlock(&fip->ctlr_mutex); - fc_linkup(fip->lp); - break; - } - } else - mutex_unlock(&fip->ctlr_mutex); -} -EXPORT_SYMBOL(fcoe_ctlr_link_up); - -/** - * fcoe_ctlr_reset() - Reset a FCoE controller - * @fip: The FCoE controller to reset - */ -static void fcoe_ctlr_reset(struct fcoe_ctlr *fip) -{ - fcoe_ctlr_reset_fcfs(fip); - del_timer(&fip->timer); - fip->ctlr_ka_time = 0; - fip->port_ka_time = 0; - fip->sol_time = 0; - fip->flogi_oxid = FC_XID_UNKNOWN; - fcoe_ctlr_map_dest(fip); -} - -/** - * fcoe_ctlr_link_down() - Stop a FCoE controller - * @fip: The FCoE controller to be stopped - * - * Returns non-zero if the link was up and now isn't. - * - * Called from the LLD when the network link is not ready. - * There may be multiple calls while the link is down. 
- */ -int fcoe_ctlr_link_down(struct fcoe_ctlr *fip) -{ - int link_dropped; - - LIBFCOE_FIP_DBG(fip, "link down.\n"); - mutex_lock(&fip->ctlr_mutex); - fcoe_ctlr_reset(fip); - link_dropped = fip->state != FIP_ST_LINK_WAIT; - fcoe_ctlr_set_state(fip, FIP_ST_LINK_WAIT); - mutex_unlock(&fip->ctlr_mutex); - - if (link_dropped) - fc_linkdown(fip->lp); - return link_dropped; -} -EXPORT_SYMBOL(fcoe_ctlr_link_down); - -/** - * fcoe_ctlr_send_keep_alive() - Send a keep-alive to the selected FCF - * @fip: The FCoE controller to send the FKA on - * @lport: libfc fc_lport to send from - * @ports: 0 for controller keep-alive, 1 for port keep-alive - * @sa: The source MAC address - * - * A controller keep-alive is sent every fka_period (typically 8 seconds). - * The source MAC is the native MAC address. - * - * A port keep-alive is sent every 90 seconds while logged in. - * The source MAC is the assigned mapped source address. - * The destination is the FCF's F-port. - */ -static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, - struct fc_lport *lport, - int ports, u8 *sa) -{ - struct sk_buff *skb; - struct fip_kal { - struct ethhdr eth; - struct fip_header fip; - struct fip_mac_desc mac; - } __attribute__((packed)) *kal; - struct fip_vn_desc *vn; - u32 len; - struct fc_lport *lp; - struct fcoe_fcf *fcf; - - fcf = fip->sel_fcf; - lp = fip->lp; - if (!fcf || (ports && !lp->port_id)) - return; - - len = sizeof(*kal) + ports * sizeof(*vn); - skb = dev_alloc_skb(len); - if (!skb) - return; - - kal = (struct fip_kal *)skb->data; - memset(kal, 0, len); - memcpy(kal->eth.h_dest, fcf->fcf_mac, ETH_ALEN); - memcpy(kal->eth.h_source, sa, ETH_ALEN); - kal->eth.h_proto = htons(ETH_P_FIP); - - kal->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER); - kal->fip.fip_op = htons(FIP_OP_CTRL); - kal->fip.fip_subcode = FIP_SC_KEEP_ALIVE; - kal->fip.fip_dl_len = htons((sizeof(kal->mac) + - ports * sizeof(*vn)) / FIP_BPW); - kal->fip.fip_flags = htons(FIP_FL_FPMA); - if (fip->spma) - kal->fip.fip_flags |= htons(FIP_FL_SPMA); - - kal->mac.fd_desc.fip_dtype = FIP_DT_MAC; - kal->mac.fd_desc.fip_dlen = sizeof(kal->mac) / FIP_BPW; - memcpy(kal->mac.fd_mac, fip->ctl_src_addr, ETH_ALEN); - if (ports) { - vn = (struct fip_vn_desc *)(kal + 1); - vn->fd_desc.fip_dtype = FIP_DT_VN_ID; - vn->fd_desc.fip_dlen = sizeof(*vn) / FIP_BPW; - memcpy(vn->fd_mac, fip->get_src_addr(lport), ETH_ALEN); - hton24(vn->fd_fc_id, lport->port_id); - put_unaligned_be64(lport->wwpn, &vn->fd_wwpn); - } - skb_put(skb, len); - skb->protocol = htons(ETH_P_FIP); - skb_reset_mac_header(skb); - skb_reset_network_header(skb); - fip->send(fip, skb); -} - -/** - * fcoe_ctlr_encaps() - Encapsulate an ELS frame for FIP, without sending it - * @fip: The FCoE controller for the ELS frame - * @dtype: The FIP descriptor type for the frame - * @skb: The FCoE ELS frame including FC header but no FCoE headers - * @d_id: The destination port ID. - * - * Returns non-zero error code on failure. - * - * The caller must check that the length is a multiple of 4. - * - * The @skb must have enough headroom (28 bytes) and tailroom (8 bytes). - * Headroom includes the FIP encapsulation description, FIP header, and - * Ethernet header. The tailroom is for the FIP MAC descriptor. 
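For reference, the 28-byte headroom quoted above is the sum of the Ethernet header, the FIP header, and the FIP encapsulation descriptor pushed in front of the FC frame, and the 8-byte tailroom holds the appended MAC descriptor. A small sketch of that byte budget, assuming the conventional sizes of the structures named in fcoe_ctlr_encaps():

#include <stdio.h>

/* Sketch only: byte accounting for FIP-encapsulating an ELS frame; the sizes
 * below are the assumed sizes of the named structures, not taken from a build.
 */
int main(void)
{
	unsigned int eth_hdr = 14;     /* struct ethhdr */
	unsigned int fip_hdr = 10;     /* struct fip_header */
	unsigned int encaps_desc = 4;  /* struct fip_encaps */
	unsigned int mac_desc = 8;     /* struct fip_mac_desc */

	printf("headroom %u bytes (28 expected)\n", eth_hdr + fip_hdr + encaps_desc);
	printf("tailroom %u bytes (8 expected)\n", mac_desc);
	return 0;
}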
- */ -static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, struct fc_lport *lport, - u8 dtype, struct sk_buff *skb, u32 d_id) -{ - struct fip_encaps_head { - struct ethhdr eth; - struct fip_header fip; - struct fip_encaps encaps; - } __attribute__((packed)) *cap; - struct fc_frame_header *fh; - struct fip_mac_desc *mac; - struct fcoe_fcf *fcf; - size_t dlen; - u16 fip_flags; - u8 op; - - fh = (struct fc_frame_header *)skb->data; - op = *(u8 *)(fh + 1); - dlen = sizeof(struct fip_encaps) + skb->len; /* len before push */ - cap = (struct fip_encaps_head *)skb_push(skb, sizeof(*cap)); - memset(cap, 0, sizeof(*cap)); - - if (lport->point_to_multipoint) { - if (fcoe_ctlr_vn_lookup(fip, d_id, cap->eth.h_dest)) - return -ENODEV; - fip_flags = 0; - } else { - fcf = fip->sel_fcf; - if (!fcf) - return -ENODEV; - fip_flags = fcf->flags; - fip_flags &= fip->spma ? FIP_FL_SPMA | FIP_FL_FPMA : - FIP_FL_FPMA; - if (!fip_flags) - return -ENODEV; - memcpy(cap->eth.h_dest, fcf->fcf_mac, ETH_ALEN); - } - memcpy(cap->eth.h_source, fip->ctl_src_addr, ETH_ALEN); - cap->eth.h_proto = htons(ETH_P_FIP); - - cap->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER); - cap->fip.fip_op = htons(FIP_OP_LS); - if (op == ELS_LS_ACC || op == ELS_LS_RJT) - cap->fip.fip_subcode = FIP_SC_REP; - else - cap->fip.fip_subcode = FIP_SC_REQ; - cap->fip.fip_flags = htons(fip_flags); - - cap->encaps.fd_desc.fip_dtype = dtype; - cap->encaps.fd_desc.fip_dlen = dlen / FIP_BPW; - - if (op != ELS_LS_RJT) { - dlen += sizeof(*mac); - mac = (struct fip_mac_desc *)skb_put(skb, sizeof(*mac)); - memset(mac, 0, sizeof(*mac)); - mac->fd_desc.fip_dtype = FIP_DT_MAC; - mac->fd_desc.fip_dlen = sizeof(*mac) / FIP_BPW; - if (dtype != FIP_DT_FLOGI && dtype != FIP_DT_FDISC) { - memcpy(mac->fd_mac, fip->get_src_addr(lport), ETH_ALEN); - } else if (fip->mode == FIP_MODE_VN2VN) { - hton24(mac->fd_mac, FIP_VN_FC_MAP); - hton24(mac->fd_mac + 3, fip->port_id); - } else if (fip_flags & FIP_FL_SPMA) { - LIBFCOE_FIP_DBG(fip, "FLOGI/FDISC sent with SPMA\n"); - memcpy(mac->fd_mac, fip->ctl_src_addr, ETH_ALEN); - } else { - LIBFCOE_FIP_DBG(fip, "FLOGI/FDISC sent with FPMA\n"); - /* FPMA only FLOGI. Must leave the MAC desc zeroed. */ - } - } - cap->fip.fip_dl_len = htons(dlen / FIP_BPW); - - skb->protocol = htons(ETH_P_FIP); - skb_reset_mac_header(skb); - skb_reset_network_header(skb); - return 0; -} - -/** - * fcoe_ctlr_els_send() - Send an ELS frame encapsulated by FIP if appropriate. - * @fip: FCoE controller. - * @lport: libfc fc_lport to send from - * @skb: FCoE ELS frame including FC header but no FCoE headers. - * - * Returns a non-zero error code if the frame should not be sent. - * Returns zero if the caller should send the frame with FCoE encapsulation. - * - * The caller must check that the length is a multiple of 4. - * The SKB must have enough headroom (28 bytes) and tailroom (8 bytes). - * The the skb must also be an fc_frame. - * - * This is called from the lower-level driver with spinlocks held, - * so we must not take a mutex here. 
- */ -int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport, - struct sk_buff *skb) -{ - struct fc_frame *fp; - struct fc_frame_header *fh; - u16 old_xid; - u8 op; - u8 mac[ETH_ALEN]; - - fp = container_of(skb, struct fc_frame, skb); - fh = (struct fc_frame_header *)skb->data; - op = *(u8 *)(fh + 1); - - if (op == ELS_FLOGI && fip->mode != FIP_MODE_VN2VN) { - old_xid = fip->flogi_oxid; - fip->flogi_oxid = ntohs(fh->fh_ox_id); - if (fip->state == FIP_ST_AUTO) { - if (old_xid == FC_XID_UNKNOWN) - fip->flogi_count = 0; - fip->flogi_count++; - if (fip->flogi_count < 3) - goto drop; - fcoe_ctlr_map_dest(fip); - return 0; - } - if (fip->state == FIP_ST_NON_FIP) - fcoe_ctlr_map_dest(fip); - } - - if (fip->state == FIP_ST_NON_FIP) - return 0; - if (!fip->sel_fcf && fip->mode != FIP_MODE_VN2VN) - goto drop; - switch (op) { - case ELS_FLOGI: - op = FIP_DT_FLOGI; - if (fip->mode == FIP_MODE_VN2VN) - break; - spin_lock_bh(&fip->ctlr_lock); - kfree_skb(fip->flogi_req); - fip->flogi_req = skb; - fip->flogi_req_send = 1; - spin_unlock_bh(&fip->ctlr_lock); - schedule_work(&fip->timer_work); - return -EINPROGRESS; - case ELS_FDISC: - if (ntoh24(fh->fh_s_id)) - return 0; - op = FIP_DT_FDISC; - break; - case ELS_LOGO: - if (fip->mode == FIP_MODE_VN2VN) { - if (fip->state != FIP_ST_VNMP_UP) - return -EINVAL; - if (ntoh24(fh->fh_d_id) == FC_FID_FLOGI) - return -EINVAL; - } else { - if (fip->state != FIP_ST_ENABLED) - return 0; - if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI) - return 0; - } - op = FIP_DT_LOGO; - break; - case ELS_LS_ACC: - /* - * If non-FIP, we may have gotten an SID by accepting an FLOGI - * from a point-to-point connection. Switch to using - * the source mac based on the SID. The destination - * MAC in this case would have been set by receving the - * FLOGI. - */ - if (fip->state == FIP_ST_NON_FIP) { - if (fip->flogi_oxid == FC_XID_UNKNOWN) - return 0; - fip->flogi_oxid = FC_XID_UNKNOWN; - fc_fcoe_set_mac(mac, fh->fh_d_id); - fip->update_mac(lport, mac); - } - /* fall through */ - case ELS_LS_RJT: - op = fr_encaps(fp); - if (op) - break; - return 0; - default: - if (fip->state != FIP_ST_ENABLED && - fip->state != FIP_ST_VNMP_UP) - goto drop; - return 0; - } - LIBFCOE_FIP_DBG(fip, "els_send op %u d_id %x\n", - op, ntoh24(fh->fh_d_id)); - if (fcoe_ctlr_encaps(fip, lport, op, skb, ntoh24(fh->fh_d_id))) - goto drop; - fip->send(fip, skb); - return -EINPROGRESS; -drop: - kfree_skb(skb); - return -EINVAL; -} -EXPORT_SYMBOL(fcoe_ctlr_els_send); - -/** - * fcoe_ctlr_age_fcfs() - Reset and free all old FCFs for a controller - * @fip: The FCoE controller to free FCFs on - * - * Called with lock held and preemption disabled. - * - * An FCF is considered old if we have missed two advertisements. - * That is, there have been no valid advertisement from it for 2.5 - * times its keep-alive period. - * - * In addition, determine the time when an FCF selection can occur. - * - * Also, increment the MissDiscAdvCount when no advertisement is received - * for the corresponding FCF for 1.5 * FKA_ADV_PERIOD (FC-BB-5 LESB). - * - * Returns the time in jiffies for the next call. 
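For reference, the aging rule described above works out as follows: MissDiscAdvCount is bumped 1.5 keep-alive periods after the last advertisement (deadline = time + fka_period + fka_period / 2), and after one further period, 2.5 in total, the FCF is removed. A minimal sketch with a hypothetical 8-second FKA period:

#include <stdio.h>

/* Sketch only: FCF aging milestones for a hypothetical 8000 ms keep-alive
 * period, mirroring the deadline arithmetic in fcoe_ctlr_age_fcfs().
 */
int main(void)
{
	unsigned long fka_ms = 8000;    /* hypothetical keep-alive period */
	unsigned long last_adv_ms = 0;  /* time of last valid advertisement */
	unsigned long miss_ms = last_adv_ms + fka_ms + fka_ms / 2;  /* 12000 */
	unsigned long drop_ms = miss_ms + fka_ms;                   /* 20000 */

	printf("count a missed advertisement at %lu ms\n", miss_ms);
	printf("remove the FCF at %lu ms\n", drop_ms);
	return 0;
}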
- */ -static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip) -{ - struct fcoe_fcf *fcf; - struct fcoe_fcf *next; - unsigned long next_timer = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD); - unsigned long deadline; - unsigned long sel_time = 0; - struct fcoe_dev_stats *stats; - - stats = per_cpu_ptr(fip->lp->dev_stats, get_cpu()); - - list_for_each_entry_safe(fcf, next, &fip->fcfs, list) { - deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2; - if (fip->sel_fcf == fcf) { - if (time_after(jiffies, deadline)) { - stats->MissDiscAdvCount++; - printk(KERN_INFO "libfcoe: host%d: " - "Missing Discovery Advertisement " - "for fab %16.16llx count %lld\n", - fip->lp->host->host_no, fcf->fabric_name, - stats->MissDiscAdvCount); - } else if (time_after(next_timer, deadline)) - next_timer = deadline; - } - - deadline += fcf->fka_period; - if (time_after_eq(jiffies, deadline)) { - if (fip->sel_fcf == fcf) - fip->sel_fcf = NULL; - list_del(&fcf->list); - WARN_ON(!fip->fcf_count); - fip->fcf_count--; - kfree(fcf); - stats->VLinkFailureCount++; - } else { - if (time_after(next_timer, deadline)) - next_timer = deadline; - if (fcoe_ctlr_mtu_valid(fcf) && - (!sel_time || time_before(sel_time, fcf->time))) - sel_time = fcf->time; - } - } - put_cpu(); - if (sel_time && !fip->sel_fcf && !fip->sel_time) { - sel_time += msecs_to_jiffies(FCOE_CTLR_START_DELAY); - fip->sel_time = sel_time; - } - - return next_timer; -} - -/** - * fcoe_ctlr_parse_adv() - Decode a FIP advertisement into a new FCF entry - * @fip: The FCoE controller receiving the advertisement - * @skb: The received FIP advertisement frame - * @fcf: The resulting FCF entry - * - * Returns zero on a valid parsed advertisement, - * otherwise returns non zero value. - */ -static int fcoe_ctlr_parse_adv(struct fcoe_ctlr *fip, - struct sk_buff *skb, struct fcoe_fcf *fcf) -{ - struct fip_header *fiph; - struct fip_desc *desc = NULL; - struct fip_wwn_desc *wwn; - struct fip_fab_desc *fab; - struct fip_fka_desc *fka; - unsigned long t; - size_t rlen; - size_t dlen; - u32 desc_mask; - - memset(fcf, 0, sizeof(*fcf)); - fcf->fka_period = msecs_to_jiffies(FCOE_CTLR_DEF_FKA); - - fiph = (struct fip_header *)skb->data; - fcf->flags = ntohs(fiph->fip_flags); - - /* - * mask of required descriptors. validating each one clears its bit. 
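For reference, a minimal user-space sketch of the required-descriptor bookkeeping described above: start with one bit per mandatory descriptor type, clear a bit as each descriptor validates, reject if any bit is still set at the end, and catch duplicate critical descriptors because their bit is already clear. The descriptor type numbers below are placeholders for the sketch only.

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1U << (n))

/* Placeholder descriptor type numbers, not the real FIP_DT_* values. */
enum { DT_PRI = 1, DT_MAC = 2, DT_NAME = 4, DT_FAB = 5, DT_FKA = 8 };

int main(void)
{
	uint32_t need = BIT(DT_PRI) | BIT(DT_MAC) | BIT(DT_NAME) | BIT(DT_FAB) | BIT(DT_FKA);
	int seen[] = { DT_PRI, DT_MAC, DT_NAME, DT_FAB };  /* hypothetical frame: FKA missing */
	unsigned int i;

	for (i = 0; i < sizeof(seen) / sizeof(seen[0]); i++) {
		if (!(need & BIT(seen[i]))) {
			printf("duplicate critical descriptor %d - drop\n", seen[i]);
			return 1;
		}
		need &= ~BIT(seen[i]);
	}
	if (need)
		printf("missing descriptors, mask %#x - drop\n", (unsigned int)need);
	else
		printf("all required descriptors present\n");
	return 0;
}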
- */ - desc_mask = BIT(FIP_DT_PRI) | BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME) | - BIT(FIP_DT_FAB) | BIT(FIP_DT_FKA); - - rlen = ntohs(fiph->fip_dl_len) * 4; - if (rlen + sizeof(*fiph) > skb->len) - return -EINVAL; - - desc = (struct fip_desc *)(fiph + 1); - while (rlen > 0) { - dlen = desc->fip_dlen * FIP_BPW; - if (dlen < sizeof(*desc) || dlen > rlen) - return -EINVAL; - /* Drop Adv if there are duplicate critical descriptors */ - if ((desc->fip_dtype < 32) && - !(desc_mask & 1U << desc->fip_dtype)) { - LIBFCOE_FIP_DBG(fip, "Duplicate Critical " - "Descriptors in FIP adv\n"); - return -EINVAL; - } - switch (desc->fip_dtype) { - case FIP_DT_PRI: - if (dlen != sizeof(struct fip_pri_desc)) - goto len_err; - fcf->pri = ((struct fip_pri_desc *)desc)->fd_pri; - desc_mask &= ~BIT(FIP_DT_PRI); - break; - case FIP_DT_MAC: - if (dlen != sizeof(struct fip_mac_desc)) - goto len_err; - memcpy(fcf->fcf_mac, - ((struct fip_mac_desc *)desc)->fd_mac, - ETH_ALEN); - if (!is_valid_ether_addr(fcf->fcf_mac)) { - LIBFCOE_FIP_DBG(fip, - "Invalid MAC addr %pM in FIP adv\n", - fcf->fcf_mac); - return -EINVAL; - } - desc_mask &= ~BIT(FIP_DT_MAC); - break; - case FIP_DT_NAME: - if (dlen != sizeof(struct fip_wwn_desc)) - goto len_err; - wwn = (struct fip_wwn_desc *)desc; - fcf->switch_name = get_unaligned_be64(&wwn->fd_wwn); - desc_mask &= ~BIT(FIP_DT_NAME); - break; - case FIP_DT_FAB: - if (dlen != sizeof(struct fip_fab_desc)) - goto len_err; - fab = (struct fip_fab_desc *)desc; - fcf->fabric_name = get_unaligned_be64(&fab->fd_wwn); - fcf->vfid = ntohs(fab->fd_vfid); - fcf->fc_map = ntoh24(fab->fd_map); - desc_mask &= ~BIT(FIP_DT_FAB); - break; - case FIP_DT_FKA: - if (dlen != sizeof(struct fip_fka_desc)) - goto len_err; - fka = (struct fip_fka_desc *)desc; - if (fka->fd_flags & FIP_FKA_ADV_D) - fcf->fd_flags = 1; - t = ntohl(fka->fd_fka_period); - if (t >= FCOE_CTLR_MIN_FKA) - fcf->fka_period = msecs_to_jiffies(t); - desc_mask &= ~BIT(FIP_DT_FKA); - break; - case FIP_DT_MAP_OUI: - case FIP_DT_FCOE_SIZE: - case FIP_DT_FLOGI: - case FIP_DT_FDISC: - case FIP_DT_LOGO: - case FIP_DT_ELP: - default: - LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x " - "in FIP adv\n", desc->fip_dtype); - /* standard says ignore unknown descriptors >= 128 */ - if (desc->fip_dtype < FIP_DT_VENDOR_BASE) - return -EINVAL; - break; - } - desc = (struct fip_desc *)((char *)desc + dlen); - rlen -= dlen; - } - if (!fcf->fc_map || (fcf->fc_map & 0x10000)) - return -EINVAL; - if (!fcf->switch_name) - return -EINVAL; - if (desc_mask) { - LIBFCOE_FIP_DBG(fip, "adv missing descriptors mask %x\n", - desc_mask); - return -EINVAL; - } - return 0; - -len_err: - LIBFCOE_FIP_DBG(fip, "FIP length error in descriptor type %x len %zu\n", - desc->fip_dtype, dlen); - return -EINVAL; -} - -/** - * fcoe_ctlr_recv_adv() - Handle an incoming advertisement - * @fip: The FCoE controller receiving the advertisement - * @skb: The received FIP packet - */ -static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb) -{ - struct fcoe_fcf *fcf; - struct fcoe_fcf new; - struct fcoe_fcf *found; - unsigned long sol_tov = msecs_to_jiffies(FCOE_CTRL_SOL_TOV); - int first = 0; - int mtu_valid; - - if (fcoe_ctlr_parse_adv(fip, skb, &new)) - return; - - mutex_lock(&fip->ctlr_mutex); - first = list_empty(&fip->fcfs); - found = NULL; - list_for_each_entry(fcf, &fip->fcfs, list) { - if (fcf->switch_name == new.switch_name && - fcf->fabric_name == new.fabric_name && - fcf->fc_map == new.fc_map && - compare_ether_addr(fcf->fcf_mac, new.fcf_mac) == 0) { - found = fcf; - 
break; - } - } - if (!found) { - if (fip->fcf_count >= FCOE_CTLR_FCF_LIMIT) - goto out; - - fcf = kmalloc(sizeof(*fcf), GFP_ATOMIC); - if (!fcf) - goto out; - - fip->fcf_count++; - memcpy(fcf, &new, sizeof(new)); - list_add(&fcf->list, &fip->fcfs); - } else { - /* - * Update the FCF's keep-alive descriptor flags. - * Other flag changes from new advertisements are - * ignored after a solicited advertisement is - * received and the FCF is selectable (usable). - */ - fcf->fd_flags = new.fd_flags; - if (!fcoe_ctlr_fcf_usable(fcf)) - fcf->flags = new.flags; - - if (fcf == fip->sel_fcf && !fcf->fd_flags) { - fip->ctlr_ka_time -= fcf->fka_period; - fip->ctlr_ka_time += new.fka_period; - if (time_before(fip->ctlr_ka_time, fip->timer.expires)) - mod_timer(&fip->timer, fip->ctlr_ka_time); - } - fcf->fka_period = new.fka_period; - memcpy(fcf->fcf_mac, new.fcf_mac, ETH_ALEN); - } - mtu_valid = fcoe_ctlr_mtu_valid(fcf); - fcf->time = jiffies; - if (!found) - LIBFCOE_FIP_DBG(fip, "New FCF fab %16.16llx mac %pM\n", - fcf->fabric_name, fcf->fcf_mac); - - /* - * If this advertisement is not solicited and our max receive size - * hasn't been verified, send a solicited advertisement. - */ - if (!mtu_valid) - fcoe_ctlr_solicit(fip, fcf); - - /* - * If its been a while since we did a solicit, and this is - * the first advertisement we've received, do a multicast - * solicitation to gather as many advertisements as we can - * before selection occurs. - */ - if (first && time_after(jiffies, fip->sol_time + sol_tov)) - fcoe_ctlr_solicit(fip, NULL); - - /* - * Put this FCF at the head of the list for priority among equals. - * This helps in the case of an NPV switch which insists we use - * the FCF that answers multicast solicitations, not the others that - * are sending periodic multicast advertisements. - */ - if (mtu_valid) { - list_del(&fcf->list); - list_add(&fcf->list, &fip->fcfs); - } - - /* - * If this is the first validated FCF, note the time and - * set a timer to trigger selection. 
- */ - if (mtu_valid && !fip->sel_fcf && fcoe_ctlr_fcf_usable(fcf)) { - fip->sel_time = jiffies + - msecs_to_jiffies(FCOE_CTLR_START_DELAY); - if (!timer_pending(&fip->timer) || - time_before(fip->sel_time, fip->timer.expires)) - mod_timer(&fip->timer, fip->sel_time); - } -out: - mutex_unlock(&fip->ctlr_mutex); -} - -/** - * fcoe_ctlr_recv_els() - Handle an incoming FIP encapsulated ELS frame - * @fip: The FCoE controller which received the packet - * @skb: The received FIP packet - */ -static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb) -{ - struct fc_lport *lport = fip->lp; - struct fip_header *fiph; - struct fc_frame *fp = (struct fc_frame *)skb; - struct fc_frame_header *fh = NULL; - struct fip_desc *desc; - struct fip_encaps *els; - struct fcoe_dev_stats *stats; - enum fip_desc_type els_dtype = 0; - u8 els_op; - u8 sub; - u8 granted_mac[ETH_ALEN] = { 0 }; - size_t els_len = 0; - size_t rlen; - size_t dlen; - u32 desc_mask = 0; - u32 desc_cnt = 0; - - fiph = (struct fip_header *)skb->data; - sub = fiph->fip_subcode; - if (sub != FIP_SC_REQ && sub != FIP_SC_REP) - goto drop; - - rlen = ntohs(fiph->fip_dl_len) * 4; - if (rlen + sizeof(*fiph) > skb->len) - goto drop; - - desc = (struct fip_desc *)(fiph + 1); - while (rlen > 0) { - desc_cnt++; - dlen = desc->fip_dlen * FIP_BPW; - if (dlen < sizeof(*desc) || dlen > rlen) - goto drop; - /* Drop ELS if there are duplicate critical descriptors */ - if (desc->fip_dtype < 32) { - if (desc_mask & 1U << desc->fip_dtype) { - LIBFCOE_FIP_DBG(fip, "Duplicate Critical " - "Descriptors in FIP ELS\n"); - goto drop; - } - desc_mask |= (1 << desc->fip_dtype); - } - switch (desc->fip_dtype) { - case FIP_DT_MAC: - if (desc_cnt == 1) { - LIBFCOE_FIP_DBG(fip, "FIP descriptors " - "received out of order\n"); - goto drop; - } - - if (dlen != sizeof(struct fip_mac_desc)) - goto len_err; - memcpy(granted_mac, - ((struct fip_mac_desc *)desc)->fd_mac, - ETH_ALEN); - break; - case FIP_DT_FLOGI: - case FIP_DT_FDISC: - case FIP_DT_LOGO: - case FIP_DT_ELP: - if (desc_cnt != 1) { - LIBFCOE_FIP_DBG(fip, "FIP descriptors " - "received out of order\n"); - goto drop; - } - if (fh) - goto drop; - if (dlen < sizeof(*els) + sizeof(*fh) + 1) - goto len_err; - els_len = dlen - sizeof(*els); - els = (struct fip_encaps *)desc; - fh = (struct fc_frame_header *)(els + 1); - els_dtype = desc->fip_dtype; - break; - default: - LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x " - "in FIP adv\n", desc->fip_dtype); - /* standard says ignore unknown descriptors >= 128 */ - if (desc->fip_dtype < FIP_DT_VENDOR_BASE) - goto drop; - if (desc_cnt <= 2) { - LIBFCOE_FIP_DBG(fip, "FIP descriptors " - "received out of order\n"); - goto drop; - } - break; - } - desc = (struct fip_desc *)((char *)desc + dlen); - rlen -= dlen; - } - - if (!fh) - goto drop; - els_op = *(u8 *)(fh + 1); - - if ((els_dtype == FIP_DT_FLOGI || els_dtype == FIP_DT_FDISC) && - sub == FIP_SC_REP && fip->mode != FIP_MODE_VN2VN) { - if (els_op == ELS_LS_ACC) { - if (!is_valid_ether_addr(granted_mac)) { - LIBFCOE_FIP_DBG(fip, - "Invalid MAC address %pM in FIP ELS\n", - granted_mac); - goto drop; - } - memcpy(fr_cb(fp)->granted_mac, granted_mac, ETH_ALEN); - - if (fip->flogi_oxid == ntohs(fh->fh_ox_id)) { - fip->flogi_oxid = FC_XID_UNKNOWN; - if (els_dtype == FIP_DT_FLOGI) - fcoe_ctlr_announce(fip); - } - } else if (els_dtype == FIP_DT_FLOGI && - !fcoe_ctlr_flogi_retry(fip)) - goto drop; /* retrying FLOGI so drop reject */ - } - - if ((desc_cnt == 0) || ((els_op != ELS_LS_RJT) && - (!(1U << FIP_DT_MAC & 
desc_mask)))) { - LIBFCOE_FIP_DBG(fip, "Missing critical descriptors " - "in FIP ELS\n"); - goto drop; - } - - /* - * Convert skb into an fc_frame containing only the ELS. - */ - skb_pull(skb, (u8 *)fh - skb->data); - skb_trim(skb, els_len); - fp = (struct fc_frame *)skb; - fc_frame_init(fp); - fr_sof(fp) = FC_SOF_I3; - fr_eof(fp) = FC_EOF_T; - fr_dev(fp) = lport; - fr_encaps(fp) = els_dtype; - - stats = per_cpu_ptr(lport->dev_stats, get_cpu()); - stats->RxFrames++; - stats->RxWords += skb->len / FIP_BPW; - put_cpu(); - - fc_exch_recv(lport, fp); - return; - -len_err: - LIBFCOE_FIP_DBG(fip, "FIP length error in descriptor type %x len %zu\n", - desc->fip_dtype, dlen); -drop: - kfree_skb(skb); -} - -/** - * fcoe_ctlr_recv_els() - Handle an incoming link reset frame - * @fip: The FCoE controller that received the frame - * @fh: The received FIP header - * - * There may be multiple VN_Port descriptors. - * The overall length has already been checked. - */ -static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip, - struct fip_header *fh) -{ - struct fip_desc *desc; - struct fip_mac_desc *mp; - struct fip_wwn_desc *wp; - struct fip_vn_desc *vp; - size_t rlen; - size_t dlen; - struct fcoe_fcf *fcf = fip->sel_fcf; - struct fc_lport *lport = fip->lp; - struct fc_lport *vn_port = NULL; - u32 desc_mask; - int is_vn_port = 0; - - LIBFCOE_FIP_DBG(fip, "Clear Virtual Link received\n"); - - if (!fcf || !lport->port_id) - return; - - /* - * mask of required descriptors. Validating each one clears its bit. - */ - desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME) | BIT(FIP_DT_VN_ID); - - rlen = ntohs(fh->fip_dl_len) * FIP_BPW; - desc = (struct fip_desc *)(fh + 1); - while (rlen >= sizeof(*desc)) { - dlen = desc->fip_dlen * FIP_BPW; - if (dlen > rlen) - return; - /* Drop CVL if there are duplicate critical descriptors */ - if ((desc->fip_dtype < 32) && - !(desc_mask & 1U << desc->fip_dtype)) { - LIBFCOE_FIP_DBG(fip, "Duplicate Critical " - "Descriptors in FIP CVL\n"); - return; - } - switch (desc->fip_dtype) { - case FIP_DT_MAC: - mp = (struct fip_mac_desc *)desc; - if (dlen < sizeof(*mp)) - return; - if (compare_ether_addr(mp->fd_mac, fcf->fcf_mac)) - return; - desc_mask &= ~BIT(FIP_DT_MAC); - break; - case FIP_DT_NAME: - wp = (struct fip_wwn_desc *)desc; - if (dlen < sizeof(*wp)) - return; - if (get_unaligned_be64(&wp->fd_wwn) != fcf->switch_name) - return; - desc_mask &= ~BIT(FIP_DT_NAME); - break; - case FIP_DT_VN_ID: - vp = (struct fip_vn_desc *)desc; - if (dlen < sizeof(*vp)) - return; - if (compare_ether_addr(vp->fd_mac, - fip->get_src_addr(lport)) == 0 && - get_unaligned_be64(&vp->fd_wwpn) == lport->wwpn && - ntoh24(vp->fd_fc_id) == lport->port_id) { - desc_mask &= ~BIT(FIP_DT_VN_ID); - break; - } - /* check if clr_vlink is for NPIV port */ - mutex_lock(&lport->lp_mutex); - list_for_each_entry(vn_port, &lport->vports, list) { - if (compare_ether_addr(vp->fd_mac, - fip->get_src_addr(vn_port)) == 0 && - (get_unaligned_be64(&vp->fd_wwpn) - == vn_port->wwpn) && - (ntoh24(vp->fd_fc_id) == - fc_host_port_id(vn_port->host))) { - desc_mask &= ~BIT(FIP_DT_VN_ID); - is_vn_port = 1; - break; - } - } - mutex_unlock(&lport->lp_mutex); - - break; - default: - /* standard says ignore unknown descriptors >= 128 */ - if (desc->fip_dtype < FIP_DT_VENDOR_BASE) - return; - break; - } - desc = (struct fip_desc *)((char *)desc + dlen); - rlen -= dlen; - } - - /* - * reset only if all required descriptors were present and valid. 
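The Clear Virtual Link handler above uses the same required-descriptor bitmask idiom as the advertisement parser: one bit per mandatory descriptor type, cleared as each one validates, so duplicates and omissions both surface as mask errors. A standalone sketch of the idiom, with descriptor type codes shown for illustration only:

#include <stdio.h>

#define BIT(n)		(1U << (n))
#define DT_MAC		2	/* illustrative FIP descriptor type codes */
#define DT_NAME		4
#define DT_VN_ID	11

static int validate(const unsigned char *types, int n)
{
	unsigned int mask = BIT(DT_MAC) | BIT(DT_NAME) | BIT(DT_VN_ID);
	int i;

	for (i = 0; i < n; i++) {
		unsigned int t = types[i];

		if (t >= 32)
			continue;	/* non-critical, ignored here */
		if (!(mask & BIT(t)))
			return -1;	/* duplicate or unexpected critical descriptor */
		mask &= ~BIT(t);
	}
	return mask ? -1 : 0;		/* leftover bits: required descriptor missing */
}

int main(void)
{
	unsigned char ok[]  = { DT_MAC, DT_NAME, DT_VN_ID };
	unsigned char bad[] = { DT_MAC, DT_MAC, DT_VN_ID };

	printf("%d %d\n", validate(ok, 3), validate(bad, 3));	/* 0 -1 */
	return 0;
}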
- */ - if (desc_mask) { - LIBFCOE_FIP_DBG(fip, "missing descriptors mask %x\n", - desc_mask); - } else { - LIBFCOE_FIP_DBG(fip, "performing Clear Virtual Link\n"); - - if (is_vn_port) - fc_lport_reset(vn_port); - else { - mutex_lock(&fip->ctlr_mutex); - per_cpu_ptr(lport->dev_stats, - get_cpu())->VLinkFailureCount++; - put_cpu(); - fcoe_ctlr_reset(fip); - mutex_unlock(&fip->ctlr_mutex); - - fc_lport_reset(fip->lp); - fcoe_ctlr_solicit(fip, NULL); - } - } -} - -/** - * fcoe_ctlr_recv() - Receive a FIP packet - * @fip: The FCoE controller that received the packet - * @skb: The received FIP packet - * - * This may be called from either NET_RX_SOFTIRQ or IRQ. - */ -void fcoe_ctlr_recv(struct fcoe_ctlr *fip, struct sk_buff *skb) -{ - skb_queue_tail(&fip->fip_recv_list, skb); - schedule_work(&fip->recv_work); -} -EXPORT_SYMBOL(fcoe_ctlr_recv); - -/** - * fcoe_ctlr_recv_handler() - Receive a FIP frame - * @fip: The FCoE controller that received the frame - * @skb: The received FIP frame - * - * Returns non-zero if the frame is dropped. - */ -static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb) -{ - struct fip_header *fiph; - struct ethhdr *eh; - enum fip_state state; - u16 op; - u8 sub; - - if (skb_linearize(skb)) - goto drop; - if (skb->len < sizeof(*fiph)) - goto drop; - eh = eth_hdr(skb); - if (fip->mode == FIP_MODE_VN2VN) { - if (compare_ether_addr(eh->h_dest, fip->ctl_src_addr) && - compare_ether_addr(eh->h_dest, fcoe_all_vn2vn) && - compare_ether_addr(eh->h_dest, fcoe_all_p2p)) - goto drop; - } else if (compare_ether_addr(eh->h_dest, fip->ctl_src_addr) && - compare_ether_addr(eh->h_dest, fcoe_all_enode)) - goto drop; - fiph = (struct fip_header *)skb->data; - op = ntohs(fiph->fip_op); - sub = fiph->fip_subcode; - - if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER) - goto drop; - if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len) - goto drop; - - mutex_lock(&fip->ctlr_mutex); - state = fip->state; - if (state == FIP_ST_AUTO) { - fip->map_dest = 0; - fcoe_ctlr_set_state(fip, FIP_ST_ENABLED); - state = FIP_ST_ENABLED; - LIBFCOE_FIP_DBG(fip, "Using FIP mode\n"); - } - mutex_unlock(&fip->ctlr_mutex); - - if (fip->mode == FIP_MODE_VN2VN && op == FIP_OP_VN2VN) - return fcoe_ctlr_vn_recv(fip, skb); - - if (state != FIP_ST_ENABLED && state != FIP_ST_VNMP_UP && - state != FIP_ST_VNMP_CLAIM) - goto drop; - - if (op == FIP_OP_LS) { - fcoe_ctlr_recv_els(fip, skb); /* consumes skb */ - return 0; - } - - if (state != FIP_ST_ENABLED) - goto drop; - - if (op == FIP_OP_DISC && sub == FIP_SC_ADV) - fcoe_ctlr_recv_adv(fip, skb); - else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) - fcoe_ctlr_recv_clr_vlink(fip, fiph); - kfree_skb(skb); - return 0; -drop: - kfree_skb(skb); - return -1; -} - -/** - * fcoe_ctlr_select() - Select the best FCF (if possible) - * @fip: The FCoE controller - * - * Returns the selected FCF, or NULL if none are usable. - * - * If there are conflicting advertisements, no FCF can be chosen. - * - * If there is already a selected FCF, this will choose a better one or - * an equivalent one that hasn't already been sent a FLOGI. - * - * Called with lock held. 
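fcoe_ctlr_recv_handler() above gates everything on one arithmetic check: the descriptor-list length in the FIP header is counted in 4-byte words and must fit inside the received frame. A user-space sketch of that check, using a reduced, illustrative header layout rather than the real struct fip_header:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define FIP_BPW 4				/* FIP lengths are in 4-byte words */

struct fip_hdr {				/* simplified stand-in layout */
	uint8_t  ver;
	uint16_t op;
	uint8_t  subcode;
	uint16_t dl_len;			/* descriptor list length, in words */
	uint16_t flags;
};

static int fip_len_ok(const struct fip_hdr *h, size_t frame_len)
{
	size_t dlen = (size_t)h->dl_len * FIP_BPW;

	return sizeof(*h) + dlen <= frame_len;	/* drop truncated frames */
}

int main(void)
{
	struct fip_hdr h = { .dl_len = 5 };	/* 20 bytes of descriptors */

	printf("%d %d\n",
	       fip_len_ok(&h, sizeof(h) + 20),	/* 1: exact fit */
	       fip_len_ok(&h, sizeof(h) + 12));	/* 0: truncated */
	return 0;
}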
- */ -static struct fcoe_fcf *fcoe_ctlr_select(struct fcoe_ctlr *fip) -{ - struct fcoe_fcf *fcf; - struct fcoe_fcf *best = fip->sel_fcf; - struct fcoe_fcf *first; - - first = list_first_entry(&fip->fcfs, struct fcoe_fcf, list); - - list_for_each_entry(fcf, &fip->fcfs, list) { - LIBFCOE_FIP_DBG(fip, "consider FCF fab %16.16llx " - "VFID %d mac %pM map %x val %d " - "sent %u pri %u\n", - fcf->fabric_name, fcf->vfid, fcf->fcf_mac, - fcf->fc_map, fcoe_ctlr_mtu_valid(fcf), - fcf->flogi_sent, fcf->pri); - if (fcf->fabric_name != first->fabric_name || - fcf->vfid != first->vfid || - fcf->fc_map != first->fc_map) { - LIBFCOE_FIP_DBG(fip, "Conflicting fabric, VFID, " - "or FC-MAP\n"); - return NULL; - } - if (fcf->flogi_sent) - continue; - if (!fcoe_ctlr_fcf_usable(fcf)) { - LIBFCOE_FIP_DBG(fip, "FCF for fab %16.16llx " - "map %x %svalid %savailable\n", - fcf->fabric_name, fcf->fc_map, - (fcf->flags & FIP_FL_SOL) ? "" : "in", - (fcf->flags & FIP_FL_AVAIL) ? - "" : "un"); - continue; - } - if (!best || fcf->pri < best->pri || best->flogi_sent) - best = fcf; - } - fip->sel_fcf = best; - if (best) { - LIBFCOE_FIP_DBG(fip, "using FCF mac %pM\n", best->fcf_mac); - fip->port_ka_time = jiffies + - msecs_to_jiffies(FIP_VN_KA_PERIOD); - fip->ctlr_ka_time = jiffies + best->fka_period; - if (time_before(fip->ctlr_ka_time, fip->timer.expires)) - mod_timer(&fip->timer, fip->ctlr_ka_time); - } - return best; -} - -/** - * fcoe_ctlr_flogi_send_locked() - send FIP-encapsulated FLOGI to current FCF - * @fip: The FCoE controller - * - * Returns non-zero error if it could not be sent. - * - * Called with ctlr_mutex and ctlr_lock held. - * Caller must verify that fip->sel_fcf is not NULL. - */ -static int fcoe_ctlr_flogi_send_locked(struct fcoe_ctlr *fip) -{ - struct sk_buff *skb; - struct sk_buff *skb_orig; - struct fc_frame_header *fh; - int error; - - skb_orig = fip->flogi_req; - if (!skb_orig) - return -EINVAL; - - /* - * Clone and send the FLOGI request. If clone fails, use original. - */ - skb = skb_clone(skb_orig, GFP_ATOMIC); - if (!skb) { - skb = skb_orig; - fip->flogi_req = NULL; - } - fh = (struct fc_frame_header *)skb->data; - error = fcoe_ctlr_encaps(fip, fip->lp, FIP_DT_FLOGI, skb, - ntoh24(fh->fh_d_id)); - if (error) { - kfree_skb(skb); - return error; - } - fip->send(fip, skb); - fip->sel_fcf->flogi_sent = 1; - return 0; -} - -/** - * fcoe_ctlr_flogi_retry() - resend FLOGI request to a new FCF if possible - * @fip: The FCoE controller - * - * Returns non-zero error code if there's no FLOGI request to retry or - * no alternate FCF available. - */ -static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip) -{ - struct fcoe_fcf *fcf; - int error; - - mutex_lock(&fip->ctlr_mutex); - spin_lock_bh(&fip->ctlr_lock); - LIBFCOE_FIP_DBG(fip, "re-sending FLOGI - reselect\n"); - fcf = fcoe_ctlr_select(fip); - if (!fcf || fcf->flogi_sent) { - kfree_skb(fip->flogi_req); - fip->flogi_req = NULL; - error = -ENOENT; - } else { - fcoe_ctlr_solicit(fip, NULL); - error = fcoe_ctlr_flogi_send_locked(fip); - } - spin_unlock_bh(&fip->ctlr_lock); - mutex_unlock(&fip->ctlr_mutex); - return error; -} - - -/** - * fcoe_ctlr_flogi_send() - Handle sending of FIP FLOGI. - * @fip: The FCoE controller that timed out - * - * Done here because fcoe_ctlr_els_send() can't get mutex. - * - * Called with ctlr_mutex held. The caller must not hold ctlr_lock. 
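fcoe_ctlr_select() above applies two rules: refuse to select anything if the advertised fabrics conflict, otherwise prefer the lowest priority value among usable FCFs. A simplified standalone rendering of those two rules, with the data model reduced to the compared fields and the FLOGI-retry bookkeeping omitted:

#include <stdio.h>
#include <stdint.h>

struct fcf {
	uint64_t fabric_name;
	uint16_t vfid;
	uint32_t fc_map;
	uint8_t  pri;		/* lower value is preferred */
	int      usable;	/* stands in for fcoe_ctlr_fcf_usable() */
};

static const struct fcf *select_fcf(const struct fcf *f, int n)
{
	const struct fcf *best = NULL;
	int i;

	for (i = 0; i < n; i++) {
		if (f[i].fabric_name != f[0].fabric_name ||
		    f[i].vfid != f[0].vfid || f[i].fc_map != f[0].fc_map)
			return NULL;		/* conflicting fabrics: select nothing */
		if (!f[i].usable)
			continue;
		if (!best || f[i].pri < best->pri)
			best = &f[i];
	}
	return best;
}

int main(void)
{
	struct fcf list[] = {
		{ 0x100000051e55aa01ULL, 1, 0x0efc00, 128, 1 },
		{ 0x100000051e55aa01ULL, 1, 0x0efc00,  10, 1 },
	};
	const struct fcf *sel = select_fcf(list, 2);

	printf("selected pri %u\n", (unsigned int)(sel ? sel->pri : 0));	/* 10 */
	return 0;
}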
- */ -static void fcoe_ctlr_flogi_send(struct fcoe_ctlr *fip) -{ - struct fcoe_fcf *fcf; - - spin_lock_bh(&fip->ctlr_lock); - fcf = fip->sel_fcf; - if (!fcf || !fip->flogi_req_send) - goto unlock; - - LIBFCOE_FIP_DBG(fip, "sending FLOGI\n"); - - /* - * If this FLOGI is being sent due to a timeout retry - * to the same FCF as before, select a different FCF if possible. - */ - if (fcf->flogi_sent) { - LIBFCOE_FIP_DBG(fip, "sending FLOGI - reselect\n"); - fcf = fcoe_ctlr_select(fip); - if (!fcf || fcf->flogi_sent) { - LIBFCOE_FIP_DBG(fip, "sending FLOGI - clearing\n"); - list_for_each_entry(fcf, &fip->fcfs, list) - fcf->flogi_sent = 0; - fcf = fcoe_ctlr_select(fip); - } - } - if (fcf) { - fcoe_ctlr_flogi_send_locked(fip); - fip->flogi_req_send = 0; - } else /* XXX */ - LIBFCOE_FIP_DBG(fip, "No FCF selected - defer send\n"); -unlock: - spin_unlock_bh(&fip->ctlr_lock); -} - -/** - * fcoe_ctlr_timeout() - FIP timeout handler - * @arg: The FCoE controller that timed out - */ -static void fcoe_ctlr_timeout(unsigned long arg) -{ - struct fcoe_ctlr *fip = (struct fcoe_ctlr *)arg; - - schedule_work(&fip->timer_work); -} - -/** - * fcoe_ctlr_timer_work() - Worker thread function for timer work - * @work: Handle to a FCoE controller - * - * Ages FCFs. Triggers FCF selection if possible. - * Sends keep-alives and resets. - */ -static void fcoe_ctlr_timer_work(struct work_struct *work) -{ - struct fcoe_ctlr *fip; - struct fc_lport *vport; - u8 *mac; - u8 reset = 0; - u8 send_ctlr_ka = 0; - u8 send_port_ka = 0; - struct fcoe_fcf *sel; - struct fcoe_fcf *fcf; - unsigned long next_timer; - - fip = container_of(work, struct fcoe_ctlr, timer_work); - if (fip->mode == FIP_MODE_VN2VN) - return fcoe_ctlr_vn_timeout(fip); - mutex_lock(&fip->ctlr_mutex); - if (fip->state == FIP_ST_DISABLED) { - mutex_unlock(&fip->ctlr_mutex); - return; - } - - fcf = fip->sel_fcf; - next_timer = fcoe_ctlr_age_fcfs(fip); - - sel = fip->sel_fcf; - if (!sel && fip->sel_time) { - if (time_after_eq(jiffies, fip->sel_time)) { - sel = fcoe_ctlr_select(fip); - fip->sel_time = 0; - } else if (time_after(next_timer, fip->sel_time)) - next_timer = fip->sel_time; - } - - if (sel && fip->flogi_req_send) - fcoe_ctlr_flogi_send(fip); - else if (!sel && fcf) - reset = 1; - - if (sel && !sel->fd_flags) { - if (time_after_eq(jiffies, fip->ctlr_ka_time)) { - fip->ctlr_ka_time = jiffies + sel->fka_period; - send_ctlr_ka = 1; - } - if (time_after(next_timer, fip->ctlr_ka_time)) - next_timer = fip->ctlr_ka_time; - - if (time_after_eq(jiffies, fip->port_ka_time)) { - fip->port_ka_time = jiffies + - msecs_to_jiffies(FIP_VN_KA_PERIOD); - send_port_ka = 1; - } - if (time_after(next_timer, fip->port_ka_time)) - next_timer = fip->port_ka_time; - } - if (!list_empty(&fip->fcfs)) - mod_timer(&fip->timer, next_timer); - mutex_unlock(&fip->ctlr_mutex); - - if (reset) { - fc_lport_reset(fip->lp); - /* restart things with a solicitation */ - fcoe_ctlr_solicit(fip, NULL); - } - - if (send_ctlr_ka) - fcoe_ctlr_send_keep_alive(fip, NULL, 0, fip->ctl_src_addr); - - if (send_port_ka) { - mutex_lock(&fip->lp->lp_mutex); - mac = fip->get_src_addr(fip->lp); - fcoe_ctlr_send_keep_alive(fip, fip->lp, 1, mac); - list_for_each_entry(vport, &fip->lp->vports, list) { - mac = fip->get_src_addr(vport); - fcoe_ctlr_send_keep_alive(fip, vport, 1, mac); - } - mutex_unlock(&fip->lp->lp_mutex); - } -} - -/** - * fcoe_ctlr_recv_work() - Worker thread function for receiving FIP frames - * @recv_work: Handle to a FCoE controller - */ -static void fcoe_ctlr_recv_work(struct work_struct 
*recv_work) -{ - struct fcoe_ctlr *fip; - struct sk_buff *skb; - - fip = container_of(recv_work, struct fcoe_ctlr, recv_work); - while ((skb = skb_dequeue(&fip->fip_recv_list))) - fcoe_ctlr_recv_handler(fip, skb); -} - -/** - * fcoe_ctlr_recv_flogi() - Snoop pre-FIP receipt of FLOGI response - * @fip: The FCoE controller - * @fp: The FC frame to snoop - * - * Snoop potential response to FLOGI or even incoming FLOGI. - * - * The caller has checked that we are waiting for login as indicated - * by fip->flogi_oxid != FC_XID_UNKNOWN. - * - * The caller is responsible for freeing the frame. - * Fill in the granted_mac address. - * - * Return non-zero if the frame should not be delivered to libfc. - */ -int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_lport *lport, - struct fc_frame *fp) -{ - struct fc_frame_header *fh; - u8 op; - u8 *sa; - - sa = eth_hdr(&fp->skb)->h_source; - fh = fc_frame_header_get(fp); - if (fh->fh_type != FC_TYPE_ELS) - return 0; - - op = fc_frame_payload_op(fp); - if (op == ELS_LS_ACC && fh->fh_r_ctl == FC_RCTL_ELS_REP && - fip->flogi_oxid == ntohs(fh->fh_ox_id)) { - - mutex_lock(&fip->ctlr_mutex); - if (fip->state != FIP_ST_AUTO && fip->state != FIP_ST_NON_FIP) { - mutex_unlock(&fip->ctlr_mutex); - return -EINVAL; - } - fcoe_ctlr_set_state(fip, FIP_ST_NON_FIP); - LIBFCOE_FIP_DBG(fip, - "received FLOGI LS_ACC using non-FIP mode\n"); - - /* - * FLOGI accepted. - * If the src mac addr is FC_OUI-based, then we mark the - * address_mode flag to use FC_OUI-based Ethernet DA. - * Otherwise we use the FCoE gateway addr - */ - if (!compare_ether_addr(sa, (u8[6])FC_FCOE_FLOGI_MAC)) { - fcoe_ctlr_map_dest(fip); - } else { - memcpy(fip->dest_addr, sa, ETH_ALEN); - fip->map_dest = 0; - } - fip->flogi_oxid = FC_XID_UNKNOWN; - mutex_unlock(&fip->ctlr_mutex); - fc_fcoe_set_mac(fr_cb(fp)->granted_mac, fh->fh_d_id); - } else if (op == ELS_FLOGI && fh->fh_r_ctl == FC_RCTL_ELS_REQ && sa) { - /* - * Save source MAC for point-to-point responses. - */ - mutex_lock(&fip->ctlr_mutex); - if (fip->state == FIP_ST_AUTO || fip->state == FIP_ST_NON_FIP) { - memcpy(fip->dest_addr, sa, ETH_ALEN); - fip->map_dest = 0; - if (fip->state == FIP_ST_AUTO) - LIBFCOE_FIP_DBG(fip, "received non-FIP FLOGI. 
" - "Setting non-FIP mode\n"); - fcoe_ctlr_set_state(fip, FIP_ST_NON_FIP); - } - mutex_unlock(&fip->ctlr_mutex); - } - return 0; -} -EXPORT_SYMBOL(fcoe_ctlr_recv_flogi); - -/** - * fcoe_wwn_from_mac() - Converts a 48-bit IEEE MAC address to a 64-bit FC WWN - * @mac: The MAC address to convert - * @scheme: The scheme to use when converting - * @port: The port indicator for converting - * - * Returns: u64 fc world wide name - */ -u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN], - unsigned int scheme, unsigned int port) -{ - u64 wwn; - u64 host_mac; - - /* The MAC is in NO, so flip only the low 48 bits */ - host_mac = ((u64) mac[0] << 40) | - ((u64) mac[1] << 32) | - ((u64) mac[2] << 24) | - ((u64) mac[3] << 16) | - ((u64) mac[4] << 8) | - (u64) mac[5]; - - WARN_ON(host_mac >= (1ULL << 48)); - wwn = host_mac | ((u64) scheme << 60); - switch (scheme) { - case 1: - WARN_ON(port != 0); - break; - case 2: - WARN_ON(port >= 0xfff); - wwn |= (u64) port << 48; - break; - default: - WARN_ON(1); - break; - } - - return wwn; -} -EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac); - -/** - * fcoe_ctlr_rport() - return the fcoe_rport for a given fc_rport_priv - * @rdata: libfc remote port - */ -static inline struct fcoe_rport *fcoe_ctlr_rport(struct fc_rport_priv *rdata) -{ - return (struct fcoe_rport *)(rdata + 1); -} - -/** - * fcoe_ctlr_vn_send() - Send a FIP VN2VN Probe Request or Reply. - * @fip: The FCoE controller - * @sub: sub-opcode for probe request, reply, or advertisement. - * @dest: The destination Ethernet MAC address - * @min_len: minimum size of the Ethernet payload to be sent - */ -static void fcoe_ctlr_vn_send(struct fcoe_ctlr *fip, - enum fip_vn2vn_subcode sub, - const u8 *dest, size_t min_len) -{ - struct sk_buff *skb; - struct fip_frame { - struct ethhdr eth; - struct fip_header fip; - struct fip_mac_desc mac; - struct fip_wwn_desc wwnn; - struct fip_vn_desc vn; - } __attribute__((packed)) *frame; - struct fip_fc4_feat *ff; - struct fip_size_desc *size; - u32 fcp_feat; - size_t len; - size_t dlen; - - len = sizeof(*frame); - dlen = 0; - if (sub == FIP_SC_VN_CLAIM_NOTIFY || sub == FIP_SC_VN_CLAIM_REP) { - dlen = sizeof(struct fip_fc4_feat) + - sizeof(struct fip_size_desc); - len += dlen; - } - dlen += sizeof(frame->mac) + sizeof(frame->wwnn) + sizeof(frame->vn); - len = max(len, min_len + sizeof(struct ethhdr)); - - skb = dev_alloc_skb(len); - if (!skb) - return; - - frame = (struct fip_frame *)skb->data; - memset(frame, 0, len); - memcpy(frame->eth.h_dest, dest, ETH_ALEN); - memcpy(frame->eth.h_source, fip->ctl_src_addr, ETH_ALEN); - frame->eth.h_proto = htons(ETH_P_FIP); - - frame->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER); - frame->fip.fip_op = htons(FIP_OP_VN2VN); - frame->fip.fip_subcode = sub; - frame->fip.fip_dl_len = htons(dlen / FIP_BPW); - - frame->mac.fd_desc.fip_dtype = FIP_DT_MAC; - frame->mac.fd_desc.fip_dlen = sizeof(frame->mac) / FIP_BPW; - memcpy(frame->mac.fd_mac, fip->ctl_src_addr, ETH_ALEN); - - frame->wwnn.fd_desc.fip_dtype = FIP_DT_NAME; - frame->wwnn.fd_desc.fip_dlen = sizeof(frame->wwnn) / FIP_BPW; - put_unaligned_be64(fip->lp->wwnn, &frame->wwnn.fd_wwn); - - frame->vn.fd_desc.fip_dtype = FIP_DT_VN_ID; - frame->vn.fd_desc.fip_dlen = sizeof(frame->vn) / FIP_BPW; - hton24(frame->vn.fd_mac, FIP_VN_FC_MAP); - hton24(frame->vn.fd_mac + 3, fip->port_id); - hton24(frame->vn.fd_fc_id, fip->port_id); - put_unaligned_be64(fip->lp->wwpn, &frame->vn.fd_wwpn); - - /* - * For claims, add FC-4 features. - * TBD: Add interface to get fc-4 types and features from libfc. 
- */ - if (sub == FIP_SC_VN_CLAIM_NOTIFY || sub == FIP_SC_VN_CLAIM_REP) { - ff = (struct fip_fc4_feat *)(frame + 1); - ff->fd_desc.fip_dtype = FIP_DT_FC4F; - ff->fd_desc.fip_dlen = sizeof(*ff) / FIP_BPW; - ff->fd_fts = fip->lp->fcts; - - fcp_feat = 0; - if (fip->lp->service_params & FCP_SPPF_INIT_FCN) - fcp_feat |= FCP_FEAT_INIT; - if (fip->lp->service_params & FCP_SPPF_TARG_FCN) - fcp_feat |= FCP_FEAT_TARG; - fcp_feat <<= (FC_TYPE_FCP * 4) % 32; - ff->fd_ff.fd_feat[FC_TYPE_FCP * 4 / 32] = htonl(fcp_feat); - - size = (struct fip_size_desc *)(ff + 1); - size->fd_desc.fip_dtype = FIP_DT_FCOE_SIZE; - size->fd_desc.fip_dlen = sizeof(*size) / FIP_BPW; - size->fd_size = htons(fcoe_ctlr_fcoe_size(fip)); - } - - skb_put(skb, len); - skb->protocol = htons(ETH_P_FIP); - skb_reset_mac_header(skb); - skb_reset_network_header(skb); - - fip->send(fip, skb); -} - -/** - * fcoe_ctlr_vn_rport_callback - Event handler for rport events. - * @lport: The lport which is receiving the event - * @rdata: remote port private data - * @event: The event that occured - * - * Locking Note: The rport lock must not be held when calling this function. - */ -static void fcoe_ctlr_vn_rport_callback(struct fc_lport *lport, - struct fc_rport_priv *rdata, - enum fc_rport_event event) -{ - struct fcoe_ctlr *fip = lport->disc.priv; - struct fcoe_rport *frport = fcoe_ctlr_rport(rdata); - - LIBFCOE_FIP_DBG(fip, "vn_rport_callback %x event %d\n", - rdata->ids.port_id, event); - - mutex_lock(&fip->ctlr_mutex); - switch (event) { - case RPORT_EV_READY: - frport->login_count = 0; - break; - case RPORT_EV_LOGO: - case RPORT_EV_FAILED: - case RPORT_EV_STOP: - frport->login_count++; - if (frport->login_count > FCOE_CTLR_VN2VN_LOGIN_LIMIT) { - LIBFCOE_FIP_DBG(fip, - "rport FLOGI limited port_id %6.6x\n", - rdata->ids.port_id); - lport->tt.rport_logoff(rdata); - } - break; - default: - break; - } - mutex_unlock(&fip->ctlr_mutex); -} - -static struct fc_rport_operations fcoe_ctlr_vn_rport_ops = { - .event_callback = fcoe_ctlr_vn_rport_callback, -}; - -/** - * fcoe_ctlr_disc_stop_locked() - stop discovery in VN2VN mode - * @fip: The FCoE controller - * - * Called with ctlr_mutex held. - */ -static void fcoe_ctlr_disc_stop_locked(struct fc_lport *lport) -{ - mutex_lock(&lport->disc.disc_mutex); - lport->disc.disc_callback = NULL; - mutex_unlock(&lport->disc.disc_mutex); -} - -/** - * fcoe_ctlr_disc_stop() - stop discovery in VN2VN mode - * @fip: The FCoE controller - * - * Called through the local port template for discovery. - * Called without the ctlr_mutex held. - */ -static void fcoe_ctlr_disc_stop(struct fc_lport *lport) -{ - struct fcoe_ctlr *fip = lport->disc.priv; - - mutex_lock(&fip->ctlr_mutex); - fcoe_ctlr_disc_stop_locked(lport); - mutex_unlock(&fip->ctlr_mutex); -} - -/** - * fcoe_ctlr_disc_stop_final() - stop discovery for shutdown in VN2VN mode - * @fip: The FCoE controller - * - * Called through the local port template for discovery. - * Called without the ctlr_mutex held. - */ -static void fcoe_ctlr_disc_stop_final(struct fc_lport *lport) -{ - fcoe_ctlr_disc_stop(lport); - lport->tt.rport_flush_queue(); - synchronize_rcu(); -} - -/** - * fcoe_ctlr_vn_restart() - VN2VN probe restart with new port_id - * @fip: The FCoE controller - * - * Called with fcoe_ctlr lock held. - */ -static void fcoe_ctlr_vn_restart(struct fcoe_ctlr *fip) -{ - unsigned long wait; - u32 port_id; - - fcoe_ctlr_disc_stop_locked(fip->lp); - - /* - * Get proposed port ID. - * If this is the first try after link up, use any previous port_id. 
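The claim built above advertises FC-4 features with one 4-bit field per FC-4 type, so type T lands in 32-bit word T*4/32 at bit offset (T*4)%32; FC_TYPE_FCP is 0x08, which is why the FCP bits end up in word 1. A small sketch of that packing, with placeholder feature bit values rather than the FCP-defined ones:

#include <stdio.h>
#include <stdint.h>

#define FC_TYPE_FCP	0x08
#define FEAT_INIT	0x2	/* placeholder feature bits */
#define FEAT_TARG	0x1

static void set_fc4_feat(uint32_t feats[8], unsigned int type, uint32_t bits)
{
	feats[type * 4 / 32] |= bits << ((type * 4) % 32);
}

int main(void)
{
	uint32_t feats[8] = { 0 };

	set_fc4_feat(feats, FC_TYPE_FCP, FEAT_INIT);
	printf("word1 = %08x\n", (unsigned int)feats[1]);	/* 00000002 */
	return 0;
}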
- * If there was none, use the low bits of the port_name. - * On subsequent tries, get the next random one. - * Don't use reserved IDs, use another non-zero value, just as random. - */ - port_id = fip->port_id; - if (fip->probe_tries) - port_id = prandom32(&fip->rnd_state) & 0xffff; - else if (!port_id) - port_id = fip->lp->wwpn & 0xffff; - if (!port_id || port_id == 0xffff) - port_id = 1; - fip->port_id = port_id; - - if (fip->probe_tries < FIP_VN_RLIM_COUNT) { - fip->probe_tries++; - wait = random32() % FIP_VN_PROBE_WAIT; - } else - wait = FIP_VN_RLIM_INT; - mod_timer(&fip->timer, jiffies + msecs_to_jiffies(wait)); - fcoe_ctlr_set_state(fip, FIP_ST_VNMP_START); -} - -/** - * fcoe_ctlr_vn_start() - Start in VN2VN mode - * @fip: The FCoE controller - * - * Called with fcoe_ctlr lock held. - */ -static void fcoe_ctlr_vn_start(struct fcoe_ctlr *fip) -{ - fip->probe_tries = 0; - prandom32_seed(&fip->rnd_state, fip->lp->wwpn); - fcoe_ctlr_vn_restart(fip); -} - -/** - * fcoe_ctlr_vn_parse - parse probe request or response - * @fip: The FCoE controller - * @skb: incoming packet - * @rdata: buffer for resulting parsed VN entry plus fcoe_rport - * - * Returns non-zero error number on error. - * Does not consume the packet. - */ -static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip, - struct sk_buff *skb, - struct fc_rport_priv *rdata) -{ - struct fip_header *fiph; - struct fip_desc *desc = NULL; - struct fip_mac_desc *macd = NULL; - struct fip_wwn_desc *wwn = NULL; - struct fip_vn_desc *vn = NULL; - struct fip_size_desc *size = NULL; - struct fcoe_rport *frport; - size_t rlen; - size_t dlen; - u32 desc_mask = 0; - u32 dtype; - u8 sub; - - memset(rdata, 0, sizeof(*rdata) + sizeof(*frport)); - frport = fcoe_ctlr_rport(rdata); - - fiph = (struct fip_header *)skb->data; - frport->flags = ntohs(fiph->fip_flags); - - sub = fiph->fip_subcode; - switch (sub) { - case FIP_SC_VN_PROBE_REQ: - case FIP_SC_VN_PROBE_REP: - case FIP_SC_VN_BEACON: - desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME) | - BIT(FIP_DT_VN_ID); - break; - case FIP_SC_VN_CLAIM_NOTIFY: - case FIP_SC_VN_CLAIM_REP: - desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME) | - BIT(FIP_DT_VN_ID) | BIT(FIP_DT_FC4F) | - BIT(FIP_DT_FCOE_SIZE); - break; - default: - LIBFCOE_FIP_DBG(fip, "vn_parse unknown subcode %u\n", sub); - return -EINVAL; - } - - rlen = ntohs(fiph->fip_dl_len) * 4; - if (rlen + sizeof(*fiph) > skb->len) - return -EINVAL; - - desc = (struct fip_desc *)(fiph + 1); - while (rlen > 0) { - dlen = desc->fip_dlen * FIP_BPW; - if (dlen < sizeof(*desc) || dlen > rlen) - return -EINVAL; - - dtype = desc->fip_dtype; - if (dtype < 32) { - if (!(desc_mask & BIT(dtype))) { - LIBFCOE_FIP_DBG(fip, - "unexpected or duplicated desc " - "desc type %u in " - "FIP VN2VN subtype %u\n", - dtype, sub); - return -EINVAL; - } - desc_mask &= ~BIT(dtype); - } - - switch (dtype) { - case FIP_DT_MAC: - if (dlen != sizeof(struct fip_mac_desc)) - goto len_err; - macd = (struct fip_mac_desc *)desc; - if (!is_valid_ether_addr(macd->fd_mac)) { - LIBFCOE_FIP_DBG(fip, - "Invalid MAC addr %pM in FIP VN2VN\n", - macd->fd_mac); - return -EINVAL; - } - memcpy(frport->enode_mac, macd->fd_mac, ETH_ALEN); - break; - case FIP_DT_NAME: - if (dlen != sizeof(struct fip_wwn_desc)) - goto len_err; - wwn = (struct fip_wwn_desc *)desc; - rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn); - break; - case FIP_DT_VN_ID: - if (dlen != sizeof(struct fip_vn_desc)) - goto len_err; - vn = (struct fip_vn_desc *)desc; - memcpy(frport->vn_mac, vn->fd_mac, ETH_ALEN); - rdata->ids.port_id = 
ntoh24(vn->fd_fc_id); - rdata->ids.port_name = get_unaligned_be64(&vn->fd_wwpn); - break; - case FIP_DT_FC4F: - if (dlen != sizeof(struct fip_fc4_feat)) - goto len_err; - break; - case FIP_DT_FCOE_SIZE: - if (dlen != sizeof(struct fip_size_desc)) - goto len_err; - size = (struct fip_size_desc *)desc; - frport->fcoe_len = ntohs(size->fd_size); - break; - default: - LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x " - "in FIP probe\n", dtype); - /* standard says ignore unknown descriptors >= 128 */ - if (dtype < FIP_DT_VENDOR_BASE) - return -EINVAL; - break; - } - desc = (struct fip_desc *)((char *)desc + dlen); - rlen -= dlen; - } - return 0; - -len_err: - LIBFCOE_FIP_DBG(fip, "FIP length error in descriptor type %x len %zu\n", - dtype, dlen); - return -EINVAL; -} - -/** - * fcoe_ctlr_vn_send_claim() - send multicast FIP VN2VN Claim Notification. - * @fip: The FCoE controller - * - * Called with ctlr_mutex held. - */ -static void fcoe_ctlr_vn_send_claim(struct fcoe_ctlr *fip) -{ - fcoe_ctlr_vn_send(fip, FIP_SC_VN_CLAIM_NOTIFY, fcoe_all_vn2vn, 0); - fip->sol_time = jiffies; -} - -/** - * fcoe_ctlr_vn_probe_req() - handle incoming VN2VN probe request. - * @fip: The FCoE controller - * @rdata: parsed remote port with frport from the probe request - * - * Called with ctlr_mutex held. - */ -static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip, - struct fc_rport_priv *rdata) -{ - struct fcoe_rport *frport = fcoe_ctlr_rport(rdata); - - if (rdata->ids.port_id != fip->port_id) - return; - - switch (fip->state) { - case FIP_ST_VNMP_CLAIM: - case FIP_ST_VNMP_UP: - fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REP, - frport->enode_mac, 0); - break; - case FIP_ST_VNMP_PROBE1: - case FIP_ST_VNMP_PROBE2: - /* - * Decide whether to reply to the Probe. - * Our selected address is never a "recorded" one, so - * only reply if our WWPN is greater and the - * Probe's REC bit is not set. - * If we don't reply, we will change our address. - */ - if (fip->lp->wwpn > rdata->ids.port_name && - !(frport->flags & FIP_FL_REC_OR_P2P)) { - fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REP, - frport->enode_mac, 0); - break; - } - /* fall through */ - case FIP_ST_VNMP_START: - fcoe_ctlr_vn_restart(fip); - break; - default: - break; - } -} - -/** - * fcoe_ctlr_vn_probe_reply() - handle incoming VN2VN probe reply. - * @fip: The FCoE controller - * @rdata: parsed remote port with frport from the probe request - * - * Called with ctlr_mutex held. - */ -static void fcoe_ctlr_vn_probe_reply(struct fcoe_ctlr *fip, - struct fc_rport_priv *rdata) -{ - if (rdata->ids.port_id != fip->port_id) - return; - switch (fip->state) { - case FIP_ST_VNMP_START: - case FIP_ST_VNMP_PROBE1: - case FIP_ST_VNMP_PROBE2: - case FIP_ST_VNMP_CLAIM: - fcoe_ctlr_vn_restart(fip); - break; - case FIP_ST_VNMP_UP: - fcoe_ctlr_vn_send_claim(fip); - break; - default: - break; - } -} - -/** - * fcoe_ctlr_vn_add() - Add a VN2VN entry to the list, based on a claim reply. - * @fip: The FCoE controller - * @new: newly-parsed remote port with frport as a template for new rdata - * - * Called with ctlr_mutex held. 
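The probe handler above resolves VN2VN address collisions with a simple tie-break: while still in a probe state, the local node only defends its proposed FC_ID when its WWPN is larger than the prober's and the probe is not flagged recorded/P2P; otherwise it restarts with a new proposed ID. A standalone sketch of that decision:

#include <stdio.h>
#include <stdint.h>

enum action { DEFEND, RESTART };

static enum action probe_collision(uint64_t our_wwpn, uint64_t their_wwpn,
				   int recorded_or_p2p)
{
	if (our_wwpn > their_wwpn && !recorded_or_p2p)
		return DEFEND;		/* send a probe reply, keep the FC_ID */
	return RESTART;			/* propose a different FC_ID */
}

int main(void)
{
	printf("%d\n", probe_collision(0x2000000000000002ULL,
				       0x2000000000000001ULL, 0));	/* 0: defend */
	printf("%d\n", probe_collision(0x2000000000000001ULL,
				       0x2000000000000002ULL, 0));	/* 1: restart */
	return 0;
}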
- */ -static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fc_rport_priv *new) -{ - struct fc_lport *lport = fip->lp; - struct fc_rport_priv *rdata; - struct fc_rport_identifiers *ids; - struct fcoe_rport *frport; - u32 port_id; - - port_id = new->ids.port_id; - if (port_id == fip->port_id) - return; - - mutex_lock(&lport->disc.disc_mutex); - rdata = lport->tt.rport_create(lport, port_id); - if (!rdata) { - mutex_unlock(&lport->disc.disc_mutex); - return; - } - - rdata->ops = &fcoe_ctlr_vn_rport_ops; - rdata->disc_id = lport->disc.disc_id; - - ids = &rdata->ids; - if ((ids->port_name != -1 && ids->port_name != new->ids.port_name) || - (ids->node_name != -1 && ids->node_name != new->ids.node_name)) - lport->tt.rport_logoff(rdata); - ids->port_name = new->ids.port_name; - ids->node_name = new->ids.node_name; - mutex_unlock(&lport->disc.disc_mutex); - - frport = fcoe_ctlr_rport(rdata); - LIBFCOE_FIP_DBG(fip, "vn_add rport %6.6x %s\n", - port_id, frport->fcoe_len ? "old" : "new"); - *frport = *fcoe_ctlr_rport(new); - frport->time = 0; -} - -/** - * fcoe_ctlr_vn_lookup() - Find VN remote port's MAC address - * @fip: The FCoE controller - * @port_id: The port_id of the remote VN_node - * @mac: buffer which will hold the VN_NODE destination MAC address, if found. - * - * Returns non-zero error if no remote port found. - */ -static int fcoe_ctlr_vn_lookup(struct fcoe_ctlr *fip, u32 port_id, u8 *mac) -{ - struct fc_lport *lport = fip->lp; - struct fc_rport_priv *rdata; - struct fcoe_rport *frport; - int ret = -1; - - rcu_read_lock(); - rdata = lport->tt.rport_lookup(lport, port_id); - if (rdata) { - frport = fcoe_ctlr_rport(rdata); - memcpy(mac, frport->enode_mac, ETH_ALEN); - ret = 0; - } - rcu_read_unlock(); - return ret; -} - -/** - * fcoe_ctlr_vn_claim_notify() - handle received FIP VN2VN Claim Notification - * @fip: The FCoE controller - * @new: newly-parsed remote port with frport as a template for new rdata - * - * Called with ctlr_mutex held. - */ -static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip, - struct fc_rport_priv *new) -{ - struct fcoe_rport *frport = fcoe_ctlr_rport(new); - - if (frport->flags & FIP_FL_REC_OR_P2P) { - fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0); - return; - } - switch (fip->state) { - case FIP_ST_VNMP_START: - case FIP_ST_VNMP_PROBE1: - case FIP_ST_VNMP_PROBE2: - if (new->ids.port_id == fip->port_id) - fcoe_ctlr_vn_restart(fip); - break; - case FIP_ST_VNMP_CLAIM: - case FIP_ST_VNMP_UP: - if (new->ids.port_id == fip->port_id) { - if (new->ids.port_name > fip->lp->wwpn) { - fcoe_ctlr_vn_restart(fip); - break; - } - fcoe_ctlr_vn_send_claim(fip); - break; - } - fcoe_ctlr_vn_send(fip, FIP_SC_VN_CLAIM_REP, frport->enode_mac, - min((u32)frport->fcoe_len, - fcoe_ctlr_fcoe_size(fip))); - fcoe_ctlr_vn_add(fip, new); - break; - default: - break; - } -} - -/** - * fcoe_ctlr_vn_claim_resp() - handle received Claim Response - * @fip: The FCoE controller that received the frame - * @new: newly-parsed remote port with frport from the Claim Response - * - * Called with ctlr_mutex held. - */ -static void fcoe_ctlr_vn_claim_resp(struct fcoe_ctlr *fip, - struct fc_rport_priv *new) -{ - LIBFCOE_FIP_DBG(fip, "claim resp from from rport %x - state %s\n", - new->ids.port_id, fcoe_ctlr_state(fip->state)); - if (fip->state == FIP_ST_VNMP_UP || fip->state == FIP_ST_VNMP_CLAIM) - fcoe_ctlr_vn_add(fip, new); -} - -/** - * fcoe_ctlr_vn_beacon() - handle received beacon. 
- * @fip: The FCoE controller that received the frame - * @new: newly-parsed remote port with frport from the Beacon - * - * Called with ctlr_mutex held. - */ -static void fcoe_ctlr_vn_beacon(struct fcoe_ctlr *fip, - struct fc_rport_priv *new) -{ - struct fc_lport *lport = fip->lp; - struct fc_rport_priv *rdata; - struct fcoe_rport *frport; - - frport = fcoe_ctlr_rport(new); - if (frport->flags & FIP_FL_REC_OR_P2P) { - fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0); - return; - } - mutex_lock(&lport->disc.disc_mutex); - rdata = lport->tt.rport_lookup(lport, new->ids.port_id); - if (rdata) - kref_get(&rdata->kref); - mutex_unlock(&lport->disc.disc_mutex); - if (rdata) { - if (rdata->ids.node_name == new->ids.node_name && - rdata->ids.port_name == new->ids.port_name) { - frport = fcoe_ctlr_rport(rdata); - if (!frport->time && fip->state == FIP_ST_VNMP_UP) - lport->tt.rport_login(rdata); - frport->time = jiffies; - } - kref_put(&rdata->kref, lport->tt.rport_destroy); - return; - } - if (fip->state != FIP_ST_VNMP_UP) - return; - - /* - * Beacon from a new neighbor. - * Send a claim notify if one hasn't been sent recently. - * Don't add the neighbor yet. - */ - LIBFCOE_FIP_DBG(fip, "beacon from new rport %x. sending claim notify\n", - new->ids.port_id); - if (time_after(jiffies, - fip->sol_time + msecs_to_jiffies(FIP_VN_ANN_WAIT))) - fcoe_ctlr_vn_send_claim(fip); -} - -/** - * fcoe_ctlr_vn_age() - Check for VN_ports without recent beacons - * @fip: The FCoE controller - * - * Called with ctlr_mutex held. - * Called only in state FIP_ST_VNMP_UP. - * Returns the soonest time for next age-out or a time far in the future. - */ -static unsigned long fcoe_ctlr_vn_age(struct fcoe_ctlr *fip) -{ - struct fc_lport *lport = fip->lp; - struct fc_rport_priv *rdata; - struct fcoe_rport *frport; - unsigned long next_time; - unsigned long deadline; - - next_time = jiffies + msecs_to_jiffies(FIP_VN_BEACON_INT * 10); - mutex_lock(&lport->disc.disc_mutex); - list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) { - frport = fcoe_ctlr_rport(rdata); - if (!frport->time) - continue; - deadline = frport->time + - msecs_to_jiffies(FIP_VN_BEACON_INT * 25 / 10); - if (time_after_eq(jiffies, deadline)) { - frport->time = 0; - LIBFCOE_FIP_DBG(fip, - "port %16.16llx fc_id %6.6x beacon expired\n", - rdata->ids.port_name, rdata->ids.port_id); - lport->tt.rport_logoff(rdata); - } else if (time_before(deadline, next_time)) - next_time = deadline; - } - mutex_unlock(&lport->disc.disc_mutex); - return next_time; -} - -/** - * fcoe_ctlr_vn_recv() - Receive a FIP frame - * @fip: The FCoE controller that received the frame - * @skb: The received FIP frame - * - * Returns non-zero if the frame is dropped. - * Always consumes the frame. 
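fcoe_ctlr_vn_age() above expires a peer when no beacon has arrived within 2.5 beacon intervals and reports the earliest remaining deadline so the timer can be re-armed. A user-space sketch of that rule, with plain counters instead of jiffies and an illustrative interval value:

#include <stdio.h>

#define BEACON_INT	8000	/* illustrative beacon interval, ms */

struct peer {
	unsigned long last_beacon;	/* 0: no beacon seen yet */
	int logged_in;
};

static unsigned long age_peers(struct peer *p, int n, unsigned long now)
{
	unsigned long next = now + BEACON_INT * 10;	/* "far future" default */
	int i;

	for (i = 0; i < n; i++) {
		unsigned long deadline;

		if (!p[i].last_beacon)
			continue;
		deadline = p[i].last_beacon + BEACON_INT * 25 / 10;
		if (now >= deadline) {
			p[i].last_beacon = 0;
			p[i].logged_in = 0;		/* would log the rport off */
		} else if (deadline < next)
			next = deadline;
	}
	return next;				/* when to check again */
}

int main(void)
{
	struct peer peers[] = { { 1000, 1 }, { 30000, 1 } };
	unsigned long next = age_peers(peers, 2, 31000);

	printf("peer0 %d peer1 %d next %lu\n",
	       peers[0].logged_in, peers[1].logged_in, next);
	/* peer0 0 peer1 1 next 50000 */
	return 0;
}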
- */ -static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb) -{ - struct fip_header *fiph; - enum fip_vn2vn_subcode sub; - struct { - struct fc_rport_priv rdata; - struct fcoe_rport frport; - } buf; - int rc; - - fiph = (struct fip_header *)skb->data; - sub = fiph->fip_subcode; - - rc = fcoe_ctlr_vn_parse(fip, skb, &buf.rdata); - if (rc) { - LIBFCOE_FIP_DBG(fip, "vn_recv vn_parse error %d\n", rc); - goto drop; - } - - mutex_lock(&fip->ctlr_mutex); - switch (sub) { - case FIP_SC_VN_PROBE_REQ: - fcoe_ctlr_vn_probe_req(fip, &buf.rdata); - break; - case FIP_SC_VN_PROBE_REP: - fcoe_ctlr_vn_probe_reply(fip, &buf.rdata); - break; - case FIP_SC_VN_CLAIM_NOTIFY: - fcoe_ctlr_vn_claim_notify(fip, &buf.rdata); - break; - case FIP_SC_VN_CLAIM_REP: - fcoe_ctlr_vn_claim_resp(fip, &buf.rdata); - break; - case FIP_SC_VN_BEACON: - fcoe_ctlr_vn_beacon(fip, &buf.rdata); - break; - default: - LIBFCOE_FIP_DBG(fip, "vn_recv unknown subcode %d\n", sub); - rc = -1; - break; - } - mutex_unlock(&fip->ctlr_mutex); -drop: - kfree_skb(skb); - return rc; -} - -/** - * fcoe_ctlr_disc_recv - discovery receive handler for VN2VN mode. - * @lport: The local port - * @fp: The received frame - * - * This should never be called since we don't see RSCNs or other - * fabric-generated ELSes. - */ -static void fcoe_ctlr_disc_recv(struct fc_lport *lport, struct fc_frame *fp) -{ - struct fc_seq_els_data rjt_data; - - rjt_data.reason = ELS_RJT_UNSUP; - rjt_data.explan = ELS_EXPL_NONE; - lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data); - fc_frame_free(fp); -} - -/** - * fcoe_ctlr_disc_recv - start discovery for VN2VN mode. - * @fip: The FCoE controller - * - * This sets a flag indicating that remote ports should be created - * and started for the peers we discover. We use the disc_callback - * pointer as that flag. Peers already discovered are created here. - * - * The lport lock is held during this call. The callback must be done - * later, without holding either the lport or discovery locks. - * The fcoe_ctlr lock may also be held during this call. - */ -static void fcoe_ctlr_disc_start(void (*callback)(struct fc_lport *, - enum fc_disc_event), - struct fc_lport *lport) -{ - struct fc_disc *disc = &lport->disc; - struct fcoe_ctlr *fip = disc->priv; - - mutex_lock(&disc->disc_mutex); - disc->disc_callback = callback; - disc->disc_id = (disc->disc_id + 2) | 1; - disc->pending = 1; - schedule_work(&fip->timer_work); - mutex_unlock(&disc->disc_mutex); -} - -/** - * fcoe_ctlr_vn_disc() - report FIP VN_port discovery results after claim state. - * @fip: The FCoE controller - * - * Starts the FLOGI and PLOGI login process to each discovered rport for which - * we've received at least one beacon. - * Performs the discovery complete callback. - */ -static void fcoe_ctlr_vn_disc(struct fcoe_ctlr *fip) -{ - struct fc_lport *lport = fip->lp; - struct fc_disc *disc = &lport->disc; - struct fc_rport_priv *rdata; - struct fcoe_rport *frport; - void (*callback)(struct fc_lport *, enum fc_disc_event); - - mutex_lock(&disc->disc_mutex); - callback = disc->pending ? disc->disc_callback : NULL; - disc->pending = 0; - list_for_each_entry_rcu(rdata, &disc->rports, peers) { - frport = fcoe_ctlr_rport(rdata); - if (frport->time) - lport->tt.rport_login(rdata); - } - mutex_unlock(&disc->disc_mutex); - if (callback) - callback(lport, DISC_EV_SUCCESS); -} - -/** - * fcoe_ctlr_vn_timeout - timer work function for VN2VN mode. 
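fcoe_ctlr_disc_start() above bumps the discovery ID with (disc_id + 2) | 1, which keeps the value odd and never zero, so rports tagged during an earlier discovery pass can be told apart. The sequence is easy to see in isolation:

#include <stdio.h>

int main(void)
{
	unsigned short disc_id = 0;	/* 16-bit here for illustration */
	int i;

	for (i = 0; i < 4; i++) {
		disc_id = (disc_id + 2) | 1;
		printf("%u ", (unsigned int)disc_id);	/* 3 5 7 9 */
	}
	printf("\n");
	return 0;
}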
- * @fip: The FCoE controller - */ -static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *fip) -{ - unsigned long next_time; - u8 mac[ETH_ALEN]; - u32 new_port_id = 0; - - mutex_lock(&fip->ctlr_mutex); - switch (fip->state) { - case FIP_ST_VNMP_START: - fcoe_ctlr_set_state(fip, FIP_ST_VNMP_PROBE1); - fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0); - next_time = jiffies + msecs_to_jiffies(FIP_VN_PROBE_WAIT); - break; - case FIP_ST_VNMP_PROBE1: - fcoe_ctlr_set_state(fip, FIP_ST_VNMP_PROBE2); - fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0); - next_time = jiffies + msecs_to_jiffies(FIP_VN_ANN_WAIT); - break; - case FIP_ST_VNMP_PROBE2: - fcoe_ctlr_set_state(fip, FIP_ST_VNMP_CLAIM); - new_port_id = fip->port_id; - hton24(mac, FIP_VN_FC_MAP); - hton24(mac + 3, new_port_id); - fcoe_ctlr_map_dest(fip); - fip->update_mac(fip->lp, mac); - fcoe_ctlr_vn_send_claim(fip); - next_time = jiffies + msecs_to_jiffies(FIP_VN_ANN_WAIT); - break; - case FIP_ST_VNMP_CLAIM: - /* - * This may be invoked either by starting discovery so don't - * go to the next state unless it's been long enough. - */ - next_time = fip->sol_time + msecs_to_jiffies(FIP_VN_ANN_WAIT); - if (time_after_eq(jiffies, next_time)) { - fcoe_ctlr_set_state(fip, FIP_ST_VNMP_UP); - fcoe_ctlr_vn_send(fip, FIP_SC_VN_BEACON, - fcoe_all_vn2vn, 0); - next_time = jiffies + msecs_to_jiffies(FIP_VN_ANN_WAIT); - fip->port_ka_time = next_time; - } - fcoe_ctlr_vn_disc(fip); - break; - case FIP_ST_VNMP_UP: - next_time = fcoe_ctlr_vn_age(fip); - if (time_after_eq(jiffies, fip->port_ka_time)) { - fcoe_ctlr_vn_send(fip, FIP_SC_VN_BEACON, - fcoe_all_vn2vn, 0); - fip->port_ka_time = jiffies + - msecs_to_jiffies(FIP_VN_BEACON_INT + - (random32() % FIP_VN_BEACON_FUZZ)); - } - if (time_before(fip->port_ka_time, next_time)) - next_time = fip->port_ka_time; - break; - case FIP_ST_LINK_WAIT: - goto unlock; - default: - WARN(1, "unexpected state %d\n", fip->state); - goto unlock; - } - mod_timer(&fip->timer, next_time); -unlock: - mutex_unlock(&fip->ctlr_mutex); - - /* If port ID is new, notify local port after dropping ctlr_mutex */ - if (new_port_id) - fc_lport_set_local_id(fip->lp, new_port_id); -} - -/** - * fcoe_libfc_config() - Sets up libfc related properties for local port - * @lp: The local port to configure libfc for - * @fip: The FCoE controller in use by the local port - * @tt: The libfc function template - * @init_fcp: If non-zero, the FCP portion of libfc should be initialized - * - * Returns : 0 for success - */ -int fcoe_libfc_config(struct fc_lport *lport, struct fcoe_ctlr *fip, - const struct libfc_function_template *tt, int init_fcp) -{ - /* Set the function pointers set by the LLDD */ - memcpy(&lport->tt, tt, sizeof(*tt)); - if (init_fcp && fc_fcp_init(lport)) - return -ENOMEM; - fc_exch_init(lport); - fc_elsct_init(lport); - fc_lport_init(lport); - if (fip->mode == FIP_MODE_VN2VN) - lport->rport_priv_size = sizeof(struct fcoe_rport); - fc_rport_init(lport); - if (fip->mode == FIP_MODE_VN2VN) { - lport->point_to_multipoint = 1; - lport->tt.disc_recv_req = fcoe_ctlr_disc_recv; - lport->tt.disc_start = fcoe_ctlr_disc_start; - lport->tt.disc_stop = fcoe_ctlr_disc_stop; - lport->tt.disc_stop_final = fcoe_ctlr_disc_stop_final; - mutex_init(&lport->disc.disc_mutex); - INIT_LIST_HEAD(&lport->disc.rports); - lport->disc.priv = fip; - } else { - fc_disc_init(lport); - } - return 0; -} -EXPORT_SYMBOL_GPL(fcoe_libfc_config); -- cgit v1.2.3 From e01efc33bc4a248b1f9bfb972e156c76125fc914 Mon Sep 17 00:00:00 2001 From: Yi Zou Date: 
Fri, 28 Jan 2011 16:05:06 -0800 Subject: [SCSI] libfcoe: include fcoe_transport.c into kernel libfcoe module Now we can include the fcoe_transport.c to the build of the kernel libfcoe module. Move the module information to fcoe_transport, and it will have all the module parameters later for the create/destroy/enable/disable of an FCoE instance. Signed-off-by: Yi Zou Signed-off-by: Bhanu Prakash Gollapudi Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/Makefile | 2 +- drivers/scsi/fcoe/fcoe_ctlr.c | 8 -------- drivers/scsi/fcoe/fcoe_transport.c | 8 ++++++++ 3 files changed, 9 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/fcoe/Makefile b/drivers/scsi/fcoe/Makefile index b122b7abddc6..f6d37d0271f7 100644 --- a/drivers/scsi/fcoe/Makefile +++ b/drivers/scsi/fcoe/Makefile @@ -1,4 +1,4 @@ obj-$(CONFIG_FCOE) += fcoe.o obj-$(CONFIG_LIBFCOE) += libfcoe.o -libfcoe-objs := fcoe_ctlr.o +libfcoe-objs := fcoe_ctlr.o fcoe_transport.o diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index c12d0a7e0d3d..c93f007e702f 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c @@ -46,10 +46,6 @@ #include "libfcoe.h" -MODULE_AUTHOR("Open-FCoE.org"); -MODULE_DESCRIPTION("FIP discovery protocol support for FCoE HBAs"); -MODULE_LICENSE("GPL v2"); - #define FCOE_CTLR_MIN_FKA 500 /* min keep alive (mS) */ #define FCOE_CTLR_DEF_FKA FIP_DEF_FKA /* default keep alive (mS) */ @@ -68,10 +64,6 @@ static u8 fcoe_all_enode[ETH_ALEN] = FIP_ALL_ENODE_MACS; static u8 fcoe_all_vn2vn[ETH_ALEN] = FIP_ALL_VN2VN_MACS; static u8 fcoe_all_p2p[ETH_ALEN] = FIP_ALL_P2P_MACS; -unsigned int libfcoe_debug_logging; -module_param_named(debug_logging, libfcoe_debug_logging, int, S_IRUGO|S_IWUSR); -MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); - static const char * const fcoe_ctlr_states[] = { [FIP_ST_DISABLED] = "DISABLED", [FIP_ST_LINK_WAIT] = "LINK_WAIT", diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c index 41d69bedd4b4..e5aef5639124 100644 --- a/drivers/scsi/fcoe/fcoe_transport.c +++ b/drivers/scsi/fcoe/fcoe_transport.c @@ -27,6 +27,10 @@ #include "libfcoe.h" +MODULE_AUTHOR("Open-FCoE.org"); +MODULE_DESCRIPTION("FIP discovery protocol and FCoE transport for FCoE HBAs"); +MODULE_LICENSE("GPL v2"); + static int fcoe_transport_create(const char *, struct kernel_param *); static int fcoe_transport_destroy(const char *, struct kernel_param *); static int fcoe_transport_show(char *buffer, const struct kernel_param *kp); @@ -39,6 +43,10 @@ static LIST_HEAD(fcoe_transports); static LIST_HEAD(fcoe_netdevs); static DEFINE_MUTEX(ft_mutex); +unsigned int libfcoe_debug_logging; +module_param_named(debug_logging, libfcoe_debug_logging, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); + module_param_call(show, NULL, fcoe_transport_show, NULL, S_IRUSR); __MODULE_PARM_TYPE(show, "string"); MODULE_PARM_DESC(show, " Show attached FCoE transports"); -- cgit v1.2.3 From 8ca86f84dd5fc881012b2940ce4eab1fa57680e5 Mon Sep 17 00:00:00 2001 From: Yi Zou Date: Fri, 28 Jan 2011 16:05:11 -0800 Subject: [SCSI] fcoe: prepare fcoe for using fcoe transport Prepare the fcoe to convert it to use the newly added fcoe transport, making it as the default fcoe transport provider for libfcoe. This patch is to rename some of the variables to avoid any confusing names later as now there are several transports in the same file. 
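The debug_logging parameter moved into fcoe_transport.c above is a plain bit mask, the kind that typically gates per-area debug macros such as the LIBFCOE_FIP_DBG() calls seen throughout the controller code. A user-space sketch of that pattern, with made-up bit assignments rather than the driver's actual ones:

#include <stdio.h>

static unsigned int libfcoe_debug_logging;	/* set via the module parameter */

#define LOG_FIP		0x01	/* made-up bit assignments */
#define LOG_TRANSPORT	0x02

#define DBG(bit, fmt, ...)						\
	do {								\
		if (libfcoe_debug_logging & (bit))			\
			printf("libfcoe: " fmt, ##__VA_ARGS__);		\
	} while (0)

int main(void)
{
	libfcoe_debug_logging = LOG_FIP;

	DBG(LOG_FIP, "FIP event %d\n", 3);		/* printed */
	DBG(LOG_TRANSPORT, "transport event %d\n", 4);	/* suppressed */
	return 0;
}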
Signed-off-by: Yi Zou Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index d1146994a1b5..6d6c8bf5d155 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -145,8 +145,8 @@ static struct notifier_block fcoe_cpu_notifier = { .notifier_call = fcoe_cpu_callback, }; -static struct scsi_transport_template *fcoe_transport_template; -static struct scsi_transport_template *fcoe_vport_transport_template; +static struct scsi_transport_template *fcoe_nport_scsi_transport; +static struct scsi_transport_template *fcoe_vport_scsi_transport; static int fcoe_vport_destroy(struct fc_vport *); static int fcoe_vport_create(struct fc_vport *, bool disabled); @@ -163,7 +163,7 @@ static struct libfc_function_template fcoe_libfc_fcn_templ = { .lport_set_port_id = fcoe_set_port_id, }; -struct fc_function_template fcoe_transport_function = { +struct fc_function_template fcoe_nport_fc_functions = { .show_host_node_name = 1, .show_host_port_name = 1, .show_host_supported_classes = 1, @@ -203,7 +203,7 @@ struct fc_function_template fcoe_transport_function = { .bsg_request = fc_lport_bsg_request, }; -struct fc_function_template fcoe_vport_transport_function = { +struct fc_function_template fcoe_vport_fc_functions = { .show_host_node_name = 1, .show_host_port_name = 1, .show_host_supported_classes = 1, @@ -723,9 +723,9 @@ static int fcoe_shost_config(struct fc_lport *lport, struct device *dev) lport->host->max_cmd_len = FCOE_MAX_CMD_LEN; if (lport->vport) - lport->host->transportt = fcoe_vport_transport_template; + lport->host->transportt = fcoe_vport_scsi_transport; else - lport->host->transportt = fcoe_transport_template; + lport->host->transportt = fcoe_nport_scsi_transport; /* add the new host to the SCSI-ml */ rc = scsi_add_host(lport->host, dev); @@ -1064,11 +1064,12 @@ out: static int __init fcoe_if_init(void) { /* attach to scsi transport */ - fcoe_transport_template = fc_attach_transport(&fcoe_transport_function); - fcoe_vport_transport_template = - fc_attach_transport(&fcoe_vport_transport_function); + fcoe_nport_scsi_transport = + fc_attach_transport(&fcoe_nport_fc_functions); + fcoe_vport_scsi_transport = + fc_attach_transport(&fcoe_vport_fc_functions); - if (!fcoe_transport_template) { + if (!fcoe_nport_scsi_transport) { printk(KERN_ERR "fcoe: Failed to attach to the FC transport\n"); return -ENODEV; } @@ -1085,10 +1086,10 @@ static int __init fcoe_if_init(void) */ int __exit fcoe_if_exit(void) { - fc_release_transport(fcoe_transport_template); - fc_release_transport(fcoe_vport_transport_template); - fcoe_transport_template = NULL; - fcoe_vport_transport_template = NULL; + fc_release_transport(fcoe_nport_scsi_transport); + fc_release_transport(fcoe_vport_scsi_transport); + fcoe_nport_scsi_transport = NULL; + fcoe_vport_scsi_transport = NULL; return 0; } -- cgit v1.2.3 From 78a582463c1e3a262aeaf2a291e06a93a7b34212 Mon Sep 17 00:00:00 2001 From: Yi Zou Date: Fri, 28 Jan 2011 16:05:16 -0800 Subject: [SCSI] fcoe: convert fcoe.ko to become an fcoe transport provider driver Remove the existing sysfs entry points of the fcoe.ko module parameters that are used to create/destroy/enable/disable an FCoE instance, rather, use the newly added fcoe transport code to attach itself as an FCoE transport provider when fcoe.ko gets loaded. 
There is no functionality change on the logic of fcoe interacts with upper libfc and lower netdev. The fcoe transport only acts as thin layer to provide a unified interface for all fcoe transport providers so all FCoE instances on any network interfaces from all vendors can be managed through the same Open-FCoE.org's user space tool package, which also has full DCB support. Signed-off-by: Yi Zou Signed-off-by: Bhanu Prakash Gollapudi Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 170 +++++++++++++++++------------------------------ 1 file changed, 62 insertions(+), 108 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 6d6c8bf5d155..8a1005d117b7 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -101,10 +101,11 @@ static int fcoe_ddp_done(struct fc_lport *, u16); static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *); -static int fcoe_create(const char *, struct kernel_param *); -static int fcoe_destroy(const char *, struct kernel_param *); -static int fcoe_enable(const char *, struct kernel_param *); -static int fcoe_disable(const char *, struct kernel_param *); +static bool fcoe_match(struct net_device *netdev); +static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode); +static int fcoe_destroy(struct net_device *netdev); +static int fcoe_enable(struct net_device *netdev); +static int fcoe_disable(struct net_device *netdev); static struct fc_seq *fcoe_elsct_send(struct fc_lport *, u32 did, struct fc_frame *, @@ -117,24 +118,6 @@ static void fcoe_recv_frame(struct sk_buff *skb); static void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *); -module_param_call(create, fcoe_create, NULL, (void *)FIP_MODE_FABRIC, S_IWUSR); -__MODULE_PARM_TYPE(create, "string"); -MODULE_PARM_DESC(create, " Creates fcoe instance on a ethernet interface"); -module_param_call(create_vn2vn, fcoe_create, NULL, - (void *)FIP_MODE_VN2VN, S_IWUSR); -__MODULE_PARM_TYPE(create_vn2vn, "string"); -MODULE_PARM_DESC(create_vn2vn, " Creates a VN_node to VN_node FCoE instance " - "on an Ethernet interface"); -module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR); -__MODULE_PARM_TYPE(destroy, "string"); -MODULE_PARM_DESC(destroy, " Destroys fcoe instance on a ethernet interface"); -module_param_call(enable, fcoe_enable, NULL, NULL, S_IWUSR); -__MODULE_PARM_TYPE(enable, "string"); -MODULE_PARM_DESC(enable, " Enables fcoe on a ethernet interface."); -module_param_call(disable, fcoe_disable, NULL, NULL, S_IWUSR); -__MODULE_PARM_TYPE(disable, "string"); -MODULE_PARM_DESC(disable, " Disables fcoe on a ethernet interface."); - /* notification function for packets from net device */ static struct notifier_block fcoe_notifier = { .notifier_call = fcoe_device_notification, @@ -1938,40 +1921,17 @@ out: return rc; } -/** - * fcoe_if_to_netdev() - Parse a name buffer to get a net device - * @buffer: The name of the net device - * - * Returns: NULL or a ptr to net_device - */ -static struct net_device *fcoe_if_to_netdev(const char *buffer) -{ - char *cp; - char ifname[IFNAMSIZ + 2]; - - if (buffer) { - strlcpy(ifname, buffer, IFNAMSIZ); - cp = ifname + strlen(ifname); - while (--cp >= ifname && *cp == '\n') - *cp = '\0'; - return dev_get_by_name(&init_net, ifname); - } - return NULL; -} - /** * fcoe_disable() - Disables a FCoE interface - * @buffer: The name of the Ethernet interface to be disabled - * @kp: The associated kernel parameter + * @netdev : The net_device 
object the Ethernet interface to create on * - * Called from sysfs. + * Called from fcoe transport. * * Returns: 0 for success */ -static int fcoe_disable(const char *buffer, struct kernel_param *kp) +static int fcoe_disable(struct net_device *netdev) { struct fcoe_interface *fcoe; - struct net_device *netdev; int rc = 0; mutex_lock(&fcoe_config_mutex); @@ -1987,16 +1947,9 @@ static int fcoe_disable(const char *buffer, struct kernel_param *kp) } #endif - netdev = fcoe_if_to_netdev(buffer); - if (!netdev) { - rc = -ENODEV; - goto out_nodev; - } - if (!rtnl_trylock()) { - dev_put(netdev); mutex_unlock(&fcoe_config_mutex); - return restart_syscall(); + return -ERESTARTSYS; } fcoe = fcoe_hostlist_lookup_port(netdev); @@ -2008,7 +1961,6 @@ static int fcoe_disable(const char *buffer, struct kernel_param *kp) } else rc = -ENODEV; - dev_put(netdev); out_nodev: mutex_unlock(&fcoe_config_mutex); return rc; @@ -2016,17 +1968,15 @@ out_nodev: /** * fcoe_enable() - Enables a FCoE interface - * @buffer: The name of the Ethernet interface to be enabled - * @kp: The associated kernel parameter + * @netdev : The net_device object the Ethernet interface to create on * - * Called from sysfs. + * Called from fcoe transport. * * Returns: 0 for success */ -static int fcoe_enable(const char *buffer, struct kernel_param *kp) +static int fcoe_enable(struct net_device *netdev) { struct fcoe_interface *fcoe; - struct net_device *netdev; int rc = 0; mutex_lock(&fcoe_config_mutex); @@ -2041,17 +1991,9 @@ static int fcoe_enable(const char *buffer, struct kernel_param *kp) goto out_nodev; } #endif - - netdev = fcoe_if_to_netdev(buffer); - if (!netdev) { - rc = -ENODEV; - goto out_nodev; - } - if (!rtnl_trylock()) { - dev_put(netdev); mutex_unlock(&fcoe_config_mutex); - return restart_syscall(); + return -ERESTARTSYS; } fcoe = fcoe_hostlist_lookup_port(netdev); @@ -2062,7 +2004,6 @@ static int fcoe_enable(const char *buffer, struct kernel_param *kp) else if (!fcoe_link_ok(fcoe->ctlr.lp)) fcoe_ctlr_link_up(&fcoe->ctlr); - dev_put(netdev); out_nodev: mutex_unlock(&fcoe_config_mutex); return rc; @@ -2070,17 +2011,15 @@ out_nodev: /** * fcoe_destroy() - Destroy a FCoE interface - * @buffer: The name of the Ethernet interface to be destroyed - * @kp: The associated kernel parameter + * @netdev : The net_device object the Ethernet interface to create on * - * Called from sysfs. 
+ * Called from fcoe transport * * Returns: 0 for success */ -static int fcoe_destroy(const char *buffer, struct kernel_param *kp) +static int fcoe_destroy(struct net_device *netdev) { struct fcoe_interface *fcoe; - struct net_device *netdev; int rc = 0; mutex_lock(&fcoe_config_mutex); @@ -2095,32 +2034,21 @@ static int fcoe_destroy(const char *buffer, struct kernel_param *kp) goto out_nodev; } #endif - - netdev = fcoe_if_to_netdev(buffer); - if (!netdev) { - rc = -ENODEV; - goto out_nodev; - } - if (!rtnl_trylock()) { - dev_put(netdev); mutex_unlock(&fcoe_config_mutex); - return restart_syscall(); + return -ERESTARTSYS; } fcoe = fcoe_hostlist_lookup_port(netdev); if (!fcoe) { rtnl_unlock(); rc = -ENODEV; - goto out_putdev; + goto out_nodev; } fcoe_interface_cleanup(fcoe); list_del(&fcoe->list); /* RTNL mutex is dropped by fcoe_if_destroy */ fcoe_if_destroy(fcoe->ctlr.lp); - -out_putdev: - dev_put(netdev); out_nodev: mutex_unlock(&fcoe_config_mutex); return rc; @@ -2142,28 +2070,40 @@ static void fcoe_destroy_work(struct work_struct *work) mutex_unlock(&fcoe_config_mutex); } +/** + * fcoe_match() - Check if the FCoE is supported on the given netdevice + * @netdev : The net_device object the Ethernet interface to create on + * + * Called from fcoe transport. + * + * Returns: always returns true as this is the default FCoE transport, + * i.e., support all netdevs. + */ +static bool fcoe_match(struct net_device *netdev) +{ + return true; +} + /** * fcoe_create() - Create a fcoe interface - * @buffer: The name of the Ethernet interface to create on - * @kp: The associated kernel param + * @netdev : The net_device object the Ethernet interface to create on + * @fip_mode: The FIP mode for this creation * - * Called from sysfs. + * Called from fcoe transport * * Returns: 0 for success */ -static int fcoe_create(const char *buffer, struct kernel_param *kp) +static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode) { - enum fip_state fip_mode = (enum fip_state)(long)kp->arg; int rc; struct fcoe_interface *fcoe; struct fc_lport *lport; - struct net_device *netdev; mutex_lock(&fcoe_config_mutex); if (!rtnl_trylock()) { mutex_unlock(&fcoe_config_mutex); - return restart_syscall(); + return -ERESTARTSYS; } #ifdef CONFIG_FCOE_MODULE @@ -2178,22 +2118,16 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp) } #endif - netdev = fcoe_if_to_netdev(buffer); - if (!netdev) { - rc = -ENODEV; - goto out_nodev; - } - /* look for existing lport */ if (fcoe_hostlist_lookup(netdev)) { rc = -EEXIST; - goto out_putdev; + goto out_nodev; } fcoe = fcoe_interface_create(netdev, fip_mode); if (IS_ERR(fcoe)) { rc = PTR_ERR(fcoe); - goto out_putdev; + goto out_nodev; } lport = fcoe_if_create(fcoe, &netdev->dev, 0); @@ -2222,15 +2156,12 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp) * should be holding a reference taken in fcoe_if_create(). 
*/ fcoe_interface_put(fcoe); - dev_put(netdev); rtnl_unlock(); mutex_unlock(&fcoe_config_mutex); return 0; out_free: fcoe_interface_put(fcoe); -out_putdev: - dev_put(netdev); out_nodev: rtnl_unlock(); mutex_unlock(&fcoe_config_mutex); @@ -2433,6 +2364,18 @@ static int fcoe_hostlist_add(const struct fc_lport *lport) return 0; } + +static struct fcoe_transport fcoe_sw_transport = { + .name = {FCOE_TRANSPORT_DEFAULT}, + .attached = false, + .list = LIST_HEAD_INIT(fcoe_sw_transport.list), + .match = fcoe_match, + .create = fcoe_create, + .destroy = fcoe_destroy, + .enable = fcoe_enable, + .disable = fcoe_disable, +}; + /** * fcoe_init() - Initialize fcoe.ko * @@ -2444,6 +2387,14 @@ static int __init fcoe_init(void) unsigned int cpu; int rc = 0; + /* register as a fcoe transport */ + rc = fcoe_transport_attach(&fcoe_sw_transport); + if (rc) { + printk(KERN_ERR "failed to register an fcoe transport, check " + "if libfcoe is loaded\n"); + return rc; + } + mutex_lock(&fcoe_config_mutex); for_each_possible_cpu(cpu) { @@ -2520,6 +2471,9 @@ static void __exit fcoe_exit(void) /* detach from scsi transport * must happen after all destroys are done, therefor after the flush */ fcoe_if_exit(); + + /* detach from fcoe transport */ + fcoe_transport_detach(&fcoe_sw_transport); } module_exit(fcoe_exit); -- cgit v1.2.3 From acc1a921659561c4ea97eca6e3c2633adecbcd81 Mon Sep 17 00:00:00 2001 From: Kiran Patil Date: Fri, 28 Jan 2011 16:05:22 -0800 Subject: [SCSI] libfc: Extending lport's roles for target if there is a registered target. Problem: From initaitor machine, when queried role of target (other end of connection), it is "initiator", hence SCSI-ml doesn't send any LUN Inquiry commands. Fix: If there is a registered target for FC_TYPE_FCP, extend lport's params (capability) to be target as well, By default lport params are INITIATOR only. Having this fix, caused initiator to send SCSI LUN inquiry command to target. 
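The other half of this fix is how a target module lands in fc_passive_prov[] in the first place. A rough sketch follows, assuming the FC-4 provider registration hook (fc_fc4_register_provider()) from the same patch series; the ft_* names are hypothetical:

#include <linux/module.h>
#include <scsi/libfc.h>

static void ft_recv(struct fc_lport *lport, struct fc_frame *fp)
{
        /* handle unsolicited FCP frames addressed to the target */
}

static struct fc4_prov ft_prov = {
        .recv   = ft_recv,
        .module = THIS_MODULE,
};

static int __init ft_init(void)
{
        /*
         * Once this succeeds, fc_lport_config() -> fc_fc4_conf_lport_params()
         * finds a provider with a non-NULL recv for FC_TYPE_FCP and ORs
         * FCP_SPPF_TARG_FCN into lport->service_params, so PRLI advertises
         * the target function in addition to the default initiator role.
         */
        return fc_fc4_register_provider(FC_TYPE_FCP, &ft_prov);
}
module_init(ft_init);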
Signed-off-by: Kiran Patil Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_libfc.c | 19 +++++++++++++++++++ drivers/scsi/libfc/fc_libfc.h | 1 + drivers/scsi/libfc/fc_lport.c | 1 + 3 files changed, 21 insertions(+) (limited to 'drivers') diff --git a/drivers/scsi/libfc/fc_libfc.c b/drivers/scsi/libfc/fc_libfc.c index 5e40dab8f919..b7735129f1f3 100644 --- a/drivers/scsi/libfc/fc_libfc.c +++ b/drivers/scsi/libfc/fc_libfc.c @@ -232,6 +232,25 @@ void fc_fill_reply_hdr(struct fc_frame *fp, const struct fc_frame *in_fp, } EXPORT_SYMBOL(fc_fill_reply_hdr); +/** + * fc_fc4_conf_lport_params() - Modify "service_params" of specified lport + * if there is service provider (target provider) registered with libfc + * for specified "fc_ft_type" + * @lport: Local port which service_params needs to be modified + * @type: FC-4 type, such as FC_TYPE_FCP + */ +void fc_fc4_conf_lport_params(struct fc_lport *lport, enum fc_fh_type type) +{ + struct fc4_prov *prov_entry; + BUG_ON(type >= FC_FC4_PROV_SIZE); + BUG_ON(!lport); + prov_entry = fc_passive_prov[type]; + if (type == FC_TYPE_FCP) { + if (prov_entry && prov_entry->recv) + lport->service_params |= FCP_SPPF_TARG_FCN; + } +} + void fc_lport_iterate(void (*notify)(struct fc_lport *, void *), void *arg) { struct fc_lport *lport; diff --git a/drivers/scsi/libfc/fc_libfc.h b/drivers/scsi/libfc/fc_libfc.h index 8496f7020b97..fedc819d70c0 100644 --- a/drivers/scsi/libfc/fc_libfc.h +++ b/drivers/scsi/libfc/fc_libfc.h @@ -125,6 +125,7 @@ void fc_destroy_fcp(void); const char *fc_els_resp_type(struct fc_frame *); extern void fc_fc4_add_lport(struct fc_lport *); extern void fc_fc4_del_lport(struct fc_lport *); +extern void fc_fc4_conf_lport_params(struct fc_lport *, enum fc_fh_type); /* * Copies a buffer into an sg list diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index e0ef81426c33..735f1f82ccfd 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -1597,6 +1597,7 @@ int fc_lport_config(struct fc_lport *lport) fc_lport_add_fc4_type(lport, FC_TYPE_FCP); fc_lport_add_fc4_type(lport, FC_TYPE_CT); + fc_fc4_conf_lport_params(lport, FC_TYPE_FCP); return 0; } -- cgit v1.2.3 From 75a2792df296c77004a72056c76628a1f835bc93 Mon Sep 17 00:00:00 2001 From: Bhanu Prakash Gollapudi Date: Fri, 28 Jan 2011 16:05:27 -0800 Subject: [SCSI] libfc: introduce LLD event callback This patch enables LLD to listen to rport events and perform LLD specific operations based on the rport event. This patch also stores sp_features and spp_type in rdata for further reference by LLD. 
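For a driver consuming the new hook, the wiring looks roughly like the sketch below. The lld_* names are made up, but the callback signature and the rdata fields match what this patch stores and invokes:

#include <scsi/libfc.h>

static void lld_rport_event(struct fc_lport *lport,
                            struct fc_rport_priv *rdata,
                            enum fc_rport_event event)
{
        switch (event) {
        case RPORT_EV_READY:
                /*
                 * Remote port is logged in: rdata->ids, rdata->sp_features
                 * and rdata->spp_type are valid here, e.g. for setting up
                 * an offloaded session.
                 */
                break;
        case RPORT_EV_FAILED:
        case RPORT_EV_LOGO:
        case RPORT_EV_STOP:
                /* tear down any per-rport LLD state */
                break;
        default:
                break;
        }
}

static void lld_lport_setup(struct fc_lport *lport)
{
        /* set before discovery starts so fc_rport_create() copies the pointer */
        lport->tt.rport_event_callback = lld_rport_event;
}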
Signed-off-by: Bhanu Prakash Gollapudi Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_npiv.c | 1 + drivers/scsi/libfc/fc_rport.c | 19 ++++++++++++++++++- 2 files changed, 19 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/libfc/fc_npiv.c b/drivers/scsi/libfc/fc_npiv.c index dd2b43bb1c70..076cd5ff0e40 100644 --- a/drivers/scsi/libfc/fc_npiv.c +++ b/drivers/scsi/libfc/fc_npiv.c @@ -86,6 +86,7 @@ struct fc_lport *fc_vport_id_lookup(struct fc_lport *n_port, u32 port_id) return lport; } +EXPORT_SYMBOL(fc_vport_id_lookup); /* * When setting the link state of vports during an lport state change, it's diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 9ded6123ff6d..59b16bbb66a3 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -145,8 +145,10 @@ static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, rdata->maxframe_size = FC_MIN_MAX_PAYLOAD; INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout); INIT_WORK(&rdata->event_work, fc_rport_work); - if (port_id != FC_FID_DIR_SERV) + if (port_id != FC_FID_DIR_SERV) { + rdata->lld_event_callback = lport->tt.rport_event_callback; list_add_rcu(&rdata->peers, &lport->disc.rports); + } return rdata; } @@ -302,6 +304,10 @@ static void fc_rport_work(struct work_struct *work) FC_RPORT_DBG(rdata, "callback ev %d\n", event); rport_ops->event_callback(lport, rdata, event); } + if (rdata->lld_event_callback) { + FC_RPORT_DBG(rdata, "lld callback ev %d\n", event); + rdata->lld_event_callback(lport, rdata, event); + } kref_put(&rdata->kref, lport->tt.rport_destroy); break; @@ -324,6 +330,10 @@ static void fc_rport_work(struct work_struct *work) FC_RPORT_DBG(rdata, "callback ev %d\n", event); rport_ops->event_callback(lport, rdata, event); } + if (rdata->lld_event_callback) { + FC_RPORT_DBG(rdata, "lld callback ev %d\n", event); + rdata->lld_event_callback(lport, rdata, event); + } cancel_delayed_work_sync(&rdata->retry_work); /* @@ -890,6 +900,9 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp, rdata->ids.port_name = get_unaligned_be64(&plp->fl_wwpn); rdata->ids.node_name = get_unaligned_be64(&plp->fl_wwnn); + /* save plogi response sp_features for further reference */ + rdata->sp_features = ntohs(plp->fl_csp.sp_features); + if (lport->point_to_multipoint) fc_rport_login_complete(rdata, fp); csp_seq = ntohs(plp->fl_csp.sp_tot_seq); @@ -997,6 +1010,7 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, resp_code = (pp->spp.spp_flags & FC_SPP_RESP_MASK); FC_RPORT_DBG(rdata, "PRLI spp_flags = 0x%x\n", pp->spp.spp_flags); + rdata->spp_type = pp->spp.spp_type; if (resp_code != FC_SPP_RESP_ACK) { if (resp_code == FC_SPP_RESP_CONF) fc_rport_error(rdata, fp); @@ -1010,6 +1024,8 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, fcp_parm = ntohl(pp->spp.spp_params); if (fcp_parm & FCP_SPPF_RETRY) rdata->flags |= FC_RP_FLAGS_RETRY; + if (fcp_parm & FCP_SPPF_CONF_COMPL) + rdata->flags |= FC_RP_FLAGS_CONF_REQ; prov = fc_passive_prov[FC_TYPE_FCP]; if (prov) { @@ -1719,6 +1735,7 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata, spp = &pp->spp; mutex_lock(&fc_prov_mutex); while (len >= plen) { + rdata->spp_type = rspp->spp_type; spp->spp_type = rspp->spp_type; spp->spp_type_ext = rspp->spp_type_ext; resp = 0; -- cgit v1.2.3 From 2ca32b4848a865fb088e8c00af0dc194701c373a Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 28 Jan 2011 16:05:32 -0800 Subject: 
[SCSI] fcoe: use dedicated workqueue instead of system_wq fcoe uses the system_wq to destroy ports and the work items need to be flushed before the driver is unloaded. As the work items free the containing data structure, they can't be flushed directly. The workqueue should be flushed instead. Also, the destruction works can be chained - ie. destruction of a port may lead to destruction of another port where the work item for the former queues the work for the latter. Currently, the depth of chain can be at most two and fcoe_exit() makes sure everything is complete by calling flush_scheduled_work() twice. With commit c8efcc25 (workqueue: allow chained queueing during destruction), destroy_workqueue() can take care of chained works on workqueue destruction. Add and use fcoe_wq instead. Simply destroying fcoe_wq on driver unload takes care of flushing. Signed-off-by: Tejun Heo Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 32 +++++++++++++++++++++----------- 1 file changed, 21 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 8a1005d117b7..46c57e5755ae 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include #include @@ -58,6 +59,8 @@ MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for " \ DEFINE_MUTEX(fcoe_config_mutex); +static struct workqueue_struct *fcoe_wq; + /* fcoe_percpu_clean completion. Waiter protected by fcoe_create_mutex */ static DECLARE_COMPLETION(fcoe_flush_completion); @@ -1896,7 +1899,7 @@ static int fcoe_device_notification(struct notifier_block *notifier, list_del(&fcoe->list); port = lport_priv(fcoe->ctlr.lp); fcoe_interface_cleanup(fcoe); - schedule_work(&port->destroy_work); + queue_work(fcoe_wq, &port->destroy_work); goto out; break; case NETDEV_FEAT_CHANGE: @@ -2387,6 +2390,10 @@ static int __init fcoe_init(void) unsigned int cpu; int rc = 0; + fcoe_wq = alloc_workqueue("fcoe", 0, 0); + if (!fcoe_wq) + return -ENOMEM; + /* register as a fcoe transport */ rc = fcoe_transport_attach(&fcoe_sw_transport); if (rc) { @@ -2425,6 +2432,7 @@ out_free: fcoe_percpu_thread_destroy(cpu); } mutex_unlock(&fcoe_config_mutex); + destroy_workqueue(fcoe_wq); return rc; } module_init(fcoe_init); @@ -2450,7 +2458,7 @@ static void __exit fcoe_exit(void) list_del(&fcoe->list); port = lport_priv(fcoe->ctlr.lp); fcoe_interface_cleanup(fcoe); - schedule_work(&port->destroy_work); + queue_work(fcoe_wq, &port->destroy_work); } rtnl_unlock(); @@ -2461,15 +2469,17 @@ static void __exit fcoe_exit(void) mutex_unlock(&fcoe_config_mutex); - /* flush any asyncronous interface destroys, - * this should happen after the netdev notifier is unregistered */ - flush_scheduled_work(); - /* That will flush out all the N_Ports on the hostlist, but now we - * may have NPIV VN_Ports scheduled for destruction */ - flush_scheduled_work(); + /* + * destroy_work's may be chained but destroy_workqueue() + * can take care of them. Just kill the fcoe_wq. + */ + destroy_workqueue(fcoe_wq); - /* detach from scsi transport - * must happen after all destroys are done, therefor after the flush */ + /* + * Detaching from the scsi transport must happen after all + * destroys are done on the fcoe_wq. destroy_workqueue will + * enusre the fcoe_wq is flushed. 
+ */ fcoe_if_exit(); /* detach from fcoe transport */ @@ -2618,7 +2628,7 @@ static int fcoe_vport_destroy(struct fc_vport *vport) mutex_lock(&n_port->lp_mutex); list_del(&vn_port->list); mutex_unlock(&n_port->lp_mutex); - schedule_work(&port->destroy_work); + queue_work(fcoe_wq, &port->destroy_work); return 0; } -- cgit v1.2.3 From 8597ae8bfe35f5e438b00ba5df852e97ebe1ac23 Mon Sep 17 00:00:00 2001 From: Bhanu Prakash Gollapudi Date: Fri, 28 Jan 2011 16:05:37 -0800 Subject: [SCSI] libfcoe: Move common code from fcoe to libfcoe module To facilitate LLDDs to reuse the code, skb queue related functions are moved to libfcoe, so that both fcoe and bnx2fc drivers can use them. The common structures fcoe_port, fcoe_percpu_s are moved to libfcoe. fcoe_port will now have an opaque pointer that points to corresponding driver's interface structure. Also, fcoe_start_io and fcoe_fc_crc are moved to libfcoe. As part of this change, fixed fcoe_start_io to return ENOMEM if skb_clone fails. Signed-off-by: Bhanu Prakash Gollapudi Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 217 ++++--------------------------------- drivers/scsi/fcoe/fcoe.h | 44 +------- drivers/scsi/fcoe/fcoe_transport.c | 200 ++++++++++++++++++++++++++++++++++ 3 files changed, 224 insertions(+), 237 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 46c57e5755ae..495456fe4520 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -75,7 +75,6 @@ static int fcoe_xmit(struct fc_lport *, struct fc_frame *); static int fcoe_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); static int fcoe_percpu_receive_thread(void *); -static void fcoe_clean_pending_queue(struct fc_lport *); static void fcoe_percpu_clean(struct fc_lport *); static int fcoe_link_speed_update(struct fc_lport *); static int fcoe_link_ok(struct fc_lport *); @@ -83,7 +82,6 @@ static int fcoe_link_ok(struct fc_lport *); static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *); static int fcoe_hostlist_add(const struct fc_lport *); -static void fcoe_check_wait_queue(struct fc_lport *, struct sk_buff *); static int fcoe_device_notification(struct notifier_block *, ulong, void *); static void fcoe_dev_setup(void); static void fcoe_dev_cleanup(void); @@ -506,7 +504,7 @@ static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb) static void fcoe_update_src_mac(struct fc_lport *lport, u8 *addr) { struct fcoe_port *port = lport_priv(lport); - struct fcoe_interface *fcoe = port->fcoe; + struct fcoe_interface *fcoe = port->priv; rtnl_lock(); if (!is_zero_ether_addr(port->data_src_addr)) @@ -561,17 +559,6 @@ static int fcoe_lport_config(struct fc_lport *lport) return 0; } -/** - * fcoe_queue_timer() - The fcoe queue timer - * @lport: The local port - * - * Calls fcoe_check_wait_queue on timeout - */ -static void fcoe_queue_timer(ulong lport) -{ - fcoe_check_wait_queue((struct fc_lport *)lport, NULL); -} - /** * fcoe_get_wwn() - Get the world wide name from LLD if it supports it * @netdev: the associated net device @@ -651,7 +638,7 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev) /* Setup lport private data to point to fcoe softc */ port = lport_priv(lport); - fcoe = port->fcoe; + fcoe = port->priv; /* * Determine max frame size based on underlying device and optional @@ -761,7 +748,7 @@ bool fcoe_oem_match(struct fc_frame *fp) static inline int fcoe_em_config(struct fc_lport *lport) 
{ struct fcoe_port *port = lport_priv(lport); - struct fcoe_interface *fcoe = port->fcoe; + struct fcoe_interface *fcoe = port->priv; struct fcoe_interface *oldfcoe = NULL; struct net_device *old_real_dev, *cur_real_dev; u16 min_xid = FCOE_MIN_XID; @@ -845,7 +832,7 @@ skip_oem: static void fcoe_if_destroy(struct fc_lport *lport) { struct fcoe_port *port = lport_priv(lport); - struct fcoe_interface *fcoe = port->fcoe; + struct fcoe_interface *fcoe = port->priv; struct net_device *netdev = fcoe->netdev; FCOE_NETDEV_DBG(netdev, "Destroying interface\n"); @@ -966,7 +953,9 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe, } port = lport_priv(lport); port->lport = lport; - port->fcoe = fcoe; + port->priv = fcoe; + port->max_queue_depth = FCOE_MAX_QUEUE_DEPTH; + port->min_queue_depth = FCOE_MIN_QUEUE_DEPTH; INIT_WORK(&port->destroy_work, fcoe_destroy_work); /* configure a fc_lport including the exchange manager */ @@ -1362,108 +1351,22 @@ err2: } /** - * fcoe_start_io() - Start FCoE I/O - * @skb: The packet to be transmitted - * - * This routine is called from the net device to start transmitting - * FCoE packets. - * - * Returns: 0 for success - */ -static inline int fcoe_start_io(struct sk_buff *skb) -{ - struct sk_buff *nskb; - int rc; - - nskb = skb_clone(skb, GFP_ATOMIC); - rc = dev_queue_xmit(nskb); - if (rc != 0) - return rc; - kfree_skb(skb); - return 0; -} - -/** - * fcoe_get_paged_crc_eof() - Allocate a page to be used for the trailer CRC + * fcoe_alloc_paged_crc_eof() - Allocate a page to be used for the trailer CRC * @skb: The packet to be transmitted * @tlen: The total length of the trailer * - * This routine allocates a page for frame trailers. The page is re-used if - * there is enough room left on it for the current trailer. If there isn't - * enough buffer left a new page is allocated for the trailer. Reference to - * the page from this function as well as the skbs using the page fragments - * ensure that the page is freed at the appropriate time. 
- * * Returns: 0 for success */ -static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen) +static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen) { struct fcoe_percpu_s *fps; - struct page *page; + int rc; fps = &get_cpu_var(fcoe_percpu); - page = fps->crc_eof_page; - if (!page) { - page = alloc_page(GFP_ATOMIC); - if (!page) { - put_cpu_var(fcoe_percpu); - return -ENOMEM; - } - fps->crc_eof_page = page; - fps->crc_eof_offset = 0; - } - - get_page(page); - skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, - fps->crc_eof_offset, tlen); - skb->len += tlen; - skb->data_len += tlen; - skb->truesize += tlen; - fps->crc_eof_offset += sizeof(struct fcoe_crc_eof); - - if (fps->crc_eof_offset >= PAGE_SIZE) { - fps->crc_eof_page = NULL; - fps->crc_eof_offset = 0; - put_page(page); - } + rc = fcoe_get_paged_crc_eof(skb, tlen, fps); put_cpu_var(fcoe_percpu); - return 0; -} -/** - * fcoe_fc_crc() - Calculates the CRC for a given frame - * @fp: The frame to be checksumed - * - * This uses crc32() routine to calculate the CRC for a frame - * - * Return: The 32 bit CRC value - */ -u32 fcoe_fc_crc(struct fc_frame *fp) -{ - struct sk_buff *skb = fp_skb(fp); - struct skb_frag_struct *frag; - unsigned char *data; - unsigned long off, len, clen; - u32 crc; - unsigned i; - - crc = crc32(~0, skb->data, skb_headlen(skb)); - - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - frag = &skb_shinfo(skb)->frags[i]; - off = frag->page_offset; - len = frag->size; - while (len > 0) { - clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK)); - data = kmap_atomic(frag->page + (off >> PAGE_SHIFT), - KM_SKB_DATA_SOFTIRQ); - crc = crc32(crc, data + (off & ~PAGE_MASK), clen); - kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ); - off += clen; - len -= clen; - } - } - return crc; + return rc; } /** @@ -1486,7 +1389,7 @@ int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp) unsigned int tlen; /* trailer length */ unsigned int elen; /* eth header, may include vlan */ struct fcoe_port *port = lport_priv(lport); - struct fcoe_interface *fcoe = port->fcoe; + struct fcoe_interface *fcoe = port->priv; u8 sof, eof; struct fcoe_hdr *hp; @@ -1527,7 +1430,7 @@ int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp) /* copy port crc and eof to the skb buff */ if (skb_is_nonlinear(skb)) { skb_frag_t *frag; - if (fcoe_get_paged_crc_eof(skb, tlen)) { + if (fcoe_alloc_paged_crc_eof(skb, tlen)) { kfree_skb(skb); return -ENOMEM; } @@ -1636,7 +1539,7 @@ static inline int fcoe_filter_frames(struct fc_lport *lport, if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && fh->fh_type == FC_TYPE_FCP) return 0; - fcoe = ((struct fcoe_port *)lport_priv(lport))->fcoe; + fcoe = ((struct fcoe_port *)lport_priv(lport))->priv; if (is_fip_mode(&fcoe->ctlr) && fc_frame_payload_op(fp) == ELS_LOGO && ntoh24(fh->fh_s_id) == FC_FID_FLOGI) { FCOE_DBG("fcoe: dropping FCoE lport LOGO in fip mode\n"); @@ -1770,64 +1673,6 @@ int fcoe_percpu_receive_thread(void *arg) return 0; } -/** - * fcoe_check_wait_queue() - Attempt to clear the transmit backlog - * @lport: The local port whose backlog is to be cleared - * - * This empties the wait_queue, dequeues the head of the wait_queue queue - * and calls fcoe_start_io() for each packet. If all skb have been - * transmitted it returns the qlen. If an error occurs it restores - * wait_queue (to try again later) and returns -1. - * - * The wait_queue is used when the skb transmit fails. The failed skb - * will go in the wait_queue which will be emptied by the timer function or - * by the next skb transmit. 
- */ -static void fcoe_check_wait_queue(struct fc_lport *lport, struct sk_buff *skb) -{ - struct fcoe_port *port = lport_priv(lport); - int rc; - - spin_lock_bh(&port->fcoe_pending_queue.lock); - - if (skb) - __skb_queue_tail(&port->fcoe_pending_queue, skb); - - if (port->fcoe_pending_queue_active) - goto out; - port->fcoe_pending_queue_active = 1; - - while (port->fcoe_pending_queue.qlen) { - /* keep qlen > 0 until fcoe_start_io succeeds */ - port->fcoe_pending_queue.qlen++; - skb = __skb_dequeue(&port->fcoe_pending_queue); - - spin_unlock_bh(&port->fcoe_pending_queue.lock); - rc = fcoe_start_io(skb); - spin_lock_bh(&port->fcoe_pending_queue.lock); - - if (rc) { - __skb_queue_head(&port->fcoe_pending_queue, skb); - /* undo temporary increment above */ - port->fcoe_pending_queue.qlen--; - break; - } - /* undo temporary increment above */ - port->fcoe_pending_queue.qlen--; - } - - if (port->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH) - lport->qfull = 0; - if (port->fcoe_pending_queue.qlen && !timer_pending(&port->timer)) - mod_timer(&port->timer, jiffies + 2); - port->fcoe_pending_queue_active = 0; -out: - if (port->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH) - lport->qfull = 1; - spin_unlock_bh(&port->fcoe_pending_queue.lock); - return; -} - /** * fcoe_dev_setup() - Setup the link change notification interface */ @@ -2180,8 +2025,7 @@ out_nodev: */ int fcoe_link_speed_update(struct fc_lport *lport) { - struct fcoe_port *port = lport_priv(lport); - struct net_device *netdev = port->fcoe->netdev; + struct net_device *netdev = fcoe_netdev(lport); struct ethtool_cmd ecmd = { ETHTOOL_GSET }; if (!dev_ethtool_get_settings(netdev, &ecmd)) { @@ -2212,8 +2056,7 @@ int fcoe_link_speed_update(struct fc_lport *lport) */ int fcoe_link_ok(struct fc_lport *lport) { - struct fcoe_port *port = lport_priv(lport); - struct net_device *netdev = port->fcoe->netdev; + struct net_device *netdev = fcoe_netdev(lport); if (netif_oper_up(netdev)) return 0; @@ -2276,24 +2119,6 @@ void fcoe_percpu_clean(struct fc_lport *lport) } } -/** - * fcoe_clean_pending_queue() - Dequeue a skb and free it - * @lport: The local port to dequeue a skb on - */ -void fcoe_clean_pending_queue(struct fc_lport *lport) -{ - struct fcoe_port *port = lport_priv(lport); - struct sk_buff *skb; - - spin_lock_bh(&port->fcoe_pending_queue.lock); - while ((skb = __skb_dequeue(&port->fcoe_pending_queue)) != NULL) { - spin_unlock_bh(&port->fcoe_pending_queue.lock); - kfree_skb(skb); - spin_lock_bh(&port->fcoe_pending_queue.lock); - } - spin_unlock_bh(&port->fcoe_pending_queue.lock); -} - /** * fcoe_reset() - Reset a local port * @shost: The SCSI host associated with the local port to be reset @@ -2361,7 +2186,7 @@ static int fcoe_hostlist_add(const struct fc_lport *lport) fcoe = fcoe_hostlist_lookup_port(fcoe_netdev(lport)); if (!fcoe) { port = lport_priv(lport); - fcoe = port->fcoe; + fcoe = port->priv; list_add_tail(&fcoe->list, &fcoe_hostlist); } return 0; @@ -2555,7 +2380,7 @@ static struct fc_seq *fcoe_elsct_send(struct fc_lport *lport, u32 did, void *arg, u32 timeout) { struct fcoe_port *port = lport_priv(lport); - struct fcoe_interface *fcoe = port->fcoe; + struct fcoe_interface *fcoe = port->priv; struct fcoe_ctlr *fip = &fcoe->ctlr; struct fc_frame_header *fh = fc_frame_header_get(fp); @@ -2588,7 +2413,7 @@ static int fcoe_vport_create(struct fc_vport *vport, bool disabled) struct Scsi_Host *shost = vport_to_shost(vport); struct fc_lport *n_port = shost_priv(shost); struct fcoe_port *port = lport_priv(n_port); - struct 
fcoe_interface *fcoe = port->fcoe; + struct fcoe_interface *fcoe = port->priv; struct net_device *netdev = fcoe->netdev; struct fc_lport *vn_port; @@ -2732,7 +2557,7 @@ static void fcoe_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp) { struct fcoe_port *port = lport_priv(lport); - struct fcoe_interface *fcoe = port->fcoe; + struct fcoe_interface *fcoe = port->priv; if (fp && fc_frame_payload_op(fp) == ELS_FLOGI) fcoe_ctlr_recv_flogi(&fcoe->ctlr, lport, fp); diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h index c69b2c56c2d1..d775128398e9 100644 --- a/drivers/scsi/fcoe/fcoe.h +++ b/drivers/scsi/fcoe/fcoe.h @@ -24,7 +24,7 @@ #include #define FCOE_MAX_QUEUE_DEPTH 256 -#define FCOE_LOW_QUEUE_DEPTH 32 +#define FCOE_MIN_QUEUE_DEPTH 32 #define FCOE_WORD_TO_BYTE 4 @@ -70,21 +70,6 @@ do { \ printk(KERN_INFO "fcoe: %s: " fmt, \ netdev->name, ##args);) -/** - * struct fcoe_percpu_s - The per-CPU context for FCoE receive threads - * @thread: The thread context - * @fcoe_rx_list: The queue of pending packets to process - * @page: The memory page for calculating frame trailer CRCs - * @crc_eof_offset: The offset into the CRC page pointing to available - * memory for a new trailer - */ -struct fcoe_percpu_s { - struct task_struct *thread; - struct sk_buff_head fcoe_rx_list; - struct page *crc_eof_page; - int crc_eof_offset; -}; - /** * struct fcoe_interface - A FCoE interface * @list: Handle for a list of FCoE interfaces @@ -108,30 +93,6 @@ struct fcoe_interface { struct kref kref; }; -/** - * struct fcoe_port - The FCoE private structure - * @fcoe: The associated fcoe interface - * @lport: The associated local port - * @fcoe_pending_queue: The pending Rx queue of skbs - * @fcoe_pending_queue_active: Indicates if the pending queue is active - * @timer: The queue timer - * @destroy_work: Handle for work context - * (to prevent RTNL deadlocks) - * @data_srt_addr: Source address for data - * - * An instance of this structure is to be allocated along with the - * Scsi_Host and libfc fc_lport structures. 
- */ -struct fcoe_port { - struct fcoe_interface *fcoe; - struct fc_lport *lport; - struct sk_buff_head fcoe_pending_queue; - u8 fcoe_pending_queue_active; - struct timer_list timer; - struct work_struct destroy_work; - u8 data_src_addr[ETH_ALEN]; -}; - #define fcoe_from_ctlr(fip) container_of(fip, struct fcoe_interface, ctlr) /** @@ -140,7 +101,8 @@ struct fcoe_port { */ static inline struct net_device *fcoe_netdev(const struct fc_lport *lport) { - return ((struct fcoe_port *)lport_priv(lport))->fcoe->netdev; + return ((struct fcoe_interface *) + ((struct fcoe_port *)lport_priv(lport))->priv)->netdev; } #endif /* _FCOE_H_ */ diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c index e5aef5639124..745eb9a22d64 100644 --- a/drivers/scsi/fcoe/fcoe_transport.c +++ b/drivers/scsi/fcoe/fcoe_transport.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include "libfcoe.h" @@ -74,6 +75,205 @@ module_param_call(disable, fcoe_transport_disable, NULL, NULL, S_IWUSR); __MODULE_PARM_TYPE(disable, "string"); MODULE_PARM_DESC(disable, " Disables fcoe on a ethernet interface."); +/** + * fcoe_fc_crc() - Calculates the CRC for a given frame + * @fp: The frame to be checksumed + * + * This uses crc32() routine to calculate the CRC for a frame + * + * Return: The 32 bit CRC value + */ +u32 fcoe_fc_crc(struct fc_frame *fp) +{ + struct sk_buff *skb = fp_skb(fp); + struct skb_frag_struct *frag; + unsigned char *data; + unsigned long off, len, clen; + u32 crc; + unsigned i; + + crc = crc32(~0, skb->data, skb_headlen(skb)); + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + frag = &skb_shinfo(skb)->frags[i]; + off = frag->page_offset; + len = frag->size; + while (len > 0) { + clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK)); + data = kmap_atomic(frag->page + (off >> PAGE_SHIFT), + KM_SKB_DATA_SOFTIRQ); + crc = crc32(crc, data + (off & ~PAGE_MASK), clen); + kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ); + off += clen; + len -= clen; + } + } + return crc; +} +EXPORT_SYMBOL_GPL(fcoe_fc_crc); + +/** + * fcoe_start_io() - Start FCoE I/O + * @skb: The packet to be transmitted + * + * This routine is called from the net device to start transmitting + * FCoE packets. + * + * Returns: 0 for success + */ +int fcoe_start_io(struct sk_buff *skb) +{ + struct sk_buff *nskb; + int rc; + + nskb = skb_clone(skb, GFP_ATOMIC); + if (!nskb) + return -ENOMEM; + rc = dev_queue_xmit(nskb); + if (rc != 0) + return rc; + kfree_skb(skb); + return 0; +} +EXPORT_SYMBOL_GPL(fcoe_start_io); + + +/** + * fcoe_clean_pending_queue() - Dequeue a skb and free it + * @lport: The local port to dequeue a skb on + */ +void fcoe_clean_pending_queue(struct fc_lport *lport) +{ + struct fcoe_port *port = lport_priv(lport); + struct sk_buff *skb; + + spin_lock_bh(&port->fcoe_pending_queue.lock); + while ((skb = __skb_dequeue(&port->fcoe_pending_queue)) != NULL) { + spin_unlock_bh(&port->fcoe_pending_queue.lock); + kfree_skb(skb); + spin_lock_bh(&port->fcoe_pending_queue.lock); + } + spin_unlock_bh(&port->fcoe_pending_queue.lock); +} +EXPORT_SYMBOL_GPL(fcoe_clean_pending_queue); + +/** + * fcoe_check_wait_queue() - Attempt to clear the transmit backlog + * @lport: The local port whose backlog is to be cleared + * + * This empties the wait_queue, dequeues the head of the wait_queue queue + * and calls fcoe_start_io() for each packet. If all skb have been + * transmitted it returns the qlen. If an error occurs it restores + * wait_queue (to try again later) and returns -1. 
+ * + * The wait_queue is used when the skb transmit fails. The failed skb + * will go in the wait_queue which will be emptied by the timer function or + * by the next skb transmit. + */ +void fcoe_check_wait_queue(struct fc_lport *lport, struct sk_buff *skb) +{ + struct fcoe_port *port = lport_priv(lport); + int rc; + + spin_lock_bh(&port->fcoe_pending_queue.lock); + + if (skb) + __skb_queue_tail(&port->fcoe_pending_queue, skb); + + if (port->fcoe_pending_queue_active) + goto out; + port->fcoe_pending_queue_active = 1; + + while (port->fcoe_pending_queue.qlen) { + /* keep qlen > 0 until fcoe_start_io succeeds */ + port->fcoe_pending_queue.qlen++; + skb = __skb_dequeue(&port->fcoe_pending_queue); + + spin_unlock_bh(&port->fcoe_pending_queue.lock); + rc = fcoe_start_io(skb); + spin_lock_bh(&port->fcoe_pending_queue.lock); + + if (rc) { + __skb_queue_head(&port->fcoe_pending_queue, skb); + /* undo temporary increment above */ + port->fcoe_pending_queue.qlen--; + break; + } + /* undo temporary increment above */ + port->fcoe_pending_queue.qlen--; + } + + if (port->fcoe_pending_queue.qlen < port->min_queue_depth) + lport->qfull = 0; + if (port->fcoe_pending_queue.qlen && !timer_pending(&port->timer)) + mod_timer(&port->timer, jiffies + 2); + port->fcoe_pending_queue_active = 0; +out: + if (port->fcoe_pending_queue.qlen > port->max_queue_depth) + lport->qfull = 1; + spin_unlock_bh(&port->fcoe_pending_queue.lock); +} +EXPORT_SYMBOL_GPL(fcoe_check_wait_queue); + +/** + * fcoe_queue_timer() - The fcoe queue timer + * @lport: The local port + * + * Calls fcoe_check_wait_queue on timeout + */ +void fcoe_queue_timer(ulong lport) +{ + fcoe_check_wait_queue((struct fc_lport *)lport, NULL); +} +EXPORT_SYMBOL_GPL(fcoe_queue_timer); + +/** + * fcoe_get_paged_crc_eof() - Allocate a page to be used for the trailer CRC + * @skb: The packet to be transmitted + * @tlen: The total length of the trailer + * @fps: The fcoe context + * + * This routine allocates a page for frame trailers. The page is re-used if + * there is enough room left on it for the current trailer. If there isn't + * enough buffer left a new page is allocated for the trailer. Reference to + * the page from this function as well as the skbs using the page fragments + * ensure that the page is freed at the appropriate time. + * + * Returns: 0 for success + */ +int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen, + struct fcoe_percpu_s *fps) +{ + struct page *page; + + page = fps->crc_eof_page; + if (!page) { + page = alloc_page(GFP_ATOMIC); + if (!page) + return -ENOMEM; + + fps->crc_eof_page = page; + fps->crc_eof_offset = 0; + } + + get_page(page); + skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, + fps->crc_eof_offset, tlen); + skb->len += tlen; + skb->data_len += tlen; + skb->truesize += tlen; + fps->crc_eof_offset += sizeof(struct fcoe_crc_eof); + + if (fps->crc_eof_offset >= PAGE_SIZE) { + fps->crc_eof_page = NULL; + fps->crc_eof_offset = 0; + put_page(page); + } + + return 0; +} +EXPORT_SYMBOL_GPL(fcoe_get_paged_crc_eof); + /** * fcoe_transport_lookup - find an fcoe transport that matches a netdev * @netdev: The netdev to look for from all attached transports -- cgit v1.2.3 From d75733d51fdab5c99a0d9491b25f22e13b39cdc1 Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Thu, 10 Feb 2011 11:50:39 +0530 Subject: [SCSI] mptfusion: Support SAS2.0 Devices with SAS1.0 Controllers SAS1.0 Controller was not able to detect SAS2.0 Expanders due to Link RATE detection was limited to 1.5 Gbps and 3.0 Gbps for SAS1 controllers. 
Added detection for 6.0 Gbps link. Now, user can mix-up 6.0 Gpbs links with SAS1.0 controller. e.g SAS1.0 HBA <----> SAS2.0 Expander <------> SAS2.0 Expander <--------> SAS1.0 Drive. Signed-off-by: Kashyap Desai Signed-off-by: James Bottomley --- drivers/message/fusion/lsi/mpi_cnfg.h | 1 + drivers/message/fusion/lsi/mpi_ioc.h | 1 + drivers/message/fusion/mptbase.c | 7 ++++++- drivers/message/fusion/mptsas.c | 6 +++++- 4 files changed, 13 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/message/fusion/lsi/mpi_cnfg.h b/drivers/message/fusion/lsi/mpi_cnfg.h index 013c7d881948..22027e7946f7 100644 --- a/drivers/message/fusion/lsi/mpi_cnfg.h +++ b/drivers/message/fusion/lsi/mpi_cnfg.h @@ -2593,6 +2593,7 @@ typedef struct _CONFIG_PAGE_SAS_IO_UNIT_0 #define MPI_SAS_IOUNIT0_RATE_SATA_OOB_COMPLETE (0x03) #define MPI_SAS_IOUNIT0_RATE_1_5 (0x08) #define MPI_SAS_IOUNIT0_RATE_3_0 (0x09) +#define MPI_SAS_IOUNIT0_RATE_6_0 (0x0A) /* see mpi_sas.h for values for SAS IO Unit Page 0 ControllerPhyDeviceInfo values */ diff --git a/drivers/message/fusion/lsi/mpi_ioc.h b/drivers/message/fusion/lsi/mpi_ioc.h index 8faa4fab7b89..fd6222882a0e 100644 --- a/drivers/message/fusion/lsi/mpi_ioc.h +++ b/drivers/message/fusion/lsi/mpi_ioc.h @@ -841,6 +841,7 @@ typedef struct _EVENT_DATA_SAS_PHY_LINK_STATUS #define MPI_EVENT_SAS_PLS_LR_RATE_SATA_OOB_COMPLETE (0x03) #define MPI_EVENT_SAS_PLS_LR_RATE_1_5 (0x08) #define MPI_EVENT_SAS_PLS_LR_RATE_3_0 (0x09) +#define MPI_EVENT_SAS_PLS_LR_RATE_6_0 (0x0A) /* SAS Discovery Event data */ diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index 3358c0af3466..ec8080c98081 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c @@ -7418,7 +7418,12 @@ mpt_display_event_info(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply) case MPI_EVENT_SAS_PLS_LR_RATE_3_0: snprintf(evStr, EVENT_DESCR_STR_SZ, "SAS PHY Link Status: Phy=%d:" - " Rate 3.0 Gpbs",PhyNumber); + " Rate 3.0 Gbps", PhyNumber); + break; + case MPI_EVENT_SAS_PLS_LR_RATE_6_0: + snprintf(evStr, EVENT_DESCR_STR_SZ, + "SAS PHY Link Status: Phy=%d:" + " Rate 6.0 Gbps", PhyNumber); break; default: snprintf(evStr, EVENT_DESCR_STR_SZ, diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index 8aefb1829fcd..518cfcaaaa4e 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -3063,6 +3063,9 @@ static int mptsas_probe_one_phy(struct device *dev, case MPI_SAS_IOUNIT0_RATE_3_0: phy->negotiated_linkrate = SAS_LINK_RATE_3_0_GBPS; break; + case MPI_SAS_IOUNIT0_RATE_6_0: + phy->negotiated_linkrate = SAS_LINK_RATE_6_0_GBPS; + break; case MPI_SAS_IOUNIT0_RATE_SATA_OOB_COMPLETE: case MPI_SAS_IOUNIT0_RATE_UNKNOWN: default: @@ -3691,7 +3694,8 @@ mptsas_send_link_status_event(struct fw_event_work *fw_event) } if (link_rate == MPI_SAS_IOUNIT0_RATE_1_5 || - link_rate == MPI_SAS_IOUNIT0_RATE_3_0) { + link_rate == MPI_SAS_IOUNIT0_RATE_3_0 || + link_rate == MPI_SAS_IOUNIT0_RATE_6_0) { if (!port_info) { if (ioc->old_sas_discovery_protocal) { -- cgit v1.2.3 From 3487735aa54407565278a5a1288119c45210b86c Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Thu, 10 Feb 2011 11:51:24 +0530 Subject: [SCSI] mptfusion: Remove bus reset for mptsas module Bus reset is not required for SAS Controller. It is valid for mptspi and mptfc, but for mptsas it is not required. It is an extra work for Error handling escallation for mptsas. 
Removing bus reset from error handling will eventually speedup Error handling for SAS controller. Signed-off-by: Kashyap Desai Signed-off-by: James Bottomley --- drivers/message/fusion/mptsas.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index 518cfcaaaa4e..f5a14afad2cd 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -1973,7 +1973,6 @@ static struct scsi_host_template mptsas_driver_template = { .change_queue_depth = mptscsih_change_queue_depth, .eh_abort_handler = mptscsih_abort, .eh_device_reset_handler = mptscsih_dev_reset, - .eh_bus_reset_handler = mptscsih_bus_reset, .eh_host_reset_handler = mptscsih_host_reset, .bios_param = mptscsih_bios_param, .can_queue = MPT_SAS_CAN_QUEUE, -- cgit v1.2.3 From 04b6e153b64471ff43dde82b0122e67cf491f2f5 Mon Sep 17 00:00:00 2001 From: "Moger, Babu" Date: Fri, 11 Feb 2011 15:14:08 -0700 Subject: [SCSI] scsi_dh_rdac: fix for lun_table update for rdac device handler During one of our testing, we noticed that mode select command sent from the host did not have the lun_table updated. Problem is root caused to the way lun table is updated. Lun table update was done after the call to blk_rq_map_kern is made. This was causing problem because kernel uses bounce buffer(bio_copy_kern) if the address is not aligned. The command buffer updated after the call(blk_rq_map_kern) was not going on the wire. Moved the code to update the lun_table before the call to fix the problem. Signed-off-by: Babu Moger Signed-off-by: Somasundaram Krishnasamy Signed-off-by: Yanling Qi Signed-off-by: James Bottomley --- drivers/scsi/device_handler/scsi_dh_rdac.c | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c index 5be3ae15cb71..7d83a84442a7 100644 --- a/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c @@ -281,11 +281,13 @@ static struct request *get_rdac_req(struct scsi_device *sdev, } static struct request *rdac_failover_get(struct scsi_device *sdev, - struct rdac_dh_data *h) + struct rdac_dh_data *h, struct list_head *list) { struct request *rq; struct rdac_mode_common *common; unsigned data_size; + struct rdac_queue_data *qdata; + u8 *lun_table; if (h->ctlr->use_ms10) { struct rdac_pg_expanded *rdac_pg; @@ -298,6 +300,7 @@ static struct request *rdac_failover_get(struct scsi_device *sdev, rdac_pg->subpage_code = 0x1; rdac_pg->page_len[0] = 0x01; rdac_pg->page_len[1] = 0x28; + lun_table = rdac_pg->lun_table; } else { struct rdac_pg_legacy *rdac_pg; @@ -307,11 +310,16 @@ static struct request *rdac_failover_get(struct scsi_device *sdev, common = &rdac_pg->common; rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER; rdac_pg->page_len = 0x68; + lun_table = rdac_pg->lun_table; } common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS; common->quiescence_timeout = RDAC_QUIESCENCE_TIME; common->rdac_options = RDAC_FORCED_QUIESENCE; + list_for_each_entry(qdata, list, entry) { + lun_table[qdata->h->lun] = 0x81; + } + /* get request for block layer packet command */ rq = get_rdac_req(sdev, &h->ctlr->mode_select, data_size, WRITE); if (!rq) @@ -565,7 +573,6 @@ static void send_mode_select(struct work_struct *work) int err, retry_cnt = RDAC_RETRY_COUNT; struct rdac_queue_data *tmp, *qdata; LIST_HEAD(list); - u8 *lun_table; spin_lock(&ctlr->ms_lock); 
list_splice_init(&ctlr->ms_head, &list); @@ -573,21 +580,12 @@ static void send_mode_select(struct work_struct *work) ctlr->ms_sdev = NULL; spin_unlock(&ctlr->ms_lock); - if (ctlr->use_ms10) - lun_table = ctlr->mode_select.expanded.lun_table; - else - lun_table = ctlr->mode_select.legacy.lun_table; - retry: err = SCSI_DH_RES_TEMP_UNAVAIL; - rq = rdac_failover_get(sdev, h); + rq = rdac_failover_get(sdev, h, &list); if (!rq) goto done; - list_for_each_entry(qdata, &list, entry) { - lun_table[qdata->h->lun] = 0x81; - } - RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, " "%s MODE_SELECT command", (char *) h->ctlr->array_name, h->ctlr->index, -- cgit v1.2.3 From 9dfeb3157e34315b8ca27d816a79358698a999ac Mon Sep 17 00:00:00 2001 From: Hillf Danton Date: Fri, 11 Feb 2011 15:17:33 -0700 Subject: [SCSI] scsi_dh: cosmetic change to sizeof() instead of doing sizeof(struct X) it's better to do sizeof(*v) where v is the variable pointing to struct X. Signed-off-by: Hillf Danton Signed-off-by: Babu Moger Signed-off-by: James Bottomley --- drivers/scsi/device_handler/scsi_dh_alua.c | 2 +- drivers/scsi/device_handler/scsi_dh_emc.c | 2 +- drivers/scsi/device_handler/scsi_dh_hp_sw.c | 4 ++-- drivers/scsi/device_handler/scsi_dh_rdac.c | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index 5b6f9ab10d7a..7cae0bc85390 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -763,7 +763,7 @@ static int alua_bus_attach(struct scsi_device *sdev) unsigned long flags; int err = SCSI_DH_OK; - scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *) + scsi_dh_data = kzalloc(sizeof(*scsi_dh_data) + sizeof(*h) , GFP_KERNEL); if (!scsi_dh_data) { sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n", diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c index 6faf472f7537..48441f6908a4 100644 --- a/drivers/scsi/device_handler/scsi_dh_emc.c +++ b/drivers/scsi/device_handler/scsi_dh_emc.c @@ -650,7 +650,7 @@ static int clariion_bus_attach(struct scsi_device *sdev) unsigned long flags; int err; - scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *) + scsi_dh_data = kzalloc(sizeof(*scsi_dh_data) + sizeof(*h) , GFP_KERNEL); if (!scsi_dh_data) { sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n", diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c index d0363c8fa348..b479f1eef968 100644 --- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c +++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c @@ -339,8 +339,8 @@ static int hp_sw_bus_attach(struct scsi_device *sdev) unsigned long flags; int ret; - scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *) - + sizeof(struct hp_sw_dh_data) , GFP_KERNEL); + scsi_dh_data = kzalloc(sizeof(*scsi_dh_data) + + sizeof(*h) , GFP_KERNEL); if (!scsi_dh_data) { sdev_printk(KERN_ERR, sdev, "%s: Attach Failed\n", HP_SW_NAME); diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c index 7d83a84442a7..bdff28014a2a 100644 --- a/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c @@ -798,7 +798,7 @@ static int rdac_bus_attach(struct scsi_device *sdev) int err; char array_name[ARRAY_LABEL_LEN]; - scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *) + scsi_dh_data = kzalloc(sizeof(*scsi_dh_data) + sizeof(*h) , GFP_KERNEL); if 
(!scsi_dh_data) { sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n", -- cgit v1.2.3 From a0b990c62c8915bb0c4ba55cf5d3da3c8fa74413 Mon Sep 17 00:00:00 2001 From: "Moger, Babu" Date: Fri, 11 Feb 2011 16:12:44 -0700 Subject: [SCSI] scsi_dh_rdac : Adding MODULE VERSION for rdac device handler Adding MODULE_VERSION for scsi_dh_rdac. This will be helpful sometimes to get the code level without looking at the code. Signed-off-by: Babu Moger Signed-off-by: James Bottomley --- drivers/scsi/device_handler/scsi_dh_rdac.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c index bdff28014a2a..f468b05cfaac 100644 --- a/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c @@ -904,4 +904,5 @@ module_exit(rdac_exit); MODULE_DESCRIPTION("Multipath LSI/Engenio RDAC driver"); MODULE_AUTHOR("Mike Christie, Chandra Seetharaman"); +MODULE_VERSION("01.00.0000.0000"); MODULE_LICENSE("GPL"); -- cgit v1.2.3 From 1f723867e650e5e021445eea34fddeffca9c4faa Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Sun, 23 Jan 2011 08:28:33 -0600 Subject: [SCSI] libata: plumb sas port scan into standard libata paths The function ata_sas_port_init() has always really done its own thing. However, as a precursor to moving to the libata new eh, it has to be properly using the standard libata scan paths. This means separating the current libata scan paths into pieces which can be shared with libsas and pieces which cant (really just the async call and the host scan). Cc: Tejun Heo Cc: Jeff Garzik Signed-off-by: James Bottomley --- drivers/ata/libata-core.c | 46 ++++++++++++++++++++++------------------------ drivers/ata/libata-scsi.c | 2 +- drivers/ata/libata.h | 1 + 3 files changed, 24 insertions(+), 25 deletions(-) (limited to 'drivers') diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index a31fe96f7de6..f5917756c49d 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -5886,21 +5886,9 @@ void ata_host_init(struct ata_host *host, struct device *dev, host->ops = ops; } - -static void async_port_probe(void *data, async_cookie_t cookie) +int ata_port_probe(struct ata_port *ap) { - int rc; - struct ata_port *ap = data; - - /* - * If we're not allowed to scan this host in parallel, - * we need to wait until all previous scans have completed - * before going further. - * Jeff Garzik says this is only within a controller, so we - * don't need to wait for port 0, only for later ports. - */ - if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0) - async_synchronize_cookie(cookie); + int rc = 0; /* probe */ if (ap->ops->error_handler) { @@ -5926,23 +5914,33 @@ static void async_port_probe(void *data, async_cookie_t cookie) DPRINTK("ata%u: bus probe begin\n", ap->print_id); rc = ata_bus_probe(ap); DPRINTK("ata%u: bus probe end\n", ap->print_id); - - if (rc) { - /* FIXME: do something useful here? - * Current libata behavior will - * tear down everything when - * the module is removed - * or the h/w is unplugged. - */ - } } + return rc; +} + + +static void async_port_probe(void *data, async_cookie_t cookie) +{ + struct ata_port *ap = data; + + /* + * If we're not allowed to scan this host in parallel, + * we need to wait until all previous scans have completed + * before going further. + * Jeff Garzik says this is only within a controller, so we + * don't need to wait for port 0, only for later ports. 
+ */ + if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0) + async_synchronize_cookie(cookie); + + (void)ata_port_probe(ap); /* in order to keep device order, we need to synchronize at this point */ async_synchronize_cookie(cookie); ata_scsi_scan_host(ap, 1); - } + /** * ata_host_register - register initialized ATA host * @host: ATA host to register diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 5defc74973d7..897066761359 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -3809,7 +3809,7 @@ int ata_sas_port_init(struct ata_port *ap) if (!rc) { ap->print_id = ata_print_id++; - rc = ata_bus_probe(ap); + rc = ata_port_probe(ap); } return rc; diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h index a9be110dbf51..773de97988a2 100644 --- a/drivers/ata/libata.h +++ b/drivers/ata/libata.h @@ -103,6 +103,7 @@ extern int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg); extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg); extern struct ata_port *ata_port_alloc(struct ata_host *host); extern const char *sata_spd_string(unsigned int spd); +extern int ata_port_probe(struct ata_port *ap); /* libata-acpi.c */ #ifdef CONFIG_ATA_ACPI -- cgit v1.2.3 From b8784f77455124ceb4746e798f7aaf5b3b8a5406 Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Sun, 23 Jan 2011 08:30:00 -0600 Subject: [SCSI] libata: fix locking for sas paths For historical reasons, libsas uses the scsi host lock as the ata port lock, and libata always uses the ata host. For the old eh, this was largely irrelevant since the two locks were never mixed inside the code. However, the new eh has a case where it nests acquisition of the host lock inside the port lock (this does look rather deadlock prone). Obviously this would be an instant deadlock if the port lock were the host lock, so switch the libsas paths to use the ata host lock as well. Cc: Tejun Heo Cc: Jeff Garzik Signed-off-by: James Bottomley --- drivers/ata/libata-scsi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 897066761359..b935f3a2b010 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -3747,7 +3747,7 @@ struct ata_port *ata_sas_port_alloc(struct ata_host *host, return NULL; ap->port_no = 0; - ap->lock = shost->host_lock; + ap->lock = &host->lock; ap->pio_mask = port_info->pio_mask; ap->mwdma_mask = port_info->mwdma_mask; ap->udma_mask = port_info->udma_mask; -- cgit v1.2.3 From 4451ef63e1a881eb275eb16dd1980491eea709de Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Sun, 23 Jan 2011 08:31:14 -0600 Subject: [SCSI] libata: fix eh locking The SCSI host eh_cmd_q should be protected by the host lock (not the port lock). This probably doesn't matter that much at the moment, since we try to serialise the add and eh pieces, but it might matter in future for more convenient error handling. Plus this switches libata to the standard eh pattern where you lock, remove from the cmd queue to a local list and unlock and then operate on the local list. 
Cc: Tejun Heo Cc: Jeff Garzik Signed-off-by: James Bottomley --- drivers/ata/libata-eh.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 17a637877d03..fc3f339a8d3c 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -589,9 +589,14 @@ void ata_scsi_error(struct Scsi_Host *host) struct ata_port *ap = ata_shost_to_port(host); int i; unsigned long flags; + LIST_HEAD(eh_work_q); DPRINTK("ENTER\n"); + spin_lock_irqsave(host->host_lock, flags); + list_splice_init(&host->eh_cmd_q, &eh_work_q); + spin_unlock_irqrestore(host->host_lock, flags); + /* make sure sff pio task is not running */ ata_sff_flush_pio_task(ap); @@ -627,7 +632,7 @@ void ata_scsi_error(struct Scsi_Host *host) if (ap->ops->lost_interrupt) ap->ops->lost_interrupt(ap); - list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) { + list_for_each_entry_safe(scmd, tmp, &eh_work_q, eh_entry) { struct ata_queued_cmd *qc; for (i = 0; i < ATA_MAX_QUEUE; i++) { @@ -762,7 +767,7 @@ void ata_scsi_error(struct Scsi_Host *host) } /* finish or retry handled scmd's and clean up */ - WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q)); + WARN_ON(host->host_failed || !list_empty(&eh_work_q)); scsi_eh_flush_done_q(&ap->eh_done_q); -- cgit v1.2.3 From 64878c0eff5737e15b3ff06d02e7227eda4aa04c Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Sun, 23 Jan 2011 09:42:50 -0600 Subject: [SCSI] libata: separate error handler into usable components Right at the moment, the libata error handler is incredibly monolithic. This makes it impossible to use from composite drivers like libsas and ipr which have to handle error themselves in the first instance. The essence of the change is to split the monolithic error handler into two components: one which handles a queue of ata commands for processing and the other which handles the back end of readying a port. This allows the upper error handler fine grained control in calling libsas functions (and making sure they only get called for ATA commands whose lower errors have been fixed up). Cc: Tejun Heo Cc: Jeff Garzik Signed-off-by: James Bottomley --- drivers/ata/libata-eh.c | 53 ++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 44 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index fc3f339a8d3c..e02455b751ca 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -587,7 +587,6 @@ static void ata_eh_unload(struct ata_port *ap) void ata_scsi_error(struct Scsi_Host *host) { struct ata_port *ap = ata_shost_to_port(host); - int i; unsigned long flags; LIST_HEAD(eh_work_q); @@ -597,6 +596,34 @@ void ata_scsi_error(struct Scsi_Host *host) list_splice_init(&host->eh_cmd_q, &eh_work_q); spin_unlock_irqrestore(host->host_lock, flags); + ata_scsi_cmd_error_handler(host, ap, &eh_work_q); + + /* If we timed raced normal completion and there is nothing to + recover nr_timedout == 0 why exactly are we doing error recovery ? */ + ata_scsi_port_error_handler(host, ap); + + /* finish or retry handled scmd's and clean up */ + WARN_ON(host->host_failed || !list_empty(&eh_work_q)); + + DPRINTK("EXIT\n"); +} + +/** + * ata_scsi_cmd_error_handler - error callback for a list of commands + * @host: scsi host containing the port + * @ap: ATA port within the host + * @eh_work_q: list of commands to process + * + * process the given list of commands and return those finished to the + * ap->eh_done_q. 
This function is the first part of the libata error + * handler which processes a given list of failed commands. + */ +void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap, + struct list_head *eh_work_q) +{ + int i; + unsigned long flags; + /* make sure sff pio task is not running */ ata_sff_flush_pio_task(ap); @@ -632,7 +659,7 @@ void ata_scsi_error(struct Scsi_Host *host) if (ap->ops->lost_interrupt) ap->ops->lost_interrupt(ap); - list_for_each_entry_safe(scmd, tmp, &eh_work_q, eh_entry) { + list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) { struct ata_queued_cmd *qc; for (i = 0; i < ATA_MAX_QUEUE; i++) { @@ -676,8 +703,20 @@ void ata_scsi_error(struct Scsi_Host *host) } else spin_unlock_wait(ap->lock); - /* If we timed raced normal completion and there is nothing to - recover nr_timedout == 0 why exactly are we doing error recovery ? */ +} +EXPORT_SYMBOL(ata_scsi_cmd_error_handler); + +/** + * ata_scsi_port_error_handler - recover the port after the commands + * @host: SCSI host containing the port + * @ap: the ATA port + * + * Handle the recovery of the port @ap after all the commands + * have been recovered. + */ +void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap) +{ + unsigned long flags; /* invoke error handler */ if (ap->ops->error_handler) { @@ -766,9 +805,6 @@ void ata_scsi_error(struct Scsi_Host *host) ap->ops->eng_timeout(ap); } - /* finish or retry handled scmd's and clean up */ - WARN_ON(host->host_failed || !list_empty(&eh_work_q)); - scsi_eh_flush_done_q(&ap->eh_done_q); /* clean up */ @@ -789,9 +825,8 @@ void ata_scsi_error(struct Scsi_Host *host) wake_up_all(&ap->eh_wait_q); spin_unlock_irqrestore(ap->lock, flags); - - DPRINTK("EXIT\n"); } +EXPORT_SYMBOL_GPL(ata_scsi_port_error_handler); /** * ata_port_wait_eh - Wait for the currently pending EH to complete -- cgit v1.2.3 From c299190b9398d4edfbf80a749875d5bac199bfdc Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Sun, 23 Jan 2011 09:44:12 -0600 Subject: [SCSI] libsas: convert to libata new error handler The conversion is quite complex given that the libata new error handler has to be hooked into the current libsas timeout and error handling. The way this is done is to process all the failed commands via libsas first, but if they have no underlying sas task (and they're on a sata device) assume they are destined for the libata error handler and send them accordingly. Finally, activate the port recovery of the libata error handler for each port known to the host. This is somewhat suboptimal, since that port may not need recovering, but given the current architecture of the libata error handler, it's the only way; and the spurious activation is harmless. 
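In outline, a composite driver drives the split error handler in two phases, along
these lines (a hedged sketch; cmd_wants_libata_eh() is an illustrative stand-in for
the "SATA device with no sas_task" test described above, not a real libsas helper):

#include <linux/libata.h>
#include <linux/list.h>
#include <linux/types.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

/* Illustrative placeholder for the filtering test described above. */
static bool cmd_wants_libata_eh(struct scsi_cmnd *cmd, struct ata_port *ap)
{
        return true;    /* placeholder only */
}

static void example_sas_eh_strategy(struct Scsi_Host *shost,
                                    struct ata_port *ap,
                                    struct list_head *work_q)
{
        struct scsi_cmnd *cmd, *n;
        LIST_HEAD(sata_q);

        /* Phase 1: collect the failed commands destined for this ATA port
         * and hand them to the libata command-level handler. */
        list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
                if (cmd_wants_libata_eh(cmd, ap))
                        list_move(&cmd->eh_entry, &sata_q);
        }
        if (!list_empty(&sata_q))
                ata_scsi_cmd_error_handler(shost, ap, &sata_q);

        /* Phase 2: once command-level recovery is done, recover the port
         * itself; harmless if the port did not actually need it. */
        ata_scsi_port_error_handler(shost, ap);
}
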
Signed-off-by: James Bottomley --- drivers/scsi/libsas/sas_ata.c | 87 ++++++++++++++++++++++++++++++++++--- drivers/scsi/libsas/sas_scsi_host.c | 14 +++++- 2 files changed, 93 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index c2a5cc75b407..16c5094bc86c 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -240,37 +240,43 @@ static bool sas_ata_qc_fill_rtf(struct ata_queued_cmd *qc) return true; } -static void sas_ata_phy_reset(struct ata_port *ap) +static int sas_ata_hard_reset(struct ata_link *link, unsigned int *class, + unsigned long deadline) { + struct ata_port *ap = link->ap; struct domain_device *dev = ap->private_data; struct sas_internal *i = to_sas_internal(dev->port->ha->core.shost->transportt); int res = TMF_RESP_FUNC_FAILED; + int ret = 0; if (i->dft->lldd_I_T_nexus_reset) res = i->dft->lldd_I_T_nexus_reset(dev); - if (res != TMF_RESP_FUNC_COMPLETE) + if (res != TMF_RESP_FUNC_COMPLETE) { SAS_DPRINTK("%s: Unable to reset I T nexus?\n", __func__); + ret = -EAGAIN; + } switch (dev->sata_dev.command_set) { case ATA_COMMAND_SET: SAS_DPRINTK("%s: Found ATA device.\n", __func__); - ap->link.device[0].class = ATA_DEV_ATA; + *class = ATA_DEV_ATA; break; case ATAPI_COMMAND_SET: SAS_DPRINTK("%s: Found ATAPI device.\n", __func__); - ap->link.device[0].class = ATA_DEV_ATAPI; + *class = ATA_DEV_ATAPI; break; default: SAS_DPRINTK("%s: Unknown SATA command set: %d.\n", __func__, dev->sata_dev.command_set); - ap->link.device[0].class = ATA_DEV_UNKNOWN; + *class = ATA_DEV_UNKNOWN; break; } ap->cbl = ATA_CBL_SATA; + return ret; } static void sas_ata_post_internal(struct ata_queued_cmd *qc) @@ -302,7 +308,11 @@ static void sas_ata_post_internal(struct ata_queued_cmd *qc) } static struct ata_port_operations sas_sata_ops = { - .phy_reset = sas_ata_phy_reset, + .prereset = ata_std_prereset, + .softreset = NULL, + .hardreset = sas_ata_hard_reset, + .postreset = ata_std_postreset, + .error_handler = ata_std_error_handler, .post_internal_cmd = sas_ata_post_internal, .qc_defer = ata_std_qc_defer, .qc_prep = ata_noop_qc_prep, @@ -732,3 +742,68 @@ int sas_discover_sata(struct domain_device *dev) return res; } + +void sas_ata_strategy_handler(struct Scsi_Host *shost) +{ + struct scsi_device *sdev; + + shost_for_each_device(sdev, shost) { + struct domain_device *ddev = sdev_to_domain_dev(sdev); + struct ata_port *ap = ddev->sata_dev.ap; + + if (!dev_is_sata(ddev)) + continue; + + ata_port_printk(ap, KERN_DEBUG, "sas eh calling libata port error handler"); + ata_scsi_port_error_handler(shost, ap); + } +} + +int sas_ata_timed_out(struct scsi_cmnd *cmd, struct sas_task *task, + enum blk_eh_timer_return *rtn) +{ + struct domain_device *ddev = cmd_to_domain_dev(cmd); + + if (!dev_is_sata(ddev) || task) + return 0; + + /* we're a sata device with no task, so this must be a libata + * eh timeout. 
Ideally should hook into libata timeout + * handling, but there's no point, it just wants to activate + * the eh thread */ + *rtn = BLK_EH_NOT_HANDLED; + return 1; +} + +int sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q, + struct list_head *done_q) +{ + int rtn = 0; + struct scsi_cmnd *cmd, *n; + struct ata_port *ap; + + do { + LIST_HEAD(sata_q); + + ap = NULL; + + list_for_each_entry_safe(cmd, n, work_q, eh_entry) { + struct domain_device *ddev = cmd_to_domain_dev(cmd); + + if (!dev_is_sata(ddev) || TO_SAS_TASK(cmd)) + continue; + if (ap && ap != ddev->sata_dev.ap) + continue; + ap = ddev->sata_dev.ap; + rtn = 1; + list_move(&cmd->eh_entry, &sata_q); + } + + if (!list_empty(&sata_q)) { + ata_port_printk(ap, KERN_DEBUG, "sas eh calling libata cmd error handler\n"); + ata_scsi_cmd_error_handler(shost, ap, &sata_q); + } + } while (ap); + + return rtn; +} diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index 5815cbeb27a6..2119871aed3e 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c @@ -662,11 +662,16 @@ void sas_scsi_recover_host(struct Scsi_Host *shost) * scsi_unjam_host does, but we skip scsi_eh_abort_cmds because any * command we see here has no sas_task and is thus unknown to the HA. */ - if (!scsi_eh_get_sense(&eh_work_q, &ha->eh_done_q)) - scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q); + if (!sas_ata_eh(shost, &eh_work_q, &ha->eh_done_q)) + if (!scsi_eh_get_sense(&eh_work_q, &ha->eh_done_q)) + scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q); out: + /* now link into libata eh --- if we have any ata devices */ + sas_ata_strategy_handler(shost); + scsi_eh_flush_done_q(&ha->eh_done_q); + SAS_DPRINTK("--- Exit %s\n", __func__); return; } @@ -675,6 +680,11 @@ enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd) { struct sas_task *task = TO_SAS_TASK(cmd); unsigned long flags; + enum blk_eh_timer_return rtn; + + if (sas_ata_timed_out(cmd, task, &rtn)) + return rtn; + if (!task) { cmd->request->timeout /= 2; -- cgit v1.2.3 From 5767a1c498931417e69e663ddd5e110cbaabec32 Mon Sep 17 00:00:00 2001 From: Kleber Sacilotto de Souza Date: Mon, 14 Feb 2011 20:19:31 -0200 Subject: [SCSI] ipr: Fix a race on multiple configuration changes In a multiple configuration change scenario a remove notification can be followed by an immediate add notification for the same device, which will cause the device to be removed but never added back. This patch fixes the problem by ensuring that in such situations the device will be added back. 
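Conceptually, the fix keeps the resource alive when an add notification races with a
pending removal; the flag interplay is roughly as sketched here (a simplified
restatement of the hunks that follow, not standalone driver code):

        /* Config-change "add": remember the add even when the sdev still
         * exists because a removal is pending (res->del_from_ml set). */
        if (!res->sdev || res->del_from_ml) {
                res->add_to_ml = 1;
                if (ioa_cfg->allow_ml_add_del)
                        schedule_work(&ioa_cfg->work_q);
        }

        /* Worker, while processing the removal: free the resource only if
         * no add was requested in the meantime; otherwise keep it and clear
         * the removal flag so the device is added back to the mid-layer. */
        if (!res->add_to_ml)
                list_move_tail(&res->queue, &ioa_cfg->free_res_q);
        else
                res->del_from_ml = 0;
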
Signed-off-by: Kleber Sacilotto de Souza Acked-by: Brian King Signed-off-by: James Bottomley --- drivers/scsi/ipr.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index a8e24f06b8d8..14857679f4e9 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -1301,7 +1301,7 @@ static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg, ipr_clear_res_target(res); list_move_tail(&res->queue, &ioa_cfg->free_res_q); } - } else if (!res->sdev) { + } else if (!res->sdev || res->del_from_ml) { res->add_to_ml = 1; if (ioa_cfg->allow_ml_add_del) schedule_work(&ioa_cfg->work_q); @@ -3104,7 +3104,10 @@ restart: did_work = 1; sdev = res->sdev; if (!scsi_device_get(sdev)) { - list_move_tail(&res->queue, &ioa_cfg->free_res_q); + if (!res->add_to_ml) + list_move_tail(&res->queue, &ioa_cfg->free_res_q); + else + res->del_from_ml = 0; spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); scsi_remove_device(sdev); scsi_device_put(sdev); -- cgit v1.2.3 From 9e0fc764eaec082cd2ffcf82568dfdd086935934 Mon Sep 17 00:00:00 2001 From: "Stephen M. Cameron" Date: Tue, 15 Feb 2011 15:32:48 -0600 Subject: [SCSI] hpsa: do not re-order commands in internal queues Driver's internal queues should be FIFO, not LIFO. This is a port of an almost identical patch from cciss by Jens Axboe. Signed-off-by: Stephen M. Cameron Signed-off-by: James Bottomley --- drivers/scsi/hpsa.c | 23 +++++++++++------------ drivers/scsi/hpsa.h | 4 ++-- drivers/scsi/hpsa_cmd.h | 2 +- 3 files changed, 14 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 959eeb202d92..0f40de2a33de 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -314,9 +314,9 @@ static ssize_t host_show_commands_outstanding(struct device *dev, } /* Enqueuing and dequeuing functions for cmdlists. 
*/ -static inline void addQ(struct hlist_head *list, struct CommandList *c) +static inline void addQ(struct list_head *list, struct CommandList *c) { - hlist_add_head(&c->list, list); + list_add_tail(&c->list, list); } static inline u32 next_command(struct ctlr_info *h) @@ -366,9 +366,9 @@ static void enqueue_cmd_and_start_io(struct ctlr_info *h, static inline void removeQ(struct CommandList *c) { - if (WARN_ON(hlist_unhashed(&c->list))) + if (WARN_ON(list_empty(&c->list))) return; - hlist_del_init(&c->list); + list_del_init(&c->list); } static inline int is_hba_lunid(unsigned char scsi3addr[]) @@ -2228,7 +2228,7 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h) c->cmdindex = i; - INIT_HLIST_NODE(&c->list); + INIT_LIST_HEAD(&c->list); c->busaddr = (u32) cmd_dma_handle; temp64.val = (u64) err_dma_handle; c->ErrDesc.Addr.lower = temp64.val32.lower; @@ -2266,7 +2266,7 @@ static struct CommandList *cmd_special_alloc(struct ctlr_info *h) } memset(c->err_info, 0, sizeof(*c->err_info)); - INIT_HLIST_NODE(&c->list); + INIT_LIST_HEAD(&c->list); c->busaddr = (u32) cmd_dma_handle; temp64.val = (u64) err_dma_handle; c->ErrDesc.Addr.lower = temp64.val32.lower; @@ -2837,8 +2837,8 @@ static void start_io(struct ctlr_info *h) { struct CommandList *c; - while (!hlist_empty(&h->reqQ)) { - c = hlist_entry(h->reqQ.first, struct CommandList, list); + while (!list_empty(&h->reqQ)) { + c = list_entry(h->reqQ.next, struct CommandList, list); /* can't do anything if fifo is full */ if ((h->access.fifo_full(h))) { dev_warn(&h->pdev->dev, "fifo full\n"); @@ -2929,10 +2929,9 @@ static inline u32 process_nonindexed_cmd(struct ctlr_info *h, { u32 tag; struct CommandList *c = NULL; - struct hlist_node *tmp; tag = hpsa_tag_discard_error_bits(raw_tag); - hlist_for_each_entry(c, tmp, &h->cmpQ, list) { + list_for_each_entry(c, &h->cmpQ, list) { if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) { finish_cmd(c, raw_tag); return next_command(h); @@ -3761,8 +3760,8 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev, h->pdev = pdev; h->busy_initializing = 1; - INIT_HLIST_HEAD(&h->cmpQ); - INIT_HLIST_HEAD(&h->reqQ); + INIT_LIST_HEAD(&h->cmpQ); + INIT_LIST_HEAD(&h->reqQ); spin_lock_init(&h->lock); spin_lock_init(&h->scan_lock); rc = hpsa_pci_init(h); diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h index 074d237f4497..e89819342364 100644 --- a/drivers/scsi/hpsa.h +++ b/drivers/scsi/hpsa.h @@ -75,8 +75,8 @@ struct ctlr_info { struct access_method access; /* queue and queue Info */ - struct hlist_head reqQ; - struct hlist_head cmpQ; + struct list_head reqQ; + struct list_head cmpQ; unsigned int Qdepth; unsigned int maxQsinceinit; unsigned int maxSG; diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h index 7910c1411156..785abdd0333c 100644 --- a/drivers/scsi/hpsa_cmd.h +++ b/drivers/scsi/hpsa_cmd.h @@ -292,7 +292,7 @@ struct CommandList { struct ctlr_info *h; int cmd_type; long cmdindex; - struct hlist_node list; + struct list_head list; struct request *rq; struct completion *waiting; void *scsi_cmd; -- cgit v1.2.3 From a9a3a2739a44fc05dcaba0d4d36e52dc444c294f Mon Sep 17 00:00:00 2001 From: "Stephen M. Cameron" Date: Tue, 15 Feb 2011 15:32:53 -0600 Subject: [SCSI] hpsa: make hpsa.hpsa_simple_mode=1 module parameter actually work It's not enough to simple avoid putting the board into performant mode, as we have to set up the interrupts differently, etc. 
When I originally tested this module parameter, I tested it incorrectly without realizing it, and the driver was running in performant mode the whole time unbeknownst to me. Signed-off-by: Stephen M. Cameron Signed-off-by: James Bottomley --- drivers/scsi/hpsa.c | 37 +++++++++++++++++++++++-------------- drivers/scsi/hpsa.h | 1 + 2 files changed, 24 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 0f40de2a33de..66ccacfffd51 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -1186,7 +1186,7 @@ static int hpsa_scsi_detect(struct ctlr_info *h) sh->sg_tablesize = h->maxsgentries; h->scsi_host = sh; sh->hostdata[0] = (unsigned long) h; - sh->irq = h->intr[PERF_MODE_INT]; + sh->irq = h->intr[h->intr_mode]; sh->unique_id = sh->irq; error = scsi_add_host(sh, &h->pdev->dev); if (error) @@ -2902,10 +2902,14 @@ static inline u32 hpsa_tag_to_index(u32 tag) return tag >> DIRECT_LOOKUP_SHIFT; } -static inline u32 hpsa_tag_discard_error_bits(u32 tag) + +static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag) { -#define HPSA_ERROR_BITS 0x03 - return tag & ~HPSA_ERROR_BITS; +#define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1) +#define HPSA_SIMPLE_ERROR_BITS 0x03 + if (unlikely(h->transMethod != CFGTBL_Trans_Performant)) + return tag & ~HPSA_SIMPLE_ERROR_BITS; + return tag & ~HPSA_PERF_ERROR_BITS; } /* process completion of an indexed ("direct lookup") command */ @@ -2930,7 +2934,7 @@ static inline u32 process_nonindexed_cmd(struct ctlr_info *h, u32 tag; struct CommandList *c = NULL; - tag = hpsa_tag_discard_error_bits(raw_tag); + tag = hpsa_tag_discard_error_bits(h, raw_tag); list_for_each_entry(c, &h->cmpQ, list) { if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) { finish_cmd(c, raw_tag); @@ -2981,7 +2985,10 @@ static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id) return IRQ_HANDLED; } -/* Send a message CDB to the firmware. */ +/* Send a message CDB to the firmware. Careful, this only works + * in simple mode, not performant mode due to the tag lookup. + * We only ever use this immediately after a controller reset. + */ static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode, unsigned char type) { @@ -3047,7 +3054,7 @@ static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode, for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) { tag = readl(vaddr + SA5_REPLY_PORT_OFFSET); - if (hpsa_tag_discard_error_bits(tag) == paddr32) + if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr32) break; msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS); } @@ -3379,7 +3386,7 @@ static void __devinit hpsa_interrupt_mode(struct ctlr_info *h) default_int_mode: #endif /* CONFIG_PCI_MSI */ /* if we get here we're going to use the default interrupt mode */ - h->intr[PERF_MODE_INT] = h->pdev->irq; + h->intr[h->intr_mode] = h->pdev->irq; } static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id) @@ -3760,6 +3767,8 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev, h->pdev = pdev; h->busy_initializing = 1; + h->intr_mode = hpsa_simple_mode ? 
SIMPLE_MODE_INT : PERF_MODE_INT; + printk(KERN_WARNING "hpsa_simple_mode is %d\n", hpsa_simple_mode); INIT_LIST_HEAD(&h->cmpQ); INIT_LIST_HEAD(&h->reqQ); spin_lock_init(&h->lock); @@ -3790,20 +3799,20 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev, h->access.set_intr_mask(h, HPSA_INTR_OFF); if (h->msix_vector || h->msi_vector) - rc = request_irq(h->intr[PERF_MODE_INT], do_hpsa_intr_msi, + rc = request_irq(h->intr[h->intr_mode], do_hpsa_intr_msi, IRQF_DISABLED, h->devname, h); else - rc = request_irq(h->intr[PERF_MODE_INT], do_hpsa_intr_intx, + rc = request_irq(h->intr[h->intr_mode], do_hpsa_intr_intx, IRQF_DISABLED, h->devname, h); if (rc) { dev_err(&pdev->dev, "unable to get irq %d for %s\n", - h->intr[PERF_MODE_INT], h->devname); + h->intr[h->intr_mode], h->devname); goto clean2; } dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n", h->devname, pdev->device, - h->intr[PERF_MODE_INT], dac ? "" : " not"); + h->intr[h->intr_mode], dac ? "" : " not"); h->cmd_pool_bits = kmalloc(((h->nr_cmds + BITS_PER_LONG - @@ -3854,7 +3863,7 @@ clean4: h->nr_cmds * sizeof(struct ErrorInfo), h->errinfo_pool, h->errinfo_pool_dhandle); - free_irq(h->intr[PERF_MODE_INT], h); + free_irq(h->intr[h->intr_mode], h); clean2: clean1: h->busy_initializing = 0; @@ -3898,7 +3907,7 @@ static void hpsa_shutdown(struct pci_dev *pdev) */ hpsa_flush_cache(h); h->access.set_intr_mask(h, HPSA_INTR_OFF); - free_irq(h->intr[PERF_MODE_INT], h); + free_irq(h->intr[h->intr_mode], h); #ifdef CONFIG_PCI_MSI if (h->msix_vector) pci_disable_msix(h->pdev); diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h index e89819342364..621a1530054a 100644 --- a/drivers/scsi/hpsa.h +++ b/drivers/scsi/hpsa.h @@ -72,6 +72,7 @@ struct ctlr_info { unsigned int intr[4]; unsigned int msix_vector; unsigned int msi_vector; + int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */ struct access_method access; /* queue and queue Info */ -- cgit v1.2.3 From 745a7a25bc0f6dc77db72656b7bc8d17b6ee8e53 Mon Sep 17 00:00:00 2001 From: "Stephen M. Cameron" Date: Tue, 15 Feb 2011 15:32:58 -0600 Subject: [SCSI] hpsa: Add transport_mode host attribute in /sys Signed-off-by: Stephen M. 
Cameron Signed-off-by: James Bottomley --- drivers/scsi/hpsa.c | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 66ccacfffd51..563d439c2f46 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -161,6 +161,8 @@ static ssize_t host_show_firmware_revision(struct device *dev, struct device_attribute *attr, char *buf); static ssize_t host_show_commands_outstanding(struct device *dev, struct device_attribute *attr, char *buf); +static ssize_t host_show_transport_mode(struct device *dev, + struct device_attribute *attr, char *buf); static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno); static ssize_t host_store_rescan(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); @@ -192,6 +194,8 @@ static DEVICE_ATTR(firmware_revision, S_IRUGO, host_show_firmware_revision, NULL); static DEVICE_ATTR(commands_outstanding, S_IRUGO, host_show_commands_outstanding, NULL); +static DEVICE_ATTR(transport_mode, S_IRUGO, + host_show_transport_mode, NULL); static struct device_attribute *hpsa_sdev_attrs[] = { &dev_attr_raid_level, @@ -204,6 +208,7 @@ static struct device_attribute *hpsa_shost_attrs[] = { &dev_attr_rescan, &dev_attr_firmware_revision, &dev_attr_commands_outstanding, + &dev_attr_transport_mode, NULL, }; @@ -313,6 +318,18 @@ static ssize_t host_show_commands_outstanding(struct device *dev, return snprintf(buf, 20, "%d\n", h->commands_outstanding); } +static ssize_t host_show_transport_mode(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ctlr_info *h; + struct Scsi_Host *shost = class_to_shost(dev); + + h = shost_to_hba(shost); + return snprintf(buf, 20, "%s\n", + h->transMethod == CFGTBL_Trans_Performant ? + "performant" : "simple"); +} + /* Enqueuing and dequeuing functions for cmdlists. */ static inline void addQ(struct list_head *list, struct CommandList *c) { @@ -3768,7 +3785,6 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev, h->pdev = pdev; h->busy_initializing = 1; h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT; - printk(KERN_WARNING "hpsa_simple_mode is %d\n", hpsa_simple_mode); INIT_LIST_HEAD(&h->cmpQ); INIT_LIST_HEAD(&h->reqQ); spin_lock_init(&h->lock); -- cgit v1.2.3 From 960a30e7a73affcc441b9ceaff3b1b9e73e99c1f Mon Sep 17 00:00:00 2001 From: "Stephen M. Cameron" Date: Tue, 15 Feb 2011 15:33:03 -0600 Subject: [SCSI] hpsa: Inform controller we are using 32-bit tags. Controller will transfer only 32-bits on completion if it knows we are only using 32-bit tags. Also, some newer controllers apparently (and erroneously) require that we only use 32-bit tags, and that we inform the controller of this. Signed-off-by: Stephen M. Cameron Signed-off-by: James Bottomley --- drivers/scsi/hpsa.c | 24 +++++++++++++----------- drivers/scsi/hpsa_cmd.h | 1 + 2 files changed, 14 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 563d439c2f46..a778cb1fd8bc 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -326,7 +326,7 @@ static ssize_t host_show_transport_mode(struct device *dev, h = shost_to_hba(shost); return snprintf(buf, 20, "%s\n", - h->transMethod == CFGTBL_Trans_Performant ? + h->transMethod & CFGTBL_Trans_Performant ? 
"performant" : "simple"); } @@ -340,7 +340,7 @@ static inline u32 next_command(struct ctlr_info *h) { u32 a; - if (unlikely(h->transMethod != CFGTBL_Trans_Performant)) + if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant))) return h->access.command_completed(h); if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) { @@ -364,7 +364,7 @@ static inline u32 next_command(struct ctlr_info *h) */ static void set_performant_mode(struct ctlr_info *h, struct CommandList *c) { - if (likely(h->transMethod == CFGTBL_Trans_Performant)) + if (likely(h->transMethod & CFGTBL_Trans_Performant)) c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1); } @@ -2924,7 +2924,7 @@ static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag) { #define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1) #define HPSA_SIMPLE_ERROR_BITS 0x03 - if (unlikely(h->transMethod != CFGTBL_Trans_Performant)) + if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant))) return tag & ~HPSA_SIMPLE_ERROR_BITS; return tag & ~HPSA_PERF_ERROR_BITS; } @@ -3640,6 +3640,7 @@ static int __devinit hpsa_enter_simple_mode(struct ctlr_info *h) "unable to get board into simple mode\n"); return -ENODEV; } + h->transMethod = CFGTBL_Trans_Simple; return 0; } @@ -4025,7 +4026,8 @@ static void calc_bucket_map(int bucket[], int num_buckets, } } -static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h) +static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h, + u32 use_short_tags) { int i; unsigned long register_value; @@ -4073,7 +4075,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h) writel(0, &h->transtable->RepQCtrAddrHigh32); writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32); writel(0, &h->transtable->RepQAddr0High32); - writel(CFGTBL_Trans_Performant, + writel(CFGTBL_Trans_Performant | use_short_tags, &(h->cfgtable->HostWrite.TransportRequest)); writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); hpsa_wait_for_mode_change_ack(h); @@ -4083,6 +4085,9 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h) " performant mode\n"); return; } + /* Change the access methods to the performant access methods */ + h->access = SA5_performant_access; + h->transMethod = CFGTBL_Trans_Performant; } static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h) @@ -4111,11 +4116,8 @@ static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h) || (h->blockFetchTable == NULL)) goto clean_up; - hpsa_enter_performant_mode(h); - - /* Change the access methods to the performant access methods */ - h->access = SA5_performant_access; - h->transMethod = CFGTBL_Trans_Performant; + hpsa_enter_performant_mode(h, + trans_support & CFGTBL_Trans_use_short_tags); return; diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h index 785abdd0333c..18464900e761 100644 --- a/drivers/scsi/hpsa_cmd.h +++ b/drivers/scsi/hpsa_cmd.h @@ -104,6 +104,7 @@ #define CFGTBL_Trans_Simple 0x00000002l #define CFGTBL_Trans_Performant 0x00000004l +#define CFGTBL_Trans_use_short_tags 0x20000000l #define CFGTBL_BusType_Ultra2 0x00000001l #define CFGTBL_BusType_Ultra3 0x00000002l -- cgit v1.2.3 From ba95e2ac6bfeb9af92153058a353fc47e1addc02 Mon Sep 17 00:00:00 2001 From: "Stephen M. Cameron" Date: Tue, 15 Feb 2011 15:33:08 -0600 Subject: [SCSI] hpsa: Do not attempt kdump if we detect resetting controller failed. We can get completions left over from before the attempted reset which will interfere with the kdump. 
Better to just not make the attempt in that case. Signed-off-by: Stephen M. Cameron Signed-off-by: James Bottomley --- drivers/scsi/hpsa.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index a778cb1fd8bc..eb6938fe77b0 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -3264,13 +3264,13 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) * It means we're on one of those controllers which doesn't support * the doorbell reset method and on which the PCI power management reset * method doesn't work (P800, for example.) - * In those cases, pretend the reset worked and hope for the best. + * In those cases, don't try to proceed, as it generally doesn't work. */ active_transport = readl(&cfgtable->TransportActive); if (active_transport & PERFORMANT_MODE) { dev_warn(&pdev->dev, "Unable to successfully reset controller," - " proceeding anyway.\n"); - rc = -ENOTSUPP; + " Ignoring controller.\n"); + rc = -ENODEV; } unmap_cfgtable: -- cgit v1.2.3 From 382be668c5a284844f9dcbb5b1cb8ffba2386d80 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 15 Feb 2011 15:33:13 -0600 Subject: [SCSI] hpsa: fix bad comparison '!' has higher precedence than '&'. CFGTBL_ChangeReq is 0x1 so the original code is equivelent to if (!doorbell_value) {... Signed-off-by: Dan Carpenter Acked-by: Stephen M. Cameron Signed-off-by: James Bottomley --- drivers/scsi/hpsa.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index eb6938fe77b0..c30591f84a06 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -3614,7 +3614,7 @@ static void __devinit hpsa_wait_for_mode_change_ack(struct ctlr_info *h) spin_lock_irqsave(&h->lock, flags); doorbell_value = readl(h->vaddr + SA5_DOORBELL); spin_unlock_irqrestore(&h->lock, flags); - if (!doorbell_value & CFGTBL_ChangeReq) + if (!(doorbell_value & CFGTBL_ChangeReq)) break; /* delay and try again */ usleep_range(10000, 20000); -- cgit v1.2.3 From fedd3b7b93302c7789bd3eeb190653cfb0fe7645 Mon Sep 17 00:00:00 2001 From: James Smart Date: Wed, 16 Feb 2011 12:39:24 -0500 Subject: [SCSI] lpfc 8.3.21: Critical Errors and Bug Fixes Critical Errors: - Correctly handle non-zero return lpfc_workq_post_event and return ENOMEM - Save the irq level when locking the host_lock in lpfc_findnode_did Bug Fixes: - Adjust payload_length and request_length for sli4_config mailbox commands. - Add the freed sgl/XRI to the tail of the list rather than to the head. - Set the FC_VPORT_NEEDS_INIT_VPI on vport deletes and check it before issuing a fdisc on an els retry. - Only call lpfc_hba_init_link() if phba->cfg_suppress_link_up is LPFC_INITIALIZE_LINK. 
- Add support for SLI-4 Performance Hints Signed-off-by: Alex Iannicelli Signed-off-by: James Smart Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc.h | 2 + drivers/scsi/lpfc/lpfc_attr.c | 29 ++++++++-- drivers/scsi/lpfc/lpfc_crtn.h | 4 +- drivers/scsi/lpfc/lpfc_els.c | 3 +- drivers/scsi/lpfc/lpfc_hbadisc.c | 5 +- drivers/scsi/lpfc/lpfc_hw4.h | 104 +++++++++++++++++++++++++++++++++-- drivers/scsi/lpfc/lpfc_init.c | 113 ++++++++++++++++++++++++++++++--------- drivers/scsi/lpfc/lpfc_mbox.c | 16 +++--- drivers/scsi/lpfc/lpfc_scsi.c | 15 +++++- drivers/scsi/lpfc/lpfc_sli.c | 21 +++++--- drivers/scsi/lpfc/lpfc_sli4.h | 4 ++ drivers/scsi/lpfc/lpfc_vport.c | 4 ++ 12 files changed, 264 insertions(+), 56 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 746dd3d7a092..b388e436c687 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -548,6 +548,8 @@ struct lpfc_hba { #define LPFC_SLI3_CRP_ENABLED 0x08 #define LPFC_SLI3_BG_ENABLED 0x20 #define LPFC_SLI3_DSS_ENABLED 0x40 +#define LPFC_SLI4_PERFH_ENABLED 0x80 +#define LPFC_SLI4_PHWQ_ENABLED 0x100 uint32_t iocb_cmd_size; uint32_t iocb_rsp_size; diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 3512abb8a587..745774c5a4be 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -623,10 +623,14 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type) int status = 0; int cnt = 0; int i; + int rc; init_completion(&online_compl); - lpfc_workq_post_event(phba, &status, &online_compl, + rc = lpfc_workq_post_event(phba, &status, &online_compl, LPFC_EVT_OFFLINE_PREP); + if (rc == 0) + return -ENOMEM; + wait_for_completion(&online_compl); if (status != 0) @@ -652,7 +656,10 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type) } init_completion(&online_compl); - lpfc_workq_post_event(phba, &status, &online_compl, type); + rc = lpfc_workq_post_event(phba, &status, &online_compl, type); + if (rc == 0) + return -ENOMEM; + wait_for_completion(&online_compl); if (status != 0) @@ -682,6 +689,7 @@ lpfc_selective_reset(struct lpfc_hba *phba) { struct completion online_compl; int status = 0; + int rc; if (!phba->cfg_enable_hba_reset) return -EIO; @@ -692,8 +700,11 @@ lpfc_selective_reset(struct lpfc_hba *phba) return status; init_completion(&online_compl); - lpfc_workq_post_event(phba, &status, &online_compl, + rc = lpfc_workq_post_event(phba, &status, &online_compl, LPFC_EVT_ONLINE); + if (rc == 0) + return -ENOMEM; + wait_for_completion(&online_compl); if (status != 0) @@ -812,14 +823,17 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr, struct lpfc_hba *phba = vport->phba; struct completion online_compl; int status=0; + int rc; if (!phba->cfg_enable_hba_reset) return -EACCES; init_completion(&online_compl); if(strncmp(buf, "online", sizeof("online") - 1) == 0) { - lpfc_workq_post_event(phba, &status, &online_compl, + rc = lpfc_workq_post_event(phba, &status, &online_compl, LPFC_EVT_ONLINE); + if (rc == 0) + return -ENOMEM; wait_for_completion(&online_compl); } else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0) status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); @@ -1813,6 +1827,7 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr, int stat1=0, stat2=0; unsigned int i, j, cnt=count; u8 wwpn[8]; + int rc; if (!phba->cfg_enable_hba_reset) return -EACCES; @@ -1863,7 +1878,11 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr, "0463 lpfc_soft_wwpn 
attribute set failed to " "reinit adapter - %d\n", stat1); init_completion(&online_compl); - lpfc_workq_post_event(phba, &stat2, &online_compl, LPFC_EVT_ONLINE); + rc = lpfc_workq_post_event(phba, &stat2, &online_compl, + LPFC_EVT_ONLINE); + if (rc == 0) + return -ENOMEM; + wait_for_completion(&online_compl); if (stat2) lpfc_printf_log(phba, KERN_ERR, LOG_INIT, diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 17fde522c84a..5b4513792a89 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -53,9 +53,9 @@ void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *); void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t); void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *); void lpfc_supported_pages(struct lpfcMboxq *); -void lpfc_sli4_params(struct lpfcMboxq *); +void lpfc_pc_sli4_params(struct lpfcMboxq *); int lpfc_pc_sli4_params_get(struct lpfc_hba *, LPFC_MBOXQ_t *); - +int lpfc_get_sli4_parameters(struct lpfc_hba *, LPFC_MBOXQ_t *); struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t); void lpfc_cleanup_rcv_buffers(struct lpfc_vport *); void lpfc_rcv_seq_check_edtov(struct lpfc_vport *); diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index c62d567cc845..58e1a55d97f9 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -2745,7 +2745,8 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) } break; case ELS_CMD_FDISC: - lpfc_issue_els_fdisc(vport, ndlp, retry); + if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)) + lpfc_issue_els_fdisc(vport, ndlp, retry); break; } return; diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index bb015960dbc9..1f9c7f19c31f 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -4426,10 +4426,11 @@ lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_nodelist *ndlp; + unsigned long iflags; - spin_lock_irq(shost->host_lock); + spin_lock_irqsave(shost->host_lock, iflags); ndlp = __lpfc_findnode_did(vport, did); - spin_unlock_irq(shost->host_lock); + spin_unlock_irqrestore(shost->host_lock, iflags); return ndlp; } diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 94c1aa1136de..c7178d60c7bf 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -778,6 +778,7 @@ struct mbox_header { #define LPFC_MBOX_OPCODE_QUERY_FW_CFG 0x3A #define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D #define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A +#define LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS 0xB5 /* FCoE Opcodes */ #define LPFC_MBOX_OPCODE_FCOE_WQ_CREATE 0x01 @@ -1852,6 +1853,9 @@ struct lpfc_mbx_request_features { #define lpfc_mbx_rq_ftr_rq_ifip_SHIFT 7 #define lpfc_mbx_rq_ftr_rq_ifip_MASK 0x00000001 #define lpfc_mbx_rq_ftr_rq_ifip_WORD word2 +#define lpfc_mbx_rq_ftr_rq_perfh_SHIFT 11 +#define lpfc_mbx_rq_ftr_rq_perfh_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rq_perfh_WORD word2 uint32_t word3; #define lpfc_mbx_rq_ftr_rsp_iaab_SHIFT 0 #define lpfc_mbx_rq_ftr_rsp_iaab_MASK 0x00000001 @@ -1877,6 +1881,9 @@ struct lpfc_mbx_request_features { #define lpfc_mbx_rq_ftr_rsp_ifip_SHIFT 7 #define lpfc_mbx_rq_ftr_rsp_ifip_MASK 0x00000001 #define lpfc_mbx_rq_ftr_rsp_ifip_WORD word3 +#define lpfc_mbx_rq_ftr_rsp_perfh_SHIFT 11 +#define lpfc_mbx_rq_ftr_rsp_perfh_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rsp_perfh_WORD word3 }; struct lpfc_mbx_supp_pages { 
@@ -1935,7 +1942,7 @@ struct lpfc_mbx_supp_pages { #define LPFC_SLI4_PARAMETERS 2 }; -struct lpfc_mbx_sli4_params { +struct lpfc_mbx_pc_sli4_params { uint32_t word1; #define qs_SHIFT 0 #define qs_MASK 0x00000001 @@ -2051,6 +2058,88 @@ struct lpfc_mbx_sli4_params { uint32_t rsvd_13_63[51]; }; +struct lpfc_sli4_parameters { + uint32_t word0; +#define cfg_prot_type_SHIFT 0 +#define cfg_prot_type_MASK 0x000000FF +#define cfg_prot_type_WORD word0 + uint32_t word1; +#define cfg_ft_SHIFT 0 +#define cfg_ft_MASK 0x00000001 +#define cfg_ft_WORD word1 +#define cfg_sli_rev_SHIFT 4 +#define cfg_sli_rev_MASK 0x0000000f +#define cfg_sli_rev_WORD word1 +#define cfg_sli_family_SHIFT 8 +#define cfg_sli_family_MASK 0x0000000f +#define cfg_sli_family_WORD word1 +#define cfg_if_type_SHIFT 12 +#define cfg_if_type_MASK 0x0000000f +#define cfg_if_type_WORD word1 +#define cfg_sli_hint_1_SHIFT 16 +#define cfg_sli_hint_1_MASK 0x000000ff +#define cfg_sli_hint_1_WORD word1 +#define cfg_sli_hint_2_SHIFT 24 +#define cfg_sli_hint_2_MASK 0x0000001f +#define cfg_sli_hint_2_WORD word1 + uint32_t word2; + uint32_t word3; + uint32_t word4; +#define cfg_cqv_SHIFT 14 +#define cfg_cqv_MASK 0x00000003 +#define cfg_cqv_WORD word4 + uint32_t word5; + uint32_t word6; +#define cfg_mqv_SHIFT 14 +#define cfg_mqv_MASK 0x00000003 +#define cfg_mqv_WORD word6 + uint32_t word7; + uint32_t word8; +#define cfg_wqv_SHIFT 14 +#define cfg_wqv_MASK 0x00000003 +#define cfg_wqv_WORD word8 + uint32_t word9; + uint32_t word10; +#define cfg_rqv_SHIFT 14 +#define cfg_rqv_MASK 0x00000003 +#define cfg_rqv_WORD word10 + uint32_t word11; +#define cfg_rq_db_window_SHIFT 28 +#define cfg_rq_db_window_MASK 0x0000000f +#define cfg_rq_db_window_WORD word11 + uint32_t word12; +#define cfg_fcoe_SHIFT 0 +#define cfg_fcoe_MASK 0x00000001 +#define cfg_fcoe_WORD word12 +#define cfg_phwq_SHIFT 15 +#define cfg_phwq_MASK 0x00000001 +#define cfg_phwq_WORD word12 +#define cfg_loopbk_scope_SHIFT 28 +#define cfg_loopbk_scope_MASK 0x0000000f +#define cfg_loopbk_scope_WORD word12 + uint32_t sge_supp_len; + uint32_t word14; +#define cfg_sgl_page_cnt_SHIFT 0 +#define cfg_sgl_page_cnt_MASK 0x0000000f +#define cfg_sgl_page_cnt_WORD word14 +#define cfg_sgl_page_size_SHIFT 8 +#define cfg_sgl_page_size_MASK 0x000000ff +#define cfg_sgl_page_size_WORD word14 +#define cfg_sgl_pp_align_SHIFT 16 +#define cfg_sgl_pp_align_MASK 0x000000ff +#define cfg_sgl_pp_align_WORD word14 + uint32_t word15; + uint32_t word16; + uint32_t word17; + uint32_t word18; + uint32_t word19; +}; + +struct lpfc_mbx_get_sli4_parameters { + struct mbox_header header; + struct lpfc_sli4_parameters sli4_parameters; +}; + /* Mailbox Completion Queue Error Messages */ #define MB_CQE_STATUS_SUCCESS 0x0 #define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1 @@ -2103,7 +2192,8 @@ struct lpfc_mqe { struct lpfc_mbx_post_hdr_tmpl hdr_tmpl; struct lpfc_mbx_query_fw_cfg query_fw_cfg; struct lpfc_mbx_supp_pages supp_pages; - struct lpfc_mbx_sli4_params sli4_params; + struct lpfc_mbx_pc_sli4_params sli4_params; + struct lpfc_mbx_get_sli4_parameters get_sli4_parameters; struct lpfc_mbx_nop nop; } un; }; @@ -2381,6 +2471,10 @@ struct wqe_common { #define wqe_wqes_SHIFT 15 #define wqe_wqes_MASK 0x00000001 #define wqe_wqes_WORD word10 +/* Note that this field overlaps above fields */ +#define wqe_wqid_SHIFT 1 +#define wqe_wqid_MASK 0x0000007f +#define wqe_wqid_WORD word10 #define wqe_pri_SHIFT 16 #define wqe_pri_MASK 0x00000007 #define wqe_pri_WORD word10 @@ -2599,7 +2693,8 @@ struct fcp_iwrite64_wqe { uint32_t total_xfer_len; uint32_t 
initial_xfer_len; struct wqe_common wqe_com; /* words 6-11 */ - uint32_t rsvd_12_15[4]; /* word 12-15 */ + uint32_t rsrvd12; + struct ulp_bde64 ph_bde; /* words 13-15 */ }; struct fcp_iread64_wqe { @@ -2608,7 +2703,8 @@ struct fcp_iread64_wqe { uint32_t total_xfer_len; /* word 4 */ uint32_t rsrvd5; /* word 5 */ struct wqe_common wqe_com; /* words 6-11 */ - uint32_t rsvd_12_15[4]; /* word 12-15 */ + uint32_t rsrvd12; + struct ulp_bde64 ph_bde; /* words 13-15 */ }; struct fcp_icmnd64_wqe { diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 6d0b36aa3389..0b5f76caa6e4 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -4283,36 +4283,37 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) goto out_free_bsmbx; } - /* Get the Supported Pages. It is always available. */ + /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */ lpfc_supported_pages(mboxq); rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); - if (unlikely(rc)) { - rc = -EIO; - mempool_free(mboxq, phba->mbox_mem_pool); - goto out_free_bsmbx; - } - - mqe = &mboxq->u.mqe; - memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3), - LPFC_MAX_SUPPORTED_PAGES); - for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) { - switch (pn_page[i]) { - case LPFC_SLI4_PARAMETERS: - phba->sli4_hba.pc_sli4_params.supported = 1; - break; - default: - break; + if (!rc) { + mqe = &mboxq->u.mqe; + memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3), + LPFC_MAX_SUPPORTED_PAGES); + for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) { + switch (pn_page[i]) { + case LPFC_SLI4_PARAMETERS: + phba->sli4_hba.pc_sli4_params.supported = 1; + break; + default: + break; + } + } + /* Read the port's SLI4 Parameters capabilities if supported. */ + if (phba->sli4_hba.pc_sli4_params.supported) + rc = lpfc_pc_sli4_params_get(phba, mboxq); + if (rc) { + mempool_free(mboxq, phba->mbox_mem_pool); + rc = -EIO; + goto out_free_bsmbx; } } - - /* Read the port's SLI4 Parameters capabilities if supported. */ - if (phba->sli4_hba.pc_sli4_params.supported) - rc = lpfc_pc_sli4_params_get(phba, mboxq); + /* + * Get sli4 parameters that override parameters from Port capabilities. + * If this call fails it is not a critical error so continue loading. + */ + lpfc_get_sli4_parameters(phba, mboxq); mempool_free(mboxq, phba->mbox_mem_pool); - if (rc) { - rc = -EIO; - goto out_free_bsmbx; - } /* Create all the SLI4 queues */ rc = lpfc_sli4_queue_create(phba); if (rc) @@ -7810,7 +7811,7 @@ lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) mqe = &mboxq->u.mqe; /* Read the port's SLI4 Parameters port capabilities */ - lpfc_sli4_params(mboxq); + lpfc_pc_sli4_params(mboxq); if (!phba->sli4_hba.intr_enable) rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); else { @@ -7853,6 +7854,66 @@ lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) return rc; } +/** + * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS. + * @phba: Pointer to HBA context object. + * @mboxq: Pointer to the mailboxq memory for the mailbox command response. + * + * This function is called in the SLI4 code path to read the port's + * sli4 capabilities. + * + * This function may be be called from any context that can block-wait + * for the completion. The expectation is that this routine is called + * typically from probe_one or from the online routine. 
+ **/ +int +lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + int rc; + struct lpfc_mqe *mqe = &mboxq->u.mqe; + struct lpfc_pc_sli4_params *sli4_params; + int length; + struct lpfc_sli4_parameters *mbx_sli4_parameters; + + /* Read the port's SLI4 Config Parameters */ + length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS, + length, LPFC_SLI4_MBX_EMBED); + if (!phba->sli4_hba.intr_enable) + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + else + rc = lpfc_sli_issue_mbox_wait(phba, mboxq, + lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG)); + if (unlikely(rc)) + return rc; + sli4_params = &phba->sli4_hba.pc_sli4_params; + mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters; + sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters); + sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters); + sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters); + sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1, + mbx_sli4_parameters); + sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2, + mbx_sli4_parameters); + if (bf_get(cfg_phwq, mbx_sli4_parameters)) + phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED; + else + phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED; + sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len; + sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters); + sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters); + sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters); + sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters); + sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters); + sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt, + mbx_sli4_parameters); + sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align, + mbx_sli4_parameters); + return 0; +} + /** * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem. * @pdev: pointer to PCI device diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index 23403c650207..9fb4345ec021 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -1692,7 +1692,7 @@ lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox) * @mbox: pointer to lpfc mbox command. * @subsystem: The sli4 config sub mailbox subsystem. * @opcode: The sli4 config sub mailbox command opcode. - * @length: Length of the sli4 config mailbox command. + * @length: Length of the sli4 config mailbox command (including sub-header). * * This routine sets up the header fields of SLI4 specific mailbox command * for sending IOCTL command. @@ -1723,14 +1723,14 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox, if (emb) { /* Set up main header fields */ bf_set(lpfc_mbox_hdr_emb, &sli4_config->header.cfg_mhdr, 1); - sli4_config->header.cfg_mhdr.payload_length = - LPFC_MBX_CMD_HDR_LENGTH + length; + sli4_config->header.cfg_mhdr.payload_length = length; /* Set up sub-header fields following main header */ bf_set(lpfc_mbox_hdr_opcode, &sli4_config->header.cfg_shdr.request, opcode); bf_set(lpfc_mbox_hdr_subsystem, &sli4_config->header.cfg_shdr.request, subsystem); - sli4_config->header.cfg_shdr.request.request_length = length; + sli4_config->header.cfg_shdr.request.request_length = + length - LPFC_MBX_CMD_HDR_LENGTH; return length; } @@ -1902,6 +1902,7 @@ lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq) /* Set up host requested features. 
*/ bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1); + bf_set(lpfc_mbx_rq_ftr_rq_perfh, &mboxq->u.mqe.un.req_ftrs, 1); /* Enable DIF (block guard) only if configured to do so. */ if (phba->cfg_enable_bg) @@ -2159,17 +2160,16 @@ lpfc_supported_pages(struct lpfcMboxq *mbox) } /** - * lpfc_sli4_params - Initialize the PORT_CAPABILITIES SLI4 Params - * mailbox command. + * lpfc_pc_sli4_params - Initialize the PORT_CAPABILITIES SLI4 Params mbox cmd. * @mbox: pointer to lpfc mbox command to initialize. * * The PORT_CAPABILITIES SLI4 parameters mailbox command is issued to * retrieve the particular SLI4 features supported by the port. **/ void -lpfc_sli4_params(struct lpfcMboxq *mbox) +lpfc_pc_sli4_params(struct lpfcMboxq *mbox) { - struct lpfc_mbx_sli4_params *sli4_params; + struct lpfc_mbx_pc_sli4_params *sli4_params; memset(mbox, 0, sizeof(*mbox)); sli4_params = &mbox->u.mqe.un.sli4_params; diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index c97751c95d77..af5498d735e2 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -1981,12 +1981,14 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) struct scatterlist *sgel = NULL; struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl; + struct sli4_sge *first_data_sgl; IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; dma_addr_t physaddr; uint32_t num_bde = 0; uint32_t dma_len; uint32_t dma_offset = 0; int nseg; + struct ulp_bde64 *bde; /* * There are three possibilities here - use scatter-gather segment, use @@ -2011,7 +2013,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) bf_set(lpfc_sli4_sge_last, sgl, 0); sgl->word2 = cpu_to_le32(sgl->word2); sgl += 1; - + first_data_sgl = sgl; lpfc_cmd->seg_cnt = nseg; if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:" @@ -2047,6 +2049,17 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) dma_offset += dma_len; sgl++; } + /* setup the performance hint (first data BDE) if enabled */ + if (phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) { + bde = (struct ulp_bde64 *) + &(iocb_cmd->unsli3.sli3Words[5]); + bde->addrLow = first_data_sgl->addr_lo; + bde->addrHigh = first_data_sgl->addr_hi; + bde->tus.f.bdeSize = + le32_to_cpu(first_data_sgl->sge_len); + bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; + bde->tus.w = cpu_to_le32(bde->tus.w); + } } else { sgl += 1; /* clear the last flag in the fcp_rsp map entry */ diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index a359d2b873ce..ed8f048dfb53 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -96,7 +96,8 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe) /* set consumption flag every once in a while */ if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL)) bf_set(wqe_wqec, &wqe->generic.wqe_com, 1); - + if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED) + bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id); lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size); /* Update the host index before invoking device */ @@ -969,7 +970,8 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) } else { sglq->state = SGL_FREED; sglq->ndlp = NULL; - list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list); + list_add_tail(&sglq->list, + &phba->sli4_hba.lpfc_sgl_list); /* Check if TXQ queue needs to be serviced */ if 
(pring->txq_cnt) @@ -4817,7 +4819,10 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) "0378 No support for fcpi mode.\n"); ftr_rsp++; } - + if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs)) + phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED; + else + phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED; /* * If the port cannot support the host's requested features * then turn off the global config parameters to disable the @@ -5004,7 +5009,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) spin_lock_irq(&phba->hbalock); phba->link_state = LPFC_LINK_DOWN; spin_unlock_irq(&phba->hbalock); - rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); + if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) + rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); out_unset_queue: /* Unset all the queues set up in this routine when error out */ if (rc) @@ -11189,7 +11195,7 @@ lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq, if (!mbox) return -ENOMEM; length = (sizeof(struct lpfc_mbx_rq_destroy) - - sizeof(struct mbox_header)); + sizeof(struct lpfc_sli4_cfg_mhdr)); lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY, length, LPFC_SLI4_MBX_EMBED); @@ -11279,7 +11285,7 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba, lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, sizeof(struct lpfc_mbx_post_sgl_pages) - - sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED); + sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED); post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *) &mbox->u.mqe.un.post_sgl_pages; @@ -12402,7 +12408,8 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page) lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE, sizeof(struct lpfc_mbx_post_hdr_tmpl) - - sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED); + sizeof(struct lpfc_sli4_cfg_mhdr), + LPFC_SLI4_MBX_EMBED); bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt, hdr_tmpl, rpi_page->page_count); bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl, diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index c7217d579e0f..aea4aa012270 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -359,6 +359,10 @@ struct lpfc_pc_sli4_params { uint32_t hdr_pp_align; uint32_t sgl_pages_max; uint32_t sgl_pp_align; + uint8_t cqv; + uint8_t mqv; + uint8_t wqv; + uint8_t rqv; }; /* SLI4 HBA data structure entries */ diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index 6b8d2952e32f..30ba5440c67a 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -464,6 +464,7 @@ disable_vport(struct fc_vport *fc_vport) struct lpfc_hba *phba = vport->phba; struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL; long timeout; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); ndlp = lpfc_findnode_did(vport, Fabric_DID); if (ndlp && NLP_CHK_NODE_ACT(ndlp) @@ -498,6 +499,9 @@ disable_vport(struct fc_vport *fc_vport) * scsi_host_put() to release the vport. 
*/ lpfc_mbx_unreg_vpi(vport); + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; + spin_unlock_irq(shost->host_lock); lpfc_vport_set_state(vport, FC_VPORT_DISABLED); lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, -- cgit v1.2.3 From 1151e3ec15c32021a8a12a123459ab5e41692898 Mon Sep 17 00:00:00 2001 From: James Smart Date: Wed, 16 Feb 2011 12:39:35 -0500 Subject: [SCSI] lpfc 8.3.21: RRQ Implementation fixes RRQ Implementation fixes - Added checks to prevent a call to findnode_did in clr_active_rrq - Added the del_sync_timer call for the rrq_tmr to the stop_hba_timers routine. - Added a check in __lpfc_set_active_rrq for the driver unloading to prevent adding an rrq when the driver is being removed. - Add code to scsi_iocb_cmpl to check for the remote stop and add the rrq. - Added the same check to els retry. - Added code to compare the source did in the els rrq to the vports did and chose the right exchange ID. - Initialize the start_cmd pointer to indicate when we have looped through all of the scsi buffers. - Remove the need for the lock around the clearing of the active bit in the rrq. - Added code to clean the els and fcp xri aborted list and remove the all of the RRQs for a deleted vport. Signed-off-by: Alex Iannicelli Signed-off-by: James Smart Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc_crtn.h | 2 +- drivers/scsi/lpfc/lpfc_els.c | 52 +++++++++-- drivers/scsi/lpfc/lpfc_hbadisc.c | 6 +- drivers/scsi/lpfc/lpfc_init.c | 13 +-- drivers/scsi/lpfc/lpfc_scsi.c | 77 +++++++++++----- drivers/scsi/lpfc/lpfc_sli.c | 191 ++++++++++++++++++--------------------- drivers/scsi/lpfc/lpfc_sli4.h | 2 + 7 files changed, 205 insertions(+), 138 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 5b4513792a89..34fbc1820c40 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -423,6 +423,6 @@ int lpfc_send_rrq(struct lpfc_hba *, struct lpfc_node_rrq *); int lpfc_set_rrq_active(struct lpfc_hba *, struct lpfc_nodelist *, uint16_t, uint16_t, uint16_t); void lpfc_cleanup_wt_rrqs(struct lpfc_hba *); -void lpfc_cleanup_vports_rrqs(struct lpfc_vport *); +void lpfc_cleanup_vports_rrqs(struct lpfc_vport *, struct lpfc_nodelist *); struct lpfc_node_rrq *lpfc_get_active_rrq(struct lpfc_vport *, uint16_t, uint32_t); diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 58e1a55d97f9..c6d01f63f557 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -2816,9 +2816,17 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, switch (irsp->ulpStatus) { case IOSTAT_FCP_RSP_ERROR: + break; case IOSTAT_REMOTE_STOP: + if (phba->sli_rev == LPFC_SLI_REV4) { + /* This IO was aborted by the target, we don't + * know the rxid and because we did not send the + * ABTS we cannot generate and RRQ. 
+ */ + lpfc_set_rrq_active(phba, ndlp, + cmdiocb->sli4_xritag, 0, 0); + } break; - case IOSTAT_LOCAL_REJECT: switch ((irsp->un.ulpWord[4] & 0xff)) { case IOERR_LOOP_OPEN_FAILURE: @@ -4014,28 +4022,34 @@ lpfc_els_clear_rrq(struct lpfc_vport *vport, uint8_t *pcmd; struct RRQ *rrq; uint16_t rxid; + uint16_t xri; struct lpfc_node_rrq *prrq; pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt); pcmd += sizeof(uint32_t); rrq = (struct RRQ *)pcmd; - rxid = bf_get(rrq_oxid, rrq); + rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg); + rxid = be16_to_cpu(bf_get(rrq_rxid, rrq)); lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x" " x%x x%x\n", - bf_get(rrq_did, rrq), - bf_get(rrq_oxid, rrq), + be32_to_cpu(bf_get(rrq_did, rrq)), + be16_to_cpu(bf_get(rrq_oxid, rrq)), rxid, iocb->iotag, iocb->iocb.ulpContext); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, "Clear RRQ: did:x%x flg:x%x exchg:x%.08x", ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg); - prrq = lpfc_get_active_rrq(vport, rxid, ndlp->nlp_DID); + if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq))) + xri = be16_to_cpu(bf_get(rrq_oxid, rrq)); + else + xri = rxid; + prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID); if (prrq) - lpfc_clr_rrq_active(phba, rxid, prrq); + lpfc_clr_rrq_active(phba, xri, prrq); return; } @@ -7582,6 +7596,32 @@ void lpfc_fabric_abort_hba(struct lpfc_hba *phba) IOERR_SLI_ABORTED); } +/** + * lpfc_sli4_vport_delete_els_xri_aborted -Remove all ndlp references for vport + * @vport: pointer to lpfc vport data structure. + * + * This routine is invoked by the vport cleanup for deletions and the cleanup + * for an ndlp on removal. + **/ +void +lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; + unsigned long iflag = 0; + + spin_lock_irqsave(&phba->hbalock, iflag); + spin_lock(&phba->sli4_hba.abts_sgl_list_lock); + list_for_each_entry_safe(sglq_entry, sglq_next, + &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { + if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) + sglq_entry->ndlp = NULL; + } + spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); + spin_unlock_irqrestore(&phba->hbalock, iflag); + return; +} + /** * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort * @phba: pointer to lpfc hba data structure. 
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 1f9c7f19c31f..63300be2e45b 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -3160,7 +3160,7 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) spin_unlock_irq(shost->host_lock); vport->unreg_vpi_cmpl = VPORT_OK; mempool_free(pmb, phba->mbox_mem_pool); - lpfc_cleanup_vports_rrqs(vport); + lpfc_cleanup_vports_rrqs(vport, NULL); /* * This shost reference might have been taken at the beginning of * lpfc_vport_delete() @@ -3900,6 +3900,8 @@ lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) return; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE); + if (vport->phba->sli_rev == LPFC_SLI_REV4) + lpfc_cleanup_vports_rrqs(vport, ndlp); lpfc_nlp_put(ndlp); return; } @@ -4289,7 +4291,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) list_del_init(&ndlp->els_retry_evt.evt_listp); list_del_init(&ndlp->dev_loss_evt.evt_listp); - + lpfc_cleanup_vports_rrqs(vport, ndlp); lpfc_unreg_rpi(vport, ndlp); return 0; diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 0b5f76caa6e4..32cd138f6543 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -945,17 +945,13 @@ static void lpfc_rrq_timeout(unsigned long ptr) { struct lpfc_hba *phba; - uint32_t tmo_posted; unsigned long iflag; phba = (struct lpfc_hba *)ptr; spin_lock_irqsave(&phba->pport->work_port_lock, iflag); - tmo_posted = phba->hba_flag & HBA_RRQ_ACTIVE; - if (!tmo_posted) - phba->hba_flag |= HBA_RRQ_ACTIVE; + phba->hba_flag |= HBA_RRQ_ACTIVE; spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); - if (!tmo_posted) - lpfc_worker_wake_up(phba); + lpfc_worker_wake_up(phba); } /** @@ -2280,6 +2276,7 @@ lpfc_cleanup(struct lpfc_vport *vport) /* Wait for any activity on ndlps to settle */ msleep(10); } + lpfc_cleanup_vports_rrqs(vport, NULL); } /** @@ -2355,6 +2352,10 @@ lpfc_stop_hba_timers(struct lpfc_hba *phba) del_timer_sync(&phba->fabric_block_timer); del_timer_sync(&phba->eratt_poll); del_timer_sync(&phba->hb_tmofunc); + if (phba->sli_rev == LPFC_SLI_REV4) { + del_timer_sync(&phba->rrq_tmr); + phba->hba_flag &= ~HBA_RRQ_ACTIVE; + } phba->hb_outstanding = 0; switch (phba->pci_dev_grp) { diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index af5498d735e2..d482dfc951f3 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -608,6 +608,32 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc) return bcnt; } +/** + * lpfc_sli4_vport_delete_fcp_xri_aborted -Remove all ndlp references for vport + * @vport: pointer to lpfc vport data structure. + * + * This routine is invoked by the vport cleanup for deletions and the cleanup + * for an ndlp on removal. 
+ **/ +void +lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_scsi_buf *psb, *next_psb; + unsigned long iflag = 0; + + spin_lock_irqsave(&phba->hbalock, iflag); + spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock); + list_for_each_entry_safe(psb, next_psb, + &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) { + if (psb->rdata && psb->rdata->pnode + && psb->rdata->pnode->vport == vport) + psb->rdata = NULL; + } + spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock); + spin_unlock_irqrestore(&phba->hbalock, iflag); +} + /** * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort * @phba: pointer to lpfc hba data structure. @@ -640,7 +666,11 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba, psb->status = IOSTAT_SUCCESS; spin_unlock( &phba->sli4_hba.abts_scsi_buf_list_lock); - ndlp = psb->rdata->pnode; + if (psb->rdata && psb->rdata->pnode) + ndlp = psb->rdata->pnode; + else + ndlp = NULL; + rrq_empty = list_empty(&phba->active_rrq_list); spin_unlock_irqrestore(&phba->hbalock, iflag); if (ndlp) @@ -964,36 +994,29 @@ lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) static struct lpfc_scsi_buf* lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) { - struct lpfc_scsi_buf *lpfc_cmd = NULL; - struct lpfc_scsi_buf *start_lpfc_cmd = NULL; - struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list; + struct lpfc_scsi_buf *lpfc_cmd ; unsigned long iflag = 0; int found = 0; spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); - list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list); - spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); - while (!found && lpfc_cmd) { + list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list, + list) { if (lpfc_test_rrq_active(phba, ndlp, - lpfc_cmd->cur_iocbq.sli4_xritag)) { - lpfc_release_scsi_buf_s4(phba, lpfc_cmd); - spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); - list_remove_head(scsi_buf_list, lpfc_cmd, - struct lpfc_scsi_buf, list); - spin_unlock_irqrestore(&phba->scsi_buf_list_lock, - iflag); - if (lpfc_cmd == start_lpfc_cmd) { - lpfc_cmd = NULL; - break; - } else - continue; - } + lpfc_cmd->cur_iocbq.sli4_xritag)) + continue; + list_del(&lpfc_cmd->list); found = 1; lpfc_cmd->seg_cnt = 0; lpfc_cmd->nonsg_phys = 0; lpfc_cmd->prot_seg_cnt = 0; + break; } - return lpfc_cmd; + spin_unlock_irqrestore(&phba->scsi_buf_list_lock, + iflag); + if (!found) + return NULL; + else + return lpfc_cmd; } /** * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA @@ -2484,6 +2507,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, lpfc_worker_wake_up(phba); break; case IOSTAT_LOCAL_REJECT: + case IOSTAT_REMOTE_STOP: if (lpfc_cmd->result == IOERR_INVALID_RPI || lpfc_cmd->result == IOERR_NO_RESOURCES || lpfc_cmd->result == IOERR_ABORT_REQUESTED || @@ -2510,6 +2534,17 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, "on unprotected cmd\n"); } } + if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP) + && (phba->sli_rev == LPFC_SLI_REV4) + && (pnode && NLP_CHK_NODE_ACT(pnode))) { + /* This IO was aborted by the target, we don't + * know the rxid and because we did not send the + * ABTS we cannot generate and RRQ. 
+ */ + lpfc_set_rrq_active(phba, pnode, + lpfc_cmd->cur_iocbq.sli4_xritag, + 0, 0); + } /* else: fall through */ default: diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index ed8f048dfb53..b85c40e3bf55 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -535,15 +535,35 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, uint16_t adj_xri; struct lpfc_node_rrq *rrq; int empty; + uint32_t did = 0; + + + if (!ndlp) + return -EINVAL; + + if (!phba->cfg_enable_rrq) + return -EINVAL; + + if (phba->pport->load_flag & FC_UNLOADING) { + phba->hba_flag &= ~HBA_RRQ_ACTIVE; + goto out; + } + did = ndlp->nlp_DID; /* * set the active bit even if there is no mem available. */ adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base; - if (!ndlp) - return -EINVAL; + + if (NLP_CHK_FREE_REQ(ndlp)) + goto out; + + if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING)) + goto out; + if (test_and_set_bit(adj_xri, ndlp->active_rrqs.xri_bitmap)) - return -EINVAL; + goto out; + rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL); if (rrq) { rrq->send_rrq = send_rrq; @@ -554,14 +574,7 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, rrq->vport = ndlp->vport; rrq->rxid = rxid; empty = list_empty(&phba->active_rrq_list); - if (phba->cfg_enable_rrq && send_rrq) - /* - * We need the xri before we can add this to the - * phba active rrq list. - */ - rrq->send_rrq = send_rrq; - else - rrq->send_rrq = 0; + rrq->send_rrq = send_rrq; list_add_tail(&rrq->list, &phba->active_rrq_list); if (!(phba->hba_flag & HBA_RRQ_ACTIVE)) { phba->hba_flag |= HBA_RRQ_ACTIVE; @@ -570,40 +583,49 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, } return 0; } - return -ENOMEM; +out: + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "2921 Can't set rrq active xri:0x%x rxid:0x%x" + " DID:0x%x Send:%d\n", + xritag, rxid, did, send_rrq); + return -EINVAL; } /** - * __lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap. + * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap. * @phba: Pointer to HBA context object. * @xritag: xri used in this exchange. * @rrq: The RRQ to be cleared. * - * This function is called with hbalock held. This function **/ -static void -__lpfc_clr_rrq_active(struct lpfc_hba *phba, - uint16_t xritag, - struct lpfc_node_rrq *rrq) +void +lpfc_clr_rrq_active(struct lpfc_hba *phba, + uint16_t xritag, + struct lpfc_node_rrq *rrq) { uint16_t adj_xri; - struct lpfc_nodelist *ndlp; + struct lpfc_nodelist *ndlp = NULL; - ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID); + if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp)) + ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID); /* The target DID could have been swapped (cable swap) * we should use the ndlp from the findnode if it is * available. 
*/ - if (!ndlp) + if ((!ndlp) && rrq->ndlp) ndlp = rrq->ndlp; + if (!ndlp) + goto out; + adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base; if (test_and_clear_bit(adj_xri, ndlp->active_rrqs.xri_bitmap)) { rrq->send_rrq = 0; rrq->xritag = 0; rrq->rrq_stop_time = 0; } +out: mempool_free(rrq, phba->rrq_pool); } @@ -628,34 +650,34 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba) struct lpfc_node_rrq *nextrrq; unsigned long next_time; unsigned long iflags; + LIST_HEAD(send_rrq); spin_lock_irqsave(&phba->hbalock, iflags); phba->hba_flag &= ~HBA_RRQ_ACTIVE; next_time = jiffies + HZ * (phba->fc_ratov + 1); list_for_each_entry_safe(rrq, nextrrq, - &phba->active_rrq_list, list) { - if (time_after(jiffies, rrq->rrq_stop_time)) { - list_del(&rrq->list); - if (!rrq->send_rrq) - /* this call will free the rrq */ - __lpfc_clr_rrq_active(phba, rrq->xritag, rrq); - else { - /* if we send the rrq then the completion handler - * will clear the bit in the xribitmap. - */ - spin_unlock_irqrestore(&phba->hbalock, iflags); - if (lpfc_send_rrq(phba, rrq)) { - lpfc_clr_rrq_active(phba, rrq->xritag, - rrq); - } - spin_lock_irqsave(&phba->hbalock, iflags); - } - } else if (time_before(rrq->rrq_stop_time, next_time)) + &phba->active_rrq_list, list) { + if (time_after(jiffies, rrq->rrq_stop_time)) + list_move(&rrq->list, &send_rrq); + else if (time_before(rrq->rrq_stop_time, next_time)) next_time = rrq->rrq_stop_time; } spin_unlock_irqrestore(&phba->hbalock, iflags); if (!list_empty(&phba->active_rrq_list)) mod_timer(&phba->rrq_tmr, next_time); + list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) { + list_del(&rrq->list); + if (!rrq->send_rrq) + /* this call will free the rrq */ + lpfc_clr_rrq_active(phba, rrq->xritag, rrq); + else if (lpfc_send_rrq(phba, rrq)) { + /* if we send the rrq then the completion handler + * will clear the bit in the xribitmap. + */ + lpfc_clr_rrq_active(phba, rrq->xritag, + rrq); + } + } } /** @@ -693,29 +715,37 @@ lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did) /** * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport. * @vport: Pointer to vport context object. - * - * Remove all active RRQs for this vport from the phba->active_rrq_list and - * clear the rrq. + * @ndlp: Pointer to the lpfc_node_list structure. + * If ndlp is NULL Remove all active RRQs for this vport from the + * phba->active_rrq_list and clear the rrq. + * If ndlp is not NULL then only remove rrqs for this vport & this ndlp. 
**/ void -lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport) +lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { struct lpfc_hba *phba = vport->phba; struct lpfc_node_rrq *rrq; struct lpfc_node_rrq *nextrrq; unsigned long iflags; + LIST_HEAD(rrq_list); if (phba->sli_rev != LPFC_SLI_REV4) return; - spin_lock_irqsave(&phba->hbalock, iflags); - list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) { - if (rrq->vport == vport) { - list_del(&rrq->list); - __lpfc_clr_rrq_active(phba, rrq->xritag, rrq); - } + if (!ndlp) { + lpfc_sli4_vport_delete_els_xri_aborted(vport); + lpfc_sli4_vport_delete_fcp_xri_aborted(vport); } + spin_lock_irqsave(&phba->hbalock, iflags); + list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) + if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp)) + list_move(&rrq->list, &rrq_list); spin_unlock_irqrestore(&phba->hbalock, iflags); + + list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) { + list_del(&rrq->list); + lpfc_clr_rrq_active(phba, rrq->xritag, rrq); + } } /** @@ -733,24 +763,27 @@ lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba) struct lpfc_node_rrq *nextrrq; unsigned long next_time; unsigned long iflags; + LIST_HEAD(rrq_list); if (phba->sli_rev != LPFC_SLI_REV4) return; spin_lock_irqsave(&phba->hbalock, iflags); phba->hba_flag &= ~HBA_RRQ_ACTIVE; next_time = jiffies + HZ * (phba->fc_ratov * 2); - list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) { + list_splice_init(&phba->active_rrq_list, &rrq_list); + spin_unlock_irqrestore(&phba->hbalock, iflags); + + list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) { list_del(&rrq->list); - __lpfc_clr_rrq_active(phba, rrq->xritag, rrq); + lpfc_clr_rrq_active(phba, rrq->xritag, rrq); } - spin_unlock_irqrestore(&phba->hbalock, iflags); if (!list_empty(&phba->active_rrq_list)) mod_timer(&phba->rrq_tmr, next_time); } /** - * __lpfc_test_rrq_active - Test RRQ bit in xri_bitmap. + * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap. * @phba: Pointer to HBA context object. * @ndlp: Targets nodelist pointer for this exchange. * @xritag the xri in the bitmap to test. @@ -759,8 +792,8 @@ lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba) * returns 0 = rrq not active for this xri * 1 = rrq is valid for this xri. **/ -static int -__lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, +int +lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, uint16_t xritag) { uint16_t adj_xri; @@ -802,52 +835,6 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, return ret; } -/** - * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap. - * @phba: Pointer to HBA context object. - * @xritag: xri used in this exchange. - * @rrq: The RRQ to be cleared. - * - * This function is takes the hbalock. - **/ -void -lpfc_clr_rrq_active(struct lpfc_hba *phba, - uint16_t xritag, - struct lpfc_node_rrq *rrq) -{ - unsigned long iflags; - - spin_lock_irqsave(&phba->hbalock, iflags); - __lpfc_clr_rrq_active(phba, xritag, rrq); - spin_unlock_irqrestore(&phba->hbalock, iflags); - return; -} - - - -/** - * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap. - * @phba: Pointer to HBA context object. - * @ndlp: Targets nodelist pointer for this exchange. - * @xritag the xri in the bitmap to test. - * - * This function takes the hbalock. - * returns 0 = rrq not active for this xri - * 1 = rrq is valid for this xri. 
- **/ -int -lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, - uint16_t xritag) -{ - int ret; - unsigned long iflags; - - spin_lock_irqsave(&phba->hbalock, iflags); - ret = __lpfc_test_rrq_active(phba, ndlp, xritag); - spin_unlock_irqrestore(&phba->hbalock, iflags); - return ret; -} - /** * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool * @phba: Pointer to HBA context object. @@ -885,7 +872,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) return NULL; adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base; - if (__lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) { + if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) { /* This xri has an rrq outstanding for this DID. * put it back in the list and get another xri. */ diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index aea4aa012270..435a09d331ee 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -566,6 +566,8 @@ void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *, struct sli4_wcqe_xri_aborted *); void lpfc_sli4_els_xri_aborted(struct lpfc_hba *, struct sli4_wcqe_xri_aborted *); +void lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *); +void lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *); int lpfc_sli4_brdreset(struct lpfc_hba *); int lpfc_sli4_add_fcf_record(struct lpfc_hba *, struct fcf_record *); void lpfc_sli_remove_dflt_fcf(struct lpfc_hba *); -- cgit v1.2.3 From 924941444b481fc862b2de5e1dd7692ca85274d7 Mon Sep 17 00:00:00 2001 From: James Smart Date: Wed, 16 Feb 2011 12:39:44 -0500 Subject: [SCSI] lpfc 8.3.21: FC Discovery changes FC Discovery changes - Treat received PLOGI while logged in as a relogin (unregister and reregister). - Added a timer to delay Nport discovery when clean bit is cleared and Fabric portname/nodename/FCID is changed. - Invalidate Port's DID when receiving PLOGI from p2p port with CONFIG_PORT mailbox command. 
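At the heart of the delayed-discovery change is a comparison of the newly assigned
fabric identity against the values remembered from the previous FLOGI/FDISC. A
minimal sketch of that check, using the vport fields introduced by this patch (the
helper name here is illustrative only; the authoritative version is
lpfc_check_clean_addr_bit() in the lpfc_els.c hunk below):

	/* Fabric parameters are considered changed when the FCID or the
	 * fabric WWPN/WWNN differs from what was recorded on the previous
	 * FLOGI/FDISC.  If they changed while the accept's Clean Address
	 * bit is 0, and this is not the initial discovery (or the
	 * lpfc_delay_discovery module parameter is set), FC_DISC_DELAYED
	 * is set and NPort discovery is re-driven from the
	 * delayed_disc_tmo timer after ra_tov seconds.
	 */
	static int fabric_identity_changed(struct lpfc_vport *vport,
					   struct serv_parm *sp)
	{
		return (vport->fc_prevDID != vport->fc_myDID) ||
		       memcmp(&vport->fabric_portname, &sp->portName,
			      sizeof(struct lpfc_name)) ||
		       memcmp(&vport->fabric_nodename, &sp->nodeName,
			      sizeof(struct lpfc_name));
	}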
Signed-off-by: Alex Iannicelli Signed-off-by: James Smart Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc.h | 5 ++ drivers/scsi/lpfc/lpfc_attr.c | 21 ++++++- drivers/scsi/lpfc/lpfc_crtn.h | 3 + drivers/scsi/lpfc/lpfc_ct.c | 49 +++++++++++++++ drivers/scsi/lpfc/lpfc_els.c | 118 ++++++++++++++++++++++++++++++++++++- drivers/scsi/lpfc/lpfc_hbadisc.c | 7 +++ drivers/scsi/lpfc/lpfc_hw.h | 6 ++ drivers/scsi/lpfc/lpfc_init.c | 6 ++ drivers/scsi/lpfc/lpfc_nportdisc.c | 11 +++- 9 files changed, 219 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index b388e436c687..e2a763426a7c 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -325,6 +325,7 @@ struct lpfc_vport { #define FC_VPORT_CVL_RCVD 0x400000 /* VLink failed due to CVL */ #define FC_VFI_REGISTERED 0x800000 /* VFI is registered */ #define FC_FDISC_COMPLETED 0x1000000/* FDISC completed */ +#define FC_DISC_DELAYED 0x2000000/* Delay NPort discovery */ uint32_t ct_flags; #define FC_CT_RFF_ID 0x1 /* RFF_ID accepted by switch */ @@ -348,6 +349,8 @@ struct lpfc_vport { uint32_t fc_myDID; /* fibre channel S_ID */ uint32_t fc_prevDID; /* previous fibre channel S_ID */ + struct lpfc_name fabric_portname; + struct lpfc_name fabric_nodename; int32_t stopped; /* HBA has not been restarted since last ERATT */ uint8_t fc_linkspeed; /* Link speed after last READ_LA */ @@ -372,6 +375,7 @@ struct lpfc_vport { #define WORKER_DISC_TMO 0x1 /* vport: Discovery timeout */ #define WORKER_ELS_TMO 0x2 /* vport: ELS timeout */ #define WORKER_FDMI_TMO 0x4 /* vport: FDMI timeout */ +#define WORKER_DELAYED_DISC_TMO 0x8 /* vport: delayed discovery */ #define WORKER_MBOX_TMO 0x100 /* hba: MBOX timeout */ #define WORKER_HB_TMO 0x200 /* hba: Heart beat timeout */ @@ -382,6 +386,7 @@ struct lpfc_vport { struct timer_list fc_fdmitmo; struct timer_list els_tmofunc; + struct timer_list delayed_disc_tmo; int unreg_vpi_cmpl; diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 745774c5a4be..a57f6c0cbbba 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2009 Emulex. All rights reserved. * + * Copyright (C) 2004-2011 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -3365,6 +3365,25 @@ unsigned char lpfc_prot_guard = SHOST_DIX_GUARD_IP; module_param(lpfc_prot_guard, byte, 0); MODULE_PARM_DESC(lpfc_prot_guard, "host protection guard type"); +/* + * Delay initial NPort discovery when Clean Address bit is cleared in + * FLOGI/FDISC accept and FCID/Fabric name/Fabric portname is changed. + * This parameter can have value 0 or 1. + * When this parameter is set to 0, no delay is added to the initial + * discovery. + * When this parameter is set to non-zero value, initial Nport discovery is + * delayed by ra_tov seconds when Clean Address bit is cleared in FLOGI/FDISC + * accept and FCID/Fabric name/Fabric portname is changed. + * Driver always delay Nport discovery for subsequent FLOGI/FDISC completion + * when Clean Address bit is cleared in FLOGI/FDISC + * accept and FCID/Fabric name/Fabric portname is changed. + * Default value is 0. 
+ */ +int lpfc_delay_discovery; +module_param(lpfc_delay_discovery, int, 0); +MODULE_PARM_DESC(lpfc_delay_discovery, + "Delay NPort discovery when Clean Address bit is cleared. " + "Allowed values: 0,1."); /* * lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 34fbc1820c40..3d40023f4804 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -167,6 +167,8 @@ int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t); int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int); void lpfc_fdmi_tmo(unsigned long); void lpfc_fdmi_timeout_handler(struct lpfc_vport *); +void lpfc_delayed_disc_tmo(unsigned long); +void lpfc_delayed_disc_timeout_handler(struct lpfc_vport *); int lpfc_config_port_prep(struct lpfc_hba *); int lpfc_config_port_post(struct lpfc_hba *); @@ -341,6 +343,7 @@ extern struct fc_function_template lpfc_transport_functions; extern struct fc_function_template lpfc_vport_transport_functions; extern int lpfc_sli_mode; extern int lpfc_enable_npiv; +extern int lpfc_delay_discovery; int lpfc_vport_symbolic_node_name(struct lpfc_vport *, char *, size_t); int lpfc_vport_symbolic_port_name(struct lpfc_vport *, char *, size_t); diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index c004fa9a681e..d9edfd90d7ff 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -1738,6 +1738,55 @@ fdmi_cmd_exit: return 1; } +/** + * lpfc_delayed_disc_tmo - Timeout handler for delayed discovery timer. + * @ptr - Context object of the timer. + * + * This function set the WORKER_DELAYED_DISC_TMO flag and wake up + * the worker thread. + **/ +void +lpfc_delayed_disc_tmo(unsigned long ptr) +{ + struct lpfc_vport *vport = (struct lpfc_vport *)ptr; + struct lpfc_hba *phba = vport->phba; + uint32_t tmo_posted; + unsigned long iflag; + + spin_lock_irqsave(&vport->work_port_lock, iflag); + tmo_posted = vport->work_port_events & WORKER_DELAYED_DISC_TMO; + if (!tmo_posted) + vport->work_port_events |= WORKER_DELAYED_DISC_TMO; + spin_unlock_irqrestore(&vport->work_port_lock, iflag); + + if (!tmo_posted) + lpfc_worker_wake_up(phba); + return; +} + +/** + * lpfc_delayed_disc_timeout_handler - Function called by worker thread to + * handle delayed discovery. + * @vport: pointer to a host virtual N_Port data structure. + * + * This function start nport discovery of the vport. + **/ +void +lpfc_delayed_disc_timeout_handler(struct lpfc_vport *vport) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + spin_lock_irq(shost->host_lock); + if (!(vport->fc_flag & FC_DISC_DELAYED)) { + spin_unlock_irq(shost->host_lock); + return; + } + vport->fc_flag &= ~FC_DISC_DELAYED; + spin_unlock_irq(shost->host_lock); + + lpfc_do_scr_ns_plogi(vport->phba, vport); +} + void lpfc_fdmi_tmo(unsigned long ptr) { diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index c6d01f63f557..8e28edf9801e 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2009 Emulex. All rights reserved. * + * Copyright (C) 2004-2011 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -484,6 +484,59 @@ fail: return rc; } +/** + * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean. + * @vport: pointer to a host virtual N_Port data structure. + * @sp: pointer to service parameter data structure. + * + * This routine is called from FLOGI/FDISC completion handler functions. + * lpfc_check_clean_addr_bit return 1 when FCID/Fabric portname/ Fabric + * node nodename is changed in the completion service parameter else return + * 0. This function also set flag in the vport data structure to delay + * NP_Port discovery after the FLOGI/FDISC completion if Clean address bit + * in FLOGI/FDISC response is cleared and FCID/Fabric portname/ Fabric + * node nodename is changed in the completion service parameter. + * + * Return code + * 0 - FCID and Fabric Nodename and Fabric portname is not changed. + * 1 - FCID or Fabric Nodename or Fabric portname is changed. + * + **/ +static uint8_t +lpfc_check_clean_addr_bit(struct lpfc_vport *vport, + struct serv_parm *sp) +{ + uint8_t fabric_param_changed = 0; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + if ((vport->fc_prevDID != vport->fc_myDID) || + memcmp(&vport->fabric_portname, &sp->portName, + sizeof(struct lpfc_name)) || + memcmp(&vport->fabric_nodename, &sp->nodeName, + sizeof(struct lpfc_name))) + fabric_param_changed = 1; + + /* + * Word 1 Bit 31 in common service parameter is overloaded. + * Word 1 Bit 31 in FLOGI request is multiple NPort request + * Word 1 Bit 31 in FLOGI response is clean address bit + * + * If fabric parameter is changed and clean address bit is + * cleared delay nport discovery if + * - vport->fc_prevDID != 0 (not initial discovery) OR + * - lpfc_delay_discovery module parameter is set. + */ + if (fabric_param_changed && !sp->cmn.clean_address_bit && + (vport->fc_prevDID || lpfc_delay_discovery)) { + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_DISC_DELAYED; + spin_unlock_irq(shost->host_lock); + } + + return fabric_param_changed; +} + + /** * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port * @vport: pointer to a host virtual N_Port data structure. 
@@ -512,6 +565,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct lpfc_hba *phba = vport->phba; struct lpfc_nodelist *np; struct lpfc_nodelist *next_np; + uint8_t fabric_param_changed; spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_FABRIC; @@ -544,6 +598,12 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_class_sup |= FC_COS_CLASS4; ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb; + + fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); + memcpy(&vport->fabric_portname, &sp->portName, + sizeof(struct lpfc_name)); + memcpy(&vport->fabric_nodename, &sp->nodeName, + sizeof(struct lpfc_name)); memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { @@ -565,7 +625,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, } } - if ((vport->fc_prevDID != vport->fc_myDID) && + if (fabric_param_changed && !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { /* If our NportID changed, we need to ensure all @@ -2203,6 +2263,7 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct Scsi_Host *shost = lpfc_shost_from_vport(vport); IOCB_t *irsp; struct lpfc_sli *psli; + struct lpfcMboxq *mbox; psli = &phba->sli; /* we pass cmdiocb to state machine which needs rspiocb as well */ @@ -2260,6 +2321,21 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, NLP_EVT_CMPL_LOGO); out: lpfc_els_free_iocb(phba, cmdiocb); + /* If we are in pt2pt mode, we could rcv new S_ID on PLOGI */ + if ((vport->fc_flag & FC_PT2PT) && + !(vport->fc_flag & FC_PT2PT_PLOGI)) { + phba->pport->fc_myDID = 0; + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (mbox) { + lpfc_config_link(phba, mbox); + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + mbox->vport = vport; + if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == + MBX_NOT_FINISHED) { + mempool_free(mbox, phba->mbox_mem_pool); + } + } + } return; } @@ -6181,6 +6257,11 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, if (vport->load_flag & FC_UNLOADING) goto dropit; + /* If NPort discovery is delayed drop incoming ELS */ + if ((vport->fc_flag & FC_DISC_DELAYED) && + (cmd != ELS_CMD_PLOGI)) + goto dropit; + ndlp = lpfc_findnode_did(vport, did); if (!ndlp) { /* Cannot find existing Fabric ndlp, so allocate a new one */ @@ -6233,6 +6314,12 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); lpfc_send_els_event(vport, ndlp, payload); + + /* If Nport discovery is delayed, reject PLOGIs */ + if (vport->fc_flag & FC_DISC_DELAYED) { + rjt_err = LSRJT_UNABLE_TPC; + break; + } if (vport->port_state < LPFC_DISC_AUTH) { if (!(phba->pport->fc_flag & FC_PT2PT) || (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { @@ -6611,6 +6698,21 @@ void lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) { struct lpfc_nodelist *ndlp, *ndlp_fdmi; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + /* + * If lpfc_delay_discovery parameter is set and the clean address + * bit is cleared and fc fabric parameters chenged, delay FC NPort + * discovery. 
+ */ + spin_lock_irq(shost->host_lock); + if (vport->fc_flag & FC_DISC_DELAYED) { + spin_unlock_irq(shost->host_lock); + mod_timer(&vport->delayed_disc_tmo, + jiffies + HZ * phba->fc_ratov); + return; + } + spin_unlock_irq(shost->host_lock); ndlp = lpfc_findnode_did(vport, NameServer_DID); if (!ndlp) { @@ -6953,6 +7055,9 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_nodelist *next_np; IOCB_t *irsp = &rspiocb->iocb; struct lpfc_iocbq *piocb; + struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp; + struct serv_parm *sp; + uint8_t fabric_param_changed; lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0123 FDISC completes. x%x/x%x prevDID: x%x\n", @@ -6996,7 +7101,14 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID; lpfc_vport_set_state(vport, FC_VPORT_ACTIVE); - if ((vport->fc_prevDID != vport->fc_myDID) && + prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); + sp = prsp->virt + sizeof(uint32_t); + fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); + memcpy(&vport->fabric_portname, &sp->portName, + sizeof(struct lpfc_name)); + memcpy(&vport->fabric_nodename, &sp->nodeName, + sizeof(struct lpfc_name)); + if (fabric_param_changed && !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { /* If our NportID changed, we need to ensure all * remaining NPORTs get unreg_login'ed so we can diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 63300be2e45b..154c715fb3af 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -658,6 +658,8 @@ lpfc_work_done(struct lpfc_hba *phba) lpfc_ramp_down_queue_handler(phba); if (work_port_events & WORKER_RAMP_UP_QUEUE) lpfc_ramp_up_queue_handler(phba); + if (work_port_events & WORKER_DELAYED_DISC_TMO) + lpfc_delayed_disc_timeout_handler(vport); } lpfc_destroy_vport_work_array(phba, vports); @@ -838,6 +840,11 @@ lpfc_linkdown_port(struct lpfc_vport *vport) lpfc_port_link_failure(vport); + /* Stop delayed Nport discovery */ + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_DISC_DELAYED; + spin_unlock_irq(shost->host_lock); + del_timer_sync(&vport->delayed_disc_tmo); } int diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 96ed3ba6ba95..5a4a196baa90 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -341,6 +341,12 @@ struct csp { uint8_t bbCreditMsb; uint8_t bbCreditlsb; /* FC Word 0, byte 3 */ +/* + * Word 1 Bit 31 in common service parameter is overloaded. 
+ * Word 1 Bit 31 in FLOGI request is multiple NPort request + * Word 1 Bit 31 in FLOGI response is clean address bit + */ +#define clean_address_bit request_multiple_Nport /* Word 1, bit 31 */ #ifdef __BIG_ENDIAN_BITFIELD uint16_t request_multiple_Nport:1; /* FC Word 1, bit 31 */ uint16_t randomOffset:1; /* FC Word 1, bit 30 */ diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 32cd138f6543..5e84d2a6ada6 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -2292,6 +2292,7 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport) { del_timer_sync(&vport->els_tmofunc); del_timer_sync(&vport->fc_fdmitmo); + del_timer_sync(&vport->delayed_disc_tmo); lpfc_can_disctmo(vport); return; } @@ -2733,6 +2734,11 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) init_timer(&vport->els_tmofunc); vport->els_tmofunc.function = lpfc_els_timeout; vport->els_tmofunc.data = (unsigned long)vport; + + init_timer(&vport->delayed_disc_tmo); + vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo; + vport->delayed_disc_tmo.data = (unsigned long)vport; + error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); if (error) goto out_put_shost; diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index d85a7423a694..52b35159fc35 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -350,7 +350,11 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb; - /* no need to reg_login if we are already in one of these states */ + /* + * Need to unreg_login if we are already in one of these states and + * change to NPR state. This will block the port until after the ACC + * completes and the reg_login is issued and completed. + */ switch (ndlp->nlp_state) { case NLP_STE_NPR_NODE: if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) @@ -359,8 +363,9 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, case NLP_STE_PRLI_ISSUE: case NLP_STE_UNMAPPED_NODE: case NLP_STE_MAPPED_NODE: - lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL); - return 1; + lpfc_unreg_rpi(vport, ndlp); + ndlp->nlp_prev_state = ndlp->nlp_state; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); } if ((vport->fc_flag & FC_PT2PT) && -- cgit v1.2.3 From ab56dc2e1d32556971e0729b3a6c37e0ff3104a6 Mon Sep 17 00:00:00 2001 From: James Smart Date: Wed, 16 Feb 2011 12:39:57 -0500 Subject: [SCSI] lpfc 8.3.21: Initialization and user interface changes - Make link speed not supported by port message an error message. - Add support for new SLI failure codes add sysfs parameter to reflect the security setting and current state. - Add all lpfc module parameters to the /sys/modules/lpfc/parameters directory. 
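The sysfs exposure comes purely from the permission argument of module_param():
a mode of 0 keeps the parameter out of sysfs, while S_IRUGO (0444) publishes it
read-only under /sys/module/lpfc/parameters/<name>. A minimal illustration of the
pattern applied throughout the lpfc_attr.c and lpfc_debugfs.c hunks below (the
parameter name here is made up for the example):

	#include <linux/module.h>
	#include <linux/stat.h>

	/* With mode 0 the parameter is settable only on the module command
	 * line; with S_IRUGO it additionally shows up read-only in
	 * /sys/module/lpfc/parameters/.  The hunks below also fix the two
	 * parameters that previously passed a bare 1 as the mode.
	 */
	static int lpfc_example_param = 1;	/* hypothetical example parameter */
	module_param(lpfc_example_param, int, S_IRUGO);
	MODULE_PARM_DESC(lpfc_example_param, "example of a sysfs-visible parameter");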
[jejb: fix up compile failure] Signed-off-by: Alex Iannicelli Signed-off-by: James Smart Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc.h | 2 +- drivers/scsi/lpfc/lpfc_attr.c | 75 ++++++++++++++++++++++++++-------------- drivers/scsi/lpfc/lpfc_debugfs.c | 8 ++--- drivers/scsi/lpfc/lpfc_hw.h | 5 ++- drivers/scsi/lpfc/lpfc_init.c | 2 +- drivers/scsi/lpfc/lpfc_mbox.c | 3 +- drivers/scsi/lpfc/lpfc_scsi.c | 14 +++++--- 7 files changed, 72 insertions(+), 37 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index e2a763426a7c..f41722e8652a 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -662,7 +662,7 @@ struct lpfc_hba { #define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */ #define LPFC_DELAY_INIT_LINK 1 /* layered driver hold off */ #define LPFC_DELAY_INIT_LINK_INDEFINITELY 2 /* wait, manual intervention */ - + uint32_t cfg_enable_dss; lpfc_vpd_t vpd; /* vital product data */ struct pci_dev *pcidev; diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index a57f6c0cbbba..e7c020df12fa 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -678,6 +678,7 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type) * * Notes: * Assumes any error from lpfc_do_offline() will be negative. + * Do not make this function static. * * Returns: * lpfc_do_offline() return code if not zero @@ -1292,6 +1293,28 @@ lpfc_fips_rev_show(struct device *dev, struct device_attribute *attr, return snprintf(buf, PAGE_SIZE, "%d\n", phba->fips_spec_rev); } +/** + * lpfc_dss_show - Return the current state of dss and the configured state + * @dev: class converted to a Scsi_host structure. + * @attr: device attribute, not used. + * @buf: on return contains the formatted text. + * + * Returns: size of formatted string. + **/ +static ssize_t +lpfc_dss_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + + return snprintf(buf, PAGE_SIZE, "%s - %sOperational\n", + (phba->cfg_enable_dss) ? "Enabled" : "Disabled", + (phba->sli3_options & LPFC_SLI3_DSS_ENABLED) ? 
+ "" : "Not "); +} + /** * lpfc_param_show - Return a cfg attribute value in decimal * @@ -1611,13 +1634,13 @@ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \ #define LPFC_ATTR(name, defval, minval, maxval, desc) \ static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, 0);\ +module_param(lpfc_##name, uint, S_IRUGO);\ MODULE_PARM_DESC(lpfc_##name, desc);\ lpfc_param_init(name, defval, minval, maxval) #define LPFC_ATTR_R(name, defval, minval, maxval, desc) \ static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, 0);\ +module_param(lpfc_##name, uint, S_IRUGO);\ MODULE_PARM_DESC(lpfc_##name, desc);\ lpfc_param_show(name)\ lpfc_param_init(name, defval, minval, maxval)\ @@ -1625,7 +1648,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL) #define LPFC_ATTR_RW(name, defval, minval, maxval, desc) \ static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, 0);\ +module_param(lpfc_##name, uint, S_IRUGO);\ MODULE_PARM_DESC(lpfc_##name, desc);\ lpfc_param_show(name)\ lpfc_param_init(name, defval, minval, maxval)\ @@ -1636,7 +1659,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\ #define LPFC_ATTR_HEX_R(name, defval, minval, maxval, desc) \ static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, 0);\ +module_param(lpfc_##name, uint, S_IRUGO);\ MODULE_PARM_DESC(lpfc_##name, desc);\ lpfc_param_hex_show(name)\ lpfc_param_init(name, defval, minval, maxval)\ @@ -1644,7 +1667,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL) #define LPFC_ATTR_HEX_RW(name, defval, minval, maxval, desc) \ static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, 0);\ +module_param(lpfc_##name, uint, S_IRUGO);\ MODULE_PARM_DESC(lpfc_##name, desc);\ lpfc_param_hex_show(name)\ lpfc_param_init(name, defval, minval, maxval)\ @@ -1655,13 +1678,13 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\ #define LPFC_VPORT_ATTR(name, defval, minval, maxval, desc) \ static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, 0);\ +module_param(lpfc_##name, uint, S_IRUGO);\ MODULE_PARM_DESC(lpfc_##name, desc);\ lpfc_vport_param_init(name, defval, minval, maxval) #define LPFC_VPORT_ATTR_R(name, defval, minval, maxval, desc) \ static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, 0);\ +module_param(lpfc_##name, uint, S_IRUGO);\ MODULE_PARM_DESC(lpfc_##name, desc);\ lpfc_vport_param_show(name)\ lpfc_vport_param_init(name, defval, minval, maxval)\ @@ -1669,7 +1692,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL) #define LPFC_VPORT_ATTR_RW(name, defval, minval, maxval, desc) \ static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, 0);\ +module_param(lpfc_##name, uint, S_IRUGO);\ MODULE_PARM_DESC(lpfc_##name, desc);\ lpfc_vport_param_show(name)\ lpfc_vport_param_init(name, defval, minval, maxval)\ @@ -1680,7 +1703,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\ #define LPFC_VPORT_ATTR_HEX_R(name, defval, minval, maxval, desc) \ static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, 0);\ +module_param(lpfc_##name, uint, S_IRUGO);\ MODULE_PARM_DESC(lpfc_##name, desc);\ lpfc_vport_param_hex_show(name)\ lpfc_vport_param_init(name, defval, minval, maxval)\ @@ -1688,7 +1711,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL) #define LPFC_VPORT_ATTR_HEX_RW(name, defval, minval, maxval, desc) \ static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, 0);\ +module_param(lpfc_##name, uint, S_IRUGO);\ 
MODULE_PARM_DESC(lpfc_##name, desc);\ lpfc_vport_param_hex_show(name)\ lpfc_vport_param_init(name, defval, minval, maxval)\ @@ -1732,7 +1755,7 @@ static DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL); static DEVICE_ATTR(lpfc_temp_sensor, S_IRUGO, lpfc_temp_sensor_show, NULL); static DEVICE_ATTR(lpfc_fips_level, S_IRUGO, lpfc_fips_level_show, NULL); static DEVICE_ATTR(lpfc_fips_rev, S_IRUGO, lpfc_fips_rev_show, NULL); - +static DEVICE_ATTR(lpfc_dss, S_IRUGO, lpfc_dss_show, NULL); static char *lpfc_soft_wwn_key = "C99G71SL8032A"; @@ -1973,7 +1996,7 @@ static DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR,\ static int lpfc_poll = 0; -module_param(lpfc_poll, int, 0); +module_param(lpfc_poll, int, S_IRUGO); MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:" " 0 - none," " 1 - poll with interrupts enabled" @@ -1983,21 +2006,21 @@ static DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR, lpfc_poll_show, lpfc_poll_store); int lpfc_sli_mode = 0; -module_param(lpfc_sli_mode, int, 0); +module_param(lpfc_sli_mode, int, S_IRUGO); MODULE_PARM_DESC(lpfc_sli_mode, "SLI mode selector:" " 0 - auto (SLI-3 if supported)," " 2 - select SLI-2 even on SLI-3 capable HBAs," " 3 - select SLI-3"); int lpfc_enable_npiv = 1; -module_param(lpfc_enable_npiv, int, 0); +module_param(lpfc_enable_npiv, int, S_IRUGO); MODULE_PARM_DESC(lpfc_enable_npiv, "Enable NPIV functionality"); lpfc_param_show(enable_npiv); lpfc_param_init(enable_npiv, 1, 0, 1); static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL); int lpfc_enable_rrq; -module_param(lpfc_enable_rrq, int, 0); +module_param(lpfc_enable_rrq, int, S_IRUGO); MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality"); lpfc_param_show(enable_rrq); lpfc_param_init(enable_rrq, 0, 0, 1); @@ -2059,7 +2082,7 @@ static DEVICE_ATTR(txcmplq_hw, S_IRUGO, lpfc_txcmplq_hw_show, NULL); int lpfc_iocb_cnt = 2; -module_param(lpfc_iocb_cnt, int, 1); +module_param(lpfc_iocb_cnt, int, S_IRUGO); MODULE_PARM_DESC(lpfc_iocb_cnt, "Number of IOCBs alloc for ELS, CT, and ABTS: 1k to 5k IOCBs"); lpfc_param_show(iocb_cnt); @@ -2211,7 +2234,7 @@ static DEVICE_ATTR(lpfc_nodev_tmo, S_IRUGO | S_IWUSR, # disappear until the timer expires. Value range is [0,255]. Default # value is 30. */ -module_param(lpfc_devloss_tmo, int, 0); +module_param(lpfc_devloss_tmo, int, S_IRUGO); MODULE_PARM_DESC(lpfc_devloss_tmo, "Seconds driver will hold I/O waiting " "for a device to come back"); @@ -2321,7 +2344,7 @@ LPFC_VPORT_ATTR_R(peer_port_login, 0, 0, 1, # Default value of this parameter is 1. 
*/ static int lpfc_restrict_login = 1; -module_param(lpfc_restrict_login, int, 0); +module_param(lpfc_restrict_login, int, S_IRUGO); MODULE_PARM_DESC(lpfc_restrict_login, "Restrict virtual ports login to remote initiators."); lpfc_vport_param_show(restrict_login); @@ -2492,7 +2515,7 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr, return -EINVAL; } static int lpfc_topology = 0; -module_param(lpfc_topology, int, 0); +module_param(lpfc_topology, int, S_IRUGO); MODULE_PARM_DESC(lpfc_topology, "Select Fibre Channel topology"); lpfc_param_show(topology) lpfc_param_init(topology, 0, 0, 6) @@ -2934,7 +2957,7 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr, } static int lpfc_link_speed = 0; -module_param(lpfc_link_speed, int, 0); +module_param(lpfc_link_speed, int, S_IRUGO); MODULE_PARM_DESC(lpfc_link_speed, "Select link speed"); lpfc_param_show(link_speed) @@ -3062,7 +3085,7 @@ lpfc_aer_support_store(struct device *dev, struct device_attribute *attr, } static int lpfc_aer_support = 1; -module_param(lpfc_aer_support, int, 1); +module_param(lpfc_aer_support, int, S_IRUGO); MODULE_PARM_DESC(lpfc_aer_support, "Enable PCIe device AER support"); lpfc_param_show(aer_support) @@ -3174,7 +3197,7 @@ LPFC_VPORT_ATTR_RW(use_adisc, 0, 0, 1, # The value is set in milliseconds. */ static int lpfc_max_scsicmpl_time; -module_param(lpfc_max_scsicmpl_time, int, 0); +module_param(lpfc_max_scsicmpl_time, int, S_IRUGO); MODULE_PARM_DESC(lpfc_max_scsicmpl_time, "Use command completion time to control queue depth"); lpfc_vport_param_show(max_scsicmpl_time); @@ -3350,7 +3373,7 @@ LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support"); */ unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION; -module_param(lpfc_prot_mask, uint, 0); +module_param(lpfc_prot_mask, uint, S_IRUGO); MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask"); /* @@ -3362,7 +3385,7 @@ MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask"); # */ unsigned char lpfc_prot_guard = SHOST_DIX_GUARD_IP; -module_param(lpfc_prot_guard, byte, 0); +module_param(lpfc_prot_guard, byte, S_IRUGO); MODULE_PARM_DESC(lpfc_prot_guard, "host protection guard type"); /* @@ -3380,7 +3403,7 @@ MODULE_PARM_DESC(lpfc_prot_guard, "host protection guard type"); * Default value is 0. */ int lpfc_delay_discovery; -module_param(lpfc_delay_discovery, int, 0); +module_param(lpfc_delay_discovery, int, S_IRUGO); MODULE_PARM_DESC(lpfc_delay_discovery, "Delay NPort discovery when Clean Address bit is cleared. " "Allowed values: 0,1."); @@ -3475,6 +3498,7 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_txcmplq_hw, &dev_attr_lpfc_fips_level, &dev_attr_lpfc_fips_rev, + &dev_attr_lpfc_dss, NULL, }; @@ -4677,6 +4701,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) lpfc_aer_support_init(phba, lpfc_aer_support); lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up); lpfc_iocb_cnt_init(phba, lpfc_iocb_cnt); + phba->cfg_enable_dss = 1; return; } diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index a80d938fafc9..3c8c91a7609c 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -82,23 +82,23 @@ * the HBA. X MUST also be a power of 2. 
*/ static int lpfc_debugfs_enable = 1; -module_param(lpfc_debugfs_enable, int, 0); +module_param(lpfc_debugfs_enable, int, S_IRUGO); MODULE_PARM_DESC(lpfc_debugfs_enable, "Enable debugfs services"); /* This MUST be a power of 2 */ static int lpfc_debugfs_max_disc_trc; -module_param(lpfc_debugfs_max_disc_trc, int, 0); +module_param(lpfc_debugfs_max_disc_trc, int, S_IRUGO); MODULE_PARM_DESC(lpfc_debugfs_max_disc_trc, "Set debugfs discovery trace depth"); /* This MUST be a power of 2 */ static int lpfc_debugfs_max_slow_ring_trc; -module_param(lpfc_debugfs_max_slow_ring_trc, int, 0); +module_param(lpfc_debugfs_max_slow_ring_trc, int, S_IRUGO); MODULE_PARM_DESC(lpfc_debugfs_max_slow_ring_trc, "Set debugfs slow ring trace depth"); static int lpfc_debugfs_mask_disc_trc; -module_param(lpfc_debugfs_mask_disc_trc, int, 0); +module_param(lpfc_debugfs_mask_disc_trc, int, S_IRUGO); MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc, "Set debugfs discovery trace mask"); diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 5a4a196baa90..94ae37c5111a 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -3204,7 +3204,10 @@ typedef struct { #define IOERR_SLER_RRQ_RJT_ERR 0x4C #define IOERR_SLER_RRQ_RETRY_ERR 0x4D #define IOERR_SLER_ABTS_ERR 0x4E - +#define IOERR_ELXSEC_KEY_UNWRAP_ERROR 0xF0 +#define IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR 0xF1 +#define IOERR_ELXSEC_CRYPTO_ERROR 0xF2 +#define IOERR_ELXSEC_CRYPTO_COMPARE_ERROR 0xF3 #define IOERR_DRVR_MASK 0x100 #define IOERR_SLI_DOWN 0x101 /* ulpStatus - Driver defined */ #define IOERR_SLI_BRESET 0x102 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 5e84d2a6ada6..35665cfb5689 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -460,7 +460,7 @@ lpfc_config_port_post(struct lpfc_hba *phba) || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) && !(phba->lmt & LMT_16Gb))) { /* Reset link speed to auto */ - lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT, + lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, "1302 Invalid speed for this board: " "Reset link speed to auto: x%x\n", phba->cfg_link_speed); diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index 9fb4345ec021..dba32dfdb59b 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -1263,7 +1263,8 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) { if (phba->cfg_enable_bg) mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */ - mb->un.varCfgPort.cdss = 1; /* Configure Security */ + if (phba->cfg_enable_dss) + mb->un.varCfgPort.cdss = 1; /* Configure Security */ mb->un.varCfgPort.cerbm = 1; /* Request HBQs */ mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */ mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count(); diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index d482dfc951f3..1255b9d1b1b0 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -2508,6 +2508,15 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, break; case IOSTAT_LOCAL_REJECT: case IOSTAT_REMOTE_STOP: + if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR || + lpfc_cmd->result == + IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR || + lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR || + lpfc_cmd->result == + IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) { + cmd->result = ScsiResult(DID_NO_CONNECT, 0); + break; + } if (lpfc_cmd->result == IOERR_INVALID_RPI 
|| lpfc_cmd->result == IOERR_NO_RESOURCES || lpfc_cmd->result == IOERR_ABORT_REQUESTED || @@ -2515,7 +2524,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, cmd->result = ScsiResult(DID_REQUEUE, 0); break; } - if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED || lpfc_cmd->result == IOERR_TX_DMA_FAILED) && pIocbOut->iocb.unsli3.sli3_bg.bgstat) { @@ -2545,7 +2553,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, lpfc_cmd->cur_iocbq.sli4_xritag, 0, 0); } - /* else: fall through */ default: cmd->result = ScsiResult(DID_ERROR, 0); @@ -2556,9 +2563,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, || (pnode->nlp_state != NLP_STE_MAPPED_NODE)) cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, SAM_STAT_BUSY); - } else { + } else cmd->result = ScsiResult(DID_OK, 0); - } if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) { uint32_t *lp = (uint32_t *)cmd->sense_buffer; -- cgit v1.2.3 From 2a622bfbe1d95664ecd4bc1cfe6dacf4788dfc10 Mon Sep 17 00:00:00 2001 From: James Smart Date: Wed, 16 Feb 2011 12:40:06 -0500 Subject: [SCSI] lpfc 8.3.21: Debugfs additions - Add the driver debugfs framework for supporting debugfs read and write operations, and iDiag command structure. - Add read and write to SLI4 device PCI config space registers. - Add the driver support of debugfs PCI config space register bits set/clear methods to the provided bitmask. - Add iDiag driver support for SLI4 device queue diagnostic. Signed-off-by: Alex Iannicelli Signed-off-by: James Smart Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc.h | 6 +- drivers/scsi/lpfc/lpfc_debugfs.c | 954 ++++++++++++++++++++++++++++++++++++--- drivers/scsi/lpfc/lpfc_debugfs.h | 60 ++- drivers/scsi/lpfc/lpfc_sli.c | 5 + drivers/scsi/lpfc/lpfc_sli4.h | 2 +- 5 files changed, 972 insertions(+), 55 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index f41722e8652a..b64c6da870d3 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2010 Emulex. All rights reserved. * + * Copyright (C) 2004-2011 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -799,6 +799,10 @@ struct lpfc_hba { struct dentry *debug_slow_ring_trc; struct lpfc_debugfs_trc *slow_ring_trc; atomic_t slow_ring_trc_cnt; + /* iDiag debugfs sub-directory */ + struct dentry *idiag_root; + struct dentry *idiag_pci_cfg; + struct dentry *idiag_que_info; #endif /* Used for deferred freeing of ELS data buffers */ diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 3c8c91a7609c..a753581509d6 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2007-2009 Emulex. All rights reserved. * + * Copyright (C) 2007-2011 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * * @@ -57,8 +57,8 @@ * # mount -t debugfs none /sys/kernel/debug * * The lpfc debugfs directory hierarchy is: - * lpfc/lpfcX/vportY - * where X is the lpfc hba unique_id + * /sys/kernel/debug/lpfc/fnX/vportY + * where X is the lpfc hba function unique_id * where Y is the vport VPI on that hba * * Debugging services available per vport: @@ -104,30 +104,12 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc, #include -/* size of output line, for discovery_trace and slow_ring_trace */ -#define LPFC_DEBUG_TRC_ENTRY_SIZE 100 - -/* nodelist output buffer size */ -#define LPFC_NODELIST_SIZE 8192 -#define LPFC_NODELIST_ENTRY_SIZE 120 - -/* dumpHBASlim output buffer size */ -#define LPFC_DUMPHBASLIM_SIZE 4096 - -/* dumpHostSlim output buffer size */ -#define LPFC_DUMPHOSTSLIM_SIZE 4096 - -/* hbqinfo output buffer size */ -#define LPFC_HBQINFO_SIZE 8192 - -struct lpfc_debug { - char *buffer; - int len; -}; - static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0); static unsigned long lpfc_debugfs_start_time = 0L; +/* iDiag */ +static struct lpfc_idiag idiag; + /** * lpfc_debugfs_disc_trc_data - Dump discovery logging to a buffer * @vport: The vport to gather the log info from. @@ -996,8 +978,6 @@ lpfc_debugfs_dumpDataDif_write(struct file *file, const char __user *buf, return nbytes; } - - /** * lpfc_debugfs_nodelist_open - Open the nodelist debugfs file * @inode: The inode pointer that contains a vport pointer. @@ -1099,6 +1079,7 @@ lpfc_debugfs_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { struct lpfc_debug *debug = file->private_data; + return simple_read_from_buffer(buf, nbytes, ppos, debug->buffer, debug->len); } @@ -1137,6 +1118,776 @@ lpfc_debugfs_dumpDataDif_release(struct inode *inode, struct file *file) return 0; } +/* + * iDiag debugfs file access methods + */ + +/* + * iDiag PCI config space register access methods: + * + * The PCI config space register accessees of read, write, read-modify-write + * for set bits, and read-modify-write for clear bits to SLI4 PCI functions + * are provided. In the proper SLI4 PCI function's debugfs iDiag directory, + * + * /sys/kernel/debug/lpfc/fn<#>/iDiag + * + * the access is through the debugfs entry pciCfg: + * + * 1. For PCI config space register read access, there are two read methods: + * A) read a single PCI config space register in the size of a byte + * (8 bits), a word (16 bits), or a dword (32 bits); or B) browse through + * the 4K extended PCI config space. + * + * A) Read a single PCI config space register consists of two steps: + * + * Step-1: Set up PCI config space register read command, the command + * syntax is, + * + * echo 1 > pciCfg + * + * where, 1 is the iDiag command for PCI config space read, is the + * offset from the beginning of the device's PCI config space to read from, + * and is the size of PCI config space register data to read back, + * it will be 1 for reading a byte (8 bits), 2 for reading a word (16 bits + * or 2 bytes), or 4 for reading a dword (32 bits or 4 bytes). 
+ * + * Setp-2: Perform the debugfs read operation to execute the idiag command + * set up in Step-1, + * + * cat pciCfg + * + * Examples: + * To read PCI device's vendor-id and device-id from PCI config space, + * + * echo 1 0 4 > pciCfg + * cat pciCfg + * + * To read PCI device's currnt command from config space, + * + * echo 1 4 2 > pciCfg + * cat pciCfg + * + * B) Browse through the entire 4K extended PCI config space also consists + * of two steps: + * + * Step-1: Set up PCI config space register browsing command, the command + * syntax is, + * + * echo 1 0 4096 > pciCfg + * + * where, 1 is the iDiag command for PCI config space read, 0 must be used + * as the offset for PCI config space register browse, and 4096 must be + * used as the count for PCI config space register browse. + * + * Step-2: Repeately issue the debugfs read operation to browse through + * the entire PCI config space registers: + * + * cat pciCfg + * cat pciCfg + * cat pciCfg + * ... + * + * When browsing to the end of the 4K PCI config space, the browse method + * shall wrap around to start reading from beginning again, and again... + * + * 2. For PCI config space register write access, it supports a single PCI + * config space register write in the size of a byte (8 bits), a word + * (16 bits), or a dword (32 bits). The command syntax is, + * + * echo 2 > pciCfg + * + * where, 2 is the iDiag command for PCI config space write, is + * the offset from the beginning of the device's PCI config space to write + * into, is the size of data to write into the PCI config space, + * it will be 1 for writing a byte (8 bits), 2 for writing a word (16 bits + * or 2 bytes), or 4 for writing a dword (32 bits or 4 bytes), and + * is the data to be written into the PCI config space register at the + * offset. + * + * Examples: + * To disable PCI device's interrupt assertion, + * + * 1) Read in device's PCI config space register command field : + * + * echo 1 4 2 > pciCfg + * cat pciCfg + * + * 2) Set bit 10 (Interrupt Disable bit) in the : + * + * = | (1 < 10) + * + * 3) Write the modified command back: + * + * echo 2 4 2 > pciCfg + * + * 3. For PCI config space register set bits access, it supports a single PCI + * config space register set bits in the size of a byte (8 bits), a word + * (16 bits), or a dword (32 bits). The command syntax is, + * + * echo 3 > pciCfg + * + * where, 3 is the iDiag command for PCI config space set bits, is + * the offset from the beginning of the device's PCI config space to set + * bits into, is the size of the bitmask to set into the PCI config + * space, it will be 1 for setting a byte (8 bits), 2 for setting a word + * (16 bits or 2 bytes), or 4 for setting a dword (32 bits or 4 bytes), and + * is the bitmask, indicating the bits to be set into the PCI + * config space register at the offset. The logic performed to the content + * of the PCI config space register, regval, is, + * + * regval |= + * + * 4. For PCI config space register clear bits access, it supports a single + * PCI config space register clear bits in the size of a byte (8 bits), + * a word (16 bits), or a dword (32 bits). 
The command syntax is, + * + * echo 4 > pciCfg + * + * where, 4 is the iDiag command for PCI config space clear bits, + * is the offset from the beginning of the device's PCI config space to + * clear bits from, is the size of the bitmask to set into the PCI + * config space, it will be 1 for setting a byte (8 bits), 2 for setting + * a word(16 bits or 2 bytes), or 4 for setting a dword (32 bits or 4 + * bytes), and is the bitmask, indicating the bits to be cleared + * from the PCI config space register at the offset. the logic performed + * to the content of the PCI config space register, regval, is, + * + * regval &= ~ + * + * Note, for all single register read, write, set bits, or clear bits access, + * the offset () must be aligned with the size of the data: + * + * For data size of byte (8 bits), the offset must be aligned to the byte + * boundary; for data size of word (16 bits), the offset must be aligned + * to the word boundary; while for data size of dword (32 bits), the offset + * must be aligned to the dword boundary. Otherwise, the interface will + * return the error: + * + * "-bash: echo: write error: Invalid argument". + * + * For example: + * + * echo 1 2 4 > pciCfg + * -bash: echo: write error: Invalid argument + * + * Note also, all of the numbers in the command fields for all read, write, + * set bits, and clear bits PCI config space register command fields can be + * either decimal or hex. + * + * For example, + * echo 1 0 4096 > pciCfg + * + * will be the same as + * echo 1 0 0x1000 > pciCfg + * + * And, + * echo 2 155 1 10 > pciCfg + * + * will be + * echo 2 0x9b 1 0xa > pciCfg + */ + +/** + * lpfc_idiag_cmd_get - Get and parse idiag debugfs comands from user space + * @buf: The pointer to the user space buffer. + * @nbytes: The number of bytes in the user space buffer. + * @idiag_cmd: pointer to the idiag command struct. + * + * This routine reads data from debugfs user space buffer and parses the + * buffer for getting the idiag command and arguments. The while space in + * between the set of data is used as the parsing separator. + * + * This routine returns 0 when successful, it returns proper error code + * back to the user space in error conditions. + */ +static int lpfc_idiag_cmd_get(const char __user *buf, size_t nbytes, + struct lpfc_idiag_cmd *idiag_cmd) +{ + char mybuf[64]; + char *pbuf, *step_str; + int bsize, i; + + /* Protect copy from user */ + if (!access_ok(VERIFY_READ, buf, nbytes)) + return -EFAULT; + + memset(mybuf, 0, sizeof(mybuf)); + memset(idiag_cmd, 0, sizeof(*idiag_cmd)); + bsize = min(nbytes, (sizeof(mybuf)-1)); + + if (copy_from_user(mybuf, buf, bsize)) + return -EFAULT; + pbuf = &mybuf[0]; + step_str = strsep(&pbuf, "\t "); + + /* The opcode must present */ + if (!step_str) + return -EINVAL; + + idiag_cmd->opcode = simple_strtol(step_str, NULL, 0); + if (idiag_cmd->opcode == 0) + return -EINVAL; + + for (i = 0; i < LPFC_IDIAG_CMD_DATA_SIZE; i++) { + step_str = strsep(&pbuf, "\t "); + if (!step_str) + return 0; + idiag_cmd->data[i] = simple_strtol(step_str, NULL, 0); + } + return 0; +} + +/** + * lpfc_idiag_open - idiag open debugfs + * @inode: The inode pointer that contains a pointer to phba. + * @file: The file pointer to attach the file operation. + * + * Description: + * This routine is the entry point for the debugfs open file operation. 
It + * gets the reference to phba from the i_private field in @inode, it then + * allocates buffer for the file operation, performs the necessary PCI config + * space read into the allocated buffer according to the idiag user command + * setup, and then returns a pointer to buffer in the private_data field in + * @file. + * + * Returns: + * This function returns zero if successful. On error it will return an + * negative error value. + **/ +static int +lpfc_idiag_open(struct inode *inode, struct file *file) +{ + struct lpfc_debug *debug; + + debug = kmalloc(sizeof(*debug), GFP_KERNEL); + if (!debug) + return -ENOMEM; + + debug->i_private = inode->i_private; + debug->buffer = NULL; + file->private_data = debug; + + return 0; +} + +/** + * lpfc_idiag_release - Release idiag access file operation + * @inode: The inode pointer that contains a vport pointer. (unused) + * @file: The file pointer that contains the buffer to release. + * + * Description: + * This routine is the generic release routine for the idiag access file + * operation, it frees the buffer that was allocated when the debugfs file + * was opened. + * + * Returns: + * This function returns zero. + **/ +static int +lpfc_idiag_release(struct inode *inode, struct file *file) +{ + struct lpfc_debug *debug = file->private_data; + + /* Free the buffers to the file operation */ + kfree(debug->buffer); + kfree(debug); + + return 0; +} + +/** + * lpfc_idiag_cmd_release - Release idiag cmd access file operation + * @inode: The inode pointer that contains a vport pointer. (unused) + * @file: The file pointer that contains the buffer to release. + * + * Description: + * This routine frees the buffer that was allocated when the debugfs file + * was opened. It also reset the fields in the idiag command struct in the + * case the command is not continuous browsing of the data structure. + * + * Returns: + * This function returns zero. + **/ +static int +lpfc_idiag_cmd_release(struct inode *inode, struct file *file) +{ + struct lpfc_debug *debug = file->private_data; + + /* Read PCI config register, if not read all, clear command fields */ + if ((debug->op == LPFC_IDIAG_OP_RD) && + (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD)) + if ((idiag.cmd.data[1] == sizeof(uint8_t)) || + (idiag.cmd.data[1] == sizeof(uint16_t)) || + (idiag.cmd.data[1] == sizeof(uint32_t))) + memset(&idiag, 0, sizeof(idiag)); + + /* Write PCI config register, clear command fields */ + if ((debug->op == LPFC_IDIAG_OP_WR) && + (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR)) + memset(&idiag, 0, sizeof(idiag)); + + /* Free the buffers to the file operation */ + kfree(debug->buffer); + kfree(debug); + + return 0; +} + +/** + * lpfc_idiag_pcicfg_read - idiag debugfs read pcicfg + * @file: The file pointer to read from. + * @buf: The buffer to copy the data to. + * @nbytes: The number of bytes to read. + * @ppos: The position in the file to start reading from. + * + * Description: + * This routine reads data from the @phba pci config space according to the + * idiag command, and copies to user @buf. Depending on the PCI config space + * read command setup, it does either a single register read of a byte + * (8 bits), a word (16 bits), or a dword (32 bits) or browsing through all + * registers from the 4K extended PCI config space. + * + * Returns: + * This function returns the amount of data that was read (this could be less + * than @nbytes if the end of the file was reached) or a negative error value. 
+ **/ +static ssize_t +lpfc_idiag_pcicfg_read(struct file *file, char __user *buf, size_t nbytes, + loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; + int offset_label, offset, len = 0, index = LPFC_PCI_CFG_RD_SIZE; + int where, count; + char *pbuffer; + struct pci_dev *pdev; + uint32_t u32val; + uint16_t u16val; + uint8_t u8val; + + pdev = phba->pcidev; + if (!pdev) + return 0; + + /* This is a user read operation */ + debug->op = LPFC_IDIAG_OP_RD; + + if (!debug->buffer) + debug->buffer = kmalloc(LPFC_PCI_CFG_SIZE, GFP_KERNEL); + if (!debug->buffer) + return 0; + pbuffer = debug->buffer; + + if (*ppos) + return 0; + + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD) { + where = idiag.cmd.data[0]; + count = idiag.cmd.data[1]; + } else + return 0; + + /* Read single PCI config space register */ + switch (count) { + case SIZE_U8: /* byte (8 bits) */ + pci_read_config_byte(pdev, where, &u8val); + len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, + "%03x: %02x\n", where, u8val); + break; + case SIZE_U16: /* word (16 bits) */ + pci_read_config_word(pdev, where, &u16val); + len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, + "%03x: %04x\n", where, u16val); + break; + case SIZE_U32: /* double word (32 bits) */ + pci_read_config_dword(pdev, where, &u32val); + len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, + "%03x: %08x\n", where, u32val); + break; + case LPFC_PCI_CFG_SIZE: /* browse all */ + goto pcicfg_browse; + break; + default: + /* illegal count */ + len = 0; + break; + } + return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); + +pcicfg_browse: + + /* Browse all PCI config space registers */ + offset_label = idiag.offset.last_rd; + offset = offset_label; + + /* Read PCI config space */ + len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, + "%03x: ", offset_label); + while (index > 0) { + pci_read_config_dword(pdev, offset, &u32val); + len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, + "%08x ", u32val); + offset += sizeof(uint32_t); + index -= sizeof(uint32_t); + if (!index) + len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, + "\n"); + else if (!(index % (8 * sizeof(uint32_t)))) { + offset_label += (8 * sizeof(uint32_t)); + len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, + "\n%03x: ", offset_label); + } + } + + /* Set up the offset for next portion of pci cfg read */ + idiag.offset.last_rd += LPFC_PCI_CFG_RD_SIZE; + if (idiag.offset.last_rd >= LPFC_PCI_CFG_SIZE) + idiag.offset.last_rd = 0; + + return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); +} + +/** + * lpfc_idiag_pcicfg_write - Syntax check and set up idiag pcicfg commands + * @file: The file pointer to read from. + * @buf: The buffer to copy the user data from. + * @nbytes: The number of bytes to get. + * @ppos: The position in the file to start reading from. + * + * This routine get the debugfs idiag command struct from user space and + * then perform the syntax check for PCI config space read or write command + * accordingly. In the case of PCI config space read command, it sets up + * the command in the idiag command struct for the debugfs read operation. + * In the case of PCI config space write operation, it executes the write + * operation into the PCI config space accordingly. + * + * It returns the @nbytges passing in from debugfs user space when successful. + * In case of error conditions, it returns proper error code back to the user + * space. 
+ */ +static ssize_t +lpfc_idiag_pcicfg_write(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; + uint32_t where, value, count; + uint32_t u32val; + uint16_t u16val; + uint8_t u8val; + struct pci_dev *pdev; + int rc; + + pdev = phba->pcidev; + if (!pdev) + return -EFAULT; + + /* This is a user write operation */ + debug->op = LPFC_IDIAG_OP_WR; + + rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd); + if (rc) + return rc; + + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD) { + /* Read command from PCI config space, set up command fields */ + where = idiag.cmd.data[0]; + count = idiag.cmd.data[1]; + if (count == LPFC_PCI_CFG_SIZE) { + if (where != 0) + goto error_out; + } else if ((count != sizeof(uint8_t)) && + (count != sizeof(uint16_t)) && + (count != sizeof(uint32_t))) + goto error_out; + if (count == sizeof(uint8_t)) { + if (where > LPFC_PCI_CFG_SIZE - sizeof(uint8_t)) + goto error_out; + if (where % sizeof(uint8_t)) + goto error_out; + } + if (count == sizeof(uint16_t)) { + if (where > LPFC_PCI_CFG_SIZE - sizeof(uint16_t)) + goto error_out; + if (where % sizeof(uint16_t)) + goto error_out; + } + if (count == sizeof(uint32_t)) { + if (where > LPFC_PCI_CFG_SIZE - sizeof(uint32_t)) + goto error_out; + if (where % sizeof(uint32_t)) + goto error_out; + } + } else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR || + idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST || + idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) { + /* Write command to PCI config space, read-modify-write */ + where = idiag.cmd.data[0]; + count = idiag.cmd.data[1]; + value = idiag.cmd.data[2]; + /* Sanity checks */ + if ((count != sizeof(uint8_t)) && + (count != sizeof(uint16_t)) && + (count != sizeof(uint32_t))) + goto error_out; + if (count == sizeof(uint8_t)) { + if (where > LPFC_PCI_CFG_SIZE - sizeof(uint8_t)) + goto error_out; + if (where % sizeof(uint8_t)) + goto error_out; + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR) + pci_write_config_byte(pdev, where, + (uint8_t)value); + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST) { + rc = pci_read_config_byte(pdev, where, &u8val); + if (!rc) { + u8val |= (uint8_t)value; + pci_write_config_byte(pdev, where, + u8val); + } + } + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) { + rc = pci_read_config_byte(pdev, where, &u8val); + if (!rc) { + u8val &= (uint8_t)(~value); + pci_write_config_byte(pdev, where, + u8val); + } + } + } + if (count == sizeof(uint16_t)) { + if (where > LPFC_PCI_CFG_SIZE - sizeof(uint16_t)) + goto error_out; + if (where % sizeof(uint16_t)) + goto error_out; + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR) + pci_write_config_word(pdev, where, + (uint16_t)value); + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST) { + rc = pci_read_config_word(pdev, where, &u16val); + if (!rc) { + u16val |= (uint16_t)value; + pci_write_config_word(pdev, where, + u16val); + } + } + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) { + rc = pci_read_config_word(pdev, where, &u16val); + if (!rc) { + u16val &= (uint16_t)(~value); + pci_write_config_word(pdev, where, + u16val); + } + } + } + if (count == sizeof(uint32_t)) { + if (where > LPFC_PCI_CFG_SIZE - sizeof(uint32_t)) + goto error_out; + if (where % sizeof(uint32_t)) + goto error_out; + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR) + pci_write_config_dword(pdev, where, value); + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST) { + rc = pci_read_config_dword(pdev, where, + 
&u32val); + if (!rc) { + u32val |= value; + pci_write_config_dword(pdev, where, + u32val); + } + } + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) { + rc = pci_read_config_dword(pdev, where, + &u32val); + if (!rc) { + u32val &= ~value; + pci_write_config_dword(pdev, where, + u32val); + } + } + } + } else + /* All other opecodes are illegal for now */ + goto error_out; + + return nbytes; +error_out: + memset(&idiag, 0, sizeof(idiag)); + return -EINVAL; +} + +/** + * lpfc_idiag_queinfo_read - idiag debugfs read queue information + * @file: The file pointer to read from. + * @buf: The buffer to copy the data to. + * @nbytes: The number of bytes to read. + * @ppos: The position in the file to start reading from. + * + * Description: + * This routine reads data from the @phba SLI4 PCI function queue information, + * and copies to user @buf. + * + * Returns: + * This function returns the amount of data that was read (this could be less + * than @nbytes if the end of the file was reached) or a negative error value. + **/ +static ssize_t +lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes, + loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; + int len = 0, fcp_qidx; + char *pbuffer; + + if (!debug->buffer) + debug->buffer = kmalloc(LPFC_QUE_INFO_GET_BUF_SIZE, GFP_KERNEL); + if (!debug->buffer) + return 0; + pbuffer = debug->buffer; + + if (*ppos) + return 0; + + /* Get slow-path event queue information */ + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "Slow-path EQ information:\n"); + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\tID [%02d], EQE-COUNT [%04d], " + "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n", + phba->sli4_hba.sp_eq->queue_id, + phba->sli4_hba.sp_eq->entry_count, + phba->sli4_hba.sp_eq->host_index, + phba->sli4_hba.sp_eq->hba_index); + + /* Get fast-path event queue information */ + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "Fast-path EQ information:\n"); + for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) { + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\tID [%02d], EQE-COUNT [%04d], " + "HOST-INDEX [%04x], PORT-INDEX [%04x]\n", + phba->sli4_hba.fp_eq[fcp_qidx]->queue_id, + phba->sli4_hba.fp_eq[fcp_qidx]->entry_count, + phba->sli4_hba.fp_eq[fcp_qidx]->host_index, + phba->sli4_hba.fp_eq[fcp_qidx]->hba_index); + } + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); + + /* Get mailbox complete queue information */ + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "Mailbox CQ information:\n"); + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\t\tAssociated EQ-ID [%02d]:\n", + phba->sli4_hba.mbx_cq->assoc_qid); + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\tID [%02d], CQE-COUNT [%04d], " + "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n", + phba->sli4_hba.mbx_cq->queue_id, + phba->sli4_hba.mbx_cq->entry_count, + phba->sli4_hba.mbx_cq->host_index, + phba->sli4_hba.mbx_cq->hba_index); + + /* Get slow-path complete queue information */ + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "Slow-path CQ information:\n"); + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\t\tAssociated EQ-ID [%02d]:\n", + phba->sli4_hba.els_cq->assoc_qid); + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\tID [%02d], CQE-COUNT [%04d], " + "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n", + 
phba->sli4_hba.els_cq->queue_id, + phba->sli4_hba.els_cq->entry_count, + phba->sli4_hba.els_cq->host_index, + phba->sli4_hba.els_cq->hba_index); + + /* Get fast-path complete queue information */ + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "Fast-path CQ information:\n"); + for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) { + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\t\tAssociated EQ-ID [%02d]:\n", + phba->sli4_hba.fcp_cq[fcp_qidx]->assoc_qid); + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\tID [%02d], EQE-COUNT [%04d], " + "HOST-INDEX [%04x], PORT-INDEX [%04x]\n", + phba->sli4_hba.fcp_cq[fcp_qidx]->queue_id, + phba->sli4_hba.fcp_cq[fcp_qidx]->entry_count, + phba->sli4_hba.fcp_cq[fcp_qidx]->host_index, + phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index); + } + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); + + /* Get mailbox queue information */ + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "Mailbox MQ information:\n"); + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\t\tAssociated CQ-ID [%02d]:\n", + phba->sli4_hba.mbx_wq->assoc_qid); + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\tID [%02d], MQE-COUNT [%04d], " + "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n", + phba->sli4_hba.mbx_wq->queue_id, + phba->sli4_hba.mbx_wq->entry_count, + phba->sli4_hba.mbx_wq->host_index, + phba->sli4_hba.mbx_wq->hba_index); + + /* Get slow-path work queue information */ + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "Slow-path WQ information:\n"); + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\t\tAssociated CQ-ID [%02d]:\n", + phba->sli4_hba.els_wq->assoc_qid); + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\tID [%02d], WQE-COUNT [%04d], " + "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n", + phba->sli4_hba.els_wq->queue_id, + phba->sli4_hba.els_wq->entry_count, + phba->sli4_hba.els_wq->host_index, + phba->sli4_hba.els_wq->hba_index); + + /* Get fast-path work queue information */ + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "Fast-path WQ information:\n"); + for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) { + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\t\tAssociated CQ-ID [%02d]:\n", + phba->sli4_hba.fcp_wq[fcp_qidx]->assoc_qid); + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\tID [%02d], WQE-COUNT [%04d], " + "HOST-INDEX [%04x], PORT-INDEX [%04x]\n", + phba->sli4_hba.fcp_wq[fcp_qidx]->queue_id, + phba->sli4_hba.fcp_wq[fcp_qidx]->entry_count, + phba->sli4_hba.fcp_wq[fcp_qidx]->host_index, + phba->sli4_hba.fcp_wq[fcp_qidx]->hba_index); + } + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); + + /* Get receive queue information */ + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "Slow-path RQ information:\n"); + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\t\tAssociated CQ-ID [%02d]:\n", + phba->sli4_hba.hdr_rq->assoc_qid); + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\tID [%02d], RHQE-COUNT [%04d], " + "HOST-INDEX [%04x], PORT-INDEX [%04x]\n", + phba->sli4_hba.hdr_rq->queue_id, + phba->sli4_hba.hdr_rq->entry_count, + phba->sli4_hba.hdr_rq->host_index, + phba->sli4_hba.hdr_rq->hba_index); + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\tID [%02d], RDQE-COUNT [%04d], " + "HOST-INDEX [%04x], PORT-INDEX [%04x]\n", + phba->sli4_hba.dat_rq->queue_id, 
+ phba->sli4_hba.dat_rq->entry_count, + phba->sli4_hba.dat_rq->host_index, + phba->sli4_hba.dat_rq->hba_index); + + return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); +} + #undef lpfc_debugfs_op_disc_trc static const struct file_operations lpfc_debugfs_op_disc_trc = { .owner = THIS_MODULE, @@ -1213,6 +1964,28 @@ static const struct file_operations lpfc_debugfs_op_slow_ring_trc = { static struct dentry *lpfc_debugfs_root = NULL; static atomic_t lpfc_debugfs_hba_count; + +/* + * File operations for the iDiag debugfs + */ +#undef lpfc_idiag_op_pciCfg +static const struct file_operations lpfc_idiag_op_pciCfg = { + .owner = THIS_MODULE, + .open = lpfc_idiag_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_idiag_pcicfg_read, + .write = lpfc_idiag_pcicfg_write, + .release = lpfc_idiag_cmd_release, +}; + +#undef lpfc_idiag_op_queInfo +static const struct file_operations lpfc_idiag_op_queInfo = { + .owner = THIS_MODULE, + .open = lpfc_idiag_open, + .read = lpfc_idiag_queinfo_read, + .release = lpfc_idiag_release, +}; + #endif /** @@ -1249,8 +2022,8 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) if (!lpfc_debugfs_start_time) lpfc_debugfs_start_time = jiffies; - /* Setup lpfcX directory for specific HBA */ - snprintf(name, sizeof(name), "lpfc%d", phba->brd_no); + /* Setup funcX directory for specific HBA PCI function */ + snprintf(name, sizeof(name), "fn%d", phba->brd_no); if (!phba->hba_debugfs_root) { phba->hba_debugfs_root = debugfs_create_dir(name, lpfc_debugfs_root); @@ -1275,28 +2048,38 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) } /* Setup dumpHBASlim */ - snprintf(name, sizeof(name), "dumpHBASlim"); - phba->debug_dumpHBASlim = - debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, - phba->hba_debugfs_root, - phba, &lpfc_debugfs_op_dumpHBASlim); - if (!phba->debug_dumpHBASlim) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0413 Cannot create debugfs dumpHBASlim\n"); - goto debug_failed; - } + if (phba->sli_rev < LPFC_SLI_REV4) { + snprintf(name, sizeof(name), "dumpHBASlim"); + phba->debug_dumpHBASlim = + debugfs_create_file(name, + S_IFREG|S_IRUGO|S_IWUSR, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_op_dumpHBASlim); + if (!phba->debug_dumpHBASlim) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0413 Cannot create debugfs " + "dumpHBASlim\n"); + goto debug_failed; + } + } else + phba->debug_dumpHBASlim = NULL; /* Setup dumpHostSlim */ - snprintf(name, sizeof(name), "dumpHostSlim"); - phba->debug_dumpHostSlim = - debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, - phba->hba_debugfs_root, - phba, &lpfc_debugfs_op_dumpHostSlim); - if (!phba->debug_dumpHostSlim) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0414 Cannot create debugfs dumpHostSlim\n"); - goto debug_failed; - } + if (phba->sli_rev < LPFC_SLI_REV4) { + snprintf(name, sizeof(name), "dumpHostSlim"); + phba->debug_dumpHostSlim = + debugfs_create_file(name, + S_IFREG|S_IRUGO|S_IWUSR, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_op_dumpHostSlim); + if (!phba->debug_dumpHostSlim) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0414 Cannot create debugfs " + "dumpHostSlim\n"); + goto debug_failed; + } + } else + phba->debug_dumpHBASlim = NULL; /* Setup dumpData */ snprintf(name, sizeof(name), "dumpData"); @@ -1322,8 +2105,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) goto debug_failed; } - - /* Setup slow ring trace */ if (lpfc_debugfs_max_slow_ring_trc) { num = lpfc_debugfs_max_slow_ring_trc - 1; @@ -1342,7 +2123,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) } } - 
snprintf(name, sizeof(name), "slow_ring_trace"); phba->debug_slow_ring_trc = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, @@ -1434,6 +2214,53 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) "0409 Cant create debugfs nodelist\n"); goto debug_failed; } + + /* + * iDiag debugfs root entry points for SLI4 device only + */ + if (phba->sli_rev < LPFC_SLI_REV4) + goto debug_failed; + + snprintf(name, sizeof(name), "iDiag"); + if (!phba->idiag_root) { + phba->idiag_root = + debugfs_create_dir(name, phba->hba_debugfs_root); + if (!phba->idiag_root) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "2922 Can't create idiag debugfs\n"); + goto debug_failed; + } + /* Initialize iDiag data structure */ + memset(&idiag, 0, sizeof(idiag)); + } + + /* iDiag read PCI config space */ + snprintf(name, sizeof(name), "pciCfg"); + if (!phba->idiag_pci_cfg) { + phba->idiag_pci_cfg = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + phba->idiag_root, phba, &lpfc_idiag_op_pciCfg); + if (!phba->idiag_pci_cfg) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "2923 Can't create idiag debugfs\n"); + goto debug_failed; + } + idiag.offset.last_rd = 0; + } + + /* iDiag get PCI function queue information */ + snprintf(name, sizeof(name), "queInfo"); + if (!phba->idiag_que_info) { + phba->idiag_que_info = + debugfs_create_file(name, S_IFREG|S_IRUGO, + phba->idiag_root, phba, &lpfc_idiag_op_queInfo); + if (!phba->idiag_que_info) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "2924 Can't create idiag debugfs\n"); + goto debug_failed; + } + } + debug_failed: return; #endif @@ -1508,8 +2335,31 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport) phba->debug_slow_ring_trc = NULL; } + /* + * iDiag release + */ + if (phba->sli_rev == LPFC_SLI_REV4) { + if (phba->idiag_que_info) { + /* iDiag queInfo */ + debugfs_remove(phba->idiag_que_info); + phba->idiag_que_info = NULL; + } + if (phba->idiag_pci_cfg) { + /* iDiag pciCfg */ + debugfs_remove(phba->idiag_pci_cfg); + phba->idiag_pci_cfg = NULL; + } + + /* Finally remove the iDiag debugfs root */ + if (phba->idiag_root) { + /* iDiag root */ + debugfs_remove(phba->idiag_root); + phba->idiag_root = NULL; + } + } + if (phba->hba_debugfs_root) { - debugfs_remove(phba->hba_debugfs_root); /* lpfcX */ + debugfs_remove(phba->hba_debugfs_root); /* fnX */ phba->hba_debugfs_root = NULL; atomic_dec(&lpfc_debugfs_hba_count); } diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h index 03c7313a1012..91b9a9427cda 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.h +++ b/drivers/scsi/lpfc/lpfc_debugfs.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2007 Emulex. All rights reserved. * + * Copyright (C) 2007-2011 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * * @@ -22,6 +22,44 @@ #define _H_LPFC_DEBUG_FS #ifdef CONFIG_SCSI_LPFC_DEBUG_FS + +/* size of output line, for discovery_trace and slow_ring_trace */ +#define LPFC_DEBUG_TRC_ENTRY_SIZE 100 + +/* nodelist output buffer size */ +#define LPFC_NODELIST_SIZE 8192 +#define LPFC_NODELIST_ENTRY_SIZE 120 + +/* dumpHBASlim output buffer size */ +#define LPFC_DUMPHBASLIM_SIZE 4096 + +/* dumpHostSlim output buffer size */ +#define LPFC_DUMPHOSTSLIM_SIZE 4096 + +/* hbqinfo output buffer size */ +#define LPFC_HBQINFO_SIZE 8192 + +/* rdPciConf output buffer size */ +#define LPFC_PCI_CFG_SIZE 4096 +#define LPFC_PCI_CFG_RD_BUF_SIZE (LPFC_PCI_CFG_SIZE/2) +#define LPFC_PCI_CFG_RD_SIZE (LPFC_PCI_CFG_SIZE/4) + +/* queue info output buffer size */ +#define LPFC_QUE_INFO_GET_BUF_SIZE 2048 + +#define SIZE_U8 sizeof(uint8_t) +#define SIZE_U16 sizeof(uint16_t) +#define SIZE_U32 sizeof(uint32_t) + +struct lpfc_debug { + char *i_private; + char op; +#define LPFC_IDIAG_OP_RD 1 +#define LPFC_IDIAG_OP_WR 2 + char *buffer; + int len; +}; + struct lpfc_debugfs_trc { char *fmt; uint32_t data1; @@ -30,6 +68,26 @@ struct lpfc_debugfs_trc { uint32_t seq_cnt; unsigned long jif; }; + +struct lpfc_idiag_offset { + uint32_t last_rd; +}; + +#define LPFC_IDIAG_CMD_DATA_SIZE 4 +struct lpfc_idiag_cmd { + uint32_t opcode; +#define LPFC_IDIAG_CMD_PCICFG_RD 0x00000001 +#define LPFC_IDIAG_CMD_PCICFG_WR 0x00000002 +#define LPFC_IDIAG_CMD_PCICFG_ST 0x00000003 +#define LPFC_IDIAG_CMD_PCICFG_CL 0x00000004 + uint32_t data[LPFC_IDIAG_CMD_DATA_SIZE]; +}; + +struct lpfc_idiag { + uint32_t active; + struct lpfc_idiag_cmd cmd; + struct lpfc_idiag_offset offset; +}; #endif /* Mask for discovery_trace */ diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index b85c40e3bf55..2ee0374a9908 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -10471,6 +10471,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, cq->type = type; cq->subtype = subtype; cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); + cq->assoc_qid = eq->queue_id; cq->host_index = 0; cq->hba_index = 0; @@ -10665,6 +10666,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, goto out; } mq->type = LPFC_MQ; + mq->assoc_qid = cq->queue_id; mq->subtype = subtype; mq->host_index = 0; mq->hba_index = 0; @@ -10752,6 +10754,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, goto out; } wq->type = LPFC_WQ; + wq->assoc_qid = cq->queue_id; wq->subtype = subtype; wq->host_index = 0; wq->hba_index = 0; @@ -10869,6 +10872,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, goto out; } hrq->type = LPFC_HRQ; + hrq->assoc_qid = cq->queue_id; hrq->subtype = subtype; hrq->host_index = 0; hrq->hba_index = 0; @@ -10929,6 +10933,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, goto out; } drq->type = LPFC_DRQ; + drq->assoc_qid = cq->queue_id; drq->subtype = subtype; drq->host_index = 0; drq->hba_index = 0; diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 435a09d331ee..595056b89608 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -125,9 +125,9 @@ struct lpfc_queue { uint32_t entry_count; /* Number of entries to support on the queue */ uint32_t entry_size; /* Size of each queue entry. 
*/ uint32_t queue_id; /* Queue ID assigned by the hardware */ + uint32_t assoc_qid; /* Queue ID associated with, for CQ/WQ/MQ */ struct list_head page_list; uint32_t page_count; /* Number of pages allocated for this queue */ - uint32_t host_index; /* The host's index for putting or getting */ uint32_t hba_index; /* The last known hba index for get or put */ union sli4_qe qe[1]; /* array to index entries (must be last) */ -- cgit v1.2.3 From 6fc124697a0428148b9a8179e8431fa538d2994c Mon Sep 17 00:00:00 2001 From: James Smart Date: Wed, 16 Feb 2011 12:40:15 -0500 Subject: [SCSI] lpfc 8.3.21: Change lpfc driver version to 8.3.21 Signed-off-by: Alex Iannicelli Signed-off-by: James Smart Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc_version.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 386cf92de492..0a4d376dbca5 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2010 Emulex. All rights reserved. * + * Copyright (C) 2004-2011 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -18,7 +18,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "8.3.20" +#define LPFC_DRIVER_VERSION "8.3.21" #define LPFC_DRIVER_NAME "lpfc" #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" #define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" -- cgit v1.2.3 From ec8933b4bc27c6e143ca3ed4159f2e2c69b5499b Mon Sep 17 00:00:00 2001 From: Eddie Wai Date: Wed, 16 Feb 2011 15:04:25 -0600 Subject: [SCSI] bnx2i: Allow ep CONNECT_FAILED condition to go through proper cleanup Allow CNIC to go through the proper cleanup procedure for an endpoint which failed to connect. Proper cleanup is necessary for the chip to reset back to the initial state for the offloaded endpoint. Signed-off-by: Eddie Wai Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/bnx2i/bnx2i_iscsi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index f0dce26593eb..f9415aa35300 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c @@ -1935,13 +1935,13 @@ static int bnx2i_ep_tcp_conn_active(struct bnx2i_endpoint *bnx2i_ep) cnic_dev_10g = 1; switch (bnx2i_ep->state) { - case EP_STATE_CONNECT_FAILED: case EP_STATE_CLEANUP_FAILED: case EP_STATE_OFLD_FAILED: case EP_STATE_DISCONN_TIMEDOUT: ret = 0; break; case EP_STATE_CONNECT_START: + case EP_STATE_CONNECT_FAILED: case EP_STATE_CONNECT_COMPL: case EP_STATE_ULP_UPDATE_START: case EP_STATE_ULP_UPDATE_COMPL: -- cgit v1.2.3 From ee15bd2da6a7c671e1b0095a3f128be9366b4db9 Mon Sep 17 00:00:00 2001 From: Eddie Wai Date: Wed, 16 Feb 2011 15:04:26 -0600 Subject: [SCSI] bnx2i: Fixed the 32-bit swapping of the LUN field for nopouts for 5771X Fixed a bug where the 64-bit LUN field for nopouts were 32-bit swapped. This only pertains to 5771X devices. 
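The fix below applies the word swap to the driver's WQE copy of the LUN rather than to the iSCSI header owned by the midlayer, which the previous code was modifying in place. A standalone sketch of the swap itself, with made-up LUN bytes:

    #include <stdio.h>
    #include <string.h>
    #include <inttypes.h>

    /*
     * Illustrative only: the 57710-family firmware expects the two 32-bit
     * words of the 8-byte LUN swapped in the nopout WQE.  The swap is done
     * on the WQE copy so the original iSCSI header is left untouched.
     */
    int main(void)
    {
        uint8_t hdr_lun[8] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 };
        uint32_t wqe_lun[2];
        uint32_t tmp;

        memcpy(wqe_lun, hdr_lun, 8);    /* copy header LUN into the WQE */

        tmp = wqe_lun[0];               /* 57710 requires LUN words swapped */
        wqe_lun[0] = wqe_lun[1];
        wqe_lun[1] = tmp;

        printf("wqe lun words: %08" PRIx32 " %08" PRIx32 "\n",
               wqe_lun[0], wqe_lun[1]);
        return 0;
    }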
Signed-off-by: Eddie Wai Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/bnx2i/bnx2i_hwi.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c index 603db9d64956..c8eff57d3c00 100644 --- a/drivers/scsi/bnx2i/bnx2i_hwi.c +++ b/drivers/scsi/bnx2i/bnx2i_hwi.c @@ -498,10 +498,10 @@ int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn, memcpy(nopout_wqe->lun, nopout_hdr->lun, 8); if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) { - u32 tmp = nopout_hdr->lun[0]; + u32 tmp = nopout_wqe->lun[0]; /* 57710 requires LUN field to be swapped */ - nopout_hdr->lun[0] = nopout_hdr->lun[1]; - nopout_hdr->lun[1] = tmp; + nopout_wqe->lun[0] = nopout_wqe->lun[1]; + nopout_wqe->lun[1] = tmp; } nopout_wqe->itt = ((u16)task->itt | -- cgit v1.2.3 From a977d2c9dc463ab080e0aa8ebe563bdc414ee44f Mon Sep 17 00:00:00 2001 From: Eddie Wai Date: Wed, 16 Feb 2011 15:04:27 -0600 Subject: [SCSI] bnx2i: Added handling for unsupported iSCSI offload hba The hba will now be unregistered and freed when iSCSI offload is not supported by the NIC. Signed-off-by: Eddie Wai Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/bnx2i/bnx2i_init.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'drivers') diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c index 72a7b2d4a439..6afaa34222af 100644 --- a/drivers/scsi/bnx2i/bnx2i_init.c +++ b/drivers/scsi/bnx2i/bnx2i_init.c @@ -161,6 +161,21 @@ void bnx2i_start(void *handle) struct bnx2i_hba *hba = handle; int i = HZ; + if (!hba->cnic->max_iscsi_conn) { + printk(KERN_ALERT "bnx2i: dev %s does not support " + "iSCSI\n", hba->netdev->name); + + if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { + mutex_lock(&bnx2i_dev_lock); + list_del_init(&hba->link); + adapter_count--; + hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI); + clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); + mutex_unlock(&bnx2i_dev_lock); + bnx2i_free_hba(hba); + } + return; + } bnx2i_send_fw_iscsi_init_msg(hba); while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--) msleep(BNX2I_INIT_POLL_TIME); -- cgit v1.2.3 From 8a4a0f3ad071e258a9637c5491c34005a9a97903 Mon Sep 17 00:00:00 2001 From: Eddie Wai Date: Wed, 16 Feb 2011 15:04:28 -0600 Subject: [SCSI] bnx2i: Added support for the 57712(E) devices Moved all PCI_DEVICE_ID_NX2_57712(E) definitions to pci_ids.h Signed-off-by: Eddie Wai Acked-by: Michael Chan Acked-by: Eilon Greenstein Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/net/bnx2x/bnx2x_main.c | 7 ------- drivers/scsi/bnx2i/bnx2i_init.c | 10 ++++++---- 2 files changed, 6 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c index 8cdcf5b39d1e..9791dc53375d 100644 --- a/drivers/net/bnx2x/bnx2x_main.c +++ b/drivers/net/bnx2x/bnx2x_main.c @@ -145,13 +145,6 @@ static struct { { "Broadcom NetXtreme II BCM57712E XGb" } }; -#ifndef PCI_DEVICE_ID_NX2_57712 -#define PCI_DEVICE_ID_NX2_57712 0x1662 -#endif -#ifndef PCI_DEVICE_ID_NX2_57712E -#define PCI_DEVICE_ID_NX2_57712E 0x1663 -#endif - static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = { { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 }, diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c index 6afaa34222af..5ef01c98f48e 100644 --- 
a/drivers/scsi/bnx2i/bnx2i_init.c +++ b/drivers/scsi/bnx2i/bnx2i_init.c @@ -29,7 +29,7 @@ static char version[] __devinitdata = MODULE_AUTHOR("Anil Veerabhadrappa and " "Eddie Wai "); -MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/57710/57711" +MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/57710/57711/57712" " iSCSI Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); @@ -88,9 +88,11 @@ void bnx2i_identify_device(struct bnx2i_hba *hba) (hba->pci_did == PCI_DEVICE_ID_NX2_5709S)) { set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type); hba->mail_queue_access = BNX2I_MQ_BIN_MODE; - } else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 || - hba->pci_did == PCI_DEVICE_ID_NX2_57711 || - hba->pci_did == PCI_DEVICE_ID_NX2_57711E) + } else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 || + hba->pci_did == PCI_DEVICE_ID_NX2_57711 || + hba->pci_did == PCI_DEVICE_ID_NX2_57711E || + hba->pci_did == PCI_DEVICE_ID_NX2_57712 || + hba->pci_did == PCI_DEVICE_ID_NX2_57712E) set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type); else printk(KERN_ALERT "bnx2i: unknown device, 0x%x\n", -- cgit v1.2.3 From 45188354ebd15be7fb15d2b144a1a0ecd2e797a9 Mon Sep 17 00:00:00 2001 From: Eddie Wai Date: Wed, 16 Feb 2011 15:04:29 -0600 Subject: [SCSI] bnx2i: Added jumbo MTU support for the no shost case For scenarios where the shost is not being passed to bnx2i for the iSCSI offload connection request, the code would consult the routing table to select the CNIC device. This code path will erroneously error out if the corresponding L2 interface's MTU has been setup to > 1500. Signed-off-by: Eddie Wai Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/bnx2i/bnx2i.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h index e1ca5fe7e6bb..275685392e01 100644 --- a/drivers/scsi/bnx2i/bnx2i.h +++ b/drivers/scsi/bnx2i/bnx2i.h @@ -360,7 +360,7 @@ struct bnx2i_hba { #define ADAPTER_STATE_LINK_DOWN 2 #define ADAPTER_STATE_INIT_FAILED 31 unsigned int mtu_supported; - #define BNX2I_MAX_MTU_SUPPORTED 1500 + #define BNX2I_MAX_MTU_SUPPORTED 9000 struct Scsi_Host *shost; -- cgit v1.2.3 From 09813ba5bc1a09e39402d66c1671715af0124942 Mon Sep 17 00:00:00 2001 From: Eddie Wai Date: Wed, 16 Feb 2011 15:04:30 -0600 Subject: [SCSI] bnx2i: Added iSCSI text pdu support for iSCSI offload This is part of an effort to support send target discovery via the iSCSI offload path. 
Signed-off-by: Eddie Wai Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/bnx2i/bnx2i.h | 2 + drivers/scsi/bnx2i/bnx2i_hwi.c | 116 +++++++++++++++++++++++++++++++++++++++ drivers/scsi/bnx2i/bnx2i_iscsi.c | 6 +- 3 files changed, 123 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h index 275685392e01..cfd59023227b 100644 --- a/drivers/scsi/bnx2i/bnx2i.h +++ b/drivers/scsi/bnx2i/bnx2i.h @@ -751,6 +751,8 @@ extern int bnx2i_send_iscsi_login(struct bnx2i_conn *conn, struct iscsi_task *mtask); extern int bnx2i_send_iscsi_tmf(struct bnx2i_conn *conn, struct iscsi_task *mtask); +extern int bnx2i_send_iscsi_text(struct bnx2i_conn *conn, + struct iscsi_task *mtask); extern int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *conn, struct bnx2i_cmd *cmnd); extern int bnx2i_send_iscsi_nopout(struct bnx2i_conn *conn, diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c index c8eff57d3c00..1da34c019b8a 100644 --- a/drivers/scsi/bnx2i/bnx2i_hwi.c +++ b/drivers/scsi/bnx2i/bnx2i_hwi.c @@ -444,6 +444,56 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn, return 0; } +/** + * bnx2i_send_iscsi_text - post iSCSI text WQE to hardware + * @conn: iscsi connection + * @mtask: driver command structure which is requesting + * a WQE to sent to chip for further processing + * + * prepare and post an iSCSI Text request WQE to CNIC firmware + */ +int bnx2i_send_iscsi_text(struct bnx2i_conn *bnx2i_conn, + struct iscsi_task *mtask) +{ + struct bnx2i_cmd *bnx2i_cmd; + struct bnx2i_text_request *text_wqe; + struct iscsi_text *text_hdr; + u32 dword; + + bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data; + text_hdr = (struct iscsi_text *)mtask->hdr; + text_wqe = (struct bnx2i_text_request *) bnx2i_conn->ep->qp.sq_prod_qe; + + memset(text_wqe, 0, sizeof(struct bnx2i_text_request)); + + text_wqe->op_code = text_hdr->opcode; + text_wqe->op_attr = text_hdr->flags; + text_wqe->data_length = ntoh24(text_hdr->dlength); + text_wqe->itt = mtask->itt | + (ISCSI_TASK_TYPE_MPATH << ISCSI_TEXT_REQUEST_TYPE_SHIFT); + text_wqe->ttt = be32_to_cpu(text_hdr->ttt); + + text_wqe->cmd_sn = be32_to_cpu(text_hdr->cmdsn); + + text_wqe->resp_bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_bd_dma; + text_wqe->resp_bd_list_addr_hi = + (u32) ((u64) bnx2i_conn->gen_pdu.resp_bd_dma >> 32); + + dword = ((1 << ISCSI_TEXT_REQUEST_NUM_RESP_BDS_SHIFT) | + (bnx2i_conn->gen_pdu.resp_buf_size << + ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH_SHIFT)); + text_wqe->resp_buffer = dword; + text_wqe->bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.req_bd_dma; + text_wqe->bd_list_addr_hi = + (u32) ((u64) bnx2i_conn->gen_pdu.req_bd_dma >> 32); + text_wqe->num_bds = 1; + text_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */ + + bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1); + return 0; +} + + /** * bnx2i_send_iscsi_scsicmd - post iSCSI scsicmd request WQE to hardware * @conn: iscsi connection @@ -1428,6 +1478,68 @@ done: return 0; } + +/** + * bnx2i_process_text_resp - this function handles iscsi text response + * @session: iscsi session pointer + * @bnx2i_conn: iscsi connection pointer + * @cqe: pointer to newly DMA'ed CQE entry for processing + * + * process iSCSI Text Response CQE& complete it to open-iscsi user daemon + */ +static int bnx2i_process_text_resp(struct iscsi_session *session, + struct bnx2i_conn *bnx2i_conn, + struct cqe *cqe) +{ + struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; + struct iscsi_task *task; + struct 
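One small but easy-to-miss detail in the response handling added below: the text response payload is zero-padded to a 4-byte boundary before it is completed to open-iscsi. A standalone sketch of that padding arithmetic, with arbitrary example payload lengths:

    #include <stdio.h>

    int main(void)
    {
        int lengths[] = { 48, 157, 158, 159, 160 };
        int i;

        for (i = 0; i < 5; i++) {
            int pld_len = lengths[i];
            int pad_len = 0;

            /* iSCSI data segments are 4-byte aligned; the handler
             * zero-fills the remainder when the length is not a
             * multiple of four */
            if (pld_len & 0x3)
                pad_len = 4 - (pld_len % 4);
            printf("payload %3d bytes -> %d pad byte(s)\n", pld_len, pad_len);
        }
        return 0;
    }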
bnx2i_text_response *text; + struct iscsi_text_rsp *resp_hdr; + int pld_len; + int pad_len; + + text = (struct bnx2i_text_response *) cqe; + spin_lock(&session->lock); + task = iscsi_itt_to_task(conn, text->itt & ISCSI_LOGIN_RESPONSE_INDEX); + if (!task) + goto done; + + resp_hdr = (struct iscsi_text_rsp *)&bnx2i_conn->gen_pdu.resp_hdr; + memset(resp_hdr, 0, sizeof(struct iscsi_hdr)); + resp_hdr->opcode = text->op_code; + resp_hdr->flags = text->response_flags; + resp_hdr->hlength = 0; + + hton24(resp_hdr->dlength, text->data_length); + resp_hdr->itt = task->hdr->itt; + resp_hdr->ttt = cpu_to_be32(text->ttt); + resp_hdr->statsn = task->hdr->exp_statsn; + resp_hdr->exp_cmdsn = cpu_to_be32(text->exp_cmd_sn); + resp_hdr->max_cmdsn = cpu_to_be32(text->max_cmd_sn); + pld_len = text->data_length; + bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf + + pld_len; + pad_len = 0; + if (pld_len & 0x3) + pad_len = 4 - (pld_len % 4); + + if (pad_len) { + int i = 0; + for (i = 0; i < pad_len; i++) { + bnx2i_conn->gen_pdu.resp_wr_ptr[0] = 0; + bnx2i_conn->gen_pdu.resp_wr_ptr++; + } + } + __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, + bnx2i_conn->gen_pdu.resp_buf, + bnx2i_conn->gen_pdu.resp_wr_ptr - + bnx2i_conn->gen_pdu.resp_buf); +done: + spin_unlock(&session->lock); + return 0; +} + + /** * bnx2i_process_tmf_resp - this function handles iscsi TMF response * @session: iscsi session pointer @@ -1769,6 +1881,10 @@ static void bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn) bnx2i_process_tmf_resp(session, bnx2i_conn, qp->cq_cons_qe); break; + case ISCSI_OP_TEXT_RSP: + bnx2i_process_text_resp(session, bnx2i_conn, + qp->cq_cons_qe); + break; case ISCSI_OP_LOGOUT_RSP: bnx2i_process_logout_resp(session, bnx2i_conn, qp->cq_cons_qe); diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index f9415aa35300..05bb8086069a 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c @@ -1092,6 +1092,9 @@ static int bnx2i_iscsi_send_generic_request(struct iscsi_task *task) case ISCSI_OP_SCSI_TMFUNC: rc = bnx2i_send_iscsi_tmf(bnx2i_conn, task); break; + case ISCSI_OP_TEXT: + rc = bnx2i_send_iscsi_text(bnx2i_conn, task); + break; default: iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data, "send_gen: unsupported op 0x%x\n", @@ -2167,7 +2170,8 @@ struct iscsi_transport bnx2i_iscsi_transport = { .name = "bnx2i", .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_MULTI_R2T | CAP_DATADGST | - CAP_DATA_PATH_OFFLOAD, + CAP_DATA_PATH_OFFLOAD | + CAP_TEXT_NEGO, .param_mask = ISCSI_MAX_RECV_DLENGTH | ISCSI_MAX_XMIT_DLENGTH | ISCSI_HDRDGST_EN | -- cgit v1.2.3 From fc336387dbf02bd89cc5ea38da07e4058cfb4de0 Mon Sep 17 00:00:00 2001 From: Eddie Wai Date: Wed, 16 Feb 2011 15:04:31 -0600 Subject: [SCSI] bnx2i: Updated to version 2.6.2.3 Signed-off-by: Eddie Wai Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/bnx2i/bnx2i_init.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c index 5ef01c98f48e..1d24a2819736 100644 --- a/drivers/scsi/bnx2i/bnx2i_init.c +++ b/drivers/scsi/bnx2i/bnx2i_init.c @@ -18,8 +18,8 @@ static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list); static u32 adapter_count; #define DRV_MODULE_NAME "bnx2i" -#define DRV_MODULE_VERSION "2.6.2.2" -#define DRV_MODULE_RELDATE "Nov 23, 2010" +#define DRV_MODULE_VERSION "2.6.2.3" +#define DRV_MODULE_RELDATE "Dec 31, 2010" static char version[] 
__devinitdata = "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \ -- cgit v1.2.3 From fdafd4dfc7bbdd40a4692192b77299b28c8a948f Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 16 Feb 2011 15:04:32 -0600 Subject: [SCSI] cxgbi: enable TEXT PDU support cxgb3i and cxgb4i support TEXT PDU offloading, so set the bits to enable it. Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/cxgbi/cxgb3i/cxgb3i.c | 2 +- drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c index e2362b97f329..69a6769c633e 100644 --- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c @@ -105,7 +105,7 @@ static struct iscsi_transport cxgb3i_iscsi_transport = { /* owner and name should be set already */ .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST | CAP_DATADGST | CAP_DIGEST_OFFLOAD | - CAP_PADDING_OFFLOAD, + CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO, .param_mask = ISCSI_MAX_RECV_DLENGTH | ISCSI_MAX_XMIT_DLENGTH | ISCSI_HDRDGST_EN | ISCSI_DATADGST_EN | ISCSI_INITIAL_R2T_EN | ISCSI_MAX_R2T | diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index 5b1f0785dafd..719aa71f5b10 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -106,7 +106,7 @@ static struct iscsi_transport cxgb4i_iscsi_transport = { .name = DRV_MODULE_NAME, .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST | CAP_DATADGST | CAP_DIGEST_OFFLOAD | - CAP_PADDING_OFFLOAD, + CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO, .param_mask = ISCSI_MAX_RECV_DLENGTH | ISCSI_MAX_XMIT_DLENGTH | ISCSI_HDRDGST_EN | ISCSI_DATADGST_EN | ISCSI_INITIAL_R2T_EN | ISCSI_MAX_R2T | -- cgit v1.2.3 From 22a39fbbfecfea703b686a4626a631d706ccb3ee Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 16 Feb 2011 15:04:33 -0600 Subject: [SCSI] iscsi: fix iscsi_endpoint leak When iscsid restarts it does not know the connection's endpoint, so it is getting leaked. This fixes the problem by having the iscsi class force a disconnect before a new connection is bound. 
Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/scsi_transport_iscsi.c | 63 +++++++++++++++++++++++++++++-------- 1 file changed, 50 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index f905ecb5704d..4e09b68a0789 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -954,6 +954,7 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid) if (dd_size) conn->dd_data = &conn[1]; + mutex_init(&conn->ep_mutex); INIT_LIST_HEAD(&conn->conn_list); conn->transport = transport; conn->cid = cid; @@ -1430,6 +1431,29 @@ release_host: return err; } +static int iscsi_if_ep_disconnect(struct iscsi_transport *transport, + u64 ep_handle) +{ + struct iscsi_cls_conn *conn; + struct iscsi_endpoint *ep; + + if (!transport->ep_disconnect) + return -EINVAL; + + ep = iscsi_lookup_endpoint(ep_handle); + if (!ep) + return -EINVAL; + conn = ep->conn; + if (conn) { + mutex_lock(&conn->ep_mutex); + conn->ep = NULL; + mutex_unlock(&conn->ep_mutex); + } + + transport->ep_disconnect(ep); + return 0; +} + static int iscsi_if_transport_ep(struct iscsi_transport *transport, struct iscsi_uevent *ev, int msg_type) @@ -1454,14 +1478,8 @@ iscsi_if_transport_ep(struct iscsi_transport *transport, ev->u.ep_poll.timeout_ms); break; case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT: - if (!transport->ep_disconnect) - return -EINVAL; - - ep = iscsi_lookup_endpoint(ev->u.ep_disconnect.ep_handle); - if (!ep) - return -EINVAL; - - transport->ep_disconnect(ep); + rc = iscsi_if_ep_disconnect(transport, + ev->u.ep_disconnect.ep_handle); break; } return rc; @@ -1609,12 +1627,31 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) session = iscsi_session_lookup(ev->u.b_conn.sid); conn = iscsi_conn_lookup(ev->u.b_conn.sid, ev->u.b_conn.cid); - if (session && conn) - ev->r.retcode = transport->bind_conn(session, conn, - ev->u.b_conn.transport_eph, - ev->u.b_conn.is_leading); - else + if (conn && conn->ep) + iscsi_if_ep_disconnect(transport, conn->ep->id); + + if (!session || !conn) { err = -EINVAL; + break; + } + + ev->r.retcode = transport->bind_conn(session, conn, + ev->u.b_conn.transport_eph, + ev->u.b_conn.is_leading); + if (ev->r.retcode || !transport->ep_connect) + break; + + ep = iscsi_lookup_endpoint(ev->u.b_conn.transport_eph); + if (ep) { + ep->conn = conn; + + mutex_lock(&conn->ep_mutex); + conn->ep = ep; + mutex_unlock(&conn->ep_mutex); + } else + iscsi_cls_conn_printk(KERN_ERR, conn, + "Could not set ep conn " + "binding\n"); break; case ISCSI_UEVENT_SET_PARAM: err = iscsi_set_param(transport, ev); -- cgit v1.2.3 From bbc5261b2cb5e69754c935ea2466fb22775f0e48 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 16 Feb 2011 15:04:34 -0600 Subject: [SCSI] iscsi class: remove unused active variable The active variable on the iscsi_cls_conn is not used so this patch removes it. 
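A toy user-space model of the ordering the patch enforces; the structures here are stand-ins for the class objects, not the kernel API. The point is simply that an endpoint still attached to the connection from before the daemon restart is disconnected, and its offload resources released, before the new endpoint is bound:

    #include <stdio.h>
    #include <stdlib.h>

    struct ep   { int id; };
    struct conn { struct ep *ep; };

    static void ep_disconnect(struct ep *ep)
    {
        printf("disconnect ep %d (offload resources released)\n", ep->id);
        free(ep);
    }

    /* Bind a new endpoint; a stale endpoint left over from a previous
     * iscsid instance is disconnected first instead of being leaked. */
    static int bind_conn(struct conn *conn, int new_ep_id)
    {
        struct ep *ep;

        if (conn->ep) {
            ep_disconnect(conn->ep);
            conn->ep = NULL;
        }

        ep = malloc(sizeof(*ep));
        if (!ep)
            return -1;
        ep->id = new_ep_id;
        conn->ep = ep;
        printf("bound ep %d\n", new_ep_id);
        return 0;
    }

    int main(void)
    {
        struct conn conn = { .ep = NULL };

        bind_conn(&conn, 1);    /* initial login */
        bind_conn(&conn, 2);    /* iscsid restarted and binds again */
        if (conn.ep)
            ep_disconnect(conn.ep);
        return 0;
    }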
Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/scsi_transport_iscsi.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 4e09b68a0789..a631e58894f1 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -976,7 +976,6 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid) spin_lock_irqsave(&connlock, flags); list_add(&conn->conn_list, &connlist); - conn->active = 1; spin_unlock_irqrestore(&connlock, flags); ISCSI_DBG_TRANS_CONN(conn, "Completed conn creation\n"); @@ -1002,7 +1001,6 @@ int iscsi_destroy_conn(struct iscsi_cls_conn *conn) unsigned long flags; spin_lock_irqsave(&connlock, flags); - conn->active = 0; list_del(&conn->conn_list); spin_unlock_irqrestore(&connlock, flags); -- cgit v1.2.3 From 00f3708e6ed1698d6aee3901ea991197e31a8007 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 16 Feb 2011 15:04:35 -0600 Subject: [SCSI] libiscsi: add helper to convert addr to string This adds a helper to convert a addr struct to a string. This will be used by the drivers in the next patches. Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/libiscsi.c | 44 +++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 41 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index da8b61543ee4..0c550d5b9133 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -3352,6 +3352,47 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session, } EXPORT_SYMBOL_GPL(iscsi_session_get_param); +int iscsi_conn_get_addr_param(struct sockaddr_storage *addr, + enum iscsi_param param, char *buf) +{ + struct sockaddr_in6 *sin6 = NULL; + struct sockaddr_in *sin = NULL; + int len; + + switch (addr->ss_family) { + case AF_INET: + sin = (struct sockaddr_in *)addr; + break; + case AF_INET6: + sin6 = (struct sockaddr_in6 *)addr; + break; + default: + return -EINVAL; + } + + switch (param) { + case ISCSI_PARAM_CONN_ADDRESS: + case ISCSI_HOST_PARAM_IPADDRESS: + if (sin) + len = sprintf(buf, "%pI4\n", &sin->sin_addr.s_addr); + else + len = sprintf(buf, "%pI6\n", &sin6->sin6_addr); + break; + case ISCSI_PARAM_CONN_PORT: + if (sin) + len = sprintf(buf, "%hu\n", be16_to_cpu(sin->sin_port)); + else + len = sprintf(buf, "%hu\n", + be16_to_cpu(sin6->sin6_port)); + break; + default: + return -EINVAL; + } + + return len; +} +EXPORT_SYMBOL_GPL(iscsi_conn_get_addr_param); + int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param, char *buf) { @@ -3416,9 +3457,6 @@ int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param, case ISCSI_HOST_PARAM_INITIATOR_NAME: len = sprintf(buf, "%s\n", ihost->initiatorname); break; - case ISCSI_HOST_PARAM_IPADDRESS: - len = sprintf(buf, "%s\n", ihost->local_address); - break; default: return -ENOSYS; } -- cgit v1.2.3 From a79af8a64d395bd89de8695a5ea5e1a7f01f02a8 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 16 Feb 2011 15:04:36 -0600 Subject: [SCSI] iscsi_tcp: use iscsi_conn_get_addr_param libiscsi function This has iscsi_tcp use the iscsi_conn_get_addr_param libiscsi function. It also drops the use of the libiscsi session portal buffers, so they can be removed in the next patches. Instead of copying the values at bind time we get them during get() time. 
If we are not connected userspace will now get -ENOTCONN, so it knows that connection is disconnected instead of a possible stale value. Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/iscsi_tcp.c | 134 +++++++++++++++++++++++------------------------ drivers/scsi/iscsi_tcp.h | 4 ++ 2 files changed, 71 insertions(+), 67 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index fec47de72535..a860452a8f71 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -608,54 +608,12 @@ static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) iscsi_sw_tcp_release_conn(conn); } -static int iscsi_sw_tcp_get_addr(struct iscsi_conn *conn, struct socket *sock, - char *buf, int *port, - int (*getname)(struct socket *, - struct sockaddr *, - int *addrlen)) -{ - struct sockaddr_storage *addr; - struct sockaddr_in6 *sin6; - struct sockaddr_in *sin; - int rc = 0, len; - - addr = kmalloc(sizeof(*addr), GFP_KERNEL); - if (!addr) - return -ENOMEM; - - if (getname(sock, (struct sockaddr *) addr, &len)) { - rc = -ENODEV; - goto free_addr; - } - - switch (addr->ss_family) { - case AF_INET: - sin = (struct sockaddr_in *)addr; - spin_lock_bh(&conn->session->lock); - sprintf(buf, "%pI4", &sin->sin_addr.s_addr); - *port = be16_to_cpu(sin->sin_port); - spin_unlock_bh(&conn->session->lock); - break; - case AF_INET6: - sin6 = (struct sockaddr_in6 *)addr; - spin_lock_bh(&conn->session->lock); - sprintf(buf, "%pI6", &sin6->sin6_addr); - *port = be16_to_cpu(sin6->sin6_port); - spin_unlock_bh(&conn->session->lock); - break; - } -free_addr: - kfree(addr); - return rc; -} - static int iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session, struct iscsi_cls_conn *cls_conn, uint64_t transport_eph, int is_leading) { - struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); - struct iscsi_host *ihost = shost_priv(shost); + struct iscsi_session *session = cls_session->dd_data; struct iscsi_conn *conn = cls_conn->dd_data; struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; @@ -670,27 +628,15 @@ iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session, "sockfd_lookup failed %d\n", err); return -EEXIST; } - /* - * copy these values now because if we drop the session - * userspace may still want to query the values since we will - * be using them for the reconnect - */ - err = iscsi_sw_tcp_get_addr(conn, sock, conn->portal_address, - &conn->portal_port, kernel_getpeername); - if (err) - goto free_socket; - - err = iscsi_sw_tcp_get_addr(conn, sock, ihost->local_address, - &ihost->local_port, kernel_getsockname); - if (err) - goto free_socket; err = iscsi_conn_bind(cls_session, cls_conn, is_leading); if (err) goto free_socket; + spin_lock_bh(&session->lock); /* bind iSCSI connection and socket */ tcp_sw_conn->sock = sock; + spin_unlock_bh(&session->lock); /* setup Socket parameters */ sk = sock->sk; @@ -752,24 +698,74 @@ static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param, char *buf) { struct iscsi_conn *conn = cls_conn->dd_data; - int len; + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; + struct sockaddr_in6 addr; + int rc, len; switch(param) { case ISCSI_PARAM_CONN_PORT: - spin_lock_bh(&conn->session->lock); - len = sprintf(buf, "%hu\n", conn->portal_port); - spin_unlock_bh(&conn->session->lock); - break; case ISCSI_PARAM_CONN_ADDRESS: 
spin_lock_bh(&conn->session->lock); - len = sprintf(buf, "%s\n", conn->portal_address); + if (!tcp_sw_conn || !tcp_sw_conn->sock) { + spin_unlock_bh(&conn->session->lock); + return -ENOTCONN; + } + rc = kernel_getpeername(tcp_sw_conn->sock, + (struct sockaddr *)&addr, &len); spin_unlock_bh(&conn->session->lock); - break; + if (rc) + return rc; + + return iscsi_conn_get_addr_param((struct sockaddr_storage *) + &addr, param, buf); default: return iscsi_conn_get_param(cls_conn, param, buf); } - return len; + return 0; +} + +static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost, + enum iscsi_host_param param, char *buf) +{ + struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(shost); + struct iscsi_session *session = tcp_sw_host->session; + struct iscsi_conn *conn; + struct iscsi_tcp_conn *tcp_conn; + struct iscsi_sw_tcp_conn *tcp_sw_conn; + struct sockaddr_in6 addr; + int rc, len; + + switch (param) { + case ISCSI_HOST_PARAM_IPADDRESS: + spin_lock_bh(&session->lock); + conn = session->leadconn; + if (!conn) { + spin_unlock_bh(&session->lock); + return -ENOTCONN; + } + tcp_conn = conn->dd_data; + + tcp_sw_conn = tcp_conn->dd_data; + if (!tcp_sw_conn->sock) { + spin_unlock_bh(&session->lock); + return -ENOTCONN; + } + + rc = kernel_getsockname(tcp_sw_conn->sock, + (struct sockaddr *)&addr, &len); + spin_unlock_bh(&session->lock); + if (rc) + return rc; + + return iscsi_conn_get_addr_param((struct sockaddr_storage *) + &addr, param, buf); + default: + return iscsi_host_get_param(shost, param, buf); + } + + return 0; } static void @@ -797,6 +793,7 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max, { struct iscsi_cls_session *cls_session; struct iscsi_session *session; + struct iscsi_sw_tcp_host *tcp_sw_host; struct Scsi_Host *shost; if (ep) { @@ -804,7 +801,8 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max, return NULL; } - shost = iscsi_host_alloc(&iscsi_sw_tcp_sht, 0, 1); + shost = iscsi_host_alloc(&iscsi_sw_tcp_sht, + sizeof(struct iscsi_sw_tcp_host), 1); if (!shost) return NULL; shost->transportt = iscsi_sw_tcp_scsi_transport; @@ -825,6 +823,8 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max, if (!cls_session) goto remove_host; session = cls_session->dd_data; + tcp_sw_host = iscsi_host_priv(shost); + tcp_sw_host->session = session; shost->can_queue = session->scsi_cmds_max; if (iscsi_tcp_r2tpool_alloc(session)) @@ -929,7 +929,7 @@ static struct iscsi_transport iscsi_sw_tcp_transport = { .start_conn = iscsi_conn_start, .stop_conn = iscsi_sw_tcp_conn_stop, /* iscsi host params */ - .get_host_param = iscsi_host_get_param, + .get_host_param = iscsi_sw_tcp_host_get_param, .set_host_param = iscsi_host_set_param, /* IO */ .send_pdu = iscsi_conn_send_pdu, diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h index 94644bad0ed7..666fe09378fa 100644 --- a/drivers/scsi/iscsi_tcp.h +++ b/drivers/scsi/iscsi_tcp.h @@ -55,6 +55,10 @@ struct iscsi_sw_tcp_conn { ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int); }; +struct iscsi_sw_tcp_host { + struct iscsi_session *session; +}; + struct iscsi_sw_tcp_hdrbuf { struct iscsi_hdr hdrbuf; char hdrextbuf[ISCSI_MAX_AHS_SIZE + -- cgit v1.2.3 From 289324b0c6007171d67bf1ab0827355ae3374773 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 16 Feb 2011 15:04:37 -0600 Subject: [SCSI] iscsi class: add callout to get iscsi_endpoint values For drivers using the ep callbacks the addr and port are attached to the endpoint instead of the conn. 
This adds a callout to the iscsi_transport to get ep values. It also adds locking around the get param call to make sure that ep_disconnect does not free the LLD's ep interconnect structs from under us (the ep has a refcount so it will not go away but the LLD may have structs from other subsystems that are not allocated in the ep so we need to protect them from getting freed). Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/scsi_transport_iscsi.c | 39 +++++++++++++++++++++++++++++++++++-- 1 file changed, 37 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index a631e58894f1..b4218390941e 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -1782,13 +1782,48 @@ iscsi_conn_attr(data_digest, ISCSI_PARAM_DATADGST_EN); iscsi_conn_attr(ifmarker, ISCSI_PARAM_IFMARKER_EN); iscsi_conn_attr(ofmarker, ISCSI_PARAM_OFMARKER_EN); iscsi_conn_attr(persistent_port, ISCSI_PARAM_PERSISTENT_PORT); -iscsi_conn_attr(port, ISCSI_PARAM_CONN_PORT); iscsi_conn_attr(exp_statsn, ISCSI_PARAM_EXP_STATSN); iscsi_conn_attr(persistent_address, ISCSI_PARAM_PERSISTENT_ADDRESS); -iscsi_conn_attr(address, ISCSI_PARAM_CONN_ADDRESS); iscsi_conn_attr(ping_tmo, ISCSI_PARAM_PING_TMO); iscsi_conn_attr(recv_tmo, ISCSI_PARAM_RECV_TMO); +#define iscsi_conn_ep_attr_show(param) \ +static ssize_t show_conn_ep_param_##param(struct device *dev, \ + struct device_attribute *attr,\ + char *buf) \ +{ \ + struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent); \ + struct iscsi_transport *t = conn->transport; \ + struct iscsi_endpoint *ep; \ + ssize_t rc; \ + \ + /* \ + * Need to make sure ep_disconnect does not free the LLD's \ + * interconnect resources while we are trying to read them. \ + */ \ + mutex_lock(&conn->ep_mutex); \ + ep = conn->ep; \ + if (!ep && t->ep_connect) { \ + mutex_unlock(&conn->ep_mutex); \ + return -ENOTCONN; \ + } \ + \ + if (ep) \ + rc = t->get_ep_param(ep, param, buf); \ + else \ + rc = t->get_conn_param(conn, param, buf); \ + mutex_unlock(&conn->ep_mutex); \ + return rc; \ +} + +#define iscsi_conn_ep_attr(field, param) \ + iscsi_conn_ep_attr_show(param) \ +static ISCSI_CLASS_ATTR(conn, field, S_IRUGO, \ + show_conn_ep_param_##param, NULL); + +iscsi_conn_ep_attr(address, ISCSI_PARAM_CONN_ADDRESS); +iscsi_conn_ep_attr(port, ISCSI_PARAM_CONN_PORT); + /* * iSCSI session attrs */ -- cgit v1.2.3 From c71b9b669e1243623f7ed4332877d3f2beafc6ab Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 16 Feb 2011 15:04:38 -0600 Subject: [SCSI] cxgbi: convert to use iscsi_conn_get_addr_param This has cxgbi use the iscsi_conn_get_addr_param helper and the get ep callback. 
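In outline, a converted driver ends up with an endpoint-based callout of roughly the following shape; this is a sketch only, with struct example_ep and its fields (connected, dst_addr as a struct sockaddr_storage) standing in for the LLD's real endpoint private data. The cxgbi specifics are in the diff below.

/* Sketch: report address/port from the endpoint rather than the conn. */
static int example_ep_get_param(struct iscsi_endpoint *ep,
                                enum iscsi_param param, char *buf)
{
        struct example_ep *eep = ep->dd_data;   /* LLD endpoint data */

        switch (param) {
        case ISCSI_PARAM_CONN_ADDRESS:
        case ISCSI_PARAM_CONN_PORT:
                if (!eep || !eep->connected)    /* hypothetical state flag */
                        return -ENOTCONN;
                return iscsi_conn_get_addr_param(&eep->dst_addr, param, buf);
        default:
                return -ENOSYS;
        }
}

The transport template then points .get_ep_param at this callout and .get_conn_param back at the generic iscsi_conn_get_param().
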
Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/cxgbi/cxgb3i/cxgb3i.c | 3 ++- drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 3 ++- drivers/scsi/cxgbi/libcxgbi.c | 36 +++++++++++++++++------------------- drivers/scsi/cxgbi/libcxgbi.h | 2 +- 4 files changed, 22 insertions(+), 22 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c index 69a6769c633e..fc2cdb62f53b 100644 --- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c @@ -137,7 +137,7 @@ static struct iscsi_transport cxgb3i_iscsi_transport = { .destroy_conn = iscsi_tcp_conn_teardown, .start_conn = iscsi_conn_start, .stop_conn = iscsi_conn_stop, - .get_conn_param = cxgbi_get_conn_param, + .get_conn_param = iscsi_conn_get_param, .set_param = cxgbi_set_conn_param, .get_stats = cxgbi_get_conn_stats, /* pdu xmit req from user space */ @@ -152,6 +152,7 @@ static struct iscsi_transport cxgb3i_iscsi_transport = { .xmit_pdu = cxgbi_conn_xmit_pdu, .parse_pdu_itt = cxgbi_parse_pdu_itt, /* TCP connect/disconnect */ + .get_ep_param = cxgbi_get_ep_param, .ep_connect = cxgbi_ep_connect, .ep_poll = cxgbi_ep_poll, .ep_disconnect = cxgbi_ep_disconnect, diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index 719aa71f5b10..f3a4cd7cf782 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -138,7 +138,7 @@ static struct iscsi_transport cxgb4i_iscsi_transport = { .destroy_conn = iscsi_tcp_conn_teardown, .start_conn = iscsi_conn_start, .stop_conn = iscsi_conn_stop, - .get_conn_param = cxgbi_get_conn_param, + .get_conn_param = iscsi_conn_get_param, .set_param = cxgbi_set_conn_param, .get_stats = cxgbi_get_conn_stats, /* pdu xmit req from user space */ @@ -153,6 +153,7 @@ static struct iscsi_transport cxgb4i_iscsi_transport = { .xmit_pdu = cxgbi_conn_xmit_pdu, .parse_pdu_itt = cxgbi_parse_pdu_itt, /* TCP connect/disconnect */ + .get_ep_param = cxgbi_get_ep_param, .ep_connect = cxgbi_ep_connect, .ep_poll = cxgbi_ep_poll, .ep_disconnect = cxgbi_ep_disconnect, diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index b2acdef3dcb7..fedf1be278ff 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -543,6 +543,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr) csk->dst = dst; csk->daddr.sin_addr.s_addr = daddr->sin_addr.s_addr; csk->daddr.sin_port = daddr->sin_port; + csk->daddr.sin_family = daddr->sin_family; csk->saddr.sin_addr.s_addr = rt->rt_src; return csk; @@ -2200,32 +2201,34 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn, } EXPORT_SYMBOL_GPL(cxgbi_set_conn_param); -int cxgbi_get_conn_param(struct iscsi_cls_conn *cls_conn, - enum iscsi_param param, char *buf) +int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param param, + char *buf) { - struct iscsi_conn *iconn = cls_conn->dd_data; + struct cxgbi_endpoint *cep = ep->dd_data; + struct cxgbi_sock *csk; int len; log_debug(1 << CXGBI_DBG_ISCSI, - "cls_conn 0x%p, param %d.\n", cls_conn, param); + "cls_conn 0x%p, param %d.\n", ep, param); switch (param) { case ISCSI_PARAM_CONN_PORT: - spin_lock_bh(&iconn->session->lock); - len = sprintf(buf, "%hu\n", iconn->portal_port); - spin_unlock_bh(&iconn->session->lock); - break; case ISCSI_PARAM_CONN_ADDRESS: - spin_lock_bh(&iconn->session->lock); - len = sprintf(buf, "%s\n", iconn->portal_address); - spin_unlock_bh(&iconn->session->lock); - break; + if (!cep) + return -ENOTCONN; + + csk = 
cep->csk; + if (!csk) + return -ENOTCONN; + + return iscsi_conn_get_addr_param((struct sockaddr_storage *) + &csk->daddr, param, buf); default: - return iscsi_conn_get_param(cls_conn, param, buf); + return -ENOSYS; } return len; } -EXPORT_SYMBOL_GPL(cxgbi_get_conn_param); +EXPORT_SYMBOL_GPL(cxgbi_get_ep_param); struct iscsi_cls_conn * cxgbi_create_conn(struct iscsi_cls_session *cls_session, u32 cid) @@ -2292,11 +2295,6 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session, cxgbi_conn_max_xmit_dlength(conn); cxgbi_conn_max_recv_dlength(conn); - spin_lock_bh(&conn->session->lock); - sprintf(conn->portal_address, "%pI4", &csk->daddr.sin_addr.s_addr); - conn->portal_port = ntohs(csk->daddr.sin_port); - spin_unlock_bh(&conn->session->lock); - log_debug(1 << CXGBI_DBG_ISCSI, "cls 0x%p,0x%p, ep 0x%p, cconn 0x%p, csk 0x%p.\n", cls_session, cls_conn, ep, cconn, csk); diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h index 23cbc5854503..0a20fd5f7102 100644 --- a/drivers/scsi/cxgbi/libcxgbi.h +++ b/drivers/scsi/cxgbi/libcxgbi.h @@ -712,7 +712,7 @@ void cxgbi_cleanup_task(struct iscsi_task *task); void cxgbi_get_conn_stats(struct iscsi_cls_conn *, struct iscsi_stats *); int cxgbi_set_conn_param(struct iscsi_cls_conn *, enum iscsi_param, char *, int); -int cxgbi_get_conn_param(struct iscsi_cls_conn *, enum iscsi_param, char *); +int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param, char *); struct iscsi_cls_conn *cxgbi_create_conn(struct iscsi_cls_session *, u32); int cxgbi_bind_conn(struct iscsi_cls_session *, struct iscsi_cls_conn *, u64, int); -- cgit v1.2.3 From d8585bcd7da071f4278710f1c39e18dfe7cb0280 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 16 Feb 2011 15:04:39 -0600 Subject: [SCSI] bnx2i: fix null ptr ref in conn get param bnx2i has some checks to try and make sure the ep is not destroyed while the addr/port is getting read. However, if after this check: if (!(bnx2i_conn && bnx2i_conn->ep && bnx2i_conn->ep->hba)) goto out; bnx2i_conn->ep is cleared by ep_disconnect then we will oops. This patches fixes the problem by having the driver use the get_ep_param callback instead of get_conn_param. 
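Schematically, the bug is a classic check-then-use race, and the fix keeps the driver's own lock held across the dereference. The fragments below are illustrative only, not the literal driver code:

/* Before: nothing prevents ep_disconnect() from clearing
 * bnx2i_conn->ep between the check and the later dereferences. */
if (bnx2i_conn && bnx2i_conn->ep && bnx2i_conn->ep->hba)
        len = sprintf(buf, "%hu\n", bnx2i_conn->ep->cm_sk->dst_port);

/* After: the class layer hands over the endpoint itself, and the
 * socket pointer is only dereferenced under the hba mutex. */
mutex_lock(&hba->net_dev_lock);
if (bnx2i_ep->cm_sk)
        len = sprintf(buf, "%hu\n", bnx2i_ep->cm_sk->dst_port);
mutex_unlock(&hba->net_dev_lock);
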
Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/bnx2i/bnx2i_iscsi.c | 45 ++++++++++++++++++++-------------------- 1 file changed, 22 insertions(+), 23 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index 05bb8086069a..1809f9ccc4ce 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c @@ -1458,42 +1458,40 @@ static void bnx2i_conn_destroy(struct iscsi_cls_conn *cls_conn) /** - * bnx2i_conn_get_param - return iscsi connection parameter to caller - * @cls_conn: pointer to iscsi cls conn + * bnx2i_ep_get_param - return iscsi ep parameter to caller + * @ep: pointer to iscsi endpoint * @param: parameter type identifier * @buf: buffer pointer * - * returns iSCSI connection parameters + * returns iSCSI ep parameters */ -static int bnx2i_conn_get_param(struct iscsi_cls_conn *cls_conn, - enum iscsi_param param, char *buf) +static int bnx2i_ep_get_param(struct iscsi_endpoint *ep, + enum iscsi_param param, char *buf) { - struct iscsi_conn *conn = cls_conn->dd_data; - struct bnx2i_conn *bnx2i_conn = conn->dd_data; - int len = 0; + struct bnx2i_endpoint *bnx2i_ep = ep->dd_data; + struct bnx2i_hba *hba = bnx2i_ep->hba; + int len = -ENOTCONN; - if (!(bnx2i_conn && bnx2i_conn->ep && bnx2i_conn->ep->hba)) - goto out; + if (!hba) + return -ENOTCONN; switch (param) { case ISCSI_PARAM_CONN_PORT: - mutex_lock(&bnx2i_conn->ep->hba->net_dev_lock); - if (bnx2i_conn->ep->cm_sk) - len = sprintf(buf, "%hu\n", - bnx2i_conn->ep->cm_sk->dst_port); - mutex_unlock(&bnx2i_conn->ep->hba->net_dev_lock); + mutex_lock(&hba->net_dev_lock); + if (bnx2i_ep->cm_sk) + len = sprintf(buf, "%hu\n", bnx2i_ep->cm_sk->dst_port); + mutex_unlock(&hba->net_dev_lock); break; case ISCSI_PARAM_CONN_ADDRESS: - mutex_lock(&bnx2i_conn->ep->hba->net_dev_lock); - if (bnx2i_conn->ep->cm_sk) - len = sprintf(buf, "%pI4\n", - &bnx2i_conn->ep->cm_sk->dst_ip); - mutex_unlock(&bnx2i_conn->ep->hba->net_dev_lock); + mutex_lock(&hba->net_dev_lock); + if (bnx2i_ep->cm_sk) + len = sprintf(buf, "%pI4\n", &bnx2i_ep->cm_sk->dst_ip); + mutex_unlock(&hba->net_dev_lock); break; default: - return iscsi_conn_get_param(cls_conn, param, buf); + return -ENOSYS; } -out: + return len; } @@ -2204,7 +2202,7 @@ struct iscsi_transport bnx2i_iscsi_transport = { .bind_conn = bnx2i_conn_bind, .destroy_conn = bnx2i_conn_destroy, .set_param = iscsi_set_param, - .get_conn_param = bnx2i_conn_get_param, + .get_conn_param = iscsi_conn_get_param, .get_session_param = iscsi_session_get_param, .get_host_param = bnx2i_host_get_param, .start_conn = bnx2i_conn_start, @@ -2213,6 +2211,7 @@ struct iscsi_transport bnx2i_iscsi_transport = { .xmit_task = bnx2i_task_xmit, .get_stats = bnx2i_conn_get_stats, /* TCP connect - disconnect - option-2 interface calls */ + .get_ep_param = bnx2i_ep_get_param, .ep_connect = bnx2i_ep_connect, .ep_poll = bnx2i_ep_poll, .ep_disconnect = bnx2i_ep_disconnect, -- cgit v1.2.3 From 7c53c6f89d7a6487986c51cd73ae9a9be338a8f4 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 16 Feb 2011 15:04:40 -0600 Subject: [SCSI] iser: export addr and port This pactch has iser export the address and port of the endpoint. 
Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/infiniband/ulp/iser/iscsi_iser.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) (limited to 'drivers') diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 7b2fc98e2f2b..8db008de5392 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -532,6 +532,29 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s stats->custom[3].value = conn->fmr_unalign_cnt; } +static int iscsi_iser_get_ep_param(struct iscsi_endpoint *ep, + enum iscsi_param param, char *buf) +{ + struct iser_conn *ib_conn = ep->dd_data; + int len; + + switch (param) { + case ISCSI_PARAM_CONN_PORT: + case ISCSI_PARAM_CONN_ADDRESS: + if (!ib_conn || !ib_conn->cma_id) + return -ENOTCONN; + + return iscsi_conn_get_addr_param((struct sockaddr_storage *) + &ib_conn->cma_id->route.addr.dst_addr, + param, buf); + break; + default: + return -ENOSYS; + } + + return len; +} + static struct iscsi_endpoint * iscsi_iser_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, int non_blocking) @@ -637,6 +660,8 @@ static struct iscsi_transport iscsi_iser_transport = { ISCSI_MAX_BURST | ISCSI_PDU_INORDER_EN | ISCSI_DATASEQ_INORDER_EN | + ISCSI_CONN_PORT | + ISCSI_CONN_ADDRESS | ISCSI_EXP_STATSN | ISCSI_PERSISTENT_PORT | ISCSI_PERSISTENT_ADDRESS | @@ -659,6 +684,7 @@ static struct iscsi_transport iscsi_iser_transport = { .destroy_conn = iscsi_iser_conn_destroy, .set_param = iscsi_iser_set_param, .get_conn_param = iscsi_conn_get_param, + .get_ep_param = iscsi_iser_get_ep_param, .get_session_param = iscsi_session_get_param, .start_conn = iscsi_iser_conn_start, .stop_conn = iscsi_iser_conn_stop, -- cgit v1.2.3 From c7f7fd5b7ea114e0f85fc4f2a853f6564410588d Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 16 Feb 2011 15:04:41 -0600 Subject: [SCSI] be2iscsi: fix null ptr ref in conn get param The ep_disconnect function could be freeing the ep while beiscsi_conn_get_param is running. This has the driver use the get ep param callback instead of the get conn param to fix this. 
Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/be2iscsi/be_iscsi.c | 18 +++++------------- drivers/scsi/be2iscsi/be_iscsi.h | 4 ++-- drivers/scsi/be2iscsi/be_main.c | 3 ++- 3 files changed, 9 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c index eaaa8813067d..868cc5590145 100644 --- a/drivers/scsi/be2iscsi/be_iscsi.c +++ b/drivers/scsi/be2iscsi/be_iscsi.c @@ -210,28 +210,20 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session, } /** - * beiscsi_conn_get_param - get the iscsi parameter - * @cls_conn: pointer to iscsi cls conn + * beiscsi_ep_get_param - get the iscsi parameter + * @ep: pointer to iscsi ep * @param: parameter type identifier * @buf: buffer pointer * * returns iscsi parameter */ -int beiscsi_conn_get_param(struct iscsi_cls_conn *cls_conn, +int beiscsi_ep_get_param(struct iscsi_endpoint *ep, enum iscsi_param param, char *buf) { - struct beiscsi_endpoint *beiscsi_ep; - struct iscsi_conn *conn = cls_conn->dd_data; - struct beiscsi_conn *beiscsi_conn = conn->dd_data; + struct beiscsi_endpoint *beiscsi_ep = ep->dd_data; int len = 0; SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_get_param, param= %d\n", param); - beiscsi_ep = beiscsi_conn->ep; - if (!beiscsi_ep) { - SE_DEBUG(DBG_LVL_1, - "In beiscsi_conn_get_param , no beiscsi_ep\n"); - return -ENODEV; - } switch (param) { case ISCSI_PARAM_CONN_PORT: @@ -244,7 +236,7 @@ int beiscsi_conn_get_param(struct iscsi_cls_conn *cls_conn, len = sprintf(buf, "%pI6\n", &beiscsi_ep->dst6_addr); break; default: - return iscsi_conn_get_param(cls_conn, param, buf); + return -ENOSYS; } return len; } diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h index 8950a702b9f4..9c532797c29e 100644 --- a/drivers/scsi/be2iscsi/be_iscsi.h +++ b/drivers/scsi/be2iscsi/be_iscsi.h @@ -48,8 +48,8 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session, struct iscsi_cls_conn *cls_conn, uint64_t transport_fd, int is_leading); -int beiscsi_conn_get_param(struct iscsi_cls_conn *cls_conn, - enum iscsi_param param, char *buf); +int beiscsi_ep_get_param(struct iscsi_endpoint *ep, enum iscsi_param param, + char *buf); int beiscsi_get_host_param(struct Scsi_Host *shost, enum iscsi_host_param param, char *buf); diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 79cefbe31367..bd50145ab71b 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -4384,7 +4384,7 @@ struct iscsi_transport beiscsi_iscsi_transport = { .bind_conn = beiscsi_conn_bind, .destroy_conn = iscsi_conn_teardown, .set_param = beiscsi_set_param, - .get_conn_param = beiscsi_conn_get_param, + .get_conn_param = iscsi_conn_get_param, .get_session_param = iscsi_session_get_param, .get_host_param = beiscsi_get_host_param, .start_conn = beiscsi_conn_start, @@ -4395,6 +4395,7 @@ struct iscsi_transport beiscsi_iscsi_transport = { .alloc_pdu = beiscsi_alloc_pdu, .parse_pdu_itt = beiscsi_parse_pdu, .get_stats = beiscsi_conn_get_stats, + .get_ep_param = beiscsi_ep_get_param, .ep_connect = beiscsi_ep_connect, .ep_poll = beiscsi_ep_poll, .ep_disconnect = beiscsi_ep_disconnect, -- cgit v1.2.3 From 4e7d7af4dfea65b50b0fc694e3febc202e4e9839 Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Tue, 22 Feb 2011 19:54:38 +0100 Subject: [SCSI] zfcp: Remove redundant unlikely() IS_ERR() already implies unlikely(), so it can be omitted here. 
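The reason is visible in include/linux/err.h, which (approximately, for this kernel generation) defines:

#define MAX_ERRNO       4095

#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)

static inline long __must_check IS_ERR(const void *ptr)
{
        return IS_ERR_VALUE((unsigned long)ptr);
}

so wrapping IS_ERR() in another unlikely() only repeats the branch hint that is already part of IS_ERR_VALUE().
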
Signed-off-by: Tobias Klauser Signed-off-by: Christof Schmitt Signed-off-by: Steffen Maier Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_fsf.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 60ff9d172c79..14a427457fa1 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -1552,7 +1552,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port) SBAL_FLAGS0_TYPE_READ, qdio->adapter->pool.erp_req); - if (unlikely(IS_ERR(req))) { + if (IS_ERR(req)) { retval = PTR_ERR(req); goto out; } @@ -1605,7 +1605,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port) SBAL_FLAGS0_TYPE_READ, qdio->adapter->pool.erp_req); - if (unlikely(IS_ERR(req))) { + if (IS_ERR(req)) { retval = PTR_ERR(req); goto out; } -- cgit v1.2.3 From 7c35e77b96b2f0af8c278c13d484d42dad3c7422 Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 22 Feb 2011 19:54:39 +0100 Subject: [SCSI] zfcp: Remove unused flag ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT Signed-off-by: Christof Schmitt Signed-off-by: Steffen Maier Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_def.h | 1 - drivers/s390/scsi/zfcp_fsf.c | 1 - 2 files changed, 2 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 9ae1d0a6f627..89e43e172918 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -89,7 +89,6 @@ struct zfcp_reqlist; #define ZFCP_STATUS_LUN_READONLY 0x00000008 /* FSF request status (this does not have a common part) */ -#define ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT 0x00000002 #define ZFCP_STATUS_FSFREQ_ERROR 0x00000008 #define ZFCP_STATUS_FSFREQ_CLEANUP 0x00000010 #define ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED 0x00000040 diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 14a427457fa1..6efaea9207ca 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -2284,7 +2284,6 @@ struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd, goto out; } - req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT; req->data = scmnd; req->handler = zfcp_fsf_fcp_task_mgmt_handler; req->qtcb->header.lun_handle = zfcp_sdev->lun_handle; -- cgit v1.2.3 From c7b279ae51942c14529bf2806685e9c658f28611 Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 22 Feb 2011 19:54:40 +0100 Subject: [SCSI] zfcp: Replace kmem_cache for "status read" data zfcp requires a mempool for the status read data blocks to resubmit the "status read" requests at any time. Each status read data block has the size of a page (4096 bytes) and needs to be placed in one page. Instead of having a kmem_cache for allocating page sized chunks, use mempool_create_page_pool to create a mempool returning pages and remove the zfcp kmem_cache. 
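As a standalone sketch of the pattern (not the zfcp code itself; struct example_buf and the function names are stand-ins), a page-backed mempool is created and consumed like this:

#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/mm.h>

struct example_buf {            /* must fit into a single page */
        char data[512];
};

static mempool_t *example_pool;

static int example_setup(void)
{
        BUILD_BUG_ON(sizeof(struct example_buf) > PAGE_SIZE);
        /* order 0: every pool element is one page */
        example_pool = mempool_create_page_pool(4, 0);
        return example_pool ? 0 : -ENOMEM;
}

static struct example_buf *example_get(gfp_t gfp)
{
        struct page *page = mempool_alloc(example_pool, gfp);

        return page ? page_address(page) : NULL;
}

static void example_put(struct example_buf *buf)
{
        /* the pool deals in struct page, so convert back before freeing */
        mempool_free(virt_to_page(buf), example_pool);
}

The virt_to_page()/page_address() conversions are the price of dropping the dedicated kmem_cache, which is exactly what the hunks below do for the status read buffers.
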
Signed-off-by: Christof Schmitt Signed-off-by: Steffen Maier Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 20 ++++++-------------- drivers/s390/scsi/zfcp_def.h | 3 +-- drivers/s390/scsi/zfcp_erp.c | 2 +- drivers/s390/scsi/zfcp_fsf.c | 12 +++++++----- 4 files changed, 15 insertions(+), 22 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 51c666fb67a4..81e185602bb2 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -132,11 +132,6 @@ static int __init zfcp_module_init(void) if (!zfcp_data.qtcb_cache) goto out_qtcb_cache; - zfcp_data.sr_buffer_cache = zfcp_cache_hw_align("zfcp_sr", - sizeof(struct fsf_status_read_buffer)); - if (!zfcp_data.sr_buffer_cache) - goto out_sr_cache; - zfcp_data.gid_pn_cache = zfcp_cache_hw_align("zfcp_gid", sizeof(struct zfcp_fc_gid_pn)); if (!zfcp_data.gid_pn_cache) @@ -181,8 +176,6 @@ out_transport: out_adisc_cache: kmem_cache_destroy(zfcp_data.gid_pn_cache); out_gid_cache: - kmem_cache_destroy(zfcp_data.sr_buffer_cache); -out_sr_cache: kmem_cache_destroy(zfcp_data.qtcb_cache); out_qtcb_cache: kmem_cache_destroy(zfcp_data.gpn_ft_cache); @@ -199,7 +192,6 @@ static void __exit zfcp_module_exit(void) fc_release_transport(zfcp_data.scsi_transport_template); kmem_cache_destroy(zfcp_data.adisc_cache); kmem_cache_destroy(zfcp_data.gid_pn_cache); - kmem_cache_destroy(zfcp_data.sr_buffer_cache); kmem_cache_destroy(zfcp_data.qtcb_cache); kmem_cache_destroy(zfcp_data.gpn_ft_cache); } @@ -264,10 +256,10 @@ static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) if (!adapter->pool.qtcb_pool) return -ENOMEM; - adapter->pool.status_read_data = - mempool_create_slab_pool(FSF_STATUS_READS_RECOM, - zfcp_data.sr_buffer_cache); - if (!adapter->pool.status_read_data) + BUILD_BUG_ON(sizeof(struct fsf_status_read_buffer) > PAGE_SIZE); + adapter->pool.sr_data = + mempool_create_page_pool(FSF_STATUS_READS_RECOM, 0); + if (!adapter->pool.sr_data) return -ENOMEM; adapter->pool.gid_pn = @@ -290,8 +282,8 @@ static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter) mempool_destroy(adapter->pool.qtcb_pool); if (adapter->pool.status_read_req) mempool_destroy(adapter->pool.status_read_req); - if (adapter->pool.status_read_data) - mempool_destroy(adapter->pool.status_read_data); + if (adapter->pool.sr_data) + mempool_destroy(adapter->pool.sr_data); if (adapter->pool.gid_pn) mempool_destroy(adapter->pool.gid_pn); } diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 89e43e172918..93ce500f8978 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -107,7 +107,7 @@ struct zfcp_adapter_mempool { mempool_t *scsi_req; mempool_t *scsi_abort; mempool_t *status_read_req; - mempool_t *status_read_data; + mempool_t *sr_data; mempool_t *gid_pn; mempool_t *qtcb_pool; }; @@ -319,7 +319,6 @@ struct zfcp_data { struct scsi_transport_template *scsi_transport_template; struct kmem_cache *gpn_ft_cache; struct kmem_cache *qtcb_cache; - struct kmem_cache *sr_buffer_cache; struct kmem_cache *gid_pn_cache; struct kmem_cache *adisc_cache; }; diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index e003e306f870..6c1cddf0d0a0 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -732,7 +732,7 @@ static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *act) if (zfcp_erp_adapter_strategy_open_fsf_xport(act) == ZFCP_ERP_FAILED) return ZFCP_ERP_FAILED; - if 
(mempool_resize(act->adapter->pool.status_read_data, + if (mempool_resize(act->adapter->pool.sr_data, act->adapter->stat_read_buf_num, GFP_KERNEL)) return ZFCP_ERP_FAILED; diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 6efaea9207ca..a2b0e8435fc3 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -212,7 +212,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req) if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) { zfcp_dbf_hba_fsf_uss("fssrh_1", req); - mempool_free(sr_buf, adapter->pool.status_read_data); + mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data); zfcp_fsf_req_free(req); return; } @@ -265,7 +265,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req) break; } - mempool_free(sr_buf, adapter->pool.status_read_data); + mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data); zfcp_fsf_req_free(req); atomic_inc(&adapter->stat_miss); @@ -723,6 +723,7 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio) struct zfcp_adapter *adapter = qdio->adapter; struct zfcp_fsf_req *req; struct fsf_status_read_buffer *sr_buf; + struct page *page; int retval = -EIO; spin_lock_irq(&qdio->req_q_lock); @@ -736,11 +737,12 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio) goto out; } - sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC); - if (!sr_buf) { + page = mempool_alloc(adapter->pool.sr_data, GFP_ATOMIC); + if (!page) { retval = -ENOMEM; goto failed_buf; } + sr_buf = page_address(page); memset(sr_buf, 0, sizeof(*sr_buf)); req->data = sr_buf; @@ -755,7 +757,7 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio) failed_req_send: req->data = NULL; - mempool_free(sr_buf, adapter->pool.status_read_data); + mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data); failed_buf: zfcp_dbf_hba_fsf_uss("fssr__1", req); zfcp_fsf_req_free(req); -- cgit v1.2.3 From 087897e36982ef8536dc9c8baed159a31517b5e6 Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 22 Feb 2011 19:54:41 +0100 Subject: [SCSI] zfcp: Introduce new kmem_cache for FC request and response data A data buffer that is passed to the hardware must not cross a page boundary. zfcp uses a series of kmem_caches to align the data to not cross a page boundary. Introduce a new kmem_cache for the FC requests sent from the zfcp driver and use it for the ELS ADISC data. The goal is to migrate to the FC kmem_cache in later patches and remove the request specific kmem_caches. 
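The idea behind a single cache is sketched below: one container carries the scatterlist bookkeeping plus a union of the per-request payloads, so the cache object is sized by the largest member and can be aligned so that it never straddles a page boundary. The names are illustrative, and the alignment trick is an assumption about the intent here, not the exact body of the zfcp helper:

#include <linux/log2.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <scsi/fc/fc_els.h>

struct example_fc_req {
        struct scatterlist sg_req;
        struct scatterlist sg_rsp;
        union {
                struct { struct fc_els_adisc req, rsp; } adisc;
                /* further request types join the union as they migrate */
        } u;
};

static struct kmem_cache *example_fc_req_cache;

static int example_setup(void)
{
        size_t size = sizeof(struct example_fc_req);

        /* aligning to a power of two >= size keeps each object inside
         * one page, provided size <= PAGE_SIZE */
        example_fc_req_cache = kmem_cache_create("example_fc_req", size,
                                                 roundup_pow_of_two(size),
                                                 0, NULL);
        return example_fc_req_cache ? 0 : -ENOMEM;
}
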
Signed-off-by: Christof Schmitt Signed-off-by: Steffen Maier Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 14 +++++++------- drivers/s390/scsi/zfcp_def.h | 1 - drivers/s390/scsi/zfcp_ext.h | 1 + drivers/s390/scsi/zfcp_fc.c | 46 +++++++++++++++++++++++--------------------- drivers/s390/scsi/zfcp_fc.h | 27 ++++++++++++++------------ 5 files changed, 47 insertions(+), 42 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 81e185602bb2..adbc05c44362 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -137,10 +137,10 @@ static int __init zfcp_module_init(void) if (!zfcp_data.gid_pn_cache) goto out_gid_cache; - zfcp_data.adisc_cache = zfcp_cache_hw_align("zfcp_adisc", - sizeof(struct zfcp_fc_els_adisc)); - if (!zfcp_data.adisc_cache) - goto out_adisc_cache; + zfcp_fc_req_cache = zfcp_cache_hw_align("zfcp_fc_req", + sizeof(struct zfcp_fc_req)); + if (!zfcp_fc_req_cache) + goto out_fc_cache; zfcp_data.scsi_transport_template = fc_attach_transport(&zfcp_transport_functions); @@ -172,8 +172,8 @@ out_ccw_register: out_misc: fc_release_transport(zfcp_data.scsi_transport_template); out_transport: - kmem_cache_destroy(zfcp_data.adisc_cache); -out_adisc_cache: + kmem_cache_destroy(zfcp_fc_req_cache); +out_fc_cache: kmem_cache_destroy(zfcp_data.gid_pn_cache); out_gid_cache: kmem_cache_destroy(zfcp_data.qtcb_cache); @@ -190,7 +190,7 @@ static void __exit zfcp_module_exit(void) ccw_driver_unregister(&zfcp_ccw_driver); misc_deregister(&zfcp_cfdc_misc); fc_release_transport(zfcp_data.scsi_transport_template); - kmem_cache_destroy(zfcp_data.adisc_cache); + kmem_cache_destroy(zfcp_fc_req_cache); kmem_cache_destroy(zfcp_data.gid_pn_cache); kmem_cache_destroy(zfcp_data.qtcb_cache); kmem_cache_destroy(zfcp_data.gpn_ft_cache); diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 93ce500f8978..233cbf16259f 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -320,7 +320,6 @@ struct zfcp_data { struct kmem_cache *gpn_ft_cache; struct kmem_cache *qtcb_cache; struct kmem_cache *gid_pn_cache; - struct kmem_cache *adisc_cache; }; #endif /* ZFCP_DEF_H */ diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 6e325284fbe7..fa1834f211aa 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -80,6 +80,7 @@ extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long); extern void zfcp_erp_timeout_handler(unsigned long); /* zfcp_fc.c */ +extern struct kmem_cache *zfcp_fc_req_cache; extern void zfcp_fc_enqueue_event(struct zfcp_adapter *, enum fc_host_event_code event_code, u32); extern void zfcp_fc_post_event(struct work_struct *); diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 30cf91a787a3..e020dec85294 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -16,6 +16,8 @@ #include "zfcp_ext.h" #include "zfcp_fc.h" +struct kmem_cache *zfcp_fc_req_cache; + static u32 zfcp_fc_rscn_range_mask[] = { [ELS_ADDR_FMT_PORT] = 0xFFFFFF, [ELS_ADDR_FMT_AREA] = 0xFFFF00, @@ -419,11 +421,11 @@ void zfcp_fc_plogi_evaluate(struct zfcp_port *port, struct fc_els_flogi *plogi) static void zfcp_fc_adisc_handler(void *data) { - struct zfcp_fc_els_adisc *adisc = data; - struct zfcp_port *port = adisc->els.port; - struct fc_els_adisc *adisc_resp = &adisc->adisc_resp; + struct zfcp_fc_req *fc_req = data; + struct zfcp_port *port = fc_req->ct_els.port; + struct fc_els_adisc 
*adisc_resp = &fc_req->u.adisc.rsp; - if (adisc->els.status) { + if (fc_req->ct_els.status) { /* request rejected or timed out */ zfcp_erp_port_forced_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, "fcadh_1"); @@ -445,42 +447,42 @@ static void zfcp_fc_adisc_handler(void *data) out: atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status); put_device(&port->dev); - kmem_cache_free(zfcp_data.adisc_cache, adisc); + kmem_cache_free(zfcp_fc_req_cache, fc_req); } static int zfcp_fc_adisc(struct zfcp_port *port) { - struct zfcp_fc_els_adisc *adisc; + struct zfcp_fc_req *fc_req; struct zfcp_adapter *adapter = port->adapter; + struct Scsi_Host *shost = adapter->scsi_host; int ret; - adisc = kmem_cache_zalloc(zfcp_data.adisc_cache, GFP_ATOMIC); - if (!adisc) + fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_ATOMIC); + if (!fc_req) return -ENOMEM; - adisc->els.port = port; - adisc->els.req = &adisc->req; - adisc->els.resp = &adisc->resp; - sg_init_one(adisc->els.req, &adisc->adisc_req, + fc_req->ct_els.port = port; + fc_req->ct_els.req = &fc_req->sg_req; + fc_req->ct_els.resp = &fc_req->sg_rsp; + sg_init_one(&fc_req->sg_req, &fc_req->u.adisc.req, sizeof(struct fc_els_adisc)); - sg_init_one(adisc->els.resp, &adisc->adisc_resp, + sg_init_one(&fc_req->sg_rsp, &fc_req->u.adisc.rsp, sizeof(struct fc_els_adisc)); - adisc->els.handler = zfcp_fc_adisc_handler; - adisc->els.handler_data = adisc; + fc_req->ct_els.handler = zfcp_fc_adisc_handler; + fc_req->ct_els.handler_data = fc_req; /* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports without FC-AL-2 capability, so we don't set it */ - adisc->adisc_req.adisc_wwpn = fc_host_port_name(adapter->scsi_host); - adisc->adisc_req.adisc_wwnn = fc_host_node_name(adapter->scsi_host); - adisc->adisc_req.adisc_cmd = ELS_ADISC; - hton24(adisc->adisc_req.adisc_port_id, - fc_host_port_id(adapter->scsi_host)); + fc_req->u.adisc.req.adisc_wwpn = fc_host_port_name(shost); + fc_req->u.adisc.req.adisc_wwnn = fc_host_node_name(shost); + fc_req->u.adisc.req.adisc_cmd = ELS_ADISC; + hton24(fc_req->u.adisc.req.adisc_port_id, fc_host_port_id(shost)); - ret = zfcp_fsf_send_els(adapter, port->d_id, &adisc->els, + ret = zfcp_fsf_send_els(adapter, port->d_id, &fc_req->ct_els, ZFCP_FC_CTELS_TMO); if (ret) - kmem_cache_free(zfcp_data.adisc_cache, adisc); + kmem_cache_free(zfcp_fc_req_cache, fc_req); return ret; } diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h index b464ae01086c..4351b4e81b3c 100644 --- a/drivers/s390/scsi/zfcp_fc.h +++ b/drivers/s390/scsi/zfcp_fc.h @@ -123,19 +123,22 @@ struct zfcp_fc_gpn_ft { }; /** - * struct zfcp_fc_els_adisc - everything required in zfcp for issuing ELS ADISC - * @els: data required for issuing els fsf command - * @req: scatterlist entry for ELS ADISC request - * @resp: scatterlist entry for ELS ADISC response - * @adisc_req: ELS ADISC request data - * @adisc_resp: ELS ADISC response data + * struct zfcp_fc_req - Container for FC ELS and CT requests sent from zfcp + * @ct_els: data required for issuing fsf command + * @sg_req: scatterlist entry for request data + * @sg_rsp: scatterlist entry for response data + * @u: request specific data */ -struct zfcp_fc_els_adisc { - struct zfcp_fsf_ct_els els; - struct scatterlist req; - struct scatterlist resp; - struct fc_els_adisc adisc_req; - struct fc_els_adisc adisc_resp; +struct zfcp_fc_req { + struct zfcp_fsf_ct_els ct_els; + struct scatterlist sg_req; + struct scatterlist sg_rsp; + union { + struct { + struct fc_els_adisc req; + struct fc_els_adisc rsp; + } 
adisc; + } u; }; /** -- cgit v1.2.3 From fcf7e6144df60cd5082e5bc52f1ca5d1ca99a2d6 Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 22 Feb 2011 19:54:42 +0100 Subject: [SCSI] zfcp: Allocate GID_PN data through new FC kmem_cache Allocate the data for the GID_PN request through the new FC kmem_cache. While updating the GID_PN code, also introduce a helper function for initializing the CT header for FC nameserver requests. Remove the "paranoia" check as well, the GID_PN request data does not suddenly change. Signed-off-by: Christof Schmitt Signed-off-by: Steffen Maier Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 10 +----- drivers/s390/scsi/zfcp_def.h | 1 - drivers/s390/scsi/zfcp_fc.c | 80 +++++++++++++++++++++----------------------- drivers/s390/scsi/zfcp_fc.h | 25 ++++---------- 4 files changed, 46 insertions(+), 70 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index adbc05c44362..6d2beb6ad9ee 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -132,11 +132,6 @@ static int __init zfcp_module_init(void) if (!zfcp_data.qtcb_cache) goto out_qtcb_cache; - zfcp_data.gid_pn_cache = zfcp_cache_hw_align("zfcp_gid", - sizeof(struct zfcp_fc_gid_pn)); - if (!zfcp_data.gid_pn_cache) - goto out_gid_cache; - zfcp_fc_req_cache = zfcp_cache_hw_align("zfcp_fc_req", sizeof(struct zfcp_fc_req)); if (!zfcp_fc_req_cache) @@ -174,8 +169,6 @@ out_misc: out_transport: kmem_cache_destroy(zfcp_fc_req_cache); out_fc_cache: - kmem_cache_destroy(zfcp_data.gid_pn_cache); -out_gid_cache: kmem_cache_destroy(zfcp_data.qtcb_cache); out_qtcb_cache: kmem_cache_destroy(zfcp_data.gpn_ft_cache); @@ -191,7 +184,6 @@ static void __exit zfcp_module_exit(void) misc_deregister(&zfcp_cfdc_misc); fc_release_transport(zfcp_data.scsi_transport_template); kmem_cache_destroy(zfcp_fc_req_cache); - kmem_cache_destroy(zfcp_data.gid_pn_cache); kmem_cache_destroy(zfcp_data.qtcb_cache); kmem_cache_destroy(zfcp_data.gpn_ft_cache); } @@ -263,7 +255,7 @@ static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) return -ENOMEM; adapter->pool.gid_pn = - mempool_create_slab_pool(1, zfcp_data.gid_pn_cache); + mempool_create_slab_pool(1, zfcp_fc_req_cache); if (!adapter->pool.gid_pn) return -ENOMEM; diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 233cbf16259f..dbf59108e1f4 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -319,7 +319,6 @@ struct zfcp_data { struct scsi_transport_template *scsi_transport_template; struct kmem_cache *gpn_ft_cache; struct kmem_cache *qtcb_cache; - struct kmem_cache *gid_pn_cache; }; #endif /* ZFCP_DEF_H */ diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index e020dec85294..982455603349 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -262,24 +262,18 @@ void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req) zfcp_fc_incoming_rscn(fsf_req); } -static void zfcp_fc_ns_gid_pn_eval(void *data) +static void zfcp_fc_ns_gid_pn_eval(struct zfcp_fc_req *fc_req) { - struct zfcp_fc_gid_pn *gid_pn = data; - struct zfcp_fsf_ct_els *ct = &gid_pn->ct; - struct zfcp_fc_gid_pn_req *gid_pn_req = sg_virt(ct->req); - struct zfcp_fc_gid_pn_resp *gid_pn_resp = sg_virt(ct->resp); - struct zfcp_port *port = gid_pn->port; + struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els; + struct zfcp_fc_gid_pn_rsp *gid_pn_rsp = &fc_req->u.gid_pn.rsp; - if (ct->status) + if (ct_els->status) return; - if 
(gid_pn_resp->ct_hdr.ct_cmd != FC_FS_ACC) + if (gid_pn_rsp->ct_hdr.ct_cmd != FC_FS_ACC) return; - /* paranoia */ - if (gid_pn_req->gid_pn.fn_wwpn != port->wwpn) - return; /* looks like a valid d_id */ - port->d_id = ntoh24(gid_pn_resp->gid_pn.fp_fid); + ct_els->port->d_id = ntoh24(gid_pn_rsp->gid_pn.fp_fid); } static void zfcp_fc_complete(void *data) @@ -287,69 +281,73 @@ static void zfcp_fc_complete(void *data) complete(data); } +static void zfcp_fc_ct_ns_init(struct fc_ct_hdr *ct_hdr, u16 cmd, u16 mr_size) +{ + ct_hdr->ct_rev = FC_CT_REV; + ct_hdr->ct_fs_type = FC_FST_DIR; + ct_hdr->ct_fs_subtype = FC_NS_SUBTYPE; + ct_hdr->ct_cmd = cmd; + ct_hdr->ct_mr_size = mr_size / 4; +} + static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port, - struct zfcp_fc_gid_pn *gid_pn) + struct zfcp_fc_req *fc_req) { struct zfcp_adapter *adapter = port->adapter; DECLARE_COMPLETION_ONSTACK(completion); + struct zfcp_fc_gid_pn_req *gid_pn_req = &fc_req->u.gid_pn.req; + struct zfcp_fc_gid_pn_rsp *gid_pn_rsp = &fc_req->u.gid_pn.rsp; int ret; /* setup parameters for send generic command */ - gid_pn->port = port; - gid_pn->ct.handler = zfcp_fc_complete; - gid_pn->ct.handler_data = &completion; - gid_pn->ct.req = &gid_pn->sg_req; - gid_pn->ct.resp = &gid_pn->sg_resp; - sg_init_one(&gid_pn->sg_req, &gid_pn->gid_pn_req, - sizeof(struct zfcp_fc_gid_pn_req)); - sg_init_one(&gid_pn->sg_resp, &gid_pn->gid_pn_resp, - sizeof(struct zfcp_fc_gid_pn_resp)); - - /* setup nameserver request */ - gid_pn->gid_pn_req.ct_hdr.ct_rev = FC_CT_REV; - gid_pn->gid_pn_req.ct_hdr.ct_fs_type = FC_FST_DIR; - gid_pn->gid_pn_req.ct_hdr.ct_fs_subtype = FC_NS_SUBTYPE; - gid_pn->gid_pn_req.ct_hdr.ct_options = 0; - gid_pn->gid_pn_req.ct_hdr.ct_cmd = FC_NS_GID_PN; - gid_pn->gid_pn_req.ct_hdr.ct_mr_size = ZFCP_FC_CT_SIZE_PAGE / 4; - gid_pn->gid_pn_req.gid_pn.fn_wwpn = port->wwpn; - - ret = zfcp_fsf_send_ct(&adapter->gs->ds, &gid_pn->ct, + fc_req->ct_els.port = port; + fc_req->ct_els.handler = zfcp_fc_complete; + fc_req->ct_els.handler_data = &completion; + fc_req->ct_els.req = &fc_req->sg_req; + fc_req->ct_els.resp = &fc_req->sg_rsp; + sg_init_one(&fc_req->sg_req, gid_pn_req, sizeof(*gid_pn_req)); + sg_init_one(&fc_req->sg_rsp, gid_pn_rsp, sizeof(*gid_pn_rsp)); + + zfcp_fc_ct_ns_init(&gid_pn_req->ct_hdr, + FC_NS_GID_PN, ZFCP_FC_CT_SIZE_PAGE); + gid_pn_req->gid_pn.fn_wwpn = port->wwpn; + + ret = zfcp_fsf_send_ct(&adapter->gs->ds, &fc_req->ct_els, adapter->pool.gid_pn_req, ZFCP_FC_CTELS_TMO); if (!ret) { wait_for_completion(&completion); - zfcp_fc_ns_gid_pn_eval(gid_pn); + zfcp_fc_ns_gid_pn_eval(fc_req); } return ret; } /** - * zfcp_fc_ns_gid_pn_request - initiate GID_PN nameserver request + * zfcp_fc_ns_gid_pn - initiate GID_PN nameserver request * @port: port where GID_PN request is needed * return: -ENOMEM on error, 0 otherwise */ static int zfcp_fc_ns_gid_pn(struct zfcp_port *port) { int ret; - struct zfcp_fc_gid_pn *gid_pn; + struct zfcp_fc_req *fc_req; struct zfcp_adapter *adapter = port->adapter; - gid_pn = mempool_alloc(adapter->pool.gid_pn, GFP_ATOMIC); - if (!gid_pn) + fc_req = mempool_alloc(adapter->pool.gid_pn, GFP_ATOMIC); + if (!fc_req) return -ENOMEM; - memset(gid_pn, 0, sizeof(*gid_pn)); + memset(fc_req, 0, sizeof(*fc_req)); ret = zfcp_fc_wka_port_get(&adapter->gs->ds); if (ret) goto out; - ret = zfcp_fc_ns_gid_pn_request(port, gid_pn); + ret = zfcp_fc_ns_gid_pn_request(port, fc_req); zfcp_fc_wka_port_put(&adapter->gs->ds); out: - mempool_free(gid_pn, adapter->pool.gid_pn); + mempool_free(fc_req, adapter->pool.gid_pn); return ret; 
} diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h index 4351b4e81b3c..200fe2501332 100644 --- a/drivers/s390/scsi/zfcp_fc.h +++ b/drivers/s390/scsi/zfcp_fc.h @@ -64,32 +64,15 @@ struct zfcp_fc_gid_pn_req { } __packed; /** - * struct zfcp_fc_gid_pn_resp - container for ct header plus gid_pn response + * struct zfcp_fc_gid_pn_rsp - container for ct header plus gid_pn response * @ct_hdr: FC GS common transport header * @gid_pn: GID_PN response */ -struct zfcp_fc_gid_pn_resp { +struct zfcp_fc_gid_pn_rsp { struct fc_ct_hdr ct_hdr; struct fc_gid_pn_resp gid_pn; } __packed; -/** - * struct zfcp_fc_gid_pn - everything required in zfcp for gid_pn request - * @ct: data passed to zfcp_fsf for issuing fsf request - * @sg_req: scatterlist entry for request data - * @sg_resp: scatterlist entry for response data - * @gid_pn_req: GID_PN request data - * @gid_pn_resp: GID_PN response data - */ -struct zfcp_fc_gid_pn { - struct zfcp_fsf_ct_els ct; - struct scatterlist sg_req; - struct scatterlist sg_resp; - struct zfcp_fc_gid_pn_req gid_pn_req; - struct zfcp_fc_gid_pn_resp gid_pn_resp; - struct zfcp_port *port; -}; - /** * struct zfcp_fc_gpn_ft - container for ct header plus gpn_ft request * @ct_hdr: FC GS common transport header @@ -138,6 +121,10 @@ struct zfcp_fc_req { struct fc_els_adisc req; struct fc_els_adisc rsp; } adisc; + struct { + struct zfcp_fc_gid_pn_req req; + struct zfcp_fc_gid_pn_rsp rsp; + } gid_pn; } u; }; -- cgit v1.2.3 From f9773229be6d8a3caa4c9dfc2961a63ab51a4e2a Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 22 Feb 2011 19:54:43 +0100 Subject: [SCSI] zfcp: Use common FC kmem_cache for GPN_FT request Switch the allocation of the GPN_FT request data to the FC kmem_cache and remove the zfcp_gpn kmem_cache. Signed-off-by: Christof Schmitt Signed-off-by: Steffen Maier Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 8 ---- drivers/s390/scsi/zfcp_def.h | 1 - drivers/s390/scsi/zfcp_fc.c | 87 ++++++++++++++++---------------------------- drivers/s390/scsi/zfcp_fc.h | 26 ++----------- 4 files changed, 35 insertions(+), 87 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 6d2beb6ad9ee..324fb1aab6f5 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -122,11 +122,6 @@ static int __init zfcp_module_init(void) { int retval = -ENOMEM; - zfcp_data.gpn_ft_cache = zfcp_cache_hw_align("zfcp_gpn", - sizeof(struct zfcp_fc_gpn_ft_req)); - if (!zfcp_data.gpn_ft_cache) - goto out; - zfcp_data.qtcb_cache = zfcp_cache_hw_align("zfcp_qtcb", sizeof(struct fsf_qtcb)); if (!zfcp_data.qtcb_cache) @@ -171,8 +166,6 @@ out_transport: out_fc_cache: kmem_cache_destroy(zfcp_data.qtcb_cache); out_qtcb_cache: - kmem_cache_destroy(zfcp_data.gpn_ft_cache); -out: return retval; } @@ -185,7 +178,6 @@ static void __exit zfcp_module_exit(void) fc_release_transport(zfcp_data.scsi_transport_template); kmem_cache_destroy(zfcp_fc_req_cache); kmem_cache_destroy(zfcp_data.qtcb_cache); - kmem_cache_destroy(zfcp_data.gpn_ft_cache); } module_exit(zfcp_module_exit); diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index dbf59108e1f4..d7e2534ed66e 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -317,7 +317,6 @@ struct zfcp_fsf_req { struct zfcp_data { struct scsi_host_template scsi_host_template; struct scsi_transport_template *scsi_transport_template; - struct kmem_cache *gpn_ft_cache; struct kmem_cache *qtcb_cache; }; diff --git 
a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 982455603349..c839a3b6879d 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -528,68 +528,42 @@ void zfcp_fc_test_link(struct zfcp_port *port) put_device(&port->dev); } -static void zfcp_free_sg_env(struct zfcp_fc_gpn_ft *gpn_ft, int buf_num) +static struct zfcp_fc_req *zfcp_alloc_sg_env(int buf_num) { - struct scatterlist *sg = &gpn_ft->sg_req; - - kmem_cache_free(zfcp_data.gpn_ft_cache, sg_virt(sg)); - zfcp_sg_free_table(gpn_ft->sg_resp, buf_num); - - kfree(gpn_ft); -} - -static struct zfcp_fc_gpn_ft *zfcp_alloc_sg_env(int buf_num) -{ - struct zfcp_fc_gpn_ft *gpn_ft; - struct zfcp_fc_gpn_ft_req *req; + struct zfcp_fc_req *fc_req; - gpn_ft = kzalloc(sizeof(*gpn_ft), GFP_KERNEL); - if (!gpn_ft) + fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_KERNEL); + if (!fc_req) return NULL; - req = kmem_cache_zalloc(zfcp_data.gpn_ft_cache, GFP_KERNEL); - if (!req) { - kfree(gpn_ft); - gpn_ft = NULL; - goto out; + if (zfcp_sg_setup_table(&fc_req->sg_rsp, buf_num)) { + kmem_cache_free(zfcp_fc_req_cache, fc_req); + return NULL; } - sg_init_one(&gpn_ft->sg_req, req, sizeof(*req)); - if (zfcp_sg_setup_table(gpn_ft->sg_resp, buf_num)) { - zfcp_free_sg_env(gpn_ft, buf_num); - gpn_ft = NULL; - } -out: - return gpn_ft; -} + sg_init_one(&fc_req->sg_req, &fc_req->u.gpn_ft.req, + sizeof(struct zfcp_fc_gpn_ft_req)); + return fc_req; +} -static int zfcp_fc_send_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft, +static int zfcp_fc_send_gpn_ft(struct zfcp_fc_req *fc_req, struct zfcp_adapter *adapter, int max_bytes) { - struct zfcp_fsf_ct_els *ct = &gpn_ft->ct; - struct zfcp_fc_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req); + struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els; + struct zfcp_fc_gpn_ft_req *req = &fc_req->u.gpn_ft.req; DECLARE_COMPLETION_ONSTACK(completion); int ret; - /* prepare CT IU for GPN_FT */ - req->ct_hdr.ct_rev = FC_CT_REV; - req->ct_hdr.ct_fs_type = FC_FST_DIR; - req->ct_hdr.ct_fs_subtype = FC_NS_SUBTYPE; - req->ct_hdr.ct_options = 0; - req->ct_hdr.ct_cmd = FC_NS_GPN_FT; - req->ct_hdr.ct_mr_size = max_bytes / 4; - req->gpn_ft.fn_domain_id_scope = 0; - req->gpn_ft.fn_area_id_scope = 0; + zfcp_fc_ct_ns_init(&req->ct_hdr, FC_NS_GPN_FT, max_bytes); req->gpn_ft.fn_fc4_type = FC_TYPE_FCP; - /* prepare zfcp_send_ct */ - ct->handler = zfcp_fc_complete; - ct->handler_data = &completion; - ct->req = &gpn_ft->sg_req; - ct->resp = gpn_ft->sg_resp; + ct_els->handler = zfcp_fc_complete; + ct_els->handler_data = &completion; + ct_els->req = &fc_req->sg_req; + ct_els->resp = &fc_req->sg_rsp; - ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct, NULL, + ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL, ZFCP_FC_CTELS_TMO); if (!ret) wait_for_completion(&completion); @@ -610,11 +584,11 @@ static void zfcp_fc_validate_port(struct zfcp_port *port, struct list_head *lh) list_move_tail(&port->list, lh); } -static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft, +static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_req *fc_req, struct zfcp_adapter *adapter, int max_entries) { - struct zfcp_fsf_ct_els *ct = &gpn_ft->ct; - struct scatterlist *sg = gpn_ft->sg_resp; + struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els; + struct scatterlist *sg = &fc_req->sg_rsp; struct fc_ct_hdr *hdr = sg_virt(sg); struct fc_gpn_ft_resp *acc = sg_virt(sg); struct zfcp_port *port, *tmp; @@ -623,7 +597,7 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft, u32 d_id; int ret = 0, x, last = 0; - if (ct->status) + if (ct_els->status) return -EIO; if 
(hdr->ct_cmd != FC_FS_ACC) { @@ -687,7 +661,7 @@ void zfcp_fc_scan_ports(struct work_struct *work) struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter, scan_work); int ret, i; - struct zfcp_fc_gpn_ft *gpn_ft; + struct zfcp_fc_req *fc_req; int chain, max_entries, buf_num, max_bytes; chain = adapter->adapter_features & FSF_FEATURE_ELS_CT_CHAINED_SBALS; @@ -702,21 +676,22 @@ void zfcp_fc_scan_ports(struct work_struct *work) if (zfcp_fc_wka_port_get(&adapter->gs->ds)) return; - gpn_ft = zfcp_alloc_sg_env(buf_num); - if (!gpn_ft) + fc_req = zfcp_alloc_sg_env(buf_num); + if (!fc_req) goto out; for (i = 0; i < 3; i++) { - ret = zfcp_fc_send_gpn_ft(gpn_ft, adapter, max_bytes); + ret = zfcp_fc_send_gpn_ft(fc_req, adapter, max_bytes); if (!ret) { - ret = zfcp_fc_eval_gpn_ft(gpn_ft, adapter, max_entries); + ret = zfcp_fc_eval_gpn_ft(fc_req, adapter, max_entries); if (ret == -EAGAIN) ssleep(1); else break; } } - zfcp_free_sg_env(gpn_ft, buf_num); + zfcp_sg_free_table(&fc_req->sg_rsp, buf_num); + kmem_cache_free(zfcp_fc_req_cache, fc_req); out: zfcp_fc_wka_port_put(&adapter->gs->ds); } diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h index 200fe2501332..2066f9d9ceb5 100644 --- a/drivers/s390/scsi/zfcp_fc.h +++ b/drivers/s390/scsi/zfcp_fc.h @@ -83,28 +83,6 @@ struct zfcp_fc_gpn_ft_req { struct fc_ns_gid_ft gpn_ft; } __packed; -/** - * struct zfcp_fc_gpn_ft_resp - container for ct header plus gpn_ft response - * @ct_hdr: FC GS common transport header - * @gpn_ft: Array of gpn_ft response data to fill one memory page - */ -struct zfcp_fc_gpn_ft_resp { - struct fc_ct_hdr ct_hdr; - struct fc_gpn_ft_resp gpn_ft[ZFCP_FC_GPN_FT_ENT_PAGE]; -} __packed; - -/** - * struct zfcp_fc_gpn_ft - zfcp data for gpn_ft request - * @ct: data passed to zfcp_fsf for issuing fsf request - * @sg_req: scatter list entry for gpn_ft request - * @sg_resp: scatter list entries for gpn_ft responses (per memory page) - */ -struct zfcp_fc_gpn_ft { - struct zfcp_fsf_ct_els ct; - struct scatterlist sg_req; - struct scatterlist sg_resp[ZFCP_FC_GPN_FT_NUM_BUFS]; -}; - /** * struct zfcp_fc_req - Container for FC ELS and CT requests sent from zfcp * @ct_els: data required for issuing fsf command @@ -125,6 +103,10 @@ struct zfcp_fc_req { struct zfcp_fc_gid_pn_req req; struct zfcp_fc_gid_pn_rsp rsp; } gid_pn; + struct { + struct scatterlist sg_rsp2[ZFCP_FC_GPN_FT_NUM_BUFS - 1]; + struct zfcp_fc_gpn_ft_req req; + } gpn_ft; } u; }; -- cgit v1.2.3 From 259afe2ed92c179e0a85da10ca63bf927b9851ca Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 22 Feb 2011 19:54:44 +0100 Subject: [SCSI] zfcp: Move qtcb kmem_cache to zfcp_fsf.c Move the kmem_cache for allocating the qtcb to zfcp_fsf.c and rename it accordingly. 
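For reference, the allocation scheme this cache backs is the common slab-plus-mempool arrangement: ordinary requests allocate straight from the cache, while requests that must not fail under memory pressure draw from a small mempool carved out of the same cache. A condensed sketch with illustrative names (the real zfcp cache is created hardware-aligned; plain kmem_cache_create() is used here only for brevity):

static struct kmem_cache *example_qtcb_cache;
static mempool_t *example_qtcb_pool;

static int example_setup(void)
{
        example_qtcb_cache = kmem_cache_create("example_qtcb",
                                               sizeof(struct fsf_qtcb),
                                               0, 0, NULL);
        if (!example_qtcb_cache)
                return -ENOMEM;

        /* keep a guaranteed minimum of 4 objects for the recovery path */
        example_qtcb_pool = mempool_create_slab_pool(4, example_qtcb_cache);
        return example_qtcb_pool ? 0 : -ENOMEM;
}

static struct fsf_qtcb *example_qtcb_alloc(mempool_t *pool)
{
        struct fsf_qtcb *qtcb;

        if (pool)
                qtcb = mempool_alloc(pool, GFP_ATOMIC);
        else
                qtcb = kmem_cache_alloc(example_qtcb_cache, GFP_ATOMIC);
        if (qtcb)
                memset(qtcb, 0, sizeof(*qtcb));
        return qtcb;
}
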
Signed-off-by: Christof Schmitt Signed-off-by: Steffen Maier Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 12 ++++++------ drivers/s390/scsi/zfcp_def.h | 1 - drivers/s390/scsi/zfcp_ext.h | 1 + drivers/s390/scsi/zfcp_fsf.c | 6 ++++-- 4 files changed, 11 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 324fb1aab6f5..c94b666057cf 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -122,9 +122,9 @@ static int __init zfcp_module_init(void) { int retval = -ENOMEM; - zfcp_data.qtcb_cache = zfcp_cache_hw_align("zfcp_qtcb", - sizeof(struct fsf_qtcb)); - if (!zfcp_data.qtcb_cache) + zfcp_fsf_qtcb_cache = zfcp_cache_hw_align("zfcp_fsf_qtcb", + sizeof(struct fsf_qtcb)); + if (!zfcp_fsf_qtcb_cache) goto out_qtcb_cache; zfcp_fc_req_cache = zfcp_cache_hw_align("zfcp_fc_req", @@ -164,7 +164,7 @@ out_misc: out_transport: kmem_cache_destroy(zfcp_fc_req_cache); out_fc_cache: - kmem_cache_destroy(zfcp_data.qtcb_cache); + kmem_cache_destroy(zfcp_fsf_qtcb_cache); out_qtcb_cache: return retval; } @@ -177,7 +177,7 @@ static void __exit zfcp_module_exit(void) misc_deregister(&zfcp_cfdc_misc); fc_release_transport(zfcp_data.scsi_transport_template); kmem_cache_destroy(zfcp_fc_req_cache); - kmem_cache_destroy(zfcp_data.qtcb_cache); + kmem_cache_destroy(zfcp_fsf_qtcb_cache); } module_exit(zfcp_module_exit); @@ -236,7 +236,7 @@ static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) return -ENOMEM; adapter->pool.qtcb_pool = - mempool_create_slab_pool(4, zfcp_data.qtcb_cache); + mempool_create_slab_pool(4, zfcp_fsf_qtcb_cache); if (!adapter->pool.qtcb_pool) return -ENOMEM; diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index d7e2534ed66e..6a764337f06e 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -317,7 +317,6 @@ struct zfcp_fsf_req { struct zfcp_data { struct scsi_host_template scsi_host_template; struct scsi_transport_template *scsi_transport_template; - struct kmem_cache *qtcb_cache; }; #endif /* ZFCP_DEF_H */ diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index fa1834f211aa..58c7e57ed615 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -98,6 +98,7 @@ extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *); extern int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *); /* zfcp_fsf.c */ +extern struct kmem_cache *zfcp_fsf_qtcb_cache; extern int zfcp_fsf_open_port(struct zfcp_erp_action *); extern int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *); extern int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *); diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index a2b0e8435fc3..43ee0288c0e9 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -18,6 +18,8 @@ #include "zfcp_qdio.h" #include "zfcp_reqlist.h" +struct kmem_cache *zfcp_fsf_qtcb_cache; + static void zfcp_fsf_request_timeout_handler(unsigned long data) { struct zfcp_adapter *adapter = (struct zfcp_adapter *) data; @@ -83,7 +85,7 @@ void zfcp_fsf_req_free(struct zfcp_fsf_req *req) } if (likely(req->qtcb)) - kmem_cache_free(zfcp_data.qtcb_cache, req->qtcb); + kmem_cache_free(zfcp_fsf_qtcb_cache, req->qtcb); kfree(req); } @@ -628,7 +630,7 @@ static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool) if (likely(pool)) qtcb = mempool_alloc(pool, GFP_ATOMIC); else - qtcb = kmem_cache_alloc(zfcp_data.qtcb_cache, GFP_ATOMIC); + qtcb = 
kmem_cache_alloc(zfcp_fsf_qtcb_cache, GFP_ATOMIC); if (unlikely(!qtcb)) return NULL; -- cgit v1.2.3 From 2443c8b23aea83d529868030e28f45a6fa6553b2 Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 22 Feb 2011 19:54:45 +0100 Subject: [SCSI] zfcp: Merge FCP task management setup with regular FCP command setup For task management commands, only LUN and flags are required. The regular FCP setup already sets the LUN in the fcp_cmnd. All is required for merging the function is setting up the TM flags. Signed-off-by: Christof Schmitt Signed-off-by: Steffen Maier Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_fc.h | 22 ++++++++-------------- drivers/s390/scsi/zfcp_fsf.c | 4 ++-- 2 files changed, 10 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h index 2066f9d9ceb5..5243ce44aa42 100644 --- a/drivers/s390/scsi/zfcp_fc.h +++ b/drivers/s390/scsi/zfcp_fc.h @@ -164,14 +164,21 @@ struct zfcp_fc_wka_ports { * zfcp_fc_scsi_to_fcp - setup FCP command with data from scsi_cmnd * @fcp: fcp_cmnd to setup * @scsi: scsi_cmnd where to get LUN, task attributes/flags and CDB + * @tm: task management flags to setup task management command */ static inline -void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi) +void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi, + u8 tm_flags) { char tag[2]; int_to_scsilun(scsi->device->lun, (struct scsi_lun *) &fcp->fc_lun); + if (unlikely(tm_flags)) { + fcp->fc_tm_flags = tm_flags; + return; + } + if (scsi_populate_tag_msg(scsi, tag)) { switch (tag[0]) { case MSG_ORDERED_TAG: @@ -197,19 +204,6 @@ void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi) fcp->fc_dl += fcp->fc_dl / scsi->device->sector_size * 8; } -/** - * zfcp_fc_fcp_tm - setup FCP command as task management command - * @fcp: fcp_cmnd to setup - * @dev: scsi_device where to send the task management command - * @tm: task management flags to setup tm command - */ -static inline -void zfcp_fc_fcp_tm(struct fcp_cmnd *fcp, struct scsi_device *dev, u8 tm_flags) -{ - int_to_scsilun(dev->lun, (struct scsi_lun *) &fcp->fc_lun); - fcp->fc_tm_flags |= tm_flags; -} - /** * zfcp_fc_evap_fcp_rsp - evaluate FCP RSP IU and update scsi_cmnd accordingly * @fcp_rsp: FCP RSP IU to evaluate diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 43ee0288c0e9..a0e05ef65924 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -2210,7 +2210,7 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd) zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction); fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd; - zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd); + zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0); if (scsi_prot_sg_count(scsi_cmnd)) { zfcp_qdio_set_data_div(qdio, &req->qdio_req, @@ -2299,7 +2299,7 @@ struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd, zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd; - zfcp_fc_fcp_tm(fcp_cmnd, scmnd->device, tm_flags); + zfcp_fc_scsi_to_fcp(fcp_cmnd, scmnd, tm_flags); zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT); if (!zfcp_fsf_req_send(req)) -- cgit v1.2.3 From 1947c72a122a8c367fdfc650c39a98ba76cc573a Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 22 Feb 2011 19:54:46 +0100 Subject: [SCSI] zfcp: Move SCSI host and transport templates out of struct zfcp_data The SCSI host and transport templates are the only members left in 
the global zfcp_data struct. Move them out of zfcp_data and remove the now unused zfcp_data struct. Also update the names of the register and unregister functions to use the zfcp_scsi prefix. Signed-off-by: Christof Schmitt Signed-off-by: Steffen Maier Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 14 ++++----- drivers/s390/scsi/zfcp_def.h | 6 ---- drivers/s390/scsi/zfcp_ext.h | 6 ++-- drivers/s390/scsi/zfcp_scsi.c | 70 +++++++++++++++++++++++-------------------- 4 files changed, 48 insertions(+), 48 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index c94b666057cf..88691adc1283 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -132,11 +132,11 @@ static int __init zfcp_module_init(void) if (!zfcp_fc_req_cache) goto out_fc_cache; - zfcp_data.scsi_transport_template = + zfcp_scsi_transport_template = fc_attach_transport(&zfcp_transport_functions); - if (!zfcp_data.scsi_transport_template) + if (!zfcp_scsi_transport_template) goto out_transport; - scsi_transport_reserve_device(zfcp_data.scsi_transport_template, + scsi_transport_reserve_device(zfcp_scsi_transport_template, sizeof(struct zfcp_scsi_dev)); @@ -160,7 +160,7 @@ static int __init zfcp_module_init(void) out_ccw_register: misc_deregister(&zfcp_cfdc_misc); out_misc: - fc_release_transport(zfcp_data.scsi_transport_template); + fc_release_transport(zfcp_scsi_transport_template); out_transport: kmem_cache_destroy(zfcp_fc_req_cache); out_fc_cache: @@ -175,7 +175,7 @@ static void __exit zfcp_module_exit(void) { ccw_driver_unregister(&zfcp_ccw_driver); misc_deregister(&zfcp_cfdc_misc); - fc_release_transport(zfcp_data.scsi_transport_template); + fc_release_transport(zfcp_scsi_transport_template); kmem_cache_destroy(zfcp_fc_req_cache); kmem_cache_destroy(zfcp_fsf_qtcb_cache); } @@ -413,7 +413,7 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device) adapter->dma_parms.max_segment_size = ZFCP_QDIO_SBALE_LEN; adapter->ccw_device->dev.dma_parms = &adapter->dma_parms; - if (!zfcp_adapter_scsi_register(adapter)) + if (!zfcp_scsi_adapter_register(adapter)) return adapter; failed: @@ -430,7 +430,7 @@ void zfcp_adapter_unregister(struct zfcp_adapter *adapter) zfcp_destroy_adapter_work_queue(adapter); zfcp_fc_wka_ports_force_offline(adapter->gs); - zfcp_adapter_scsi_unregister(adapter); + zfcp_scsi_adapter_unregister(adapter); sysfs_remove_group(&cdev->dev.kobj, &zfcp_sysfs_adapter_attrs); zfcp_erp_thread_kill(adapter); diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 6a764337f06e..1566208997b6 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -313,10 +313,4 @@ struct zfcp_fsf_req { void (*handler)(struct zfcp_fsf_req *); }; -/* driver data */ -struct zfcp_data { - struct scsi_host_template scsi_host_template; - struct scsi_transport_template *scsi_transport_template; -}; - #endif /* ZFCP_DEF_H */ diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 58c7e57ed615..410d9ddc175a 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -141,9 +141,9 @@ extern struct zfcp_fsf_req *zfcp_fsf_get_req(struct zfcp_qdio *, struct qdio_buffer *); /* zfcp_scsi.c */ -extern struct zfcp_data zfcp_data; -extern int zfcp_adapter_scsi_register(struct zfcp_adapter *); -extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *); +extern struct scsi_transport_template *zfcp_scsi_transport_template; +extern int 
zfcp_scsi_adapter_register(struct zfcp_adapter *); +extern void zfcp_scsi_adapter_unregister(struct zfcp_adapter *); extern struct fc_function_template zfcp_transport_functions; extern void zfcp_scsi_rport_work(struct work_struct *); extern void zfcp_scsi_schedule_rport_register(struct zfcp_port *); diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index ddb5800823a9..dbba0827127f 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -292,7 +292,37 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) return SUCCESS; } -int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter) +struct scsi_transport_template *zfcp_scsi_transport_template; + +static struct scsi_host_template zfcp_scsi_host_template = { + .module = THIS_MODULE, + .name = "zfcp", + .queuecommand = zfcp_scsi_queuecommand, + .eh_abort_handler = zfcp_scsi_eh_abort_handler, + .eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler, + .eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler, + .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler, + .slave_alloc = zfcp_scsi_slave_alloc, + .slave_configure = zfcp_scsi_slave_configure, + .slave_destroy = zfcp_scsi_slave_destroy, + .change_queue_depth = zfcp_scsi_change_queue_depth, + .proc_name = "zfcp", + .can_queue = 4096, + .this_id = -1, + .sg_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ, + .max_sectors = (ZFCP_QDIO_MAX_SBALES_PER_REQ * 8), + .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1, + .cmd_per_lun = 1, + .use_clustering = 1, + .shost_attrs = zfcp_sysfs_shost_attrs, + .sdev_attrs = zfcp_sysfs_sdev_attrs, +}; + +/** + * zfcp_scsi_adapter_register - Register SCSI and FC host with SCSI midlayer + * @adapter: The zfcp adapter to register with the SCSI midlayer + */ +int zfcp_scsi_adapter_register(struct zfcp_adapter *adapter) { struct ccw_dev_id dev_id; @@ -301,7 +331,7 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter) ccw_device_get_id(adapter->ccw_device, &dev_id); /* register adapter as SCSI host with mid layer of SCSI stack */ - adapter->scsi_host = scsi_host_alloc(&zfcp_data.scsi_host_template, + adapter->scsi_host = scsi_host_alloc(&zfcp_scsi_host_template, sizeof (struct zfcp_adapter *)); if (!adapter->scsi_host) { dev_err(&adapter->ccw_device->dev, @@ -316,7 +346,7 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter) adapter->scsi_host->max_channel = 0; adapter->scsi_host->unique_id = dev_id.devno; adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */ - adapter->scsi_host->transportt = zfcp_data.scsi_transport_template; + adapter->scsi_host->transportt = zfcp_scsi_transport_template; adapter->scsi_host->hostdata[0] = (unsigned long) adapter; @@ -328,7 +358,11 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter) return 0; } -void zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter) +/** + * zfcp_scsi_adapter_unregister - Unregister SCSI and FC host from SCSI midlayer + * @adapter: The zfcp adapter to unregister. 
+ */ +void zfcp_scsi_adapter_unregister(struct zfcp_adapter *adapter) { struct Scsi_Host *shost; struct zfcp_port *port; @@ -346,8 +380,6 @@ void zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter) scsi_remove_host(shost); scsi_host_put(shost); adapter->scsi_host = NULL; - - return; } static struct fc_host_statistics* @@ -692,29 +724,3 @@ struct fc_function_template zfcp_transport_functions = { .show_host_port_id = 1, .dd_bsg_size = sizeof(struct zfcp_fsf_ct_els), }; - -struct zfcp_data zfcp_data = { - .scsi_host_template = { - .name = "zfcp", - .module = THIS_MODULE, - .proc_name = "zfcp", - .change_queue_depth = zfcp_scsi_change_queue_depth, - .slave_alloc = zfcp_scsi_slave_alloc, - .slave_configure = zfcp_scsi_slave_configure, - .slave_destroy = zfcp_scsi_slave_destroy, - .queuecommand = zfcp_scsi_queuecommand, - .eh_abort_handler = zfcp_scsi_eh_abort_handler, - .eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler, - .eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler, - .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler, - .can_queue = 4096, - .this_id = -1, - .sg_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ, - .cmd_per_lun = 1, - .use_clustering = 1, - .sdev_attrs = zfcp_sysfs_sdev_attrs, - .max_sectors = (ZFCP_QDIO_MAX_SBALES_PER_REQ * 8), - .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1, - .shost_attrs = zfcp_sysfs_shost_attrs, - }, -}; -- cgit v1.2.3 From 038d9446a9e601d7972926ca69fee10eeda6f3c7 Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 22 Feb 2011 19:54:48 +0100 Subject: [SCSI] zfcp: Add information to symbolic port name when running in NPIV mode Query the FC symbolic port name for reporting in the fc_host sysfs and enable the symbolic_name attribute in the fc_host sysfs. When running in NPIV mode, extend the symbolic port name with the devno and the hostname. This allows better identification of Linux systems for SAN and storage administrators. 
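As a rough illustration of the resulting name format (not a patch hunk): in NPIV mode the symbolic name reported by the hardware is suffixed with "DEVNO:" plus the ccw bus ID and "NAME:" plus the host's nodename. A hedged sketch; the function name is hypothetical, and buf/len, hw_name and bus_id stand in for what the driver takes from fc_host_symbolic_name(), the GSPN_ID response and dev_name():

#include <linux/kernel.h>
#include <linux/utsname.h>

static void sketch_npiv_symbolic_name(char *buf, size_t len,
				      const char *hw_name, const char *bus_id)
{
	/* result: "<name from GSPN_ID>DEVNO: <ccw bus id> NAME: <nodename>" */
	snprintf(buf, len, "%sDEVNO: %s NAME: %s",
		 hw_name, bus_id, init_utsname()->nodename);
}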
Signed-off-by: Christof Schmitt Signed-off-by: Steffen Maier Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 2 + drivers/s390/scsi/zfcp_def.h | 1 + drivers/s390/scsi/zfcp_erp.c | 2 + drivers/s390/scsi/zfcp_ext.h | 1 + drivers/s390/scsi/zfcp_fc.c | 120 ++++++++++++++++++++++++++++++++++++++++++ drivers/s390/scsi/zfcp_fc.h | 42 +++++++++++++++ drivers/s390/scsi/zfcp_scsi.c | 1 + 7 files changed, 169 insertions(+) (limited to 'drivers') diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 88691adc1283..645b0fcbb370 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -362,6 +362,7 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device) INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler); INIT_WORK(&adapter->scan_work, zfcp_fc_scan_ports); + INIT_WORK(&adapter->ns_up_work, zfcp_fc_sym_name_update); if (zfcp_qdio_setup(adapter)) goto failed; @@ -427,6 +428,7 @@ void zfcp_adapter_unregister(struct zfcp_adapter *adapter) cancel_work_sync(&adapter->scan_work); cancel_work_sync(&adapter->stat_work); + cancel_work_sync(&adapter->ns_up_work); zfcp_destroy_adapter_work_queue(adapter); zfcp_fc_wka_ports_force_offline(adapter->gs); diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 1566208997b6..527ba48eea57 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -189,6 +189,7 @@ struct zfcp_adapter { struct fsf_qtcb_bottom_port *stats_reset_data; unsigned long stats_reset; struct work_struct scan_work; + struct work_struct ns_up_work; struct service_level service_level; struct workqueue_struct *work_queue; struct device_dma_parameters dma_parms; diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 6c1cddf0d0a0..e1b4f800e226 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -1231,8 +1231,10 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result) if (result == ZFCP_ERP_SUCCEEDED) { register_service_level(&adapter->service_level); queue_work(adapter->work_queue, &adapter->scan_work); + queue_work(adapter->work_queue, &adapter->ns_up_work); } else unregister_service_level(&adapter->service_level); + kref_put(&adapter->ref, zfcp_adapter_release); break; } diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 410d9ddc175a..03627cfd81cd 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -96,6 +96,7 @@ extern int zfcp_fc_gs_setup(struct zfcp_adapter *); extern void zfcp_fc_gs_destroy(struct zfcp_adapter *); extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *); extern int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *); +extern void zfcp_fc_sym_name_update(struct work_struct *); /* zfcp_fsf.c */ extern struct kmem_cache *zfcp_fsf_qtcb_cache; diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index c839a3b6879d..297e6b71ce9c 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -11,6 +11,7 @@ #include #include +#include #include #include #include "zfcp_ext.h" @@ -696,6 +697,125 @@ out: zfcp_fc_wka_port_put(&adapter->gs->ds); } +static int zfcp_fc_gspn(struct zfcp_adapter *adapter, + struct zfcp_fc_req *fc_req) +{ + DECLARE_COMPLETION_ONSTACK(completion); + char devno[] = "DEVNO:"; + struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els; + struct zfcp_fc_gspn_req *gspn_req = &fc_req->u.gspn.req; + struct zfcp_fc_gspn_rsp *gspn_rsp = &fc_req->u.gspn.rsp; + int ret; + + 
zfcp_fc_ct_ns_init(&gspn_req->ct_hdr, FC_NS_GSPN_ID, + FC_SYMBOLIC_NAME_SIZE); + hton24(gspn_req->gspn.fp_fid, fc_host_port_id(adapter->scsi_host)); + + sg_init_one(&fc_req->sg_req, gspn_req, sizeof(*gspn_req)); + sg_init_one(&fc_req->sg_rsp, gspn_rsp, sizeof(*gspn_rsp)); + + ct_els->handler = zfcp_fc_complete; + ct_els->handler_data = &completion; + ct_els->req = &fc_req->sg_req; + ct_els->resp = &fc_req->sg_rsp; + + ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL, + ZFCP_FC_CTELS_TMO); + if (ret) + return ret; + + wait_for_completion(&completion); + if (ct_els->status) + return ct_els->status; + + if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_NPIV && + !(strstr(gspn_rsp->gspn.fp_name, devno))) + snprintf(fc_host_symbolic_name(adapter->scsi_host), + FC_SYMBOLIC_NAME_SIZE, "%s%s %s NAME: %s", + gspn_rsp->gspn.fp_name, devno, + dev_name(&adapter->ccw_device->dev), + init_utsname()->nodename); + else + strlcpy(fc_host_symbolic_name(adapter->scsi_host), + gspn_rsp->gspn.fp_name, FC_SYMBOLIC_NAME_SIZE); + + return 0; +} + +static void zfcp_fc_rspn(struct zfcp_adapter *adapter, + struct zfcp_fc_req *fc_req) +{ + DECLARE_COMPLETION_ONSTACK(completion); + struct Scsi_Host *shost = adapter->scsi_host; + struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els; + struct zfcp_fc_rspn_req *rspn_req = &fc_req->u.rspn.req; + struct fc_ct_hdr *rspn_rsp = &fc_req->u.rspn.rsp; + int ret, len; + + zfcp_fc_ct_ns_init(&rspn_req->ct_hdr, FC_NS_RSPN_ID, + FC_SYMBOLIC_NAME_SIZE); + hton24(rspn_req->rspn.fr_fid.fp_fid, fc_host_port_id(shost)); + len = strlcpy(rspn_req->rspn.fr_name, fc_host_symbolic_name(shost), + FC_SYMBOLIC_NAME_SIZE); + rspn_req->rspn.fr_name_len = len; + + sg_init_one(&fc_req->sg_req, rspn_req, sizeof(*rspn_req)); + sg_init_one(&fc_req->sg_rsp, rspn_rsp, sizeof(*rspn_rsp)); + + ct_els->handler = zfcp_fc_complete; + ct_els->handler_data = &completion; + ct_els->req = &fc_req->sg_req; + ct_els->resp = &fc_req->sg_rsp; + + ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL, + ZFCP_FC_CTELS_TMO); + if (!ret) + wait_for_completion(&completion); +} + +/** + * zfcp_fc_sym_name_update - Retrieve and update the symbolic port name + * @work: ns_up_work of the adapter where to update the symbolic port name + * + * Retrieve the current symbolic port name that may have been set by + * the hardware using the GSPN request and update the fc_host + * symbolic_name sysfs attribute. When running in NPIV mode (and hence + * the port name is unique for this system), update the symbolic port + * name to add Linux specific information and update the FC nameserver + * using the RSPN request. 
+ */ +void zfcp_fc_sym_name_update(struct work_struct *work) +{ + struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter, + ns_up_work); + int ret; + struct zfcp_fc_req *fc_req; + + if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT && + fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV) + return; + + fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_KERNEL); + if (!fc_req) + return; + + ret = zfcp_fc_wka_port_get(&adapter->gs->ds); + if (ret) + goto out_free; + + ret = zfcp_fc_gspn(adapter, fc_req); + if (ret || fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV) + goto out_ds_put; + + memset(fc_req, 0, sizeof(*fc_req)); + zfcp_fc_rspn(adapter, fc_req); + +out_ds_put: + zfcp_fc_wka_port_put(&adapter->gs->ds); +out_free: + kmem_cache_free(zfcp_fc_req_cache, fc_req); +} + static void zfcp_fc_ct_els_job_handler(void *data) { struct fc_bsg_job *job = data; diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h index 5243ce44aa42..4561f3bf7300 100644 --- a/drivers/s390/scsi/zfcp_fc.h +++ b/drivers/s390/scsi/zfcp_fc.h @@ -83,6 +83,40 @@ struct zfcp_fc_gpn_ft_req { struct fc_ns_gid_ft gpn_ft; } __packed; +/** + * struct zfcp_fc_gspn_req - container for ct header plus GSPN_ID request + * @ct_hdr: FC GS common transport header + * @gspn: GSPN_ID request + */ +struct zfcp_fc_gspn_req { + struct fc_ct_hdr ct_hdr; + struct fc_gid_pn_resp gspn; +} __packed; + +/** + * struct zfcp_fc_gspn_rsp - container for ct header plus GSPN_ID response + * @ct_hdr: FC GS common transport header + * @gspn: GSPN_ID response + * @name: The name string of the GSPN_ID response + */ +struct zfcp_fc_gspn_rsp { + struct fc_ct_hdr ct_hdr; + struct fc_gspn_resp gspn; + char name[FC_SYMBOLIC_NAME_SIZE]; +} __packed; + +/** + * struct zfcp_fc_rspn_req - container for ct header plus RSPN_ID request + * @ct_hdr: FC GS common transport header + * @rspn: RSPN_ID request + * @name: The name string of the RSPN_ID request + */ +struct zfcp_fc_rspn_req { + struct fc_ct_hdr ct_hdr; + struct fc_ns_rspn rspn; + char name[FC_SYMBOLIC_NAME_SIZE]; +} __packed; + /** * struct zfcp_fc_req - Container for FC ELS and CT requests sent from zfcp * @ct_els: data required for issuing fsf command @@ -107,6 +141,14 @@ struct zfcp_fc_req { struct scatterlist sg_rsp2[ZFCP_FC_GPN_FT_NUM_BUFS - 1]; struct zfcp_fc_gpn_ft_req req; } gpn_ft; + struct { + struct zfcp_fc_gspn_req req; + struct zfcp_fc_gspn_rsp rsp; + } gspn; + struct { + struct zfcp_fc_rspn_req req; + struct fc_ct_hdr rsp; + } rspn; } u; }; diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index dbba0827127f..2a4991d6d4d5 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -720,6 +720,7 @@ struct fc_function_template zfcp_transport_functions = { /* no functions registered for following dynamic attributes but directly set by LLDD */ .show_host_port_type = 1, + .show_host_symbolic_name = 1, .show_host_speed = 1, .show_host_port_id = 1, .dd_bsg_size = sizeof(struct zfcp_fsf_ct_els), -- cgit v1.2.3 From f5e3e40b398d3593e26fcced987d4bd7b8f9e57a Mon Sep 17 00:00:00 2001 From: Madhuranath Iyengar Date: Wed, 23 Feb 2011 15:27:06 -0800 Subject: [SCSI] qla2xxx: Remove host_lock in queuecommand function Also in qla_os.c, rename the function, remove DEF_SCSI_QCMD, etc. 
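For context, an illustrative sketch (hypothetical sketch_qc* names, not taken from the patch) of the two queuecommand shapes involved in this conversion: the old _lck variant is wrapped by DEF_SCSI_QCMD(), which takes host_lock around the call, while the new prototype is entered without the lock and completes failed commands through cmd->scsi_done():

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

/* Old shape: driver logic in a _lck function; DEF_SCSI_QCMD() generates
 * the real entry point, which acquires host_lock around the call. */
static int sketch_qc_lck(struct scsi_cmnd *cmd,
			 void (*done)(struct scsi_cmnd *))
{
	cmd->result = DID_NO_CONNECT << 16;
	done(cmd);
	return 0;
}
static DEF_SCSI_QCMD(sketch_qc)

/* New shape after this patch: lock-free entry point, and failed commands
 * are completed through cmd->scsi_done() instead of a done() argument. */
static int sketch_qc_lockfree(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	cmd->result = DID_NO_CONNECT << 16;
	cmd->scsi_done(cmd);
	return 0;
}

Dropping the wrapper also removes the unlock/relock dance the old qla2xxx code did around its slow paths, which is what the hunks below delete.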
Signed-off-by: Madhuranath Iyengar Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_os.c | 22 ++++++---------------- 1 file changed, 6 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index c194c23ca1fb..a37ac8b73e55 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -506,7 +506,7 @@ qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str) static inline srb_t * qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport, - struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) + struct scsi_cmnd *cmd) { srb_t *sp; struct qla_hw_data *ha = vha->hw; @@ -520,14 +520,13 @@ qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport, sp->cmd = cmd; sp->flags = 0; CMD_SP(cmd) = (void *)sp; - cmd->scsi_done = done; sp->ctx = NULL; return sp; } static int -qla2xxx_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) +qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) { scsi_qla_host_t *vha = shost_priv(cmd->device->host); fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; @@ -537,7 +536,6 @@ qla2xxx_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *) srb_t *sp; int rval; - spin_unlock_irq(vha->host->host_lock); if (ha->flags.eeh_busy) { if (ha->flags.pci_channel_io_perm_failure) cmd->result = DID_NO_CONNECT << 16; @@ -570,40 +568,32 @@ qla2xxx_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *) goto qc24_target_busy; } - sp = qla2x00_get_new_sp(base_vha, fcport, cmd, done); + sp = qla2x00_get_new_sp(base_vha, fcport, cmd); if (!sp) - goto qc24_host_busy_lock; + goto qc24_host_busy; rval = ha->isp_ops->start_scsi(sp); if (rval != QLA_SUCCESS) goto qc24_host_busy_free_sp; - spin_lock_irq(vha->host->host_lock); - return 0; qc24_host_busy_free_sp: qla2x00_sp_free_dma(sp); mempool_free(sp, ha->srb_mempool); -qc24_host_busy_lock: - spin_lock_irq(vha->host->host_lock); +qc24_host_busy: return SCSI_MLQUEUE_HOST_BUSY; qc24_target_busy: - spin_lock_irq(vha->host->host_lock); return SCSI_MLQUEUE_TARGET_BUSY; qc24_fail_command: - spin_lock_irq(vha->host->host_lock); - done(cmd); + cmd->scsi_done(cmd); return 0; } -static DEF_SCSI_QCMD(qla2xxx_queuecommand) - - /* * qla2x00_eh_wait_on_command * Waits for the command to be returned by the Firmware for some -- cgit v1.2.3 From 7c8cf2fee1027caaadf0e96145ff97f931ba4bdd Mon Sep 17 00:00:00 2001 From: Chad Dupuis Date: Wed, 23 Feb 2011 15:27:07 -0800 Subject: [SCSI] qla2xxx: Fix array subscript is above array bounds in qla2xx_build_scsi_type_6_iocbs(). The additional increment of dsd_seg was causing the compiler to throw an array out of bounds warning. This patch moves to a direct assignment of cmd_pkt->fcp_data_dseg_len so that the compiler doesn't generate an array out bounds warning. 
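To make the warning easier to picture, a toy example (the struct here is invented; the real layout is struct cmd_type_6 in the qla2xxx firmware headers): walking a pointer derived from the address of a two-element field past its end is what gcc flags, while assigning the following field directly is equivalent and silent:

#include <linux/kernel.h>
#include <linux/types.h>

/* Toy stand-in for the relevant part of the IOCB; field names mirror the
 * real layout but the struct itself is illustrative only. */
struct toy_cmd_type_6 {
	u32 fcp_data_dseg_address[2];	/* 64-bit DMA address, split in two */
	u32 fcp_data_dseg_len;
};

static void toy_fill_dseg(struct toy_cmd_type_6 *pkt, u64 dma, u32 len)
{
	u32 *dsd_seg = (u32 *)&pkt->fcp_data_dseg_address;

	*dsd_seg++ = lower_32_bits(dma);
	*dsd_seg++ = upper_32_bits(dma);
	/* Old code did "*dsd_seg++ = len;" here, stepping past the
	 * two-element array: that is the "array subscript is above array
	 * bounds" warning.  Writing the field directly stores the same
	 * value without the out-of-bounds pointer arithmetic. */
	pkt->fcp_data_dseg_len = len;
}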
Signed-off-by: Chad Dupuis Signed-off-by: Madhuranath Iyengar Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_nx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index fdb96a3584a5..b37445d4ea2f 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c @@ -2547,7 +2547,7 @@ qla2xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt, dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address; *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); - *dsd_seg++ = dsd_list_len; + cmd_pkt->fcp_data_dseg_len = dsd_list_len; } else { *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); -- cgit v1.2.3 From 0060ddf8f7b10799e0789d10ac505cd3fb7576cb Mon Sep 17 00:00:00 2001 From: Giridhar Malavali Date: Wed, 23 Feb 2011 15:27:08 -0800 Subject: [SCSI] qla2xxx: Reinitialize the device initialize timeout value after reset on ISP82xx. Signed-off-by: Giridhar Malavali Signed-off-by: Madhuranath Iyengar Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_nx.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index b37445d4ea2f..4ff80ee1d416 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c @@ -3557,6 +3557,8 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha) break; case QLA82XX_DEV_NEED_RESET: qla82xx_need_reset_handler(vha); + dev_init_timeout = jiffies + + (ha->nx_dev_init_timeout * HZ); break; case QLA82XX_DEV_NEED_QUIESCENT: qla82xx_need_qsnt_handler(vha); -- cgit v1.2.3 From cf2d771255251bfd4570b01bea491a631b4033c9 Mon Sep 17 00:00:00 2001 From: Giridhar Malavali Date: Wed, 23 Feb 2011 15:27:09 -0800 Subject: [SCSI] qla2xxx: Display nport_id when any SNS command fails. 
Signed-off-by: Giridhar Malavali Signed-off-by: Madhuranath Iyengar Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_gs.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index 4c083928c2fb..a3ea6f03a534 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -121,8 +121,11 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt, rval = QLA_FUNCTION_FAILED; if (ms_pkt->entry_status != 0) { - DEBUG2_3(printk("scsi(%ld): %s failed, error status (%x).\n", - vha->host_no, routine, ms_pkt->entry_status)); + DEBUG2_3(printk(KERN_WARNING "scsi(%ld): %s failed, error status " + "(%x) on port_id: %02x%02x%02x.\n", + vha->host_no, routine, ms_pkt->entry_status, + vha->d_id.b.domain, vha->d_id.b.area, + vha->d_id.b.al_pa)); } else { if (IS_FWI2_CAPABLE(ha)) comp_status = le16_to_cpu( @@ -136,8 +139,10 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt, if (ct_rsp->header.response != __constant_cpu_to_be16(CT_ACCEPT_RESPONSE)) { DEBUG2_3(printk("scsi(%ld): %s failed, " - "rejected request:\n", vha->host_no, - routine)); + "rejected request on port_id: %02x%02x%02x\n", + vha->host_no, routine, + vha->d_id.b.domain, vha->d_id.b.area, + vha->d_id.b.al_pa)); DEBUG2_3(qla2x00_dump_buffer( (uint8_t *)&ct_rsp->header, sizeof(struct ct_rsp_hdr))); @@ -147,8 +152,10 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt, break; default: DEBUG2_3(printk("scsi(%ld): %s failed, completion " - "status (%x).\n", vha->host_no, routine, - comp_status)); + "status (%x) on port_id: %02x%02x%02x.\n", + vha->host_no, routine, comp_status, + vha->d_id.b.domain, vha->d_id.b.area, + vha->d_id.b.al_pa)); break; } } -- cgit v1.2.3 From 7190575f7a48c82fd99363748544023739dc4bb8 Mon Sep 17 00:00:00 2001 From: Giridhar Malavali Date: Wed, 23 Feb 2011 15:27:10 -0800 Subject: [SCSI] qla2xxx: Abort pending commands for faster recovery during ISP reset. 
Signed-off-by: Giridhar Malavali Signed-off-by: Madhuranath Iyengar Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_def.h | 9 ++- drivers/scsi/qla2xxx/qla_gbl.h | 1 + drivers/scsi/qla2xxx/qla_init.c | 11 +-- drivers/scsi/qla2xxx/qla_mbx.c | 4 +- drivers/scsi/qla2xxx/qla_nx.c | 155 ++++++++++++++++++++++++++++------------ drivers/scsi/qla2xxx/qla_os.c | 6 +- 6 files changed, 125 insertions(+), 61 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index ccfc8e78be21..6c51c0a35b9e 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -2402,13 +2402,13 @@ struct qla_hw_data { volatile struct { uint32_t mbox_int :1; uint32_t mbox_busy :1; - uint32_t disable_risc_code_load :1; uint32_t enable_64bit_addressing :1; uint32_t enable_lip_reset :1; uint32_t enable_target_reset :1; uint32_t enable_lip_full_login :1; uint32_t enable_led_scheme :1; + uint32_t msi_enabled :1; uint32_t msix_enabled :1; uint32_t disable_serdes :1; @@ -2417,6 +2417,7 @@ struct qla_hw_data { uint32_t pci_channel_io_perm_failure :1; uint32_t fce_enabled :1; uint32_t fac_supported :1; + uint32_t chip_reset_done :1; uint32_t port0 :1; uint32_t running_gold_fw :1; @@ -2424,9 +2425,11 @@ struct qla_hw_data { uint32_t cpu_affinity_enabled :1; uint32_t disable_msix_handshake :1; uint32_t fcp_prio_enabled :1; - uint32_t fw_hung :1; - uint32_t quiesce_owner:1; + uint32_t isp82xx_fw_hung:1; + + uint32_t quiesce_owner:1; uint32_t thermal_supported:1; + uint32_t isp82xx_reset_hdlr_active:1; /* 26 bits */ } flags; diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 89e900adb679..d48326ee3f61 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -565,6 +565,7 @@ extern int qla82xx_mbx_intr_enable(scsi_qla_host_t *); extern int qla82xx_mbx_intr_disable(scsi_qla_host_t *); extern void qla82xx_start_iocbs(srb_t *); extern int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *); +extern void qla82xx_chip_reset_cleanup(scsi_qla_host_t *); /* BSG related functions */ extern int qla24xx_bsg_request(struct fc_bsg_job *); diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index f948e1a73aec..6370cdc193d5 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -1967,7 +1967,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha) } else { /* Mailbox cmd failed. Timeout on min_wait. */ if (time_after_eq(jiffies, mtime) || - (IS_QLA82XX(ha) && ha->flags.fw_hung)) + ha->flags.isp82xx_fw_hung) break; } @@ -3980,13 +3980,8 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) if (!ha->flags.eeh_busy) { /* Make sure for ISP 82XX IO DMA is complete */ - if (IS_QLA82XX(ha)) { - if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, - WAIT_HOST) == QLA_SUCCESS) { - DEBUG2(qla_printk(KERN_INFO, ha, - "Done wait for pending commands\n")); - } - } + if (IS_QLA82XX(ha)) + qla82xx_chip_reset_cleanup(vha); /* Requeue all commands in outstanding command list. 
*/ qla2x00_abort_all_cmds(vha, DID_RESET << 16); diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index e473e9fb363c..a7f7346c783a 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -83,7 +83,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) return QLA_FUNCTION_TIMEOUT; } - if (IS_QLA82XX(ha) && ha->flags.fw_hung) { + if (ha->flags.isp82xx_fw_hung) { /* Setting Link-Down error */ mcp->mb[0] = MBS_LINK_DOWN_ERROR; rval = QLA_FUNCTION_FAILED; @@ -223,7 +223,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) ha->flags.mbox_int = 0; clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); - if (IS_QLA82XX(ha) && ha->flags.fw_hung) { + if (ha->flags.isp82xx_fw_hung) { ha->flags.mbox_busy = 0; /* Setting Link-Down error */ mcp->mb[0] = MBS_LINK_DOWN_ERROR; diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index 4ff80ee1d416..f1ffee4b44ca 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c @@ -3457,46 +3457,28 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha) } } -static void +int qla82xx_check_fw_alive(scsi_qla_host_t *vha) { - uint32_t fw_heartbeat_counter, halt_status; - struct qla_hw_data *ha = vha->hw; + uint32_t fw_heartbeat_counter; + int status = 0; - fw_heartbeat_counter = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER); + fw_heartbeat_counter = qla82xx_rd_32(vha->hw, + QLA82XX_PEG_ALIVE_COUNTER); /* all 0xff, assume AER/EEH in progress, ignore */ if (fw_heartbeat_counter == 0xffffffff) - return; + return status; if (vha->fw_heartbeat_counter == fw_heartbeat_counter) { vha->seconds_since_last_heartbeat++; /* FW not alive after 2 seconds */ if (vha->seconds_since_last_heartbeat == 2) { vha->seconds_since_last_heartbeat = 0; - halt_status = qla82xx_rd_32(ha, - QLA82XX_PEG_HALT_STATUS1); - if (halt_status & HALT_STATUS_UNRECOVERABLE) { - set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags); - } else { - qla_printk(KERN_INFO, ha, - "scsi(%ld): %s - detect abort needed\n", - vha->host_no, __func__); - set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); - } - qla2xxx_wake_dpc(vha); - ha->flags.fw_hung = 1; - if (ha->flags.mbox_busy) { - ha->flags.mbox_int = 1; - DEBUG2(qla_printk(KERN_ERR, ha, - "Due to fw hung, doing premature " - "completion of mbx command\n")); - if (test_bit(MBX_INTR_WAIT, - &ha->mbx_cmd_flags)) - complete(&ha->mbx_intr_comp); - } + status = 1; } } else vha->seconds_since_last_heartbeat = 0; vha->fw_heartbeat_counter = fw_heartbeat_counter; + return status; } /* @@ -3598,30 +3580,18 @@ exit: void qla82xx_watchdog(scsi_qla_host_t *vha) { - uint32_t dev_state; + uint32_t dev_state, halt_status; struct qla_hw_data *ha = vha->hw; - dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); - /* don't poll if reset is going on */ - if (!(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || - test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || - test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))) { - if (dev_state == QLA82XX_DEV_NEED_RESET) { + if (!ha->flags.isp82xx_reset_hdlr_active) { + dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); + if (dev_state == QLA82XX_DEV_NEED_RESET && + !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) { qla_printk(KERN_WARNING, ha, - "%s(): Adapter reset needed!\n", __func__); + "%s(): Adapter reset needed!\n", __func__); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); - ha->flags.fw_hung = 1; - if (ha->flags.mbox_busy) { - ha->flags.mbox_int = 1; - DEBUG2(qla_printk(KERN_ERR, ha, - "Need reset, doing premature " - "completion of mbx 
command\n")); - if (test_bit(MBX_INTR_WAIT, - &ha->mbx_cmd_flags)) - complete(&ha->mbx_intr_comp); - } } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT && !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) { DEBUG(qla_printk(KERN_INFO, ha, @@ -3631,6 +3601,31 @@ void qla82xx_watchdog(scsi_qla_host_t *vha) qla2xxx_wake_dpc(vha); } else { qla82xx_check_fw_alive(vha); + if (qla82xx_check_fw_alive(vha)) { + halt_status = qla82xx_rd_32(ha, + QLA82XX_PEG_HALT_STATUS1); + if (halt_status & HALT_STATUS_UNRECOVERABLE) { + set_bit(ISP_UNRECOVERABLE, + &vha->dpc_flags); + } else { + qla_printk(KERN_INFO, ha, + "scsi(%ld): %s - detect abort needed\n", + vha->host_no, __func__); + set_bit(ISP_ABORT_NEEDED, + &vha->dpc_flags); + } + qla2xxx_wake_dpc(vha); + ha->flags.isp82xx_fw_hung = 1; + if (ha->flags.mbox_busy) { + ha->flags.mbox_int = 1; + DEBUG2(qla_printk(KERN_ERR, ha, + "Due to fw hung, doing premature " + "completion of mbx command\n")); + if (test_bit(MBX_INTR_WAIT, + &ha->mbx_cmd_flags)) + complete(&ha->mbx_intr_comp); + } + } } } } @@ -3665,6 +3660,7 @@ qla82xx_abort_isp(scsi_qla_host_t *vha) "Exiting.\n", __func__, vha->host_no); return QLA_SUCCESS; } + ha->flags.isp82xx_reset_hdlr_active = 1; qla82xx_idc_lock(ha); dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); @@ -3685,7 +3681,8 @@ qla82xx_abort_isp(scsi_qla_host_t *vha) qla82xx_idc_unlock(ha); if (rval == QLA_SUCCESS) { - ha->flags.fw_hung = 0; + ha->flags.isp82xx_fw_hung = 0; + ha->flags.isp82xx_reset_hdlr_active = 0; qla82xx_restart_isp(vha); } @@ -3793,3 +3790,71 @@ int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha) return status; } + +void +qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha) +{ + int i; + unsigned long flags; + struct qla_hw_data *ha = vha->hw; + + /* Check if 82XX firmware is alive or not + * We may have arrived here from NEED_RESET + * detection only + */ + if (!ha->flags.isp82xx_fw_hung) { + for (i = 0; i < 2; i++) { + msleep(1000); + if (qla82xx_check_fw_alive(vha)) { + ha->flags.isp82xx_fw_hung = 1; + if (ha->flags.mbox_busy) { + ha->flags.mbox_int = 1; + complete(&ha->mbx_intr_comp); + } + break; + } + } + } + + /* Abort all commands gracefully if fw NOT hung */ + if (!ha->flags.isp82xx_fw_hung) { + int cnt, que; + srb_t *sp; + struct req_que *req; + + spin_lock_irqsave(&ha->hardware_lock, flags); + for (que = 0; que < ha->max_req_queues; que++) { + req = ha->req_q_map[que]; + if (!req) + continue; + for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { + sp = req->outstanding_cmds[cnt]; + if (sp) { + if (!sp->ctx || + (sp->flags & SRB_FCP_CMND_DMA_VALID)) { + spin_unlock_irqrestore( + &ha->hardware_lock, flags); + if (ha->isp_ops->abort_command(sp)) { + qla_printk(KERN_INFO, ha, + "scsi(%ld): mbx abort command failed in %s\n", + vha->host_no, __func__); + } else { + qla_printk(KERN_INFO, ha, + "scsi(%ld): mbx abort command success in %s\n", + vha->host_no, __func__); + } + spin_lock_irqsave(&ha->hardware_lock, flags); + } + } + } + } + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + /* Wait for pending cmds (physical and virtual) to complete */ + if (!qla2x00_eh_wait_for_pending_commands(vha, 0, 0, + WAIT_HOST) == QLA_SUCCESS) { + DEBUG2(qla_printk(KERN_INFO, ha, + "Done wait for pending commands\n")); + } + } +} diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index a37ac8b73e55..032d494da0db 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -3793,7 +3793,7 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 
ha->flags.eeh_busy = 1; /* For ISP82XX complete any pending mailbox cmd */ if (IS_QLA82XX(ha)) { - ha->flags.fw_hung = 1; + ha->flags.isp82xx_fw_hung = 1; if (ha->flags.mbox_busy) { ha->flags.mbox_int = 1; DEBUG2(qla_printk(KERN_ERR, ha, @@ -3933,7 +3933,7 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha) qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY); qla82xx_idc_unlock(ha); - ha->flags.fw_hung = 0; + ha->flags.isp82xx_fw_hung = 0; rval = qla82xx_restart_isp(base_vha); qla82xx_idc_lock(ha); /* Clear driver state register */ @@ -3946,7 +3946,7 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha) "This devfn is not reset owner = 0x%x\n", ha->pdev->devfn)); if ((qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE) == QLA82XX_DEV_READY)) { - ha->flags.fw_hung = 0; + ha->flags.isp82xx_fw_hung = 0; rval = qla82xx_restart_isp(base_vha); qla82xx_idc_lock(ha); qla82xx_set_drv_active(base_vha); -- cgit v1.2.3 From 862cd01e5c2f3f5cf1ca989aa1184d61b6988ba2 Mon Sep 17 00:00:00 2001 From: Giridhar Malavali Date: Wed, 23 Feb 2011 15:27:11 -0800 Subject: [SCSI] qla2xxx: Don't wait for active mailbox command completion when firmware is hung. Signed-off-by: Giridhar Malavali Signed-off-by: Madhuranath Iyengar Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_mbx.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index a7f7346c783a..b4a82b5f0f8b 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -71,6 +71,13 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) return QLA_FUNCTION_TIMEOUT; } + if (ha->flags.isp82xx_fw_hung) { + /* Setting Link-Down error */ + mcp->mb[0] = MBS_LINK_DOWN_ERROR; + rval = QLA_FUNCTION_FAILED; + goto premature_exit; + } + /* * Wait for active mailbox commands to finish by waiting at most tov * seconds. This is to serialize actual issuing of mailbox cmds during @@ -83,13 +90,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) return QLA_FUNCTION_TIMEOUT; } - if (ha->flags.isp82xx_fw_hung) { - /* Setting Link-Down error */ - mcp->mb[0] = MBS_LINK_DOWN_ERROR; - rval = QLA_FUNCTION_FAILED; - goto premature_exit; - } - ha->flags.mbox_busy = 1; /* Save mailbox command for debug */ ha->mcp = mcp; -- cgit v1.2.3 From 7a78ceda1c294addb42a54e10d614c6ff0c4a6c9 Mon Sep 17 00:00:00 2001 From: Andrew Vasquez Date: Wed, 23 Feb 2011 15:27:12 -0800 Subject: [SCSI] qla2xxx: Pass right CT command string for CT status processing. 
Signed-off-by: Andrew Vasquez Signed-off-by: Giridhar Malavali Signed-off-by: Madhuranath Iyengar Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_gs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index a3ea6f03a534..74a91b6dfc68 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -1972,7 +1972,7 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list) "scsi(%ld): GFF_ID issue IOCB failed " "(%d).\n", vha->host_no, rval)); } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, - "GPN_ID") != QLA_SUCCESS) { + "GFF_ID") != QLA_SUCCESS) { DEBUG2_3(printk(KERN_INFO "scsi(%ld): GFF_ID IOCB status had a " "failure status code\n", vha->host_no)); -- cgit v1.2.3 From 6aef87bea9d37d5adf45f3d1c674ccf6f95799ca Mon Sep 17 00:00:00 2001 From: Andrew Vasquez Date: Wed, 23 Feb 2011 15:27:13 -0800 Subject: [SCSI] qla2xxx: Clear any stale login-states during an adapter reset. By not clearing the 'login needed' nor 'login outstanding' states for an fcport after a big-hammer (adapter reset), the driver may not properly perform a PLOGI/PRLI sequence (and lose visibility to the rport) during a follow-on SNS scan. Signed-off-by: Andrew Vasquez Signed-off-by: Madhuranath Iyengar Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_init.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 6370cdc193d5..9b6e39dc0d79 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -3943,6 +3943,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) struct qla_hw_data *ha = vha->hw; struct scsi_qla_host *vp; unsigned long flags; + fc_port_t *fcport; vha->flags.online = 0; ha->flags.chip_reset_done = 0; @@ -3978,6 +3979,22 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) LOOP_DOWN_TIME); } + /* Clear all async request states across all VPs. */ + list_for_each_entry(fcport, &vha->vp_fcports, list) + fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); + spin_lock_irqsave(&ha->vport_slock, flags); + list_for_each_entry(vp, &ha->vp_list, list) { + atomic_inc(&vp->vref_count); + spin_unlock_irqrestore(&ha->vport_slock, flags); + + list_for_each_entry(fcport, &vp->vp_fcports, list) + fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); + + spin_lock_irqsave(&ha->vport_slock, flags); + atomic_dec(&vp->vref_count); + } + spin_unlock_irqrestore(&ha->vport_slock, flags); + if (!ha->flags.eeh_busy) { /* Make sure for ISP 82XX IO DMA is complete */ if (IS_QLA82XX(ha)) -- cgit v1.2.3 From 97dec564fd4948e0e560869c80b76e166ca2a83e Mon Sep 17 00:00:00 2001 From: Andrew Vasquez Date: Wed, 23 Feb 2011 15:27:14 -0800 Subject: [SCSI] qla2xxx: Adjust FCP_RSP response-info field check after TMF completion. Based on reading of the FCP2/4 specification, the driver cannot expect the storage device to send FCP_RSP information. Instead, the driver should interpret the data which is present in the frame to base decisions on the success or failure of the system. 
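In plain terms, the new decision flow looks like the following hedged sketch (hypothetical helper name; sts_entry_24xx, CS_*, SS_* and QLA_* come from the qla2xxx private headers, here assumed via qla_def.h). Response info is treated as optional, so a TMF only fails on an explicit bad response code:

#include "qla_def.h"	/* struct sts_entry_24xx, CS_*, SS_*, QLA_* */

static int sketch_tmf_iocb_status(struct sts_entry_24xx *sts)
{
	if (le16_to_cpu(sts->comp_status) != CS_COMPLETE)
		return QLA_FUNCTION_FAILED;

	/* FCP_RSP response info is optional: only trust it when the target
	 * marked it valid, and a short or absent field is no longer an
	 * error in itself. */
	if (le16_to_cpu(sts->scsi_status) & SS_RESPONSE_INFO_LEN_VALID) {
		if (le32_to_cpu(sts->rsp_data_len) >= 4 && sts->data[3])
			return QLA_FUNCTION_FAILED;	/* rsp_code != 0 */
	}
	return QLA_SUCCESS;
}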
Signed-off-by: Andrew Vasquez Signed-off-by: Madhuranath Iyengar Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_mbx.c | 29 +++++++++++++---------------- 1 file changed, 13 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index b4a82b5f0f8b..7a7c0ecfe7dd 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -2462,22 +2462,19 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, "-- completion status (%x).\n", __func__, vha->host_no, le16_to_cpu(sts->comp_status))); rval = QLA_FUNCTION_FAILED; - } else if (!(le16_to_cpu(sts->scsi_status) & - SS_RESPONSE_INFO_LEN_VALID)) { - DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " - "-- no response info (%x).\n", __func__, vha->host_no, - le16_to_cpu(sts->scsi_status))); - rval = QLA_FUNCTION_FAILED; - } else if (le32_to_cpu(sts->rsp_data_len) < 4) { - DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " - "-- not enough response info (%d).\n", __func__, - vha->host_no, le32_to_cpu(sts->rsp_data_len))); - rval = QLA_FUNCTION_FAILED; - } else if (sts->data[3]) { - DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " - "-- response (%x).\n", __func__, - vha->host_no, sts->data[3])); - rval = QLA_FUNCTION_FAILED; + } else if (le16_to_cpu(sts->scsi_status) & + SS_RESPONSE_INFO_LEN_VALID) { + if (le32_to_cpu(sts->rsp_data_len) < 4) { + DEBUG2_3_11(printk("%s(%ld): ignoring inconsistent " + "data length -- not enough response info (%d).\n", + __func__, vha->host_no, + le32_to_cpu(sts->rsp_data_len))); + } else if (sts->data[3]) { + DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " + "-- response (%x).\n", __func__, + vha->host_no, sts->data[3])); + rval = QLA_FUNCTION_FAILED; + } } /* Issue marker IOCB. */ -- cgit v1.2.3 From ff2fc42e74e43721310bff710416230aae6ce0b9 Mon Sep 17 00:00:00 2001 From: Andrew Vasquez Date: Wed, 23 Feb 2011 15:27:15 -0800 Subject: [SCSI] qla2xxx: Propagate block-layer tags on submitted I/Os. Signed-off-by: Andrew Vasquez Signed-off-by: Madhuranath Iyengar Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_iocb.c | 52 +++++++++++++++++++++++++++++++++++++++-- drivers/scsi/qla2xxx/qla_nx.c | 32 +++++++++++++++++++++++++ 2 files changed, 82 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 4c1ba6263eb3..d78d5896fc33 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -328,6 +328,7 @@ qla2x00_start_scsi(srb_t *sp) struct qla_hw_data *ha; struct req_que *req; struct rsp_que *rsp; + char tag[2]; /* Setup device pointers. */ ret = 0; @@ -406,7 +407,22 @@ qla2x00_start_scsi(srb_t *sp) cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun); /* Update tagged queuing modifier */ - cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG); + if (scsi_populate_tag_msg(cmd, tag)) { + switch (tag[0]) { + case HEAD_OF_QUEUE_TAG: + cmd_pkt->control_flags = + __constant_cpu_to_le16(CF_HEAD_TAG); + break; + case ORDERED_QUEUE_TAG: + cmd_pkt->control_flags = + __constant_cpu_to_le16(CF_ORDERED_TAG); + break; + default: + cmd_pkt->control_flags = + __constant_cpu_to_le16(CF_SIMPLE_TAG); + break; + } + } /* Load SCSI command packet. 
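The conversion repeated in the hunks below boils down to one mapping from the block-layer tag message to the FCP task attribute. A hypothetical helper (the patch open-codes this in each command-build path) shows the intent; TSK_* are the qla2xxx firmware task-attribute encodings from its private headers, with TSK_SIMPLE being 0:

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>

static uint8_t sketch_block_tag_to_task_attr(struct scsi_cmnd *cmd)
{
	char tag[2];

	if (!scsi_populate_tag_msg(cmd, tag))
		return TSK_SIMPLE;		/* untagged command */

	switch (tag[0]) {
	case HEAD_OF_QUEUE_TAG:
		return TSK_HEAD_OF_QUEUE;
	case ORDERED_QUEUE_TAG:
		return TSK_ORDERED;
	default:
		return TSK_SIMPLE;
	}
}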
*/ memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len); @@ -971,6 +987,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, uint16_t fcp_cmnd_len; struct fcp_cmnd *fcp_cmnd; dma_addr_t crc_ctx_dma; + char tag[2]; cmd = sp->cmd; @@ -1068,9 +1085,27 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF)); cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32( MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF)); - fcp_cmnd->task_attribute = 0; fcp_cmnd->task_management = 0; + /* + * Update tagged queuing modifier if using command tag queuing + */ + if (scsi_populate_tag_msg(cmd, tag)) { + switch (tag[0]) { + case HEAD_OF_QUEUE_TAG: + fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE; + break; + case ORDERED_QUEUE_TAG: + fcp_cmnd->task_attribute = TSK_ORDERED; + break; + default: + fcp_cmnd->task_attribute = 0; + break; + } + } else { + fcp_cmnd->task_attribute = 0; + } + cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */ DEBUG18(printk(KERN_INFO "%s(%ld): Total SG(s) Entries %d, Data" @@ -1177,6 +1212,7 @@ qla24xx_start_scsi(srb_t *sp) struct scsi_cmnd *cmd = sp->cmd; struct scsi_qla_host *vha = sp->fcport->vha; struct qla_hw_data *ha = vha->hw; + char tag[2]; /* Setup device pointers. */ ret = 0; @@ -1260,6 +1296,18 @@ qla24xx_start_scsi(srb_t *sp) int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun); host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); + /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */ + if (scsi_populate_tag_msg(cmd, tag)) { + switch (tag[0]) { + case HEAD_OF_QUEUE_TAG: + cmd_pkt->task = TSK_HEAD_OF_QUEUE; + break; + case ORDERED_QUEUE_TAG: + cmd_pkt->task = TSK_ORDERED; + break; + } + } + /* Load SCSI command packet. */ memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len); host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb)); diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index f1ffee4b44ca..76ec876e6b21 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c @@ -7,6 +7,7 @@ #include "qla_def.h" #include #include +#include #define MASK(n) ((1ULL<<(n))-1) #define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | \ @@ -2620,6 +2621,7 @@ qla82xx_start_scsi(srb_t *sp) struct qla_hw_data *ha = vha->hw; struct req_que *req = NULL; struct rsp_que *rsp = NULL; + char tag[2]; /* Setup device pointers. */ ret = 0; @@ -2770,6 +2772,22 @@ sufficient_dsds: int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun); host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); + /* + * Update tagged queuing modifier -- default is TSK_SIMPLE (0). + */ + if (scsi_populate_tag_msg(cmd, tag)) { + switch (tag[0]) { + case HEAD_OF_QUEUE_TAG: + ctx->fcp_cmnd->task_attribute = + TSK_HEAD_OF_QUEUE; + break; + case ORDERED_QUEUE_TAG: + ctx->fcp_cmnd->task_attribute = + TSK_ORDERED; + break; + } + } + /* build FCP_CMND IU */ memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd)); int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun); @@ -2835,6 +2853,20 @@ sufficient_dsds: host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); + /* + * Update tagged queuing modifier -- default is TSK_SIMPLE (0). + */ + if (scsi_populate_tag_msg(cmd, tag)) { + switch (tag[0]) { + case HEAD_OF_QUEUE_TAG: + cmd_pkt->task = TSK_HEAD_OF_QUEUE; + break; + case ORDERED_QUEUE_TAG: + cmd_pkt->task = TSK_ORDERED; + break; + } + } + /* Load SCSI command packet. 
*/ memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len); host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb)); -- cgit v1.2.3 From e46ef004f8938eb7bc80f350d8efed9bf57f04b3 Mon Sep 17 00:00:00 2001 From: Saurav Kashyap Date: Wed, 23 Feb 2011 15:27:16 -0800 Subject: [SCSI] qla2xxx: The ISP82XX should be online while waiting for commands completion. If adapter is not online, the driver will not process the response queue, even on getting an interrupt. Signed-off-by: Saurav Kashyap Signed-off-by: Madhuranath Iyengar Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_init.c | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 9b6e39dc0d79..85b86b82b964 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -3945,7 +3945,11 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) unsigned long flags; fc_port_t *fcport; - vha->flags.online = 0; + /* For ISP82XX, driver waits for completion of the commands. + * online flag should be set. + */ + if (!IS_QLA82XX(ha)) + vha->flags.online = 0; ha->flags.chip_reset_done = 0; clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); ha->qla_stats.total_isp_aborts++; @@ -3953,7 +3957,10 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) qla_printk(KERN_INFO, ha, "Performing ISP error recovery - ha= %p.\n", ha); - /* Chip reset does not apply to 82XX */ + /* For ISP82XX, reset_chip is just disabling interrupts. + * Driver waits for the completion of the commands. + * the interrupts need to be enabled. + */ if (!IS_QLA82XX(ha)) ha->isp_ops->reset_chip(vha); @@ -3997,9 +4004,15 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) if (!ha->flags.eeh_busy) { /* Make sure for ISP 82XX IO DMA is complete */ - if (IS_QLA82XX(ha)) + if (IS_QLA82XX(ha)) { qla82xx_chip_reset_cleanup(vha); + /* Done waiting for pending commands. + * Reset the online flag. + */ + vha->flags.online = 0; + } + /* Requeue all commands in outstanding command list. */ qla2x00_abort_all_cmds(vha, DID_RESET << 16); } -- cgit v1.2.3 From 4e98d3b8ac81696d38eb7b54e8566e763f7aa97a Mon Sep 17 00:00:00 2001 From: Andrew Vasquez Date: Wed, 23 Feb 2011 15:27:17 -0800 Subject: [SCSI] qla2xxx: Check return value of fc_block_scsi_eh() The fc_block_scsi_eh() might return with status FAST_IO_FAIL indicating I/O has been terminated due to fast_io_fail timeout. In this case the rport is still blocked, so any error recovery will be failing on this port. Hence we need to check if the return value from fc_block_scsi_eh() is something other than 0, in which case it should just return with that status. 
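The resulting pattern is the same in every eh_* handler; a minimal sketch with a hypothetical handler name, showing how a FAST_IO_FAIL return now short-circuits recovery instead of being ignored:

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_transport_fc.h>

static int sketch_eh_abort(struct scsi_cmnd *cmd)
{
	int ret;

	/* Wait for the rport to leave the blocked state.  0 means proceed;
	 * FAST_IO_FAIL means the fast_io_fail timer fired, recovery on this
	 * rport is pointless, so pass that status straight back. */
	ret = fc_block_scsi_eh(cmd);
	if (ret != 0)
		return ret;

	/* ... driver-specific abort work would go here ... */
	return SUCCESS;
}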
Signed-off-by: Andrew Vasquez Signed-off-by: Madhuranath Iyengar Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_os.c | 29 ++++++++++++++++++++--------- 1 file changed, 20 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 032d494da0db..110cac08121d 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -812,17 +812,20 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) { scsi_qla_host_t *vha = shost_priv(cmd->device->host); srb_t *sp; - int ret = SUCCESS; + int ret; unsigned int id, lun; unsigned long flags; int wait = 0; struct qla_hw_data *ha = vha->hw; - fc_block_scsi_eh(cmd); - if (!CMD_SP(cmd)) return SUCCESS; + ret = fc_block_scsi_eh(cmd); + if (ret != 0) + return ret; + ret = SUCCESS; + id = cmd->device->id; lun = cmd->device->lun; @@ -931,11 +934,13 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type, fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; int err; - fc_block_scsi_eh(cmd); - if (!fcport) return FAILED; + err = fc_block_scsi_eh(cmd); + if (err != 0) + return err; + qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET ISSUED.\n", vha->host_no, cmd->device->id, cmd->device->lun, name); @@ -1009,14 +1014,17 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) int ret = FAILED; unsigned int id, lun; - fc_block_scsi_eh(cmd); - id = cmd->device->id; lun = cmd->device->lun; if (!fcport) return ret; + ret = fc_block_scsi_eh(cmd); + if (ret != 0) + return ret; + ret = FAILED; + qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): BUS RESET ISSUED.\n", vha->host_no, id, lun); @@ -1069,14 +1077,17 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) unsigned int id, lun; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); - fc_block_scsi_eh(cmd); - id = cmd->device->id; lun = cmd->device->lun; if (!fcport) return ret; + ret = fc_block_scsi_eh(cmd); + if (ret != 0) + return ret; + ret = FAILED; + qla_printk(KERN_INFO, ha, "scsi(%ld:%d:%d): ADAPTER RESET ISSUED.\n", vha->host_no, id, lun); -- cgit v1.2.3 From 76f0c37a26b48522a1f4051a7b8d091fc3e545d5 Mon Sep 17 00:00:00 2001 From: Madhuranath Iyengar Date: Wed, 23 Feb 2011 15:27:18 -0800 Subject: [SCSI] qla2xxx: Update version number to 8.03.07.00 We'll be using the new version number scheme from now on. The 'k' in the version in the past will be removed. The format will be: ... A scsi-misc submission increments and resets . An scsi-rc-fixes submission, increments . Signed-off-by: Madhuranath Iyengar Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_version.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index cf0075a2d0c2..3a260c3f055a 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -7,9 +7,9 @@ /* * Driver version */ -#define QLA2XXX_VERSION "8.03.05-k0" +#define QLA2XXX_VERSION "8.03.07.00" #define QLA_DRIVER_MAJOR_VER 8 #define QLA_DRIVER_MINOR_VER 3 -#define QLA_DRIVER_PATCH_VER 5 +#define QLA_DRIVER_PATCH_VER 7 #define QLA_DRIVER_BETA_VER 0 -- cgit v1.2.3 From f55ca84d92d0c249ce4f4b8d493657a59c359c5d Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Fri, 25 Feb 2011 14:04:27 -0600 Subject: [SCSI] lpfc: force retry in queuecommand when port is transitioning If the port takes a while to transition we could exhaust the retries when using DID_TRANSPORT_DISRUPTED. 
For this case we do not want to use any of the cmd's retries, because if the command was running then when it got failed the retry counter was already incremented. And if this is the first time we are seeing the command, (it got queued because it slipped through during the race) then it should not have its retries incremented. The fc class will decide the correct handling later. Signed-off-by: Mike Christie Acked-by: James Smart Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc_scsi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 1255b9d1b1b0..ff20abdc5095 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -3058,7 +3058,7 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) * transport is still transitioning. */ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { - cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0); + cmnd->result = ScsiResult(DID_IMM_RETRY, 0); goto out_fail_command; } if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) -- cgit v1.2.3 From 3496343df5062d36354a106f19c9688a6ecf9734 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Fri, 25 Feb 2011 14:04:28 -0600 Subject: [SCSI] lpfc: block target when port queueing limit is hit Instead of blocking the entire host when the port's queueing limit is hit, we should only block the port's target. This will allow IO to other ports to execute. Signed-off-by: Mike Christie Acked-by: James Smart Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc_scsi.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index ff20abdc5095..bf34178b80bf 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -3062,7 +3062,7 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) goto out_fail_command; } if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) - goto out_host_busy; + goto out_tgt_busy; lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp); if (lpfc_cmd == NULL) { @@ -3179,6 +3179,9 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) out_host_busy: return SCSI_MLQUEUE_HOST_BUSY; + out_tgt_busy: + return SCSI_MLQUEUE_TARGET_BUSY; + out_fail_command: done(cmnd); return 0; -- cgit v1.2.3 From 183ce896d726fb987da589fa8e36fb7214a494cc Mon Sep 17 00:00:00 2001 From: jack_wang Date: Sat, 19 Feb 2011 18:20:53 +0800 Subject: [SCSI] libsas: fix loopback topology bug during discovery In some test envirenment, there is loopback topology test. We should handle this during discovery. 
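An annotated restatement (comments are illustrative only) of the loopback check added by the hunk below; dev, dr and SAS_ADDR_SIZE are exactly the identifiers used in the patch:

    /* The SMP DISCOVER response reports the SAS address attached to this
     * expander phy.  If it equals our own device's address, the phy is
     * cabled back to ourselves (a loopback test topology), so skip it
     * instead of trying to discover a "new" device behind it. */
    if (memcmp(dev->sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE) == 0) {
            sas_printk("Found loopback topology, just ignore it!\n");
            return 0;
    }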
Signed-off-by: Jack Wang Signed-off-by: James Bottomley --- drivers/scsi/libsas/sas_expander.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers') diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index 505ffe358293..f3f693b772ac 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -244,6 +244,11 @@ static int sas_ex_phy_discover_helper(struct domain_device *dev, u8 *disc_req, * dev to host FIS as described in section G.5 of * sas-2 r 04b */ dr = &((struct smp_resp *)disc_resp)->disc; + if (memcmp(dev->sas_addr, dr->attached_sas_addr, + SAS_ADDR_SIZE) == 0) { + sas_printk("Found loopback topology, just ignore it!\n"); + return 0; + } if (!(dr->attached_dev_type == 0 && dr->attached_sata_dev)) break; -- cgit v1.2.3 From eb1b12377376e43aae4be338ffbbc32931ca4d10 Mon Sep 17 00:00:00 2001 From: adam radford Date: Thu, 24 Feb 2011 20:55:56 -0800 Subject: [SCSI] megaraid_sas: Fix failure gotos Signed-off-by: Adam Radford Signed-off-by: James Bottomley --- drivers/scsi/megaraid/megaraid_sas_base.c | 17 ++++++++--------- drivers/scsi/megaraid/megaraid_sas_fusion.c | 6 ++++-- 2 files changed, 12 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index b71caa3f44ea..c4ac4a5a075a 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -3424,7 +3424,6 @@ fail_reply_queue: megasas_free_cmds(instance); fail_alloc_cmds: - iounmap(instance->reg_set); return 1; } @@ -3494,7 +3493,7 @@ static int megasas_init_fw(struct megasas_instance *instance) /* Get operational params, sge flags, send init cmd to controller */ if (instance->instancet->init_adapter(instance)) - return -ENODEV; + goto fail_init_adapter; printk(KERN_ERR "megasas: INIT adapter done\n"); @@ -3553,6 +3552,7 @@ static int megasas_init_fw(struct megasas_instance *instance) MEGASAS_COMPLETION_TIMER_INTERVAL); return 0; +fail_init_adapter: fail_ready_state: iounmap(instance->reg_set); @@ -4105,10 +4105,13 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) instance->instancet->disable_intr(instance->reg_set); free_irq(instance->msi_flag ? 
instance->msixentry.vector : instance->pdev->irq, instance); +fail_irq: if (instance->msi_flag) pci_disable_msix(instance->pdev); - - fail_irq: + if (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) + megasas_release_fusion(instance); + else + megasas_release_mfi(instance); fail_init_mfi: fail_alloc_dma_buf: if (instance->evt_detail) @@ -4116,13 +4119,9 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) instance->evt_detail, instance->evt_detail_h); - if (instance->producer) { + if (instance->producer) pci_free_consistent(pdev, sizeof(u32), instance->producer, instance->producer_h); - megasas_release_mfi(instance); - } else { - megasas_release_fusion(instance); - } if (instance->consumer) pci_free_consistent(pdev, sizeof(u32), instance->consumer, instance->consumer_h); diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index c1e09d5a6196..26865290c22a 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -984,13 +984,15 @@ megasas_init_adapter_fusion(struct megasas_instance *instance) return 0; -fail_alloc_cmds: -fail_alloc_mfi_cmds: fail_map_info: if (i == 1) dma_free_coherent(&instance->pdev->dev, fusion->map_sz, fusion->ld_map[0], fusion->ld_map_phys[0]); fail_ioc_init: + megasas_free_cmds_fusion(instance); +fail_alloc_cmds: + megasas_free_cmds(instance); +fail_alloc_mfi_cmds: return 1; } -- cgit v1.2.3 From 53ef2bbd2068097ac453dff4a3d82858446be5bb Mon Sep 17 00:00:00 2001 From: adam radford Date: Thu, 24 Feb 2011 20:56:05 -0800 Subject: [SCSI] megaraid_sas: Add missing check_and_restore_queue_depth call Signed-off-by: Adam Radford Signed-off-by: James Bottomley --- drivers/scsi/megaraid/megaraid_sas_fusion.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 26865290c22a..eb4f4de19338 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -82,6 +82,10 @@ u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map); struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map); u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map); + +void +megasas_check_and_restore_queue_depth(struct megasas_instance *instance); + u8 MR_ValidateMapInfo(struct MR_FW_RAID_MAP_ALL *map, struct LD_LOAD_BALANCE_INFO *lbInfo); u16 get_updated_dev_handle(struct LD_LOAD_BALANCE_INFO *lbInfo, @@ -1746,7 +1750,7 @@ complete_cmd_fusion(struct megasas_instance *instance) wmb(); writel(fusion->last_reply_idx, &instance->reg_set->reply_post_host_index); - + megasas_check_and_restore_queue_depth(instance); return IRQ_HANDLED; } -- cgit v1.2.3 From 0a77066acc78b4048b0afc9d70b7e91c06e63356 Mon Sep 17 00:00:00 2001 From: adam radford Date: Thu, 24 Feb 2011 20:56:12 -0800 Subject: [SCSI] megaraid_sas: Enable MSI-X before calling megasas_init_fw Signed-off-by: Adam Radford Signed-off-by: James Bottomley --- drivers/scsi/megaraid/megaraid_sas_base.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index c4ac4a5a075a..78c263337b0b 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -4039,12 +4039,6 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) else 
INIT_WORK(&instance->work_init, process_fw_state_change_wq); - /* - * Initialize MFI Firmware - */ - if (megasas_init_fw(instance)) - goto fail_init_mfi; - /* Try to enable MSI-X */ if ((instance->pdev->device != PCI_DEVICE_ID_LSI_SAS1078R) && (instance->pdev->device != PCI_DEVICE_ID_LSI_SAS1078DE) && @@ -4053,6 +4047,12 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) &instance->msixentry, 1)) instance->msi_flag = 1; + /* + * Initialize MFI Firmware + */ + if (megasas_init_fw(instance)) + goto fail_init_mfi; + /* * Register IRQ */ @@ -4106,13 +4106,13 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) free_irq(instance->msi_flag ? instance->msixentry.vector : instance->pdev->irq, instance); fail_irq: - if (instance->msi_flag) - pci_disable_msix(instance->pdev); if (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) megasas_release_fusion(instance); else megasas_release_mfi(instance); fail_init_mfi: + if (instance->msi_flag) + pci_disable_msix(instance->pdev); fail_alloc_dma_buf: if (instance->evt_detail) pci_free_consistent(pdev, sizeof(struct megasas_evt_detail), @@ -4295,6 +4295,10 @@ megasas_resume(struct pci_dev *pdev) if (megasas_set_dma_mask(pdev)) goto fail_set_dma_mask; + /* Now re-enable MSI-X */ + if (instance->msi_flag) + pci_enable_msix(instance->pdev, &instance->msixentry, 1); + /* * Initialize MFI Firmware */ @@ -4331,10 +4335,6 @@ megasas_resume(struct pci_dev *pdev) tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, (unsigned long)instance); - /* Now re-enable MSI-X */ - if (instance->msi_flag) - pci_enable_msix(instance->pdev, &instance->msixentry, 1); - /* * Register IRQ */ -- cgit v1.2.3 From e1419191d8d38098fb6ef29e94aadd15dabff3da Mon Sep 17 00:00:00 2001 From: adam radford Date: Thu, 24 Feb 2011 20:56:21 -0800 Subject: [SCSI] megaraid_sas: Call tasklet_schedule for MSI-X The following patch for megaraid_sas calls tasklet_schedule() even if outbound_intr_status == 0 for MFI based boards in MSI-X mode. Signed-off-by: Adam Radford Signed-off-by: James Bottomley --- drivers/scsi/megaraid/megaraid_sas_base.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 78c263337b0b..050ec1694e68 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -2503,7 +2503,9 @@ megasas_deplete_reply_queue(struct megasas_instance *instance, if ((mfiStatus = instance->instancet->clear_intr( instance->reg_set) ) == 0) { - return IRQ_NONE; + /* Hardware may not set outbound_intr_status in MSI-X mode */ + if (!instance->msi_flag) + return IRQ_NONE; } instance->mfiStatus = mfiStatus; -- cgit v1.2.3 From 66192dfe1e74eae31a76cfc36092dabdba1324e6 Mon Sep 17 00:00:00 2001 From: adam radford Date: Thu, 24 Feb 2011 20:56:28 -0800 Subject: [SCSI] megaraid_sas: Fix probe_one to clear MSI-X flags in kdump The following patch for megaraid_sas fixes megasas_probe_one() to clear MSI-X flags in kdump when the 'reset_devices' kernel parameter is passed in. 
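A hedged, self-contained sketch of the kdump-time MSI-X disable the hunk below performs, using only generic PCI config-space helpers; example_clear_msix() is a made-up name and the driver's msi_control_reg() macro is replaced by the equivalent PCI_MSI_FLAGS offset:

    #include <linux/pci.h>

    static void example_clear_msix(struct pci_dev *pdev)
    {
            int pos;
            u16 control;

            /* A crashed kernel may have left MSI-X enabled on the adapter,
             * so clear the enable bit in the MSI-X Message Control word
             * before the kdump kernel reinitializes the device. */
            pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
            if (!pos)
                    return;

            pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);
            if (control & (1 << 15)) {              /* MSI-X enable bit */
                    dev_info(&pdev->dev, "resetting MSI-X\n");
                    pci_write_config_word(pdev, pos + PCI_MSI_FLAGS,
                                          control & ~(1 << 15));
            }
    }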
Signed-off-by: Adam Radford Signed-off-by: James Bottomley --- drivers/scsi/megaraid/megaraid_sas.h | 3 +++ drivers/scsi/megaraid/megaraid_sas_base.c | 19 ++++++++++++++++++- 2 files changed, 21 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 1b5e375732c0..e00ee4afdbc7 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -1477,4 +1477,7 @@ struct megasas_mgmt_info { int max_index; }; +#define msi_control_reg(base) (base + PCI_MSI_FLAGS) +#define PCI_MSIX_FLAGS_ENABLE (1 << 15) + #endif /*LSI_MEGARAID_SAS_H */ diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 050ec1694e68..a521e1afd738 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -3901,9 +3901,26 @@ fail_set_dma_mask: static int __devinit megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) { - int rval; + int rval, pos; struct Scsi_Host *host; struct megasas_instance *instance; + u16 control = 0; + + /* Reset MSI-X in the kdump kernel */ + if (reset_devices) { + pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); + if (pos) { + pci_read_config_word(pdev, msi_control_reg(pos), + &control); + if (control & PCI_MSIX_FLAGS_ENABLE) { + dev_info(&pdev->dev, "resetting MSI-X\n"); + pci_write_config_word(pdev, + msi_control_reg(pos), + control & + ~PCI_MSIX_FLAGS_ENABLE); + } + } + } /* * Announce PCI information -- cgit v1.2.3 From f512440589632c73e7c2f42d9d723994cd45958b Mon Sep 17 00:00:00 2001 From: adam radford Date: Thu, 24 Feb 2011 20:56:43 -0800 Subject: [SCSI] megaraid_sas: Fix megasas_build_dcdb_fusion to not filter by TYPE_DISK Signed-off-by: Adam Radford Signed-off-by: James Bottomley --- drivers/scsi/megaraid/megaraid_sas_fusion.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index eb4f4de19338..4ac1f6837532 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -1438,8 +1438,7 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance, local_map_ptr = fusion->ld_map[(instance->map_id & 1)]; /* Check if this is a system PD I/O */ - if ((instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) && - (instance->pd_list[pd_index].driveType == TYPE_DISK)) { + if (instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) { io_request->Function = 0; io_request->DevHandle = local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl; -- cgit v1.2.3 From eaa3c240de25c6e52ee1329e4acadfd99d7d104b Mon Sep 17 00:00:00 2001 From: adam radford Date: Thu, 24 Feb 2011 20:56:49 -0800 Subject: [SCSI] megaraid_sas: Fix megasas_build_dcdb_fusion to use correct LUN field Signed-off-by: Adam Radford Signed-off-by: James Bottomley --- drivers/scsi/megaraid/megaraid_sas_fusion.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 4ac1f6837532..905c1e9317b6 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -1461,7 +1461,7 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance, MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); } io_request->RaidContext.VirtualDiskTgtId = device_id; - io_request->LUN[0] = 
scmd->device->lun; + io_request->LUN[1] = scmd->device->lun; io_request->DataLength = scsi_bufflen(scmd); } @@ -1485,7 +1485,7 @@ megasas_build_io_fusion(struct megasas_instance *instance, device_id = MEGASAS_DEV_INDEX(instance, scp); /* Zero out some fields so they don't get reused */ - io_request->LUN[0] = 0; + io_request->LUN[1] = 0; io_request->CDB.EEDP32.PrimaryReferenceTag = 0; io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0; io_request->EEDPFlags = 0; -- cgit v1.2.3 From 4c598b23807a3bf0e4f7e65f7934965acc47e1b9 Mon Sep 17 00:00:00 2001 From: adam radford Date: Thu, 24 Feb 2011 20:56:53 -0800 Subject: [SCSI] megaraid_sas: Add CFG_CLEARED AEN The following patch for megaraid_sas adds a missing check for MR_EVT_CFG_CLEARED in megasas_aen_polling(). Signed-off-by: Adam Radford Signed-off-by: James Bottomley --- drivers/scsi/megaraid/megaraid_sas_base.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index a521e1afd738..1789a1c13085 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -5196,6 +5196,7 @@ megasas_aen_polling(struct work_struct *work) break; case MR_EVT_LD_OFFLINE: + case MR_EVT_CFG_CLEARED: case MR_EVT_LD_DELETED: megasas_get_ld_list(instance); for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { -- cgit v1.2.3 From f86c5424b02717a9eb9b1049a67ff3e7e9e92edf Mon Sep 17 00:00:00 2001 From: adam radford Date: Thu, 24 Feb 2011 20:57:00 -0800 Subject: [SCSI] megaraid_sas: Fix tasklet_init call The following patch fixes an incorrect tasklet_init() call in megasas_init_fw() to use instancet->tasklet. Signed-off-by: Adam Radford Signed-off-by: James Bottomley --- drivers/scsi/megaraid/megaraid_sas_base.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 1789a1c13085..22ec9f049181 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -3544,7 +3544,7 @@ static int megasas_init_fw(struct megasas_instance *instance) * Setup tasklet for cmd completion */ - tasklet_init(&instance->isr_tasklet, megasas_complete_cmd_dpc, + tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, (unsigned long)instance); /* Initialize the cmd completion timer */ -- cgit v1.2.3 From 1ac515ef3f2f7ab32498b0e4907933ff8b9b98c0 Mon Sep 17 00:00:00 2001 From: adam radford Date: Thu, 24 Feb 2011 20:57:04 -0800 Subject: [SCSI] megaraid_sas: Fix fault state handling The following patch for megaraid_sas fixes fault state handling in megasas_transition_to_ready(). 
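An annotated view of the behavioural change in the hunk below, assuming the surrounding state-polling loop in megasas_transition_to_ready() that the diff context implies; only the comments are new:

    case MFI_STATE_FAULT:
            printk(KERN_DEBUG "megasas: FW in FAULT state!!\n");
            /* Previously this returned -ENODEV immediately.  Now FAULT is
             * treated like the other transitional states: arm a bounded
             * wait and let the caller's polling loop give online
             * controller reset a chance to recover the firmware. */
            max_wait = MEGASAS_RESET_WAIT_TIME;
            cur_state = MFI_STATE_FAULT;
            break;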
Signed-off-by: Adam Radford Signed-off-by: James Bottomley --- drivers/scsi/megaraid/megaraid_sas_base.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 22ec9f049181..c9d82b56c6d7 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -2613,7 +2613,9 @@ megasas_transition_to_ready(struct megasas_instance* instance) case MFI_STATE_FAULT: printk(KERN_DEBUG "megasas: FW in FAULT state!!\n"); - return -ENODEV; + max_wait = MEGASAS_RESET_WAIT_TIME; + cur_state = MFI_STATE_FAULT; + break; case MFI_STATE_WAIT_HANDSHAKE: /* -- cgit v1.2.3 From 42a8d2b34d107df34533ea4840daf8d62bdc90aa Mon Sep 17 00:00:00 2001 From: adam radford Date: Thu, 24 Feb 2011 20:57:09 -0800 Subject: [SCSI] megaraid_sas: Fix max_sectors for IEEE SGL Signed-off-by: Adam Radford Signed-off-by: James Bottomley --- drivers/scsi/megaraid/megaraid_sas.h | 1 + drivers/scsi/megaraid/megaraid_sas_base.c | 4 ++++ 2 files changed, 5 insertions(+) (limited to 'drivers') diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index e00ee4afdbc7..c782bbc57cc8 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -723,6 +723,7 @@ struct megasas_ctrl_info { MEGASAS_MAX_DEV_PER_CHANNEL) #define MEGASAS_MAX_SECTORS (2*1024) +#define MEGASAS_MAX_SECTORS_IEEE (2*128) #define MEGASAS_DBG_LVL 1 #define MEGASAS_FW_BUSY 1 diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index c9d82b56c6d7..960b88cb4ff6 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -3824,6 +3824,10 @@ static int megasas_io_attach(struct megasas_instance *instance) instance->max_fw_cmds - MEGASAS_INT_CMDS; host->this_id = instance->init_id; host->sg_tablesize = instance->max_num_sge; + + if (instance->fw_support_ieee) + instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE; + /* * Check if the module parameter value for max_sectors can be used */ -- cgit v1.2.3 From ebf054b00b0a6dfa81dc4472d8b19301318b7f47 Mon Sep 17 00:00:00 2001 From: adam radford Date: Thu, 24 Feb 2011 20:57:15 -0800 Subject: [SCSI] megaraid_sas: Fix iMR OCR support to work correctly Signed-off-by: Adam Radford Signed-off-by: James Bottomley --- drivers/scsi/megaraid/megaraid_sas_base.c | 62 ++++++++++++++++++------------- 1 file changed, 37 insertions(+), 25 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 960b88cb4ff6..970d635574b3 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -134,7 +134,11 @@ spinlock_t poll_aen_lock; void megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, u8 alt_status); - +static u32 +megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs); +static int +megasas_adp_reset_gen2(struct megasas_instance *instance, + struct megasas_register_set __iomem *reg_set); static irqreturn_t megasas_isr(int irq, void *devp); static u32 megasas_init_adapter_mfi(struct megasas_instance *instance); @@ -554,6 +558,8 @@ static int megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs) { u32 status; + u32 mfiStatus = 0; + /* * Check if it is our interrupt */ @@ -563,6 +569,15 @@ megasas_clear_intr_skinny(struct megasas_register_set 
__iomem *regs) return 0; } + /* + * Check if it is our interrupt + */ + if ((megasas_read_fw_status_reg_gen2(regs) & MFI_STATE_MASK) == + MFI_STATE_FAULT) { + mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE; + } else + mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE; + /* * Clear the interrupt by writing back the same value */ @@ -573,7 +588,7 @@ megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs) */ readl(®s->outbound_intr_status); - return 1; + return mfiStatus; } /** @@ -596,17 +611,6 @@ megasas_fire_cmd_skinny(struct megasas_instance *instance, spin_unlock_irqrestore(&instance->hba_lock, flags); } -/** - * megasas_adp_reset_skinny - For controller reset - * @regs: MFI register set - */ -static int -megasas_adp_reset_skinny(struct megasas_instance *instance, - struct megasas_register_set __iomem *regs) -{ - return 0; -} - /** * megasas_check_reset_skinny - For controller reset check * @regs: MFI register set @@ -625,7 +629,7 @@ static struct megasas_instance_template megasas_instance_template_skinny = { .disable_intr = megasas_disable_intr_skinny, .clear_intr = megasas_clear_intr_skinny, .read_fw_status_reg = megasas_read_fw_status_reg_skinny, - .adp_reset = megasas_adp_reset_skinny, + .adp_reset = megasas_adp_reset_gen2, .check_reset = megasas_check_reset_skinny, .service_isr = megasas_isr, .tasklet = megasas_complete_cmd_dpc, @@ -740,20 +744,28 @@ megasas_adp_reset_gen2(struct megasas_instance *instance, { u32 retry = 0 ; u32 HostDiag; + u32 *seq_offset = ®_set->seq_offset; + u32 *hostdiag_offset = ®_set->host_diag; + + if (instance->instancet == &megasas_instance_template_skinny) { + seq_offset = ®_set->fusion_seq_offset; + hostdiag_offset = ®_set->fusion_host_diag; + } + + writel(0, seq_offset); + writel(4, seq_offset); + writel(0xb, seq_offset); + writel(2, seq_offset); + writel(7, seq_offset); + writel(0xd, seq_offset); - writel(0, ®_set->seq_offset); - writel(4, ®_set->seq_offset); - writel(0xb, ®_set->seq_offset); - writel(2, ®_set->seq_offset); - writel(7, ®_set->seq_offset); - writel(0xd, ®_set->seq_offset); msleep(1000); - HostDiag = (u32)readl(®_set->host_diag); + HostDiag = (u32)readl(hostdiag_offset); while ( !( HostDiag & DIAG_WRITE_ENABLE) ) { msleep(100); - HostDiag = (u32)readl(®_set->host_diag); + HostDiag = (u32)readl(hostdiag_offset); printk(KERN_NOTICE "RESETGEN2: retry=%x, hostdiag=%x\n", retry, HostDiag); @@ -764,14 +776,14 @@ megasas_adp_reset_gen2(struct megasas_instance *instance, printk(KERN_NOTICE "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag); - writel((HostDiag | DIAG_RESET_ADAPTER), ®_set->host_diag); + writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset); ssleep(10); - HostDiag = (u32)readl(®_set->host_diag); + HostDiag = (u32)readl(hostdiag_offset); while ( ( HostDiag & DIAG_RESET_ADAPTER) ) { msleep(100); - HostDiag = (u32)readl(®_set->host_diag); + HostDiag = (u32)readl(hostdiag_offset); printk(KERN_NOTICE "RESET_GEN2: retry=%x, hostdiag=%x\n", retry, HostDiag); -- cgit v1.2.3 From 00fa2b191b4bd74e9d22ac177e3d9e8ecd3582d3 Mon Sep 17 00:00:00 2001 From: adam radford Date: Thu, 24 Feb 2011 20:57:21 -0800 Subject: [SCSI] megaraid_sas: Version and Changelog update Signed-off-by: Adam Radford Signed-off-by: James Bottomley --- drivers/scsi/megaraid/megaraid_sas.h | 6 +++--- drivers/scsi/megaraid/megaraid_sas_base.c | 3 ++- 2 files changed, 5 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index c782bbc57cc8..635b228c3ead 100644 --- 
a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -33,9 +33,9 @@ /* * MegaRAID SAS Driver meta data */ -#define MEGASAS_VERSION "00.00.05.29-rc1" -#define MEGASAS_RELDATE "Dec. 7, 2010" -#define MEGASAS_EXT_VERSION "Tue. Dec. 7 17:00:00 PDT 2010" +#define MEGASAS_VERSION "00.00.05.34-rc1" +#define MEGASAS_RELDATE "Feb. 24, 2011" +#define MEGASAS_EXT_VERSION "Thu. Feb. 24 17:00:00 PDT 2011" /* * Device IDs diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 970d635574b3..f875e818905f 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -18,12 +18,13 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * FILE: megaraid_sas_base.c - * Version : v00.00.05.29-rc1 + * Version : v00.00.05.34-rc1 * * Authors: LSI Corporation * Sreenivas Bagalkote * Sumant Patro * Bo Yang + * Adam Radford * * Send feedback to: * -- cgit v1.2.3 From fe5e3f1aec310779a4b830022a26842b8d587228 Mon Sep 17 00:00:00 2001 From: "Parikh, Neerav" Date: Fri, 25 Feb 2011 15:02:51 -0800 Subject: [SCSI] libfc: Fixing a memory leak when destroying an interface When an fcoe interface is being destroyed; in the process the fcoe driver will try to release all the resources it had allocated for that interface including rports. But, it seems that it does not release the reference held for the name server rport in that process resulting into a memory leak. This patch fixes that memory leak. Signed-off-by: Neerav Parikh Tested-by: Ross Brattain Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_rport.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 59b16bbb66a3..49e1ccca09d5 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -357,6 +357,7 @@ static void fc_rport_work(struct work_struct *work) if (port_id == FC_FID_DIR_SERV) { rdata->event = RPORT_EV_NONE; mutex_unlock(&rdata->rp_mutex); + kref_put(&rdata->kref, lport->tt.rport_destroy); } else if ((rdata->flags & FC_RP_STARTED) && rdata->major_retries < lport->max_rport_retry_count) { -- cgit v1.2.3 From f31624831a79b9e3f129f6c3b0a1b83903a7b61e Mon Sep 17 00:00:00 2001 From: "Parikh, Neerav" Date: Fri, 25 Feb 2011 15:02:56 -0800 Subject: [SCSI] Revert "[SCSI] libfc: fix exchange being deleted when the abort itself is timed out" When abort for an exchange timed out it didn't release the reference to the exchange resulting in a memory leak. After discussion with the author of the patch (CC) that introduced this bug it was suggested to revert that patch. This reverts commit ea3e2e72eeb3e8a9440a5da965914f9b12088626. 
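An illustrative restatement of the one-line fc_rport.c fix above, assuming the usual libfc convention that the queued work item holds a reference on the rport; identifiers are those visible in the hunk:

    /* fc_rport_work() runs with a reference taken when the event was
     * queued, and every exit path must drop it.  The directory-server
     * (name server) early path did not, so that rport could never be
     * freed when the interface was destroyed. */
    if (port_id == FC_FID_DIR_SERV) {
            rdata->event = RPORT_EV_NONE;
            mutex_unlock(&rdata->rp_mutex);
            kref_put(&rdata->kref, lport->tt.rport_destroy);  /* the fix */
    }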
Signed-off by: Neerav Parikh Tested-by: Ross Brattain Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_exch.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 08bf5fa6afd4..10a5436aff77 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -666,13 +666,10 @@ static void fc_exch_timeout(struct work_struct *work) if (e_stat & ESB_ST_ABNORMAL) rc = fc_exch_done_locked(ep); spin_unlock_bh(&ep->ex_lock); + if (!rc) + fc_exch_delete(ep); if (resp) resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg); - if (!rc) { - /* delete the exchange if it's already being aborted */ - fc_exch_delete(ep); - return; - } fc_seq_exch_abort(sp, 2 * ep->r_a_tov); goto done; } -- cgit v1.2.3 From 72fa396bf57b31e8e2a401a21a3a088c0cc6b043 Mon Sep 17 00:00:00 2001 From: Vasu Dev Date: Fri, 25 Feb 2011 15:03:01 -0800 Subject: [SCSI] fcoe, libfc: initialize EM anchors list and then update npiv EMs EM anchors list initialization for only master port was not enough to keep npiv working as described here:- https://lists.open-fcoe.org/pipermail/devel/2011-January/011063.html So this patch moves fc_exch_mgr_list_clone to update npiv ports EMs once EM anchors list initialized. Also some cleanup, no need to set lport = NULL as that always get initialized later. Signed-off-by: Vasu Dev Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 50 ++++++++++++++++++++++--------------------- drivers/scsi/libfc/fc_exch.c | 1 + drivers/scsi/libfc/fc_lport.c | 1 + drivers/scsi/libfc/fc_npiv.c | 9 +------- 4 files changed, 29 insertions(+), 32 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 495456fe4520..0b5fbb8940c4 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -928,8 +928,9 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe, struct device *parent, int npiv) { struct net_device *netdev = fcoe->netdev; - struct fc_lport *lport = NULL; + struct fc_lport *lport, *n_port; struct fcoe_port *port; + struct Scsi_Host *shost; int rc; /* * parent is only a vport if npiv is 1, @@ -939,13 +940,11 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe, FCOE_NETDEV_DBG(netdev, "Create Interface\n"); - if (!npiv) { - lport = libfc_host_alloc(&fcoe_shost_template, - sizeof(struct fcoe_port)); - } else { - lport = libfc_vport_create(vport, - sizeof(struct fcoe_port)); - } + if (!npiv) + lport = libfc_host_alloc(&fcoe_shost_template, sizeof(*port)); + else + lport = libfc_vport_create(vport, sizeof(*port)); + if (!lport) { FCOE_NETDEV_DBG(netdev, "Could not allocate host structure\n"); rc = -ENOMEM; @@ -998,24 +997,27 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe, goto out_lp_destroy; } - if (!npiv) { - /* - * fcoe_em_alloc() and fcoe_hostlist_add() both - * need to be atomic with respect to other changes to the - * hostlist since fcoe_em_alloc() looks for an existing EM - * instance on host list updated by fcoe_hostlist_add(). - * - * This is currently handled through the fcoe_config_mutex - * begin held. - */ - + /* + * fcoe_em_alloc() and fcoe_hostlist_add() both + * need to be atomic with respect to other changes to the + * hostlist since fcoe_em_alloc() looks for an existing EM + * instance on host list updated by fcoe_hostlist_add(). + * + * This is currently handled through the fcoe_config_mutex + * begin held. 
+ */ + if (!npiv) /* lport exch manager allocation */ rc = fcoe_em_config(lport); - if (rc) { - FCOE_NETDEV_DBG(netdev, "Could not configure the EM " - "for the interface\n"); - goto out_lp_destroy; - } + else { + shost = vport_to_shost(vport); + n_port = shost_priv(shost); + rc = fc_exch_mgr_list_clone(n_port, lport); + } + + if (rc) { + FCOE_NETDEV_DBG(netdev, "Could not configure the EM\n"); + goto out_lp_destroy; } fcoe_interface_get(fcoe); diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 10a5436aff77..28231badd9e6 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -2175,6 +2175,7 @@ err: fc_exch_mgr_del(ema); return -ENOMEM; } +EXPORT_SYMBOL(fc_exch_mgr_list_clone); /** * fc_exch_mgr_alloc() - Allocate an exchange manager diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index 735f1f82ccfd..8c08b210001d 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -1590,6 +1590,7 @@ void fc_lport_enter_flogi(struct fc_lport *lport) */ int fc_lport_config(struct fc_lport *lport) { + INIT_LIST_HEAD(&lport->ema_list); INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout); mutex_init(&lport->lp_mutex); diff --git a/drivers/scsi/libfc/fc_npiv.c b/drivers/scsi/libfc/fc_npiv.c index 076cd5ff0e40..f33b897e4784 100644 --- a/drivers/scsi/libfc/fc_npiv.c +++ b/drivers/scsi/libfc/fc_npiv.c @@ -37,9 +37,7 @@ struct fc_lport *libfc_vport_create(struct fc_vport *vport, int privsize) vn_port = libfc_host_alloc(shost->hostt, privsize); if (!vn_port) - goto err_out; - if (fc_exch_mgr_list_clone(n_port, vn_port)) - goto err_put; + return vn_port; vn_port->vport = vport; vport->dd_data = vn_port; @@ -49,11 +47,6 @@ struct fc_lport *libfc_vport_create(struct fc_vport *vport, int privsize) mutex_unlock(&n_port->lp_mutex); return vn_port; - -err_put: - scsi_host_put(vn_port->host); -err_out: - return NULL; } EXPORT_SYMBOL(libfc_vport_create); -- cgit v1.2.3 From f4d2b2b6ea8abd0df72a31b4724522a277af6a6c Mon Sep 17 00:00:00 2001 From: Bhanu Prakash Gollapudi Date: Fri, 25 Feb 2011 15:03:12 -0800 Subject: [SCSI] libfcoe: Move FCOE_MTU definition from fcoe.h to libfcoe.h both fcoe and bnx2fc drivers can access the common definition of FCOE_MTU. Signed-off-by: Bhanu Prakash Gollapudi Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.h | 6 ------ 1 file changed, 6 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h index d775128398e9..408a6fd78fb4 100644 --- a/drivers/scsi/fcoe/fcoe.h +++ b/drivers/scsi/fcoe/fcoe.h @@ -40,12 +40,6 @@ #define FCOE_MIN_XID 0x0000 /* the min xid supported by fcoe_sw */ #define FCOE_MAX_XID 0x0FFF /* the max xid supported by fcoe_sw */ -/* - * Max MTU for FCoE: 14 (FCoE header) + 24 (FC header) + 2112 (max FC payload) - * + 4 (FC CRC) + 4 (FCoE trailer) = 2158 bytes - */ -#define FCOE_MTU 2158 - unsigned int fcoe_debug_logging; module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); -- cgit v1.2.3 From 70be6344ea1ad9110a5b422aeab47a3fbb01ba7f Mon Sep 17 00:00:00 2001 From: Bhanu Prakash Gollapudi Date: Fri, 25 Feb 2011 15:03:17 -0800 Subject: [SCSI] libfcoe: Remove stale fcoe-netdev entries When L2 driver is unloaded, libfcoe_destroy tries to access the fcoe transport structure matching the netdev. However, since the netdev is unregistered by that time, it fails to do so. 
Hence the stale mappings exists in the fcoe-netdev list. Handle NETDEV_UREGISTER device notification mechanism to remove the stale fcoe-netdev mapping. Signed-off-by: Bhanu Prakash Gollapudi Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe_transport.c | 46 +++++++++++++++++++++++++++++++++++++- 1 file changed, 45 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c index 745eb9a22d64..258684101bfd 100644 --- a/drivers/scsi/fcoe/fcoe_transport.c +++ b/drivers/scsi/fcoe/fcoe_transport.c @@ -39,10 +39,13 @@ static struct fcoe_transport *fcoe_transport_lookup(struct net_device *device); static struct fcoe_transport *fcoe_netdev_map_lookup(struct net_device *device); static int fcoe_transport_enable(const char *, struct kernel_param *); static int fcoe_transport_disable(const char *, struct kernel_param *); +static int libfcoe_device_notification(struct notifier_block *notifier, + ulong event, void *ptr); static LIST_HEAD(fcoe_transports); -static LIST_HEAD(fcoe_netdevs); static DEFINE_MUTEX(ft_mutex); +static LIST_HEAD(fcoe_netdevs); +static DEFINE_MUTEX(fn_mutex); unsigned int libfcoe_debug_logging; module_param_named(debug_logging, libfcoe_debug_logging, int, S_IRUGO|S_IWUSR); @@ -75,6 +78,11 @@ module_param_call(disable, fcoe_transport_disable, NULL, NULL, S_IWUSR); __MODULE_PARM_TYPE(disable, "string"); MODULE_PARM_DESC(disable, " Disables fcoe on a ethernet interface."); +/* notification function for packets from net device */ +static struct notifier_block libfcoe_notifier = { + .notifier_call = libfcoe_device_notification, +}; + /** * fcoe_fc_crc() - Calculates the CRC for a given frame * @fp: The frame to be checksumed @@ -375,6 +383,7 @@ static int fcoe_transport_show(char *buffer, const struct kernel_param *kp) static int __init fcoe_transport_init(void) { + register_netdevice_notifier(&libfcoe_notifier); return 0; } @@ -382,6 +391,7 @@ static int __exit fcoe_transport_exit(void) { struct fcoe_transport *ft; + unregister_netdevice_notifier(&libfcoe_notifier); mutex_lock(&ft_mutex); list_for_each_entry(ft, &fcoe_transports, list) printk(KERN_ERR "FCoE transport %s is still attached!\n", @@ -405,7 +415,9 @@ static int fcoe_add_netdev_mapping(struct net_device *netdev, nm->netdev = netdev; nm->ft = ft; + mutex_lock(&fn_mutex); list_add(&nm->list, &fcoe_netdevs); + mutex_unlock(&fn_mutex); return 0; } @@ -414,13 +426,16 @@ static void fcoe_del_netdev_mapping(struct net_device *netdev) { struct fcoe_netdev_mapping *nm = NULL, *tmp; + mutex_lock(&fn_mutex); list_for_each_entry_safe(nm, tmp, &fcoe_netdevs, list) { if (nm->netdev == netdev) { list_del(&nm->list); kfree(nm); + mutex_unlock(&fn_mutex); return; } } + mutex_unlock(&fn_mutex); } @@ -438,13 +453,16 @@ static struct fcoe_transport *fcoe_netdev_map_lookup(struct net_device *netdev) struct fcoe_transport *ft = NULL; struct fcoe_netdev_mapping *nm; + mutex_lock(&fn_mutex); list_for_each_entry(nm, &fcoe_netdevs, list) { if (netdev == nm->netdev) { ft = nm->ft; + mutex_unlock(&fn_mutex); return ft; } } + mutex_unlock(&fn_mutex); return NULL; } @@ -469,6 +487,32 @@ static struct net_device *fcoe_if_to_netdev(const char *buffer) return NULL; } +/** + * libfcoe_device_notification() - Handler for net device events + * @notifier: The context of the notification + * @event: The type of event + * @ptr: The net device that the event was on + * + * This function is called by the Ethernet driver in case of link change event. 
+ * + * Returns: 0 for success + */ +static int libfcoe_device_notification(struct notifier_block *notifier, + ulong event, void *ptr) +{ + struct net_device *netdev = ptr; + + switch (event) { + case NETDEV_UNREGISTER: + printk(KERN_ERR "libfcoe_device_notification: NETDEV_UNREGISTER %s\n", + netdev->name); + fcoe_del_netdev_mapping(netdev); + break; + } + return NOTIFY_OK; +} + + /** * fcoe_transport_create() - Create a fcoe interface * @buffer: The name of the Ethernet interface to create on -- cgit v1.2.3 From f2f96d20708c53c6825f842beb7bed06f5f15aca Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 25 Feb 2011 15:03:23 -0800 Subject: [SCSI] fcoe: precedence bug in fcoe_filter_frames() Negate has higher precedence than bitwise AND. FCPHF_CRC_UNCHECKED is 0x1 so the original code is equivalent to: if (!fr_flags(fp)) { ... Signed-off-by: Dan Carpenter Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 0b5fbb8940c4..0b44d057ccc3 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -1548,7 +1548,7 @@ static inline int fcoe_filter_frames(struct fc_lport *lport, return -EINVAL; } - if (!fr_flags(fp) & FCPHF_CRC_UNCHECKED || + if (!(fr_flags(fp) & FCPHF_CRC_UNCHECKED) || le32_to_cpu(fr_crc(fp)) == ~crc32(~0, skb->data, skb->len)) { fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED; return 0; -- cgit v1.2.3 From d2f809528a3534ea295b6d855c33cbbb3369d8c9 Mon Sep 17 00:00:00 2001 From: Vasu Dev Date: Fri, 25 Feb 2011 15:03:28 -0800 Subject: [SCSI] fcoe: fix broken fcoe interface reset Reset using "fcoeadm -r" also needs to restart FIP before doing libfc lport reset, this is needed for new switch firmware requiring FIP solicitation before doing FLOGI again during reset. So this patch does this by doing fcoe_ctlr_link_down and then fcoe_ctlr_link_up to reset the interface. The fcoe_ctlr_link_down call path also does lport reset and then fcoe_ctlr_link_up re-starts the fabric login after doing FIP solicitation first to get reset feature working again. Signed-off-by: Vasu Dev Tested-by: Ross Brattain Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 0b44d057ccc3..94fb4802e580 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -2130,7 +2130,13 @@ void fcoe_percpu_clean(struct fc_lport *lport) int fcoe_reset(struct Scsi_Host *shost) { struct fc_lport *lport = shost_priv(shost); - fc_lport_reset(lport); + struct fcoe_port *port = lport_priv(lport); + struct fcoe_interface *fcoe = port->priv; + + fcoe_ctlr_link_down(&fcoe->ctlr); + fcoe_clean_pending_queue(fcoe->ctlr.lp); + if (!fcoe_link_ok(fcoe->ctlr.lp)) + fcoe_ctlr_link_up(&fcoe->ctlr); return 0; } -- cgit v1.2.3 From 853e2bd2103aaa91d1ba1c0b57ba17628d836f03 Mon Sep 17 00:00:00 2001 From: Bhanu Gollapudi Date: Fri, 4 Feb 2011 12:10:34 -0800 Subject: [SCSI] bnx2fc: Broadcom FCoE offload driver This driver is for Broadcom Netxtreme II 57712 chip. The following patch contains the driver sources for bnx2fc driver. libfc/libfcoe changes to enable bnx2fc have already gone through the fcoe tree. bnx2fc is a SCSI low level driver that interfaces with SCSI midlayer, libfc, libfcoe, cnic modules. 
bnx2fc driver uses services of libfc for slow path operations such as FIP and fabric discovery. The fast path IO perations are performed after offloading the session information to the underlying FCoE firmware. Signed-off-by: Bhanu Prakash Gollapudi Reviewed-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/Kconfig | 1 + drivers/scsi/Makefile | 1 + drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h | 1080 ++++++++++++++ drivers/scsi/bnx2fc/Kconfig | 11 + drivers/scsi/bnx2fc/Makefile | 3 + drivers/scsi/bnx2fc/bnx2fc.h | 511 +++++++ drivers/scsi/bnx2fc/bnx2fc_constants.h | 206 +++ drivers/scsi/bnx2fc/bnx2fc_debug.h | 70 + drivers/scsi/bnx2fc/bnx2fc_els.c | 515 +++++++ drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 2535 ++++++++++++++++++++++++++++++++ drivers/scsi/bnx2fc/bnx2fc_hwi.c | 1868 +++++++++++++++++++++++ drivers/scsi/bnx2fc/bnx2fc_io.c | 1833 +++++++++++++++++++++++ drivers/scsi/bnx2fc/bnx2fc_tgt.c | 844 +++++++++++ 13 files changed, 9478 insertions(+) create mode 100644 drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h create mode 100644 drivers/scsi/bnx2fc/Kconfig create mode 100644 drivers/scsi/bnx2fc/Makefile create mode 100644 drivers/scsi/bnx2fc/bnx2fc.h create mode 100644 drivers/scsi/bnx2fc/bnx2fc_constants.h create mode 100644 drivers/scsi/bnx2fc/bnx2fc_debug.h create mode 100644 drivers/scsi/bnx2fc/bnx2fc_els.c create mode 100644 drivers/scsi/bnx2fc/bnx2fc_fcoe.c create mode 100644 drivers/scsi/bnx2fc/bnx2fc_hwi.c create mode 100644 drivers/scsi/bnx2fc/bnx2fc_io.c create mode 100644 drivers/scsi/bnx2fc/bnx2fc_tgt.c (limited to 'drivers') diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 8616496ffc02..4a1f029c4fe9 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -381,6 +381,7 @@ config ISCSI_BOOT_SYSFS source "drivers/scsi/cxgbi/Kconfig" source "drivers/scsi/bnx2i/Kconfig" +source "drivers/scsi/bnx2fc/Kconfig" source "drivers/scsi/be2iscsi/Kconfig" config SGIWD93_SCSI diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index 2e9a87e8e7d8..b57c53256163 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -40,6 +40,7 @@ obj-$(CONFIG_LIBFC) += libfc/ obj-$(CONFIG_LIBFCOE) += fcoe/ obj-$(CONFIG_FCOE) += fcoe/ obj-$(CONFIG_FCOE_FNIC) += fnic/ +obj-$(CONFIG_SCSI_BNX2X_FCOE) += libfc/ fcoe/ bnx2fc/ obj-$(CONFIG_ISCSI_TCP) += libiscsi.o libiscsi_tcp.o iscsi_tcp.o obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o obj-$(CONFIG_ISCSI_BOOT_SYSFS) += iscsi_boot_sysfs.o diff --git a/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h new file mode 100644 index 000000000000..69d031d98469 --- /dev/null +++ b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h @@ -0,0 +1,1080 @@ +#ifndef __57XX_FCOE_HSI_LINUX_LE__ +#define __57XX_FCOE_HSI_LINUX_LE__ + +/* + * common data for all protocols + */ +struct b577xx_doorbell_hdr { + u8 header; +#define B577XX_DOORBELL_HDR_RX (0x1<<0) +#define B577XX_DOORBELL_HDR_RX_SHIFT 0 +#define B577XX_DOORBELL_HDR_DB_TYPE (0x1<<1) +#define B577XX_DOORBELL_HDR_DB_TYPE_SHIFT 1 +#define B577XX_DOORBELL_HDR_DPM_SIZE (0x3<<2) +#define B577XX_DOORBELL_HDR_DPM_SIZE_SHIFT 2 +#define B577XX_DOORBELL_HDR_CONN_TYPE (0xF<<4) +#define B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT 4 +}; + +/* + * doorbell message sent to the chip + */ +struct b577xx_doorbell_set_prod { +#if defined(__BIG_ENDIAN) + u16 prod; + u8 zero_fill1; + struct b577xx_doorbell_hdr header; +#elif defined(__LITTLE_ENDIAN) + struct b577xx_doorbell_hdr header; + u8 zero_fill1; + u16 prod; +#endif +}; + + +struct regpair { + __le32 lo; + __le32 hi; +}; + + +/* + * Fixed size 
structure in order to plant it in Union structure + */ +struct fcoe_abts_rsp_union { + u32 r_ctl; + u32 abts_rsp_payload[7]; +}; + + +/* + * 4 regs size + */ +struct fcoe_bd_ctx { + u32 buf_addr_hi; + u32 buf_addr_lo; +#if defined(__BIG_ENDIAN) + u16 rsrv0; + u16 buf_len; +#elif defined(__LITTLE_ENDIAN) + u16 buf_len; + u16 rsrv0; +#endif +#if defined(__BIG_ENDIAN) + u16 rsrv1; + u16 flags; +#elif defined(__LITTLE_ENDIAN) + u16 flags; + u16 rsrv1; +#endif +}; + + +struct fcoe_cleanup_flow_info { +#if defined(__BIG_ENDIAN) + u16 reserved1; + u16 task_id; +#elif defined(__LITTLE_ENDIAN) + u16 task_id; + u16 reserved1; +#endif + u32 reserved2[7]; +}; + + +struct fcoe_fcp_cmd_payload { + u32 opaque[8]; +}; + +struct fcoe_fc_hdr { +#if defined(__BIG_ENDIAN) + u8 cs_ctl; + u8 s_id[3]; +#elif defined(__LITTLE_ENDIAN) + u8 s_id[3]; + u8 cs_ctl; +#endif +#if defined(__BIG_ENDIAN) + u8 r_ctl; + u8 d_id[3]; +#elif defined(__LITTLE_ENDIAN) + u8 d_id[3]; + u8 r_ctl; +#endif +#if defined(__BIG_ENDIAN) + u8 seq_id; + u8 df_ctl; + u16 seq_cnt; +#elif defined(__LITTLE_ENDIAN) + u16 seq_cnt; + u8 df_ctl; + u8 seq_id; +#endif +#if defined(__BIG_ENDIAN) + u8 type; + u8 f_ctl[3]; +#elif defined(__LITTLE_ENDIAN) + u8 f_ctl[3]; + u8 type; +#endif + u32 parameters; +#if defined(__BIG_ENDIAN) + u16 ox_id; + u16 rx_id; +#elif defined(__LITTLE_ENDIAN) + u16 rx_id; + u16 ox_id; +#endif +}; + +struct fcoe_fc_frame { + struct fcoe_fc_hdr fc_hdr; + u32 reserved0[2]; +}; + +union fcoe_cmd_flow_info { + struct fcoe_fcp_cmd_payload fcp_cmd_payload; + struct fcoe_fc_frame mp_fc_frame; +}; + + + +struct fcoe_fcp_rsp_flags { + u8 flags; +#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID (0x1<<0) +#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID_SHIFT 0 +#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID (0x1<<1) +#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID_SHIFT 1 +#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER (0x1<<2) +#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER_SHIFT 2 +#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER (0x1<<3) +#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER_SHIFT 3 +#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ (0x1<<4) +#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ_SHIFT 4 +#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS (0x7<<5) +#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS_SHIFT 5 +}; + + +struct fcoe_fcp_rsp_payload { + struct regpair reserved0; + u32 fcp_resid; +#if defined(__BIG_ENDIAN) + u16 retry_delay_timer; + struct fcoe_fcp_rsp_flags fcp_flags; + u8 scsi_status_code; +#elif defined(__LITTLE_ENDIAN) + u8 scsi_status_code; + struct fcoe_fcp_rsp_flags fcp_flags; + u16 retry_delay_timer; +#endif + u32 fcp_rsp_len; + u32 fcp_sns_len; +}; + + +/* + * Fixed size structure in order to plant it in Union structure + */ +struct fcoe_fcp_rsp_union { + struct fcoe_fcp_rsp_payload payload; + struct regpair reserved0; +}; + + +struct fcoe_fcp_xfr_rdy_payload { + u32 burst_len; + u32 data_ro; +}; + +struct fcoe_read_flow_info { + struct fcoe_fc_hdr fc_data_in_hdr; + u32 reserved[2]; +}; + +struct fcoe_write_flow_info { + struct fcoe_fc_hdr fc_data_out_hdr; + struct fcoe_fcp_xfr_rdy_payload fcp_xfr_payload; +}; + +union fcoe_rsp_flow_info { + struct fcoe_fcp_rsp_union fcp_rsp; + struct fcoe_abts_rsp_union abts_rsp; +}; + +/* + * 32 bytes used for general purposes + */ +union fcoe_general_task_ctx { + union fcoe_cmd_flow_info cmd_info; + struct fcoe_read_flow_info read_info; + struct fcoe_write_flow_info write_info; + union fcoe_rsp_flow_info rsp_info; + struct fcoe_cleanup_flow_info cleanup_info; + u32 comp_info[8]; +}; + + +/* + * FCoE KCQ CQE parameters + */ 
+union fcoe_kcqe_params { + u32 reserved0[4]; +}; + +/* + * FCoE KCQ CQE + */ +struct fcoe_kcqe { + u32 fcoe_conn_id; + u32 completion_status; + u32 fcoe_conn_context_id; + union fcoe_kcqe_params params; +#if defined(__BIG_ENDIAN) + u8 flags; +#define FCOE_KCQE_RESERVED0 (0x7<<0) +#define FCOE_KCQE_RESERVED0_SHIFT 0 +#define FCOE_KCQE_RAMROD_COMPLETION (0x1<<3) +#define FCOE_KCQE_RAMROD_COMPLETION_SHIFT 3 +#define FCOE_KCQE_LAYER_CODE (0x7<<4) +#define FCOE_KCQE_LAYER_CODE_SHIFT 4 +#define FCOE_KCQE_LINKED_WITH_NEXT (0x1<<7) +#define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7 + u8 op_code; + u16 qe_self_seq; +#elif defined(__LITTLE_ENDIAN) + u16 qe_self_seq; + u8 op_code; + u8 flags; +#define FCOE_KCQE_RESERVED0 (0x7<<0) +#define FCOE_KCQE_RESERVED0_SHIFT 0 +#define FCOE_KCQE_RAMROD_COMPLETION (0x1<<3) +#define FCOE_KCQE_RAMROD_COMPLETION_SHIFT 3 +#define FCOE_KCQE_LAYER_CODE (0x7<<4) +#define FCOE_KCQE_LAYER_CODE_SHIFT 4 +#define FCOE_KCQE_LINKED_WITH_NEXT (0x1<<7) +#define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7 +#endif +}; + +/* + * FCoE KWQE header + */ +struct fcoe_kwqe_header { +#if defined(__BIG_ENDIAN) + u8 flags; +#define FCOE_KWQE_HEADER_RESERVED0 (0xF<<0) +#define FCOE_KWQE_HEADER_RESERVED0_SHIFT 0 +#define FCOE_KWQE_HEADER_LAYER_CODE (0x7<<4) +#define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4 +#define FCOE_KWQE_HEADER_RESERVED1 (0x1<<7) +#define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7 + u8 op_code; +#elif defined(__LITTLE_ENDIAN) + u8 op_code; + u8 flags; +#define FCOE_KWQE_HEADER_RESERVED0 (0xF<<0) +#define FCOE_KWQE_HEADER_RESERVED0_SHIFT 0 +#define FCOE_KWQE_HEADER_LAYER_CODE (0x7<<4) +#define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4 +#define FCOE_KWQE_HEADER_RESERVED1 (0x1<<7) +#define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7 +#endif +}; + +/* + * FCoE firmware init request 1 + */ +struct fcoe_kwqe_init1 { +#if defined(__BIG_ENDIAN) + struct fcoe_kwqe_header hdr; + u16 num_tasks; +#elif defined(__LITTLE_ENDIAN) + u16 num_tasks; + struct fcoe_kwqe_header hdr; +#endif + u32 task_list_pbl_addr_lo; + u32 task_list_pbl_addr_hi; + u32 dummy_buffer_addr_lo; + u32 dummy_buffer_addr_hi; +#if defined(__BIG_ENDIAN) + u16 rq_num_wqes; + u16 sq_num_wqes; +#elif defined(__LITTLE_ENDIAN) + u16 sq_num_wqes; + u16 rq_num_wqes; +#endif +#if defined(__BIG_ENDIAN) + u16 cq_num_wqes; + u16 rq_buffer_log_size; +#elif defined(__LITTLE_ENDIAN) + u16 rq_buffer_log_size; + u16 cq_num_wqes; +#endif +#if defined(__BIG_ENDIAN) + u8 flags; +#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0) +#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT 0 +#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC (0x7<<4) +#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4 +#define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7) +#define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7 + u8 num_sessions_log; + u16 mtu; +#elif defined(__LITTLE_ENDIAN) + u16 mtu; + u8 num_sessions_log; + u8 flags; +#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0) +#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT 0 +#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC (0x7<<4) +#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4 +#define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7) +#define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7 +#endif +}; + +/* + * FCoE firmware init request 2 + */ +struct fcoe_kwqe_init2 { +#if defined(__BIG_ENDIAN) + struct fcoe_kwqe_header hdr; + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + struct fcoe_kwqe_header hdr; +#endif + u32 hash_tbl_pbl_addr_lo; + u32 hash_tbl_pbl_addr_hi; + u32 t2_hash_tbl_addr_lo; + u32 t2_hash_tbl_addr_hi; + u32 t2_ptr_hash_tbl_addr_lo; + u32 
t2_ptr_hash_tbl_addr_hi; + u32 free_list_count; +}; + +/* + * FCoE firmware init request 3 + */ +struct fcoe_kwqe_init3 { +#if defined(__BIG_ENDIAN) + struct fcoe_kwqe_header hdr; + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + struct fcoe_kwqe_header hdr; +#endif + u32 error_bit_map_lo; + u32 error_bit_map_hi; +#if defined(__BIG_ENDIAN) + u8 reserved21[3]; + u8 cached_session_enable; +#elif defined(__LITTLE_ENDIAN) + u8 cached_session_enable; + u8 reserved21[3]; +#endif + u32 reserved2[4]; +}; + +/* + * FCoE connection offload request 1 + */ +struct fcoe_kwqe_conn_offload1 { +#if defined(__BIG_ENDIAN) + struct fcoe_kwqe_header hdr; + u16 fcoe_conn_id; +#elif defined(__LITTLE_ENDIAN) + u16 fcoe_conn_id; + struct fcoe_kwqe_header hdr; +#endif + u32 sq_addr_lo; + u32 sq_addr_hi; + u32 rq_pbl_addr_lo; + u32 rq_pbl_addr_hi; + u32 rq_first_pbe_addr_lo; + u32 rq_first_pbe_addr_hi; +#if defined(__BIG_ENDIAN) + u16 reserved0; + u16 rq_prod; +#elif defined(__LITTLE_ENDIAN) + u16 rq_prod; + u16 reserved0; +#endif +}; + +/* + * FCoE connection offload request 2 + */ +struct fcoe_kwqe_conn_offload2 { +#if defined(__BIG_ENDIAN) + struct fcoe_kwqe_header hdr; + u16 tx_max_fc_pay_len; +#elif defined(__LITTLE_ENDIAN) + u16 tx_max_fc_pay_len; + struct fcoe_kwqe_header hdr; +#endif + u32 cq_addr_lo; + u32 cq_addr_hi; + u32 xferq_addr_lo; + u32 xferq_addr_hi; + u32 conn_db_addr_lo; + u32 conn_db_addr_hi; + u32 reserved1; +}; + +/* + * FCoE connection offload request 3 + */ +struct fcoe_kwqe_conn_offload3 { +#if defined(__BIG_ENDIAN) + struct fcoe_kwqe_header hdr; + u16 vlan_tag; +#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0) +#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0 +#define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12) +#define FCOE_KWQE_CONN_OFFLOAD3_CFI_SHIFT 12 +#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13) +#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13 +#elif defined(__LITTLE_ENDIAN) + u16 vlan_tag; +#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0) +#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0 +#define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12) +#define FCOE_KWQE_CONN_OFFLOAD3_CFI_SHIFT 12 +#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13) +#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13 + struct fcoe_kwqe_header hdr; +#endif +#if defined(__BIG_ENDIAN) + u8 tx_max_conc_seqs_c3; + u8 s_id[3]; +#elif defined(__LITTLE_ENDIAN) + u8 s_id[3]; + u8 tx_max_conc_seqs_c3; +#endif +#if defined(__BIG_ENDIAN) + u8 flags; +#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0) +#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT 0 +#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES (0x1<<1) +#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT 1 +#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT (0x1<<2) +#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT 2 +#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ (0x1<<3) +#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT 3 +#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID (0x1<<4) +#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT 4 +#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID (0x1<<5) +#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID_SHIFT 5 +#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0 (0x1<<6) +#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6 +#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7) +#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7 + u8 d_id[3]; +#elif defined(__LITTLE_ENDIAN) + u8 d_id[3]; + u8 flags; +#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0) +#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT 0 +#define 
FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES (0x1<<1) +#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT 1 +#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT (0x1<<2) +#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT 2 +#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ (0x1<<3) +#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT 3 +#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID (0x1<<4) +#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT 4 +#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID (0x1<<5) +#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID_SHIFT 5 +#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0 (0x1<<6) +#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6 +#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7) +#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7 +#endif + u32 reserved; + u32 confq_first_pbe_addr_lo; + u32 confq_first_pbe_addr_hi; +#if defined(__BIG_ENDIAN) + u16 rx_max_fc_pay_len; + u16 tx_total_conc_seqs; +#elif defined(__LITTLE_ENDIAN) + u16 tx_total_conc_seqs; + u16 rx_max_fc_pay_len; +#endif +#if defined(__BIG_ENDIAN) + u8 rx_open_seqs_exch_c3; + u8 rx_max_conc_seqs_c3; + u16 rx_total_conc_seqs; +#elif defined(__LITTLE_ENDIAN) + u16 rx_total_conc_seqs; + u8 rx_max_conc_seqs_c3; + u8 rx_open_seqs_exch_c3; +#endif +}; + +/* + * FCoE connection offload request 4 + */ +struct fcoe_kwqe_conn_offload4 { +#if defined(__BIG_ENDIAN) + struct fcoe_kwqe_header hdr; + u8 reserved2; + u8 e_d_tov_timer_val; +#elif defined(__LITTLE_ENDIAN) + u8 e_d_tov_timer_val; + u8 reserved2; + struct fcoe_kwqe_header hdr; +#endif + u8 src_mac_addr_lo32[4]; +#if defined(__BIG_ENDIAN) + u8 dst_mac_addr_hi16[2]; + u8 src_mac_addr_hi16[2]; +#elif defined(__LITTLE_ENDIAN) + u8 src_mac_addr_hi16[2]; + u8 dst_mac_addr_hi16[2]; +#endif + u8 dst_mac_addr_lo32[4]; + u32 lcq_addr_lo; + u32 lcq_addr_hi; + u32 confq_pbl_base_addr_lo; + u32 confq_pbl_base_addr_hi; +}; + +/* + * FCoE connection enable request + */ +struct fcoe_kwqe_conn_enable_disable { +#if defined(__BIG_ENDIAN) + struct fcoe_kwqe_header hdr; + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + struct fcoe_kwqe_header hdr; +#endif + u8 src_mac_addr_lo32[4]; +#if defined(__BIG_ENDIAN) + u16 vlan_tag; +#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0) +#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0 +#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI (0x1<<12) +#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12 +#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13) +#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13 + u8 src_mac_addr_hi16[2]; +#elif defined(__LITTLE_ENDIAN) + u8 src_mac_addr_hi16[2]; + u16 vlan_tag; +#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0) +#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0 +#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI (0x1<<12) +#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12 +#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13) +#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13 +#endif + u8 dst_mac_addr_lo32[4]; +#if defined(__BIG_ENDIAN) + u16 reserved1; + u8 dst_mac_addr_hi16[2]; +#elif defined(__LITTLE_ENDIAN) + u8 dst_mac_addr_hi16[2]; + u16 reserved1; +#endif +#if defined(__BIG_ENDIAN) + u8 vlan_flag; + u8 s_id[3]; +#elif defined(__LITTLE_ENDIAN) + u8 s_id[3]; + u8 vlan_flag; +#endif +#if defined(__BIG_ENDIAN) + u8 reserved3; + u8 d_id[3]; +#elif defined(__LITTLE_ENDIAN) + u8 d_id[3]; + u8 reserved3; +#endif + u32 context_id; + u32 conn_id; + u32 reserved4; +}; + +/* + * FCoE connection destroy request + */ +struct fcoe_kwqe_conn_destroy { +#if defined(__BIG_ENDIAN) + 
struct fcoe_kwqe_header hdr; + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + struct fcoe_kwqe_header hdr; +#endif + u32 context_id; + u32 conn_id; + u32 reserved1[5]; +}; + +/* + * FCoe destroy request + */ +struct fcoe_kwqe_destroy { +#if defined(__BIG_ENDIAN) + struct fcoe_kwqe_header hdr; + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + struct fcoe_kwqe_header hdr; +#endif + u32 reserved1[7]; +}; + +/* + * FCoe statistics request + */ +struct fcoe_kwqe_stat { +#if defined(__BIG_ENDIAN) + struct fcoe_kwqe_header hdr; + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + struct fcoe_kwqe_header hdr; +#endif + u32 stat_params_addr_lo; + u32 stat_params_addr_hi; + u32 reserved1[5]; +}; + +/* + * FCoE KWQ WQE + */ +union fcoe_kwqe { + struct fcoe_kwqe_init1 init1; + struct fcoe_kwqe_init2 init2; + struct fcoe_kwqe_init3 init3; + struct fcoe_kwqe_conn_offload1 conn_offload1; + struct fcoe_kwqe_conn_offload2 conn_offload2; + struct fcoe_kwqe_conn_offload3 conn_offload3; + struct fcoe_kwqe_conn_offload4 conn_offload4; + struct fcoe_kwqe_conn_enable_disable conn_enable_disable; + struct fcoe_kwqe_conn_destroy conn_destroy; + struct fcoe_kwqe_destroy destroy; + struct fcoe_kwqe_stat statistics; +}; + +struct fcoe_mul_sges_ctx { + struct regpair cur_sge_addr; +#if defined(__BIG_ENDIAN) + u8 sgl_size; + u8 cur_sge_idx; + u16 cur_sge_off; +#elif defined(__LITTLE_ENDIAN) + u16 cur_sge_off; + u8 cur_sge_idx; + u8 sgl_size; +#endif +}; + +struct fcoe_s_stat_ctx { + u8 flags; +#define FCOE_S_STAT_CTX_ACTIVE (0x1<<0) +#define FCOE_S_STAT_CTX_ACTIVE_SHIFT 0 +#define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND (0x1<<1) +#define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND_SHIFT 1 +#define FCOE_S_STAT_CTX_ABTS_PERFORMED (0x1<<2) +#define FCOE_S_STAT_CTX_ABTS_PERFORMED_SHIFT 2 +#define FCOE_S_STAT_CTX_SEQ_TIMEOUT (0x1<<3) +#define FCOE_S_STAT_CTX_SEQ_TIMEOUT_SHIFT 3 +#define FCOE_S_STAT_CTX_P_RJT (0x1<<4) +#define FCOE_S_STAT_CTX_P_RJT_SHIFT 4 +#define FCOE_S_STAT_CTX_ACK_EOFT (0x1<<5) +#define FCOE_S_STAT_CTX_ACK_EOFT_SHIFT 5 +#define FCOE_S_STAT_CTX_RSRV1 (0x3<<6) +#define FCOE_S_STAT_CTX_RSRV1_SHIFT 6 +}; + +struct fcoe_seq_ctx { +#if defined(__BIG_ENDIAN) + u16 low_seq_cnt; + struct fcoe_s_stat_ctx s_stat; + u8 seq_id; +#elif defined(__LITTLE_ENDIAN) + u8 seq_id; + struct fcoe_s_stat_ctx s_stat; + u16 low_seq_cnt; +#endif +#if defined(__BIG_ENDIAN) + u16 err_seq_cnt; + u16 high_seq_cnt; +#elif defined(__LITTLE_ENDIAN) + u16 high_seq_cnt; + u16 err_seq_cnt; +#endif + u32 low_exp_ro; + u32 high_exp_ro; +}; + + +struct fcoe_single_sge_ctx { + struct regpair cur_buf_addr; +#if defined(__BIG_ENDIAN) + u16 reserved0; + u16 cur_buf_rem; +#elif defined(__LITTLE_ENDIAN) + u16 cur_buf_rem; + u16 reserved0; +#endif +}; + +union fcoe_sgl_ctx { + struct fcoe_single_sge_ctx single_sge; + struct fcoe_mul_sges_ctx mul_sges; +}; + + + +/* + * FCoE SQ element + */ +struct fcoe_sqe { + u16 wqe; +#define FCOE_SQE_TASK_ID (0x7FFF<<0) +#define FCOE_SQE_TASK_ID_SHIFT 0 +#define FCOE_SQE_TOGGLE_BIT (0x1<<15) +#define FCOE_SQE_TOGGLE_BIT_SHIFT 15 +}; + + + +struct fcoe_task_ctx_entry_tx_only { + union fcoe_sgl_ctx sgl_ctx; +}; + +struct fcoe_task_ctx_entry_txwr_rxrd { +#if defined(__BIG_ENDIAN) + u16 verify_tx_seq; + u8 init_flags; +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE (0x7<<0) +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT 0 +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE (0x1<<3) +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT 3 +#define 
FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE (0x1<<4) +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT 4 +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE (0x1<<5) +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT 5 +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5 (0x3<<6) +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5_SHIFT 6 + u8 tx_flags; +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE (0xF<<0) +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT 0 +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4 (0xF<<4) +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4_SHIFT 4 +#elif defined(__LITTLE_ENDIAN) + u8 tx_flags; +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE (0xF<<0) +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT 0 +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4 (0xF<<4) +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4_SHIFT 4 + u8 init_flags; +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE (0x7<<0) +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT 0 +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE (0x1<<3) +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT 3 +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE (0x1<<4) +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT 4 +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE (0x1<<5) +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT 5 +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5 (0x3<<6) +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5_SHIFT 6 + u16 verify_tx_seq; +#endif +}; + +/* + * Common section. Both TX and RX processing might write and read from it in + * different flows + */ +struct fcoe_task_ctx_entry_tx_rx_cmn { + u32 data_2_trns; + union fcoe_general_task_ctx general; +#if defined(__BIG_ENDIAN) + u16 tx_low_seq_cnt; + struct fcoe_s_stat_ctx tx_s_stat; + u8 tx_seq_id; +#elif defined(__LITTLE_ENDIAN) + u8 tx_seq_id; + struct fcoe_s_stat_ctx tx_s_stat; + u16 tx_low_seq_cnt; +#endif + u32 common_flags; +#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID (0xFFFFFF<<0) +#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT 0 +#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID (0x1<<24) +#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT 24 +#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT (0x1<<25) +#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT_SHIFT 25 +#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_XFER (0x1<<26) +#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_XFER_SHIFT 26 +#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_CONF (0x1<<27) +#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_CONF_SHIFT 27 +#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME (0x1<<28) +#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT 28 +#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_RSRV (0x7<<29) +#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_RSRV_SHIFT 29 +}; + +struct fcoe_task_ctx_entry_rxwr_txrd { +#if defined(__BIG_ENDIAN) + u16 rx_id; + u16 rx_flags; +#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE (0xF<<0) +#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT 0 +#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE (0x7<<4) +#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT 4 +#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ (0x1<<7) +#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ_SHIFT 7 +#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME (0x1<<8) +#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME_SHIFT 8 +#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0 (0x7F<<9) +#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0_SHIFT 9 +#elif defined(__LITTLE_ENDIAN) + u16 rx_flags; +#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE (0xF<<0) +#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT 0 +#define 
FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE (0x7<<4) +#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT 4 +#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ (0x1<<7) +#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ_SHIFT 7 +#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME (0x1<<8) +#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME_SHIFT 8 +#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0 (0x7F<<9) +#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0_SHIFT 9 + u16 rx_id; +#endif +}; + +struct fcoe_task_ctx_entry_rx_only { + struct fcoe_seq_ctx seq_ctx; + struct fcoe_seq_ctx ooo_seq_ctx; + u32 rsrv3; + union fcoe_sgl_ctx sgl_ctx; +}; + +struct fcoe_task_ctx_entry { + struct fcoe_task_ctx_entry_tx_only tx_wr_only; + struct fcoe_task_ctx_entry_txwr_rxrd tx_wr_rx_rd; + struct fcoe_task_ctx_entry_tx_rx_cmn cmn; + struct fcoe_task_ctx_entry_rxwr_txrd rx_wr_tx_rd; + struct fcoe_task_ctx_entry_rx_only rx_wr_only; + u32 reserved[4]; +}; + + +/* + * FCoE XFRQ element + */ +struct fcoe_xfrqe { + u16 wqe; +#define FCOE_XFRQE_TASK_ID (0x7FFF<<0) +#define FCOE_XFRQE_TASK_ID_SHIFT 0 +#define FCOE_XFRQE_TOGGLE_BIT (0x1<<15) +#define FCOE_XFRQE_TOGGLE_BIT_SHIFT 15 +}; + + +/* + * FCoE CONFQ element + */ +struct fcoe_confqe { +#if defined(__BIG_ENDIAN) + u16 rx_id; + u16 ox_id; +#elif defined(__LITTLE_ENDIAN) + u16 ox_id; + u16 rx_id; +#endif + u32 param; +}; + + +/* + * FCoE conection data base + */ +struct fcoe_conn_db { +#if defined(__BIG_ENDIAN) + u16 rsrv0; + u16 rq_prod; +#elif defined(__LITTLE_ENDIAN) + u16 rq_prod; + u16 rsrv0; +#endif + u32 rsrv1; + struct regpair cq_arm; +}; + + +/* + * FCoE CQ element + */ +struct fcoe_cqe { + u16 wqe; +#define FCOE_CQE_CQE_INFO (0x3FFF<<0) +#define FCOE_CQE_CQE_INFO_SHIFT 0 +#define FCOE_CQE_CQE_TYPE (0x1<<14) +#define FCOE_CQE_CQE_TYPE_SHIFT 14 +#define FCOE_CQE_TOGGLE_BIT (0x1<<15) +#define FCOE_CQE_TOGGLE_BIT_SHIFT 15 +}; + + +/* + * FCoE error/warning resporting entry + */ +struct fcoe_err_report_entry { + u32 err_warn_bitmap_lo; + u32 err_warn_bitmap_hi; + u32 tx_buf_off; + u32 rx_buf_off; + struct fcoe_fc_hdr fc_hdr; +}; + + +/* + * FCoE hash table entry (32 bytes) + */ +struct fcoe_hash_table_entry { +#if defined(__BIG_ENDIAN) + u8 d_id_0; + u8 s_id_2; + u8 s_id_1; + u8 s_id_0; +#elif defined(__LITTLE_ENDIAN) + u8 s_id_0; + u8 s_id_1; + u8 s_id_2; + u8 d_id_0; +#endif +#if defined(__BIG_ENDIAN) + u16 dst_mac_addr_hi; + u8 d_id_2; + u8 d_id_1; +#elif defined(__LITTLE_ENDIAN) + u8 d_id_1; + u8 d_id_2; + u16 dst_mac_addr_hi; +#endif + u32 dst_mac_addr_lo; +#if defined(__BIG_ENDIAN) + u16 vlan_id; + u16 src_mac_addr_hi; +#elif defined(__LITTLE_ENDIAN) + u16 src_mac_addr_hi; + u16 vlan_id; +#endif + u32 src_mac_addr_lo; +#if defined(__BIG_ENDIAN) + u16 reserved1; + u8 reserved0; + u8 vlan_flag; +#elif defined(__LITTLE_ENDIAN) + u8 vlan_flag; + u8 reserved0; + u16 reserved1; +#endif + u32 reserved2; + u32 field_id; +#define FCOE_HASH_TABLE_ENTRY_CID (0xFFFFFF<<0) +#define FCOE_HASH_TABLE_ENTRY_CID_SHIFT 0 +#define FCOE_HASH_TABLE_ENTRY_RESERVED3 (0x7F<<24) +#define FCOE_HASH_TABLE_ENTRY_RESERVED3_SHIFT 24 +#define FCOE_HASH_TABLE_ENTRY_VALID (0x1<<31) +#define FCOE_HASH_TABLE_ENTRY_VALID_SHIFT 31 +}; + +/* + * FCoE pending work request CQE + */ +struct fcoe_pend_wq_cqe { + u16 wqe; +#define FCOE_PEND_WQ_CQE_TASK_ID (0x3FFF<<0) +#define FCOE_PEND_WQ_CQE_TASK_ID_SHIFT 0 +#define FCOE_PEND_WQ_CQE_CQE_TYPE (0x1<<14) +#define FCOE_PEND_WQ_CQE_CQE_TYPE_SHIFT 14 +#define FCOE_PEND_WQ_CQE_TOGGLE_BIT (0x1<<15) +#define FCOE_PEND_WQ_CQE_TOGGLE_BIT_SHIFT 15 +}; + + +/* + * FCoE RX 
statistics parameters section#0 + */ +struct fcoe_rx_stat_params_section0 { + u32 fcoe_ver_cnt; + u32 fcoe_rx_pkt_cnt; + u32 fcoe_rx_byte_cnt; + u32 fcoe_rx_drop_pkt_cnt; +}; + + +/* + * FCoE RX statistics parameters section#1 + */ +struct fcoe_rx_stat_params_section1 { + u32 fc_crc_cnt; + u32 eofa_del_cnt; + u32 miss_frame_cnt; + u32 seq_timeout_cnt; + u32 drop_seq_cnt; + u32 fcoe_rx_drop_pkt_cnt; + u32 fcp_rx_pkt_cnt; + u32 reserved0; +}; + + +/* + * FCoE TX statistics parameters + */ +struct fcoe_tx_stat_params { + u32 fcoe_tx_pkt_cnt; + u32 fcoe_tx_byte_cnt; + u32 fcp_tx_pkt_cnt; + u32 reserved0; +}; + +/* + * FCoE statistics parameters + */ +struct fcoe_statistics_params { + struct fcoe_tx_stat_params tx_stat; + struct fcoe_rx_stat_params_section0 rx_stat0; + struct fcoe_rx_stat_params_section1 rx_stat1; +}; + + +/* + * FCoE t2 hash table entry (64 bytes) + */ +struct fcoe_t2_hash_table_entry { + struct fcoe_hash_table_entry data; + struct regpair next; + struct regpair reserved0[3]; +}; + +/* + * FCoE unsolicited CQE + */ +struct fcoe_unsolicited_cqe { + u16 wqe; +#define FCOE_UNSOLICITED_CQE_SUBTYPE (0x3<<0) +#define FCOE_UNSOLICITED_CQE_SUBTYPE_SHIFT 0 +#define FCOE_UNSOLICITED_CQE_PKT_LEN (0xFFF<<2) +#define FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT 2 +#define FCOE_UNSOLICITED_CQE_CQE_TYPE (0x1<<14) +#define FCOE_UNSOLICITED_CQE_CQE_TYPE_SHIFT 14 +#define FCOE_UNSOLICITED_CQE_TOGGLE_BIT (0x1<<15) +#define FCOE_UNSOLICITED_CQE_TOGGLE_BIT_SHIFT 15 +}; + + + +#endif /* __57XX_FCOE_HSI_LINUX_LE__ */ diff --git a/drivers/scsi/bnx2fc/Kconfig b/drivers/scsi/bnx2fc/Kconfig new file mode 100644 index 000000000000..6a38080e35ed --- /dev/null +++ b/drivers/scsi/bnx2fc/Kconfig @@ -0,0 +1,11 @@ +config SCSI_BNX2X_FCOE + tristate "Broadcom NetXtreme II FCoE support" + depends on PCI + select NETDEVICES + select NETDEV_1000 + select LIBFC + select LIBFCOE + select CNIC + ---help--- + This driver supports FCoE offload for the Broadcom NetXtreme II + devices. diff --git a/drivers/scsi/bnx2fc/Makefile b/drivers/scsi/bnx2fc/Makefile new file mode 100644 index 000000000000..a92695a25176 --- /dev/null +++ b/drivers/scsi/bnx2fc/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_SCSI_BNX2X_FCOE) += bnx2fc.o + +bnx2fc-y := bnx2fc_els.o bnx2fc_fcoe.o bnx2fc_hwi.o bnx2fc_io.o bnx2fc_tgt.o diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h new file mode 100644 index 000000000000..df2fc09ba479 --- /dev/null +++ b/drivers/scsi/bnx2fc/bnx2fc.h @@ -0,0 +1,511 @@ +#ifndef _BNX2FC_H_ +#define _BNX2FC_H_ +/* bnx2fc.h: Broadcom NetXtreme II Linux FCoE offload driver. + * + * Copyright (c) 2008 - 2010 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ * + * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "57xx_hsi_bnx2fc.h" +#include "bnx2fc_debug.h" +#include "../../net/cnic_if.h" +#include "bnx2fc_constants.h" + +#define BNX2FC_NAME "bnx2fc" +#define BNX2FC_VERSION "1.0.0" + +#define PFX "bnx2fc: " + +#define BNX2X_DOORBELL_PCI_BAR 2 + +#define BNX2FC_MAX_BD_LEN 0xffff +#define BNX2FC_BD_SPLIT_SZ 0x8000 +#define BNX2FC_MAX_BDS_PER_CMD 256 + +#define BNX2FC_SQ_WQES_MAX 256 + +#define BNX2FC_SCSI_MAX_SQES ((3 * BNX2FC_SQ_WQES_MAX) / 8) +#define BNX2FC_TM_MAX_SQES ((BNX2FC_SQ_WQES_MAX) / 2) +#define BNX2FC_ELS_MAX_SQES (BNX2FC_TM_MAX_SQES - 1) + +#define BNX2FC_RQ_WQES_MAX 16 +#define BNX2FC_CQ_WQES_MAX (BNX2FC_SQ_WQES_MAX + BNX2FC_RQ_WQES_MAX) + +#define BNX2FC_NUM_MAX_SESS 128 +#define BNX2FC_NUM_MAX_SESS_LOG (ilog2(BNX2FC_NUM_MAX_SESS)) + +#define BNX2FC_MAX_OUTSTANDING_CMNDS 4096 +#define BNX2FC_MIN_PAYLOAD 256 +#define BNX2FC_MAX_PAYLOAD 2048 + +#define BNX2FC_RQ_BUF_SZ 256 +#define BNX2FC_RQ_BUF_LOG_SZ (ilog2(BNX2FC_RQ_BUF_SZ)) + +#define BNX2FC_SQ_WQE_SIZE (sizeof(struct fcoe_sqe)) +#define BNX2FC_CQ_WQE_SIZE (sizeof(struct fcoe_cqe)) +#define BNX2FC_RQ_WQE_SIZE (BNX2FC_RQ_BUF_SZ) +#define BNX2FC_XFERQ_WQE_SIZE (sizeof(struct fcoe_xfrqe)) +#define BNX2FC_CONFQ_WQE_SIZE (sizeof(struct fcoe_confqe)) +#define BNX2FC_5771X_DB_PAGE_SIZE 128 + +#define BNX2FC_MAX_TASKS BNX2FC_MAX_OUTSTANDING_CMNDS +#define BNX2FC_TASK_SIZE 128 +#define BNX2FC_TASKS_PER_PAGE (PAGE_SIZE/BNX2FC_TASK_SIZE) +#define BNX2FC_TASK_CTX_ARR_SZ (BNX2FC_MAX_TASKS/BNX2FC_TASKS_PER_PAGE) + +#define BNX2FC_MAX_ROWS_IN_HASH_TBL 8 +#define BNX2FC_HASH_TBL_CHUNK_SIZE (16 * 1024) + +#define BNX2FC_MAX_SEQS 255 + +#define BNX2FC_READ (1 << 1) +#define BNX2FC_WRITE (1 << 0) + +#define BNX2FC_MIN_XID 0 +#define BNX2FC_MAX_XID (BNX2FC_MAX_OUTSTANDING_CMNDS - 1) +#define FCOE_MIN_XID (BNX2FC_MAX_OUTSTANDING_CMNDS) +#define FCOE_MAX_XID \ + (BNX2FC_MAX_OUTSTANDING_CMNDS + (nr_cpu_ids * 256)) +#define BNX2FC_MAX_LUN 0xFFFF +#define BNX2FC_MAX_FCP_TGT 256 +#define BNX2FC_MAX_CMD_LEN 16 + +#define BNX2FC_TM_TIMEOUT 60 /* secs */ +#define BNX2FC_IO_TIMEOUT 20000UL /* msecs */ + +#define BNX2FC_WAIT_CNT 120 +#define BNX2FC_FW_TIMEOUT (3 * HZ) + +#define PORT_MAX 2 + +#define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status) + +/* FC FCP Status */ +#define FC_GOOD 0 + +#define BNX2FC_RNID_HBA 0x7 + +/* bnx2fc driver uses only one instance of fcoe_percpu_s */ +extern struct fcoe_percpu_s bnx2fc_global; + +extern struct workqueue_struct *bnx2fc_wq; + +struct bnx2fc_percpu_s { + struct task_struct *iothread; + struct list_head work_list; + spinlock_t fp_work_lock; +}; + + +struct bnx2fc_hba { + struct list_head link; + struct cnic_dev *cnic; + struct pci_dev *pcidev; + struct net_device *netdev; + struct net_device *phys_dev; + unsigned long reg_with_cnic; + #define BNX2FC_CNIC_REGISTERED 1 + struct packet_type fcoe_packet_type; + struct packet_type fip_packet_type; + struct bnx2fc_cmd_mgr *cmd_mgr; + struct workqueue_struct *timer_work_queue; + struct kref kref; + spinlock_t hba_lock; + struct mutex hba_mutex; + unsigned long 
adapter_state; + #define ADAPTER_STATE_UP 0 + #define ADAPTER_STATE_GOING_DOWN 1 + #define ADAPTER_STATE_LINK_DOWN 2 + #define ADAPTER_STATE_READY 3 + u32 flags; + unsigned long init_done; + #define BNX2FC_FW_INIT_DONE 0 + #define BNX2FC_CTLR_INIT_DONE 1 + #define BNX2FC_CREATE_DONE 2 + struct fcoe_ctlr ctlr; + u8 vlan_enabled; + int vlan_id; + u32 next_conn_id; + struct fcoe_task_ctx_entry **task_ctx; + dma_addr_t *task_ctx_dma; + struct regpair *task_ctx_bd_tbl; + dma_addr_t task_ctx_bd_dma; + + int hash_tbl_segment_count; + void **hash_tbl_segments; + void *hash_tbl_pbl; + dma_addr_t hash_tbl_pbl_dma; + struct fcoe_t2_hash_table_entry *t2_hash_tbl; + dma_addr_t t2_hash_tbl_dma; + char *t2_hash_tbl_ptr; + dma_addr_t t2_hash_tbl_ptr_dma; + + char *dummy_buffer; + dma_addr_t dummy_buf_dma; + + struct fcoe_statistics_params *stats_buffer; + dma_addr_t stats_buf_dma; + + /* + * PCI related info. + */ + u16 pci_did; + u16 pci_vid; + u16 pci_sdid; + u16 pci_svid; + u16 pci_func; + u16 pci_devno; + + struct task_struct *l2_thread; + + /* linkdown handling */ + wait_queue_head_t shutdown_wait; + int wait_for_link_down; + + /*destroy handling */ + struct timer_list destroy_timer; + wait_queue_head_t destroy_wait; + + /* Active list of offloaded sessions */ + struct bnx2fc_rport *tgt_ofld_list[BNX2FC_NUM_MAX_SESS]; + int num_ofld_sess; + + /* statistics */ + struct completion stat_req_done; +}; + +#define bnx2fc_from_ctlr(fip) container_of(fip, struct bnx2fc_hba, ctlr) + +struct bnx2fc_cmd_mgr { + struct bnx2fc_hba *hba; + u16 next_idx; + struct list_head *free_list; + spinlock_t *free_list_lock; + struct io_bdt **io_bdt_pool; + struct bnx2fc_cmd **cmds; +}; + +struct bnx2fc_rport { + struct fcoe_port *port; + struct fc_rport *rport; + struct fc_rport_priv *rdata; + void __iomem *ctx_base; +#define DPM_TRIGER_TYPE 0x40 + u32 fcoe_conn_id; + u32 context_id; + u32 sid; + + unsigned long flags; +#define BNX2FC_FLAG_SESSION_READY 0x1 +#define BNX2FC_FLAG_OFFLOADED 0x2 +#define BNX2FC_FLAG_DISABLED 0x3 +#define BNX2FC_FLAG_DESTROYED 0x4 +#define BNX2FC_FLAG_OFLD_REQ_CMPL 0x5 +#define BNX2FC_FLAG_DESTROY_CMPL 0x6 +#define BNX2FC_FLAG_CTX_ALLOC_FAILURE 0x7 +#define BNX2FC_FLAG_UPLD_REQ_COMPL 0x8 +#define BNX2FC_FLAG_EXPL_LOGO 0x9 + + u32 max_sqes; + u32 max_rqes; + u32 max_cqes; + + struct fcoe_sqe *sq; + dma_addr_t sq_dma; + u16 sq_prod_idx; + u8 sq_curr_toggle_bit; + u32 sq_mem_size; + + struct fcoe_cqe *cq; + dma_addr_t cq_dma; + u32 cq_cons_idx; + u8 cq_curr_toggle_bit; + u32 cq_mem_size; + + void *rq; + dma_addr_t rq_dma; + u32 rq_prod_idx; + u32 rq_cons_idx; + u32 rq_mem_size; + + void *rq_pbl; + dma_addr_t rq_pbl_dma; + u32 rq_pbl_size; + + struct fcoe_xfrqe *xferq; + dma_addr_t xferq_dma; + u32 xferq_mem_size; + + struct fcoe_confqe *confq; + dma_addr_t confq_dma; + u32 confq_mem_size; + + void *confq_pbl; + dma_addr_t confq_pbl_dma; + u32 confq_pbl_size; + + struct fcoe_conn_db *conn_db; + dma_addr_t conn_db_dma; + u32 conn_db_mem_size; + + struct fcoe_sqe *lcq; + dma_addr_t lcq_dma; + u32 lcq_mem_size; + + void *ofld_req[4]; + dma_addr_t ofld_req_dma[4]; + void *enbl_req; + dma_addr_t enbl_req_dma; + + spinlock_t tgt_lock; + spinlock_t cq_lock; + atomic_t num_active_ios; + u32 flush_in_prog; + unsigned long work_time_slice; + unsigned long timestamp; + struct list_head free_task_list; + struct bnx2fc_cmd *pending_queue[BNX2FC_SQ_WQES_MAX+1]; + atomic_t pi; + atomic_t ci; + struct list_head active_cmd_queue; + struct list_head els_queue; + struct list_head io_retire_queue; + struct list_head 
active_tm_queue; + + struct timer_list ofld_timer; + wait_queue_head_t ofld_wait; + + struct timer_list upld_timer; + wait_queue_head_t upld_wait; +}; + +struct bnx2fc_mp_req { + u8 tm_flags; + + u32 req_len; + void *req_buf; + dma_addr_t req_buf_dma; + struct fcoe_bd_ctx *mp_req_bd; + dma_addr_t mp_req_bd_dma; + struct fc_frame_header req_fc_hdr; + + u32 resp_len; + void *resp_buf; + dma_addr_t resp_buf_dma; + struct fcoe_bd_ctx *mp_resp_bd; + dma_addr_t mp_resp_bd_dma; + struct fc_frame_header resp_fc_hdr; +}; + +struct bnx2fc_els_cb_arg { + struct bnx2fc_cmd *aborted_io_req; + struct bnx2fc_cmd *io_req; + u16 l2_oxid; +}; + +/* bnx2fc command structure */ +struct bnx2fc_cmd { + struct list_head link; + u8 on_active_queue; + u8 on_tmf_queue; + u8 cmd_type; +#define BNX2FC_SCSI_CMD 1 +#define BNX2FC_TASK_MGMT_CMD 2 +#define BNX2FC_ABTS 3 +#define BNX2FC_ELS 4 +#define BNX2FC_CLEANUP 5 + u8 io_req_flags; + struct kref refcount; + struct fcoe_port *port; + struct bnx2fc_rport *tgt; + struct scsi_cmnd *sc_cmd; + struct bnx2fc_cmd_mgr *cmd_mgr; + struct bnx2fc_mp_req mp_req; + void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg); + struct bnx2fc_els_cb_arg *cb_arg; + struct delayed_work timeout_work; /* timer for ULP timeouts */ + struct completion tm_done; + int wait_for_comp; + u16 xid; + struct fcoe_task_ctx_entry *task; + struct io_bdt *bd_tbl; + struct fcp_rsp *rsp; + size_t data_xfer_len; + unsigned long req_flags; +#define BNX2FC_FLAG_ISSUE_RRQ 0x1 +#define BNX2FC_FLAG_ISSUE_ABTS 0x2 +#define BNX2FC_FLAG_ABTS_DONE 0x3 +#define BNX2FC_FLAG_TM_COMPL 0x4 +#define BNX2FC_FLAG_TM_TIMEOUT 0x5 +#define BNX2FC_FLAG_IO_CLEANUP 0x6 +#define BNX2FC_FLAG_RETIRE_OXID 0x7 +#define BNX2FC_FLAG_EH_ABORT 0x8 +#define BNX2FC_FLAG_IO_COMPL 0x9 +#define BNX2FC_FLAG_ELS_DONE 0xa +#define BNX2FC_FLAG_ELS_TIMEOUT 0xb + u32 fcp_resid; + u32 fcp_rsp_len; + u32 fcp_sns_len; + u8 cdb_status; /* SCSI IO status */ + u8 fcp_status; /* FCP IO status */ + u8 fcp_rsp_code; + u8 scsi_comp_flags; +}; + +struct io_bdt { + struct bnx2fc_cmd *io_req; + struct fcoe_bd_ctx *bd_tbl; + dma_addr_t bd_tbl_dma; + u16 bd_valid; +}; + +struct bnx2fc_work { + struct list_head list; + struct bnx2fc_rport *tgt; + u16 wqe; +}; +struct bnx2fc_unsol_els { + struct fc_lport *lport; + struct fc_frame *fp; + struct work_struct unsol_els_work; +}; + + + +struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type); +void bnx2fc_cmd_release(struct kref *ref); +int bnx2fc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd); +int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba); +int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba); +int bnx2fc_send_session_ofld_req(struct fcoe_port *port, + struct bnx2fc_rport *tgt); +int bnx2fc_send_session_disable_req(struct fcoe_port *port, + struct bnx2fc_rport *tgt); +int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba, + struct bnx2fc_rport *tgt); +int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt); +void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[], + u32 num_cqe); +int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba); +void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba); +int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba); +void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba); +struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba, + u16 min_xid, u16 max_xid); +void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr); +void bnx2fc_get_link_state(struct bnx2fc_hba *hba); +char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items); +void 
bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items); +int bnx2fc_get_paged_crc_eof(struct sk_buff *skb, int tlen); +int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req); +int bnx2fc_send_adisc(struct bnx2fc_rport *tgt, struct fc_frame *fp); +int bnx2fc_send_logo(struct bnx2fc_rport *tgt, struct fc_frame *fp); +int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp); +int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req); +int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req); +void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req, + unsigned int timer_msec); +int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req); +void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task, + u16 orig_xid); +void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task); +void bnx2fc_init_task(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task); +void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid); +void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt); +int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd); +int bnx2fc_eh_host_reset(struct scsi_cmnd *sc_cmd); +int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd); +int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd); +void bnx2fc_rport_event_handler(struct fc_lport *lport, + struct fc_rport_priv *rport, + enum fc_rport_event event); +void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task, + u8 num_rq); +void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task, + u8 num_rq); +void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task, + u8 num_rq); +void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task, + u8 num_rq); +void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req, + struct fcoe_task_ctx_entry *task, + u8 num_rq); +void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req, + struct fcp_cmnd *fcp_cmnd); + + + +void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt); +struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did, + struct fc_frame *fp, unsigned int op, + void (*resp)(struct fc_seq *, + struct fc_frame *, + void *), + void *arg, u32 timeout); +int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt); +void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe); +struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port, + u32 port_id); +void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt, + unsigned char *buf, + u32 frame_len, u16 l2_oxid); +int bnx2fc_send_stat_req(struct bnx2fc_hba *hba); + +#endif diff --git a/drivers/scsi/bnx2fc/bnx2fc_constants.h b/drivers/scsi/bnx2fc/bnx2fc_constants.h new file mode 100644 index 000000000000..fe7769173c43 --- /dev/null +++ b/drivers/scsi/bnx2fc/bnx2fc_constants.h @@ -0,0 +1,206 @@ +#ifndef __BNX2FC_CONSTANTS_H_ +#define __BNX2FC_CONSTANTS_H_ + +/** + * This file defines HSI constants for the FCoE flows + */ + +/* KWQ/KCQ FCoE layer code */ +#define FCOE_KWQE_LAYER_CODE (7) + +/* KWQ (kernel work queue) request op codes */ +#define FCOE_KWQE_OPCODE_INIT1 (0) +#define FCOE_KWQE_OPCODE_INIT2 (1) +#define FCOE_KWQE_OPCODE_INIT3 (2) +#define FCOE_KWQE_OPCODE_OFFLOAD_CONN1 (3) +#define FCOE_KWQE_OPCODE_OFFLOAD_CONN2 (4) +#define FCOE_KWQE_OPCODE_OFFLOAD_CONN3 (5) +#define FCOE_KWQE_OPCODE_OFFLOAD_CONN4 (6) +#define FCOE_KWQE_OPCODE_ENABLE_CONN (7) +#define FCOE_KWQE_OPCODE_DISABLE_CONN (8) +#define FCOE_KWQE_OPCODE_DESTROY_CONN (9) +#define FCOE_KWQE_OPCODE_DESTROY 
(10) +#define FCOE_KWQE_OPCODE_STAT (11) + +/* KCQ (kernel completion queue) response op codes */ +#define FCOE_KCQE_OPCODE_INIT_FUNC (0x10) +#define FCOE_KCQE_OPCODE_DESTROY_FUNC (0x11) +#define FCOE_KCQE_OPCODE_STAT_FUNC (0x12) +#define FCOE_KCQE_OPCODE_OFFLOAD_CONN (0x15) +#define FCOE_KCQE_OPCODE_ENABLE_CONN (0x16) +#define FCOE_KCQE_OPCODE_DISABLE_CONN (0x17) +#define FCOE_KCQE_OPCODE_DESTROY_CONN (0x18) +#define FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION (0x20) +#define FCOE_KCQE_OPCODE_FCOE_ERROR (0x21) + +/* KCQ (kernel completion queue) completion status */ +#define FCOE_KCQE_COMPLETION_STATUS_SUCCESS (0x0) +#define FCOE_KCQE_COMPLETION_STATUS_ERROR (0x1) +#define FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE (0x2) +#define FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE (0x3) +#define FCOE_KCQE_COMPLETION_STATUS_CTX_FREE_FAILURE (0x4) +#define FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR (0x5) + +/* Unsolicited CQE type */ +#define FCOE_UNSOLICITED_FRAME_CQE_TYPE 0 +#define FCOE_ERROR_DETECTION_CQE_TYPE 1 +#define FCOE_WARNING_DETECTION_CQE_TYPE 2 + +/* Task context constants */ +/* After driver has initialize the task in case timer services required */ +#define FCOE_TASK_TX_STATE_INIT 0 +/* In case timer services are required then shall be updated by Xstorm after + * start processing the task. In case no timer facilities are required then the + * driver would initialize the state to this value */ +#define FCOE_TASK_TX_STATE_NORMAL 1 +/* Task is under abort procedure. Updated in order to stop processing of + * pending WQEs on this task */ +#define FCOE_TASK_TX_STATE_ABORT 2 +/* For E_D_T_TOV timer expiration in Xstorm (Class 2 only) */ +#define FCOE_TASK_TX_STATE_ERROR 3 +/* For REC_TOV timer expiration indication received from Xstorm */ +#define FCOE_TASK_TX_STATE_WARNING 4 +/* For completed unsolicited task */ +#define FCOE_TASK_TX_STATE_UNSOLICITED_COMPLETED 5 +/* For exchange cleanup request task */ +#define FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP 6 +/* For sequence cleanup request task */ +#define FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP 7 +/* Mark task as aborted and indicate that ABTS was not transmitted */ +#define FCOE_TASK_TX_STATE_BEFORE_ABTS_TX 8 +/* Mark task as aborted and indicate that ABTS was transmitted */ +#define FCOE_TASK_TX_STATE_AFTER_ABTS_TX 9 +/* For completion the ABTS task. */ +#define FCOE_TASK_TX_STATE_ABTS_TX_COMPLETED 10 +/* Mark task as aborted and indicate that Exchange cleanup was not transmitted + */ +#define FCOE_TASK_TX_STATE_BEFORE_EXCHANGE_CLEANUP_TX 11 +/* Mark task as aborted and indicate that Exchange cleanup was transmitted */ +#define FCOE_TASK_TX_STATE_AFTER_EXCHANGE_CLEANUP_TX 12 + +#define FCOE_TASK_RX_STATE_NORMAL 0 +#define FCOE_TASK_RX_STATE_COMPLETED 1 +/* Obsolete: Intermediate completion (middle path with local completion) */ +#define FCOE_TASK_RX_STATE_INTER_COMP 2 +/* For REC_TOV timer expiration indication received from Xstorm */ +#define FCOE_TASK_RX_STATE_WARNING 3 +/* For E_D_T_TOV timer expiration in Ustorm */ +#define FCOE_TASK_RX_STATE_ERROR 4 +/* ABTS ACC arrived wait for local completion to finally complete the task. */ +#define FCOE_TASK_RX_STATE_ABTS_ACC_ARRIVED 5 +/* local completion arrived wait for ABTS ACC to finally complete the task. */ +#define FCOE_TASK_RX_STATE_ABTS_LOCAL_COMP_ARRIVED 6 +/* Special completion indication in case of task was aborted. */ +#define FCOE_TASK_RX_STATE_ABTS_COMPLETED 7 +/* Special completion indication in case of task was cleaned. 
*/ +#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED 8 +/* Special completion indication (in task requested the exchange cleanup) in + * case cleaned task is in non-valid. */ +#define FCOE_TASK_RX_STATE_ABORT_CLEANUP_COMPLETED 9 +/* Special completion indication (in task requested the sequence cleanup) in + * case cleaned task was already returned to normal. */ +#define FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP 10 +/* Exchange cleanup arrived wait until xfer will be handled to finally + * complete the task. */ +#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_ARRIVED 11 +/* Xfer handled, wait for exchange cleanup to finally complete the task. */ +#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_HANDLED_XFER 12 + +#define FCOE_TASK_TYPE_WRITE 0 +#define FCOE_TASK_TYPE_READ 1 +#define FCOE_TASK_TYPE_MIDPATH 2 +#define FCOE_TASK_TYPE_UNSOLICITED 3 +#define FCOE_TASK_TYPE_ABTS 4 +#define FCOE_TASK_TYPE_EXCHANGE_CLEANUP 5 +#define FCOE_TASK_TYPE_SEQUENCE_CLEANUP 6 + +#define FCOE_TASK_DEV_TYPE_DISK 0 +#define FCOE_TASK_DEV_TYPE_TAPE 1 + +#define FCOE_TASK_CLASS_TYPE_3 0 +#define FCOE_TASK_CLASS_TYPE_2 1 + +/* Everest FCoE connection type */ +#define B577XX_FCOE_CONNECTION_TYPE 4 + +/* Error codes for Error Reporting in fast path flows */ +/* XFER error codes */ +#define FCOE_ERROR_CODE_XFER_OOO_RO 0 +#define FCOE_ERROR_CODE_XFER_RO_NOT_ALIGNED 1 +#define FCOE_ERROR_CODE_XFER_NULL_BURST_LEN 2 +#define FCOE_ERROR_CODE_XFER_RO_GREATER_THAN_DATA2TRNS 3 +#define FCOE_ERROR_CODE_XFER_INVALID_PAYLOAD_SIZE 4 +#define FCOE_ERROR_CODE_XFER_TASK_TYPE_NOT_WRITE 5 +#define FCOE_ERROR_CODE_XFER_PEND_XFER_SET 6 +#define FCOE_ERROR_CODE_XFER_OPENED_SEQ 7 +#define FCOE_ERROR_CODE_XFER_FCTL 8 + +/* FCP RSP error codes */ +#define FCOE_ERROR_CODE_FCP_RSP_BIDI_FLAGS_SET 9 +#define FCOE_ERROR_CODE_FCP_RSP_UNDERFLOW 10 +#define FCOE_ERROR_CODE_FCP_RSP_OVERFLOW 11 +#define FCOE_ERROR_CODE_FCP_RSP_INVALID_LENGTH_FIELD 12 +#define FCOE_ERROR_CODE_FCP_RSP_INVALID_SNS_FIELD 13 +#define FCOE_ERROR_CODE_FCP_RSP_INVALID_PAYLOAD_SIZE 14 +#define FCOE_ERROR_CODE_FCP_RSP_PEND_XFER_SET 15 +#define FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ 16 +#define FCOE_ERROR_CODE_FCP_RSP_FCTL 17 +#define FCOE_ERROR_CODE_FCP_RSP_LAST_SEQ_RESET 18 +#define FCOE_ERROR_CODE_FCP_RSP_CONF_REQ_NOT_SUPPORTED_YET 19 + +/* FCP DATA error codes */ +#define FCOE_ERROR_CODE_DATA_OOO_RO 20 +#define FCOE_ERROR_CODE_DATA_EXCEEDS_DEFINED_MAX_FRAME_SIZE 21 +#define FCOE_ERROR_CODE_DATA_EXCEEDS_DATA2TRNS 22 +#define FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET 23 +#define FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET 24 +#define FCOE_ERROR_CODE_DATA_EOFN_END_SEQ_SET 25 +#define FCOE_ERROR_CODE_DATA_EOFT_END_SEQ_RESET 26 +#define FCOE_ERROR_CODE_DATA_TASK_TYPE_NOT_READ 27 +#define FCOE_ERROR_CODE_DATA_FCTL 28 + +/* Middle path error codes */ +#define FCOE_ERROR_CODE_MIDPATH_TYPE_NOT_ELS 29 +#define FCOE_ERROR_CODE_MIDPATH_SOFI3_SEQ_ACTIVE_SET 30 +#define FCOE_ERROR_CODE_MIDPATH_SOFN_SEQ_ACTIVE_RESET 31 +#define FCOE_ERROR_CODE_MIDPATH_EOFN_END_SEQ_SET 32 +#define FCOE_ERROR_CODE_MIDPATH_EOFT_END_SEQ_RESET 33 +#define FCOE_ERROR_CODE_MIDPATH_ELS_REPLY_FCTL 34 +#define FCOE_ERROR_CODE_MIDPATH_INVALID_REPLY 35 +#define FCOE_ERROR_CODE_MIDPATH_ELS_REPLY_RCTL 36 + +/* ABTS error codes */ +#define FCOE_ERROR_CODE_ABTS_REPLY_F_CTL 37 +#define FCOE_ERROR_CODE_ABTS_REPLY_DDF_RCTL_FIELD 38 +#define FCOE_ERROR_CODE_ABTS_REPLY_INVALID_BLS_RCTL 39 +#define FCOE_ERROR_CODE_ABTS_REPLY_INVALID_RCTL 40 +#define FCOE_ERROR_CODE_ABTS_REPLY_RCTL_GENERAL_MISMATCH 41 + +/* Common error codes */ 
+#define FCOE_ERROR_CODE_COMMON_MIDDLE_FRAME_WITH_PAD 42 +#define FCOE_ERROR_CODE_COMMON_SEQ_INIT_IN_TCE 43 +#define FCOE_ERROR_CODE_COMMON_FC_HDR_RX_ID_MISMATCH 44 +#define FCOE_ERROR_CODE_COMMON_INCORRECT_SEQ_CNT 45 +#define FCOE_ERROR_CODE_COMMON_DATA_FC_HDR_FCP_TYPE_MISMATCH 46 +#define FCOE_ERROR_CODE_COMMON_DATA_NO_MORE_SGES 47 +#define FCOE_ERROR_CODE_COMMON_OPTIONAL_FC_HDR 48 +#define FCOE_ERROR_CODE_COMMON_READ_TCE_OX_ID_TOO_BIG 49 +#define FCOE_ERROR_CODE_COMMON_DATA_WAS_NOT_TRANSMITTED 50 + +/* Unsolicited Rx error codes */ +#define FCOE_ERROR_CODE_UNSOLICITED_TYPE_NOT_ELS 51 +#define FCOE_ERROR_CODE_UNSOLICITED_TYPE_NOT_BLS 52 +#define FCOE_ERROR_CODE_UNSOLICITED_FCTL_ELS 53 +#define FCOE_ERROR_CODE_UNSOLICITED_FCTL_BLS 54 +#define FCOE_ERROR_CODE_UNSOLICITED_R_CTL 55 + +#define FCOE_ERROR_CODE_RW_TASK_DDF_RCTL_INFO_FIELD 56 +#define FCOE_ERROR_CODE_RW_TASK_INVALID_RCTL 57 +#define FCOE_ERROR_CODE_RW_TASK_RCTL_GENERAL_MISMATCH 58 + +/* Timer error codes */ +#define FCOE_ERROR_CODE_E_D_TOV_TIMER_EXPIRATION 60 +#define FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION 61 + + +#endif /* BNX2FC_CONSTANTS_H_ */ diff --git a/drivers/scsi/bnx2fc/bnx2fc_debug.h b/drivers/scsi/bnx2fc/bnx2fc_debug.h new file mode 100644 index 000000000000..7f6aff68cc53 --- /dev/null +++ b/drivers/scsi/bnx2fc/bnx2fc_debug.h @@ -0,0 +1,70 @@ +#ifndef __BNX2FC_DEBUG__ +#define __BNX2FC_DEBUG__ + +/* Log level bit mask */ +#define LOG_IO 0x01 /* scsi cmd error, cleanup */ +#define LOG_TGT 0x02 /* Session setup, cleanup, etc' */ +#define LOG_HBA 0x04 /* lport events, link, mtu, etc' */ +#define LOG_ELS 0x08 /* ELS logs */ +#define LOG_MISC 0x10 /* fcoe L2 frame related logs*/ +#define LOG_ALL 0xff /* LOG all messages */ + +extern unsigned int bnx2fc_debug_level; + +#define BNX2FC_CHK_LOGGING(LEVEL, CMD) \ + do { \ + if (unlikely(bnx2fc_debug_level & LEVEL)) \ + do { \ + CMD; \ + } while (0); \ + } while (0) + +#define BNX2FC_ELS_DBG(fmt, arg...) \ + BNX2FC_CHK_LOGGING(LOG_ELS, \ + printk(KERN_ALERT PFX fmt, ##arg)) + +#define BNX2FC_MISC_DBG(fmt, arg...) \ + BNX2FC_CHK_LOGGING(LOG_MISC, \ + printk(KERN_ALERT PFX fmt, ##arg)) + +#define BNX2FC_IO_DBG(io_req, fmt, arg...) \ + do { \ + if (!io_req || !io_req->port || !io_req->port->lport || \ + !io_req->port->lport->host) \ + BNX2FC_CHK_LOGGING(LOG_IO, \ + printk(KERN_ALERT PFX "NULL " fmt, ##arg)); \ + else \ + BNX2FC_CHK_LOGGING(LOG_IO, \ + shost_printk(KERN_ALERT, \ + (io_req)->port->lport->host, \ + PFX "xid:0x%x " fmt, \ + (io_req)->xid, ##arg)); \ + } while (0) + +#define BNX2FC_TGT_DBG(tgt, fmt, arg...) \ + do { \ + if (!tgt || !tgt->port || !tgt->port->lport || \ + !tgt->port->lport->host || !tgt->rport) \ + BNX2FC_CHK_LOGGING(LOG_TGT, \ + printk(KERN_ALERT PFX "NULL " fmt, ##arg)); \ + else \ + BNX2FC_CHK_LOGGING(LOG_TGT, \ + shost_printk(KERN_ALERT, \ + (tgt)->port->lport->host, \ + PFX "port:%x " fmt, \ + (tgt)->rport->port_id, ##arg)); \ + } while (0) + + +#define BNX2FC_HBA_DBG(lport, fmt, arg...) \ + do { \ + if (!lport || !lport->host) \ + BNX2FC_CHK_LOGGING(LOG_HBA, \ + printk(KERN_ALERT PFX "NULL " fmt, ##arg)); \ + else \ + BNX2FC_CHK_LOGGING(LOG_HBA, \ + shost_printk(KERN_ALERT, lport->host, \ + PFX fmt, ##arg)); \ + } while (0) + +#endif diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c new file mode 100644 index 000000000000..7a11a255157f --- /dev/null +++ b/drivers/scsi/bnx2fc/bnx2fc_els.c @@ -0,0 +1,515 @@ +/* + * bnx2fc_els.c: Broadcom NetXtreme II Linux FCoE offload driver. 
+ * This file contains helper routines that handle ELS requests + * and responses. + * + * Copyright (c) 2008 - 2010 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com) + */ + +#include "bnx2fc.h" + +static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp, + void *arg); +static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp, + void *arg); +static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op, + void *data, u32 data_len, + void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg), + struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec); + +static void bnx2fc_rrq_compl(struct bnx2fc_els_cb_arg *cb_arg) +{ + struct bnx2fc_cmd *orig_io_req; + struct bnx2fc_cmd *rrq_req; + int rc = 0; + + BUG_ON(!cb_arg); + rrq_req = cb_arg->io_req; + orig_io_req = cb_arg->aborted_io_req; + BUG_ON(!orig_io_req); + BNX2FC_ELS_DBG("rrq_compl: orig xid = 0x%x, rrq_xid = 0x%x\n", + orig_io_req->xid, rrq_req->xid); + + kref_put(&orig_io_req->refcount, bnx2fc_cmd_release); + + if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rrq_req->req_flags)) { + /* + * els req is timed out. cleanup the IO with FW and + * drop the completion. Remove from active_cmd_queue. + */ + BNX2FC_ELS_DBG("rrq xid - 0x%x timed out, clean it up\n", + rrq_req->xid); + + if (rrq_req->on_active_queue) { + list_del_init(&rrq_req->link); + rrq_req->on_active_queue = 0; + rc = bnx2fc_initiate_cleanup(rrq_req); + BUG_ON(rc); + } + } + kfree(cb_arg); +} +int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req) +{ + + struct fc_els_rrq rrq; + struct bnx2fc_rport *tgt = aborted_io_req->tgt; + struct fc_lport *lport = tgt->rdata->local_port; + struct bnx2fc_els_cb_arg *cb_arg = NULL; + u32 sid = tgt->sid; + u32 r_a_tov = lport->r_a_tov; + unsigned long start = jiffies; + int rc; + + BNX2FC_ELS_DBG("Sending RRQ orig_xid = 0x%x\n", + aborted_io_req->xid); + memset(&rrq, 0, sizeof(rrq)); + + cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_NOIO); + if (!cb_arg) { + printk(KERN_ERR PFX "Unable to allocate cb_arg for RRQ\n"); + rc = -ENOMEM; + goto rrq_err; + } + + cb_arg->aborted_io_req = aborted_io_req; + + rrq.rrq_cmd = ELS_RRQ; + hton24(rrq.rrq_s_id, sid); + rrq.rrq_ox_id = htons(aborted_io_req->xid); + rrq.rrq_rx_id = htons(aborted_io_req->task->rx_wr_tx_rd.rx_id); + +retry_rrq: + rc = bnx2fc_initiate_els(tgt, ELS_RRQ, &rrq, sizeof(rrq), + bnx2fc_rrq_compl, cb_arg, + r_a_tov); + if (rc == -ENOMEM) { + if (time_after(jiffies, start + (10 * HZ))) { + BNX2FC_ELS_DBG("rrq Failed\n"); + rc = FAILED; + goto rrq_err; + } + msleep(20); + goto retry_rrq; + } +rrq_err: + if (rc) { + BNX2FC_ELS_DBG("RRQ failed - release orig io req 0x%x\n", + aborted_io_req->xid); + kfree(cb_arg); + spin_lock_bh(&tgt->tgt_lock); + kref_put(&aborted_io_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + } + return rc; +} + +static void bnx2fc_l2_els_compl(struct bnx2fc_els_cb_arg *cb_arg) +{ + struct bnx2fc_cmd *els_req; + struct bnx2fc_rport *tgt; + struct bnx2fc_mp_req *mp_req; + struct fc_frame_header *fc_hdr; + unsigned char *buf; + void *resp_buf; + u32 resp_len, hdr_len; + u16 l2_oxid; + int frame_len; + int rc = 0; + + l2_oxid = cb_arg->l2_oxid; + BNX2FC_ELS_DBG("ELS COMPL - l2_oxid = 0x%x\n", l2_oxid); + + els_req = cb_arg->io_req; + if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, 
&els_req->req_flags)) { + /* + * els req is timed out. cleanup the IO with FW and + * drop the completion. libfc will handle the els timeout + */ + if (els_req->on_active_queue) { + list_del_init(&els_req->link); + els_req->on_active_queue = 0; + rc = bnx2fc_initiate_cleanup(els_req); + BUG_ON(rc); + } + goto free_arg; + } + + tgt = els_req->tgt; + mp_req = &(els_req->mp_req); + fc_hdr = &(mp_req->resp_fc_hdr); + resp_len = mp_req->resp_len; + resp_buf = mp_req->resp_buf; + + buf = kzalloc(PAGE_SIZE, GFP_ATOMIC); + if (!buf) { + printk(KERN_ERR PFX "Unable to alloc mp buf\n"); + goto free_arg; + } + hdr_len = sizeof(*fc_hdr); + if (hdr_len + resp_len > PAGE_SIZE) { + printk(KERN_ERR PFX "l2_els_compl: resp len is " + "beyond page size\n"); + goto free_buf; + } + memcpy(buf, fc_hdr, hdr_len); + memcpy(buf + hdr_len, resp_buf, resp_len); + frame_len = hdr_len + resp_len; + + bnx2fc_process_l2_frame_compl(tgt, buf, frame_len, l2_oxid); + +free_buf: + kfree(buf); +free_arg: + kfree(cb_arg); +} + +int bnx2fc_send_adisc(struct bnx2fc_rport *tgt, struct fc_frame *fp) +{ + struct fc_els_adisc *adisc; + struct fc_frame_header *fh; + struct bnx2fc_els_cb_arg *cb_arg; + struct fc_lport *lport = tgt->rdata->local_port; + u32 r_a_tov = lport->r_a_tov; + int rc; + + fh = fc_frame_header_get(fp); + cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC); + if (!cb_arg) { + printk(KERN_ERR PFX "Unable to allocate cb_arg for ADISC\n"); + return -ENOMEM; + } + + cb_arg->l2_oxid = ntohs(fh->fh_ox_id); + + BNX2FC_ELS_DBG("send ADISC: l2_oxid = 0x%x\n", cb_arg->l2_oxid); + adisc = fc_frame_payload_get(fp, sizeof(*adisc)); + /* adisc is initialized by libfc */ + rc = bnx2fc_initiate_els(tgt, ELS_ADISC, adisc, sizeof(*adisc), + bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov); + if (rc) + kfree(cb_arg); + return rc; +} + +int bnx2fc_send_logo(struct bnx2fc_rport *tgt, struct fc_frame *fp) +{ + struct fc_els_logo *logo; + struct fc_frame_header *fh; + struct bnx2fc_els_cb_arg *cb_arg; + struct fc_lport *lport = tgt->rdata->local_port; + u32 r_a_tov = lport->r_a_tov; + int rc; + + fh = fc_frame_header_get(fp); + cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC); + if (!cb_arg) { + printk(KERN_ERR PFX "Unable to allocate cb_arg for LOGO\n"); + return -ENOMEM; + } + + cb_arg->l2_oxid = ntohs(fh->fh_ox_id); + + BNX2FC_ELS_DBG("Send LOGO: l2_oxid = 0x%x\n", cb_arg->l2_oxid); + logo = fc_frame_payload_get(fp, sizeof(*logo)); + /* logo is initialized by libfc */ + rc = bnx2fc_initiate_els(tgt, ELS_LOGO, logo, sizeof(*logo), + bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov); + if (rc) + kfree(cb_arg); + return rc; +} + +int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp) +{ + struct fc_els_rls *rls; + struct fc_frame_header *fh; + struct bnx2fc_els_cb_arg *cb_arg; + struct fc_lport *lport = tgt->rdata->local_port; + u32 r_a_tov = lport->r_a_tov; + int rc; + + fh = fc_frame_header_get(fp); + cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC); + if (!cb_arg) { + printk(KERN_ERR PFX "Unable to allocate cb_arg for LOGO\n"); + return -ENOMEM; + } + + cb_arg->l2_oxid = ntohs(fh->fh_ox_id); + + rls = fc_frame_payload_get(fp, sizeof(*rls)); + /* rls is initialized by libfc */ + rc = bnx2fc_initiate_els(tgt, ELS_RLS, rls, sizeof(*rls), + bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov); + if (rc) + kfree(cb_arg); + return rc; +} + +static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op, + void *data, u32 data_len, + void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg), + struct 
bnx2fc_els_cb_arg *cb_arg, u32 timer_msec) +{ + struct fcoe_port *port = tgt->port; + struct bnx2fc_hba *hba = port->priv; + struct fc_rport *rport = tgt->rport; + struct fc_lport *lport = port->lport; + struct bnx2fc_cmd *els_req; + struct bnx2fc_mp_req *mp_req; + struct fc_frame_header *fc_hdr; + struct fcoe_task_ctx_entry *task; + struct fcoe_task_ctx_entry *task_page; + int rc = 0; + int task_idx, index; + u32 did, sid; + u16 xid; + + rc = fc_remote_port_chkready(rport); + if (rc) { + printk(KERN_ALERT PFX "els 0x%x: rport not ready\n", op); + rc = -EINVAL; + goto els_err; + } + if (lport->state != LPORT_ST_READY || !(lport->link_up)) { + printk(KERN_ALERT PFX "els 0x%x: link is not ready\n", op); + rc = -EINVAL; + goto els_err; + } + if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) || + (test_bit(BNX2FC_FLAG_EXPL_LOGO, &tgt->flags))) { + printk(KERN_ERR PFX "els 0x%x: tgt not ready\n", op); + rc = -EINVAL; + goto els_err; + } + els_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ELS); + if (!els_req) { + rc = -ENOMEM; + goto els_err; + } + + els_req->sc_cmd = NULL; + els_req->port = port; + els_req->tgt = tgt; + els_req->cb_func = cb_func; + cb_arg->io_req = els_req; + els_req->cb_arg = cb_arg; + + mp_req = (struct bnx2fc_mp_req *)&(els_req->mp_req); + rc = bnx2fc_init_mp_req(els_req); + if (rc == FAILED) { + printk(KERN_ALERT PFX "ELS MP request init failed\n"); + spin_lock_bh(&tgt->tgt_lock); + kref_put(&els_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + rc = -ENOMEM; + goto els_err; + } else { + /* rc SUCCESS */ + rc = 0; + } + + /* Set the data_xfer_len to the size of ELS payload */ + mp_req->req_len = data_len; + els_req->data_xfer_len = mp_req->req_len; + + /* Fill ELS Payload */ + if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) { + memcpy(mp_req->req_buf, data, data_len); + } else { + printk(KERN_ALERT PFX "Invalid ELS op 0x%x\n", op); + els_req->cb_func = NULL; + els_req->cb_arg = NULL; + spin_lock_bh(&tgt->tgt_lock); + kref_put(&els_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + rc = -EINVAL; + } + + if (rc) + goto els_err; + + /* Fill FC header */ + fc_hdr = &(mp_req->req_fc_hdr); + + did = tgt->rport->port_id; + sid = tgt->sid; + + __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid, + FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | + FC_FC_SEQ_INIT, 0); + + /* Obtain exchange id */ + xid = els_req->xid; + task_idx = xid/BNX2FC_TASKS_PER_PAGE; + index = xid % BNX2FC_TASKS_PER_PAGE; + + /* Initialize task context for this IO request */ + task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx]; + task = &(task_page[index]); + bnx2fc_init_mp_task(els_req, task); + + spin_lock_bh(&tgt->tgt_lock); + + if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) { + printk(KERN_ERR PFX "initiate_els.. 
session not ready\n"); + els_req->cb_func = NULL; + els_req->cb_arg = NULL; + kref_put(&els_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + return -EINVAL; + } + + if (timer_msec) + bnx2fc_cmd_timer_set(els_req, timer_msec); + bnx2fc_add_2_sq(tgt, xid); + + els_req->on_active_queue = 1; + list_add_tail(&els_req->link, &tgt->els_queue); + + /* Ring doorbell */ + bnx2fc_ring_doorbell(tgt); + spin_unlock_bh(&tgt->tgt_lock); + +els_err: + return rc; +} + +void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req, + struct fcoe_task_ctx_entry *task, u8 num_rq) +{ + struct bnx2fc_mp_req *mp_req; + struct fc_frame_header *fc_hdr; + u64 *hdr; + u64 *temp_hdr; + + BNX2FC_ELS_DBG("Entered process_els_compl xid = 0x%x" + "cmd_type = %d\n", els_req->xid, els_req->cmd_type); + + if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE, + &els_req->req_flags)) { + BNX2FC_ELS_DBG("Timer context finished processing this " + "els - 0x%x\n", els_req->xid); + /* This IO doesnt receive cleanup completion */ + kref_put(&els_req->refcount, bnx2fc_cmd_release); + return; + } + + /* Cancel the timeout_work, as we received the response */ + if (cancel_delayed_work(&els_req->timeout_work)) + kref_put(&els_req->refcount, + bnx2fc_cmd_release); /* drop timer hold */ + + if (els_req->on_active_queue) { + list_del_init(&els_req->link); + els_req->on_active_queue = 0; + } + + mp_req = &(els_req->mp_req); + fc_hdr = &(mp_req->resp_fc_hdr); + + hdr = (u64 *)fc_hdr; + temp_hdr = (u64 *) + &task->cmn.general.cmd_info.mp_fc_frame.fc_hdr; + hdr[0] = cpu_to_be64(temp_hdr[0]); + hdr[1] = cpu_to_be64(temp_hdr[1]); + hdr[2] = cpu_to_be64(temp_hdr[2]); + + mp_req->resp_len = task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_off; + + /* Parse ELS response */ + if ((els_req->cb_func) && (els_req->cb_arg)) { + els_req->cb_func(els_req->cb_arg); + els_req->cb_arg = NULL; + } + + kref_put(&els_req->refcount, bnx2fc_cmd_release); +} + +static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp, + void *arg) +{ + struct fcoe_ctlr *fip = arg; + struct fc_exch *exch = fc_seq_exch(seq); + struct fc_lport *lport = exch->lp; + u8 *mac; + struct fc_frame_header *fh; + u8 op; + + if (IS_ERR(fp)) + goto done; + + mac = fr_cb(fp)->granted_mac; + if (is_zero_ether_addr(mac)) { + fh = fc_frame_header_get(fp); + if (fh->fh_type != FC_TYPE_ELS) { + printk(KERN_ERR PFX "bnx2fc_flogi_resp:" + "fh_type != FC_TYPE_ELS\n"); + fc_frame_free(fp); + return; + } + op = fc_frame_payload_op(fp); + if (lport->vport) { + if (op == ELS_LS_RJT) { + printk(KERN_ERR PFX "bnx2fc_flogi_resp is LS_RJT\n"); + fc_vport_terminate(lport->vport); + fc_frame_free(fp); + return; + } + } + if (fcoe_ctlr_recv_flogi(fip, lport, fp)) { + fc_frame_free(fp); + return; + } + } + fip->update_mac(lport, mac); +done: + fc_lport_flogi_resp(seq, fp, lport); +} + +static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp, + void *arg) +{ + struct fcoe_ctlr *fip = arg; + struct fc_exch *exch = fc_seq_exch(seq); + struct fc_lport *lport = exch->lp; + static u8 zero_mac[ETH_ALEN] = { 0 }; + + if (!IS_ERR(fp)) + fip->update_mac(lport, zero_mac); + fc_lport_logo_resp(seq, fp, lport); +} + +struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did, + struct fc_frame *fp, unsigned int op, + void (*resp)(struct fc_seq *, + struct fc_frame *, + void *), + void *arg, u32 timeout) +{ + struct fcoe_port *port = lport_priv(lport); + struct bnx2fc_hba *hba = port->priv; + struct fcoe_ctlr *fip = &hba->ctlr; + struct fc_frame_header *fh = fc_frame_header_get(fp); + + switch 
(op) { + case ELS_FLOGI: + case ELS_FDISC: + return fc_elsct_send(lport, did, fp, op, bnx2fc_flogi_resp, + fip, timeout); + case ELS_LOGO: + /* only hook onto fabric logouts, not port logouts */ + if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI) + break; + return fc_elsct_send(lport, did, fp, op, bnx2fc_logo_resp, + fip, timeout); + } + return fc_elsct_send(lport, did, fp, op, resp, arg, timeout); +} diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c new file mode 100644 index 000000000000..e476e8753079 --- /dev/null +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -0,0 +1,2535 @@ +/* bnx2fc_fcoe.c: Broadcom NetXtreme II Linux FCoE offload driver. + * This file contains the code that interacts with libfc, libfcoe, + * cnic modules to create FCoE instances, send/receive non-offloaded + * FIP/FCoE packets, listen to link events etc. + * + * Copyright (c) 2008 - 2010 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com) + */ + +#include "bnx2fc.h" + +static struct list_head adapter_list; +static u32 adapter_count; +static DEFINE_MUTEX(bnx2fc_dev_lock); +DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu); + +#define DRV_MODULE_NAME "bnx2fc" +#define DRV_MODULE_VERSION BNX2FC_VERSION +#define DRV_MODULE_RELDATE "Jan 25, 2011" + + +static char version[] __devinitdata = + "Broadcom NetXtreme II FCoE Driver " DRV_MODULE_NAME \ + " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; + + +MODULE_AUTHOR("Bhanu Prakash Gollapudi "); +MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 FCoE Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_MODULE_VERSION); + +#define BNX2FC_MAX_QUEUE_DEPTH 256 +#define BNX2FC_MIN_QUEUE_DEPTH 32 +#define FCOE_WORD_TO_BYTE 4 + +static struct scsi_transport_template *bnx2fc_transport_template; +static struct scsi_transport_template *bnx2fc_vport_xport_template; + +struct workqueue_struct *bnx2fc_wq; + +/* bnx2fc structure needs only one instance of the fcoe_percpu_s structure. 
+ * Here the io threads are per cpu but the l2 thread is just one + */ +struct fcoe_percpu_s bnx2fc_global; +DEFINE_SPINLOCK(bnx2fc_global_lock); + +static struct cnic_ulp_ops bnx2fc_cnic_cb; +static struct libfc_function_template bnx2fc_libfc_fcn_templ; +static struct scsi_host_template bnx2fc_shost_template; +static struct fc_function_template bnx2fc_transport_function; +static struct fc_function_template bnx2fc_vport_xport_function; +static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode); +static int bnx2fc_destroy(struct net_device *net_device); +static int bnx2fc_enable(struct net_device *netdev); +static int bnx2fc_disable(struct net_device *netdev); + +static void bnx2fc_recv_frame(struct sk_buff *skb); + +static void bnx2fc_start_disc(struct bnx2fc_hba *hba); +static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev); +static int bnx2fc_net_config(struct fc_lport *lp); +static int bnx2fc_lport_config(struct fc_lport *lport); +static int bnx2fc_em_config(struct fc_lport *lport); +static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba); +static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba); +static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba); +static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba); +static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba, + struct device *parent, int npiv); +static void bnx2fc_destroy_work(struct work_struct *work); + +static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev); +static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic); + +static int bnx2fc_fw_init(struct bnx2fc_hba *hba); +static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba); + +static void bnx2fc_port_shutdown(struct fc_lport *lport); +static void bnx2fc_stop(struct bnx2fc_hba *hba); +static int __init bnx2fc_mod_init(void); +static void __exit bnx2fc_mod_exit(void); + +unsigned int bnx2fc_debug_level; +module_param_named(debug_logging, bnx2fc_debug_level, int, S_IRUGO|S_IWUSR); + +static int bnx2fc_cpu_callback(struct notifier_block *nfb, + unsigned long action, void *hcpu); +/* notification function for CPU hotplug events */ +static struct notifier_block bnx2fc_cpu_notifier = { + .notifier_call = bnx2fc_cpu_callback, +}; + +static void bnx2fc_clean_rx_queue(struct fc_lport *lp) +{ + struct fcoe_percpu_s *bg; + struct fcoe_rcv_info *fr; + struct sk_buff_head *list; + struct sk_buff *skb, *next; + struct sk_buff *head; + + bg = &bnx2fc_global; + spin_lock_bh(&bg->fcoe_rx_list.lock); + list = &bg->fcoe_rx_list; + head = list->next; + for (skb = head; skb != (struct sk_buff *)list; + skb = next) { + next = skb->next; + fr = fcoe_dev_from_skb(skb); + if (fr->fr_dev == lp) { + __skb_unlink(skb, list); + kfree_skb(skb); + } + } + spin_unlock_bh(&bg->fcoe_rx_list.lock); +} + +int bnx2fc_get_paged_crc_eof(struct sk_buff *skb, int tlen) +{ + int rc; + spin_lock(&bnx2fc_global_lock); + rc = fcoe_get_paged_crc_eof(skb, tlen, &bnx2fc_global); + spin_unlock(&bnx2fc_global_lock); + + return rc; +} + +static void bnx2fc_abort_io(struct fc_lport *lport) +{ + /* + * This function is no-op for bnx2fc, but we do + * not want to leave it as NULL either, as libfc + * can call the default function which is + * fc_fcp_abort_io. 
+ */ +} + +static void bnx2fc_cleanup(struct fc_lport *lport) +{ + struct fcoe_port *port = lport_priv(lport); + struct bnx2fc_hba *hba = port->priv; + struct bnx2fc_rport *tgt; + int i; + + BNX2FC_MISC_DBG("Entered %s\n", __func__); + mutex_lock(&hba->hba_mutex); + spin_lock_bh(&hba->hba_lock); + for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) { + tgt = hba->tgt_ofld_list[i]; + if (tgt) { + /* Cleanup IOs belonging to requested vport */ + if (tgt->port == port) { + spin_unlock_bh(&hba->hba_lock); + BNX2FC_TGT_DBG(tgt, "flush/cleanup\n"); + bnx2fc_flush_active_ios(tgt); + spin_lock_bh(&hba->hba_lock); + } + } + } + spin_unlock_bh(&hba->hba_lock); + mutex_unlock(&hba->hba_mutex); +} + +static int bnx2fc_xmit_l2_frame(struct bnx2fc_rport *tgt, + struct fc_frame *fp) +{ + struct fc_rport_priv *rdata = tgt->rdata; + struct fc_frame_header *fh; + int rc = 0; + + fh = fc_frame_header_get(fp); + BNX2FC_TGT_DBG(tgt, "Xmit L2 frame rport = 0x%x, oxid = 0x%x, " + "r_ctl = 0x%x\n", rdata->ids.port_id, + ntohs(fh->fh_ox_id), fh->fh_r_ctl); + if ((fh->fh_type == FC_TYPE_ELS) && + (fh->fh_r_ctl == FC_RCTL_ELS_REQ)) { + + switch (fc_frame_payload_op(fp)) { + case ELS_ADISC: + rc = bnx2fc_send_adisc(tgt, fp); + break; + case ELS_LOGO: + rc = bnx2fc_send_logo(tgt, fp); + break; + case ELS_RLS: + rc = bnx2fc_send_rls(tgt, fp); + break; + default: + break; + } + } else if ((fh->fh_type == FC_TYPE_BLS) && + (fh->fh_r_ctl == FC_RCTL_BA_ABTS)) + BNX2FC_TGT_DBG(tgt, "ABTS frame\n"); + else { + BNX2FC_TGT_DBG(tgt, "Send L2 frame type 0x%x " + "rctl 0x%x thru non-offload path\n", + fh->fh_type, fh->fh_r_ctl); + return -ENODEV; + } + if (rc) + return -ENOMEM; + else + return 0; +} + +/** + * bnx2fc_xmit - bnx2fc's FCoE frame transmit function + * + * @lport: the associated local port + * @fp: the fc_frame to be transmitted + */ +static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp) +{ + struct ethhdr *eh; + struct fcoe_crc_eof *cp; + struct sk_buff *skb; + struct fc_frame_header *fh; + struct bnx2fc_hba *hba; + struct fcoe_port *port; + struct fcoe_hdr *hp; + struct bnx2fc_rport *tgt; + struct fcoe_dev_stats *stats; + u8 sof, eof; + u32 crc; + unsigned int hlen, tlen, elen; + int wlen, rc = 0; + + port = (struct fcoe_port *)lport_priv(lport); + hba = port->priv; + + fh = fc_frame_header_get(fp); + + skb = fp_skb(fp); + if (!lport->link_up) { + BNX2FC_HBA_DBG(lport, "bnx2fc_xmit link down\n"); + kfree_skb(skb); + return 0; + } + + if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) { + if (!hba->ctlr.sel_fcf) { + BNX2FC_HBA_DBG(lport, "FCF not selected yet!\n"); + kfree_skb(skb); + return -EINVAL; + } + if (fcoe_ctlr_els_send(&hba->ctlr, lport, skb)) + return 0; + } + + sof = fr_sof(fp); + eof = fr_eof(fp); + + /* + * Snoop the frame header to check if the frame is for + * an offloaded session + */ + /* + * tgt_ofld_list access is synchronized using + * both hba mutex and hba lock. Atleast hba mutex or + * hba lock needs to be held for read access. 
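+	 * In the transmit path below, only the hba_lock (a bottom-half
+	 * spinlock) is held around bnx2fc_tgt_lookup(), which satisfies the
+	 * "at least one of the two" rule stated above.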
+ */ + + spin_lock_bh(&hba->hba_lock); + tgt = bnx2fc_tgt_lookup(port, ntoh24(fh->fh_d_id)); + if (tgt && (test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) { + /* This frame is for offloaded session */ + BNX2FC_HBA_DBG(lport, "xmit: Frame is for offloaded session " + "port_id = 0x%x\n", ntoh24(fh->fh_d_id)); + spin_unlock_bh(&hba->hba_lock); + rc = bnx2fc_xmit_l2_frame(tgt, fp); + if (rc != -ENODEV) { + kfree_skb(skb); + return rc; + } + } else { + spin_unlock_bh(&hba->hba_lock); + } + + elen = sizeof(struct ethhdr); + hlen = sizeof(struct fcoe_hdr); + tlen = sizeof(struct fcoe_crc_eof); + wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE; + + skb->ip_summed = CHECKSUM_NONE; + crc = fcoe_fc_crc(fp); + + /* copy port crc and eof to the skb buff */ + if (skb_is_nonlinear(skb)) { + skb_frag_t *frag; + if (bnx2fc_get_paged_crc_eof(skb, tlen)) { + kfree_skb(skb); + return -ENOMEM; + } + frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1]; + cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ) + + frag->page_offset; + } else { + cp = (struct fcoe_crc_eof *)skb_put(skb, tlen); + } + + memset(cp, 0, sizeof(*cp)); + cp->fcoe_eof = eof; + cp->fcoe_crc32 = cpu_to_le32(~crc); + if (skb_is_nonlinear(skb)) { + kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ); + cp = NULL; + } + + /* adjust skb network/transport offsets to match mac/fcoe/port */ + skb_push(skb, elen + hlen); + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + skb->mac_len = elen; + skb->protocol = htons(ETH_P_FCOE); + skb->dev = hba->netdev; + + /* fill up mac and fcoe headers */ + eh = eth_hdr(skb); + eh->h_proto = htons(ETH_P_FCOE); + if (hba->ctlr.map_dest) + fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id); + else + /* insert GW address */ + memcpy(eh->h_dest, hba->ctlr.dest_addr, ETH_ALEN); + + if (unlikely(hba->ctlr.flogi_oxid != FC_XID_UNKNOWN)) + memcpy(eh->h_source, hba->ctlr.ctl_src_addr, ETH_ALEN); + else + memcpy(eh->h_source, port->data_src_addr, ETH_ALEN); + + hp = (struct fcoe_hdr *)(eh + 1); + memset(hp, 0, sizeof(*hp)); + if (FC_FCOE_VER) + FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER); + hp->fcoe_sof = sof; + + /* fcoe lso, mss is in max_payload which is non-zero for FCP data */ + if (lport->seq_offload && fr_max_payload(fp)) { + skb_shinfo(skb)->gso_type = SKB_GSO_FCOE; + skb_shinfo(skb)->gso_size = fr_max_payload(fp); + } else { + skb_shinfo(skb)->gso_type = 0; + skb_shinfo(skb)->gso_size = 0; + } + + /*update tx stats */ + stats = per_cpu_ptr(lport->dev_stats, get_cpu()); + stats->TxFrames++; + stats->TxWords += wlen; + put_cpu(); + + /* send down to lld */ + fr_dev(fp) = lport; + if (port->fcoe_pending_queue.qlen) + fcoe_check_wait_queue(lport, skb); + else if (fcoe_start_io(skb)) + fcoe_check_wait_queue(lport, skb); + + return 0; +} + +/** + * bnx2fc_rcv - This is bnx2fc's receive function called by NET_RX_SOFTIRQ + * + * @skb: the receive socket buffer + * @dev: associated net device + * @ptype: context + * @olddev: last device + * + * This function receives the packet and builds FC frame and passes it up + */ +static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *ptype, struct net_device *olddev) +{ + struct fc_lport *lport; + struct bnx2fc_hba *hba; + struct fc_frame_header *fh; + struct fcoe_rcv_info *fr; + struct fcoe_percpu_s *bg; + unsigned short oxid; + + hba = container_of(ptype, struct bnx2fc_hba, fcoe_packet_type); + lport = hba->ctlr.lp; + + if (unlikely(lport == NULL)) { + printk(KERN_ALERT PFX "bnx2fc_rcv: lport is NULL\n"); + goto err; + } + + if 
(unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) { + printk(KERN_ALERT PFX "bnx2fc_rcv: Wrong FC type frame\n"); + goto err; + } + + /* + * Check for minimum frame length, and make sure required FCoE + * and FC headers are pulled into the linear data area. + */ + if (unlikely((skb->len < FCOE_MIN_FRAME) || + !pskb_may_pull(skb, FCOE_HEADER_LEN))) + goto err; + + skb_set_transport_header(skb, sizeof(struct fcoe_hdr)); + fh = (struct fc_frame_header *) skb_transport_header(skb); + + oxid = ntohs(fh->fh_ox_id); + + fr = fcoe_dev_from_skb(skb); + fr->fr_dev = lport; + fr->ptype = ptype; + + bg = &bnx2fc_global; + spin_lock_bh(&bg->fcoe_rx_list.lock); + + __skb_queue_tail(&bg->fcoe_rx_list, skb); + if (bg->fcoe_rx_list.qlen == 1) + wake_up_process(bg->thread); + + spin_unlock_bh(&bg->fcoe_rx_list.lock); + + return 0; +err: + kfree_skb(skb); + return -1; +} + +static int bnx2fc_l2_rcv_thread(void *arg) +{ + struct fcoe_percpu_s *bg = arg; + struct sk_buff *skb; + + set_user_nice(current, -20); + set_current_state(TASK_INTERRUPTIBLE); + while (!kthread_should_stop()) { + schedule(); + set_current_state(TASK_RUNNING); + spin_lock_bh(&bg->fcoe_rx_list.lock); + while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL) { + spin_unlock_bh(&bg->fcoe_rx_list.lock); + bnx2fc_recv_frame(skb); + spin_lock_bh(&bg->fcoe_rx_list.lock); + } + spin_unlock_bh(&bg->fcoe_rx_list.lock); + set_current_state(TASK_INTERRUPTIBLE); + } + set_current_state(TASK_RUNNING); + return 0; +} + + +static void bnx2fc_recv_frame(struct sk_buff *skb) +{ + u32 fr_len; + struct fc_lport *lport; + struct fcoe_rcv_info *fr; + struct fcoe_dev_stats *stats; + struct fc_frame_header *fh; + struct fcoe_crc_eof crc_eof; + struct fc_frame *fp; + struct fc_lport *vn_port; + struct fcoe_port *port; + u8 *mac = NULL; + u8 *dest_mac = NULL; + struct fcoe_hdr *hp; + + fr = fcoe_dev_from_skb(skb); + lport = fr->fr_dev; + if (unlikely(lport == NULL)) { + printk(KERN_ALERT PFX "Invalid lport struct\n"); + kfree_skb(skb); + return; + } + + if (skb_is_nonlinear(skb)) + skb_linearize(skb); + mac = eth_hdr(skb)->h_source; + dest_mac = eth_hdr(skb)->h_dest; + + /* Pull the header */ + hp = (struct fcoe_hdr *) skb_network_header(skb); + fh = (struct fc_frame_header *) skb_transport_header(skb); + skb_pull(skb, sizeof(struct fcoe_hdr)); + fr_len = skb->len - sizeof(struct fcoe_crc_eof); + + stats = per_cpu_ptr(lport->dev_stats, get_cpu()); + stats->RxFrames++; + stats->RxWords += fr_len / FCOE_WORD_TO_BYTE; + + fp = (struct fc_frame *)skb; + fc_frame_init(fp); + fr_dev(fp) = lport; + fr_sof(fp) = hp->fcoe_sof; + if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) { + put_cpu(); + kfree_skb(skb); + return; + } + fr_eof(fp) = crc_eof.fcoe_eof; + fr_crc(fp) = crc_eof.fcoe_crc32; + if (pskb_trim(skb, fr_len)) { + put_cpu(); + kfree_skb(skb); + return; + } + + fh = fc_frame_header_get(fp); + + vn_port = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id)); + if (vn_port) { + port = lport_priv(vn_port); + if (compare_ether_addr(port->data_src_addr, dest_mac) + != 0) { + BNX2FC_HBA_DBG(lport, "fpma mismatch\n"); + put_cpu(); + kfree_skb(skb); + return; + } + } + if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && + fh->fh_type == FC_TYPE_FCP) { + /* Drop FCP data. 
We dont this in L2 path */ + put_cpu(); + kfree_skb(skb); + return; + } + if (fh->fh_r_ctl == FC_RCTL_ELS_REQ && + fh->fh_type == FC_TYPE_ELS) { + switch (fc_frame_payload_op(fp)) { + case ELS_LOGO: + if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) { + /* drop non-FIP LOGO */ + put_cpu(); + kfree_skb(skb); + return; + } + break; + } + } + if (le32_to_cpu(fr_crc(fp)) != + ~crc32(~0, skb->data, fr_len)) { + if (stats->InvalidCRCCount < 5) + printk(KERN_WARNING PFX "dropping frame with " + "CRC error\n"); + stats->InvalidCRCCount++; + put_cpu(); + kfree_skb(skb); + return; + } + put_cpu(); + fc_exch_recv(lport, fp); +} + +/** + * bnx2fc_percpu_io_thread - thread per cpu for ios + * + * @arg: ptr to bnx2fc_percpu_info structure + */ +int bnx2fc_percpu_io_thread(void *arg) +{ + struct bnx2fc_percpu_s *p = arg; + struct bnx2fc_work *work, *tmp; + LIST_HEAD(work_list); + + set_user_nice(current, -20); + set_current_state(TASK_INTERRUPTIBLE); + while (!kthread_should_stop()) { + schedule(); + set_current_state(TASK_RUNNING); + spin_lock_bh(&p->fp_work_lock); + while (!list_empty(&p->work_list)) { + list_splice_init(&p->work_list, &work_list); + spin_unlock_bh(&p->fp_work_lock); + + list_for_each_entry_safe(work, tmp, &work_list, list) { + list_del_init(&work->list); + bnx2fc_process_cq_compl(work->tgt, work->wqe); + kfree(work); + } + + spin_lock_bh(&p->fp_work_lock); + } + spin_unlock_bh(&p->fp_work_lock); + set_current_state(TASK_INTERRUPTIBLE); + } + set_current_state(TASK_RUNNING); + + return 0; +} + +static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost) +{ + struct fc_host_statistics *bnx2fc_stats; + struct fc_lport *lport = shost_priv(shost); + struct fcoe_port *port = lport_priv(lport); + struct bnx2fc_hba *hba = port->priv; + struct fcoe_statistics_params *fw_stats; + int rc = 0; + + fw_stats = (struct fcoe_statistics_params *)hba->stats_buffer; + if (!fw_stats) + return NULL; + + bnx2fc_stats = fc_get_host_stats(shost); + + init_completion(&hba->stat_req_done); + if (bnx2fc_send_stat_req(hba)) + return bnx2fc_stats; + rc = wait_for_completion_timeout(&hba->stat_req_done, (2 * HZ)); + if (!rc) { + BNX2FC_HBA_DBG(lport, "FW stat req timed out\n"); + return bnx2fc_stats; + } + bnx2fc_stats->invalid_crc_count += fw_stats->rx_stat1.fc_crc_cnt; + bnx2fc_stats->tx_frames += fw_stats->tx_stat.fcoe_tx_pkt_cnt; + bnx2fc_stats->tx_words += (fw_stats->tx_stat.fcoe_tx_byte_cnt) / 4; + bnx2fc_stats->rx_frames += fw_stats->rx_stat0.fcoe_rx_pkt_cnt; + bnx2fc_stats->rx_words += (fw_stats->rx_stat0.fcoe_rx_byte_cnt) / 4; + + bnx2fc_stats->dumped_frames = 0; + bnx2fc_stats->lip_count = 0; + bnx2fc_stats->nos_count = 0; + bnx2fc_stats->loss_of_sync_count = 0; + bnx2fc_stats->loss_of_signal_count = 0; + bnx2fc_stats->prim_seq_protocol_err_count = 0; + + return bnx2fc_stats; +} + +static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev) +{ + struct fcoe_port *port = lport_priv(lport); + struct bnx2fc_hba *hba = port->priv; + struct Scsi_Host *shost = lport->host; + int rc = 0; + + shost->max_cmd_len = BNX2FC_MAX_CMD_LEN; + shost->max_lun = BNX2FC_MAX_LUN; + shost->max_id = BNX2FC_MAX_FCP_TGT; + shost->max_channel = 0; + if (lport->vport) + shost->transportt = bnx2fc_vport_xport_template; + else + shost->transportt = bnx2fc_transport_template; + + /* Add the new host to SCSI-ml */ + rc = scsi_add_host(lport->host, dev); + if (rc) { + printk(KERN_ERR PFX "Error on scsi_add_host\n"); + return rc; + } + if (!lport->vport) + fc_host_max_npiv_vports(lport->host) = USHRT_MAX; + 
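+	/*
+	 * Build the FC host symbolic name, e.g. "bnx2fc v<version> over
+	 * eth0.100" (the exact strings depend on BNX2FC_NAME, BNX2FC_VERSION
+	 * and the vlan netdev name).
+	 */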
sprintf(fc_host_symbolic_name(lport->host), "%s v%s over %s", + BNX2FC_NAME, BNX2FC_VERSION, + hba->netdev->name); + + return 0; +} + +static int bnx2fc_mfs_update(struct fc_lport *lport) +{ + struct fcoe_port *port = lport_priv(lport); + struct bnx2fc_hba *hba = port->priv; + struct net_device *netdev = hba->netdev; + u32 mfs; + u32 max_mfs; + + mfs = netdev->mtu - (sizeof(struct fcoe_hdr) + + sizeof(struct fcoe_crc_eof)); + max_mfs = BNX2FC_MAX_PAYLOAD + sizeof(struct fc_frame_header); + BNX2FC_HBA_DBG(lport, "mfs = %d, max_mfs = %d\n", mfs, max_mfs); + if (mfs > max_mfs) + mfs = max_mfs; + + /* Adjust mfs to be a multiple of 256 bytes */ + mfs = (((mfs - sizeof(struct fc_frame_header)) / BNX2FC_MIN_PAYLOAD) * + BNX2FC_MIN_PAYLOAD); + mfs = mfs + sizeof(struct fc_frame_header); + + BNX2FC_HBA_DBG(lport, "Set MFS = %d\n", mfs); + if (fc_set_mfs(lport, mfs)) + return -EINVAL; + return 0; +} +static void bnx2fc_link_speed_update(struct fc_lport *lport) +{ + struct fcoe_port *port = lport_priv(lport); + struct bnx2fc_hba *hba = port->priv; + struct net_device *netdev = hba->netdev; + struct ethtool_cmd ecmd = { ETHTOOL_GSET }; + + if (!dev_ethtool_get_settings(netdev, &ecmd)) { + lport->link_supported_speeds &= + ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT); + if (ecmd.supported & (SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full)) + lport->link_supported_speeds |= FC_PORTSPEED_1GBIT; + if (ecmd.supported & SUPPORTED_10000baseT_Full) + lport->link_supported_speeds |= FC_PORTSPEED_10GBIT; + + if (ecmd.speed == SPEED_1000) + lport->link_speed = FC_PORTSPEED_1GBIT; + if (ecmd.speed == SPEED_10000) + lport->link_speed = FC_PORTSPEED_10GBIT; + } + return; +} +static int bnx2fc_link_ok(struct fc_lport *lport) +{ + struct fcoe_port *port = lport_priv(lport); + struct bnx2fc_hba *hba = port->priv; + struct net_device *dev = hba->phys_dev; + int rc = 0; + + if ((dev->flags & IFF_UP) && netif_carrier_ok(dev)) + clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state); + else { + set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state); + rc = -1; + } + return rc; +} + +/** + * bnx2fc_get_link_state - get network link state + * + * @hba: adapter instance pointer + * + * updates adapter structure flag based on netdev state + */ +void bnx2fc_get_link_state(struct bnx2fc_hba *hba) +{ + if (test_bit(__LINK_STATE_NOCARRIER, &hba->netdev->state)) + set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state); + else + clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state); +} + +static int bnx2fc_net_config(struct fc_lport *lport) +{ + struct bnx2fc_hba *hba; + struct fcoe_port *port; + u64 wwnn, wwpn; + + port = lport_priv(lport); + hba = port->priv; + + /* require support for get_pauseparam ethtool op. 
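+	 * This is presumably a simple capability check: a NIC that implements
+	 * pause-parameter control can provide the lossless (PAUSE/PFC)
+	 * behaviour that FCoE traffic relies on.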
*/ + if (!hba->phys_dev->ethtool_ops || + !hba->phys_dev->ethtool_ops->get_pauseparam) + return -EOPNOTSUPP; + + if (bnx2fc_mfs_update(lport)) + return -EINVAL; + + skb_queue_head_init(&port->fcoe_pending_queue); + port->fcoe_pending_queue_active = 0; + setup_timer(&port->timer, fcoe_queue_timer, (unsigned long) lport); + + bnx2fc_link_speed_update(lport); + + if (!lport->vport) { + wwnn = fcoe_wwn_from_mac(hba->ctlr.ctl_src_addr, 1, 0); + BNX2FC_HBA_DBG(lport, "WWNN = 0x%llx\n", wwnn); + fc_set_wwnn(lport, wwnn); + + wwpn = fcoe_wwn_from_mac(hba->ctlr.ctl_src_addr, 2, 0); + BNX2FC_HBA_DBG(lport, "WWPN = 0x%llx\n", wwpn); + fc_set_wwpn(lport, wwpn); + } + + return 0; +} + +static void bnx2fc_destroy_timer(unsigned long data) +{ + struct bnx2fc_hba *hba = (struct bnx2fc_hba *)data; + + BNX2FC_HBA_DBG(hba->ctlr.lp, "ERROR:bnx2fc_destroy_timer - " + "Destroy compl not received!!\n"); + hba->flags |= BNX2FC_FLAG_DESTROY_CMPL; + wake_up_interruptible(&hba->destroy_wait); +} + +/** + * bnx2fc_indicate_netevent - Generic netdev event handler + * + * @context: adapter structure pointer + * @event: event type + * + * Handles NETDEV_UP, NETDEV_DOWN, NETDEV_GOING_DOWN,NETDEV_CHANGE and + * NETDEV_CHANGE_MTU events + */ +static void bnx2fc_indicate_netevent(void *context, unsigned long event) +{ + struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context; + struct fc_lport *lport = hba->ctlr.lp; + struct fc_lport *vport; + u32 link_possible = 1; + + if (!test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) { + BNX2FC_MISC_DBG("driver not ready. event=%s %ld\n", + hba->netdev->name, event); + return; + } + + /* + * ASSUMPTION: + * indicate_netevent cannot be called from cnic unless bnx2fc + * does register_device + */ + BUG_ON(!lport); + + BNX2FC_HBA_DBG(lport, "enter netevent handler - event=%s %ld\n", + hba->netdev->name, event); + + switch (event) { + case NETDEV_UP: + BNX2FC_HBA_DBG(lport, "Port up, adapter_state = %ld\n", + hba->adapter_state); + if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) + printk(KERN_ERR "indicate_netevent: "\ + "adapter is not UP!!\n"); + /* fall thru to update mfs if MTU has changed */ + case NETDEV_CHANGEMTU: + BNX2FC_HBA_DBG(lport, "NETDEV_CHANGEMTU event\n"); + bnx2fc_mfs_update(lport); + mutex_lock(&lport->lp_mutex); + list_for_each_entry(vport, &lport->vports, list) + bnx2fc_mfs_update(vport); + mutex_unlock(&lport->lp_mutex); + break; + + case NETDEV_DOWN: + BNX2FC_HBA_DBG(lport, "Port down\n"); + clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state); + clear_bit(ADAPTER_STATE_UP, &hba->adapter_state); + link_possible = 0; + break; + + case NETDEV_GOING_DOWN: + BNX2FC_HBA_DBG(lport, "Port going down\n"); + set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state); + link_possible = 0; + break; + + case NETDEV_CHANGE: + BNX2FC_HBA_DBG(lport, "NETDEV_CHANGE\n"); + break; + + default: + printk(KERN_ERR PFX "Unkonwn netevent %ld", event); + return; + } + + bnx2fc_link_speed_update(lport); + + if (link_possible && !bnx2fc_link_ok(lport)) { + printk(KERN_ERR "indicate_netevent: call ctlr_link_up\n"); + fcoe_ctlr_link_up(&hba->ctlr); + } else { + printk(KERN_ERR "indicate_netevent: call ctlr_link_down\n"); + if (fcoe_ctlr_link_down(&hba->ctlr)) { + clear_bit(ADAPTER_STATE_READY, &hba->adapter_state); + mutex_lock(&lport->lp_mutex); + list_for_each_entry(vport, &lport->vports, list) + fc_host_port_type(vport->host) = + FC_PORTTYPE_UNKNOWN; + mutex_unlock(&lport->lp_mutex); + fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN; + per_cpu_ptr(lport->dev_stats, + 
get_cpu())->LinkFailureCount++; + put_cpu(); + fcoe_clean_pending_queue(lport); + + init_waitqueue_head(&hba->shutdown_wait); + BNX2FC_HBA_DBG(lport, "indicate_netevent " + "num_ofld_sess = %d\n", + hba->num_ofld_sess); + hba->wait_for_link_down = 1; + BNX2FC_HBA_DBG(lport, "waiting for uploads to " + "compl proc = %s\n", + current->comm); + wait_event_interruptible(hba->shutdown_wait, + (hba->num_ofld_sess == 0)); + BNX2FC_HBA_DBG(lport, "wakeup - num_ofld_sess = %d\n", + hba->num_ofld_sess); + hba->wait_for_link_down = 0; + + if (signal_pending(current)) + flush_signals(current); + } + } +} + +static int bnx2fc_libfc_config(struct fc_lport *lport) +{ + + /* Set the function pointers set by bnx2fc driver */ + memcpy(&lport->tt, &bnx2fc_libfc_fcn_templ, + sizeof(struct libfc_function_template)); + fc_elsct_init(lport); + fc_exch_init(lport); + fc_rport_init(lport); + fc_disc_init(lport); + return 0; +} + +static int bnx2fc_em_config(struct fc_lport *lport) +{ + struct fcoe_port *port = lport_priv(lport); + struct bnx2fc_hba *hba = port->priv; + + if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_MIN_XID, + FCOE_MAX_XID, NULL)) { + printk(KERN_ERR PFX "em_config:fc_exch_mgr_alloc failed\n"); + return -ENOMEM; + } + + hba->cmd_mgr = bnx2fc_cmd_mgr_alloc(hba, BNX2FC_MIN_XID, + BNX2FC_MAX_XID); + + if (!hba->cmd_mgr) { + printk(KERN_ERR PFX "em_config:bnx2fc_cmd_mgr_alloc failed\n"); + fc_exch_mgr_free(lport); + return -ENOMEM; + } + return 0; +} + +static int bnx2fc_lport_config(struct fc_lport *lport) +{ + lport->link_up = 0; + lport->qfull = 0; + lport->max_retry_count = 3; + lport->max_rport_retry_count = 3; + lport->e_d_tov = 2 * 1000; + lport->r_a_tov = 10 * 1000; + + /* REVISIT: enable when supporting tape devices + lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | + FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL); + */ + lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS); + lport->does_npiv = 1; + + memset(&lport->rnid_gen, 0, sizeof(struct fc_els_rnid_gen)); + lport->rnid_gen.rnid_atype = BNX2FC_RNID_HBA; + + /* alloc stats structure */ + if (fc_lport_init_stats(lport)) + return -ENOMEM; + + /* Finish fc_lport configuration */ + fc_lport_config(lport); + + return 0; +} + +/** + * bnx2fc_fip_recv - handle a received FIP frame. + * + * @skb: the received skb + * @dev: associated &net_device + * @ptype: the &packet_type structure which was used to register this handler. + * @orig_dev: original receive &net_device, in case @ dev is a bond. + * + * Returns: 0 for success + */ +static int bnx2fc_fip_recv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *ptype, + struct net_device *orig_dev) +{ + struct bnx2fc_hba *hba; + hba = container_of(ptype, struct bnx2fc_hba, fip_packet_type); + fcoe_ctlr_recv(&hba->ctlr, skb); + return 0; +} + +/** + * bnx2fc_update_src_mac - Update Ethernet MAC filters. + * + * @fip: FCoE controller. + * @old: Unicast MAC address to delete if the MAC is non-zero. + * @new: Unicast MAC address to add. + * + * Remove any previously-set unicast MAC filter. + * Add secondary FCoE MAC address filter for our OUI. 
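+ * Note: as implemented below the parameters are @lport and @addr, and the
+ * function only caches @addr as the port's data source MAC address; no MAC
+ * filters are programmed here.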
+ */ +static void bnx2fc_update_src_mac(struct fc_lport *lport, u8 *addr) +{ + struct fcoe_port *port = lport_priv(lport); + + memcpy(port->data_src_addr, addr, ETH_ALEN); +} + +/** + * bnx2fc_get_src_mac - return the ethernet source address for an lport + * + * @lport: libfc port + */ +static u8 *bnx2fc_get_src_mac(struct fc_lport *lport) +{ + struct fcoe_port *port; + + port = (struct fcoe_port *)lport_priv(lport); + return port->data_src_addr; +} + +/** + * bnx2fc_fip_send - send an Ethernet-encapsulated FIP frame. + * + * @fip: FCoE controller. + * @skb: FIP Packet. + */ +static void bnx2fc_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb) +{ + skb->dev = bnx2fc_from_ctlr(fip)->netdev; + dev_queue_xmit(skb); +} + +static int bnx2fc_vport_create(struct fc_vport *vport, bool disabled) +{ + struct Scsi_Host *shost = vport_to_shost(vport); + struct fc_lport *n_port = shost_priv(shost); + struct fcoe_port *port = lport_priv(n_port); + struct bnx2fc_hba *hba = port->priv; + struct net_device *netdev = hba->netdev; + struct fc_lport *vn_port; + + if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) { + printk(KERN_ERR PFX "vn ports cannot be created on" + "this hba\n"); + return -EIO; + } + mutex_lock(&bnx2fc_dev_lock); + vn_port = bnx2fc_if_create(hba, &vport->dev, 1); + mutex_unlock(&bnx2fc_dev_lock); + + if (IS_ERR(vn_port)) { + printk(KERN_ERR PFX "bnx2fc_vport_create (%s) failed\n", + netdev->name); + return -EIO; + } + + if (disabled) { + fc_vport_set_state(vport, FC_VPORT_DISABLED); + } else { + vn_port->boot_time = jiffies; + fc_lport_init(vn_port); + fc_fabric_login(vn_port); + fc_vport_setlink(vn_port); + } + return 0; +} + +static int bnx2fc_vport_destroy(struct fc_vport *vport) +{ + struct Scsi_Host *shost = vport_to_shost(vport); + struct fc_lport *n_port = shost_priv(shost); + struct fc_lport *vn_port = vport->dd_data; + struct fcoe_port *port = lport_priv(vn_port); + + mutex_lock(&n_port->lp_mutex); + list_del(&vn_port->list); + mutex_unlock(&n_port->lp_mutex); + queue_work(bnx2fc_wq, &port->destroy_work); + return 0; +} + +static int bnx2fc_vport_disable(struct fc_vport *vport, bool disable) +{ + struct fc_lport *lport = vport->dd_data; + + if (disable) { + fc_vport_set_state(vport, FC_VPORT_DISABLED); + fc_fabric_logoff(lport); + } else { + lport->boot_time = jiffies; + fc_fabric_login(lport); + fc_vport_setlink(lport); + } + return 0; +} + + +static int bnx2fc_netdev_setup(struct bnx2fc_hba *hba) +{ + struct net_device *netdev = hba->netdev; + struct net_device *physdev = hba->phys_dev; + struct netdev_hw_addr *ha; + int sel_san_mac = 0; + + /* Do not support for bonding device */ + if ((netdev->priv_flags & IFF_MASTER_ALB) || + (netdev->priv_flags & IFF_SLAVE_INACTIVE) || + (netdev->priv_flags & IFF_MASTER_8023AD)) { + return -EOPNOTSUPP; + } + + /* setup Source MAC Address */ + rcu_read_lock(); + for_each_dev_addr(physdev, ha) { + BNX2FC_MISC_DBG("net_config: ha->type = %d, fip_mac = ", + ha->type); + printk(KERN_INFO "%2x:%2x:%2x:%2x:%2x:%2x\n", ha->addr[0], + ha->addr[1], ha->addr[2], ha->addr[3], + ha->addr[4], ha->addr[5]); + + if ((ha->type == NETDEV_HW_ADDR_T_SAN) && + (is_valid_ether_addr(ha->addr))) { + memcpy(hba->ctlr.ctl_src_addr, ha->addr, ETH_ALEN); + sel_san_mac = 1; + BNX2FC_MISC_DBG("Found SAN MAC\n"); + } + } + rcu_read_unlock(); + + if (!sel_san_mac) + return -ENODEV; + + hba->fip_packet_type.func = bnx2fc_fip_recv; + hba->fip_packet_type.type = htons(ETH_P_FIP); + hba->fip_packet_type.dev = netdev; + dev_add_pack(&hba->fip_packet_type); + + 
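+	/* Also listen for FCoE (ETH_P_FCOE) frames on the same netdev */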
hba->fcoe_packet_type.func = bnx2fc_rcv; + hba->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE); + hba->fcoe_packet_type.dev = netdev; + dev_add_pack(&hba->fcoe_packet_type); + + return 0; +} + +static int bnx2fc_attach_transport(void) +{ + bnx2fc_transport_template = + fc_attach_transport(&bnx2fc_transport_function); + + if (bnx2fc_transport_template == NULL) { + printk(KERN_ERR PFX "Failed to attach FC transport\n"); + return -ENODEV; + } + + bnx2fc_vport_xport_template = + fc_attach_transport(&bnx2fc_vport_xport_function); + if (bnx2fc_vport_xport_template == NULL) { + printk(KERN_ERR PFX + "Failed to attach FC transport for vport\n"); + fc_release_transport(bnx2fc_transport_template); + bnx2fc_transport_template = NULL; + return -ENODEV; + } + return 0; +} +static void bnx2fc_release_transport(void) +{ + fc_release_transport(bnx2fc_transport_template); + fc_release_transport(bnx2fc_vport_xport_template); + bnx2fc_transport_template = NULL; + bnx2fc_vport_xport_template = NULL; +} + +static void bnx2fc_interface_release(struct kref *kref) +{ + struct bnx2fc_hba *hba; + struct net_device *netdev; + struct net_device *phys_dev; + + hba = container_of(kref, struct bnx2fc_hba, kref); + BNX2FC_HBA_DBG(hba->ctlr.lp, "Interface is being released\n"); + + netdev = hba->netdev; + phys_dev = hba->phys_dev; + + /* tear-down FIP controller */ + if (test_and_clear_bit(BNX2FC_CTLR_INIT_DONE, &hba->init_done)) + fcoe_ctlr_destroy(&hba->ctlr); + + /* Free the command manager */ + if (hba->cmd_mgr) { + bnx2fc_cmd_mgr_free(hba->cmd_mgr); + hba->cmd_mgr = NULL; + } + dev_put(netdev); + module_put(THIS_MODULE); +} + +static inline void bnx2fc_interface_get(struct bnx2fc_hba *hba) +{ + kref_get(&hba->kref); +} + +static inline void bnx2fc_interface_put(struct bnx2fc_hba *hba) +{ + kref_put(&hba->kref, bnx2fc_interface_release); +} +static void bnx2fc_interface_destroy(struct bnx2fc_hba *hba) +{ + bnx2fc_unbind_pcidev(hba); + kfree(hba); +} + +/** + * bnx2fc_interface_create - create a new fcoe instance + * + * @cnic: pointer to cnic device + * + * Creates a new FCoE instance on the given device which include allocating + * hba structure, scsi_host and lport structures. 
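+ * As implemented below, this allocates and initializes the hba structure and
+ * binds the underlying PCI device; the Scsi_Host and lport are created later
+ * through bnx2fc_if_create().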
+ */ +static struct bnx2fc_hba *bnx2fc_interface_create(struct cnic_dev *cnic) +{ + struct bnx2fc_hba *hba; + int rc; + + hba = kzalloc(sizeof(*hba), GFP_KERNEL); + if (!hba) { + printk(KERN_ERR PFX "Unable to allocate hba structure\n"); + return NULL; + } + spin_lock_init(&hba->hba_lock); + mutex_init(&hba->hba_mutex); + + hba->cnic = cnic; + rc = bnx2fc_bind_pcidev(hba); + if (rc) + goto bind_err; + hba->phys_dev = cnic->netdev; + /* will get overwritten after we do vlan discovery */ + hba->netdev = hba->phys_dev; + + init_waitqueue_head(&hba->shutdown_wait); + init_waitqueue_head(&hba->destroy_wait); + + return hba; +bind_err: + printk(KERN_ERR PFX "create_interface: bind error\n"); + kfree(hba); + return NULL; +} + +static int bnx2fc_interface_setup(struct bnx2fc_hba *hba, + enum fip_state fip_mode) +{ + int rc = 0; + struct net_device *netdev = hba->netdev; + struct fcoe_ctlr *fip = &hba->ctlr; + + dev_hold(netdev); + kref_init(&hba->kref); + + hba->flags = 0; + + /* Initialize FIP */ + memset(fip, 0, sizeof(*fip)); + fcoe_ctlr_init(fip, fip_mode); + hba->ctlr.send = bnx2fc_fip_send; + hba->ctlr.update_mac = bnx2fc_update_src_mac; + hba->ctlr.get_src_addr = bnx2fc_get_src_mac; + set_bit(BNX2FC_CTLR_INIT_DONE, &hba->init_done); + + rc = bnx2fc_netdev_setup(hba); + if (rc) + goto setup_err; + + hba->next_conn_id = 0; + + memset(hba->tgt_ofld_list, 0, sizeof(hba->tgt_ofld_list)); + hba->num_ofld_sess = 0; + + return 0; + +setup_err: + fcoe_ctlr_destroy(&hba->ctlr); + dev_put(netdev); + bnx2fc_interface_put(hba); + return rc; +} + +/** + * bnx2fc_if_create - Create FCoE instance on a given interface + * + * @hba: FCoE interface to create a local port on + * @parent: Device pointer to be the parent in sysfs for the SCSI host + * @npiv: Indicates if the port is vport or not + * + * Creates a fc_lport instance and a Scsi_Host instance and configure them. 
+ * + * Returns: Allocated fc_lport or an error pointer + */ +static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba, + struct device *parent, int npiv) +{ + struct fc_lport *lport = NULL; + struct fcoe_port *port; + struct Scsi_Host *shost; + struct fc_vport *vport = dev_to_vport(parent); + int rc = 0; + + /* Allocate Scsi_Host structure */ + if (!npiv) { + lport = libfc_host_alloc(&bnx2fc_shost_template, + sizeof(struct fcoe_port)); + } else { + lport = libfc_vport_create(vport, + sizeof(struct fcoe_port)); + } + + if (!lport) { + printk(KERN_ERR PFX "could not allocate scsi host structure\n"); + return NULL; + } + shost = lport->host; + port = lport_priv(lport); + port->lport = lport; + port->priv = hba; + INIT_WORK(&port->destroy_work, bnx2fc_destroy_work); + + /* Configure fcoe_port */ + rc = bnx2fc_lport_config(lport); + if (rc) + goto lp_config_err; + + if (npiv) { + vport = dev_to_vport(parent); + printk(KERN_ERR PFX "Setting vport names, 0x%llX 0x%llX\n", + vport->node_name, vport->port_name); + fc_set_wwnn(lport, vport->node_name); + fc_set_wwpn(lport, vport->port_name); + } + /* Configure netdev and networking properties of the lport */ + rc = bnx2fc_net_config(lport); + if (rc) { + printk(KERN_ERR PFX "Error on bnx2fc_net_config\n"); + goto lp_config_err; + } + + rc = bnx2fc_shost_config(lport, parent); + if (rc) { + printk(KERN_ERR PFX "Couldnt configure shost for %s\n", + hba->netdev->name); + goto lp_config_err; + } + + /* Initialize the libfc library */ + rc = bnx2fc_libfc_config(lport); + if (rc) { + printk(KERN_ERR PFX "Couldnt configure libfc\n"); + goto shost_err; + } + fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN; + + /* Allocate exchange manager */ + if (!npiv) { + rc = bnx2fc_em_config(lport); + if (rc) { + printk(KERN_ERR PFX "Error on bnx2fc_em_config\n"); + goto shost_err; + } + } + + bnx2fc_interface_get(hba); + return lport; + +shost_err: + scsi_remove_host(shost); +lp_config_err: + scsi_host_put(lport->host); + return NULL; +} + +static void bnx2fc_netdev_cleanup(struct bnx2fc_hba *hba) +{ + /* Dont listen for Ethernet packets anymore */ + __dev_remove_pack(&hba->fcoe_packet_type); + __dev_remove_pack(&hba->fip_packet_type); + synchronize_net(); +} + +static void bnx2fc_if_destroy(struct fc_lport *lport) +{ + struct fcoe_port *port = lport_priv(lport); + struct bnx2fc_hba *hba = port->priv; + + BNX2FC_HBA_DBG(hba->ctlr.lp, "ENTERED bnx2fc_if_destroy\n"); + /* Stop the transmit retry timer */ + del_timer_sync(&port->timer); + + /* Free existing transmit skbs */ + fcoe_clean_pending_queue(lport); + + bnx2fc_interface_put(hba); + + /* Free queued packets for the receive thread */ + bnx2fc_clean_rx_queue(lport); + + /* Detach from scsi-ml */ + fc_remove_host(lport->host); + scsi_remove_host(lport->host); + + /* + * Note that only the physical lport will have the exchange manager. + * for vports, this function is NOP + */ + fc_exch_mgr_free(lport); + + /* Free memory used by statistical counters */ + fc_lport_free_stats(lport); + + /* Release Scsi_Host */ + scsi_host_put(lport->host); +} + +/** + * bnx2fc_destroy - Destroy a bnx2fc FCoE interface + * + * @buffer: The name of the Ethernet interface to be destroyed + * @kp: The associated kernel parameter + * + * Called from sysfs. 
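+ * Note: the function below takes the vlan net_device directly; the
+ * @buffer/@kp parameters documented above do not match its signature.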
+ * + * Returns: 0 for success + */ +static int bnx2fc_destroy(struct net_device *netdev) +{ + struct bnx2fc_hba *hba = NULL; + struct net_device *phys_dev; + int rc = 0; + + if (!rtnl_trylock()) + return restart_syscall(); + + mutex_lock(&bnx2fc_dev_lock); +#ifdef CONFIG_SCSI_BNX2X_FCOE_MODULE + if (THIS_MODULE->state != MODULE_STATE_LIVE) { + rc = -ENODEV; + goto netdev_err; + } +#endif + /* obtain physical netdev */ + if (netdev->priv_flags & IFF_802_1Q_VLAN) + phys_dev = vlan_dev_real_dev(netdev); + else { + printk(KERN_ERR PFX "Not a vlan device\n"); + rc = -ENODEV; + goto netdev_err; + } + + hba = bnx2fc_hba_lookup(phys_dev); + if (!hba || !hba->ctlr.lp) { + rc = -ENODEV; + printk(KERN_ERR PFX "bnx2fc_destroy: hba or lport not found\n"); + goto netdev_err; + } + + if (!test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) { + printk(KERN_ERR PFX "bnx2fc_destroy: Create not called\n"); + goto netdev_err; + } + + bnx2fc_netdev_cleanup(hba); + + bnx2fc_stop(hba); + + bnx2fc_if_destroy(hba->ctlr.lp); + + destroy_workqueue(hba->timer_work_queue); + + if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) + bnx2fc_fw_destroy(hba); + + clear_bit(BNX2FC_CREATE_DONE, &hba->init_done); +netdev_err: + mutex_unlock(&bnx2fc_dev_lock); + rtnl_unlock(); + return rc; +} + +static void bnx2fc_destroy_work(struct work_struct *work) +{ + struct fcoe_port *port; + struct fc_lport *lport; + + port = container_of(work, struct fcoe_port, destroy_work); + lport = port->lport; + + BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n"); + + bnx2fc_port_shutdown(lport); + rtnl_lock(); + mutex_lock(&bnx2fc_dev_lock); + bnx2fc_if_destroy(lport); + mutex_unlock(&bnx2fc_dev_lock); + rtnl_unlock(); +} + +static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba) +{ + bnx2fc_free_fw_resc(hba); + bnx2fc_free_task_ctx(hba); +} + +/** + * bnx2fc_bind_adapter_devices - binds bnx2fc adapter with the associated + * pci structure + * + * @hba: Adapter instance + */ +static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba) +{ + if (bnx2fc_setup_task_ctx(hba)) + goto mem_err; + + if (bnx2fc_setup_fw_resc(hba)) + goto mem_err; + + return 0; +mem_err: + bnx2fc_unbind_adapter_devices(hba); + return -ENOMEM; +} + +static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba) +{ + struct cnic_dev *cnic; + + if (!hba->cnic) { + printk(KERN_ERR PFX "cnic is NULL\n"); + return -ENODEV; + } + cnic = hba->cnic; + hba->pcidev = cnic->pcidev; + if (hba->pcidev) + pci_dev_get(hba->pcidev); + + return 0; +} + +static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba) +{ + if (hba->pcidev) + pci_dev_put(hba->pcidev); + hba->pcidev = NULL; +} + + + +/** + * bnx2fc_ulp_start - cnic callback to initialize & start adapter instance + * + * @handle: transport handle pointing to adapter struture + * + * This function maps adapter structure to pcidev structure and initiates + * firmware handshake to enable/initialize on-chip FCoE components. + * This bnx2fc - cnic interface api callback is used after following + * conditions are met - + * a) underlying network interface is up (marked by event NETDEV_UP + * from netdev + * b) bnx2fc adatper structure is registered. 
+ */ +static void bnx2fc_ulp_start(void *handle) +{ + struct bnx2fc_hba *hba = handle; + struct fc_lport *lport = hba->ctlr.lp; + + BNX2FC_MISC_DBG("Entered %s\n", __func__); + mutex_lock(&bnx2fc_dev_lock); + + if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) + goto start_disc; + + if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) + bnx2fc_fw_init(hba); + +start_disc: + mutex_unlock(&bnx2fc_dev_lock); + + BNX2FC_MISC_DBG("bnx2fc started.\n"); + + /* Kick off Fabric discovery*/ + if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) { + printk(KERN_ERR PFX "ulp_init: start discovery\n"); + lport->tt.frame_send = bnx2fc_xmit; + bnx2fc_start_disc(hba); + } +} + +static void bnx2fc_port_shutdown(struct fc_lport *lport) +{ + BNX2FC_MISC_DBG("Entered %s\n", __func__); + fc_fabric_logoff(lport); + fc_lport_destroy(lport); +} + +static void bnx2fc_stop(struct bnx2fc_hba *hba) +{ + struct fc_lport *lport; + struct fc_lport *vport; + + BNX2FC_MISC_DBG("ENTERED %s - init_done = %ld\n", __func__, + hba->init_done); + if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done) && + test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) { + lport = hba->ctlr.lp; + bnx2fc_port_shutdown(lport); + BNX2FC_HBA_DBG(lport, "bnx2fc_stop: waiting for %d " + "offloaded sessions\n", + hba->num_ofld_sess); + wait_event_interruptible(hba->shutdown_wait, + (hba->num_ofld_sess == 0)); + mutex_lock(&lport->lp_mutex); + list_for_each_entry(vport, &lport->vports, list) + fc_host_port_type(vport->host) = FC_PORTTYPE_UNKNOWN; + mutex_unlock(&lport->lp_mutex); + fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN; + fcoe_ctlr_link_down(&hba->ctlr); + fcoe_clean_pending_queue(lport); + + mutex_lock(&hba->hba_mutex); + clear_bit(ADAPTER_STATE_UP, &hba->adapter_state); + clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state); + + clear_bit(ADAPTER_STATE_READY, &hba->adapter_state); + mutex_unlock(&hba->hba_mutex); + } +} + +static int bnx2fc_fw_init(struct bnx2fc_hba *hba) +{ +#define BNX2FC_INIT_POLL_TIME (1000 / HZ) + int rc = -1; + int i = HZ; + + rc = bnx2fc_bind_adapter_devices(hba); + if (rc) { + printk(KERN_ALERT PFX + "bnx2fc_bind_adapter_devices failed - rc = %d\n", rc); + goto err_out; + } + + rc = bnx2fc_send_fw_fcoe_init_msg(hba); + if (rc) { + printk(KERN_ALERT PFX + "bnx2fc_send_fw_fcoe_init_msg failed - rc = %d\n", rc); + goto err_unbind; + } + + /* + * Wait until the adapter init message is complete, and adapter + * state is UP. + */ + while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--) + msleep(BNX2FC_INIT_POLL_TIME); + + if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) { + printk(KERN_ERR PFX "bnx2fc_start: %s failed to initialize. 
" + "Ignoring...\n", + hba->cnic->netdev->name); + rc = -1; + goto err_unbind; + } + + + /* Mark HBA to indicate that the FW INIT is done */ + set_bit(BNX2FC_FW_INIT_DONE, &hba->init_done); + return 0; + +err_unbind: + bnx2fc_unbind_adapter_devices(hba); +err_out: + return rc; +} + +static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba) +{ + if (test_and_clear_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) { + if (bnx2fc_send_fw_fcoe_destroy_msg(hba) == 0) { + init_timer(&hba->destroy_timer); + hba->destroy_timer.expires = BNX2FC_FW_TIMEOUT + + jiffies; + hba->destroy_timer.function = bnx2fc_destroy_timer; + hba->destroy_timer.data = (unsigned long)hba; + add_timer(&hba->destroy_timer); + wait_event_interruptible(hba->destroy_wait, + (hba->flags & + BNX2FC_FLAG_DESTROY_CMPL)); + /* This should never happen */ + if (signal_pending(current)) + flush_signals(current); + + del_timer_sync(&hba->destroy_timer); + } + bnx2fc_unbind_adapter_devices(hba); + } +} + +/** + * bnx2fc_ulp_stop - cnic callback to shutdown adapter instance + * + * @handle: transport handle pointing to adapter structure + * + * Driver checks if adapter is already in shutdown mode, if not start + * the shutdown process. + */ +static void bnx2fc_ulp_stop(void *handle) +{ + struct bnx2fc_hba *hba = (struct bnx2fc_hba *)handle; + + printk(KERN_ERR "ULP_STOP\n"); + + mutex_lock(&bnx2fc_dev_lock); + bnx2fc_stop(hba); + bnx2fc_fw_destroy(hba); + mutex_unlock(&bnx2fc_dev_lock); +} + +static void bnx2fc_start_disc(struct bnx2fc_hba *hba) +{ + struct fc_lport *lport; + int wait_cnt = 0; + + BNX2FC_MISC_DBG("Entered %s\n", __func__); + /* Kick off FIP/FLOGI */ + if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) { + printk(KERN_ERR PFX "Init not done yet\n"); + return; + } + + lport = hba->ctlr.lp; + BNX2FC_HBA_DBG(lport, "calling fc_fabric_login\n"); + + if (!bnx2fc_link_ok(lport)) { + BNX2FC_HBA_DBG(lport, "ctlr_link_up\n"); + fcoe_ctlr_link_up(&hba->ctlr); + fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT; + set_bit(ADAPTER_STATE_READY, &hba->adapter_state); + } + + /* wait for the FCF to be selected before issuing FLOGI */ + while (!hba->ctlr.sel_fcf) { + msleep(250); + /* give up after 3 secs */ + if (++wait_cnt > 12) + break; + } + fc_lport_init(lport); + fc_fabric_login(lport); +} + + +/** + * bnx2fc_ulp_init - Initialize an adapter instance + * + * @dev : cnic device handle + * Called from cnic_register_driver() context to initialize all + * enumerated cnic devices. This routine allocates adapter structure + * and other device specific resources. 
+ */ +static void bnx2fc_ulp_init(struct cnic_dev *dev) +{ + struct bnx2fc_hba *hba; + int rc = 0; + + BNX2FC_MISC_DBG("Entered %s\n", __func__); + /* bnx2fc works only when bnx2x is loaded */ + if (!test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { + printk(KERN_ERR PFX "bnx2fc FCoE not supported on %s," + " flags: %lx\n", + dev->netdev->name, dev->flags); + return; + } + + /* Configure FCoE interface */ + hba = bnx2fc_interface_create(dev); + if (!hba) { + printk(KERN_ERR PFX "hba initialization failed\n"); + return; + } + + /* Add HBA to the adapter list */ + mutex_lock(&bnx2fc_dev_lock); + list_add_tail(&hba->link, &adapter_list); + adapter_count++; + mutex_unlock(&bnx2fc_dev_lock); + + clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic); + rc = dev->register_device(dev, CNIC_ULP_FCOE, + (void *) hba); + if (rc) + printk(KERN_ALERT PFX "register_device failed, rc = %d\n", rc); + else + set_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic); +} + + +static int bnx2fc_disable(struct net_device *netdev) +{ + struct bnx2fc_hba *hba; + struct net_device *phys_dev; + struct ethtool_drvinfo drvinfo; + int rc = 0; + + if (!rtnl_trylock()) { + printk(KERN_ERR PFX "retrying for rtnl_lock\n"); + return -EIO; + } + + mutex_lock(&bnx2fc_dev_lock); + + if (THIS_MODULE->state != MODULE_STATE_LIVE) { + rc = -ENODEV; + goto nodev; + } + + /* obtain physical netdev */ + if (netdev->priv_flags & IFF_802_1Q_VLAN) + phys_dev = vlan_dev_real_dev(netdev); + else { + printk(KERN_ERR PFX "Not a vlan device\n"); + rc = -ENODEV; + goto nodev; + } + + /* verify if the physical device is a netxtreme2 device */ + if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) { + memset(&drvinfo, 0, sizeof(drvinfo)); + phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo); + if (strcmp(drvinfo.driver, "bnx2x")) { + printk(KERN_ERR PFX "Not a netxtreme2 device\n"); + rc = -ENODEV; + goto nodev; + } + } else { + printk(KERN_ERR PFX "unable to obtain drv_info\n"); + rc = -ENODEV; + goto nodev; + } + + printk(KERN_ERR PFX "phys_dev is netxtreme2 device\n"); + + /* obtain hba and initialize rest of the structure */ + hba = bnx2fc_hba_lookup(phys_dev); + if (!hba || !hba->ctlr.lp) { + rc = -ENODEV; + printk(KERN_ERR PFX "bnx2fc_disable: hba or lport not found\n"); + } else { + fcoe_ctlr_link_down(&hba->ctlr); + fcoe_clean_pending_queue(hba->ctlr.lp); + } + +nodev: + mutex_unlock(&bnx2fc_dev_lock); + rtnl_unlock(); + return rc; +} + + +static int bnx2fc_enable(struct net_device *netdev) +{ + struct bnx2fc_hba *hba; + struct net_device *phys_dev; + struct ethtool_drvinfo drvinfo; + int rc = 0; + + if (!rtnl_trylock()) { + printk(KERN_ERR PFX "retrying for rtnl_lock\n"); + return -EIO; + } + + BNX2FC_MISC_DBG("Entered %s\n", __func__); + mutex_lock(&bnx2fc_dev_lock); + + if (THIS_MODULE->state != MODULE_STATE_LIVE) { + rc = -ENODEV; + goto nodev; + } + + /* obtain physical netdev */ + if (netdev->priv_flags & IFF_802_1Q_VLAN) + phys_dev = vlan_dev_real_dev(netdev); + else { + printk(KERN_ERR PFX "Not a vlan device\n"); + rc = -ENODEV; + goto nodev; + } + /* verify if the physical device is a netxtreme2 device */ + if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) { + memset(&drvinfo, 0, sizeof(drvinfo)); + phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo); + if (strcmp(drvinfo.driver, "bnx2x")) { + printk(KERN_ERR PFX "Not a netxtreme2 device\n"); + rc = -ENODEV; + goto nodev; + } + } else { + printk(KERN_ERR PFX "unable to obtain drv_info\n"); + rc = -ENODEV; + goto nodev; + } + + /* obtain hba and 
initialize rest of the structure */ + hba = bnx2fc_hba_lookup(phys_dev); + if (!hba || !hba->ctlr.lp) { + rc = -ENODEV; + printk(KERN_ERR PFX "bnx2fc_enable: hba or lport not found\n"); + } else if (!bnx2fc_link_ok(hba->ctlr.lp)) + fcoe_ctlr_link_up(&hba->ctlr); + +nodev: + mutex_unlock(&bnx2fc_dev_lock); + rtnl_unlock(); + return rc; +} + +/** + * bnx2fc_create - Create bnx2fc FCoE interface + * + * @buffer: The name of Ethernet interface to create on + * @kp: The associated kernel param + * + * Called from sysfs. + * + * Returns: 0 for success + */ +static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode) +{ + struct bnx2fc_hba *hba; + struct net_device *phys_dev; + struct fc_lport *lport; + struct ethtool_drvinfo drvinfo; + int rc = 0; + int vlan_id; + + BNX2FC_MISC_DBG("Entered bnx2fc_create\n"); + if (fip_mode != FIP_MODE_FABRIC) { + printk(KERN_ERR "fip mode not FABRIC\n"); + return -EIO; + } + + if (!rtnl_trylock()) { + printk(KERN_ERR "trying for rtnl_lock\n"); + return -EIO; + } + mutex_lock(&bnx2fc_dev_lock); + +#ifdef CONFIG_SCSI_BNX2X_FCOE_MODULE + if (THIS_MODULE->state != MODULE_STATE_LIVE) { + rc = -ENODEV; + goto mod_err; + } +#endif + + if (!try_module_get(THIS_MODULE)) { + rc = -EINVAL; + goto mod_err; + } + + /* obtain physical netdev */ + if (netdev->priv_flags & IFF_802_1Q_VLAN) { + phys_dev = vlan_dev_real_dev(netdev); + vlan_id = vlan_dev_vlan_id(netdev); + } else { + printk(KERN_ERR PFX "Not a vlan device\n"); + rc = -EINVAL; + goto netdev_err; + } + /* verify if the physical device is a netxtreme2 device */ + if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) { + memset(&drvinfo, 0, sizeof(drvinfo)); + phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo); + if (strcmp(drvinfo.driver, "bnx2x")) { + printk(KERN_ERR PFX "Not a netxtreme2 device\n"); + rc = -EINVAL; + goto netdev_err; + } + } else { + printk(KERN_ERR PFX "unable to obtain drv_info\n"); + rc = -EINVAL; + goto netdev_err; + } + + /* obtain hba and initialize rest of the structure */ + hba = bnx2fc_hba_lookup(phys_dev); + if (!hba) { + rc = -ENODEV; + printk(KERN_ERR PFX "bnx2fc_create: hba not found\n"); + goto netdev_err; + } + + if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) { + rc = bnx2fc_fw_init(hba); + if (rc) + goto netdev_err; + } + + if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) { + rc = -EEXIST; + goto netdev_err; + } + + /* update netdev with vlan netdev */ + hba->netdev = netdev; + hba->vlan_id = vlan_id; + hba->vlan_enabled = 1; + + rc = bnx2fc_interface_setup(hba, fip_mode); + if (rc) { + printk(KERN_ERR PFX "bnx2fc_interface_setup failed\n"); + goto ifput_err; + } + + hba->timer_work_queue = + create_singlethread_workqueue("bnx2fc_timer_wq"); + if (!hba->timer_work_queue) { + printk(KERN_ERR PFX "ulp_init could not create timer_wq\n"); + rc = -EINVAL; + goto ifput_err; + } + + lport = bnx2fc_if_create(hba, &hba->pcidev->dev, 0); + if (!lport) { + printk(KERN_ERR PFX "Failed to create interface (%s)\n", + netdev->name); + bnx2fc_netdev_cleanup(hba); + rc = -EINVAL; + goto if_create_err; + } + + lport->boot_time = jiffies; + + /* Make this master N_port */ + hba->ctlr.lp = lport; + + set_bit(BNX2FC_CREATE_DONE, &hba->init_done); + printk(KERN_ERR PFX "create: START DISC\n"); + bnx2fc_start_disc(hba); + /* + * Release from kref_init in bnx2fc_interface_setup, on success + * lport should be holding a reference taken in bnx2fc_if_create + */ + bnx2fc_interface_put(hba); + /* put netdev that was held while calling dev_get_by_name */ + 
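+	/*
+	 * (No explicit dev_put() follows here; the netdev reference taken in
+	 * bnx2fc_interface_setup() is dropped later through
+	 * bnx2fc_interface_put()/bnx2fc_interface_release().)
+	 */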
mutex_unlock(&bnx2fc_dev_lock); + rtnl_unlock(); + return 0; + +if_create_err: + destroy_workqueue(hba->timer_work_queue); +ifput_err: + bnx2fc_interface_put(hba); +netdev_err: + module_put(THIS_MODULE); +mod_err: + mutex_unlock(&bnx2fc_dev_lock); + rtnl_unlock(); + return rc; +} + +/** + * bnx2fc_find_hba_for_cnic - maps cnic instance to bnx2fc adapter instance + * + * @cnic: Pointer to cnic device instance + * + **/ +static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic) +{ + struct list_head *list; + struct list_head *temp; + struct bnx2fc_hba *hba; + + /* Called with bnx2fc_dev_lock held */ + list_for_each_safe(list, temp, &adapter_list) { + hba = (struct bnx2fc_hba *)list; + if (hba->cnic == cnic) + return hba; + } + return NULL; +} + +static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev) +{ + struct list_head *list; + struct list_head *temp; + struct bnx2fc_hba *hba; + + /* Called with bnx2fc_dev_lock held */ + list_for_each_safe(list, temp, &adapter_list) { + hba = (struct bnx2fc_hba *)list; + if (hba->phys_dev == phys_dev) + return hba; + } + printk(KERN_ERR PFX "hba_lookup: hba NULL\n"); + return NULL; +} + +/** + * bnx2fc_ulp_exit - shuts down adapter instance and frees all resources + * + * @dev cnic device handle + */ +static void bnx2fc_ulp_exit(struct cnic_dev *dev) +{ + struct bnx2fc_hba *hba; + + BNX2FC_MISC_DBG("Entered bnx2fc_ulp_exit\n"); + + if (!test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { + printk(KERN_ERR PFX "bnx2fc port check: %s, flags: %lx\n", + dev->netdev->name, dev->flags); + return; + } + + mutex_lock(&bnx2fc_dev_lock); + hba = bnx2fc_find_hba_for_cnic(dev); + if (!hba) { + printk(KERN_ERR PFX "bnx2fc_ulp_exit: hba not found, dev 0%p\n", + dev); + mutex_unlock(&bnx2fc_dev_lock); + return; + } + + list_del_init(&hba->link); + adapter_count--; + + if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) { + /* destroy not called yet, move to quiesced list */ + bnx2fc_netdev_cleanup(hba); + bnx2fc_if_destroy(hba->ctlr.lp); + } + mutex_unlock(&bnx2fc_dev_lock); + + bnx2fc_ulp_stop(hba); + /* unregister cnic device */ + if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic)) + hba->cnic->unregister_device(hba->cnic, CNIC_ULP_FCOE); + bnx2fc_interface_destroy(hba); +} + +/** + * bnx2fc_fcoe_reset - Resets the fcoe + * + * @shost: shost the reset is from + * + * Returns: always 0 + */ +static int bnx2fc_fcoe_reset(struct Scsi_Host *shost) +{ + struct fc_lport *lport = shost_priv(shost); + fc_lport_reset(lport); + return 0; +} + + +static bool bnx2fc_match(struct net_device *netdev) +{ + mutex_lock(&bnx2fc_dev_lock); + if (netdev->priv_flags & IFF_802_1Q_VLAN) { + struct net_device *phys_dev = vlan_dev_real_dev(netdev); + + if (bnx2fc_hba_lookup(phys_dev)) { + mutex_unlock(&bnx2fc_dev_lock); + return true; + } + } + mutex_unlock(&bnx2fc_dev_lock); + return false; +} + + +static struct fcoe_transport bnx2fc_transport = { + .name = {"bnx2fc"}, + .attached = false, + .list = LIST_HEAD_INIT(bnx2fc_transport.list), + .match = bnx2fc_match, + .create = bnx2fc_create, + .destroy = bnx2fc_destroy, + .enable = bnx2fc_enable, + .disable = bnx2fc_disable, +}; + +/** + * bnx2fc_percpu_thread_create - Create a receive thread for an + * online CPU + * + * @cpu: cpu index for the online cpu + */ +static void bnx2fc_percpu_thread_create(unsigned int cpu) +{ + struct bnx2fc_percpu_s *p; + struct task_struct *thread; + + p = &per_cpu(bnx2fc_percpu, cpu); + + thread = kthread_create(bnx2fc_percpu_io_thread, + (void *)p, + "bnx2fc_thread/%d", 
cpu); + /* bind thread to the cpu */ + if (likely(!IS_ERR(p->iothread))) { + kthread_bind(thread, cpu); + p->iothread = thread; + wake_up_process(thread); + } +} + +static void bnx2fc_percpu_thread_destroy(unsigned int cpu) +{ + struct bnx2fc_percpu_s *p; + struct task_struct *thread; + struct bnx2fc_work *work, *tmp; + LIST_HEAD(work_list); + + BNX2FC_MISC_DBG("destroying io thread for CPU %d\n", cpu); + + /* Prevent any new work from being queued for this CPU */ + p = &per_cpu(bnx2fc_percpu, cpu); + spin_lock_bh(&p->fp_work_lock); + thread = p->iothread; + p->iothread = NULL; + + + /* Free all work in the list */ + list_for_each_entry_safe(work, tmp, &work_list, list) { + list_del_init(&work->list); + bnx2fc_process_cq_compl(work->tgt, work->wqe); + kfree(work); + } + + spin_unlock_bh(&p->fp_work_lock); + + if (thread) + kthread_stop(thread); +} + +/** + * bnx2fc_cpu_callback - Handler for CPU hotplug events + * + * @nfb: The callback data block + * @action: The event triggering the callback + * @hcpu: The index of the CPU that the event is for + * + * This creates or destroys per-CPU data for fcoe + * + * Returns NOTIFY_OK always. + */ +static int bnx2fc_cpu_callback(struct notifier_block *nfb, + unsigned long action, void *hcpu) +{ + unsigned cpu = (unsigned long)hcpu; + + switch (action) { + case CPU_ONLINE: + case CPU_ONLINE_FROZEN: + printk(PFX "CPU %x online: Create Rx thread\n", cpu); + bnx2fc_percpu_thread_create(cpu); + break; + case CPU_DEAD: + case CPU_DEAD_FROZEN: + printk(PFX "CPU %x offline: Remove Rx thread\n", cpu); + bnx2fc_percpu_thread_destroy(cpu); + break; + default: + break; + } + return NOTIFY_OK; +} + +/** + * bnx2fc_mod_init - module init entry point + * + * Initialize driver wide global data structures, and register + * with cnic module + **/ +static int __init bnx2fc_mod_init(void) +{ + struct fcoe_percpu_s *bg; + struct task_struct *l2_thread; + int rc = 0; + unsigned int cpu = 0; + struct bnx2fc_percpu_s *p; + + printk(KERN_INFO PFX "%s", version); + + /* register as a fcoe transport */ + rc = fcoe_transport_attach(&bnx2fc_transport); + if (rc) { + printk(KERN_ERR "failed to register an fcoe transport, check " + "if libfcoe is loaded\n"); + goto out; + } + + INIT_LIST_HEAD(&adapter_list); + mutex_init(&bnx2fc_dev_lock); + adapter_count = 0; + + /* Attach FC transport template */ + rc = bnx2fc_attach_transport(); + if (rc) + goto detach_ft; + + bnx2fc_wq = alloc_workqueue("bnx2fc", 0, 0); + if (!bnx2fc_wq) { + rc = -ENOMEM; + goto release_bt; + } + + bg = &bnx2fc_global; + skb_queue_head_init(&bg->fcoe_rx_list); + l2_thread = kthread_create(bnx2fc_l2_rcv_thread, + (void *)bg, + "bnx2fc_l2_thread"); + if (IS_ERR(l2_thread)) { + rc = PTR_ERR(l2_thread); + goto free_wq; + } + wake_up_process(l2_thread); + spin_lock_bh(&bg->fcoe_rx_list.lock); + bg->thread = l2_thread; + spin_unlock_bh(&bg->fcoe_rx_list.lock); + + for_each_possible_cpu(cpu) { + p = &per_cpu(bnx2fc_percpu, cpu); + INIT_LIST_HEAD(&p->work_list); + spin_lock_init(&p->fp_work_lock); + } + + for_each_online_cpu(cpu) { + bnx2fc_percpu_thread_create(cpu); + } + + /* Initialize per CPU interrupt thread */ + register_hotcpu_notifier(&bnx2fc_cpu_notifier); + + cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb); + + return 0; + +free_wq: + destroy_workqueue(bnx2fc_wq); +release_bt: + bnx2fc_release_transport(); +detach_ft: + fcoe_transport_detach(&bnx2fc_transport); +out: + return rc; +} + +static void __exit bnx2fc_mod_exit(void) +{ + LIST_HEAD(to_be_deleted); + struct bnx2fc_hba *hba, *next; + struct 
fcoe_percpu_s *bg; + struct task_struct *l2_thread; + struct sk_buff *skb; + unsigned int cpu = 0; + + /* + * NOTE: Since cnic calls register_driver routine rtnl_lock, + * it will have higher precedence than bnx2fc_dev_lock. + * unregister_device() cannot be called with bnx2fc_dev_lock + * held. + */ + mutex_lock(&bnx2fc_dev_lock); + list_splice(&adapter_list, &to_be_deleted); + INIT_LIST_HEAD(&adapter_list); + adapter_count = 0; + mutex_unlock(&bnx2fc_dev_lock); + + /* Unregister with cnic */ + list_for_each_entry_safe(hba, next, &to_be_deleted, link) { + list_del_init(&hba->link); + printk(KERN_ERR PFX "MOD_EXIT:destroy hba = 0x%p, kref = %d\n", + hba, atomic_read(&hba->kref.refcount)); + bnx2fc_ulp_stop(hba); + /* unregister cnic device */ + if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, + &hba->reg_with_cnic)) + hba->cnic->unregister_device(hba->cnic, CNIC_ULP_FCOE); + bnx2fc_interface_destroy(hba); + } + cnic_unregister_driver(CNIC_ULP_FCOE); + + /* Destroy global thread */ + bg = &bnx2fc_global; + spin_lock_bh(&bg->fcoe_rx_list.lock); + l2_thread = bg->thread; + bg->thread = NULL; + while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL) + kfree_skb(skb); + + spin_unlock_bh(&bg->fcoe_rx_list.lock); + + if (l2_thread) + kthread_stop(l2_thread); + + unregister_hotcpu_notifier(&bnx2fc_cpu_notifier); + + /* Destroy per cpu threads */ + for_each_online_cpu(cpu) { + bnx2fc_percpu_thread_destroy(cpu); + } + + destroy_workqueue(bnx2fc_wq); + /* + * detach from scsi transport + * must happen after all destroys are done + */ + bnx2fc_release_transport(); + + /* detach from fcoe transport */ + fcoe_transport_detach(&bnx2fc_transport); +} + +module_init(bnx2fc_mod_init); +module_exit(bnx2fc_mod_exit); + +static struct fc_function_template bnx2fc_transport_function = { + .show_host_node_name = 1, + .show_host_port_name = 1, + .show_host_supported_classes = 1, + .show_host_supported_fc4s = 1, + .show_host_active_fc4s = 1, + .show_host_maxframe_size = 1, + + .show_host_port_id = 1, + .show_host_supported_speeds = 1, + .get_host_speed = fc_get_host_speed, + .show_host_speed = 1, + .show_host_port_type = 1, + .get_host_port_state = fc_get_host_port_state, + .show_host_port_state = 1, + .show_host_symbolic_name = 1, + + .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) + + sizeof(struct bnx2fc_rport)), + .show_rport_maxframe_size = 1, + .show_rport_supported_classes = 1, + + .show_host_fabric_name = 1, + .show_starget_node_name = 1, + .show_starget_port_name = 1, + .show_starget_port_id = 1, + .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo, + .show_rport_dev_loss_tmo = 1, + .get_fc_host_stats = bnx2fc_get_host_stats, + + .issue_fc_host_lip = bnx2fc_fcoe_reset, + + .terminate_rport_io = fc_rport_terminate_io, + + .vport_create = bnx2fc_vport_create, + .vport_delete = bnx2fc_vport_destroy, + .vport_disable = bnx2fc_vport_disable, +}; + +static struct fc_function_template bnx2fc_vport_xport_function = { + .show_host_node_name = 1, + .show_host_port_name = 1, + .show_host_supported_classes = 1, + .show_host_supported_fc4s = 1, + .show_host_active_fc4s = 1, + .show_host_maxframe_size = 1, + + .show_host_port_id = 1, + .show_host_supported_speeds = 1, + .get_host_speed = fc_get_host_speed, + .show_host_speed = 1, + .show_host_port_type = 1, + .get_host_port_state = fc_get_host_port_state, + .show_host_port_state = 1, + .show_host_symbolic_name = 1, + + .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) + + sizeof(struct bnx2fc_rport)), + .show_rport_maxframe_size = 1, + 
.show_rport_supported_classes = 1, + + .show_host_fabric_name = 1, + .show_starget_node_name = 1, + .show_starget_port_name = 1, + .show_starget_port_id = 1, + .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo, + .show_rport_dev_loss_tmo = 1, + .get_fc_host_stats = fc_get_host_stats, + .issue_fc_host_lip = bnx2fc_fcoe_reset, + .terminate_rport_io = fc_rport_terminate_io, +}; + +/** + * scsi_host_template structure used while registering with SCSI-ml + */ +static struct scsi_host_template bnx2fc_shost_template = { + .module = THIS_MODULE, + .name = "Broadcom Offload FCoE Initiator", + .queuecommand = bnx2fc_queuecommand, + .eh_abort_handler = bnx2fc_eh_abort, /* abts */ + .eh_device_reset_handler = bnx2fc_eh_device_reset, /* lun reset */ + .eh_target_reset_handler = bnx2fc_eh_target_reset, /* tgt reset */ + .eh_host_reset_handler = fc_eh_host_reset, + .slave_alloc = fc_slave_alloc, + .change_queue_depth = fc_change_queue_depth, + .change_queue_type = fc_change_queue_type, + .this_id = -1, + .cmd_per_lun = 3, + .can_queue = (BNX2FC_MAX_OUTSTANDING_CMNDS/2), + .use_clustering = ENABLE_CLUSTERING, + .sg_tablesize = BNX2FC_MAX_BDS_PER_CMD, + .max_sectors = 512, +}; + +static struct libfc_function_template bnx2fc_libfc_fcn_templ = { + .frame_send = bnx2fc_xmit, + .elsct_send = bnx2fc_elsct_send, + .fcp_abort_io = bnx2fc_abort_io, + .fcp_cleanup = bnx2fc_cleanup, + .rport_event_callback = bnx2fc_rport_event_handler, +}; + +/** + * bnx2fc_cnic_cb - global template of bnx2fc - cnic driver interface + * structure carrying callback function pointers + */ +static struct cnic_ulp_ops bnx2fc_cnic_cb = { + .owner = THIS_MODULE, + .cnic_init = bnx2fc_ulp_init, + .cnic_exit = bnx2fc_ulp_exit, + .cnic_start = bnx2fc_ulp_start, + .cnic_stop = bnx2fc_ulp_stop, + .indicate_kcqes = bnx2fc_indicate_kcqe, + .indicate_netevent = bnx2fc_indicate_netevent, +}; diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c new file mode 100644 index 000000000000..4f4096836742 --- /dev/null +++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c @@ -0,0 +1,1868 @@ +/* bnx2fc_hwi.c: Broadcom NetXtreme II Linux FCoE offload driver. + * This file contains the code that low level functions that interact + * with 57712 FCoE firmware. + * + * Copyright (c) 2008 - 2010 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ * + * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com) + */ + +#include "bnx2fc.h" + +DECLARE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu); + +static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba, + struct fcoe_kcqe *new_cqe_kcqe); +static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba, + struct fcoe_kcqe *ofld_kcqe); +static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba, + struct fcoe_kcqe *ofld_kcqe); +static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code); +static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba, + struct fcoe_kcqe *conn_destroy); + +int bnx2fc_send_stat_req(struct bnx2fc_hba *hba) +{ + struct fcoe_kwqe_stat stat_req; + struct kwqe *kwqe_arr[2]; + int num_kwqes = 1; + int rc = 0; + + memset(&stat_req, 0x00, sizeof(struct fcoe_kwqe_stat)); + stat_req.hdr.op_code = FCOE_KWQE_OPCODE_STAT; + stat_req.hdr.flags = + (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + + stat_req.stat_params_addr_lo = (u32) hba->stats_buf_dma; + stat_req.stat_params_addr_hi = (u32) ((u64)hba->stats_buf_dma >> 32); + + kwqe_arr[0] = (struct kwqe *) &stat_req; + + if (hba->cnic && hba->cnic->submit_kwqes) + rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); + + return rc; +} + +/** + * bnx2fc_send_fw_fcoe_init_msg - initiates initial handshake with FCoE f/w + * + * @hba: adapter structure pointer + * + * Send down FCoE firmware init KWQEs which initiates the initial handshake + * with the f/w. + * + */ +int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba) +{ + struct fcoe_kwqe_init1 fcoe_init1; + struct fcoe_kwqe_init2 fcoe_init2; + struct fcoe_kwqe_init3 fcoe_init3; + struct kwqe *kwqe_arr[3]; + int num_kwqes = 3; + int rc = 0; + + if (!hba->cnic) { + printk(KERN_ALERT PFX "hba->cnic NULL during fcoe fw init\n"); + return -ENODEV; + } + + /* fill init1 KWQE */ + memset(&fcoe_init1, 0x00, sizeof(struct fcoe_kwqe_init1)); + fcoe_init1.hdr.op_code = FCOE_KWQE_OPCODE_INIT1; + fcoe_init1.hdr.flags = (FCOE_KWQE_LAYER_CODE << + FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + + fcoe_init1.num_tasks = BNX2FC_MAX_TASKS; + fcoe_init1.sq_num_wqes = BNX2FC_SQ_WQES_MAX; + fcoe_init1.rq_num_wqes = BNX2FC_RQ_WQES_MAX; + fcoe_init1.rq_buffer_log_size = BNX2FC_RQ_BUF_LOG_SZ; + fcoe_init1.cq_num_wqes = BNX2FC_CQ_WQES_MAX; + fcoe_init1.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma; + fcoe_init1.dummy_buffer_addr_hi = (u32) ((u64)hba->dummy_buf_dma >> 32); + fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma; + fcoe_init1.task_list_pbl_addr_hi = + (u32) ((u64) hba->task_ctx_bd_dma >> 32); + fcoe_init1.mtu = hba->netdev->mtu; + + fcoe_init1.flags = (PAGE_SHIFT << + FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT); + + fcoe_init1.num_sessions_log = BNX2FC_NUM_MAX_SESS_LOG; + + /* fill init2 KWQE */ + memset(&fcoe_init2, 0x00, sizeof(struct fcoe_kwqe_init2)); + fcoe_init2.hdr.op_code = FCOE_KWQE_OPCODE_INIT2; + fcoe_init2.hdr.flags = (FCOE_KWQE_LAYER_CODE << + FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + + fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma; + fcoe_init2.hash_tbl_pbl_addr_hi = (u32) + ((u64) hba->hash_tbl_pbl_dma >> 32); + + fcoe_init2.t2_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_dma; + fcoe_init2.t2_hash_tbl_addr_hi = (u32) + ((u64) hba->t2_hash_tbl_dma >> 32); + + fcoe_init2.t2_ptr_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_ptr_dma; + fcoe_init2.t2_ptr_hash_tbl_addr_hi = (u32) + ((u64) hba->t2_hash_tbl_ptr_dma >> 32); + + fcoe_init2.free_list_count = BNX2FC_NUM_MAX_SESS; + + /* fill init3 KWQE */ + 
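+ /* init3 carries the FCoE error bit map; both 32-bit words are set to 0xffffffff below, which presumably asks the firmware to report every error type. */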
memset(&fcoe_init3, 0x00, sizeof(struct fcoe_kwqe_init3)); + fcoe_init3.hdr.op_code = FCOE_KWQE_OPCODE_INIT3; + fcoe_init3.hdr.flags = (FCOE_KWQE_LAYER_CODE << + FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + fcoe_init3.error_bit_map_lo = 0xffffffff; + fcoe_init3.error_bit_map_hi = 0xffffffff; + + + kwqe_arr[0] = (struct kwqe *) &fcoe_init1; + kwqe_arr[1] = (struct kwqe *) &fcoe_init2; + kwqe_arr[2] = (struct kwqe *) &fcoe_init3; + + if (hba->cnic && hba->cnic->submit_kwqes) + rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); + + return rc; +} +int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba) +{ + struct fcoe_kwqe_destroy fcoe_destroy; + struct kwqe *kwqe_arr[2]; + int num_kwqes = 1; + int rc = -1; + + /* fill destroy KWQE */ + memset(&fcoe_destroy, 0x00, sizeof(struct fcoe_kwqe_destroy)); + fcoe_destroy.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY; + fcoe_destroy.hdr.flags = (FCOE_KWQE_LAYER_CODE << + FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + kwqe_arr[0] = (struct kwqe *) &fcoe_destroy; + + if (hba->cnic && hba->cnic->submit_kwqes) + rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); + return rc; +} + +/** + * bnx2fc_send_session_ofld_req - initiates FCoE Session offload process + * + * @port: port structure pointer + * @tgt: bnx2fc_rport structure pointer + */ +int bnx2fc_send_session_ofld_req(struct fcoe_port *port, + struct bnx2fc_rport *tgt) +{ + struct fc_lport *lport = port->lport; + struct bnx2fc_hba *hba = port->priv; + struct kwqe *kwqe_arr[4]; + struct fcoe_kwqe_conn_offload1 ofld_req1; + struct fcoe_kwqe_conn_offload2 ofld_req2; + struct fcoe_kwqe_conn_offload3 ofld_req3; + struct fcoe_kwqe_conn_offload4 ofld_req4; + struct fc_rport_priv *rdata = tgt->rdata; + struct fc_rport *rport = tgt->rport; + int num_kwqes = 4; + u32 port_id; + int rc = 0; + u16 conn_id; + + /* Initialize offload request 1 structure */ + memset(&ofld_req1, 0x00, sizeof(struct fcoe_kwqe_conn_offload1)); + + ofld_req1.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN1; + ofld_req1.hdr.flags = + (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + + + conn_id = (u16)tgt->fcoe_conn_id; + ofld_req1.fcoe_conn_id = conn_id; + + + ofld_req1.sq_addr_lo = (u32) tgt->sq_dma; + ofld_req1.sq_addr_hi = (u32)((u64) tgt->sq_dma >> 32); + + ofld_req1.rq_pbl_addr_lo = (u32) tgt->rq_pbl_dma; + ofld_req1.rq_pbl_addr_hi = (u32)((u64) tgt->rq_pbl_dma >> 32); + + ofld_req1.rq_first_pbe_addr_lo = (u32) tgt->rq_dma; + ofld_req1.rq_first_pbe_addr_hi = + (u32)((u64) tgt->rq_dma >> 32); + + ofld_req1.rq_prod = 0x8000; + + /* Initialize offload request 2 structure */ + memset(&ofld_req2, 0x00, sizeof(struct fcoe_kwqe_conn_offload2)); + + ofld_req2.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN2; + ofld_req2.hdr.flags = + (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + + ofld_req2.tx_max_fc_pay_len = rdata->maxframe_size; + + ofld_req2.cq_addr_lo = (u32) tgt->cq_dma; + ofld_req2.cq_addr_hi = (u32)((u64)tgt->cq_dma >> 32); + + ofld_req2.xferq_addr_lo = (u32) tgt->xferq_dma; + ofld_req2.xferq_addr_hi = (u32)((u64)tgt->xferq_dma >> 32); + + ofld_req2.conn_db_addr_lo = (u32)tgt->conn_db_dma; + ofld_req2.conn_db_addr_hi = (u32)((u64)tgt->conn_db_dma >> 32); + + /* Initialize offload request 3 structure */ + memset(&ofld_req3, 0x00, sizeof(struct fcoe_kwqe_conn_offload3)); + + ofld_req3.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN3; + ofld_req3.hdr.flags = + (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + + ofld_req3.vlan_tag = hba->vlan_id << + FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT; + 
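+ /* OR in the priority bits of the 802.1Q tag; the value 3 is the priority class this driver uses for FCoE frames. */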
ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT; + + port_id = fc_host_port_id(lport->host); + if (port_id == 0) { + BNX2FC_HBA_DBG(lport, "ofld_req: port_id = 0, link down?\n"); + return -EINVAL; + } + + /* + * Store s_id of the initiator for further reference. This will + * be used during disable/destroy during linkdown processing as + * when the lport is reset, the port_id also is reset to 0 + */ + tgt->sid = port_id; + ofld_req3.s_id[0] = (port_id & 0x000000FF); + ofld_req3.s_id[1] = (port_id & 0x0000FF00) >> 8; + ofld_req3.s_id[2] = (port_id & 0x00FF0000) >> 16; + + port_id = rport->port_id; + ofld_req3.d_id[0] = (port_id & 0x000000FF); + ofld_req3.d_id[1] = (port_id & 0x0000FF00) >> 8; + ofld_req3.d_id[2] = (port_id & 0x00FF0000) >> 16; + + ofld_req3.tx_total_conc_seqs = rdata->max_seq; + + ofld_req3.tx_max_conc_seqs_c3 = rdata->max_seq; + ofld_req3.rx_max_fc_pay_len = lport->mfs; + + ofld_req3.rx_total_conc_seqs = BNX2FC_MAX_SEQS; + ofld_req3.rx_max_conc_seqs_c3 = BNX2FC_MAX_SEQS; + ofld_req3.rx_open_seqs_exch_c3 = 1; + + ofld_req3.confq_first_pbe_addr_lo = tgt->confq_dma; + ofld_req3.confq_first_pbe_addr_hi = (u32)((u64) tgt->confq_dma >> 32); + + /* set mul_n_port_ids supported flag to 0, until it is supported */ + ofld_req3.flags = 0; + /* + ofld_req3.flags |= (((lport->send_sp_features & FC_SP_FT_MNA) ? 1:0) << + FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT); + */ + /* Info from PLOGI response */ + ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_EDTR) ? 1 : 0) << + FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT); + + ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) << + FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT); + + /* vlan flag */ + ofld_req3.flags |= (hba->vlan_enabled << + FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT); + + /* C2_VALID and ACK flags are not set as they are not suppported */ + + + /* Initialize offload request 4 structure */ + memset(&ofld_req4, 0x00, sizeof(struct fcoe_kwqe_conn_offload4)); + ofld_req4.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN4; + ofld_req4.hdr.flags = + (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + + ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20; + + + ofld_req4.src_mac_addr_lo32[0] = port->data_src_addr[5]; + /* local mac */ + ofld_req4.src_mac_addr_lo32[1] = port->data_src_addr[4]; + ofld_req4.src_mac_addr_lo32[2] = port->data_src_addr[3]; + ofld_req4.src_mac_addr_lo32[3] = port->data_src_addr[2]; + ofld_req4.src_mac_addr_hi16[0] = port->data_src_addr[1]; + ofld_req4.src_mac_addr_hi16[1] = port->data_src_addr[0]; + ofld_req4.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */ + ofld_req4.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4]; + ofld_req4.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3]; + ofld_req4.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2]; + ofld_req4.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1]; + ofld_req4.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0]; + + ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma; + ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32); + + ofld_req4.confq_pbl_base_addr_lo = (u32) tgt->confq_pbl_dma; + ofld_req4.confq_pbl_base_addr_hi = + (u32)((u64) tgt->confq_pbl_dma >> 32); + + kwqe_arr[0] = (struct kwqe *) &ofld_req1; + kwqe_arr[1] = (struct kwqe *) &ofld_req2; + kwqe_arr[2] = (struct kwqe *) &ofld_req3; + kwqe_arr[3] = (struct kwqe *) &ofld_req4; + + if (hba->cnic && hba->cnic->submit_kwqes) + rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); + + return rc; +} + +/** + * bnx2fc_send_session_enable_req - initiates 
FCoE Session enablement + * + * @port: port structure pointer + * @tgt: bnx2fc_rport structure pointer + */ +static int bnx2fc_send_session_enable_req(struct fcoe_port *port, + struct bnx2fc_rport *tgt) +{ + struct kwqe *kwqe_arr[2]; + struct bnx2fc_hba *hba = port->priv; + struct fcoe_kwqe_conn_enable_disable enbl_req; + struct fc_lport *lport = port->lport; + struct fc_rport *rport = tgt->rport; + int num_kwqes = 1; + int rc = 0; + u32 port_id; + + memset(&enbl_req, 0x00, + sizeof(struct fcoe_kwqe_conn_enable_disable)); + enbl_req.hdr.op_code = FCOE_KWQE_OPCODE_ENABLE_CONN; + enbl_req.hdr.flags = + (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + + enbl_req.src_mac_addr_lo32[0] = port->data_src_addr[5]; + /* local mac */ + enbl_req.src_mac_addr_lo32[1] = port->data_src_addr[4]; + enbl_req.src_mac_addr_lo32[2] = port->data_src_addr[3]; + enbl_req.src_mac_addr_lo32[3] = port->data_src_addr[2]; + enbl_req.src_mac_addr_hi16[0] = port->data_src_addr[1]; + enbl_req.src_mac_addr_hi16[1] = port->data_src_addr[0]; + + enbl_req.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */ + enbl_req.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4]; + enbl_req.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3]; + enbl_req.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2]; + enbl_req.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1]; + enbl_req.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0]; + + port_id = fc_host_port_id(lport->host); + if (port_id != tgt->sid) { + printk(KERN_ERR PFX "WARN: enable_req port_id = 0x%x," + "sid = 0x%x\n", port_id, tgt->sid); + port_id = tgt->sid; + } + enbl_req.s_id[0] = (port_id & 0x000000FF); + enbl_req.s_id[1] = (port_id & 0x0000FF00) >> 8; + enbl_req.s_id[2] = (port_id & 0x00FF0000) >> 16; + + port_id = rport->port_id; + enbl_req.d_id[0] = (port_id & 0x000000FF); + enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8; + enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16; + enbl_req.vlan_tag = hba->vlan_id << + FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT; + enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT; + enbl_req.vlan_flag = hba->vlan_enabled; + enbl_req.context_id = tgt->context_id; + enbl_req.conn_id = tgt->fcoe_conn_id; + + kwqe_arr[0] = (struct kwqe *) &enbl_req; + + if (hba->cnic && hba->cnic->submit_kwqes) + rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); + return rc; +} + +/** + * bnx2fc_send_session_disable_req - initiates FCoE Session disable + * + * @port: port structure pointer + * @tgt: bnx2fc_rport structure pointer + */ +int bnx2fc_send_session_disable_req(struct fcoe_port *port, + struct bnx2fc_rport *tgt) +{ + struct bnx2fc_hba *hba = port->priv; + struct fcoe_kwqe_conn_enable_disable disable_req; + struct kwqe *kwqe_arr[2]; + struct fc_rport *rport = tgt->rport; + int num_kwqes = 1; + int rc = 0; + u32 port_id; + + memset(&disable_req, 0x00, + sizeof(struct fcoe_kwqe_conn_enable_disable)); + disable_req.hdr.op_code = FCOE_KWQE_OPCODE_DISABLE_CONN; + disable_req.hdr.flags = + (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + + disable_req.src_mac_addr_lo32[0] = port->data_src_addr[5]; + disable_req.src_mac_addr_lo32[2] = port->data_src_addr[3]; + disable_req.src_mac_addr_lo32[3] = port->data_src_addr[2]; + disable_req.src_mac_addr_hi16[0] = port->data_src_addr[1]; + disable_req.src_mac_addr_hi16[1] = port->data_src_addr[0]; + + disable_req.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */ + disable_req.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4]; + disable_req.dst_mac_addr_lo32[2] = 
hba->ctlr.dest_addr[3]; + disable_req.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2]; + disable_req.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1]; + disable_req.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0]; + + port_id = tgt->sid; + disable_req.s_id[0] = (port_id & 0x000000FF); + disable_req.s_id[1] = (port_id & 0x0000FF00) >> 8; + disable_req.s_id[2] = (port_id & 0x00FF0000) >> 16; + + + port_id = rport->port_id; + disable_req.d_id[0] = (port_id & 0x000000FF); + disable_req.d_id[1] = (port_id & 0x0000FF00) >> 8; + disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16; + disable_req.context_id = tgt->context_id; + disable_req.conn_id = tgt->fcoe_conn_id; + disable_req.vlan_tag = hba->vlan_id << + FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT; + disable_req.vlan_tag |= + 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT; + disable_req.vlan_flag = hba->vlan_enabled; + + kwqe_arr[0] = (struct kwqe *) &disable_req; + + if (hba->cnic && hba->cnic->submit_kwqes) + rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); + + return rc; +} + +/** + * bnx2fc_send_session_destroy_req - initiates FCoE Session destroy + * + * @port: port structure pointer + * @tgt: bnx2fc_rport structure pointer + */ +int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba, + struct bnx2fc_rport *tgt) +{ + struct fcoe_kwqe_conn_destroy destroy_req; + struct kwqe *kwqe_arr[2]; + int num_kwqes = 1; + int rc = 0; + + memset(&destroy_req, 0x00, sizeof(struct fcoe_kwqe_conn_destroy)); + destroy_req.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY_CONN; + destroy_req.hdr.flags = + (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + + destroy_req.context_id = tgt->context_id; + destroy_req.conn_id = tgt->fcoe_conn_id; + + kwqe_arr[0] = (struct kwqe *) &destroy_req; + + if (hba->cnic && hba->cnic->submit_kwqes) + rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); + + return rc; +} + +static void bnx2fc_unsol_els_work(struct work_struct *work) +{ + struct bnx2fc_unsol_els *unsol_els; + struct fc_lport *lport; + struct fc_frame *fp; + + unsol_els = container_of(work, struct bnx2fc_unsol_els, unsol_els_work); + lport = unsol_els->lport; + fp = unsol_els->fp; + fc_exch_recv(lport, fp); + kfree(unsol_els); +} + +void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt, + unsigned char *buf, + u32 frame_len, u16 l2_oxid) +{ + struct fcoe_port *port = tgt->port; + struct fc_lport *lport = port->lport; + struct bnx2fc_unsol_els *unsol_els; + struct fc_frame_header *fh; + struct fc_frame *fp; + struct sk_buff *skb; + u32 payload_len; + u32 crc; + u8 op; + + + unsol_els = kzalloc(sizeof(*unsol_els), GFP_ATOMIC); + if (!unsol_els) { + BNX2FC_TGT_DBG(tgt, "Unable to allocate unsol_work\n"); + return; + } + + BNX2FC_TGT_DBG(tgt, "l2_frame_compl l2_oxid = 0x%x, frame_len = %d\n", + l2_oxid, frame_len); + + payload_len = frame_len - sizeof(struct fc_frame_header); + + fp = fc_frame_alloc(lport, payload_len); + if (!fp) { + printk(KERN_ERR PFX "fc_frame_alloc failure\n"); + return; + } + + fh = (struct fc_frame_header *) fc_frame_header_get(fp); + /* Copy FC Frame header and payload into the frame */ + memcpy(fh, buf, frame_len); + + if (l2_oxid != FC_XID_UNKNOWN) + fh->fh_ox_id = htons(l2_oxid); + + skb = fp_skb(fp); + + if ((fh->fh_r_ctl == FC_RCTL_ELS_REQ) || + (fh->fh_r_ctl == FC_RCTL_ELS_REP)) { + + if (fh->fh_type == FC_TYPE_ELS) { + op = fc_frame_payload_op(fp); + if ((op == ELS_TEST) || (op == ELS_ESTC) || + (op == ELS_FAN) || (op == ELS_CSU)) { + /* + * No need to reply for these + * ELS requests + */ + 
printk(KERN_ERR PFX "dropping ELS 0x%x\n", op); + kfree_skb(skb); + return; + } + } + crc = fcoe_fc_crc(fp); + fc_frame_init(fp); + fr_dev(fp) = lport; + fr_sof(fp) = FC_SOF_I3; + fr_eof(fp) = FC_EOF_T; + fr_crc(fp) = cpu_to_le32(~crc); + unsol_els->lport = lport; + unsol_els->fp = fp; + INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work); + queue_work(bnx2fc_wq, &unsol_els->unsol_els_work); + } else { + BNX2FC_HBA_DBG(lport, "fh_r_ctl = 0x%x\n", fh->fh_r_ctl); + kfree_skb(skb); + } +} + +static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe) +{ + u8 num_rq; + struct fcoe_err_report_entry *err_entry; + unsigned char *rq_data; + unsigned char *buf = NULL, *buf1; + int i; + u16 xid; + u32 frame_len, len; + struct bnx2fc_cmd *io_req = NULL; + struct fcoe_task_ctx_entry *task, *task_page; + struct bnx2fc_hba *hba = tgt->port->priv; + int task_idx, index; + int rc = 0; + + + BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe); + switch (wqe & FCOE_UNSOLICITED_CQE_SUBTYPE) { + case FCOE_UNSOLICITED_FRAME_CQE_TYPE: + frame_len = (wqe & FCOE_UNSOLICITED_CQE_PKT_LEN) >> + FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT; + + num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ; + + rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq); + if (rq_data) { + buf = rq_data; + } else { + buf1 = buf = kmalloc((num_rq * BNX2FC_RQ_BUF_SZ), + GFP_ATOMIC); + + if (!buf1) { + BNX2FC_TGT_DBG(tgt, "Memory alloc failure\n"); + break; + } + + for (i = 0; i < num_rq; i++) { + rq_data = (unsigned char *) + bnx2fc_get_next_rqe(tgt, 1); + len = BNX2FC_RQ_BUF_SZ; + memcpy(buf1, rq_data, len); + buf1 += len; + } + } + bnx2fc_process_l2_frame_compl(tgt, buf, frame_len, + FC_XID_UNKNOWN); + + if (buf != rq_data) + kfree(buf); + bnx2fc_return_rqe(tgt, num_rq); + break; + + case FCOE_ERROR_DETECTION_CQE_TYPE: + /* + *In case of error reporting CQE a single RQ entry + * is consumes. + */ + spin_lock_bh(&tgt->tgt_lock); + num_rq = 1; + err_entry = (struct fcoe_err_report_entry *) + bnx2fc_get_next_rqe(tgt, 1); + xid = err_entry->fc_hdr.ox_id; + BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid); + BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n", + err_entry->err_warn_bitmap_hi, + err_entry->err_warn_bitmap_lo); + BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n", + err_entry->tx_buf_off, err_entry->rx_buf_off); + + bnx2fc_return_rqe(tgt, 1); + + if (xid > BNX2FC_MAX_XID) { + BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", + xid); + spin_unlock_bh(&tgt->tgt_lock); + break; + } + + task_idx = xid / BNX2FC_TASKS_PER_PAGE; + index = xid % BNX2FC_TASKS_PER_PAGE; + task_page = (struct fcoe_task_ctx_entry *) + hba->task_ctx[task_idx]; + task = &(task_page[index]); + + io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid]; + if (!io_req) { + spin_unlock_bh(&tgt->tgt_lock); + break; + } + + if (io_req->cmd_type != BNX2FC_SCSI_CMD) { + printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n"); + spin_unlock_bh(&tgt->tgt_lock); + break; + } + + if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP, + &io_req->req_flags)) { + BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in " + "progress.. ignore unsol err\n"); + spin_unlock_bh(&tgt->tgt_lock); + break; + } + + /* + * If ABTS is already in progress, and FW error is + * received after that, do not cancel the timeout_work + * and let the error recovery continue by explicitly + * logging out the target, when the ABTS eventually + * times out. 
+ */ + if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, + &io_req->req_flags)) { + /* + * Cancel the timeout_work, as we received IO + * completion with FW error. + */ + if (cancel_delayed_work(&io_req->timeout_work)) + kref_put(&io_req->refcount, + bnx2fc_cmd_release); /* timer hold */ + + rc = bnx2fc_initiate_abts(io_req); + if (rc != SUCCESS) { + BNX2FC_IO_DBG(io_req, "err_warn: initiate_abts " + "failed. issue cleanup\n"); + rc = bnx2fc_initiate_cleanup(io_req); + BUG_ON(rc); + } + } else + printk(KERN_ERR PFX "err_warn: io_req (0x%x) already " + "in ABTS processing\n", xid); + spin_unlock_bh(&tgt->tgt_lock); + break; + + case FCOE_WARNING_DETECTION_CQE_TYPE: + /* + *In case of warning reporting CQE a single RQ entry + * is consumes. + */ + num_rq = 1; + err_entry = (struct fcoe_err_report_entry *) + bnx2fc_get_next_rqe(tgt, 1); + xid = cpu_to_be16(err_entry->fc_hdr.ox_id); + BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid); + BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x", + err_entry->err_warn_bitmap_hi, + err_entry->err_warn_bitmap_lo); + BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x", + err_entry->tx_buf_off, err_entry->rx_buf_off); + + bnx2fc_return_rqe(tgt, 1); + break; + + default: + printk(KERN_ERR PFX "Unsol Compl: Invalid CQE Subtype\n"); + break; + } +} + +void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe) +{ + struct fcoe_task_ctx_entry *task; + struct fcoe_task_ctx_entry *task_page; + struct fcoe_port *port = tgt->port; + struct bnx2fc_hba *hba = port->priv; + struct bnx2fc_cmd *io_req; + int task_idx, index; + u16 xid; + u8 cmd_type; + u8 rx_state = 0; + u8 num_rq; + + spin_lock_bh(&tgt->tgt_lock); + xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID; + if (xid >= BNX2FC_MAX_TASKS) { + printk(KERN_ALERT PFX "ERROR:xid out of range\n"); + spin_unlock_bh(&tgt->tgt_lock); + return; + } + task_idx = xid / BNX2FC_TASKS_PER_PAGE; + index = xid % BNX2FC_TASKS_PER_PAGE; + task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx]; + task = &(task_page[index]); + + num_rq = ((task->rx_wr_tx_rd.rx_flags & + FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE) >> + FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT); + + io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid]; + + if (io_req == NULL) { + printk(KERN_ERR PFX "ERROR? cq_compl - io_req is NULL\n"); + spin_unlock_bh(&tgt->tgt_lock); + return; + } + + /* Timestamp IO completion time */ + cmd_type = io_req->cmd_type; + + /* optimized completion path */ + if (cmd_type == BNX2FC_SCSI_CMD) { + rx_state = ((task->rx_wr_tx_rd.rx_flags & + FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE) >> + FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT); + + if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) { + bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq); + spin_unlock_bh(&tgt->tgt_lock); + return; + } + } + + /* Process other IO completion types */ + switch (cmd_type) { + case BNX2FC_SCSI_CMD: + if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED) + bnx2fc_process_abts_compl(io_req, task, num_rq); + else if (rx_state == + FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED) + bnx2fc_process_cleanup_compl(io_req, task, num_rq); + else + printk(KERN_ERR PFX "Invalid rx state - %d\n", + rx_state); + break; + + case BNX2FC_TASK_MGMT_CMD: + BNX2FC_IO_DBG(io_req, "Processing TM complete\n"); + bnx2fc_process_tm_compl(io_req, task, num_rq); + break; + + case BNX2FC_ABTS: + /* + * ABTS request received by firmware. 
ABTS response + * will be delivered to the task belonging to the IO + * that was aborted + */ + BNX2FC_IO_DBG(io_req, "cq_compl- ABTS sent out by fw\n"); + kref_put(&io_req->refcount, bnx2fc_cmd_release); + break; + + case BNX2FC_ELS: + BNX2FC_IO_DBG(io_req, "cq_compl - call process_els_compl\n"); + bnx2fc_process_els_compl(io_req, task, num_rq); + break; + + case BNX2FC_CLEANUP: + BNX2FC_IO_DBG(io_req, "cq_compl- cleanup resp rcvd\n"); + kref_put(&io_req->refcount, bnx2fc_cmd_release); + break; + + default: + printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type); + break; + } + spin_unlock_bh(&tgt->tgt_lock); +} + +struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe) +{ + struct bnx2fc_work *work; + work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC); + if (!work) + return NULL; + + INIT_LIST_HEAD(&work->list); + work->tgt = tgt; + work->wqe = wqe; + return work; +} + +int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt) +{ + struct fcoe_cqe *cq; + u32 cq_cons; + struct fcoe_cqe *cqe; + u16 wqe; + bool more_cqes_found = false; + + /* + * cq_lock is a low contention lock used to protect + * the CQ data structure from being freed up during + * the upload operation + */ + spin_lock_bh(&tgt->cq_lock); + + if (!tgt->cq) { + printk(KERN_ERR PFX "process_new_cqes: cq is NULL\n"); + spin_unlock_bh(&tgt->cq_lock); + return 0; + } + cq = tgt->cq; + cq_cons = tgt->cq_cons_idx; + cqe = &cq[cq_cons]; + + do { + more_cqes_found ^= true; + + while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) == + (tgt->cq_curr_toggle_bit << + FCOE_CQE_TOGGLE_BIT_SHIFT)) { + + /* new entry on the cq */ + if (wqe & FCOE_CQE_CQE_TYPE) { + /* Unsolicited event notification */ + bnx2fc_process_unsol_compl(tgt, wqe); + } else { + struct bnx2fc_work *work = NULL; + struct bnx2fc_percpu_s *fps = NULL; + unsigned int cpu = wqe % num_possible_cpus(); + + fps = &per_cpu(bnx2fc_percpu, cpu); + spin_lock_bh(&fps->fp_work_lock); + if (unlikely(!fps->iothread)) + goto unlock; + + work = bnx2fc_alloc_work(tgt, wqe); + if (work) + list_add_tail(&work->list, + &fps->work_list); +unlock: + spin_unlock_bh(&fps->fp_work_lock); + + /* Pending work request completion */ + if (fps->iothread && work) + wake_up_process(fps->iothread); + else + bnx2fc_process_cq_compl(tgt, wqe); + } + cqe++; + tgt->cq_cons_idx++; + + if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) { + tgt->cq_cons_idx = 0; + cqe = cq; + tgt->cq_curr_toggle_bit = + 1 - tgt->cq_curr_toggle_bit; + } + } + /* Re-arm CQ */ + if (more_cqes_found) { + tgt->conn_db->cq_arm.lo = -1; + wmb(); + } + } while (more_cqes_found); + + /* + * Commit tgt->cq_cons_idx change to the memory + * spin_lock implies full memory barrier, no need to smp_wmb + */ + + spin_unlock_bh(&tgt->cq_lock); + return 0; +} + +/** + * bnx2fc_fastpath_notification - process global event queue (KCQ) + * + * @hba: adapter structure pointer + * @new_cqe_kcqe: pointer to newly DMA'd KCQ entry + * + * Fast path event notification handler + */ +static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba, + struct fcoe_kcqe *new_cqe_kcqe) +{ + u32 conn_id = new_cqe_kcqe->fcoe_conn_id; + struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id]; + + if (!tgt) { + printk(KERN_ALERT PFX "conn_id 0x%x not valid\n", conn_id); + return; + } + + bnx2fc_process_new_cqes(tgt); +} + +/** + * bnx2fc_process_ofld_cmpl - process FCoE session offload completion + * + * @hba: adapter structure pointer + * @ofld_kcqe: connection offload kcqe pointer + * + * handle session offload completion, enable the session if offload is 
+ * successful. + */ +static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba, + struct fcoe_kcqe *ofld_kcqe) +{ + struct bnx2fc_rport *tgt; + struct fcoe_port *port; + u32 conn_id; + u32 context_id; + int rc; + + conn_id = ofld_kcqe->fcoe_conn_id; + context_id = ofld_kcqe->fcoe_conn_context_id; + tgt = hba->tgt_ofld_list[conn_id]; + if (!tgt) { + printk(KERN_ALERT PFX "ERROR:ofld_cmpl: No pending ofld req\n"); + return; + } + BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n", + ofld_kcqe->fcoe_conn_context_id); + port = tgt->port; + if (hba != tgt->port->priv) { + printk(KERN_ALERT PFX "ERROR:ofld_cmpl: HBA mis-match\n"); + goto ofld_cmpl_err; + } + /* + * cnic has allocated a context_id for this session; use this + * while enabling the session. + */ + tgt->context_id = context_id; + if (ofld_kcqe->completion_status) { + if (ofld_kcqe->completion_status == + FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE) { + printk(KERN_ERR PFX "unable to allocate FCoE context " + "resources\n"); + set_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags); + } + goto ofld_cmpl_err; + } else { + + /* now enable the session */ + rc = bnx2fc_send_session_enable_req(port, tgt); + if (rc) { + printk(KERN_ALERT PFX "enable session failed\n"); + goto ofld_cmpl_err; + } + } + return; +ofld_cmpl_err: + set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags); + wake_up_interruptible(&tgt->ofld_wait); +} + +/** + * bnx2fc_process_enable_conn_cmpl - process FCoE session enable completion + * + * @hba: adapter structure pointer + * @ofld_kcqe: connection offload kcqe pointer + * + * handle session enable completion, mark the rport as ready + */ + +static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba, + struct fcoe_kcqe *ofld_kcqe) +{ + struct bnx2fc_rport *tgt; + u32 conn_id; + u32 context_id; + + context_id = ofld_kcqe->fcoe_conn_context_id; + conn_id = ofld_kcqe->fcoe_conn_id; + tgt = hba->tgt_ofld_list[conn_id]; + if (!tgt) { + printk(KERN_ALERT PFX "ERROR:enbl_cmpl: No pending ofld req\n"); + return; + } + + BNX2FC_TGT_DBG(tgt, "Enable compl - context_id = 0x%x\n", + ofld_kcqe->fcoe_conn_context_id); + + /* + * context_id should be the same for this target during offload + * and enable + */ + if (tgt->context_id != context_id) { + printk(KERN_ALERT PFX "context id mis-match\n"); + return; + } + if (hba != tgt->port->priv) { + printk(KERN_ALERT PFX "bnx2fc-enbl_cmpl: HBA mis-match\n"); + goto enbl_cmpl_err; + } + if (ofld_kcqe->completion_status) { + goto enbl_cmpl_err; + } else { + /* enable successful - rport ready for issuing IOs */ + set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags); + set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags); + wake_up_interruptible(&tgt->ofld_wait); + } + return; + +enbl_cmpl_err: + set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags); + wake_up_interruptible(&tgt->ofld_wait); +} + +static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba, + struct fcoe_kcqe *disable_kcqe) +{ + + struct bnx2fc_rport *tgt; + u32 conn_id; + + conn_id = disable_kcqe->fcoe_conn_id; + tgt = hba->tgt_ofld_list[conn_id]; + if (!tgt) { + printk(KERN_ALERT PFX "ERROR: disable_cmpl: No disable req\n"); + return; + } + + BNX2FC_TGT_DBG(tgt, PFX "disable_cmpl: conn_id %d\n", conn_id); + + if (disable_kcqe->completion_status) { + printk(KERN_ALERT PFX "ERROR: Disable failed with cmpl status %d\n", + disable_kcqe->completion_status); + return; + } else { + /* disable successful */ + BNX2FC_TGT_DBG(tgt, "disable successful\n"); + clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags); + 
set_bit(BNX2FC_FLAG_DISABLED, &tgt->flags); + set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags); + wake_up_interruptible(&tgt->upld_wait); + } +} + +static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba, + struct fcoe_kcqe *destroy_kcqe) +{ + struct bnx2fc_rport *tgt; + u32 conn_id; + + conn_id = destroy_kcqe->fcoe_conn_id; + tgt = hba->tgt_ofld_list[conn_id]; + if (!tgt) { + printk(KERN_ALERT PFX "destroy_cmpl: No destroy req\n"); + return; + } + + BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id); + + if (destroy_kcqe->completion_status) { + printk(KERN_ALERT PFX "Destroy conn failed, cmpl status %d\n", + destroy_kcqe->completion_status); + return; + } else { + /* destroy successful */ + BNX2FC_TGT_DBG(tgt, "upload successful\n"); + clear_bit(BNX2FC_FLAG_DISABLED, &tgt->flags); + set_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags); + set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags); + wake_up_interruptible(&tgt->upld_wait); + } +} + +static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code) +{ + switch (err_code) { + case FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE: + printk(KERN_ERR PFX "init_failure due to invalid opcode\n"); + break; + + case FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE: + printk(KERN_ERR PFX "init failed due to ctx alloc failure\n"); + break; + + case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR: + printk(KERN_ERR PFX "init_failure due to NIC error\n"); + break; + + default: + printk(KERN_ERR PFX "Unknown Error code %d\n", err_code); + } +} + +/** + * bnx2fc_indicae_kcqe - process KCQE + * + * @hba: adapter structure pointer + * @kcqe: kcqe pointer + * @num_cqe: Number of completion queue elements + * + * Generic KCQ event handler + */ +void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[], + u32 num_cqe) +{ + struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context; + int i = 0; + struct fcoe_kcqe *kcqe = NULL; + + while (i < num_cqe) { + kcqe = (struct fcoe_kcqe *) kcq[i++]; + + switch (kcqe->op_code) { + case FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION: + bnx2fc_fastpath_notification(hba, kcqe); + break; + + case FCOE_KCQE_OPCODE_OFFLOAD_CONN: + bnx2fc_process_ofld_cmpl(hba, kcqe); + break; + + case FCOE_KCQE_OPCODE_ENABLE_CONN: + bnx2fc_process_enable_conn_cmpl(hba, kcqe); + break; + + case FCOE_KCQE_OPCODE_INIT_FUNC: + if (kcqe->completion_status != + FCOE_KCQE_COMPLETION_STATUS_SUCCESS) { + bnx2fc_init_failure(hba, + kcqe->completion_status); + } else { + set_bit(ADAPTER_STATE_UP, &hba->adapter_state); + bnx2fc_get_link_state(hba); + printk(KERN_INFO PFX "[%.2x]: FCOE_INIT passed\n", + (u8)hba->pcidev->bus->number); + } + break; + + case FCOE_KCQE_OPCODE_DESTROY_FUNC: + if (kcqe->completion_status != + FCOE_KCQE_COMPLETION_STATUS_SUCCESS) { + + printk(KERN_ERR PFX "DESTROY failed\n"); + } else { + printk(KERN_ERR PFX "DESTROY success\n"); + } + hba->flags |= BNX2FC_FLAG_DESTROY_CMPL; + wake_up_interruptible(&hba->destroy_wait); + break; + + case FCOE_KCQE_OPCODE_DISABLE_CONN: + bnx2fc_process_conn_disable_cmpl(hba, kcqe); + break; + + case FCOE_KCQE_OPCODE_DESTROY_CONN: + bnx2fc_process_conn_destroy_cmpl(hba, kcqe); + break; + + case FCOE_KCQE_OPCODE_STAT_FUNC: + if (kcqe->completion_status != + FCOE_KCQE_COMPLETION_STATUS_SUCCESS) + printk(KERN_ERR PFX "STAT failed\n"); + complete(&hba->stat_req_done); + break; + + case FCOE_KCQE_OPCODE_FCOE_ERROR: + /* fall thru */ + default: + printk(KERN_ALERT PFX "unknown opcode 0x%x\n", + kcqe->op_code); + } + } +} + +void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid) +{ + struct fcoe_sqe *sqe; + + 
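+ /* Build one send-queue WQE carrying the task XID and the current toggle bit, then advance the producer index; on wrap the toggle bit is flipped so newly posted entries can be distinguished from stale ones. */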
sqe = &tgt->sq[tgt->sq_prod_idx]; + + /* Fill SQ WQE */ + sqe->wqe = xid << FCOE_SQE_TASK_ID_SHIFT; + sqe->wqe |= tgt->sq_curr_toggle_bit << FCOE_SQE_TOGGLE_BIT_SHIFT; + + /* Advance SQ Prod Idx */ + if (++tgt->sq_prod_idx == BNX2FC_SQ_WQES_MAX) { + tgt->sq_prod_idx = 0; + tgt->sq_curr_toggle_bit = 1 - tgt->sq_curr_toggle_bit; + } +} + +void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt) +{ + struct b577xx_doorbell_set_prod ev_doorbell; + u32 msg; + + wmb(); + + memset(&ev_doorbell, 0, sizeof(struct b577xx_doorbell_set_prod)); + ev_doorbell.header.header = B577XX_DOORBELL_HDR_DB_TYPE; + + ev_doorbell.prod = tgt->sq_prod_idx | + (tgt->sq_curr_toggle_bit << 15); + ev_doorbell.header.header |= B577XX_FCOE_CONNECTION_TYPE << + B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT; + msg = *((u32 *)&ev_doorbell); + writel(cpu_to_le32(msg), tgt->ctx_base); + + mmiowb(); + +} + +int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt) +{ + u32 context_id = tgt->context_id; + struct fcoe_port *port = tgt->port; + u32 reg_off; + resource_size_t reg_base; + struct bnx2fc_hba *hba = port->priv; + + reg_base = pci_resource_start(hba->pcidev, + BNX2X_DOORBELL_PCI_BAR); + reg_off = BNX2FC_5771X_DB_PAGE_SIZE * + (context_id & 0x1FFFF) + DPM_TRIGER_TYPE; + tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4); + if (!tgt->ctx_base) + return -ENOMEM; + return 0; +} + +char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items) +{ + char *buf = (char *)tgt->rq + (tgt->rq_cons_idx * BNX2FC_RQ_BUF_SZ); + + if (tgt->rq_cons_idx + num_items > BNX2FC_RQ_WQES_MAX) + return NULL; + + tgt->rq_cons_idx += num_items; + + if (tgt->rq_cons_idx >= BNX2FC_RQ_WQES_MAX) + tgt->rq_cons_idx -= BNX2FC_RQ_WQES_MAX; + + return buf; +} + +void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items) +{ + /* return the rq buffer */ + u32 next_prod_idx = tgt->rq_prod_idx + num_items; + if ((next_prod_idx & 0x7fff) == BNX2FC_RQ_WQES_MAX) { + /* Wrap around RQ */ + next_prod_idx += 0x8000 - BNX2FC_RQ_WQES_MAX; + } + tgt->rq_prod_idx = next_prod_idx; + tgt->conn_db->rq_prod = tgt->rq_prod_idx; +} + +void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task, + u16 orig_xid) +{ + u8 task_type = FCOE_TASK_TYPE_EXCHANGE_CLEANUP; + struct bnx2fc_rport *tgt = io_req->tgt; + u32 context_id = tgt->context_id; + + memset(task, 0, sizeof(struct fcoe_task_ctx_entry)); + + /* Tx Write Rx Read */ + task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP << + FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT; + task->tx_wr_rx_rd.init_flags = task_type << + FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT; + task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 << + FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT; + /* Common */ + task->cmn.common_flags = context_id << + FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT; + task->cmn.general.cleanup_info.task_id = orig_xid; + + +} + +void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task) +{ + struct bnx2fc_mp_req *mp_req = &(io_req->mp_req); + struct bnx2fc_rport *tgt = io_req->tgt; + struct fc_frame_header *fc_hdr; + u8 task_type = 0; + u64 *hdr; + u64 temp_hdr[3]; + u32 context_id; + + + /* Obtain task_type */ + if ((io_req->cmd_type == BNX2FC_TASK_MGMT_CMD) || + (io_req->cmd_type == BNX2FC_ELS)) { + task_type = FCOE_TASK_TYPE_MIDPATH; + } else if (io_req->cmd_type == BNX2FC_ABTS) { + task_type = FCOE_TASK_TYPE_ABTS; + } + + memset(task, 0, sizeof(struct fcoe_task_ctx_entry)); + + /* Setup the task from io_req for easy reference */ + io_req->task = task; + 
+ BNX2FC_IO_DBG(io_req, "Init MP task for cmd_type = %d task_type = %d\n", + io_req->cmd_type, task_type); + + /* Tx only */ + if ((task_type == FCOE_TASK_TYPE_MIDPATH) || + (task_type == FCOE_TASK_TYPE_UNSOLICITED)) { + task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo = + (u32)mp_req->mp_req_bd_dma; + task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi = + (u32)((u64)mp_req->mp_req_bd_dma >> 32); + task->tx_wr_only.sgl_ctx.mul_sges.sgl_size = 1; + BNX2FC_IO_DBG(io_req, "init_mp_task - bd_dma = 0x%llx\n", + (unsigned long long)mp_req->mp_req_bd_dma); + } + + /* Tx Write Rx Read */ + task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_INIT << + FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT; + task->tx_wr_rx_rd.init_flags = task_type << + FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT; + task->tx_wr_rx_rd.init_flags |= FCOE_TASK_DEV_TYPE_DISK << + FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT; + task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 << + FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT; + + /* Common */ + task->cmn.data_2_trns = io_req->data_xfer_len; + context_id = tgt->context_id; + task->cmn.common_flags = context_id << + FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT; + task->cmn.common_flags |= 1 << + FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT; + task->cmn.common_flags |= 1 << + FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT; + + /* Rx Write Tx Read */ + fc_hdr = &(mp_req->req_fc_hdr); + if (task_type == FCOE_TASK_TYPE_MIDPATH) { + fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid); + fc_hdr->fh_rx_id = htons(0xffff); + task->rx_wr_tx_rd.rx_id = 0xffff; + } else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) { + fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid); + } + + /* Fill FC Header into middle path buffer */ + hdr = (u64 *) &task->cmn.general.cmd_info.mp_fc_frame.fc_hdr; + memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr)); + hdr[0] = cpu_to_be64(temp_hdr[0]); + hdr[1] = cpu_to_be64(temp_hdr[1]); + hdr[2] = cpu_to_be64(temp_hdr[2]); + + /* Rx Only */ + if (task_type == FCOE_TASK_TYPE_MIDPATH) { + + task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo = + (u32)mp_req->mp_resp_bd_dma; + task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi = + (u32)((u64)mp_req->mp_resp_bd_dma >> 32); + task->rx_wr_only.sgl_ctx.mul_sges.sgl_size = 1; + } +} + +void bnx2fc_init_task(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task) +{ + u8 task_type; + struct scsi_cmnd *sc_cmd = io_req->sc_cmd; + struct io_bdt *bd_tbl = io_req->bd_tbl; + struct bnx2fc_rport *tgt = io_req->tgt; + u64 *fcp_cmnd; + u64 tmp_fcp_cmnd[4]; + u32 context_id; + int cnt, i; + int bd_count; + + memset(task, 0, sizeof(struct fcoe_task_ctx_entry)); + + /* Setup the task from io_req for easy reference */ + io_req->task = task; + + if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) + task_type = FCOE_TASK_TYPE_WRITE; + else + task_type = FCOE_TASK_TYPE_READ; + + /* Tx only */ + if (task_type == FCOE_TASK_TYPE_WRITE) { + task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo = + (u32)bd_tbl->bd_tbl_dma; + task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi = + (u32)((u64)bd_tbl->bd_tbl_dma >> 32); + task->tx_wr_only.sgl_ctx.mul_sges.sgl_size = + bd_tbl->bd_valid; + } + + /*Tx Write Rx Read */ + /* Init state to NORMAL */ + task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_NORMAL << + FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT; + task->tx_wr_rx_rd.init_flags = task_type << + FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT; + task->tx_wr_rx_rd.init_flags |= FCOE_TASK_DEV_TYPE_DISK << + FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT; + 
task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 << + FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT; + + /* Common */ + task->cmn.data_2_trns = io_req->data_xfer_len; + context_id = tgt->context_id; + task->cmn.common_flags = context_id << + FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT; + task->cmn.common_flags |= 1 << + FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT; + task->cmn.common_flags |= 1 << + FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT; + + /* Set initiative ownership */ + task->cmn.common_flags |= FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT; + + /* Set initial seq counter */ + task->cmn.tx_low_seq_cnt = 1; + + /* Set state to "waiting for the first packet" */ + task->cmn.common_flags |= FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME; + + /* Fill FCP_CMND IU */ + fcp_cmnd = (u64 *) + task->cmn.general.cmd_info.fcp_cmd_payload.opaque; + bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd); + + /* swap fcp_cmnd */ + cnt = sizeof(struct fcp_cmnd) / sizeof(u64); + + for (i = 0; i < cnt; i++) { + *fcp_cmnd = cpu_to_be64(tmp_fcp_cmnd[i]); + fcp_cmnd++; + } + + /* Rx Write Tx Read */ + task->rx_wr_tx_rd.rx_id = 0xffff; + + /* Rx Only */ + if (task_type == FCOE_TASK_TYPE_READ) { + + bd_count = bd_tbl->bd_valid; + if (bd_count == 1) { + + struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl; + + task->rx_wr_only.sgl_ctx.single_sge.cur_buf_addr.lo = + fcoe_bd_tbl->buf_addr_lo; + task->rx_wr_only.sgl_ctx.single_sge.cur_buf_addr.hi = + fcoe_bd_tbl->buf_addr_hi; + task->rx_wr_only.sgl_ctx.single_sge.cur_buf_rem = + fcoe_bd_tbl->buf_len; + task->tx_wr_rx_rd.init_flags |= 1 << + FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT; + } else { + + task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo = + (u32)bd_tbl->bd_tbl_dma; + task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi = + (u32)((u64)bd_tbl->bd_tbl_dma >> 32); + task->rx_wr_only.sgl_ctx.mul_sges.sgl_size = + bd_tbl->bd_valid; + } + } +} + +/** + * bnx2fc_setup_task_ctx - allocate and map task context + * + * @hba: pointer to adapter structure + * + * allocate memory for task context, and associated BD table to be used + * by firmware + * + */ +int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba) +{ + int rc = 0; + struct regpair *task_ctx_bdt; + dma_addr_t addr; + int i; + + /* + * Allocate task context bd table. A page size of bd table + * can map 256 buffers. Each buffer contains 32 task context + * entries. Hence the limit with one page is 8192 task context + * entries. 
+ */ + hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, + PAGE_SIZE, + &hba->task_ctx_bd_dma, + GFP_KERNEL); + if (!hba->task_ctx_bd_tbl) { + printk(KERN_ERR PFX "unable to allocate task context BDT\n"); + rc = -1; + goto out; + } + memset(hba->task_ctx_bd_tbl, 0, PAGE_SIZE); + + /* + * Allocate task_ctx which is an array of pointers pointing to + * a page containing 32 task contexts + */ + hba->task_ctx = kzalloc((BNX2FC_TASK_CTX_ARR_SZ * sizeof(void *)), + GFP_KERNEL); + if (!hba->task_ctx) { + printk(KERN_ERR PFX "unable to allocate task context array\n"); + rc = -1; + goto out1; + } + + /* + * Allocate task_ctx_dma which is an array of dma addresses + */ + hba->task_ctx_dma = kmalloc((BNX2FC_TASK_CTX_ARR_SZ * + sizeof(dma_addr_t)), GFP_KERNEL); + if (!hba->task_ctx_dma) { + printk(KERN_ERR PFX "unable to alloc context mapping array\n"); + rc = -1; + goto out2; + } + + task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl; + for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) { + + hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev, + PAGE_SIZE, + &hba->task_ctx_dma[i], + GFP_KERNEL); + if (!hba->task_ctx[i]) { + printk(KERN_ERR PFX "unable to alloc task context\n"); + rc = -1; + goto out3; + } + memset(hba->task_ctx[i], 0, PAGE_SIZE); + addr = (u64)hba->task_ctx_dma[i]; + task_ctx_bdt->hi = cpu_to_le32((u64)addr >> 32); + task_ctx_bdt->lo = cpu_to_le32((u32)addr); + task_ctx_bdt++; + } + return 0; + +out3: + for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) { + if (hba->task_ctx[i]) { + + dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + hba->task_ctx[i], hba->task_ctx_dma[i]); + hba->task_ctx[i] = NULL; + } + } + + kfree(hba->task_ctx_dma); + hba->task_ctx_dma = NULL; +out2: + kfree(hba->task_ctx); + hba->task_ctx = NULL; +out1: + dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + hba->task_ctx_bd_tbl, hba->task_ctx_bd_dma); + hba->task_ctx_bd_tbl = NULL; +out: + return rc; +} + +void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba) +{ + int i; + + if (hba->task_ctx_bd_tbl) { + dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + hba->task_ctx_bd_tbl, + hba->task_ctx_bd_dma); + hba->task_ctx_bd_tbl = NULL; + } + + if (hba->task_ctx) { + for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) { + if (hba->task_ctx[i]) { + dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + hba->task_ctx[i], + hba->task_ctx_dma[i]); + hba->task_ctx[i] = NULL; + } + } + kfree(hba->task_ctx); + hba->task_ctx = NULL; + } + + kfree(hba->task_ctx_dma); + hba->task_ctx_dma = NULL; +} + +static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba) +{ + int i; + int segment_count; + int hash_table_size; + u32 *pbl; + + segment_count = hba->hash_tbl_segment_count; + hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL * + sizeof(struct fcoe_hash_table_entry); + + pbl = hba->hash_tbl_pbl; + for (i = 0; i < segment_count; ++i) { + dma_addr_t dma_address; + + dma_address = le32_to_cpu(*pbl); + ++pbl; + dma_address += ((u64)le32_to_cpu(*pbl)) << 32; + ++pbl; + dma_free_coherent(&hba->pcidev->dev, + BNX2FC_HASH_TBL_CHUNK_SIZE, + hba->hash_tbl_segments[i], + dma_address); + + } + + if (hba->hash_tbl_pbl) { + dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + hba->hash_tbl_pbl, + hba->hash_tbl_pbl_dma); + hba->hash_tbl_pbl = NULL; + } +} + +static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba) +{ + int i; + int hash_table_size; + int segment_count; + int segment_array_size; + int dma_segment_array_size; + dma_addr_t *dma_segment_array; + u32 *pbl; + + hash_table_size = BNX2FC_NUM_MAX_SESS * 
BNX2FC_MAX_ROWS_IN_HASH_TBL * + sizeof(struct fcoe_hash_table_entry); + + segment_count = hash_table_size + BNX2FC_HASH_TBL_CHUNK_SIZE - 1; + segment_count /= BNX2FC_HASH_TBL_CHUNK_SIZE; + hba->hash_tbl_segment_count = segment_count; + + segment_array_size = segment_count * sizeof(*hba->hash_tbl_segments); + hba->hash_tbl_segments = kzalloc(segment_array_size, GFP_KERNEL); + if (!hba->hash_tbl_segments) { + printk(KERN_ERR PFX "hash table pointers alloc failed\n"); + return -ENOMEM; + } + dma_segment_array_size = segment_count * sizeof(*dma_segment_array); + dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL); + if (!dma_segment_array) { + printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n"); + return -ENOMEM; + } + + for (i = 0; i < segment_count; ++i) { + hba->hash_tbl_segments[i] = + dma_alloc_coherent(&hba->pcidev->dev, + BNX2FC_HASH_TBL_CHUNK_SIZE, + &dma_segment_array[i], + GFP_KERNEL); + if (!hba->hash_tbl_segments[i]) { + printk(KERN_ERR PFX "hash segment alloc failed\n"); + while (--i >= 0) { + dma_free_coherent(&hba->pcidev->dev, + BNX2FC_HASH_TBL_CHUNK_SIZE, + hba->hash_tbl_segments[i], + dma_segment_array[i]); + hba->hash_tbl_segments[i] = NULL; + } + kfree(dma_segment_array); + return -ENOMEM; + } + memset(hba->hash_tbl_segments[i], 0, + BNX2FC_HASH_TBL_CHUNK_SIZE); + } + + hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev, + PAGE_SIZE, + &hba->hash_tbl_pbl_dma, + GFP_KERNEL); + if (!hba->hash_tbl_pbl) { + printk(KERN_ERR PFX "hash table pbl alloc failed\n"); + kfree(dma_segment_array); + return -ENOMEM; + } + memset(hba->hash_tbl_pbl, 0, PAGE_SIZE); + + pbl = hba->hash_tbl_pbl; + for (i = 0; i < segment_count; ++i) { + u64 paddr = dma_segment_array[i]; + *pbl = cpu_to_le32((u32) paddr); + ++pbl; + *pbl = cpu_to_le32((u32) (paddr >> 32)); + ++pbl; + } + pbl = hba->hash_tbl_pbl; + i = 0; + while (*pbl && *(pbl + 1)) { + u32 lo; + u32 hi; + lo = *pbl; + ++pbl; + hi = *pbl; + ++pbl; + ++i; + } + kfree(dma_segment_array); + return 0; +} + +/** + * bnx2fc_setup_fw_resc - Allocate and map hash table and dummy buffer + * + * @hba: Pointer to adapter structure + * + */ +int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba) +{ + u64 addr; + u32 mem_size; + int i; + + if (bnx2fc_allocate_hash_table(hba)) + return -ENOMEM; + + mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair); + hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size, + &hba->t2_hash_tbl_ptr_dma, + GFP_KERNEL); + if (!hba->t2_hash_tbl_ptr) { + printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n"); + bnx2fc_free_fw_resc(hba); + return -ENOMEM; + } + memset(hba->t2_hash_tbl_ptr, 0x00, mem_size); + + mem_size = BNX2FC_NUM_MAX_SESS * + sizeof(struct fcoe_t2_hash_table_entry); + hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size, + &hba->t2_hash_tbl_dma, + GFP_KERNEL); + if (!hba->t2_hash_tbl) { + printk(KERN_ERR PFX "unable to allocate t2 hash table\n"); + bnx2fc_free_fw_resc(hba); + return -ENOMEM; + } + memset(hba->t2_hash_tbl, 0x00, mem_size); + for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) { + addr = (unsigned long) hba->t2_hash_tbl_dma + + ((i+1) * sizeof(struct fcoe_t2_hash_table_entry)); + hba->t2_hash_tbl[i].next.lo = addr & 0xffffffff; + hba->t2_hash_tbl[i].next.hi = addr >> 32; + } + + hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, + PAGE_SIZE, &hba->dummy_buf_dma, + GFP_KERNEL); + if (!hba->dummy_buffer) { + printk(KERN_ERR PFX "unable to alloc MP Dummy Buffer\n"); + bnx2fc_free_fw_resc(hba); + return -ENOMEM; + } + + hba->stats_buffer = 
dma_alloc_coherent(&hba->pcidev->dev, + PAGE_SIZE, + &hba->stats_buf_dma, + GFP_KERNEL); + if (!hba->stats_buffer) { + printk(KERN_ERR PFX "unable to alloc Stats Buffer\n"); + bnx2fc_free_fw_resc(hba); + return -ENOMEM; + } + memset(hba->stats_buffer, 0x00, PAGE_SIZE); + + return 0; +} + +void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba) +{ + u32 mem_size; + + if (hba->stats_buffer) { + dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + hba->stats_buffer, hba->stats_buf_dma); + hba->stats_buffer = NULL; + } + + if (hba->dummy_buffer) { + dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + hba->dummy_buffer, hba->dummy_buf_dma); + hba->dummy_buffer = NULL; + } + + if (hba->t2_hash_tbl_ptr) { + mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair); + dma_free_coherent(&hba->pcidev->dev, mem_size, + hba->t2_hash_tbl_ptr, + hba->t2_hash_tbl_ptr_dma); + hba->t2_hash_tbl_ptr = NULL; + } + + if (hba->t2_hash_tbl) { + mem_size = BNX2FC_NUM_MAX_SESS * + sizeof(struct fcoe_t2_hash_table_entry); + dma_free_coherent(&hba->pcidev->dev, mem_size, + hba->t2_hash_tbl, hba->t2_hash_tbl_dma); + hba->t2_hash_tbl = NULL; + } + bnx2fc_free_hash_table(hba); +} diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c new file mode 100644 index 000000000000..0f1dd23730db --- /dev/null +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c @@ -0,0 +1,1833 @@ +/* bnx2fc_io.c: Broadcom NetXtreme II Linux FCoE offload driver. + * IO manager and SCSI IO processing. + * + * Copyright (c) 2008 - 2010 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com) + */ + +#include "bnx2fc.h" +static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len, + int bd_index); +static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req); +static void bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req); +static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt, + struct bnx2fc_cmd *io_req); +static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req); +static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req); +static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req, + struct fcoe_fcp_rsp_payload *fcp_rsp, + u8 num_rq); + +void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req, + unsigned int timer_msec) +{ + struct bnx2fc_hba *hba = io_req->port->priv; + + if (queue_delayed_work(hba->timer_work_queue, &io_req->timeout_work, + msecs_to_jiffies(timer_msec))) + kref_get(&io_req->refcount); +} + +static void bnx2fc_cmd_timeout(struct work_struct *work) +{ + struct bnx2fc_cmd *io_req = container_of(work, struct bnx2fc_cmd, + timeout_work.work); + struct fc_lport *lport; + struct fc_rport_priv *rdata; + u8 cmd_type = io_req->cmd_type; + struct bnx2fc_rport *tgt = io_req->tgt; + int logo_issued; + int rc; + + BNX2FC_IO_DBG(io_req, "cmd_timeout, cmd_type = %d," + "req_flags = %lx\n", cmd_type, io_req->req_flags); + + spin_lock_bh(&tgt->tgt_lock); + if (test_and_clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags)) { + clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags); + /* + * ideally we should hold the io_req until RRQ complets, + * and release io_req from timeout hold. 
+ */ + spin_unlock_bh(&tgt->tgt_lock); + bnx2fc_send_rrq(io_req); + return; + } + if (test_and_clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags)) { + BNX2FC_IO_DBG(io_req, "IO ready for reuse now\n"); + goto done; + } + + switch (cmd_type) { + case BNX2FC_SCSI_CMD: + if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT, + &io_req->req_flags)) { + /* Handle eh_abort timeout */ + BNX2FC_IO_DBG(io_req, "eh_abort timed out\n"); + complete(&io_req->tm_done); + } else if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, + &io_req->req_flags)) { + /* Handle internally generated ABTS timeout */ + BNX2FC_IO_DBG(io_req, "ABTS timed out refcnt = %d\n", + io_req->refcount.refcount.counter); + if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE, + &io_req->req_flags))) { + + lport = io_req->port->lport; + rdata = io_req->tgt->rdata; + logo_issued = test_and_set_bit( + BNX2FC_FLAG_EXPL_LOGO, + &tgt->flags); + kref_put(&io_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + + /* Explicitly logo the target */ + if (!logo_issued) { + BNX2FC_IO_DBG(io_req, "Explicit " + "logo - tgt flags = 0x%lx\n", + tgt->flags); + + mutex_lock(&lport->disc.disc_mutex); + lport->tt.rport_logoff(rdata); + mutex_unlock(&lport->disc.disc_mutex); + } + return; + } + } else { + /* Hanlde IO timeout */ + BNX2FC_IO_DBG(io_req, "IO timed out. issue ABTS\n"); + if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL, + &io_req->req_flags)) { + BNX2FC_IO_DBG(io_req, "IO completed before " + " timer expiry\n"); + goto done; + } + + if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, + &io_req->req_flags)) { + rc = bnx2fc_initiate_abts(io_req); + if (rc == SUCCESS) + goto done; + /* + * Explicitly logo the target if + * abts initiation fails + */ + lport = io_req->port->lport; + rdata = io_req->tgt->rdata; + logo_issued = test_and_set_bit( + BNX2FC_FLAG_EXPL_LOGO, + &tgt->flags); + kref_put(&io_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + + if (!logo_issued) { + BNX2FC_IO_DBG(io_req, "Explicit " + "logo - tgt flags = 0x%lx\n", + tgt->flags); + + + mutex_lock(&lport->disc.disc_mutex); + lport->tt.rport_logoff(rdata); + mutex_unlock(&lport->disc.disc_mutex); + } + return; + } else { + BNX2FC_IO_DBG(io_req, "IO already in " + "ABTS processing\n"); + } + } + break; + case BNX2FC_ELS: + + if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) { + BNX2FC_IO_DBG(io_req, "ABTS for ELS timed out\n"); + + if (!test_and_set_bit(BNX2FC_FLAG_ABTS_DONE, + &io_req->req_flags)) { + lport = io_req->port->lport; + rdata = io_req->tgt->rdata; + logo_issued = test_and_set_bit( + BNX2FC_FLAG_EXPL_LOGO, + &tgt->flags); + kref_put(&io_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + + /* Explicitly logo the target */ + if (!logo_issued) { + BNX2FC_IO_DBG(io_req, "Explicitly logo" + "(els)\n"); + mutex_lock(&lport->disc.disc_mutex); + lport->tt.rport_logoff(rdata); + mutex_unlock(&lport->disc.disc_mutex); + } + return; + } + } else { + /* + * Handle ELS timeout. + * tgt_lock is used to sync compl path and timeout + * path. 
If els compl path is processing this IO, we + * have nothing to do here, just release the timer hold + */ + BNX2FC_IO_DBG(io_req, "ELS timed out\n"); + if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE, + &io_req->req_flags)) + goto done; + + /* Indicate the cb_func that this ELS is timed out */ + set_bit(BNX2FC_FLAG_ELS_TIMEOUT, &io_req->req_flags); + + if ((io_req->cb_func) && (io_req->cb_arg)) { + io_req->cb_func(io_req->cb_arg); + io_req->cb_arg = NULL; + } + } + break; + default: + printk(KERN_ERR PFX "cmd_timeout: invalid cmd_type %d\n", + cmd_type); + break; + } + +done: + /* release the cmd that was held when timer was set */ + kref_put(&io_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); +} + +static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code) +{ + /* Called with host lock held */ + struct scsi_cmnd *sc_cmd = io_req->sc_cmd; + + /* + * active_cmd_queue may have other command types as well, + * and during flush operation, we want to error back only + * scsi commands. + */ + if (io_req->cmd_type != BNX2FC_SCSI_CMD) + return; + + BNX2FC_IO_DBG(io_req, "scsi_done. err_code = 0x%x\n", err_code); + bnx2fc_unmap_sg_list(io_req); + io_req->sc_cmd = NULL; + if (!sc_cmd) { + printk(KERN_ERR PFX "scsi_done - sc_cmd NULL. " + "IO(0x%x) already cleaned up\n", + io_req->xid); + return; + } + sc_cmd->result = err_code << 16; + + BNX2FC_IO_DBG(io_req, "sc=%p, result=0x%x, retries=%d, allowed=%d\n", + sc_cmd, host_byte(sc_cmd->result), sc_cmd->retries, + sc_cmd->allowed); + scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd)); + sc_cmd->SCp.ptr = NULL; + sc_cmd->scsi_done(sc_cmd); +} + +struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba, + u16 min_xid, u16 max_xid) +{ + struct bnx2fc_cmd_mgr *cmgr; + struct io_bdt *bdt_info; + struct bnx2fc_cmd *io_req; + size_t len; + u32 mem_size; + u16 xid; + int i; + int num_ios; + size_t bd_tbl_sz; + + if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) { + printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x \ + and max_xid 0x%x\n", min_xid, max_xid); + return NULL; + } + BNX2FC_MISC_DBG("min xid 0x%x, max xid 0x%x\n", min_xid, max_xid); + + num_ios = max_xid - min_xid + 1; + len = (num_ios * (sizeof(struct bnx2fc_cmd *))); + len += sizeof(struct bnx2fc_cmd_mgr); + + cmgr = kzalloc(len, GFP_KERNEL); + if (!cmgr) { + printk(KERN_ERR PFX "failed to alloc cmgr\n"); + return NULL; + } + + cmgr->free_list = kzalloc(sizeof(*cmgr->free_list) * + num_possible_cpus(), GFP_KERNEL); + if (!cmgr->free_list) { + printk(KERN_ERR PFX "failed to alloc free_list\n"); + goto mem_err; + } + + cmgr->free_list_lock = kzalloc(sizeof(*cmgr->free_list_lock) * + num_possible_cpus(), GFP_KERNEL); + if (!cmgr->free_list_lock) { + printk(KERN_ERR PFX "failed to alloc free_list_lock\n"); + goto mem_err; + } + + cmgr->hba = hba; + cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1); + + for (i = 0; i < num_possible_cpus(); i++) { + INIT_LIST_HEAD(&cmgr->free_list[i]); + spin_lock_init(&cmgr->free_list_lock[i]); + } + + /* Pre-allocated pool of bnx2fc_cmds */ + xid = BNX2FC_MIN_XID; + for (i = 0; i < num_ios; i++) { + io_req = kzalloc(sizeof(*io_req), GFP_KERNEL); + + if (!io_req) { + printk(KERN_ERR PFX "failed to alloc io_req\n"); + goto mem_err; + } + + INIT_LIST_HEAD(&io_req->link); + INIT_DELAYED_WORK(&io_req->timeout_work, bnx2fc_cmd_timeout); + + io_req->xid = xid++; + if (io_req->xid >= BNX2FC_MAX_OUTSTANDING_CMNDS) + printk(KERN_ERR PFX "ERROR allocating xids - 0x%x\n", + io_req->xid); + list_add_tail(&io_req->link, + 
&cmgr->free_list[io_req->xid % num_possible_cpus()]); + io_req++; + } + + /* Allocate pool of io_bdts - one for each bnx2fc_cmd */ + mem_size = num_ios * sizeof(struct io_bdt *); + cmgr->io_bdt_pool = kmalloc(mem_size, GFP_KERNEL); + if (!cmgr->io_bdt_pool) { + printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n"); + goto mem_err; + } + + mem_size = sizeof(struct io_bdt); + for (i = 0; i < num_ios; i++) { + cmgr->io_bdt_pool[i] = kmalloc(mem_size, GFP_KERNEL); + if (!cmgr->io_bdt_pool[i]) { + printk(KERN_ERR PFX "failed to alloc " + "io_bdt_pool[%d]\n", i); + goto mem_err; + } + } + + /* Allocate an map fcoe_bdt_ctx structures */ + bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx); + for (i = 0; i < num_ios; i++) { + bdt_info = cmgr->io_bdt_pool[i]; + bdt_info->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, + bd_tbl_sz, + &bdt_info->bd_tbl_dma, + GFP_KERNEL); + if (!bdt_info->bd_tbl) { + printk(KERN_ERR PFX "failed to alloc " + "bdt_tbl[%d]\n", i); + goto mem_err; + } + } + + return cmgr; + +mem_err: + bnx2fc_cmd_mgr_free(cmgr); + return NULL; +} + +void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr) +{ + struct io_bdt *bdt_info; + struct bnx2fc_hba *hba = cmgr->hba; + size_t bd_tbl_sz; + u16 min_xid = BNX2FC_MIN_XID; + u16 max_xid = BNX2FC_MAX_XID; + int num_ios; + int i; + + num_ios = max_xid - min_xid + 1; + + /* Free fcoe_bdt_ctx structures */ + if (!cmgr->io_bdt_pool) + goto free_cmd_pool; + + bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx); + for (i = 0; i < num_ios; i++) { + bdt_info = cmgr->io_bdt_pool[i]; + if (bdt_info->bd_tbl) { + dma_free_coherent(&hba->pcidev->dev, bd_tbl_sz, + bdt_info->bd_tbl, + bdt_info->bd_tbl_dma); + bdt_info->bd_tbl = NULL; + } + } + + /* Destroy io_bdt pool */ + for (i = 0; i < num_ios; i++) { + kfree(cmgr->io_bdt_pool[i]); + cmgr->io_bdt_pool[i] = NULL; + } + + kfree(cmgr->io_bdt_pool); + cmgr->io_bdt_pool = NULL; + +free_cmd_pool: + kfree(cmgr->free_list_lock); + + /* Destroy cmd pool */ + if (!cmgr->free_list) + goto free_cmgr; + + for (i = 0; i < num_possible_cpus(); i++) { + struct list_head *list; + struct list_head *tmp; + + list_for_each_safe(list, tmp, &cmgr->free_list[i]) { + struct bnx2fc_cmd *io_req = (struct bnx2fc_cmd *)list; + list_del(&io_req->link); + kfree(io_req); + } + } + kfree(cmgr->free_list); +free_cmgr: + /* Free command manager itself */ + kfree(cmgr); +} + +struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type) +{ + struct fcoe_port *port = tgt->port; + struct bnx2fc_hba *hba = port->priv; + struct bnx2fc_cmd_mgr *cmd_mgr = hba->cmd_mgr; + struct bnx2fc_cmd *io_req; + struct list_head *listp; + struct io_bdt *bd_tbl; + u32 max_sqes; + u16 xid; + + max_sqes = tgt->max_sqes; + switch (type) { + case BNX2FC_TASK_MGMT_CMD: + max_sqes = BNX2FC_TM_MAX_SQES; + break; + case BNX2FC_ELS: + max_sqes = BNX2FC_ELS_MAX_SQES; + break; + default: + break; + } + + /* + * NOTE: Free list insertions and deletions are protected with + * cmgr lock + */ + spin_lock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); + if ((list_empty(&(cmd_mgr->free_list[smp_processor_id()]))) || + (tgt->num_active_ios.counter >= max_sqes)) { + BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available " + "ios(%d):sqes(%d)\n", + tgt->num_active_ios.counter, tgt->max_sqes); + if (list_empty(&(cmd_mgr->free_list[smp_processor_id()]))) + printk(KERN_ERR PFX "elstm_alloc: list_empty\n"); + spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); + return NULL; + } + + listp = (struct list_head *) + 
cmd_mgr->free_list[smp_processor_id()].next; + list_del_init(listp); + io_req = (struct bnx2fc_cmd *) listp; + xid = io_req->xid; + cmd_mgr->cmds[xid] = io_req; + atomic_inc(&tgt->num_active_ios); + spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); + + INIT_LIST_HEAD(&io_req->link); + + io_req->port = port; + io_req->cmd_mgr = cmd_mgr; + io_req->req_flags = 0; + io_req->cmd_type = type; + + /* Bind io_bdt for this io_req */ + /* Have a static link between io_req and io_bdt_pool */ + bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid]; + bd_tbl->io_req = io_req; + + /* Hold the io_req against deletion */ + kref_init(&io_req->refcount); + return io_req; +} +static struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt) +{ + struct fcoe_port *port = tgt->port; + struct bnx2fc_hba *hba = port->priv; + struct bnx2fc_cmd_mgr *cmd_mgr = hba->cmd_mgr; + struct bnx2fc_cmd *io_req; + struct list_head *listp; + struct io_bdt *bd_tbl; + u32 max_sqes; + u16 xid; + + max_sqes = BNX2FC_SCSI_MAX_SQES; + /* + * NOTE: Free list insertions and deletions are protected with + * cmgr lock + */ + spin_lock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); + if ((list_empty(&cmd_mgr->free_list[smp_processor_id()])) || + (tgt->num_active_ios.counter >= max_sqes)) { + spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); + return NULL; + } + + listp = (struct list_head *) + cmd_mgr->free_list[smp_processor_id()].next; + list_del_init(listp); + io_req = (struct bnx2fc_cmd *) listp; + xid = io_req->xid; + cmd_mgr->cmds[xid] = io_req; + atomic_inc(&tgt->num_active_ios); + spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); + + INIT_LIST_HEAD(&io_req->link); + + io_req->port = port; + io_req->cmd_mgr = cmd_mgr; + io_req->req_flags = 0; + + /* Bind io_bdt for this io_req */ + /* Have a static link between io_req and io_bdt_pool */ + bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid]; + bd_tbl->io_req = io_req; + + /* Hold the io_req against deletion */ + kref_init(&io_req->refcount); + return io_req; +} + +void bnx2fc_cmd_release(struct kref *ref) +{ + struct bnx2fc_cmd *io_req = container_of(ref, + struct bnx2fc_cmd, refcount); + struct bnx2fc_cmd_mgr *cmd_mgr = io_req->cmd_mgr; + + spin_lock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); + if (io_req->cmd_type != BNX2FC_SCSI_CMD) + bnx2fc_free_mp_resc(io_req); + cmd_mgr->cmds[io_req->xid] = NULL; + /* Delete IO from retire queue */ + list_del_init(&io_req->link); + /* Add it to the free list */ + list_add(&io_req->link, + &cmd_mgr->free_list[smp_processor_id()]); + atomic_dec(&io_req->tgt->num_active_ios); + spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); +} + +static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req) +{ + struct bnx2fc_mp_req *mp_req = &(io_req->mp_req); + struct bnx2fc_hba *hba = io_req->port->priv; + size_t sz = sizeof(struct fcoe_bd_ctx); + + /* clear tm flags */ + mp_req->tm_flags = 0; + if (mp_req->mp_req_bd) { + dma_free_coherent(&hba->pcidev->dev, sz, + mp_req->mp_req_bd, + mp_req->mp_req_bd_dma); + mp_req->mp_req_bd = NULL; + } + if (mp_req->mp_resp_bd) { + dma_free_coherent(&hba->pcidev->dev, sz, + mp_req->mp_resp_bd, + mp_req->mp_resp_bd_dma); + mp_req->mp_resp_bd = NULL; + } + if (mp_req->req_buf) { + dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + mp_req->req_buf, + mp_req->req_buf_dma); + mp_req->req_buf = NULL; + } + if (mp_req->resp_buf) { + dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + mp_req->resp_buf, + mp_req->resp_buf_dma); + mp_req->resp_buf = NULL; + } +} + +int 
bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
+{
+	struct bnx2fc_mp_req *mp_req;
+	struct fcoe_bd_ctx *mp_req_bd;
+	struct fcoe_bd_ctx *mp_resp_bd;
+	struct bnx2fc_hba *hba = io_req->port->priv;
+	dma_addr_t addr;
+	size_t sz;
+
+	mp_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);
+	memset(mp_req, 0, sizeof(struct bnx2fc_mp_req));
+
+	mp_req->req_len = sizeof(struct fcp_cmnd);
+	io_req->data_xfer_len = mp_req->req_len;
+	mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+					     &mp_req->req_buf_dma,
+					     GFP_ATOMIC);
+	if (!mp_req->req_buf) {
+		printk(KERN_ERR PFX "unable to alloc MP req buffer\n");
+		bnx2fc_free_mp_resc(io_req);
+		return FAILED;
+	}
+
+	mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+					      &mp_req->resp_buf_dma,
+					      GFP_ATOMIC);
+	if (!mp_req->resp_buf) {
+		printk(KERN_ERR PFX "unable to alloc TM resp buffer\n");
+		bnx2fc_free_mp_resc(io_req);
+		return FAILED;
+	}
+	memset(mp_req->req_buf, 0, PAGE_SIZE);
+	memset(mp_req->resp_buf, 0, PAGE_SIZE);
+
+	/* Allocate and map mp_req_bd and mp_resp_bd */
+	sz = sizeof(struct fcoe_bd_ctx);
+	mp_req->mp_req_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
+					       &mp_req->mp_req_bd_dma,
+					       GFP_ATOMIC);
+	if (!mp_req->mp_req_bd) {
+		printk(KERN_ERR PFX "unable to alloc MP req bd\n");
+		bnx2fc_free_mp_resc(io_req);
+		return FAILED;
+	}
+	mp_req->mp_resp_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
+						&mp_req->mp_resp_bd_dma,
+						GFP_ATOMIC);
+	if (!mp_req->mp_resp_bd) {
+		printk(KERN_ERR PFX "unable to alloc MP resp bd\n");
+		bnx2fc_free_mp_resc(io_req);
+		return FAILED;
+	}
+	/* Fill bd table */
+	addr = mp_req->req_buf_dma;
+	mp_req_bd = mp_req->mp_req_bd;
+	mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff;
+	mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32);
+	mp_req_bd->buf_len = PAGE_SIZE;
+	mp_req_bd->flags = 0;
+
+	/*
+	 * MP buffer is either a task mgmt command or an ELS.
+ * So the assumption is that it consumes a single bd + * entry in the bd table + */ + mp_resp_bd = mp_req->mp_resp_bd; + addr = mp_req->resp_buf_dma; + mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff; + mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32); + mp_resp_bd->buf_len = PAGE_SIZE; + mp_resp_bd->flags = 0; + + return SUCCESS; +} + +static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags) +{ + struct fc_lport *lport; + struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); + struct fc_rport_libfc_priv *rp = rport->dd_data; + struct fcoe_port *port; + struct bnx2fc_hba *hba; + struct bnx2fc_rport *tgt; + struct bnx2fc_cmd *io_req; + struct bnx2fc_mp_req *tm_req; + struct fcoe_task_ctx_entry *task; + struct fcoe_task_ctx_entry *task_page; + struct Scsi_Host *host = sc_cmd->device->host; + struct fc_frame_header *fc_hdr; + struct fcp_cmnd *fcp_cmnd; + int task_idx, index; + int rc = SUCCESS; + u16 xid; + u32 sid, did; + unsigned long start = jiffies; + + lport = shost_priv(host); + port = lport_priv(lport); + hba = port->priv; + + if (rport == NULL) { + printk(KERN_ALERT PFX "device_reset: rport is NULL\n"); + rc = FAILED; + goto tmf_err; + } + + rc = fc_block_scsi_eh(sc_cmd); + if (rc) + return rc; + + if (lport->state != LPORT_ST_READY || !(lport->link_up)) { + printk(KERN_ERR PFX "device_reset: link is not ready\n"); + rc = FAILED; + goto tmf_err; + } + /* rport and tgt are allocated together, so tgt should be non-NULL */ + tgt = (struct bnx2fc_rport *)&rp[1]; + + if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) { + printk(KERN_ERR PFX "device_reset: tgt not offloaded\n"); + rc = FAILED; + goto tmf_err; + } +retry_tmf: + io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_TASK_MGMT_CMD); + if (!io_req) { + if (time_after(jiffies, start + HZ)) { + printk(KERN_ERR PFX "tmf: Failed TMF"); + rc = FAILED; + goto tmf_err; + } + msleep(20); + goto retry_tmf; + } + /* Initialize rest of io_req fields */ + io_req->sc_cmd = sc_cmd; + io_req->port = port; + io_req->tgt = tgt; + + tm_req = (struct bnx2fc_mp_req *)&(io_req->mp_req); + + rc = bnx2fc_init_mp_req(io_req); + if (rc == FAILED) { + printk(KERN_ERR PFX "Task mgmt MP request init failed\n"); + kref_put(&io_req->refcount, bnx2fc_cmd_release); + goto tmf_err; + } + + /* Set TM flags */ + io_req->io_req_flags = 0; + tm_req->tm_flags = tm_flags; + + /* Fill FCP_CMND */ + bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf); + fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf; + memset(fcp_cmnd->fc_cdb, 0, sc_cmd->cmd_len); + fcp_cmnd->fc_dl = 0; + + /* Fill FC header */ + fc_hdr = &(tm_req->req_fc_hdr); + sid = tgt->sid; + did = rport->port_id; + __fc_fill_fc_hdr(fc_hdr, FC_RCTL_DD_UNSOL_CMD, did, sid, + FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | + FC_FC_SEQ_INIT, 0); + /* Obtain exchange id */ + xid = io_req->xid; + + BNX2FC_TGT_DBG(tgt, "Initiate TMF - xid = 0x%x\n", xid); + task_idx = xid/BNX2FC_TASKS_PER_PAGE; + index = xid % BNX2FC_TASKS_PER_PAGE; + + /* Initialize task context for this IO request */ + task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx]; + task = &(task_page[index]); + bnx2fc_init_mp_task(io_req, task); + + sc_cmd->SCp.ptr = (char *)io_req; + + /* Obtain free SQ entry */ + spin_lock_bh(&tgt->tgt_lock); + bnx2fc_add_2_sq(tgt, xid); + + /* Enqueue the io_req to active_tm_queue */ + io_req->on_tmf_queue = 1; + list_add_tail(&io_req->link, &tgt->active_tm_queue); + + init_completion(&io_req->tm_done); + io_req->wait_for_comp = 1; + + /* Ring doorbell */ + 
bnx2fc_ring_doorbell(tgt); + spin_unlock_bh(&tgt->tgt_lock); + + rc = wait_for_completion_timeout(&io_req->tm_done, + BNX2FC_TM_TIMEOUT * HZ); + spin_lock_bh(&tgt->tgt_lock); + + io_req->wait_for_comp = 0; + if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags))) + set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags); + + spin_unlock_bh(&tgt->tgt_lock); + + if (!rc) { + printk(KERN_ERR PFX "task mgmt command failed...\n"); + rc = FAILED; + } else { + printk(KERN_ERR PFX "task mgmt command success...\n"); + rc = SUCCESS; + } +tmf_err: + return rc; +} + +int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req) +{ + struct fc_lport *lport; + struct bnx2fc_rport *tgt = io_req->tgt; + struct fc_rport *rport = tgt->rport; + struct fc_rport_priv *rdata = tgt->rdata; + struct bnx2fc_hba *hba; + struct fcoe_port *port; + struct bnx2fc_cmd *abts_io_req; + struct fcoe_task_ctx_entry *task; + struct fcoe_task_ctx_entry *task_page; + struct fc_frame_header *fc_hdr; + struct bnx2fc_mp_req *abts_req; + int task_idx, index; + u32 sid, did; + u16 xid; + int rc = SUCCESS; + u32 r_a_tov = rdata->r_a_tov; + + /* called with tgt_lock held */ + BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_abts\n"); + + port = io_req->port; + hba = port->priv; + lport = port->lport; + + if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) { + printk(KERN_ERR PFX "initiate_abts: tgt not offloaded\n"); + rc = FAILED; + goto abts_err; + } + + if (rport == NULL) { + printk(KERN_ALERT PFX "initiate_abts: rport is NULL\n"); + rc = FAILED; + goto abts_err; + } + + if (lport->state != LPORT_ST_READY || !(lport->link_up)) { + printk(KERN_ERR PFX "initiate_abts: link is not ready\n"); + rc = FAILED; + goto abts_err; + } + + abts_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ABTS); + if (!abts_io_req) { + printk(KERN_ERR PFX "abts: couldnt allocate cmd\n"); + rc = FAILED; + goto abts_err; + } + + /* Initialize rest of io_req fields */ + abts_io_req->sc_cmd = NULL; + abts_io_req->port = port; + abts_io_req->tgt = tgt; + abts_io_req->data_xfer_len = 0; /* No data transfer for ABTS */ + + abts_req = (struct bnx2fc_mp_req *)&(abts_io_req->mp_req); + memset(abts_req, 0, sizeof(struct bnx2fc_mp_req)); + + /* Fill FC header */ + fc_hdr = &(abts_req->req_fc_hdr); + + /* Obtain oxid and rxid for the original exchange to be aborted */ + fc_hdr->fh_ox_id = htons(io_req->xid); + fc_hdr->fh_rx_id = htons(io_req->task->rx_wr_tx_rd.rx_id); + + sid = tgt->sid; + did = rport->port_id; + + __fc_fill_fc_hdr(fc_hdr, FC_RCTL_BA_ABTS, did, sid, + FC_TYPE_BLS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | + FC_FC_SEQ_INIT, 0); + + xid = abts_io_req->xid; + BNX2FC_IO_DBG(abts_io_req, "ABTS io_req\n"); + task_idx = xid/BNX2FC_TASKS_PER_PAGE; + index = xid % BNX2FC_TASKS_PER_PAGE; + + /* Initialize task context for this IO request */ + task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx]; + task = &(task_page[index]); + bnx2fc_init_mp_task(abts_io_req, task); + + /* + * ABTS task is a temporary task that will be cleaned up + * irrespective of ABTS response. We need to start the timer + * for the original exchange, as the CQE is posted for the original + * IO request. + * + * Timer for ABTS is started only when it is originated by a + * TM request. For the ABTS issued as part of ULP timeout, + * scsi-ml maintains the timers. 
+ */ + + /* if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))*/ + bnx2fc_cmd_timer_set(io_req, 2 * r_a_tov); + + /* Obtain free SQ entry */ + bnx2fc_add_2_sq(tgt, xid); + + /* Ring doorbell */ + bnx2fc_ring_doorbell(tgt); + +abts_err: + return rc; +} + +int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req) +{ + struct fc_lport *lport; + struct bnx2fc_rport *tgt = io_req->tgt; + struct bnx2fc_hba *hba; + struct fcoe_port *port; + struct bnx2fc_cmd *cleanup_io_req; + struct fcoe_task_ctx_entry *task; + struct fcoe_task_ctx_entry *task_page; + int task_idx, index; + u16 xid, orig_xid; + int rc = 0; + + /* ASSUMPTION: called with tgt_lock held */ + BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_cleanup\n"); + + port = io_req->port; + hba = port->priv; + lport = port->lport; + + cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP); + if (!cleanup_io_req) { + printk(KERN_ERR PFX "cleanup: couldnt allocate cmd\n"); + rc = -1; + goto cleanup_err; + } + + /* Initialize rest of io_req fields */ + cleanup_io_req->sc_cmd = NULL; + cleanup_io_req->port = port; + cleanup_io_req->tgt = tgt; + cleanup_io_req->data_xfer_len = 0; /* No data transfer for cleanup */ + + xid = cleanup_io_req->xid; + + task_idx = xid/BNX2FC_TASKS_PER_PAGE; + index = xid % BNX2FC_TASKS_PER_PAGE; + + /* Initialize task context for this IO request */ + task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx]; + task = &(task_page[index]); + orig_xid = io_req->xid; + + BNX2FC_IO_DBG(io_req, "CLEANUP io_req xid = 0x%x\n", xid); + + bnx2fc_init_cleanup_task(cleanup_io_req, task, orig_xid); + + /* Obtain free SQ entry */ + bnx2fc_add_2_sq(tgt, xid); + + /* Ring doorbell */ + bnx2fc_ring_doorbell(tgt); + +cleanup_err: + return rc; +} + +/** + * bnx2fc_eh_target_reset: Reset a target + * + * @sc_cmd: SCSI command + * + * Set from SCSI host template to send task mgmt command to the target + * and wait for the response + */ +int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd) +{ + return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET); +} + +/** + * bnx2fc_eh_device_reset - Reset a single LUN + * + * @sc_cmd: SCSI command + * + * Set from SCSI host template to send task mgmt command to the target + * and wait for the response + */ +int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd) +{ + return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET); +} + +/** + * bnx2fc_eh_abort - eh_abort_handler api to abort an outstanding + * SCSI command + * + * @sc_cmd: SCSI_ML command pointer + * + * SCSI abort request handler + */ +int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd) +{ + struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); + struct fc_rport_libfc_priv *rp = rport->dd_data; + struct bnx2fc_cmd *io_req; + struct fc_lport *lport; + struct bnx2fc_rport *tgt; + int rc = FAILED; + + + rc = fc_block_scsi_eh(sc_cmd); + if (rc) + return rc; + + lport = shost_priv(sc_cmd->device->host); + if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) { + printk(KERN_ALERT PFX "eh_abort: link not ready\n"); + return rc; + } + + tgt = (struct bnx2fc_rport *)&rp[1]; + + BNX2FC_TGT_DBG(tgt, "Entered bnx2fc_eh_abort\n"); + + spin_lock_bh(&tgt->tgt_lock); + io_req = (struct bnx2fc_cmd *)sc_cmd->SCp.ptr; + if (!io_req) { + /* Command might have just completed */ + printk(KERN_ERR PFX "eh_abort: io_req is NULL\n"); + spin_unlock_bh(&tgt->tgt_lock); + return SUCCESS; + } + BNX2FC_IO_DBG(io_req, "eh_abort - refcnt = %d\n", + io_req->refcount.refcount.counter); + + /* Hold IO request across abort processing */ + 
kref_get(&io_req->refcount); + + BUG_ON(tgt != io_req->tgt); + + /* Remove the io_req from the active_q. */ + /* + * Task Mgmt functions (LUN RESET & TGT RESET) will not + * issue an ABTS on this particular IO req, as the + * io_req is no longer in the active_q. + */ + if (tgt->flush_in_prog) { + printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) " + "flush in progress\n", io_req->xid); + kref_put(&io_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + return SUCCESS; + } + + if (io_req->on_active_queue == 0) { + printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) " + "not on active_q\n", io_req->xid); + /* + * This condition can happen only due to the FW bug, + * where we do not receive cleanup response from + * the FW. Handle this case gracefully by erroring + * back the IO request to SCSI-ml + */ + bnx2fc_scsi_done(io_req, DID_ABORT); + + kref_put(&io_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + return SUCCESS; + } + + /* + * Only eh_abort processing will remove the IO from + * active_cmd_q before processing the request. this is + * done to avoid race conditions between IOs aborted + * as part of task management completion and eh_abort + * processing + */ + list_del_init(&io_req->link); + io_req->on_active_queue = 0; + /* Move IO req to retire queue */ + list_add_tail(&io_req->link, &tgt->io_retire_queue); + + init_completion(&io_req->tm_done); + io_req->wait_for_comp = 1; + + if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) { + /* Cancel the current timer running on this io_req */ + if (cancel_delayed_work(&io_req->timeout_work)) + kref_put(&io_req->refcount, + bnx2fc_cmd_release); /* drop timer hold */ + set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags); + rc = bnx2fc_initiate_abts(io_req); + } else { + printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) " + "already in abts processing\n", io_req->xid); + kref_put(&io_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + return SUCCESS; + } + if (rc == FAILED) { + kref_put(&io_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + return rc; + } + spin_unlock_bh(&tgt->tgt_lock); + + wait_for_completion(&io_req->tm_done); + + spin_lock_bh(&tgt->tgt_lock); + io_req->wait_for_comp = 0; + if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE, + &io_req->req_flags))) { + /* Let the scsi-ml try to recover this command */ + printk(KERN_ERR PFX "abort failed, xid = 0x%x\n", + io_req->xid); + rc = FAILED; + } else { + /* + * We come here even when there was a race condition + * between timeout and abts completion, and abts + * completion happens just in time. 
+		 */
+		BNX2FC_IO_DBG(io_req, "abort succeeded\n");
+		rc = SUCCESS;
+		bnx2fc_scsi_done(io_req, DID_ABORT);
+		kref_put(&io_req->refcount, bnx2fc_cmd_release);
+	}
+
+	/* release the reference taken in eh_abort */
+	kref_put(&io_req->refcount, bnx2fc_cmd_release);
+	spin_unlock_bh(&tgt->tgt_lock);
+	return rc;
+}
+
+void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
+				  struct fcoe_task_ctx_entry *task,
+				  u8 num_rq)
+{
+	BNX2FC_IO_DBG(io_req, "Entered process_cleanup_compl "
+		      "refcnt = %d, cmd_type = %d\n",
+		      io_req->refcount.refcount.counter, io_req->cmd_type);
+	bnx2fc_scsi_done(io_req, DID_ERROR);
+	kref_put(&io_req->refcount, bnx2fc_cmd_release);
+}
+
+void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
+			       struct fcoe_task_ctx_entry *task,
+			       u8 num_rq)
+{
+	u32 r_ctl;
+	u32 r_a_tov = FC_DEF_R_A_TOV;
+	u8 issue_rrq = 0;
+	struct bnx2fc_rport *tgt = io_req->tgt;
+
+	BNX2FC_IO_DBG(io_req, "Entered process_abts_compl xid = 0x%x"
+		      "refcnt = %d, cmd_type = %d\n",
+		      io_req->xid,
+		      io_req->refcount.refcount.counter, io_req->cmd_type);
+
+	if (test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
+			     &io_req->req_flags)) {
+		BNX2FC_IO_DBG(io_req, "Timer context finished processing"
+			      " this io\n");
+		return;
+	}
+
+	/* Do not issue RRQ as this IO is already cleaned up */
+	if (test_and_set_bit(BNX2FC_FLAG_IO_CLEANUP,
+			     &io_req->req_flags))
+		goto io_compl;
+
+	/*
+	 * For ABTS issued due to SCSI eh_abort_handler, timeout
+	 * values are maintained by scsi-ml itself. Cancel timeout
+	 * in case ABTS issued as part of task management function
+	 * or due to FW error.
+	 */
+	if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))
+		if (cancel_delayed_work(&io_req->timeout_work))
+			kref_put(&io_req->refcount,
+				 bnx2fc_cmd_release); /* drop timer hold */
+
+	r_ctl = task->cmn.general.rsp_info.abts_rsp.r_ctl;
+
+	switch (r_ctl) {
+	case FC_RCTL_BA_ACC:
+		/*
+		 * Don't release this cmd yet. It will be released
+		 * after we get RRQ response
+		 */
+		BNX2FC_IO_DBG(io_req, "ABTS response - ACC Send RRQ\n");
+		issue_rrq = 1;
+		break;
+
+	case FC_RCTL_BA_RJT:
+		BNX2FC_IO_DBG(io_req, "ABTS response - RJT\n");
+		break;
+	default:
+		printk(KERN_ERR PFX "Unknown ABTS response\n");
+		break;
+	}
+
+	if (issue_rrq) {
+		BNX2FC_IO_DBG(io_req, "Issue RRQ after R_A_TOV\n");
+		set_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
+	}
+	set_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
+	bnx2fc_cmd_timer_set(io_req, r_a_tov);
+
+io_compl:
+	if (io_req->wait_for_comp) {
+		if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
+				       &io_req->req_flags))
+			complete(&io_req->tm_done);
+	} else {
+		/*
+		 * We end up here when ABTS is issued in an
+		 * asynchronous context, i.e., as part
+		 * of task management completion, or
+		 * when FW error is received or when the
+		 * ABTS is issued when the IO is timed
+		 * out.
+ */ + + if (io_req->on_active_queue) { + list_del_init(&io_req->link); + io_req->on_active_queue = 0; + /* Move IO req to retire queue */ + list_add_tail(&io_req->link, &tgt->io_retire_queue); + } + bnx2fc_scsi_done(io_req, DID_ERROR); + kref_put(&io_req->refcount, bnx2fc_cmd_release); + } +} + +static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req) +{ + struct scsi_cmnd *sc_cmd = io_req->sc_cmd; + struct bnx2fc_rport *tgt = io_req->tgt; + struct list_head *list; + struct list_head *tmp; + struct bnx2fc_cmd *cmd; + int tm_lun = sc_cmd->device->lun; + int rc = 0; + int lun; + + /* called with tgt_lock held */ + BNX2FC_IO_DBG(io_req, "Entered bnx2fc_lun_reset_cmpl\n"); + /* + * Walk thru the active_ios queue and ABORT the IO + * that matches with the LUN that was reset + */ + list_for_each_safe(list, tmp, &tgt->active_cmd_queue) { + BNX2FC_TGT_DBG(tgt, "LUN RST cmpl: scan for pending IOs\n"); + cmd = (struct bnx2fc_cmd *)list; + lun = cmd->sc_cmd->device->lun; + if (lun == tm_lun) { + /* Initiate ABTS on this cmd */ + if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, + &cmd->req_flags)) { + /* cancel the IO timeout */ + if (cancel_delayed_work(&io_req->timeout_work)) + kref_put(&io_req->refcount, + bnx2fc_cmd_release); + /* timer hold */ + rc = bnx2fc_initiate_abts(cmd); + /* abts shouldnt fail in this context */ + WARN_ON(rc != SUCCESS); + } else + printk(KERN_ERR PFX "lun_rst: abts already in" + " progress for this IO 0x%x\n", + cmd->xid); + } + } +} + +static void bnx2fc_tgt_reset_cmpl(struct bnx2fc_cmd *io_req) +{ + struct bnx2fc_rport *tgt = io_req->tgt; + struct list_head *list; + struct list_head *tmp; + struct bnx2fc_cmd *cmd; + int rc = 0; + + /* called with tgt_lock held */ + BNX2FC_IO_DBG(io_req, "Entered bnx2fc_tgt_reset_cmpl\n"); + /* + * Walk thru the active_ios queue and ABORT the IO + * that matches with the LUN that was reset + */ + list_for_each_safe(list, tmp, &tgt->active_cmd_queue) { + BNX2FC_TGT_DBG(tgt, "TGT RST cmpl: scan for pending IOs\n"); + cmd = (struct bnx2fc_cmd *)list; + /* Initiate ABTS */ + if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, + &cmd->req_flags)) { + /* cancel the IO timeout */ + if (cancel_delayed_work(&io_req->timeout_work)) + kref_put(&io_req->refcount, + bnx2fc_cmd_release); /* timer hold */ + rc = bnx2fc_initiate_abts(cmd); + /* abts shouldnt fail in this context */ + WARN_ON(rc != SUCCESS); + + } else + printk(KERN_ERR PFX "tgt_rst: abts already in progress" + " for this IO 0x%x\n", cmd->xid); + } +} + +void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task, u8 num_rq) +{ + struct bnx2fc_mp_req *tm_req; + struct fc_frame_header *fc_hdr; + struct scsi_cmnd *sc_cmd = io_req->sc_cmd; + u64 *hdr; + u64 *temp_hdr; + void *rsp_buf; + + /* Called with tgt_lock held */ + BNX2FC_IO_DBG(io_req, "Entered process_tm_compl\n"); + + if (!(test_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags))) + set_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags); + else { + /* TM has already timed out and we got + * delayed completion. Ignore completion + * processing. 
+ */ + return; + } + + tm_req = &(io_req->mp_req); + fc_hdr = &(tm_req->resp_fc_hdr); + hdr = (u64 *)fc_hdr; + temp_hdr = (u64 *) + &task->cmn.general.cmd_info.mp_fc_frame.fc_hdr; + hdr[0] = cpu_to_be64(temp_hdr[0]); + hdr[1] = cpu_to_be64(temp_hdr[1]); + hdr[2] = cpu_to_be64(temp_hdr[2]); + + tm_req->resp_len = task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_off; + + rsp_buf = tm_req->resp_buf; + + if (fc_hdr->fh_r_ctl == FC_RCTL_DD_CMD_STATUS) { + bnx2fc_parse_fcp_rsp(io_req, + (struct fcoe_fcp_rsp_payload *) + rsp_buf, num_rq); + if (io_req->fcp_rsp_code == 0) { + /* TM successful */ + if (tm_req->tm_flags & FCP_TMF_LUN_RESET) + bnx2fc_lun_reset_cmpl(io_req); + else if (tm_req->tm_flags & FCP_TMF_TGT_RESET) + bnx2fc_tgt_reset_cmpl(io_req); + } + } else { + printk(KERN_ERR PFX "tmf's fc_hdr r_ctl = 0x%x\n", + fc_hdr->fh_r_ctl); + } + if (!sc_cmd->SCp.ptr) { + printk(KERN_ALERT PFX "tm_compl: SCp.ptr is NULL\n"); + return; + } + switch (io_req->fcp_status) { + case FC_GOOD: + if (io_req->cdb_status == 0) { + /* Good IO completion */ + sc_cmd->result = DID_OK << 16; + } else { + /* Transport status is good, SCSI status not good */ + sc_cmd->result = (DID_OK << 16) | io_req->cdb_status; + } + if (io_req->fcp_resid) + scsi_set_resid(sc_cmd, io_req->fcp_resid); + break; + + default: + BNX2FC_IO_DBG(io_req, "process_tm_compl: fcp_status = %d\n", + io_req->fcp_status); + break; + } + + sc_cmd = io_req->sc_cmd; + io_req->sc_cmd = NULL; + + /* check if the io_req exists in tgt's tmf_q */ + if (io_req->on_tmf_queue) { + + list_del_init(&io_req->link); + io_req->on_tmf_queue = 0; + } else { + + printk(KERN_ALERT PFX "Command not on active_cmd_queue!\n"); + return; + } + + sc_cmd->SCp.ptr = NULL; + sc_cmd->scsi_done(sc_cmd); + + kref_put(&io_req->refcount, bnx2fc_cmd_release); + if (io_req->wait_for_comp) { + BNX2FC_IO_DBG(io_req, "tm_compl - wake up the waiter\n"); + complete(&io_req->tm_done); + } +} + +static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len, + int bd_index) +{ + struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl; + int frag_size, sg_frags; + + sg_frags = 0; + while (sg_len) { + if (sg_len >= BNX2FC_BD_SPLIT_SZ) + frag_size = BNX2FC_BD_SPLIT_SZ; + else + frag_size = sg_len; + bd[bd_index + sg_frags].buf_addr_lo = addr & 0xffffffff; + bd[bd_index + sg_frags].buf_addr_hi = addr >> 32; + bd[bd_index + sg_frags].buf_len = (u16)frag_size; + bd[bd_index + sg_frags].flags = 0; + + addr += (u64) frag_size; + sg_frags++; + sg_len -= frag_size; + } + return sg_frags; + +} + +static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req) +{ + struct scsi_cmnd *sc = io_req->sc_cmd; + struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl; + struct scatterlist *sg; + int byte_count = 0; + int sg_count = 0; + int bd_count = 0; + int sg_frags; + unsigned int sg_len; + u64 addr; + int i; + + sg_count = scsi_dma_map(sc); + scsi_for_each_sg(sc, sg, sg_count, i) { + sg_len = sg_dma_len(sg); + addr = sg_dma_address(sg); + if (sg_len > BNX2FC_MAX_BD_LEN) { + sg_frags = bnx2fc_split_bd(io_req, addr, sg_len, + bd_count); + } else { + + sg_frags = 1; + bd[bd_count].buf_addr_lo = addr & 0xffffffff; + bd[bd_count].buf_addr_hi = addr >> 32; + bd[bd_count].buf_len = (u16)sg_len; + bd[bd_count].flags = 0; + } + bd_count += sg_frags; + byte_count += sg_len; + } + if (byte_count != scsi_bufflen(sc)) + printk(KERN_ERR PFX "byte_count = %d != scsi_bufflen = %d, " + "task_id = 0x%x\n", byte_count, scsi_bufflen(sc), + io_req->xid); + return bd_count; +} + +static void bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req) 
+{ + struct scsi_cmnd *sc = io_req->sc_cmd; + struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl; + int bd_count; + + if (scsi_sg_count(sc)) + bd_count = bnx2fc_map_sg(io_req); + else { + bd_count = 0; + bd[0].buf_addr_lo = bd[0].buf_addr_hi = 0; + bd[0].buf_len = bd[0].flags = 0; + } + io_req->bd_tbl->bd_valid = bd_count; +} + +static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req) +{ + struct scsi_cmnd *sc = io_req->sc_cmd; + + if (io_req->bd_tbl->bd_valid && sc) { + scsi_dma_unmap(sc); + io_req->bd_tbl->bd_valid = 0; + } +} + +void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req, + struct fcp_cmnd *fcp_cmnd) +{ + struct scsi_cmnd *sc_cmd = io_req->sc_cmd; + char tag[2]; + + memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd)); + + int_to_scsilun(sc_cmd->device->lun, + (struct scsi_lun *) fcp_cmnd->fc_lun); + + + fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len); + memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len); + + fcp_cmnd->fc_cmdref = 0; + fcp_cmnd->fc_pri_ta = 0; + fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags; + fcp_cmnd->fc_flags = io_req->io_req_flags; + + if (scsi_populate_tag_msg(sc_cmd, tag)) { + switch (tag[0]) { + case HEAD_OF_QUEUE_TAG: + fcp_cmnd->fc_pri_ta = FCP_PTA_HEADQ; + break; + case ORDERED_QUEUE_TAG: + fcp_cmnd->fc_pri_ta = FCP_PTA_ORDERED; + break; + default: + fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE; + break; + } + } else { + fcp_cmnd->fc_pri_ta = 0; + } +} + +static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req, + struct fcoe_fcp_rsp_payload *fcp_rsp, + u8 num_rq) +{ + struct scsi_cmnd *sc_cmd = io_req->sc_cmd; + struct bnx2fc_rport *tgt = io_req->tgt; + u8 rsp_flags = fcp_rsp->fcp_flags.flags; + u32 rq_buff_len = 0; + int i; + unsigned char *rq_data; + unsigned char *dummy; + int fcp_sns_len = 0; + int fcp_rsp_len = 0; + + io_req->fcp_status = FC_GOOD; + io_req->fcp_resid = fcp_rsp->fcp_resid; + + io_req->scsi_comp_flags = rsp_flags; + CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status = + fcp_rsp->scsi_status_code; + + /* Fetch fcp_rsp_info and fcp_sns_info if available */ + if (num_rq) { + + /* + * We do not anticipate num_rq >1, as the linux defined + * SCSI_SENSE_BUFFERSIZE is 96 bytes + 8 bytes of FCP_RSP_INFO + * 256 bytes of single rq buffer is good enough to hold this. + */ + + if (rsp_flags & + FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID) { + fcp_rsp_len = rq_buff_len + = fcp_rsp->fcp_rsp_len; + } + + if (rsp_flags & + FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID) { + fcp_sns_len = fcp_rsp->fcp_sns_len; + rq_buff_len += fcp_rsp->fcp_sns_len; + } + + io_req->fcp_rsp_len = fcp_rsp_len; + io_req->fcp_sns_len = fcp_sns_len; + + if (rq_buff_len > num_rq * BNX2FC_RQ_BUF_SZ) { + /* Invalid sense sense length. 
*/ + printk(KERN_ALERT PFX "invalid sns length %d\n", + rq_buff_len); + /* reset rq_buff_len */ + rq_buff_len = num_rq * BNX2FC_RQ_BUF_SZ; + } + + rq_data = bnx2fc_get_next_rqe(tgt, 1); + + if (num_rq > 1) { + /* We do not need extra sense data */ + for (i = 1; i < num_rq; i++) + dummy = bnx2fc_get_next_rqe(tgt, 1); + } + + /* fetch fcp_rsp_code */ + if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) { + /* Only for task management function */ + io_req->fcp_rsp_code = rq_data[3]; + printk(KERN_ERR PFX "fcp_rsp_code = %d\n", + io_req->fcp_rsp_code); + } + + /* fetch sense data */ + rq_data += fcp_rsp_len; + + if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) { + printk(KERN_ERR PFX "Truncating sense buffer\n"); + fcp_sns_len = SCSI_SENSE_BUFFERSIZE; + } + + memset(sc_cmd->sense_buffer, 0, sizeof(sc_cmd->sense_buffer)); + if (fcp_sns_len) + memcpy(sc_cmd->sense_buffer, rq_data, fcp_sns_len); + + /* return RQ entries */ + for (i = 0; i < num_rq; i++) + bnx2fc_return_rqe(tgt, 1); + } +} + +/** + * bnx2fc_queuecommand - Queuecommand function of the scsi template + * + * @host: The Scsi_Host the command was issued to + * @sc_cmd: struct scsi_cmnd to be executed + * + * This is the IO strategy routine, called by SCSI-ML + **/ +int bnx2fc_queuecommand(struct Scsi_Host *host, + struct scsi_cmnd *sc_cmd) +{ + struct fc_lport *lport = shost_priv(host); + struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); + struct fc_rport_libfc_priv *rp = rport->dd_data; + struct bnx2fc_rport *tgt; + struct bnx2fc_cmd *io_req; + int rc = 0; + int rval; + + rval = fc_remote_port_chkready(rport); + if (rval) { + sc_cmd->result = rval; + sc_cmd->scsi_done(sc_cmd); + return 0; + } + + if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) { + rc = SCSI_MLQUEUE_HOST_BUSY; + goto exit_qcmd; + } + + /* rport and tgt are allocated together, so tgt should be non-NULL */ + tgt = (struct bnx2fc_rport *)&rp[1]; + + if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) { + /* + * Session is not offloaded yet. Let SCSI-ml retry + * the command. 
+ */ + rc = SCSI_MLQUEUE_TARGET_BUSY; + goto exit_qcmd; + } + + io_req = bnx2fc_cmd_alloc(tgt); + if (!io_req) { + rc = SCSI_MLQUEUE_HOST_BUSY; + goto exit_qcmd; + } + io_req->sc_cmd = sc_cmd; + + if (bnx2fc_post_io_req(tgt, io_req)) { + printk(KERN_ERR PFX "Unable to post io_req\n"); + rc = SCSI_MLQUEUE_HOST_BUSY; + goto exit_qcmd; + } +exit_qcmd: + return rc; +} + +void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task, + u8 num_rq) +{ + struct fcoe_fcp_rsp_payload *fcp_rsp; + struct bnx2fc_rport *tgt = io_req->tgt; + struct scsi_cmnd *sc_cmd; + struct Scsi_Host *host; + + + /* scsi_cmd_cmpl is called with tgt lock held */ + + if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) { + /* we will not receive ABTS response for this IO */ + BNX2FC_IO_DBG(io_req, "Timer context finished processing " + "this scsi cmd\n"); + } + + /* Cancel the timeout_work, as we received IO completion */ + if (cancel_delayed_work(&io_req->timeout_work)) + kref_put(&io_req->refcount, + bnx2fc_cmd_release); /* drop timer hold */ + + sc_cmd = io_req->sc_cmd; + if (sc_cmd == NULL) { + printk(KERN_ERR PFX "scsi_cmd_compl - sc_cmd is NULL\n"); + return; + } + + /* Fetch fcp_rsp from task context and perform cmd completion */ + fcp_rsp = (struct fcoe_fcp_rsp_payload *) + &(task->cmn.general.rsp_info.fcp_rsp.payload); + + /* parse fcp_rsp and obtain sense data from RQ if available */ + bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq); + + host = sc_cmd->device->host; + if (!sc_cmd->SCp.ptr) { + printk(KERN_ERR PFX "SCp.ptr is NULL\n"); + return; + } + io_req->sc_cmd = NULL; + + if (io_req->on_active_queue) { + list_del_init(&io_req->link); + io_req->on_active_queue = 0; + /* Move IO req to retire queue */ + list_add_tail(&io_req->link, &tgt->io_retire_queue); + } else { + /* This should not happen, but could have been pulled + * by bnx2fc_flush_active_ios(), or during a race + * between command abort and (late) completion. 
+ */ + BNX2FC_IO_DBG(io_req, "xid not on active_cmd_queue\n"); + if (io_req->wait_for_comp) + if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT, + &io_req->req_flags)) + complete(&io_req->tm_done); + } + + bnx2fc_unmap_sg_list(io_req); + + switch (io_req->fcp_status) { + case FC_GOOD: + if (io_req->cdb_status == 0) { + /* Good IO completion */ + sc_cmd->result = DID_OK << 16; + } else { + /* Transport status is good, SCSI status not good */ + BNX2FC_IO_DBG(io_req, "scsi_cmpl: cdb_status = %d" + " fcp_resid = 0x%x\n", + io_req->cdb_status, io_req->fcp_resid); + sc_cmd->result = (DID_OK << 16) | io_req->cdb_status; + } + if (io_req->fcp_resid) + scsi_set_resid(sc_cmd, io_req->fcp_resid); + break; + default: + printk(KERN_ALERT PFX "scsi_cmd_compl: fcp_status = %d\n", + io_req->fcp_status); + break; + } + sc_cmd->SCp.ptr = NULL; + sc_cmd->scsi_done(sc_cmd); + kref_put(&io_req->refcount, bnx2fc_cmd_release); +} + +static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt, + struct bnx2fc_cmd *io_req) +{ + struct fcoe_task_ctx_entry *task; + struct fcoe_task_ctx_entry *task_page; + struct scsi_cmnd *sc_cmd = io_req->sc_cmd; + struct fcoe_port *port = tgt->port; + struct bnx2fc_hba *hba = port->priv; + struct fc_lport *lport = port->lport; + struct fcoe_dev_stats *stats; + int task_idx, index; + u16 xid; + + /* Initialize rest of io_req fields */ + io_req->cmd_type = BNX2FC_SCSI_CMD; + io_req->port = port; + io_req->tgt = tgt; + io_req->data_xfer_len = scsi_bufflen(sc_cmd); + sc_cmd->SCp.ptr = (char *)io_req; + + stats = per_cpu_ptr(lport->dev_stats, get_cpu()); + if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) { + io_req->io_req_flags = BNX2FC_READ; + stats->InputRequests++; + stats->InputBytes += io_req->data_xfer_len; + } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) { + io_req->io_req_flags = BNX2FC_WRITE; + stats->OutputRequests++; + stats->OutputBytes += io_req->data_xfer_len; + } else { + io_req->io_req_flags = 0; + stats->ControlRequests++; + } + put_cpu(); + + xid = io_req->xid; + + /* Build buffer descriptor list for firmware from sg list */ + bnx2fc_build_bd_list_from_sg(io_req); + + task_idx = xid / BNX2FC_TASKS_PER_PAGE; + index = xid % BNX2FC_TASKS_PER_PAGE; + + /* Initialize task context for this IO request */ + task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx]; + task = &(task_page[index]); + bnx2fc_init_task(io_req, task); + + spin_lock_bh(&tgt->tgt_lock); + + if (tgt->flush_in_prog) { + printk(KERN_ERR PFX "Flush in progress..Host Busy\n"); + kref_put(&io_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + return -EAGAIN; + } + + if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) { + printk(KERN_ERR PFX "Session not ready...post_io\n"); + kref_put(&io_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + return -EAGAIN; + } + + /* Time IO req */ + bnx2fc_cmd_timer_set(io_req, BNX2FC_IO_TIMEOUT); + /* Obtain free SQ entry */ + bnx2fc_add_2_sq(tgt, xid); + + /* Enqueue the io_req to active_cmd_queue */ + + io_req->on_active_queue = 1; + /* move io_req from pending_queue to active_queue */ + list_add_tail(&io_req->link, &tgt->active_cmd_queue); + + /* Ring doorbell */ + bnx2fc_ring_doorbell(tgt); + spin_unlock_bh(&tgt->tgt_lock); + return 0; +} diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c new file mode 100644 index 000000000000..7ea93af60260 --- /dev/null +++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c @@ -0,0 +1,844 @@ +/* bnx2fc_tgt.c: Broadcom NetXtreme II Linux FCoE offload driver. 
+ * Handles operations such as session offload/upload etc, and manages + * session resources such as connection id and qp resources. + * + * Copyright (c) 2008 - 2010 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com) + */ + +#include "bnx2fc.h" +static void bnx2fc_upld_timer(unsigned long data); +static void bnx2fc_ofld_timer(unsigned long data); +static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt, + struct fcoe_port *port, + struct fc_rport_priv *rdata); +static u32 bnx2fc_alloc_conn_id(struct bnx2fc_hba *hba, + struct bnx2fc_rport *tgt); +static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, + struct bnx2fc_rport *tgt); +static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba, + struct bnx2fc_rport *tgt); +static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id); + +static void bnx2fc_upld_timer(unsigned long data) +{ + + struct bnx2fc_rport *tgt = (struct bnx2fc_rport *)data; + + BNX2FC_TGT_DBG(tgt, "upld_timer - Upload compl not received!!\n"); + /* fake upload completion */ + clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags); + set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags); + wake_up_interruptible(&tgt->upld_wait); +} + +static void bnx2fc_ofld_timer(unsigned long data) +{ + + struct bnx2fc_rport *tgt = (struct bnx2fc_rport *)data; + + BNX2FC_TGT_DBG(tgt, "entered bnx2fc_ofld_timer\n"); + /* NOTE: This function should never be called, as + * offload should never timeout + */ + /* + * If the timer has expired, this session is dead + * Clear offloaded flag and logout of this device. + * Since OFFLOADED flag is cleared, this case + * will be considered as offload error and the + * port will be logged off, and conn_id, session + * resources are freed up in bnx2fc_offload_session + */ + clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags); + set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags); + wake_up_interruptible(&tgt->ofld_wait); +} + +static void bnx2fc_offload_session(struct fcoe_port *port, + struct bnx2fc_rport *tgt, + struct fc_rport_priv *rdata) +{ + struct fc_lport *lport = rdata->local_port; + struct fc_rport *rport = rdata->rport; + struct bnx2fc_hba *hba = port->priv; + int rval; + int i = 0; + + /* Initialize bnx2fc_rport */ + /* NOTE: tgt is already bzero'd */ + rval = bnx2fc_init_tgt(tgt, port, rdata); + if (rval) { + printk(KERN_ERR PFX "Failed to allocate conn id for " + "port_id (%6x)\n", rport->port_id); + goto ofld_err; + } + + /* Allocate session resources */ + rval = bnx2fc_alloc_session_resc(hba, tgt); + if (rval) { + printk(KERN_ERR PFX "Failed to allocate resources\n"); + goto ofld_err; + } + + /* + * Initialize FCoE session offload process. + * Upon completion of offload process add + * rport to list of rports + */ +retry_ofld: + clear_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags); + rval = bnx2fc_send_session_ofld_req(port, tgt); + if (rval) { + printk(KERN_ERR PFX "ofld_req failed\n"); + goto ofld_err; + } + + /* + * wait for the session is offloaded and enabled. 3 Secs + * should be ample time for this process to complete. 
+ */ + setup_timer(&tgt->ofld_timer, bnx2fc_ofld_timer, (unsigned long)tgt); + mod_timer(&tgt->ofld_timer, jiffies + BNX2FC_FW_TIMEOUT); + + wait_event_interruptible(tgt->ofld_wait, + (test_bit( + BNX2FC_FLAG_OFLD_REQ_CMPL, + &tgt->flags))); + if (signal_pending(current)) + flush_signals(current); + + del_timer_sync(&tgt->ofld_timer); + + if (!(test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags))) { + if (test_and_clear_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, + &tgt->flags)) { + BNX2FC_TGT_DBG(tgt, "ctx_alloc_failure, " + "retry ofld..%d\n", i++); + msleep_interruptible(1000); + if (i > 3) { + i = 0; + goto ofld_err; + } + goto retry_ofld; + } + goto ofld_err; + } + if (bnx2fc_map_doorbell(tgt)) { + printk(KERN_ERR PFX "map doorbell failed - no mem\n"); + /* upload will take care of cleaning up sess resc */ + lport->tt.rport_logoff(rdata); + } + return; + +ofld_err: + /* couldn't offload the session. log off from this rport */ + BNX2FC_TGT_DBG(tgt, "bnx2fc_offload_session - offload error\n"); + lport->tt.rport_logoff(rdata); + /* Free session resources */ + bnx2fc_free_session_resc(hba, tgt); + if (tgt->fcoe_conn_id != -1) + bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id); +} + +void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt) +{ + struct bnx2fc_cmd *io_req; + struct list_head *list; + struct list_head *tmp; + int rc; + int i = 0; + BNX2FC_TGT_DBG(tgt, "Entered flush_active_ios - %d\n", + tgt->num_active_ios.counter); + + spin_lock_bh(&tgt->tgt_lock); + tgt->flush_in_prog = 1; + + list_for_each_safe(list, tmp, &tgt->active_cmd_queue) { + i++; + io_req = (struct bnx2fc_cmd *)list; + list_del_init(&io_req->link); + io_req->on_active_queue = 0; + BNX2FC_IO_DBG(io_req, "cmd_queue cleanup\n"); + + if (cancel_delayed_work(&io_req->timeout_work)) { + if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT, + &io_req->req_flags)) { + /* Handle eh_abort timeout */ + BNX2FC_IO_DBG(io_req, "eh_abort for IO " + "cleaned up\n"); + complete(&io_req->tm_done); + } + kref_put(&io_req->refcount, + bnx2fc_cmd_release); /* drop timer hold */ + } + + set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags); + set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags); + rc = bnx2fc_initiate_cleanup(io_req); + BUG_ON(rc); + } + + list_for_each_safe(list, tmp, &tgt->els_queue) { + i++; + io_req = (struct bnx2fc_cmd *)list; + list_del_init(&io_req->link); + io_req->on_active_queue = 0; + + BNX2FC_IO_DBG(io_req, "els_queue cleanup\n"); + + if (cancel_delayed_work(&io_req->timeout_work)) + kref_put(&io_req->refcount, + bnx2fc_cmd_release); /* drop timer hold */ + + if ((io_req->cb_func) && (io_req->cb_arg)) { + io_req->cb_func(io_req->cb_arg); + io_req->cb_arg = NULL; + } + + rc = bnx2fc_initiate_cleanup(io_req); + BUG_ON(rc); + } + + list_for_each_safe(list, tmp, &tgt->io_retire_queue) { + i++; + io_req = (struct bnx2fc_cmd *)list; + list_del_init(&io_req->link); + + BNX2FC_IO_DBG(io_req, "retire_queue flush\n"); + + if (cancel_delayed_work(&io_req->timeout_work)) + kref_put(&io_req->refcount, bnx2fc_cmd_release); + + clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags); + } + + BNX2FC_TGT_DBG(tgt, "IOs flushed = %d\n", i); + i = 0; + spin_unlock_bh(&tgt->tgt_lock); + /* wait for active_ios to go to 0 */ + while ((tgt->num_active_ios.counter != 0) && (i++ < BNX2FC_WAIT_CNT)) + msleep(25); + if (tgt->num_active_ios.counter != 0) + printk(KERN_ERR PFX "CLEANUP on port 0x%x:" + " active_ios = %d\n", + tgt->rdata->ids.port_id, tgt->num_active_ios.counter); + spin_lock_bh(&tgt->tgt_lock); + tgt->flush_in_prog = 0; + spin_unlock_bh(&tgt->tgt_lock); +} + 
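[Editor's note] The offload path above and the upload path that follows share one handshake: post a request to the firmware, sleep on a wait queue until either the completion handler or a safety timer sets a "request finished" flag, then judge success from a second flag that only the real completion handler sets. The fragment below is a minimal, self-contained sketch of that pattern and is not part of the patch; the demo_* names and DEMO_FW_TIMEOUT are invented for illustration (the driver itself uses BNX2FC_FW_TIMEOUT and flags such as BNX2FC_FLAG_OFLD_REQ_CMPL and BNX2FC_FLAG_OFFLOADED).

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/wait.h>

#define DEMO_FW_TIMEOUT		(3 * HZ)	/* same order as BNX2FC_FW_TIMEOUT */

enum {
	DEMO_FLAG_REQ_CMPL,	/* set by the completion handler or the timer */
	DEMO_FLAG_OFFLOADED,	/* set only by the real completion handler */
};

struct demo_session {
	unsigned long		flags;
	wait_queue_head_t	wait;
	struct timer_list	timer;
};

/* Safety timer: fake a completion so the waiter cannot sleep forever */
static void demo_fw_timer(unsigned long data)
{
	struct demo_session *s = (struct demo_session *)data;

	set_bit(DEMO_FLAG_REQ_CMPL, &s->flags);
	wake_up_interruptible(&s->wait);
}

/* A real firmware completion handler would set both flags and wake the waiter */
static int demo_wait_for_fw(struct demo_session *s)
{
	init_waitqueue_head(&s->wait);
	clear_bit(DEMO_FLAG_REQ_CMPL, &s->flags);

	/* ... post the offload/upload request to the firmware here ... */

	setup_timer(&s->timer, demo_fw_timer, (unsigned long)s);
	mod_timer(&s->timer, jiffies + DEMO_FW_TIMEOUT);

	wait_event_interruptible(s->wait,
				 test_bit(DEMO_FLAG_REQ_CMPL, &s->flags));
	if (signal_pending(current))
		flush_signals(current);
	del_timer_sync(&s->timer);

	/* only the firmware completion path reports success */
	return test_bit(DEMO_FLAG_OFFLOADED, &s->flags) ? 0 : -ETIMEDOUT;
}

Keeping the "request finished" bit separate from the "request succeeded" bit is what lets the timer fake a completion to unblock the waiter without ever being mistaken for a successful offload or upload.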
+static void bnx2fc_upload_session(struct fcoe_port *port, + struct bnx2fc_rport *tgt) +{ + struct bnx2fc_hba *hba = port->priv; + + BNX2FC_TGT_DBG(tgt, "upload_session: active_ios = %d\n", + tgt->num_active_ios.counter); + + /* + * Called with hba->hba_mutex held. + * This is a blocking call + */ + clear_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags); + bnx2fc_send_session_disable_req(port, tgt); + + /* + * wait for upload to complete. 3 Secs + * should be sufficient time for this process to complete. + */ + setup_timer(&tgt->upld_timer, bnx2fc_upld_timer, (unsigned long)tgt); + mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT); + + BNX2FC_TGT_DBG(tgt, "waiting for disable compl\n"); + wait_event_interruptible(tgt->upld_wait, + (test_bit( + BNX2FC_FLAG_UPLD_REQ_COMPL, + &tgt->flags))); + + if (signal_pending(current)) + flush_signals(current); + + del_timer_sync(&tgt->upld_timer); + + /* + * traverse thru the active_q and tmf_q and cleanup + * IOs in these lists + */ + BNX2FC_TGT_DBG(tgt, "flush/upload - disable wait flags = 0x%lx\n", + tgt->flags); + bnx2fc_flush_active_ios(tgt); + + /* Issue destroy KWQE */ + if (test_bit(BNX2FC_FLAG_DISABLED, &tgt->flags)) { + BNX2FC_TGT_DBG(tgt, "send destroy req\n"); + clear_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags); + bnx2fc_send_session_destroy_req(hba, tgt); + + /* wait for destroy to complete */ + setup_timer(&tgt->upld_timer, + bnx2fc_upld_timer, (unsigned long)tgt); + mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT); + + wait_event_interruptible(tgt->upld_wait, + (test_bit( + BNX2FC_FLAG_UPLD_REQ_COMPL, + &tgt->flags))); + + if (!(test_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags))) + printk(KERN_ERR PFX "ERROR!! destroy timed out\n"); + + BNX2FC_TGT_DBG(tgt, "destroy wait complete flags = 0x%lx\n", + tgt->flags); + if (signal_pending(current)) + flush_signals(current); + + del_timer_sync(&tgt->upld_timer); + + } else + printk(KERN_ERR PFX "ERROR!! DISABLE req timed out, destroy" + " not sent to FW\n"); + + /* Free session resources */ + spin_lock_bh(&tgt->cq_lock); + bnx2fc_free_session_resc(hba, tgt); + bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id); + spin_unlock_bh(&tgt->cq_lock); +} + +static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt, + struct fcoe_port *port, + struct fc_rport_priv *rdata) +{ + + struct fc_rport *rport = rdata->rport; + struct bnx2fc_hba *hba = port->priv; + + tgt->rport = rport; + tgt->rdata = rdata; + tgt->port = port; + + if (hba->num_ofld_sess >= BNX2FC_NUM_MAX_SESS) { + BNX2FC_TGT_DBG(tgt, "exceeded max sessions. 
logoff this tgt\n"); + tgt->fcoe_conn_id = -1; + return -1; + } + + tgt->fcoe_conn_id = bnx2fc_alloc_conn_id(hba, tgt); + if (tgt->fcoe_conn_id == -1) + return -1; + + BNX2FC_TGT_DBG(tgt, "init_tgt - conn_id = 0x%x\n", tgt->fcoe_conn_id); + + tgt->max_sqes = BNX2FC_SQ_WQES_MAX; + tgt->max_rqes = BNX2FC_RQ_WQES_MAX; + tgt->max_cqes = BNX2FC_CQ_WQES_MAX; + + /* Initialize the toggle bit */ + tgt->sq_curr_toggle_bit = 1; + tgt->cq_curr_toggle_bit = 1; + tgt->sq_prod_idx = 0; + tgt->cq_cons_idx = 0; + tgt->rq_prod_idx = 0x8000; + tgt->rq_cons_idx = 0; + atomic_set(&tgt->num_active_ios, 0); + + tgt->work_time_slice = 2; + + spin_lock_init(&tgt->tgt_lock); + spin_lock_init(&tgt->cq_lock); + + /* Initialize active_cmd_queue list */ + INIT_LIST_HEAD(&tgt->active_cmd_queue); + + /* Initialize IO retire queue */ + INIT_LIST_HEAD(&tgt->io_retire_queue); + + INIT_LIST_HEAD(&tgt->els_queue); + + /* Initialize active_tm_queue list */ + INIT_LIST_HEAD(&tgt->active_tm_queue); + + init_waitqueue_head(&tgt->ofld_wait); + init_waitqueue_head(&tgt->upld_wait); + + return 0; +} + +/** + * This event_callback is called after successful completion of libfc + * initiated target login. bnx2fc can proceed with initiating the session + * establishment. + */ +void bnx2fc_rport_event_handler(struct fc_lport *lport, + struct fc_rport_priv *rdata, + enum fc_rport_event event) +{ + struct fcoe_port *port = lport_priv(lport); + struct bnx2fc_hba *hba = port->priv; + struct fc_rport *rport = rdata->rport; + struct fc_rport_libfc_priv *rp; + struct bnx2fc_rport *tgt; + u32 port_id; + + BNX2FC_HBA_DBG(lport, "rport_event_hdlr: event = %d, port_id = 0x%x\n", + event, rdata->ids.port_id); + switch (event) { + case RPORT_EV_READY: + if (!rport) { + printk(KERN_ALERT PFX "rport is NULL: ERROR!\n"); + break; + } + + rp = rport->dd_data; + if (rport->port_id == FC_FID_DIR_SERV) { + /* + * bnx2fc_rport structure doesnt exist for + * directory server. + * We should not come here, as lport will + * take care of fabric login + */ + printk(KERN_ALERT PFX "%x - rport_event_handler ERROR\n", + rdata->ids.port_id); + break; + } + + if (rdata->spp_type != FC_TYPE_FCP) { + BNX2FC_HBA_DBG(lport, "not FCP type target." + " not offloading\n"); + break; + } + if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) { + BNX2FC_HBA_DBG(lport, "not FCP_TARGET" + " not offloading\n"); + break; + } + + /* + * Offlaod process is protected with hba mutex. + * Use the same mutex_lock for upload process too + */ + mutex_lock(&hba->hba_mutex); + tgt = (struct bnx2fc_rport *)&rp[1]; + + /* This can happen when ADISC finds the same target */ + if (test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags)) { + BNX2FC_TGT_DBG(tgt, "already offloaded\n"); + mutex_unlock(&hba->hba_mutex); + return; + } + + /* + * Offload the session. This is a blocking call, and will + * wait until the session is offloaded. + */ + bnx2fc_offload_session(port, tgt, rdata); + + BNX2FC_TGT_DBG(tgt, "OFFLOAD num_ofld_sess = %d\n", + hba->num_ofld_sess); + + if (test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags)) { + /* + * Session is offloaded and enabled. Map + * doorbell register for this target + */ + BNX2FC_TGT_DBG(tgt, "sess offloaded\n"); + /* This counter is protected with hba mutex */ + hba->num_ofld_sess++; + + set_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags); + } else { + /* + * Offload or enable would have failed. 
+ * In offload/enable completion path, the + * rport would have already been removed + */ + BNX2FC_TGT_DBG(tgt, "Port is being logged off as " + "offloaded flag not set\n"); + } + mutex_unlock(&hba->hba_mutex); + break; + case RPORT_EV_LOGO: + case RPORT_EV_FAILED: + case RPORT_EV_STOP: + port_id = rdata->ids.port_id; + if (port_id == FC_FID_DIR_SERV) + break; + + if (!rport) { + printk(KERN_ALERT PFX "%x - rport not created Yet!!\n", + port_id); + break; + } + rp = rport->dd_data; + mutex_lock(&hba->hba_mutex); + /* + * Perform session upload. Note that rdata->peers is already + * removed from disc->rports list before we get this event. + */ + tgt = (struct bnx2fc_rport *)&rp[1]; + + if (!(test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags))) { + mutex_unlock(&hba->hba_mutex); + break; + } + clear_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags); + + bnx2fc_upload_session(port, tgt); + hba->num_ofld_sess--; + BNX2FC_TGT_DBG(tgt, "UPLOAD num_ofld_sess = %d\n", + hba->num_ofld_sess); + /* + * Try to wake up the linkdown wait thread. If num_ofld_sess + * is 0, the waiting therad wakes up + */ + if ((hba->wait_for_link_down) && + (hba->num_ofld_sess == 0)) { + wake_up_interruptible(&hba->shutdown_wait); + } + if (test_bit(BNX2FC_FLAG_EXPL_LOGO, &tgt->flags)) { + printk(KERN_ERR PFX "Relogin to the tgt\n"); + mutex_lock(&lport->disc.disc_mutex); + lport->tt.rport_login(rdata); + mutex_unlock(&lport->disc.disc_mutex); + } + mutex_unlock(&hba->hba_mutex); + + break; + + case RPORT_EV_NONE: + break; + } +} + +/** + * bnx2fc_tgt_lookup() - Lookup a bnx2fc_rport by port_id + * + * @port: fcoe_port struct to lookup the target port on + * @port_id: The remote port ID to look up + */ +struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port, + u32 port_id) +{ + struct bnx2fc_hba *hba = port->priv; + struct bnx2fc_rport *tgt; + struct fc_rport_priv *rdata; + int i; + + for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) { + tgt = hba->tgt_ofld_list[i]; + if ((tgt) && (tgt->port == port)) { + rdata = tgt->rdata; + if (rdata->ids.port_id == port_id) { + if (rdata->rp_state != RPORT_ST_DELETE) { + BNX2FC_TGT_DBG(tgt, "rport " + "obtained\n"); + return tgt; + } else { + printk(KERN_ERR PFX "rport 0x%x " + "is in DELETED state\n", + rdata->ids.port_id); + return NULL; + } + } + } + } + return NULL; +} + + +/** + * bnx2fc_alloc_conn_id - allocates FCOE Connection id + * + * @hba: pointer to adapter structure + * @tgt: pointer to bnx2fc_rport structure + */ +static u32 bnx2fc_alloc_conn_id(struct bnx2fc_hba *hba, + struct bnx2fc_rport *tgt) +{ + u32 conn_id, next; + + /* called with hba mutex held */ + + /* + * tgt_ofld_list access is synchronized using + * both hba mutex and hba lock. Atleast hba mutex or + * hba lock needs to be held for read access. 
+ */ + + spin_lock_bh(&hba->hba_lock); + next = hba->next_conn_id; + conn_id = hba->next_conn_id++; + if (hba->next_conn_id == BNX2FC_NUM_MAX_SESS) + hba->next_conn_id = 0; + + while (hba->tgt_ofld_list[conn_id] != NULL) { + conn_id++; + if (conn_id == BNX2FC_NUM_MAX_SESS) + conn_id = 0; + + if (conn_id == next) { + /* No free conn_ids are available */ + spin_unlock_bh(&hba->hba_lock); + return -1; + } + } + hba->tgt_ofld_list[conn_id] = tgt; + tgt->fcoe_conn_id = conn_id; + spin_unlock_bh(&hba->hba_lock); + return conn_id; +} + +static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id) +{ + /* called with hba mutex held */ + spin_lock_bh(&hba->hba_lock); + hba->tgt_ofld_list[conn_id] = NULL; + hba->next_conn_id = conn_id; + spin_unlock_bh(&hba->hba_lock); +} + +/** + *bnx2fc_alloc_session_resc - Allocate qp resources for the session + * + */ +static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, + struct bnx2fc_rport *tgt) +{ + dma_addr_t page; + int num_pages; + u32 *pbl; + + /* Allocate and map SQ */ + tgt->sq_mem_size = tgt->max_sqes * BNX2FC_SQ_WQE_SIZE; + tgt->sq_mem_size = (tgt->sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; + + tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size, + &tgt->sq_dma, GFP_KERNEL); + if (!tgt->sq) { + printk(KERN_ALERT PFX "unable to allocate SQ memory %d\n", + tgt->sq_mem_size); + goto mem_alloc_failure; + } + memset(tgt->sq, 0, tgt->sq_mem_size); + + /* Allocate and map CQ */ + tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE; + tgt->cq_mem_size = (tgt->cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; + + tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size, + &tgt->cq_dma, GFP_KERNEL); + if (!tgt->cq) { + printk(KERN_ALERT PFX "unable to allocate CQ memory %d\n", + tgt->cq_mem_size); + goto mem_alloc_failure; + } + memset(tgt->cq, 0, tgt->cq_mem_size); + + /* Allocate and map RQ and RQ PBL */ + tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE; + tgt->rq_mem_size = (tgt->rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; + + tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size, + &tgt->rq_dma, GFP_KERNEL); + if (!tgt->rq) { + printk(KERN_ALERT PFX "unable to allocate RQ memory %d\n", + tgt->rq_mem_size); + goto mem_alloc_failure; + } + memset(tgt->rq, 0, tgt->rq_mem_size); + + tgt->rq_pbl_size = (tgt->rq_mem_size / PAGE_SIZE) * sizeof(void *); + tgt->rq_pbl_size = (tgt->rq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; + + tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size, + &tgt->rq_pbl_dma, GFP_KERNEL); + if (!tgt->rq_pbl) { + printk(KERN_ALERT PFX "unable to allocate RQ PBL %d\n", + tgt->rq_pbl_size); + goto mem_alloc_failure; + } + + memset(tgt->rq_pbl, 0, tgt->rq_pbl_size); + num_pages = tgt->rq_mem_size / PAGE_SIZE; + page = tgt->rq_dma; + pbl = (u32 *)tgt->rq_pbl; + + while (num_pages--) { + *pbl = (u32)page; + pbl++; + *pbl = (u32)((u64)page >> 32); + pbl++; + page += PAGE_SIZE; + } + + /* Allocate and map XFERQ */ + tgt->xferq_mem_size = tgt->max_sqes * BNX2FC_XFERQ_WQE_SIZE; + tgt->xferq_mem_size = (tgt->xferq_mem_size + (PAGE_SIZE - 1)) & + PAGE_MASK; + + tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size, + &tgt->xferq_dma, GFP_KERNEL); + if (!tgt->xferq) { + printk(KERN_ALERT PFX "unable to allocate XFERQ %d\n", + tgt->xferq_mem_size); + goto mem_alloc_failure; + } + memset(tgt->xferq, 0, tgt->xferq_mem_size); + + /* Allocate and map CONFQ & CONFQ PBL */ + tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE; + tgt->confq_mem_size = 
(tgt->confq_mem_size + (PAGE_SIZE - 1)) & + PAGE_MASK; + + tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size, + &tgt->confq_dma, GFP_KERNEL); + if (!tgt->confq) { + printk(KERN_ALERT PFX "unable to allocate CONFQ %d\n", + tgt->confq_mem_size); + goto mem_alloc_failure; + } + memset(tgt->confq, 0, tgt->confq_mem_size); + + tgt->confq_pbl_size = + (tgt->confq_mem_size / PAGE_SIZE) * sizeof(void *); + tgt->confq_pbl_size = + (tgt->confq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; + + tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev, + tgt->confq_pbl_size, + &tgt->confq_pbl_dma, GFP_KERNEL); + if (!tgt->confq_pbl) { + printk(KERN_ALERT PFX "unable to allocate CONFQ PBL %d\n", + tgt->confq_pbl_size); + goto mem_alloc_failure; + } + + memset(tgt->confq_pbl, 0, tgt->confq_pbl_size); + num_pages = tgt->confq_mem_size / PAGE_SIZE; + page = tgt->confq_dma; + pbl = (u32 *)tgt->confq_pbl; + + while (num_pages--) { + *pbl = (u32)page; + pbl++; + *pbl = (u32)((u64)page >> 32); + pbl++; + page += PAGE_SIZE; + } + + /* Allocate and map ConnDB */ + tgt->conn_db_mem_size = sizeof(struct fcoe_conn_db); + + tgt->conn_db = dma_alloc_coherent(&hba->pcidev->dev, + tgt->conn_db_mem_size, + &tgt->conn_db_dma, GFP_KERNEL); + if (!tgt->conn_db) { + printk(KERN_ALERT PFX "unable to allocate conn_db %d\n", + tgt->conn_db_mem_size); + goto mem_alloc_failure; + } + memset(tgt->conn_db, 0, tgt->conn_db_mem_size); + + + /* Allocate and map LCQ */ + tgt->lcq_mem_size = (tgt->max_sqes + 8) * BNX2FC_SQ_WQE_SIZE; + tgt->lcq_mem_size = (tgt->lcq_mem_size + (PAGE_SIZE - 1)) & + PAGE_MASK; + + tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size, + &tgt->lcq_dma, GFP_KERNEL); + + if (!tgt->lcq) { + printk(KERN_ALERT PFX "unable to allocate lcq %d\n", + tgt->lcq_mem_size); + goto mem_alloc_failure; + } + memset(tgt->lcq, 0, tgt->lcq_mem_size); + + /* Arm CQ */ + tgt->conn_db->cq_arm.lo = -1; + tgt->conn_db->rq_prod = 0x8000; + + return 0; + +mem_alloc_failure: + bnx2fc_free_session_resc(hba, tgt); + bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id); + return -ENOMEM; +} + +/** + * bnx2i_free_session_resc - free qp resources for the session + * + * @hba: adapter structure pointer + * @tgt: bnx2fc_rport structure pointer + * + * Free QP resources - SQ/RQ/CQ/XFERQ memory and PBL + */ +static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba, + struct bnx2fc_rport *tgt) +{ + BNX2FC_TGT_DBG(tgt, "Freeing up session resources\n"); + + if (tgt->ctx_base) { + iounmap(tgt->ctx_base); + tgt->ctx_base = NULL; + } + /* Free LCQ */ + if (tgt->lcq) { + dma_free_coherent(&hba->pcidev->dev, tgt->lcq_mem_size, + tgt->lcq, tgt->lcq_dma); + tgt->lcq = NULL; + } + /* Free connDB */ + if (tgt->conn_db) { + dma_free_coherent(&hba->pcidev->dev, tgt->conn_db_mem_size, + tgt->conn_db, tgt->conn_db_dma); + tgt->conn_db = NULL; + } + /* Free confq and confq pbl */ + if (tgt->confq_pbl) { + dma_free_coherent(&hba->pcidev->dev, tgt->confq_pbl_size, + tgt->confq_pbl, tgt->confq_pbl_dma); + tgt->confq_pbl = NULL; + } + if (tgt->confq) { + dma_free_coherent(&hba->pcidev->dev, tgt->confq_mem_size, + tgt->confq, tgt->confq_dma); + tgt->confq = NULL; + } + /* Free XFERQ */ + if (tgt->xferq) { + dma_free_coherent(&hba->pcidev->dev, tgt->xferq_mem_size, + tgt->xferq, tgt->xferq_dma); + tgt->xferq = NULL; + } + /* Free RQ PBL and RQ */ + if (tgt->rq_pbl) { + dma_free_coherent(&hba->pcidev->dev, tgt->rq_pbl_size, + tgt->rq_pbl, tgt->rq_pbl_dma); + tgt->rq_pbl = NULL; + } + if (tgt->rq) { + dma_free_coherent(&hba->pcidev->dev, 
tgt->rq_mem_size, + tgt->rq, tgt->rq_dma); + tgt->rq = NULL; + } + /* Free CQ */ + if (tgt->cq) { + dma_free_coherent(&hba->pcidev->dev, tgt->cq_mem_size, + tgt->cq, tgt->cq_dma); + tgt->cq = NULL; + } + /* Free SQ */ + if (tgt->sq) { + dma_free_coherent(&hba->pcidev->dev, tgt->sq_mem_size, + tgt->sq, tgt->sq_dma); + tgt->sq = NULL; + } +} -- cgit v1.2.3 From 904f0bc482201fa86e75c330d79dfd11be494cf8 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Wed, 2 Mar 2011 15:52:51 -0800 Subject: [SCSI] target: Fix volume size misreporting for volumes > 2TB the target infrastructure fails to send the correct conventional size to READ_CAPACITY that force a retry with READ_CAPACITY_16, which reads the capacity for devices > 2TB. Fix by adding the correct return to trigger RC(16). Reported-by: Ben Jarvis Signed-off-by: Signed-off-by: Nicholas A. Bellinger Cc: stable@kernel.org Signed-off-by: James Bottomley --- drivers/target/target_core_cdb.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c index 366080baf474..7f19c8b7b84c 100644 --- a/drivers/target/target_core_cdb.c +++ b/drivers/target/target_core_cdb.c @@ -667,7 +667,13 @@ target_emulate_readcapacity(struct se_cmd *cmd) { struct se_device *dev = SE_DEV(cmd); unsigned char *buf = cmd->t_task->t_task_buf; - u32 blocks = dev->transport->get_blocks(dev); + unsigned long long blocks_long = dev->transport->get_blocks(dev); + u32 blocks; + + if (blocks_long >= 0x00000000ffffffff) + blocks = 0xffffffff; + else + blocks = (u32)blocks_long; buf[0] = (blocks >> 24) & 0xff; buf[1] = (blocks >> 16) & 0xff; -- cgit v1.2.3 From 9143a9612277abc6e4ddced2bc54a120734834c6 Mon Sep 17 00:00:00 2001 From: "scameron@beardog.cce.hp.com" Date: Mon, 7 Mar 2011 10:44:16 -0600 Subject: [SCSI] hpsa: fix incorrect PCI IDs and add two new ones (2nd try) My first attempt was botched, got the wrong PCI Device ID (used PCI_DEVICE_ID_HP_CISSE, should have been PCI_DEVICE_ID_HP_CISSF) Signed-off-by: Stephen M. 
Cameron Signed-off-by: James Bottomley --- drivers/scsi/hpsa.c | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index c30591f84a06..09a529e34452 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -89,11 +89,13 @@ static const struct pci_device_id hpsa_pci_device_id[] = { {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233}, - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3250}, - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3251}, - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3252}, - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3253}, - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3254}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356}, {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0}, {0,} @@ -113,11 +115,13 @@ static struct board_type products[] = { {0x3249103C, "Smart Array P812", &SA5_access}, {0x324a103C, "Smart Array P712m", &SA5_access}, {0x324b103C, "Smart Array P711m", &SA5_access}, - {0x3250103C, "Smart Array", &SA5_access}, - {0x3250113C, "Smart Array", &SA5_access}, - {0x3250123C, "Smart Array", &SA5_access}, - {0x3250133C, "Smart Array", &SA5_access}, - {0x3250143C, "Smart Array", &SA5_access}, + {0x3350103C, "Smart Array", &SA5_access}, + {0x3351103C, "Smart Array", &SA5_access}, + {0x3352103C, "Smart Array", &SA5_access}, + {0x3353103C, "Smart Array", &SA5_access}, + {0x3354103C, "Smart Array", &SA5_access}, + {0x3355103C, "Smart Array", &SA5_access}, + {0x3356103C, "Smart Array", &SA5_access}, {0xFFFF103C, "Unknown Smart Array", &SA5_access}, }; -- cgit v1.2.3 From 72f7d322fd60ce1a0579136dec7b26b0801ded4b Mon Sep 17 00:00:00 2001 From: "Martin K. Petersen" Date: Tue, 8 Mar 2011 02:03:59 -0500 Subject: [SCSI] Include protection operation in SCSI command trace When debugging DIF/DIX it is very helpful to be able to see which DIX operation is associated with the scsi_cmnd. Include the protection op in the SCSI command trace. Signed-off-by: Martin K. Petersen Signed-off-by: James Bottomley --- drivers/scsi/scsi_lib.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 8d4ef8efa3cd..e531acfd98a2 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1062,6 +1062,7 @@ static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev, cmd->request = req; cmd->cmnd = req->cmd; + cmd->prot_op = SCSI_PROT_NORMAL; return cmd; } -- cgit v1.2.3 From c98a0eb0e90d1caa8a92913cd45462102cbd5eaf Mon Sep 17 00:00:00 2001 From: "Martin K. Petersen" Date: Tue, 8 Mar 2011 02:07:15 -0500 Subject: [SCSI] sd: Logical Block Provisioning update SBC3r26 contains many changes to the Logical Block Provisioning interfaces (formerly known as Thin Provisioning ditto). This patch implements support for both the old and new schemes using the same heuristic as before (whether the LBP VPD page is present). 
The new code also allows the provisioning mode (i.e. choice of command) to be overridden on a per-device basis via sysfs. Two additional modes are supported in this version: - WRITE SAME(10) with the UNMAP bit set - WRITE SAME(10) without the UNMAP bit set. This allows us to support devices that predate the TP/LBP enhancements in SBC3 and which work by way zero-detection Switching between modes has been consolidated in a helper function that also updates the block layer topology according to the limitations of the chosen command. I experimented with trying WRITE SAME(16) if UNMAP fails, WRITE SAME(10) if WRITE SAME(16) fails, etc. but found several devices that got cranky. So for now we'll disable discard if one of the commands fail. The user still has the option of selecting a different mode in sysfs. Signed-off-by: Martin K. Petersen Signed-off-by: James Bottomley --- drivers/scsi/scsi_lib.c | 7 ++ drivers/scsi/sd.c | 229 +++++++++++++++++++++++++++++++++++++----------- drivers/scsi/sd.h | 25 ++++-- 3 files changed, 203 insertions(+), 58 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index e531acfd98a2..3829bf058aef 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -867,6 +867,13 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) description = "Host Data Integrity Failure"; action = ACTION_FAIL; error = -EILSEQ; + /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */ + } else if ((sshdr.asc == 0x20 || sshdr.asc == 0x24) && + (cmd->cmnd[0] == UNMAP || + cmd->cmnd[0] == WRITE_SAME_16 || + cmd->cmnd[0] == WRITE_SAME)) { + description = "Discard failure"; + action = ACTION_FAIL; } else action = ACTION_FAIL; break; diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index e56730214c05..3be5db5d6343 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -96,6 +96,7 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC); #define SD_MINORS 0 #endif +static void sd_config_discard(struct scsi_disk *, unsigned int); static int sd_revalidate_disk(struct gendisk *); static void sd_unlock_native_capacity(struct gendisk *disk); static int sd_probe(struct device *); @@ -294,7 +295,54 @@ sd_show_thin_provisioning(struct device *dev, struct device_attribute *attr, { struct scsi_disk *sdkp = to_scsi_disk(dev); - return snprintf(buf, 20, "%u\n", sdkp->thin_provisioning); + return snprintf(buf, 20, "%u\n", sdkp->lbpme); +} + +static const char *lbp_mode[] = { + [SD_LBP_FULL] = "full", + [SD_LBP_UNMAP] = "unmap", + [SD_LBP_WS16] = "writesame_16", + [SD_LBP_WS10] = "writesame_10", + [SD_LBP_ZERO] = "writesame_zero", + [SD_LBP_DISABLE] = "disabled", +}; + +static ssize_t +sd_show_provisioning_mode(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + + return snprintf(buf, 20, "%s\n", lbp_mode[sdkp->provisioning_mode]); +} + +static ssize_t +sd_store_provisioning_mode(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + struct scsi_device *sdp = sdkp->device; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + if (sdp->type != TYPE_DISK) + return -EINVAL; + + if (!strncmp(buf, lbp_mode[SD_LBP_UNMAP], 20)) + sd_config_discard(sdkp, SD_LBP_UNMAP); + else if (!strncmp(buf, lbp_mode[SD_LBP_WS16], 20)) + sd_config_discard(sdkp, SD_LBP_WS16); + else if (!strncmp(buf, lbp_mode[SD_LBP_WS10], 20)) + sd_config_discard(sdkp, SD_LBP_WS10); + else if (!strncmp(buf, lbp_mode[SD_LBP_ZERO], 20)) + 
sd_config_discard(sdkp, SD_LBP_ZERO); + else if (!strncmp(buf, lbp_mode[SD_LBP_DISABLE], 20)) + sd_config_discard(sdkp, SD_LBP_DISABLE); + else + return -EINVAL; + + return count; } static struct device_attribute sd_disk_attrs[] = { @@ -309,6 +357,8 @@ static struct device_attribute sd_disk_attrs[] = { __ATTR(protection_mode, S_IRUGO, sd_show_protection_mode, NULL), __ATTR(app_tag_own, S_IRUGO, sd_show_app_tag_own, NULL), __ATTR(thin_provisioning, S_IRUGO, sd_show_thin_provisioning, NULL), + __ATTR(provisioning_mode, S_IRUGO|S_IWUSR, sd_show_provisioning_mode, + sd_store_provisioning_mode), __ATTR_NULL, }; @@ -433,6 +483,49 @@ static void sd_prot_op(struct scsi_cmnd *scmd, unsigned int dif) scsi_set_prot_type(scmd, dif); } +static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode) +{ + struct request_queue *q = sdkp->disk->queue; + unsigned int logical_block_size = sdkp->device->sector_size; + unsigned int max_blocks = 0; + + q->limits.discard_zeroes_data = sdkp->lbprz; + q->limits.discard_alignment = sdkp->unmap_alignment; + q->limits.discard_granularity = + max(sdkp->physical_block_size, + sdkp->unmap_granularity * logical_block_size); + + switch (mode) { + + case SD_LBP_DISABLE: + q->limits.max_discard_sectors = 0; + queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q); + return; + + case SD_LBP_UNMAP: + max_blocks = min_not_zero(sdkp->max_unmap_blocks, 0xffffffff); + break; + + case SD_LBP_WS16: + max_blocks = min_not_zero(sdkp->max_ws_blocks, 0xffffffff); + break; + + case SD_LBP_WS10: + max_blocks = min_not_zero(sdkp->max_ws_blocks, (u32)0xffff); + break; + + case SD_LBP_ZERO: + max_blocks = min_not_zero(sdkp->max_ws_blocks, (u32)0xffff); + q->limits.discard_zeroes_data = 1; + break; + } + + q->limits.max_discard_sectors = max_blocks * (logical_block_size >> 9); + queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); + + sdkp->provisioning_mode = mode; +} + /** * scsi_setup_discard_cmnd - unmap blocks on thinly provisioned device * @sdp: scsi device to operate one @@ -449,6 +542,7 @@ static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq) unsigned int nr_sectors = bio_sectors(bio); unsigned int len; int ret; + char *buf; struct page *page; if (sdkp->device->sector_size == 4096) { @@ -464,8 +558,9 @@ static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq) if (!page) return BLKPREP_DEFER; - if (sdkp->unmap) { - char *buf = page_address(page); + switch (sdkp->provisioning_mode) { + case SD_LBP_UNMAP: + buf = page_address(page); rq->cmd_len = 10; rq->cmd[0] = UNMAP; @@ -477,7 +572,9 @@ static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq) put_unaligned_be32(nr_sectors, &buf[16]); len = 24; - } else { + break; + + case SD_LBP_WS16: rq->cmd_len = 16; rq->cmd[0] = WRITE_SAME_16; rq->cmd[1] = 0x8; /* UNMAP */ @@ -485,11 +582,29 @@ static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq) put_unaligned_be32(nr_sectors, &rq->cmd[10]); len = sdkp->device->sector_size; + break; + + case SD_LBP_WS10: + case SD_LBP_ZERO: + rq->cmd_len = 10; + rq->cmd[0] = WRITE_SAME; + if (sdkp->provisioning_mode == SD_LBP_WS10) + rq->cmd[1] = 0x8; /* UNMAP */ + put_unaligned_be32(sector, &rq->cmd[2]); + put_unaligned_be16(nr_sectors, &rq->cmd[7]); + + len = sdkp->device->sector_size; + break; + + default: + goto out; } blk_add_request_payload(rq, page, len); ret = scsi_setup_blk_pc_cmnd(sdp, rq); rq->buffer = page_address(page); + +out: if (ret != BLKPREP_OK) { __free_page(page); rq->buffer = NULL; @@ -1251,12 
+1366,10 @@ static int sd_done(struct scsi_cmnd *SCpnt) struct scsi_disk *sdkp = scsi_disk(SCpnt->request->rq_disk); int sense_valid = 0; int sense_deferred = 0; + unsigned char op = SCpnt->cmnd[0]; - if (SCpnt->request->cmd_flags & REQ_DISCARD) { - if (!result) - scsi_set_resid(SCpnt, 0); - return good_bytes; - } + if ((SCpnt->request->cmd_flags & REQ_DISCARD) && !result) + scsi_set_resid(SCpnt, 0); if (result) { sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr); @@ -1295,10 +1408,17 @@ static int sd_done(struct scsi_cmnd *SCpnt) SCpnt->result = 0; memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); break; - case ABORTED_COMMAND: /* DIF: Target detected corruption */ - case ILLEGAL_REQUEST: /* DIX: Host detected corruption */ - if (sshdr.asc == 0x10) + case ABORTED_COMMAND: + if (sshdr.asc == 0x10) /* DIF: Target detected corruption */ + good_bytes = sd_completed_bytes(SCpnt); + break; + case ILLEGAL_REQUEST: + if (sshdr.asc == 0x10) /* DIX: Host detected corruption */ good_bytes = sd_completed_bytes(SCpnt); + /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */ + if ((sshdr.asc == 0x20 || sshdr.asc == 0x24) && + (op == UNMAP || op == WRITE_SAME_16 || op == WRITE_SAME)) + sd_config_discard(sdkp, SD_LBP_DISABLE); break; default: break; @@ -1596,17 +1716,13 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp, sd_printk(KERN_NOTICE, sdkp, "physical block alignment offset: %u\n", alignment); - if (buffer[14] & 0x80) { /* TPE */ - struct request_queue *q = sdp->request_queue; + if (buffer[14] & 0x80) { /* LBPME */ + sdkp->lbpme = 1; - sdkp->thin_provisioning = 1; - q->limits.discard_granularity = sdkp->physical_block_size; - q->limits.max_discard_sectors = 0xffffffff; + if (buffer[14] & 0x40) /* LBPRZ */ + sdkp->lbprz = 1; - if (buffer[14] & 0x40) /* TPRZ */ - q->limits.discard_zeroes_data = 1; - - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); + sd_config_discard(sdkp, SD_LBP_WS16); } sdkp->capacity = lba + 1; @@ -2091,7 +2207,6 @@ static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer) */ static void sd_read_block_limits(struct scsi_disk *sdkp) { - struct request_queue *q = sdkp->disk->queue; unsigned int sector_sz = sdkp->device->sector_size; const int vpd_len = 64; unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL); @@ -2106,39 +2221,46 @@ static void sd_read_block_limits(struct scsi_disk *sdkp) blk_queue_io_opt(sdkp->disk->queue, get_unaligned_be32(&buffer[12]) * sector_sz); - /* Thin provisioning enabled and page length indicates TP support */ - if (sdkp->thin_provisioning && buffer[3] == 0x3c) { - unsigned int lba_count, desc_count, granularity; + if (buffer[3] == 0x3c) { + unsigned int lba_count, desc_count; - lba_count = get_unaligned_be32(&buffer[20]); - desc_count = get_unaligned_be32(&buffer[24]); - - if (lba_count && desc_count) { - if (sdkp->tpvpd && !sdkp->tpu) - sdkp->unmap = 0; - else - sdkp->unmap = 1; - } + sdkp->max_ws_blocks = + (u32) min_not_zero(get_unaligned_be64(&buffer[36]), + (u64)0xffffffff); - if (sdkp->tpvpd && !sdkp->tpu && !sdkp->tpws) { - sd_printk(KERN_ERR, sdkp, "Thin provisioning is " \ - "enabled but neither TPU, nor TPWS are " \ - "set. 
Disabling discard!\n"); + if (!sdkp->lbpme) goto out; - } - if (lba_count) - q->limits.max_discard_sectors = - lba_count * sector_sz >> 9; + lba_count = get_unaligned_be32(&buffer[20]); + desc_count = get_unaligned_be32(&buffer[24]); - granularity = get_unaligned_be32(&buffer[28]); + if (lba_count && desc_count) + sdkp->max_unmap_blocks = lba_count; - if (granularity) - q->limits.discard_granularity = granularity * sector_sz; + sdkp->unmap_granularity = get_unaligned_be32(&buffer[28]); if (buffer[32] & 0x80) - q->limits.discard_alignment = + sdkp->unmap_alignment = get_unaligned_be32(&buffer[32]) & ~(1 << 31); + + if (!sdkp->lbpvpd) { /* LBP VPD page not provided */ + + if (sdkp->max_unmap_blocks) + sd_config_discard(sdkp, SD_LBP_UNMAP); + else + sd_config_discard(sdkp, SD_LBP_WS16); + + } else { /* LBP VPD page tells us what to use */ + + if (sdkp->lbpu && sdkp->max_unmap_blocks) + sd_config_discard(sdkp, SD_LBP_UNMAP); + else if (sdkp->lbpws) + sd_config_discard(sdkp, SD_LBP_WS16); + else if (sdkp->lbpws10) + sd_config_discard(sdkp, SD_LBP_WS10); + else + sd_config_discard(sdkp, SD_LBP_DISABLE); + } } out: @@ -2172,15 +2294,15 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp) } /** - * sd_read_thin_provisioning - Query thin provisioning VPD page + * sd_read_block_provisioning - Query provisioning VPD page * @disk: disk to query */ -static void sd_read_thin_provisioning(struct scsi_disk *sdkp) +static void sd_read_block_provisioning(struct scsi_disk *sdkp) { unsigned char *buffer; const int vpd_len = 8; - if (sdkp->thin_provisioning == 0) + if (sdkp->lbpme == 0) return; buffer = kmalloc(vpd_len, GFP_KERNEL); @@ -2188,9 +2310,10 @@ static void sd_read_thin_provisioning(struct scsi_disk *sdkp) if (!buffer || scsi_get_vpd_page(sdkp->device, 0xb2, buffer, vpd_len)) goto out; - sdkp->tpvpd = 1; - sdkp->tpu = (buffer[5] >> 7) & 1; /* UNMAP */ - sdkp->tpws = (buffer[5] >> 6) & 1; /* WRITE SAME(16) with UNMAP */ + sdkp->lbpvpd = 1; + sdkp->lbpu = (buffer[5] >> 7) & 1; /* UNMAP */ + sdkp->lbpws = (buffer[5] >> 6) & 1; /* WRITE SAME(16) with UNMAP */ + sdkp->lbpws10 = (buffer[5] >> 5) & 1; /* WRITE SAME(10) with UNMAP */ out: kfree(buffer); @@ -2247,7 +2370,7 @@ static int sd_revalidate_disk(struct gendisk *disk) sd_read_capacity(sdkp, buffer); if (sd_try_extended_inquiry(sdp)) { - sd_read_thin_provisioning(sdkp); + sd_read_block_provisioning(sdkp); sd_read_block_limits(sdkp); sd_read_block_characteristics(sdkp); } diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index c9d8f6ca49e2..6ad798bfd52a 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h @@ -43,6 +43,15 @@ enum { SD_MEMPOOL_SIZE = 2, /* CDB pool size */ }; +enum { + SD_LBP_FULL = 0, /* Full logical block provisioning */ + SD_LBP_UNMAP, /* Use UNMAP command */ + SD_LBP_WS16, /* Use WRITE SAME(16) with UNMAP bit */ + SD_LBP_WS10, /* Use WRITE SAME(10) with UNMAP bit */ + SD_LBP_ZERO, /* Use WRITE SAME(10) with zero payload */ + SD_LBP_DISABLE, /* Discard disabled due to failed cmd */ +}; + struct scsi_disk { struct scsi_driver *driver; /* always &sd_template */ struct scsi_device *device; @@ -50,21 +59,27 @@ struct scsi_disk { struct gendisk *disk; atomic_t openers; sector_t capacity; /* size in 512-byte sectors */ + u32 max_ws_blocks; + u32 max_unmap_blocks; + u32 unmap_granularity; + u32 unmap_alignment; u32 index; unsigned int physical_block_size; u8 media_present; u8 write_prot; u8 protection_type;/* Data Integrity Field */ + u8 provisioning_mode; unsigned ATO : 1; /* state of disk ATO bit */ unsigned WCE : 1; /* 
state of disk WCE bit */ unsigned RCD : 1; /* state of disk RCD bit, unused */ unsigned DPOFUA : 1; /* state of disk DPOFUA bit */ unsigned first_scan : 1; - unsigned thin_provisioning : 1; - unsigned unmap : 1; - unsigned tpws : 1; - unsigned tpu : 1; - unsigned tpvpd : 1; + unsigned lbpme : 1; + unsigned lbprz : 1; + unsigned lbpu : 1; + unsigned lbpws : 1; + unsigned lbpws10 : 1; + unsigned lbpvpd : 1; }; #define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev) -- cgit v1.2.3 From 5b94e23292dec213e5bb0240894b597d93744e2a Mon Sep 17 00:00:00 2001 From: "Martin K. Petersen" Date: Tue, 8 Mar 2011 02:08:11 -0500 Subject: [SCSI] scsi_debug: Logical Block Provisioning (SBC3r26) Update scsi_debug to support the Logical Block Provisioning commands and bits as defined in SBC3r26. The old tp* parameters have been transitioned to the new lbp* scheme found in the draft standard. The old tpu option to enable UNMAP is now called lbpu. tpws to signal support for WRITE SAME(16) with the UNMAP bit set is now lbpws. Support for WRITE SAME(10) with the UNMAP bit set is also available using the lpuws10 parameter. Limiting the maximum number of blocks per WRITE SAME command has been implemented and is available via the write_same_length module parameter. As part of the renaming process the parameter lists have been sorted alphabetically (request from Doug). Signed-off-by: Martin K. Petersen Acked-by: Douglas Gilbert Signed-off-by: James Bottomley --- drivers/scsi/scsi_debug.c | 183 ++++++++++++++++++++++++++-------------------- 1 file changed, 105 insertions(+), 78 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 7b310934efed..6ba5dbb91096 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -89,32 +89,34 @@ static const char * scsi_debug_version_date = "20100324"; /* With these defaults, this driver will make 1 host with 1 target * (id 0) containing 1 logical unit (lun 0). That is 1 device. 
*/ +#define DEF_ATO 1 #define DEF_DELAY 1 #define DEF_DEV_SIZE_MB 8 -#define DEF_EVERY_NTH 0 -#define DEF_NUM_PARTS 0 -#define DEF_OPTS 0 -#define DEF_SCSI_LEVEL 5 /* INQUIRY, byte2 [5->SPC-3] */ -#define DEF_PTYPE 0 +#define DEF_DIF 0 +#define DEF_DIX 0 #define DEF_D_SENSE 0 -#define DEF_NO_LUN_0 0 -#define DEF_VIRTUAL_GB 0 +#define DEF_EVERY_NTH 0 #define DEF_FAKE_RW 0 -#define DEF_VPD_USE_HOSTNO 1 -#define DEF_SECTOR_SIZE 512 -#define DEF_DIX 0 -#define DEF_DIF 0 #define DEF_GUARD 0 -#define DEF_ATO 1 -#define DEF_PHYSBLK_EXP 0 +#define DEF_LBPU 0 +#define DEF_LBPWS 0 +#define DEF_LBPWS10 0 #define DEF_LOWEST_ALIGNED 0 +#define DEF_NO_LUN_0 0 +#define DEF_NUM_PARTS 0 +#define DEF_OPTS 0 #define DEF_OPT_BLKS 64 +#define DEF_PHYSBLK_EXP 0 +#define DEF_PTYPE 0 +#define DEF_SCSI_LEVEL 5 /* INQUIRY, byte2 [5->SPC-3] */ +#define DEF_SECTOR_SIZE 512 +#define DEF_UNMAP_ALIGNMENT 0 +#define DEF_UNMAP_GRANULARITY 1 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF #define DEF_UNMAP_MAX_DESC 256 -#define DEF_UNMAP_GRANULARITY 1 -#define DEF_UNMAP_ALIGNMENT 0 -#define DEF_TPWS 0 -#define DEF_TPU 0 +#define DEF_VIRTUAL_GB 0 +#define DEF_VPD_USE_HOSTNO 1 +#define DEF_WRITESAME_LENGTH 0xFFFF /* bit mask values for scsi_debug_opts */ #define SCSI_DEBUG_OPT_NOISE 1 @@ -155,36 +157,38 @@ static const char * scsi_debug_version_date = "20100324"; #define SCSI_DEBUG_CANQUEUE 255 static int scsi_debug_add_host = DEF_NUM_HOST; +static int scsi_debug_ato = DEF_ATO; static int scsi_debug_delay = DEF_DELAY; static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB; +static int scsi_debug_dif = DEF_DIF; +static int scsi_debug_dix = DEF_DIX; +static int scsi_debug_dsense = DEF_D_SENSE; static int scsi_debug_every_nth = DEF_EVERY_NTH; +static int scsi_debug_fake_rw = DEF_FAKE_RW; +static int scsi_debug_guard = DEF_GUARD; +static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED; static int scsi_debug_max_luns = DEF_MAX_LUNS; static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE; -static int scsi_debug_num_parts = DEF_NUM_PARTS; +static int scsi_debug_no_lun_0 = DEF_NO_LUN_0; static int scsi_debug_no_uld = 0; +static int scsi_debug_num_parts = DEF_NUM_PARTS; static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */ +static int scsi_debug_opt_blks = DEF_OPT_BLKS; static int scsi_debug_opts = DEF_OPTS; -static int scsi_debug_scsi_level = DEF_SCSI_LEVEL; +static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP; static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */ -static int scsi_debug_dsense = DEF_D_SENSE; -static int scsi_debug_no_lun_0 = DEF_NO_LUN_0; +static int scsi_debug_scsi_level = DEF_SCSI_LEVEL; +static int scsi_debug_sector_size = DEF_SECTOR_SIZE; static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB; -static int scsi_debug_fake_rw = DEF_FAKE_RW; static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO; -static int scsi_debug_sector_size = DEF_SECTOR_SIZE; -static int scsi_debug_dix = DEF_DIX; -static int scsi_debug_dif = DEF_DIF; -static int scsi_debug_guard = DEF_GUARD; -static int scsi_debug_ato = DEF_ATO; -static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP; -static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED; -static int scsi_debug_opt_blks = DEF_OPT_BLKS; -static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC; -static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS; -static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY; +static unsigned int scsi_debug_lbpu = DEF_LBPU; +static unsigned int scsi_debug_lbpws = DEF_LBPWS; +static 
unsigned int scsi_debug_lbpws10 = DEF_LBPWS10; static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT; -static unsigned int scsi_debug_tpws = DEF_TPWS; -static unsigned int scsi_debug_tpu = DEF_TPU; +static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY; +static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS; +static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC; +static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH; static int scsi_debug_cmnd_count = 0; @@ -206,6 +210,11 @@ static int sdebug_sectors_per; /* sectors per cylinder */ #define SCSI_DEBUG_MAX_CMD_LEN 32 +static unsigned int scsi_debug_lbp(void) +{ + return scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10; +} + struct sdebug_dev_info { struct list_head dev_list; unsigned char sense_buff[SDEBUG_SENSE_LEN]; /* weak nexus */ @@ -727,7 +736,7 @@ static int inquiry_evpd_b0(unsigned char * arr) /* Optimal Transfer Length */ put_unaligned_be32(scsi_debug_opt_blks, &arr[8]); - if (scsi_debug_tpu) { + if (scsi_debug_lbpu) { /* Maximum Unmap LBA Count */ put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]); @@ -744,7 +753,10 @@ static int inquiry_evpd_b0(unsigned char * arr) /* Optimal Unmap Granularity */ put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]); - return 0x3c; /* Mandatory page length for thin provisioning */ + /* Maximum WRITE SAME Length */ + put_unaligned_be64(scsi_debug_write_same_length, &arr[32]); + + return 0x3c; /* Mandatory page length for Logical Block Provisioning */ return sizeof(vpdb0_data); } @@ -767,12 +779,15 @@ static int inquiry_evpd_b2(unsigned char *arr) memset(arr, 0, 0x8); arr[0] = 0; /* threshold exponent */ - if (scsi_debug_tpu) + if (scsi_debug_lbpu) arr[1] = 1 << 7; - if (scsi_debug_tpws) + if (scsi_debug_lbpws) arr[1] |= 1 << 6; + if (scsi_debug_lbpws10) + arr[1] |= 1 << 5; + return 0x8; } @@ -831,7 +846,8 @@ static int resp_inquiry(struct scsi_cmnd * scp, int target, arr[n++] = 0x89; /* ATA information */ arr[n++] = 0xb0; /* Block limits (SBC) */ arr[n++] = 0xb1; /* Block characteristics (SBC) */ - arr[n++] = 0xb2; /* Thin provisioning (SBC) */ + if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */ + arr[n++] = 0xb2; arr[3] = n - 4; /* number of supported VPD pages */ } else if (0x80 == cmd[2]) { /* unit serial number */ arr[1] = cmd[2]; /*sanity */ @@ -879,7 +895,7 @@ static int resp_inquiry(struct scsi_cmnd * scp, int target, } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */ arr[1] = cmd[2]; /*sanity */ arr[3] = inquiry_evpd_b1(&arr[4]); - } else if (0xb2 == cmd[2]) { /* Thin provisioning (SBC) */ + } else if (0xb2 == cmd[2]) { /* Logical Block Prov. 
(SBC) */ arr[1] = cmd[2]; /*sanity */ arr[3] = inquiry_evpd_b2(&arr[4]); } else { @@ -1053,8 +1069,8 @@ static int resp_readcap16(struct scsi_cmnd * scp, arr[13] = scsi_debug_physblk_exp & 0xf; arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f; - if (scsi_debug_tpu || scsi_debug_tpws) - arr[14] |= 0x80; /* TPE */ + if (scsi_debug_lbp()) + arr[14] |= 0x80; /* LBPME */ arr[15] = scsi_debug_lowest_aligned & 0xff; @@ -2084,6 +2100,12 @@ static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba, if (ret) return ret; + if (num > scsi_debug_write_same_length) { + mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, + 0); + return check_condition_result; + } + write_lock_irqsave(&atomic_rw, iflags); if (unmap && scsi_debug_unmap_granularity) { @@ -2695,37 +2717,40 @@ static int schedule_resp(struct scsi_cmnd * cmnd, /sys/bus/pseudo/drivers/scsi_debug directory is changed. */ module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR); +module_param_named(ato, scsi_debug_ato, int, S_IRUGO); module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR); module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO); +module_param_named(dif, scsi_debug_dif, int, S_IRUGO); +module_param_named(dix, scsi_debug_dix, int, S_IRUGO); module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR); module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR); module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR); +module_param_named(guard, scsi_debug_guard, int, S_IRUGO); +module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO); +module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO); +module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO); +module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO); module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR); module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR); module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR); module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO); module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO); module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR); +module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO); module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR); +module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO); module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR); module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO); -module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR); -module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int, - S_IRUGO | S_IWUSR); module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO); -module_param_named(dix, scsi_debug_dix, int, S_IRUGO); -module_param_named(dif, scsi_debug_dif, int, S_IRUGO); -module_param_named(guard, scsi_debug_guard, int, S_IRUGO); -module_param_named(ato, scsi_debug_ato, int, S_IRUGO); -module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO); -module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO); -module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO); +module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO); +module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO); module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO); 
module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO); -module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO); -module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO); -module_param_named(tpu, scsi_debug_tpu, int, S_IRUGO); -module_param_named(tpws, scsi_debug_tpws, int, S_IRUGO); +module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR); +module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int, + S_IRUGO | S_IWUSR); +module_param_named(write_same_length, scsi_debug_write_same_length, int, + S_IRUGO | S_IWUSR); MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert"); MODULE_DESCRIPTION("SCSI debug adapter driver"); @@ -2733,36 +2758,38 @@ MODULE_LICENSE("GPL"); MODULE_VERSION(SCSI_DEBUG_VERSION); MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)"); +MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)"); MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)"); MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)"); +MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)"); +MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)"); MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)"); MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)"); MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)"); +MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)"); +MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)"); +MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)"); +MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)"); +MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)"); MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)"); MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to 255(def))"); MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)"); MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))"); MODULE_PARM_DESC(num_parts, "number of partitions(def=0)"); MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)"); +MODULE_PARM_DESC(opt_blks, "optimal transfer length in block (def=64)"); MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... 
(def=0)"); +MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)"); MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])"); MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])"); -MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)"); -MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)"); MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)"); -MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)"); -MODULE_PARM_DESC(opt_blks, "optimal transfer length in block (def=64)"); -MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)"); -MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)"); -MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)"); -MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)"); -MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)"); +MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)"); +MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)"); MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)"); MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)"); -MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)"); -MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)"); -MODULE_PARM_DESC(tpu, "enable TP, support UNMAP command (def=0)"); -MODULE_PARM_DESC(tpws, "enable TP, support WRITE SAME(16) with UNMAP bit (def=0)"); +MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)"); +MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)"); +MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)"); static char sdebug_info[256]; @@ -3150,7 +3177,7 @@ static ssize_t sdebug_map_show(struct device_driver *ddp, char *buf) { ssize_t count; - if (scsi_debug_tpu == 0 && scsi_debug_tpws == 0) + if (!scsi_debug_lbp()) return scnprintf(buf, PAGE_SIZE, "0-%u\n", sdebug_store_sectors); @@ -3333,8 +3360,8 @@ static int __init scsi_debug_init(void) memset(dif_storep, 0xff, dif_size); } - /* Thin Provisioning */ - if (scsi_debug_tpu || scsi_debug_tpws) { + /* Logical Block Provisioning */ + if (scsi_debug_lbp()) { unsigned int map_bytes; scsi_debug_unmap_max_blocks = @@ -3664,7 +3691,7 @@ int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done) errsts = resp_readcap16(SCpnt, devip); else if (cmd[1] == SAI_GET_LBA_STATUS) { - if (scsi_debug_tpu == 0 && scsi_debug_tpws == 0) { + if (scsi_debug_lbp() == 0) { mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_COMMAND_OPCODE, 0); errsts = check_condition_result; @@ -3775,8 +3802,10 @@ write: } break; case WRITE_SAME_16: + case WRITE_SAME: if (cmd[1] & 0x8) { - if (scsi_debug_tpws == 0) { + if ((*cmd == WRITE_SAME_16 && scsi_debug_lbpws == 0) || + (*cmd == WRITE_SAME && scsi_debug_lbpws10 == 0)) { mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); errsts = check_condition_result; @@ -3785,8 +3814,6 @@ write: } if (errsts) break; - /* fall through */ - case WRITE_SAME: errsts = check_readiness(SCpnt, 0, devip); if (errsts) break; @@ -3798,7 +3825,7 @@ write: if (errsts) break; - if (scsi_debug_unmap_max_desc == 0 || scsi_debug_tpu == 0) { + if (scsi_debug_unmap_max_desc == 0 || scsi_debug_lbpu == 0) { mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_COMMAND_OPCODE, 0); 
errsts = check_condition_result; -- cgit v1.2.3 From 3f5eac3a040a2ea61a575f713aabedecdd23c3f8 Mon Sep 17 00:00:00 2001 From: "Stephen M. Cameron" Date: Wed, 9 Mar 2011 17:00:01 -0600 Subject: [SCSI] hpsa: move device attributes to avoid forward declarations Signed-off-by: Stephen M. Cameron Signed-off-by: James Bottomley --- drivers/scsi/hpsa.c | 253 +++++++++++++++++++++++++--------------------------- 1 file changed, 120 insertions(+), 133 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 09a529e34452..dcabef4bb149 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -155,21 +155,7 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd); static int hpsa_slave_alloc(struct scsi_device *sdev); static void hpsa_slave_destroy(struct scsi_device *sdev); -static ssize_t raid_level_show(struct device *dev, - struct device_attribute *attr, char *buf); -static ssize_t lunid_show(struct device *dev, - struct device_attribute *attr, char *buf); -static ssize_t unique_id_show(struct device *dev, - struct device_attribute *attr, char *buf); -static ssize_t host_show_firmware_revision(struct device *dev, - struct device_attribute *attr, char *buf); -static ssize_t host_show_commands_outstanding(struct device *dev, - struct device_attribute *attr, char *buf); -static ssize_t host_show_transport_mode(struct device *dev, - struct device_attribute *attr, char *buf); static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno); -static ssize_t host_store_rescan(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count); static int check_for_unit_attention(struct ctlr_info *h, struct CommandList *c); static void check_ioctl_unit_attention(struct ctlr_info *h, @@ -190,53 +176,6 @@ static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev, #define BOARD_NOT_READY 0 #define BOARD_READY 1 -static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL); -static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL); -static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL); -static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan); -static DEVICE_ATTR(firmware_revision, S_IRUGO, - host_show_firmware_revision, NULL); -static DEVICE_ATTR(commands_outstanding, S_IRUGO, - host_show_commands_outstanding, NULL); -static DEVICE_ATTR(transport_mode, S_IRUGO, - host_show_transport_mode, NULL); - -static struct device_attribute *hpsa_sdev_attrs[] = { - &dev_attr_raid_level, - &dev_attr_lunid, - &dev_attr_unique_id, - NULL, -}; - -static struct device_attribute *hpsa_shost_attrs[] = { - &dev_attr_rescan, - &dev_attr_firmware_revision, - &dev_attr_commands_outstanding, - &dev_attr_transport_mode, - NULL, -}; - -static struct scsi_host_template hpsa_driver_template = { - .module = THIS_MODULE, - .name = "hpsa", - .proc_name = "hpsa", - .queuecommand = hpsa_scsi_queue_command, - .scan_start = hpsa_scan_start, - .scan_finished = hpsa_scan_finished, - .change_queue_depth = hpsa_change_queue_depth, - .this_id = -1, - .use_clustering = ENABLE_CLUSTERING, - .eh_device_reset_handler = hpsa_eh_device_reset_handler, - .ioctl = hpsa_ioctl, - .slave_alloc = hpsa_slave_alloc, - .slave_destroy = hpsa_slave_destroy, -#ifdef CONFIG_COMPAT - .compat_ioctl = hpsa_compat_ioctl, -#endif - .sdev_attrs = hpsa_sdev_attrs, - .shost_attrs = hpsa_shost_attrs, -}; - static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev) { unsigned long *priv = shost_priv(sdev->host); @@ -334,83 +273,11 @@ static ssize_t 
host_show_transport_mode(struct device *dev, "performant" : "simple"); } -/* Enqueuing and dequeuing functions for cmdlists. */ -static inline void addQ(struct list_head *list, struct CommandList *c) -{ - list_add_tail(&c->list, list); -} - -static inline u32 next_command(struct ctlr_info *h) -{ - u32 a; - - if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant))) - return h->access.command_completed(h); - - if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) { - a = *(h->reply_pool_head); /* Next cmd in ring buffer */ - (h->reply_pool_head)++; - h->commands_outstanding--; - } else { - a = FIFO_EMPTY; - } - /* Check for wraparound */ - if (h->reply_pool_head == (h->reply_pool + h->max_commands)) { - h->reply_pool_head = h->reply_pool; - h->reply_pool_wraparound ^= 1; - } - return a; -} - -/* set_performant_mode: Modify the tag for cciss performant - * set bit 0 for pull model, bits 3-1 for block fetch - * register number - */ -static void set_performant_mode(struct ctlr_info *h, struct CommandList *c) -{ - if (likely(h->transMethod & CFGTBL_Trans_Performant)) - c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1); -} - -static void enqueue_cmd_and_start_io(struct ctlr_info *h, - struct CommandList *c) -{ - unsigned long flags; - - set_performant_mode(h, c); - spin_lock_irqsave(&h->lock, flags); - addQ(&h->reqQ, c); - h->Qdepth++; - start_io(h); - spin_unlock_irqrestore(&h->lock, flags); -} - -static inline void removeQ(struct CommandList *c) -{ - if (WARN_ON(list_empty(&c->list))) - return; - list_del_init(&c->list); -} - -static inline int is_hba_lunid(unsigned char scsi3addr[]) -{ - return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0; -} - static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[]) { return (scsi3addr[3] & 0xC0) == 0x40; } -static inline int is_scsi_rev_5(struct ctlr_info *h) -{ - if (!h->hba_inquiry_data) - return 0; - if ((h->hba_inquiry_data[2] & 0x07) == 5) - return 1; - return 0; -} - static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", "UNKNOWN" }; @@ -502,6 +369,126 @@ static ssize_t unique_id_show(struct device *dev, sn[12], sn[13], sn[14], sn[15]); } +static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL); +static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL); +static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL); +static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan); +static DEVICE_ATTR(firmware_revision, S_IRUGO, + host_show_firmware_revision, NULL); +static DEVICE_ATTR(commands_outstanding, S_IRUGO, + host_show_commands_outstanding, NULL); +static DEVICE_ATTR(transport_mode, S_IRUGO, + host_show_transport_mode, NULL); + +static struct device_attribute *hpsa_sdev_attrs[] = { + &dev_attr_raid_level, + &dev_attr_lunid, + &dev_attr_unique_id, + NULL, +}; + +static struct device_attribute *hpsa_shost_attrs[] = { + &dev_attr_rescan, + &dev_attr_firmware_revision, + &dev_attr_commands_outstanding, + &dev_attr_transport_mode, + NULL, +}; + +static struct scsi_host_template hpsa_driver_template = { + .module = THIS_MODULE, + .name = "hpsa", + .proc_name = "hpsa", + .queuecommand = hpsa_scsi_queue_command, + .scan_start = hpsa_scan_start, + .scan_finished = hpsa_scan_finished, + .change_queue_depth = hpsa_change_queue_depth, + .this_id = -1, + .use_clustering = ENABLE_CLUSTERING, + .eh_device_reset_handler = hpsa_eh_device_reset_handler, + .ioctl = hpsa_ioctl, + .slave_alloc = hpsa_slave_alloc, + .slave_destroy = hpsa_slave_destroy, +#ifdef CONFIG_COMPAT + .compat_ioctl = hpsa_compat_ioctl, 
+#endif + .sdev_attrs = hpsa_sdev_attrs, + .shost_attrs = hpsa_shost_attrs, +}; + + +/* Enqueuing and dequeuing functions for cmdlists. */ +static inline void addQ(struct list_head *list, struct CommandList *c) +{ + list_add_tail(&c->list, list); +} + +static inline u32 next_command(struct ctlr_info *h) +{ + u32 a; + + if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant))) + return h->access.command_completed(h); + + if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) { + a = *(h->reply_pool_head); /* Next cmd in ring buffer */ + (h->reply_pool_head)++; + h->commands_outstanding--; + } else { + a = FIFO_EMPTY; + } + /* Check for wraparound */ + if (h->reply_pool_head == (h->reply_pool + h->max_commands)) { + h->reply_pool_head = h->reply_pool; + h->reply_pool_wraparound ^= 1; + } + return a; +} + +/* set_performant_mode: Modify the tag for cciss performant + * set bit 0 for pull model, bits 3-1 for block fetch + * register number + */ +static void set_performant_mode(struct ctlr_info *h, struct CommandList *c) +{ + if (likely(h->transMethod & CFGTBL_Trans_Performant)) + c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1); +} + +static void enqueue_cmd_and_start_io(struct ctlr_info *h, + struct CommandList *c) +{ + unsigned long flags; + + set_performant_mode(h, c); + spin_lock_irqsave(&h->lock, flags); + addQ(&h->reqQ, c); + h->Qdepth++; + start_io(h); + spin_unlock_irqrestore(&h->lock, flags); +} + +static inline void removeQ(struct CommandList *c) +{ + if (WARN_ON(list_empty(&c->list))) + return; + list_del_init(&c->list); +} + +static inline int is_hba_lunid(unsigned char scsi3addr[]) +{ + return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0; +} + +static inline int is_scsi_rev_5(struct ctlr_info *h) +{ + if (!h->hba_inquiry_data) + return 0; + if ((h->hba_inquiry_data[2] & 0x07) == 5) + return 1; + return 0; +} + static int hpsa_find_target_lun(struct ctlr_info *h, unsigned char scsi3addr[], int bus, int *target, int *lun) { -- cgit v1.2.3 From 941b1cdae83039c99fc5c1884a98d2afd39760e5 Mon Sep 17 00:00:00 2001 From: "Stephen M. Cameron" Date: Wed, 9 Mar 2011 17:00:06 -0600 Subject: [SCSI] hpsa: export resettable host attribute This attribute, requested by Redhat, allows kexec-tools to know whether the controller can honor the reset_devices kernel parameter and actually reset the controller. For kdump to work properly it is necessary that the reset_devices parameter be honored. This attribute enables kexec-tools to warn the user if they attempt to designate a non-resettable controller as the dump device. Signed-off-by: Stephen M. 
Cameron Signed-off-by: James Bottomley --- drivers/scsi/hpsa.c | 41 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) (limited to 'drivers') diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index dcabef4bb149..415ad4fb50d4 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -273,6 +273,44 @@ static ssize_t host_show_transport_mode(struct device *dev, "performant" : "simple"); } +/* List of controllers which cannot be reset on kexec with reset_devices */ +static u32 unresettable_controller[] = { + 0x324a103C, /* Smart Array P712m */ + 0x324b103C, /* SmartArray P711m */ + 0x3223103C, /* Smart Array P800 */ + 0x3234103C, /* Smart Array P400 */ + 0x3235103C, /* Smart Array P400i */ + 0x3211103C, /* Smart Array E200i */ + 0x3212103C, /* Smart Array E200 */ + 0x3213103C, /* Smart Array E200i */ + 0x3214103C, /* Smart Array E200i */ + 0x3215103C, /* Smart Array E200i */ + 0x3237103C, /* Smart Array E500 */ + 0x323D103C, /* Smart Array P700m */ + 0x409C0E11, /* Smart Array 6400 */ + 0x409D0E11, /* Smart Array 6400 EM */ +}; + +static int ctlr_is_resettable(struct ctlr_info *h) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++) + if (unresettable_controller[i] == h->board_id) + return 0; + return 1; +} + +static ssize_t host_show_resettable(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ctlr_info *h; + struct Scsi_Host *shost = class_to_shost(dev); + + h = shost_to_hba(shost); + return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h)); +} + static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[]) { return (scsi3addr[3] & 0xC0) == 0x40; @@ -379,6 +417,8 @@ static DEVICE_ATTR(commands_outstanding, S_IRUGO, host_show_commands_outstanding, NULL); static DEVICE_ATTR(transport_mode, S_IRUGO, host_show_transport_mode, NULL); +static DEVICE_ATTR(resettable, S_IRUGO, + host_show_resettable, NULL); static struct device_attribute *hpsa_sdev_attrs[] = { &dev_attr_raid_level, @@ -392,6 +432,7 @@ static struct device_attribute *hpsa_shost_attrs[] = { &dev_attr_firmware_revision, &dev_attr_commands_outstanding, &dev_attr_transport_mode, + &dev_attr_resettable, NULL, }; -- cgit v1.2.3 From a82058a730c2bd01c43beb8a4847526a2998cc1a Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Thu, 10 Mar 2011 17:13:18 -0600 Subject: [SCSI] libsas: fix ata list corruption issue I think this stems from a misunderstanding of how the ata error handler works. ata_scsi_cmd_error_handler() gets called with a passed-in list of commands to handle. However, that list may still not be empty when it exits. The companion function ata_scsi_port_error_handler() (which takes no list) must be called before the list is completely emptied. This bites the sas error handler because the two are called from different functions and the original list has gone out of scope before ata_scsi_port_error_handler() is called, leaving some commands dangling on the bare stack, which is a potential memory corruption issue. Fix this by manually deleting all outstanding commands from the on-stack list before it goes out of scope.
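As an illustration of the pattern the fix relies on, here is a minimal, self-contained userspace sketch (not kernel code): the list_node type and the helpers below are hand-rolled stand-ins for the kernel's struct list_head, list_add_tail() and list_del_init(), and main() is purely hypothetical. The point is that a stack-allocated list head must be drained before it goes out of scope, otherwise any element still linked to it keeps pointers into dead stack memory.

#include <stdio.h>

struct list_node {
	struct list_node *prev, *next;
};

static void list_init(struct list_node *n)
{
	n->prev = n->next = n;
}

static void list_add_tail(struct list_node *n, struct list_node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

static void list_del_init(struct list_node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_init(n);
}

struct cmd {
	struct list_node list;
	int tag;
};

int main(void)
{
	struct cmd cmds[3] = { { .tag = 0 }, { .tag = 1 }, { .tag = 2 } };
	struct list_node q;	/* stands in for the on-stack sata_q */
	int i;

	list_init(&q);
	for (i = 0; i < 3; i++) {
		list_init(&cmds[i].list);
		list_add_tail(&cmds[i].list, &q);
	}

	/* Pretend the command error handler consumed only the first command. */
	list_del_init(&cmds[0].list);

	/*
	 * Drain the stack list: without this loop, cmds[1] and cmds[2]
	 * would keep prev/next pointers into 'q' after it disappears.
	 */
	while (q.next != &q)
		list_del_init(q.next);

	for (i = 0; i < 3; i++)
		printf("cmd %d unlinked: %s\n", cmds[i].tag,
		       cmds[i].list.next == &cmds[i].list ? "yes" : "no");
	return 0;
}

The patch below performs the same drain in sas_ata_eh() using the kernel's list_del_init(), so each leftover command ends up linked only to itself and can still be swept up later by ata_scsi_port_error_handler() from the ata tag array.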
Signed-off-by: James Bottomley --- drivers/scsi/libsas/sas_ata.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'drivers') diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index 16c5094bc86c..3356bf36cd98 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -802,6 +802,19 @@ int sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q, if (!list_empty(&sata_q)) { ata_port_printk(ap, KERN_DEBUG, "sas eh calling libata cmd error handler\n"); ata_scsi_cmd_error_handler(shost, ap, &sata_q); + /* + * ata's error handler may leave the cmd on the list + * so make sure they don't remain on a stack list + * about to go out of scope. + * + * This looks strange, since the commands are + * now part of no list, but the next error + * action will be ata_port_error_handler() + * which takes no list and sweeps them up + * anyway from the ata tag array. + */ + while (!list_empty(&sata_q)) + list_del_init(sata_q.next); } } while (ap); -- cgit v1.2.3 From 32f7ef73585a8773914661e1a8e477e7a0bfa8b4 Mon Sep 17 00:00:00 2001 From: Douglas Gilbert Date: Fri, 11 Mar 2011 10:43:35 -0500 Subject: [SCSI] scsi_debug: add consecutive medium errors A useful test case for error recovery is multiple, consecutive medium errors. When scsi_debug is started with "opts=2" a MEDIUM ERROR is generated when block 0x1234 (4660) is read. The patch extends that to 10 consecutive blocks from 0x1234 (i.e. blocks 4660 to 4669 inclusive). [0:0:0:0] disk ATA INTEL SSD 2CV1 /dev/sda /dev/sg0 80.0GB [10:0:0:0] disk Linux scsi_debug 0004 /dev/sdb /dev/sg1 1.09TB Output file not specified so no copy, just reading input >> unrecovered read error at blk=4660, substitute zeros ... >> unrecovered read error at blk=4669, substitute zeros 4670+10 records in 0+0 records out 10 unrecovered read errors lowest unrecovered read lba=4660, highest unrecovered lba=4669 time to read data: 0.047943 secs at 49.87 MB/sec BTW Change /dev/sg1 (bsg device works just as well) to /dev/sdb to see why, with faulty media, you do not want to use the block layer interface. Reason: time block layer takes to do useless retries and collateral damage to data in its 4 KB blocks (O_DIRECT mitigates the latter). ChangeLog: - extend opts=2 medium error generation at block 0x1234 to 10 consecutive blocks (i.e. blocks 0x1234 to 0x123d). 
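For illustration (not part of the patch), the extended range test can be exercised with a small standalone C program; the two constants mirror the driver's defines, while the helper names and the main() harness are hypothetical. A read of num blocks starting at lba now trips the simulated MEDIUM ERROR whenever it overlaps the ten-block window starting at 0x1234, and the sense information field reports the first failing block inside that request rather than always 0x1234.

#include <stdio.h>

#define OPT_MEDIUM_ERR_ADDR 0x1234	/* sector 4660 in decimal */
#define OPT_MEDIUM_ERR_NUM  10		/* consecutive bad sectors */

/* Same overlap test as the patched resp_read() condition. */
static int read_hits_bad_window(unsigned long long lba, unsigned int num)
{
	return lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1) &&
	       (lba + num) > OPT_MEDIUM_ERR_ADDR;
}

/* First failing LBA reported in the fixed-format sense info field. */
static unsigned long long first_bad_lba(unsigned long long lba)
{
	return lba < OPT_MEDIUM_ERR_ADDR ? OPT_MEDIUM_ERR_ADDR : lba;
}

int main(void)
{
	unsigned long long lbas[] = { 4650, 4655, 4660, 4669, 4670 };
	unsigned int i;

	for (i = 0; i < sizeof(lbas) / sizeof(lbas[0]); i++) {
		if (read_hits_bad_window(lbas[i], 8))
			printf("read of 8 blocks at %llu: MEDIUM ERROR, info lba %llu\n",
			       lbas[i], first_bad_lba(lbas[i]));
		else
			printf("read of 8 blocks at %llu: ok\n", lbas[i]);
	}
	return 0;
}

Against a real scsi_debug instance the same window is reached by loading the module with opts=2 and reading across sectors 4660 to 4669, as in the run quoted above.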
Signed-off-by: Douglas Gilbert Signed-off-by: James Bottomley --- drivers/scsi/scsi_debug.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 6ba5dbb91096..1829648982be 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -146,6 +146,7 @@ static const char * scsi_debug_version_date = "20100324"; /* when 1==SCSI_DEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this * sector on read commands: */ #define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */ +#define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */ /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1) * or "peripheral device" addressing (value 0) */ @@ -1807,15 +1808,15 @@ static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba, return ret; if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) && - (lba <= OPT_MEDIUM_ERR_ADDR) && + (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) && ((lba + num) > OPT_MEDIUM_ERR_ADDR)) { /* claim unrecoverable read error */ - mk_sense_buffer(devip, MEDIUM_ERROR, UNRECOVERED_READ_ERR, - 0); + mk_sense_buffer(devip, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0); /* set info field and valid bit for fixed descriptor */ if (0x70 == (devip->sense_buff[0] & 0x7f)) { devip->sense_buff[0] |= 0x80; /* Valid bit */ - ret = OPT_MEDIUM_ERR_ADDR; + ret = (lba < OPT_MEDIUM_ERR_ADDR) + ? OPT_MEDIUM_ERR_ADDR : (int)lba; devip->sense_buff[3] = (ret >> 24) & 0xff; devip->sense_buff[4] = (ret >> 16) & 0xff; devip->sense_buff[5] = (ret >> 8) & 0xff; -- cgit v1.2.3 From bc898c97f7ba24def788d9f80786cf028a197122 Mon Sep 17 00:00:00 2001 From: "Yanqing_Liu@Dell.com" Date: Fri, 11 Mar 2011 09:47:11 -0600 Subject: [SCSI] scsi_dh_rdac: Add MD36xxf into device list This patch adds the Dell MD36xxf array to the RDAC handler device list. Signed-off-by: Yanqing Liu Signed-off-by: James Bottomley --- drivers/scsi/device_handler/scsi_dh_rdac.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c index f468b05cfaac..293c183dfe6d 100644 --- a/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c @@ -767,6 +767,7 @@ static const struct scsi_dh_devlist rdac_dev_list[] = { {"DELL", "MD32xx"}, {"DELL", "MD32xxi"}, {"DELL", "MD36xxi"}, + {"DELL", "MD36xxf"}, {"LSI", "INF-01-00"}, {"ENGENIO", "INF-01-00"}, {"STK", "FLEXLINE 380"}, -- cgit v1.2.3