/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "isci.h"
#include "scic_io_request.h"
#include "scic_remote_device.h"
#include "scic_port.h"
#include "port.h"
#include "request.h"
#include "host.h"
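
/*
 * Interrupt handling overview: the hard-IRQ handlers below (isci_isr() and
 * isci_legacy_isr()) only ask the core library whether this controller
 * raised the interrupt and, if it did, schedule completion_tasklet.  The
 * actual completion processing happens later in
 * isci_host_completion_routine(), outside of hard-IRQ context.
 */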

/**
 * isci_isr() - This function is the interrupt service routine for the
 *    controller.  It schedules the tasklet and returns.
 * @vec: This parameter specifies the interrupt vector.
 * @data: This parameter specifies the ISCI host object.
 *
 * IRQ_HANDLED if this is our interrupt, otherwise IRQ_NONE.
 */
irqreturn_t isci_isr(int vec, void *data)
{
	struct isci_host *isci_host = (struct isci_host *)data;
	struct scic_controller_handler_methods *handlers =
		&isci_host->scic_irq_handlers[SCI_MSIX_NORMAL_VECTOR];
	irqreturn_t ret = IRQ_NONE;

	if (isci_host_get_state(isci_host) != isci_starting &&
	    handlers->interrupt_handler) {

		if (handlers->interrupt_handler(isci_host->core_controller)) {
			if (isci_host_get_state(isci_host) != isci_stopped) {
				tasklet_schedule(
					&isci_host->completion_tasklet);
			} else
				dev_dbg(&isci_host->pdev->dev,
					"%s: controller stopped\n",
					__func__);
			ret = IRQ_HANDLED;
		}
	} else
		dev_warn(&isci_host->pdev->dev,
			 "%s: get_handler_methods failed, "
			 "isci_host->status = 0x%x\n",
			 __func__,
			 isci_host_get_state(isci_host));

	return ret;
}

irqreturn_t isci_legacy_isr(int vec, void *data)
{
	struct pci_dev *pdev = data;
	struct isci_host *isci_host;
	struct scic_controller_handler_methods *handlers;
	irqreturn_t ret = IRQ_NONE;

	/*
	 * Since this is a legacy interrupt, either or both
	 * controllers could have triggered it.  Thus, we have to call
	 * the legacy interrupt handler for all controllers on the
	 * PCI function.
	 */
	for_each_isci_host(isci_host, pdev) {
		handlers = &isci_host->scic_irq_handlers[SCI_MSIX_NORMAL_VECTOR];

		if (isci_host_get_state(isci_host) != isci_starting &&
		    handlers->interrupt_handler) {

			if (handlers->interrupt_handler(
					isci_host->core_controller)) {
				if (isci_host_get_state(isci_host) !=
				    isci_stopped) {
					tasklet_schedule(
						&isci_host->completion_tasklet);
				} else
					dev_dbg(&isci_host->pdev->dev,
						"%s: controller stopped\n",
						__func__);
				ret = IRQ_HANDLED;
			}
		} else
			dev_warn(&isci_host->pdev->dev,
				 "%s: get_handler_methods failed, "
				 "isci_host->status = 0x%x\n",
				 __func__,
				 isci_host_get_state(isci_host));
	}
	return ret;
}

/**
 * isci_host_start_complete() - This function is called by the core library,
 *    through the ISCI Module, to indicate controller start status.
 * @isci_host: This parameter specifies the ISCI host object
 * @completion_status: This parameter specifies the completion status from the
 *    core library.
 *
 */
void isci_host_start_complete(
	struct isci_host *isci_host,
	enum sci_status completion_status)
{
	if (completion_status == SCI_SUCCESS) {
		dev_dbg(&isci_host->pdev->dev,
			"%s: completion_status: SCI_SUCCESS\n", __func__);
		isci_host_change_state(isci_host, isci_ready);
		complete_all(&isci_host->start_complete);
	} else
		dev_err(&isci_host->pdev->dev,
			"controller start failed with "
			"completion_status = 0x%x;",
			completion_status);
}
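
/*
 * Controller start handshake: isci_host_scan_start() below puts the host
 * into the isci_starting state, disables core interrupts, and kicks off
 * scic_controller_start().  Because interrupts are disabled at that point,
 * the SCSI midlayer's polling of isci_host_scan_finished() is what drives
 * the core's interrupt/completion handlers until the core reports the
 * start result through isci_host_start_complete() above.
 */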

/**
 * isci_host_scan_finished() - This function is one of the SCSI Host Template
 *    functions.  The SCSI midlayer calls this function during a target scan,
 *    approximately once every 10 milliseconds.
 * @shost: This parameter specifies the SCSI host being scanned
 * @time: This parameter specifies the number of ticks since the scan started.
 *
 * scan status, zero indicates the SCSI midlayer should continue to poll,
 * otherwise assume controller is ready.
 */
int isci_host_scan_finished(
	struct Scsi_Host *shost,
	unsigned long time)
{
	struct isci_host *isci_host =
		isci_host_from_sas_ha(SHOST_TO_SAS_HA(shost));
	struct scic_controller_handler_methods *handlers =
		&isci_host->scic_irq_handlers[SCI_MSIX_NORMAL_VECTOR];

	if (handlers->interrupt_handler == NULL) {
		dev_err(&isci_host->pdev->dev,
			"%s: scic_controller_get_handler_methods failed\n",
			__func__);
		return 1;
	}

	/*
	 * Check the interrupt handler's status and call the completion
	 * handler if it reports work pending.  link_up events should be
	 * coming from the scu core lib as phys come online.  For each
	 * link_up from the core, call get_received_identify_address_frame,
	 * copy the frame into the sas_phy object and call libsas
	 * notify_port_event(PORTE_BYTES_DMAED).  Continue to return zero
	 * from the scan_finished routine until the
	 * scic_cb_controller_start_complete() call comes from the core.
	 */
	if (handlers->interrupt_handler(isci_host->core_controller))
		handlers->completion_handler(isci_host->core_controller);

	if (isci_starting == isci_host_get_state(isci_host) &&
	    time < (HZ * 10)) {
		dev_dbg(&isci_host->pdev->dev,
			"%s: isci_host->status = %d, time = %ld\n",
			__func__, isci_host_get_state(isci_host), time);
		return 0;
	}

	dev_dbg(&isci_host->pdev->dev,
		"%s: isci_host->status = %d, time = %ld\n",
		__func__, isci_host_get_state(isci_host), time);

	scic_controller_enable_interrupts(isci_host->core_controller);

	return 1;
}

/**
 * isci_host_scan_start() - This function is one of the SCSI Host Template
 *    functions, called by the SCSI midlayer before a target scan begins.
 *    The core library controller start routine is called from here.
 * @shost: This parameter specifies the SCSI host to be scanned
 *
 */
void isci_host_scan_start(struct Scsi_Host *shost)
{
	struct isci_host *isci_host;

	isci_host = isci_host_from_sas_ha(SHOST_TO_SAS_HA(shost));

	isci_host_change_state(isci_host, isci_starting);

	scic_controller_disable_interrupts(isci_host->core_controller);
	init_completion(&isci_host->start_complete);
	scic_controller_start(
		isci_host->core_controller,
		scic_controller_get_suggested_start_timeout(
			isci_host->core_controller));
}

void isci_host_stop_complete(
	struct isci_host *isci_host,
	enum sci_status completion_status)
{
	isci_host_change_state(isci_host, isci_stopped);
	scic_controller_disable_interrupts(isci_host->core_controller);
	complete(&isci_host->stop_complete);
}

static struct coherent_memory_info *isci_host_alloc_mdl_struct(
	struct isci_host *isci_host,
	u32 size)
{
	struct coherent_memory_info *mdl_struct;
	void *uncached_address = NULL;

	mdl_struct = devm_kzalloc(&isci_host->pdev->dev,
				  sizeof(*mdl_struct), GFP_KERNEL);
	if (!mdl_struct)
		return NULL;

	INIT_LIST_HEAD(&mdl_struct->node);

	uncached_address = dmam_alloc_coherent(&isci_host->pdev->dev,
					       size,
					       &mdl_struct->dma_handle,
					       GFP_KERNEL);
	if (!uncached_address)
		return NULL;

	/* memset the whole memory area. */
	memset((char *)uncached_address, 0, size);
	mdl_struct->vaddr = uncached_address;
	mdl_struct->size = (size_t)size;

	return mdl_struct;
}
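
/*
 * Each memory descriptor entry (MDE) is backed by an allocation that is
 * padded by constant_memory_alignment bytes (see
 * isci_host_mdl_allocate_coherent() below), so isci_host_build_mde() can
 * round both the CPU virtual address and the DMA address up to the
 * required alignment without running past the end of the buffer.
 */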
static void isci_host_build_mde(
	struct sci_physical_memory_descriptor *mde_struct,
	struct coherent_memory_info *mdl_struct)
{
	unsigned long address = 0;
	dma_addr_t dma_addr = 0;

	address = (unsigned long)mdl_struct->vaddr;
	dma_addr = mdl_struct->dma_handle;

	/* Round the addresses up to satisfy the alignment constraint. */
	if ((address % mde_struct->constant_memory_alignment) != 0) {
		int align_offset =
			(mde_struct->constant_memory_alignment -
			 (address % mde_struct->constant_memory_alignment));
		address += align_offset;
		dma_addr += align_offset;
	}

	mde_struct->virtual_address = (void *)address;
	mde_struct->physical_address = dma_addr;
	mdl_struct->mde = mde_struct;
}

static int isci_host_mdl_allocate_coherent(
	struct isci_host *isci_host)
{
	struct sci_physical_memory_descriptor *current_mde;
	struct coherent_memory_info *mdl_struct;
	u32 size = 0;

	struct sci_base_memory_descriptor_list *mdl_handle =
		sci_controller_get_memory_descriptor_list_handle(
			isci_host->core_controller);

	sci_mdl_first_entry(mdl_handle);

	current_mde = sci_mdl_get_current_entry(mdl_handle);

	while (current_mde != NULL) {

		size = (current_mde->constant_memory_size +
			current_mde->constant_memory_alignment);

		mdl_struct = isci_host_alloc_mdl_struct(isci_host, size);
		if (!mdl_struct)
			return -ENOMEM;

		list_add_tail(&mdl_struct->node, &isci_host->mdl_struct_list);

		isci_host_build_mde(current_mde, mdl_struct);

		sci_mdl_next_entry(mdl_handle);
		current_mde = sci_mdl_get_current_entry(mdl_handle);
	}

	return 0;
}
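
/*
 * Note on locking: the completion routine below splices the host's
 * requests_to_complete and requests_to_abort lists onto local list heads
 * while holding scic_lock, then drops the lock before calling
 * task->task_done() and sas_task_abort(), so those libsas callbacks run
 * without scic_lock held.
 */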

/**
 * isci_host_completion_routine() - This function is the delayed service
 *    routine that calls the sci core library's completion handler.  It's
 *    scheduled as a tasklet from the interrupt service routine when
 *    interrupts are in use, or set as the timeout function in polled mode.
 * @data: This parameter specifies the ISCI host object
 *
 */
static void isci_host_completion_routine(unsigned long data)
{
	struct isci_host *isci_host = (struct isci_host *)data;
	struct scic_controller_handler_methods *handlers =
		&isci_host->scic_irq_handlers[SCI_MSIX_NORMAL_VECTOR];
	struct list_head completed_request_list;
	struct list_head aborted_request_list;
	struct list_head *current_position;
	struct list_head *next_position;
	struct isci_request *request;
	struct isci_request *next_request;
	struct sas_task *task;

	INIT_LIST_HEAD(&completed_request_list);
	INIT_LIST_HEAD(&aborted_request_list);

	spin_lock_irq(&isci_host->scic_lock);

	if (handlers->completion_handler)
		handlers->completion_handler(isci_host->core_controller);

	/* Take the lists of completed I/Os from the host. */
	list_splice_init(&isci_host->requests_to_complete,
			 &completed_request_list);

	list_splice_init(&isci_host->requests_to_abort,
			 &aborted_request_list);

	spin_unlock_irq(&isci_host->scic_lock);

	/* Process any completions in the lists. */
	list_for_each_safe(current_position, next_position,
			   &completed_request_list) {

		request = list_entry(current_position, struct isci_request,
				     completed_node);
		task = isci_request_access_task(request);

		/* Normal notification (task_done) */
		dev_dbg(&isci_host->pdev->dev,
			"%s: Normal - request/task = %p/%p\n",
			__func__, request, task);

		task->task_done(task);
		task->lldd_task = NULL;

		/* Free the request object. */
		isci_request_free(isci_host, request);
	}

	list_for_each_entry_safe(request, next_request, &aborted_request_list,
				 completed_node) {

		task = isci_request_access_task(request);

		/* Use sas_task_abort */
		dev_warn(&isci_host->pdev->dev,
			 "%s: Error - request/task = %p/%p\n",
			 __func__, request, task);

		/* Put the task into the abort path. */
		sas_task_abort(task);
	}
}

void isci_host_deinit(
	struct isci_host *isci_host)
{
	int i;

	isci_host_change_state(isci_host, isci_stopping);
	for (i = 0; i < SCI_MAX_PORTS; i++) {
		struct isci_port *port = &isci_host->isci_ports[i];
		struct isci_remote_device *device, *tmpdev;

		list_for_each_entry_safe(device, tmpdev,
					 &port->remote_dev_list, node) {
			isci_remote_device_change_state(device, isci_stopping);
			isci_remote_device_stop(device);
		}
	}

	/* Stop the controller and wait for completion. */
	init_completion(&isci_host->stop_complete);
	scic_controller_stop(isci_host->core_controller,
			     SCIC_CONTROLLER_STOP_TIMEOUT);
	wait_for_completion(&isci_host->stop_complete);

	/* Next, reset the controller. */
	scic_controller_reset(isci_host->core_controller);
}
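
/*
 * Layout of the firmware blob, as parsed by isci_verify_firmware() below:
 * a NUL-terminated "#SCU MAGIC#" ID string in the first 12 bytes, version
 * and subversion bytes at ISCI_FW_VER_OFS and ISCI_FW_SUBVER_OFS, and,
 * starting at ISCI_FW_DATA_OFS, a sequence of tagged sections
 * (ISCI_FW_HDR_PHYMASK, _PHYGEN and _SASADDR), each consisting of a tag
 * byte, an element count byte and that many u32 (or u64 for SAS address)
 * values, terminated by ISCI_FW_HDR_EOF.
 */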

static int isci_verify_firmware(const struct firmware *fw,
				struct isci_firmware *isci_fw)
{
	const u8 *tmp;

	if (fw->size < ISCI_FIRMWARE_MIN_SIZE)
		return -EINVAL;

	tmp = fw->data;

	/* The 12th char should be the NUL terminator for the ID string. */
	if (tmp[11] != '\0')
		return -EINVAL;

	if (strncmp("#SCU MAGIC#", tmp, 11) != 0)
		return -EINVAL;

	isci_fw->id = tmp;
	isci_fw->version = fw->data[ISCI_FW_VER_OFS];
	isci_fw->subversion = fw->data[ISCI_FW_SUBVER_OFS];

	tmp = fw->data + ISCI_FW_DATA_OFS;

	while (*tmp != ISCI_FW_HDR_EOF) {
		switch (*tmp) {
		case ISCI_FW_HDR_PHYMASK:
			tmp++;
			isci_fw->phy_masks_size = *tmp;
			tmp++;
			isci_fw->phy_masks = (const u32 *)tmp;
			tmp += sizeof(u32) * isci_fw->phy_masks_size;
			break;

		case ISCI_FW_HDR_PHYGEN:
			tmp++;
			isci_fw->phy_gens_size = *tmp;
			tmp++;
			isci_fw->phy_gens = (const u32 *)tmp;
			tmp += sizeof(u32) * isci_fw->phy_gens_size;
			break;

		case ISCI_FW_HDR_SASADDR:
			tmp++;
			isci_fw->sas_addrs_size = *tmp;
			tmp++;
			isci_fw->sas_addrs = (const u64 *)tmp;
			tmp += sizeof(u64) * isci_fw->sas_addrs_size;
			break;

		default:
			pr_err("bad field in firmware binary blob\n");
			return -EINVAL;
		}
	}

	pr_info("isci firmware v%u.%u loaded.\n",
		isci_fw->version, isci_fw->subversion);

	return SCI_SUCCESS;
}

static void __iomem *scu_base(struct isci_host *isci_host)
{
	struct pci_dev *pdev = isci_host->pdev;
	int id = isci_host->id;

	return pcim_iomap_table(pdev)[SCI_SCU_BAR * 2] + SCI_SCU_BAR_SIZE * id;
}

static void __iomem *smu_base(struct isci_host *isci_host)
{
	struct pci_dev *pdev = isci_host->pdev;
	int id = isci_host->id;

	return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id;
}

#define SCI_MAX_TIMER_COUNT 25

int isci_host_init(struct isci_host *isci_host)
{
	int err = 0;
	int index = 0;
	enum sci_status status;
	struct scic_sds_controller *controller;
	struct scic_sds_port *scic_port;
	struct scic_controller_handler_methods *handlers =
		&isci_host->scic_irq_handlers[0];
	union scic_oem_parameters scic_oem_params;
	union scic_user_parameters scic_user_params;
	const struct firmware *fw = NULL;
	struct isci_firmware *isci_fw = NULL;

	INIT_LIST_HEAD(&isci_host->timer_list_struct.timers);
	isci_timer_list_construct(&isci_host->timer_list_struct,
				  SCI_MAX_TIMER_COUNT);

	controller = scic_controller_alloc(&isci_host->pdev->dev);

	if (!controller) {
		err = -ENOMEM;
		dev_err(&isci_host->pdev->dev, "%s: failed (%d)\n",
			__func__, err);
		goto out;
	}

	isci_host->core_controller = controller;

	spin_lock_init(&isci_host->state_lock);
	spin_lock_init(&isci_host->scic_lock);
	spin_lock_init(&isci_host->queue_lock);

	isci_host_change_state(isci_host, isci_starting);
	isci_host->can_queue = ISCI_CAN_QUEUE_VAL;

	status = scic_controller_construct(controller, scu_base(isci_host),
					   smu_base(isci_host));

	if (status != SCI_SUCCESS) {
		dev_err(&isci_host->pdev->dev,
			"%s: scic_controller_construct failed - status = %x\n",
			__func__, status);
		err = -ENODEV;
		goto out;
	}

	isci_host->sas_ha.dev = &isci_host->pdev->dev;
	isci_host->sas_ha.lldd_ha = isci_host;

	/*----------- SCIC controller Initialization Stuff ------------------
	 * set association host adapter struct in core controller.
	 */
	sci_object_set_association(isci_host->core_controller,
				   (void *)isci_host);

	/* grab initial values stored in the controller object for OEM and
	 * USER parameters
	 */
	scic_oem_parameters_get(controller, &scic_oem_params);
	scic_user_parameters_get(controller, &scic_user_params);

	isci_fw = devm_kzalloc(&isci_host->pdev->dev,
			       sizeof(struct isci_firmware), GFP_KERNEL);
	if (!isci_fw) {
		dev_warn(&isci_host->pdev->dev,
			 "allocating firmware struct failed\n");
		dev_warn(&isci_host->pdev->dev,
			 "Default OEM configuration being used:"
			 " 4 narrow ports, and default SAS Addresses\n");
		goto set_default_params;
	}

	status = request_firmware(&fw, ISCI_FW_NAME, &isci_host->pdev->dev);
	if (status) {
		dev_warn(&isci_host->pdev->dev,
			 "Loading firmware failed, using default values\n");
		dev_warn(&isci_host->pdev->dev,
			 "Default OEM configuration being used:"
			 " 4 narrow ports, and default SAS Addresses\n");
		goto set_default_params;
	} else {
		status = isci_verify_firmware(fw, isci_fw);
		if (status != SCI_SUCCESS) {
			dev_warn(&isci_host->pdev->dev,
				 "firmware verification failed\n");
			dev_warn(&isci_host->pdev->dev,
				 "Default OEM configuration being used:"
				 " 4 narrow ports, and default SAS "
				 "Addresses\n");
			goto set_default_params;
		}

		/* grab any OEM and USER parameters specified at module load */
		status = isci_parse_oem_parameters(&scic_oem_params,
						   isci_host->id, isci_fw);
		if (status != SCI_SUCCESS) {
			dev_warn(&isci_host->pdev->dev,
				 "parsing firmware oem parameters failed\n");
			err = -EINVAL;
			goto out;
		}

		status = isci_parse_user_parameters(&scic_user_params,
						    isci_host->id, isci_fw);
		if (status != SCI_SUCCESS) {
			dev_warn(&isci_host->pdev->dev,
				 "%s: isci_parse_user_parameters failed\n",
				 __func__);
			err = -EINVAL;
			goto out;
		}
	}

 set_default_params:

	status = scic_oem_parameters_set(isci_host->core_controller,
					 &scic_oem_params);
	if (status != SCI_SUCCESS) {
		dev_warn(&isci_host->pdev->dev,
			 "%s: scic_oem_parameters_set failed\n",
			 __func__);
		err = -ENODEV;
		goto out;
	}

	status = scic_user_parameters_set(isci_host->core_controller,
					  &scic_user_params);
	if (status != SCI_SUCCESS) {
		dev_warn(&isci_host->pdev->dev,
			 "%s: scic_user_parameters_set failed\n",
			 __func__);
		err = -ENODEV;
		goto out;
	}

	status = scic_controller_initialize(isci_host->core_controller);
	if (status != SCI_SUCCESS) {
		dev_warn(&isci_host->pdev->dev,
			 "%s: scic_controller_initialize failed -"
			 " status = 0x%x\n",
			 __func__, status);
		err = -ENODEV;
		goto out;
	}

	/* @todo: use both MSI-X interrupts, and don't do indirect
	 * calls to the handlers, just register direct calls
	 */
	if (isci_host->pdev->msix_enabled) {
		status = scic_controller_get_handler_methods(
				SCIC_MSIX_INTERRUPT_TYPE,
				SCI_MSIX_DOUBLE_VECTOR,
				handlers);
	} else {
		status = scic_controller_get_handler_methods(
				SCIC_LEGACY_LINE_INTERRUPT_TYPE,
				0,
				handlers);
	}

	if (status != SCI_SUCCESS) {
		handlers->interrupt_handler = NULL;
		handlers->completion_handler = NULL;
		dev_err(&isci_host->pdev->dev,
			"%s: scic_controller_get_handler_methods failed\n",
			__func__);
	}

	tasklet_init(&isci_host->completion_tasklet,
		     isci_host_completion_routine, (unsigned long)isci_host);

	INIT_LIST_HEAD(&isci_host->mdl_struct_list);

	INIT_LIST_HEAD(&isci_host->requests_to_complete);
	INIT_LIST_HEAD(&isci_host->requests_to_abort);
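
	/*
	 * Note: the devm_/dmam_-managed allocations used in this function
	 * (devm_kzalloc, dmam_alloc_coherent, dmam_pool_create) are released
	 * automatically by the driver core when the PCI device is detached,
	 * which is presumably why the SPB_Debug error labels at the end of
	 * this function do not free anything explicitly.
	 */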
	/* populate mdl with dma memory.  scu_mdl_allocate_coherent() */
	err = isci_host_mdl_allocate_coherent(isci_host);
	if (err)
		goto err_out;

	/*
	 * keep the pool alloc size around, will use it for bounds checking
	 * when trying to convert virtual addresses to physical addresses
	 */
	isci_host->dma_pool_alloc_size = sizeof(struct isci_request) +
					 scic_io_request_get_object_size();
	isci_host->dma_pool = dmam_pool_create(DRV_NAME, &isci_host->pdev->dev,
					       isci_host->dma_pool_alloc_size,
					       SLAB_HWCACHE_ALIGN, 0);

	if (!isci_host->dma_pool) {
		err = -ENOMEM;
		goto req_obj_err_out;
	}

	for (index = 0; index < SCI_MAX_PORTS; index++) {
		isci_port_init(&isci_host->isci_ports[index],
			       isci_host, index);
	}

	for (index = 0; index < SCI_MAX_PHYS; index++)
		isci_phy_init(&isci_host->phys[index], isci_host, index);

	/* Why are we doing this? Is this even necessary? */
	memcpy(&isci_host->sas_addr[0],
	       &isci_host->phys[0].sas_addr[0],
	       SAS_ADDR_SIZE);

	/* Start the ports */
	for (index = 0; index < SCI_MAX_PORTS; index++) {
		scic_controller_get_port_handle(controller, index, &scic_port);
		scic_port_start(scic_port);
	}

	goto out;

	/* SPB_Debug: destroy request object cache */
 req_obj_err_out:
	/* SPB_Debug: destroy remote object cache */
 err_out:
	/* SPB_Debug: undo controller init, construct and alloc, remove from
	 * parent controller list.
	 */
 out:
	if (fw)
		release_firmware(fw);
	return err;
}