Diffstat (limited to 'drivers/ata/libata-core.c')
-rw-r--r--  drivers/ata/libata-core.c  1400
1 file changed, 928 insertions(+), 472 deletions(-)
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 0abd72d0dec2..4733f009c7c9 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -59,7 +59,7 @@
#include "libata.h"
-#define DRV_VERSION "2.20" /* must be exactly four chars */
+#define DRV_VERSION "2.21" /* must be exactly four chars */
/* debounce timing parameters in msecs { interval, duration, timeout } */
@@ -72,7 +72,7 @@ static unsigned int ata_dev_init_params(struct ata_device *dev,
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);
-static unsigned int ata_print_id = 1;
+unsigned int ata_print_id = 1;
static struct workqueue_struct *ata_wq;
struct workqueue_struct *ata_aux_wq;
@@ -89,6 +89,10 @@ int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
+static int ata_ignore_hpa = 0;
+module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
+MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
+
static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
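The new ignore_hpa knob follows the usual libata module-parameter conventions: a user wanting the full native capacity exposed would typically boot with libata.ignore_hpa=1, or pass ignore_hpa=1 to modprobe when libata is built as a module.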
@@ -808,6 +812,207 @@ void ata_id_c_string(const u16 *id, unsigned char *s,
*p = '\0';
}
+static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
+{
+ u64 sectors = 0;
+
+ sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
+ sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
+ sectors |= (tf->hob_lbal & 0xff) << 24;
+ sectors |= (tf->lbah & 0xff) << 16;
+ sectors |= (tf->lbam & 0xff) << 8;
+ sectors |= (tf->lbal & 0xff);
+
+ return ++sectors;
+}
+
+static u64 ata_tf_to_lba(struct ata_taskfile *tf)
+{
+ u64 sectors = 0;
+
+ sectors |= (tf->device & 0x0f) << 24;
+ sectors |= (tf->lbah & 0xff) << 16;
+ sectors |= (tf->lbam & 0xff) << 8;
+ sectors |= (tf->lbal & 0xff);
+
+ return ++sectors;
+}
+
+/**
+ * ata_read_native_max_address_ext - LBA48 native max query
+ * @dev: Device to query
+ *
+ * Perform an LBA48 size query upon the device in question. Return the
+ * actual LBA48 size or zero if the command fails.
+ */
+
+static u64 ata_read_native_max_address_ext(struct ata_device *dev)
+{
+ unsigned int err;
+ struct ata_taskfile tf;
+
+ ata_tf_init(dev, &tf);
+
+ tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
+ tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR;
+ tf.protocol |= ATA_PROT_NODATA;
+ tf.device |= 0x40;
+
+ err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
+ if (err)
+ return 0;
+
+ return ata_tf_to_lba48(&tf);
+}
+
+/**
+ * ata_read_native_max_address - LBA28 native max query
+ * @dev: Device to query
+ *
+ * Perform an LBA28 size query upon the device in question. Return the
+ * actual LBA28 size or zero if the command fails.
+ */
+
+static u64 ata_read_native_max_address(struct ata_device *dev)
+{
+ unsigned int err;
+ struct ata_taskfile tf;
+
+ ata_tf_init(dev, &tf);
+
+ tf.command = ATA_CMD_READ_NATIVE_MAX;
+ tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
+ tf.protocol |= ATA_PROT_NODATA;
+ tf.device |= 0x40;
+
+ err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
+ if (err)
+ return 0;
+
+ return ata_tf_to_lba(&tf);
+}
+
+/**
+ * ata_set_native_max_address_ext - LBA48 native max set
+ * @dev: Device to query
+ * @new_sectors: new max sectors value to set for the device
+ *
+ * Perform an LBA48 size set max upon the device in question. Return the
+ * actual LBA48 size or zero if the command fails.
+ */
+
+static u64 ata_set_native_max_address_ext(struct ata_device *dev, u64 new_sectors)
+{
+ unsigned int err;
+ struct ata_taskfile tf;
+
+ new_sectors--;
+
+ ata_tf_init(dev, &tf);
+
+ tf.command = ATA_CMD_SET_MAX_EXT;
+ tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR;
+ tf.protocol |= ATA_PROT_NODATA;
+ tf.device |= 0x40;
+
+ tf.lbal = (new_sectors >> 0) & 0xff;
+ tf.lbam = (new_sectors >> 8) & 0xff;
+ tf.lbah = (new_sectors >> 16) & 0xff;
+
+ tf.hob_lbal = (new_sectors >> 24) & 0xff;
+ tf.hob_lbam = (new_sectors >> 32) & 0xff;
+ tf.hob_lbah = (new_sectors >> 40) & 0xff;
+
+ err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
+ if (err)
+ return 0;
+
+ return ata_tf_to_lba48(&tf);
+}
+
+/**
+ * ata_set_native_max_address - LBA28 native max set
+ * @dev: Device to query
+ * @new_sectors: new max sectors value to set for the device
+ *
+ * Perform an LBA28 size set max upon the device in question. Return the
+ * actual LBA28 size or zero if the command fails.
+ */
+
+static u64 ata_set_native_max_address(struct ata_device *dev, u64 new_sectors)
+{
+ unsigned int err;
+ struct ata_taskfile tf;
+
+ new_sectors--;
+
+ ata_tf_init(dev, &tf);
+
+ tf.command = ATA_CMD_SET_MAX;
+ tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
+ tf.protocol |= ATA_PROT_NODATA;
+
+ tf.lbal = (new_sectors >> 0) & 0xff;
+ tf.lbam = (new_sectors >> 8) & 0xff;
+ tf.lbah = (new_sectors >> 16) & 0xff;
+ tf.device |= ((new_sectors >> 24) & 0x0f) | 0x40;
+
+ err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
+ if (err)
+ return 0;
+
+ return ata_tf_to_lba(&tf);
+}
+
+/**
+ * ata_hpa_resize - Resize a device with an HPA set
+ * @dev: Device to resize
+ *
+ * Read the size of an LBA28 or LBA48 disk with HPA features and resize
+ * it if required to the full size of the media. The caller must check
+ * the drive has the HPA feature set enabled.
+ */
+
+static u64 ata_hpa_resize(struct ata_device *dev)
+{
+ u64 sectors = dev->n_sectors;
+ u64 hpa_sectors;
+
+ if (ata_id_has_lba48(dev->id))
+ hpa_sectors = ata_read_native_max_address_ext(dev);
+ else
+ hpa_sectors = ata_read_native_max_address(dev);
+
+ /* if no hpa, both should be equal */
+ ata_dev_printk(dev, KERN_INFO, "%s 1: sectors = %lld, "
+ "hpa_sectors = %lld\n",
+ __FUNCTION__, (long long)sectors, (long long)hpa_sectors);
+
+ if (hpa_sectors > sectors) {
+ ata_dev_printk(dev, KERN_INFO,
+ "Host Protected Area detected:\n"
+ "\tcurrent size: %lld sectors\n"
+ "\tnative size: %lld sectors\n",
+ (long long)sectors, (long long)hpa_sectors);
+
+ if (ata_ignore_hpa) {
+ if (ata_id_has_lba48(dev->id))
+ hpa_sectors = ata_set_native_max_address_ext(dev, hpa_sectors);
+ else
+ hpa_sectors = ata_set_native_max_address(dev,
+ hpa_sectors);
+
+ if (hpa_sectors) {
+ ata_dev_printk(dev, KERN_INFO, "native size "
+ "increased to %lld sectors\n",
+ (long long)hpa_sectors);
+ return hpa_sectors;
+ }
+ }
+ }
+ return sectors;
+}
+
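As a side note for readers less familiar with the taskfile layout, the following is a minimal standalone sketch (userspace C, not kernel code) of what ata_tf_to_lba48() above computes: the HOB bytes hold bits 47..24, the regular LBA bytes hold bits 23..0, and because READ NATIVE MAX reports the highest addressable LBA, adding one yields the sector count.

#include <stdint.h>
#include <stdio.h>

/* Only mimics the relevant fields of struct ata_taskfile. */
struct tf_like {
        uint8_t hob_lbah, hob_lbam, hob_lbal;   /* bits 47..24 */
        uint8_t lbah, lbam, lbal;               /* bits 23..0  */
};

static uint64_t tf_to_sectors(const struct tf_like *tf)
{
        uint64_t lba = 0;

        lba |= (uint64_t)tf->hob_lbah << 40;
        lba |= (uint64_t)tf->hob_lbam << 32;
        lba |= (uint64_t)tf->hob_lbal << 24;
        lba |= (uint64_t)tf->lbah << 16;
        lba |= (uint64_t)tf->lbam << 8;
        lba |= tf->lbal;

        /* highest addressable LBA + 1 = number of sectors */
        return lba + 1;
}

int main(void)
{
        /* 0x1D1C596F is the highest LBA of a ~250 GB disk */
        struct tf_like tf = { 0x00, 0x00, 0x1d, 0x1c, 0x59, 0x6f };

        printf("%llu sectors\n", (unsigned long long)tf_to_sectors(&tf));
        return 0;
}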
static u64 ata_id_n_sectors(const u16 *id)
{
if (ata_id_has_lba(id)) {
@@ -1113,7 +1318,7 @@ void ata_port_flush_task(struct ata_port *ap)
spin_unlock_irqrestore(ap->lock, flags);
DPRINTK("flush #1\n");
- flush_workqueue(ata_wq);
+ cancel_work_sync(&ap->port_task.work); /* akpm: seems unneeded */
/*
* At this point, if a task is running, it's guaranteed to see
@@ -1124,7 +1329,7 @@ void ata_port_flush_task(struct ata_port *ap)
if (ata_msg_ctl(ap))
ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
__FUNCTION__);
- flush_workqueue(ata_wq);
+ cancel_work_sync(&ap->port_task.work);
}
spin_lock_irqsave(ap->lock, flags);
@@ -1270,12 +1475,16 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
if (ap->ops->post_internal_cmd)
ap->ops->post_internal_cmd(qc);
- if ((qc->flags & ATA_QCFLAG_FAILED) && !qc->err_mask) {
- if (ata_msg_warn(ap))
- ata_dev_printk(dev, KERN_WARNING,
- "zero err_mask for failed "
- "internal command, assuming AC_ERR_OTHER\n");
- qc->err_mask |= AC_ERR_OTHER;
+ /* perform minimal error analysis */
+ if (qc->flags & ATA_QCFLAG_FAILED) {
+ if (qc->result_tf.command & (ATA_ERR | ATA_DF))
+ qc->err_mask |= AC_ERR_DEV;
+
+ if (!qc->err_mask)
+ qc->err_mask |= AC_ERR_OTHER;
+
+ if (qc->err_mask & ~AC_ERR_OTHER)
+ qc->err_mask &= ~AC_ERR_OTHER;
}
/* finish up */
@@ -1382,27 +1591,41 @@ unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
- int pio;
- int speed = adev->pio_mode - XFER_PIO_0;
-
- if (speed < 2)
+ /* Controller doesn't support IORDY. Probably a pointless check
+ as the caller should know this */
+ if (adev->ap->flags & ATA_FLAG_NO_IORDY)
return 0;
- if (speed > 2)
+ /* PIO3 and higher it is mandatory */
+ if (adev->pio_mode > XFER_PIO_2)
return 1;
+ /* We turn it on when possible */
+ if (ata_id_has_iordy(adev->id))
+ return 1;
+ return 0;
+}
- /* If we have no drive specific rule, then PIO 2 is non IORDY */
+/**
+ * ata_pio_mask_no_iordy - Return the non IORDY mask
+ * @adev: ATA device
+ *
+ * Compute the highest mode possible if we are not using iordy. Return
+ * -1 if no iordy mode is available.
+ */
+static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
+{
+ /* If we have no drive specific rule, then PIO 2 is non IORDY */
if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
- pio = adev->id[ATA_ID_EIDE_PIO];
+ u16 pio = adev->id[ATA_ID_EIDE_PIO];
/* Is the speed faster than the drive allows non IORDY ? */
if (pio) {
/* This is cycle times not frequency - watch the logic! */
if (pio > 240) /* PIO2 is 240nS per cycle */
- return 1;
- return 0;
+ return 3 << ATA_SHIFT_PIO;
+ return 7 << ATA_SHIFT_PIO;
}
}
- return 0;
+ return 3 << ATA_SHIFT_PIO;
}
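Note that the values returned above are xfer-mode bitmaps, not mode numbers: assuming the usual libata convention of one bit per PIO mode starting at ATA_SHIFT_PIO, 3 << ATA_SHIFT_PIO permits PIO0-PIO1 and 7 << ATA_SHIFT_PIO permits PIO0-PIO2. A small standalone illustration of reading such a mask:

#include <stdio.h>

#define ATA_SHIFT_PIO 0         /* offset assumed for this sketch only */

static void print_pio_mask(unsigned int mask)
{
        int mode;

        for (mode = 0; mode < 5; mode++)
                if (mask & (1u << (ATA_SHIFT_PIO + mode)))
                        printf("PIO%d ", mode);
        printf("\n");
}

int main(void)
{
        print_pio_mask(3 << ATA_SHIFT_PIO);     /* PIO0 PIO1 */
        print_pio_mask(7 << ATA_SHIFT_PIO);     /* PIO0 PIO1 PIO2 */
        return 0;
}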
/**
@@ -1431,13 +1654,13 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
struct ata_taskfile tf;
unsigned int err_mask = 0;
const char *reason;
+ int may_fallback = 1, tried_spinup = 0;
int rc;
if (ata_msg_ctl(ap))
ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
-
retry:
ata_tf_init(dev, &tf);
@@ -1475,11 +1698,31 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
return -ENOENT;
}
+ /* Device or controller might have reported the wrong
+ * device class. Give a shot at the other IDENTIFY if
+ * the current one is aborted by the device.
+ */
+ if (may_fallback &&
+ (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
+ may_fallback = 0;
+
+ if (class == ATA_DEV_ATA)
+ class = ATA_DEV_ATAPI;
+ else
+ class = ATA_DEV_ATA;
+ goto retry;
+ }
+
rc = -EIO;
reason = "I/O error";
goto err_out;
}
+ /* Falling back doesn't make sense if ID data was read
+ * successfully at least once.
+ */
+ may_fallback = 0;
+
swap_buf_le16(id, ATA_ID_WORDS);
/* sanity check */
@@ -1494,6 +1737,32 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
goto err_out;
}
+ if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
+ tried_spinup = 1;
+ /*
+ * Drive powered-up in standby mode, and requires a specific
+ * SET_FEATURES spin-up subcommand before it will accept
+ * anything other than the original IDENTIFY command.
+ */
+ ata_tf_init(dev, &tf);
+ tf.command = ATA_CMD_SET_FEATURES;
+ tf.feature = SETFEATURES_SPINUP;
+ tf.protocol = ATA_PROT_NODATA;
+ tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+ err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
+ if (err_mask) {
+ rc = -EIO;
+ reason = "SPINUP failed";
+ goto err_out;
+ }
+ /*
+ * If the drive initially returned incomplete IDENTIFY info,
+ * we now must reissue the IDENTIFY command.
+ */
+ if (id[2] == 0x37c8)
+ goto retry;
+ }
+
if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
/*
* The exact sequence expected by certain pre-ATA4 drives is:
@@ -1560,20 +1829,6 @@ static void ata_dev_config_ncq(struct ata_device *dev,
snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
}
-static void ata_set_port_max_cmd_len(struct ata_port *ap)
-{
- int i;
-
- if (ap->scsi_host) {
- unsigned int len = 0;
-
- for (i = 0; i < ATA_MAX_DEVICES; i++)
- len = max(len, ap->device[i].cdb_len);
-
- ap->scsi_host->max_cmd_len = len;
- }
-}
-
/**
* ata_dev_configure - Configure the specified ATA/ATAPI device
* @dev: Target device to configure
@@ -1608,7 +1863,7 @@ int ata_dev_configure(struct ata_device *dev)
ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
/* set _SDD */
- rc = ata_acpi_push_id(ap, dev->devno);
+ rc = ata_acpi_push_id(dev);
if (rc) {
ata_dev_printk(dev, KERN_WARNING, "failed to set _SDD(%d)\n",
rc);
@@ -1684,6 +1939,9 @@ int ata_dev_configure(struct ata_device *dev)
dev->flags |= ATA_DFLAG_FLUSH_EXT;
}
+ if (ata_id_hpa_enabled(dev->id))
+ dev->n_sectors = ata_hpa_resize(dev);
+
/* config NCQ */
ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
@@ -1773,8 +2031,6 @@ int ata_dev_configure(struct ata_device *dev)
}
}
- ata_set_port_max_cmd_len(ap);
-
/* limit bridge transfers to udma5, 200 sectors */
if (ata_dev_knobble(dev)) {
if (ata_msg_drv(ap) && print_info)
@@ -1785,14 +2041,15 @@ int ata_dev_configure(struct ata_device *dev)
}
if (ata_device_blacklisted(dev) & ATA_HORKAGE_MAX_SEC_128)
- dev->max_sectors = min(ATA_MAX_SECTORS_128, dev->max_sectors);
+ dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
+ dev->max_sectors);
/* limit ATAPI DMA to R/W commands only */
if (ata_device_blacklisted(dev) & ATA_HORKAGE_DMA_RW_ONLY)
dev->horkage |= ATA_HORKAGE_DMA_RW_ONLY;
if (ap->ops->dev_config)
- ap->ops->dev_config(ap, dev);
+ ap->ops->dev_config(dev);
if (ata_msg_probe(ap))
ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
@@ -1807,6 +2064,56 @@ err_out_nosup:
}
/**
+ * ata_cable_40wire - return 40 wire cable type
+ * @ap: port
+ *
+ * Helper method for drivers which want to hardwire 40 wire cable
+ * detection.
+ */
+
+int ata_cable_40wire(struct ata_port *ap)
+{
+ return ATA_CBL_PATA40;
+}
+
+/**
+ * ata_cable_80wire - return 80 wire cable type
+ * @ap: port
+ *
+ * Helper method for drivers which want to hardwire 80 wire cable
+ * detection.
+ */
+
+int ata_cable_80wire(struct ata_port *ap)
+{
+ return ATA_CBL_PATA80;
+}
+
+/**
+ * ata_cable_unknown - return unknown PATA cable.
+ * @ap: port
+ *
+ * Helper method for drivers which have no PATA cable detection.
+ */
+
+int ata_cable_unknown(struct ata_port *ap)
+{
+ return ATA_CBL_PATA_UNK;
+}
+
+/**
+ * ata_cable_sata - return SATA cable type
+ * @ap: port
+ *
+ * Helper method for drivers which have SATA cables
+ */
+
+int ata_cable_sata(struct ata_port *ap)
+{
+ return ATA_CBL_SATA;
+}
+
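These helpers are meant to be plugged into the new ->cable_detect hook that ata_bus_probe() calls further down. A hedged sketch of how a driver with no cable detection logic of its own might use them (the ops structure and its other members are illustrative, not taken from a real driver):

static const struct ata_port_operations my_pata_ops = {
        /* ... usual taskfile, reset and DMA methods ... */
        .cable_detect   = ata_cable_unknown,
};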
+/**
* ata_bus_probe - Reset and probe ATA bus
* @ap: Bus to probe
*
@@ -1876,6 +2183,10 @@ int ata_bus_probe(struct ata_port *ap)
goto fail;
}
+ /* Now ask for the cable type as PDIAG- should have been released */
+ if (ap->ops->cable_detect)
+ ap->cbl = ap->ops->cable_detect(ap);
+
/* After the identify sequence we can now set up the devices. We do
this in the normal order so that the user doesn't get confused */
@@ -1958,7 +2269,7 @@ void ata_port_probe(struct ata_port *ap)
* LOCKING:
* None.
*/
-static void sata_print_link_status(struct ata_port *ap)
+void sata_print_link_status(struct ata_port *ap)
{
u32 sstatus, scontrol, tmp;
@@ -2353,6 +2664,12 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
t->recover = t->cycle - t->active;
}
+ /* In a few cases quantisation may produce enough errors to
+ leave t->cycle too low for the sum of active and recovery;
+ if so we must correct this */
+ if (t->active + t->recover > t->cycle)
+ t->cycle = t->active + t->recover;
+
return 0;
}
@@ -2481,12 +2798,13 @@ static int ata_dev_set_mode(struct ata_device *dev)
}
/**
- * ata_set_mode - Program timings and issue SET FEATURES - XFER
+ * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
* @ap: port on which timings will be programmed
* @r_failed_dev: out paramter for failed device
*
- * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
- * ata_set_mode() fails, pointer to the failing device is
+ * Standard implementation of the function used to tune and set
+ * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
+ * ata_dev_set_mode() fails, pointer to the failing device is
* returned in @r_failed_dev.
*
* LOCKING:
@@ -2495,14 +2813,12 @@ static int ata_dev_set_mode(struct ata_device *dev)
* RETURNS:
* 0 on success, negative errno otherwise
*/
-int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
+
+int ata_do_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
{
struct ata_device *dev;
int i, rc = 0, used_dma = 0, found = 0;
- /* has private set_mode? */
- if (ap->ops->set_mode)
- return ap->ops->set_mode(ap, r_failed_dev);
/* step 1: calculate xfer_mask */
for (i = 0; i < ATA_MAX_DEVICES; i++) {
@@ -2563,7 +2879,7 @@ int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
dev = &ap->device[i];
/* don't update suspended devices' xfer mode */
- if (!ata_dev_ready(dev))
+ if (!ata_dev_enabled(dev))
continue;
rc = ata_dev_set_mode(dev);
@@ -2577,9 +2893,6 @@ int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
ap->host->simplex_claimed = ap;
- /* step5: chip specific finalisation */
- if (ap->ops->post_set_mode)
- ap->ops->post_set_mode(ap);
out:
if (rc)
*r_failed_dev = dev;
@@ -2587,6 +2900,29 @@ int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
}
/**
+ * ata_set_mode - Program timings and issue SET FEATURES - XFER
+ * @ap: port on which timings will be programmed
+ * @r_failed_dev: out parameter for failed device
+ *
+ * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
+ * ata_set_mode() fails, pointer to the failing device is
+ * returned in @r_failed_dev.
+ *
+ * LOCKING:
+ * PCI/etc. bus probe sem.
+ *
+ * RETURNS:
+ * 0 on success, negative errno otherwise
+ */
+int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
+{
+ /* has private set_mode? */
+ if (ap->ops->set_mode)
+ return ap->ops->set_mode(ap, r_failed_dev);
+ return ata_do_set_mode(ap, r_failed_dev);
+}
+
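With the set_mode paths split this way, a controller needing chip-specific work around mode programming can provide its own ->set_mode and still reuse the standard logic. A sketch under that assumption, with hypothetical driver-local names:

static int my_set_mode(struct ata_port *ap, struct ata_device **r_failed)
{
        my_chip_prepare_timing(ap);             /* hypothetical hook */
        return ata_do_set_mode(ap, r_failed);   /* standard helper above */
}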
+/**
* ata_tf_to_host - issue ATA taskfile to host controller
* @ap: port to which command is being issued
* @tf: ATA taskfile register set
@@ -2661,23 +2997,71 @@ int ata_busy_sleep(struct ata_port *ap,
return 0;
}
-static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
+/**
+ * ata_wait_ready - sleep until BSY clears, or timeout
+ * @ap: port containing status register to be polled
+ * @deadline: deadline jiffies for the operation
+ *
+ * Sleep until ATA Status register bit BSY clears, or timeout
+ * occurs.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise.
+ */
+int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
+{
+ unsigned long start = jiffies;
+ int warned = 0;
+
+ while (1) {
+ u8 status = ata_chk_status(ap);
+ unsigned long now = jiffies;
+
+ if (!(status & ATA_BUSY))
+ return 0;
+ if (!ata_port_online(ap) && status == 0xff)
+ return -ENODEV;
+ if (time_after(now, deadline))
+ return -EBUSY;
+
+ if (!warned && time_after(now, start + 5 * HZ) &&
+ (deadline - now > 3 * HZ)) {
+ ata_port_printk(ap, KERN_WARNING,
+ "port is slow to respond, please be patient "
+ "(Status 0x%x)\n", status);
+ warned = 1;
+ }
+
+ msleep(50);
+ }
+}
+
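The reset helpers below are converted from per-step timeouts to a single absolute deadline, which is what ata_wait_ready() consumes. The standalone userspace sketch below (clock_gettime() instead of jiffies, not kernel code) shows the same pattern: every nested wait checks one shared deadline rather than starting its own timer.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static bool past(const struct timespec *deadline)
{
        struct timespec now;

        clock_gettime(CLOCK_MONOTONIC, &now);
        return now.tv_sec > deadline->tv_sec ||
               (now.tv_sec == deadline->tv_sec &&
                now.tv_nsec >= deadline->tv_nsec);
}

/* poll cond() every 50 ms until it succeeds or the deadline passes */
static int wait_until(bool (*cond)(void), const struct timespec *deadline)
{
        while (!cond()) {
                if (past(deadline))
                        return -1;      /* -EBUSY in the kernel version */
                usleep(50 * 1000);
        }
        return 0;
}

static bool never_ready(void) { return false; }

int main(void)
{
        struct timespec deadline;

        clock_gettime(CLOCK_MONOTONIC, &deadline);
        deadline.tv_sec += 2;           /* one 2-second budget */
        printf("wait_until: %d\n", wait_until(never_ready, &deadline));
        return 0;
}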
+static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
+ unsigned long deadline)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
unsigned int dev0 = devmask & (1 << 0);
unsigned int dev1 = devmask & (1 << 1);
- unsigned long timeout;
+ int rc, ret = 0;
/* if device 0 was found in ata_devchk, wait for its
* BSY bit to clear
*/
- if (dev0)
- ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
+ if (dev0) {
+ rc = ata_wait_ready(ap, deadline);
+ if (rc) {
+ if (rc != -ENODEV)
+ return rc;
+ ret = rc;
+ }
+ }
/* if device 1 was found in ata_devchk, wait for
* register access, then wait for BSY to clear
*/
- timeout = jiffies + ATA_TMOUT_BOOT;
while (dev1) {
u8 nsect, lbal;
@@ -2686,14 +3070,18 @@ static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
lbal = ioread8(ioaddr->lbal_addr);
if ((nsect == 1) && (lbal == 1))
break;
- if (time_after(jiffies, timeout)) {
- dev1 = 0;
- break;
- }
+ if (time_after(jiffies, deadline))
+ return -EBUSY;
msleep(50); /* give drive a breather */
}
- if (dev1)
- ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
+ if (dev1) {
+ rc = ata_wait_ready(ap, deadline);
+ if (rc) {
+ if (rc != -ENODEV)
+ return rc;
+ ret = rc;
+ }
+ }
/* is all this really necessary? */
ap->ops->dev_select(ap, 0);
@@ -2701,10 +3089,12 @@ static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
ap->ops->dev_select(ap, 1);
if (dev0)
ap->ops->dev_select(ap, 0);
+
+ return ret;
}
-static unsigned int ata_bus_softreset(struct ata_port *ap,
- unsigned int devmask)
+static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
+ unsigned long deadline)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
@@ -2734,11 +3124,9 @@ static unsigned int ata_bus_softreset(struct ata_port *ap,
* pulldown resistor.
*/
if (ata_check_status(ap) == 0xFF)
- return 0;
-
- ata_bus_post_reset(ap, devmask);
+ return -ENODEV;
- return 0;
+ return ata_bus_post_reset(ap, devmask, deadline);
}
/**
@@ -2767,6 +3155,7 @@ void ata_bus_reset(struct ata_port *ap)
unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
u8 err;
unsigned int dev0, dev1 = 0, devmask = 0;
+ int rc;
DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
@@ -2788,9 +3177,11 @@ void ata_bus_reset(struct ata_port *ap)
ap->ops->dev_select(ap, 0);
/* issue bus reset */
- if (ap->flags & ATA_FLAG_SRST)
- if (ata_bus_softreset(ap, devmask))
+ if (ap->flags & ATA_FLAG_SRST) {
+ rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
+ if (rc && rc != -ENODEV)
goto err_out;
+ }
/*
* determine by signature whether we have ATA or ATAPI devices
@@ -2832,29 +3223,37 @@ err_out:
* sata_phy_debounce - debounce SATA phy status
* @ap: ATA port to debounce SATA phy status for
* @params: timing parameters { interval, duratinon, timeout } in msec
+ * @deadline: deadline jiffies for the operation
*
* Make sure SStatus of @ap reaches stable state, determined by
* holding the same value where DET is not 1 for @duration polled
* every @interval, before @timeout. Timeout constraints the
- * beginning of the stable state. Because, after hot unplugging,
- * DET gets stuck at 1 on some controllers, this functions waits
+ * beginning of the stable state. Because DET gets stuck at 1 on
+ * some controllers after hot unplugging, this function waits
* until timeout then returns 0 if DET is stable at 1.
*
+ * @timeout is further limited by @deadline. The sooner of the
+ * two is used.
+ *
* LOCKING:
* Kernel thread context (may sleep)
*
* RETURNS:
* 0 on success, -errno on failure.
*/
-int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
+int sata_phy_debounce(struct ata_port *ap, const unsigned long *params,
+ unsigned long deadline)
{
unsigned long interval_msec = params[0];
- unsigned long duration = params[1] * HZ / 1000;
- unsigned long timeout = jiffies + params[2] * HZ / 1000;
- unsigned long last_jiffies;
+ unsigned long duration = msecs_to_jiffies(params[1]);
+ unsigned long last_jiffies, t;
u32 last, cur;
int rc;
+ t = jiffies + msecs_to_jiffies(params[2]);
+ if (time_before(t, deadline))
+ deadline = t;
+
if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
return rc;
cur &= 0xf;
@@ -2870,7 +3269,7 @@ int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
/* DET stable? */
if (cur == last) {
- if (cur == 1 && time_before(jiffies, timeout))
+ if (cur == 1 && time_before(jiffies, deadline))
continue;
if (time_after(jiffies, last_jiffies + duration))
return 0;
@@ -2881,8 +3280,8 @@ int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
last = cur;
last_jiffies = jiffies;
- /* check timeout */
- if (time_after(jiffies, timeout))
+ /* check deadline */
+ if (time_after(jiffies, deadline))
return -EBUSY;
}
}
@@ -2891,6 +3290,7 @@ int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
* sata_phy_resume - resume SATA phy
* @ap: ATA port to resume SATA phy for
* @params: timing parameters { interval, duratinon, timeout } in msec
+ * @deadline: deadline jiffies for the operation
*
* Resume SATA phy of @ap and debounce it.
*
@@ -2900,7 +3300,8 @@ int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
* RETURNS:
* 0 on success, -errno on failure.
*/
-int sata_phy_resume(struct ata_port *ap, const unsigned long *params)
+int sata_phy_resume(struct ata_port *ap, const unsigned long *params,
+ unsigned long deadline)
{
u32 scontrol;
int rc;
@@ -2918,43 +3319,19 @@ int sata_phy_resume(struct ata_port *ap, const unsigned long *params)
*/
msleep(200);
- return sata_phy_debounce(ap, params);
-}
-
-static void ata_wait_spinup(struct ata_port *ap)
-{
- struct ata_eh_context *ehc = &ap->eh_context;
- unsigned long end, secs;
- int rc;
-
- /* first, debounce phy if SATA */
- if (ap->cbl == ATA_CBL_SATA) {
- rc = sata_phy_debounce(ap, sata_deb_timing_hotplug);
-
- /* if debounced successfully and offline, no need to wait */
- if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
- return;
- }
-
- /* okay, let's give the drive time to spin up */
- end = ehc->i.hotplug_timestamp + ATA_SPINUP_WAIT * HZ / 1000;
- secs = ((end - jiffies) + HZ - 1) / HZ;
-
- if (time_after(jiffies, end))
- return;
-
- if (secs > 5)
- ata_port_printk(ap, KERN_INFO, "waiting for device to spin up "
- "(%lu secs)\n", secs);
-
- schedule_timeout_uninterruptible(end - jiffies);
+ return sata_phy_debounce(ap, params, deadline);
}
/**
* ata_std_prereset - prepare for reset
* @ap: ATA port to be reset
+ * @deadline: deadline jiffies for the operation
*
- * @ap is about to be reset. Initialize it.
+ * @ap is about to be reset. Initialize it. Failure from
+ * prereset makes libata abort the whole reset sequence and give up
+ * that port, so prereset should be best-effort. It does its
+ * best to prepare for reset sequence but if things go wrong, it
+ * should just whine, not fail.
*
* LOCKING:
* Kernel thread context (may sleep)
@@ -2962,41 +3339,41 @@ static void ata_wait_spinup(struct ata_port *ap)
* RETURNS:
* 0 on success, -errno otherwise.
*/
-int ata_std_prereset(struct ata_port *ap)
+int ata_std_prereset(struct ata_port *ap, unsigned long deadline)
{
struct ata_eh_context *ehc = &ap->eh_context;
const unsigned long *timing = sata_ehc_deb_timing(ehc);
int rc;
- /* handle link resume & hotplug spinup */
+ /* handle link resume */
if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
(ap->flags & ATA_FLAG_HRST_TO_RESUME))
ehc->i.action |= ATA_EH_HARDRESET;
- if ((ehc->i.flags & ATA_EHI_HOTPLUGGED) &&
- (ap->flags & ATA_FLAG_SKIP_D2H_BSY))
- ata_wait_spinup(ap);
-
/* if we're about to do hardreset, nothing more to do */
if (ehc->i.action & ATA_EH_HARDRESET)
return 0;
/* if SATA, resume phy */
if (ap->cbl == ATA_CBL_SATA) {
- rc = sata_phy_resume(ap, timing);
- if (rc && rc != -EOPNOTSUPP) {
- /* phy resume failed */
+ rc = sata_phy_resume(ap, timing, deadline);
+ /* whine about phy resume failure but proceed */
+ if (rc && rc != -EOPNOTSUPP)
ata_port_printk(ap, KERN_WARNING, "failed to resume "
"link for reset (errno=%d)\n", rc);
- return rc;
- }
}
/* Wait for !BSY if the controller can wait for the first D2H
* Reg FIS and we don't know that no device is attached.
*/
- if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap))
- ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
+ if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap)) {
+ rc = ata_wait_ready(ap, deadline);
+ if (rc && rc != -ENODEV) {
+ ata_port_printk(ap, KERN_WARNING, "device not ready "
+ "(errno=%d), forcing hardreset\n", rc);
+ ehc->i.action |= ATA_EH_HARDRESET;
+ }
+ }
return 0;
}
@@ -3005,6 +3382,7 @@ int ata_std_prereset(struct ata_port *ap)
* ata_std_softreset - reset host port via ATA SRST
* @ap: port to reset
* @classes: resulting classes of attached devices
+ * @deadline: deadline jiffies for the operation
*
* Reset host port using ATA SRST.
*
@@ -3014,10 +3392,12 @@ int ata_std_prereset(struct ata_port *ap)
* RETURNS:
* 0 on success, -errno otherwise.
*/
-int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
+int ata_std_softreset(struct ata_port *ap, unsigned int *classes,
+ unsigned long deadline)
{
unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
- unsigned int devmask = 0, err_mask;
+ unsigned int devmask = 0;
+ int rc;
u8 err;
DPRINTK("ENTER\n");
@@ -3038,11 +3418,11 @@ int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
/* issue bus reset */
DPRINTK("about to softreset, devmask=%x\n", devmask);
- err_mask = ata_bus_softreset(ap, devmask);
- if (err_mask) {
- ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
- err_mask);
- return -EIO;
+ rc = ata_bus_softreset(ap, devmask, deadline);
+ /* if link is occupied, -ENODEV too is an error */
+ if (rc && (rc != -ENODEV || sata_scr_valid(ap))) {
+ ata_port_printk(ap, KERN_ERR, "SRST failed (errno=%d)\n", rc);
+ return rc;
}
/* determine by signature whether we have ATA or ATAPI devices */
@@ -3059,6 +3439,7 @@ int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
* sata_port_hardreset - reset port via SATA phy reset
* @ap: port to reset
* @timing: timing parameters { interval, duratinon, timeout } in msec
+ * @deadline: deadline jiffies for the operation
*
* SATA phy-reset host port using DET bits of SControl register.
*
@@ -3068,7 +3449,8 @@ int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
* RETURNS:
* 0 on success, -errno otherwise.
*/
-int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing)
+int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing,
+ unsigned long deadline)
{
u32 scontrol;
int rc;
@@ -3107,7 +3489,7 @@ int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing)
msleep(1);
/* bring phy back */
- rc = sata_phy_resume(ap, timing);
+ rc = sata_phy_resume(ap, timing, deadline);
out:
DPRINTK("EXIT, rc=%d\n", rc);
return rc;
@@ -3117,6 +3499,7 @@ int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing)
* sata_std_hardreset - reset host port via SATA phy reset
* @ap: port to reset
* @class: resulting class of attached device
+ * @deadline: deadline jiffies for the operation
*
* SATA phy-reset host port using DET bits of SControl register,
* wait for !BSY and classify the attached device.
@@ -3127,7 +3510,8 @@ int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing)
* RETURNS:
* 0 on success, -errno otherwise.
*/
-int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
+int sata_std_hardreset(struct ata_port *ap, unsigned int *class,
+ unsigned long deadline)
{
const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
int rc;
@@ -3135,7 +3519,7 @@ int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
DPRINTK("ENTER\n");
/* do hardreset */
- rc = sata_port_hardreset(ap, timing);
+ rc = sata_port_hardreset(ap, timing, deadline);
if (rc) {
ata_port_printk(ap, KERN_ERR,
"COMRESET failed (errno=%d)\n", rc);
@@ -3152,10 +3536,12 @@ int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
/* wait a while before checking status, see SRST for more info */
msleep(150);
- if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
+ rc = ata_wait_ready(ap, deadline);
+ /* link occupied, -ENODEV too is an error */
+ if (rc) {
ata_port_printk(ap, KERN_ERR,
- "COMRESET failed (device not ready)\n");
- return -EIO;
+ "COMRESET failed (errno=%d)\n", rc);
+ return rc;
}
ap->ops->dev_select(ap, 0); /* probably unnecessary */
@@ -3236,7 +3622,6 @@ static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
const u16 *old_id = dev->id;
unsigned char model[2][ATA_ID_PROD_LEN + 1];
unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
- u64 new_n_sectors;
if (dev->class != new_class) {
ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
@@ -3248,7 +3633,6 @@ static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
- new_n_sectors = ata_id_n_sectors(new_id);
if (strcmp(model[0], model[1])) {
ata_dev_printk(dev, KERN_INFO, "model number mismatch "
@@ -3262,20 +3646,12 @@ static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
return 0;
}
- if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
- ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
- "%llu != %llu\n",
- (unsigned long long)dev->n_sectors,
- (unsigned long long)new_n_sectors);
- return 0;
- }
-
return 1;
}
/**
- * ata_dev_revalidate - Revalidate ATA device
- * @dev: device to revalidate
+ * ata_dev_reread_id - Re-read IDENTIFY data
+ * @dev: target ATA device
* @readid_flags: read ID flags
*
* Re-read IDENTIFY page and make sure @dev is still attached to
@@ -3287,34 +3663,68 @@ static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
* RETURNS:
* 0 on success, negative errno otherwise
*/
-int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
+int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
{
unsigned int class = dev->class;
u16 *id = (void *)dev->ap->sector_buf;
int rc;
- if (!ata_dev_enabled(dev)) {
- rc = -ENODEV;
- goto fail;
- }
-
/* read ID data */
rc = ata_dev_read_id(dev, &class, readid_flags, id);
if (rc)
- goto fail;
+ return rc;
/* is the device still there? */
- if (!ata_dev_same_device(dev, class, id)) {
- rc = -ENODEV;
- goto fail;
- }
+ if (!ata_dev_same_device(dev, class, id))
+ return -ENODEV;
memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
+ return 0;
+}
+
+/**
+ * ata_dev_revalidate - Revalidate ATA device
+ * @dev: device to revalidate
+ * @readid_flags: read ID flags
+ *
+ * Re-read IDENTIFY page, make sure @dev is still attached to the
+ * port and reconfigure it according to the new IDENTIFY page.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ *
+ * RETURNS:
+ * 0 on success, negative errno otherwise
+ */
+int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
+{
+ u64 n_sectors = dev->n_sectors;
+ int rc;
+
+ if (!ata_dev_enabled(dev))
+ return -ENODEV;
+
+ /* re-read ID */
+ rc = ata_dev_reread_id(dev, readid_flags);
+ if (rc)
+ goto fail;
/* configure device according to the new ID */
rc = ata_dev_configure(dev);
- if (rc == 0)
- return 0;
+ if (rc)
+ goto fail;
+
+ /* verify n_sectors hasn't changed */
+ if (dev->class == ATA_DEV_ATA && dev->n_sectors != n_sectors) {
+ ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
+ "%llu != %llu\n",
+ (unsigned long long)n_sectors,
+ (unsigned long long)dev->n_sectors);
+ rc = -ENODEV;
+ goto fail;
+ }
+
+ return 0;
fail:
ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
@@ -3358,6 +3768,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
{ "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
{ "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA },
+ { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
/* Weird ATAPI devices */
{ "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 |
@@ -3372,6 +3783,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
/* NCQ is broken */
{ "Maxtor 6L250S0", "BANC1G10", ATA_HORKAGE_NONCQ },
+ { "Maxtor 6B200M0", "BANC1B10", ATA_HORKAGE_NONCQ },
/* NCQ hard hangs device under heavier load, needs hard power cycle */
{ "Maxtor 6B250S0", "BANC1B70", ATA_HORKAGE_NONCQ },
/* Blacklist entries taken from Silicon Image 3124/3132
@@ -3441,19 +3853,7 @@ static void ata_dev_xfermask(struct ata_device *dev)
xfer_mask = ata_pack_xfermask(ap->pio_mask,
ap->mwdma_mask, ap->udma_mask);
- /* Apply cable rule here. Don't apply it early because when
- * we handle hot plug the cable type can itself change.
- */
- if (ap->cbl == ATA_CBL_PATA40)
- xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
- /* Apply drive side cable rule. Unknown or 80 pin cables reported
- * host side are checked drive side as well. Cases where we know a
- * 40wire cable is used safely for 80 are not checked here.
- */
- if (ata_drive_40wire(dev->id) && (ap->cbl == ATA_CBL_PATA_UNK || ap->cbl == ATA_CBL_PATA80))
- xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
-
-
+ /* drive modes available */
xfer_mask &= ata_pack_xfermask(dev->pio_mask,
dev->mwdma_mask, dev->udma_mask);
xfer_mask &= ata_id_xfermask(dev->id);
@@ -3482,8 +3882,30 @@ static void ata_dev_xfermask(struct ata_device *dev)
"other device, disabling DMA\n");
}
+ if (ap->flags & ATA_FLAG_NO_IORDY)
+ xfer_mask &= ata_pio_mask_no_iordy(dev);
+
if (ap->ops->mode_filter)
- xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);
+ xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
+
+ /* Apply cable rule here. Don't apply it early because when
+ * we handle hot plug the cable type can itself change.
+ * Check this last so that we know if the transfer rate was
+ * solely limited by the cable.
+ * Unknown or 80 wire cables reported host side are checked
+ * drive side as well. Cases where we know a 40wire cable
+ * is used safely for 80 are not checked here.
+ */
+ if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
+ /* UDMA/44 or higher would be available */
+ if ((ap->cbl == ATA_CBL_PATA40) ||
+ (ata_drive_40wire(dev->id) &&
+ (ap->cbl == ATA_CBL_PATA_UNK ||
+ ap->cbl == ATA_CBL_PATA80))) {
+ ata_dev_printk(dev, KERN_WARNING,
+ "limited to UDMA/33 due to 40-wire cable\n");
+ xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
+ }
ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
&dev->mwdma_mask, &dev->udma_mask);
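For reference, 0xF8 << ATA_SHIFT_UDMA covers UDMA3 and above, so clearing those bits is what leaves the drive at UDMA/33 (UDMA2). A small standalone check of the arithmetic, assuming ATA_SHIFT_UDMA is simply the offset of the UDMA bits within the mask being manipulated:

#include <stdio.h>

#define ATA_SHIFT_UDMA 0        /* offset assumed for this sketch only */

int main(void)
{
        unsigned int udma_mask = 0x7f;                  /* UDMA0..UDMA6 supported */
        int mode;

        udma_mask &= ~(0xF8 << ATA_SHIFT_UDMA);         /* the 40-wire clamp above */

        for (mode = 0; mode <= 6; mode++)
                if (udma_mask & (1u << (ATA_SHIFT_UDMA + mode)))
                        printf("UDMA%d ", mode);        /* prints: UDMA0 UDMA1 UDMA2 */
        printf("\n");
        return 0;
}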
@@ -3511,10 +3933,13 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
/* set up set-features taskfile */
DPRINTK("set features - xfer mode\n");
+ /* Some controllers and ATAPI devices show flaky interrupt
+ * behavior after setting xfer mode. Use polling instead.
+ */
ata_tf_init(dev, &tf);
tf.command = ATA_CMD_SET_FEATURES;
tf.feature = SETFEATURES_XFER;
- tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+ tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
tf.protocol = ATA_PROT_NODATA;
tf.nsect = dev->xfer_mode;
@@ -4022,10 +4447,10 @@ void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
/**
- * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
+ * ata_pio_sector - Transfer a sector of data.
* @qc: Command on going
*
- * Transfer ATA_SECT_SIZE of data from/to the ATA device.
+ * Transfer qc->sect_size bytes of data from/to the ATA device.
*
* LOCKING:
* Inherited from caller.
@@ -4040,7 +4465,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
unsigned int offset;
unsigned char *buf;
- if (qc->curbytes == qc->nbytes - ATA_SECT_SIZE)
+ if (qc->curbytes == qc->nbytes - qc->sect_size)
ap->hsm_task_state = HSM_ST_LAST;
page = sg[qc->cursg].page;
@@ -4060,17 +4485,17 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
buf = kmap_atomic(page, KM_IRQ0);
/* do the actual data transfer */
- ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
+ ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
kunmap_atomic(buf, KM_IRQ0);
local_irq_restore(flags);
} else {
buf = page_address(page);
- ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
+ ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
}
- qc->curbytes += ATA_SECT_SIZE;
- qc->cursg_ofs += ATA_SECT_SIZE;
+ qc->curbytes += qc->sect_size;
+ qc->cursg_ofs += qc->sect_size;
if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
qc->cursg++;
@@ -4079,10 +4504,10 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
}
/**
- * ata_pio_sectors - Transfer one or many 512-byte sectors.
+ * ata_pio_sectors - Transfer one or many sectors.
* @qc: Command on going
*
- * Transfer one or many ATA_SECT_SIZE of data from/to the
+ * Transfer one or many sectors of data from/to the
* ATA device for the DRQ request.
*
* LOCKING:
@@ -4097,7 +4522,7 @@ static void ata_pio_sectors(struct ata_queued_cmd *qc)
WARN_ON(qc->dev->multi_count == 0);
- nsect = min((qc->nbytes - qc->curbytes) / ATA_SECT_SIZE,
+ nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
qc->dev->multi_count);
while (nsect--)
ata_pio_sector(qc);
@@ -4992,14 +5417,6 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
}
}
- /* Some controllers show flaky interrupt behavior after
- * setting xfer mode. Use polling instead.
- */
- if (unlikely(qc->tf.command == ATA_CMD_SET_FEATURES &&
- qc->tf.feature == SETFEATURES_XFER) &&
- (ap->flags & ATA_FLAG_SETXFER_POLLING))
- qc->tf.flags |= ATA_TFLAG_POLLING;
-
/* select the device */
ata_dev_select(ap, qc->dev->devno, 1, 0);
@@ -5460,37 +5877,11 @@ static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
*/
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
- int i, j, rc;
+ int rc;
rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
- if (rc)
- goto fail;
-
- /* EH is quiescent now. Fail if we have any ready device.
- * This happens if hotplug occurs between completion of device
- * suspension and here.
- */
- for (i = 0; i < host->n_ports; i++) {
- struct ata_port *ap = host->ports[i];
-
- for (j = 0; j < ATA_MAX_DEVICES; j++) {
- struct ata_device *dev = &ap->device[j];
-
- if (ata_dev_ready(dev)) {
- ata_port_printk(ap, KERN_WARNING,
- "suspend failed, device %d "
- "still active\n", dev->devno);
- rc = -EBUSY;
- goto fail;
- }
- }
- }
-
- host->dev->power.power_state = mesg;
- return 0;
-
- fail:
- ata_host_resume(host);
+ if (rc == 0)
+ host->dev->power.power_state = mesg;
return rc;
}
@@ -5577,42 +5968,36 @@ void ata_dev_init(struct ata_device *dev)
}
/**
- * ata_port_init - Initialize an ata_port structure
- * @ap: Structure to initialize
- * @host: Collection of hosts to which @ap belongs
- * @ent: Probe information provided by low-level driver
- * @port_no: Port number associated with this ata_port
+ * ata_port_alloc - allocate and initialize basic ATA port resources
+ * @host: ATA host this allocated port belongs to
+ *
+ * Allocate and initialize basic ATA port resources.
*
- * Initialize a new ata_port structure.
+ * RETURNS:
+ * Allocated ATA port on success, NULL on failure.
*
* LOCKING:
- * Inherited from caller.
+ * Inherited from calling layer (may sleep).
*/
-void ata_port_init(struct ata_port *ap, struct ata_host *host,
- const struct ata_probe_ent *ent, unsigned int port_no)
+struct ata_port *ata_port_alloc(struct ata_host *host)
{
+ struct ata_port *ap;
unsigned int i;
+ DPRINTK("ENTER\n");
+
+ ap = kzalloc(sizeof(*ap), GFP_KERNEL);
+ if (!ap)
+ return NULL;
+
+ ap->pflags |= ATA_PFLAG_INITIALIZING;
ap->lock = &host->lock;
ap->flags = ATA_FLAG_DISABLED;
- ap->print_id = ata_print_id++;
+ ap->print_id = -1;
ap->ctl = ATA_DEVCTL_OBS;
ap->host = host;
- ap->dev = ent->dev;
- ap->port_no = port_no;
- if (port_no == 1 && ent->pinfo2) {
- ap->pio_mask = ent->pinfo2->pio_mask;
- ap->mwdma_mask = ent->pinfo2->mwdma_mask;
- ap->udma_mask = ent->pinfo2->udma_mask;
- ap->flags |= ent->pinfo2->flags;
- ap->ops = ent->pinfo2->port_ops;
- } else {
- ap->pio_mask = ent->pio_mask;
- ap->mwdma_mask = ent->mwdma_mask;
- ap->udma_mask = ent->udma_mask;
- ap->flags |= ent->port_flags;
- ap->ops = ent->port_ops;
- }
+ ap->dev = host->dev;
+
ap->hw_sata_spd_limit = UINT_MAX;
ap->active_tag = ATA_TAG_POISON;
ap->last_ctl = 0xFF;
@@ -5632,10 +6017,7 @@ void ata_port_init(struct ata_port *ap, struct ata_host *host,
INIT_LIST_HEAD(&ap->eh_done_q);
init_waitqueue_head(&ap->eh_wait_q);
- /* set cable type */
ap->cbl = ATA_CBL_NONE;
- if (ap->flags & ATA_FLAG_SATA)
- ap->cbl = ATA_CBL_SATA;
for (i = 0; i < ATA_MAX_DEVICES; i++) {
struct ata_device *dev = &ap->device[i];
@@ -5648,100 +6030,209 @@ void ata_port_init(struct ata_port *ap, struct ata_host *host,
ap->stats.unhandled_irq = 1;
ap->stats.idle_irq = 1;
#endif
+ return ap;
+}
+
+static void ata_host_release(struct device *gendev, void *res)
+{
+ struct ata_host *host = dev_get_drvdata(gendev);
+ int i;
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+
+ if (!ap)
+ continue;
+
+ if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop)
+ ap->ops->port_stop(ap);
+ }
+
+ if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop)
+ host->ops->host_stop(host);
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+
+ if (!ap)
+ continue;
- memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
+ if (ap->scsi_host)
+ scsi_host_put(ap->scsi_host);
+
+ kfree(ap);
+ host->ports[i] = NULL;
+ }
+
+ dev_set_drvdata(gendev, NULL);
}
/**
- * ata_port_init_shost - Initialize SCSI host associated with ATA port
- * @ap: ATA port to initialize SCSI host for
- * @shost: SCSI host associated with @ap
+ * ata_host_alloc - allocate and init basic ATA host resources
+ * @dev: generic device this host is associated with
+ * @max_ports: maximum number of ATA ports associated with this host
+ *
+ * Allocate and initialize basic ATA host resources. LLD calls
+ * this function to allocate a host, initializes it fully and
+ * attaches it using ata_host_register().
*
- * Initialize SCSI host @shost associated with ATA port @ap.
+ * @max_ports ports are allocated and host->n_ports is
+ * initialized to @max_ports. The caller is allowed to decrease
+ * host->n_ports before calling ata_host_register(). The unused
+ * ports will be automatically freed on registration.
+ *
+ * RETURNS:
+ * Allocated ATA host on success, NULL on failure.
*
* LOCKING:
- * Inherited from caller.
+ * Inherited from calling layer (may sleep).
*/
-static void ata_port_init_shost(struct ata_port *ap, struct Scsi_Host *shost)
+struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
{
- ap->scsi_host = shost;
+ struct ata_host *host;
+ size_t sz;
+ int i;
+
+ DPRINTK("ENTER\n");
+
+ if (!devres_open_group(dev, NULL, GFP_KERNEL))
+ return NULL;
+
+ /* alloc a container for our list of ATA ports (buses) */
+ sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
+ /* alloc a container for our list of ATA ports (buses) */
+ host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
+ if (!host)
+ goto err_out;
+
+ devres_add(dev, host);
+ dev_set_drvdata(dev, host);
+
+ spin_lock_init(&host->lock);
+ host->dev = dev;
+ host->n_ports = max_ports;
- shost->unique_id = ap->print_id;
- shost->max_id = 16;
- shost->max_lun = 1;
- shost->max_channel = 1;
- shost->max_cmd_len = 12;
+ /* allocate ports bound to this host */
+ for (i = 0; i < max_ports; i++) {
+ struct ata_port *ap;
+
+ ap = ata_port_alloc(host);
+ if (!ap)
+ goto err_out;
+
+ ap->port_no = i;
+ host->ports[i] = ap;
+ }
+
+ devres_remove_group(dev, NULL);
+ return host;
+
+ err_out:
+ devres_release_group(dev, NULL);
+ return NULL;
}
/**
- * ata_port_add - Attach low-level ATA driver to system
- * @ent: Information provided by low-level driver
- * @host: Collections of ports to which we add
- * @port_no: Port number associated with this host
+ * ata_host_alloc_pinfo - alloc host and init with port_info array
+ * @dev: generic device this host is associated with
+ * @ppi: array of ATA port_info to initialize host with
+ * @n_ports: number of ATA ports attached to this host
*
- * Attach low-level ATA driver to system.
- *
- * LOCKING:
- * PCI/etc. bus probe sem.
+ * Allocate ATA host and initialize with info from @ppi. If NULL
+ * terminated, @ppi may contain fewer entries than @n_ports. The
+ * last entry will be used for the remaining ports.
*
* RETURNS:
- * New ata_port on success, for NULL on error.
+ * Allocate ATA host on success, NULL on failure.
+ *
+ * LOCKING:
+ * Inherited from calling layer (may sleep).
*/
-static struct ata_port * ata_port_add(const struct ata_probe_ent *ent,
- struct ata_host *host,
- unsigned int port_no)
+struct ata_host *ata_host_alloc_pinfo(struct device *dev,
+ const struct ata_port_info * const * ppi,
+ int n_ports)
{
- struct Scsi_Host *shost;
- struct ata_port *ap;
-
- DPRINTK("ENTER\n");
+ const struct ata_port_info *pi;
+ struct ata_host *host;
+ int i, j;
- if (!ent->port_ops->error_handler &&
- !(ent->port_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
- printk(KERN_ERR "ata%u: no reset mechanism available\n",
- port_no);
+ host = ata_host_alloc(dev, n_ports);
+ if (!host)
return NULL;
- }
- shost = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
- if (!shost)
- return NULL;
+ for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
- shost->transportt = &ata_scsi_transport_template;
+ if (ppi[j])
+ pi = ppi[j++];
- ap = ata_shost_to_port(shost);
+ ap->pio_mask = pi->pio_mask;
+ ap->mwdma_mask = pi->mwdma_mask;
+ ap->udma_mask = pi->udma_mask;
+ ap->flags |= pi->flags;
+ ap->ops = pi->port_ops;
- ata_port_init(ap, host, ent, port_no);
- ata_port_init_shost(ap, shost);
+ if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
+ host->ops = pi->port_ops;
+ if (!host->private_data && pi->private_data)
+ host->private_data = pi->private_data;
+ }
- return ap;
+ return host;
}
-static void ata_host_release(struct device *gendev, void *res)
+/**
+ * ata_host_start - start and freeze ports of an ATA host
+ * @host: ATA host to start ports for
+ *
+ * Start and then freeze ports of @host. Started status is
+ * recorded in host->flags, so this function can be called
+ * multiple times. Ports are guaranteed to get started only
+ * once. If host->ops isn't initialized yet, it's set to the
+ * first non-dummy port ops.
+ *
+ * LOCKING:
+ * Inherited from calling layer (may sleep).
+ *
+ * RETURNS:
+ * 0 if all ports are started successfully, -errno otherwise.
+ */
+int ata_host_start(struct ata_host *host)
{
- struct ata_host *host = dev_get_drvdata(gendev);
- int i;
+ int i, rc;
+
+ if (host->flags & ATA_HOST_STARTED)
+ return 0;
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
- if (ap && ap->ops->port_stop)
- ap->ops->port_stop(ap);
+ if (!host->ops && !ata_port_is_dummy(ap))
+ host->ops = ap->ops;
+
+ if (ap->ops->port_start) {
+ rc = ap->ops->port_start(ap);
+ if (rc) {
+ ata_port_printk(ap, KERN_ERR, "failed to "
+ "start port (errno=%d)\n", rc);
+ goto err_out;
+ }
+ }
+
+ ata_eh_freeze_port(ap);
}
- if (host->ops->host_stop)
- host->ops->host_stop(host);
+ host->flags |= ATA_HOST_STARTED;
+ return 0;
- for (i = 0; i < host->n_ports; i++) {
+ err_out:
+ while (--i >= 0) {
struct ata_port *ap = host->ports[i];
- if (ap)
- scsi_host_put(ap->scsi_host);
-
- host->ports[i] = NULL;
+ if (ap->ops->port_stop)
+ ap->ops->port_stop(ap);
}
-
- dev_set_drvdata(gendev, NULL);
+ return rc;
}
/**
@@ -5755,7 +6246,7 @@ static void ata_host_release(struct device *gendev, void *res)
* PCI/etc. bus probe sem.
*
*/
-
+/* KILLME - the only user left is ipr */
void ata_host_init(struct ata_host *host, struct device *dev,
unsigned long flags, const struct ata_port_operations *ops)
{
@@ -5766,155 +6257,95 @@ void ata_host_init(struct ata_host *host, struct device *dev,
}
/**
- * ata_device_add - Register hardware device with ATA and SCSI layers
- * @ent: Probe information describing hardware device to be registered
- *
- * This function processes the information provided in the probe
- * information struct @ent, allocates the necessary ATA and SCSI
- * host information structures, initializes them, and registers
- * everything with requisite kernel subsystems.
+ * ata_host_register - register initialized ATA host
+ * @host: ATA host to register
+ * @sht: template for SCSI host
*
- * This function requests irqs, probes the ATA bus, and probes
- * the SCSI bus.
+ * Register initialized ATA host. @host is allocated using
+ * ata_host_alloc() and fully initialized by LLD. This function
+ * starts ports, registers @host with ATA and SCSI layers and
+ * probes registered devices.
*
* LOCKING:
- * PCI/etc. bus probe sem.
+ * Inherited from calling layer (may sleep).
*
* RETURNS:
- * Number of ports registered. Zero on error (no ports registered).
+ * 0 on success, -errno otherwise.
*/
-int ata_device_add(const struct ata_probe_ent *ent)
+int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
- unsigned int i;
- struct device *dev = ent->dev;
- struct ata_host *host;
- int rc;
-
- DPRINTK("ENTER\n");
+ int i, rc;
- if (ent->irq == 0) {
- dev_printk(KERN_ERR, dev, "is not available: No interrupt assigned.\n");
- return 0;
+ /* host must have been started */
+ if (!(host->flags & ATA_HOST_STARTED)) {
+ dev_printk(KERN_ERR, host->dev,
+ "BUG: trying to register unstarted host\n");
+ WARN_ON(1);
+ return -EINVAL;
}
- if (!devres_open_group(dev, ata_device_add, GFP_KERNEL))
- return 0;
+ /* Blow away unused ports. This happens when LLD can't
+ * determine the exact number of ports to allocate at
+ * allocation time.
+ */
+ for (i = host->n_ports; host->ports[i]; i++)
+ kfree(host->ports[i]);
- /* alloc a container for our list of ATA ports (buses) */
- host = devres_alloc(ata_host_release, sizeof(struct ata_host) +
- (ent->n_ports * sizeof(void *)), GFP_KERNEL);
- if (!host)
- goto err_out;
- devres_add(dev, host);
- dev_set_drvdata(dev, host);
+ /* give ports names and add SCSI hosts */
+ for (i = 0; i < host->n_ports; i++)
+ host->ports[i]->print_id = ata_print_id++;
- ata_host_init(host, dev, ent->_host_flags, ent->port_ops);
- host->n_ports = ent->n_ports;
- host->irq = ent->irq;
- host->irq2 = ent->irq2;
- host->iomap = ent->iomap;
- host->private_data = ent->private_data;
+ rc = ata_scsi_add_hosts(host, sht);
+ if (rc)
+ return rc;
- /* register each port bound to this device */
+ /* set cable, sata_spd_limit and report */
for (i = 0; i < host->n_ports; i++) {
- struct ata_port *ap;
- unsigned long xfer_mode_mask;
- int irq_line = ent->irq;
-
- ap = ata_port_add(ent, host, i);
- host->ports[i] = ap;
- if (!ap)
- goto err_out;
+ struct ata_port *ap = host->ports[i];
+ int irq_line;
+ u32 scontrol;
+ unsigned long xfer_mask;
- /* dummy? */
- if (ent->dummy_port_mask & (1 << i)) {
- ata_port_printk(ap, KERN_INFO, "DUMMY\n");
- ap->ops = &ata_dummy_port_ops;
- continue;
- }
+ /* set SATA cable type if still unset */
+ if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
+ ap->cbl = ATA_CBL_SATA;
- /* start port */
- rc = ap->ops->port_start(ap);
- if (rc) {
- host->ports[i] = NULL;
- scsi_host_put(ap->scsi_host);
- goto err_out;
+ /* init sata_spd_limit to the current value */
+ if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
+ int spd = (scontrol >> 4) & 0xf;
+ ap->hw_sata_spd_limit &= (1 << spd) - 1;
}
+ ap->sata_spd_limit = ap->hw_sata_spd_limit;
- /* Report the secondary IRQ for second channel legacy */
- if (i == 1 && ent->irq2)
- irq_line = ent->irq2;
+ /* report the secondary IRQ for second channel legacy */
+ irq_line = host->irq;
+ if (i == 1 && host->irq2)
+ irq_line = host->irq2;
- xfer_mode_mask =(ap->udma_mask << ATA_SHIFT_UDMA) |
- (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
- (ap->pio_mask << ATA_SHIFT_PIO);
+ xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
+ ap->udma_mask);
/* print per-port info to dmesg */
- ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%p "
- "ctl 0x%p bmdma 0x%p irq %d\n",
- ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
- ata_mode_string(xfer_mode_mask),
- ap->ioaddr.cmd_addr,
- ap->ioaddr.ctl_addr,
- ap->ioaddr.bmdma_addr,
- irq_line);
-
- /* freeze port before requesting IRQ */
- ata_eh_freeze_port(ap);
- }
-
- /* obtain irq, that may be shared between channels */
- rc = devm_request_irq(dev, ent->irq, ent->port_ops->irq_handler,
- ent->irq_flags, DRV_NAME, host);
- if (rc) {
- dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
- ent->irq, rc);
- goto err_out;
- }
-
- /* do we have a second IRQ for the other channel, eg legacy mode */
- if (ent->irq2) {
- /* We will get weird core code crashes later if this is true
- so trap it now */
- BUG_ON(ent->irq == ent->irq2);
-
- rc = devm_request_irq(dev, ent->irq2,
- ent->port_ops->irq_handler, ent->irq_flags,
- DRV_NAME, host);
- if (rc) {
- dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
- ent->irq2, rc);
- goto err_out;
- }
+ if (!ata_port_is_dummy(ap))
+ ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%p "
+ "ctl 0x%p bmdma 0x%p irq %d\n",
+ ap->cbl == ATA_CBL_SATA ? 'S' : 'P',
+ ata_mode_string(xfer_mask),
+ ap->ioaddr.cmd_addr,
+ ap->ioaddr.ctl_addr,
+ ap->ioaddr.bmdma_addr,
+ irq_line);
+ else
+ ata_port_printk(ap, KERN_INFO, "DUMMY\n");
}
- /* resource acquisition complete */
- devres_remove_group(dev, ata_device_add);
-
/* perform each probe synchronously */
DPRINTK("probe begin\n");
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
- u32 scontrol;
int rc;
- /* init sata_spd_limit to the current value */
- if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
- int spd = (scontrol >> 4) & 0xf;
- ap->hw_sata_spd_limit &= (1 << spd) - 1;
- }
- ap->sata_spd_limit = ap->hw_sata_spd_limit;
-
- rc = scsi_add_host(ap->scsi_host, dev);
- if (rc) {
- ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
- /* FIXME: do something useful here */
- /* FIXME: handle unconditional calls to
- * scsi_scan_host and ata_host_remove, below,
- * at the very least
- */
- }
-
+ /* probe */
if (ap->ops->error_handler) {
struct ata_eh_info *ehi = &ap->eh_info;
unsigned long flags;
@@ -5928,6 +6359,7 @@ int ata_device_add(const struct ata_probe_ent *ent)
ehi->action |= ATA_EH_SOFTRESET;
ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
+ ap->pflags &= ~ATA_PFLAG_INITIALIZING;
ap->pflags |= ATA_PFLAG_LOADING;
ata_port_schedule_eh(ap);
@@ -5959,16 +6391,52 @@ int ata_device_add(const struct ata_probe_ent *ent)
ata_scsi_scan_host(ap);
}
- VPRINTK("EXIT, returning %u\n", ent->n_ports);
- return ent->n_ports; /* success */
-
- err_out:
- devres_release_group(dev, ata_device_add);
- VPRINTK("EXIT, returning %d\n", rc);
return 0;
}
/**
+ * ata_host_activate - start host, request IRQ and register it
+ * @host: target ATA host
+ * @irq: IRQ to request
+ * @irq_handler: irq_handler used when requesting IRQ
+ * @irq_flags: irq_flags used when requesting IRQ
+ * @sht: scsi_host_template to use when registering the host
+ *
+ * After allocating an ATA host and initializing it, most libata
+ * LLDs perform three steps to activate the host - start host,
+ * request IRQ and register it. This helper takes necessasry
+ * arguments and performs the three steps in one go.
+ *
+ * LOCKING:
+ * Inherited from calling layer (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise.
+ */
+int ata_host_activate(struct ata_host *host, int irq,
+ irq_handler_t irq_handler, unsigned long irq_flags,
+ struct scsi_host_template *sht)
+{
+ int rc;
+
+ rc = ata_host_start(host);
+ if (rc)
+ return rc;
+
+ rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
+ dev_driver_string(host->dev), host);
+ if (rc)
+ return rc;
+
+ rc = ata_host_register(host, sht);
+ /* if failed, just free the IRQ and leave ports alone */
+ if (rc)
+ devm_free_irq(host->dev, irq, host);
+
+ return rc;
+}
+
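Putting the new interface together, here is a hedged sketch of a PCI LLD probe routine built on ata_host_alloc_pinfo() and ata_host_activate(); everything prefixed my_ is hypothetical, error handling is abbreviated, and the usual <linux/pci.h>/<linux/libata.h> context is assumed.

static int my_lld_init_one(struct pci_dev *pdev,
                           const struct pci_device_id *id)
{
        const struct ata_port_info *ppi[] = { &my_port_info, NULL };
        struct ata_host *host;
        int rc;

        rc = pcim_enable_device(pdev);
        if (rc)
                return rc;

        /* two ports; the second reuses the last port_info entry */
        host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
        if (!host)
                return -ENOMEM;

        /* ... fill in host->ports[i]->ioaddr from the BARs here ... */

        /* start ports, request the IRQ and register, all in one go */
        return ata_host_activate(host, pdev->irq, my_interrupt,
                                 IRQF_SHARED, &my_sht);
}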
+/**
* ata_port_detach - Detach ATA port in prepration of device removal
* @ap: ATA port to be detached
*
@@ -6017,9 +6485,9 @@ void ata_port_detach(struct ata_port *ap)
/* Flush hotplug task. The sequence is similar to
* ata_port_flush_task().
*/
- flush_workqueue(ata_aux_wq);
+ cancel_work_sync(&ap->hotplug_task.work); /* akpm: why? */
cancel_delayed_work(&ap->hotplug_task);
- flush_workqueue(ata_aux_wq);
+ cancel_work_sync(&ap->hotplug_task.work);
skip_eh:
/* remove the associated SCSI host */
@@ -6043,32 +6511,6 @@ void ata_host_detach(struct ata_host *host)
ata_port_detach(host->ports[i]);
}
-struct ata_probe_ent *
-ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
-{
- struct ata_probe_ent *probe_ent;
-
- probe_ent = devm_kzalloc(dev, sizeof(*probe_ent), GFP_KERNEL);
- if (!probe_ent) {
- printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
- kobject_name(&(dev->kobj)));
- return NULL;
- }
-
- INIT_LIST_HEAD(&probe_ent->node);
- probe_ent->dev = dev;
-
- probe_ent->sht = port->sht;
- probe_ent->port_flags = port->flags;
- probe_ent->pio_mask = port->pio_mask;
- probe_ent->mwdma_mask = port->mwdma_mask;
- probe_ent->udma_mask = port->udma_mask;
- probe_ent->port_ops = port->port_ops;
- probe_ent->private_data = port->private_data;
-
- return probe_ent;
-}
-
/**
* ata_std_ports - initialize ioaddr with standard port offsets.
* @ioaddr: IO address structure to be initialized
@@ -6334,6 +6776,10 @@ const struct ata_port_operations ata_dummy_port_ops = {
.port_stop = ata_dummy_noret,
};
+const struct ata_port_info ata_dummy_port_info = {
+ .port_ops = &ata_dummy_port_ops,
+};
+
/*
* libata is essentially a library of internal helper functions for
* low-level ATA host controller drivers. As such, the API/ABI is
@@ -6345,10 +6791,15 @@ EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
+EXPORT_SYMBOL_GPL(ata_dummy_port_info);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_host_init);
-EXPORT_SYMBOL_GPL(ata_device_add);
+EXPORT_SYMBOL_GPL(ata_host_alloc);
+EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
+EXPORT_SYMBOL_GPL(ata_host_start);
+EXPORT_SYMBOL_GPL(ata_host_register);
+EXPORT_SYMBOL_GPL(ata_host_activate);
EXPORT_SYMBOL_GPL(ata_host_detach);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
@@ -6360,6 +6811,7 @@ EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
+EXPORT_SYMBOL_GPL(sata_print_link_status);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
@@ -6367,6 +6819,7 @@ EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_interrupt);
+EXPORT_SYMBOL_GPL(ata_do_set_mode);
EXPORT_SYMBOL_GPL(ata_data_xfer);
EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
EXPORT_SYMBOL_GPL(ata_qc_prep);
@@ -6400,6 +6853,7 @@ EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
+EXPORT_SYMBOL_GPL(ata_wait_ready);
EXPORT_SYMBOL_GPL(ata_port_queue_task);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
@@ -6429,7 +6883,9 @@ EXPORT_SYMBOL_GPL(ata_timing_merge);
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
-EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
+EXPORT_SYMBOL_GPL(ata_pci_init_native_host);
+EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
+EXPORT_SYMBOL_GPL(ata_pci_prepare_native_host);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#ifdef CONFIG_PM
@@ -6442,11 +6898,6 @@ EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */
-#ifdef CONFIG_PM
-EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
-EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
-#endif /* CONFIG_PM */
-
EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_port_abort);
@@ -6461,3 +6912,8 @@ EXPORT_SYMBOL_GPL(ata_dummy_irq_on);
EXPORT_SYMBOL_GPL(ata_irq_ack);
EXPORT_SYMBOL_GPL(ata_dummy_irq_ack);
EXPORT_SYMBOL_GPL(ata_dev_try_classify);
+
+EXPORT_SYMBOL_GPL(ata_cable_40wire);
+EXPORT_SYMBOL_GPL(ata_cable_80wire);
+EXPORT_SYMBOL_GPL(ata_cable_unknown);
+EXPORT_SYMBOL_GPL(ata_cable_sata);