Diffstat (limited to 'drivers/pci/pci.c')
-rw-r--r--  drivers/pci/pci.c | 188
1 file changed, 132 insertions(+), 56 deletions(-)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 4e4c295a049f..315fea47e784 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -29,7 +29,17 @@ const char *pci_power_names[] = {
};
EXPORT_SYMBOL_GPL(pci_power_names);
-unsigned int pci_pm_d3_delay = PCI_PM_D3_WAIT;
+unsigned int pci_pm_d3_delay;
+
+static void pci_dev_d3_sleep(struct pci_dev *dev)
+{
+ unsigned int delay = dev->d3_delay;
+
+ if (delay < pci_pm_d3_delay)
+ delay = pci_pm_d3_delay;
+
+ msleep(delay);
+}
#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
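
The new helper honors a per-device dev->d3_delay (initialised to PCI_PM_D3_WAIT in pci_pm_init() further down) but never sleeps for less than the global pci_pm_d3_delay. Devices that need more than the spec-mandated 10 ms after entering D3hot can raise their own delay from a fixup; a minimal sketch, not part of this patch, with placeholder vendor/device IDs and quirk name:

    /* Hypothetical quirk: this device needs ~120 ms to settle after D3hot. */
    static void quirk_slow_d3(struct pci_dev *dev)
    {
            if (dev->d3_delay < 120)
                    dev->d3_delay = 120;    /* milliseconds */
    }
    DECLARE_PCI_FIXUP_FINAL(0x1234, 0x5678, quirk_slow_d3);
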
@@ -47,6 +57,15 @@ unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
+/*
+ * The default CLS is used if the arch didn't set CLS explicitly and
+ * not all PCI devices agree on the same value.  The arch can override
+ * either the default or the actual value as it sees fit.  Don't forget
+ * this is measured in 32-bit words, not bytes.
+ */
+u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2;
+u8 pci_cache_line_size;
+
/**
* pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
* @bus: pointer to PCI bus structure to search
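
Both pci_dfl_cache_line_size and pci_cache_line_size are measured in 32-bit words, not bytes. An architecture that knows its real cache line size can override the default from early boot code; purely as an illustration, not taken from this patch:

    /* e.g. a 64-byte cache line is 16 dwords */
    pci_dfl_cache_line_size = 64 >> 2;
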
@@ -373,8 +392,12 @@ pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
continue; /* Wrong type */
if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
return r; /* Exact match */
- if ((res->flags & IORESOURCE_PREFETCH) && !(r->flags & IORESOURCE_PREFETCH))
- best = r; /* Approximating prefetchable by non-prefetchable */
+ /* We can't insert a non-prefetch resource inside a prefetchable parent .. */
+ if (r->flags & IORESOURCE_PREFETCH)
+ continue;
+ /* .. but we can put a prefetchable resource inside a non-prefetchable one */
+ if (!best)
+ best = r;
}
return best;
}
@@ -509,7 +532,7 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
/* Mandatory power management transition delays */
/* see PCI PM 1.1 5.6.1 table 18 */
if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
- msleep(pci_pm_d3_delay);
+ pci_dev_d3_sleep(dev);
else if (state == PCI_D2 || dev->current_state == PCI_D2)
udelay(PCI_PM_D2_DELAY);
@@ -728,8 +751,8 @@ static int pci_save_pcie_state(struct pci_dev *dev)
u16 *cap;
u16 flags;
- pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
- if (pos <= 0)
+ pos = pci_pcie_cap(dev);
+ if (!pos)
return 0;
save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
@@ -837,7 +860,7 @@ pci_save_state(struct pci_dev *dev)
int i;
/* XXX: 100% dword access ok here? */
for (i = 0; i < 16; i++)
- pci_read_config_dword(dev, i * 4,&dev->saved_config_space[i]);
+ pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
dev->state_saved = true;
if ((i = pci_save_pcie_state(dev)) != 0)
return i;
@@ -1140,11 +1163,11 @@ pci_disable_device(struct pci_dev *dev)
/**
* pcibios_set_pcie_reset_state - set reset state for device dev
- * @dev: the PCI-E device reset
+ * @dev: the PCIe device reset
* @state: Reset state to enter into
*
*
- * Sets the PCI-E reset state for the device. This is the default
+ * Sets the PCIe reset state for the device. This is the default
* implementation. Architecture implementations can override this.
*/
int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
@@ -1155,7 +1178,7 @@ int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
/**
* pci_set_pcie_reset_state - set reset state for device dev
- * @dev: the PCI-E device reset
+ * @dev: the PCIe device reset
* @state: Reset state to enter into
*
*
@@ -1202,7 +1225,7 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
- dev_printk(KERN_INFO, &dev->dev, "PME# %s\n",
+ dev_printk(KERN_DEBUG, &dev->dev, "PME# %s\n",
enable ? "enabled" : "disabled");
}
@@ -1396,6 +1419,7 @@ void pci_pm_init(struct pci_dev *dev)
}
dev->pm_cap = pm;
+ dev->d3_delay = PCI_PM_D3_WAIT;
dev->d1_support = false;
dev->d2_support = false;
@@ -1413,7 +1437,8 @@ void pci_pm_init(struct pci_dev *dev)
pmc &= PCI_PM_CAP_PME_MASK;
if (pmc) {
- dev_info(&dev->dev, "PME# supported from%s%s%s%s%s\n",
+ dev_printk(KERN_DEBUG, &dev->dev,
+ "PME# supported from%s%s%s%s%s\n",
(pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
(pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
(pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
@@ -1510,7 +1535,7 @@ void pci_enable_ari(struct pci_dev *dev)
u16 ctrl;
struct pci_dev *bridge;
- if (!dev->is_pcie || dev->devfn)
+ if (!pci_is_pcie(dev) || dev->devfn)
return;
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
@@ -1518,10 +1543,10 @@ void pci_enable_ari(struct pci_dev *dev)
return;
bridge = dev->bus->self;
- if (!bridge || !bridge->is_pcie)
+ if (!bridge || !pci_is_pcie(bridge))
return;
- pos = pci_find_capability(bridge, PCI_CAP_ID_EXP);
+ pos = pci_pcie_cap(bridge);
if (!pos)
return;
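
Several hunks in this patch replace open-coded capability lookups and the dev->is_pcie flag with the pci_is_pcie()/pci_pcie_cap() accessors, which reuse the PCI Express capability offset cached at enumeration time instead of re-walking the capability list. The pattern, sketched outside the diff:

    /* before: walk the capability list on every call */
    pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
    if (!pos)
            return -ENOTTY;

    /* after: test and reuse the cached offset */
    if (!pci_is_pcie(dev))
            return -ENOTTY;
    pos = pci_pcie_cap(dev);
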
@@ -1536,6 +1561,54 @@ void pci_enable_ari(struct pci_dev *dev)
bridge->ari_enabled = 1;
}
+static int pci_acs_enable;
+
+/**
+ * pci_request_acs - ask for ACS to be enabled if supported
+ */
+void pci_request_acs(void)
+{
+ pci_acs_enable = 1;
+}
+
+/**
+ * pci_enable_acs - enable ACS if the hardware supports it
+ * @dev: the PCI device
+ */
+void pci_enable_acs(struct pci_dev *dev)
+{
+ int pos;
+ u16 cap;
+ u16 ctrl;
+
+ if (!pci_acs_enable)
+ return;
+
+ if (!pci_is_pcie(dev))
+ return;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
+ if (!pos)
+ return;
+
+ pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
+ pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
+
+ /* Source Validation */
+ ctrl |= (cap & PCI_ACS_SV);
+
+ /* P2P Request Redirect */
+ ctrl |= (cap & PCI_ACS_RR);
+
+ /* P2P Completion Redirect */
+ ctrl |= (cap & PCI_ACS_CR);
+
+ /* Upstream Forwarding */
+ ctrl |= (cap & PCI_ACS_UF);
+
+ pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
+}
+
/**
* pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
* @dev: the PCI device
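
pci_request_acs() only sets a flag; it has to run before the PCI bus is scanned so that pci_enable_acs(), invoked as devices are added, can program whichever of the advertised ACS controls the hardware supports: Source Validation, P2P Request Redirect, P2P Completion Redirect and Upstream Forwarding. The expected callers are IOMMU drivers that need peer-to-peer isolation; a hedged sketch of such a caller, with a made-up function name:

    static int __init example_iommu_init(void)
    {
            /* must run before PCI enumeration programs ACS per device */
            pci_request_acs();
            /* ... detect and set up the IOMMU itself ... */
            return 0;
    }
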
@@ -1669,9 +1742,7 @@ static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_n
return 0;
err_out:
- dev_warn(&pdev->dev, "BAR %d: can't reserve %s region %pR\n",
- bar,
- pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem",
+ dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
&pdev->resource[bar]);
return -EBUSY;
}
@@ -1866,31 +1937,6 @@ void pci_clear_master(struct pci_dev *dev)
__pci_set_master(dev, false);
}
-#ifdef PCI_DISABLE_MWI
-int pci_set_mwi(struct pci_dev *dev)
-{
- return 0;
-}
-
-int pci_try_set_mwi(struct pci_dev *dev)
-{
- return 0;
-}
-
-void pci_clear_mwi(struct pci_dev *dev)
-{
-}
-
-#else
-
-#ifndef PCI_CACHE_LINE_BYTES
-#define PCI_CACHE_LINE_BYTES L1_CACHE_BYTES
-#endif
-
-/* This can be overridden by arch code. */
-/* Don't forget this is measured in 32-bit words, not bytes */
-u8 pci_cache_line_size = PCI_CACHE_LINE_BYTES / 4;
-
/**
* pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
* @dev: the PCI device for which MWI is to be enabled
@@ -1901,13 +1947,12 @@ u8 pci_cache_line_size = PCI_CACHE_LINE_BYTES / 4;
*
* RETURNS: An appropriate -ERRNO error value on error, or zero for success.
*/
-static int
-pci_set_cacheline_size(struct pci_dev *dev)
+int pci_set_cacheline_size(struct pci_dev *dev)
{
u8 cacheline_size;
if (!pci_cache_line_size)
- return -EINVAL; /* The system doesn't support MWI. */
+ return -EINVAL;
/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
equal to or multiple of the right value. */
@@ -1928,6 +1973,24 @@ pci_set_cacheline_size(struct pci_dev *dev)
return -EINVAL;
}
+EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
+
+#ifdef PCI_DISABLE_MWI
+int pci_set_mwi(struct pci_dev *dev)
+{
+ return 0;
+}
+
+int pci_try_set_mwi(struct pci_dev *dev)
+{
+ return 0;
+}
+
+void pci_clear_mwi(struct pci_dev *dev)
+{
+}
+
+#else
/**
* pci_set_mwi - enables memory-write-invalidate PCI transaction
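
With the MWI stubs moved below it and the export added, pci_set_cacheline_size() becomes usable on its own, so a driver can program PCI_CACHE_LINE_SIZE without also enabling Memory-Write-Invalidate. A sketch of a probe path using the exported helper, with placeholder names:

    static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
            int rc;

            rc = pci_enable_device(pdev);
            if (rc)
                    return rc;

            /* program CLS from pci_cache_line_size / the default */
            pci_set_cacheline_size(pdev);

            /* MWI is best effort; failure is not fatal */
            pci_try_set_mwi(pdev);
            return 0;
    }
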
@@ -2062,6 +2125,7 @@ pci_set_dma_mask(struct pci_dev *dev, u64 mask)
return -EIO;
dev->dma_mask = mask;
+ dev_dbg(&dev->dev, "using %dbit DMA mask\n", fls64(mask));
return 0;
}
@@ -2073,6 +2137,7 @@ pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
return -EIO;
dev->dev.coherent_dma_mask = mask;
+ dev_dbg(&dev->dev, "using %dbit consistent DMA mask\n", fls64(mask));
return 0;
}
@@ -2099,9 +2164,9 @@ static int pcie_flr(struct pci_dev *dev, int probe)
int i;
int pos;
u32 cap;
- u16 status;
+ u16 status, control;
- pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ pos = pci_pcie_cap(dev);
if (!pos)
return -ENOTTY;
@@ -2126,8 +2191,10 @@ static int pcie_flr(struct pci_dev *dev, int probe)
"proceeding with reset anyway\n");
clear:
- pci_write_config_word(dev, pos + PCI_EXP_DEVCTL,
- PCI_EXP_DEVCTL_BCR_FLR);
+ pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &control);
+ control |= PCI_EXP_DEVCTL_BCR_FLR;
+ pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, control);
+
msleep(100);
return 0;
@@ -2191,12 +2258,12 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
csr &= ~PCI_PM_CTRL_STATE_MASK;
csr |= PCI_D3hot;
pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
- msleep(pci_pm_d3_delay);
+ pci_dev_d3_sleep(dev);
csr &= ~PCI_PM_CTRL_STATE_MASK;
csr |= PCI_D0;
pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
- msleep(pci_pm_d3_delay);
+ pci_dev_d3_sleep(dev);
return 0;
}
@@ -2240,6 +2307,10 @@ static int pci_dev_reset(struct pci_dev *dev, int probe)
down(&dev->dev.sem);
}
+ rc = pci_dev_specific_reset(dev, probe);
+ if (rc != -ENOTTY)
+ goto done;
+
rc = pcie_flr(dev, probe);
if (rc != -ENOTTY)
goto done;
@@ -2450,7 +2521,7 @@ int pcie_get_readrq(struct pci_dev *dev)
int ret, cap;
u16 ctl;
- cap = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ cap = pci_pcie_cap(dev);
if (!cap)
return -EINVAL;
@@ -2480,7 +2551,7 @@ int pcie_set_readrq(struct pci_dev *dev, int rq)
v = (ffs(rq) - 8) << 12;
- cap = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ cap = pci_pcie_cap(dev);
if (!cap)
goto out;
@@ -2540,7 +2611,7 @@ int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
return reg;
}
- dev_err(&dev->dev, "BAR: invalid resource #%d\n", resno);
+ dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
return 0;
}
@@ -2590,7 +2661,7 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
-spinlock_t resource_alignment_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(resource_alignment_lock);
/**
* pci_specified_resource_alignment - get resource alignment specified by user.
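
SPIN_LOCK_UNLOCKED gives every lock initialised with it the same lockdep class, which defeats lock validation, and was being phased out; DEFINE_SPINLOCK() gives each static lock its own class and, as used here, also makes the symbol static. For reference, with a placeholder name:

    static DEFINE_SPINLOCK(my_lock);            /* preferred static initialiser */
    /* spinlock_t my_lock = SPIN_LOCK_UNLOCKED;    deprecated equivalent */
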
@@ -2723,6 +2794,11 @@ int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
return 1;
}
+void __weak pci_fixup_cardbus(struct pci_bus *bus)
+{
+}
+EXPORT_SYMBOL(pci_fixup_cardbus);
+
static int __init pci_setup(char *str)
{
while (str) {