author		Dong Aisheng <aisheng.dong@nxp.com>	2019-12-02 18:05:17 +0800
committer	Dong Aisheng <aisheng.dong@nxp.com>	2019-12-02 18:05:17 +0800
commit		b138c331cbb1990857f2168fe6d981db1c41fa32 (patch)
tree		4252790a724f068ae25725b45a41ec5d17cbf56f
parent		89c380b1e879c1fa8ca08c3169f7ba2896f2be63 (diff)
parent		447e002898dd459d285810b20e5769325982e845 (diff)
Merge branch 'pcie/next' into next
* pcie/next: (40 commits)
  LF-128 PCI: imx: turn off the clocks and regulators when link is down
  PCI: imx: add the imx pcie ep verification solution
  misc: pci_endpoint_test: Add the layerscape PCIe GEN4 EP device support
  PCI: mobiveil: Add workaround for unsupported request error
  PCI: mobiveil: Add PCIe Gen4 EP driver for NXP Layerscape SoCs
  ...
-rw-r--r--	Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt	14
-rw-r--r--	Documentation/devicetree/bindings/pci/layerscape-pci.txt	13
-rw-r--r--	Documentation/devicetree/bindings/pci/layerscape-pcie-gen4.txt	78
-rw-r--r--	MAINTAINERS	13
-rw-r--r--	arch/arm/kernel/bios32.c	44
-rw-r--r--	arch/arm64/kernel/pci.c	44
-rw-r--r--	drivers/misc/pci_endpoint_test.c	2
-rw-r--r--	drivers/pci/controller/Kconfig	11
-rw-r--r--	drivers/pci/controller/Makefile	2
-rw-r--r--	drivers/pci/controller/dwc/Kconfig	21
-rw-r--r--	drivers/pci/controller/dwc/Makefile	1
-rw-r--r--	drivers/pci/controller/dwc/pci-imx6-ep.c	176
-rw-r--r--	drivers/pci/controller/dwc/pci-imx6.c	1601
-rw-r--r--	drivers/pci/controller/dwc/pci-layerscape.c	1
-rw-r--r--	drivers/pci/controller/dwc/pcie-designware-host.c	25
-rw-r--r--	drivers/pci/controller/mobiveil/Kconfig	50
-rw-r--r--	drivers/pci/controller/mobiveil/Makefile	7
-rw-r--r--	drivers/pci/controller/mobiveil/pcie-layerscape-gen4-ep.c	169
-rw-r--r--	drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c	325
-rw-r--r--	drivers/pci/controller/mobiveil/pcie-mobiveil-ep.c	568
-rw-r--r--	drivers/pci/controller/mobiveil/pcie-mobiveil-host.c (renamed from drivers/pci/controller/pcie-mobiveil.c)	581
-rw-r--r--	drivers/pci/controller/mobiveil/pcie-mobiveil-plat.c	59
-rw-r--r--	drivers/pci/controller/mobiveil/pcie-mobiveil.c	310
-rw-r--r--	drivers/pci/controller/mobiveil/pcie-mobiveil.h	301
-rw-r--r--	drivers/pci/pcie/portdrv_core.c	29
-rw-r--r--	drivers/pci/quirks.c	9
-rw-r--r--	include/dt-bindings/soc/imx8_hsio.h	31
-rw-r--r--	include/linux/pci.h	1
28 files changed, 3867 insertions, 619 deletions
diff --git a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
index 8906f8d6efd5..27ecef52bd50 100644
--- a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
@@ -10,6 +10,9 @@ Required properties:
- "fsl,imx6qp-pcie"
- "fsl,imx7d-pcie"
- "fsl,imx8mq-pcie"
+ - "fsl,imx8mm-pcie"
+ - "fsl,imx8qm-pcie"
+ - "fsl,imx8qxp-pcie"
- reg: base address and length of the PCIe controller
- interrupts: A list of interrupt outputs of the controller. Must contain an
entry for each entry in the interrupt-names property.
@@ -69,6 +72,17 @@ Additional required properties for imx8mq-pcie:
- clock-names: Must include the following additional entries:
- "pcie_aux"
+Additional required properties for imx8 pcie:
+- hsio-cfg: hsio configuration mode used when the pcie node is enabled.
+ mode 1: pciea 2 lanes and one sata ahci port.
+ mode 2: pciea 1 lane, pcieb 1 lane and one sata ahci port.
+ mode 3: pciea 2 lanes, pcieb 1 lane.
+- local-addr: the local address used in the hsio module.
+ Example:
+ hsio-cfg = <PCIEAX2PCIEBX1>;
+ hsio = <&hsio>;
+ local-addr = <0x80000000>;
+
Example:
pcie@01000000 {
diff --git a/Documentation/devicetree/bindings/pci/layerscape-pci.txt b/Documentation/devicetree/bindings/pci/layerscape-pci.txt
index 99a386ea691c..992e917094b8 100644
--- a/Documentation/devicetree/bindings/pci/layerscape-pci.txt
+++ b/Documentation/devicetree/bindings/pci/layerscape-pci.txt
@@ -27,8 +27,12 @@ Required properties:
- reg: base addresses and lengths of the PCIe controller register blocks.
- interrupts: A list of interrupt outputs of the controller. Must contain an
entry for each entry in the interrupt-names property.
-- interrupt-names: Must include the following entries:
- "intr": The interrupt that is asserted for controller interrupts
+- interrupt-names: May include the following entries:
+ "aer": asserted for the AER interrupt when the chip cannot signal AER
+ via MSI/MSI-X/INTx but provides a dedicated AER interrupt line.
+ "pme": asserted for the PME interrupt when the chip cannot signal PME
+ via MSI/MSI-X/INTx but provides a dedicated PME interrupt line.
+ ......
- fsl,pcie-scfg: Must include two entries.
The first entry must be a link to the SCFG device node
The second entry must be '0' or '1' based on physical PCIe controller index.
@@ -44,8 +48,9 @@ Example:
reg = <0x00 0x03400000 0x0 0x00010000 /* controller registers */
0x40 0x00000000 0x0 0x00002000>; /* configuration space */
reg-names = "regs", "config";
- interrupts = <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */
- interrupt-names = "intr";
+ interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>, /* aer interrupt */
+ <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>; /* pme interrupt */
+ interrupt-names = "aer", "pme";
fsl,pcie-scfg = <&scfg 0>;
#address-cells = <3>;
#size-cells = <2>;
diff --git a/Documentation/devicetree/bindings/pci/layerscape-pcie-gen4.txt b/Documentation/devicetree/bindings/pci/layerscape-pcie-gen4.txt
new file mode 100644
index 000000000000..414a86c9c6af
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/layerscape-pcie-gen4.txt
@@ -0,0 +1,78 @@
+NXP Layerscape PCIe Gen4 controller
+
+This PCIe controller is based on the Mobiveil PCIe IP and thus inherits all
+the common properties defined in mobiveil-pcie.txt.
+
+HOST MODE
+=========
+Required properties:
+- compatible: should contain the platform identifier such as:
+ "fsl,lx2160a-pcie"
+- reg: base addresses and lengths of the PCIe controller register blocks.
+ "csr_axi_slave": Bridge config registers
+ "config_axi_slave": PCIe controller registers
+- interrupts: A list of interrupt outputs of the controller. Must contain an
+ entry for each entry in the interrupt-names property.
+- interrupt-names: May include the following entries:
+ "intr": The interrupt that is asserted for controller interrupts
+ "aer": asserted for the AER interrupt when the chip cannot signal AER
+ via MSI/MSI-X/INTx but provides a dedicated AER interrupt line.
+ "pme": asserted for the PME interrupt when the chip cannot signal PME
+ via MSI/MSI-X/INTx but provides a dedicated PME interrupt line.
+- dma-coherent: Indicates that the hardware IP block can ensure the coherency
+ of the data transferred from/to the IP block. This can avoid the software
+ cache flush/invalid actions, and improve the performance significantly.
+- msi-parent : See the generic MSI binding described in
+ Documentation/devicetree/bindings/interrupt-controller/msi.txt.
+
+DEVICE MODE
+===========
+Required properties:
+- compatible: should contain the platform identifier such as:
+ "fsl,lx2160a-pcie-ep"
+- reg: base addresses and lengths of the PCIe controller register blocks.
+ "regs": PCIe controller registers.
+ "addr_space" EP device CPU address.
+- apio-wins: number of requested apio outbound windows.
+
+Optional Property:
+- max-functions: Maximum number of functions that can be configured (default 1).
+
+RC Example:
+
+ pcie@3400000 {
+ compatible = "fsl,lx2160a-pcie";
+ reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */
+ 0x80 0x00000000 0x0 0x00001000>; /* configuration space */
+ reg-names = "csr_axi_slave", "config_axi_slave";
+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>, /* AER interrupt */
+ <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>, /* PME interrupt */
+ <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */
+ interrupt-names = "aer", "pme", "intr";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ apio-wins = <8>;
+ ppio-wins = <8>;
+ dma-coherent;
+ bus-range = <0x0 0xff>;
+ msi-parent = <&its>;
+ ranges = <0x82000000 0x0 0x40000000 0x80 0x40000000 0x0 0x40000000>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0000 0 0 1 &gic 0 0 GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 2 &gic 0 0 GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 3 &gic 0 0 GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 4 &gic 0 0 GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+EP Example:
+
+ pcie_ep@3400000 {
+ compatible = "fsl,lx2160a-pcie-ep";
+ reg = <0x00 0x03400000 0x0 0x00100000
+ 0x80 0x00000000 0x8 0x00000000>;
+ reg-names = "regs", "addr_space";
+ apio-wins = <8>;
+ status = "disabled";
+ };
diff --git a/MAINTAINERS b/MAINTAINERS
index 63b51f42f31b..ff1555138a7f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -12456,6 +12456,16 @@ L: linux-arm-kernel@lists.infradead.org
S: Maintained
F: drivers/pci/controller/dwc/*layerscape*
+PCI DRIVER FOR NXP LAYERSCAPE GEN4 CONTROLLER
+M: Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
+M: Xiaowei Bao <xiaowei.bao@nxp.com>
+L: linux-pci@vger.kernel.org
+L: linux-arm-kernel@lists.infradead.org
+S: Maintained
+F: Documentation/devicetree/bindings/pci/layerscape-pcie-gen4.txt
+F: drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
+F: drivers/pci/controller/mobiveil/pcie-layerscape-gen4-ep.c
+
PCI DRIVER FOR GENERIC OF HOSTS
M: Will Deacon <will@kernel.org>
L: linux-pci@vger.kernel.org
@@ -12496,10 +12506,11 @@ F: drivers/ntb/hw/mscc/
PCI DRIVER FOR MOBIVEIL PCIE IP
M: Karthikeyan Mitran <m.karthikeyan@mobiveil.co.in>
M: Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
+M: Xiaowei Bao <xiaowei.bao@nxp.com>
L: linux-pci@vger.kernel.org
S: Supported
F: Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
-F: drivers/pci/controller/pcie-mobiveil.c
+F: drivers/pci/controller/mobiveil/pcie-mobiveil*
PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support)
M: Thomas Petazzoni <thomas.petazzoni@bootlin.com>
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index ed46ca69813d..cc9e5b3cdbcf 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -12,11 +12,14 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/of_irq.h>
#include <asm/mach-types.h>
#include <asm/mach/map.h>
#include <asm/mach/pci.h>
+#include "../../../drivers/pci/pcie/portdrv.h"
+
static int debug_pci;
/*
@@ -65,6 +68,47 @@ void pcibios_report_status(u_int status_mask, int warn)
}
/*
+ * Check the device tree for service interrupts
+ */
+int pcibios_check_service_irqs(struct pci_dev *dev, int *irqs, int mask)
+{
+ int ret, count = 0;
+ struct device_node *np = NULL;
+
+ if (dev->bus->dev.of_node)
+ np = dev->bus->dev.of_node;
+
+ if (np == NULL)
+ return 0;
+
+ if (!IS_ENABLED(CONFIG_OF_IRQ))
+ return 0;
+
+ /*
+ * If the root port doesn't support MSI/MSI-X/INTx in RC mode,
+ * request the IRQ for AER.
+ */
+ if (mask & PCIE_PORT_SERVICE_AER) {
+ ret = of_irq_get_byname(np, "aer");
+ if (ret > 0) {
+ irqs[PCIE_PORT_SERVICE_AER_SHIFT] = ret;
+ count++;
+ }
+ }
+
+ if (mask & PCIE_PORT_SERVICE_PME) {
+ ret = of_irq_get_byname(np, "pme");
+ if (ret > 0) {
+ irqs[PCIE_PORT_SERVICE_PME_SHIFT] = ret;
+ count++;
+ }
+ }
+
+ /* TODO: add more service interrupts if they are present in the device tree */
+
+ return count;
+}
+
+/*
* We don't use this to fix the device, but initialisation of it.
* It's not the correct use for this, but it works.
* Note that the arbiter/ISA bridge appears to be buggy, specifically in
diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c
index 570988c7a7ff..95c06f634f49 100644
--- a/arch/arm64/kernel/pci.c
+++ b/arch/arm64/kernel/pci.c
@@ -13,11 +13,14 @@
#include <linux/mm.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
+#include <linux/of_irq.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/pci-ecam.h>
#include <linux/slab.h>
+#include "../../../drivers/pci/pcie/portdrv.h"
+
#ifdef CONFIG_ACPI
/*
* Try to assign the IRQ number when probing a new device
@@ -32,6 +35,47 @@ int pcibios_alloc_irq(struct pci_dev *dev)
#endif
/*
+ * Check the device tree for service interrupts
+ */
+int pcibios_check_service_irqs(struct pci_dev *dev, int *irqs, int mask)
+{
+ int ret, count = 0;
+ struct device_node *np = NULL;
+
+ if (dev->bus->dev.of_node)
+ np = dev->bus->dev.of_node;
+
+ if (np == NULL)
+ return 0;
+
+ if (!IS_ENABLED(CONFIG_OF_IRQ))
+ return 0;
+
+ /*
+ * If the root port doesn't support MSI/MSI-X/INTx in RC mode,
+ * request the IRQ for AER.
+ */
+ if (mask & PCIE_PORT_SERVICE_AER) {
+ ret = of_irq_get_byname(np, "aer");
+ if (ret > 0) {
+ irqs[PCIE_PORT_SERVICE_AER_SHIFT] = ret;
+ count++;
+ }
+ }
+
+ if (mask & PCIE_PORT_SERVICE_PME) {
+ ret = of_irq_get_byname(np, "pme");
+ if (ret > 0) {
+ irqs[PCIE_PORT_SERVICE_PME_SHIFT] = ret;
+ count++;
+ }
+ }
+
+ /* TODO: add more service interrupts if they are present in the device tree */
+
+ return count;
+}
+
+/*
* raw_pci_read/write - Platform-specific PCI config space access.
*/
int raw_pci_read(unsigned int domain, unsigned int bus,
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index 6e208a060a58..8b145a7fe9f6 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -65,6 +65,7 @@
#define PCI_ENDPOINT_TEST_IRQ_NUMBER 0x28
#define PCI_DEVICE_ID_TI_AM654 0xb00c
+#define PCI_DEVICE_ID_LX2160A 0x8d80
#define is_am654_pci_dev(pdev) \
((pdev)->device == PCI_DEVICE_ID_TI_AM654)
@@ -793,6 +794,7 @@ static const struct pci_device_id pci_endpoint_test_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x) },
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x) },
{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0) },
+ { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_LX2160A) },
{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
.driver_data = (kernel_ulong_t)&am654_data
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 70e078238899..f639d385dae6 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -241,16 +241,6 @@ config PCIE_MEDIATEK
Say Y here if you want to enable PCIe controller support on
MediaTek SoCs.
-config PCIE_MOBIVEIL
- bool "Mobiveil AXI PCIe controller"
- depends on ARCH_ZYNQMP || COMPILE_TEST
- depends on OF
- depends on PCI_MSI_IRQ_DOMAIN
- help
- Say Y here if you want to enable support for the Mobiveil AXI PCIe
- Soft IP. It has up to 8 outbound and inbound windows
- for address translation and it is a PCIe Gen4 IP.
-
config PCIE_TANGO_SMP8759
bool "Tango SMP8759 PCIe controller (DANGEROUS)"
depends on ARCH_TANGO && PCI_MSI && OF
@@ -289,4 +279,5 @@ config PCI_HYPERV_INTERFACE
have a common interface with the Hyper-V PCI frontend driver.
source "drivers/pci/controller/dwc/Kconfig"
+source "drivers/pci/controller/mobiveil/Kconfig"
endmenu
diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
index a2a22c9d91af..44414cfd45ea 100644
--- a/drivers/pci/controller/Makefile
+++ b/drivers/pci/controller/Makefile
@@ -27,11 +27,11 @@ obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o
obj-$(CONFIG_PCIE_ROCKCHIP_EP) += pcie-rockchip-ep.o
obj-$(CONFIG_PCIE_ROCKCHIP_HOST) += pcie-rockchip-host.o
obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o
-obj-$(CONFIG_PCIE_MOBIVEIL) += pcie-mobiveil.o
obj-$(CONFIG_PCIE_TANGO_SMP8759) += pcie-tango.o
obj-$(CONFIG_VMD) += vmd.o
# pcie-hisi.o quirks are needed even without CONFIG_PCIE_DW
obj-y += dwc/
+obj-y += mobiveil/
# The following drivers are for devices that use the generic ACPI
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index 0ba988b5b5bc..dd24e6b8ce23 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -94,6 +94,27 @@ config PCI_IMX6
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
+config PCI_IMX6_COMPLIANCE_TEST
+ bool "Enable pcie compliance tests on imx6"
+ depends on PCI_IMX6
+ default n
+ help
+ Say Y here if you want to do the compliance tests on the imx6
+ pcie rc found on FSL i.MX SoCs.
+
+config EP_MODE_IN_EP_RC_SYS
+ bool "PCI Express EP mode in the IMX6 RC/EP interconnection system"
+ depends on PCI_IMX6
+
+config RC_MODE_IN_EP_RC_SYS
+ bool "PCI Express RC mode in the IMX6 RC/EP interconnection system"
+ depends on PCI_IMX6 && EP_MODE_IN_EP_RC_SYS!=y
+
+config PCI_IMX6_EP
+ bool "i.MX6 PCI Express EP skeleton driver"
+ depends on RC_MODE_IN_EP_RC_SYS
+ default y
+
config PCIE_SPEAR13XX
bool "STMicroelectronics SPEAr PCIe controller"
depends on ARCH_SPEAR13XX || COMPILE_TEST
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile
index 69faff371f11..7ecb2966f3d2 100644
--- a/drivers/pci/controller/dwc/Makefile
+++ b/drivers/pci/controller/dwc/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_PCIE_DW_PLAT) += pcie-designware-plat.o
obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
+obj-$(CONFIG_PCI_IMX6_EP) += pci-imx6-ep.o
obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone.o
obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
diff --git a/drivers/pci/controller/dwc/pci-imx6-ep.c b/drivers/pci/controller/dwc/pci-imx6-ep.c
new file mode 100644
index 000000000000..d3fc9e1856aa
--- /dev/null
+++ b/drivers/pci/controller/dwc/pci-imx6-ep.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#define DRV_DESCRIPTION "i.MX PCIE endpoint device driver"
+#define DRV_VERSION "version 0.1"
+#define DRV_NAME "imx_pcie_ep"
+
+struct imx_pcie_ep_priv {
+ struct pci_dev *pci_dev;
+};
+
+/**
+ * imx_pcie_ep_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @id: entry in id_tbl
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int imx_pcie_ep_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int ret = 0, index = 0, found = 0;
+ unsigned int hard_wired = 0, msi_addr = 0, local_addr;
+ struct resource cfg_res;
+ const char *name = NULL;
+ struct device_node *np = NULL;
+ struct device *dev = &pdev->dev;
+ struct imx_pcie_ep_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->pci_dev = pdev;
+ if (pci_enable_device(pdev)) {
+ ret = -ENODEV;
+ goto out;
+ }
+ pci_set_master(pdev);
+ pci_set_drvdata(pdev, priv);
+
+ ret = pci_enable_msi(priv->pci_dev);
+ if (ret < 0) {
+ dev_err(dev, "can't enable msi\n");
+ goto err_pci_unmap_mmio;
+ }
+
+ /* Use the first non-hard-wired port as the EP */
+ while ((np = of_find_node_by_type(np, "pci"))) {
+ if (!of_device_is_available(np))
+ continue;
+ if (of_property_read_u32(np, "hard-wired", &hard_wired)) {
+ if (hard_wired == 0)
+ break;
+ }
+ }
+ if (of_property_read_u32(np, "local-addr", &local_addr))
+ local_addr = 0;
+
+ while (!of_property_read_string_index(np, "reg-names", index, &name)) {
+ if (strcmp("config", name)) {
+ index++;
+ continue;
+ }
+ found = 1;
+ break;
+ }
+
+ if (!found) {
+ dev_err(dev, "can't find config reg space.\n");
+ ret = -EINVAL;
+ goto err_pci_disable_msi;
+ }
+
+ ret = of_address_to_resource(np, index, &cfg_res);
+ if (ret) {
+ dev_err(dev, "can't get cfg_res.\n");
+ ret = -EINVAL;
+ goto err_pci_disable_msi;
+ } else {
+ msi_addr = cfg_res.start + resource_size(&cfg_res);
+ }
+
+ pr_info("msi_addr 0x%08x, local_addr 0x%08x\n", msi_addr, local_addr);
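+ /*
+ * 0x54 is assumed to be the message-address register of the MSI
+ * capability at 0x50 in this device's config space; the 0x820/0x828/
+ * 0x82C offsets written on the parent RC below match the DesignWare
+ * MSI controller ADDR_LO/INTR0_ENABLE/INTR0_MASK register layout.
+ */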
+ pci_bus_write_config_dword(pdev->bus, 0, 0x54, msi_addr);
+ if (local_addr) {
+ msi_addr = msi_addr & 0xFFFFFFF;
+ msi_addr |= (local_addr & 0xF0000000);
+ }
+ pci_bus_write_config_dword(pdev->bus->parent, 0, 0x820, msi_addr);
+ /* configure rc's msi cap */
+ pci_bus_read_config_dword(pdev->bus->parent, 0, 0x50, &ret);
+ ret |= (PCI_MSI_FLAGS_ENABLE << 16);
+ pci_bus_write_config_dword(pdev->bus->parent, 0, 0x50, ret);
+ pci_bus_write_config_dword(pdev->bus->parent, 0, 0x828, 0x1);
+ pci_bus_write_config_dword(pdev->bus->parent, 0, 0x82C, 0xFFFFFFFE);
+
+ return 0;
+
+err_pci_disable_msi:
+ pci_disable_msi(pdev);
+err_pci_unmap_mmio:
+ pci_disable_device(pdev);
+out:
+ kfree(priv);
+ return ret;
+}
+
+static void imx_pcie_ep_remove(struct pci_dev *pdev)
+{
+ struct imx_pcie_ep_priv *priv = pci_get_drvdata(pdev);
+
+ if (!priv)
+ return;
+ pr_info("***imx pcie ep driver unload***\n");
+}
+
+static struct pci_device_id imx_pcie_ep_ids[] = {
+ {
+ .class = PCI_CLASS_MEMORY_RAM << 8,
+ .class_mask = ~0,
+ .vendor = 0xbeaf,
+ .device = 0xdead,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ { } /* terminate list */
+};
+MODULE_DEVICE_TABLE(pci, imx_pcie_ep_ids);
+
+static struct pci_driver imx_pcie_ep_driver = {
+ .name = DRV_NAME,
+ .id_table = imx_pcie_ep_ids,
+ .probe = imx_pcie_ep_probe,
+ .remove = imx_pcie_ep_remove,
+};
+
+static int __init imx_pcie_ep_init(void)
+{
+ int ret;
+
+ pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
+ ret = pci_register_driver(&imx_pcie_ep_driver);
+ if (ret)
+ pr_err("Unable to initialize PCI module\n");
+
+ return ret;
+}
+
+static void __exit imx_pcie_ep_exit(void)
+{
+ pci_unregister_driver(&imx_pcie_ep_driver);
+}
+
+module_exit(imx_pcie_ep_exit);
+module_init(imx_pcie_ep_init);
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("imx_pcie_ep");
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index acfbd34032a8..f43bfd6090ce 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -8,6 +8,7 @@
* Author: Sean Cross <xobs@kosagi.com>
*/
+#include <dt-bindings/soc/imx8_hsio.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
@@ -17,9 +18,11 @@
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
#include <linux/module.h>
+#include <linux/of_address.h>
#include <linux/of_gpio.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
+#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -31,14 +34,19 @@
#include <linux/reset.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
+#include "../../pci.h"
#include "pcie-designware.h"
+#define IMX8MQ_PCIE_LINK_CAP_REG_OFFSET 0x7c
+#define IMX8MQ_PCIE_LINK_CAP_L1EL_64US (BIT(18) | BIT(17))
+#define IMX8MQ_PCIE_L1SUB_CTRL1_REG_EN_MASK 0xf
#define IMX8MQ_GPR_PCIE_REF_USE_PAD BIT(9)
#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN BIT(10)
#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE BIT(11)
#define IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE GENMASK(11, 8)
#define IMX8MQ_PCIE2_BASE_ADDR 0x33c00000
+#define IMX8_HSIO_PCIEB_BASE_ADDR 0x5f010000
#define to_imx6_pcie(x) dev_get_drvdata((x)->dev)
@@ -48,11 +56,16 @@ enum imx6_pcie_variants {
IMX6QP,
IMX7D,
IMX8MQ,
+ IMX8MM,
+ IMX8QM,
+ IMX8QXP,
};
#define IMX6_PCIE_FLAG_IMX6_PHY BIT(0)
#define IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE BIT(1)
#define IMX6_PCIE_FLAG_SUPPORTS_SUSPEND BIT(2)
+#define IMX6_PCIE_FLAG_IMX6_CPU_ADDR_FIXUP BIT(3)
+#define IMX6_PCIE_FLAG_SUPPORTS_L1SS BIT(4)
struct imx6_pcie_drvdata {
enum imx6_pcie_variants variant;
@@ -62,23 +75,34 @@ struct imx6_pcie_drvdata {
struct imx6_pcie {
struct dw_pcie *pci;
+ int clkreq_gpio;
+ int dis_gpio;
int reset_gpio;
bool gpio_active_high;
struct clk *pcie_bus;
struct clk *pcie_phy;
+ struct clk *pcie_per;
struct clk *pcie_inbound_axi;
struct clk *pcie;
struct clk *pcie_aux;
+ struct clk *phy_per;
+ struct clk *misc_per;
struct regmap *iomuxc_gpr;
u32 controller_id;
struct reset_control *pciephy_reset;
struct reset_control *apps_reset;
struct reset_control *turnoff_reset;
+ struct reset_control *clkreq_reset;
u32 tx_deemph_gen1;
u32 tx_deemph_gen2_3p5db;
u32 tx_deemph_gen2_6db;
u32 tx_swing_full;
u32 tx_swing_low;
+ u32 hsio_cfg;
+ u32 ext_osc;
+ u32 local_addr;
+ u32 hard_wired;
+ u32 dma_unroll_offset;
int link_gen;
struct regulator *vpcie;
void __iomem *phy_base;
@@ -87,10 +111,15 @@ struct imx6_pcie {
struct device *pd_pcie;
/* power domain for pcie phy */
struct device *pd_pcie_phy;
+ /* power domain for hsio gpio used by pcie */
+ struct device *pd_hsio_gpio;
const struct imx6_pcie_drvdata *drvdata;
+ struct regulator *epdev_on;
};
/* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */
+#define PHY_PLL_LOCK_WAIT_MAX_RETRIES 2000
+#define PHY_PLL_LOCK_WAIT_USLEEP_MIN 50
#define PHY_PLL_LOCK_WAIT_USLEEP_MAX 200
#define PHY_PLL_LOCK_WAIT_TIMEOUT (2000 * PHY_PLL_LOCK_WAIT_USLEEP_MAX)
@@ -106,6 +135,8 @@ struct imx6_pcie {
/* PCIe Port Logic registers (memory-mapped) */
#define PL_OFFSET 0x700
+#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
+
#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
#define PCIE_PHY_CTRL_DATA(x) FIELD_PREP(GENMASK(15, 0), (x))
#define PCIE_PHY_CTRL_CAP_ADR BIT(16)
@@ -118,6 +149,35 @@ struct imx6_pcie {
#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
+/* DMA registers */
+#define MAX_PCIE_DMA_CHANNELS 8
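+/*
+ * When the controller uses unrolled (memory-mapped) iATU/DMA registers,
+ * this CDM offset is assumed to be added to the viewport-relative DMA
+ * register addresses below (see how dma_unroll_offset is applied in the
+ * DMA ISR and transfer helpers).
+ */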
+#define DMA_UNROLL_CDM_OFFSET (0x7 << 19)
+#define DMA_REG_OFFSET 0x970
+#define DMA_CTRL_VIEWPORT_OFF (DMA_REG_OFFSET + 0x8)
+#define DMA_WRITE_ENGINE_EN_OFF (DMA_REG_OFFSET + 0xC)
+#define DMA_WRITE_ENGINE_EN BIT(0)
+#define DMA_WRITE_DOORBELL (DMA_REG_OFFSET + 0x10)
+#define DMA_READ_ENGINE_EN_OFF (DMA_REG_OFFSET + 0x2C)
+#define DMA_READ_ENGINE_EN BIT(0)
+#define DMA_READ_DOORBELL (DMA_REG_OFFSET + 0x30)
+#define DMA_WRITE_INT_STS (DMA_REG_OFFSET + 0x4C)
+#define DMA_WRITE_INT_MASK (DMA_REG_OFFSET + 0x54)
+#define DMA_WRITE_INT_CLR (DMA_REG_OFFSET + 0x58)
+#define DMA_READ_INT_STS (DMA_REG_OFFSET + 0xA0)
+#define DMA_READ_INT_MASK (DMA_REG_OFFSET + 0xA8)
+#define DMA_READ_INT_CLR (DMA_REG_OFFSET + 0xAC)
+#define DMA_DONE_INT_STS 0xFF
+#define DMA_ABORT_INT_STS (0xFF << 16)
+#define DMA_VIEWPOT_SEL_OFF (DMA_REG_OFFSET + 0xFC)
+#define DMA_CHANNEL_CTRL_1 (DMA_REG_OFFSET + 0x100)
+#define DMA_CHANNEL_CTRL_1_LIE BIT(3)
+#define DMA_CHANNEL_CTRL_2 (DMA_REG_OFFSET + 0x104)
+#define DMA_TRANSFER_SIZE (DMA_REG_OFFSET + 0x108)
+#define DMA_SAR_LOW (DMA_REG_OFFSET + 0x10C)
+#define DMA_SAR_HIGH (DMA_REG_OFFSET + 0x110)
+#define DMA_DAR_LOW (DMA_REG_OFFSET + 0x114)
+#define DMA_DAR_HIGH (DMA_REG_OFFSET + 0x118)
+
/* PHY registers (not memory-mapped) */
#define PCIE_PHY_ATEOVRD 0x10
#define PCIE_PHY_ATEOVRD_EN BIT(2)
@@ -149,10 +209,157 @@ struct imx6_pcie {
#define PCIE_PHY_CMN_REG26 0x98
#define PCIE_PHY_CMN_REG26_ATT_MODE 0xBC
+#define PCIE_PHY_CMN_REG62 0x188
+#define PCIE_PHY_CMN_REG62_PLL_CLK_OUT 0x08
+#define PCIE_PHY_CMN_REG64 0x190
+#define PCIE_PHY_CMN_REG64_AUX_RX_TX_TERM 0x8C
+#define PCIE_PHY_CMN_REG75 0x1D4
+#define PCIE_PHY_CMN_REG75_PLL_DONE 0x3
+#define PCIE_PHY_TRSV_REG5 0x414
+#define PCIE_PHY_TRSV_REG5_GEN1_DEEMP 0x2D
+#define PCIE_PHY_TRSV_REG6 0x418
+#define PCIE_PHY_TRSV_REG6_GEN2_DEEMP 0xF
+
#define PHY_RX_OVRD_IN_LO 0x1005
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN BIT(5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN BIT(3)
+/* iMX8 HSIO registers */
+#define IMX8QM_CSR_PHYX2_OFFSET 0x00000
+#define IMX8QM_CSR_PHYX1_OFFSET 0x10000
+#define IMX8QM_CSR_PHYX_STTS0_OFFSET 0x4
+#define IMX8QM_CSR_PCIEA_OFFSET 0x20000
+#define IMX8QM_CSR_PCIEB_OFFSET 0x30000
+#define IMX8QM_CSR_PCIE_CTRL1_OFFSET 0x4
+#define IMX8QM_CSR_PCIE_CTRL2_OFFSET 0x8
+#define IMX8QM_CSR_PCIE_STTS0_OFFSET 0xC
+#define IMX8QM_CSR_MISC_OFFSET 0x50000
+
+#define IMX8QM_CTRL_LTSSM_ENABLE BIT(4)
+#define IMX8QM_CTRL_READY_ENTR_L23 BIT(5)
+#define IMX8QM_CTRL_PM_XMT_TURNOFF BIT(9)
+#define IMX8QM_CTRL_BUTTON_RST_N BIT(21)
+#define IMX8QM_CTRL_PERST_N BIT(22)
+#define IMX8QM_CTRL_POWER_UP_RST_N BIT(23)
+
+#define IMX8QM_CTRL_STTS0_PM_LINKST_IN_L2 BIT(13)
+#define IMX8QM_CTRL_STTS0_PM_REQ_CORE_RST BIT(19)
+#define IMX8QM_STTS0_LANE0_TX_PLL_LOCK BIT(4)
+#define IMX8QM_STTS0_LANE1_TX_PLL_LOCK BIT(12)
+
+#define IMX8QM_PCIE_TYPE_MASK (0xF << 24)
+
+#define IMX8QM_PHYX2_CTRL0_APB_MASK 0x3
+#define IMX8QM_PHY_APB_RSTN_0 BIT(0)
+#define IMX8QM_PHY_APB_RSTN_1 BIT(1)
+
+#define IMX8QM_MISC_IOB_RXENA BIT(0)
+#define IMX8QM_MISC_IOB_TXENA BIT(1)
+#define IMX8QM_CSR_MISC_IOB_A_0_TXOE BIT(2)
+#define IMX8QM_CSR_MISC_IOB_A_0_M1M0_MASK (0x3 << 3)
+#define IMX8QM_CSR_MISC_IOB_A_0_M1M0_2 BIT(4)
+#define IMX8QM_MISC_PHYX1_EPCS_SEL BIT(12)
+#define IMX8QM_MISC_PCIE_AB_SELECT BIT(13)
+
+#define IMX8MM_GPR_PCIE_REF_CLK_SEL (0x3 << 24)
+#define IMX8MM_GPR_PCIE_REF_CLK_PLL (0x3 << 24)
+#define IMX8MM_GPR_PCIE_REF_CLK_EXT (0x2 << 24)
+#define IMX8MM_GPR_PCIE_AUX_EN BIT(19)
+#define IMX8MM_GPR_PCIE_CMN_RST BIT(18)
+#define IMX8MM_GPR_PCIE_POWER_OFF BIT(17)
+#define IMX8MM_GPR_PCIE_SSC_EN BIT(16)
+
+/*
+ * The default value of the reserved DDR memory region used to verify
+ * EP/RC memory space access operations.
+ * The layout of the 1G DDR on SD boards:
+ * [imx6qdl-sd-ard boards]   0x1000_0000 ~ 0x4FFF_FFFF
+ * [imx6sx,imx7d platforms]  0x8000_0000 ~ 0xBFFF_FFFF
+ */
+static u32 ddr_test_region = 0, test_region_size = SZ_2M;
+static bool dma_w_end, dma_r_end, dma_en;
+
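+/*
+ * regmap access filters for the i.MX8 HSIO CSR block: only the PHY,
+ * PCIE and MISC CSR offsets of the controller in use are accessible;
+ * the IMX8QXP variant exposes PHYX1/PCIEB where the other i.MX8
+ * variants expose PHYX2/PCIEA.
+ */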
+static bool imx6_pcie_readable_reg(struct device *dev, unsigned int reg)
+{
+ enum imx6_pcie_variants variant;
+ struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+
+ variant = imx6_pcie->drvdata->variant;
+ if (variant == IMX8QXP) {
+ switch (reg) {
+ case IMX8QM_CSR_PHYX1_OFFSET:
+ case IMX8QM_CSR_PCIEB_OFFSET:
+ case IMX8QM_CSR_MISC_OFFSET:
+ case IMX8QM_CSR_PHYX1_OFFSET + IMX8QM_CSR_PHYX_STTS0_OFFSET:
+ case IMX8QM_CSR_PCIEB_OFFSET + IMX8QM_CSR_PCIE_CTRL1_OFFSET:
+ case IMX8QM_CSR_PCIEB_OFFSET + IMX8QM_CSR_PCIE_CTRL2_OFFSET:
+ case IMX8QM_CSR_PCIEB_OFFSET + IMX8QM_CSR_PCIE_STTS0_OFFSET:
+ return true;
+
+ default:
+ return false;
+ }
+ } else {
+ switch (reg) {
+ case IMX8QM_CSR_PHYX2_OFFSET:
+ case IMX8QM_CSR_PCIEA_OFFSET:
+ case IMX8QM_CSR_MISC_OFFSET:
+ case IMX8QM_CSR_PHYX2_OFFSET + IMX8QM_CSR_PHYX_STTS0_OFFSET:
+ case IMX8QM_CSR_PCIEA_OFFSET + IMX8QM_CSR_PCIE_CTRL1_OFFSET:
+ case IMX8QM_CSR_PCIEA_OFFSET + IMX8QM_CSR_PCIE_CTRL2_OFFSET:
+ case IMX8QM_CSR_PCIEA_OFFSET + IMX8QM_CSR_PCIE_STTS0_OFFSET:
+ return true;
+ default:
+ return false;
+ }
+ }
+}
+
+static bool imx6_pcie_writeable_reg(struct device *dev, unsigned int reg)
+{
+ enum imx6_pcie_variants variant;
+ struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+
+ variant = imx6_pcie->drvdata->variant;
+ if (variant == IMX8QXP) {
+ switch (reg) {
+ case IMX8QM_CSR_PHYX1_OFFSET:
+ case IMX8QM_CSR_PCIEB_OFFSET:
+ case IMX8QM_CSR_MISC_OFFSET:
+ case IMX8QM_CSR_PCIEB_OFFSET + IMX8QM_CSR_PCIE_CTRL1_OFFSET:
+ case IMX8QM_CSR_PCIEB_OFFSET + IMX8QM_CSR_PCIE_CTRL2_OFFSET:
+ return true;
+
+ default:
+ return false;
+ }
+ } else {
+ switch (reg) {
+ case IMX8QM_CSR_PHYX2_OFFSET:
+ case IMX8QM_CSR_PCIEA_OFFSET:
+ case IMX8QM_CSR_MISC_OFFSET:
+ case IMX8QM_CSR_PCIEA_OFFSET + IMX8QM_CSR_PCIE_CTRL1_OFFSET:
+ case IMX8QM_CSR_PCIEA_OFFSET + IMX8QM_CSR_PCIE_CTRL2_OFFSET:
+ return true;
+ default:
+ return false;
+ }
+ }
+}
+
+static const struct regmap_config imx6_pcie_regconfig = {
+ .max_register = IMX8QM_CSR_MISC_OFFSET,
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .val_format_endian = REGMAP_ENDIAN_NATIVE,
+ .num_reg_defaults_raw = IMX8QM_CSR_MISC_OFFSET / sizeof(uint32_t) + 1,
+ .readable_reg = imx6_pcie_readable_reg,
+ .writeable_reg = imx6_pcie_writeable_reg,
+ .cache_type = REGCACHE_NONE,
+};
+
static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, bool exp_val)
{
struct dw_pcie *pci = imx6_pcie->pci;
@@ -371,53 +578,32 @@ static int imx6_pcie_attach_pd(struct device *dev)
return -EINVAL;
}
- return 0;
-}
-
-static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
-{
- struct device *dev = imx6_pcie->pci->dev;
-
switch (imx6_pcie->drvdata->variant) {
- case IMX7D:
- case IMX8MQ:
- reset_control_assert(imx6_pcie->pciephy_reset);
- reset_control_assert(imx6_pcie->apps_reset);
- break;
- case IMX6SX:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
- IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
- /* Force PCIe PHY reset */
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
- IMX6SX_GPR5_PCIE_BTNRST_RESET,
- IMX6SX_GPR5_PCIE_BTNRST_RESET);
- break;
- case IMX6QP:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_SW_RST,
- IMX6Q_GPR1_PCIE_SW_RST);
+ case IMX8QM:
+ case IMX8QXP:
+ imx6_pcie->pd_hsio_gpio = dev_pm_domain_attach_by_name(dev, "hsio_gpio");
+ if (IS_ERR(imx6_pcie->pd_hsio_gpio))
+ return PTR_ERR(imx6_pcie->pd_hsio_gpio);
+
+ link = device_link_add(dev, imx6_pcie->pd_hsio_gpio,
+ DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_RPM_ACTIVE);
+ if (!link) {
+ dev_err(dev, "Failed to add device_link to hsio_gpio pd.\n");
+ return -EINVAL;
+ }
+
break;
- case IMX6Q:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
+ default:
break;
}
- if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
- int ret = regulator_disable(imx6_pcie->vpcie);
-
- if (ret)
- dev_err(dev, "failed to disable vpcie regulator: %d\n",
- ret);
- }
+ return 0;
}
static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie)
{
- WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ);
return imx6_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
}
@@ -457,6 +643,7 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
case IMX7D:
break;
case IMX8MQ:
+ case IMX8MM:
ret = clk_prepare_enable(imx6_pcie->pcie_aux);
if (ret) {
dev_err(dev, "unable to enable pcie_aux clock\n");
@@ -475,6 +662,34 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN,
IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN);
break;
+ case IMX8QXP:
+ case IMX8QM:
+ ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi);
+ if (ret) {
+ dev_err(dev, "unable to enable pcie_axi clock\n");
+ break;
+ }
+ ret = clk_prepare_enable(imx6_pcie->pcie_per);
+ if (ret) {
+ dev_err(dev, "unable to enable pcie_per clock\n");
+ clk_disable_unprepare(imx6_pcie->pcie_inbound_axi);
+ break;
+ }
+
+ ret = clk_prepare_enable(imx6_pcie->phy_per);
+ if (unlikely(ret)) {
+ clk_disable_unprepare(imx6_pcie->pcie_per);
+ clk_disable_unprepare(imx6_pcie->pcie_inbound_axi);
+ dev_err(dev, "unable to enable phy per clock\n");
+ }
+ ret = clk_prepare_enable(imx6_pcie->misc_per);
+ if (unlikely(ret)) {
+ clk_disable_unprepare(imx6_pcie->phy_per);
+ clk_disable_unprepare(imx6_pcie->pcie_per);
+ clk_disable_unprepare(imx6_pcie->pcie_inbound_axi);
+ dev_err(dev, "unable to enable misc per clock\n");
+ }
+ break;
}
return ret;
@@ -492,48 +707,226 @@ static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
PHY_PLL_LOCK_WAIT_TIMEOUT))
dev_err(dev, "PCIe PLL lock timeout\n");
}
-
-static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
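+/*
+ * Poll for PHY PLL lock: PCIE_PHY_CMN_REG75/PLL_DONE on i.MX8MM, or the
+ * HSIO PHYX STTS0 lane TX PLL lock bits selected by hsio_cfg on
+ * i.MX8QM/QXP.
+ */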
+static void imx8_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
{
+ u32 val, retries = 0, tmp = 0, orig = 0;
struct dw_pcie *pci = imx6_pcie->pci;
struct device *dev = pci->dev;
- int ret;
- if (imx6_pcie->vpcie && !regulator_is_enabled(imx6_pcie->vpcie)) {
- ret = regulator_enable(imx6_pcie->vpcie);
- if (ret) {
- dev_err(dev, "failed to enable vpcie regulator: %d\n",
- ret);
- return;
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX8MM:
+ for (retries = 0; retries < PHY_PLL_LOCK_WAIT_MAX_RETRIES;
+ retries++) {
+ tmp = readl(imx6_pcie->phy_base + PCIE_PHY_CMN_REG75);
+ if (tmp == PCIE_PHY_CMN_REG75_PLL_DONE)
+ break;
+ udelay(10);
+ }
+ break;
+
+ case IMX8QXP:
+ case IMX8QM:
+ for (retries = 0; retries < PHY_PLL_LOCK_WAIT_MAX_RETRIES;
+ retries++) {
+ if (imx6_pcie->hsio_cfg == PCIEAX1PCIEBX1SATA) {
+ regmap_read(imx6_pcie->iomuxc_gpr,
+ IMX8QM_CSR_PHYX2_OFFSET + 0x4,
+ &tmp);
+ if (imx6_pcie->controller_id == 0) /* pciea 1 lanes */
+ orig = IMX8QM_STTS0_LANE0_TX_PLL_LOCK;
+ else /* pcieb 1 lanes */
+ orig = IMX8QM_STTS0_LANE1_TX_PLL_LOCK;
+ } else if (imx6_pcie->hsio_cfg == PCIEAX2PCIEBX1) {
+ val = IMX8QM_CSR_PHYX2_OFFSET
+ + imx6_pcie->controller_id * SZ_64K;
+ regmap_read(imx6_pcie->iomuxc_gpr,
+ val + IMX8QM_CSR_PHYX_STTS0_OFFSET,
+ &tmp);
+ orig = IMX8QM_STTS0_LANE0_TX_PLL_LOCK;
+ if (imx6_pcie->controller_id == 0) /* pciea 2 lanes */
+ orig |= IMX8QM_STTS0_LANE1_TX_PLL_LOCK;
+ } else if (imx6_pcie->hsio_cfg == PCIEAX2SATA) {
+ regmap_read(imx6_pcie->iomuxc_gpr,
+ IMX8QM_CSR_PHYX2_OFFSET + 0x4,
+ &tmp);
+ orig = IMX8QM_STTS0_LANE0_TX_PLL_LOCK;
+ orig |= IMX8QM_STTS0_LANE1_TX_PLL_LOCK;
+ }
+ tmp &= orig;
+ if (tmp == orig)
+ break;
+ udelay(10);
}
+ break;
+
+ default:
+ break;
}
+ if (retries >= PHY_PLL_LOCK_WAIT_MAX_RETRIES)
+ dev_err(dev, "PCIe PLL lock timeout\n");
+}
+
+static void imx6_pcie_clk_enable(struct imx6_pcie *imx6_pcie)
+{
+ int ret;
+ struct dw_pcie *pci = imx6_pcie->pci;
+ struct device *dev = pci->dev;
+
ret = clk_prepare_enable(imx6_pcie->pcie_phy);
- if (ret) {
+ if (ret)
dev_err(dev, "unable to enable pcie_phy clock\n");
- goto err_pcie_phy;
- }
ret = clk_prepare_enable(imx6_pcie->pcie_bus);
- if (ret) {
+ if (ret)
dev_err(dev, "unable to enable pcie_bus clock\n");
- goto err_pcie_bus;
- }
ret = clk_prepare_enable(imx6_pcie->pcie);
- if (ret) {
+ if (ret)
dev_err(dev, "unable to enable pcie clock\n");
- goto err_pcie;
- }
ret = imx6_pcie_enable_ref_clk(imx6_pcie);
- if (ret) {
+ if (ret)
dev_err(dev, "unable to enable pcie ref clock\n");
- goto err_ref_clk;
- }
/* allow the clocks to stabilize */
usleep_range(200, 500);
+}
+
+static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
+{
+ clk_disable_unprepare(imx6_pcie->pcie);
+ clk_disable_unprepare(imx6_pcie->pcie_phy);
+ clk_disable_unprepare(imx6_pcie->pcie_bus);
+
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX6SX:
+ clk_disable_unprepare(imx6_pcie->pcie_inbound_axi);
+ break;
+ case IMX7D:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
+ IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
+ break;
+ case IMX8MQ:
+ case IMX8MM:
+ clk_disable_unprepare(imx6_pcie->pcie_aux);
+ break;
+ case IMX8QXP:
+ case IMX8QM:
+ clk_disable_unprepare(imx6_pcie->pcie_per);
+ clk_disable_unprepare(imx6_pcie->pcie_inbound_axi);
+ clk_disable_unprepare(imx6_pcie->phy_per);
+ clk_disable_unprepare(imx6_pcie->misc_per);
+ break;
+ default:
+ break;
+ }
+}
+
+static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
+{
+ u32 val;
+ int i;
+ struct device *dev = imx6_pcie->pci->dev;
+
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX7D:
+ case IMX8MQ:
+ case IMX8MM:
+ reset_control_assert(imx6_pcie->pciephy_reset);
+ reset_control_assert(imx6_pcie->apps_reset);
+ break;
+ case IMX6SX:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
+ IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
+ /* Force PCIe PHY reset */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
+ IMX6SX_GPR5_PCIE_BTNRST_RESET,
+ IMX6SX_GPR5_PCIE_BTNRST_RESET);
+ break;
+ case IMX6QP:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_SW_RST,
+ IMX6Q_GPR1_PCIE_SW_RST);
+ break;
+ case IMX6Q:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
+ break;
+ case IMX8QXP:
+ imx6_pcie_clk_enable(imx6_pcie);
+ val = IMX8QM_CSR_PCIEB_OFFSET;
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ val + IMX8QM_CSR_PCIE_CTRL2_OFFSET,
+ IMX8QM_CTRL_BUTTON_RST_N,
+ IMX8QM_CTRL_BUTTON_RST_N);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ val + IMX8QM_CSR_PCIE_CTRL2_OFFSET,
+ IMX8QM_CTRL_PERST_N,
+ IMX8QM_CTRL_PERST_N);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ val + IMX8QM_CSR_PCIE_CTRL2_OFFSET,
+ IMX8QM_CTRL_POWER_UP_RST_N,
+ IMX8QM_CTRL_POWER_UP_RST_N);
+ break;
+ case IMX8QM:
+ imx6_pcie_clk_enable(imx6_pcie);
+ for (i = 0; i <= imx6_pcie->controller_id; i++) {
+ val = IMX8QM_CSR_PCIEA_OFFSET + i * SZ_64K;
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ val + IMX8QM_CSR_PCIE_CTRL2_OFFSET,
+ IMX8QM_CTRL_BUTTON_RST_N,
+ IMX8QM_CTRL_BUTTON_RST_N);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ val + IMX8QM_CSR_PCIE_CTRL2_OFFSET,
+ IMX8QM_CTRL_PERST_N,
+ IMX8QM_CTRL_PERST_N);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ val + IMX8QM_CSR_PCIE_CTRL2_OFFSET,
+ IMX8QM_CTRL_POWER_UP_RST_N,
+ IMX8QM_CTRL_POWER_UP_RST_N);
+ }
+ break;
+ }
+
+ if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
+ int ret = regulator_disable(imx6_pcie->vpcie);
+
+ if (ret)
+ dev_err(dev, "failed to disable vpcie regulator: %d\n",
+ ret);
+ }
+}
+
+static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
+{
+ struct dw_pcie *pci = imx6_pcie->pci;
+ struct device *dev = pci->dev;
+ int ret, i;
+ u32 val, tmp;
+
+ if (imx6_pcie->vpcie && !regulator_is_enabled(imx6_pcie->vpcie)) {
+ ret = regulator_enable(imx6_pcie->vpcie);
+ if (ret) {
+ dev_err(dev, "failed to enable vpcie regulator: %d\n",
+ ret);
+ return;
+ }
+ }
+
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX8QXP:
+ case IMX8QM:
+ /* Clocks have already been enabled */
+ break;
+ default:
+ imx6_pcie_clk_enable(imx6_pcie);
+ break;
+ }
/* Some boards don't have PCIe reset GPIO. */
if (gpio_is_valid(imx6_pcie->reset_gpio)) {
@@ -545,8 +938,62 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
}
switch (imx6_pcie->drvdata->variant) {
+ case IMX8QXP:
+ case IMX8QM:
+ val = IMX8QM_CSR_PCIEA_OFFSET
+ + imx6_pcie->controller_id * SZ_64K;
+ /* bit19 PM_REQ_CORE_RST of pciex#_stts0 should be cleared. */
+ for (i = 0; i < 100; i++) {
+ regmap_read(imx6_pcie->iomuxc_gpr,
+ val + IMX8QM_CSR_PCIE_STTS0_OFFSET,
+ &tmp);
+ if ((tmp & IMX8QM_CTRL_STTS0_PM_REQ_CORE_RST) == 0)
+ break;
+ udelay(10);
+ }
+
+ if ((tmp & IMX8QM_CTRL_STTS0_PM_REQ_CORE_RST) != 0)
+ dev_err(dev, "ERROR PM_REQ_CORE_RST is still set.\n");
+
+ /* wait for the phy pll lock first. */
+ imx8_pcie_wait_for_phy_pll_lock(imx6_pcie);
+ break;
case IMX8MQ:
+ case IMX8MM:
reset_control_deassert(imx6_pcie->pciephy_reset);
+
+ imx8_pcie_wait_for_phy_pll_lock(imx6_pcie);
+ /*
+ * Set the CLK_REQ# override low and enable it to make
+ * sure that REF_CLK is turned on.
+ */
+ val = imx6_pcie_grp_offset(imx6_pcie);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, val,
+ IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE,
+ 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, val,
+ IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN,
+ IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN);
+
+ if (imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_L1SS) {
+ /*
+ * Configure CLK_REQ# high and let the L1SS be
+ * automatically controlled by HW later.
+ */
+ reset_control_deassert(imx6_pcie->clkreq_reset);
+ /*
+ * Configure the L1 latency of the RC to less than 64us.
+ * Otherwise, L1/L1SUB wouldn't be enabled by ASPM.
+ */
+ dw_pcie_dbi_ro_wr_en(pci);
+ val = readl(pci->dbi_base + SZ_1M +
+ IMX8MQ_PCIE_LINK_CAP_REG_OFFSET);
+ val &= ~PCI_EXP_LNKCAP_L1EL;
+ val |= IMX8MQ_PCIE_LINK_CAP_L1EL_64US;
+ writel(val, pci->dbi_base + SZ_1M +
+ IMX8MQ_PCIE_LINK_CAP_REG_OFFSET);
+ dw_pcie_dbi_ro_wr_dis(pci);
+ }
break;
case IMX7D:
reset_control_deassert(imx6_pcie->pciephy_reset);
@@ -587,43 +1034,223 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
}
return;
-
-err_ref_clk:
- clk_disable_unprepare(imx6_pcie->pcie);
-err_pcie:
- clk_disable_unprepare(imx6_pcie->pcie_bus);
-err_pcie_bus:
- clk_disable_unprepare(imx6_pcie->pcie_phy);
-err_pcie_phy:
- if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
- ret = regulator_disable(imx6_pcie->vpcie);
- if (ret)
- dev_err(dev, "failed to disable vpcie regulator: %d\n",
- ret);
- }
}
static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie)
{
unsigned int mask, val;
- if (imx6_pcie->drvdata->variant == IMX8MQ &&
- imx6_pcie->controller_id == 1) {
- mask = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE;
- val = FIELD_PREP(IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
- PCI_EXP_TYPE_ROOT_PORT);
+ if (IS_ENABLED(CONFIG_EP_MODE_IN_EP_RC_SYS)) {
+ if (imx6_pcie->drvdata->variant == IMX8QM
+ || imx6_pcie->drvdata->variant == IMX8QXP) {
+ val = IMX8QM_CSR_PCIEA_OFFSET
+ + imx6_pcie->controller_id * SZ_64K;
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ val, IMX8QM_PCIE_TYPE_MASK,
+ PCI_EXP_TYPE_ENDPOINT << 24);
+ } else {
+ if (unlikely(imx6_pcie->controller_id))
+ /* iMX8MQ second PCIE */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IOMUXC_GPR12,
+ IMX6Q_GPR12_DEVICE_TYPE >> 4,
+ PCI_EXP_TYPE_ENDPOINT << 8);
+ else
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IOMUXC_GPR12,
+ IMX6Q_GPR12_DEVICE_TYPE,
+ PCI_EXP_TYPE_ENDPOINT << 12);
+ }
} else {
- mask = IMX6Q_GPR12_DEVICE_TYPE;
- val = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE,
- PCI_EXP_TYPE_ROOT_PORT);
+ if (imx6_pcie->drvdata->variant == IMX8QM ||
+ imx6_pcie->drvdata->variant == IMX8QXP) {
+ val = IMX8QM_CSR_PCIEA_OFFSET
+ + imx6_pcie->controller_id * SZ_64K;
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ val, IMX8QM_PCIE_TYPE_MASK,
+ PCI_EXP_TYPE_ROOT_PORT << 24);
+ } else if (imx6_pcie->drvdata->variant == IMX8MQ &&
+ imx6_pcie->controller_id == 1) {
+ mask = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE;
+ val = FIELD_PREP(IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
+ PCI_EXP_TYPE_ROOT_PORT);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ mask, val);
+ } else {
+ mask = IMX6Q_GPR12_DEVICE_TYPE;
+ val = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE,
+ PCI_EXP_TYPE_ROOT_PORT);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ mask, val);
+ }
}
-
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, mask, val);
}
static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
{
+ unsigned int offset;
+
switch (imx6_pcie->drvdata->variant) {
+ case IMX8QXP:
+ case IMX8QM:
+ if (imx6_pcie->hsio_cfg == PCIEAX2SATA) {
+ /*
+ * bit 0 rx ena 1.
+ * bit12 PHY_X1_EPCS_SEL 1.
+ * bit13 phy_ab_select 0.
+ */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IMX8QM_CSR_PHYX2_OFFSET,
+ IMX8QM_PHYX2_CTRL0_APB_MASK,
+ IMX8QM_PHY_APB_RSTN_0
+ | IMX8QM_PHY_APB_RSTN_1);
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IMX8QM_CSR_MISC_OFFSET,
+ IMX8QM_MISC_PHYX1_EPCS_SEL,
+ IMX8QM_MISC_PHYX1_EPCS_SEL);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IMX8QM_CSR_MISC_OFFSET,
+ IMX8QM_MISC_PCIE_AB_SELECT,
+ 0);
+ } else if (imx6_pcie->hsio_cfg == PCIEAX1PCIEBX1SATA) {
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IMX8QM_CSR_PHYX2_OFFSET,
+ IMX8QM_PHYX2_CTRL0_APB_MASK,
+ IMX8QM_PHY_APB_RSTN_0
+ | IMX8QM_PHY_APB_RSTN_1);
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IMX8QM_CSR_MISC_OFFSET,
+ IMX8QM_MISC_PHYX1_EPCS_SEL,
+ IMX8QM_MISC_PHYX1_EPCS_SEL);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IMX8QM_CSR_MISC_OFFSET,
+ IMX8QM_MISC_PCIE_AB_SELECT,
+ IMX8QM_MISC_PCIE_AB_SELECT);
+ } else if (imx6_pcie->hsio_cfg == PCIEAX2PCIEBX1) {
+ /*
+ * bit 0 rx ena 1.
+ * bit12 PHY_X1_EPCS_SEL 0.
+ * bit13 phy_ab_select 1.
+ */
+ if (imx6_pcie->controller_id)
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IMX8QM_CSR_PHYX1_OFFSET,
+ IMX8QM_PHY_APB_RSTN_0,
+ IMX8QM_PHY_APB_RSTN_0);
+ else
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IMX8QM_CSR_PHYX2_OFFSET,
+ IMX8QM_PHYX2_CTRL0_APB_MASK,
+ IMX8QM_PHY_APB_RSTN_0
+ | IMX8QM_PHY_APB_RSTN_1);
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IMX8QM_CSR_MISC_OFFSET,
+ IMX8QM_MISC_PHYX1_EPCS_SEL,
+ 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IMX8QM_CSR_MISC_OFFSET,
+ IMX8QM_MISC_PCIE_AB_SELECT,
+ IMX8QM_MISC_PCIE_AB_SELECT);
+ }
+
+ if (imx6_pcie->ext_osc) {
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IMX8QM_CSR_MISC_OFFSET,
+ IMX8QM_MISC_IOB_RXENA,
+ IMX8QM_MISC_IOB_RXENA);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IMX8QM_CSR_MISC_OFFSET,
+ IMX8QM_MISC_IOB_TXENA,
+ 0);
+ } else {
+ /* Try to use the internal PLL as the ref clk */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IMX8QM_CSR_MISC_OFFSET,
+ IMX8QM_MISC_IOB_RXENA,
+ 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IMX8QM_CSR_MISC_OFFSET,
+ IMX8QM_MISC_IOB_TXENA,
+ IMX8QM_MISC_IOB_TXENA);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IMX8QM_CSR_MISC_OFFSET,
+ IMX8QM_CSR_MISC_IOB_A_0_TXOE
+ | IMX8QM_CSR_MISC_IOB_A_0_M1M0_MASK,
+ IMX8QM_CSR_MISC_IOB_A_0_TXOE
+ | IMX8QM_CSR_MISC_IOB_A_0_M1M0_2);
+ }
+
+ break;
+ case IMX8MM:
+ offset = imx6_pcie_grp_offset(imx6_pcie);
+
+ dev_info(imx6_pcie->pci->dev, "%s REF_CLK is used!.\n",
+ imx6_pcie->ext_osc ? "EXT" : "PLL");
+ if (imx6_pcie->ext_osc) {
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
+ IMX8MQ_GPR_PCIE_REF_USE_PAD, 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
+ IMX8MM_GPR_PCIE_REF_CLK_SEL,
+ IMX8MM_GPR_PCIE_REF_CLK_SEL);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
+ IMX8MM_GPR_PCIE_AUX_EN,
+ IMX8MM_GPR_PCIE_AUX_EN);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
+ IMX8MM_GPR_PCIE_POWER_OFF, 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
+ IMX8MM_GPR_PCIE_SSC_EN, 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
+ IMX8MM_GPR_PCIE_REF_CLK_SEL,
+ IMX8MM_GPR_PCIE_REF_CLK_EXT);
+ udelay(100);
+ /* Do the PHY common block reset */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
+ IMX8MM_GPR_PCIE_CMN_RST,
+ IMX8MM_GPR_PCIE_CMN_RST);
+ udelay(200);
+ } else {
+ /* Configure the internal PLL as REF clock */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
+ IMX8MQ_GPR_PCIE_REF_USE_PAD, 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
+ IMX8MM_GPR_PCIE_REF_CLK_SEL,
+ IMX8MM_GPR_PCIE_REF_CLK_SEL);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
+ IMX8MM_GPR_PCIE_AUX_EN,
+ IMX8MM_GPR_PCIE_AUX_EN);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
+ IMX8MM_GPR_PCIE_POWER_OFF, 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
+ IMX8MM_GPR_PCIE_SSC_EN, 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
+ IMX8MM_GPR_PCIE_REF_CLK_SEL,
+ IMX8MM_GPR_PCIE_REF_CLK_PLL);
+ udelay(100);
+ /* Configure the PHY */
+ writel(PCIE_PHY_CMN_REG62_PLL_CLK_OUT,
+ imx6_pcie->phy_base + PCIE_PHY_CMN_REG62);
+ writel(PCIE_PHY_CMN_REG64_AUX_RX_TX_TERM,
+ imx6_pcie->phy_base + PCIE_PHY_CMN_REG64);
+ /* Do the PHY common block reset */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
+ IMX8MM_GPR_PCIE_CMN_RST,
+ IMX8MM_GPR_PCIE_CMN_RST);
+ udelay(200);
+ }
+
+ /*
+ * Configure the TRSV registers of the i.MX8MM PCIe PHY in order
+ * to pass the compliance tests.
+ */
+ writel(PCIE_PHY_TRSV_REG5_GEN1_DEEMP,
+ imx6_pcie->phy_base + PCIE_PHY_TRSV_REG5);
+ writel(PCIE_PHY_TRSV_REG6_GEN2_DEEMP,
+ imx6_pcie->phy_base + PCIE_PHY_TRSV_REG6);
+
+ break;
case IMX8MQ:
/*
* TODO: Currently this code assumes external
@@ -740,6 +1367,7 @@ static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
static void imx6_pcie_ltssm_enable(struct device *dev)
{
+ u32 val;
struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
switch (imx6_pcie->drvdata->variant) {
@@ -752,8 +1380,19 @@ static void imx6_pcie_ltssm_enable(struct device *dev)
break;
case IMX7D:
case IMX8MQ:
+ case IMX8MM:
reset_control_deassert(imx6_pcie->apps_reset);
break;
+ case IMX8QXP:
+ case IMX8QM:
+ /* Bit4 of the CTRL2 */
+ val = IMX8QM_CSR_PCIEA_OFFSET
+ + imx6_pcie->controller_id * SZ_64K;
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ val + IMX8QM_CSR_PCIE_CTRL2_OFFSET,
+ IMX8QM_CTRL_LTSSM_ENABLE,
+ IMX8QM_CTRL_LTSSM_ENABLE);
+ break;
}
}
@@ -781,11 +1420,11 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
if (ret)
goto err_reset_phy;
- if (imx6_pcie->link_gen == 2) {
+ if (imx6_pcie->link_gen >= 2) {
/* Allow Gen2 mode after the link is up. */
tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR);
tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
- tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
+ tmp |= imx6_pcie->link_gen;
dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);
/*
@@ -833,23 +1472,52 @@ err_reset_phy:
dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0),
dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1));
imx6_pcie_reset_phy(imx6_pcie);
+ if (!IS_ENABLED(CONFIG_PCI_IMX6_COMPLIANCE_TEST)) {
+ imx6_pcie_clk_disable(imx6_pcie);
+ if (imx6_pcie->vpcie != NULL)
+ regulator_disable(imx6_pcie->vpcie);
+ if (imx6_pcie->epdev_on != NULL)
+ regulator_disable(imx6_pcie->epdev_on);
+ }
+
return ret;
}
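+/*
+ * Set the MSI enable bit in the root complex's own MSI capability
+ * (PCIE_RC_IMX6_MSI_CAP is the RC's MSI capability offset, defined
+ * elsewhere in this file) so MSIs can be delivered when the kernel has
+ * MSI support enabled.
+ */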
+static void pci_imx_set_msi_en(struct pcie_port *pp)
+{
+ u16 val;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
+ if (pci_msi_enabled()) {
+ val = dw_pcie_readw_dbi(pci, PCIE_RC_IMX6_MSI_CAP +
+ PCI_MSI_FLAGS);
+ val |= PCI_MSI_FLAGS_ENABLE;
+ dw_pcie_writew_dbi(pci, PCIE_RC_IMX6_MSI_CAP + PCI_MSI_FLAGS,
+ val);
+ }
+}
+
static int imx6_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+ if (gpio_is_valid(imx6_pcie->dis_gpio))
+ gpio_set_value_cansleep(imx6_pcie->dis_gpio, 1);
+
imx6_pcie_assert_core_reset(imx6_pcie);
imx6_pcie_init_phy(imx6_pcie);
imx6_pcie_deassert_core_reset(imx6_pcie);
imx6_setup_phy_mpll(imx6_pcie);
- dw_pcie_setup_rc(pp);
- imx6_pcie_establish_link(imx6_pcie);
+ if (!IS_ENABLED(CONFIG_EP_MODE_IN_EP_RC_SYS)) {
+ dw_pcie_setup_rc(pp);
+ pci_imx_set_msi_en(pp);
+ if (imx6_pcie_establish_link(imx6_pcie))
+ return -ENODEV;
- if (IS_ENABLED(CONFIG_PCI_MSI))
- dw_pcie_msi_init(pp);
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ dw_pcie_msi_init(pp);
+ }
return 0;
}
@@ -885,13 +1553,285 @@ static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
return 0;
}
+static u64 imx6_pcie_cpu_addr_fixup(struct dw_pcie *pcie, u64 cpu_addr)
+{
+ struct pcie_port *pp = &pcie->pp;
+ struct imx6_pcie *imx6_pcie = to_imx6_pcie(pcie);
+
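+ /*
+ * On variants flagged with IMX6_PCIE_FLAG_IMX6_CPU_ADDR_FIXUP, the
+ * bus-visible address differs from the CPU address: rebase it into
+ * the HSIO local address space given by the "local-addr" DT property.
+ */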
+ if (imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_CPU_ADDR_FIXUP)
+ return (cpu_addr + imx6_pcie->local_addr - pp->mem_base);
+ else
+ return cpu_addr;
+}
+
static const struct dw_pcie_ops dw_pcie_ops = {
/* No special ops needed, but pcie-designware still expects this struct */
+ .cpu_addr_fixup = imx6_pcie_cpu_addr_fixup,
};
+static ssize_t ep_bar0_addr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+ struct dw_pcie *pci = imx6_pcie->pci;
+
+ return sprintf(buf, "imx-pcie-bar0-addr-info start 0x%08x\n",
+ readl(pci->dbi_base + PCI_BASE_ADDRESS_0));
+}
+
+static ssize_t ep_bar0_addr_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ u32 bar_start;
+ struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+ struct dw_pcie *pci = imx6_pcie->pci;
+
+ if (sscanf(buf, "%x\n", &bar_start) != 1)
+ return -EINVAL;
+ writel(bar_start, pci->dbi_base + PCI_BASE_ADDRESS_0);
+
+ return count;
+}
+
+static void imx6_pcie_regions_setup(struct device *dev)
+{
+ struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+ struct dw_pcie *pci = imx6_pcie->pci;
+ struct pcie_port *pp = &pci->pp;
+
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX8QM:
+ case IMX8QXP:
+ case IMX8MQ:
+ case IMX8MM:
+ /*
+ * RPMSG reserved 4Mbytes, but only up to 2Mbytes are used.
+ * The remaining 2Mbytes can be used here.
+ */
+ if (ddr_test_region == 0)
+ dev_err(dev, "invalid ddr test region.\n");
+ break;
+ case IMX6SX:
+ case IMX7D:
+ ddr_test_region = 0xb0000000;
+ break;
+
+ case IMX6Q:
+ case IMX6QP:
+ ddr_test_region = 0x40000000;
+ break;
+ }
+ dev_info(dev, "ddr_test_region is 0x%08x.\n", ddr_test_region);
+
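+ /*
+ * Program outbound iATU region 0 (the second 0 argument is assumed
+ * to be PCIE_ATU_TYPE_MEM) so that CPU accesses to pp->mem_base
+ * reach the reserved DDR test region on the remote side.
+ */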
+ dw_pcie_prog_outbound_atu(pci, 0, 0, pp->mem_base,
+ ddr_test_region, test_region_size);
+}
+
+static DEVICE_ATTR_RW(ep_bar0_addr);
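+/*
+ * Declares a read/write "ep_bar0_addr" sysfs attribute backed by the
+ * show/store handlers above; once the attribute group below is
+ * registered, the EP's BAR0 address can be inspected and reprogrammed
+ * from user space.
+ */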
+
+static struct attribute *imx6_pcie_ep_attrs[] = {
+ &dev_attr_ep_bar0_addr.attr,
+ NULL
+};
+
+static struct attribute_group imx6_pcie_attrgroup = {
+ .attrs = imx6_pcie_ep_attrs,
+};
+
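+/*
+ * Hand-configure the controller as an endpoint: link width from the
+ * "num-lanes" DT property, iATU unroll detection, command register,
+ * IDs/class, and the BAR layout described in the comments below.
+ */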
+static void imx6_pcie_setup_ep(struct dw_pcie *pci)
+{
+ int ret;
+ u32 val;
+ u32 lanes;
+ struct device_node *np = pci->dev->of_node;
+
+ ret = of_property_read_u32(np, "num-lanes", &lanes);
+ if (ret)
+ lanes = 0;
+
+ /* set the number of lanes */
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
+ val &= ~PORT_LINK_MODE_MASK;
+ switch (lanes) {
+ case 1:
+ val |= PORT_LINK_MODE_1_LANES;
+ break;
+ default:
+ dev_err(pci->dev, "num-lanes %u: invalid value\n", lanes);
+ return;
+ }
+ dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
+
+ /* set link width speed control register */
+ val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
+ switch (lanes) {
+ case 1:
+ val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
+ break;
+ default:
+ dev_err(pci->dev, "num-lanes %u: invalid value\n", lanes);
+ return;
+ }
+ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
+
+ /* get iATU unroll support */
+ val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
+ if (val == 0xffffffff)
+ pci->iatu_unroll_enabled = 1;
+ dev_info(pci->dev, "iATU unroll: %s\n",
+ pci->iatu_unroll_enabled ? "enabled" : "disabled");
+
+ /* CMD reg:I/O space, MEM space, and Bus Master Enable */
+ writel(readl(pci->dbi_base + PCI_COMMAND)
+ | PCI_COMMAND_IO
+ | PCI_COMMAND_MEMORY
+ | PCI_COMMAND_MASTER,
+ pci->dbi_base + PCI_COMMAND);
+
+ /*
+ * configure the class_rev (emulate a memory RAM EP device)
+ * and BAR0/BAR1 of the EP
+ */
+ writel(0xdeadbeaf, pci->dbi_base + PCI_VENDOR_ID);
+ writel((readl(pci->dbi_base + PCI_CLASS_REVISION) & 0xFFFF)
+ | (PCI_CLASS_MEMORY_RAM << 16),
+ pci->dbi_base + PCI_CLASS_REVISION);
+ writel(0xdeadbeaf, pci->dbi_base
+ + PCI_SUBSYSTEM_VENDOR_ID);
+
+ /* 32-bit non-prefetchable 8 MB memory on BAR0 */
+ writel(0x0, pci->dbi_base + PCI_BASE_ADDRESS_0);
+ writel(SZ_8M - 1, pci->dbi_base + (1 << 12)
+ + PCI_BASE_ADDRESS_0);
+
+ /* BAR1 is unused */
+ writel(0x0, pci->dbi_base + PCI_BASE_ADDRESS_1);
+ writel(0, pci->dbi_base + (1 << 12) + PCI_BASE_ADDRESS_1);
+
+ /* 4 KB of I/O on BAR2 */
+ writel(0x1, pci->dbi_base + PCI_BASE_ADDRESS_2);
+ writel(SZ_4K - 1, pci->dbi_base + (1 << 12) +
+ PCI_BASE_ADDRESS_2);
+
+ /*
+ * 32-bit prefetchable 1 MB memory on BAR3.
+ * FIXME: the BAR3 mask is not changeable; the size
+ * is fixed to 256 bytes.
+ */
+ writel(0x8, pci->dbi_base + PCI_BASE_ADDRESS_3);
+ writel(SZ_1M - 1, pci->dbi_base + (1 << 12)
+ + PCI_BASE_ADDRESS_3);
+
+ /*
+ * 64-bit prefetchable 1 MB memory on BAR4/BAR5.
+ * FIXME: BAR4/BAR5 are not enabled yet.
+ */
+ writel(0xc, pci->dbi_base + PCI_BASE_ADDRESS_4);
+ writel(SZ_1M - 1, pci->dbi_base + (1 << 12)
+ + PCI_BASE_ADDRESS_4);
+ writel(0, pci->dbi_base + (1 << 12) + PCI_BASE_ADDRESS_5);
+}
+
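The writes at dbi_base + (1 << 12) above appear to target the DWC BAR mask (shadow) registers, which mirror the config header one 4 KB page above the regular DBI space; writing SZ_8M - 1 there fixes BAR0's size at 8 MB. A small helper sketch under that assumption:

    /*
     * Sketch only: assumes the BAR mask registers mirror the config
     * header one 4 KB page above dbi_base, as in the code above.
     */
    #define DBI_SHADOW_OFFSET (1 << 12)

    static void ep_set_bar_size(void __iomem *dbi_base, unsigned int bar,
                                u32 size)
    {
            /* The mask value is (size - 1); size must be a power of two. */
            writel(size - 1, dbi_base + DBI_SHADOW_OFFSET +
                   PCI_BASE_ADDRESS_0 + bar * 4);
    }
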
+static irqreturn_t imx6_pcie_dma_isr(int irq, void *param)
+{
+ u32 irqs, offset;
+ struct pcie_port *pp = (struct pcie_port *)param;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+
+ offset = imx6_pcie->dma_unroll_offset;
+
+ /* check write isr */
+ irqs = readl(pci->dbi_base + offset + DMA_WRITE_INT_STS);
+ if (irqs & DMA_DONE_INT_STS) {
+ /* write 1 clear */
+ writel(irqs & DMA_DONE_INT_STS,
+ pci->dbi_base + offset + DMA_WRITE_INT_CLR);
+ dma_w_end = 1;
+ } else if (irqs & DMA_ABORT_INT_STS) {
+ pr_info("imx pcie dma write error 0x%0x.\n", irqs);
+ }
+ /* check read isr */
+ irqs = readl(pci->dbi_base + offset + DMA_READ_INT_STS);
+ if (irqs & DMA_DONE_INT_STS) {
+ /* write 1 clear */
+ writel(irqs & DMA_DONE_INT_STS,
+ pci->dbi_base + offset + DMA_READ_INT_CLR);
+ dma_r_end = 1;
+ } else if (irqs & DMA_ABORT_INT_STS) {
+ pr_info("imx pcie dma read error 0x%0x.", irqs);
+ }
+ return IRQ_HANDLED;
+}
+
+/**
+ * imx6_pcie_local_dma_start - Start one local iMX PCIe DMA transfer.
+ * @pp: the port that starts the DMA transfer.
+ * @dir: DMA direction; 1 = read, 0 = write.
+ * @chl: channel number of the iMX PCIe DMA (0-7).
+ * @src: source DMA address.
+ * @dst: destination DMA address.
+ * @len: transfer length.
+ */
+static int imx6_pcie_local_dma_start(struct pcie_port *pp, bool dir,
+ unsigned int chl, dma_addr_t src, dma_addr_t dst,
+ unsigned int len)
+{
+ u32 offset, doorbell, unroll_cal;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+
+ if (pp == NULL)
+ return -EINVAL;
+ if (chl > MAX_PCIE_DMA_CHANNELS)
+ return -EINVAL;
+
+ offset = imx6_pcie->dma_unroll_offset;
+ /* Enable the DMA engine; dir 1: read, 0: write. */
+ if (dir)
+ writel(DMA_READ_ENGINE_EN,
+ pci->dbi_base + offset
+ + DMA_READ_ENGINE_EN_OFF);
+ else
+ writel(DMA_WRITE_ENGINE_EN,
+ pci->dbi_base + offset
+ + DMA_WRITE_ENGINE_EN_OFF);
+ writel(0x0, pci->dbi_base + offset + DMA_WRITE_INT_MASK);
+ writel(0x0, pci->dbi_base + offset + DMA_READ_INT_MASK);
+ /* ch dir and ch num */
+ if (offset == 0) {
+ writel((dir << 31) | chl, pci->dbi_base + DMA_VIEWPOT_SEL_OFF);
+ writel(DMA_CHANNEL_CTRL_1_LIE,
+ pci->dbi_base + DMA_CHANNEL_CTRL_1);
+ writel(0x0, pci->dbi_base + DMA_CHANNEL_CTRL_2);
+ writel(len, pci->dbi_base + DMA_TRANSFER_SIZE);
+ writel((u32)src, pci->dbi_base + DMA_SAR_LOW);
+ writel(0x0, pci->dbi_base + DMA_SAR_HIGH);
+ writel((u32)dst, pci->dbi_base + DMA_DAR_LOW);
+ writel(0x0, pci->dbi_base + DMA_DAR_HIGH);
+ } else {
+ unroll_cal = DMA_UNROLL_CDM_OFFSET
+ + 0x200 * (chl + 1) + 0x100 * dir;
+ writel(DMA_CHANNEL_CTRL_1_LIE, pci->dbi_base + unroll_cal);
+ writel(0x0, pci->dbi_base + unroll_cal + 0x4);
+ writel(len, pci->dbi_base + unroll_cal + 0x8);
+ writel((u32)src, pci->dbi_base + unroll_cal + 0xc);
+ writel(0x0, pci->dbi_base + unroll_cal + 0x10);
+ writel((u32)dst, pci->dbi_base + unroll_cal + 0x14);
+ writel(0x0, pci->dbi_base + unroll_cal + 0x18);
+ }
+
+ doorbell = dir ? DMA_READ_DOORBELL : DMA_WRITE_DOORBELL;
+ writel(chl, pci->dbi_base + offset + doorbell);
+
+ return 0;
+}
+
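A minimal usage sketch of the helper above, mirroring the probe-time self test further below: start a DMA write on channel 0 and poll the dma_w_end flag that imx6_pcie_dma_isr() sets (buffers are assumed to come from dma_alloc_coherent(), as in the probe path):

    /* Sketch: src_dma and dst_bus are assumed to be prepared by the caller. */
    static int ep_dma_write(struct pcie_port *pp, dma_addr_t src_dma,
                            dma_addr_t dst_bus, unsigned int len)
    {
            unsigned long timeout = jiffies + msecs_to_jiffies(300);
            int ret;

            dma_w_end = 0;
            ret = imx6_pcie_local_dma_start(pp, 0, 0, src_dma, dst_bus, len);
            if (ret)
                    return ret;

            while (!dma_w_end) {
                    if (time_after(jiffies, timeout))
                            return -ETIMEDOUT;
                    udelay(1);
            }

            return 0;
    }
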
#ifdef CONFIG_PM_SLEEP
static void imx6_pcie_ltssm_disable(struct device *dev)
{
+ u32 val;
struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
switch (imx6_pcie->drvdata->variant) {
@@ -901,8 +1841,22 @@ static void imx6_pcie_ltssm_disable(struct device *dev)
IMX6Q_GPR12_PCIE_CTL_2, 0);
break;
case IMX7D:
+ case IMX8MQ:
+ case IMX8MM:
reset_control_assert(imx6_pcie->apps_reset);
break;
+ case IMX8QXP:
+ case IMX8QM:
+ /* Bit4 of the CTRL2 */
+ val = IMX8QM_CSR_PCIEA_OFFSET
+ + imx6_pcie->controller_id * SZ_64K;
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ val + IMX8QM_CSR_PCIE_CTRL2_OFFSET,
+ IMX8QM_CTRL_LTSSM_ENABLE, 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ val + IMX8QM_CSR_PCIE_CTRL2_OFFSET,
+ IMX8QM_CTRL_READY_ENTR_L23, 0);
+ break;
default:
dev_err(dev, "ltssm_disable not supported\n");
}
@@ -910,6 +1864,8 @@ static void imx6_pcie_ltssm_disable(struct device *dev)
static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
{
+ int i;
+ u32 dst, val;
struct device *dev = imx6_pcie->pci->dev;
/* Some variants have a turnoff reset in DT */
@@ -928,6 +1884,41 @@ static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6SX_GPR12_PCIE_PM_TURN_OFF, 0);
break;
+ case IMX6QP:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_PM_TURN_OFF,
+ IMX6SX_GPR12_PCIE_PM_TURN_OFF);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_PM_TURN_OFF, 0);
+ break;
+ case IMX8QXP:
+ case IMX8QM:
+ dst = IMX8QM_CSR_PCIEA_OFFSET + imx6_pcie->controller_id * SZ_64K;
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ dst + IMX8QM_CSR_PCIE_CTRL2_OFFSET,
+ IMX8QM_CTRL_PM_XMT_TURNOFF,
+ IMX8QM_CTRL_PM_XMT_TURNOFF);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ dst + IMX8QM_CSR_PCIE_CTRL2_OFFSET,
+ IMX8QM_CTRL_PM_XMT_TURNOFF,
+ 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ dst + IMX8QM_CSR_PCIE_CTRL2_OFFSET,
+ IMX8QM_CTRL_READY_ENTR_L23,
+ IMX8QM_CTRL_READY_ENTR_L23);
+ /* Check whether L2 has been entered. */
+ for (i = 0; i < 10000; i++) {
+ regmap_read(imx6_pcie->iomuxc_gpr,
+ dst + IMX8QM_CSR_PCIE_STTS0_OFFSET,
+ &val);
+ if (val & IMX8QM_CTRL_STTS0_PM_LINKST_IN_L2)
+ break;
+ udelay(10);
+ }
+ if ((val & IMX8QM_CTRL_STTS0_PM_LINKST_IN_L2) == 0)
+ dev_err(dev, "PCIE%d can't enter into L2.\n",
+ imx6_pcie->controller_id);
+ break;
default:
dev_err(dev, "PME_Turn_Off not implemented\n");
return;
@@ -944,39 +1935,15 @@ pm_turnoff_sleep:
usleep_range(1000, 10000);
}
-static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
-{
- clk_disable_unprepare(imx6_pcie->pcie);
- clk_disable_unprepare(imx6_pcie->pcie_phy);
- clk_disable_unprepare(imx6_pcie->pcie_bus);
-
- switch (imx6_pcie->drvdata->variant) {
- case IMX6SX:
- clk_disable_unprepare(imx6_pcie->pcie_inbound_axi);
- break;
- case IMX7D:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
- IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
- break;
- case IMX8MQ:
- clk_disable_unprepare(imx6_pcie->pcie_aux);
- break;
- default:
- break;
- }
-}
-
static int imx6_pcie_suspend_noirq(struct device *dev)
{
struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
return 0;
-
imx6_pcie_pm_turnoff(imx6_pcie);
- imx6_pcie_clk_disable(imx6_pcie);
imx6_pcie_ltssm_disable(dev);
+ imx6_pcie_clk_disable(imx6_pcie);
return 0;
}
@@ -994,6 +1961,7 @@ static int imx6_pcie_resume_noirq(struct device *dev)
imx6_pcie_init_phy(imx6_pcie);
imx6_pcie_deassert_core_reset(imx6_pcie);
dw_pcie_setup_rc(pp);
+ pci_imx_set_msi_en(pp);
ret = imx6_pcie_establish_link(imx6_pcie);
if (ret < 0)
@@ -1014,10 +1982,11 @@ static int imx6_pcie_probe(struct platform_device *pdev)
struct dw_pcie *pci;
struct imx6_pcie *imx6_pcie;
struct device_node *np;
- struct resource *dbi_base;
+ struct resource *dbi_base, *hsio_res;
struct device_node *node = dev->of_node;
+ void __iomem *iomem;
+ struct regmap_config regconfig = imx6_pcie_regconfig;
int ret;
- u16 val;
imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
if (!imx6_pcie)
@@ -1055,7 +2024,55 @@ static int imx6_pcie_probe(struct platform_device *pdev)
if (IS_ERR(pci->dbi_base))
return PTR_ERR(pci->dbi_base);
+ if (of_property_read_u32(node, "hsio-cfg", &imx6_pcie->hsio_cfg))
+ imx6_pcie->hsio_cfg = 0;
+ if (of_property_read_u32(node, "ext_osc", &imx6_pcie->ext_osc) < 0)
+ imx6_pcie->ext_osc = 0;
+
+ if (of_property_read_u32(node, "local-addr", &imx6_pcie->local_addr))
+ imx6_pcie->local_addr = 0;
+ if (of_property_read_u32(node, "hard-wired", &imx6_pcie->hard_wired))
+ imx6_pcie->hard_wired = 0;
+
+ np = of_parse_phandle(node, "reserved-region", 0);
+ if (np) {
+ struct resource res;
+
+ if (of_address_to_resource(np, 0, &res)) {
+ dev_err(dev, "failed to get reserved region address\n");
+ of_node_put(np);
+ return -EINVAL;
+ }
+ ddr_test_region = res.start + SZ_2M;
+ of_node_put(np);
+ }
+
/* Fetch GPIOs */
+ imx6_pcie->clkreq_gpio = of_get_named_gpio(node, "clkreq-gpio", 0);
+ if (gpio_is_valid(imx6_pcie->clkreq_gpio)) {
+ devm_gpio_request_one(&pdev->dev, imx6_pcie->clkreq_gpio,
+ GPIOF_OUT_INIT_LOW, "PCIe CLKREQ");
+ } else if (imx6_pcie->clkreq_gpio == -EPROBE_DEFER) {
+ return imx6_pcie->clkreq_gpio;
+ }
+
+ imx6_pcie->dis_gpio = of_get_named_gpio(node, "disable-gpio", 0);
+ if (gpio_is_valid(imx6_pcie->dis_gpio)) {
+ ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->dis_gpio,
+ GPIOF_OUT_INIT_LOW, "PCIe DIS");
+ if (ret) {
+ dev_err(&pdev->dev, "unable to get disable gpio\n");
+ return ret;
+ }
+ } else if (imx6_pcie->dis_gpio == -EPROBE_DEFER) {
+ return imx6_pcie->dis_gpio;
+ }
+
+ imx6_pcie->epdev_on = devm_regulator_get(&pdev->dev,
+ "epdev_on");
+ if (IS_ERR(imx6_pcie->epdev_on))
+ return -EPROBE_DEFER;
+
imx6_pcie->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0);
imx6_pcie->gpio_active_high = of_property_read_bool(node,
"reset-gpio-active-high");
@@ -1102,6 +2119,7 @@ static int imx6_pcie_probe(struct platform_device *pdev)
}
break;
case IMX8MQ:
+ case IMX8MM:
imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux");
if (IS_ERR(imx6_pcie->pcie_aux)) {
dev_err(dev, "pcie_aux clock source missing or invalid\n");
@@ -1126,6 +2144,53 @@ static int imx6_pcie_probe(struct platform_device *pdev)
return PTR_ERR(imx6_pcie->apps_reset);
}
break;
+ case IMX8QM:
+ case IMX8QXP:
+ if (dbi_base->start == IMX8_HSIO_PCIEB_BASE_ADDR)
+ imx6_pcie->controller_id = 1;
+
+ imx6_pcie->pcie_per = devm_clk_get(dev, "pcie_per");
+ if (IS_ERR(imx6_pcie->pcie_per)) {
+ dev_err(dev, "pcie_per clock source missing or invalid\n");
+ return PTR_ERR(imx6_pcie->pcie_per);
+ }
+
+ imx6_pcie->pcie_inbound_axi = devm_clk_get(&pdev->dev,
+ "pcie_inbound_axi");
+ if (IS_ERR(imx6_pcie->pcie_inbound_axi)) {
+ dev_err(&pdev->dev,
+ "pcie clock source missing or invalid\n");
+ return PTR_ERR(imx6_pcie->pcie_inbound_axi);
+ }
+
+ imx6_pcie->phy_per = devm_clk_get(dev, "phy_per");
+ if (IS_ERR(imx6_pcie->phy_per)) {
+ dev_err(dev, "failed to get per clock.\n");
+ return PTR_ERR(imx6_pcie->phy_per);
+ }
+
+ imx6_pcie->misc_per = devm_clk_get(dev, "misc_per");
+ if (IS_ERR(imx6_pcie->misc_per)) {
+ dev_err(dev, "failed to get per clock.\n");
+ return PTR_ERR(imx6_pcie->misc_per);
+ }
+
+ hsio_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "hsio");
+ if (hsio_res) {
+ iomem = devm_ioremap_resource(dev, hsio_res);
+ if (IS_ERR(iomem))
+ return PTR_ERR(iomem);
+ imx6_pcie->iomuxc_gpr =
+ devm_regmap_init_mmio(dev, iomem, &regconfig);
+ if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
+ dev_err(dev, "failed to init register map\n");
+ return PTR_ERR(imx6_pcie->iomuxc_gpr);
+ }
+ } else {
+ dev_err(dev, "missing *hsio* reg space\n");
+ }
+ break;
default:
break;
}
@@ -1137,12 +2202,20 @@ static int imx6_pcie_probe(struct platform_device *pdev)
return PTR_ERR(imx6_pcie->turnoff_reset);
}
+ imx6_pcie->clkreq_reset = devm_reset_control_get_optional_exclusive(dev, "clkreq");
+ if (IS_ERR(imx6_pcie->clkreq_reset)) {
+ dev_err(dev, "Failed to get CLKREQ reset control\n");
+ return PTR_ERR(imx6_pcie->clkreq_reset);
+ }
+
/* Grab GPR config register range */
- imx6_pcie->iomuxc_gpr =
- syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
- if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
- dev_err(dev, "unable to find iomuxc registers\n");
- return PTR_ERR(imx6_pcie->iomuxc_gpr);
+ if (imx6_pcie->iomuxc_gpr == NULL) {
+ imx6_pcie->iomuxc_gpr =
+ syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
+ if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
+ dev_err(dev, "unable to find iomuxc registers\n");
+ return PTR_ERR(imx6_pcie->iomuxc_gpr);
+ }
}
/* Grab PCIe PHY Tx Settings */
@@ -1185,16 +2258,259 @@ static int imx6_pcie_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = imx6_add_pcie_port(imx6_pcie, pdev);
- if (ret < 0)
- return ret;
+ ret = regulator_enable(imx6_pcie->epdev_on);
+ if (ret)
+ dev_err(dev, "failed to enable the epdev_on regulator\n");
+
+ if (IS_ENABLED(CONFIG_EP_MODE_IN_EP_RC_SYS)
+ && (imx6_pcie->hard_wired == 0)) {
+ int i = 0, irq;
+ u32 val, tv_count1, tv_count2;
+ dma_addr_t test_reg1_dma, test_reg2_dma;
+ void *test_reg1, *test_reg2;
+ void __iomem *pcie_arb_base_addr;
+ struct timespec64 tv1s, tv1e, tv2s, tv2e;
+ struct resource_entry *win, *tmp;
+ LIST_HEAD(res);
+ struct pcie_port *pp = &pci->pp;
+ unsigned long timeout = jiffies + msecs_to_jiffies(300000);
+
+ /* add attributes for device */
+ imx6_pcie_attrgroup.attrs = imx6_pcie_ep_attrs;
+ ret = sysfs_create_group(&pdev->dev.kobj, &imx6_pcie_attrgroup);
+ if (ret)
+ return ret;
- if (pci_msi_enabled()) {
- val = dw_pcie_readw_dbi(pci, PCIE_RC_IMX6_MSI_CAP +
- PCI_MSI_FLAGS);
- val |= PCI_MSI_FLAGS_ENABLE;
- dw_pcie_writew_dbi(pci, PCIE_RC_IMX6_MSI_CAP + PCI_MSI_FLAGS,
- val);
+ ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
+ &pp->io_base);
+ if (ret)
+ return ret;
+
+ ret = devm_request_pci_bus_resources(&pdev->dev, &res);
+ if (ret) {
+ dev_err(dev, "missing ranges property\n");
+ pci_free_resource_list(&res);
+ return ret;
+ }
+
+ /* Get the I/O and memory ranges from DT */
+ resource_list_for_each_entry_safe(win, tmp, &res) {
+ switch (resource_type(win->res)) {
+ case IORESOURCE_MEM:
+ pp->mem = win->res;
+ pp->mem->name = "MEM";
+ pp->mem_size = resource_size(pp->mem);
+ pp->mem_bus_addr = pp->mem->start - win->offset;
+ break;
+ }
+ }
+
+ pp->mem_base = pp->mem->start;
+ pp->ops = &imx6_pcie_host_ops;
+ dev_info(dev, " try to initialize pcie ep.\n");
+ ret = imx6_pcie_host_init(pp);
+ if (ret) {
+ dev_info(dev, " fail to initialize pcie ep.\n");
+ return ret;
+ }
+
+ dw_pcie_dbi_ro_wr_en(pci);
+ imx6_pcie_setup_ep(pci);
+ pci_imx_set_msi_en(pp);
+ platform_set_drvdata(pdev, imx6_pcie);
+ imx6_pcie_regions_setup(dev);
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ /*
+ * The iMX6SX PCIe has a stand-alone power domain.
+ * As in the iMX6SX PCIe initialization, release the
+ * PCIe PHY reset here, before LTSSM enable is set.
+ */
+ if (imx6_pcie->drvdata->variant == IMX6SX)
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
+ BIT(19), 0 << 19);
+
+ /* assert LTSSM enable */
+ imx6_pcie_ltssm_enable(dev);
+
+ dev_info(dev, "PCIe EP: waiting for link up...\n");
+ /* Link up is indicated by bit 4 of the PHY DEBUG_R1 register */
+ do {
+ usleep_range(10, 20);
+ if (time_after(jiffies, timeout)) {
+ dev_info(dev, "PCIe EP: link down.\n");
+ return 0;
+ }
+ val = readl(pci->dbi_base + PCIE_PHY_DEBUG_R1);
+ } while ((val & 0x10) == 0);
+
+ /* self io test */
+ /* Check whether the DMA interrupt exists */
+ irq = platform_get_irq_byname(pdev, "dma");
+ if (irq > 0)
+ dma_en = 1;
+ else
+ dma_en = 0;
+ if (dma_en) {
+ /* configure the DMA INT ISR */
+ ret = request_irq(irq, imx6_pcie_dma_isr,
+ IRQF_SHARED, "imx-pcie-dma", pp);
+ if (ret) {
+ pr_err("register interrupt %d failed, rc %d\n",
+ irq, ret);
+ dma_en = 0;
+ }
+ test_reg1 = dma_alloc_coherent(dev, test_region_size,
+ &test_reg1_dma, GFP_KERNEL);
+ test_reg2 = dma_alloc_coherent(dev, test_region_size,
+ &test_reg2_dma, GFP_KERNEL);
+ if (!(test_reg1 && test_reg2))
+ dma_en = 0; /* Fall back to PIO. */
+ dma_r_end = dma_w_end = 0;
+
+ val = readl(pci->dbi_base + DMA_CTRL_VIEWPORT_OFF);
+ if (val == 0xffffffff)
+ imx6_pcie->dma_unroll_offset =
+ DMA_UNROLL_CDM_OFFSET - DMA_REG_OFFSET;
+ else
+ imx6_pcie->dma_unroll_offset = 0;
+ }
+
+ if (unlikely(dma_en == 0)) {
+ test_reg1 = devm_kzalloc(&pdev->dev,
+ test_region_size, GFP_KERNEL);
+ if (!test_reg1) {
+ ret = -ENOMEM;
+ return ret;
+ }
+
+ test_reg2 = devm_kzalloc(&pdev->dev,
+ test_region_size, GFP_KERNEL);
+ if (!test_reg2) {
+ ret = -ENOMEM;
+ return ret;
+ }
+ }
+
+ pcie_arb_base_addr = ioremap_nocache(pp->mem_base,
+ test_region_size);
+ if (!pcie_arb_base_addr) {
+ dev_err(dev, "ioremap error in ep io test\n");
+ ret = -ENOMEM;
+ return ret;
+ }
+
+ for (i = 0; i < test_region_size; i = i + 4) {
+ writel(0xE6600D00 + i, test_reg1 + i);
+ writel(0xDEADBEAF, test_reg2 + i);
+ }
+
+ /* The PCIe EP starts the data transfer after link up */
+ dev_info(dev, "pcie ep: Starting data transfer...\n");
+ ktime_get_real_ts64(&tv1s);
+
+ /* EP write the test region to remote RC's DDR memory */
+ if (dma_en) {
+ imx6_pcie_local_dma_start(pp, 0, 0, test_reg1_dma,
+ imx6_pcie_cpu_addr_fixup(imx6_pcie->pci, pp->mem_base),
+ test_region_size);
+ timeout = jiffies + msecs_to_jiffies(300);
+ do {
+ udelay(1);
+ if (time_after(jiffies, timeout)) {
+ dev_info(dev, "dma write no end ...\n");
+ break;
+ }
+ } while (!dma_w_end);
+ } else {
+ memcpy((unsigned int *)pcie_arb_base_addr,
+ (unsigned int *)test_reg1,
+ test_region_size);
+ }
+
+ ktime_get_real_ts64(&tv1e);
+
+ ktime_get_real_ts64(&tv2s);
+ /* EP read the test region back from remote RC's DDR memory */
+ if (dma_en) {
+ imx6_pcie_local_dma_start(pp, 1, 0,
+ imx6_pcie_cpu_addr_fixup(imx6_pcie->pci, pp->mem_base),
+ test_reg2_dma, test_region_size);
+ timeout = jiffies + msecs_to_jiffies(300);
+ do {
+ udelay(1);
+ if (time_after(jiffies, timeout)) {
+ dev_info(dev, "dma read no end\n");
+ break;
+ }
+ } while (!dma_r_end);
+ } else {
+ memcpy((unsigned int *)test_reg2,
+ (unsigned int *)pcie_arb_base_addr,
+ test_region_size);
+ }
+
+ ktime_get_real_ts64(&tv2e);
+ if (memcmp(test_reg2, test_reg1, test_region_size) == 0) {
+ tv_count1 = (tv1e.tv_sec - tv1s.tv_sec)
+ * USEC_PER_SEC
+ + (tv1e.tv_nsec - tv1s.tv_nsec) / 1000;
+ tv_count2 = (tv2e.tv_sec - tv2s.tv_sec)
+ * USEC_PER_SEC
+ + (tv2e.tv_nsec - tv2s.tv_nsec) / 1000;
+
+ dev_info(dev, "ep: Data %s transfer is successful.\n",
+ dma_en ? "DMA" : "PIO");
+ dev_info(dev, "ep: Data write %dus speed:%ldMB/s.\n",
+ tv_count1,
+ ((test_region_size/1024)
+ * MSEC_PER_SEC)
+ /(tv_count1));
+ dev_info(dev, "ep: Data read %dus speed:%ldMB/s.\n",
+ tv_count2,
+ ((test_region_size/1024)
+ * MSEC_PER_SEC)
+ /(tv_count2));
+ } else {
+ dev_info(dev, "ep: Data transfer is failed.\n");
+ } /* end of self io test. */
+ } else {
+ ret = imx6_add_pcie_port(imx6_pcie, pdev);
+ if (ret < 0) {
+ if (IS_ENABLED(CONFIG_PCI_IMX6_COMPLIANCE_TEST)) {
+ /* The PCIe clocks are left on for the compliance tests */
+ dev_info(dev, "Keeping clocks on for the compliance tests.\n");
+ ret = 0;
+ } else {
+ dev_err(dev, "unable to add pcie port.\n");
+ }
+ return ret;
+ }
+
+ if (IS_ENABLED(CONFIG_RC_MODE_IN_EP_RC_SYS)
+ && (imx6_pcie->hard_wired == 0))
+ imx6_pcie_regions_setup(&pdev->dev);
+
+ /*
+ * If L1SS is enabled, disable the override after link up.
+ * Let CLK_REQ# be controlled by the L1SS hardware automatically.
+ */
+ ret = imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_L1SS;
+ if (IS_ENABLED(CONFIG_PCIEASPM_POWER_SUPERSAVE) && (ret > 0)) {
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX8MQ:
+ case IMX8MM:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ imx6_pcie_grp_offset(imx6_pcie),
+ IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN,
+ 0);
+ break;
+ default:
+ break;
+ }
+ }
}
return 0;
@@ -1224,7 +2540,8 @@ static const struct imx6_pcie_drvdata drvdata[] = {
[IMX6QP] = {
.variant = IMX6QP,
.flags = IMX6_PCIE_FLAG_IMX6_PHY |
- IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
+ IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
+ IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
},
[IMX7D] = {
.variant = IMX7D,
@@ -1232,6 +2549,23 @@ static const struct imx6_pcie_drvdata drvdata[] = {
},
[IMX8MQ] = {
.variant = IMX8MQ,
+ .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND |
+ IMX6_PCIE_FLAG_SUPPORTS_L1SS,
+ },
+ [IMX8MM] = {
+ .variant = IMX8MM,
+ .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND |
+ IMX6_PCIE_FLAG_SUPPORTS_L1SS,
+ },
+ [IMX8QM] = {
+ .variant = IMX8QM,
+ .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND |
+ IMX6_PCIE_FLAG_IMX6_CPU_ADDR_FIXUP,
+ },
+ [IMX8QXP] = {
+ .variant = IMX8QXP,
+ .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND |
+ IMX6_PCIE_FLAG_IMX6_CPU_ADDR_FIXUP,
},
};
@@ -1240,7 +2574,10 @@ static const struct of_device_id imx6_pcie_of_match[] = {
{ .compatible = "fsl,imx6sx-pcie", .data = &drvdata[IMX6SX], },
{ .compatible = "fsl,imx6qp-pcie", .data = &drvdata[IMX6QP], },
{ .compatible = "fsl,imx7d-pcie", .data = &drvdata[IMX7D], },
- { .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], } ,
+ { .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], },
+ { .compatible = "fsl,imx8mm-pcie", .data = &drvdata[IMX8MM], },
+ { .compatible = "fsl,imx8qm-pcie", .data = &drvdata[IMX8QM], },
+ { .compatible = "fsl,imx8qxp-pcie", .data = &drvdata[IMX8QXP], },
{},
};
diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c
index 3a5fa26d5e56..f24f79a70d9a 100644
--- a/drivers/pci/controller/dwc/pci-layerscape.c
+++ b/drivers/pci/controller/dwc/pci-layerscape.c
@@ -263,6 +263,7 @@ static const struct ls_pcie_drvdata ls2088_drvdata = {
static const struct of_device_id ls_pcie_of_match[] = {
{ .compatible = "fsl,ls1012a-pcie", .data = &ls1046_drvdata },
{ .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata },
+ { .compatible = "fsl,ls1028a-pcie", .data = &ls2088_drvdata },
{ .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata },
{ .compatible = "fsl,ls1046a-pcie", .data = &ls1046_drvdata },
{ .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata },
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 0f36a926059a..a785a4f0c231 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -156,8 +156,8 @@ static void dw_pci_bottom_mask(struct irq_data *d)
bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
pp->irq_mask[ctrl] |= BIT(bit);
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
- pp->irq_mask[ctrl]);
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4,
+ ~pp->irq_mask[ctrl]);
raw_spin_unlock_irqrestore(&pp->lock, flags);
}
@@ -175,8 +175,8 @@ static void dw_pci_bottom_unmask(struct irq_data *d)
bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
pp->irq_mask[ctrl] &= ~BIT(bit);
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
- pp->irq_mask[ctrl]);
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4,
+ ~pp->irq_mask[ctrl]);
raw_spin_unlock_irqrestore(&pp->lock, flags);
}
@@ -653,18 +653,19 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
dw_pcie_setup(pci);
if (!pp->ops->msi_host_init) {
+ /* Program the msi_data */
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
+ lower_32_bits((u64)pp->msi_data));
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
+ upper_32_bits((u64)pp->msi_data));
+
num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
/* Initialize IRQ Status array */
- for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
- pp->irq_mask[ctrl] = ~0;
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK +
- (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
- 4, pp->irq_mask[ctrl]);
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
+ for (ctrl = 0; ctrl < num_ctrls; ctrl++)
+ dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
(ctrl * MSI_REG_CTRL_BLOCK_SIZE),
- 4, ~0);
- }
+ 4, &pp->irq_mask[ctrl]);
}
/* Setup RC BARs */
diff --git a/drivers/pci/controller/mobiveil/Kconfig b/drivers/pci/controller/mobiveil/Kconfig
new file mode 100644
index 000000000000..0696b6e98321
--- /dev/null
+++ b/drivers/pci/controller/mobiveil/Kconfig
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menu "Mobiveil PCIe Core Support"
+ depends on PCI
+
+config PCIE_MOBIVEIL
+ bool
+
+config PCIE_MOBIVEIL_HOST
+ bool
+ depends on PCI_MSI_IRQ_DOMAIN
+ select PCIE_MOBIVEIL
+
+config PCIE_MOBIVEIL_EP
+ bool
+ depends on PCI_ENDPOINT
+ select PCIE_MOBIVEIL
+
+config PCIE_MOBIVEIL_PLAT
+ bool "Mobiveil AXI PCIe controller"
+ depends on ARCH_ZYNQMP || COMPILE_TEST
+ depends on OF
+ select PCIE_MOBIVEIL_HOST
+ help
+ Say Y here if you want to enable support for the Mobiveil AXI
+ PCIe Soft IP, a PCIe Gen4 IP with up to 8 outbound and inbound
+ windows for address translation.
+
+config PCIE_LAYERSCAPE_GEN4
+ bool "Freescale Layerscpe PCIe Gen4 controller in RC mode"
+ depends on PCI
+ depends on OF && (ARM64 || ARCH_LAYERSCAPE)
+ depends on PCI_MSI_IRQ_DOMAIN
+ select PCIE_MOBIVEIL_HOST
+ help
+ Say Y here if you want PCIe Gen4 controller support on
+ Layerscape SoCs. The PCIe controller works in RC mode
+ when RCW[HOST_AGT_PEX] is set to 0.
+
+config PCIE_LAYERSCAPE_GEN4_EP
+ bool "Freescale Layerscpe PCIe Gen4 controller in EP mode"
+ depends on PCI
+ depends on OF && (ARM64 || ARCH_LAYERSCAPE)
+ depends on PCI_ENDPOINT
+ select PCIE_MOBIVEIL_EP
+ help
+ Say Y here if you want PCIe Gen4 controller support on
+ Layerscape SoCs. The PCIe controller works in EP mode
+ when RCW[HOST_AGT_PEX] is set to 1.
+endmenu
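As the "select" lines above imply, enabling one of the SoC drivers pulls in the Mobiveil core pieces automatically. A hypothetical defconfig fragment for the LX2160A host, for example:

    CONFIG_PCIE_LAYERSCAPE_GEN4=y
    # PCIE_MOBIVEIL and PCIE_MOBIVEIL_HOST are selected automatically
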
diff --git a/drivers/pci/controller/mobiveil/Makefile b/drivers/pci/controller/mobiveil/Makefile
new file mode 100644
index 000000000000..6f548566dde8
--- /dev/null
+++ b/drivers/pci/controller/mobiveil/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PCIE_MOBIVEIL) += pcie-mobiveil.o
+obj-$(CONFIG_PCIE_MOBIVEIL_HOST) += pcie-mobiveil-host.o
+obj-$(CONFIG_PCIE_MOBIVEIL_EP) += pcie-mobiveil-ep.o
+obj-$(CONFIG_PCIE_MOBIVEIL_PLAT) += pcie-mobiveil-plat.o
+obj-$(CONFIG_PCIE_LAYERSCAPE_GEN4) += pcie-layerscape-gen4.o
+obj-$(CONFIG_PCIE_LAYERSCAPE_GEN4_EP) += pcie-layerscape-gen4-ep.o
diff --git a/drivers/pci/controller/mobiveil/pcie-layerscape-gen4-ep.c b/drivers/pci/controller/mobiveil/pcie-layerscape-gen4-ep.c
new file mode 100644
index 000000000000..78a3b250c23f
--- /dev/null
+++ b/drivers/pci/controller/mobiveil/pcie-layerscape-gen4-ep.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe controller EP driver for Freescale Layerscape SoCs
+ *
+ * Copyright 2019 NXP
+ *
+ * Author: Xiaowei Bao <xiaowei.bao@nxp.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+
+#include "pcie-mobiveil.h"
+
+#define PCIE_LX2_BAR_NUM 4
+
+#define to_ls_pcie_g4_ep(x) dev_get_drvdata((x)->dev)
+
+struct ls_pcie_g4_ep {
+ struct mobiveil_pcie *mv_pci;
+};
+
+static const struct of_device_id ls_pcie_g4_ep_of_match[] = {
+ { .compatible = "fsl,lx2160a-pcie-ep",},
+ { },
+};
+
+static const struct pci_epc_features ls_pcie_g4_epc_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = true,
+ .reserved_bar = (1 << BAR_4) | (1 << BAR_5),
+};
+
+static const struct pci_epc_features*
+ls_pcie_g4_ep_get_features(struct mobiveil_pcie_ep *ep)
+{
+ return &ls_pcie_g4_epc_features;
+}
+
+static void ls_pcie_g4_ep_init(struct mobiveil_pcie_ep *ep)
+{
+ struct mobiveil_pcie *mv_pci = to_mobiveil_pcie_from_ep(ep);
+ int win_idx;
+ u8 bar;
+ u32 val;
+
+ /*
+ * Errata: on an unsupported request error for an inbound posted
+ * write transaction, the PCIe controller reports an advisory
+ * error instead of an uncorrectable error message to the RC.
+ * Workaround: set bit 20 (Unsupported_Request_Error_Severity)
+ * to 1 in the Uncorrectable_Error_Severity register, so that an
+ * unsupported request error generates a fatal error.
+ */
+ val = csr_readl(mv_pci, CFG_UNCORRECTABLE_ERROR_SEVERITY);
+ val |= 1 << UNSUPPORTED_REQUEST_ERROR_SHIFT;
+ csr_writel(mv_pci, val, CFG_UNCORRECTABLE_ERROR_SEVERITY);
+
+ ep->bar_num = PCIE_LX2_BAR_NUM;
+
+ for (bar = BAR_0; bar < ep->epc->max_functions * ep->bar_num; bar++)
+ mobiveil_pcie_ep_reset_bar(mv_pci, bar);
+
+ for (win_idx = 0; win_idx < ep->apio_wins; win_idx++)
+ mobiveil_pcie_disable_ob_win(mv_pci, win_idx);
+}
+
+static int ls_pcie_g4_ep_raise_irq(struct mobiveil_pcie_ep *ep, u8 func_no,
+ enum pci_epc_irq_type type,
+ u16 interrupt_num)
+{
+ struct mobiveil_pcie *mv_pci = to_mobiveil_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ return mobiveil_pcie_ep_raise_legacy_irq(ep, func_no);
+ case PCI_EPC_IRQ_MSI:
+ return mobiveil_pcie_ep_raise_msi_irq(ep, func_no,
+ interrupt_num);
+ case PCI_EPC_IRQ_MSIX:
+ return mobiveil_pcie_ep_raise_msix_irq(ep, func_no,
+ interrupt_num);
+ default:
+ dev_err(&mv_pci->pdev->dev, "UNKNOWN IRQ type\n");
+ }
+
+ return 0;
+}
+
+static const struct mobiveil_pcie_ep_ops pcie_ep_ops = {
+ .ep_init = ls_pcie_g4_ep_init,
+ .raise_irq = ls_pcie_g4_ep_raise_irq,
+ .get_features = ls_pcie_g4_ep_get_features,
+};
+
+static int __init ls_pcie_gen4_add_pcie_ep(struct ls_pcie_g4_ep *ls_ep,
+ struct platform_device *pdev)
+{
+ struct mobiveil_pcie *mv_pci = ls_ep->mv_pci;
+ struct device *dev = &pdev->dev;
+ struct mobiveil_pcie_ep *ep;
+ struct resource *res;
+ int ret;
+
+ ep = &mv_pci->ep;
+ ep->ops = &pcie_ep_ops;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
+ if (!res)
+ return -EINVAL;
+
+ ep->phys_base = res->start;
+ ep->addr_size = resource_size(res);
+
+ ret = mobiveil_pcie_ep_init(ep);
+ if (ret) {
+ dev_err(dev, "failed to initialize layerscape endpoint\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __init ls_pcie_g4_ep_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mobiveil_pcie *mv_pci;
+ struct ls_pcie_g4_ep *ls_ep;
+ struct resource *res;
+ int ret;
+
+ ls_ep = devm_kzalloc(dev, sizeof(*ls_ep), GFP_KERNEL);
+ if (!ls_ep)
+ return -ENOMEM;
+
+ mv_pci = devm_kzalloc(dev, sizeof(*mv_pci), GFP_KERNEL);
+ if (!mv_pci)
+ return -ENOMEM;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
+ mv_pci->csr_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(mv_pci->csr_axi_slave_base))
+ return PTR_ERR(mv_pci->csr_axi_slave_base);
+
+ mv_pci->pdev = pdev;
+ ls_ep->mv_pci = mv_pci;
+
+ platform_set_drvdata(pdev, ls_ep);
+
+ ret = ls_pcie_gen4_add_pcie_ep(ls_ep, pdev);
+
+ return ret;
+}
+
+static struct platform_driver ls_pcie_g4_ep_driver = {
+ .driver = {
+ .name = "layerscape-pcie-gen4-ep",
+ .of_match_table = ls_pcie_g4_ep_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver_probe(ls_pcie_g4_ep_driver, ls_pcie_g4_ep_probe);
diff --git a/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c b/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
new file mode 100644
index 000000000000..98c56d7e0a4b
--- /dev/null
+++ b/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe Gen4 host controller driver for NXP Layerscape SoCs
+ *
+ * Copyright 2019 NXP
+ *
+ * Author: Zhiqiang Hou <Zhiqiang.Hou@nxp.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+#include "pcie-mobiveil.h"
+
+#define REV_1_0 (0x10)
+
+/* LUT and PF control registers */
+#define PCIE_LUT_OFF 0x80000
+#define PCIE_LUT_GCR (0x28)
+#define PCIE_LUT_GCR_RRE (0)
+#define PCIE_PF_OFF 0xc0000
+#define PCIE_PF_INT_STAT 0x18
+#define PF_INT_STAT_PABRST BIT(31)
+
+#define PCIE_PF_DBG 0x7fc
+#define PF_DBG_LTSSM_MASK 0x3f
+#define PF_DBG_LTSSM_L0 0x2d /* L0 state */
+#define PF_DBG_WE BIT(31)
+#define PF_DBG_PABR BIT(27)
+
+#define to_ls_pcie_g4(x) platform_get_drvdata((x)->pdev)
+
+struct ls_pcie_g4 {
+ struct mobiveil_pcie pci;
+ struct delayed_work dwork;
+ int irq;
+ u8 rev;
+};
+
+static inline u32 ls_pcie_g4_lut_readl(struct ls_pcie_g4 *pcie, u32 off)
+{
+ return ioread32(pcie->pci.csr_axi_slave_base + PCIE_LUT_OFF + off);
+}
+
+static inline void ls_pcie_g4_lut_writel(struct ls_pcie_g4 *pcie,
+ u32 off, u32 val)
+{
+ iowrite32(val, pcie->pci.csr_axi_slave_base + PCIE_LUT_OFF + off);
+}
+
+static inline u32 ls_pcie_g4_pf_readl(struct ls_pcie_g4 *pcie, u32 off)
+{
+ return ioread32(pcie->pci.csr_axi_slave_base + PCIE_PF_OFF + off);
+}
+
+static inline void ls_pcie_g4_pf_writel(struct ls_pcie_g4 *pcie,
+ u32 off, u32 val)
+{
+ iowrite32(val, pcie->pci.csr_axi_slave_base + PCIE_PF_OFF + off);
+}
+
+static bool ls_pcie_g4_is_bridge(struct ls_pcie_g4 *pcie)
+{
+ struct mobiveil_pcie *mv_pci = &pcie->pci;
+ u32 header_type;
+
+ header_type = csr_readb(mv_pci, PCI_HEADER_TYPE);
+ header_type &= 0x7f;
+
+ return header_type == PCI_HEADER_TYPE_BRIDGE;
+}
+
+static void workaround_A011451(struct ls_pcie_g4 *pcie)
+{
+ struct mobiveil_pcie *mv_pci = &pcie->pci;
+ u32 val;
+
+ /* Set ACK latency timeout */
+ val = csr_readl(mv_pci, GPEX_ACK_REPLAY_TO);
+ val &= ~(ACK_LAT_TO_VAL_MASK << ACK_LAT_TO_VAL_SHIFT);
+ val |= (4 << ACK_LAT_TO_VAL_SHIFT);
+ csr_writel(mv_pci, val, GPEX_ACK_REPLAY_TO);
+}
+
+static int ls_pcie_g4_host_init(struct mobiveil_pcie *pci)
+{
+ struct ls_pcie_g4 *pcie = to_ls_pcie_g4(pci);
+
+ pcie->rev = csr_readb(pci, PCI_REVISION_ID);
+
+ if (pcie->rev == REV_1_0)
+ workaround_A011451(pcie);
+
+ return 0;
+}
+
+static int ls_pcie_g4_link_up(struct mobiveil_pcie *pci)
+{
+ struct ls_pcie_g4 *pcie = to_ls_pcie_g4(pci);
+ u32 state;
+
+ state = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
+ state = state & PF_DBG_LTSSM_MASK;
+
+ if (state == PF_DBG_LTSSM_L0)
+ return 1;
+
+ return 0;
+}
+
+static void ls_pcie_g4_disable_interrupt(struct ls_pcie_g4 *pcie)
+{
+ struct mobiveil_pcie *mv_pci = &pcie->pci;
+
+ csr_writel(mv_pci, 0, PAB_INTP_AMBA_MISC_ENB);
+}
+
+static void ls_pcie_g4_enable_interrupt(struct ls_pcie_g4 *pcie)
+{
+ struct mobiveil_pcie *mv_pci = &pcie->pci;
+ u32 val;
+
+ /* Clear the interrupt status */
+ csr_writel(mv_pci, 0xffffffff, PAB_INTP_AMBA_MISC_STAT);
+
+ val = PAB_INTP_INTX_MASK | PAB_INTP_MSI | PAB_INTP_RESET |
+ PAB_INTP_PCIE_UE | PAB_INTP_IE_PMREDI | PAB_INTP_IE_EC;
+ csr_writel(mv_pci, val, PAB_INTP_AMBA_MISC_ENB);
+}
+
+static void ls_pcie_g4_reinit_hw(struct ls_pcie_g4 *pcie)
+{
+ struct mobiveil_pcie *mv_pci = &pcie->pci;
+ struct device *dev = &mv_pci->pdev->dev;
+ u32 val, act_stat;
+ int to = 100;
+
+ /* Poll for pab_csb_reset to set and PAB activity to clear */
+ do {
+ usleep_range(10, 15);
+ val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_INT_STAT);
+ act_stat = csr_readl(mv_pci, PAB_ACTIVITY_STAT);
+ } while (((val & PF_INT_STAT_PABRST) == 0 || act_stat) && to--);
+ if (to < 0) {
+ dev_err(dev, "Poll PABRST&PABACT timeout\n");
+ return;
+ }
+
+ /* clear PEX_RESET bit in PEX_PF0_DBG register */
+ val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
+ val |= PF_DBG_WE;
+ ls_pcie_g4_pf_writel(pcie, PCIE_PF_DBG, val);
+
+ val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
+ val |= PF_DBG_PABR;
+ ls_pcie_g4_pf_writel(pcie, PCIE_PF_DBG, val);
+
+ val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
+ val &= ~PF_DBG_WE;
+ ls_pcie_g4_pf_writel(pcie, PCIE_PF_DBG, val);
+
+ mobiveil_host_init(mv_pci, true);
+
+ to = 100;
+ while (!ls_pcie_g4_link_up(mv_pci) && to--)
+ usleep_range(200, 250);
+ if (to < 0)
+ dev_err(dev, "PCIe link training timeout\n");
+}
+
+static irqreturn_t ls_pcie_g4_isr(int irq, void *dev_id)
+{
+ struct ls_pcie_g4 *pcie = (struct ls_pcie_g4 *)dev_id;
+ struct mobiveil_pcie *mv_pci = &pcie->pci;
+ u32 val;
+
+ val = csr_readl(mv_pci, PAB_INTP_AMBA_MISC_STAT);
+ if (!val)
+ return IRQ_NONE;
+
+ if (val & PAB_INTP_RESET) {
+ ls_pcie_g4_disable_interrupt(pcie);
+ schedule_delayed_work(&pcie->dwork, msecs_to_jiffies(1));
+ }
+
+ csr_writel(mv_pci, val, PAB_INTP_AMBA_MISC_STAT);
+
+ return IRQ_HANDLED;
+}
+
+static int ls_pcie_g4_interrupt_init(struct mobiveil_pcie *mv_pci)
+{
+ struct ls_pcie_g4 *pcie = to_ls_pcie_g4(mv_pci);
+ struct platform_device *pdev = mv_pci->pdev;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ pcie->irq = platform_get_irq_byname(pdev, "intr");
+ if (pcie->irq < 0) {
+ dev_err(dev, "Can't get 'intr' IRQ, errno = %d\n", pcie->irq);
+ return pcie->irq;
+ }
+ ret = devm_request_irq(dev, pcie->irq, ls_pcie_g4_isr,
+ IRQF_SHARED, pdev->name, pcie);
+ if (ret) {
+ dev_err(dev, "Can't register PCIe IRQ, errno = %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ls_pcie_g4_reset(struct work_struct *work)
+{
+ struct delayed_work *dwork = container_of(work, struct delayed_work,
+ work);
+ struct ls_pcie_g4 *pcie = container_of(dwork, struct ls_pcie_g4, dwork);
+ struct mobiveil_pcie *mv_pci = &pcie->pci;
+ u16 ctrl;
+
+ ctrl = csr_readw(mv_pci, PCI_BRIDGE_CONTROL);
+ ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
+ csr_writew(mv_pci, ctrl, PCI_BRIDGE_CONTROL);
+ ls_pcie_g4_reinit_hw(pcie);
+ ls_pcie_g4_enable_interrupt(pcie);
+}
+
+static int ls_pcie_g4_read_other_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ struct mobiveil_pcie *pci = bus->sysdata;
+ struct ls_pcie_g4 *pcie = to_ls_pcie_g4(pci);
+ int ret;
+
+ if (pcie->rev == REV_1_0)
+ ls_pcie_g4_lut_writel(pcie, PCIE_LUT_GCR,
+ 0 << PCIE_LUT_GCR_RRE);
+
+ ret = pci_generic_config_read(bus, devfn, where, size, val);
+
+ if (pcie->rev == REV_1_0)
+ ls_pcie_g4_lut_writel(pcie, PCIE_LUT_GCR,
+ 1 << PCIE_LUT_GCR_RRE);
+
+ return ret;
+}
+
+static struct mobiveil_rp_ops ls_pcie_g4_rp_ops = {
+ .interrupt_init = ls_pcie_g4_interrupt_init,
+ .read_other_conf = ls_pcie_g4_read_other_conf,
+};
+
+static const struct mobiveil_pab_ops ls_pcie_g4_pab_ops = {
+ .link_up = ls_pcie_g4_link_up,
+ .host_init = ls_pcie_g4_host_init,
+};
+
+static int __init ls_pcie_g4_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pci_host_bridge *bridge;
+ struct mobiveil_pcie *mv_pci;
+ struct ls_pcie_g4 *pcie;
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ if (!of_parse_phandle(np, "msi-parent", 0)) {
+ dev_err(dev, "Failed to find msi-parent\n");
+ return -EINVAL;
+ }
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+ if (!bridge)
+ return -ENOMEM;
+
+ pcie = pci_host_bridge_priv(bridge);
+ mv_pci = &pcie->pci;
+
+ mv_pci->pdev = pdev;
+ mv_pci->ops = &ls_pcie_g4_pab_ops;
+ mv_pci->rp.ops = &ls_pcie_g4_rp_ops;
+ mv_pci->bridge = bridge;
+
+ platform_set_drvdata(pdev, pcie);
+
+ INIT_DELAYED_WORK(&pcie->dwork, ls_pcie_g4_reset);
+
+ ret = mobiveil_pcie_host_probe(mv_pci);
+ if (ret) {
+ dev_err(dev, "Fail to probe\n");
+ return ret;
+ }
+
+ if (!ls_pcie_g4_is_bridge(pcie))
+ return -ENODEV;
+
+ ls_pcie_g4_enable_interrupt(pcie);
+
+ return 0;
+}
+
+static const struct of_device_id ls_pcie_g4_of_match[] = {
+ { .compatible = "fsl,lx2160a-pcie", },
+ { },
+};
+
+static struct platform_driver ls_pcie_g4_driver = {
+ .driver = {
+ .name = "layerscape-pcie-gen4",
+ .of_match_table = ls_pcie_g4_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+builtin_platform_driver_probe(ls_pcie_g4_driver, ls_pcie_g4_probe);
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil-ep.c b/drivers/pci/controller/mobiveil/pcie-mobiveil-ep.c
new file mode 100644
index 000000000000..66c0e25bba08
--- /dev/null
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-ep.c
@@ -0,0 +1,568 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Mobiveil PCIe Endpoint controller driver
+ *
+ * Copyright 2019 NXP
+ * Author: Xiaowei Bao <xiaowei.bao@nxp.com>
+ */
+
+#include <linux/of.h>
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+#include <linux/platform_device.h>
+#include "pcie-mobiveil.h"
+
+static void mobiveil_pcie_ep_func_select(struct mobiveil_pcie *pcie, u8 func_no)
+{
+ u32 func_num;
+
+ /*
+ * Select the config space of func_no by writing func_no to the
+ * FUNC_SEL field of the PAB_CTRL register.
+ */
+ func_num = csr_readl(pcie, PAB_CTRL);
+ func_num &= ~(FUNC_SEL_MASK << FUNC_SEL_SHIFT);
+ func_num |= (func_no & FUNC_SEL_MASK) << FUNC_SEL_SHIFT;
+ csr_writel(pcie, func_num, PAB_CTRL);
+}
+
+static void mobiveil_pcie_ep_func_deselect(struct mobiveil_pcie *pcie)
+{
+ u32 func_num;
+
+ /*
+ * Clear the FUNC_SEL field when accessing registers other than
+ * the config space registers.
+ */
+ func_num = csr_readl(pcie, PAB_CTRL);
+ func_num &= ~(FUNC_SEL_MASK << FUNC_SEL_SHIFT);
+ csr_writel(pcie, func_num, PAB_CTRL);
+}
+
+static void __mobiveil_pcie_ep_reset_bar(struct mobiveil_pcie *pcie, u8 bar)
+{
+ csr_writel(pcie, bar, GPEX_BAR_SELECT);
+ csr_writel(pcie, 0, GPEX_BAR_SIZE_LDW);
+ csr_writel(pcie, 0, GPEX_BAR_SIZE_UDW);
+}
+
+void mobiveil_pcie_ep_reset_bar(struct mobiveil_pcie *pcie, u8 bar)
+{
+ __mobiveil_pcie_ep_reset_bar(pcie, bar);
+}
+
+static u8 __mobiveil_pcie_ep_find_next_cap(struct mobiveil_pcie *pcie,
+ u8 func_no, u8 cap_ptr, u8 cap)
+{
+ u8 cap_id, next_cap_ptr;
+ u16 reg;
+
+ if (!cap_ptr)
+ return 0;
+
+ mobiveil_pcie_ep_func_select(pcie, func_no);
+
+ reg = csr_readw(pcie, cap_ptr);
+ cap_id = (reg & 0x00ff);
+
+ mobiveil_pcie_ep_func_deselect(pcie);
+
+ if (cap_id > PCI_CAP_ID_MAX)
+ return 0;
+
+ if (cap_id == cap)
+ return cap_ptr;
+
+ next_cap_ptr = (reg & 0xff00) >> 8;
+ return __mobiveil_pcie_ep_find_next_cap(pcie, func_no,
+ next_cap_ptr, cap);
+}
+
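The recursive walk above relies on the cap_id > PCI_CAP_ID_MAX check to terminate; the PCI core bounds the same walk with a hop budget instead. An equivalent iterative sketch with such a bound (48 hops, matching the core's TTL):

    /* Iterative equivalent, bounded to 48 hops like the PCI core. */
    static u8 ep_find_cap_bounded(struct mobiveil_pcie *pcie, u8 func_no,
                                  u8 cap_ptr, u8 cap)
    {
            u8 found = 0;
            int ttl = 48;
            u16 reg;

            mobiveil_pcie_ep_func_select(pcie, func_no);
            while (cap_ptr && ttl--) {
                    reg = csr_readw(pcie, cap_ptr);
                    if ((reg & 0x00ff) == cap) {
                            found = cap_ptr;
                            break;
                    }
                    cap_ptr = (reg & 0xff00) >> 8;
            }
            mobiveil_pcie_ep_func_deselect(pcie);

            return found;
    }
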
+static u8 mobiveil_pcie_ep_find_capability(struct mobiveil_pcie_ep *ep,
+ u8 func_no, u8 cap)
+{
+ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
+ u8 next_cap_ptr;
+ u16 reg;
+
+ mobiveil_pcie_ep_func_select(pcie, func_no);
+
+ reg = csr_readw(pcie, PCI_CAPABILITY_LIST);
+ next_cap_ptr = (reg & 0x00ff);
+
+ mobiveil_pcie_ep_func_deselect(pcie);
+
+ return __mobiveil_pcie_ep_find_next_cap(pcie, func_no,
+ next_cap_ptr, cap);
+}
+
+static int mobiveil_pcie_ep_write_header(struct pci_epc *epc, u8 func_no,
+ struct pci_epf_header *hdr)
+{
+ struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
+ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
+
+ mobiveil_pcie_ep_func_select(pcie, func_no);
+
+ csr_writew(pcie, hdr->vendorid, PCI_VENDOR_ID);
+ csr_writew(pcie, hdr->deviceid, PCI_DEVICE_ID);
+ csr_writeb(pcie, hdr->revid, PCI_REVISION_ID);
+ csr_writeb(pcie, hdr->progif_code, PCI_CLASS_PROG);
+ csr_writew(pcie, hdr->subclass_code | hdr->baseclass_code << 8,
+ PCI_CLASS_DEVICE);
+ csr_writeb(pcie, hdr->cache_line_size, PCI_CACHE_LINE_SIZE);
+ csr_writew(pcie, hdr->subsys_vendor_id, PCI_SUBSYSTEM_VENDOR_ID);
+ csr_writew(pcie, hdr->subsys_id, PCI_SUBSYSTEM_ID);
+ csr_writeb(pcie, hdr->interrupt_pin, PCI_INTERRUPT_PIN);
+
+ mobiveil_pcie_ep_func_deselect(pcie);
+
+ return 0;
+}
+
+static void mobiveil_pcie_ep_inbound_win(struct mobiveil_pcie_ep *ep,
+ u8 func_no, enum pci_barno bar,
+ dma_addr_t cpu_addr)
+{
+ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
+
+ program_ib_windows_ep(pcie, func_no, bar, cpu_addr);
+}
+
+static int mobiveil_pcie_ep_outbound_win(struct mobiveil_pcie_ep *ep,
+ phys_addr_t phys_addr,
+ u64 pci_addr, u8 func_no,
+ size_t size)
+{
+ u32 free_win;
+ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
+
+ free_win = find_first_zero_bit(ep->apio_wins_map, ep->apio_wins);
+ if (free_win >= ep->apio_wins) {
+ dev_err(&pcie->pdev->dev, "No free outbound window\n");
+ return -EINVAL;
+ }
+
+ program_ob_windows_ep(pcie, func_no, free_win, phys_addr,
+ pci_addr, MEM_WINDOW_TYPE, size);
+
+ set_bit(free_win, ep->apio_wins_map);
+ ep->apio_addr[free_win] = phys_addr;
+
+ return 0;
+}
+
+static void mobiveil_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no,
+ struct pci_epf_bar *epf_bar)
+{
+ struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
+ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
+ enum pci_barno bar = epf_bar->barno;
+
+ if (bar < ep->bar_num) {
+ __mobiveil_pcie_ep_reset_bar(pcie, func_no * ep->bar_num + bar);
+
+ mobiveil_pcie_disable_ib_win_ep(pcie, func_no, bar);
+ }
+}
+
+static int mobiveil_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
+ struct pci_epf_bar *epf_bar)
+{
+ struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
+ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
+ enum pci_barno bar = epf_bar->barno;
+ size_t size = epf_bar->size;
+
+ if (bar < ep->bar_num) {
+ mobiveil_pcie_ep_inbound_win(ep, func_no, bar,
+ epf_bar->phys_addr);
+
+ csr_writel(pcie, func_no * ep->bar_num + bar,
+ GPEX_BAR_SELECT);
+ csr_writel(pcie, lower_32_bits(~(size - 1)),
+ GPEX_BAR_SIZE_LDW);
+ csr_writel(pcie, upper_32_bits(~(size - 1)),
+ GPEX_BAR_SIZE_UDW);
+ }
+
+ return 0;
+}
+
+static int mobiveil_pcie_find_index(struct mobiveil_pcie_ep *ep,
+ phys_addr_t addr,
+ u32 *atu_index)
+{
+ u32 index;
+
+ for (index = 0; index < ep->apio_wins; index++) {
+ if (ep->apio_addr[index] != addr)
+ continue;
+ *atu_index = index;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static void mobiveil_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no,
+ phys_addr_t addr)
+{
+ int ret;
+ u32 atu_index;
+ struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
+ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
+
+ ret = mobiveil_pcie_find_index(ep, addr, &atu_index);
+ if (ret < 0)
+ return;
+
+ mobiveil_pcie_disable_ob_win(pcie, atu_index);
+ clear_bit(atu_index, ep->apio_wins_map);
+}
+
+static int mobiveil_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no,
+ phys_addr_t addr,
+ u64 pci_addr, size_t size)
+{
+ int ret;
+ struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
+ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
+
+ ret = mobiveil_pcie_ep_outbound_win(ep, addr, pci_addr, func_no, size);
+ if (ret) {
+ dev_err(&pcie->pdev->dev, "Failed to enable address\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mobiveil_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no)
+{
+ struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
+ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
+ u32 val, reg;
+ u8 msi_cap;
+
+ msi_cap = mobiveil_pcie_ep_find_capability(ep, func_no,
+ PCI_CAP_ID_MSI);
+ if (!msi_cap)
+ return -EINVAL;
+
+ mobiveil_pcie_ep_func_select(pcie, func_no);
+
+ reg = msi_cap + PCI_MSI_FLAGS;
+ val = csr_readw(pcie, reg);
+
+ mobiveil_pcie_ep_func_deselect(pcie);
+
+ if (!(val & PCI_MSI_FLAGS_ENABLE))
+ return -EINVAL;
+
+ val = (val & PCI_MSI_FLAGS_QSIZE) >> 4;
+
+ return val;
+}
+
+static int mobiveil_pcie_ep_set_msi(struct pci_epc *epc,
+ u8 func_no, u8 interrupts)
+{
+ struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
+ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
+ u32 val, reg;
+ u8 msi_cap;
+
+ msi_cap = mobiveil_pcie_ep_find_capability(ep, func_no,
+ PCI_CAP_ID_MSI);
+ if (!msi_cap)
+ return -EINVAL;
+
+ mobiveil_pcie_ep_func_select(pcie, func_no);
+
+ reg = msi_cap + PCI_MSI_FLAGS;
+ val = csr_readw(pcie, reg);
+ val &= ~PCI_MSI_FLAGS_QMASK;
+ val |= (interrupts << 1) & PCI_MSI_FLAGS_QMASK;
+ csr_writew(pcie, val, reg);
+
+ mobiveil_pcie_ep_func_deselect(pcie);
+
+ return 0;
+}
+
+static int mobiveil_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
+{
+ struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
+ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
+ u32 val, reg;
+ u8 msix_cap;
+
+ msix_cap = mobiveil_pcie_ep_find_capability(ep, func_no,
+ PCI_CAP_ID_MSIX);
+ if (!msix_cap)
+ return -EINVAL;
+
+ mobiveil_pcie_ep_func_select(pcie, func_no);
+
+ reg = msix_cap + PCI_MSIX_FLAGS;
+ val = csr_readw(pcie, reg);
+
+ mobiveil_pcie_ep_func_deselect(pcie);
+
+ if (!(val & PCI_MSIX_FLAGS_ENABLE))
+ return -EINVAL;
+
+ val &= PCI_MSIX_FLAGS_QSIZE;
+
+ return val;
+}
+
+static int mobiveil_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no,
+ u16 interrupts)
+{
+ struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
+ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
+ u32 val, reg;
+ u8 msix_cap;
+
+ msix_cap = mobiveil_pcie_ep_find_capability(ep, func_no,
+ PCI_CAP_ID_MSIX);
+ if (!msix_cap)
+ return -EINVAL;
+
+ mobiveil_pcie_ep_func_select(pcie, func_no);
+
+ reg = msix_cap + PCI_MSIX_FLAGS;
+ val = csr_readw(pcie, reg);
+ val &= ~PCI_MSIX_FLAGS_QSIZE;
+ val |= interrupts;
+ csr_writew(pcie, val, reg);
+
+ mobiveil_pcie_ep_func_deselect(pcie);
+
+ return 0;
+}
+
+static int mobiveil_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no,
+ enum pci_epc_irq_type type,
+ u16 interrupt_num)
+{
+ struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
+
+ if (!ep->ops->raise_irq)
+ return -EINVAL;
+
+ return ep->ops->raise_irq(ep, func_no, type, interrupt_num);
+}
+
+static const struct pci_epc_features*
+mobiveil_pcie_ep_get_features(struct pci_epc *epc, u8 func_no)
+{
+ struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
+
+ if (!ep->ops->get_features)
+ return NULL;
+
+ return ep->ops->get_features(ep);
+}
+
+static const struct pci_epc_ops epc_ops = {
+ .write_header = mobiveil_pcie_ep_write_header,
+ .set_bar = mobiveil_pcie_ep_set_bar,
+ .clear_bar = mobiveil_pcie_ep_clear_bar,
+ .map_addr = mobiveil_pcie_ep_map_addr,
+ .unmap_addr = mobiveil_pcie_ep_unmap_addr,
+ .set_msi = mobiveil_pcie_ep_set_msi,
+ .get_msi = mobiveil_pcie_ep_get_msi,
+ .set_msix = mobiveil_pcie_ep_set_msix,
+ .get_msix = mobiveil_pcie_ep_get_msix,
+ .raise_irq = mobiveil_pcie_ep_raise_irq,
+ .get_features = mobiveil_pcie_ep_get_features,
+};
+
+int mobiveil_pcie_ep_raise_legacy_irq(struct mobiveil_pcie_ep *ep, u8 func_no)
+{
+ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
+
+ dev_err(&pcie->pdev->dev, "EP cannot trigger legacy IRQs\n");
+
+ return -EINVAL;
+}
+
+int mobiveil_pcie_ep_raise_msi_irq(struct mobiveil_pcie_ep *ep, u8 func_no,
+ u8 interrupt_num)
+{
+ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
+ struct pci_epc *epc = ep->epc;
+ u16 msg_ctrl, msg_data;
+ u32 msg_addr_lower, msg_addr_upper, reg;
+ u64 msg_addr;
+ bool has_upper;
+ int ret;
+ u8 msi_cap;
+
+ msi_cap = mobiveil_pcie_ep_find_capability(ep, func_no,
+ PCI_CAP_ID_MSI);
+ if (!msi_cap)
+ return -EINVAL;
+
+ mobiveil_pcie_ep_func_select(pcie, func_no);
+
+ reg = msi_cap + PCI_MSI_FLAGS;
+ msg_ctrl = csr_readw(pcie, reg);
+ has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT);
+ reg = msi_cap + PCI_MSI_ADDRESS_LO;
+ msg_addr_lower = csr_readl(pcie, reg);
+ if (has_upper) {
+ reg = msi_cap + PCI_MSI_ADDRESS_HI;
+ msg_addr_upper = csr_readl(pcie, reg);
+ reg = msi_cap + PCI_MSI_DATA_64;
+ msg_data = csr_readw(pcie, reg);
+ } else {
+ msg_addr_upper = 0;
+ reg = msi_cap + PCI_MSI_DATA_32;
+ msg_data = csr_readw(pcie, reg);
+ }
+ msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
+
+ mobiveil_pcie_ep_func_deselect(pcie);
+
+ ret = mobiveil_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys,
+ msg_addr, epc->mem->page_size);
+ if (ret)
+ return ret;
+
+ writel(msg_data | (interrupt_num - 1), ep->msi_mem);
+
+ mobiveil_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
+
+ return 0;
+}
+
+int mobiveil_pcie_ep_raise_msix_irq(struct mobiveil_pcie_ep *ep, u8 func_no,
+ u16 interrupt_num)
+{
+ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
+ struct pci_epc *epc = ep->epc;
+ u32 msg_addr_upper, msg_addr_lower;
+ u32 msg_data;
+ u64 msg_addr;
+ u8 msix_cap;
+ int ret;
+
+ msix_cap = mobiveil_pcie_ep_find_capability(ep, func_no,
+ PCI_CAP_ID_MSIX);
+ if (!msix_cap)
+ return -EINVAL;
+
+ mobiveil_pcie_ep_func_deselect(pcie);
+
+ msg_addr_lower = csr_readl(pcie, PAB_MSIX_TABLE_PBA_ACCESS +
+ PCI_MSIX_ENTRY_LOWER_ADDR +
+ (interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE);
+ msg_addr_upper = csr_readl(pcie, PAB_MSIX_TABLE_PBA_ACCESS +
+ PCI_MSIX_ENTRY_UPPER_ADDR +
+ (interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE);
+ msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
+ msg_data = csr_readl(pcie, PAB_MSIX_TABLE_PBA_ACCESS +
+ PCI_MSIX_ENTRY_DATA +
+ (interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE);
+
+ ret = mobiveil_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys,
+ msg_addr, epc->mem->page_size);
+ if (ret)
+ return ret;
+
+ writel(msg_data, ep->msi_mem);
+
+ mobiveil_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
+
+ return 0;
+}
+
+void mobiveil_pcie_ep_exit(struct mobiveil_pcie_ep *ep)
+{
+ struct pci_epc *epc = ep->epc;
+
+ pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
+ epc->mem->page_size);
+
+ pci_epc_mem_exit(epc);
+}
+
+int mobiveil_pcie_ep_init(struct mobiveil_pcie_ep *ep)
+{
+ int ret;
+ void *addr;
+ struct pci_epc *epc;
+ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
+ struct device *dev = &pcie->pdev->dev;
+ struct device_node *np = dev->of_node;
+
+ if (!pcie->csr_axi_slave_base) {
+ dev_err(dev, "csr_base is not populated\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(np, "apio-wins", &ep->apio_wins);
+ if (ret < 0) {
+ dev_err(dev, "Unable to read apio-wins property\n");
+ return ret;
+ }
+
+ if (ep->apio_wins > MAX_IATU_OUT) {
+ dev_err(dev, "Invalid apio-wins\n");
+ return -EINVAL;
+ }
+ ep->apio_wins_map = devm_kcalloc(dev,
+ BITS_TO_LONGS(ep->apio_wins),
+ sizeof(long),
+ GFP_KERNEL);
+ if (!ep->apio_wins_map)
+ return -ENOMEM;
+
+ addr = devm_kcalloc(dev, ep->apio_wins, sizeof(phys_addr_t),
+ GFP_KERNEL);
+ if (!addr)
+ return -ENOMEM;
+
+ ep->apio_addr = addr;
+
+ mobiveil_pcie_enable_bridge_pio(pcie);
+ mobiveil_pcie_enable_engine_apio(pcie);
+ mobiveil_pcie_enable_engine_ppio(pcie);
+ mobiveil_pcie_enable_msi_ep(pcie);
+
+ epc = devm_pci_epc_create(dev, &epc_ops);
+ if (IS_ERR(epc)) {
+ dev_err(dev, "Failed to create epc device\n");
+ return PTR_ERR(epc);
+ }
+
+ ep->epc = epc;
+ epc_set_drvdata(epc, ep);
+
+ ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
+ if (ret < 0)
+ epc->max_functions = 1;
+
+ if (ep->ops->ep_init)
+ ep->ops->ep_init(ep);
+
+ ret = __pci_epc_mem_init(epc, ep->phys_base, ep->addr_size,
+ ep->page_size);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize address space\n");
+ return ret;
+ }
+
+ ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys,
+ epc->mem->page_size);
+ if (!ep->msi_mem) {
+ dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
diff --git a/drivers/pci/controller/pcie-mobiveil.c b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
index a45a6447b01d..bde28fa6cafc 100644
--- a/drivers/pci/controller/pcie-mobiveil.c
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
@@ -3,10 +3,12 @@
* PCIe host controller driver for Mobiveil PCIe Host controller
*
* Copyright (c) 2018 Mobiveil Inc.
+ * Copyright 2019 NXP
+ *
* Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
+ * Refactor: Zhiqiang Hou <Zhiqiang.Hou@nxp.com>
*/
-#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -23,274 +25,21 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include "../pci.h"
-
-/* register offsets and bit positions */
-
-/*
- * translation tables are grouped into windows, each window registers are
- * grouped into blocks of 4 or 16 registers each
- */
-#define PAB_REG_BLOCK_SIZE 16
-#define PAB_EXT_REG_BLOCK_SIZE 4
-
-#define PAB_REG_ADDR(offset, win) \
- (offset + (win * PAB_REG_BLOCK_SIZE))
-#define PAB_EXT_REG_ADDR(offset, win) \
- (offset + (win * PAB_EXT_REG_BLOCK_SIZE))
-
-#define LTSSM_STATUS 0x0404
-#define LTSSM_STATUS_L0_MASK 0x3f
-#define LTSSM_STATUS_L0 0x2d
-
-#define PAB_CTRL 0x0808
-#define AMBA_PIO_ENABLE_SHIFT 0
-#define PEX_PIO_ENABLE_SHIFT 1
-#define PAGE_SEL_SHIFT 13
-#define PAGE_SEL_MASK 0x3f
-#define PAGE_LO_MASK 0x3ff
-#define PAGE_SEL_OFFSET_SHIFT 10
-
-#define PAB_AXI_PIO_CTRL 0x0840
-#define APIO_EN_MASK 0xf
-
-#define PAB_PEX_PIO_CTRL 0x08c0
-#define PIO_ENABLE_SHIFT 0
-
-#define PAB_INTP_AMBA_MISC_ENB 0x0b0c
-#define PAB_INTP_AMBA_MISC_STAT 0x0b1c
-#define PAB_INTP_INTX_MASK 0x01e0
-#define PAB_INTP_MSI_MASK 0x8
-
-#define PAB_AXI_AMAP_CTRL(win) PAB_REG_ADDR(0x0ba0, win)
-#define WIN_ENABLE_SHIFT 0
-#define WIN_TYPE_SHIFT 1
-#define WIN_TYPE_MASK 0x3
-#define WIN_SIZE_MASK 0xfffffc00
-
-#define PAB_EXT_AXI_AMAP_SIZE(win) PAB_EXT_REG_ADDR(0xbaf0, win)
-
-#define PAB_EXT_AXI_AMAP_AXI_WIN(win) PAB_EXT_REG_ADDR(0x80a0, win)
-#define PAB_AXI_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x0ba4, win)
-#define AXI_WINDOW_ALIGN_MASK 3
-
-#define PAB_AXI_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x0ba8, win)
-#define PAB_BUS_SHIFT 24
-#define PAB_DEVICE_SHIFT 19
-#define PAB_FUNCTION_SHIFT 16
-
-#define PAB_AXI_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x0bac, win)
-#define PAB_INTP_AXI_PIO_CLASS 0x474
-
-#define PAB_PEX_AMAP_CTRL(win) PAB_REG_ADDR(0x4ba0, win)
-#define AMAP_CTRL_EN_SHIFT 0
-#define AMAP_CTRL_TYPE_SHIFT 1
-#define AMAP_CTRL_TYPE_MASK 3
-
-#define PAB_EXT_PEX_AMAP_SIZEN(win) PAB_EXT_REG_ADDR(0xbef0, win)
-#define PAB_EXT_PEX_AMAP_AXI_WIN(win) PAB_EXT_REG_ADDR(0xb4a0, win)
-#define PAB_PEX_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x4ba4, win)
-#define PAB_PEX_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x4ba8, win)
-#define PAB_PEX_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x4bac, win)
-
-/* starting offset of INTX bits in status register */
-#define PAB_INTX_START 5
-
-/* supported number of MSI interrupts */
-#define PCI_NUM_MSI 16
-
-/* MSI registers */
-#define MSI_BASE_LO_OFFSET 0x04
-#define MSI_BASE_HI_OFFSET 0x08
-#define MSI_SIZE_OFFSET 0x0c
-#define MSI_ENABLE_OFFSET 0x14
-#define MSI_STATUS_OFFSET 0x18
-#define MSI_DATA_OFFSET 0x20
-#define MSI_ADDR_L_OFFSET 0x24
-#define MSI_ADDR_H_OFFSET 0x28
-
-/* outbound and inbound window definitions */
-#define WIN_NUM_0 0
-#define WIN_NUM_1 1
-#define CFG_WINDOW_TYPE 0
-#define IO_WINDOW_TYPE 1
-#define MEM_WINDOW_TYPE 2
-#define IB_WIN_SIZE ((u64)256 * 1024 * 1024 * 1024)
-#define MAX_PIO_WINDOWS 8
-
-/* Parameters for the waiting for link up routine */
-#define LINK_WAIT_MAX_RETRIES 10
-#define LINK_WAIT_MIN 90000
-#define LINK_WAIT_MAX 100000
-
-#define PAGED_ADDR_BNDRY 0xc00
-#define OFFSET_TO_PAGE_ADDR(off) \
- ((off & PAGE_LO_MASK) | PAGED_ADDR_BNDRY)
-#define OFFSET_TO_PAGE_IDX(off) \
- ((off >> PAGE_SEL_OFFSET_SHIFT) & PAGE_SEL_MASK)
-
-struct mobiveil_msi { /* MSI information */
- struct mutex lock; /* protect bitmap variable */
- struct irq_domain *msi_domain;
- struct irq_domain *dev_domain;
- phys_addr_t msi_pages_phys;
- int num_of_vectors;
- DECLARE_BITMAP(msi_irq_in_use, PCI_NUM_MSI);
-};
-
-struct mobiveil_pcie {
- struct platform_device *pdev;
- struct list_head resources;
- void __iomem *config_axi_slave_base; /* endpoint config base */
- void __iomem *csr_axi_slave_base; /* root port config base */
- void __iomem *apb_csr_base; /* MSI register base */
- phys_addr_t pcie_reg_base; /* Physical PCIe Controller Base */
- struct irq_domain *intx_domain;
- raw_spinlock_t intx_mask_lock;
- int irq;
- int apio_wins;
- int ppio_wins;
- int ob_wins_configured; /* configured outbound windows */
- int ib_wins_configured; /* configured inbound windows */
- struct resource *ob_io_res;
- char root_bus_nr;
- struct mobiveil_msi msi;
-};
-
-/*
- * mobiveil_pcie_sel_page - routine to access paged register
- *
- * Registers whose address greater than PAGED_ADDR_BNDRY (0xc00) are paged,
- * for this scheme to work extracted higher 6 bits of the offset will be
- * written to pg_sel field of PAB_CTRL register and rest of the lower 10
- * bits enabled with PAGED_ADDR_BNDRY are used as offset of the register.
- */
-static void mobiveil_pcie_sel_page(struct mobiveil_pcie *pcie, u8 pg_idx)
-{
- u32 val;
-
- val = readl(pcie->csr_axi_slave_base + PAB_CTRL);
- val &= ~(PAGE_SEL_MASK << PAGE_SEL_SHIFT);
- val |= (pg_idx & PAGE_SEL_MASK) << PAGE_SEL_SHIFT;
-
- writel(val, pcie->csr_axi_slave_base + PAB_CTRL);
-}
-
-static void *mobiveil_pcie_comp_addr(struct mobiveil_pcie *pcie, u32 off)
-{
- if (off < PAGED_ADDR_BNDRY) {
- /* For directly accessed registers, clear the pg_sel field */
- mobiveil_pcie_sel_page(pcie, 0);
- return pcie->csr_axi_slave_base + off;
- }
-
- mobiveil_pcie_sel_page(pcie, OFFSET_TO_PAGE_IDX(off));
- return pcie->csr_axi_slave_base + OFFSET_TO_PAGE_ADDR(off);
-}
-
-static int mobiveil_pcie_read(void __iomem *addr, int size, u32 *val)
-{
- if ((uintptr_t)addr & (size - 1)) {
- *val = 0;
- return PCIBIOS_BAD_REGISTER_NUMBER;
- }
-
- switch (size) {
- case 4:
- *val = readl(addr);
- break;
- case 2:
- *val = readw(addr);
- break;
- case 1:
- *val = readb(addr);
- break;
- default:
- *val = 0;
- return PCIBIOS_BAD_REGISTER_NUMBER;
- }
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int mobiveil_pcie_write(void __iomem *addr, int size, u32 val)
-{
- if ((uintptr_t)addr & (size - 1))
- return PCIBIOS_BAD_REGISTER_NUMBER;
-
- switch (size) {
- case 4:
- writel(val, addr);
- break;
- case 2:
- writew(val, addr);
- break;
- case 1:
- writeb(val, addr);
- break;
- default:
- return PCIBIOS_BAD_REGISTER_NUMBER;
- }
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static u32 csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size)
-{
- void *addr;
- u32 val;
- int ret;
-
- addr = mobiveil_pcie_comp_addr(pcie, off);
-
- ret = mobiveil_pcie_read(addr, size, &val);
- if (ret)
- dev_err(&pcie->pdev->dev, "read CSR address failed\n");
-
- return val;
-}
-
-static void csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off, size_t size)
-{
- void *addr;
- int ret;
-
- addr = mobiveil_pcie_comp_addr(pcie, off);
-
- ret = mobiveil_pcie_write(addr, size, val);
- if (ret)
- dev_err(&pcie->pdev->dev, "write CSR address failed\n");
-}
-
-static u32 csr_readl(struct mobiveil_pcie *pcie, u32 off)
-{
- return csr_read(pcie, off, 0x4);
-}
-
-static void csr_writel(struct mobiveil_pcie *pcie, u32 val, u32 off)
-{
- csr_write(pcie, val, off, 0x4);
-}
-
-static bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie)
-{
- return (csr_readl(pcie, LTSSM_STATUS) &
- LTSSM_STATUS_L0_MASK) == LTSSM_STATUS_L0;
-}
+#include "pcie-mobiveil.h"
static bool mobiveil_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
{
struct mobiveil_pcie *pcie = bus->sysdata;
/* Only one device down on each root port */
- if ((bus->number == pcie->root_bus_nr) && (devfn > 0))
+ if ((bus->number == pcie->rp.root_bus_nr) && (devfn > 0))
return false;
/*
* Do not read more than one device on the bus directly
* attached to RC
*/
- if ((bus->primary == pcie->root_bus_nr) && (PCI_SLOT(devfn) > 0))
+ if ((bus->primary == pcie->rp.root_bus_nr) && (PCI_SLOT(devfn) > 0))
return false;
return true;
@@ -310,7 +59,7 @@ static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus,
return NULL;
/* RC config access */
- if (bus->number == pcie->root_bus_nr)
+ if (bus->number == pcie->rp.root_bus_nr)
return pcie->csr_axi_slave_base + where;
/*
@@ -325,12 +74,23 @@ static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus,
csr_writel(pcie, value, PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0));
- return pcie->config_axi_slave_base + where;
+ return pcie->rp.config_axi_slave_base + where;
}
+static int mobiveil_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ struct mobiveil_pcie *pcie = bus->sysdata;
+ struct root_port *rp = &pcie->rp;
+
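+	/* Delegate reads on downstream buses to the platform, if it has a hook */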
+ if (bus->number > rp->root_bus_nr && rp->ops->read_other_conf)
+ return rp->ops->read_other_conf(bus, devfn, where, size, val);
+
+ return pci_generic_config_read(bus, devfn, where, size, val);
+}
static struct pci_ops mobiveil_pcie_ops = {
.map_bus = mobiveil_pcie_map_bus,
- .read = pci_generic_config_read,
+ .read = mobiveil_pcie_config_read,
.write = pci_generic_config_write,
};
@@ -339,7 +99,7 @@ static void mobiveil_pcie_isr(struct irq_desc *desc)
struct irq_chip *chip = irq_desc_get_chip(desc);
struct mobiveil_pcie *pcie = irq_desc_get_handler_data(desc);
struct device *dev = &pcie->pdev->dev;
- struct mobiveil_msi *msi = &pcie->msi;
+ struct mobiveil_msi *msi = &pcie->rp.msi;
u32 msi_data, msi_addr_lo, msi_addr_hi;
u32 intr_status, msi_status;
unsigned long shifted_status;
@@ -364,7 +124,7 @@ static void mobiveil_pcie_isr(struct irq_desc *desc)
shifted_status >>= PAB_INTX_START;
do {
for_each_set_bit(bit, &shifted_status, PCI_NUM_INTX) {
- virq = irq_find_mapping(pcie->intx_domain,
+ virq = irq_find_mapping(pcie->rp.intx_domain,
bit + 1);
if (virq)
generic_handle_irq(virq);
@@ -427,10 +187,10 @@ static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie)
/* map config resource */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"config_axi_slave");
- pcie->config_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(pcie->config_axi_slave_base))
- return PTR_ERR(pcie->config_axi_slave_base);
- pcie->ob_io_res = res;
+ pcie->rp.config_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pcie->rp.config_axi_slave_base))
+ return PTR_ERR(pcie->rp.config_axi_slave_base);
+ pcie->rp.ob_io_res = res;
/* map csr resource */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
@@ -440,12 +200,6 @@ static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie)
return PTR_ERR(pcie->csr_axi_slave_base);
pcie->pcie_reg_base = res->start;
- /* map MSI config resource */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb_csr");
- pcie->apb_csr_base = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(pcie->apb_csr_base))
- return PTR_ERR(pcie->apb_csr_base);
-
/* read the number of windows requested */
if (of_property_read_u32(node, "apio-wins", &pcie->apio_wins))
pcie->apio_wins = MAX_PIO_WINDOWS;
@@ -453,116 +207,15 @@ static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie)
if (of_property_read_u32(node, "ppio-wins", &pcie->ppio_wins))
pcie->ppio_wins = MAX_PIO_WINDOWS;
- pcie->irq = platform_get_irq(pdev, 0);
- if (pcie->irq <= 0) {
- dev_err(dev, "failed to map IRQ: %d\n", pcie->irq);
- return -ENODEV;
- }
-
return 0;
}
-static void program_ib_windows(struct mobiveil_pcie *pcie, int win_num,
- u64 cpu_addr, u64 pci_addr, u32 type, u64 size)
-{
- u32 value;
- u64 size64 = ~(size - 1);
-
- if (win_num >= pcie->ppio_wins) {
- dev_err(&pcie->pdev->dev,
- "ERROR: max inbound windows reached !\n");
- return;
- }
-
- value = csr_readl(pcie, PAB_PEX_AMAP_CTRL(win_num));
- value &= ~(AMAP_CTRL_TYPE_MASK << AMAP_CTRL_TYPE_SHIFT | WIN_SIZE_MASK);
- value |= type << AMAP_CTRL_TYPE_SHIFT | 1 << AMAP_CTRL_EN_SHIFT |
- (lower_32_bits(size64) & WIN_SIZE_MASK);
- csr_writel(pcie, value, PAB_PEX_AMAP_CTRL(win_num));
-
- csr_writel(pcie, upper_32_bits(size64),
- PAB_EXT_PEX_AMAP_SIZEN(win_num));
-
- csr_writel(pcie, lower_32_bits(cpu_addr),
- PAB_PEX_AMAP_AXI_WIN(win_num));
- csr_writel(pcie, upper_32_bits(cpu_addr),
- PAB_EXT_PEX_AMAP_AXI_WIN(win_num));
-
- csr_writel(pcie, lower_32_bits(pci_addr),
- PAB_PEX_AMAP_PEX_WIN_L(win_num));
- csr_writel(pcie, upper_32_bits(pci_addr),
- PAB_PEX_AMAP_PEX_WIN_H(win_num));
-
- pcie->ib_wins_configured++;
-}
-
-/*
- * routine to program the outbound windows
- */
-static void program_ob_windows(struct mobiveil_pcie *pcie, int win_num,
- u64 cpu_addr, u64 pci_addr, u32 type, u64 size)
-{
- u32 value;
- u64 size64 = ~(size - 1);
-
- if (win_num >= pcie->apio_wins) {
- dev_err(&pcie->pdev->dev,
- "ERROR: max outbound windows reached !\n");
- return;
- }
-
- /*
- * program Enable Bit to 1, Type Bit to (00) base 2, AXI Window Size Bit
- * to 4 KB in PAB_AXI_AMAP_CTRL register
- */
- value = csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
- value &= ~(WIN_TYPE_MASK << WIN_TYPE_SHIFT | WIN_SIZE_MASK);
- value |= 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT |
- (lower_32_bits(size64) & WIN_SIZE_MASK);
- csr_writel(pcie, value, PAB_AXI_AMAP_CTRL(win_num));
-
- csr_writel(pcie, upper_32_bits(size64), PAB_EXT_AXI_AMAP_SIZE(win_num));
-
- /*
- * program AXI window base with appropriate value in
- * PAB_AXI_AMAP_AXI_WIN0 register
- */
- csr_writel(pcie, lower_32_bits(cpu_addr) & (~AXI_WINDOW_ALIGN_MASK),
- PAB_AXI_AMAP_AXI_WIN(win_num));
- csr_writel(pcie, upper_32_bits(cpu_addr),
- PAB_EXT_AXI_AMAP_AXI_WIN(win_num));
-
- csr_writel(pcie, lower_32_bits(pci_addr),
- PAB_AXI_AMAP_PEX_WIN_L(win_num));
- csr_writel(pcie, upper_32_bits(pci_addr),
- PAB_AXI_AMAP_PEX_WIN_H(win_num));
-
- pcie->ob_wins_configured++;
-}
-
-static int mobiveil_bringup_link(struct mobiveil_pcie *pcie)
-{
- int retries;
-
- /* check if the link is up or not */
- for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
- if (mobiveil_pcie_link_up(pcie))
- return 0;
-
- usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX);
- }
-
- dev_err(&pcie->pdev->dev, "link never came up\n");
-
- return -ETIMEDOUT;
-}
-
static void mobiveil_pcie_enable_msi(struct mobiveil_pcie *pcie)
{
phys_addr_t msg_addr = pcie->pcie_reg_base;
- struct mobiveil_msi *msi = &pcie->msi;
+ struct mobiveil_msi *msi = &pcie->rp.msi;
- pcie->msi.num_of_vectors = PCI_NUM_MSI;
+ msi->num_of_vectors = PCI_NUM_MSI;
msi->msi_pages_phys = (phys_addr_t)msg_addr;
writel_relaxed(lower_32_bits(msg_addr),
@@ -573,16 +226,21 @@ static void mobiveil_pcie_enable_msi(struct mobiveil_pcie *pcie)
writel_relaxed(1, pcie->apb_csr_base + MSI_ENABLE_OFFSET);
}
-static int mobiveil_host_init(struct mobiveil_pcie *pcie)
+int mobiveil_host_init(struct mobiveil_pcie *pcie, bool reinit)
{
u32 value, pab_ctrl, type;
struct resource_entry *win;
- /* setup bus numbers */
- value = csr_readl(pcie, PCI_PRIMARY_BUS);
- value &= 0xff000000;
- value |= 0x00ff0100;
- csr_writel(pcie, value, PCI_PRIMARY_BUS);
+ pcie->ib_wins_configured = 0;
+ pcie->ob_wins_configured = 0;
+
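+	/* The primary/secondary bus numbers only need to be set up once */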
+ if (!reinit) {
+ /* setup bus numbers */
+ value = csr_readl(pcie, PCI_PRIMARY_BUS);
+ value &= 0xff000000;
+ value |= 0x00ff0100;
+ csr_writel(pcie, value, PCI_PRIMARY_BUS);
+ }
/*
* program Bus Master Enable Bit in Command Register in PAB Config
@@ -600,9 +258,6 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie)
pab_ctrl |= (1 << AMBA_PIO_ENABLE_SHIFT) | (1 << PEX_PIO_ENABLE_SHIFT);
csr_writel(pcie, pab_ctrl, PAB_CTRL);
- csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK),
- PAB_INTP_AMBA_MISC_ENB);
-
/*
* program PIO Enable Bit to 1 and Config Window Enable Bit to 1 in
* PAB_AXI_PIO_CTRL Register
@@ -624,20 +279,24 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie)
*/
/* config outbound translation window */
- program_ob_windows(pcie, WIN_NUM_0, pcie->ob_io_res->start, 0,
- CFG_WINDOW_TYPE, resource_size(pcie->ob_io_res));
+ program_ob_windows(pcie, WIN_NUM_0, pcie->rp.ob_io_res->start, 0,
+ CFG_WINDOW_TYPE, resource_size(pcie->rp.ob_io_res));
/* memory inbound translation window */
program_ib_windows(pcie, WIN_NUM_0, 0, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE);
/* Get the I/O and memory ranges from DT */
- resource_list_for_each_entry(win, &pcie->resources) {
- if (resource_type(win->res) == IORESOURCE_MEM)
+ resource_list_for_each_entry(win, pcie->resources) {
+ if (resource_type(win->res) == IORESOURCE_MEM) {
type = MEM_WINDOW_TYPE;
- else if (resource_type(win->res) == IORESOURCE_IO)
+ } else if (resource_type(win->res) == IORESOURCE_IO) {
type = IO_WINDOW_TYPE;
- else
+ } else if (resource_type(win->res) == IORESOURCE_BUS) {
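+			/* Remember the root bus number from the bus-range resource */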
+ pcie->rp.root_bus_nr = win->res->start;
+ continue;
+ } else {
continue;
+ }
/* configure outbound translation window */
program_ob_windows(pcie, pcie->ob_wins_configured,
@@ -652,8 +311,9 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie)
value |= (PCI_CLASS_BRIDGE_PCI << 16);
csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS);
- /* setup MSI hardware registers */
- mobiveil_pcie_enable_msi(pcie);
+ /* Platform specific host init */
+ if (pcie->ops->host_init)
+ return pcie->ops->host_init(pcie);
return 0;
}
@@ -667,11 +327,11 @@ static void mobiveil_mask_intx_irq(struct irq_data *data)
pcie = irq_desc_get_chip_data(desc);
mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
- raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags);
+ raw_spin_lock_irqsave(&pcie->rp.intx_mask_lock, flags);
shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
shifted_val &= ~mask;
csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
- raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags);
+ raw_spin_unlock_irqrestore(&pcie->rp.intx_mask_lock, flags);
}
static void mobiveil_unmask_intx_irq(struct irq_data *data)
@@ -683,11 +343,11 @@ static void mobiveil_unmask_intx_irq(struct irq_data *data)
pcie = irq_desc_get_chip_data(desc);
mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
- raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags);
+ raw_spin_lock_irqsave(&pcie->rp.intx_mask_lock, flags);
shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
shifted_val |= mask;
csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
- raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags);
+ raw_spin_unlock_irqrestore(&pcie->rp.intx_mask_lock, flags);
}
static struct irq_chip intx_irq_chip = {
@@ -755,7 +415,7 @@ static int mobiveil_irq_msi_domain_alloc(struct irq_domain *domain,
unsigned int nr_irqs, void *args)
{
struct mobiveil_pcie *pcie = domain->host_data;
- struct mobiveil_msi *msi = &pcie->msi;
+ struct mobiveil_msi *msi = &pcie->rp.msi;
unsigned long bit;
WARN_ON(nr_irqs != 1);
@@ -782,7 +442,7 @@ static void mobiveil_irq_msi_domain_free(struct irq_domain *domain,
{
struct irq_data *d = irq_domain_get_irq_data(domain, virq);
struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(d);
- struct mobiveil_msi *msi = &pcie->msi;
+ struct mobiveil_msi *msi = &pcie->rp.msi;
mutex_lock(&msi->lock);
@@ -803,9 +463,9 @@ static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
- struct mobiveil_msi *msi = &pcie->msi;
+ struct mobiveil_msi *msi = &pcie->rp.msi;
- mutex_init(&pcie->msi.lock);
+ mutex_init(&msi->lock);
msi->dev_domain = irq_domain_add_linear(NULL, msi->num_of_vectors,
&msi_domain_ops, pcie);
if (!msi->dev_domain) {
@@ -832,15 +492,15 @@ static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie)
int ret;
/* setup INTx */
- pcie->intx_domain = irq_domain_add_linear(node, PCI_NUM_INTX,
- &intx_domain_ops, pcie);
+ pcie->rp.intx_domain = irq_domain_add_linear(node, PCI_NUM_INTX,
+ &intx_domain_ops, pcie);
- if (!pcie->intx_domain) {
+ if (!pcie->rp.intx_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
return -ENOMEM;
}
- raw_spin_lock_init(&pcie->intx_mask_lock);
+ raw_spin_lock_init(&pcie->rp.intx_mask_lock);
/* setup MSI */
ret = mobiveil_allocate_msi_domains(pcie);
@@ -850,24 +510,56 @@ static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie)
return 0;
}
-static int mobiveil_pcie_probe(struct platform_device *pdev)
+static int mobiveil_pcie_interrupt_init(struct mobiveil_pcie *pcie)
{
- struct mobiveil_pcie *pcie;
- struct pci_bus *bus;
- struct pci_bus *child;
- struct pci_host_bridge *bridge;
- struct device *dev = &pdev->dev;
- resource_size_t iobase;
+ struct device *dev = &pcie->pdev->dev;
+ struct resource *res;
int ret;
- /* allocate the PCIe port */
- bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
- if (!bridge)
- return -ENOMEM;
+ if (pcie->rp.ops->interrupt_init)
+ return pcie->rp.ops->interrupt_init(pcie);
+
+ /* map MSI config resource */
+ res = platform_get_resource_byname(pcie->pdev, IORESOURCE_MEM,
+ "apb_csr");
+ pcie->apb_csr_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pcie->apb_csr_base))
+ return PTR_ERR(pcie->apb_csr_base);
+
+ /* setup MSI hardware registers */
+ mobiveil_pcie_enable_msi(pcie);
+
+ pcie->rp.irq = platform_get_irq(pcie->pdev, 0);
+ if (pcie->rp.irq <= 0) {
+ dev_err(dev, "failed to map IRQ: %d\n", pcie->rp.irq);
+ return -ENODEV;
+ }
- pcie = pci_host_bridge_priv(bridge);
+ /* initialize the IRQ domains */
+ ret = mobiveil_pcie_init_irq_domain(pcie);
+ if (ret) {
+ dev_err(dev, "Failed creating IRQ Domain\n");
+ return ret;
+ }
- pcie->pdev = pdev;
+ irq_set_chained_handler_and_data(pcie->rp.irq,
+ mobiveil_pcie_isr, pcie);
+
+ /* Enable interrupts */
+ csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK),
+ PAB_INTP_AMBA_MISC_ENB);
+
+ return 0;
+}
+
+int mobiveil_pcie_host_probe(struct mobiveil_pcie *pcie)
+{
+ struct pci_bus *bus;
+ struct pci_bus *child;
+ struct pci_host_bridge *bridge = pcie->bridge;
+ struct device *dev = &pcie->pdev->dev;
+ resource_size_t iobase;
+ int ret;
ret = mobiveil_pcie_parse_dt(pcie);
if (ret) {
@@ -875,44 +567,40 @@ static int mobiveil_pcie_probe(struct platform_device *pdev)
return ret;
}
- INIT_LIST_HEAD(&pcie->resources);
-
/* parse the host bridge base addresses from the device tree file */
ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
- &pcie->resources, &iobase);
+ &bridge->windows, &iobase);
if (ret) {
dev_err(dev, "Getting bridge resources failed\n");
return ret;
}
+ pcie->resources = &bridge->windows;
+
/*
* configure all inbound and outbound windows and prepare the RC for
* config access
*/
- ret = mobiveil_host_init(pcie);
+ ret = mobiveil_host_init(pcie, false);
if (ret) {
dev_err(dev, "Failed to initialize host\n");
- goto error;
+ return ret;
}
- /* initialize the IRQ domains */
- ret = mobiveil_pcie_init_irq_domain(pcie);
+ ret = mobiveil_pcie_interrupt_init(pcie);
if (ret) {
- dev_err(dev, "Failed creating IRQ Domain\n");
- goto error;
+ dev_err(dev, "Interrupt init failed\n");
+ return ret;
}
- irq_set_chained_handler_and_data(pcie->irq, mobiveil_pcie_isr, pcie);
-
- ret = devm_request_pci_bus_resources(dev, &pcie->resources);
+ ret = devm_request_pci_bus_resources(dev, pcie->resources);
if (ret)
- goto error;
+ return ret;
/* Initialize bridge */
- list_splice_init(&pcie->resources, &bridge->windows);
bridge->dev.parent = dev;
bridge->sysdata = pcie;
- bridge->busnr = pcie->root_bus_nr;
+ bridge->busnr = pcie->rp.root_bus_nr;
bridge->ops = &mobiveil_pcie_ops;
bridge->map_irq = of_irq_parse_and_map_pci;
bridge->swizzle_irq = pci_common_swizzle;
@@ -920,13 +608,13 @@ static int mobiveil_pcie_probe(struct platform_device *pdev)
ret = mobiveil_bringup_link(pcie);
if (ret) {
dev_info(dev, "link bring-up failed\n");
- goto error;
+ return ret;
}
/* setup the kernel resources for the newly added PCIe root bus */
ret = pci_scan_root_bus_bridge(bridge);
if (ret)
- goto error;
+ return ret;
bus = bridge->bus;
@@ -936,29 +624,4 @@ static int mobiveil_pcie_probe(struct platform_device *pdev)
pci_bus_add_devices(bus);
return 0;
-error:
- pci_free_resource_list(&pcie->resources);
- return ret;
}
-
-static const struct of_device_id mobiveil_pcie_of_match[] = {
- {.compatible = "mbvl,gpex40-pcie",},
- {},
-};
-
-MODULE_DEVICE_TABLE(of, mobiveil_pcie_of_match);
-
-static struct platform_driver mobiveil_pcie_driver = {
- .probe = mobiveil_pcie_probe,
- .driver = {
- .name = "mobiveil-pcie",
- .of_match_table = mobiveil_pcie_of_match,
- .suppress_bind_attrs = true,
- },
-};
-
-builtin_platform_driver(mobiveil_pcie_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Mobiveil PCIe host controller driver");
-MODULE_AUTHOR("Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>");
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil-plat.c b/drivers/pci/controller/mobiveil/pcie-mobiveil-plat.c
new file mode 100644
index 000000000000..9c62fc58530c
--- /dev/null
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-plat.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Mobiveil PCIe Host controller
+ *
+ * Copyright (c) 2018 Mobiveil Inc.
+ * Copyright 2019 NXP
+ *
+ * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
+ * Refactor: Zhiqiang Hou <Zhiqiang.Hou@nxp.com>
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "pcie-mobiveil.h"
+
+static int mobiveil_pcie_probe(struct platform_device *pdev)
+{
+ struct mobiveil_pcie *pcie;
+ struct pci_host_bridge *bridge;
+ struct device *dev = &pdev->dev;
+
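+	/* allocate the PCIe port */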
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+ if (!bridge)
+ return -ENOMEM;
+
+ pcie = pci_host_bridge_priv(bridge);
+
+	pcie->pdev = pdev;
+	pcie->bridge = bridge;
+
+ return mobiveil_pcie_host_probe(pcie);
+}
+
+static const struct of_device_id mobiveil_pcie_of_match[] = {
+ {.compatible = "mbvl,gpex40-pcie",},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, mobiveil_pcie_of_match);
+
+static struct platform_driver mobiveil_pcie_driver = {
+ .probe = mobiveil_pcie_probe,
+ .driver = {
+ .name = "mobiveil-pcie",
+ .of_match_table = mobiveil_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+builtin_platform_driver(mobiveil_pcie_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Mobiveil PCIe host controller driver");
+MODULE_AUTHOR("Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>");
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil.c b/drivers/pci/controller/mobiveil/pcie-mobiveil.c
new file mode 100644
index 000000000000..6d47164e5eeb
--- /dev/null
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil.c
@@ -0,0 +1,310 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Mobiveil PCIe Host controller
+ *
+ * Copyright (c) 2018 Mobiveil Inc.
+ * Copyright 2019 NXP
+ *
+ * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
+ * Refactor: Zhiqiang Hou <Zhiqiang.Hou@nxp.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+#include "pcie-mobiveil.h"
+
+/*
+ * mobiveil_pcie_sel_page - select the page for paged register access
+ *
+ * Registers whose offset is greater than PAGED_ADDR_BNDRY (0xc00) are paged.
+ * For this scheme to work, the upper 6 bits of the offset are written to the
+ * pg_sel field of the PAB_CTRL register, and the lower 10 bits, OR'd with
+ * PAGED_ADDR_BNDRY, are used as the register's offset within the page.
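+ *
+ * For example, offset 0x1234 selects page index 0x4 ((0x1234 >> 10) & 0x3f)
+ * and in-page address 0xe34 ((0x1234 & 0x3ff) | 0xc00).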
+ */
+static void mobiveil_pcie_sel_page(struct mobiveil_pcie *pcie, u8 pg_idx)
+{
+ u32 val;
+
+ val = readl(pcie->csr_axi_slave_base + PAB_CTRL);
+ val &= ~(PAGE_SEL_MASK << PAGE_SEL_SHIFT);
+ val |= (pg_idx & PAGE_SEL_MASK) << PAGE_SEL_SHIFT;
+
+ writel(val, pcie->csr_axi_slave_base + PAB_CTRL);
+}
+
+static void *mobiveil_pcie_comp_addr(struct mobiveil_pcie *pcie, u32 off)
+{
+ if (off < PAGED_ADDR_BNDRY) {
+ /* For directly accessed registers, clear the pg_sel field */
+ mobiveil_pcie_sel_page(pcie, 0);
+ return pcie->csr_axi_slave_base + off;
+ }
+
+ mobiveil_pcie_sel_page(pcie, OFFSET_TO_PAGE_IDX(off));
+ return pcie->csr_axi_slave_base + OFFSET_TO_PAGE_ADDR(off);
+}
+
+static int mobiveil_pcie_read(void __iomem *addr, int size, u32 *val)
+{
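+	/* reject accesses that are not naturally aligned to their size */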
+ if ((uintptr_t)addr & (size - 1)) {
+ *val = 0;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ switch (size) {
+ case 4:
+ *val = readl(addr);
+ break;
+ case 2:
+ *val = readw(addr);
+ break;
+ case 1:
+ *val = readb(addr);
+ break;
+ default:
+ *val = 0;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int mobiveil_pcie_write(void __iomem *addr, int size, u32 val)
+{
+ if ((uintptr_t)addr & (size - 1))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ switch (size) {
+ case 4:
+ writel(val, addr);
+ break;
+ case 2:
+ writew(val, addr);
+ break;
+ case 1:
+ writeb(val, addr);
+ break;
+ default:
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
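+/* read a CSR through the paged access window; @size is 1, 2 or 4 bytes */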
+u32 csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size)
+{
+ void *addr;
+ u32 val;
+ int ret;
+
+ addr = mobiveil_pcie_comp_addr(pcie, off);
+
+ ret = mobiveil_pcie_read(addr, size, &val);
+ if (ret)
+ dev_err(&pcie->pdev->dev, "read CSR address failed\n");
+
+ return val;
+}
+
+void csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off, size_t size)
+{
+ void *addr;
+ int ret;
+
+ addr = mobiveil_pcie_comp_addr(pcie, off);
+
+ ret = mobiveil_pcie_write(addr, size, val);
+ if (ret)
+ dev_err(&pcie->pdev->dev, "write CSR address failed\n");
+}
+
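+/* the link is up once the LTSSM has settled in the L0 (0x2d) state */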
+bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie)
+{
+ if (pcie->ops->link_up)
+ return pcie->ops->link_up(pcie);
+
+ return (csr_readl(pcie, LTSSM_STATUS) &
+ LTSSM_STATUS_L0_MASK) == LTSSM_STATUS_L0;
+}
+
+void program_ib_windows(struct mobiveil_pcie *pcie, int win_num, u64 cpu_addr,
+ u64 pci_addr, u32 type, u64 size)
+{
+ u32 value;
+ u64 size64 = ~(size - 1);
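+	/* for a power-of-2 size, ~(size - 1) yields the window size mask */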
+
+ if (win_num >= pcie->ppio_wins) {
+ dev_err(&pcie->pdev->dev,
+			"ERROR: max inbound windows reached!\n");
+ return;
+ }
+
+ value = csr_readl(pcie, PAB_PEX_AMAP_CTRL(win_num));
+ value &= ~(AMAP_CTRL_TYPE_MASK << AMAP_CTRL_TYPE_SHIFT | WIN_SIZE_MASK);
+ value |= type << AMAP_CTRL_TYPE_SHIFT | 1 << AMAP_CTRL_EN_SHIFT |
+ (lower_32_bits(size64) & WIN_SIZE_MASK);
+ csr_writel(pcie, value, PAB_PEX_AMAP_CTRL(win_num));
+
+ csr_writel(pcie, upper_32_bits(size64),
+ PAB_EXT_PEX_AMAP_SIZEN(win_num));
+
+ csr_writel(pcie, lower_32_bits(cpu_addr),
+ PAB_PEX_AMAP_AXI_WIN(win_num));
+ csr_writel(pcie, upper_32_bits(cpu_addr),
+ PAB_EXT_PEX_AMAP_AXI_WIN(win_num));
+
+ csr_writel(pcie, lower_32_bits(pci_addr),
+ PAB_PEX_AMAP_PEX_WIN_L(win_num));
+ csr_writel(pcie, upper_32_bits(pci_addr),
+ PAB_PEX_AMAP_PEX_WIN_H(win_num));
+
+ pcie->ib_wins_configured++;
+}
+
+/*
+ * routine to program the outbound windows
+ */
+void __program_ob_windows(struct mobiveil_pcie *pcie, u8 func_no, int win_num,
+ u64 cpu_addr, u64 pci_addr, u32 type, u64 size)
+{
+ u32 value;
+ u64 size64 = ~(size - 1);
+
+ /*
+	 * program the Enable bit, the window type and the lower bits of
+	 * the window size in the PAB_AXI_AMAP_CTRL register
+ */
+ value = csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
+ value &= ~(WIN_TYPE_MASK << WIN_TYPE_SHIFT | WIN_SIZE_MASK);
+ value |= 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT |
+ (lower_32_bits(size64) & WIN_SIZE_MASK);
+ csr_writel(pcie, value, PAB_AXI_AMAP_CTRL(win_num));
+
+ csr_writel(pcie, upper_32_bits(size64), PAB_EXT_AXI_AMAP_SIZE(win_num));
+
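+	/* record the function number carried in the PCI header for this window */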
+ csr_writel(pcie, func_no, PAB_AXI_AMAP_PCI_HDR_PARAM(win_num));
+ /*
+	 * program the AXI window base, aligned down per AXI_WINDOW_ALIGN_MASK,
+	 * in the PAB_AXI_AMAP_AXI_WIN(win_num) register
+ */
+ csr_writel(pcie, lower_32_bits(cpu_addr) & (~AXI_WINDOW_ALIGN_MASK),
+ PAB_AXI_AMAP_AXI_WIN(win_num));
+ csr_writel(pcie, upper_32_bits(cpu_addr),
+ PAB_EXT_AXI_AMAP_AXI_WIN(win_num));
+
+ csr_writel(pcie, lower_32_bits(pci_addr),
+ PAB_AXI_AMAP_PEX_WIN_L(win_num));
+ csr_writel(pcie, upper_32_bits(pci_addr),
+ PAB_AXI_AMAP_PEX_WIN_H(win_num));
+}
+
+void program_ob_windows(struct mobiveil_pcie *pcie, int win_num, u64 cpu_addr,
+ u64 pci_addr, u32 type, u64 size)
+{
+ if (win_num >= pcie->apio_wins) {
+ dev_err(&pcie->pdev->dev,
+			"ERROR: max outbound windows reached!\n");
+ return;
+ }
+
+ __program_ob_windows(pcie, 0, win_num, cpu_addr,
+ pci_addr, type, size);
+
+ pcie->ob_wins_configured++;
+}
+
+void program_ob_windows_ep(struct mobiveil_pcie *pcie, u8 func_no, int win_num,
+ u64 cpu_addr, u64 pci_addr, u32 type, u64 size)
+{
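+	/* window sizes must be a power of 2; round up, e.g. 0x3000 -> 0x4000 */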
+ if (size & (size - 1))
+ size = 1 << (1 + ilog2(size));
+
+ __program_ob_windows(pcie, func_no, win_num, cpu_addr,
+ pci_addr, type, size);
+}
+
+void program_ib_windows_ep(struct mobiveil_pcie *pcie, u8 func_no,
+ int bar, u64 phys)
+{
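+	/* map inbound accesses that hit this BAR to the given AXI address */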
+ csr_writel(pcie, upper_32_bits(phys),
+ PAB_EXT_PEX_BAR_AMAP(func_no, bar));
+ csr_writel(pcie, lower_32_bits(phys) | PEX_BAR_AMAP_EN,
+ PAB_PEX_BAR_AMAP(func_no, bar));
+}
+
+void mobiveil_pcie_disable_ib_win_ep(struct mobiveil_pcie *pcie,
+ u8 func_no, u8 bar)
+{
+ u32 val;
+
+ val = csr_readl(pcie, PAB_PEX_BAR_AMAP(func_no, bar));
+ val &= ~(1 << 0);
+ csr_writel(pcie, val, PAB_PEX_BAR_AMAP(func_no, bar));
+}
+
+void mobiveil_pcie_disable_ob_win(struct mobiveil_pcie *pcie, int win_num)
+{
+ u32 val;
+
+ val = csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
+ val &= ~(1 << WIN_ENABLE_SHIFT);
+ csr_writel(pcie, val, PAB_AXI_AMAP_CTRL(win_num));
+}
+
+void mobiveil_pcie_enable_bridge_pio(struct mobiveil_pcie *pcie)
+{
+ u32 val;
+
+ val = csr_readl(pcie, PAB_CTRL);
+ val |= 1 << AMBA_PIO_ENABLE_SHIFT;
+ val |= 1 << PEX_PIO_ENABLE_SHIFT;
+ csr_writel(pcie, val, PAB_CTRL);
+}
+
+void mobiveil_pcie_enable_engine_apio(struct mobiveil_pcie *pcie)
+{
+ u32 val;
+
+ val = csr_readl(pcie, PAB_AXI_PIO_CTRL);
+ val |= APIO_EN_MASK;
+ csr_writel(pcie, val, PAB_AXI_PIO_CTRL);
+}
+
+void mobiveil_pcie_enable_engine_ppio(struct mobiveil_pcie *pcie)
+{
+ u32 val;
+
+ val = csr_readl(pcie, PAB_PEX_PIO_CTRL);
+ val |= 1 << PIO_ENABLE_SHIFT;
+ csr_writel(pcie, val, PAB_PEX_PIO_CTRL);
+}
+
+void mobiveil_pcie_enable_msi_ep(struct mobiveil_pcie *pcie)
+{
+ u32 val;
+
+ val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
+ val |= PAB_INTP_PAMR;
+ csr_writel(pcie, val, PAB_INTP_AMBA_MISC_ENB);
+}
+
+int mobiveil_bringup_link(struct mobiveil_pcie *pcie)
+{
+ int retries;
+
+	/* poll for link up for about one second (10 retries, 90-100 ms apart) */
+ for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
+ if (mobiveil_pcie_link_up(pcie))
+ return 0;
+
+ usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX);
+ }
+
+ dev_err(&pcie->pdev->dev, "link never came up\n");
+
+ return -ETIMEDOUT;
+}
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil.h b/drivers/pci/controller/mobiveil/pcie-mobiveil.h
new file mode 100644
index 000000000000..a40707e33d43
--- /dev/null
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil.h
@@ -0,0 +1,301 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * PCIe host controller driver for Mobiveil PCIe Host controller
+ *
+ * Copyright (c) 2018 Mobiveil Inc.
+ * Copyright 2019 NXP
+ *
+ * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
+ * Refactor: Zhiqiang Hou <Zhiqiang.Hou@nxp.com>
+ */
+
+#ifndef _PCIE_MOBIVEIL_H
+#define _PCIE_MOBIVEIL_H
+
+#include <linux/pci.h>
+#include <linux/irq.h>
+#include <linux/msi.h>
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+
+#include "../../pci.h"
+
+#define MAX_IATU_OUT 256
+/* register offsets and bit positions */
+
+/*
+ * Translation tables are grouped into windows; each window's registers are
+ * grouped into blocks of 4 or 16 registers each.
+ */
+#define PAB_REG_BLOCK_SIZE 16
+#define PAB_EXT_REG_BLOCK_SIZE 4
+
+#define PAB_REG_ADDR(offset, win) \
+ (offset + (win * PAB_REG_BLOCK_SIZE))
+#define PAB_EXT_REG_ADDR(offset, win) \
+ (offset + (win * PAB_EXT_REG_BLOCK_SIZE))
+
+#define LTSSM_STATUS 0x0404
+#define LTSSM_STATUS_L0_MASK 0x3f
+#define LTSSM_STATUS_L0 0x2d
+
+#define PAB_CTRL 0x0808
+#define AMBA_PIO_ENABLE_SHIFT 0
+#define PEX_PIO_ENABLE_SHIFT 1
+#define PAGE_SEL_SHIFT 13
+#define PAGE_SEL_MASK 0x3f
+#define PAGE_LO_MASK 0x3ff
+#define PAGE_SEL_OFFSET_SHIFT 10
+#define FUNC_SEL_SHIFT 19
+#define FUNC_SEL_MASK 0x1ff
+#define MSI_SW_CTRL_EN BIT(29)
+
+#define PAB_ACTIVITY_STAT 0x81c
+
+#define PAB_AXI_PIO_CTRL 0x0840
+#define APIO_EN_MASK 0xf
+
+#define PAB_PEX_PIO_CTRL 0x08c0
+#define PIO_ENABLE_SHIFT 0
+
+#define PAB_INTP_AMBA_MISC_ENB 0x0b0c
+#define PAB_INTP_PAMR BIT(0)
+#define PAB_INTP_AMBA_MISC_STAT 0x0b1c
+#define PAB_INTP_RESET BIT(1)
+#define PAB_INTP_MSI BIT(3)
+#define PAB_INTP_INTA BIT(5)
+#define PAB_INTP_INTB BIT(6)
+#define PAB_INTP_INTC BIT(7)
+#define PAB_INTP_INTD BIT(8)
+#define PAB_INTP_PCIE_UE BIT(9)
+#define PAB_INTP_IE_PMREDI BIT(29)
+#define PAB_INTP_IE_EC BIT(30)
+#define PAB_INTP_MSI_MASK PAB_INTP_MSI
+#define PAB_INTP_INTX_MASK (PAB_INTP_INTA | PAB_INTP_INTB |\
+ PAB_INTP_INTC | PAB_INTP_INTD)
+
+#define PAB_AXI_AMAP_CTRL(win) PAB_REG_ADDR(0x0ba0, win)
+#define WIN_ENABLE_SHIFT 0
+#define WIN_TYPE_SHIFT 1
+#define WIN_TYPE_MASK 0x3
+#define WIN_SIZE_MASK 0xfffffc00
+
+#define PAB_AXI_AMAP_PCI_HDR_PARAM(win) PAB_EXT_REG_ADDR(0x5ba0, win)
+
+#define PAB_EXT_AXI_AMAP_SIZE(win) PAB_EXT_REG_ADDR(0xbaf0, win)
+
+#define PAB_EXT_AXI_AMAP_AXI_WIN(win) PAB_EXT_REG_ADDR(0x80a0, win)
+#define PAB_AXI_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x0ba4, win)
+#define AXI_WINDOW_ALIGN_MASK 3
+
+#define PAB_AXI_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x0ba8, win)
+#define PAB_BUS_SHIFT 24
+#define PAB_DEVICE_SHIFT 19
+#define PAB_FUNCTION_SHIFT 16
+
+#define PAB_AXI_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x0bac, win)
+#define PAB_INTP_AXI_PIO_CLASS 0x474
+
+#define GPEX_ACK_REPLAY_TO 0x438
+#define ACK_LAT_TO_VAL_MASK 0x1fff
+#define ACK_LAT_TO_VAL_SHIFT 0
+
+#define PAB_PEX_AMAP_CTRL(win) PAB_REG_ADDR(0x4ba0, win)
+#define AMAP_CTRL_EN_SHIFT 0
+#define AMAP_CTRL_TYPE_SHIFT 1
+#define AMAP_CTRL_TYPE_MASK 3
+
+#define PAB_EXT_PEX_AMAP_SIZEN(win) PAB_EXT_REG_ADDR(0xbef0, win)
+#define PAB_EXT_PEX_AMAP_AXI_WIN(win) PAB_EXT_REG_ADDR(0xb4a0, win)
+#define PAB_PEX_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x4ba4, win)
+#define PAB_PEX_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x4ba8, win)
+#define PAB_PEX_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x4bac, win)
+
+/* PPIO WINs EP mode */
+#define PAB_PEX_BAR_AMAP(func, bar) (0x1ba0 + 0x20 * func + 4 * bar)
+#define PAB_EXT_PEX_BAR_AMAP(func, bar) (0x84a0 + 0x20 * func + 4 * bar)
+#define PEX_BAR_AMAP_EN BIT(0)
+
+#define PAB_MSIX_TABLE_PBA_ACCESS 0xD000
+
+#define GPEX_BAR_ENABLE 0x4D4
+#define GPEX_BAR_SIZE_LDW 0x4D8
+#define GPEX_BAR_SIZE_UDW 0x4DC
+#define GPEX_BAR_SELECT 0x4E0
+
+#define CFG_UNCORRECTABLE_ERROR_SEVERITY 0x10c
+#define UNSUPPORTED_REQUEST_ERROR_SHIFT 20
+#define CFG_UNCORRECTABLE_ERROR_MASK 0x108
+
+/* starting offset of INTX bits in status register */
+#define PAB_INTX_START 5
+
+/* supported number of MSI interrupts */
+#define PCI_NUM_MSI 16
+
+/* MSI registers */
+#define MSI_BASE_LO_OFFSET 0x04
+#define MSI_BASE_HI_OFFSET 0x08
+#define MSI_SIZE_OFFSET 0x0c
+#define MSI_ENABLE_OFFSET 0x14
+#define MSI_STATUS_OFFSET 0x18
+#define MSI_DATA_OFFSET 0x20
+#define MSI_ADDR_L_OFFSET 0x24
+#define MSI_ADDR_H_OFFSET 0x28
+
+/* outbound and inbound window definitions */
+#define WIN_NUM_0 0
+#define WIN_NUM_1 1
+#define CFG_WINDOW_TYPE 0
+#define IO_WINDOW_TYPE 1
+#define MEM_WINDOW_TYPE 2
+#define IB_WIN_SIZE ((u64)256 * 1024 * 1024 * 1024)
+#define MAX_PIO_WINDOWS 8
+
+/* Parameters for the waiting for link up routine */
+#define LINK_WAIT_MAX_RETRIES 10
+#define LINK_WAIT_MIN 90000
+#define LINK_WAIT_MAX 100000
+
+#define PAGED_ADDR_BNDRY 0xc00
+#define OFFSET_TO_PAGE_ADDR(off) \
+ ((off & PAGE_LO_MASK) | PAGED_ADDR_BNDRY)
+#define OFFSET_TO_PAGE_IDX(off) \
+ ((off >> PAGE_SEL_OFFSET_SHIFT) & PAGE_SEL_MASK)
+
+struct mobiveil_pcie;
+struct mobiveil_pcie_ep;
+
+struct mobiveil_msi { /* MSI information */
+ struct mutex lock; /* protect bitmap variable */
+ struct irq_domain *msi_domain;
+ struct irq_domain *dev_domain;
+ phys_addr_t msi_pages_phys;
+ int num_of_vectors;
+ DECLARE_BITMAP(msi_irq_in_use, PCI_NUM_MSI);
+};
+
+struct mobiveil_rp_ops {
+ int (*interrupt_init)(struct mobiveil_pcie *pcie);
+ int (*read_other_conf)(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val);
+};
+
+struct root_port {
+ u8 root_bus_nr;
+ void __iomem *config_axi_slave_base; /* endpoint config base */
+ struct resource *ob_io_res;
+ struct mobiveil_rp_ops *ops;
+ int irq;
+ raw_spinlock_t intx_mask_lock;
+ struct irq_domain *intx_domain;
+ struct mobiveil_msi msi;
+};
+
+struct mobiveil_pab_ops {
+ int (*link_up)(struct mobiveil_pcie *pcie);
+ int (*host_init)(struct mobiveil_pcie *pcie);
+};
+
+struct mobiveil_pcie_ep_ops {
+ void (*ep_init)(struct mobiveil_pcie_ep *ep);
+ int (*raise_irq)(struct mobiveil_pcie_ep *ep, u8 func_no,
+ enum pci_epc_irq_type type, u16 interrupt_num);
+ const struct pci_epc_features* (*get_features)
+ (struct mobiveil_pcie_ep *ep);
+};
+
+struct mobiveil_pcie_ep {
+ struct pci_epc *epc;
+ const struct mobiveil_pcie_ep_ops *ops;
+ phys_addr_t phys_base;
+ size_t addr_size;
+ size_t page_size;
+ phys_addr_t *apio_addr;
+ unsigned long *apio_wins_map;
+ u32 apio_wins;
+ void __iomem *msi_mem;
+ phys_addr_t msi_mem_phys;
+ u8 bar_num;
+};
+
+struct mobiveil_pcie {
+ struct platform_device *pdev;
+ struct list_head *resources;
+ void __iomem *csr_axi_slave_base; /* PAB registers base */
+ phys_addr_t pcie_reg_base; /* Physical PCIe Controller Base */
+ void __iomem *apb_csr_base; /* MSI register base */
+ u32 apio_wins;
+ u32 ppio_wins;
+ u32 ob_wins_configured; /* configured outbound windows */
+ u32 ib_wins_configured; /* configured inbound windows */
+ const struct mobiveil_pab_ops *ops;
+ struct root_port rp;
+ struct pci_host_bridge *bridge;
+ struct mobiveil_pcie_ep ep;
+};
+
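+/* resolve the parent mobiveil_pcie from its embedded endpoint member */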
+#define to_mobiveil_pcie_from_ep(endpoint) \
+ container_of((endpoint), struct mobiveil_pcie, ep)
+
+int mobiveil_pcie_host_probe(struct mobiveil_pcie *pcie);
+int mobiveil_host_init(struct mobiveil_pcie *pcie, bool reinit);
+bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie);
+int mobiveil_bringup_link(struct mobiveil_pcie *pcie);
+void program_ob_windows(struct mobiveil_pcie *pcie, int win_num, u64 cpu_addr,
+ u64 pci_addr, u32 type, u64 size);
+void program_ib_windows(struct mobiveil_pcie *pcie, int win_num, u64 cpu_addr,
+ u64 pci_addr, u32 type, u64 size);
+u32 csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size);
+void csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off, size_t size);
+
+static inline u32 csr_readl(struct mobiveil_pcie *pcie, u32 off)
+{
+ return csr_read(pcie, off, 0x4);
+}
+
+static inline u32 csr_readw(struct mobiveil_pcie *pcie, u32 off)
+{
+ return csr_read(pcie, off, 0x2);
+}
+
+static inline u32 csr_readb(struct mobiveil_pcie *pcie, u32 off)
+{
+ return csr_read(pcie, off, 0x1);
+}
+
+static inline void csr_writel(struct mobiveil_pcie *pcie, u32 val, u32 off)
+{
+ csr_write(pcie, val, off, 0x4);
+}
+
+static inline void csr_writew(struct mobiveil_pcie *pcie, u32 val, u32 off)
+{
+ csr_write(pcie, val, off, 0x2);
+}
+
+static inline void csr_writeb(struct mobiveil_pcie *pcie, u32 val, u32 off)
+{
+ csr_write(pcie, val, off, 0x1);
+}
+
+void program_ib_windows_ep(struct mobiveil_pcie *pcie, u8 func_no,
+ int bar, u64 phys);
+void program_ob_windows_ep(struct mobiveil_pcie *pcie, u8 func_num, int win_num,
+ u64 cpu_addr, u64 pci_addr, u32 type, u64 size);
+void mobiveil_pcie_disable_ib_win_ep(struct mobiveil_pcie *pci,
+ u8 func_no, u8 bar);
+void mobiveil_pcie_disable_ob_win(struct mobiveil_pcie *pcie, int win_num);
+int mobiveil_pcie_ep_init(struct mobiveil_pcie_ep *ep);
+int mobiveil_pcie_ep_raise_legacy_irq(struct mobiveil_pcie_ep *ep, u8 func_no);
+int mobiveil_pcie_ep_raise_msi_irq(struct mobiveil_pcie_ep *ep, u8 func_no,
+ u8 interrupt_num);
+int mobiveil_pcie_ep_raise_msix_irq(struct mobiveil_pcie_ep *ep, u8 func_no,
+ u16 interrupt_num);
+void mobiveil_pcie_ep_reset_bar(struct mobiveil_pcie *pci, u8 bar);
+u8 mobiveil_pcie_ep_get_bar_num(struct mobiveil_pcie_ep *ep, u8 func_no);
+void mobiveil_pcie_enable_bridge_pio(struct mobiveil_pcie *pci);
+void mobiveil_pcie_enable_engine_apio(struct mobiveil_pcie *pci);
+void mobiveil_pcie_enable_engine_ppio(struct mobiveil_pcie *pci);
+void mobiveil_pcie_enable_msi_ep(struct mobiveil_pcie *pci);
+#endif /* _PCIE_MOBIVEIL_H */
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 1b330129089f..ec3461213c2e 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -37,6 +37,20 @@ static void release_pcie_device(struct device *dev)
kfree(to_pcie_device(dev));
}
+/**
+ * pcibios_check_service_irqs - look up service IRQs in the device tree
+ * @dev: PCI Express port to handle
+ * @irqs: Array of IRQs to populate
+ * @mask: Bitmask of port capabilities returned by get_port_device_capability()
+ *
+ * Platforms that route AER/PME etc. to dedicated interrupt pins can override
+ * this weak stub. Return value: 0 means no service IRQs were found in the
+ * device tree.
+ */
+int __weak pcibios_check_service_irqs(struct pci_dev *dev, int *irqs, int mask)
+{
+ return 0;
+}
+
/*
* Fill in *pme, *aer, *dpc with the relevant Interrupt Message Numbers if
* services are enabled in "mask". Return the number of MSI/MSI-X vectors
@@ -165,10 +179,25 @@ static int pcie_port_enable_irq_vec(struct pci_dev *dev, int *irqs, int mask)
static int pcie_init_service_irqs(struct pci_dev *dev, int *irqs, int mask)
{
int ret, i;
+ int irq = -1;
for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
irqs[i] = -1;
+	/*
+	 * Check whether the platform owns independent IRQ pins for AER/PME
+	 * etc. Such platforms provide separate AER/PME interrupts and
+	 * describe them in the device tree.
+	 */
+ ret = pcibios_check_service_irqs(dev, irqs, mask);
+ if (ret) {
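+		/* use the shared legacy IRQ for services without their own pin */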
+ if (dev->irq)
+ irq = dev->irq;
+ for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
+ if (irqs[i] == -1)
+ irqs[i] = irq;
+ return 0;
+ }
+
/*
* If we support PME but can't use MSI/MSI-X for it, we have to
* fall back to INTx or other interrupts, e.g., a system shared
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 320255e5e8f8..901310dea8d7 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1355,6 +1355,10 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID,
occur when mode detecting */
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
+/*
+ * Quirk the Broadcom CYW4356 Wi-Fi chip because its firmware does not yet
+ * support D3 mode.
+ */
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_BROADCOM, 0x43ec,
+ PCI_CLASS_NETWORK_OTHER, 8, quirk_no_ata_d3);
/*
* This was originally an Alpha-specific thing, but it really fits here.
@@ -2448,6 +2452,11 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3351, quirk_disab
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3364, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8380_0, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, 0x0761, quirk_disable_all_msi);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x43ec, quirk_disable_all_msi);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x43ef, quirk_disable_all_msi);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_MARVELL_EXT, 0x2b42, quirk_disable_all_msi);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_MARVELL_EXT, 0x2b43, quirk_disable_all_msi);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_MARVELL_EXT, 0x2b44, quirk_disable_all_msi);
/* Disable MSI on chipsets that are known to not support it */
static void quirk_disable_msi(struct pci_dev *dev)
diff --git a/include/dt-bindings/soc/imx8_hsio.h b/include/dt-bindings/soc/imx8_hsio.h
new file mode 100644
index 000000000000..3cf1056b63d7
--- /dev/null
+++ b/include/dt-bindings/soc/imx8_hsio.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2019 NXP
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DT_BINDINGS_IMX8_HSIO_H
+#define __DT_BINDINGS_IMX8_HSIO_H
+
+/*
+ * The imx8qm HSIO subsystem contains pciea, pcieb and sata modules, and
+ * can be configured in one of the following work modes:
+ * 1 - pciea 2 lanes and one sata ahci port.
+ * 2 - pciea 1 lane, pcieb 1 lane and one sata ahci port.
+ * 3 - pciea 2 lanes, pcieb 1 lane.
+ * Choose the mode that matches the actual hardware board design.
+ */
+#define PCIEAX2SATA 1
+#define PCIEAX1PCIEBX1SATA 2
+#define PCIEAX2PCIEBX1 3
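+/* e.g. "hsio-cfg = <PCIEAX1PCIEBX1SATA>;" selects work mode 2 */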
+
+#endif /* __DT_BINDINGS_IMX8_HSIO_H */
diff --git a/include/linux/pci.h b/include/linux/pci.h
index f9088c89a534..07becf3e50fa 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -2021,6 +2021,7 @@ static inline void pcibios_penalize_isa_irq(int irq, int active) {}
int pcibios_alloc_irq(struct pci_dev *dev);
void pcibios_free_irq(struct pci_dev *dev);
resource_size_t pcibios_default_alignment(void);
+int pcibios_check_service_irqs(struct pci_dev *dev, int *irqs, int mask);
#ifdef CONFIG_HIBERNATE_CALLBACKS
extern struct dev_pm_ops pcibios_pm_ops;