#define to_imx6_pcie(x) container_of(x, struct imx6_pcie, pp)
+/*
+ * Defaults for the RC's reserved DDR test region, used to verify EP
+ * mode.  Both values are adjustable at runtime through the
+ * rc_memw_start_set / rc_memw_size_set sysfs attributes below.
+ * For reference, the 1G DDR on the SD boards spans
+ * 0x1000_0000 ~ 0x4FFF_FFFF, so the default start sits inside it.
+ */
+static u32 rc_ddr_test_region = 0x40000000;
+static u32 test_region_size = SZ_2M;
+
struct imx6_pcie {
int reset_gpio;
int power_on_gpio;
goto err_pcie_ref;
}
- ret = clk_prepare_enable(imx6_pcie->lvds_gate);
- if (ret) {
- dev_err(pp->dev, "unable to enable lvds_gate\n");
- goto err_lvds_gate;
+ if (!IS_ENABLED(CONFIG_EP_MODE_IN_EP_RC_SYS)
+ && !IS_ENABLED(CONFIG_RC_MODE_IN_EP_RC_SYS)) {
+ ret = clk_prepare_enable(imx6_pcie->lvds_gate);
+ if (ret) {
+ dev_err(pp->dev, "unable to enable lvds_gate\n");
+ goto err_lvds_gate;
+ }
}
ret = clk_prepare_enable(imx6_pcie->pcie_axi);
IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
/* configure constant input signal to the pcie ctrl and phy */
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12);
+ if (IS_ENABLED(CONFIG_EP_MODE_IN_EP_RC_SYS))
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_DEVICE_TYPE,
+ PCI_EXP_TYPE_ENDPOINT << 12);
+ else
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_DEVICE_TYPE,
+ PCI_EXP_TYPE_ROOT_PORT << 12);
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
return 0;
}
+/*
+ * Program outbound iATU region 0 so that EP-side accesses to the local
+ * PCI window at 0x01000000 are translated to the RC's reserved DDR
+ * test region (rc_ddr_test_region, test_region_size bytes).  Called at
+ * probe time and again whenever the sysfs attributes change the window.
+ */
+static void imx_pcie_regions_setup(struct device *dev)
+{
+	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+	struct pcie_port *pp = &imx6_pcie->pp;
+
+	/*
+	 * region0 outbound used to access RC's reserved ddr memory
+	 */
+	writel(0, pp->dbi_base + PCIE_ATU_VIEWPORT);
+	writel(0x01000000, pp->dbi_base + PCIE_ATU_LOWER_BASE);
+	writel(0, pp->dbi_base + PCIE_ATU_UPPER_BASE);
+	/*
+	 * The ATU limit register holds the *last* byte address of the
+	 * window (inclusive), so program base + size - 1, not base + size.
+	 */
+	writel(0x01000000 + test_region_size - 1,
+			pp->dbi_base + PCIE_ATU_LIMIT);
+
+	writel(rc_ddr_test_region,
+			pp->dbi_base + PCIE_ATU_LOWER_TARGET);
+	writel(0, pp->dbi_base + PCIE_ATU_UPPER_TARGET);
+	writel(PCIE_ATU_TYPE_MEM, pp->dbi_base + PCIE_ATU_CR1);
+	writel(PCIE_ATU_ENABLE, pp->dbi_base + PCIE_ATU_CR2);
+}
+
+/*
+ * sysfs show: report the current start address and size of the RC's
+ * reserved DDR test window (both in hex).
+ */
+static ssize_t imx_pcie_rc_memw_info(struct device *dev,
+		struct device_attribute *devattr, char *buf)
+{
+	return sprintf(buf,
+		"imx-pcie-rc-memw-info start 0x%08x, size 0x%08x\n",
+		rc_ddr_test_region, test_region_size);
+}
+
+/*
+ * sysfs store: set the start address of the RC's reserved DDR test
+ * window and re-program the iATU if it changed.  Input is a hex
+ * address which must lie within DDR (>= 0x10000000 on these boards).
+ */
+static ssize_t
+imx_pcie_rc_memw_start(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	u32 memw_start;
+
+	/* Reject unparsable input instead of using an uninitialized value */
+	if (sscanf(buf, "%x\n", &memw_start) != 1)
+		return -EINVAL;
+
+	if (memw_start < 0x10000000) {
+		dev_err(dev, "Invalid memory start address.\n");
+		dev_info(dev, "For example: echo 0x41000000 > /sys/...");
+		/* -1 would be -EPERM; -EINVAL is the correct error here */
+		return -EINVAL;
+	}
+
+	if (rc_ddr_test_region != memw_start) {
+		rc_ddr_test_region = memw_start;
+		/* Re-setup the iATU */
+		imx_pcie_regions_setup(dev);
+	}
+
+	return count;
+}
+
+/*
+ * sysfs store: set the size of the RC's reserved DDR test window and
+ * re-program the iATU if it changed.  Input is a hex byte count in
+ * the range [SZ_64K, SZ_16M - SZ_16K].
+ */
+static ssize_t
+imx_pcie_rc_memw_size(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	u32 memw_size;
+
+	/* Reject unparsable input instead of using an uninitialized value */
+	if (sscanf(buf, "%x\n", &memw_size) != 1)
+		return -EINVAL;
+
+	if ((memw_size > (SZ_16M - SZ_16K)) || (memw_size < SZ_64K)) {
+		dev_err(dev, "Invalid, should be [SZ_64K,SZ_16M - SZ_16KB].\n");
+		dev_info(dev, "For example: echo 0x800000 > /sys/...");
+		/* -1 would be -EPERM; -EINVAL is the correct error here */
+		return -EINVAL;
+	}
+
+	if (test_region_size != memw_size) {
+		test_region_size = memw_size;
+		/* Re-setup the iATU */
+		imx_pcie_regions_setup(dev);
+	}
+
+	return count;
+}
+
+static DEVICE_ATTR(rc_memw_info, S_IRUGO, imx_pcie_rc_memw_info, NULL);
+/*
+ * S_IWUSR, not S_IWUGO: world-writable sysfs files let any user move
+ * the DDR window and are flagged as a security problem by checkpatch.
+ */
+static DEVICE_ATTR(rc_memw_start_set, S_IWUSR, NULL, imx_pcie_rc_memw_start);
+static DEVICE_ATTR(rc_memw_size_set, S_IWUSR, NULL, imx_pcie_rc_memw_size);
+
+static struct attribute *imx_pcie_attrs[] = {
+	/*
+	 * The start address, and the limitation (64KB ~ (16MB - 16KB))
+	 * of the ddr mem window reserved by RC, and used for EP to access.
+	 * BTW, these attrs are only configured at EP side.
+	 */
+	&dev_attr_rc_memw_info.attr,
+	&dev_attr_rc_memw_start_set.attr,
+	&dev_attr_rc_memw_size_set.attr,
+	NULL
+};
+
+static const struct attribute_group imx_pcie_attrgroup = {
+	.attrs = imx_pcie_attrs,
+};
+
+
static int __init imx6_pcie_probe(struct platform_device *pdev)
{
struct imx6_pcie *imx6_pcie;
struct pcie_port *pp;
struct device_node *np = pdev->dev.of_node;
struct resource *dbi_base;
- int ret;
+ int ret, i;
+ void *test_reg1, *test_reg2;
+ void __iomem *pcie_arb_base_addr;
+ struct timeval tv1, tv2, tv3;
+ u32 tv_count1, tv_count2;
imx6_pcie = devm_kzalloc(&pdev->dev, sizeof(*imx6_pcie), GFP_KERNEL);
if (!imx6_pcie)
pp = &imx6_pcie->pp;
pp->dev = &pdev->dev;
+ if (IS_ENABLED(CONFIG_EP_MODE_IN_EP_RC_SYS)) {
+ /* add attributes for device */
+ ret = sysfs_create_group(&pdev->dev.kobj, &imx_pcie_attrgroup);
+ if (ret)
+ return -EINVAL;
+ }
+
/* Added for PCI abort handling */
hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0,
"imprecise external abort");
goto err;
}
- ret = imx6_add_pcie_port(pp, pdev);
- if (ret < 0)
- goto err;
+ if (IS_ENABLED(CONFIG_EP_MODE_IN_EP_RC_SYS)) {
+ if (IS_ENABLED(CONFIG_EP_SELF_IO_TEST)) {
+ /* Prepare the test regions and data */
+ test_reg1 = devm_kzalloc(&pdev->dev,
+ test_region_size, GFP_KERNEL);
+ if (!test_reg1) {
+ pr_err("pcie ep: can't alloc the test reg1.\n");
+ ret = PTR_ERR(test_reg1);
+ goto err;
+ }
+
+ test_reg2 = devm_kzalloc(&pdev->dev,
+ test_region_size, GFP_KERNEL);
+ if (!test_reg2) {
+ pr_err("pcie ep: can't alloc the test reg2.\n");
+ ret = PTR_ERR(test_reg1);
+ goto err;
+ }
+
+ pcie_arb_base_addr = ioremap_cached(0x01000000,
+ test_region_size);
+
+ if (!pcie_arb_base_addr) {
+ pr_err("error with ioremap in ep selftest\n");
+ ret = PTR_ERR(pcie_arb_base_addr);
+ goto err;
+ }
+
+ for (i = 0; i < test_region_size; i = i + 4) {
+ writel(0xE6600D00 + i, test_reg1 + i);
+ writel(0xDEADBEAF, test_reg2 + i);
+ }
+ }
+
+ imx6_pcie_init_phy(pp);
+
+ imx6_pcie_deassert_core_reset(pp);
+
+ /* assert LTSSM enable */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
+
+
+ dev_info(&pdev->dev, "PCIe EP: waiting for link up...\n");
+
+ platform_set_drvdata(pdev, imx6_pcie);
+ /* link is indicated by the bit4 of DB_R1 register */
+ do {
+ usleep_range(10, 20);
+ } while ((readl(pp->dbi_base + PCIE_PHY_DEBUG_R1) & 0x10) == 0);
+
+ /* CMD reg:I/O space, MEM space, and Bus Master Enable */
+ writel(readl(pp->dbi_base + PCI_COMMAND)
+ | PCI_COMMAND_IO
+ | PCI_COMMAND_MEMORY
+ | PCI_COMMAND_MASTER,
+ pp->dbi_base + PCI_COMMAND);
+
+ /*
+ * configure the class_rev(emaluate one memory ram ep device),
+ * bar0 and bar1 of ep
+ */
+ writel(0xdeadbeaf, pp->dbi_base + PCI_VENDOR_ID);
+ writel(readl(pp->dbi_base + PCI_CLASS_REVISION)
+ | (PCI_CLASS_MEMORY_RAM << 16),
+ pp->dbi_base + PCI_CLASS_REVISION);
+ writel(0xdeadbeaf, pp->dbi_base
+ + PCI_SUBSYSTEM_VENDOR_ID);
+
+ /* 32bit none-prefetchable 8M bytes memory on bar0 */
+ writel(0x0, pp->dbi_base + PCI_BASE_ADDRESS_0);
+ writel(SZ_8M - 1, pp->dbi_base + (1 << 12)
+ + PCI_BASE_ADDRESS_0);
+
+ /* None used bar1 */
+ writel(0x0, pp->dbi_base + PCI_BASE_ADDRESS_1);
+ writel(0, pp->dbi_base + (1 << 12) + PCI_BASE_ADDRESS_1);
+
+ /* 4K bytes IO on bar2 */
+ writel(0x1, pp->dbi_base + PCI_BASE_ADDRESS_2);
+ writel(SZ_4K - 1, pp->dbi_base + (1 << 12) +
+ PCI_BASE_ADDRESS_2);
+
+ /*
+ * 32bit prefetchable 1M bytes memory on bar3
+ * FIXME BAR MASK3 is not changable, the size
+ * is fixed to 256 bytes.
+ */
+ writel(0x8, pp->dbi_base + PCI_BASE_ADDRESS_3);
+ writel(SZ_1M - 1, pp->dbi_base + (1 << 12)
+ + PCI_BASE_ADDRESS_3);
+
+ /*
+ * 64bit prefetchable 1M bytes memory on bar4-5.
+ * FIXME BAR4,5 are not enabled yet
+ */
+ writel(0xc, pp->dbi_base + PCI_BASE_ADDRESS_4);
+ writel(SZ_1M - 1, pp->dbi_base + (1 << 12)
+ + PCI_BASE_ADDRESS_4);
+ writel(0, pp->dbi_base + (1 << 12) + PCI_BASE_ADDRESS_5);
+
+ /* Re-setup the iATU */
+ imx_pcie_regions_setup(&pdev->dev);
+
+ if (IS_ENABLED(CONFIG_EP_SELF_IO_TEST)) {
+ /* PCIe EP start the data transfer after link up */
+ pr_info("pcie ep: Starting data transfer...\n");
+ do_gettimeofday(&tv1);
+
+ memcpy((unsigned long *)pcie_arb_base_addr,
+ (unsigned long *)test_reg1,
+ test_region_size);
+
+ do_gettimeofday(&tv2);
+
+ memcpy((unsigned long *)test_reg2,
+ (unsigned long *)pcie_arb_base_addr,
+ test_region_size);
+
+ do_gettimeofday(&tv3);
+
+ if (memcmp(test_reg2, test_reg1, test_region_size) == 0) {
+ tv_count1 = (tv2.tv_sec - tv1.tv_sec)
+ * USEC_PER_SEC
+ + tv2.tv_usec - tv1.tv_usec;
+ tv_count2 = (tv3.tv_sec - tv2.tv_sec)
+ * USEC_PER_SEC
+ + tv3.tv_usec - tv2.tv_usec;
+
+ pr_info("pcie ep: Data transfer is successful."
+ " tv_count1 %dus,"
+ " tv_count2 %dus.\n",
+ tv_count1, tv_count2);
+ pr_info("pcie ep: Data write speed:%ldMB/s.\n",
+ ((test_region_size/1024)
+ * MSEC_PER_SEC)
+ /(tv_count1));
+ pr_info("pcie ep: Data read speed:%ldMB/s.\n",
+ ((test_region_size/1024)
+ * MSEC_PER_SEC)
+ /(tv_count2));
+ } else {
+ pr_info("pcie ep: Data transfer is failed.\n");
+ }
+ }
+ } else {
+ ret = imx6_add_pcie_port(pp, pdev);
+ if (ret < 0)
+ goto err;
- platform_set_drvdata(pdev, imx6_pcie);
+ platform_set_drvdata(pdev, imx6_pcie);
+ }
return 0;
err: