/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>

#include "pcie-designware.h"
static struct pci_ops dw_pcie_ops;
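/*
 * Accesses to the host's own config space go straight through the DBI
 * (data bus interface) registers unless the platform glue overrides them
 * with its own rd_own_conf/wr_own_conf callbacks.
 */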
static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
			       u32 *val)
{
	struct dw_pcie *pci;

	if (pp->ops->rd_own_conf)
		return pp->ops->rd_own_conf(pp, where, size, val);

	pci = to_dw_pcie_from_pp(pp);
	return dw_pcie_read(pci->dbi_base + where, size, val);
}
static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
			       u32 val)
{
	struct dw_pcie *pci;

	if (pp->ops->wr_own_conf)
		return pp->ops->wr_own_conf(pp, where, size, val);

	pci = to_dw_pcie_from_pp(pp);
	return dw_pcie_write(pci->dbi_base + where, size, val);
}
static struct irq_chip dw_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};
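/*
 * MSI interrupt handler. Each of the MAX_MSI_CTRLS register banks serves
 * 32 vectors; the per-bank ENABLE/MASK/STATUS registers are consecutive
 * 32-bit registers, hence the 12-byte (i * 12) stride used below.
 */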
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
	unsigned long val;
	int i, pos, irq;
	irqreturn_t ret = IRQ_NONE;

	for (i = 0; i < MAX_MSI_CTRLS; i++) {
		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
				    (u32 *)&val);
		if (!val)
			continue;

		ret = IRQ_HANDLED;
		pos = 0;
		while ((pos = find_next_bit(&val, 32, pos)) != 32) {
			irq = irq_find_mapping(pp->irq_domain, i * 32 + pos);
			dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12,
					    4, 1 << pos);
			generic_handle_irq(irq);
			pos++;
		}
	}

	return ret;
}
void dw_pcie_msi_init(struct pcie_port *pp)
{
	u64 msi_target;

	pp->msi_data = __get_free_pages(GFP_KERNEL, 0);
	msi_target = virt_to_phys((void *)pp->msi_data);

	/* program the msi_data */
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
			    (u32)(msi_target & 0xffffffff));
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
			    (u32)(msi_target >> 32 & 0xffffffff));
}
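/*
 * Note that the page allocated above is never touched by the CPU again:
 * its physical address only serves as the MSI target, and the controller
 * decodes inbound writes to that address into interrupts rather than
 * forwarding them to memory.
 */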
static void dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
{
	unsigned int res, bit, val;

	res = (irq / 32) * 12;
	bit = irq % 32;
	dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
	val &= ~(1 << bit);
	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
}
static void clear_irq_range(struct pcie_port *pp, unsigned int irq_base,
			    unsigned int nvec, unsigned int pos)
{
	unsigned int i;

	for (i = 0; i < nvec; i++) {
		irq_set_msi_desc_off(irq_base, i, NULL);
		/* Disable corresponding interrupt on MSI controller */
		if (pp->ops->msi_clear_irq)
			pp->ops->msi_clear_irq(pp, pos + i);
		else
			dw_pcie_msi_clear_irq(pp, pos + i);
	}

	bitmap_release_region(pp->msi_irq_in_use, pos, order_base_2(nvec));
}
static void dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
{
	unsigned int res, bit, val;

	res = (irq / 32) * 12;
	bit = irq % 32;
	dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
	val |= 1 << bit;
	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
}
static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
{
	int irq, pos0, i;
	struct pcie_port *pp;

	pp = (struct pcie_port *)msi_desc_to_pci_sysdata(desc);
	pos0 = bitmap_find_free_region(pp->msi_irq_in_use, MAX_MSI_IRQS,
				       order_base_2(no_irqs));
	if (pos0 < 0)
		goto no_valid_irq;

	irq = irq_find_mapping(pp->irq_domain, pos0);
	if (!irq)
		goto no_valid_irq;

	/*
	 * irq_create_mapping (called from dw_pcie_host_init) pre-allocates
	 * descs so there is no need to allocate descs here. We can therefore
	 * assume that if irq_find_mapping above returns non-zero, then the
	 * descs are also successfully allocated.
	 */

	for (i = 0; i < no_irqs; i++) {
		if (irq_set_msi_desc_off(irq, i, desc) != 0) {
			clear_irq_range(pp, irq, i, pos0);
			goto no_valid_irq;
		}
		/* Enable corresponding interrupt in MSI interrupt controller */
		if (pp->ops->msi_set_irq)
			pp->ops->msi_set_irq(pp, pos0 + i);
		else
			dw_pcie_msi_set_irq(pp, pos0 + i);
	}

	*pos = pos0;
	desc->nvec_used = no_irqs;
	desc->msi_attrib.multiple = order_base_2(no_irqs);

	return irq;

no_valid_irq:
	*pos = pos0;
	return -ENOSPC;
}
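/*
 * Compose and write the MSI message for @irq: the address is the MSI
 * target programmed in dw_pcie_msi_init() (or a platform-provided one)
 * and the data is the hwirq position; for multi-MSI the endpoint ORs its
 * local vector number into the low bits of this data value.
 */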
static void dw_msi_setup_msg(struct pcie_port *pp, unsigned int irq, u32 pos)
{
	struct msi_msg msg;
	u64 msi_target;

	if (pp->ops->get_msi_addr)
		msi_target = pp->ops->get_msi_addr(pp);
	else
		msi_target = virt_to_phys((void *)pp->msi_data);

	msg.address_lo = (u32)(msi_target & 0xffffffff);
	msg.address_hi = (u32)(msi_target >> 32 & 0xffffffff);

	if (pp->ops->get_msi_data)
		msg.data = pp->ops->get_msi_data(pp, pos);
	else
		msg.data = pos;

	pci_write_msi_msg(irq, &msg);
}
static int dw_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
			    struct msi_desc *desc)
{
	int irq, pos;
	struct pcie_port *pp = pdev->bus->sysdata;

	if (desc->msi_attrib.is_msix)
		return -EINVAL;

	irq = assign_irq(1, desc, &pos);
	if (irq < 0)
		return irq;

	dw_msi_setup_msg(pp, irq, pos);

	return 0;
}
static int dw_msi_setup_irqs(struct msi_controller *chip, struct pci_dev *pdev,
			     int nvec, int type)
{
#ifdef CONFIG_PCI_MSI
	int irq, pos;
	struct msi_desc *desc;
	struct pcie_port *pp = pdev->bus->sysdata;

	/* MSI-X interrupts are not supported */
	if (type == PCI_CAP_ID_MSIX)
		return -EINVAL;

	WARN_ON(!list_is_singular(&pdev->dev.msi_list));
	desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list);

	irq = assign_irq(nvec, desc, &pos);
	if (irq < 0)
		return irq;

	dw_msi_setup_msg(pp, irq, pos);

	return 0;
#else
	return -EINVAL;
#endif
}
static void dw_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct msi_desc *msi = irq_data_get_msi_desc(data);
	struct pcie_port *pp = (struct pcie_port *)msi_desc_to_pci_sysdata(msi);

	clear_irq_range(pp, irq, 1, data->hwirq);
}
static struct msi_controller dw_pcie_msi_chip = {
	.setup_irq = dw_msi_setup_irq,
	.setup_irqs = dw_msi_setup_irqs,
	.teardown_irq = dw_msi_teardown_irq,
};
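/*
 * Legacy msi_controller interface: pci_scan_root_bus_msi() attaches this
 * chip to the root bus so the PCI core routes MSI setup/teardown here.
 */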
static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
			   irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dw_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}
static const struct irq_domain_ops msi_domain_ops = {
	.map = dw_pcie_msi_map,
};
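/**
 * dw_pcie_host_init - initialize and scan a DesignWare PCIe host bridge
 * @pp: host port to initialize
 *
 * Maps the "config" region, collects host bridge resources from DT, sets
 * up the MSI IRQ domain (unless the platform supplies ->msi_host_init)
 * and scans the root bus. Platform glue drivers call this from their
 * probe path after filling in pp->ops, roughly (hypothetical ops name):
 *
 *	pp->ops = &my_plat_pcie_host_ops;
 *	ret = dw_pcie_host_init(pp);
 */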
int dw_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);
	struct pci_bus *bus, *child;
	struct resource *cfg_res;
	int i, ret;
	LIST_HEAD(res);
	struct resource_entry *win, *tmp;

	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (cfg_res) {
		pp->cfg0_size = resource_size(cfg_res) / 2;
		pp->cfg1_size = resource_size(cfg_res) / 2;
		pp->cfg0_base = cfg_res->start;
		pp->cfg1_base = cfg_res->start + pp->cfg0_size;
	} else if (!pp->va_cfg0_base) {
		dev_err(dev, "missing *config* reg space\n");
	}
	ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &pp->io_base);
	if (ret)
		return ret;

	ret = devm_request_pci_bus_resources(dev, &res);
	if (ret)
		goto error;
	/* Get the I/O and memory ranges from DT */
	resource_list_for_each_entry_safe(win, tmp, &res) {
		switch (resource_type(win->res)) {
		case IORESOURCE_IO:
			ret = pci_remap_iospace(win->res, pp->io_base);
			if (ret) {
				dev_warn(dev, "error %d: failed to map resource %pR\n",
					 ret, win->res);
				resource_list_destroy_entry(win);
			} else {
				pp->io = win->res;
				pp->io->name = "I/O";
				pp->io_size = resource_size(pp->io);
				pp->io_bus_addr = pp->io->start - win->offset;
			}
			break;
		case IORESOURCE_MEM:
			pp->mem = win->res;
			pp->mem->name = "MEM";
			pp->mem_size = resource_size(pp->mem);
			pp->mem_bus_addr = pp->mem->start - win->offset;
			break;
		case 0:
			pp->cfg = win->res;
			pp->cfg0_size = resource_size(pp->cfg) / 2;
			pp->cfg1_size = resource_size(pp->cfg) / 2;
			pp->cfg0_base = pp->cfg->start;
			pp->cfg1_base = pp->cfg->start + pp->cfg0_size;
			break;
		case IORESOURCE_BUS:
			pp->busn = win->res;
			break;
		}
	}
	if (!pci->dbi_base) {
		pci->dbi_base = devm_ioremap(dev, pp->cfg->start,
					     resource_size(pp->cfg));
		if (!pci->dbi_base) {
			dev_err(dev, "error with ioremap\n");
			ret = -ENOMEM;
			goto error;
		}
	}

	pp->mem_base = pp->mem->start;

	if (!pp->va_cfg0_base) {
		pp->va_cfg0_base = devm_ioremap(dev, pp->cfg0_base,
						pp->cfg0_size);
		if (!pp->va_cfg0_base) {
			dev_err(dev, "error with ioremap\n");
			ret = -ENOMEM;
			goto error;
		}
	}

	if (!pp->va_cfg1_base) {
		pp->va_cfg1_base = devm_ioremap(dev, pp->cfg1_base,
						pp->cfg1_size);
		if (!pp->va_cfg1_base) {
			dev_err(dev, "error with ioremap\n");
			ret = -ENOMEM;
			goto error;
		}
	}

	ret = of_property_read_u32(np, "num-viewport", &pci->num_viewport);
	if (ret)
		pci->num_viewport = 2;
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		if (!pp->ops->msi_host_init) {
			pp->irq_domain = irq_domain_add_linear(dev->of_node,
						MAX_MSI_IRQS, &msi_domain_ops,
						&dw_pcie_msi_chip);
			if (!pp->irq_domain) {
				dev_err(dev, "irq domain init failed\n");
				ret = -ENXIO;
				goto error;
			}

			for (i = 0; i < MAX_MSI_IRQS; i++)
				irq_create_mapping(pp->irq_domain, i);
		} else {
			ret = pp->ops->msi_host_init(pp, &dw_pcie_msi_chip);
			if (ret < 0)
				goto error;
		}
	}
	if (pp->ops->host_init)
		pp->ops->host_init(pp);

	pp->root_bus_nr = pp->busn->start;
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		bus = pci_scan_root_bus_msi(dev, pp->root_bus_nr,
					    &dw_pcie_ops, pp, &res,
					    &dw_pcie_msi_chip);
		dw_pcie_msi_chip.dev = dev;
	} else {
		bus = pci_scan_root_bus(dev, pp->root_bus_nr, &dw_pcie_ops,
					pp, &res);
	}
	if (!bus) {
		ret = -ENOMEM;
		goto error;
	}

	if (pp->ops->scan_bus)
		pp->ops->scan_bus(pp);

#ifdef CONFIG_ARM
	/* support old dtbs that incorrectly describe IRQs */
	pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
#endif

	pci_bus_size_bridges(bus);
	pci_bus_assign_resources(bus);

	list_for_each_entry(child, &bus->children, node)
		pcie_bus_configure_settings(child);

	pci_bus_add_devices(bus);

	return 0;

error:
	pci_free_resource_list(&res);
	return ret;
}
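/*
 * Config accesses to devices below the root bus are tunnelled through an
 * iATU viewport that is retargeted on every access: type 0 (CFG0) for the
 * bus immediately below the root port, type 1 (CFG1) for buses further
 * downstream. With only two viewports the config window shares an index
 * with I/O space, so it is restored to PCIE_ATU_TYPE_IO after each access.
 */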
static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 *val)
{
	int ret, type;
	u32 busdev, cfg_size;
	u64 cpu_addr;
	void __iomem *va_cfg_base;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	if (pp->ops->rd_other_conf)
		return pp->ops->rd_other_conf(pp, bus, devfn, where, size, val);

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (bus->parent->number == pp->root_bus_nr) {
		type = PCIE_ATU_TYPE_CFG0;
		cpu_addr = pp->cfg0_base;
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
	} else {
		type = PCIE_ATU_TYPE_CFG1;
		cpu_addr = pp->cfg1_base;
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
	}

	dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
				  type, cpu_addr, busdev, cfg_size);
	ret = dw_pcie_read(va_cfg_base + where, size, val);
	if (pci->num_viewport <= 2)
		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}
static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 val)
{
	int ret, type;
	u32 busdev, cfg_size;
	u64 cpu_addr;
	void __iomem *va_cfg_base;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	if (pp->ops->wr_other_conf)
		return pp->ops->wr_other_conf(pp, bus, devfn, where, size, val);

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (bus->parent->number == pp->root_bus_nr) {
		type = PCIE_ATU_TYPE_CFG0;
		cpu_addr = pp->cfg0_base;
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
	} else {
		type = PCIE_ATU_TYPE_CFG1;
		cpu_addr = pp->cfg1_base;
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
	}

	dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
				  type, cpu_addr, busdev, cfg_size);
	ret = dw_pcie_write(va_cfg_base + where, size, val);
	if (pci->num_viewport <= 2)
		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}
static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus,
				int dev)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	/* If there is no link, then there is no device */
	if (bus->number != pp->root_bus_nr) {
		if (!dw_pcie_link_up(pci))
			return 0;
	}

	/* access only one slot on each root port */
	if (bus->number == pp->root_bus_nr && dev > 0)
		return 0;

	return 1;
}
static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
			   int size, u32 *val)
{
	struct pcie_port *pp = bus->sysdata;

	if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) {
		*val = 0xffffffff;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	if (bus->number == pp->root_bus_nr)
		return dw_pcie_rd_own_conf(pp, where, size, val);

	return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val);
}
static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
			   int where, int size, u32 val)
{
	struct pcie_port *pp = bus->sysdata;

	if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn)))
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (bus->number == pp->root_bus_nr)
		return dw_pcie_wr_own_conf(pp, where, size, val);

	return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val);
}
static struct pci_ops dw_pcie_ops = {
	.read = dw_pcie_rd_conf,
	.write = dw_pcie_wr_conf,
};
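/*
 * Newer DWC cores expose "unrolled" per-region iATU register blocks in
 * place of the single viewport-selected register set; in that mode the
 * legacy PCIE_ATU_VIEWPORT register is not implemented and reads back as
 * all ones, which is what this probe relies on.
 */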
static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
{
	u32 val;

	val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
	if (val == 0xffffffff)
		return 1;

	return 0;
}
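/*
 * Basic root-complex setup: RC BARs, interrupt pin, bus numbers, command
 * register, outbound iATU windows and the bridge class code. Finally,
 * PORT_LOGIC_SPEED_CHANGE directs the LTSSM to retrain the link to the
 * highest speed supported by both link partners.
 */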
void dw_pcie_setup_rc(struct pcie_port *pp)
{
	u32 val;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	dw_pcie_setup(pci);

	/* setup RC BARs */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);

	/* setup interrupt pins */
	val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);

	/* setup bus numbers */
	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
	val &= 0xff000000;
	val |= 0x00ff0100;
	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);

	/* setup command register */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
		PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	/*
	 * If the platform provides ->rd_other_conf, it means the platform
	 * uses its own address translation component rather than ATU, so
	 * we should not program the ATU here.
	 */
	if (!pp->ops->rd_other_conf) {
		/* get iATU unroll support */
		pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci);
		dev_dbg(pci->dev, "iATU unroll: %s\n",
			pci->iatu_unroll_enabled ? "enabled" : "disabled");

		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0,
					  PCIE_ATU_TYPE_MEM, pp->mem_base,
					  pp->mem_bus_addr, pp->mem_size);
		if (pci->num_viewport > 2)
			dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX2,
						  PCIE_ATU_TYPE_IO, pp->io_base,
						  pp->io_bus_addr, pp->io_size);
	}

	dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);

	/* program correct class for RC */
	dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);

	dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
}