#include <asm/cacheflush.h>
#include <asm/pgtable.h>
-#include <mach/sysmmu.h>
-
/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
static unsigned long *page_entry(unsigned long *sent, unsigned long iova)
{
- return (unsigned long *)__va(lv2table_base(sent)) + lv2ent_offset(iova);
+ return (unsigned long *)phys_to_virt(
+ lv2table_base(sent)) + lv2ent_offset(iova);
}
enum exynos_sysmmu_inttype {
* translated. This is 0 if @itype is SYSMMU_BUSERROR.
*/
typedef int (*sysmmu_fault_handler_t)(enum exynos_sysmmu_inttype itype,
- unsigned long pgtable_base, unsigned long fault_addr);
+ phys_addr_t pgtable_base, unsigned long fault_addr);
static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
REG_PAGE_FAULT_ADDR,
struct device *sysmmu; /* System MMU's device descriptor */
struct device *dev; /* Owner of system MMU */
char *dbgname;
- int nsfrs;
- void __iomem **sfrbases;
- struct clk *clk[2];
+ void __iomem *sfrbase;
+ struct clk *clk;
int activations;
rwlock_t lock;
struct iommu_domain *domain;
sysmmu_fault_handler_t fault_handler;
- unsigned long pgtable;
+ phys_addr_t pgtable;
};
static bool set_sysmmu_active(struct sysmmu_drvdata *data)
}
static int default_fault_handler(enum exynos_sysmmu_inttype itype,
- unsigned long pgtable_base, unsigned long fault_addr)
+ phys_addr_t pgtable_base, unsigned long fault_addr)
{
unsigned long *ent;
if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
itype = SYSMMU_FAULT_UNKNOWN;
- pr_err("%s occurred at 0x%lx(Page table base: 0x%lx)\n",
- sysmmu_fault_name[itype], fault_addr, pgtable_base);
+ pr_err("%s occurred at 0x%lx(Page table base: %pa)\n",
+ sysmmu_fault_name[itype], fault_addr, &pgtable_base);
- ent = section_entry(__va(pgtable_base), fault_addr);
+ ent = section_entry(phys_to_virt(pgtable_base), fault_addr);
pr_err("\tLv1 entry: 0x%lx\n", *ent);
if (lv1ent_page(ent)) {
{
/* SYSMMU is in blocked state when an interrupt occurs. */
struct sysmmu_drvdata *data = dev_id;
- struct resource *irqres;
- struct platform_device *pdev;
enum exynos_sysmmu_inttype itype;
unsigned long addr = -1;
-
- int i, ret = -ENOSYS;
+ int ret = -ENOSYS;
read_lock(&data->lock);
WARN_ON(!is_sysmmu_active(data));
- pdev = to_platform_device(data->sysmmu);
- for (i = 0; i < (pdev->num_resources / 2); i++) {
- irqres = platform_get_resource(pdev, IORESOURCE_IRQ, i);
- if (irqres && ((int)irqres->start == irq))
- break;
- }
-
- if (i == pdev->num_resources) {
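+ /* The index of the lowest pending bit in REG_INT_STATUS is the fault type */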
+ itype = (enum exynos_sysmmu_inttype)
+ __ffs(__raw_readl(data->sfrbase + REG_INT_STATUS));
+ if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
itype = SYSMMU_FAULT_UNKNOWN;
- } else {
- itype = (enum exynos_sysmmu_inttype)
- __ffs(__raw_readl(data->sfrbases[i] + REG_INT_STATUS));
- if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
- itype = SYSMMU_FAULT_UNKNOWN;
- else
- addr = __raw_readl(
- data->sfrbases[i] + fault_reg_offset[itype]);
- }
+ else
+ addr = __raw_readl(data->sfrbase + fault_reg_offset[itype]);
if (data->domain)
- ret = report_iommu_fault(data->domain, data->dev,
- addr, itype);
+ ret = report_iommu_fault(data->domain, data->dev, addr, itype);
if ((ret == -ENOSYS) && data->fault_handler) {
- unsigned long base = data->pgtable;
+ phys_addr_t base = data->pgtable;
if (itype != SYSMMU_FAULT_UNKNOWN)
- base = __raw_readl(
- data->sfrbases[i] + REG_PT_BASE_ADDR);
+ base = __raw_readl(data->sfrbase + REG_PT_BASE_ADDR);
ret = data->fault_handler(itype, base, addr);
}
if (!ret && (itype != SYSMMU_FAULT_UNKNOWN))
- __raw_writel(1 << itype, data->sfrbases[i] + REG_INT_CLEAR);
+ __raw_writel(1 << itype, data->sfrbase + REG_INT_CLEAR);
else
dev_dbg(data->sysmmu, "(%s) %s is not handled.\n",
data->dbgname, sysmmu_fault_name[itype]);
if (itype != SYSMMU_FAULT_UNKNOWN)
- sysmmu_unblock(data->sfrbases[i]);
+ sysmmu_unblock(data->sfrbase);
read_unlock(&data->lock);
{
unsigned long flags;
bool disabled = false;
- int i;
write_lock_irqsave(&data->lock, flags);
if (!set_sysmmu_inactive(data))
goto finish;
- for (i = 0; i < data->nsfrs; i++)
- __raw_writel(CTRL_DISABLE, data->sfrbases[i] + REG_MMU_CTRL);
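+ /* Disable address translation before gating the clock */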
+ __raw_writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
- if (data->clk[1])
- clk_disable(data->clk[1]);
- if (data->clk[0])
- clk_disable(data->clk[0]);
+ if (!IS_ERR(data->clk))
+ clk_disable(data->clk);
disabled = true;
data->pgtable = 0;
static int __exynos_sysmmu_enable(struct sysmmu_drvdata *data,
- unsigned long pgtable, struct iommu_domain *domain)
+ phys_addr_t pgtable, struct iommu_domain *domain)
{
- int i, ret = 0;
+ int ret = 0;
unsigned long flags;
write_lock_irqsave(&data->lock, flags);
goto finish;
}
- if (data->clk[0])
- clk_enable(data->clk[0]);
- if (data->clk[1])
- clk_enable(data->clk[1]);
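+ /* clk holds an error pointer if no clock was found at probe */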
+ if (!IS_ERR(data->clk))
+ clk_enable(data->clk);
data->pgtable = pgtable;
- for (i = 0; i < data->nsfrs; i++) {
- __sysmmu_set_ptbase(data->sfrbases[i], pgtable);
-
- if ((readl(data->sfrbases[i] + REG_MMU_VERSION) >> 28) == 3) {
- /* System MMU version is 3.x */
- __raw_writel((1 << 12) | (2 << 28),
- data->sfrbases[i] + REG_MMU_CFG);
- __sysmmu_set_prefbuf(data->sfrbases[i], 0, -1, 0);
- __sysmmu_set_prefbuf(data->sfrbases[i], 0, -1, 1);
- }
-
- __raw_writel(CTRL_ENABLE, data->sfrbases[i] + REG_MMU_CTRL);
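+ /* Set the page table base (and v3 prefetch buffers) before enabling translation */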
+ __sysmmu_set_ptbase(data->sfrbase, pgtable);
+ if ((readl(data->sfrbase + REG_MMU_VERSION) >> 28) == 3) {
+ /* System MMU version is 3.x */
+ __raw_writel((1 << 12) | (2 << 28),
+ data->sfrbase + REG_MMU_CFG);
+ __sysmmu_set_prefbuf(data->sfrbase, 0, -1, 0);
+ __sysmmu_set_prefbuf(data->sfrbase, 0, -1, 1);
}
+ __raw_writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
+
data->domain = domain;
dev_dbg(data->sysmmu, "(%s) Enabled\n", data->dbgname);
if (WARN_ON(ret < 0)) {
pm_runtime_put(data->sysmmu);
dev_err(data->sysmmu,
- "(%s) Already enabled with page table %#lx\n",
+ "(%s) Already enabled with page table %#x\n",
data->dbgname, data->pgtable);
} else {
data->dev = dev;
read_lock_irqsave(&data->lock, flags);
if (is_sysmmu_active(data)) {
- int i;
- for (i = 0; i < data->nsfrs; i++) {
- if (sysmmu_block(data->sfrbases[i])) {
- __sysmmu_tlb_invalidate_entry(
- data->sfrbases[i], iova);
- sysmmu_unblock(data->sfrbases[i]);
- }
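+ /* Block the System MMU while its TLB entry is invalidated, then resume */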
+ if (sysmmu_block(data->sfrbase)) {
+ __sysmmu_tlb_invalidate_entry(
+ data->sfrbase, iova);
+ sysmmu_unblock(data->sfrbase);
}
} else {
dev_dbg(data->sysmmu,
read_lock_irqsave(&data->lock, flags);
if (is_sysmmu_active(data)) {
- int i;
- for (i = 0; i < data->nsfrs; i++) {
- if (sysmmu_block(data->sfrbases[i])) {
- __sysmmu_tlb_invalidate(data->sfrbases[i]);
- sysmmu_unblock(data->sfrbases[i]);
- }
+ if (sysmmu_block(data->sfrbase)) {
+ __sysmmu_tlb_invalidate(data->sfrbase);
+ sysmmu_unblock(data->sfrbase);
}
} else {
dev_dbg(data->sysmmu,
static int exynos_sysmmu_probe(struct platform_device *pdev)
{
- int i, ret;
- struct device *dev;
+ int ret;
+ struct device *dev = &pdev->dev;
struct sysmmu_drvdata *data;
-
- dev = &pdev->dev;
+ struct resource *res;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data) {
goto err_alloc;
}
- ret = dev_set_drvdata(dev, data);
- if (ret) {
- dev_dbg(dev, "Unabled to initialize driver data\n");
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_dbg(dev, "Unable to find IOMEM region\n");
+ ret = -ENOENT;
goto err_init;
}
- data->nsfrs = pdev->num_resources / 2;
- data->sfrbases = kmalloc(sizeof(*data->sfrbases) * data->nsfrs,
- GFP_KERNEL);
- if (data->sfrbases == NULL) {
- dev_dbg(dev, "Not enough memory\n");
- ret = -ENOMEM;
- goto err_init;
+ data->sfrbase = ioremap(res->start, resource_size(res));
+ if (!data->sfrbase) {
+ dev_dbg(dev, "Unable to map IOMEM @ PA:%#x\n", res->start);
+ ret = -ENOENT;
+ goto err_res;
}
- for (i = 0; i < data->nsfrs; i++) {
- struct resource *res;
- res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- if (!res) {
- dev_dbg(dev, "Unable to find IOMEM region\n");
- ret = -ENOENT;
- goto err_res;
- }
-
- data->sfrbases[i] = ioremap(res->start, resource_size(res));
- if (!data->sfrbases[i]) {
- dev_dbg(dev, "Unable to map IOMEM @ PA:%#x\n",
- res->start);
- ret = -ENOENT;
- goto err_res;
- }
+ ret = platform_get_irq(pdev, 0);
+ if (ret <= 0) {
+ dev_dbg(dev, "Unable to find IRQ resource\n");
+ goto err_res;
}
- for (i = 0; i < data->nsfrs; i++) {
- ret = platform_get_irq(pdev, i);
- if (ret <= 0) {
- dev_dbg(dev, "Unable to find IRQ resource\n");
- goto err_irq;
- }
-
- ret = request_irq(ret, exynos_sysmmu_irq, 0,
- dev_name(dev), data);
- if (ret) {
- dev_dbg(dev, "Unabled to register interrupt handler\n");
- goto err_irq;
- }
+ ret = request_irq(ret, exynos_sysmmu_irq, 0,
+ dev_name(dev), data);
+ if (ret) {
+ dev_dbg(dev, "Unabled to register interrupt handler\n");
+ goto err_irq;
}
if (dev_get_platdata(dev)) {
- char *deli, *beg;
- struct sysmmu_platform_data *platdata = dev_get_platdata(dev);
-
- beg = platdata->clockname;
-
- for (deli = beg; (*deli != '\0') && (*deli != ','); deli++)
- /* NOTHING */;
-
- if (*deli == '\0')
- deli = NULL;
- else
- *deli = '\0';
-
- data->clk[0] = clk_get(dev, beg);
- if (IS_ERR(data->clk[0])) {
- data->clk[0] = NULL;
+ data->clk = clk_get(dev, "sysmmu");
+ if (IS_ERR(data->clk))
dev_dbg(dev, "No clock descriptor registered\n");
- }
-
- if (data->clk[0] && deli) {
- *deli = ',';
- data->clk[1] = clk_get(dev, deli + 1);
- if (IS_ERR(data->clk[1]))
- data->clk[1] = NULL;
- }
-
- data->dbgname = platdata->dbgname;
}
data->sysmmu = dev;
__set_fault_handler(data, &default_fault_handler);
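+ /* The attach/detach paths look this up with dev_get_drvdata() */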
+ platform_set_drvdata(pdev, data);
+
if (dev->parent)
pm_runtime_enable(dev);
dev_dbg(dev, "(%s) Initialized\n", data->dbgname);
return 0;
-err_irq:
- while (i-- > 0) {
- int irq;
-
- irq = platform_get_irq(pdev, i);
- free_irq(irq, data);
- }
err_res:
- while (data->nsfrs-- > 0)
- iounmap(data->sfrbases[data->nsfrs]);
- kfree(data->sfrbases);
+ iounmap(data->sfrbase);
err_init:
kfree(data);
err_alloc:
for (i = 0; i < NUM_LV1ENTRIES; i++)
if (lv1ent_page(priv->pgtable + i))
- kfree(__va(lv2table_base(priv->pgtable + i)));
+ kfree(phys_to_virt(lv2table_base(priv->pgtable + i)));
free_pages((unsigned long)priv->pgtable, 2);
free_pages((unsigned long)priv->lv2entcnt, 1);
{
struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
struct exynos_iommu_domain *priv = domain->priv;
+ phys_addr_t pagetable = virt_to_phys(priv->pgtable);
unsigned long flags;
int ret;
spin_lock_irqsave(&priv->lock, flags);
- ret = __exynos_sysmmu_enable(data, __pa(priv->pgtable), domain);
+ ret = __exynos_sysmmu_enable(data, pagetable, domain);
if (ret == 0) {
/* 'data->node' must not already be in priv->clients */
spin_unlock_irqrestore(&priv->lock, flags);
if (ret < 0) {
- dev_err(dev, "%s: Failed to attach IOMMU with pgtable %#lx\n",
- __func__, __pa(priv->pgtable));
+ dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n",
+ __func__, &pagetable);
pm_runtime_put(data->sysmmu);
- } else if (ret > 0) {
- dev_dbg(dev, "%s: IOMMU with pgtable 0x%lx already attached\n",
- __func__, __pa(priv->pgtable));
- } else {
- dev_dbg(dev, "%s: Attached new IOMMU with pgtable 0x%lx\n",
- __func__, __pa(priv->pgtable));
+ return ret;
}
+ dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n",
+ __func__, &pagetable, (ret == 0) ? "" : ", again");
+
return ret;
}
struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
struct exynos_iommu_domain *priv = domain->priv;
struct list_head *pos;
+ phys_addr_t pagetable = virt_to_phys(priv->pgtable);
unsigned long flags;
bool found = false;
goto finish;
if (__exynos_sysmmu_disable(data)) {
- dev_dbg(dev, "%s: Detached IOMMU with pgtable %#lx\n",
- __func__, __pa(priv->pgtable));
+ dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
+ __func__, &pagetable);
list_del_init(&data->node);
} else {
- dev_dbg(dev, "%s: Detaching IOMMU with pgtable %#lx delayed",
- __func__, __pa(priv->pgtable));
+ dev_dbg(dev, "%s: Detaching IOMMU with pgtable %pa delayed",
+ __func__, &pagetable);
}
finish:
if (!pent)
return NULL;
- *sent = mk_lv1ent_page(__pa(pent));
+ *sent = mk_lv1ent_page(virt_to_phys(pent));
*pgcounter = NUM_LV2ENTRIES;
pgtable_flush(pent, pent + NUM_LV2ENTRIES);
pgtable_flush(sent, sent + 1);