	return 0;
}
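+/*
+ * ->map_page callback for struct dma_map_ops: map one page of memory for
+ * DMA.  Converts the (page, offset) pair into a physical address and
+ * reuses the existing __intel_map_single() helper to build the mapping.
+ */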
+static dma_addr_t intel_map_page(struct device *dev, struct page *page,
+				 unsigned long offset, size_t size,
+				 enum dma_data_direction dir,
+				 struct dma_attrs *attrs)
+{
+	return __intel_map_single(dev, page_to_phys(page) + offset, size,
+				  dir, to_pci_dev(dev)->dma_mask);
+}
+
dma_addr_t intel_map_single(struct device *hwdev, phys_addr_t paddr,
			    size_t size, int dir)
{
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}
-void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
-			int dir)
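+/*
+ * ->unmap_page callback: tear down a mapping created by intel_map_page();
+ * also used by the intel_unmap_single() wrapper below.
+ */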
+static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
+			     size_t size, enum dma_data_direction dir,
+			     struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct dmar_domain *domain;
	}
}
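+/*
+ * Backwards-compatible wrapper for callers of the old single-address
+ * interface; forwards to intel_unmap_page() with no attributes.
+ */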
+void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
+			int dir)
+{
+	intel_unmap_page(dev, dev_addr, size, dir, NULL);
+}
+
void *intel_alloc_coherent(struct device *hwdev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags)
{
#define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg)))
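+/* ->unmap_sg hook: undo a scatterlist mapping created by intel_map_sg(). */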
void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
-		    int nelems, int dir)
+		    int nelems, enum dma_data_direction dir,
+		    struct dma_attrs *attrs)
{
	int i;
	struct pci_dev *pdev = to_pci_dev(hwdev);
}
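+/* ->map_sg hook: map each entry of a scatterlist for DMA. */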
int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
-		 int dir)
+		 enum dma_data_direction dir, struct dma_attrs *attrs)
{
	void *addr;
	int i;
	return nelems;
}
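+/*
+ * Generic DMA operations table.  The core DMA API implements
+ * dma_map_single()/dma_unmap_single() on top of the ->map_page and
+ * ->unmap_page hooks, so no map_single/unmap_single entries are needed.
+ */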
-static struct dma_mapping_ops intel_dma_ops = {
+struct dma_map_ops intel_dma_ops = {
	.alloc_coherent = intel_alloc_coherent,
	.free_coherent = intel_free_coherent,
-	.map_single = intel_map_single,
-	.unmap_single = intel_unmap_single,
	.map_sg = intel_map_sg,
	.unmap_sg = intel_unmap_sg,
+	.map_page = intel_map_page,
+	.unmap_page = intel_unmap_page,
};
static inline int iommu_domain_cache_init(void)