In preparation for some IOVA allocation improvements, clean up all the
explicit struct iova usage such that all our mapping, unmapping and
cleanup paths deal exclusively with addresses rather than implementation
details. In the process, a few of the things we're touching get renamed
for the sake of internal consistency.
Reviewed-by: Nate Watterson <nwatters@codeaurora.org>
Tested-by: Nate Watterson <nwatters@codeaurora.org>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
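For orientation before the hunks themselves, the net effect on the helper interfaces is summarised below; these signatures are taken straight from the diff, condensed into one place rather than being a separate API:

/* Before: allocation hands back the allocator's internal rbtree node */
static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size,
		dma_addr_t dma_limit, struct device *dev);

/* After: allocation and freeing trade purely in bus addresses, with the
 * caller passing the size back in at free time. */
static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, dma_addr_t dma_limit, struct device *dev);
static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size);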
-static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size,
- dma_addr_t dma_limit, struct device *dev)
+static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
+ size_t size, dma_addr_t dma_limit, struct device *dev)
{
struct iova_domain *iovad = cookie_iovad(domain);
unsigned long shift = iova_shift(iovad);
- unsigned long length = iova_align(iovad, size) >> shift;
+ unsigned long iova_len = size >> shift;
struct iova *iova = NULL;
if (domain->geometry.force_aperture)
/* Try to get PCI devices a SAC address */
if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
- iova = alloc_iova(iovad, length, DMA_BIT_MASK(32) >> shift,
+ iova = alloc_iova(iovad, iova_len, DMA_BIT_MASK(32) >> shift,
true);
/*
* Enforce size-alignment to be safe - there could perhaps be an
* attribute to control this per-device, or at least per-domain...
*/
if (!iova)
- iova = alloc_iova(iovad, length, dma_limit >> shift, true);
+ iova = alloc_iova(iovad, iova_len, dma_limit >> shift, true);
+ return (dma_addr_t)iova->pfn_lo << shift;
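The new return statement folds the allocated page-frame number straight back into a bus address. A standalone model of that conversion (the shift and pfn values here are illustrative, not from the patch; in the kernel, shift comes from iova_shift(iovad)):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;

int main(void)
{
	unsigned long shift = 12;	/* assumed 4K IOVA granule */
	unsigned long pfn_lo = 0x10000;	/* first pfn of an allocation */

	/* Mirrors "return (dma_addr_t)iova->pfn_lo << shift;" above */
	dma_addr_t addr = (dma_addr_t)pfn_lo << shift;

	printf("%#llx\n", (unsigned long long)addr);	/* 0x10000000 */
	return 0;
}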
-/* The IOVA allocator knows what we mapped, so just unmap whatever that was */
-static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr)
+static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
+ dma_addr_t iova, size_t size)
{
- struct iova_domain *iovad = cookie_iovad(domain);
- unsigned long shift = iova_shift(iovad);
- unsigned long pfn = dma_addr >> shift;
- struct iova *iova = find_iova(iovad, pfn);
- size_t size;
+ struct iova_domain *iovad = &cookie->iovad;
+ struct iova *iova_rbnode;
- if (WARN_ON(!iova))
+ iova_rbnode = find_iova(iovad, iova_pfn(iovad, iova));
+ if (WARN_ON(!iova_rbnode))
return;
- size = iova_size(iova) << shift;
- size -= iommu_unmap(domain, pfn << shift, size);
- /* ...and if we can't, then something is horribly, horribly wrong */
- WARN_ON(size > 0);
- __free_iova(iovad, iova);
+ __free_iova(iovad, iova_rbnode);
+}
+
+static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
+ size_t size)
+{
+ struct iova_domain *iovad = cookie_iovad(domain);
+ size_t iova_off = iova_offset(iovad, dma_addr);
+
+ dma_addr -= iova_off;
+ size = iova_align(iovad, size + iova_off);
+
+ WARN_ON(iommu_unmap(domain, dma_addr, size) != size);
+ iommu_dma_free_iova(domain->iova_cookie, dma_addr, size);
}
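With the caller now supplying both the address and the size, the new __iommu_dma_unmap() recovers the granule-aligned region that was actually mapped by pure arithmetic instead of a find_iova() lookup. A userspace sketch of that offset/alignment step, with GRANULE and the sample values assumed (the kernel helpers are iova_offset() and iova_align()):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;

#define GRANULE 4096ULL	/* assumed IOVA granule of one 4K page */

static dma_addr_t iova_offset(dma_addr_t addr)
{
	return addr & (GRANULE - 1);
}

static dma_addr_t iova_align(dma_addr_t size)
{
	return (size + GRANULE - 1) & ~(GRANULE - 1);
}

int main(void)
{
	/* The caller hands back the same (address, size) that map returned. */
	dma_addr_t dma_addr = 0x10000234;
	dma_addr_t size = 0x1a00;
	dma_addr_t off = iova_offset(dma_addr);

	/* Same two steps as the hunk above: rewind to the granule boundary,
	 * then round the length up to cover the whole mapped extent. */
	dma_addr -= off;
	size = iova_align(size + off);

	printf("unmap %#llx + %#llx\n", (unsigned long long)dma_addr,
	       (unsigned long long)size);	/* unmap 0x10000000 + 0x2000 */
	return 0;
}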
static void __iommu_dma_free_pages(struct page **pages, int count)
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
dma_addr_t *handle)
{
- __iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle);
+ __iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle, size);
__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
*handle = DMA_ERROR_CODE;
}
void (*flush_page)(struct device *, const void *, phys_addr_t))
{
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
- struct iova_domain *iovad = cookie_iovad(domain);
- struct iova *iova;
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
struct page **pages;
struct sg_table sgt;
- dma_addr_t dma_addr;
+ dma_addr_t iova;
unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
*handle = DMA_ERROR_CODE;
- iova = __alloc_iova(domain, size, dev->coherent_dma_mask, dev);
+ size = iova_align(iovad, size);
+ iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
if (!iova)
goto out_free_pages;
- size = iova_align(iovad, size);
if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
goto out_free_iova;
- dma_addr = iova_dma_addr(iovad, iova);
- if (iommu_map_sg(domain, dma_addr, sgt.sgl, sgt.orig_nents, prot)
+ if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, prot)
< size)
goto out_free_sg;
sg_free_table(&sgt);
return pages;
out_free_sg:
sg_free_table(&sgt);
out_free_iova:
- __free_iova(iovad, iova);
+ iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
__iommu_dma_free_pages(pages, count);
return NULL;
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
size_t size, int prot)
{
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
- struct iova_domain *iovad = cookie_iovad(domain);
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
size_t iova_off = iova_offset(iovad, phys);
- size_t len = iova_align(iovad, size + iova_off);
- struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev), dev);
+ dma_addr_t iova;
+ size = iova_align(iovad, size + iova_off);
+ iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
if (!iova)
return DMA_ERROR_CODE;
- dma_addr = iova_dma_addr(iovad, iova);
- if (iommu_map(domain, dma_addr, phys - iova_off, len, prot)) {
- __free_iova(iovad, iova);
+ if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
+ iommu_dma_free_iova(cookie, iova, size);
return DMA_ERROR_CODE;
}
- return dma_addr + iova_off;
+ return iova + iova_off;
}
dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
enum dma_data_direction dir, unsigned long attrs)
{
- __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle);
+ __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
int nents, int prot)
{
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
- struct iova_domain *iovad = cookie_iovad(domain);
- struct iova *iova;
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
struct scatterlist *s, *prev = NULL;
- dma_addr_t dma_addr;
+ dma_addr_t iova;
size_t iova_len = 0;
unsigned long mask = dma_get_seg_boundary(dev);
int i;
- iova = __alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
+ iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
if (!iova)
goto out_restore_sg;
* We'll leave any physical concatenation to the IOMMU driver's
* implementation - it knows better than we do.
*/
- dma_addr = iova_dma_addr(iovad, iova);
- if (iommu_map_sg(domain, dma_addr, sg, nents, prot) < iova_len)
+ if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
goto out_free_iova;
- return __finalise_sg(dev, sg, nents, dma_addr);
+ return __finalise_sg(dev, sg, nents, iova);
out_free_iova:
- __free_iova(iovad, iova);
+ iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
__invalidate_sg(sg, nents);
return 0;
void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir, unsigned long attrs)
{
+ dma_addr_t start, end;
+ struct scatterlist *tmp;
+ int i;
/*
* The scatterlist segments are mapped into a single
* contiguous IOVA allocation, so this is incredibly easy.
*/
- __iommu_dma_unmap(iommu_get_domain_for_dev(dev), sg_dma_address(sg));
+ start = sg_dma_address(sg);
+ for_each_sg(sg_next(sg), tmp, nents - 1, i) {
+ if (sg_dma_len(tmp) == 0)
+ break;
+ sg = tmp;
+ }
+ end = sg_dma_address(sg) + sg_dma_len(sg);
+ __iommu_dma_unmap(iommu_get_domain_for_dev(dev), start, end - start);
}
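iommu_dma_unmap_sg() can no longer ask the IOVA rbtree how much was mapped, so it derives the extent from the list itself: the segments were merged into one contiguous IOVA run at map time, and any trailing entries that did not get mapped have a DMA length of zero. A minimal stand-in for that walk (struct seg and the sample values are invented for illustration; the kernel iterates real struct scatterlist entries with for_each_sg()):

#include <stddef.h>
#include <stdio.h>

struct seg {	/* stand-in for the DMA fields of struct scatterlist */
	unsigned long long dma_address;
	unsigned int dma_len;
};

int main(void)
{
	/* Three segments mapped back-to-back in one IOVA allocation,
	 * plus one unused trailing entry (dma_len == 0). */
	struct seg sg[] = {
		{ 0x10000000, 0x1000 },
		{ 0x10001000, 0x2000 },
		{ 0x10003000, 0x0800 },
		{ 0,          0x0000 },
	};
	size_t nents = sizeof(sg) / sizeof(sg[0]);
	unsigned long long start = sg[0].dma_address, end;
	size_t last = 0;

	/* Walk forward until the first zero-length entry, as the
	 * for_each_sg() loop above does. */
	for (size_t i = 1; i < nents; i++) {
		if (sg[i].dma_len == 0)
			break;
		last = i;
	}
	end = sg[last].dma_address + sg[last].dma_len;

	printf("unmap %#llx + %#llx\n", start, end - start);
	/* unmap 0x10000000 + 0x3800 */
	return 0;
}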
dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
- __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle);
+ __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
}
int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iommu_dma_msi_page *msi_page;
struct iova_domain *iovad = cookie_iovad(domain);
- struct iova *iova;
+ dma_addr_t iova;
int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
size_t size = cookie_msi_granule(cookie);
msi_page->phys = msi_addr;
if (iovad) {
- iova = __alloc_iova(domain, size, dma_get_mask(dev), dev);
+ iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
if (!iova)
goto out_free_page;
- msi_page->iova = iova_dma_addr(iovad, iova);
+ msi_page->iova = iova;
} else {
msi_page->iova = cookie->msi_iova;
cookie->msi_iova += size;
out_free_iova:
if (iovad)
- __free_iova(iovad, iova);
+ iommu_dma_free_iova(cookie, iova, size);
else
cookie->msi_iova -= size;
out_free_page: