ARM: dma-mapping: move all dma bounce code to separate dma ops structure
This patch removes the dma bounce hooks from the common dma mapping implementation on the ARM architecture and creates a separate set of dma_map_ops for dma bounce devices. Change-Id: I42d7869b4f74ffa5f36a4a7526bc0c55aaf6bab7 [lauraa: conflicts due to code cruft] Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com> Acked-by: Kyungmin Park <kyungmin.park@samsung.com> Tested-By: Subash Patel <subash.ramaswamy@linaro.org> Signed-off-by: Laura Abbott <lauraa@codeaurora.org> Signed-off-by: Mitchel Humpherys <mitchelh@codeaurora.org>
This commit is contained in:
committed by
Stephen Boyd
parent
3f47a1438c
commit
0e8fe4a111
@@ -34,6 +34,75 @@
|
||||
|
||||
#include "mm.h"
|
||||
|
||||
/*
|
||||
* The DMA API is built upon the notion of "buffer ownership". A buffer
|
||||
* is either exclusively owned by the CPU (and therefore may be accessed
|
||||
* by it) or exclusively owned by the DMA device. These helper functions
|
||||
* represent the transitions between these two ownership states.
|
||||
*
|
||||
* Note, however, that on later ARMs, this notion does not work due to
|
||||
* speculative prefetches. We model our approach on the assumption that
|
||||
* the CPU does do speculative prefetches, which means we clean caches
|
||||
* before transfers and delay cache invalidation until transfer completion.
|
||||
*
|
||||
* Private support functions: these are not part of the API and are
|
||||
* liable to change. Drivers must not use these.
|
||||
*/
|
||||
/*
 * Hand ownership of the kernel-mapped buffer [kaddr, kaddr + size)
 * from the CPU to the DMA device.  Coherent systems need no cache
 * maintenance, so this is a no-op there.
 */
static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	extern void ___dma_single_cpu_to_dev(const void *, size_t,
					     enum dma_data_direction);

	if (arch_is_coherent())
		return;

	___dma_single_cpu_to_dev(kaddr, size, dir);
}
|
||||
|
||||
/*
 * Hand ownership of the kernel-mapped buffer [kaddr, kaddr + size)
 * back from the DMA device to the CPU.  Coherent systems need no
 * cache maintenance, so this is a no-op there.
 */
static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	extern void ___dma_single_dev_to_cpu(const void *, size_t,
					     enum dma_data_direction);

	if (arch_is_coherent())
		return;

	___dma_single_dev_to_cpu(kaddr, size, dir);
}
|
||||
|
||||
static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
|
||||
size_t size, enum dma_data_direction dir)
|
||||
{
|
||||
extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
|
||||
size_t, enum dma_data_direction);
|
||||
|
||||
if (!arch_is_coherent())
|
||||
___dma_page_cpu_to_dev(page, off, size, dir);
|
||||
}
|
||||
|
||||
static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
|
||||
size_t size, enum dma_data_direction dir)
|
||||
{
|
||||
extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
|
||||
size_t, enum dma_data_direction);
|
||||
|
||||
if (!arch_is_coherent())
|
||||
___dma_page_dev_to_cpu(page, off, size, dir);
|
||||
}
|
||||
|
||||
|
||||
/*
 * Prepare a page region for streaming DMA: transfer CPU ownership of
 * the region to the device, then translate the page to the bus address
 * the device must be programmed with.
 */
static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction dir)
{
	dma_addr_t dma_addr;

	__dma_page_cpu_to_dev(page, offset, size, dir);

	dma_addr = pfn_to_dma(dev, page_to_pfn(page)) + offset;
	return dma_addr;
}
|
||||
|
||||
static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
|
||||
size_t size, enum dma_data_direction dir)
|
||||
{
|
||||
__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
|
||||
handle & ~PAGE_MASK, size, dir);
|
||||
}
|
||||
|
||||
/**
|
||||
* arm_dma_map_page - map a portion of a page for streaming DMA
|
||||
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
|
||||
@@ -81,9 +150,6 @@ static inline void arm_dma_sync_single_for_cpu(struct device *dev,
|
||||
{
|
||||
unsigned int offset = handle & (PAGE_SIZE - 1);
|
||||
struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
|
||||
if (!dmabounce_sync_for_cpu(dev, handle, size, dir))
|
||||
return;
|
||||
|
||||
__dma_page_dev_to_cpu(page, offset, size, dir);
|
||||
}
|
||||
|
||||
@@ -92,9 +158,6 @@ static inline void arm_dma_sync_single_for_device(struct device *dev,
|
||||
{
|
||||
unsigned int offset = handle & (PAGE_SIZE - 1);
|
||||
struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
|
||||
if (!dmabounce_sync_for_device(dev, handle, size, dir))
|
||||
return;
|
||||
|
||||
__dma_page_cpu_to_dev(page, offset, size, dir);
|
||||
}
|
||||
|
||||
@@ -836,7 +899,6 @@ void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
|
||||
}
|
||||
/* FIXME: non-speculating: flush on bidirectional mappings? */
|
||||
}
|
||||
EXPORT_SYMBOL(___dma_page_cpu_to_dev);
|
||||
|
||||
void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
|
||||
size_t size, enum dma_data_direction dir)
|
||||
@@ -856,7 +918,6 @@ void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
|
||||
if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
|
||||
set_bit(PG_dcache_clean, &page->flags);
|
||||
}
|
||||
EXPORT_SYMBOL(___dma_page_dev_to_cpu);
|
||||
|
||||
/**
|
||||
* arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
|
||||
@@ -974,9 +1035,7 @@ static int arm_dma_set_mask(struct device *dev, u64 dma_mask)
|
||||
if (!dev->dma_mask || !dma_supported(dev, dma_mask))
|
||||
return -EIO;
|
||||
|
||||
#ifndef CONFIG_DMABOUNCE
|
||||
*dev->dma_mask = dma_mask;
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user