gpu: ion: Map everything into IOMMU with 64K pages.
Due to performance issues with 4K pages in the IOMMU, use 64K pages instead. However, the minimum allocation from ION is 4K, so rather than mapping each buffer on demand, map the full heap into the IOMMU when the first mapping request arrives, and only unmap everything from the IOMMU when the last buffer is freed.

CRs-fixed: 348606
Change-Id: Ic1793f5caaff2f69bf1fb7e8c8b3bd03378131b8
Signed-off-by: Olav Haugan <ohaugan@codeaurora.org>
[sboyd: drop board file, fmem, memory changes]
Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
committed by Stephen Boyd
parent 7fdaea2b50
commit 6c18ca537d
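
To make the approach concrete before the diff, here is a minimal, self-contained sketch (not part of the patch) of mapping a physically contiguous heap into an IOMMU domain in 64K chunks and tearing it down again. It mirrors the iommu_map()/iommu_unmap() usage the diff adds below; the helper names map_heap_64k()/unmap_heap_64k() are hypothetical.

#include <linux/iommu.h>
#include <linux/sizes.h>	/* SZ_64K; older kernels use asm-generic/sizes.h */

/*
 * Hypothetical helper: map a physically contiguous heap into @domain in
 * SZ_64K chunks, unwinding on failure.  Heap base and size are assumed
 * to be 64K aligned, as the patch below requires.
 */
static int map_heap_64k(struct iommu_domain *domain, unsigned long iova,
			unsigned long phys, unsigned long size, int prot)
{
	unsigned long done = 0;

	while (done < size) {
		int ret = iommu_map(domain, iova + done, phys + done,
				    SZ_64K, prot);
		if (ret) {
			/* Unwind the chunks that were already mapped. */
			while (done) {
				done -= SZ_64K;
				iommu_unmap(domain, iova + done, SZ_64K);
			}
			return ret;
		}
		done += SZ_64K;
	}
	return 0;
}

/* Hypothetical teardown: unmap the same range in SZ_64K chunks. */
static void unmap_heap_64k(struct iommu_domain *domain, unsigned long iova,
			   unsigned long size)
{
	unsigned long done;

	for (done = 0; done < size; done += SZ_64K)
		iommu_unmap(domain, iova + done, SZ_64K);
}
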
@@ -75,8 +75,14 @@ extern int msm_use_iommu(void);
 extern int msm_iommu_map_extra(struct iommu_domain *domain,
 				unsigned long start_iova,
 				unsigned long size,
+				unsigned long page_size,
 				int cached);
 
+extern void msm_iommu_unmap_extra(struct iommu_domain *domain,
+				unsigned long start_iova,
+				unsigned long size,
+				unsigned long page_size);
+
 extern int msm_iommu_map_contig_buffer(unsigned long phys,
 				unsigned int domain_no,
 				unsigned int partition_no,
@@ -115,12 +121,18 @@ static inline int msm_use_iommu(void)
 static inline int msm_iommu_map_extra(struct iommu_domain *domain,
 				unsigned long start_iova,
 				unsigned long size,
+				unsigned long page_size,
 				int cached)
 {
 	return -ENODEV;
 }
 
+static inline void msm_iommu_unmap_extra(struct iommu_domain *domain,
+				unsigned long start_iova,
+				unsigned long size,
+				unsigned long page_size)
+{
+}
+
 static inline int msm_iommu_map_contig_buffer(unsigned long phys,
 				unsigned int domain_no,
@@ -141,7 +153,6 @@ static inline void msm_iommu_unmap_contig_buffer(unsigned long iova,
 {
 	return;
 }
-
 #endif
 
 #endif
@@ -23,8 +23,8 @@
 #include <mach/iommu_domains.h>
 #include <mach/socinfo.h>
 
-/* dummy 4k for overmapping */
-char iommu_dummy[2*PAGE_SIZE-4];
+/* dummy 64K for overmapping */
+char iommu_dummy[2*SZ_64K-4];
 
 struct msm_iommu_domain_state {
 	struct msm_iommu_domain *domains;
@@ -36,34 +36,51 @@ static struct msm_iommu_domain_state domain_state;
 int msm_iommu_map_extra(struct iommu_domain *domain,
 				unsigned long start_iova,
 				unsigned long size,
+				unsigned long page_size,
 				int cached)
 {
-	int i, ret = 0;
-	struct scatterlist *sglist;
-	unsigned int nrpages = PFN_ALIGN(size) >> PAGE_SHIFT;
-	struct page *dummy_page = phys_to_page(
-				PFN_ALIGN(virt_to_phys(iommu_dummy)));
-
-	sglist = vmalloc(sizeof(*sglist) * nrpages);
-	if (!sglist) {
-		ret = -ENOMEM;
-		goto err1;
-	}
-
-	sg_init_table(sglist, nrpages);
-	for (i = 0; i < nrpages; i++)
-		sg_set_page(&sglist[i], dummy_page, PAGE_SIZE, 0);
-
-	ret = iommu_map_range(domain, start_iova, sglist, size, cached);
-	if (ret) {
-		pr_err("%s: could not map extra %lx in domain %p\n",
-			__func__, start_iova, domain);
-	}
-
-	vfree(sglist);
-err1:
-	return ret;
+	int i, ret_value = 0;
+	unsigned long order = get_order(page_size);
+	unsigned long aligned_size = ALIGN(size, page_size);
+	unsigned long nrpages = aligned_size >> (PAGE_SHIFT + order);
+	unsigned long phy_addr = ALIGN(virt_to_phys(iommu_dummy), page_size);
+	unsigned long temp_iova = start_iova;
+
+	for (i = 0; i < nrpages; i++) {
+		int ret = iommu_map(domain, temp_iova, phy_addr, page_size,
+				    cached);
+		if (ret) {
+			pr_err("%s: could not map %lx in domain %p, error: %d\n",
+				__func__, start_iova, domain, ret);
+			ret_value = -EAGAIN;
+			goto out;
+		}
+		temp_iova += page_size;
+	}
+	return ret_value;
+out:
+	for (; i > 0; --i) {
+		temp_iova -= page_size;
+		iommu_unmap(domain, start_iova, page_size);
+	}
+	return ret_value;
+}
+
+void msm_iommu_unmap_extra(struct iommu_domain *domain,
+				unsigned long start_iova,
+				unsigned long size,
+				unsigned long page_size)
+{
+	int i;
+	unsigned long order = get_order(page_size);
+	unsigned long aligned_size = ALIGN(size, page_size);
+	unsigned long nrpages = aligned_size >> (PAGE_SHIFT + order);
+	unsigned long temp_iova = start_iova;
+
+	for (i = 0; i < nrpages; ++i) {
+		iommu_unmap(domain, temp_iova, page_size);
+		temp_iova += page_size;
+	}
 }
 
 static int msm_iommu_map_iova_phys(struct iommu_domain *domain,
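
Callers now choose the granularity of the dummy overmapping through the new page_size argument. As a hedged illustration of the updated API (extra_overmap() is a hypothetical wrapper, not part of the patch; the call sites updated below simply pass SZ_4K or SZ_64K directly):

/*
 * Hypothetical wrapper around the reworked msm_iommu_map_extra(): after a
 * buffer of @size bytes has been mapped at @iova, back the remaining @extra
 * bytes of the allocated iova range with the dummy page, 4K at a time.
 */
static int extra_overmap(struct iommu_domain *domain, unsigned long iova,
			 unsigned long size, unsigned long extra, int prot)
{
	if (!extra)
		return 0;

	/* The new fourth argument selects the page size used for overmapping. */
	return msm_iommu_map_extra(domain, iova + size, extra, SZ_4K, prot);
}
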
@@ -407,7 +407,7 @@ struct msm_mapped_buffer *msm_subsystem_map_buffer(unsigned long phys,
 
 			if (flags & MSM_SUBSYSTEM_MAP_IOMMU_2X)
 				msm_iommu_map_extra
-					(d, temp_va, length,
+					(d, temp_va, length, SZ_4K,
 					(IOMMU_READ | IOMMU_WRITE));
 		}
 
@@ -330,7 +330,8 @@ int ion_carveout_heap_map_iommu(struct ion_buffer *buffer,
 
 	if (extra) {
 		unsigned long extra_iova_addr = data->iova_addr + buffer->size;
-		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, prot);
+		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra,
+					  SZ_4K, prot);
 		if (ret)
 			goto out2;
 	}
@@ -64,8 +64,12 @@
  * kernel space (un-cached).
  * @umap_count: the total number of times this heap has been mapped in
  * user space.
+ * @iommu_iova: saved iova when mapping full heap at once.
+ * @iommu_partition: partition used to map full heap.
  * @reusable: indicates if the memory should be reused via fmem.
  * @reserved_vrange: reserved virtual address range for use with fmem
+ * @iommu_map_all: Indicates whether we should map whole heap into IOMMU.
+ * @iommu_2x_map_domain: Indicates the domain to use for overmapping.
  */
 struct ion_cp_heap {
 	struct ion_heap heap;
@@ -84,8 +88,13 @@ struct ion_cp_heap {
 	unsigned long kmap_cached_count;
 	unsigned long kmap_uncached_count;
 	unsigned long umap_count;
+	unsigned long iommu_iova[MAX_DOMAINS];
+	unsigned long iommu_partition[MAX_DOMAINS];
 	int reusable;
 	void *reserved_vrange;
+	int iommu_map_all;
+	int iommu_2x_map_domain;
+
 };
 
 enum {
@@ -251,6 +260,29 @@ ion_phys_addr_t ion_cp_allocate(struct ion_heap *heap,
 	return offset;
 }
 
+static void iommu_unmap_all(unsigned long domain_num,
+			    struct ion_cp_heap *cp_heap)
+{
+	unsigned long left_to_unmap = cp_heap->total_size;
+	unsigned long page_size = SZ_64K;
+
+	struct iommu_domain *domain = msm_get_iommu_domain(domain_num);
+	if (domain) {
+		unsigned long temp_iova = cp_heap->iommu_iova[domain_num];
+
+		while (left_to_unmap) {
+			iommu_unmap(domain, temp_iova, page_size);
+			temp_iova += page_size;
+			left_to_unmap -= page_size;
+		}
+		if (domain_num == cp_heap->iommu_2x_map_domain)
+			msm_iommu_unmap_extra(domain, temp_iova,
+					cp_heap->total_size, SZ_64K);
+	} else {
+		pr_err("Unable to get IOMMU domain %lu\n", domain_num);
+	}
+}
+
 void ion_cp_free(struct ion_heap *heap, ion_phys_addr_t addr,
 		unsigned long size)
 {
@@ -269,6 +301,26 @@ void ion_cp_free(struct ion_heap *heap, ion_phys_addr_t addr,
 			pr_err("%s: unable to transition heap to T-state\n",
 				__func__);
 	}
 
+	/* Unmap everything if we previously mapped the whole heap at once. */
+	if (!cp_heap->allocated_bytes) {
+		unsigned int i;
+		for (i = 0; i < MAX_DOMAINS; ++i) {
+			if (cp_heap->iommu_iova[i]) {
+				unsigned long vaddr_len = cp_heap->total_size;
+
+				if (i == cp_heap->iommu_2x_map_domain)
+					vaddr_len <<= 1;
+				iommu_unmap_all(i, cp_heap);
+
+				msm_free_iova_address(cp_heap->iommu_iova[i], i,
+						cp_heap->iommu_partition[i],
+						vaddr_len);
+			}
+			cp_heap->iommu_iova[i] = 0;
+			cp_heap->iommu_partition[i] = 0;
+		}
+	}
 	mutex_unlock(&cp_heap->lock);
 }
@@ -576,6 +628,74 @@ int ion_cp_unsecure_heap(struct ion_heap *heap)
 	return ret_value;
 }
 
+static int iommu_map_all(unsigned long domain_num, struct ion_cp_heap *cp_heap,
+			int partition, unsigned long prot)
+{
+	unsigned long left_to_map = cp_heap->total_size;
+	unsigned long page_size = SZ_64K;
+	int ret_value = 0;
+	unsigned long virt_addr_len = cp_heap->total_size;
+	struct iommu_domain *domain = msm_get_iommu_domain(domain_num);
+
+	/* If we are mapping into the video domain we need to map twice the
+	 * size of the heap to account for prefetch issue in video core.
+	 */
+	if (domain_num == cp_heap->iommu_2x_map_domain)
+		virt_addr_len <<= 1;
+
+	if (cp_heap->total_size & (SZ_64K-1)) {
+		pr_err("Heap size is not aligned to 64K, cannot map into IOMMU\n");
+		ret_value = -EINVAL;
+	}
+	if (cp_heap->base & (SZ_64K-1)) {
+		pr_err("Heap physical address is not aligned to 64K, cannot map into IOMMU\n");
+		ret_value = -EINVAL;
+	}
+	if (!ret_value && domain) {
+		unsigned long temp_phys = cp_heap->base;
+		unsigned long temp_iova =
+			msm_allocate_iova_address(domain_num, partition,
+						virt_addr_len, SZ_64K);
+		if (!temp_iova) {
+			pr_err("%s: could not allocate iova from domain %lu, partition %d\n",
+				__func__, domain_num, partition);
+			ret_value = -ENOMEM;
+			goto out;
+		}
+		cp_heap->iommu_iova[domain_num] = temp_iova;
+
+		while (left_to_map) {
+			int ret = iommu_map(domain, temp_iova, temp_phys,
+					page_size, prot);
+			if (ret) {
+				pr_err("%s: could not map %lx in domain %p, error: %d\n",
+					__func__, temp_iova, domain, ret);
+				ret_value = -EAGAIN;
+				goto free_iova;
+			}
+			temp_iova += page_size;
+			temp_phys += page_size;
+			left_to_map -= page_size;
+		}
+		if (domain_num == cp_heap->iommu_2x_map_domain)
+			ret_value = msm_iommu_map_extra(domain, temp_iova,
+					cp_heap->total_size,
+					SZ_64K, prot);
+		if (ret_value)
+			goto free_iova;
+	} else {
+		pr_err("Unable to get IOMMU domain %lu\n", domain_num);
+		ret_value = -ENOMEM;
+	}
+	goto out;
+
+free_iova:
+	msm_free_iova_address(cp_heap->iommu_iova[domain_num], domain_num,
+			partition, virt_addr_len);
+out:
+	return ret_value;
+}
+
 static int ion_cp_heap_map_iommu(struct ion_buffer *buffer,
 				struct ion_iommu_map *data,
 				unsigned int domain_num,
@@ -587,6 +707,8 @@ static int ion_cp_heap_map_iommu(struct ion_buffer *buffer,
 	struct iommu_domain *domain;
 	int ret = 0;
 	unsigned long extra;
+	struct ion_cp_heap *cp_heap =
+		container_of(buffer->heap, struct ion_cp_heap, heap);
 	int prot = IOMMU_WRITE | IOMMU_READ;
 	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;
 
@@ -597,6 +719,32 @@ static int ion_cp_heap_map_iommu(struct ion_buffer *buffer,
 		return 0;
 	}
 
+	if (cp_heap->iommu_iova[domain_num]) {
+		/* Already mapped. */
+		unsigned long offset = buffer->priv_phys - cp_heap->base;
+		data->iova_addr = cp_heap->iommu_iova[domain_num] + offset;
+		return 0;
+	} else if (cp_heap->iommu_map_all) {
+		ret = iommu_map_all(domain_num, cp_heap, partition_num, prot);
+		if (!ret) {
+			unsigned long offset =
+				buffer->priv_phys - cp_heap->base;
+			data->iova_addr =
+				cp_heap->iommu_iova[domain_num] + offset;
+			cp_heap->iommu_partition[domain_num] = partition_num;
+			/*
+			clear delayed map flag so that we don't interfere
+			with this feature (we are already delaying).
+			*/
+			data->flags &= ~ION_IOMMU_UNMAP_DELAYED;
+			return 0;
+		} else {
+			cp_heap->iommu_iova[domain_num] = 0;
+			cp_heap->iommu_partition[domain_num] = 0;
+			return ret;
+		}
+	}
+
 	extra = iova_length - buffer->size;
 
 	data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
@@ -624,7 +772,8 @@ static int ion_cp_heap_map_iommu(struct ion_buffer *buffer,
 
 	if (extra) {
 		unsigned long extra_iova_addr = data->iova_addr + buffer->size;
-		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, prot);
+		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra,
+					  SZ_4K, prot);
 		if (ret)
 			goto out2;
 	}
@@ -644,11 +793,20 @@ static void ion_cp_heap_unmap_iommu(struct ion_iommu_map *data)
 	unsigned int domain_num;
 	unsigned int partition_num;
 	struct iommu_domain *domain;
+	struct ion_cp_heap *cp_heap =
+		container_of(data->buffer->heap, struct ion_cp_heap, heap);
 
 	if (!msm_use_iommu())
 		return;
 
+
 	domain_num = iommu_map_domain(data);
+
+	/* If we are mapping everything we'll wait to unmap until everything
+	   is freed. */
+	if (cp_heap->iommu_iova[domain_num])
+		return;
+
 	partition_num = iommu_map_partition(data);
 
 	domain = msm_get_iommu_domain(domain_num);
@@ -729,7 +887,13 @@ struct ion_heap *ion_cp_heap_create(struct ion_platform_heap *heap_data)
 		cp_heap->request_region = extra_data->request_region;
 		if (extra_data->release_region)
 			cp_heap->release_region = extra_data->release_region;
+		cp_heap->iommu_map_all =
+			extra_data->iommu_map_all;
+		cp_heap->iommu_2x_map_domain =
+			extra_data->iommu_2x_map_domain;
+
 	}
 
 	return &cp_heap->heap;
 
 destroy_pool:
@@ -217,7 +217,8 @@ int ion_iommu_heap_map_iommu(struct ion_buffer *buffer,
 
 	if (extra) {
 		unsigned long extra_iova_addr = data->iova_addr + buffer->size;
-		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, prot);
+		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, SZ_4K,
+					  prot);
 		if (ret)
 			goto out2;
 	}
@@ -273,7 +273,8 @@ int ion_system_heap_map_iommu(struct ion_buffer *buffer,
 
 	extra_iova_addr = data->iova_addr + buffer->size;
 	if (extra) {
-		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, prot);
+		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, SZ_4K,
+					  prot);
 		if (ret)
 			goto out2;
 	}
@@ -484,7 +485,8 @@ int ion_system_contig_heap_map_iommu(struct ion_buffer *buffer,
 
 	if (extra) {
 		unsigned long extra_iova_addr = data->iova_addr + buffer->size;
-		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, prot);
+		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, SZ_4K,
+					  prot);
 		if (ret)
 			goto out2;
 	}
@@ -176,6 +176,8 @@ struct ion_platform_heap {
  * or not.
  * @fixed_position If nonzero, position in the fixed area.
  * @virt_addr: Virtual address used when using fmem.
+ * @iommu_map_all: Indicates whether we should map whole heap into IOMMU.
+ * @iommu_2x_map_domain: Indicates the domain to use for overmapping.
  * @request_region: function to be called when the number of allocations
  * goes from 0 -> 1
  * @release_region: function to be called when the number of allocations
@@ -191,6 +193,8 @@ struct ion_cp_heap_pdata {
 	int reusable;
 	int mem_is_fmem;
 	enum ion_fixed_position fixed_position;
+	int iommu_map_all;
+	int iommu_2x_map_domain;
 	ion_virt_addr_t *virt_addr;
 	int (*request_region)(void *);
 	int (*release_region)(void *);
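
The two new platform-data fields are consumed in ion_cp_heap_create() above. Board-file changes were deliberately dropped from this patch (see the commit message), so the following is only a hypothetical sketch of how a platform might enable whole-heap mapping; VIDEO_DOMAIN stands in for whatever IOMMU domain id the platform uses for its video core, and the elided fields are platform specific.

/* Illustrative only -- not part of this patch. */
static struct ion_cp_heap_pdata cp_mm_heap_pdata = {
	/* ... other platform-specific fields elided ... */
	.iommu_map_all = 1,			/* map the full heap on the first map request */
	.iommu_2x_map_domain = VIDEO_DOMAIN,	/* overmap 2x for video prefetch */
};
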