gpu: ion: Add support for adjacent heaps

ION has to guarantee that two of the heaps are physically
adjacent to each other due to a limitation in the hardware.
Add code to ensure these heaps are allocated adjacently.

Change-Id: Icc18437a50e1d872112468d02b61ab47fd70acc9
Signed-off-by: Olav Haugan <ohaugan@codeaurora.org>
[sboyd: drop board file changes]
Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
Author:    Olav Haugan
Date:      2012-01-10 16:30:58 -08:00
Committer: Stephen Boyd
Commit:    2fd338e25e (parent 02d86ef9d7)
3 changed files with 124 additions and 42 deletions
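For reference (the board-file changes themselves were dropped from this commit), wiring two adjacent heaps together in platform data would look roughly like the sketch below. It only uses the ion_co_heap_pdata / ion_cp_heap_pdata fields added in this change; the sizes, alignments, permission type, and the CP heap name string are illustrative assumptions, not values taken from any real board file.

/* Hypothetical board data: the MM firmware carveout asks to be placed
 * directly in front of the CP MM heap via adjacent_mem_id. */
static struct ion_co_heap_pdata fw_co_ion_pdata = {
	.adjacent_mem_id = ION_CP_MM_HEAP_ID,
	.align = SZ_128K,			/* illustrative */
};

static struct ion_cp_heap_pdata cp_mm_ion_pdata = {
	.permission_type = IPT_TYPE_MM_CARVEOUT,	/* assumed enum value */
	.align = SZ_64K,				/* illustrative */
	/* secure_base/secure_size are filled in by msm_ion_heap_fixup() */
};

static struct ion_platform_heap msm_ion_heaps[] = {
	{
		.id          = ION_MM_FIRMWARE_HEAP_ID,
		.type        = ION_HEAP_TYPE_CARVEOUT,
		.name        = ION_MM_FIRMWARE_HEAP_NAME,
		.memory_type = ION_EBI_TYPE,
		.size        = SZ_2M,		/* illustrative */
		.extra_data  = &fw_co_ion_pdata,
	},
	{
		.id          = ION_CP_MM_HEAP_ID,
		.type        = ION_HEAP_TYPE_CP,
		.name        = "mm",		/* illustrative name */
		.memory_type = ION_EBI_TYPE,
		.size        = SZ_64M,		/* illustrative */
		.extra_data  = &cp_mm_ion_pdata,
	},
};

Leaving .base at zero is what opts a carveout heap into the fixup; msm_ion_heap_fixup() then carves out one contiguous region for the pair and hands the CP heap the tail of it.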


@@ -39,6 +39,8 @@
* @base: the base address of the memory pool.
* @permission_type: Identifier for the memory used by SCM for protecting
* and unprotecting memory.
+* @secure_base: Base address used when securing a heap that is shared.
+* @secure_size: Size used when securing a heap that is shared.
* @lock: mutex to protect shared access.
* @heap_secured: Identifies the heap_id as secure or not.
* @allocated_bytes: the total number of allocated bytes from the pool.
@@ -59,6 +61,8 @@ struct ion_cp_heap {
struct gen_pool *pool;
ion_phys_addr_t base;
unsigned int permission_type;
+ion_phys_addr_t secure_base;
+size_t secure_size;
struct mutex lock;
unsigned int heap_secured;
unsigned long allocated_bytes;
@@ -94,8 +98,8 @@ static int ion_cp_protect(struct ion_heap *heap)
int ret_value = 0;
if (cp_heap->heap_secured == NON_SECURED_HEAP) {
-int ret_value = ion_cp_protect_mem(cp_heap->base,
-cp_heap->total_size, cp_heap->permission_type);
+int ret_value = ion_cp_protect_mem(cp_heap->secure_base,
+cp_heap->secure_size, cp_heap->permission_type);
if (ret_value) {
pr_err("Failed to protect memory for heap %s - "
"error code: %d", heap->name, ret_value);
@@ -119,7 +123,7 @@ static void ion_cp_unprotect(struct ion_heap *heap)
if (cp_heap->heap_secured == SECURED_HEAP) {
int error_code = ion_cp_unprotect_mem(
-cp_heap->base, cp_heap->total_size,
+cp_heap->secure_base, cp_heap->secure_size,
cp_heap->permission_type);
if (error_code) {
pr_err("Failed to un-protect memory for heap %s - "
@@ -578,22 +582,6 @@ static struct ion_heap_ops cp_heap_ops = {
.unsecure_heap = ion_cp_unsecure_heap,
};
-static unsigned long ion_cp_get_base(unsigned long size, int memory_type)
-{
-switch (memory_type) {
-case ION_EBI_TYPE:
-return allocate_contiguous_ebi_nomap(size, PAGE_SIZE);
-break;
-case ION_SMI_TYPE:
-return allocate_contiguous_memory_nomap(size, MEMTYPE_SMI,
-PAGE_SIZE);
-break;
-default:
-return 0;
-}
-}
struct ion_heap *ion_cp_heap_create(struct ion_platform_heap *heap_data)
{
struct ion_cp_heap *cp_heap;
@@ -603,15 +591,6 @@ struct ion_heap *ion_cp_heap_create(struct ion_platform_heap *heap_data)
if (!cp_heap)
return ERR_PTR(-ENOMEM);
-heap_data->base = ion_cp_get_base(heap_data->size,
-heap_data->memory_type);
-if (!heap_data->base) {
-pr_err("%s: could not get memory for heap %s"
-" (id %x)\n", __func__, heap_data->name,
-heap_data->id);
-goto free_heap;
-}
mutex_init(&cp_heap->lock);
cp_heap->pool = gen_pool_create(12, -1);
@@ -631,10 +610,16 @@ struct ion_heap *ion_cp_heap_create(struct ion_platform_heap *heap_data)
cp_heap->heap.ops = &cp_heap_ops;
cp_heap->heap.type = ION_HEAP_TYPE_CP;
cp_heap->heap_secured = NON_SECURED_HEAP;
+cp_heap->secure_base = cp_heap->base;
+cp_heap->secure_size = heap_data->size;
if (heap_data->extra_data) {
struct ion_cp_heap_pdata *extra_data =
heap_data->extra_data;
cp_heap->permission_type = extra_data->permission_type;
+if (extra_data->secure_size) {
+cp_heap->secure_base = extra_data->secure_base;
+cp_heap->secure_size = extra_data->secure_size;
+}
if (extra_data->setup_region)
cp_heap->bus_id = extra_data->setup_region();
if (extra_data->request_region)


@@ -44,21 +44,118 @@ int msm_ion_unsecure_heap(int heap_id)
}
EXPORT_SYMBOL(msm_ion_unsecure_heap);
-static unsigned long msm_ion_get_base(unsigned long size, int memory_type)
+static unsigned long msm_ion_get_base(unsigned long size, int memory_type,
+unsigned int align)
{
switch (memory_type) {
case ION_EBI_TYPE:
-return allocate_contiguous_ebi_nomap(size, PAGE_SIZE);
+return allocate_contiguous_ebi_nomap(size, align);
break;
case ION_SMI_TYPE:
return allocate_contiguous_memory_nomap(size, MEMTYPE_SMI,
-PAGE_SIZE);
+align);
break;
default:
return 0;
}
}
static struct ion_platform_heap *find_heap(const struct ion_platform_heap
heap_data[],
unsigned int nr_heaps,
int heap_id)
{
unsigned int i;
for (i = 0; i < nr_heaps; ++i) {
const struct ion_platform_heap *heap = &heap_data[i];
if (heap->id == heap_id)
return (struct ion_platform_heap *) heap;
}
return 0;
}
static void allocate_co_memory(struct ion_platform_heap *heap,
struct ion_platform_heap heap_data[],
unsigned int nr_heaps)
{
struct ion_co_heap_pdata *co_heap_data =
(struct ion_co_heap_pdata *) heap->extra_data;
if (co_heap_data->adjacent_mem_id != INVALID_HEAP_ID) {
struct ion_platform_heap *shared_heap =
find_heap(heap_data, nr_heaps,
co_heap_data->adjacent_mem_id);
if (shared_heap) {
struct ion_cp_heap_pdata *cp_data =
(struct ion_cp_heap_pdata *) shared_heap->extra_data;
heap->base = msm_ion_get_base(
heap->size + shared_heap->size,
shared_heap->memory_type,
co_heap_data->align);
if (heap->base) {
shared_heap->base = heap->base + heap->size;
cp_data->secure_base = heap->base;
cp_data->secure_size =
heap->size + shared_heap->size;
} else {
pr_err("%s: could not get memory for heap %s "
"(id %x)\n", __func__, heap->name, heap->id);
}
}
}
}
/* Fixup heaps in the board file to support two heaps being adjacent to each
* other. A flag (adjacent_mem_id) in the platform data tells us that the
* heap's physical memory location must be adjacent to the specified heap. We
* do this by carving out memory for both heaps and then splitting the memory
* between the two heaps. The heap specifying "adjacent_mem_id" gets the base
* of the memory, while the heap specified by "adjacent_mem_id" gets base+size
* as its base address.
* Note: Modifies platform data and allocates memory.
*/
static void msm_ion_heap_fixup(struct ion_platform_heap heap_data[],
unsigned int nr_heaps)
{
unsigned int i;
for (i = 0; i < nr_heaps; i++) {
struct ion_platform_heap *heap = &heap_data[i];
if (!heap->base && heap->type == ION_HEAP_TYPE_CARVEOUT) {
if (heap->extra_data)
allocate_co_memory(heap, heap_data, nr_heaps);
}
}
}
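To make the split described in the comment above concrete, here is a hypothetical before/after for one pair of heaps (all addresses and sizes are made up for illustration):

/*
 * Example: a 2 MB firmware carveout adjacent to a 64 MB CP heap, assuming
 * msm_ion_get_base(SZ_2M + SZ_64M, ...) returns 0x40000000:
 *
 *   carveout heap (the one specifying adjacent_mem_id):
 *       base        = 0x40000000, size        = 0x00200000
 *   CP heap (the heap named by adjacent_mem_id):
 *       base        = 0x40200000, size        = 0x04000000
 *       secure_base = 0x40000000, secure_size = 0x04200000
 *
 * ion_cp_protect()/ion_cp_unprotect() then operate on secure_base/secure_size,
 * so the combined range covering both heaps is (un)secured in one call.
 */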
static void msm_ion_allocate(struct ion_platform_heap *heap)
{
if (!heap->base && heap->extra_data) {
unsigned int align = 0;
switch (heap->type) {
case ION_HEAP_TYPE_CARVEOUT:
align =
((struct ion_co_heap_pdata *) heap->extra_data)->align;
break;
case ION_HEAP_TYPE_CP:
align =
((struct ion_cp_heap_pdata *) heap->extra_data)->align;
break;
default:
break;
}
if (align) {
heap->base = msm_ion_get_base(heap->size,
heap->memory_type,
align);
if (!heap->base)
pr_err("%s: could not get memory for heap %s "
"(id %x)\n", __func__, heap->name, heap->id);
}
}
}
static int msm_ion_probe(struct platform_device *pdev)
{
struct ion_platform_data *pdata = pdev->dev.platform_data;
@@ -80,20 +177,12 @@ static int msm_ion_probe(struct platform_device *pdev)
goto freeheaps;
}
+msm_ion_heap_fixup(pdata->heaps, num_heaps);
/* create the heaps as specified in the board file */
for (i = 0; i < num_heaps; i++) {
struct ion_platform_heap *heap_data = &pdata->heaps[i];
-if (heap_data->type == ION_HEAP_TYPE_CARVEOUT) {
-heap_data->base = msm_ion_get_base(heap_data->size,
-heap_data->memory_type);
-if (!heap_data->base) {
-pr_err("%s: could not get memory for heap %s"
-" (id %x)\n", __func__, heap_data->name,
-heap_data->id);
-continue;
-}
-}
+msm_ion_allocate(heap_data);
heaps[i] = ion_heap_create(heap_data);
if (IS_ERR_OR_NULL(heaps[i])) {

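The probe ordering above is what ties this together: msm_ion_heap_fixup() runs once before the per-heap loop, so when msm_ion_allocate() later sees a heap whose base was already set by the fixup it does nothing. A minimal sketch of that interaction, assuming a hypothetical wrapper (msm_ion_setup_heaps does not exist in the commit; the logic lives inline in msm_ion_probe()):

/* Hypothetical wrapper illustrating the allocation order in msm_ion_probe(). */
static void msm_ion_setup_heaps(struct ion_platform_heap heaps_pdata[],
				unsigned int nr_heaps)
{
	unsigned int i;

	/* Pair adjacent heaps first; this sets heap->base (and the CP heap's
	 * secure_base/secure_size) from a single contiguous carve-out. */
	msm_ion_heap_fixup(heaps_pdata, nr_heaps);

	/* Per-heap allocation: a no-op for heaps the fixup already placed,
	 * since msm_ion_allocate() bails out when heap->base is non-zero. */
	for (i = 0; i < nr_heaps; i++)
		msm_ion_allocate(&heaps_pdata[i]);
}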

@@ -62,6 +62,7 @@ enum ion_heap_type {
*/
enum ion_heap_ids {
+INVALID_HEAP_ID = -1,
ION_IOMMU_HEAP_ID = 4,
ION_CP_MM_HEAP_ID = 8,
ION_CP_MFC_HEAP_ID = 12,
@@ -70,6 +71,7 @@ enum ion_heap_ids {
ION_SF_HEAP_ID = 24,
ION_AUDIO_HEAP_ID = 28,
+ION_MM_FIRMWARE_HEAP_ID = 29,
ION_SYSTEM_HEAP_ID = 30,
ION_HEAP_ID_RESERVED = 31 /** Bit reserved for ION_SECURE flag */
@@ -93,6 +95,7 @@ enum ion_heap_ids {
#define ION_IOMMU_HEAP_NAME "iommu"
#define ION_MFC_HEAP_NAME "mfc"
#define ION_WB_HEAP_NAME "wb"
+#define ION_MM_FIRMWARE_HEAP_NAME "mm_fw"
#define CACHED 1
#define UNCACHED 0
@@ -148,12 +151,17 @@ struct ion_platform_heap {
struct ion_cp_heap_pdata {
enum ion_permission_type permission_type;
+unsigned int align;
+ion_phys_addr_t secure_base; /* Base addr used when heap is shared */
+size_t secure_size; /* Size used for securing heap when heap is shared*/
int (*request_region)(void *);
int (*release_region)(void *);
void *(*setup_region)(void);
};
struct ion_co_heap_pdata {
+int adjacent_mem_id;
+unsigned int align;
int (*request_region)(void *);
int (*release_region)(void *);
void *(*setup_region)(void);