gpu: ion: Add callbacks to enable SMI voting in ION.
This change is part of the move from PMEM to ION. ION needs an SMI voting interface similar to PMEM's.

Change-Id: I18888f46198848694fb7e1e0d2671074bf51d7c9
Signed-off-by: Alex Bird <alexbird@codeaurora.org>
This commit is contained in:
@@ -33,6 +33,10 @@ struct ion_carveout_heap {
|
||||
ion_phys_addr_t base;
|
||||
unsigned long allocated_bytes;
|
||||
unsigned long total_size;
|
||||
void (*request_region)(void *);
|
||||
void (*release_region)(void *);
|
||||
atomic_t map_count;
|
||||
void *bus_id;
|
||||
};
|
||||
|
||||
ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
|
||||
@@ -136,6 +140,12 @@ void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
|
||||
void *ion_carveout_heap_map_kernel(struct ion_heap *heap,
|
||||
struct ion_buffer *buffer)
|
||||
{
|
||||
struct ion_carveout_heap *carveout_heap =
|
||||
container_of(heap, struct ion_carveout_heap, heap);
|
||||
|
||||
if (atomic_inc_return(&carveout_heap->map_count) == 1)
|
||||
carveout_heap->request_region(carveout_heap->bus_id);
|
||||
|
||||
if (ION_IS_CACHED(buffer->flags))
|
||||
return ioremap_cached(buffer->priv_phys, buffer->size);
|
||||
else
|
||||
@@ -145,14 +155,27 @@ void *ion_carveout_heap_map_kernel(struct ion_heap *heap,
|
||||
void ion_carveout_heap_unmap_kernel(struct ion_heap *heap,
|
||||
struct ion_buffer *buffer)
|
||||
{
|
||||
struct ion_carveout_heap *carveout_heap =
|
||||
container_of(heap, struct ion_carveout_heap, heap);
|
||||
|
||||
__arm_iounmap(buffer->vaddr);
|
||||
buffer->vaddr = NULL;
|
||||
|
||||
if (atomic_dec_and_test(&carveout_heap->map_count))
|
||||
carveout_heap->release_region(carveout_heap->bus_id);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
int ion_carveout_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
|
||||
struct vm_area_struct *vma)
|
||||
{
|
||||
struct ion_carveout_heap *carveout_heap =
|
||||
container_of(heap, struct ion_carveout_heap, heap);
|
||||
|
||||
if (atomic_inc_return(&carveout_heap->map_count) == 1)
|
||||
carveout_heap->request_region(carveout_heap->bus_id);
|
||||
|
||||
if (ION_IS_CACHED(buffer->flags))
|
||||
return remap_pfn_range(vma, vma->vm_start,
|
||||
__phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
|
||||
@@ -165,6 +188,16 @@ int ion_carveout_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
|
||||
pgprot_noncached(vma->vm_page_prot));
|
||||
}
|
||||
|
||||
void ion_carveout_heap_unmap_user(struct ion_heap *heap,
|
||||
struct ion_buffer *buffer)
|
||||
{
|
||||
struct ion_carveout_heap *carveout_heap =
|
||||
container_of(heap, struct ion_carveout_heap, heap);
|
||||
|
||||
if (atomic_dec_and_test(&carveout_heap->map_count))
|
||||
carveout_heap->release_region(carveout_heap->bus_id);
|
||||
}
|
||||
|
||||
int ion_carveout_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
|
||||
void *vaddr, unsigned int offset, unsigned int length,
|
||||
unsigned int cmd)
|
||||
@@ -213,6 +246,7 @@ static struct ion_heap_ops carveout_heap_ops = {
|
||||
.phys = ion_carveout_heap_phys,
|
||||
.map_user = ion_carveout_heap_map_user,
|
||||
.map_kernel = ion_carveout_heap_map_kernel,
|
||||
.unmap_user = ion_carveout_heap_unmap_user,
|
||||
.unmap_kernel = ion_carveout_heap_unmap_kernel,
|
||||
.map_dma = ion_carveout_heap_map_dma,
|
||||
.unmap_dma = ion_carveout_heap_unmap_dma,
|
||||
@@ -246,6 +280,9 @@ struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
|
||||
carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT;
|
||||
carveout_heap->allocated_bytes = 0;
|
||||
carveout_heap->total_size = heap_data->size;
|
||||
carveout_heap->bus_id = heap_data->setup_region();
|
||||
carveout_heap->request_region = heap_data->request_region;
|
||||
carveout_heap->release_region = heap_data->release_region;
|
||||
|
||||
return &carveout_heap->heap;
|
||||
}
|
||||
|
||||
@@ -74,6 +74,7 @@ struct ion_buffer {
|
||||
* @map_kernel map memory to the kernel
|
||||
* @unmap_kernel unmap memory to the kernel
|
||||
* @map_user map memory to userspace
|
||||
* @unmap_user unmap memory to userspace
|
||||
*/
|
||||
struct ion_heap_ops {
|
||||
int (*allocate) (struct ion_heap *heap,
|
||||
@@ -89,6 +90,7 @@ struct ion_heap_ops {
|
||||
void (*unmap_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
|
||||
int (*map_user) (struct ion_heap *mapper, struct ion_buffer *buffer,
|
||||
struct vm_area_struct *vma);
|
||||
void (*unmap_user) (struct ion_heap *mapper, struct ion_buffer *buffer);
|
||||
int (*cache_op)(struct ion_heap *heap, struct ion_buffer *buffer,
|
||||
void *vaddr, unsigned int offset,
|
||||
unsigned int length, unsigned int cmd);
|
||||
|
||||
@@ -98,6 +98,11 @@ struct ion_buffer;
|
||||
* @name: used for debug purposes
|
||||
* @base: base address of heap in physical memory if applicable
|
||||
* @size: size of the heap in bytes if applicable
|
||||
* @request_region: function to be called when the number of allocations goes
|
||||
* from 0 -> 1
|
||||
* @release_region: function to be called when the number of allocations goes
|
||||
* from 1 -> 0
|
||||
* @setup_region: function to be called upon ion registration
|
||||
*
|
||||
* Provided by the board file.
|
||||
*/
|
||||
@@ -108,6 +113,9 @@ struct ion_platform_heap {
|
||||
ion_phys_addr_t base;
|
||||
size_t size;
|
||||
enum ion_memory_types memory_type;
|
||||
void (*request_region)(void *);
|
||||
void (*release_region)(void *);
|
||||
void *(*setup_region)(void);
|
||||
};
|
||||
|
||||
/**
|
||||
|
||||
Reference in New Issue
Block a user