gpu: ion: support begin/end and kmap/kunmap dma_buf ops
These ops were added in the 3.4 kernel. This patch adds support for them to ion. The existing ion_map_kernel/ion_unmap_kernel API is retained alongside the new interface.

Change-Id: I6d2db284dce12c2d8cc4e540865beee2da43bd0c
Signed-off-by: Rebecca Schultz Zavin <rebecca@android.com>
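For context, a rough importer-side sketch of how another kernel driver could drive these ops through the 3.4-era dma-buf wrappers (dma_buf_begin_cpu_access(), dma_buf_kmap() and their counterparts, whose argument lists match the ops wired up below); example_cpu_read() and its error handling are illustrative and not part of this patch:

#include <linux/dma-buf.h>
#include <linux/dma-direction.h>

static int example_cpu_read(struct dma_buf *dmabuf, size_t len)
{
	void *vaddr;
	int ret;

	/* Maps the buffer kernel-side; fails with -ENODEV if the heap has no map_kernel. */
	ret = dma_buf_begin_cpu_access(dmabuf, 0, len, DMA_FROM_DEVICE);
	if (ret)
		return ret;

	/* Page 0 of the buffer; end_cpu_access below drops the mapping again. */
	vaddr = dma_buf_kmap(dmabuf, 0);
	if (!vaddr) {
		ret = -ENOMEM;
		goto out;
	}

	/* ... CPU reads through vaddr go here ... */

	dma_buf_kunmap(dmabuf, 0, vaddr);
out:
	dma_buf_end_cpu_access(dmabuf, 0, len, DMA_FROM_DEVICE);
	return ret;
}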
@@ -391,6 +391,22 @@ int ion_phys(struct ion_client *client, struct ion_handle *handle,
 	return ret;
 }
 
+static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
+{
+	void *vaddr;
+
+	if (buffer->kmap_cnt) {
+		buffer->kmap_cnt++;
+		return buffer->vaddr;
+	}
+	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
+	if (IS_ERR_OR_NULL(vaddr))
+		return vaddr;
+	buffer->vaddr = vaddr;
+	buffer->kmap_cnt++;
+	return vaddr;
+}
+
 static void *ion_handle_kmap_get(struct ion_handle *handle)
 {
 	struct ion_buffer *buffer = handle->buffer;
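ion_buffer_kmap_get() counts kernel mappings per buffer: the first user triggers the heap's map_kernel() op and caches the result in buffer->vaddr, later users only bump kmap_cnt. As a hedged illustration of what a heap is expected to supply behind that op, below is a vmap()-based sketch in the spirit of the system heap of this era; treating buffer->priv_virt as an sg_table and the example_heap_* names are assumptions of the sketch, not something this patch defines:

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include "ion_priv.h"	/* struct ion_heap, struct ion_buffer */

static void *example_heap_map_kernel(struct ion_heap *heap,
				     struct ion_buffer *buffer)
{
	struct sg_table *table = buffer->priv_virt;	/* assumption of the sketch */
	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	struct page **pages = vmalloc(sizeof(struct page *) * npages);
	struct page **tmp = pages;
	struct scatterlist *sg;
	void *vaddr;
	int i, j;

	if (!pages)
		return ERR_PTR(-ENOMEM);

	/* Flatten the scatterlist into a page array that vmap() can consume. */
	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = sg_page(sg);

		for (j = 0; j < sg->length / PAGE_SIZE; j++)
			*(tmp++) = page + j;
	}

	vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
	vfree(pages);

	return vaddr ? vaddr : ERR_PTR(-ENOMEM);
}

static void example_heap_unmap_kernel(struct ion_heap *heap,
				      struct ion_buffer *buffer)
{
	/* buffer->vaddr is what map_kernel() returned; ion clears it afterwards. */
	vunmap(buffer->vaddr);
}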
@@ -399,34 +415,30 @@ static void *ion_handle_kmap_get(struct ion_handle *handle)
 	if (handle->kmap_cnt) {
 		handle->kmap_cnt++;
 		return buffer->vaddr;
-	} else if (buffer->kmap_cnt) {
-		handle->kmap_cnt++;
-		buffer->kmap_cnt++;
-		return buffer->vaddr;
 	}
-
-	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
-	buffer->vaddr = vaddr;
-	if (IS_ERR_OR_NULL(vaddr)) {
-		buffer->vaddr = NULL;
+	vaddr = ion_buffer_kmap_get(buffer);
+	if (IS_ERR_OR_NULL(vaddr))
 		return vaddr;
-	}
 	handle->kmap_cnt++;
-	buffer->kmap_cnt++;
 	return vaddr;
 }
 
+static void ion_buffer_kmap_put(struct ion_buffer *buffer)
+{
+	buffer->kmap_cnt--;
+	if (!buffer->kmap_cnt) {
+		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
+		buffer->vaddr = NULL;
+	}
+}
+
 static void ion_handle_kmap_put(struct ion_handle *handle)
 {
 	struct ion_buffer *buffer = handle->buffer;
 
 	handle->kmap_cnt--;
 	if (!handle->kmap_cnt)
-		buffer->kmap_cnt--;
-	if (!buffer->kmap_cnt) {
-		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
-		buffer->vaddr = NULL;
-	}
+		ion_buffer_kmap_put(buffer);
 }
 
 void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
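With the buffer-level count factored out, ion_handle_kmap_put() simply delegates to ion_buffer_kmap_put(), so kernel mappings taken through ion handles and those taken through the dma-buf CPU-access ops share one buffer->kmap_cnt and one cached buffer->vaddr. From a caller's point of view, the retained client API mentioned in the commit message is used as in the hedged sketch below; example_fill_buffer() and the zero-fill are purely illustrative:

#include <linux/err.h>
#include <linux/string.h>
#include "ion.h"	/* ion_map_kernel()/ion_unmap_kernel() declarations */

static int example_fill_buffer(struct ion_client *client,
			       struct ion_handle *handle, size_t size)
{
	/* The first mapping of the buffer calls the heap's map_kernel(). */
	void *vaddr = ion_map_kernel(client, handle);

	if (IS_ERR_OR_NULL(vaddr))
		return vaddr ? PTR_ERR(vaddr) : -ENOMEM;

	memset(vaddr, 0, size);		/* CPU access through the kernel mapping */

	/* Drops handle->kmap_cnt and, via ion_buffer_kmap_put(), buffer->kmap_cnt. */
	ion_unmap_kernel(client, handle);
	return 0;
}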
@@ -675,7 +687,8 @@ static void ion_dma_buf_release(struct dma_buf *dmabuf)
 
 static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
 {
-	return NULL;
+	struct ion_buffer *buffer = dmabuf->priv;
+	return buffer->vaddr + offset;
 }
 
 static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
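ion_dma_buf_kmap()/ion_dma_buf_kunmap() stay trivial on purpose: they hand back offsets into buffer->vaddr, so they are only meaningful while a kernel mapping is live, i.e. between begin_cpu_access() and end_cpu_access() (or while an ion client holds an ion_map_kernel() mapping). One hedged aside: the dma-buf kmap interface is defined in page-sized units, so its second argument is generally a page number rather than a byte offset, whereas the implementation above adds it as raw bytes; the core wrapper of this generation is roughly the pass-through below, reproduced from memory and not part of this patch:

/* Rough shape of the 3.4-era core wrapper (approximation, not from this patch). */
void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
{
	WARN_ON(!dmabuf);

	/* The exporter's op receives the page number unchanged. */
	return dmabuf->ops->kmap(dmabuf, page_num);
}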
@@ -684,26 +697,49 @@ static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
 	return;
 }
 
-static void *ion_dma_buf_kmap_atomic(struct dma_buf *dmabuf,
-				     unsigned long offset)
+static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
+					size_t len,
+					enum dma_data_direction direction)
 {
-	return NULL;
+	struct ion_buffer *buffer = dmabuf->priv;
+	void *vaddr;
+
+	if (!buffer->heap->ops->map_kernel) {
+		pr_err("%s: map kernel is not implemented by this heap.\n",
+		       __func__);
+		return -ENODEV;
+	}
+
+	mutex_lock(&buffer->lock);
+	vaddr = ion_buffer_kmap_get(buffer);
+	mutex_unlock(&buffer->lock);
+	if (IS_ERR(vaddr))
+		return PTR_ERR(vaddr);
+	if (!vaddr)
+		return -ENOMEM;
+	return 0;
 }
 
-static void ion_dma_buf_kunmap_atomic(struct dma_buf *dmabuf,
-				      unsigned long offset, void *ptr)
+static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
+				       size_t len,
+				       enum dma_data_direction direction)
 {
-	return;
+	struct ion_buffer *buffer = dmabuf->priv;
+
+	mutex_lock(&buffer->lock);
+	ion_buffer_kmap_put(buffer);
+	mutex_unlock(&buffer->lock);
 }
 
 struct dma_buf_ops dma_buf_ops = {
 	.map_dma_buf = ion_map_dma_buf,
 	.unmap_dma_buf = ion_unmap_dma_buf,
 	.mmap = ion_mmap,
 	.release = ion_dma_buf_release,
-	.kmap_atomic = ion_dma_buf_kmap_atomic,
-	.kunmap_atomic = ion_dma_buf_kunmap_atomic,
+	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
+	.end_cpu_access = ion_dma_buf_end_cpu_access,
+	.kmap_atomic = ion_dma_buf_kmap,
+	.kunmap_atomic = ion_dma_buf_kunmap,
 	.kmap = ion_dma_buf_kmap,
 	.kunmap = ion_dma_buf_kunmap,
 };
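Finally, these ops only take effect once the table is handed to the dma-buf core when a buffer is exported. A hedged export-side sketch, modeled on ion's share path and assuming the 3.4-era four-argument dma_buf_export() and two-argument dma_buf_fd(); example_export_fd() and the simplified error handling are illustrative:

#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/fcntl.h>

/* Assumed to live in ion.c next to the dma_buf_ops table shown above. */
static int example_export_fd(struct ion_buffer *buffer)
{
	struct dma_buf *dmabuf;
	int fd;

	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* Wrap the buffer in a file descriptor userspace can pass around. */
	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}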