gpu: ion: Allow smaller mappings

Clients using ION must be able to mmap a smaller
set of pages than what was allocated from the
IOMMU heap.

Add a check so that we do not try to map a page past
the end of the vma.

Change-Id: I1241f952359f6bb1d5bb0deb11cc0f44d94a3404
Signed-off-by: Olav Haugan <ohaugan@codeaurora.org>
This commit is contained in:
Olav Haugan
2012-02-25 10:32:41 -08:00
committed by Stephen Boyd
parent 9c3801b1e3
commit 7c80b9fdf1

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -151,22 +151,24 @@ int ion_iommu_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
{
	struct ion_iommu_priv_data *data = buffer->priv_virt;
	int i;
	unsigned long curr_addr;

	/* Buffer was never populated by the IOMMU heap allocator. */
	if (!data)
		return -EINVAL;

	/* Uncached buffers are mapped write-combined into userspace. */
	if (!ION_IS_CACHED(buffer->flags))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	/*
	 * Allow the client to mmap fewer pages than were allocated:
	 * stop inserting pages once we reach the end of the vma, even
	 * if the buffer (data->nrpages) extends further.
	 */
	curr_addr = vma->vm_start;
	for (i = 0; i < data->nrpages && curr_addr < vma->vm_end; i++) {
		if (vm_insert_page(vma, curr_addr, data->pages[i])) {
			/*
			 * This will fail the mmap which will
			 * clean up the vma space properly.
			 */
			return -EINVAL;
		}
		curr_addr += PAGE_SIZE;
	}
	return 0;
}