
ion: fix sparse warnings

Fix sparse warnings in ion.

Signed-off-by: Colin Cross <ccross@android.com>
Signed-off-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Colin Cross 2013-12-13 19:26:28 -08:00 committed by Greg Kroah-Hartman
parent 5c6a470557
commit f63958d80c
7 changed files with 37 additions and 38 deletions
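The fixes below follow a few recurring patterns: functions and variables used only within one file are marked static, pointer contexts use NULL rather than 0, the system heap's allocation-flag variables become gfp_t instead of plain unsigned int, and a gfp mask test is normalized with !! before being stored in a bool. As a rough stand-alone illustration of the static and NULL patterns (a hypothetical example.c, not taken from this commit), sparse reports warnings along the lines of "symbol 'find_name' was not declared. Should it be static?" and "Using plain integer as NULL pointer" when such a helper is left non-static or returns 0 as a pointer; the sketch shows the fixed form:

/* example.c - minimal sketch of the static/NULL fixes; names are invented. */
#include <stddef.h>

#define NUM_IDS 4

/* static: the helper is only used in this file, so sparse no longer asks
 * whether it should have been declared in a header somewhere. */
static const char *find_name(const char *names[], size_t i)
{
	if (i >= NUM_IDS)
		return NULL;	/* NULL, not 0, for a pointer return value */
	return names[i];
}

int main(void)
{
	/* pointer array initialized with {NULL} rather than {0} */
	const char *names[NUM_IDS] = {NULL};

	names[0] = "system";
	return find_name(names, 0) ? 0 : 1;
}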

drivers/staging/android/ion/ion.c

@@ -669,7 +669,7 @@ static int ion_debug_client_show(struct seq_file *s, void *unused)
struct ion_client *client = s->private;
struct rb_node *n;
size_t sizes[ION_NUM_HEAP_IDS] = {0};
-const char *names[ION_NUM_HEAP_IDS] = {0};
+const char *names[ION_NUM_HEAP_IDS] = {NULL};
int i;
mutex_lock(&client->lock);
@@ -887,7 +887,7 @@ static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
mutex_unlock(&buffer->lock);
}
-int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct ion_buffer *buffer = vma->vm_private_data;
unsigned long pfn;
@@ -939,7 +939,7 @@ static void ion_vm_close(struct vm_area_struct *vma)
mutex_unlock(&buffer->lock);
}
-struct vm_operations_struct ion_vma_ops = {
+static struct vm_operations_struct ion_vma_ops = {
.open = ion_vm_open,
.close = ion_vm_close,
.fault = ion_vm_fault,
@@ -1030,7 +1030,7 @@ static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
mutex_unlock(&buffer->lock);
}
-struct dma_buf_ops dma_buf_ops = {
+static struct dma_buf_ops dma_buf_ops = {
.map_dma_buf = ion_map_dma_buf,
.unmap_dma_buf = ion_unmap_dma_buf,
.mmap = ion_mmap,

drivers/staging/android/ion/ion_carveout_heap.c

@@ -85,8 +85,8 @@ static void ion_carveout_heap_free(struct ion_buffer *buffer)
buffer->priv_phys = ION_CARVEOUT_ALLOCATE_FAIL;
}
-struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
-struct ion_buffer *buffer)
+static struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
+struct ion_buffer *buffer)
{
struct sg_table *table;
int ret;
@@ -104,8 +104,8 @@ struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
return table;
}
-void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
-struct ion_buffer *buffer)
+static void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
+struct ion_buffer *buffer)
{
sg_free_table(buffer->sg_table);
}

drivers/staging/android/ion/ion_chunk_heap.c

@@ -115,14 +115,14 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer)
kfree(table);
}
-struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
-struct ion_buffer *buffer)
+static struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
+struct ion_buffer *buffer)
{
return buffer->priv_virt;
}
-void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
-struct ion_buffer *buffer)
+static void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
+struct ion_buffer *buffer)
{
return;
}

drivers/staging/android/ion/ion_cma_heap.c

@@ -44,8 +44,8 @@ struct ion_cma_buffer_info {
* This function could be replaced by dma_common_get_sgtable
* as soon as it will avalaible.
*/
-int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
-void *cpu_addr, dma_addr_t handle, size_t size)
+static int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
+void *cpu_addr, dma_addr_t handle, size_t size)
{
struct page *page = virt_to_page(cpu_addr);
int ret;
@@ -137,16 +137,16 @@ static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
return 0;
}
-struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
-struct ion_buffer *buffer)
+static struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
+struct ion_buffer *buffer)
{
struct ion_cma_buffer_info *info = buffer->priv_virt;
return info->table;
}
-void ion_cma_heap_unmap_dma(struct ion_heap *heap,
-struct ion_buffer *buffer)
+static void ion_cma_heap_unmap_dma(struct ion_heap *heap,
+struct ion_buffer *buffer)
{
return;
}
@@ -162,7 +162,8 @@ static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
buffer->size);
}
-void *ion_cma_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer)
+static void *ion_cma_map_kernel(struct ion_heap *heap,
+struct ion_buffer *buffer)
{
struct ion_cma_buffer_info *info = buffer->priv_virt;
/* kernel memory mapping has been done at allocation time */

drivers/staging/android/ion/ion_heap.c

@@ -38,7 +38,7 @@ void *ion_heap_map_kernel(struct ion_heap *heap,
struct page **tmp = pages;
if (!pages)
-return 0;
+return NULL;
if (buffer->flags & ION_FLAG_CACHED)
pgprot = PAGE_KERNEL;
@@ -193,7 +193,7 @@ size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
return total_drained;
}
-int ion_heap_deferred_free(void *data)
+static int ion_heap_deferred_free(void *data)
{
struct ion_heap *heap = data;

drivers/staging/android/ion/ion_page_pool.c

@@ -134,7 +134,7 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
int i;
bool high;
-high = gfp_mask & __GFP_HIGHMEM;
+high = !!(gfp_mask & __GFP_HIGHMEM);
if (nr_to_scan == 0)
return ion_page_pool_total(pool, high);
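The !! added above matters because gfp_mask and __GFP_HIGHMEM have type gfp_t, which the kernel defines as a __bitwise type under sparse; the result of the & is therefore a restricted gfp_t, and assigning it directly to a bool is the kind of implicit conversion sparse flags, roughly as "incorrect type in assignment (different base types)". The !! turns the test into a plain 0-or-1 int first. A stand-alone sketch of the idea, using invented flags_t and FLAG_HIGH stand-ins rather than the real kernel definitions:

/* bitwise_sketch.c - why !!(mask & FLAG) is used before storing in a bool.
 * Plain gcc compiles this either way; sparse (run via cgcc, which defines
 * __CHECKER__) is what complains if the !! is dropped.
 */
#ifdef __CHECKER__
#define __bitwise	__attribute__((bitwise))
#define __force		__attribute__((force))
#else
#define __bitwise
#define __force
#endif

#include <stdbool.h>

typedef unsigned int __bitwise flags_t;		/* stand-in for gfp_t */
#define FLAG_HIGH ((__force flags_t)0x02u)	/* stand-in for __GFP_HIGHMEM */

int main(void)
{
	flags_t mask = FLAG_HIGH;
	bool high;

	high = !!(mask & FLAG_HIGH);	/* !! yields a plain int 0 or 1, so no
					 * restricted-type value reaches the bool */
	return high ? 0 : 1;
}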

drivers/staging/android/ion/ion_system_heap.c

@@ -26,11 +26,9 @@
#include "ion.h"
#include "ion_priv.h"
-static unsigned int high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
-__GFP_NOWARN | __GFP_NORETRY) &
-~__GFP_WAIT;
-static unsigned int low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
-__GFP_NOWARN);
+static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
+__GFP_NORETRY) & ~__GFP_WAIT;
+static gfp_t low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN);
static const unsigned int orders[] = {8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);
static int order_to_index(unsigned int order)
@@ -76,12 +74,12 @@ static struct page *alloc_buffer_page(struct ion_system_heap *heap,
gfp_flags = high_order_gfp_flags;
page = alloc_pages(gfp_flags, order);
if (!page)
-return 0;
+return NULL;
ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
DMA_BIDIRECTIONAL);
}
if (!page)
-return 0;
+return NULL;
return page;
}
@@ -187,7 +185,7 @@ err:
return -ENOMEM;
}
-void ion_system_heap_free(struct ion_buffer *buffer)
+static void ion_system_heap_free(struct ion_buffer *buffer)
{
struct ion_heap *heap = buffer->heap;
struct ion_system_heap *sys_heap = container_of(heap,
@@ -211,14 +209,14 @@ void ion_system_heap_free(struct ion_buffer *buffer)
kfree(table);
}
-struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
-struct ion_buffer *buffer)
+static struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
+struct ion_buffer *buffer)
{
return buffer->priv_virt;
}
-void ion_system_heap_unmap_dma(struct ion_heap *heap,
-struct ion_buffer *buffer)
+static void ion_system_heap_unmap_dma(struct ion_heap *heap,
+struct ion_buffer *buffer)
{
return;
}
@@ -403,7 +401,7 @@ out:
return ret;
}
-void ion_system_contig_heap_free(struct ion_buffer *buffer)
+static void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
struct sg_table *table = buffer->priv_virt;
struct page *page = sg_page(table->sgl);
@@ -427,14 +425,14 @@ static int ion_system_contig_heap_phys(struct ion_heap *heap,
return 0;
}
-struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
+static struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
return buffer->priv_virt;
}
-void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
-struct ion_buffer *buffer)
+static void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
+struct ion_buffer *buffer)
{
}