diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 7d5773a99f20..dd0a2c810529 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -82,6 +82,10 @@ extern void *vmap(struct page **pages, unsigned int count,
 			unsigned long flags, pgprot_t prot);
 extern void vunmap(const void *addr);
 
+extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
+				       unsigned long uaddr, void *kaddr,
+				       unsigned long size);
+
 extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
 							unsigned long pgoff);
 void vmalloc_sync_all(void);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 3875fa2f0f60..b7259906a806 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1476,10 +1476,9 @@ static void __vunmap(const void *addr, int deallocate_pages)
 	if (!addr)
 		return;
 
-	if ((PAGE_SIZE-1) & (unsigned long)addr) {
-		WARN(1, KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
+	if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
+			addr))
 		return;
-	}
 
 	area = remove_vm_area(addr);
 	if (unlikely(!area)) {
@@ -2147,6 +2146,61 @@ finished:
 	return buflen;
 }
 
+/**
+ * remap_vmalloc_range_partial - map vmalloc pages to userspace
+ * @vma:		vma to cover
+ * @uaddr:		target user address to start at
+ * @kaddr:		virtual address of vmalloc kernel memory
+ * @size:		size of map area
+ *
+ * Returns:	0 for success, -Exxx on failure
+ *
+ * This function checks that @kaddr is a valid vmalloc'ed area,
+ * and that it is big enough to cover the range starting at
+ * @uaddr in @vma. Will return failure if those criteria aren't
+ * met.
+ *
+ * Similar to remap_pfn_range() (see mm/memory.c)
+ */
+int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
+				void *kaddr, unsigned long size)
+{
+	struct vm_struct *area;
+
+	size = PAGE_ALIGN(size);
+
+	if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
+		return -EINVAL;
+
+	area = find_vm_area(kaddr);
+	if (!area)
+		return -EINVAL;
+
+	if (!(area->flags & VM_USERMAP))
+		return -EINVAL;
+
+	if (kaddr + size > area->addr + area->size)
+		return -EINVAL;
+
+	do {
+		struct page *page = vmalloc_to_page(kaddr);
+		int ret;
+
+		ret = vm_insert_page(vma, uaddr, page);
+		if (ret)
+			return ret;
+
+		uaddr += PAGE_SIZE;
+		kaddr += PAGE_SIZE;
+		size -= PAGE_SIZE;
+	} while (size > 0);
+
+	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+
+	return 0;
+}
+EXPORT_SYMBOL(remap_vmalloc_range_partial);
+
 /**
  * remap_vmalloc_range - map vmalloc pages to userspace
  * @vma:		vma to cover (map full range of vma)
@@ -2164,40 +2218,9 @@ finished:
 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
 						unsigned long pgoff)
 {
-	struct vm_struct *area;
-	unsigned long uaddr = vma->vm_start;
-	unsigned long usize = vma->vm_end - vma->vm_start;
-
-	if ((PAGE_SIZE-1) & (unsigned long)addr)
-		return -EINVAL;
-
-	area = find_vm_area(addr);
-	if (!area)
-		return -EINVAL;
-
-	if (!(area->flags & VM_USERMAP))
-		return -EINVAL;
-
-	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
-		return -EINVAL;
-
-	addr += pgoff << PAGE_SHIFT;
-	do {
-		struct page *page = vmalloc_to_page(addr);
-		int ret;
-
-		ret = vm_insert_page(vma, uaddr, page);
-		if (ret)
-			return ret;
-
-		uaddr += PAGE_SIZE;
-		addr += PAGE_SIZE;
-		usize -= PAGE_SIZE;
-	} while (usize > 0);
-
-	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
-
-	return 0;
+	return remap_vmalloc_range_partial(vma, vma->vm_start,
+					   addr + (pgoff << PAGE_SHIFT),
+					   vma->vm_end - vma->vm_start);
 }
 EXPORT_SYMBOL(remap_vmalloc_range);
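
A usage sketch (not part of the patch): the snippet below shows how a driver's ->mmap handler might expose a window of a vmalloc_user() buffer through the new helper. The mydev_* names and MYDEV_BUF_SIZE are hypothetical; remap_vmalloc_range_partial() itself rejects any range whose addresses are not page-aligned or that runs past the end of the underlying vm area.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Hypothetical driver state: allocated elsewhere with
 * vmalloc_user(MYDEV_BUF_SIZE), so the backing vm_struct
 * has VM_USERMAP set and the pages are zeroed.
 */
static void *mydev_buf;

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long len = vma->vm_end - vma->vm_start;
	unsigned long off = vma->vm_pgoff << PAGE_SHIFT;

	/* Map only the window the caller asked for; the helper
	 * verifies that the range fits inside the vmalloc area.
	 */
	return remap_vmalloc_range_partial(vma, vma->vm_start,
					   mydev_buf + off, len);
}

With this split, remap_vmalloc_range() becomes the full-vma special case: it passes vma->vm_start and the whole vma length, so existing callers keep their old semantics, while new callers like the sketch above can map a sub-range starting at an arbitrary page-aligned user address inside the vma.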