1
0
Fork 0

docs/mm: vmalloc: re-indent kernel-doc comments

Some kernel-doc comments in mm/vmalloc.c have leading tab in
indentation.  This leads to excessive indentation in the generated HTML
and to the inconsistency of its layout ([1] vs [2]).

Besides, multi-line Note: sections are not handled properly with extra
indentation.

[1] https://www.kernel.org/doc/html/v4.20/core-api/mm-api.html?#c.vm_map_ram
[2] https://www.kernel.org/doc/html/v4.20/core-api/mm-api.html?#c.vfree

Link: http://lkml.kernel.org/r/1549549644-4903-2-git-send-email-rppt@linux.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
hifive-unleashed-5.1
Mike Rapoport 2019-03-05 15:48:36 -08:00 committed by Linus Torvalds
parent 6d2bef9df7
commit 92eac16819
1 changed file with 171 additions and 174 deletions

View File

@ -1191,6 +1191,7 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t pro
EXPORT_SYMBOL(vm_map_ram); EXPORT_SYMBOL(vm_map_ram);
static struct vm_struct *vmlist __initdata; static struct vm_struct *vmlist __initdata;
/** /**
* vm_area_add_early - add vmap area early during boot * vm_area_add_early - add vmap area early during boot
* @vm: vm_struct to add * @vm: vm_struct to add
@ -1425,13 +1426,13 @@ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
} }
/** /**
* get_vm_area - reserve a contiguous kernel virtual area * get_vm_area - reserve a contiguous kernel virtual area
* @size: size of the area * @size: size of the area
* @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
* *
* Search an area of @size in the kernel virtual mapping area, * Search an area of @size in the kernel virtual mapping area,
* and reserved it for out purposes. Returns the area descriptor * and reserved it for out purposes. Returns the area descriptor
* on success or %NULL on failure. * on success or %NULL on failure.
*/ */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags) struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{ {
@ -1448,12 +1449,12 @@ struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
} }
/** /**
* find_vm_area - find a continuous kernel virtual area * find_vm_area - find a continuous kernel virtual area
* @addr: base address * @addr: base address
* *
* Search for the kernel VM area starting at @addr, and return it. * Search for the kernel VM area starting at @addr, and return it.
* It is up to the caller to do all required locking to keep the returned * It is up to the caller to do all required locking to keep the returned
* pointer valid. * pointer valid.
*/ */
struct vm_struct *find_vm_area(const void *addr) struct vm_struct *find_vm_area(const void *addr)
{ {
@ -1467,12 +1468,12 @@ struct vm_struct *find_vm_area(const void *addr)
} }
/** /**
* remove_vm_area - find and remove a continuous kernel virtual area * remove_vm_area - find and remove a continuous kernel virtual area
* @addr: base address * @addr: base address
* *
* Search for the kernel VM area starting at @addr, and remove it. * Search for the kernel VM area starting at @addr, and remove it.
* This function returns the found VM area, but using it is NOT safe * This function returns the found VM area, but using it is NOT safe
* on SMP machines, except for its size or flags. * on SMP machines, except for its size or flags.
*/ */
struct vm_struct *remove_vm_area(const void *addr) struct vm_struct *remove_vm_area(const void *addr)
{ {
@ -1552,11 +1553,11 @@ static inline void __vfree_deferred(const void *addr)
} }
/** /**
* vfree_atomic - release memory allocated by vmalloc() * vfree_atomic - release memory allocated by vmalloc()
* @addr: memory base address * @addr: memory base address
* *
* This one is just like vfree() but can be called in any atomic context * This one is just like vfree() but can be called in any atomic context
* except NMIs. * except NMIs.
*/ */
void vfree_atomic(const void *addr) void vfree_atomic(const void *addr)
{ {
@ -1578,20 +1579,20 @@ static void __vfree(const void *addr)
} }
/** /**
* vfree - release memory allocated by vmalloc() * vfree - release memory allocated by vmalloc()
* @addr: memory base address * @addr: memory base address
* *
* Free the virtually continuous memory area starting at @addr, as * Free the virtually continuous memory area starting at @addr, as
* obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
* NULL, no operation is performed. * NULL, no operation is performed.
* *
* Must not be called in NMI context (strictly speaking, only if we don't * Must not be called in NMI context (strictly speaking, only if we don't
* have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling * have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
* conventions for vfree() arch-depenedent would be a really bad idea) * conventions for vfree() arch-depenedent would be a really bad idea)
* *
* May sleep if called *not* from interrupt context. * May sleep if called *not* from interrupt context.
* *
* NOTE: assumes that the object at @addr has a size >= sizeof(llist_node) * NOTE: assumes that the object at @addr has a size >= sizeof(llist_node)
*/ */
void vfree(const void *addr) void vfree(const void *addr)
{ {
@ -1609,13 +1610,13 @@ void vfree(const void *addr)
EXPORT_SYMBOL(vfree); EXPORT_SYMBOL(vfree);
/** /**
* vunmap - release virtual mapping obtained by vmap() * vunmap - release virtual mapping obtained by vmap()
* @addr: memory base address * @addr: memory base address
* *
* Free the virtually contiguous memory area starting at @addr, * Free the virtually contiguous memory area starting at @addr,
* which was created from the page array passed to vmap(). * which was created from the page array passed to vmap().
* *
* Must not be called in interrupt context. * Must not be called in interrupt context.
*/ */
void vunmap(const void *addr) void vunmap(const void *addr)
{ {
@ -1627,17 +1628,17 @@ void vunmap(const void *addr)
EXPORT_SYMBOL(vunmap); EXPORT_SYMBOL(vunmap);
/** /**
* vmap - map an array of pages into virtually contiguous space * vmap - map an array of pages into virtually contiguous space
* @pages: array of page pointers * @pages: array of page pointers
* @count: number of pages to map * @count: number of pages to map
* @flags: vm_area->flags * @flags: vm_area->flags
* @prot: page protection for the mapping * @prot: page protection for the mapping
* *
* Maps @count pages from @pages into contiguous kernel virtual * Maps @count pages from @pages into contiguous kernel virtual
* space. * space.
*/ */
void *vmap(struct page **pages, unsigned int count, void *vmap(struct page **pages, unsigned int count,
unsigned long flags, pgprot_t prot) unsigned long flags, pgprot_t prot)
{ {
struct vm_struct *area; struct vm_struct *area;
unsigned long size; /* In bytes */ unsigned long size; /* In bytes */
@ -1724,20 +1725,20 @@ fail:
} }
/** /**
* __vmalloc_node_range - allocate virtually contiguous memory * __vmalloc_node_range - allocate virtually contiguous memory
* @size: allocation size * @size: allocation size
* @align: desired alignment * @align: desired alignment
* @start: vm area range start * @start: vm area range start
* @end: vm area range end * @end: vm area range end
* @gfp_mask: flags for the page level allocator * @gfp_mask: flags for the page level allocator
* @prot: protection mask for the allocated pages * @prot: protection mask for the allocated pages
* @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD) * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD)
* @node: node to use for allocation or NUMA_NO_NODE * @node: node to use for allocation or NUMA_NO_NODE
* @caller: caller's return address * @caller: caller's return address
* *
* Allocate enough pages to cover @size from the page level * Allocate enough pages to cover @size from the page level
* allocator with @gfp_mask flags. Map them into contiguous * allocator with @gfp_mask flags. Map them into contiguous
* kernel virtual space, using a pagetable protection of @prot. * kernel virtual space, using a pagetable protection of @prot.
*/ */
void *__vmalloc_node_range(unsigned long size, unsigned long align, void *__vmalloc_node_range(unsigned long size, unsigned long align,
unsigned long start, unsigned long end, gfp_t gfp_mask, unsigned long start, unsigned long end, gfp_t gfp_mask,
@ -1788,24 +1789,23 @@ EXPORT_SYMBOL_GPL(__vmalloc_node_range);
#endif #endif
/** /**
* __vmalloc_node - allocate virtually contiguous memory * __vmalloc_node - allocate virtually contiguous memory
* @size: allocation size * @size: allocation size
* @align: desired alignment * @align: desired alignment
* @gfp_mask: flags for the page level allocator * @gfp_mask: flags for the page level allocator
* @prot: protection mask for the allocated pages * @prot: protection mask for the allocated pages
* @node: node to use for allocation or NUMA_NO_NODE * @node: node to use for allocation or NUMA_NO_NODE
* @caller: caller's return address * @caller: caller's return address
* *
* Allocate enough pages to cover @size from the page level * Allocate enough pages to cover @size from the page level
* allocator with @gfp_mask flags. Map them into contiguous * allocator with @gfp_mask flags. Map them into contiguous
* kernel virtual space, using a pagetable protection of @prot. * kernel virtual space, using a pagetable protection of @prot.
* *
* Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
* and __GFP_NOFAIL are not supported * and __GFP_NOFAIL are not supported
*
* Any use of gfp flags outside of GFP_KERNEL should be consulted
* with mm people.
* *
* Any use of gfp flags outside of GFP_KERNEL should be consulted
* with mm people.
*/ */
static void *__vmalloc_node(unsigned long size, unsigned long align, static void *__vmalloc_node(unsigned long size, unsigned long align,
gfp_t gfp_mask, pgprot_t prot, gfp_t gfp_mask, pgprot_t prot,
@ -1837,13 +1837,14 @@ void *__vmalloc_node_flags_caller(unsigned long size, int node, gfp_t flags,
} }
/** /**
* vmalloc - allocate virtually contiguous memory * vmalloc - allocate virtually contiguous memory
* @size: allocation size * @size: allocation size
* Allocate enough pages to cover @size from the page level
* allocator and map them into contiguous kernel virtual space.
* *
* For tight control over page level allocator and protection flags * Allocate enough pages to cover @size from the page level
* use __vmalloc() instead. * allocator and map them into contiguous kernel virtual space.
*
* For tight control over page level allocator and protection flags
* use __vmalloc() instead.
*/ */
void *vmalloc(unsigned long size) void *vmalloc(unsigned long size)
{ {
@ -1853,14 +1854,15 @@ void *vmalloc(unsigned long size)
EXPORT_SYMBOL(vmalloc); EXPORT_SYMBOL(vmalloc);
/** /**
* vzalloc - allocate virtually contiguous memory with zero fill * vzalloc - allocate virtually contiguous memory with zero fill
* @size: allocation size * @size: allocation size
* Allocate enough pages to cover @size from the page level
* allocator and map them into contiguous kernel virtual space.
* The memory allocated is set to zero.
* *
* For tight control over page level allocator and protection flags * Allocate enough pages to cover @size from the page level
* use __vmalloc() instead. * allocator and map them into contiguous kernel virtual space.
* The memory allocated is set to zero.
*
* For tight control over page level allocator and protection flags
* use __vmalloc() instead.
*/ */
void *vzalloc(unsigned long size) void *vzalloc(unsigned long size)
{ {
@ -1886,15 +1888,15 @@ void *vmalloc_user(unsigned long size)
EXPORT_SYMBOL(vmalloc_user); EXPORT_SYMBOL(vmalloc_user);
/** /**
* vmalloc_node - allocate memory on a specific node * vmalloc_node - allocate memory on a specific node
* @size: allocation size * @size: allocation size
* @node: numa node * @node: numa node
* *
* Allocate enough pages to cover @size from the page level * Allocate enough pages to cover @size from the page level
* allocator and map them into contiguous kernel virtual space. * allocator and map them into contiguous kernel virtual space.
* *
* For tight control over page level allocator and protection flags * For tight control over page level allocator and protection flags
* use __vmalloc() instead. * use __vmalloc() instead.
*/ */
void *vmalloc_node(unsigned long size, int node) void *vmalloc_node(unsigned long size, int node)
{ {
@ -1923,17 +1925,16 @@ void *vzalloc_node(unsigned long size, int node)
EXPORT_SYMBOL(vzalloc_node); EXPORT_SYMBOL(vzalloc_node);
/** /**
* vmalloc_exec - allocate virtually contiguous, executable memory * vmalloc_exec - allocate virtually contiguous, executable memory
* @size: allocation size * @size: allocation size
* *
* Kernel-internal function to allocate enough pages to cover @size * Kernel-internal function to allocate enough pages to cover @size
* the page level allocator and map them into contiguous and * the page level allocator and map them into contiguous and
* executable kernel virtual space. * executable kernel virtual space.
* *
* For tight control over page level allocator and protection flags * For tight control over page level allocator and protection flags
* use __vmalloc() instead. * use __vmalloc() instead.
*/ */
void *vmalloc_exec(unsigned long size) void *vmalloc_exec(unsigned long size)
{ {
return __vmalloc_node(size, 1, GFP_KERNEL, PAGE_KERNEL_EXEC, return __vmalloc_node(size, 1, GFP_KERNEL, PAGE_KERNEL_EXEC,
@ -1953,11 +1954,11 @@ void *vmalloc_exec(unsigned long size)
#endif #endif
/** /**
* vmalloc_32 - allocate virtually contiguous memory (32bit addressable) * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
* @size: allocation size * @size: allocation size
* *
* Allocate enough 32bit PA addressable pages to cover @size from the * Allocate enough 32bit PA addressable pages to cover @size from the
* page level allocator and map them into contiguous kernel virtual space. * page level allocator and map them into contiguous kernel virtual space.
*/ */
void *vmalloc_32(unsigned long size) void *vmalloc_32(unsigned long size)
{ {
@ -1968,7 +1969,7 @@ EXPORT_SYMBOL(vmalloc_32);
/** /**
* vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
* @size: allocation size * @size: allocation size
* *
* The resulting memory area is 32bit addressable and zeroed so it can be * The resulting memory area is 32bit addressable and zeroed so it can be
* mapped to userspace without leaking data. * mapped to userspace without leaking data.
@ -2064,31 +2065,29 @@ static int aligned_vwrite(char *buf, char *addr, unsigned long count)
} }
/** /**
* vread() - read vmalloc area in a safe way. * vread() - read vmalloc area in a safe way.
* @buf: buffer for reading data * @buf: buffer for reading data
* @addr: vm address. * @addr: vm address.
* @count: number of bytes to be read. * @count: number of bytes to be read.
* *
* Returns # of bytes which addr and buf should be increased. * Returns # of bytes which addr and buf should be increased.
* (same number to @count). Returns 0 if [addr...addr+count) doesn't * (same number to @count). Returns 0 if [addr...addr+count) doesn't
* includes any intersect with alive vmalloc area. * includes any intersect with alive vmalloc area.
* *
* This function checks that addr is a valid vmalloc'ed area, and * This function checks that addr is a valid vmalloc'ed area, and
* copy data from that area to a given buffer. If the given memory range * copy data from that area to a given buffer. If the given memory range
* of [addr...addr+count) includes some valid address, data is copied to * of [addr...addr+count) includes some valid address, data is copied to
* proper area of @buf. If there are memory holes, they'll be zero-filled. * proper area of @buf. If there are memory holes, they'll be zero-filled.
* IOREMAP area is treated as memory hole and no copy is done. * IOREMAP area is treated as memory hole and no copy is done.
* *
* If [addr...addr+count) doesn't includes any intersects with alive * If [addr...addr+count) doesn't includes any intersects with alive
* vm_struct area, returns 0. @buf should be kernel's buffer. * vm_struct area, returns 0. @buf should be kernel's buffer.
*
* Note: In usual ops, vread() is never necessary because the caller
* should know vmalloc() area is valid and can use memcpy().
* This is for routines which have to access vmalloc area without
* any informaion, as /dev/kmem.
* *
* Note: In usual ops, vread() is never necessary because the caller
* should know vmalloc() area is valid and can use memcpy().
* This is for routines which have to access vmalloc area without
* any informaion, as /dev/kmem.
*/ */
long vread(char *buf, char *addr, unsigned long count) long vread(char *buf, char *addr, unsigned long count)
{ {
struct vmap_area *va; struct vmap_area *va;
@ -2145,31 +2144,30 @@ finished:
} }
/** /**
* vwrite() - write vmalloc area in a safe way. * vwrite() - write vmalloc area in a safe way.
* @buf: buffer for source data * @buf: buffer for source data
* @addr: vm address. * @addr: vm address.
* @count: number of bytes to be read. * @count: number of bytes to be read.
* *
* Returns # of bytes which addr and buf should be incresed. * Returns # of bytes which addr and buf should be incresed.
* (same number to @count). * (same number to @count).
* If [addr...addr+count) doesn't includes any intersect with valid * If [addr...addr+count) doesn't includes any intersect with valid
* vmalloc area, returns 0. * vmalloc area, returns 0.
* *
* This function checks that addr is a valid vmalloc'ed area, and * This function checks that addr is a valid vmalloc'ed area, and
* copy data from a buffer to the given addr. If specified range of * copy data from a buffer to the given addr. If specified range of
* [addr...addr+count) includes some valid address, data is copied from * [addr...addr+count) includes some valid address, data is copied from
* proper area of @buf. If there are memory holes, no copy to hole. * proper area of @buf. If there are memory holes, no copy to hole.
* IOREMAP area is treated as memory hole and no copy is done. * IOREMAP area is treated as memory hole and no copy is done.
* *
* If [addr...addr+count) doesn't includes any intersects with alive * If [addr...addr+count) doesn't includes any intersects with alive
* vm_struct area, returns 0. @buf should be kernel's buffer. * vm_struct area, returns 0. @buf should be kernel's buffer.
* *
* Note: In usual ops, vwrite() is never necessary because the caller * Note: In usual ops, vwrite() is never necessary because the caller
* should know vmalloc() area is valid and can use memcpy(). * should know vmalloc() area is valid and can use memcpy().
* This is for routines which have to access vmalloc area without * This is for routines which have to access vmalloc area without
* any informaion, as /dev/kmem. * any informaion, as /dev/kmem.
*/ */
long vwrite(char *buf, char *addr, unsigned long count) long vwrite(char *buf, char *addr, unsigned long count)
{ {
struct vmap_area *va; struct vmap_area *va;
@ -2221,20 +2219,20 @@ finished:
} }
/** /**
* remap_vmalloc_range_partial - map vmalloc pages to userspace * remap_vmalloc_range_partial - map vmalloc pages to userspace
* @vma: vma to cover * @vma: vma to cover
* @uaddr: target user address to start at * @uaddr: target user address to start at
* @kaddr: virtual address of vmalloc kernel memory * @kaddr: virtual address of vmalloc kernel memory
* @size: size of map area * @size: size of map area
* *
* Returns: 0 for success, -Exxx on failure * Returns: 0 for success, -Exxx on failure
* *
* This function checks that @kaddr is a valid vmalloc'ed area, * This function checks that @kaddr is a valid vmalloc'ed area,
* and that it is big enough to cover the range starting at * and that it is big enough to cover the range starting at
* @uaddr in @vma. Will return failure if that criteria isn't * @uaddr in @vma. Will return failure if that criteria isn't
* met. * met.
* *
* Similar to remap_pfn_range() (see mm/memory.c) * Similar to remap_pfn_range() (see mm/memory.c)
*/ */
int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr, int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
void *kaddr, unsigned long size) void *kaddr, unsigned long size)
@ -2276,18 +2274,18 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
EXPORT_SYMBOL(remap_vmalloc_range_partial); EXPORT_SYMBOL(remap_vmalloc_range_partial);
/** /**
* remap_vmalloc_range - map vmalloc pages to userspace * remap_vmalloc_range - map vmalloc pages to userspace
* @vma: vma to cover (map full range of vma) * @vma: vma to cover (map full range of vma)
* @addr: vmalloc memory * @addr: vmalloc memory
* @pgoff: number of pages into addr before first page to map * @pgoff: number of pages into addr before first page to map
* *
* Returns: 0 for success, -Exxx on failure * Returns: 0 for success, -Exxx on failure
* *
* This function checks that addr is a valid vmalloc'ed area, and * This function checks that addr is a valid vmalloc'ed area, and
* that it is big enough to cover the vma. Will return failure if * that it is big enough to cover the vma. Will return failure if
* that criteria isn't met. * that criteria isn't met.
* *
* Similar to remap_pfn_range() (see mm/memory.c) * Similar to remap_pfn_range() (see mm/memory.c)
*/ */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
unsigned long pgoff) unsigned long pgoff)
@ -2319,18 +2317,18 @@ static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
} }
/** /**
* alloc_vm_area - allocate a range of kernel address space * alloc_vm_area - allocate a range of kernel address space
* @size: size of the area * @size: size of the area
* @ptes: returns the PTEs for the address space * @ptes: returns the PTEs for the address space
* *
* Returns: NULL on failure, vm_struct on success * Returns: NULL on failure, vm_struct on success
* *
* This function reserves a range of kernel address space, and * This function reserves a range of kernel address space, and
* allocates pagetables to map that range. No actual mappings * allocates pagetables to map that range. No actual mappings
* are created. * are created.
* *
* If @ptes is non-NULL, pointers to the PTEs (in init_mm) * If @ptes is non-NULL, pointers to the PTEs (in init_mm)
* allocated for the VM area are returned. * allocated for the VM area are returned.
*/ */
struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes) struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
{ {
@ -2756,4 +2754,3 @@ static int __init proc_vmalloc_init(void)
module_init(proc_vmalloc_init); module_init(proc_vmalloc_init);
#endif #endif