1
0
Fork 0

[PATCH] Remove old node based policy interface from mempolicy.c

mempolicy.c contains a provisional interface for huge page allocation based on
node numbers.  This is in use in SLES9 but was never used (AFAIK) in upstream
versions of Linux.

Huge page allocations now use zonelists to figure out where to allocate pages.
Using zonelists lets us find the closest huge page, since they take the NUMA
distance into account for huge page allocations.

Remove the obsolete functions.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Andi Kleen <ak@muc.de>
Acked-by: William Lee Irwin III <wli@holomorphy.com>
Cc: Adam Litke <agl@us.ibm.com>
Acked-by: Paul Jackson <pj@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
hifive-unleashed-5.1
Christoph Lameter 2006-01-06 00:10:47 -08:00 committed by Linus Torvalds
parent 5da7ca8607
commit 21abb1478a
2 changed files with 0 additions and 67 deletions

View File

@ -109,14 +109,6 @@ static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
#define mpol_set_vma_default(vma) ((vma)->vm_policy = NULL)
/*
* Hugetlb policy. i386 hugetlb so far works with node numbers
* instead of zone lists, so give it special interfaces for now.
*/
extern int mpol_first_node(struct vm_area_struct *vma, unsigned long addr);
extern int mpol_node_valid(int nid, struct vm_area_struct *vma,
unsigned long addr);
/*
* Tree of shared policies for a shared memory region.
* Maintain the policies in a pseudo mm that contains vmas. The vmas
@ -184,17 +176,6 @@ static inline struct mempolicy *mpol_copy(struct mempolicy *old)
return NULL;
}
/*
 * Stub of mpol_first_node() — presumably the !CONFIG_NUMA variant (the
 * surrounding hunk also stubs mpol_copy()); TODO confirm against the full
 * header.  On a single-node system the current node is trivially the
 * first suitable node.  Parameters are unused.
 */
static inline int mpol_first_node(struct vm_area_struct *vma, unsigned long a)
{
return numa_node_id();
}
/*
 * Stub of mpol_node_valid() — presumably the !CONFIG_NUMA variant; TODO
 * confirm against the full header.  With no memory policy in effect,
 * every node is acceptable, so always report valid (1).
 */
static inline int
mpol_node_valid(int nid, struct vm_area_struct *vma, unsigned long a)
{
return 1;
}
struct shared_policy {};
static inline int mpol_set_shared_policy(struct shared_policy *info,

View File

@ -960,54 +960,6 @@ void __mpol_free(struct mempolicy *p)
kmem_cache_free(policy_cache, p);
}
/*
* Hugetlb policy. Same as above, just works with node numbers instead of
* zonelists.
*/
/* Find first node suitable for an allocation */
/*
 * Find the first node suitable for an allocation governed by @vma's
 * memory policy at @addr.  Falls back to the local node for the default
 * policy and for a preferred policy with no explicit node set.
 */
int mpol_first_node(struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *pol = get_vma_policy(current, vma, addr);
	int nid;

	switch (pol->policy) {
	case MPOL_DEFAULT:
		nid = numa_node_id();
		break;
	case MPOL_BIND:
		/* First zone of the bound zonelist identifies the node. */
		nid = pol->v.zonelist->zones[0]->zone_pgdat->node_id;
		break;
	case MPOL_INTERLEAVE:
		/* Advances the interleave cursor as a side effect. */
		nid = interleave_nodes(pol);
		break;
	case MPOL_PREFERRED:
		nid = pol->v.preferred_node;
		if (nid < 0)
			nid = numa_node_id();
		break;
	default:
		BUG();
		nid = 0;
		break;
	}
	return nid;
}
/* Find secondary valid nodes for an allocation */
/*
 * Check whether node @nid is an acceptable secondary node for an
 * allocation governed by @vma's memory policy at @addr.  Only MPOL_BIND
 * actually restricts the node set; all other policies accept any node.
 */
int mpol_node_valid(int nid, struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *pol = get_vma_policy(current, vma, addr);

	if (pol->policy == MPOL_BIND) {
		struct zone **zp;

		/* Valid iff some zone in the bound zonelist lives on @nid. */
		for (zp = pol->v.zonelist->zones; *zp; zp++)
			if ((*zp)->zone_pgdat->node_id == nid)
				return 1;
		return 0;
	}

	if (pol->policy == MPOL_PREFERRED || pol->policy == MPOL_DEFAULT ||
	    pol->policy == MPOL_INTERLEAVE)
		return 1;

	BUG();
	return 0;
}
/*
* Shared memory backing store policy support.
*