
staging/lustre: replace num_physpages with totalram_pages

The global variable num_physpages is going away. Replace it
with totalram_pages.

Cc: Jiang Liu <jiang.liu@huawei.com>
Signed-off-by: Peng Tao <tao.peng@emc.com>
Signed-off-by: Andreas Dilger <andreas.dilger@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Peng Tao 2013-07-15 22:27:04 +08:00 committed by Greg Kroah-Hartman
parent 3b2f64d00c
commit 4f6cc9ab53
10 changed files with 23 additions and 23 deletions
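
The change is mechanical: every sizing heuristic that read num_physpages now reads totalram_pages, which counts the pages actually managed by the kernel rather than all physical page frames, so the derived caps typically get the same or slightly more conservative. Most hunks below share one idiom: converting a page count to megabytes with a right shift by (20 - PAGE_CACHE_SHIFT). A minimal userspace sketch of that arithmetic, assuming 4 KiB pages and a hypothetical 4 GiB machine (PAGE_SHIFT and the page count here are illustrative stand-ins, not the kernel symbols):

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumption: 4 KiB pages */

int main(void)
{
	/* Stand-in for totalram_pages on a hypothetical 4 GiB machine. */
	unsigned long pages = 1UL << (32 - PAGE_SHIFT);

	/* pages * 2^PAGE_SHIFT bytes, divided by 2^20 bytes per MiB,
	 * collapses to a single right shift by (20 - PAGE_SHIFT). */
	unsigned long total_mb = pages >> (20 - PAGE_SHIFT);

	printf("%lu pages = %lu MiB\n", pages, total_mb);
	/* prints: 1048576 pages = 4096 MiB */
	return 0;
}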


@@ -63,9 +63,9 @@
 #if BITS_PER_LONG == 32
 /* limit to lowmem on 32-bit systems */
 #define NUM_CACHEPAGES \
-	min(num_physpages, 1UL << (30 - PAGE_CACHE_SHIFT) * 3 / 4)
+	min(totalram_pages, 1UL << (30 - PAGE_CACHE_SHIFT) * 3 / 4)
 #else
-#define NUM_CACHEPAGES num_physpages
+#define NUM_CACHEPAGES totalram_pages
 #endif
 /*


@@ -339,8 +339,8 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
 	cli->cl_avail_grant = 0;
 	/* FIXME: Should limit this for the sum of all cl_dirty_max. */
 	cli->cl_dirty_max = OSC_MAX_DIRTY_DEFAULT * 1024 * 1024;
-	if (cli->cl_dirty_max >> PAGE_CACHE_SHIFT > num_physpages / 8)
-		cli->cl_dirty_max = num_physpages << (PAGE_CACHE_SHIFT - 3);
+	if (cli->cl_dirty_max >> PAGE_CACHE_SHIFT > totalram_pages / 8)
+		cli->cl_dirty_max = totalram_pages << (PAGE_CACHE_SHIFT - 3);
 	INIT_LIST_HEAD(&cli->cl_cache_waiters);
 	INIT_LIST_HEAD(&cli->cl_loi_ready_list);
 	INIT_LIST_HEAD(&cli->cl_loi_hp_ready_list);
@@ -388,11 +388,11 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
 	if (!strcmp(name, LUSTRE_MDC_NAME)) {
 		cli->cl_max_rpcs_in_flight = MDC_MAX_RIF_DEFAULT;
-	} else if (num_physpages >> (20 - PAGE_CACHE_SHIFT) <= 128 /* MB */) {
+	} else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 128 /* MB */) {
 		cli->cl_max_rpcs_in_flight = 2;
-	} else if (num_physpages >> (20 - PAGE_CACHE_SHIFT) <= 256 /* MB */) {
+	} else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 256 /* MB */) {
 		cli->cl_max_rpcs_in_flight = 3;
-	} else if (num_physpages >> (20 - PAGE_CACHE_SHIFT) <= 512 /* MB */) {
+	} else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 512 /* MB */) {
 		cli->cl_max_rpcs_in_flight = 4;
 	} else {
 		if (osc_on_mdt(obddev->obd_name))


@@ -269,7 +269,7 @@ void cfs_print_to_console(struct ptldebug_header *hdr, int mask,
 int cfs_trace_max_debug_mb(void)
 {
-	int total_mb = (num_physpages >> (20 - PAGE_SHIFT));
+	int total_mb = (totalram_pages >> (20 - PAGE_SHIFT));
 	return MAX(512, (total_mb * 80)/100);
 }


@@ -243,9 +243,9 @@ static ssize_t ll_max_readahead_mb_seq_write(struct file *file, const char *buff
 	if (rc)
 		return rc;
-	if (pages_number < 0 || pages_number > num_physpages / 2) {
+	if (pages_number < 0 || pages_number > totalram_pages / 2) {
 		CERROR("can't set file readahead more than %lu MB\n",
-		       num_physpages >> (20 - PAGE_CACHE_SHIFT + 1)); /*1/2 of RAM*/
+		       totalram_pages >> (20 - PAGE_CACHE_SHIFT + 1)); /*1/2 of RAM*/
 		return -ERANGE;
 	}
@@ -388,10 +388,10 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file, const char *buffer,
 	if (rc)
 		RETURN(rc);
-	if (pages_number < 0 || pages_number > num_physpages) {
+	if (pages_number < 0 || pages_number > totalram_pages) {
 		CERROR("%s: can't set max cache more than %lu MB\n",
 		       ll_get_fsname(sb, NULL, 0),
-		       num_physpages >> (20 - PAGE_CACHE_SHIFT));
+		       totalram_pages >> (20 - PAGE_CACHE_SHIFT));
 		RETURN(-ERANGE);
 	}


@@ -558,10 +558,10 @@ static int __init init_obdclass(void)
 	/* Default the dirty page cache cap to 1/2 of system memory.
 	 * For clients with less memory, a larger fraction is needed
 	 * for other purposes (mostly for BGL). */
-	if (num_physpages <= 512 << (20 - PAGE_CACHE_SHIFT))
-		obd_max_dirty_pages = num_physpages / 4;
+	if (totalram_pages <= 512 << (20 - PAGE_CACHE_SHIFT))
+		obd_max_dirty_pages = totalram_pages / 4;
 	else
-		obd_max_dirty_pages = num_physpages / 2;
+		obd_max_dirty_pages = totalram_pages / 2;
 	err = obd_init_caches();
 	if (err)


@@ -202,12 +202,12 @@ int LL_PROC_PROTO(proc_max_dirty_pages_in_mb)
 			    1 << (20 - PAGE_CACHE_SHIFT));
 		/* Don't allow them to let dirty pages exceed 90% of system
 		 * memory and set a hard minimum of 4MB. */
-		if (obd_max_dirty_pages > ((num_physpages / 10) * 9)) {
+		if (obd_max_dirty_pages > ((totalram_pages / 10) * 9)) {
 			CERROR("Refusing to set max dirty pages to %u, which "
 			       "is more than 90%% of available RAM; setting "
 			       "to %lu\n", obd_max_dirty_pages,
-			       ((num_physpages / 10) * 9));
-			obd_max_dirty_pages = ((num_physpages / 10) * 9);
+			       ((totalram_pages / 10) * 9));
+			obd_max_dirty_pages = ((totalram_pages / 10) * 9);
 		} else if (obd_max_dirty_pages < 4 << (20 - PAGE_CACHE_SHIFT)) {
 			obd_max_dirty_pages = 4 << (20 - PAGE_CACHE_SHIFT);
 		}


@@ -849,7 +849,7 @@ static int lu_htable_order(void)
 	 *
 	 * Size of lu_object is (arbitrary) taken as 1K (together with inode).
 	 */
-	cache_size = num_physpages;
+	cache_size = totalram_pages;
 #if BITS_PER_LONG == 32
 	/* limit hashtable size for lowmem systems to low RAM */


@@ -146,7 +146,7 @@ static ssize_t osc_max_dirty_mb_seq_write(struct file *file, const char *buffer,
 	if (pages_number <= 0 ||
 	    pages_number > OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_CACHE_SHIFT) ||
-	    pages_number > num_physpages / 4) /* 1/4 of RAM */
+	    pages_number > totalram_pages / 4) /* 1/4 of RAM */
 		return -ERANGE;
 	client_obd_list_lock(&cli->cl_loi_list_lock);


@@ -302,7 +302,7 @@ ptlrpc_lprocfs_req_history_max_seq_write(struct file *file, const char *buffer,
 	 * hose a kernel by allowing the request history to grow too
 	 * far. */
 	bufpages = (svc->srv_buf_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-	if (val > num_physpages/(2 * bufpages))
+	if (val > totalram_pages / (2 * bufpages))
 		return -ERANGE;
 	spin_lock(&svc->srv_lock);


@@ -156,7 +156,7 @@ int sptlrpc_proc_enc_pool_seq_show(struct seq_file *m, void *v)
 		      "max waitqueue depth: %u\n"
 		      "max wait time: "CFS_TIME_T"/%u\n"
 		      ,
-		      num_physpages,
+		      totalram_pages,
 		      PAGES_PER_POOL,
 		      page_pools.epp_max_pages,
 		      page_pools.epp_max_pools,
@@ -705,7 +705,7 @@ int sptlrpc_enc_pool_init(void)
 	 * maximum capacity is 1/8 of total physical memory.
 	 * is the 1/8 a good number?
 	 */
-	page_pools.epp_max_pages = num_physpages / 8;
+	page_pools.epp_max_pages = totalram_pages / 8;
 	page_pools.epp_max_pools = npages_to_npools(page_pools.epp_max_pages);
 	init_waitqueue_head(&page_pools.epp_waitq);