
btrfs: cleanup 64bit/32bit divs, compile time constants

Switch to div_u64 if the divisor is a numeric constant or a sum of
sizeof()s. We can remove a few instances of do_div, which has the hidden
semantics of changing its first argument.

Small power-of-two divisors are converted to bitshifts; large values are
kept intact for clarity.

Signed-off-by: David Sterba <dsterba@suse.cz>
David Sterba 2015-01-16 17:21:12 +01:00
parent 351810c1d2
commit f8c269d722
6 changed files with 18 additions and 21 deletions
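
For illustration only (not part of the patch): a standalone C sketch of the semantic difference the commit message describes. do_div() and div_u64() below are simplified stand-ins for the kernel helpers, used here just to show that do_div() overwrites its first argument with the quotient (and evaluates to the remainder), while div_u64() simply returns the quotient.

#include <assert.h>
#include <stdint.h>

/* Simplified stand-ins for the kernel helpers (illustration only). */
#define do_div(n, base) ({ uint32_t __rem = (n) % (base); (n) /= (base); __rem; })

static uint64_t div_u64(uint64_t dividend, uint32_t divisor)
{
	return dividend / divisor;
}

int main(void)
{
	uint64_t progress = 437;

	/* Old style: do_div() silently rewrites 'progress' with the quotient. */
	do_div(progress, 10);
	assert(progress == 43);

	/* New style: the assignment is explicit, no hidden mutation. */
	progress = div_u64(437, 10);
	assert(progress == 43);
	return 0;
}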

fs/btrfs/dev-replace.c

@@ -670,8 +670,8 @@ void btrfs_dev_replace_status(struct btrfs_fs_info *fs_info,
 	case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
 	case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
 		srcdev = dev_replace->srcdev;
-		args->status.progress_1000 = div64_u64(dev_replace->cursor_left,
-			div64_u64(btrfs_device_get_total_bytes(srcdev), 1000));
+		args->status.progress_1000 = div_u64(dev_replace->cursor_left,
+			div_u64(btrfs_device_get_total_bytes(srcdev), 1000));
 		break;
 	}
 	btrfs_dev_replace_unlock(dev_replace);
@@ -806,7 +806,7 @@ static int btrfs_dev_replace_kthread(void *data)
 	btrfs_dev_replace_status(fs_info, status_args);
 	progress = status_args->status.progress_1000;
 	kfree(status_args);
-	do_div(progress, 10);
+	progress = div_u64(progress, 10);
 	printk_in_rcu(KERN_INFO
 		"BTRFS: continuing dev_replace from %s (devid %llu) to %s @%u%%\n",
 		dev_replace->srcdev->missing ? "<missing disk>" :
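
Worked example (assumed numbers, not from the patch): progress_1000 computed above is a permille value, so the div_u64(progress, 10) in the kthread turns it into the percentage printed by the message above. Self-contained sketch with an illustrative div_u64() stand-in:

#include <assert.h>
#include <stdint.h>

/* Illustrative stand-in for the kernel's div_u64(). */
static uint64_t div_u64(uint64_t dividend, uint32_t divisor)
{
	return dividend / divisor;
}

int main(void)
{
	/* Assumed example: replace cursor at 300 GiB on a 1 TiB source device. */
	uint64_t cursor_left = 300ULL << 30;
	uint64_t total_bytes = 1ULL << 40;

	uint64_t progress_1000 = div_u64(cursor_left, div_u64(total_bytes, 1000));
	uint64_t progress = div_u64(progress_1000, 10);

	assert(progress_1000 == 292);	/* ~292.9 permille, truncated */
	assert(progress == 29);		/* printed as "@29%" by the kthread */
	return 0;
}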

fs/btrfs/disk-io.c

@@ -2277,7 +2277,7 @@ int open_ctree(struct super_block *sb,
 	fs_info->free_chunk_space = 0;
 	fs_info->tree_mod_log = RB_ROOT;
 	fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
-	fs_info->avg_delayed_ref_runtime = div64_u64(NSEC_PER_SEC, 64);
+	fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */
 	/* readahead state */
 	INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT);
 	spin_lock_init(&fs_info->reada_lock);
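
Sanity check (illustration, not from the patch): for unsigned values a right shift by 6 is exactly division by 64, so the NSEC_PER_SEC >> 6 above and the avg >> 2 further below keep the same results as the old div64_u64() calls.

#include <assert.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL	/* same value as the kernel constant */

int main(void)
{
	assert((NSEC_PER_SEC >> 6) == NSEC_PER_SEC / 64);	/* div by 64 */

	uint64_t avg = 123456789;				/* arbitrary sample */
	assert((avg >> 2) == avg / 4);				/* div by 4 */
	return 0;
}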

fs/btrfs/extent-tree.c

@@ -2561,8 +2561,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 		 */
 		spin_lock(&delayed_refs->lock);
 		avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
-		avg = div64_u64(avg, 4);
-		fs_info->avg_delayed_ref_runtime = avg;
+		fs_info->avg_delayed_ref_runtime = avg >> 2;	/* div by 4 */
 		spin_unlock(&delayed_refs->lock);
 	}
 	return 0;
@@ -2624,7 +2623,7 @@ static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
 	 * We don't ever fill up leaves all the way so multiply by 2 just to be
 	 * closer to what we're really going to want to ouse.
 	 */
-	return div64_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
+	return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
 }
 
 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
@@ -3193,7 +3192,7 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group,
 	struct inode *inode = NULL;
 	u64 alloc_hint = 0;
 	int dcs = BTRFS_DC_ERROR;
-	int num_pages = 0;
+	u64 num_pages = 0;
 	int retries = 0;
 	int ret = 0;
 
@@ -3277,7 +3276,7 @@ again:
 	 * taking up quite a bit since it's not folded into the other space
 	 * cache.
 	 */
-	num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024);
+	num_pages = div_u64(block_group->key.offset, 256 * 1024 * 1024);
 	if (!num_pages)
 		num_pages = 1;
 
@@ -4770,10 +4769,10 @@ static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
 
 	num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
 		    csum_size * 2;
-	num_bytes += div64_u64(data_used + meta_used, 50);
+	num_bytes += div_u64(data_used + meta_used, 50);
 
 	if (num_bytes * 3 > meta_used)
-		num_bytes = div64_u64(meta_used, 3);
+		num_bytes = div_u64(meta_used, 3);
 
 	return ALIGN(num_bytes, fs_info->extent_root->nodesize << 10);
 }
@@ -5039,7 +5038,7 @@ static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
 	else
 		BTRFS_I(inode)->csum_bytes -= num_bytes;
 	csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
-	num_csums_per_leaf = (int)div64_u64(csum_size,
+	num_csums_per_leaf = (int)div_u64(csum_size,
 			    sizeof(struct btrfs_csum_item) +
 			    sizeof(struct btrfs_disk_key));
 	num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);

fs/btrfs/free-space-cache.c

@@ -1537,7 +1537,7 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
 		max_bytes = MAX_CACHE_BYTES_PER_GIG;
 	else
 		max_bytes = MAX_CACHE_BYTES_PER_GIG *
-			div64_u64(size, 1024 * 1024 * 1024);
+			div_u64(size, 1024 * 1024 * 1024);
 
 	/*
 	 * we want to account for 1 more bitmap than what we have so we can make
@@ -1552,14 +1552,14 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
 	}
 
 	/*
-	 * we want the extent entry threshold to always be at most 1/2 the maxw
+	 * we want the extent entry threshold to always be at most 1/2 the max
 	 * bytes we can have, or whatever is less than that.
 	 */
 	extent_bytes = max_bytes - bitmap_bytes;
-	extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2));
+	extent_bytes = min_t(u64, extent_bytes, max_bytes >> 1);
 
 	ctl->extents_thresh =
-		div64_u64(extent_bytes, (sizeof(struct btrfs_free_space)));
+		div_u64(extent_bytes, sizeof(struct btrfs_free_space));
 }
 
 static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,

fs/btrfs/math.h

@@ -28,8 +28,7 @@ static inline u64 div_factor(u64 num, int factor)
 	if (factor == 10)
 		return num;
 	num *= factor;
-	do_div(num, 10);
-	return num;
+	return div_u64(num, 10);
 }
 
 static inline u64 div_factor_fine(u64 num, int factor)
@@ -37,8 +36,7 @@ static inline u64 div_factor_fine(u64 num, int factor)
 	if (factor == 100)
 		return num;
 	num *= factor;
-	do_div(num, 100);
-	return num;
+	return div_u64(num, 100);
 }
 
 #endif

fs/btrfs/super.c

@@ -1704,7 +1704,7 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
 		avail_space = device->total_bytes - device->bytes_used;
 
 		/* align with stripe_len */
-		do_div(avail_space, BTRFS_STRIPE_LEN);
+		avail_space = div_u64(avail_space, BTRFS_STRIPE_LEN);
 		avail_space *= BTRFS_STRIPE_LEN;
 
 		/*