remarkable-linux/fs/nfs/blocklayout/blocklayout.c
commit 7542274519 ("pnfsblock: fix writeback deadlock") by Peng Tao
We should check whether the sector is already initialized before
trying to grab the page from the page cache. Otherwise, when two
pages of the same block are written back by two threads, each coming
in through nfs_writepage_locked, they can deadlock as shown below.

 [ 1080.972099] INFO: task kswapd0:25 blocked for more than 120 seconds.
 [ 1080.972377] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
 [ 1080.972812] kswapd0         D ffff88000c4926c0     0    25      2 0x00000000
 [ 1080.972816]  ffff88000df276b0 0000000000000046 ffff88000df27640 ffffffff81013ba7
 [ 1080.972821]  ffff88000c492310 ffff88000df27fd8 ffff88000df27fd8 00000000001d3440
 [ 1080.972824]  ffff88000c378000 ffff88000c492310 ffff8800175d3d40 ffff880017fc75a8
 [ 1080.972828] Call Trace:
 [ 1080.972860]  [<ffffffff81013ba7>] ? read_tsc+0x9/0x19
 [ 1080.972877]  [<ffffffff810e0b23>] ? lock_page+0x2b/0x2b
 [ 1080.972899]  [<ffffffff81475a1d>] io_schedule+0x63/0x7e
 [ 1080.972902]  [<ffffffff810e0b31>] sleep_on_page+0xe/0x12
 [ 1080.972905]  [<ffffffff81475fe8>] __wait_on_bit_lock+0x46/0x8f
 [ 1080.972916]  [<ffffffff810822d7>] ? lock_release_holdtime.part.7+0x6b/0x72
 [ 1080.972919]  [<ffffffff810e0af6>] __lock_page+0x66/0x68
 [ 1080.972928]  [<ffffffff81072705>] ? autoremove_wake_function+0x3d/0x3d
 [ 1080.972932]  [<ffffffff810e0b1f>] lock_page+0x27/0x2b
 [ 1080.972934]  [<ffffffff810e0bcf>] find_lock_page+0x34/0x57
 [ 1080.972937]  [<ffffffff810e1738>] find_or_create_page+0x34/0x8a
 [ 1080.972947]  [<ffffffffa034245b>] bl_write_pagelist+0x205/0x6da [blocklayoutdriver]
 [ 1080.972951]  [<ffffffffa034145d>] ? bl_free_lseg+0x38/0x38 [blocklayoutdriver]
 [ 1080.972995]  [<ffffffffa02e27b9>] ? nfs_write_rpcsetup+0x118/0x123 [nfs]
 [ 1080.973033]  [<ffffffffa030246b>] pnfs_generic_pg_writepages+0x10b/0x1f4 [nfs]
 [ 1080.973089]  [<ffffffffa02deaae>] nfs_pageio_doio+0x1a/0x43 [nfs]
 [ 1080.973098]  [<ffffffffa02df035>] nfs_pageio_complete+0x16/0x2d [nfs]
 [ 1080.973108]  [<ffffffffa02e2d8f>] nfs_writepage_locked+0xa0/0xbf [nfs]
 [ 1080.973119]  [<ffffffffa02e36a1>] nfs_writepage+0x16/0x2b [nfs]
 [ 1080.973122]  [<ffffffff810e8762>] ? clear_page_dirty_for_io+0x87/0x9a
 [ 1080.973133]  [<ffffffff810efc5b>] shrink_page_list+0x39b/0x6c8
 [ 1080.973139]  [<ffffffff810f03bb>] shrink_inactive_list+0x22c/0x39e
 [ 1080.973144]  [<ffffffff810822d7>] ? lock_release_holdtime.part.7+0x6b/0x72
 [ 1080.973148]  [<ffffffff810f0c33>] shrink_zone+0x445/0x588
 [ 1080.973152]  [<ffffffff810f1a11>] balance_pgdat+0x2c2/0x56b
 [ 1080.973170]  [<ffffffff81254208>] ? __bitmap_weight+0x34/0x80
 [ 1080.973175]  [<ffffffff810f1f78>] kswapd+0x2be/0x2fa
 [ 1080.973179]  [<ffffffff810726c8>] ? __init_waitqueue_head+0x4b/0x4b
 [ 1080.973183]  [<ffffffff810f1cba>] ? balance_pgdat+0x56b/0x56b
 [ 1080.973187]  [<ffffffff81071f69>] kthread+0xa8/0xb0
 [ 1080.973200]  [<ffffffff814806b4>] kernel_thread_helper+0x4/0x10
 [ 1080.973205]  [<ffffffff81071ec1>] ? __init_kthread_worker+0x5a/0x5a
 [ 1080.973210]  [<ffffffff814806b0>] ? gs_change+0x13/0x13
 [ 1080.973213] no locks held by kswapd0/25.
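
The fix reorders the zero-fill loop in bl_write_pagelist() so that the
bl_is_sector_init() check runs before find_or_create_page(): a sector
that another writeback thread has already initialized is skipped without
taking its page lock. A condensed sketch of the fixed loop (the full
version is in the code that follows):

	for (; npg_zero > 0; npg_zero--) {
		/* check init state first -- no page lock taken */
		if (bl_is_sector_init(be->be_inval, isect))
			goto next_page;
		/* only now look up / create the page in the page cache */
		page = find_or_create_page(wdata->inode->i_mapping,
					   index, GFP_NOFS);
		...
next_page:
		isect += PAGE_CACHE_SECTORS;
	}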

Signed-off-by: Peng Tao <peng_tao@emc.com>
Signed-off-by: Jim Rees <rees@umich.edu>
Cc: stable@kernel.org [3.0]
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2011-10-18 09:08:15 -07:00


/*
 *  linux/fs/nfs/blocklayout/blocklayout.c
 *
 *  Module for the NFSv4.1 pNFS block layout driver.
 *
 *  Copyright (c) 2006 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson <andros@citi.umich.edu>
 *  Fred Isaman <iisaman@umich.edu>
 *
 * permission is granted to use, copy, create derivative works and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the university of michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization. if
 * the above copyright notice or any other identification of the
 * university of michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * this software is provided as is, without representation from the
 * university of michigan as to its fitness for any purpose, and without
 * warranty by the university of michigan of any kind, either express
 * or implied, including without limitation the implied warranties of
 * merchantability and fitness for a particular purpose. the regents
 * of the university of michigan shall not be liable for any damages,
 * including special, indirect, incidental, or consequential damages,
 * with respect to any claim arising out or in connection with the use
 * of the software, even if it has been or is hereafter advised of the
 * possibility of such damages.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/bio.h>		/* struct bio */
#include <linux/buffer_head.h>	/* various write calls */
#include <linux/prefetch.h>

#include "blocklayout.h"

#define NFSDBG_FACILITY	NFSDBG_PNFS_LD

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andy Adamson <andros@citi.umich.edu>");
MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver");

struct dentry *bl_device_pipe;
wait_queue_head_t bl_wq;

static void print_page(struct page *page)
{
	dprintk("PRINTPAGE page %p\n", page);
	dprintk("	PagePrivate %d\n", PagePrivate(page));
	dprintk("	PageUptodate %d\n", PageUptodate(page));
	dprintk("	PageError %d\n", PageError(page));
	dprintk("	PageDirty %d\n", PageDirty(page));
	dprintk("	PageReferenced %d\n", PageReferenced(page));
	dprintk("	PageLocked %d\n", PageLocked(page));
	dprintk("	PageWriteback %d\n", PageWriteback(page));
	dprintk("	PageMappedToDisk %d\n", PageMappedToDisk(page));
	dprintk("\n");
}

/* Given the be associated with isect, determine if page data needs to be
 * initialized.
 */
static int is_hole(struct pnfs_block_extent *be, sector_t isect)
{
	if (be->be_state == PNFS_BLOCK_NONE_DATA)
		return 1;
	else if (be->be_state != PNFS_BLOCK_INVALID_DATA)
		return 0;
	else
		return !bl_is_sector_init(be->be_inval, isect);
}

/* Given the be associated with isect, determine if page data can be
 * written to disk.
 */
static int is_writable(struct pnfs_block_extent *be, sector_t isect)
{
	return (be->be_state == PNFS_BLOCK_READWRITE_DATA ||
		be->be_state == PNFS_BLOCK_INVALID_DATA);
}

/* The data we are handed might be spread across several bios.  We need
 * to track when the last one is finished.
 */
struct parallel_io {
	struct kref refcnt;
	struct rpc_call_ops call_ops;
	void (*pnfs_callback) (void *data);
	void *data;
};
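
/* Refcounting: alloc_parallel() starts the count at one, held by the
 * submitting thread; bl_submit_bio() takes an extra reference for each
 * bio it sends down, dropped again by the bio's end_io.  The final
 * put_parallel() fires pnfs_callback, which completes the whole request.
 */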
static inline struct parallel_io *alloc_parallel(void *data)
{
	struct parallel_io *rv;

	rv = kmalloc(sizeof(*rv), GFP_NOFS);
	if (rv) {
		rv->data = data;
		kref_init(&rv->refcnt);
	}
	return rv;
}

static inline void get_parallel(struct parallel_io *p)
{
	kref_get(&p->refcnt);
}

static void destroy_parallel(struct kref *kref)
{
	struct parallel_io *p = container_of(kref, struct parallel_io, refcnt);

	dprintk("%s enter\n", __func__);
	p->pnfs_callback(p->data);
	kfree(p);
}

static inline void put_parallel(struct parallel_io *p)
{
	kref_put(&p->refcnt, destroy_parallel);
}
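
/* Hand off the accumulated bio, if any: take a parallel_io reference
 * for it (dropped by the bio's end_io) and submit.  Always returns NULL
 * so the caller can restart with a fresh bio.
 */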
static struct bio *
bl_submit_bio(int rw, struct bio *bio)
{
	if (bio) {
		get_parallel(bio->bi_private);
		dprintk("%s submitting %s bio %u@%llu\n", __func__,
			rw == READ ? "read" : "write",
			bio->bi_size, (unsigned long long)bio->bi_sector);
		submit_bio(rw, bio);
	}
	return NULL;
}

static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
				     struct pnfs_block_extent *be,
				     void (*end_io)(struct bio *, int err),
				     struct parallel_io *par)
{
	struct bio *bio;

	bio = bio_alloc(GFP_NOIO, npg);
	if (!bio)
		return NULL;

	bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
	bio->bi_bdev = be->be_mdev;
	bio->bi_end_io = end_io;
	bio->bi_private = par;
	return bio;
}

static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
				      sector_t isect, struct page *page,
				      struct pnfs_block_extent *be,
				      void (*end_io)(struct bio *, int err),
				      struct parallel_io *par)
{
retry:
	if (!bio) {
		bio = bl_alloc_init_bio(npg, isect, be, end_io, par);
		if (!bio)
			return ERR_PTR(-ENOMEM);
	}
	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio = bl_submit_bio(rw, bio);
		goto retry;
	}
	return bio;
}

/* This is basically copied from mpage_end_io_read */
static void bl_end_io_read(struct bio *bio, int err)
{
	struct parallel_io *par = bio->bi_private;
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct nfs_read_data *rdata = (struct nfs_read_data *)par->data;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);
		if (uptodate)
			SetPageUptodate(page);
	} while (bvec >= bio->bi_io_vec);
	if (!uptodate) {
		if (!rdata->pnfs_error)
			rdata->pnfs_error = -EIO;
		pnfs_set_lo_fail(rdata->lseg);
	}
	bio_put(bio);
	put_parallel(par);
}

static void bl_read_cleanup(struct work_struct *work)
{
	struct rpc_task *task;
	struct nfs_read_data *rdata;

	dprintk("%s enter\n", __func__);
	task = container_of(work, struct rpc_task, u.tk_work);
	rdata = container_of(task, struct nfs_read_data, task);
	pnfs_ld_read_done(rdata);
}

static void
bl_end_par_io_read(void *data)
{
	struct nfs_read_data *rdata = data;

	INIT_WORK(&rdata->task.u.tk_work, bl_read_cleanup);
	schedule_work(&rdata->task.u.tk_work);
}

/* We don't want normal .rpc_call_done callback used, so we replace it
 * with this stub.
 */
static void bl_rpc_do_nothing(struct rpc_task *task, void *calldata)
{
	return;
}
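
/* Read path: walk the request a page at a time.  Holes (and INVALID
 * sectors with no COW source) are satisfied by zeroing the page in
 * memory; everything else is batched into bios against the data extent,
 * or against the cow_read extent when reading before a copy-on-write.
 * If the parallel_io can't even be set up, fall back to the MDS.
 */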
static enum pnfs_try_status
bl_read_pagelist(struct nfs_read_data *rdata)
{
	int i, hole;
	struct bio *bio = NULL;
	struct pnfs_block_extent *be = NULL, *cow_read = NULL;
	sector_t isect, extent_length = 0;
	struct parallel_io *par;
	loff_t f_offset = rdata->args.offset;
	size_t count = rdata->args.count;
	struct page **pages = rdata->args.pages;
	int pg_index = rdata->args.pgbase >> PAGE_CACHE_SHIFT;

	dprintk("%s enter nr_pages %u offset %lld count %Zd\n", __func__,
		rdata->npages, f_offset, count);

	par = alloc_parallel(rdata);
	if (!par)
		goto use_mds;
	par->call_ops = *rdata->mds_ops;
	par->call_ops.rpc_call_done = bl_rpc_do_nothing;
	par->pnfs_callback = bl_end_par_io_read;
	/* At this point, we can no longer jump to use_mds */

	isect = (sector_t) (f_offset >> SECTOR_SHIFT);
	/* Code assumes extents are page-aligned */
	for (i = pg_index; i < rdata->npages; i++) {
		if (!extent_length) {
			/* We've used up the previous extent */
			bl_put_extent(be);
			bl_put_extent(cow_read);
			bio = bl_submit_bio(READ, bio);
			/* Get the next one */
			be = bl_find_get_extent(BLK_LSEG2EXT(rdata->lseg),
						isect, &cow_read);
			if (!be) {
				rdata->pnfs_error = -EIO;
				goto out;
			}
			extent_length = be->be_length -
				(isect - be->be_f_offset);
			if (cow_read) {
				sector_t cow_length = cow_read->be_length -
					(isect - cow_read->be_f_offset);
				extent_length = min(extent_length, cow_length);
			}
		}
		hole = is_hole(be, isect);
		if (hole && !cow_read) {
			bio = bl_submit_bio(READ, bio);
			/* Fill hole w/ zeroes w/o accessing device */
			dprintk("%s Zeroing page for hole\n", __func__);
			zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
			print_page(pages[i]);
			SetPageUptodate(pages[i]);
		} else {
			struct pnfs_block_extent *be_read;

			be_read = (hole && cow_read) ? cow_read : be;
			bio = bl_add_page_to_bio(bio, rdata->npages - i, READ,
						 isect, pages[i], be_read,
						 bl_end_io_read, par);
			if (IS_ERR(bio)) {
				rdata->pnfs_error = PTR_ERR(bio);
				bio = NULL;
				goto out;
			}
		}
		isect += PAGE_CACHE_SECTORS;
		extent_length -= PAGE_CACHE_SECTORS;
	}
	if ((isect << SECTOR_SHIFT) >= rdata->inode->i_size) {
		rdata->res.eof = 1;
		rdata->res.count = rdata->inode->i_size - f_offset;
	} else {
		rdata->res.count = (isect << SECTOR_SHIFT) - f_offset;
	}
out:
	bl_put_extent(be);
	bl_put_extent(cow_read);
	bl_submit_bio(READ, bio);
	put_parallel(par);
	return PNFS_ATTEMPTED;

use_mds:
	dprintk("Giving up and using normal NFS\n");
	return PNFS_NOT_ATTEMPTED;
}
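
/* Record that [offset, offset + count), rounded out to page boundaries,
 * has been written, so the INVALID extents covering it can be put on
 * the layout's commit list for the next LAYOUTCOMMIT.
 */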
static void mark_extents_written(struct pnfs_block_layout *bl,
				 __u64 offset, __u32 count)
{
	sector_t isect, end;
	struct pnfs_block_extent *be;

	dprintk("%s(%llu, %u)\n", __func__, offset, count);
	if (count == 0)
		return;
	isect = (offset & (long)(PAGE_CACHE_MASK)) >> SECTOR_SHIFT;
	end = (offset + count + PAGE_CACHE_SIZE - 1) & (long)(PAGE_CACHE_MASK);
	end >>= SECTOR_SHIFT;
	while (isect < end) {
		sector_t len;

		be = bl_find_get_extent(bl, isect, NULL);
		BUG_ON(!be); /* FIXME */
		len = min(end, be->be_f_offset + be->be_length) - isect;
		if (be->be_state == PNFS_BLOCK_INVALID_DATA)
			bl_mark_for_commit(be, isect, len); /* What if fails? */
		isect += len;
		bl_put_extent(be);
	}
}
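
/* bio completion for the pages zeroed in bl_write_pagelist: end
 * writeback on each page and drop the reference that was taken by
 * find_or_create_page() there.
 */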
static void bl_end_io_write_zero(struct bio *bio, int err)
{
	struct parallel_io *par = bio->bi_private;
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct nfs_write_data *wdata = (struct nfs_write_data *)par->data;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);
		/* This is the zeroing page we added */
		end_page_writeback(page);
		page_cache_release(page);
	} while (bvec >= bio->bi_io_vec);
	if (!uptodate) {
		if (!wdata->pnfs_error)
			wdata->pnfs_error = -EIO;
		pnfs_set_lo_fail(wdata->lseg);
	}
	bio_put(bio);
	put_parallel(par);
}

/* This is basically copied from mpage_end_io_read */
static void bl_end_io_write(struct bio *bio, int err)
{
	struct parallel_io *par = bio->bi_private;
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct nfs_write_data *wdata = (struct nfs_write_data *)par->data;

	if (!uptodate) {
		if (!wdata->pnfs_error)
			wdata->pnfs_error = -EIO;
		pnfs_set_lo_fail(wdata->lseg);
	}
	bio_put(bio);
	put_parallel(par);
}

/* Function scheduled for call during bl_end_par_io_write,
 * it marks sectors as written and extends the commitlist.
 */
static void bl_write_cleanup(struct work_struct *work)
{
	struct rpc_task *task;
	struct nfs_write_data *wdata;

	dprintk("%s enter\n", __func__);
	task = container_of(work, struct rpc_task, u.tk_work);
	wdata = container_of(task, struct nfs_write_data, task);
	if (!wdata->pnfs_error) {
		/* Marks for LAYOUTCOMMIT */
		mark_extents_written(BLK_LSEG2EXT(wdata->lseg),
				     wdata->args.offset, wdata->args.count);
	}
	pnfs_ld_write_done(wdata);
}

/* Called when last of bios associated with a bl_write_pagelist call finishes */
static void bl_end_par_io_write(void *data)
{
	struct nfs_write_data *wdata = data;

	wdata->task.tk_status = 0;
	wdata->verf.committed = NFS_FILE_SYNC;
	INIT_WORK(&wdata->task.u.tk_work, bl_write_cleanup);
	schedule_work(&wdata->task.u.tk_work);
}

/* FIXME STUB - mark intersection of layout and page as bad, so is not
 * used again.
 */
static void mark_bad_read(void)
{
	return;
}

/*
 * map_block: map a requested I/O block (isect) into an offset in the LVM
 * block_device
 */
static void
map_block(struct buffer_head *bh, sector_t isect, struct pnfs_block_extent *be)
{
	dprintk("%s enter be=%p\n", __func__, be);

	set_buffer_mapped(bh);
	bh->b_bdev = be->be_mdev;
	bh->b_blocknr = (isect - be->be_f_offset + be->be_v_offset) >>
		(be->be_mdev->bd_inode->i_blkbits - SECTOR_SHIFT);

	dprintk("%s isect %llu, bh->b_blocknr %ld, using bsize %Zd\n",
		__func__, (unsigned long long)isect, (long)bh->b_blocknr,
		bh->b_size);
	return;
}

/* Given an unmapped page, zero it or read in page for COW, page is locked
 * by caller.
 */
static int
init_page_for_write(struct page *page, struct pnfs_block_extent *cow_read)
{
	struct buffer_head *bh = NULL;
	int ret = 0;
	sector_t isect;

	dprintk("%s enter, %p\n", __func__, page);
	BUG_ON(PageUptodate(page));
	if (!cow_read) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		goto cleanup;
	}

	bh = alloc_page_buffers(page, PAGE_CACHE_SIZE, 0);
	if (!bh) {
		ret = -ENOMEM;
		goto cleanup;
	}

	isect = (sector_t) page->index << PAGE_CACHE_SECTOR_SHIFT;
	map_block(bh, isect, cow_read);
	if (!bh_uptodate_or_lock(bh))
		ret = bh_submit_read(bh);
	if (ret)
		goto cleanup;
	SetPageUptodate(page);

cleanup:
	bl_put_extent(cow_read);
	if (bh)
		free_buffer_head(bh);
	if (ret) {
		/* Need to mark layout with bad read...should now
		 * just use nfs4 for reads and writes.
		 */
		mark_bad_read();
	}
	return ret;
}
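
/* Write path, in three passes over the request:
 * 1. If the write starts mid-block in an INVALID extent, zero-fill (or
 *    COW-read) the preceding pages of that block so the whole block
 *    becomes initialized.
 * 2. Write out the pages of the request itself.
 * 3. If the write ends mid-block in an INVALID extent, jump back to the
 *    same zero-fill loop for the trailing pages of the last block.
 */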
static enum pnfs_try_status
bl_write_pagelist(struct nfs_write_data *wdata, int sync)
{
	int i, ret, npg_zero, pg_index, last = 0;
	struct bio *bio = NULL;
	struct pnfs_block_extent *be = NULL, *cow_read = NULL;
	sector_t isect, last_isect = 0, extent_length = 0;
	struct parallel_io *par;
	loff_t offset = wdata->args.offset;
	size_t count = wdata->args.count;
	struct page **pages = wdata->args.pages;
	struct page *page;
	pgoff_t index;
	u64 temp;
	int npg_per_block =
		NFS_SERVER(wdata->inode)->pnfs_blksize >> PAGE_CACHE_SHIFT;

	dprintk("%s enter, %Zu@%lld\n", __func__, count, offset);
	/* At this point, wdata->pages is a (sequential) list of nfs_pages.
	 * We want to write each, and if there is an error set pnfs_error
	 * to have it redone using nfs.
	 */
	par = alloc_parallel(wdata);
	if (!par)
		return PNFS_NOT_ATTEMPTED;
	par->call_ops = *wdata->mds_ops;
	par->call_ops.rpc_call_done = bl_rpc_do_nothing;
	par->pnfs_callback = bl_end_par_io_write;
	/* At this point, have to be more careful with error handling */

	isect = (sector_t) ((offset & (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
	be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg), isect, &cow_read);
	if (!be || !is_writable(be, isect)) {
		dprintk("%s no matching extents!\n", __func__);
		wdata->pnfs_error = -EINVAL;
		goto out;
	}

	/* First page inside INVALID extent */
	if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
		temp = offset >> PAGE_CACHE_SHIFT;
		npg_zero = do_div(temp, npg_per_block);
		isect = (sector_t) (((offset - npg_zero * PAGE_CACHE_SIZE) &
				     (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
		extent_length = be->be_length - (isect - be->be_f_offset);

fill_invalid_ext:
		dprintk("%s need to zero %d pages\n", __func__, npg_zero);
		for (;npg_zero > 0; npg_zero--) {
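			/* Check the sector's init state before taking the
			 * page lock: if another thread's writeback already
			 * initialized it, grabbing the page here could
			 * deadlock against that thread (see the hung-task
			 * trace in the commit message above).
			 */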
			if (bl_is_sector_init(be->be_inval, isect)) {
				dprintk("isect %llu already init\n",
					(unsigned long long)isect);
				goto next_page;
			}
			/* page ref released in bl_end_io_write_zero */
			index = isect >> PAGE_CACHE_SECTOR_SHIFT;
			dprintk("%s zero %dth page: index %lu isect %llu\n",
				__func__, npg_zero, index,
				(unsigned long long)isect);
			page =
			    find_or_create_page(wdata->inode->i_mapping, index,
						GFP_NOFS);
			if (!page) {
				dprintk("%s oom\n", __func__);
				wdata->pnfs_error = -ENOMEM;
				goto out;
			}

			/* PageDirty: Other will write this out
			 * PageWriteback: Other is writing this out
			 * PageUptodate: It was read before
			 * sector_initialized: already written out
			 */
			if (PageDirty(page) || PageWriteback(page)) {
				print_page(page);
				unlock_page(page);
				page_cache_release(page);
				goto next_page;
			}
			if (!PageUptodate(page)) {
				/* New page, readin or zero it */
				init_page_for_write(page, cow_read);
			}
			set_page_writeback(page);
			unlock_page(page);

			ret = bl_mark_sectors_init(be->be_inval, isect,
						   PAGE_CACHE_SECTORS,
						   NULL);
			if (unlikely(ret)) {
				dprintk("%s bl_mark_sectors_init fail %d\n",
					__func__, ret);
				end_page_writeback(page);
				page_cache_release(page);
				wdata->pnfs_error = ret;
				goto out;
			}
			bio = bl_add_page_to_bio(bio, npg_zero, WRITE,
						 isect, page, be,
						 bl_end_io_write_zero, par);
			if (IS_ERR(bio)) {
				wdata->pnfs_error = PTR_ERR(bio);
				bio = NULL;
				goto out;
			}
			/* FIXME: This should be done in bi_end_io */
			mark_extents_written(BLK_LSEG2EXT(wdata->lseg),
					     page->index << PAGE_CACHE_SHIFT,
					     PAGE_CACHE_SIZE);
next_page:
			isect += PAGE_CACHE_SECTORS;
			extent_length -= PAGE_CACHE_SECTORS;
		}
		if (last)
			goto write_done;
	}
	bio = bl_submit_bio(WRITE, bio);

	/* Middle pages */
	pg_index = wdata->args.pgbase >> PAGE_CACHE_SHIFT;
	for (i = pg_index; i < wdata->npages; i++) {
		if (!extent_length) {
			/* We've used up the previous extent */
			bl_put_extent(be);
			bio = bl_submit_bio(WRITE, bio);
			/* Get the next one */
			be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg),
						isect, NULL);
			if (!be || !is_writable(be, isect)) {
				wdata->pnfs_error = -EINVAL;
				goto out;
			}
			extent_length = be->be_length -
				(isect - be->be_f_offset);
		}
		if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
			ret = bl_mark_sectors_init(be->be_inval, isect,
						   PAGE_CACHE_SECTORS,
						   NULL);
			if (unlikely(ret)) {
				dprintk("%s bl_mark_sectors_init fail %d\n",
					__func__, ret);
				wdata->pnfs_error = ret;
				goto out;
			}
		}
		bio = bl_add_page_to_bio(bio, wdata->npages - i, WRITE,
					 isect, pages[i], be,
					 bl_end_io_write, par);
		if (IS_ERR(bio)) {
			wdata->pnfs_error = PTR_ERR(bio);
			bio = NULL;
			goto out;
		}
		isect += PAGE_CACHE_SECTORS;
		last_isect = isect;
		extent_length -= PAGE_CACHE_SECTORS;
	}

	/* Last page inside INVALID extent */
	if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
		bio = bl_submit_bio(WRITE, bio);
		temp = last_isect >> PAGE_CACHE_SECTOR_SHIFT;
		npg_zero = npg_per_block - do_div(temp, npg_per_block);
		if (npg_zero < npg_per_block) {
			last = 1;
			goto fill_invalid_ext;
		}
	}

write_done:
	wdata->res.count = (last_isect << SECTOR_SHIFT) - (offset);
	if (count < wdata->res.count) {
		wdata->res.count = count;
	}
out:
	bl_put_extent(be);
	bl_submit_bio(WRITE, bio);
	put_parallel(par);
	return PNFS_ATTEMPTED;
}

/* FIXME - range ignored */
static void
release_extents(struct pnfs_block_layout *bl, struct pnfs_layout_range *range)
{
	int i;
	struct pnfs_block_extent *be;

	spin_lock(&bl->bl_ext_lock);
	for (i = 0; i < EXTENT_LISTS; i++) {
		while (!list_empty(&bl->bl_extents[i])) {
			be = list_first_entry(&bl->bl_extents[i],
					      struct pnfs_block_extent,
					      be_node);
			list_del(&be->be_node);
			bl_put_extent(be);
		}
	}
	spin_unlock(&bl->bl_ext_lock);
}

static void
release_inval_marks(struct pnfs_inval_markings *marks)
{
	struct pnfs_inval_tracking *pos, *temp;

	list_for_each_entry_safe(pos, temp, &marks->im_tree.mtt_stub, it_link) {
		list_del(&pos->it_link);
		kfree(pos);
	}
	return;
}

static void bl_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);

	dprintk("%s enter\n", __func__);
	release_extents(bl, NULL);
	release_inval_marks(&bl->bl_inval);
	kfree(bl);
}

static struct pnfs_layout_hdr *bl_alloc_layout_hdr(struct inode *inode,
						   gfp_t gfp_flags)
{
	struct pnfs_block_layout *bl;

	dprintk("%s enter\n", __func__);
	bl = kzalloc(sizeof(*bl), gfp_flags);
	if (!bl)
		return NULL;
	spin_lock_init(&bl->bl_ext_lock);
	INIT_LIST_HEAD(&bl->bl_extents[0]);
	INIT_LIST_HEAD(&bl->bl_extents[1]);
	INIT_LIST_HEAD(&bl->bl_commit);
	INIT_LIST_HEAD(&bl->bl_committing);
	bl->bl_count = 0;
	bl->bl_blocksize = NFS_SERVER(inode)->pnfs_blksize >> SECTOR_SHIFT;
	BL_INIT_INVAL_MARKS(&bl->bl_inval, bl->bl_blocksize);
	return &bl->bl_layout;
}

static void bl_free_lseg(struct pnfs_layout_segment *lseg)
{
	dprintk("%s enter\n", __func__);
	kfree(lseg);
}

/* We pretty much ignore lseg, and store all data layout wide, so we
 * can correctly merge.
 */
static struct pnfs_layout_segment *bl_alloc_lseg(struct pnfs_layout_hdr *lo,
						 struct nfs4_layoutget_res *lgr,
						 gfp_t gfp_flags)
{
	struct pnfs_layout_segment *lseg;
	int status;

	dprintk("%s enter\n", __func__);
	lseg = kzalloc(sizeof(*lseg), gfp_flags);
	if (!lseg)
		return ERR_PTR(-ENOMEM);
	status = nfs4_blk_process_layoutget(lo, lgr, gfp_flags);
	if (status) {
		/* We don't want to call the full-blown bl_free_lseg,
		 * since on error extents were not touched.
		 */
		kfree(lseg);
		return ERR_PTR(status);
	}
	return lseg;
}

static void
bl_encode_layoutcommit(struct pnfs_layout_hdr *lo, struct xdr_stream *xdr,
		       const struct nfs4_layoutcommit_args *arg)
{
	dprintk("%s enter\n", __func__);
	encode_pnfs_block_layoutupdate(BLK_LO2EXT(lo), xdr, arg);
}

static void
bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata)
{
	struct pnfs_layout_hdr *lo = NFS_I(lcdata->args.inode)->layout;

	dprintk("%s enter\n", __func__);
	clean_pnfs_block_layoutupdate(BLK_LO2EXT(lo), &lcdata->args,
				      lcdata->res.status);
}

static void free_blk_mountid(struct block_mount_id *mid)
{
	if (mid) {
		struct pnfs_block_dev *dev;

		spin_lock(&mid->bm_lock);
		while (!list_empty(&mid->bm_devlist)) {
			dev = list_first_entry(&mid->bm_devlist,
					       struct pnfs_block_dev,
					       bm_node);
			list_del(&dev->bm_node);
			bl_free_block_dev(dev);
		}
		spin_unlock(&mid->bm_lock);
		kfree(mid);
	}
}

/* This is mostly copied from the filelayout's get_device_info function.
 * It seems much of this should be at the generic pnfs level.
 */
static struct pnfs_block_dev *
nfs4_blk_get_deviceinfo(struct nfs_server *server, const struct nfs_fh *fh,
			struct nfs4_deviceid *d_id)
{
	struct pnfs_device *dev;
	struct pnfs_block_dev *rv;
	u32 max_resp_sz;
	int max_pages;
	struct page **pages = NULL;
	int i, rc;

	/*
	 * Use the session max response size as the basis for setting
	 * GETDEVICEINFO's maxcount
	 */
	max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
	max_pages = max_resp_sz >> PAGE_SHIFT;
	dprintk("%s max_resp_sz %u max_pages %d\n",
		__func__, max_resp_sz, max_pages);

	dev = kmalloc(sizeof(*dev), GFP_NOFS);
	if (!dev) {
		dprintk("%s kmalloc failed\n", __func__);
		return ERR_PTR(-ENOMEM);
	}

	pages = kzalloc(max_pages * sizeof(struct page *), GFP_NOFS);
	if (pages == NULL) {
		kfree(dev);
		return ERR_PTR(-ENOMEM);
	}
	for (i = 0; i < max_pages; i++) {
		pages[i] = alloc_page(GFP_NOFS);
		if (!pages[i]) {
			rv = ERR_PTR(-ENOMEM);
			goto out_free;
		}
	}

	memcpy(&dev->dev_id, d_id, sizeof(*d_id));
	dev->layout_type = LAYOUT_BLOCK_VOLUME;
	dev->pages = pages;
	dev->pgbase = 0;
	dev->pglen = PAGE_SIZE * max_pages;
	dev->mincount = 0;

	dprintk("%s: dev_id: %s\n", __func__, dev->dev_id.data);
	rc = nfs4_proc_getdeviceinfo(server, dev);
	dprintk("%s getdevice info returns %d\n", __func__, rc);
	if (rc) {
		rv = ERR_PTR(rc);
		goto out_free;
	}

	rv = nfs4_blk_decode_device(server, dev);
out_free:
	for (i = 0; i < max_pages; i++) {
		/* pages[] is zero-filled and may be only partially
		 * populated if alloc_page() failed above */
		if (pages[i])
			__free_page(pages[i]);
	}
	kfree(pages);
	kfree(dev);
	return rv;
}
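
/* Mount-time setup: walk the server's GETDEVICELIST results, fetch and
 * decode GETDEVICEINFO for each device id, and hang the resulting
 * pnfs_block_devs off the per-server block_mount_id.
 */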
static int
bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh)
{
	struct block_mount_id *b_mt_id = NULL;
	struct pnfs_devicelist *dlist = NULL;
	struct pnfs_block_dev *bdev;
	LIST_HEAD(block_disklist);
	int status, i;

	dprintk("%s enter\n", __func__);

	if (server->pnfs_blksize == 0) {
		dprintk("%s Server did not return blksize\n", __func__);
		return -EINVAL;
	}
	b_mt_id = kzalloc(sizeof(struct block_mount_id), GFP_NOFS);
	if (!b_mt_id) {
		status = -ENOMEM;
		goto out_error;
	}
	/* Initialize nfs4 block layout mount id */
	spin_lock_init(&b_mt_id->bm_lock);
	INIT_LIST_HEAD(&b_mt_id->bm_devlist);

	dlist = kmalloc(sizeof(struct pnfs_devicelist), GFP_NOFS);
	if (!dlist) {
		status = -ENOMEM;
		goto out_error;
	}
	dlist->eof = 0;
	while (!dlist->eof) {
		status = nfs4_proc_getdevicelist(server, fh, dlist);
		if (status)
			goto out_error;
		dprintk("%s GETDEVICELIST numdevs=%i, eof=%i\n",
			__func__, dlist->num_devs, dlist->eof);
		for (i = 0; i < dlist->num_devs; i++) {
			bdev = nfs4_blk_get_deviceinfo(server, fh,
						       &dlist->dev_id[i]);
			if (IS_ERR(bdev)) {
				status = PTR_ERR(bdev);
				goto out_error;
			}
			spin_lock(&b_mt_id->bm_lock);
			list_add(&bdev->bm_node, &b_mt_id->bm_devlist);
			spin_unlock(&b_mt_id->bm_lock);
		}
	}
	dprintk("%s SUCCESS\n", __func__);
	server->pnfs_ld_data = b_mt_id;

out_return:
	kfree(dlist);
	return status;

out_error:
	free_blk_mountid(b_mt_id);
	goto out_return;
}

static int
bl_clear_layoutdriver(struct nfs_server *server)
{
	struct block_mount_id *b_mt_id = server->pnfs_ld_data;

	dprintk("%s enter\n", __func__);
	free_blk_mountid(b_mt_id);
	dprintk("%s RETURNS\n", __func__);
	return 0;
}

static const struct nfs_pageio_ops bl_pg_read_ops = {
	.pg_init = pnfs_generic_pg_init_read,
	.pg_test = pnfs_generic_pg_test,
	.pg_doio = pnfs_generic_pg_readpages,
};

static const struct nfs_pageio_ops bl_pg_write_ops = {
	.pg_init = pnfs_generic_pg_init_write,
	.pg_test = pnfs_generic_pg_test,
	.pg_doio = pnfs_generic_pg_writepages,
};

static struct pnfs_layoutdriver_type blocklayout_type = {
	.id				= LAYOUT_BLOCK_VOLUME,
	.name				= "LAYOUT_BLOCK_VOLUME",
	.read_pagelist			= bl_read_pagelist,
	.write_pagelist			= bl_write_pagelist,
	.alloc_layout_hdr		= bl_alloc_layout_hdr,
	.free_layout_hdr		= bl_free_layout_hdr,
	.alloc_lseg			= bl_alloc_lseg,
	.free_lseg			= bl_free_lseg,
	.encode_layoutcommit		= bl_encode_layoutcommit,
	.cleanup_layoutcommit		= bl_cleanup_layoutcommit,
	.set_layoutdriver		= bl_set_layoutdriver,
	.clear_layoutdriver		= bl_clear_layoutdriver,
	.pg_read_ops			= &bl_pg_read_ops,
	.pg_write_ops			= &bl_pg_write_ops,
};

static const struct rpc_pipe_ops bl_upcall_ops = {
	.upcall		= rpc_pipe_generic_upcall,
	.downcall	= bl_pipe_downcall,
	.destroy_msg	= bl_pipe_destroy_msg,
};
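
/* Module init: register the layout driver, then create the rpc_pipefs
 * pipe "blocklayout" used for device-resolution upcalls to userspace.
 */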
static int __init nfs4blocklayout_init(void)
{
	struct vfsmount *mnt;
	struct path path;
	int ret;

	dprintk("%s: NFSv4 Block Layout Driver Registering...\n", __func__);

	ret = pnfs_register_layoutdriver(&blocklayout_type);
	if (ret)
		goto out;

	init_waitqueue_head(&bl_wq);

	mnt = rpc_get_mount();
	if (IS_ERR(mnt)) {
		ret = PTR_ERR(mnt);
		goto out_remove;
	}
	ret = vfs_path_lookup(mnt->mnt_root,
			      mnt,
			      NFS_PIPE_DIRNAME, 0, &path);
	if (ret)
		goto out_putrpc;

	bl_device_pipe = rpc_mkpipe(path.dentry, "blocklayout", NULL,
				    &bl_upcall_ops, 0);
	path_put(&path);
	if (IS_ERR(bl_device_pipe)) {
		ret = PTR_ERR(bl_device_pipe);
		goto out_putrpc;
	}
out:
	return ret;

out_putrpc:
	rpc_put_mount();
out_remove:
	pnfs_unregister_layoutdriver(&blocklayout_type);
	return ret;
}

static void __exit nfs4blocklayout_exit(void)
{
	dprintk("%s: NFSv4 Block Layout Driver Unregistering...\n",
		__func__);

	pnfs_unregister_layoutdriver(&blocklayout_type);
	rpc_unlink(bl_device_pipe);
	rpc_put_mount();
}

MODULE_ALIAS("nfs-layouttype4-3");

module_init(nfs4blocklayout_init);
module_exit(nfs4blocklayout_exit);