1
0
Fork 0

[PATCH] pipe: introduce ->pin() buffer operation

The ->map() function is really expensive on highmem machines right now,
since it has to use the slower kmap() instead of kmap_atomic(). Splice
rarely needs to access the virtual address of a page, so it's a waste
of time doing it.

Introduce ->pin() to take over the responsibility of making sure the
page data is valid. ->map() is then reduced to just kmap(). That way we
can also share most of the pipe buffer ops between pipe.c and splice.c

Signed-off-by: Jens Axboe <axboe@suse.de>
wifi-calibration
Jens Axboe 2006-05-01 19:59:03 +02:00
parent 0568b409c7
commit f84d751994
3 changed files with 73 additions and 78 deletions

View File

@ -110,14 +110,14 @@ static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
page_cache_release(page);
}
static void * anon_pipe_buf_map(struct file *file, struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
void *generic_pipe_buf_map(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
return kmap(buf->page);
}
static void anon_pipe_buf_unmap(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
void generic_pipe_buf_unmap(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
kunmap(buf->page);
}
@ -135,19 +135,24 @@ static int anon_pipe_buf_steal(struct pipe_inode_info *pipe,
return 1;
}
static void anon_pipe_buf_get(struct pipe_inode_info *info,
struct pipe_buffer *buf)
void generic_pipe_buf_get(struct pipe_inode_info *info, struct pipe_buffer *buf)
{
page_cache_get(buf->page);
}
int generic_pipe_buf_pin(struct pipe_inode_info *info, struct pipe_buffer *buf)
{
return 0;
}
static struct pipe_buf_operations anon_pipe_buf_ops = {
.can_merge = 1,
.map = anon_pipe_buf_map,
.unmap = anon_pipe_buf_unmap,
.map = generic_pipe_buf_map,
.unmap = generic_pipe_buf_unmap,
.pin = generic_pipe_buf_pin,
.release = anon_pipe_buf_release,
.steal = anon_pipe_buf_steal,
.get = anon_pipe_buf_get,
.get = generic_pipe_buf_get,
};
static ssize_t
@ -183,12 +188,14 @@ pipe_readv(struct file *filp, const struct iovec *_iov,
if (chars > total_len)
chars = total_len;
addr = ops->map(filp, pipe, buf);
if (IS_ERR(addr)) {
error = ops->pin(pipe, buf);
if (error) {
if (!ret)
ret = PTR_ERR(addr);
error = ret;
break;
}
addr = ops->map(pipe, buf);
error = pipe_iov_copy_to_user(iov, addr + buf->offset, chars);
ops->unmap(pipe, buf);
if (unlikely(error)) {
@ -300,11 +307,11 @@ pipe_writev(struct file *filp, const struct iovec *_iov,
void *addr;
int error;
addr = ops->map(filp, pipe, buf);
if (IS_ERR(addr)) {
error = PTR_ERR(addr);
error = ops->pin(pipe, buf);
if (error)
goto out;
}
addr = ops->map(pipe, buf);
error = pipe_iov_copy_from_user(offset + addr, iov,
chars);
ops->unmap(pipe, buf);

View File

@ -90,9 +90,8 @@ static void page_cache_pipe_buf_release(struct pipe_inode_info *info,
buf->flags &= ~PIPE_BUF_FLAG_LRU;
}
static void *page_cache_pipe_buf_map(struct file *file,
struct pipe_inode_info *info,
struct pipe_buffer *buf)
static int page_cache_pipe_buf_pin(struct pipe_inode_info *info,
struct pipe_buffer *buf)
{
struct page *page = buf->page;
int err;
@ -118,49 +117,25 @@ static void *page_cache_pipe_buf_map(struct file *file,
}
/*
* Page is ok afterall, fall through to mapping.
* Page is ok afterall, we are done.
*/
unlock_page(page);
}
return kmap(page);
return 0;
error:
unlock_page(page);
return ERR_PTR(err);
}
static void page_cache_pipe_buf_unmap(struct pipe_inode_info *info,
struct pipe_buffer *buf)
{
kunmap(buf->page);
}
static void *user_page_pipe_buf_map(struct file *file,
struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
return kmap(buf->page);
}
static void user_page_pipe_buf_unmap(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
kunmap(buf->page);
}
static void page_cache_pipe_buf_get(struct pipe_inode_info *info,
struct pipe_buffer *buf)
{
page_cache_get(buf->page);
return err;
}
static struct pipe_buf_operations page_cache_pipe_buf_ops = {
.can_merge = 0,
.map = page_cache_pipe_buf_map,
.unmap = page_cache_pipe_buf_unmap,
.map = generic_pipe_buf_map,
.unmap = generic_pipe_buf_unmap,
.pin = page_cache_pipe_buf_pin,
.release = page_cache_pipe_buf_release,
.steal = page_cache_pipe_buf_steal,
.get = page_cache_pipe_buf_get,
.get = generic_pipe_buf_get,
};
static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
@ -171,11 +146,12 @@ static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
static struct pipe_buf_operations user_page_pipe_buf_ops = {
.can_merge = 0,
.map = user_page_pipe_buf_map,
.unmap = user_page_pipe_buf_unmap,
.map = generic_pipe_buf_map,
.unmap = generic_pipe_buf_unmap,
.pin = generic_pipe_buf_pin,
.release = page_cache_pipe_buf_release,
.steal = user_page_pipe_buf_steal,
.get = page_cache_pipe_buf_get,
.get = generic_pipe_buf_get,
};
/*
@ -517,26 +493,16 @@ static int pipe_to_sendpage(struct pipe_inode_info *info,
{
struct file *file = sd->file;
loff_t pos = sd->pos;
ssize_t ret;
void *ptr;
int more;
int ret, more;
/*
* Sub-optimal, but we are limited by the pipe ->map. We don't
* need a kmap'ed buffer here, we just want to make sure we
* have the page pinned if the pipe page originates from the
* page cache.
*/
ptr = buf->ops->map(file, info, buf);
if (IS_ERR(ptr))
return PTR_ERR(ptr);
ret = buf->ops->pin(info, buf);
if (!ret) {
more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;
more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;
ret = file->f_op->sendpage(file, buf->page, buf->offset,
sd->len, &pos, more);
}
ret = file->f_op->sendpage(file, buf->page, buf->offset, sd->len,
&pos, more);
buf->ops->unmap(info, buf);
return ret;
}
@ -569,15 +535,14 @@ static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
unsigned int offset, this_len;
struct page *page;
pgoff_t index;
char *src;
int ret;
/*
* make sure the data in this buffer is uptodate
*/
src = buf->ops->map(file, info, buf);
if (IS_ERR(src))
return PTR_ERR(src);
ret = buf->ops->pin(info, buf);
if (unlikely(ret))
return ret;
index = sd->pos >> PAGE_CACHE_SHIFT;
offset = sd->pos & ~PAGE_CACHE_MASK;
@ -666,11 +631,16 @@ find_page:
goto out;
if (buf->page != page) {
char *dst = kmap_atomic(page, KM_USER0);
/*
* Careful, ->map() uses KM_USER0!
*/
char *src = buf->ops->map(info, buf);
char *dst = kmap_atomic(page, KM_USER1);
memcpy(dst + offset, src + buf->offset, this_len);
flush_dcache_page(page);
kunmap_atomic(dst, KM_USER0);
kunmap_atomic(dst, KM_USER1);
buf->ops->unmap(info, buf);
}
ret = mapping->a_ops->commit_write(file, page, offset, offset+this_len);
@ -690,7 +660,6 @@ out:
page_cache_release(page);
unlock_page(page);
out_nomem:
buf->ops->unmap(info, buf);
return ret;
}

View File

@ -14,10 +14,23 @@ struct pipe_buffer {
unsigned int flags;
};
/*
* Note on the nesting of these functions:
*
* ->pin()
* ->steal()
* ...
* ->map()
* ...
* ->unmap()
*
* That is, ->map() must be called on a pinned buffer, same goes for ->steal().
*/
struct pipe_buf_operations {
int can_merge;
void * (*map)(struct file *, struct pipe_inode_info *, struct pipe_buffer *);
void * (*map)(struct pipe_inode_info *, struct pipe_buffer *);
void (*unmap)(struct pipe_inode_info *, struct pipe_buffer *);
int (*pin)(struct pipe_inode_info *, struct pipe_buffer *);
void (*release)(struct pipe_inode_info *, struct pipe_buffer *);
int (*steal)(struct pipe_inode_info *, struct pipe_buffer *);
void (*get)(struct pipe_inode_info *, struct pipe_buffer *);
@ -50,6 +63,12 @@ struct pipe_inode_info * alloc_pipe_info(struct inode * inode);
void free_pipe_info(struct inode * inode);
void __free_pipe_info(struct pipe_inode_info *);
/* Generic pipe buffer ops functions */
void *generic_pipe_buf_map(struct pipe_inode_info *, struct pipe_buffer *);
void generic_pipe_buf_unmap(struct pipe_inode_info *, struct pipe_buffer *);
void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
int generic_pipe_buf_pin(struct pipe_inode_info *, struct pipe_buffer *);
/*
* splice is tied to pipes as a transport (at least for now), so we'll just
* add the splice flags here.