1
0
Fork 0

fs: remove the second argument of k[un]map_atomic()

Acked-by: Benjamin LaHaise <bcrl@kvack.org>
Signed-off-by: Cong Wang <amwang@redhat.com>
hifive-unleashed-5.1
Cong Wang 2011-11-25 23:14:27 +08:00 committed by Cong Wang
parent c6daa7ffa8
commit e8e3c3d66f
7 changed files with 34 additions and 37 deletions

View File

@ -160,7 +160,7 @@ static int aio_setup_ring(struct kioctx *ctx)
info->nr = nr_events; /* trusted copy */ info->nr = nr_events; /* trusted copy */
ring = kmap_atomic(info->ring_pages[0], KM_USER0); ring = kmap_atomic(info->ring_pages[0]);
ring->nr = nr_events; /* user copy */ ring->nr = nr_events; /* user copy */
ring->id = ctx->user_id; ring->id = ctx->user_id;
ring->head = ring->tail = 0; ring->head = ring->tail = 0;
@ -168,32 +168,32 @@ static int aio_setup_ring(struct kioctx *ctx)
ring->compat_features = AIO_RING_COMPAT_FEATURES; ring->compat_features = AIO_RING_COMPAT_FEATURES;
ring->incompat_features = AIO_RING_INCOMPAT_FEATURES; ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
ring->header_length = sizeof(struct aio_ring); ring->header_length = sizeof(struct aio_ring);
kunmap_atomic(ring, KM_USER0); kunmap_atomic(ring);
return 0; return 0;
} }
/* aio_ring_event: returns a pointer to the event at the given index from /* aio_ring_event: returns a pointer to the event at the given index from
* kmap_atomic(, km). Release the pointer with put_aio_ring_event(); * kmap_atomic(). Release the pointer with put_aio_ring_event();
*/ */
#define AIO_EVENTS_PER_PAGE (PAGE_SIZE / sizeof(struct io_event)) #define AIO_EVENTS_PER_PAGE (PAGE_SIZE / sizeof(struct io_event))
#define AIO_EVENTS_FIRST_PAGE ((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event)) #define AIO_EVENTS_FIRST_PAGE ((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
#define AIO_EVENTS_OFFSET (AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE) #define AIO_EVENTS_OFFSET (AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
#define aio_ring_event(info, nr, km) ({ \ #define aio_ring_event(info, nr) ({ \
unsigned pos = (nr) + AIO_EVENTS_OFFSET; \ unsigned pos = (nr) + AIO_EVENTS_OFFSET; \
struct io_event *__event; \ struct io_event *__event; \
__event = kmap_atomic( \ __event = kmap_atomic( \
(info)->ring_pages[pos / AIO_EVENTS_PER_PAGE], km); \ (info)->ring_pages[pos / AIO_EVENTS_PER_PAGE]); \
__event += pos % AIO_EVENTS_PER_PAGE; \ __event += pos % AIO_EVENTS_PER_PAGE; \
__event; \ __event; \
}) })
#define put_aio_ring_event(event, km) do { \ #define put_aio_ring_event(event) do { \
struct io_event *__event = (event); \ struct io_event *__event = (event); \
(void)__event; \ (void)__event; \
kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK), km); \ kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK)); \
} while(0) } while(0)
static void ctx_rcu_free(struct rcu_head *head) static void ctx_rcu_free(struct rcu_head *head)
@ -1019,10 +1019,10 @@ int aio_complete(struct kiocb *iocb, long res, long res2)
if (kiocbIsCancelled(iocb)) if (kiocbIsCancelled(iocb))
goto put_rq; goto put_rq;
ring = kmap_atomic(info->ring_pages[0], KM_IRQ1); ring = kmap_atomic(info->ring_pages[0]);
tail = info->tail; tail = info->tail;
event = aio_ring_event(info, tail, KM_IRQ0); event = aio_ring_event(info, tail);
if (++tail >= info->nr) if (++tail >= info->nr)
tail = 0; tail = 0;
@ -1043,8 +1043,8 @@ int aio_complete(struct kiocb *iocb, long res, long res2)
info->tail = tail; info->tail = tail;
ring->tail = tail; ring->tail = tail;
put_aio_ring_event(event, KM_IRQ0); put_aio_ring_event(event);
kunmap_atomic(ring, KM_IRQ1); kunmap_atomic(ring);
pr_debug("added to ring %p at [%lu]\n", iocb, tail); pr_debug("added to ring %p at [%lu]\n", iocb, tail);
@ -1089,7 +1089,7 @@ static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
unsigned long head; unsigned long head;
int ret = 0; int ret = 0;
ring = kmap_atomic(info->ring_pages[0], KM_USER0); ring = kmap_atomic(info->ring_pages[0]);
dprintk("in aio_read_evt h%lu t%lu m%lu\n", dprintk("in aio_read_evt h%lu t%lu m%lu\n",
(unsigned long)ring->head, (unsigned long)ring->tail, (unsigned long)ring->head, (unsigned long)ring->tail,
(unsigned long)ring->nr); (unsigned long)ring->nr);
@ -1101,18 +1101,18 @@ static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
head = ring->head % info->nr; head = ring->head % info->nr;
if (head != ring->tail) { if (head != ring->tail) {
struct io_event *evp = aio_ring_event(info, head, KM_USER1); struct io_event *evp = aio_ring_event(info, head);
*ent = *evp; *ent = *evp;
head = (head + 1) % info->nr; head = (head + 1) % info->nr;
smp_mb(); /* finish reading the event before updating the head */ smp_mb(); /* finish reading the event before updating the head */
ring->head = head; ring->head = head;
ret = 1; ret = 1;
put_aio_ring_event(evp, KM_USER1); put_aio_ring_event(evp);
} }
spin_unlock(&info->ring_lock); spin_unlock(&info->ring_lock);
out: out:
kunmap_atomic(ring, KM_USER0); kunmap_atomic(ring);
dprintk("leaving aio_read_evt: %d h%lu t%lu\n", ret, dprintk("leaving aio_read_evt: %d h%lu t%lu\n", ret,
(unsigned long)ring->head, (unsigned long)ring->tail); (unsigned long)ring->head, (unsigned long)ring->tail);
return ret; return ret;

View File

@ -357,7 +357,7 @@ static void bio_integrity_generate(struct bio *bio)
bix.sector_size = bi->sector_size; bix.sector_size = bi->sector_size;
bio_for_each_segment(bv, bio, i) { bio_for_each_segment(bv, bio, i) {
void *kaddr = kmap_atomic(bv->bv_page, KM_USER0); void *kaddr = kmap_atomic(bv->bv_page);
bix.data_buf = kaddr + bv->bv_offset; bix.data_buf = kaddr + bv->bv_offset;
bix.data_size = bv->bv_len; bix.data_size = bv->bv_len;
bix.prot_buf = prot_buf; bix.prot_buf = prot_buf;
@ -371,7 +371,7 @@ static void bio_integrity_generate(struct bio *bio)
total += sectors * bi->tuple_size; total += sectors * bi->tuple_size;
BUG_ON(total > bio->bi_integrity->bip_size); BUG_ON(total > bio->bi_integrity->bip_size);
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
} }
} }
@ -498,7 +498,7 @@ static int bio_integrity_verify(struct bio *bio)
bix.sector_size = bi->sector_size; bix.sector_size = bi->sector_size;
bio_for_each_segment(bv, bio, i) { bio_for_each_segment(bv, bio, i) {
void *kaddr = kmap_atomic(bv->bv_page, KM_USER0); void *kaddr = kmap_atomic(bv->bv_page);
bix.data_buf = kaddr + bv->bv_offset; bix.data_buf = kaddr + bv->bv_offset;
bix.data_size = bv->bv_len; bix.data_size = bv->bv_len;
bix.prot_buf = prot_buf; bix.prot_buf = prot_buf;
@ -507,7 +507,7 @@ static int bio_integrity_verify(struct bio *bio)
ret = bi->verify_fn(&bix); ret = bi->verify_fn(&bix);
if (ret) { if (ret) {
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
return ret; return ret;
} }
@ -517,7 +517,7 @@ static int bio_integrity_verify(struct bio *bio)
total += sectors * bi->tuple_size; total += sectors * bi->tuple_size;
BUG_ON(total > bio->bi_integrity->bip_size); BUG_ON(total > bio->bi_integrity->bip_size);
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
} }
return ret; return ret;

View File

@ -1339,13 +1339,13 @@ int remove_arg_zero(struct linux_binprm *bprm)
ret = -EFAULT; ret = -EFAULT;
goto out; goto out;
} }
kaddr = kmap_atomic(page, KM_USER0); kaddr = kmap_atomic(page);
for (; offset < PAGE_SIZE && kaddr[offset]; for (; offset < PAGE_SIZE && kaddr[offset];
offset++, bprm->p++) offset++, bprm->p++)
; ;
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
put_arg_page(page); put_arg_page(page);
if (offset == PAGE_SIZE) if (offset == PAGE_SIZE)

View File

@ -3371,9 +3371,9 @@ retry:
if (err) if (err)
goto fail; goto fail;
kaddr = kmap_atomic(page, KM_USER0); kaddr = kmap_atomic(page);
memcpy(kaddr, symname, len-1); memcpy(kaddr, symname, len-1);
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
err = pagecache_write_end(NULL, mapping, 0, len-1, len-1, err = pagecache_write_end(NULL, mapping, 0, len-1, len-1,
page, fsdata); page, fsdata);

View File

@ -230,7 +230,7 @@ void *generic_pipe_buf_map(struct pipe_inode_info *pipe,
{ {
if (atomic) { if (atomic) {
buf->flags |= PIPE_BUF_FLAG_ATOMIC; buf->flags |= PIPE_BUF_FLAG_ATOMIC;
return kmap_atomic(buf->page, KM_USER0); return kmap_atomic(buf->page);
} }
return kmap(buf->page); return kmap(buf->page);
@ -251,7 +251,7 @@ void generic_pipe_buf_unmap(struct pipe_inode_info *pipe,
{ {
if (buf->flags & PIPE_BUF_FLAG_ATOMIC) { if (buf->flags & PIPE_BUF_FLAG_ATOMIC) {
buf->flags &= ~PIPE_BUF_FLAG_ATOMIC; buf->flags &= ~PIPE_BUF_FLAG_ATOMIC;
kunmap_atomic(map_data, KM_USER0); kunmap_atomic(map_data);
} else } else
kunmap(buf->page); kunmap(buf->page);
} }
@ -565,14 +565,14 @@ redo1:
iov_fault_in_pages_read(iov, chars); iov_fault_in_pages_read(iov, chars);
redo2: redo2:
if (atomic) if (atomic)
src = kmap_atomic(page, KM_USER0); src = kmap_atomic(page);
else else
src = kmap(page); src = kmap(page);
error = pipe_iov_copy_from_user(src, iov, chars, error = pipe_iov_copy_from_user(src, iov, chars,
atomic); atomic);
if (atomic) if (atomic)
kunmap_atomic(src, KM_USER0); kunmap_atomic(src);
else else
kunmap(page); kunmap(page);

View File

@ -737,15 +737,12 @@ int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
goto out; goto out;
if (buf->page != page) { if (buf->page != page) {
/*
* Careful, ->map() uses KM_USER0!
*/
char *src = buf->ops->map(pipe, buf, 1); char *src = buf->ops->map(pipe, buf, 1);
char *dst = kmap_atomic(page, KM_USER1); char *dst = kmap_atomic(page);
memcpy(dst + offset, src + buf->offset, this_len); memcpy(dst + offset, src + buf->offset, this_len);
flush_dcache_page(page); flush_dcache_page(page);
kunmap_atomic(dst, KM_USER1); kunmap_atomic(dst);
buf->ops->unmap(pipe, buf, src); buf->ops->unmap(pipe, buf, src);
} }
ret = pagecache_write_end(file, mapping, sd->pos, this_len, this_len, ret = pagecache_write_end(file, mapping, sd->pos, this_len, this_len,

View File

@ -101,10 +101,10 @@ static inline int bio_has_allocated_vec(struct bio *bio)
* I/O completely on that queue (see ide-dma for example) * I/O completely on that queue (see ide-dma for example)
*/ */
#define __bio_kmap_atomic(bio, idx, kmtype) \ #define __bio_kmap_atomic(bio, idx, kmtype) \
(kmap_atomic(bio_iovec_idx((bio), (idx))->bv_page, kmtype) + \ (kmap_atomic(bio_iovec_idx((bio), (idx))->bv_page) + \
bio_iovec_idx((bio), (idx))->bv_offset) bio_iovec_idx((bio), (idx))->bv_offset)
#define __bio_kunmap_atomic(addr, kmtype) kunmap_atomic(addr, kmtype) #define __bio_kunmap_atomic(addr, kmtype) kunmap_atomic(addr)
/* /*
* merge helpers etc * merge helpers etc
@ -317,7 +317,7 @@ static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
* balancing is a lot nicer this way * balancing is a lot nicer this way
*/ */
local_irq_save(*flags); local_irq_save(*flags);
addr = (unsigned long) kmap_atomic(bvec->bv_page, KM_BIO_SRC_IRQ); addr = (unsigned long) kmap_atomic(bvec->bv_page);
BUG_ON(addr & ~PAGE_MASK); BUG_ON(addr & ~PAGE_MASK);
@ -328,7 +328,7 @@ static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{ {
unsigned long ptr = (unsigned long) buffer & PAGE_MASK; unsigned long ptr = (unsigned long) buffer & PAGE_MASK;
kunmap_atomic((void *) ptr, KM_BIO_SRC_IRQ); kunmap_atomic((void *) ptr);
local_irq_restore(*flags); local_irq_restore(*flags);
} }