Merge branch 'akpm' (patches from Andrew)

Merge misc patches from Andrew Morton:

- a little bit more MM

- a few fixups

[ The "little bit more MM" is actually just one of the three patches
  Andrew sent for mm/filemap.c, I'm still mulling over two more of them
  from Josef Bacik     - Linus ]

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  include/linux/swap.h: use offsetof() instead of custom __swapoffset macro
  tools/testing/selftests/proc/proc-pid-vm.c: test with vsyscall in mind
  zram: default to lzo-rle instead of lzo
  filemap: pass vm_fault to the mmap ra helpers
Linus Torvalds 2019-03-14 15:10:10 -07:00
commit f261c4e529
4 changed files with 63 additions and 20 deletions

drivers/block/zram/zram_drv.c

@@ -41,7 +41,7 @@ static DEFINE_IDR(zram_index_idr);
 static DEFINE_MUTEX(zram_index_mutex);
 
 static int zram_major;
-static const char *default_compressor = "lzo";
+static const char *default_compressor = "lzo-rle";
 
 /* Module params (documentation at end) */
 static unsigned int num_devices = 1;
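Not part of the diff, but worth noting: this only changes the built-in default. The algorithm stays selectable per device through zram's existing comp_algorithm sysfs attribute. A minimal sketch in C (assuming the zram module is loaded, /sys/block/zram0 exists, the device has no disksize set yet, and the caller is root):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        const char *alg = "lzo";        /* revert this device to the old default */
        int fd = open("/sys/block/zram0/comp_algorithm", O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* Equivalent to: echo lzo > /sys/block/zram0/comp_algorithm */
        if (write(fd, alg, strlen(alg)) < 0) {
                perror("write");
                close(fd);
                return 1;
        }
        close(fd);
        return 0;
}

The write is rejected with EBUSY once the device has been initialized, so any override has to happen before disksize is set.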

include/linux/swap.h

@@ -157,9 +157,9 @@ struct swap_extent {
 /*
  * Max bad pages in the new format..
  */
-#define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x)
 #define MAX_SWAP_BADPAGES \
-        ((__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int))
+        ((offsetof(union swap_header, magic.magic) - \
+          offsetof(union swap_header, info.badpages)) / sizeof(int))
 
 enum {
         SWP_USED        = (1 << 0),     /* is slot in swap_info[] used? */
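The change is more than cosmetic: the removed macro computes a member offset by casting a null pointer, which is undefined behaviour in ISO C even though it compiles everywhere, whereas offsetof() is the standard constant expression for the same value. A self-contained illustration, using a made-up stand-in layout rather than the real union swap_header:

#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-in for union swap_header; field sizes are invented. */
union demo_header {
        struct {
                char reserved[100];
                char magic[10];
        } magic;
        struct {
                char bootbits[52];
                int badpages[1];
        } info;
};

/* The removed pattern: offset via a cast null pointer (UB in ISO C). */
#define __demo_swapoffset(x) ((unsigned long)&((union demo_header *)0)->x)

int main(void)
{
        /* Both print 100 here; only the offsetof() form is well-defined. */
        printf("old: %lu, new: %zu\n",
               __demo_swapoffset(magic.magic),
               offsetof(union demo_header, magic.magic));
        /* The MAX_SWAP_BADPAGES computation: (100 - 52) / 4 = 12. */
        printf("badpages capacity: %zu\n",
               (offsetof(union demo_header, magic.magic) -
                offsetof(union demo_header, info.badpages)) / sizeof(int));
        return 0;
}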

mm/filemap.c

@@ -2420,20 +2420,20 @@ static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask)
  * Synchronous readahead happens when we don't even find
  * a page in the page cache at all.
  */
-static void do_sync_mmap_readahead(struct vm_area_struct *vma,
-                                   struct file_ra_state *ra,
-                                   struct file *file,
-                                   pgoff_t offset)
+static void do_sync_mmap_readahead(struct vm_fault *vmf)
 {
+        struct file *file = vmf->vma->vm_file;
+        struct file_ra_state *ra = &file->f_ra;
         struct address_space *mapping = file->f_mapping;
+        pgoff_t offset = vmf->pgoff;
 
         /* If we don't want any read-ahead, don't bother */
-        if (vma->vm_flags & VM_RAND_READ)
+        if (vmf->vma->vm_flags & VM_RAND_READ)
                 return;
         if (!ra->ra_pages)
                 return;
 
-        if (vma->vm_flags & VM_SEQ_READ) {
+        if (vmf->vma->vm_flags & VM_SEQ_READ) {
                 page_cache_sync_readahead(mapping, ra, file, offset,
                                           ra->ra_pages);
                 return;
@@ -2463,16 +2463,16 @@ static void do_sync_mmap_readahead(struct vm_area_struct *vma,
  * Asynchronous readahead happens when we find the page and PG_readahead,
  * so we want to possibly extend the readahead further..
  */
-static void do_async_mmap_readahead(struct vm_area_struct *vma,
-                                    struct file_ra_state *ra,
-                                    struct file *file,
-                                    struct page *page,
-                                    pgoff_t offset)
+static void do_async_mmap_readahead(struct vm_fault *vmf,
+                                    struct page *page)
 {
+        struct file *file = vmf->vma->vm_file;
+        struct file_ra_state *ra = &file->f_ra;
         struct address_space *mapping = file->f_mapping;
+        pgoff_t offset = vmf->pgoff;
 
         /* If we don't want any read-ahead, don't bother */
-        if (vma->vm_flags & VM_RAND_READ)
+        if (vmf->vma->vm_flags & VM_RAND_READ)
                 return;
         if (ra->mmap_miss > 0)
                 ra->mmap_miss--;
@@ -2531,10 +2531,10 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
                  * We found the page, so try async readahead before
                  * waiting for the lock.
                  */
-                do_async_mmap_readahead(vmf->vma, ra, file, page, offset);
+                do_async_mmap_readahead(vmf, page);
         } else if (!page) {
                 /* No page in the page cache at all */
-                do_sync_mmap_readahead(vmf->vma, ra, file, offset);
+                do_sync_mmap_readahead(vmf);
                 count_vm_event(PGMAJFAULT);
                 count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
                 ret = VM_FAULT_MAJOR;
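For context, not part of the patch: VM_RAND_READ and VM_SEQ_READ, which both helpers test first, are the VMA flags set by madvise(), so the readahead policy above is steerable from userspace. A minimal sketch, using /etc/hostname only as a convenient file to map:

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/etc/hostname", O_RDONLY);
        off_t len;
        char *p;

        if (fd < 0)
                return 1;
        len = lseek(fd, 0, SEEK_END);
        if (len <= 0)
                return 1;
        p = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
        if (p == MAP_FAILED)
                return 1;

        /* Sets VM_SEQ_READ: do_sync_mmap_readahead() ramps readahead up. */
        madvise(p, len, MADV_SEQUENTIAL);
        /* Sets VM_RAND_READ: both helpers bail out without reading ahead. */
        madvise(p, len, MADV_RANDOM);

        munmap(p, len);
        close(fd);
        return 0;
}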

tools/testing/selftests/proc/proc-pid-vm.c

@@ -29,6 +29,7 @@
 #include <errno.h>
 #include <sched.h>
 #include <signal.h>
+#include <stdbool.h>
 #include <stdint.h>
 #include <stdio.h>
 #include <string.h>
@@ -36,11 +37,14 @@
 #include <sys/mount.h>
 #include <sys/types.h>
 #include <sys/stat.h>
+#include <sys/wait.h>
 #include <fcntl.h>
 #include <unistd.h>
 #include <sys/syscall.h>
 #include <sys/uio.h>
 #include <linux/kdev_t.h>
+#include <sys/time.h>
+#include <sys/resource.h>
 
 static inline long sys_execveat(int dirfd, const char *pathname, char **argv, char **envp, int flags)
 {
@@ -205,12 +209,44 @@ static int make_exe(const uint8_t *payload, size_t len)
 }
 #endif
 
+static bool g_vsyscall = false;
+
+static const char str_vsyscall[] =
+"ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0                  [vsyscall]\n";
+
+#ifdef __x86_64__
+/*
+ * vsyscall page can't be unmapped, probe it with memory load.
+ */
+static void vsyscall(void)
+{
+        pid_t pid;
+        int wstatus;
+
+        pid = fork();
+        if (pid < 0) {
+                fprintf(stderr, "fork, errno %d\n", errno);
+                exit(1);
+        }
+        if (pid == 0) {
+                struct rlimit rlim = {0, 0};
+                (void)setrlimit(RLIMIT_CORE, &rlim);
+                *(volatile int *)0xffffffffff600000UL;
+                exit(0);
+        }
+        wait(&wstatus);
+        if (WIFEXITED(wstatus)) {
+                g_vsyscall = true;
+        }
+}
+#endif
+
 int main(void)
 {
         int pipefd[2];
         int exec_fd;
 
+        vsyscall();
+
         atexit(ate);
         make_private_tmp();
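A complementary userspace check, not taken from the patch: instead of probing the page with a load the way vsyscall() above does, a process can scan its own /proc/self/maps for the [vsyscall] line:

#include <stdio.h>
#include <string.h>

int main(void)
{
        FILE *f = fopen("/proc/self/maps", "r");
        char line[256];

        if (!f)
                return 1;
        while (fgets(line, sizeof(line), f)) {
                if (strstr(line, "[vsyscall]")) {
                        fputs(line, stdout);    /* mapping present */
                        fclose(f);
                        return 0;
                }
        }
        fclose(f);
        return 2;                               /* absent, e.g. vsyscall=none */
}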
@@ -261,9 +297,9 @@ int main(void)
         snprintf(buf0 + MAPS_OFFSET, sizeof(buf0) - MAPS_OFFSET,
                  "/tmp/#%llu (deleted)\n", (unsigned long long)st.st_ino);
 
         /* Test /proc/$PID/maps */
         {
+                const size_t len = strlen(buf0) + (g_vsyscall ? strlen(str_vsyscall) : 0);
                 char buf[256];
                 ssize_t rv;
                 int fd;
@@ -274,13 +310,16 @@ int main(void)
                         return 1;
                 }
 
                 rv = read(fd, buf, sizeof(buf));
-                assert(rv == strlen(buf0));
+                assert(rv == len);
                 assert(memcmp(buf, buf0, strlen(buf0)) == 0);
+                if (g_vsyscall) {
+                        assert(memcmp(buf + strlen(buf0), str_vsyscall, strlen(str_vsyscall)) == 0);
+                }
         }
 
         /* Test /proc/$PID/smaps */
         {
-                char buf[1024];
+                char buf[4096];
                 ssize_t rv;
                 int fd;
@@ -319,6 +358,10 @@ int main(void)
                 for (i = 0; i < sizeof(S)/sizeof(S[0]); i++) {
                         assert(memmem(buf, rv, S[i], strlen(S[i])));
                 }
+
+                if (g_vsyscall) {
+                        assert(memmem(buf, rv, str_vsyscall, strlen(str_vsyscall)));
+                }
         }
 
         /* Test /proc/$PID/smaps_rollup */