
usercopy: Allow strict enforcement of whitelists

This introduces CONFIG_HARDENED_USERCOPY_FALLBACK to control the
behavior of hardened usercopy whitelist violations. By default, whitelist
violations will continue to WARN() so that any bad or missing usercopy
whitelists can be discovered without being too disruptive.

If this config is disabled at build time, or a system is booted with
"slab_common.usercopy_fallback=0", usercopy whitelist violations will
BUG() instead of WARN(). This is useful for admins who want to enforce
usercopy whitelists immediately.

Suggested-by: Matthew Garrett <mjg59@google.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Kees Cook 2017-11-30 13:04:32 -08:00
parent afcc90f862
commit 2d891fbc3b
5 changed files with 28 additions and 2 deletions
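
The policy after this change has three outcomes: a copy that stays inside the
whitelisted region is allowed; a copy that leaves the whitelist but stays
inside the slab object WARN()s and proceeds, while the fallback is enabled;
anything else is rejected. Below is a minimal userspace C model of that
decision logic (not kernel code; check_copy(), struct object, and the example
sizes are invented for illustration):

/* Toy model of the check this commit modifies; names are illustrative. */
#include <stdbool.h>
#include <stdio.h>

static bool usercopy_fallback = true;   /* models the boot parameter */

struct object {
	unsigned long useroffset;   /* start of whitelisted region */
	unsigned long usersize;     /* length of whitelisted region */
	unsigned long object_size;  /* full slab object size */
};

/* Returns 0 if the copy may proceed, -1 if it must be rejected. */
static int check_copy(const struct object *obj, unsigned long offset,
		      unsigned long n)
{
	/* Entirely inside the whitelisted region: always allowed. */
	if (offset >= obj->useroffset &&
	    offset - obj->useroffset <= obj->usersize &&
	    n <= obj->usersize - (offset - obj->useroffset))
		return 0;

	/* Outside the whitelist but inside the object: warn and allow,
	 * but only while the fallback is enabled. */
	if (usercopy_fallback &&
	    offset <= obj->object_size && n <= obj->object_size - offset) {
		fprintf(stderr, "WARN: non-whitelisted usercopy\n");
		return 0;
	}

	/* Strict mode: reject (the kernel BUG()s here). */
	return -1;
}

int main(void)
{
	struct object obj = { .useroffset = 16, .usersize = 32,
			      .object_size = 128 };

	printf("%d\n", check_copy(&obj, 16, 32));  /* 0: whitelisted */
	printf("%d\n", check_copy(&obj, 0, 64));   /* 0, with WARN */
	usercopy_fallback = false;
	printf("%d\n", check_copy(&obj, 0, 64));   /* -1: rejected */
	return 0;
}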

include/linux/slab.h

@@ -135,6 +135,8 @@ struct mem_cgroup;
 void __init kmem_cache_init(void);
 bool slab_is_available(void);
 
+extern bool usercopy_fallback;
+
 struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 			size_t align, slab_flags_t flags,
 			void (*ctor)(void *));

mm/slab.c

@@ -4426,7 +4426,8 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
	 * to be a temporary method to find any missing usercopy
	 * whitelists.
	 */
-	if (offset <= cachep->object_size &&
+	if (usercopy_fallback &&
+	    offset <= cachep->object_size &&
	    n <= cachep->object_size - offset) {
		usercopy_warn("SLAB object", cachep->name, to_user, offset, n);
		return;
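
One detail worth calling out in the condition this hunk extends: the bounds
test is split into "offset <= object_size" and "n <= object_size - offset"
rather than written as the naive "offset + n <= object_size", because the
unsigned addition in the naive form can wrap around for huge n. A standalone
demo of the difference (the values are made up; this program is not part of
the commit):

#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned long size = 128, offset = 64;
	unsigned long n = ULONG_MAX - 32;  /* absurd copy length */

	/* Naive form: offset + n wraps to 31, so the check wrongly passes. */
	printf("naive: %s\n", offset + n <= size ? "pass" : "fail");

	/* Split form used here: no addition, so no wraparound. */
	printf("split: %s\n",
	       offset <= size && n <= size - offset ? "pass" : "fail");
	return 0;
}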

mm/slab_common.c

@@ -31,6 +31,14 @@ LIST_HEAD(slab_caches);
 DEFINE_MUTEX(slab_mutex);
 struct kmem_cache *kmem_cache;
 
+#ifdef CONFIG_HARDENED_USERCOPY
+bool usercopy_fallback __ro_after_init =
+		IS_ENABLED(CONFIG_HARDENED_USERCOPY_FALLBACK);
+module_param(usercopy_fallback, bool, 0400);
+MODULE_PARM_DESC(usercopy_fallback,
+		"WARN instead of reject usercopy whitelist violations");
+#endif
+
 static LIST_HEAD(slab_caches_to_rcu_destroy);
 static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
 static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
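
A usage note on the knob added above: since usercopy_fallback is
__ro_after_init, it can only be chosen at boot via the kernel command line
(e.g. "slab_common.usercopy_fallback=0" for strict enforcement) and cannot be
flipped at runtime. The 0400 permission makes the current value readable by
root only; since this code is built in, it should show up as
/sys/module/slab_common/parameters/usercopy_fallback.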

mm/slub.c

@@ -3859,7 +3859,8 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
	 * whitelists.
	 */
	object_size = slab_ksize(s);
-	if (offset <= object_size && n <= object_size - offset) {
+	if (usercopy_fallback &&
+	    offset <= object_size && n <= object_size - offset) {
		usercopy_warn("SLUB object", s->name, to_user, offset, n);
		return;
	}

security/Kconfig

@@ -152,6 +152,20 @@ config HARDENED_USERCOPY
	  or are part of the kernel text. This kills entire classes
	  of heap overflow exploits and similar kernel memory exposures.
 
+config HARDENED_USERCOPY_FALLBACK
+	bool "Allow usercopy whitelist violations to fallback to object size"
+	depends on HARDENED_USERCOPY
+	default y
+	help
+	  This is a temporary option that allows missing usercopy whitelists
+	  to be discovered via a WARN() to the kernel log, instead of
+	  rejecting the copy, falling back to non-whitelisted hardened
+	  usercopy that checks the slab allocation size instead of the
+	  whitelist size. This option will be removed once it seems like
+	  all missing usercopy whitelists have been identified and fixed.
+	  Booting with "slab_common.usercopy_fallback=Y/N" can change
+	  this setting.
+
 config HARDENED_USERCOPY_PAGESPAN
	bool "Refuse to copy allocations that span multiple pages"
	depends on HARDENED_USERCOPY