
drm/i915: Introduce the i915_user_extension_method

An idea for extending uABI inspired by Vulkan's extension chains.
Instead of expanding the data struct for each ioctl every time we need
to add a new feature, define an extension chain instead. As we add
optional interfaces to control the ioctl, we define a new extension
struct that can be linked into the ioctl data only when required by the
user. The key advantage is being able to ignore large control structs for
optional interfaces/extensions, while still being able to process them in a
consistent manner.

In comparison to other extensible ioctls, the key difference is the
use of a linked chain of extension structs vs an array of tagged
pointers. For example,

struct drm_amdgpu_cs_chunk {
        __u32           chunk_id;
        __u32           length_dw;
        __u64           chunk_data;
};

struct drm_amdgpu_cs_in {
        __u32           ctx_id;
        __u32           bo_list_handle;
        __u32           num_chunks;
        __u32           _pad;
        __u64           chunks;
};

allows userspace to pass in an array of pointers to extension structs, but
must therefore keep constructing that array alongside the command stream.
In dynamic situations like that, a linked list is preferred and does not
suffer from extra cache line misses, as the extension structs themselves
must still be loaded separately from the chunks array.
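
For illustration only, a minimal sketch of how userspace might build such a
chain. The hypothetical_ext_foo struct, the HYPOTHETICAL_EXT_* names and the
args.extensions field are invented purely for this example; only struct
i915_user_extension (introduced below) comes from this patch.

/* Invented for illustration; not part of this patch. */
enum { HYPOTHETICAL_EXT_A, HYPOTHETICAL_EXT_B };

struct hypothetical_ext_foo {
	struct i915_user_extension base; /* conventionally the first member */
	__u32 value;
	__u32 pad; /* must be zero */
};

struct hypothetical_ext_foo ext_b = {
	.base = { .name = HYPOTHETICAL_EXT_B }, /* next_extension == 0 ends the chain */
	.value = 2,
};

struct hypothetical_ext_foo ext_a = {
	.base = {
		.next_extension = (uintptr_t)&ext_b, /* link to the next extension */
		.name = HYPOTHETICAL_EXT_A,
	},
	.value = 1,
};

/* The ioctl argument struct then only needs a single u64 to carry the chain. */
args.extensions = (uintptr_t)&ext_a;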

v2: Apply the tail call optimisation directly to nip the worry of stack
overflow in the bud.
v3: Defend against recursion.
v4: Fixup local types to match new uabi

Opens:
- do we include the result as an out-field in each chain?
struct i915_user_extension {
	__u64 next_extension;
	__u64 name;
	__s32 result;
	__u32 mbz; /* reserved for future use */
};
* Undecided, so provision some room for future expansion.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190322092325.5883-1-chris@chris-wilson.co.uk
Chris Wilson 2019-03-22 09:23:22 +00:00
parent b9d52d381e
commit 9d1305ef80
5 changed files with 135 additions and 0 deletions


@@ -46,6 +46,7 @@ i915-y := i915_drv.o \
	  i915_sw_fence.o \
	  i915_syncmap.o \
	  i915_sysfs.o \
	  i915_user_extensions.o \
	  intel_csr.o \
	  intel_device_info.o \
	  intel_pm.o \


@@ -0,0 +1,61 @@
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/nospec.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>

#include <uapi/drm/i915_drm.h>

#include "i915_user_extensions.h"
#include "i915_utils.h"

int i915_user_extensions(struct i915_user_extension __user *ext,
			 const i915_user_extension_fn *tbl,
			 unsigned int count,
			 void *data)
{
	unsigned int stackdepth = 512;

	while (ext) {
		int i, err;
		u32 name;
		u64 next;

		if (!stackdepth--) /* recursion vs useful flexibility */
			return -E2BIG;

		err = check_user_mbz(&ext->flags);
		if (err)
			return err;

		for (i = 0; i < ARRAY_SIZE(ext->rsvd); i++) {
			err = check_user_mbz(&ext->rsvd[i]);
			if (err)
				return err;
		}

		if (get_user(name, &ext->name))
			return -EFAULT;

		err = -EINVAL;
		if (name < count) {
			name = array_index_nospec(name, count);
			if (tbl[name])
				err = tbl[name](ext, data);
		}
		if (err)
			return err;

		if (get_user(next, &ext->next_extension) ||
		    overflows_type(next, ext))
			return -EFAULT;

		ext = u64_to_user_ptr(next);
	}

	return 0;
}
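
To show how this entry point is meant to be consumed, here is a hypothetical
kernel-side sketch; the handler, the HYPOTHETICAL_EXT_A name (reused from the
userspace sketch above), the args->extensions field and the create_data cookie
are all invented for illustration and are not part of this patch. A table of
handlers is indexed by the extension name, and the chain walker is handed the
head pointer taken from the ioctl arguments.

/* Hypothetical handler; fleshed out further below in the i915_utils.h notes. */
static int ext_set_foo(struct i915_user_extension __user *ext, void *data)
{
	/* copy and validate the extension-specific fields here */
	return 0;
}

static const i915_user_extension_fn create_extensions[] = {
	[HYPOTHETICAL_EXT_A] = ext_set_foo,
};

/* Inside the hypothetical ioctl, after copying the fixed-size args struct: */
err = i915_user_extensions(u64_to_user_ptr(args->extensions),
			   create_extensions,
			   ARRAY_SIZE(create_extensions),
			   &create_data);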


@@ -0,0 +1,20 @@
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#ifndef I915_USER_EXTENSIONS_H
#define I915_USER_EXTENSIONS_H

struct i915_user_extension;

typedef int (*i915_user_extension_fn)(struct i915_user_extension __user *ext,
				      void *data);

int i915_user_extensions(struct i915_user_extension __user *ext,
			 const i915_user_extension_fn *tbl,
			 unsigned int count,
			 void *data);

#endif /* I915_USER_EXTENSIONS_H */


@@ -105,6 +105,37 @@
	__T; \
})

/*
 * container_of_user: Extract the superclass from a pointer to a member.
 *
 * Exactly like container_of() with the exception that it plays nicely
 * with sparse for __user @ptr.
 */
#define container_of_user(ptr, type, member) ({ \
	void __user *__mptr = (void __user *)(ptr); \
	BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) && \
			 !__same_type(*(ptr), void), \
			 "pointer type mismatch in container_of()"); \
	((type __user *)(__mptr - offsetof(type, member))); })
/*
 * check_user_mbz: Check that a user value exists and is zero
 *
 * Frequently in our uABI we reserve space for future extensions, and
 * to ensure that userspace is prepared we enforce that space must
 * be zero. (Then any future extension can safely assume a default value
 * of 0.)
 *
 * check_user_mbz() combines checking that the user pointer is accessible
 * and that the contained value is zero.
 *
 * Returns: -EFAULT if not accessible, -EINVAL if !zero, or 0 on success.
 */
#define check_user_mbz(U) ({ \
	typeof(*(U)) mbz__; \
	get_user(mbz__, (U)) ? -EFAULT : mbz__ ? -EINVAL : 0; \
})

static inline u64 ptr_to_u64(const void *ptr)
{
	return (uintptr_t)ptr;
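
Tying these helpers back to the hypothetical handler stubbed above, a sketch of
how an extension handler would typically recover its full struct and enforce the
reserved fields; struct hypothetical_ext_foo is the invented example struct from
the earlier userspace sketch, not part of this patch.

static int ext_set_foo(struct i915_user_extension __user *base, void *data)
{
	/* Recover the containing (hypothetical) extension from the base pointer. */
	struct hypothetical_ext_foo __user *ext =
		container_of_user(base, struct hypothetical_ext_foo, base);
	u32 value;

	if (get_user(value, &ext->value))
		return -EFAULT;

	/* Reserved padding must be zero so it can gain meaning later. */
	return check_user_mbz(&ext->pad);
}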


@@ -62,6 +62,28 @@ extern "C" {
#define I915_ERROR_UEVENT		"ERROR"
#define I915_RESET_UEVENT		"RESET"

/*
 * i915_user_extension: Base class for defining a chain of extensions
 *
 * Many interfaces need to grow over time. In most cases we can simply
 * extend the struct and have userspace pass in more data. Another option,
 * as demonstrated by Vulkan's approach to providing extensions for forward
 * and backward compatibility, is to use a list of optional structs to
 * provide those extra details.
 *
 * The key advantage to using an extension chain is that it allows us to
 * redefine the interface more easily than an ever growing struct of
 * increasing complexity, and for large parts of that interface to be
 * entirely optional. The downside is more pointer chasing; chasing across
 * the __user boundary with pointers encapsulated inside u64.
 */
struct i915_user_extension {
	__u64 next_extension;
	__u32 name;
	__u32 flags; /* All undefined bits must be zero. */
	__u32 rsvd[4]; /* Reserved for future use; must be zero. */
};

/*
 * MOCS indexes used for GPU surfaces, defining the cacheability of the
 * surface data and the coherency for this data wrt. CPU vs. GPU accesses.