
Merge 5.1-rc3 into char-misc-next

We want the char-misc fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Greg Kroah-Hartman 2019-04-01 07:34:09 +02:00
commit 62fa78436e
558 changed files with 6044 additions and 7186 deletions


@ -224,3 +224,5 @@ Yakir Yang <kuankuan.y@gmail.com> <ykk@rock-chips.com>
Yusuke Goda <goda.yusuke@renesas.com>
Gustavo Padovan <gustavo@las.ic.unicamp.br>
Gustavo Padovan <padovan@profusion.mobi>
Changbin Du <changbin.du@intel.com> <changbin.du@intel.com>
Changbin Du <changbin.du@intel.com> <changbin.du@gmail.com>


@ -12,10 +12,15 @@ Required properties:
Subnodes:
The integrated switch subnode should be specified according to the binding
described in dsa/dsa.txt. As the QCA8K switches do not have a N:N mapping of
port and PHY id, each subnode describing a port needs to have a valid phandle
referencing the internal PHY connected to it. The CPU port of this switch is
always port 0.
described in dsa/dsa.txt. If the QCA8K switch is connected to a SoC's external
mdio-bus, each subnode describing a port needs to have a valid phandle
referencing the internal PHY it is connected to. This is because there's no
N:N mapping of port and PHY id.
Don't use mixed external and internal mdio-bus configurations, as this is
not supported by the hardware.
The CPU port of this switch is always port 0.
A CPU port node has the following optional node:
@ -31,8 +36,9 @@ For QCA8K the 'fixed-link' sub-node supports only the following properties:
- 'full-duplex' (boolean, optional), to indicate that full duplex is
used. When absent, half duplex is assumed.
Example:
Examples:
for the external mdio-bus configuration:
&mdio0 {
phy_port1: phy@0 {
@ -55,12 +61,12 @@ Example:
reg = <4>;
};
switch0@0 {
switch@10 {
compatible = "qca,qca8337";
#address-cells = <1>;
#size-cells = <0>;
reg = <0>;
reg = <0x10>;
ports {
#address-cells = <1>;
@ -108,3 +114,56 @@ Example:
};
};
};
for the internal master mdio-bus configuration:
&mdio0 {
switch@10 {
compatible = "qca,qca8337";
#address-cells = <1>;
#size-cells = <0>;
reg = <0x10>;
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
label = "cpu";
ethernet = <&gmac1>;
phy-mode = "rgmii";
fixed-link {
speed = <1000>;
full-duplex;
};
};
port@1 {
reg = <1>;
label = "lan1";
};
port@2 {
reg = <2>;
label = "lan2";
};
port@3 {
reg = <3>;
label = "lan3";
};
port@4 {
reg = <4>;
label = "lan4";
};
port@5 {
reg = <5>;
label = "wan";
};
};
};
};


@ -16,6 +16,7 @@ Required properties:
* "mediatek,mt8127-uart" for MT8127 compatible UARTS
* "mediatek,mt8135-uart" for MT8135 compatible UARTS
* "mediatek,mt8173-uart" for MT8173 compatible UARTS
* "mediatek,mt8183-uart", "mediatek,mt6577-uart" for MT8183 compatible UARTS
* "mediatek,mt6577-uart" for MT6577 and all of the above
- reg: The base address of the UART register bank.


@ -12,11 +12,13 @@ CONTENTS
(4) Filesystem context security.
(5) VFS filesystem context operations.
(5) VFS filesystem context API.
(6) Parameter description.
(6) Superblock creation helpers.
(7) Parameter helper functions.
(7) Parameter description.
(8) Parameter helper functions.
========
@ -41,12 +43,15 @@ The creation of new mounts is now to be done in a multistep process:
(7) Destroy the context.
To support this, the file_system_type struct gains a new field:
To support this, the file_system_type struct gains two new fields:
int (*init_fs_context)(struct fs_context *fc);
const struct fs_parameter_description *parameters;
which is invoked to set up the filesystem-specific parts of a filesystem
context, including the additional space.
The first is invoked to set up the filesystem-specific parts of a filesystem
context, including the additional space, and the second points to the
parameter description for validation at registration time and querying by a
future system call.
Note that security initialisation is done *after* the filesystem is called so
that the namespaces may be adjusted first.
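As a sketch of how the two new fields fit together (all foo_* names are
hypothetical), a filesystem would register something like:

        static struct file_system_type foo_fs_type = {
                .owner            = THIS_MODULE,
                .name             = "foo",
                .init_fs_context  = foo_init_fs_context,
                .parameters       = &foo_fs_parameters,
                .kill_sb          = kill_anon_super,
        };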
@ -73,9 +78,9 @@ context. This is represented by the fs_context structure:
void *s_fs_info;
unsigned int sb_flags;
unsigned int sb_flags_mask;
unsigned int s_iflags;
unsigned int lsm_flags;
enum fs_context_purpose purpose:8;
bool sloppy:1;
bool silent:1;
...
};
@ -141,6 +146,10 @@ The fs_context fields are as follows:
Which bits SB_* flags are to be set/cleared in super_block::s_flags.
(*) unsigned int s_iflags
These will be bitwise-OR'd with s->s_iflags when a superblock is created.
(*) enum fs_context_purpose
This indicates the purpose for which the context is intended. The
@ -150,17 +159,6 @@ The fs_context fields are as follows:
FS_CONTEXT_FOR_SUBMOUNT -- New automatic submount of extant mount
FS_CONTEXT_FOR_RECONFIGURE -- Change an existing mount
(*) bool sloppy
(*) bool silent
These are set if the sloppy or silent mount options are given.
[NOTE] sloppy is probably unnecessary when userspace passes over one
option at a time since the error can just be ignored if userspace deems it
to be unimportant.
[NOTE] silent is probably redundant with sb_flags & SB_SILENT.
The mount context is created by calling vfs_new_fs_context() or
vfs_dup_fs_context() and is destroyed with put_fs_context(). Note that the
structure is not refcounted.
@ -342,28 +340,47 @@ number of operations used by the new mount code for this purpose:
It should return 0 on success or a negative error code on failure.
=================================
VFS FILESYSTEM CONTEXT OPERATIONS
=================================
==========================
VFS FILESYSTEM CONTEXT API
==========================
There are four operations for creating a filesystem context and
one for destroying a context:
There are four operations for creating a filesystem context and one for
destroying a context:
(*) struct fs_context *vfs_new_fs_context(struct file_system_type *fs_type,
struct dentry *reference,
unsigned int sb_flags,
unsigned int sb_flags_mask,
enum fs_context_purpose purpose);
(*) struct fs_context *fs_context_for_mount(
struct file_system_type *fs_type,
unsigned int sb_flags);
Create a filesystem context for a given filesystem type and purpose. This
allocates the filesystem context, sets the superblock flags, initialises
the security and calls fs_type->init_fs_context() to initialise the
filesystem private data.
Allocate a filesystem context for the purpose of setting up a new mount,
whether that be with a new superblock or sharing an existing one. This
sets the superblock flags, initialises the security and calls
fs_type->init_fs_context() to initialise the filesystem private data.
reference can be NULL or it may indicate the root dentry of a superblock
that is going to be reconfigured (FS_CONTEXT_FOR_RECONFIGURE) or
the automount point that triggered a submount (FS_CONTEXT_FOR_SUBMOUNT).
This is provided as a source of namespace information.
fs_type specifies the filesystem type that will manage the context and
sb_flags presets the superblock flags stored therein.
(*) struct fs_context *fs_context_for_reconfigure(
struct dentry *dentry,
unsigned int sb_flags,
unsigned int sb_flags_mask);
Allocate a filesystem context for the purpose of reconfiguring an
existing superblock. dentry provides a reference to the superblock to be
configured. sb_flags and sb_flags_mask indicate which superblock flags
need changing and to what.
(*) struct fs_context *fs_context_for_submount(
struct file_system_type *fs_type,
struct dentry *reference);
Allocate a filesystem context for the purpose of creating a new mount for
an automount point or other derived superblock. fs_type specifies the
filesystem type that will manage the context and the reference dentry
supplies the parameters. Namespaces are propagated from the reference
dentry's superblock also.
Note that it's not a requirement that the reference dentry be of the same
filesystem type as fs_type.
(*) struct fs_context *vfs_dup_fs_context(struct fs_context *src_fc);
@ -390,20 +407,6 @@ context pointer or a negative error code.
For the remaining operations, if an error occurs, a negative error code will be
returned.
(*) int vfs_get_tree(struct fs_context *fc);
Get or create the mountable root and superblock, using the parameters in
the filesystem context to select/configure the superblock. This invokes
the ->validate() op and then the ->get_tree() op.
[NOTE] ->validate() could perhaps be rolled into ->get_tree() and
->reconfigure().
(*) struct vfsmount *vfs_create_mount(struct fs_context *fc);
Create a mount given the parameters in the specified filesystem context.
Note that this does not attach the mount to anything.
(*) int vfs_parse_fs_param(struct fs_context *fc,
struct fs_parameter *param);
@ -432,17 +435,80 @@ returned.
clear the pointer, but then becomes responsible for disposing of the
object.
(*) int vfs_parse_fs_string(struct fs_context *fc, char *key,
(*) int vfs_parse_fs_string(struct fs_context *fc, const char *key,
const char *value, size_t v_size);
A wrapper around vfs_parse_fs_param() that just passes a constant string.
A wrapper around vfs_parse_fs_param() that copies the value string it is
passed.
(*) int generic_parse_monolithic(struct fs_context *fc, void *data);
Parse a sys_mount() data page, assuming the form to be a text list
consisting of key[=val] options separated by commas. Each item in the
list is passed to vfs_mount_option(). This is the default when the
->parse_monolithic() operation is NULL.
->parse_monolithic() method is NULL.
(*) int vfs_get_tree(struct fs_context *fc);
Get or create the mountable root and superblock, using the parameters in
the filesystem context to select/configure the superblock. This invokes
the ->get_tree() method.
(*) struct vfsmount *vfs_create_mount(struct fs_context *fc);
Create a mount given the parameters in the specified filesystem context.
Note that this does not attach the mount to anything.
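Putting the above calls together, a kernel-internal mount might proceed
roughly as follows; this is only a sketch (foo_fs_type is hypothetical and
error handling is elided):

        struct fs_context *fc;
        struct vfsmount *mnt;

        fc = fs_context_for_mount(&foo_fs_type, 0);
        if (IS_ERR(fc))
                return PTR_ERR(fc);

        vfs_parse_fs_string(fc, "source", "/dev/sda1", 9);
        vfs_get_tree(fc);               /* select/configure the superblock */
        mnt = vfs_create_mount(fc);     /* a mount, not yet attached anywhere */
        put_fs_context(fc);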
===========================
SUPERBLOCK CREATION HELPERS
===========================
A number of VFS helpers are available for use by filesystems for the creation
or looking up of superblocks.
(*) struct super_block *
sget_fc(struct fs_context *fc,
int (*test)(struct super_block *sb, struct fs_context *fc),
int (*set)(struct super_block *sb, struct fs_context *fc));
This is the core routine. If test is non-NULL, it searches for an
existing superblock matching the criteria held in the fs_context, using
the test function to match them. If no match is found, a new superblock
is created and the set function is called to set it up.
Prior to the set function being called, fc->s_fs_info will be transferred
to sb->s_fs_info - and fc->s_fs_info will be cleared if set returns
success (ie. 0).
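As a sketch of how a filesystem's ->get_tree() might use it (the foo_*
helpers are hypothetical and error handling is trimmed):

        static int foo_test_super(struct super_block *sb, struct fs_context *fc)
        {
                return sb->s_fs_info == fc->s_fs_info;
        }

        static int foo_set_super(struct super_block *sb, struct fs_context *fc)
        {
                /* fc->s_fs_info has already been moved to sb->s_fs_info */
                return 0;
        }

        static int foo_get_tree(struct fs_context *fc)
        {
                struct super_block *sb;

                sb = sget_fc(fc, foo_test_super, foo_set_super);
                if (IS_ERR(sb))
                        return PTR_ERR(sb);
                /* a brand new superblock would still need to be filled in */
                fc->root = dget(sb->s_root);
                return 0;
        }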
The following helpers all wrap sget_fc():
(*) int vfs_get_super(struct fs_context *fc,
enum vfs_get_super_keying keying,
int (*fill_super)(struct super_block *sb,
struct fs_context *fc))
This creates/looks up a deviceless superblock. The keying indicates how
many superblocks of this type may exist and in what manner they may be
shared:
(1) vfs_get_single_super
Only one such superblock may exist in the system. Any further
attempt to get a new superblock gets this one (and any parameter
differences are ignored).
(2) vfs_get_keyed_super
Multiple superblocks of this type may exist and they're keyed on
their s_fs_info pointer (for example this may refer to a
namespace).
(3) vfs_get_independent_super
Multiple independent superblocks of this type may exist. This
function never matches an existing one and always creates a new
one.
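With these helpers, a simple deviceless filesystem's ->get_tree() can be a
one-liner; a sketch, with baz_fill_super hypothetical:

        static int baz_get_tree(struct fs_context *fc)
        {
                return vfs_get_super(fc, vfs_get_keyed_super, baz_fill_super);
        }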
=====================
@ -454,35 +520,22 @@ There's a core description struct that links everything together:
struct fs_parameter_description {
const char name[16];
u8 nr_params;
u8 nr_alt_keys;
u8 nr_enums;
bool ignore_unknown;
bool no_source;
const char *const *keys;
const struct constant_table *alt_keys;
const struct fs_parameter_spec *specs;
const struct fs_parameter_enum *enums;
};
For example:
enum afs_param {
enum {
Opt_autocell,
Opt_bar,
Opt_dyn,
Opt_foo,
Opt_source,
nr__afs_params
};
static const struct fs_parameter_description afs_fs_parameters = {
.name = "kAFS",
.nr_params = nr__afs_params,
.nr_alt_keys = ARRAY_SIZE(afs_param_alt_keys),
.nr_enums = ARRAY_SIZE(afs_param_enums),
.keys = afs_param_keys,
.alt_keys = afs_param_alt_keys,
.specs = afs_param_specs,
.enums = afs_param_enums,
};
@ -494,28 +547,24 @@ The members are as follows:
The name to be used in error messages generated by the parse helper
functions.
(2) u8 nr_params;
(2) const struct fs_parameter_specification *specs;
The number of discrete parameter identifiers. This indicates the number
of elements in the ->types[] array and also limits the values that may be
used in the values that the ->keys[] array maps to.
Table of parameter specifications, terminated with a null entry, where the
entries are of type:
It is expected that, for example, two parameters that are related, say
"acl" and "noacl" with have the same ID, but will be flagged to indicate
that one is the inverse of the other. The value can then be picked out
from the parse result.
(3) const struct fs_parameter_specification *specs;
Table of parameter specifications, where the entries are of type:
struct fs_parameter_type {
enum fs_parameter_spec type:8;
u8 flags;
struct fs_parameter_spec {
const char *name;
u8 opt;
enum fs_parameter_type type:8;
unsigned short flags;
};
and the parameter identifier is the index to the array. 'type' indicates
the desired value type and must be one of:
The 'name' field is a string to match exactly to the parameter key (no
wildcards, patterns and no case-independence) and 'opt' is the value that
will be returned by the fs_parser() function in the case of a successful
match.
The 'type' field indicates the desired value type and must be one of:
TYPE NAME EXPECTED VALUE RESULT IN
======================= ======================= =====================
@ -525,85 +574,65 @@ The members are as follows:
fs_param_is_u32_octal 32-bit octal int result->uint_32
fs_param_is_u32_hex 32-bit hex int result->uint_32
fs_param_is_s32 32-bit signed int result->int_32
fs_param_is_u64 64-bit unsigned int result->uint_64
fs_param_is_enum Enum value name result->uint_32
fs_param_is_string Arbitrary string param->string
fs_param_is_blob Binary blob param->blob
fs_param_is_blockdev Blockdev path * Needs lookup
fs_param_is_path Path * Needs lookup
fs_param_is_fd File descriptor param->file
And each parameter can be qualified with 'flags':
fs_param_v_optional The value is optional
fs_param_neg_with_no If key name is prefixed with "no", it is false
fs_param_neg_with_empty If value is "", it is false
fs_param_deprecated The parameter is deprecated.
For example:
static const struct fs_parameter_spec afs_param_specs[nr__afs_params] = {
[Opt_autocell] = { fs_param_is flag },
[Opt_bar] = { fs_param_is_enum },
[Opt_dyn] = { fs_param_is flag },
[Opt_foo] = { fs_param_is_bool, fs_param_neg_with_no },
[Opt_source] = { fs_param_is_string },
};
fs_param_is_fd File descriptor result->int_32
Note that if the value is of fs_param_is_bool type, fs_parse() will try
to match any string value against "0", "1", "no", "yes", "false", "true".
[!] NOTE that the table must be sorted according to primary key name so
that ->keys[] is also sorted.
Each parameter can also be qualified with 'flags':
(4) const char *const *keys;
fs_param_v_optional The value is optional
fs_param_neg_with_no result->negated set if key is prefixed with "no"
fs_param_neg_with_empty result->negated set if value is ""
fs_param_deprecated The parameter is deprecated.
Table of primary key names for the parameters. There must be one entry
per defined parameter. The table is optional if ->nr_params is 0. The
table is just an array of names e.g.:
These are wrapped with a number of convenience wrappers:
static const char *const afs_param_keys[nr__afs_params] = {
[Opt_autocell] = "autocell",
[Opt_bar] = "bar",
[Opt_dyn] = "dyn",
[Opt_foo] = "foo",
[Opt_source] = "source",
MACRO SPECIFIES
======================= ===============================================
fsparam_flag() fs_param_is_flag
fsparam_flag_no() fs_param_is_flag, fs_param_neg_with_no
fsparam_bool() fs_param_is_bool
fsparam_u32() fs_param_is_u32
fsparam_u32oct() fs_param_is_u32_octal
fsparam_u32hex() fs_param_is_u32_hex
fsparam_s32() fs_param_is_s32
fsparam_u64() fs_param_is_u64
fsparam_enum() fs_param_is_enum
fsparam_string() fs_param_is_string
fsparam_blob() fs_param_is_blob
fsparam_bdev() fs_param_is_blockdev
fsparam_path() fs_param_is_path
fsparam_fd() fs_param_is_fd
all of which take two arguments, name string and option number - for
example:
static const struct fs_parameter_spec afs_param_specs[] = {
fsparam_flag ("autocell", Opt_autocell),
fsparam_flag ("dyn", Opt_dyn),
fsparam_string ("source", Opt_source),
fsparam_flag_no ("foo", Opt_foo),
{}
};
[!] NOTE that the table must be sorted such that the table can be searched
with bsearch() using strcmp(). This means that the Opt_* values must
correspond to the entries in this table.
(5) const struct constant_table *alt_keys;
u8 nr_alt_keys;
Table of additional key names and their mappings to parameter ID plus the
number of elements in the table. This is optional. The table is just an
array of { name, integer } pairs, e.g.:
static const struct constant_table afs_param_keys[] = {
{ "baz", Opt_bar },
{ "dynamic", Opt_dyn },
};
[!] NOTE that the table must be sorted such that strcmp() can be used with
bsearch() to search the entries.
The parameter ID can also be fs_param_key_removed to indicate that a
deprecated parameter has been removed and that an error will be given.
This differs from fs_param_deprecated where the parameter may still have
an effect.
Further, the behaviour of the parameter may differ when an alternate name
is used (for instance with NFS, "v3", "v4.2", etc. are alternate names).
An additional macro, __fsparam(), is provided that takes an additional pair
of arguments to specify the type and the flags for anything that doesn't
match one of the above macros.
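Assuming an argument order of (type, name, option, flags), a __fsparam() use
might look like:

        static const struct fs_parameter_spec bar_param_specs[] = {
                __fsparam(fs_param_is_string, "source", Opt_source,
                          fs_param_v_optional),
                {}
        };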
(6) const struct fs_parameter_enum *enums;
u8 nr_enums;
Table of enum value names to integer mappings and the number of elements
stored therein. This is of type:
Table of enum value names to integer mappings, terminated with a null
entry. This is of type:
struct fs_parameter_enum {
u8 param_id;
u8 opt;
char name[14];
u8 value;
};
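As an illustration (the Opt_mode option and the FOO_MODE_* values are
hypothetical), such a table might look like:

        static const struct fs_parameter_enum foo_param_enums[] = {
                { Opt_mode, "loose",  FOO_MODE_LOOSE },
                { Opt_mode, "strict", FOO_MODE_STRICT },
                {}
        };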
@ -621,11 +650,6 @@ The members are as follows:
try to look the value up in the enum table and the result will be stored
in the parse result.
(7) bool no_source;
If this is set, fs_parse() will ignore any "source" parameter and not
pass it to the filesystem.
The parser should be pointed to by the parser pointer in the file_system_type
struct as this will provide validation on registration (if
CONFIG_VALIDATE_FS_PARSER=y) and will allow the description to be queried from
@ -650,9 +674,8 @@ process the parameters it is given.
int value;
};
and it must be sorted such that it can be searched using bsearch() using
strcmp(). If a match is found, the corresponding value is returned. If a
match isn't found, the not_found value is returned instead.
If a match is found, the corresponding value is returned. If a match
isn't found, the not_found value is returned instead.
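A sketch of typical use, assuming a null-terminated table (names and values
illustrative):

        static const struct constant_table foo_levels[] = {
                { "high",   3 },
                { "low",    1 },
                { "medium", 2 },
                {}
        };

        int level = lookup_constant(foo_levels, param->string, -1);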
(*) bool validate_constant_table(const struct constant_table *tbl,
size_t tbl_size,
@ -665,36 +688,36 @@ process the parameters it is given.
should just be set to lie inside the low-to-high range.
If all is good, true is returned. If the table is invalid, errors are
logged to dmesg, the stack is dumped and false is returned.
logged to dmesg and false is returned.
(*) bool fs_validate_description(const struct fs_parameter_description *desc);
This performs some validation checks on a parameter description. It
returns true if the description is good and false if it is not. It will
log errors to dmesg if validation fails.
(*) int fs_parse(struct fs_context *fc,
const struct fs_param_parser *parser,
const struct fs_parameter_description *desc,
struct fs_parameter *param,
struct fs_param_parse_result *result);
struct fs_parse_result *result);
This is the main interpreter of parameters. It uses the parameter
description (parser) to look up the name of the parameter to use and to
convert that to a parameter ID (stored in result->key).
description to look up a parameter by key name and to convert that to an
option number (which it returns).
If successful, and if the parameter type indicates the result is a
boolean, integer or enum type, the value is converted by this function and
the result stored in result->{boolean,int_32,uint_32}.
the result stored in result->{boolean,int_32,uint_32,uint_64}.
If a match isn't initially made, the key is prefixed with "no", and no
value is present, then an attempt will be made to look up the key with the
prefix removed. If this matches a parameter for which the type has flag
fs_param_neg_with_no set, then a match will be made and the value will be
set to false/0/NULL.
fs_param_neg_with_no set, then a match will be made and result->negated
will be set to true.
If the parameter is successfully matched and, optionally, parsed
correctly, 1 is returned. If the parameter isn't matched and
parser->ignore_unknown is set, then 0 is returned. Otherwise -EINVAL is
returned.
(*) bool fs_validate_description(const struct fs_parameter_description *desc);
This is validates the parameter description. It returns true if the
description is good and false if it is not.
If the parameter isn't matched, -ENOPARAM will be returned; if the
parameter is matched, but the value is erroneous, -EINVAL will be
returned; otherwise the parameter's option number will be returned.
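A filesystem's ->parse_param() would then typically be structured like this
sketch (the foo_* names are hypothetical):

        static int foo_parse_param(struct fs_context *fc, struct fs_parameter *param)
        {
                struct fs_parse_result result;
                int opt;

                opt = fs_parse(fc, &foo_fs_parameters, param, &result);
                if (opt < 0)
                        return opt;

                switch (opt) {
                case Opt_source:
                        /* the parsed value is now in param/result */
                        break;
                }
                return 0;
        }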
(*) int fs_lookup_param(struct fs_context *fc,
struct fs_parameter *value,


@ -36,6 +36,7 @@ Supported adapters:
* Intel Cannon Lake (PCH)
* Intel Cedar Fork (PCH)
* Intel Ice Lake (PCH)
* Intel Comet Lake (PCH)
Datasheets: Publicly available at the Intel website
On Intel Patsburg and later chipsets, both the normal host SMBus controller


@ -50,7 +50,7 @@ the excellent reporting over at LWN.net or read the original code.
patchset
[PATCH net-next v4 0/9] socket sendmsg MSG_ZEROCOPY
http://lkml.kernel.org/r/20170803202945.70750-1-willemdebruijn.kernel@gmail.com
https://lkml.kernel.org/netdev/20170803202945.70750-1-willemdebruijn.kernel@gmail.com
Interface


@ -131,6 +131,19 @@ it to the maintainer to figure out what is the most recent and current
version that should be applied. If there is any doubt, the maintainer
will reply and ask what should be done.
Q: I made changes to only a few patches in a patch series, should I resend only those changed?
----------------------------------------------------------------------------------------------
A: No, please resend the entire patch series and make sure you number your
patches such that it is clear this is the latest and greatest set of patches
that can be applied.
Q: I submitted multiple versions of a patch series and it looks like a version other than the last one has been accepted, what should I do?
-------------------------------------------------------------------------------------------------------------------------------------------
A: There is no revert possible; once it is pushed out, it stays like that.
Please send incremental versions on top of what has been merged in order to
make the patches look the way they would have if your latest patch series had
been merged.
Q: How can I tell what patches are queued up for backporting to the various stable releases?
--------------------------------------------------------------------------------------------
A: Normally Greg Kroah-Hartman collects stable commits himself, but for


@ -44,10 +44,10 @@ including the Netfilter hooks and the flowtable fastpath bypass.
/ \ / \ |Routing | / \
--> ingress ---> prerouting ---> |decision| | postrouting |--> neigh_xmit
\_________/ \__________/ ---------- \____________/ ^
| ^ | | ^ |
flowtable | | ____\/___ | |
| | | / \ | |
__\/___ | --------->| forward |------------ |
| ^ | ^ |
flowtable | ____\/___ | |
| | / \ | |
__\/___ | | forward |------------ |
|-----| | \_________/ |
|-----| | 'flow offload' rule |
|-----| | adds entry to |


@ -413,7 +413,7 @@ algorithm.
.. _F-RTO: https://tools.ietf.org/html/rfc5682
TCP Fast Path
============
=============
When the kernel receives a TCP packet, it has two paths to handle the
packet: one is the fast path, the other is the slow path. The comment in the
kernel code provides a good explanation of them; I have pasted it below::
@ -681,6 +681,7 @@ The TCP stack receives an out of order duplicate packet, so it sends a
DSACK to the sender.
* TcpExtTCPDSACKRecv
The TCP stack receives a DSACK, which indicates an acknowledged
duplicate packet is received.
@ -690,7 +691,7 @@ The TCP stack receives a DSACK, which indicate an out of order
duplicate packet is received.
invalid SACK and DSACK
====================
======================
When a SACK (or DSACK) block is invalid, a corresponding counter will
be updated. The validation method is based on the start/end sequence
number of the SACK block. For more details, please refer to the comment
@ -704,11 +705,13 @@ explaination:
.. _Add counters for discarded SACK blocks: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=18f02545a9a16c9a89778b91a162ad16d510bb32
* TcpExtTCPSACKDiscard
This counter indicates how many SACK blocks are invalid. If the invalid
SACK block is caused by ACK recording, the TCP stack will only ignore
it and won't update this counter.
* TcpExtTCPDSACKIgnoredOld and TcpExtTCPDSACKIgnoredNoUndo
When a DSACK block is invalid, one of these two counters would be
updated. Which counter will be updated depends on the undo_marker flag
of the TCP socket. If the undo_marker is not set, the TCP stack isn't
@ -719,7 +722,7 @@ will be updated. If the undo_marker is set, TcpExtTCPDSACKIgnoredOld
will be updated. As implied in its name, it might be an old packet.
SACK shift
=========
==========
The Linux networking stack stores data in the sk_buff struct (skb for
short). If a SACK block crosses multiple skbs, the TCP stack will try
to re-arrange the data in these skbs. E.g. if a SACK block acknowledges seq
@ -730,12 +733,15 @@ seq 14 to 20. All data in skb2 will be moved to skb1, and skb2 will be
discarded; this operation is 'merge'.
* TcpExtTCPSackShifted
A skb is shifted
* TcpExtTCPSackMerged
A skb is merged
* TcpExtTCPSackShiftFallback
A skb should be shifted or merged, but the TCP stack doesn't do it for
some reason.


@ -5,25 +5,32 @@ The Definitive KVM (Kernel-based Virtual Machine) API Documentation
----------------------
The kvm API is a set of ioctls that are issued to control various aspects
of a virtual machine. The ioctls belong to three classes
of a virtual machine. The ioctls belong to three classes:
- System ioctls: These query and set global attributes which affect the
whole kvm subsystem. In addition a system ioctl is used to create
virtual machines
virtual machines.
- VM ioctls: These query and set attributes that affect an entire virtual
machine, for example memory layout. In addition a VM ioctl is used to
create virtual cpus (vcpus).
create virtual cpus (vcpus) and devices.
Only run VM ioctls from the same process (address space) that was used
to create the VM.
VM ioctls must be issued from the same process (address space) that was
used to create the VM.
- vcpu ioctls: These query and set attributes that control the operation
of a single virtual cpu.
Only run vcpu ioctls from the same thread that was used to create the
vcpu.
vcpu ioctls should be issued from the same thread that was used to create
the vcpu, except for asynchronous vcpu ioctls that are marked as such in
the documentation. Otherwise, the first ioctl after switching threads
could see a performance impact.
- device ioctls: These query and set attributes that control the operation
of a single device.
device ioctls must be issued from the same process (address space) that
was used to create the VM.
2. File descriptors
-------------------
@ -32,17 +39,34 @@ The kvm API is centered around file descriptors. An initial
open("/dev/kvm") obtains a handle to the kvm subsystem; this handle
can be used to issue system ioctls. A KVM_CREATE_VM ioctl on this
handle will create a VM file descriptor which can be used to issue VM
ioctls. A KVM_CREATE_VCPU ioctl on a VM fd will create a virtual cpu
and return a file descriptor pointing to it. Finally, ioctls on a vcpu
fd can be used to control the vcpu, including the important task of
actually running guest code.
ioctls. A KVM_CREATE_VCPU or KVM_CREATE_DEVICE ioctl on a VM fd will
create a virtual cpu or device and return a file descriptor pointing to
the new resource. Finally, ioctls on a vcpu or device fd can be used
to control the vcpu or device. For vcpus, this includes the important
task of actually running guest code.
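A minimal userspace sketch of this handle hierarchy (headers and error
handling omitted; in real code the vcpu's kvm_run structure would also be
mmap()ed before running):

        int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
        int vm = ioctl(kvm, KVM_CREATE_VM, 0);          /* system ioctl */
        int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);       /* VM ioctl */

        ioctl(vcpu, KVM_RUN, 0);                        /* vcpu ioctl */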
In general file descriptors can be migrated among processes by means
of fork() and the SCM_RIGHTS facility of unix domain socket. These
kinds of tricks are explicitly not supported by kvm. While they will
not cause harm to the host, their actual behavior is not guaranteed by
the API. The only supported use is one virtual machine per process,
and one vcpu per thread.
the API. See "General description" for details on the ioctl usage
model that is supported by KVM.
It is important to note that although VM ioctls may only be issued from
the process that created the VM, a VM's lifecycle is associated with its
file descriptor, not its creator (process). In other words, the VM and
its resources, *including the associated address space*, are not freed
until the last reference to the VM's file descriptor has been released.
For example, if fork() is issued after ioctl(KVM_CREATE_VM), the VM will
not be freed until both the parent (original) process and its child have
put their references to the VM's file descriptor.
Because a VM's resources are not freed until the last reference to its
file descriptor is released, creating additional references to a VM
via fork(), dup(), etc... without careful consideration is strongly
discouraged and may have unwanted side effects, e.g. memory allocated
by and on behalf of the VM's process may not be freed/unaccounted when
the VM is shut down.
@ -515,11 +539,15 @@ c) KVM_INTERRUPT_SET_LEVEL
Note that any value for 'irq' other than the ones stated above is invalid
and incurs unexpected behavior.
This is an asynchronous vcpu ioctl and can be invoked from any thread.
MIPS:
Queues an external interrupt to be injected into the virtual CPU. A negative
interrupt number dequeues the interrupt.
This is an asynchronous vcpu ioctl and can be invoked from any thread.
4.17 KVM_DEBUG_GUEST
@ -1086,14 +1114,12 @@ struct kvm_userspace_memory_region {
#define KVM_MEM_LOG_DIRTY_PAGES (1UL << 0)
#define KVM_MEM_READONLY (1UL << 1)
This ioctl allows the user to create or modify a guest physical memory
slot. When changing an existing slot, it may be moved in the guest
physical memory space, or its flags may be modified. It may not be
resized. Slots may not overlap in guest physical address space.
Bits 0-15 of "slot" specifies the slot id and this value should be
less than the maximum number of user memory slots supported per VM.
The maximum allowed slots can be queried using KVM_CAP_NR_MEMSLOTS,
if this capability is supported by the architecture.
This ioctl allows the user to create, modify or delete a guest physical
memory slot. Bits 0-15 of "slot" specify the slot id and this value
should be less than the maximum number of user memory slots supported per
VM. The maximum allowed slots can be queried using KVM_CAP_NR_MEMSLOTS,
if this capability is supported by the architecture. Slots may not
overlap in guest physical address space.
If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 of "slot"
specifies the address space which is being modified. They must be
@ -1102,6 +1128,10 @@ KVM_CAP_MULTI_ADDRESS_SPACE capability. Slots in separate address spaces
are unrelated; the restriction on overlapping slots only applies within
each address space.
Deleting a slot is done by passing zero for memory_size. When changing
an existing slot, it may be moved in the guest physical memory space,
or its flags may be modified, but it may not be resized.
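For instance, deleting a previously created slot amounts to (field values
illustrative):

        struct kvm_userspace_memory_region region = {
                .slot = 1,
                .guest_phys_addr = 0x100000,
                .memory_size = 0,       /* zero size deletes the slot */
        };

        ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);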
Memory for the region is taken starting at the address denoted by the
field userspace_addr, which must point at user addressable memory for
the entire memory slot size. Any object may back this memory, including
@ -2493,7 +2523,7 @@ KVM_S390_MCHK (vm, vcpu) - machine check interrupt; cr 14 bits in parm,
machine checks needing further payload are not
supported by this ioctl)
Note that the vcpu ioctl is asynchronous to vcpu execution.
This is an asynchronous vcpu ioctl and can be invoked from any thread.
4.78 KVM_PPC_GET_HTAB_FD
@ -3042,8 +3072,7 @@ KVM_S390_INT_EMERGENCY - sigp emergency; parameters in .emerg
KVM_S390_INT_EXTERNAL_CALL - sigp external call; parameters in .extcall
KVM_S390_MCHK - machine check interrupt; parameters in .mchk
Note that the vcpu ioctl is asynchronous to vcpu execution.
This is an asynchronous vcpu ioctl and can be invoked from any thread.
4.94 KVM_S390_GET_IRQ_STATE


@ -142,7 +142,7 @@ Shadow pages contain the following information:
If clear, this page corresponds to a guest page table denoted by the gfn
field.
role.quadrant:
When role.cr4_pae=0, the guest uses 32-bit gptes while the host uses 64-bit
When role.gpte_is_8_bytes=0, the guest uses 32-bit gptes while the host uses 64-bit
sptes. That means a guest page table contains more ptes than the host,
so multiple shadow pages are needed to shadow one guest page.
For first-level shadow pages, role.quadrant can be 0 or 1 and denotes the
@ -158,9 +158,9 @@ Shadow pages contain the following information:
The page is invalid and should not be used. It is a root page that is
currently pinned (by a cpu hardware register pointing to it); once it is
unpinned it will be destroyed.
role.cr4_pae:
Contains the value of cr4.pae for which the page is valid (e.g. whether
32-bit or 64-bit gptes are in use).
role.gpte_is_8_bytes:
Reflects the size of the guest PTE for which the page is valid, i.e. '1'
if 64-bit gptes are in use, '0' if 32-bit gptes are in use.
role.nxe:
Contains the value of efer.nxe for which the page is valid.
role.cr0_wp:
@ -173,6 +173,9 @@ Shadow pages contain the following information:
Contains the value of cr4.smap && !cr0.wp for which the page is valid
(pages for which this is true are different from other pages; see the
treatment of cr0.wp=0 below).
role.ept_sp:
This is a virtual flag to denote a shadowed nested EPT page. ept_sp
is true if "cr0_wp && smap_andnot_wp", an otherwise invalid combination.
role.smm:
Is 1 if the page is valid in system management mode. This field
determines which of the kvm_memslots array was used to build this


@ -2356,7 +2356,7 @@ F: arch/arm/mm/cache-uniphier.c
F: arch/arm64/boot/dts/socionext/uniphier*
F: drivers/bus/uniphier-system-bus.c
F: drivers/clk/uniphier/
F: drivers/dmaengine/uniphier-mdmac.c
F: drivers/dma/uniphier-mdmac.c
F: drivers/gpio/gpio-uniphier.c
F: drivers/i2c/busses/i2c-uniphier*
F: drivers/irqchip/irq-uniphier-aidet.c
@ -6408,7 +6408,6 @@ L: linux-kernel@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core
S: Maintained
F: kernel/futex.c
F: kernel/futex_compat.c
F: include/asm-generic/futex.h
F: include/linux/futex.h
F: include/uapi/linux/futex.h


@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 1
SUBLEVEL = 0
EXTRAVERSION = -rc2
EXTRAVERSION = -rc3
NAME = Shy Crocodile
# *DOCUMENTATION*
@ -31,26 +31,12 @@ _all:
# descending is started. They are now explicitly listed as the
# prepare rule.
# Ugly workaround for Debian make-kpkg:
# make-kpkg directly includes the top Makefile of Linux kernel. In such a case,
# skip sub-make to support debian_* targets in ruleset/kernel_version.mk, but
display a warning to discourage such abuse.
ifneq ($(word 2, $(MAKEFILE_LIST)),)
$(warning Do not include top Makefile of Linux Kernel)
sub-make-done := 1
MAKEFLAGS += -rR
endif
ifneq ($(sub-make-done),1)
ifneq ($(sub_make_done),1)
# Do not use make's built-in rules and variables
# (this increases performance and avoids hard-to-debug behaviour)
MAKEFLAGS += -rR
# 'MAKEFLAGS += -rR' does not become immediately effective for old
# GNU Make versions. Cancel implicit rules for this Makefile.
$(lastword $(MAKEFILE_LIST)): ;
# Avoid funny character set dependencies
unexport LC_ALL
LC_COLLATE=C
@ -153,6 +139,7 @@ $(if $(KBUILD_OUTPUT),, \
# 'sub-make' below.
MAKEFLAGS += --include-dir=$(CURDIR)
need-sub-make := 1
else
# Do not print "Entering directory ..." at all for in-tree build.
@ -160,6 +147,18 @@ MAKEFLAGS += --no-print-directory
endif # ifneq ($(KBUILD_OUTPUT),)
ifneq ($(filter 3.%,$(MAKE_VERSION)),)
# 'MAKEFLAGS += -rR' does not immediately become effective for GNU Make 3.x
# We need to invoke sub-make to avoid implicit rules in the top Makefile.
need-sub-make := 1
# Cancel implicit rules for this Makefile.
$(lastword $(MAKEFILE_LIST)): ;
endif
export sub_make_done := 1
ifeq ($(need-sub-make),1)
PHONY += $(MAKECMDGOALS) sub-make
$(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make
@ -167,12 +166,15 @@ $(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make
# Invoke a second make in the output directory, passing relevant variables
sub-make:
$(Q)$(MAKE) sub-make-done=1 \
$(Q)$(MAKE) \
$(if $(KBUILD_OUTPUT),-C $(KBUILD_OUTPUT) KBUILD_SRC=$(CURDIR)) \
-f $(CURDIR)/Makefile $(filter-out _all sub-make,$(MAKECMDGOALS))
else # sub-make-done
endif # need-sub-make
endif # sub_make_done
# We process the rest of the Makefile if this is the final invocation of make
ifeq ($(need-sub-make),)
# Do not print "Entering directory ...",
# but we want to display it when entering to the output directory
@ -497,7 +499,8 @@ outputmakefile:
ifneq ($(KBUILD_SRC),)
$(Q)ln -fsn $(srctree) source
$(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkmakefile $(srctree)
$(Q){ echo "# this is build directory, ignore it"; echo "*"; } > .gitignore
$(Q)test -e .gitignore || \
{ echo "# this is build directory, ignore it"; echo "*"; } > .gitignore
endif
ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),)
@ -677,7 +680,7 @@ KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow)
KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context)
ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
KBUILD_CFLAGS += $(call cc-option,-Oz,-Os)
KBUILD_CFLAGS += -Os
else
KBUILD_CFLAGS += -O2
endif
@ -950,9 +953,11 @@ mod_sign_cmd = true
endif
export mod_sign_cmd
HOST_LIBELF_LIBS = $(shell pkg-config libelf --libs 2>/dev/null || echo -lelf)
ifdef CONFIG_STACK_VALIDATION
has_libelf := $(call try-run,\
echo "int main() {}" | $(HOSTCC) -xc -o /dev/null -lelf -,1,0)
echo "int main() {}" | $(HOSTCC) -xc -o /dev/null $(HOST_LIBELF_LIBS) -,1,0)
ifeq ($(has_libelf),1)
objtool_target := tools/objtool FORCE
else
@ -1757,7 +1762,7 @@ existing-targets := $(wildcard $(sort $(targets)))
endif # ifeq ($(config-targets),1)
endif # ifeq ($(mixed-targets),1)
endif # sub-make-done
endif # need-sub-make
PHONY += FORCE
FORCE:


@ -6,6 +6,7 @@ generic-y += exec.h
generic-y += export.h
generic-y += fb.h
generic-y += irq_work.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += preempt.h


@ -1,2 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#include <asm-generic/kvm_para.h>


@ -11,6 +11,7 @@ generic-y += hardirq.h
generic-y += hw_irq.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kvm_para.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h


@ -1,2 +1 @@
generic-y += kvm_para.h
generic-y += ucontext.h


@ -596,6 +596,7 @@ config ARCH_DAVINCI
select HAVE_IDE
select PM_GENERIC_DOMAINS if PM
select PM_GENERIC_DOMAINS_OF if PM && OF
select REGMAP_MMIO
select RESET_CONTROLLER
select SPARSE_IRQ
select USE_OF


@ -93,7 +93,7 @@
};
&hdmi {
hpd-gpios = <&gpio 46 GPIO_ACTIVE_LOW>;
hpd-gpios = <&gpio 46 GPIO_ACTIVE_HIGH>;
};
&pwm {


@ -114,9 +114,9 @@
reg = <2>;
};
switch@0 {
switch@10 {
compatible = "qca,qca8334";
reg = <0>;
reg = <10>;
switch_ports: ports {
#address-cells = <1>;
@ -125,7 +125,7 @@
ethphy0: port@0 {
reg = <0>;
label = "cpu";
phy-mode = "rgmii";
phy-mode = "rgmii-id";
ethernet = <&fec>;
fixed-link {


@ -264,7 +264,7 @@
pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
vmcc-supply = <&reg_sd3_vmmc>;
cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
bus-witdh = <4>;
bus-width = <4>;
no-1-8-v;
status = "okay";
};
@ -275,7 +275,7 @@
pinctrl-1 = <&pinctrl_usdhc4_100mhz>;
pinctrl-2 = <&pinctrl_usdhc4_200mhz>;
vmcc-supply = <&reg_sd4_vmmc>;
bus-witdh = <8>;
bus-width = <8>;
no-1-8-v;
non-removable;
status = "okay";


@ -91,6 +91,7 @@
pinctrl-0 = <&pinctrl_enet>;
phy-handle = <&ethphy>;
phy-mode = "rgmii";
phy-reset-duration = <10>; /* in msecs */
phy-reset-gpios = <&gpio3 23 GPIO_ACTIVE_LOW>;
phy-supply = <&vdd_eth_io_reg>;
status = "disabled";


@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2016 Freescale Semiconductor, Inc.
* Copyright (C) 2017 NXP


@ -213,12 +213,13 @@
gpio-sck = <&gpio0 5 GPIO_ACTIVE_HIGH>;
gpio-mosi = <&gpio0 4 GPIO_ACTIVE_HIGH>;
/*
* It's not actually active high, but the frameworks assume
* the polarity of the passed-in GPIO is "normal" (active
* high) then actively drives the line low to select the
* chip.
* This chipselect is active high. Just setting the flags
* to GPIO_ACTIVE_HIGH is not enough for the SPI DT bindings:
* it will be ignored, and only the special "spi-cs-high" flag
* really counts.
*/
cs-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>;
spi-cs-high;
num-chipselects = <1>;
/*


@ -170,6 +170,9 @@ CONFIG_IMX_SDMA=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_IIO=y
CONFIG_FSL_MX25_ADC=y
CONFIG_PWM=y
CONFIG_PWM_IMX1=y
CONFIG_PWM_IMX27=y
CONFIG_EXT4_FS=y
# CONFIG_DNOTIFY is not set
CONFIG_VFAT_FS=y


@ -398,7 +398,7 @@ CONFIG_MAG3110=y
CONFIG_MPL3115=y
CONFIG_PWM=y
CONFIG_PWM_FSL_FTM=y
CONFIG_PWM_IMX=y
CONFIG_PWM_IMX27=y
CONFIG_NVMEM_IMX_OCOTP=y
CONFIG_NVMEM_VF610_OCOTP=y
CONFIG_TEE=y


@ -381,6 +381,17 @@ static inline int kvm_read_guest_lock(struct kvm *kvm,
return ret;
}
static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
const void *data, unsigned long len)
{
int srcu_idx = srcu_read_lock(&kvm->srcu);
int ret = kvm_write_guest(kvm, gpa, data, len);
srcu_read_unlock(&kvm->srcu, srcu_idx);
return ret;
}
static inline void *kvm_get_hyp_vector(void)
{
switch(read_cpuid_part()) {


@ -75,6 +75,8 @@ static inline bool kvm_stage2_has_pud(struct kvm *kvm)
#define S2_PMD_MASK PMD_MASK
#define S2_PMD_SIZE PMD_SIZE
#define S2_PUD_MASK PUD_MASK
#define S2_PUD_SIZE PUD_SIZE
static inline bool kvm_stage2_has_pmd(struct kvm *kvm)
{


@ -3,3 +3,4 @@
generated-y += unistd-common.h
generated-y += unistd-oabi.h
generated-y += unistd-eabi.h
generic-y += kvm_para.h


@ -1,2 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#include <asm-generic/kvm_para.h>


@ -16,30 +16,23 @@
#include "cpuidle.h"
#include "hardware.h"
static atomic_t master = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(master_lock);
static int num_idle_cpus = 0;
static DEFINE_SPINLOCK(cpuidle_lock);
static int imx6q_enter_wait(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
if (atomic_inc_return(&master) == num_online_cpus()) {
/*
* With this lock, we prevent other cpu to exit and enter
* this function again and become the master.
*/
if (!spin_trylock(&master_lock))
goto idle;
spin_lock(&cpuidle_lock);
if (++num_idle_cpus == num_online_cpus())
imx6_set_lpm(WAIT_UNCLOCKED);
cpu_do_idle();
imx6_set_lpm(WAIT_CLOCKED);
spin_unlock(&master_lock);
goto done;
}
spin_unlock(&cpuidle_lock);
idle:
cpu_do_idle();
done:
atomic_dec(&master);
spin_lock(&cpuidle_lock);
if (num_idle_cpus-- == num_online_cpus())
imx6_set_lpm(WAIT_CLOCKED);
spin_unlock(&cpuidle_lock);
return index;
}


@ -59,6 +59,7 @@ static void __init imx51_m4if_setup(void)
return;
m4if_base = of_iomap(np, 0);
of_node_put(np);
if (!m4if_base) {
pr_err("Unable to map M4IF registers\n");
return;


@ -27,6 +27,7 @@ config ARCH_BCM2835
bool "Broadcom BCM2835 family"
select TIMER_OF
select GPIOLIB
select MFD_CORE
select PINCTRL
select PINCTRL_BCM2835
select ARM_AMBA


@ -321,7 +321,6 @@
nvidia,default-trim = <0x9>;
nvidia,dqs-trim = <63>;
mmc-hs400-1_8v;
supports-cqe;
status = "disabled";
};


@ -2,7 +2,7 @@
/*
* Device Tree Source for the RZ/G2E (R8A774C0) SoC
*
* Copyright (C) 2018 Renesas Electronics Corp.
* Copyright (C) 2018-2019 Renesas Electronics Corp.
*/
#include <dt-bindings/clock/r8a774c0-cpg-mssr.h>
@ -1150,9 +1150,8 @@
<&cpg CPG_CORE R8A774C0_CLK_S3D1C>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
dmas = <&dmac1 0x5b>, <&dmac1 0x5a>,
<&dmac2 0x5b>, <&dmac2 0x5a>;
dma-names = "tx", "rx", "tx", "rx";
dmas = <&dmac0 0x5b>, <&dmac0 0x5a>;
dma-names = "tx", "rx";
power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
resets = <&cpg 202>;
status = "disabled";


@ -2,7 +2,7 @@
/*
* Device Tree Source for the R-Car E3 (R8A77990) SoC
*
* Copyright (C) 2018 Renesas Electronics Corp.
* Copyright (C) 2018-2019 Renesas Electronics Corp.
*/
#include <dt-bindings/clock/r8a77990-cpg-mssr.h>
@ -1067,9 +1067,8 @@
<&cpg CPG_CORE R8A77990_CLK_S3D1C>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
dmas = <&dmac1 0x5b>, <&dmac1 0x5a>,
<&dmac2 0x5b>, <&dmac2 0x5a>;
dma-names = "tx", "rx", "tx", "rx";
dmas = <&dmac0 0x5b>, <&dmac0 0x5a>;
dma-names = "tx", "rx";
power-domains = <&sysc R8A77990_PD_ALWAYS_ON>;
resets = <&cpg 202>;
status = "disabled";


@ -445,6 +445,17 @@ static inline int kvm_read_guest_lock(struct kvm *kvm,
return ret;
}
static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
const void *data, unsigned long len)
{
int srcu_idx = srcu_read_lock(&kvm->srcu);
int ret = kvm_write_guest(kvm, gpa, data, len);
srcu_read_unlock(&kvm->srcu, srcu_idx);
return ret;
}
#ifdef CONFIG_KVM_INDIRECT_VECTORS
/*
* EL2 vectors can be mapped and rerouted in a number of ways,


@ -217,7 +217,7 @@ static void __init request_standard_resources(void)
num_standard_resources = memblock.memory.cnt;
res_size = num_standard_resources * sizeof(*standard_resources);
standard_resources = memblock_alloc_low(res_size, SMP_CACHE_BYTES);
standard_resources = memblock_alloc(res_size, SMP_CACHE_BYTES);
if (!standard_resources)
panic("%s: Failed to allocate %zu bytes\n", __func__, res_size);


@ -123,6 +123,9 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
int ret = -EINVAL;
bool loaded;
/* Reset PMU outside of the non-preemptible section */
kvm_pmu_vcpu_reset(vcpu);
preempt_disable();
loaded = (vcpu->cpu != -1);
if (loaded)
@ -170,9 +173,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
vcpu->arch.reset_state.reset = false;
}
/* Reset PMU */
kvm_pmu_vcpu_reset(vcpu);
/* Default workaround setup is enabled (if supported) */
if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL)
vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;


@ -19,6 +19,7 @@ generic-y += irq_work.h
generic-y += kdebug.h
generic-y += kmap_types.h
generic-y += kprobes.h
generic-y += kvm_para.h
generic-y += local.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h


@ -1,2 +1 @@
generic-y += kvm_para.h
generic-y += ucontext.h


@ -23,6 +23,7 @@ generic-y += irq_work.h
generic-y += kdebug.h
generic-y += kmap_types.h
generic-y += kprobes.h
generic-y += kvm_para.h
generic-y += linkage.h
generic-y += local.h
generic-y += local64.h


@ -1,2 +1 @@
generic-y += kvm_para.h
generic-y += ucontext.h


@ -19,6 +19,7 @@ generic-y += irq_work.h
generic-y += kdebug.h
generic-y += kmap_types.h
generic-y += kprobes.h
generic-y += kvm_para.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h


@ -1,2 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#include <asm-generic/kvm_para.h>


@ -2,6 +2,7 @@ generated-y += syscall_table.h
generic-y += compat.h
generic-y += exec.h
generic-y += irq_work.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += preempt.h


@ -1,2 +1 @@
generated-y += unistd_64.h
generic-y += kvm_para.h


@ -13,6 +13,7 @@ generic-y += irq_work.h
generic-y += kdebug.h
generic-y += kmap_types.h
generic-y += kprobes.h
generic-y += kvm_para.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h


@ -1,2 +1 @@
generated-y += unistd_32.h
generic-y += kvm_para.h


@ -17,6 +17,7 @@ generic-y += irq_work.h
generic-y += kdebug.h
generic-y += kmap_types.h
generic-y += kprobes.h
generic-y += kvm_para.h
generic-y += linkage.h
generic-y += local.h
generic-y += local64.h


@ -1,3 +1,2 @@
generated-y += unistd_32.h
generic-y += kvm_para.h
generic-y += ucontext.h


@ -23,6 +23,7 @@ generic-y += irq_work.h
generic-y += kdebug.h
generic-y += kmap_types.h
generic-y += kprobes.h
generic-y += kvm_para.h
generic-y += local.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h


@ -1,2 +1 @@
generic-y += kvm_para.h
generic-y += ucontext.h


@ -20,6 +20,7 @@ generic-y += irq_work.h
generic-y += kdebug.h
generic-y += kmap_types.h
generic-y += kprobes.h
generic-y += kvm_para.h
generic-y += local.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h


@ -1,2 +1 @@
generic-y += kvm_para.h
generic-y += ucontext.h


@ -11,6 +11,7 @@ generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kdebug.h
generic-y += kprobes.h
generic-y += kvm_para.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h


@ -1,3 +1,2 @@
generated-y += unistd_32.h
generated-y += unistd_64.h
generic-y += kvm_para.h


@ -302,6 +302,7 @@
/* Misc instructions for BPF compiler */
#define PPC_INST_LBZ 0x88000000
#define PPC_INST_LD 0xe8000000
#define PPC_INST_LDX 0x7c00002a
#define PPC_INST_LHZ 0xa0000000
#define PPC_INST_LWZ 0x80000000
#define PPC_INST_LHBRX 0x7c00062c
@ -309,6 +310,7 @@
#define PPC_INST_STB 0x98000000
#define PPC_INST_STH 0xb0000000
#define PPC_INST_STD 0xf8000000
#define PPC_INST_STDX 0x7c00012a
#define PPC_INST_STDU 0xf8000001
#define PPC_INST_STW 0x90000000
#define PPC_INST_STWU 0x94000000


@ -215,11 +215,20 @@ _GLOBAL_TOC(memcmp)
beq .Lzero
.Lcmp_rest_lt8bytes:
/* Here we have only less than 8 bytes to compare with. at least s1
* Address is aligned with 8 bytes.
* The next double words are load and shift right with appropriate
* bits.
/*
* Here we have less than 8 bytes to compare. At least s1 is aligned to
* 8 bytes, but s2 may not be. We must make sure s2 + 7 doesn't cross a
* page boundary, otherwise we might read past the end of the buffer and
* trigger a page fault. We use 4K as the conservative minimum page
* size. If we detect that case we go to the byte-by-byte loop.
*
* Otherwise the next double word is loaded from s1 and s2, and shifted
* right to compare the appropriate bits.
*/
clrldi r6,r4,(64-12) // r6 = r4 & 0xfff
cmpdi r6,0xff8
bgt .Lshort
subfic r6,r5,8
slwi r6,r6,3
LD rA,0,r3


@ -51,6 +51,8 @@
#define PPC_LIS(r, i) PPC_ADDIS(r, 0, i)
#define PPC_STD(r, base, i) EMIT(PPC_INST_STD | ___PPC_RS(r) | \
___PPC_RA(base) | ((i) & 0xfffc))
#define PPC_STDX(r, base, b) EMIT(PPC_INST_STDX | ___PPC_RS(r) | \
___PPC_RA(base) | ___PPC_RB(b))
#define PPC_STDU(r, base, i) EMIT(PPC_INST_STDU | ___PPC_RS(r) | \
___PPC_RA(base) | ((i) & 0xfffc))
#define PPC_STW(r, base, i) EMIT(PPC_INST_STW | ___PPC_RS(r) | \
@ -65,7 +67,9 @@
#define PPC_LBZ(r, base, i) EMIT(PPC_INST_LBZ | ___PPC_RT(r) | \
___PPC_RA(base) | IMM_L(i))
#define PPC_LD(r, base, i) EMIT(PPC_INST_LD | ___PPC_RT(r) | \
___PPC_RA(base) | IMM_L(i))
___PPC_RA(base) | ((i) & 0xfffc))
#define PPC_LDX(r, base, b) EMIT(PPC_INST_LDX | ___PPC_RT(r) | \
___PPC_RA(base) | ___PPC_RB(b))
#define PPC_LWZ(r, base, i) EMIT(PPC_INST_LWZ | ___PPC_RT(r) | \
___PPC_RA(base) | IMM_L(i))
#define PPC_LHZ(r, base, i) EMIT(PPC_INST_LHZ | ___PPC_RT(r) | \
@ -85,17 +89,6 @@
___PPC_RA(a) | ___PPC_RB(b))
#define PPC_BPF_STDCX(s, a, b) EMIT(PPC_INST_STDCX | ___PPC_RS(s) | \
___PPC_RA(a) | ___PPC_RB(b))
#ifdef CONFIG_PPC64
#define PPC_BPF_LL(r, base, i) do { PPC_LD(r, base, i); } while(0)
#define PPC_BPF_STL(r, base, i) do { PPC_STD(r, base, i); } while(0)
#define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0)
#else
#define PPC_BPF_LL(r, base, i) do { PPC_LWZ(r, base, i); } while(0)
#define PPC_BPF_STL(r, base, i) do { PPC_STW(r, base, i); } while(0)
#define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0)
#endif
#define PPC_CMPWI(a, i) EMIT(PPC_INST_CMPWI | ___PPC_RA(a) | IMM_L(i))
#define PPC_CMPDI(a, i) EMIT(PPC_INST_CMPDI | ___PPC_RA(a) | IMM_L(i))
#define PPC_CMPW(a, b) EMIT(PPC_INST_CMPW | ___PPC_RA(a) | \


@ -122,6 +122,10 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
#define PPC_NTOHS_OFFS(r, base, i) PPC_LHZ_OFFS(r, base, i)
#endif
#define PPC_BPF_LL(r, base, i) do { PPC_LWZ(r, base, i); } while(0)
#define PPC_BPF_STL(r, base, i) do { PPC_STW(r, base, i); } while(0)
#define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0)
#define SEEN_DATAREF 0x10000 /* might call external helpers */
#define SEEN_XREG 0x20000 /* X reg is used */
#define SEEN_MEM 0x40000 /* SEEN_MEM+(1<<n) = use mem[n] for temporary


@ -68,6 +68,26 @@ static const int b2p[] = {
/* PPC NVR range -- update this if we ever use NVRs below r27 */
#define BPF_PPC_NVR_MIN 27
/*
* WARNING: These can use TMP_REG_2 if the offset is not at word boundary,
* so ensure that it isn't in use already.
*/
#define PPC_BPF_LL(r, base, i) do { \
if ((i) % 4) { \
PPC_LI(b2p[TMP_REG_2], (i)); \
PPC_LDX(r, base, b2p[TMP_REG_2]); \
} else \
PPC_LD(r, base, i); \
} while(0)
#define PPC_BPF_STL(r, base, i) do { \
if ((i) % 4) { \
PPC_LI(b2p[TMP_REG_2], (i)); \
PPC_STDX(r, base, b2p[TMP_REG_2]); \
} else \
PPC_STD(r, base, i); \
} while(0)
#define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0)
#define SEEN_FUNC 0x1000 /* might call external helpers */
#define SEEN_STACK 0x2000 /* uses BPF stack */
#define SEEN_TAILCALL 0x4000 /* uses tail calls */


@ -252,7 +252,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
* goto out;
*/
PPC_LD(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
PPC_BPF_LL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
PPC_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT);
PPC_BCC(COND_GT, out);
@ -265,7 +265,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
/* prog = array->ptrs[index]; */
PPC_MULI(b2p[TMP_REG_1], b2p_index, 8);
PPC_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array);
PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));
PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));
/*
* if (prog == NULL)
@ -275,7 +275,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
PPC_BCC(COND_EQ, out);
/* goto *(prog->bpf_func + prologue_size); */
PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
#ifdef PPC64_ELF_ABI_v1
/* skip past the function descriptor */
PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1],
@ -606,7 +606,7 @@ bpf_alu32_trunc:
* the instructions generated will remain the
* same across all passes
*/
PPC_STD(dst_reg, 1, bpf_jit_stack_local(ctx));
PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx));
PPC_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx));
PPC_LDBRX(dst_reg, 0, b2p[TMP_REG_1]);
break;
@ -662,7 +662,7 @@ emit_clear:
PPC_LI32(b2p[TMP_REG_1], imm);
src_reg = b2p[TMP_REG_1];
}
PPC_STD(src_reg, dst_reg, off);
PPC_BPF_STL(src_reg, dst_reg, off);
break;
/*
@ -709,7 +709,7 @@ emit_clear:
break;
/* dst = *(u64 *)(ul) (src + off) */
case BPF_LDX | BPF_MEM | BPF_DW:
PPC_LD(dst_reg, src_reg, off);
PPC_BPF_LL(dst_reg, src_reg, off);
break;
/*


@ -77,18 +77,27 @@ static u32 cpu_to_drc_index(int cpu)
ret = drc.drc_index_start + (thread_index * drc.sequential_inc);
} else {
const __be32 *indexes;
indexes = of_get_property(dn, "ibm,drc-indexes", NULL);
if (indexes == NULL)
goto err_of_node_put;
u32 nr_drc_indexes, thread_drc_index;
/*
* The first element indexes[0] is the number of drc_indexes
* returned in the list. Hence thread_index+1 will get the
* drc_index corresponding to core number thread_index.
* The first element of ibm,drc-indexes array is the
* number of drc_indexes returned in the list. Hence
* thread_index+1 will get the drc_index corresponding
* to core number thread_index.
*/
ret = indexes[thread_index + 1];
rc = of_property_read_u32_index(dn, "ibm,drc-indexes",
0, &nr_drc_indexes);
if (rc)
goto err_of_node_put;
WARN_ON_ONCE(thread_index > nr_drc_indexes);
rc = of_property_read_u32_index(dn, "ibm,drc-indexes",
thread_index + 1,
&thread_drc_index);
if (rc)
goto err_of_node_put;
ret = thread_drc_index;
}
rc = 0;
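
One detail worth spelling out: of_property_read_u32_index() converts each cell from the device tree's big-endian storage to CPU byte order, which the removed raw indexes[thread_index + 1] access did not do. A minimal usage sketch in kernel context, assuming a hypothetical node with ibm,drc-indexes = <3 0x10 0x20 0x30> (error checking omitted for brevity):

u32 count, drc;

/* cell 0 holds the number of cells that follow */
of_property_read_u32_index(dn, "ibm,drc-indexes", 0, &count);	/* count = 3 */
/* cell thread_index + 1 holds the drc_index for core thread_index */
of_property_read_u32_index(dn, "ibm,drc-indexes", 2, &drc);	/* drc = 0x20 for thread_index 1 */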

View File

@ -550,6 +550,7 @@ static void pseries_print_mce_info(struct pt_regs *regs,
"UE",
"SLB",
"ERAT",
"Unknown",
"TLB",
"D-Cache",
"Unknown",

View File

@ -360,4 +360,15 @@ static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
return reg1;
}
/*
* Interface to tell the AP bus code that a configuration
* change has happened. The bus code should at least do
* an ap bus resource rescan.
*/
#if IS_ENABLED(CONFIG_ZCRYPT)
void ap_bus_cfg_chg(void);
#else
static inline void ap_bus_cfg_chg(void) { }
#endif
#endif /* _ASM_S390_AP_H_ */

View File

@ -252,11 +252,14 @@ do { \
/*
* Cache aliasing on the latest machines calls for a mapping granularity
* of 512KB. For 64-bit processes use a 512KB alignment and a randomization
* of up to 1GB. For 31-bit processes the virtual address space is limited,
* use no alignment and limit the randomization to 8MB.
* of 512KB for the anonymous mapping base. For 64-bit processes use a
* 512KB alignment and a randomization of up to 1GB. For 31-bit processes
the virtual address space is limited; use no alignment and limit the
* randomization to 8MB.
* For the additional randomization of the program break use 32MB for
* 64-bit and 8MB for 31-bit.
*/
#define BRK_RND_MASK (is_compat_task() ? 0x7ffUL : 0x3ffffUL)
#define BRK_RND_MASK (is_compat_task() ? 0x7ffUL : 0x1fffUL)
#define MMAP_RND_MASK (is_compat_task() ? 0x7ffUL : 0x3ff80UL)
#define MMAP_ALIGN_MASK (is_compat_task() ? 0 : 0x7fUL)
#define STACK_RND_MASK MMAP_RND_MASK
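
As a quick sanity check of these masks, assuming the usual 4 KB base page size (PAGE_SHIFT = 12):

/* (mask + 1) << PAGE_SHIFT gives the randomization range:
 *   BRK_RND_MASK,  64-bit: (0x1fff  + 1) << 12  =  32 MB  (0x3ffff gave ~1 GB)
 *   BRK_RND_MASK,  31-bit: (0x7ff   + 1) << 12  =   8 MB
 *   MMAP_RND_MASK, 64-bit: (0x3ff80 + 1) << 12 ~=   1 GB, and the clear low
 *                          7 bits keep the 0x80-page = 512 KB granularity
 */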

View File

@ -91,52 +91,53 @@ struct lowcore {
__u64 hardirq_timer; /* 0x02e8 */
__u64 softirq_timer; /* 0x02f0 */
__u64 steal_timer; /* 0x02f8 */
__u64 last_update_timer; /* 0x0300 */
__u64 last_update_clock; /* 0x0308 */
__u64 int_clock; /* 0x0310 */
__u64 mcck_clock; /* 0x0318 */
__u64 clock_comparator; /* 0x0320 */
__u64 boot_clock[2]; /* 0x0328 */
__u64 avg_steal_timer; /* 0x0300 */
__u64 last_update_timer; /* 0x0308 */
__u64 last_update_clock; /* 0x0310 */
__u64 int_clock; /* 0x0318 */
__u64 mcck_clock; /* 0x0320 */
__u64 clock_comparator; /* 0x0328 */
__u64 boot_clock[2]; /* 0x0330 */
/* Current process. */
__u64 current_task; /* 0x0338 */
__u64 kernel_stack; /* 0x0340 */
__u64 current_task; /* 0x0340 */
__u64 kernel_stack; /* 0x0348 */
/* Interrupt, DAT-off and restartstack. */
__u64 async_stack; /* 0x0348 */
__u64 nodat_stack; /* 0x0350 */
__u64 restart_stack; /* 0x0358 */
__u64 async_stack; /* 0x0350 */
__u64 nodat_stack; /* 0x0358 */
__u64 restart_stack; /* 0x0360 */
/* Restart function and parameter. */
__u64 restart_fn; /* 0x0360 */
__u64 restart_data; /* 0x0368 */
__u64 restart_source; /* 0x0370 */
__u64 restart_fn; /* 0x0368 */
__u64 restart_data; /* 0x0370 */
__u64 restart_source; /* 0x0378 */
/* Address space pointer. */
__u64 kernel_asce; /* 0x0378 */
__u64 user_asce; /* 0x0380 */
__u64 vdso_asce; /* 0x0388 */
__u64 kernel_asce; /* 0x0380 */
__u64 user_asce; /* 0x0388 */
__u64 vdso_asce; /* 0x0390 */
/*
* The lpp and current_pid fields form a
* 64-bit value that is set as program
* parameter with the LPP instruction.
*/
__u32 lpp; /* 0x0390 */
__u32 current_pid; /* 0x0394 */
__u32 lpp; /* 0x0398 */
__u32 current_pid; /* 0x039c */
/* SMP info area */
__u32 cpu_nr; /* 0x0398 */
__u32 softirq_pending; /* 0x039c */
__u32 preempt_count; /* 0x03a0 */
__u32 spinlock_lockval; /* 0x03a4 */
__u32 spinlock_index; /* 0x03a8 */
__u32 fpu_flags; /* 0x03ac */
__u64 percpu_offset; /* 0x03b0 */
__u64 vdso_per_cpu_data; /* 0x03b8 */
__u64 machine_flags; /* 0x03c0 */
__u64 gmap; /* 0x03c8 */
__u8 pad_0x03d0[0x0400-0x03d0]; /* 0x03d0 */
__u32 cpu_nr; /* 0x03a0 */
__u32 softirq_pending; /* 0x03a4 */
__u32 preempt_count; /* 0x03a8 */
__u32 spinlock_lockval; /* 0x03ac */
__u32 spinlock_index; /* 0x03b0 */
__u32 fpu_flags; /* 0x03b4 */
__u64 percpu_offset; /* 0x03b8 */
__u64 vdso_per_cpu_data; /* 0x03c0 */
__u64 machine_flags; /* 0x03c8 */
__u64 gmap; /* 0x03d0 */
__u8 pad_0x03d8[0x0400-0x03d8]; /* 0x03d8 */
/* br %r1 trampoline */
__u16 br_r1_trampoline; /* 0x0400 */
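
A quick consistency check on the relayout: inserting the 8-byte avg_steal_timer at 0x0300 shifts every later field by 8, and the slack is taken out of the trailing pad:

/* old: pad_0x03d0[0x0400 - 0x03d0]  ->  0x30 bytes of padding
 * new: pad_0x03d8[0x0400 - 0x03d8]  ->  0x28 bytes of padding
 * difference: 8 bytes, exactly the size of avg_steal_timer, so the
 * area still ends at 0x0400 and br_r1_trampoline keeps its offset.
 */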

View File

@ -196,23 +196,30 @@ static void cf_diag_perf_event_destroy(struct perf_event *event)
*/
static int __hw_perf_event_init(struct perf_event *event)
{
struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
struct perf_event_attr *attr = &event->attr;
struct cpu_cf_events *cpuhw;
enum cpumf_ctr_set i;
int err = 0;
debug_sprintf_event(cf_diag_dbg, 5,
"%s event %p cpu %d authorized %#x\n", __func__,
event, event->cpu, cpuhw->info.auth_ctl);
debug_sprintf_event(cf_diag_dbg, 5, "%s event %p cpu %d\n", __func__,
event, event->cpu);
event->hw.config = attr->config;
event->hw.config_base = 0;
local64_set(&event->count, 0);
/* Add all authorized counter sets to config_base */
/* Add all authorized counter sets to config_base. The
 * hardware init function is either called per CPU or just once
 * for all CPUs (event->cpu == -1). This depends on whether
 * counting is started for all CPUs or on a per-workload basis,
 * where the perf event moves from one CPU to another CPU.
 * Checking the authorization on any CPU is fine as the hardware
 * applies the same authorization settings to all CPUs.
 */
cpuhw = &get_cpu_var(cpu_cf_events);
for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i)
if (cpuhw->info.auth_ctl & cpumf_ctr_ctl[i])
event->hw.config_base |= cpumf_ctr_ctl[i];
put_cpu_var(cpu_cf_events);
/* No authorized counter sets, nothing to count/sample */
if (!event->hw.config_base) {
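
A note on the get_cpu_var()/put_cpu_var() pair introduced above (sketch only): the pair brackets the access with preempt_disable()/preempt_enable(), so the per-CPU pointer cannot go stale mid-read, unlike a bare this_cpu_ptr() in a preemptible context. Roughly equivalent shape:

/* struct cpu_cf_events *cpuhw;
 *
 * preempt_disable();
 * cpuhw = this_cpu_ptr(&cpu_cf_events);
 * ... read cpuhw->info.auth_ctl ...
 * preempt_enable();
 *
 * Reading any CPU's mask is fine here since, per the comment above,
 * the hardware applies the same authorization settings to all CPUs.
 */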

View File

@ -266,7 +266,8 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
lc->percpu_offset = __per_cpu_offset[cpu];
lc->kernel_asce = S390_lowcore.kernel_asce;
lc->machine_flags = S390_lowcore.machine_flags;
lc->user_timer = lc->system_timer = lc->steal_timer = 0;
lc->user_timer = lc->system_timer =
lc->steal_timer = lc->avg_steal_timer = 0;
__ctl_store(lc->cregs_save_area, 0, 15);
save_access_regs((unsigned int *) lc->access_regs_save_area);
memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,

View File

@ -124,7 +124,7 @@ static void account_system_index_scaled(struct task_struct *p, u64 cputime,
*/
static int do_account_vtime(struct task_struct *tsk)
{
u64 timer, clock, user, guest, system, hardirq, softirq, steal;
u64 timer, clock, user, guest, system, hardirq, softirq;
timer = S390_lowcore.last_update_timer;
clock = S390_lowcore.last_update_clock;
@ -182,12 +182,6 @@ static int do_account_vtime(struct task_struct *tsk)
if (softirq)
account_system_index_scaled(tsk, softirq, CPUTIME_SOFTIRQ);
steal = S390_lowcore.steal_timer;
if ((s64) steal > 0) {
S390_lowcore.steal_timer = 0;
account_steal_time(cputime_to_nsecs(steal));
}
return virt_timer_forward(user + guest + system + hardirq + softirq);
}
@ -213,8 +207,19 @@ void vtime_task_switch(struct task_struct *prev)
*/
void vtime_flush(struct task_struct *tsk)
{
u64 steal, avg_steal;
if (do_account_vtime(tsk))
virt_timer_expire();
steal = S390_lowcore.steal_timer;
avg_steal = S390_lowcore.avg_steal_timer / 2;
if ((s64) steal > 0) {
S390_lowcore.steal_timer = 0;
account_steal_time(steal);
avg_steal += steal;
}
S390_lowcore.avg_steal_timer = avg_steal;
}
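
The arithmetic above implements an exponential moving average with a decay factor of 1/2: each flush halves the accumulated value before adding the newest steal sample.

/* avg_n = steal_n + avg_(n-1) / 2
 *       = steal_n + steal_(n-1)/2 + steal_(n-2)/4 + ...
 * so old samples decay geometrically and recent steal time dominates
 * the value kept in the lowcore avg_steal_timer field.
 */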
/*

View File

@ -9,6 +9,7 @@ generic-y += emergency-restart.h
generic-y += exec.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kvm_para.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h

View File

@ -1,5 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
generated-y += unistd_32.h
generic-y += kvm_para.h
generic-y += ucontext.h

View File

@ -9,6 +9,7 @@ generic-y += exec.h
generic-y += export.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kvm_para.h
generic-y += linkage.h
generic-y += local.h
generic-y += local64.h

View File

@ -1,2 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#include <asm-generic/kvm_para.h>

View File

@ -18,6 +18,7 @@ generic-y += irq_work.h
generic-y += kdebug.h
generic-y += kmap_types.h
generic-y += kprobes.h
generic-y += kvm_para.h
generic-y += local.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h

View File

@ -1,2 +1 @@
generic-y += kvm_para.h
generic-y += ucontext.h

View File

@ -2217,14 +2217,8 @@ config RANDOMIZE_MEMORY_PHYSICAL_PADDING
If unsure, leave at the default value.
config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs"
def_bool y
depends on SMP
---help---
Say Y here to allow turning CPUs off and on. CPUs can be
controlled through /sys/devices/system/cpu.
( Note: power management support will enable this option
automatically on SMP systems. )
Say N if you want to disable CPU hotplug.
config BOOTPARAM_HOTPLUG_CPU0
bool "Set default setting of cpu0_hotpluggable"

View File

@ -219,8 +219,12 @@ ifdef CONFIG_RETPOLINE
# Additionally, avoid generating expensive indirect jumps which
# are subject to retpolines for small number of switch cases.
# clang turns off jump table generation by default when under
# retpoline builds, however, gcc does not for x86.
KBUILD_CFLAGS += $(call cc-option,--param=case-values-threshold=20)
# retpoline builds, however, gcc does not for x86. This was
# only fixed in gcc 8.4.0 and later stable releases; older
# ones still generate jump tables. See gcc bug #86952.
ifndef CONFIG_CC_IS_CLANG
KBUILD_CFLAGS += $(call cc-option,-fno-jump-tables)
endif
endif
archscripts: scripts_basic

View File

@ -120,8 +120,6 @@ static inline void console_init(void)
void set_sev_encryption_mask(void);
#endif
/* acpi.c */
#ifdef CONFIG_ACPI
acpi_physical_address get_rsdp_addr(void);
@ -135,3 +133,5 @@ int count_immovable_mem_regions(void);
#else
static inline int count_immovable_mem_regions(void) { return 0; }
#endif
#endif /* BOOT_COMPRESSED_MISC_H */

View File

@ -112,8 +112,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
test_cpu_cap(c, bit))
#define this_cpu_has(bit) \
(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
x86_this_cpu_test_bit(bit, (unsigned long *)&cpu_info.x86_capability))
(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
x86_this_cpu_test_bit(bit, \
(unsigned long __percpu *)&cpu_info.x86_capability))
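
Context for the cast change (an editorial sketch, purpose inferred): x86_this_cpu_test_bit() operates on per-CPU data, and sparse tracks per-CPU pointers as a distinct address space, so the plain unsigned long * cast drew an address-space mismatch:

/* sparse address-space view:
 *   (unsigned long *)&cpu_info.x86_capability          -- plain pointer, warns
 *   (unsigned long __percpu *)&cpu_info.x86_capability -- matches the helper
 */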
/*
* This macro is for detection of features which need kernel

View File

@ -253,14 +253,14 @@ struct kvm_mmu_memory_cache {
* kvm_memory_slot.arch.gfn_track which is 16 bits, so the role bits used
* by indirect shadow page can not be more than 15 bits.
*
* Currently, we used 14 bits that are @level, @cr4_pae, @quadrant, @access,
* Currently, we use 14 bits that are @level, @gpte_is_8_bytes, @quadrant, @access,
* @nxe, @cr0_wp, @smep_andnot_wp and @smap_andnot_wp.
*/
union kvm_mmu_page_role {
u32 word;
struct {
unsigned level:4;
unsigned cr4_pae:1;
unsigned gpte_is_8_bytes:1;
unsigned quadrant:2;
unsigned direct:1;
unsigned access:3;
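
For the record, the 14 bits named in the comment above add up:

/* level(4) + gpte_is_8_bytes(1) + quadrant(2) + access(3) + nxe(1)
 * + cr0_wp(1) + smep_andnot_wp(1) + smap_andnot_wp(1) = 14 bits <= 15
 */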
@ -350,6 +350,7 @@ struct kvm_mmu_page {
};
struct kvm_pio_request {
unsigned long linear_rip;
unsigned long count;
int in;
int port;
@ -568,6 +569,7 @@ struct kvm_vcpu_arch {
bool tpr_access_reporting;
u64 ia32_xss;
u64 microcode_version;
u64 arch_capabilities;
/*
* Paging state of the vcpu
@ -1192,6 +1194,8 @@ struct kvm_x86_ops {
int (*nested_enable_evmcs)(struct kvm_vcpu *vcpu,
uint16_t *vmcs_version);
uint16_t (*nested_get_evmcs_version)(struct kvm_vcpu *vcpu);
bool (*need_emulation_on_page_fault)(struct kvm_vcpu *vcpu);
};
struct kvm_arch_async_pf {
@ -1252,7 +1256,7 @@ void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
gfn_t gfn_offset, unsigned long mask);
void kvm_mmu_zap_all(struct kvm *kvm);
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
unsigned int kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);

View File

@ -77,7 +77,11 @@ static inline size_t real_mode_size_needed(void)
return ALIGN(real_mode_blob_end - real_mode_blob, PAGE_SIZE);
}
void set_real_mode_mem(phys_addr_t mem, size_t size);
static inline void set_real_mode_mem(phys_addr_t mem)
{
real_mode_header = (struct real_mode_header *) __va(mem);
}
void reserve_real_mode(void);
#endif /* __ASSEMBLY__ */

View File

@ -501,11 +501,8 @@ out_unlock:
void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms)
{
unsigned long delay = msecs_to_jiffies(delay_ms);
struct rdt_resource *r;
int cpu;
r = &rdt_resources_all[RDT_RESOURCE_L3];
cpu = cpumask_any(&dom->cpu_mask);
dom->cqm_work_cpu = cpu;

View File

@ -526,7 +526,9 @@ static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
new_config.enable = 0;
stimer->config.as_uint64 = new_config.as_uint64;
stimer_mark_pending(stimer, false);
if (stimer->config.enable)
stimer_mark_pending(stimer, false);
return 0;
}
@ -542,7 +544,10 @@ static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
stimer->config.enable = 0;
else if (stimer->config.auto_enable)
stimer->config.enable = 1;
stimer_mark_pending(stimer, false);
if (stimer->config.enable)
stimer_mark_pending(stimer, false);
return 0;
}

View File

@ -182,7 +182,7 @@ struct kvm_shadow_walk_iterator {
static const union kvm_mmu_page_role mmu_base_role_mask = {
.cr0_wp = 1,
.cr4_pae = 1,
.gpte_is_8_bytes = 1,
.nxe = 1,
.smep_andnot_wp = 1,
.smap_andnot_wp = 1,
@ -2205,6 +2205,7 @@ static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
static void kvm_mmu_commit_zap_page(struct kvm *kvm,
struct list_head *invalid_list);
#define for_each_valid_sp(_kvm, _sp, _gfn) \
hlist_for_each_entry(_sp, \
&(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \
@ -2215,12 +2216,17 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
for_each_valid_sp(_kvm, _sp, _gfn) \
if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else
static inline bool is_ept_sp(struct kvm_mmu_page *sp)
{
return sp->role.cr0_wp && sp->role.smap_andnot_wp;
}
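
Why this test is safe as a tag: smap_andnot_wp encodes "CR4.SMAP set and CR0.WP clear", so no real guest paging mode can produce cr0_wp == 1 together with smap_andnot_wp == 1; as the role-calculation comment further down notes, the impossible combination is reused to mark shadow-EPT pages, whose guest PTEs are always 8 bytes regardless of the vCPU's PAE setting.

/* real paging modes:  cr0_wp == 1  implies  smap_andnot_wp == 0
 * shadow EPT pages:   cr0_wp == 1  &&      smap_andnot_wp == 1  (tag)
 */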
/* @sp->gfn should be write-protected at the call site */
static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
struct list_head *invalid_list)
{
if (sp->role.cr4_pae != !!is_pae(vcpu)
|| vcpu->arch.mmu->sync_page(vcpu, sp) == 0) {
if ((!is_ept_sp(sp) && sp->role.gpte_is_8_bytes != !!is_pae(vcpu)) ||
vcpu->arch.mmu->sync_page(vcpu, sp) == 0) {
kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
return false;
}
@ -2423,7 +2429,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
role.level = level;
role.direct = direct;
if (role.direct)
role.cr4_pae = 0;
role.gpte_is_8_bytes = true;
role.access = access;
if (!vcpu->arch.mmu->direct_map
&& vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) {
@ -4794,7 +4800,6 @@ static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,
role.base.access = ACC_ALL;
role.base.nxe = !!is_nx(vcpu);
role.base.cr4_pae = !!is_pae(vcpu);
role.base.cr0_wp = is_write_protection(vcpu);
role.base.smm = is_smm(vcpu);
role.base.guest_mode = is_guest_mode(vcpu);
@ -4815,6 +4820,7 @@ kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
role.base.ad_disabled = (shadow_accessed_mask == 0);
role.base.level = kvm_x86_ops->get_tdp_level(vcpu);
role.base.direct = true;
role.base.gpte_is_8_bytes = true;
return role;
}
@ -4879,6 +4885,7 @@ kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
role.base.smap_andnot_wp = role.ext.cr4_smap &&
!is_write_protection(vcpu);
role.base.direct = !is_paging(vcpu);
role.base.gpte_is_8_bytes = !!is_pae(vcpu);
if (!is_long_mode(vcpu))
role.base.level = PT32E_ROOT_LEVEL;
@ -4918,18 +4925,26 @@ static union kvm_mmu_role
kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
bool execonly)
{
union kvm_mmu_role role;
union kvm_mmu_role role = {0};
/* Base role is inherited from root_mmu */
role.base.word = vcpu->arch.root_mmu.mmu_role.base.word;
role.ext = kvm_calc_mmu_role_ext(vcpu);
/* SMM flag is inherited from root_mmu */
role.base.smm = vcpu->arch.root_mmu.mmu_role.base.smm;
role.base.level = PT64_ROOT_4LEVEL;
role.base.gpte_is_8_bytes = true;
role.base.direct = false;
role.base.ad_disabled = !accessed_dirty;
role.base.guest_mode = true;
role.base.access = ACC_ALL;
/*
* WP=1 and NOT_WP=1 is an impossible combination, use WP and the
* SMAP variation to denote shadow EPT entries.
*/
role.base.cr0_wp = true;
role.base.smap_andnot_wp = true;
role.ext = kvm_calc_mmu_role_ext(vcpu);
role.ext.execonly = execonly;
return role;
@ -5179,7 +5194,7 @@ static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
gpa, bytes, sp->role.word);
offset = offset_in_page(gpa);
pte_size = sp->role.cr4_pae ? 8 : 4;
pte_size = sp->role.gpte_is_8_bytes ? 8 : 4;
/*
* Sometimes, the OS only writes the last byte to update status
@ -5203,7 +5218,7 @@ static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
page_offset = offset_in_page(gpa);
level = sp->role.level;
*nspte = 1;
if (!sp->role.cr4_pae) {
if (!sp->role.gpte_is_8_bytes) {
page_offset <<= 1; /* 32->64 */
/*
* A 32-bit pde maps 4MB while the shadow pdes map
@ -5393,10 +5408,12 @@ emulate:
* This can happen if a guest gets a page-fault on data access but the HW
* table walker is not able to read the instruction page (e.g. instruction
* page is not present in memory). In those cases we simply restart the
* guest.
* guest, with the exception of AMD Erratum 1096 which is unrecoverable.
*/
if (unlikely(insn && !insn_len))
return 1;
if (unlikely(insn && !insn_len)) {
if (!kvm_x86_ops->need_emulation_on_page_fault(vcpu))
return 1;
}
er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len);
@ -5509,7 +5526,9 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
if (flush && lock_flush_tlb) {
kvm_flush_remote_tlbs(kvm);
kvm_flush_remote_tlbs_with_address(kvm,
start_gfn,
iterator.gfn - start_gfn + 1);
flush = false;
}
cond_resched_lock(&kvm->mmu_lock);
@ -5517,7 +5536,8 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
}
if (flush && lock_flush_tlb) {
kvm_flush_remote_tlbs(kvm);
kvm_flush_remote_tlbs_with_address(kvm, start_gfn,
end_gfn - start_gfn + 1);
flush = false;
}
@ -6011,7 +6031,7 @@ out:
/*
* Calculate mmu pages needed for kvm.
*/
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
unsigned int kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm)
{
unsigned int nr_mmu_pages;
unsigned int nr_pages = 0;

View File

@ -29,10 +29,10 @@
\
role.word = __entry->role; \
\
trace_seq_printf(p, "sp gfn %llx l%u%s q%u%s %s%s" \
trace_seq_printf(p, "sp gfn %llx l%u %u-byte q%u%s %s%s" \
" %snxe %sad root %u %s%c", \
__entry->gfn, role.level, \
role.cr4_pae ? " pae" : "", \
role.gpte_is_8_bytes ? 8 : 4, \
role.quadrant, \
role.direct ? " direct" : "", \
access_str[role.access], \

View File

@ -7098,6 +7098,36 @@ static int nested_enable_evmcs(struct kvm_vcpu *vcpu,
return -ENODEV;
}
static bool svm_need_emulation_on_page_fault(struct kvm_vcpu *vcpu)
{
bool is_user, smap;
is_user = svm_get_cpl(vcpu) == 3;
smap = !kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
/*
 * Detect and work around Erratum 1096 Fam_17h_00_0Fh.
 *
 * In a non-SEV guest the hypervisor is able to read guest memory
 * to decode the faulting instruction when insn_len is zero, so we
 * return true to indicate that decoding is possible.
 *
 * In an SEV guest, however, guest memory is encrypted with a
 * guest-specific key and the hypervisor cannot decode the
 * instruction, so the erratum cannot be worked around. Print an
 * error and request that the guest be killed.
 */
if (is_user && smap) {
if (!sev_guest(vcpu->kvm))
return true;
pr_err_ratelimited("KVM: Guest triggered AMD Erratum 1096\n");
kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
}
return false;
}
static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.cpu_has_kvm_support = has_svm,
.disabled_by_bios = is_disabled,
@ -7231,6 +7261,8 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.nested_enable_evmcs = nested_enable_evmcs,
.nested_get_evmcs_version = nested_get_evmcs_version,
.need_emulation_on_page_fault = svm_need_emulation_on_page_fault,
};
static int __init svm_init(void)

View File

@ -2585,6 +2585,11 @@ static int nested_check_host_control_regs(struct kvm_vcpu *vcpu,
!nested_host_cr4_valid(vcpu, vmcs12->host_cr4) ||
!nested_cr3_valid(vcpu, vmcs12->host_cr3))
return -EINVAL;
if (is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu) ||
is_noncanonical_address(vmcs12->host_ia32_sysenter_eip, vcpu))
return -EINVAL;
/*
* If the load IA32_EFER VM-exit control is 1, bits reserved in the
* IA32_EFER MSR must be 0 in the field for that register. In addition,

View File

@ -1683,12 +1683,6 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
msr_info->data = to_vmx(vcpu)->spec_ctrl;
break;
case MSR_IA32_ARCH_CAPABILITIES:
if (!msr_info->host_initiated &&
!guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES))
return 1;
msr_info->data = to_vmx(vcpu)->arch_capabilities;
break;
case MSR_IA32_SYSENTER_CS:
msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
break;
@ -1895,11 +1889,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD,
MSR_TYPE_W);
break;
case MSR_IA32_ARCH_CAPABILITIES:
if (!msr_info->host_initiated)
return 1;
vmx->arch_capabilities = data;
break;
case MSR_IA32_CR_PAT:
if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
@ -4088,8 +4077,6 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
++vmx->nmsrs;
}
vmx->arch_capabilities = kvm_get_arch_capabilities();
vm_exit_controls_init(vmx, vmx_vmexit_ctrl());
/* 22.2.1, 20.8.1 */
@ -7409,6 +7396,11 @@ static int enable_smi_window(struct kvm_vcpu *vcpu)
return 0;
}
static bool vmx_need_emulation_on_page_fault(struct kvm_vcpu *vcpu)
{
return false;
}
static __init int hardware_setup(void)
{
unsigned long host_bndcfgs;
@ -7711,6 +7703,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.set_nested_state = NULL,
.get_vmcs12_pages = NULL,
.nested_enable_evmcs = NULL,
.need_emulation_on_page_fault = vmx_need_emulation_on_page_fault,
};
static void vmx_cleanup_l1d_flush(void)

View File

@ -190,7 +190,6 @@ struct vcpu_vmx {
u64 msr_guest_kernel_gs_base;
#endif
u64 arch_capabilities;
u64 spec_ctrl;
u32 vm_entry_controls_shadow;

View File

@ -1125,7 +1125,7 @@ static u32 msrs_to_save[] = {
#endif
MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
MSR_IA32_SPEC_CTRL, MSR_IA32_ARCH_CAPABILITIES,
MSR_IA32_SPEC_CTRL,
MSR_IA32_RTIT_CTL, MSR_IA32_RTIT_STATUS, MSR_IA32_RTIT_CR3_MATCH,
MSR_IA32_RTIT_OUTPUT_BASE, MSR_IA32_RTIT_OUTPUT_MASK,
MSR_IA32_RTIT_ADDR0_A, MSR_IA32_RTIT_ADDR0_B,
@ -1158,6 +1158,7 @@ static u32 emulated_msrs[] = {
MSR_IA32_TSC_ADJUST,
MSR_IA32_TSCDEADLINE,
MSR_IA32_ARCH_CAPABILITIES,
MSR_IA32_MISC_ENABLE,
MSR_IA32_MCG_STATUS,
MSR_IA32_MCG_CTL,
@ -2443,6 +2444,11 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (msr_info->host_initiated)
vcpu->arch.microcode_version = data;
break;
case MSR_IA32_ARCH_CAPABILITIES:
if (!msr_info->host_initiated)
return 1;
vcpu->arch.arch_capabilities = data;
break;
case MSR_EFER:
return set_efer(vcpu, data);
case MSR_K7_HWCR:
@ -2747,6 +2753,12 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_IA32_UCODE_REV:
msr_info->data = vcpu->arch.microcode_version;
break;
case MSR_IA32_ARCH_CAPABILITIES:
if (!msr_info->host_initiated &&
!guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES))
return 1;
msr_info->data = vcpu->arch.arch_capabilities;
break;
case MSR_IA32_TSC:
msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + vcpu->arch.tsc_offset;
break;
@ -6523,14 +6535,27 @@ int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
}
EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer);
static int complete_fast_pio_out(struct kvm_vcpu *vcpu)
{
vcpu->arch.pio.count = 0;
if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip)))
return 1;
return kvm_skip_emulated_instruction(vcpu);
}
static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size,
unsigned short port)
{
unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt,
size, port, &val, 1);
/* do not return to emulator after return from userspace */
vcpu->arch.pio.count = 0;
if (!ret) {
vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu);
vcpu->arch.complete_userspace_io = complete_fast_pio_out;
}
return ret;
}
@ -6541,6 +6566,11 @@ static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
/* We should only ever be called with arch.pio.count equal to 1 */
BUG_ON(vcpu->arch.pio.count != 1);
if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) {
vcpu->arch.pio.count = 0;
return 1;
}
/* For size less than 4 we merge, else we zero extend */
val = (vcpu->arch.pio.size < 4) ? kvm_register_read(vcpu, VCPU_REGS_RAX)
: 0;
@ -6553,7 +6583,7 @@ static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
vcpu->arch.pio.port, &val, 1);
kvm_register_write(vcpu, VCPU_REGS_RAX, val);
return 1;
return kvm_skip_emulated_instruction(vcpu);
}
static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
@ -6572,6 +6602,7 @@ static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
return ret;
}
vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu);
vcpu->arch.complete_userspace_io = complete_fast_pio_in;
return 0;
@ -6579,16 +6610,13 @@ static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in)
{
int ret = kvm_skip_emulated_instruction(vcpu);
int ret;
/*
* TODO: we might be squashing a KVM_GUESTDBG_SINGLESTEP-triggered
* KVM_EXIT_DEBUG here.
*/
if (in)
return kvm_fast_pio_in(vcpu, size, port) && ret;
ret = kvm_fast_pio_in(vcpu, size, port);
else
return kvm_fast_pio_out(vcpu, size, port) && ret;
ret = kvm_fast_pio_out(vcpu, size, port);
return ret && kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_fast_pio);
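
Net effect of the reworked fast-PIO path, as a rough flow sketch (not kernel code): %rip is advanced only once the I/O has actually completed, either immediately in-kernel or from the userspace completion callback.

/* ret = do_pio();                                  // in or out
 * if (!ret) {                                      // userspace round trip needed
 *         record vcpu->arch.pio.linear_rip;
 *         complete_userspace_io = complete_fast_pio_{in,out};
 *         return 0;                                // %rip not advanced yet
 * }
 * return kvm_skip_emulated_instruction(vcpu);      // completed in-kernel
 *
 * The completion handlers re-check the recorded linear %rip before
 * skipping, so a %rip that changed while out in userspace never causes
 * the wrong instruction to be skipped.
 */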
@ -8733,6 +8761,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
vcpu->arch.arch_capabilities = kvm_get_arch_capabilities();
vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT;
kvm_vcpu_mtrr_init(vcpu);
vcpu_load(vcpu);
@ -9429,13 +9458,9 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
int nr_mmu_pages = 0;
if (!kvm->arch.n_requested_mmu_pages)
nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
if (nr_mmu_pages)
kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
kvm_mmu_change_mmu_pages(kvm,
kvm_mmu_calculate_default_mmu_pages(kvm));
/*
* Dirty logging tracks sptes in 4k granularity, meaning that large

View File

@ -230,7 +230,7 @@ bool mmap_address_hint_valid(unsigned long addr, unsigned long len)
/* Can we access it for direct reading/writing? Must be RAM: */
int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
return addr + count <= __pa(high_memory);
return addr + count - 1 <= __pa(high_memory - 1);
}
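
The inclusive-end comparison matters because high_memory points one byte past the last valid virtual address; sketched with that in mind:

/* high_memory == __va(last_valid_phys + 1), i.e. one past the end.
 *
 * old: addr + count     <= __pa(high_memory)
 *      evaluates __pa() on an address just outside the populated range;
 *      if RAM reaches the top of the physical address space this is not
 *      a valid physical address at all (CONFIG_DEBUG_VIRTUAL complains)
 *
 * new: addr + count - 1 <= __pa(high_memory - 1)
 *      compares inclusive last-byte addresses, so __pa() only ever sees
 *      the last valid byte
 */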
/* Can we access it through mmap? Must be a valid physical address: */

View File

@ -449,7 +449,7 @@ void __init efi_free_boot_services(void)
*/
rm_size = real_mode_size_needed();
if (rm_size && (start + rm_size) < (1<<20) && size >= rm_size) {
set_real_mode_mem(start, rm_size);
set_real_mode_mem(start);
start += rm_size;
size -= rm_size;
}

View File

@ -15,15 +15,6 @@ u32 *trampoline_cr4_features;
/* Hold the pgd entry used on booting additional CPUs */
pgd_t trampoline_pgd_entry;
void __init set_real_mode_mem(phys_addr_t mem, size_t size)
{
void *base = __va(mem);
real_mode_header = (struct real_mode_header *) base;
printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n",
base, (unsigned long long)mem, size);
}
void __init reserve_real_mode(void)
{
phys_addr_t mem;
@ -42,7 +33,7 @@ void __init reserve_real_mode(void)
}
memblock_reserve(mem, size);
set_real_mode_mem(mem, size);
set_real_mode_mem(mem);
}
static void __init setup_real_mode(void)
