
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

master
David S. Miller 2021-02-10 13:30:12 -08:00
commit dc9d87581d
320 changed files with 3837 additions and 1448 deletions


@ -37,6 +37,7 @@ Andrew Murray <amurray@thegoodpenguin.co.uk> <amurray@embedded-bits.co.uk>
Andrew Murray <amurray@thegoodpenguin.co.uk> <andrew.murray@arm.com>
Andrew Vasquez <andrew.vasquez@qlogic.com>
Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
Andrey Ryabinin <ryabinin.a.a@gmail.com> <aryabinin@virtuozzo.com>
Andy Adamson <andros@citi.umich.edu>
Antoine Tenart <atenart@kernel.org> <antoine.tenart@bootlin.com>
Antoine Tenart <atenart@kernel.org> <antoine.tenart@free-electrons.com>
@ -179,6 +180,8 @@ Kees Cook <keescook@chromium.org> <kees.cook@canonical.com>
Kees Cook <keescook@chromium.org> <keescook@google.com>
Kees Cook <keescook@chromium.org> <kees@outflux.net>
Kees Cook <keescook@chromium.org> <kees@ubuntu.com>
Keith Busch <kbusch@kernel.org> <keith.busch@intel.com>
Keith Busch <kbusch@kernel.org> <keith.busch@linux.intel.com>
Kenneth W Chen <kenneth.w.chen@intel.com>
Konstantin Khlebnikov <koct9i@gmail.com> <khlebnikov@yandex-team.ru>
Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
@ -199,6 +202,8 @@ Li Yang <leoyang.li@nxp.com> <leoli@freescale.com>
Li Yang <leoyang.li@nxp.com> <leo@zh-kernel.org>
Lukasz Luba <lukasz.luba@arm.com> <l.luba@partner.samsung.com>
Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com>
Manivannan Sadhasivam <mani@kernel.org> <manivannanece23@gmail.com>
Manivannan Sadhasivam <mani@kernel.org> <manivannan.sadhasivam@linaro.org>
Marcin Nowakowski <marcin.nowakowski@mips.com> <marcin.nowakowski@imgtec.com>
Marc Zyngier <maz@kernel.org> <marc.zyngier@arm.com>
Mark Brown <broonie@sirena.org.uk>
@ -244,6 +249,7 @@ Morten Welinder <welinder@anemone.rentec.com>
Morten Welinder <welinder@darter.rentec.com>
Morten Welinder <welinder@troll.com>
Mythri P K <mythripk@ti.com>
Nathan Chancellor <nathan@kernel.org> <natechancellor@gmail.com>
Nguyen Anh Quynh <aquynh@gmail.com>
Nicolas Ferre <nicolas.ferre@microchip.com> <nicolas.ferre@atmel.com>
Nicolas Pitre <nico@fluxnic.net> <nicolas.pitre@linaro.org>
@ -334,6 +340,8 @@ Vinod Koul <vkoul@kernel.org> <vkoul@infradead.org>
Viresh Kumar <vireshk@kernel.org> <viresh.kumar2@arm.com>
Viresh Kumar <vireshk@kernel.org> <viresh.kumar@st.com>
Viresh Kumar <vireshk@kernel.org> <viresh.linux@gmail.com>
Viresh Kumar <viresh.kumar@linaro.org> <viresh.kumar@linaro.org>
Viresh Kumar <viresh.kumar@linaro.org> <viresh.kumar@linaro.com>
Vivien Didelot <vivien.didelot@gmail.com> <vivien.didelot@savoirfairelinux.com>
Vlad Dogaru <ddvlad@gmail.com> <vlad.dogaru@intel.com>
Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@parallels.com>


@ -75,7 +75,7 @@ quiet_cmd_sphinx = SPHINX $@ --> file://$(abspath $(BUILDDIR)/$3/$4)
cmd_sphinx = $(MAKE) BUILDDIR=$(abspath $(BUILDDIR)) $(build)=Documentation/userspace-api/media $2 && \
PYTHONDONTWRITEBYTECODE=1 \
BUILDDIR=$(abspath $(BUILDDIR)) SPHINX_CONF=$(abspath $(srctree)/$(src)/$5/$(SPHINX_CONF)) \
$(PYTHON) $(srctree)/scripts/jobserver-exec \
$(PYTHON3) $(srctree)/scripts/jobserver-exec \
$(SHELL) $(srctree)/Documentation/sphinx/parallel-wrapper.sh \
$(SPHINXBUILD) \
-b $2 \


@ -70,8 +70,8 @@ trampoline code on the vDSO, that trampoline is never intercepted.
[selector] is a pointer to a char-sized region in the process memory
region, that provides a quick way to enable or disable syscall redirection
thread-wide, without the need to invoke the kernel directly. selector
can be set to PR_SYS_DISPATCH_ON or PR_SYS_DISPATCH_OFF. Any other
value should terminate the program with a SIGSYS.
can be set to SYSCALL_DISPATCH_FILTER_ALLOW or SYSCALL_DISPATCH_FILTER_BLOCK.
Any other value should terminate the program with a SIGSYS.
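A minimal userspace sketch of this flow is shown below (an illustration,
not part of the kernel documentation's examples). It installs a SIGSYS
handler, enables Syscall User Dispatch with prctl(), and toggles the
selector byte. The fallback constant definitions are an assumption for
systems whose installed headers predate this ABI, and no
[offset, offset+len) region is excluded, which real users would normally
reserve for their libc's syscall trampolines::

    #define _GNU_SOURCE
    #include <signal.h>
    #include <sys/prctl.h>
    #include <unistd.h>

    /* Assumed fallbacks; values mirror include/uapi/linux/prctl.h. */
    #ifndef PR_SET_SYSCALL_USER_DISPATCH
    #define PR_SET_SYSCALL_USER_DISPATCH   59
    #define PR_SYS_DISPATCH_OFF            0
    #define PR_SYS_DISPATCH_ON             1
    #define SYSCALL_DISPATCH_FILTER_ALLOW  0
    #define SYSCALL_DISPATCH_FILTER_BLOCK  1
    #endif

    static volatile char selector = SYSCALL_DISPATCH_FILTER_ALLOW;

    static void handle_sigsys(int sig, siginfo_t *info, void *uc)
    {
            (void)sig; (void)info; (void)uc;
            /* Redirected syscalls land here; emulate them as needed. */
    }

    int main(void)
    {
            struct sigaction sa = { .sa_sigaction = handle_sigsys,
                                    .sa_flags = SA_SIGINFO };

            sigaction(SIGSYS, &sa, NULL);

            /* No excluded region in this sketch: offset = 0, len = 0. */
            if (prctl(PR_SET_SYSCALL_USER_DISPATCH, PR_SYS_DISPATCH_ON,
                      0, 0, &selector))
                    return 1;

            selector = SYSCALL_DISPATCH_FILTER_BLOCK; /* redirect to SIGSYS */
            getpid();                                 /* delivered as SIGSYS */
            selector = SYSCALL_DISPATCH_FILTER_ALLOW; /* back to normal */
            return 0;
    }

Note that the handler itself must not issue syscalls while the selector is
still SYSCALL_DISPATCH_FILTER_BLOCK, since those would be redirected too.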
Security Notes
--------------


@ -163,8 +163,7 @@ particular KASAN features.
- ``kasan=off`` or ``=on`` controls whether KASAN is enabled (default: ``on``).
- ``kasan.stacktrace=off`` or ``=on`` disables or enables alloc and free stack
traces collection (default: ``on`` for ``CONFIG_DEBUG_KERNEL=y``, otherwise
``off``).
traces collection (default: ``on``).
- ``kasan.fault=report`` or ``=panic`` controls whether to only print a KASAN
report or also panic the kernel (default: ``report``).


@ -5,7 +5,8 @@ Required properties:
- compatible: "adc-keys"
- io-channels: Phandle to an ADC channel
- io-channel-names = "buttons";
- keyup-threshold-microvolt: Voltage at which all the keys are considered up.
- keyup-threshold-microvolt: Voltage above or equal to which all the keys are
considered up.
Optional properties:
- poll-interval: Poll interval time in milliseconds
@ -17,7 +18,12 @@ Each button (key) is represented as a sub-node of "adc-keys":
Required subnode-properties:
- label: Descriptive name of the key.
- linux,code: Keycode to emit.
- press-threshold-microvolt: Voltage ADC input when this key is pressed.
- press-threshold-microvolt: Voltage above or equal to which this key is
considered pressed.
No two values of press-threshold-microvolt may be the same.
All values of press-threshold-microvolt must be less than
keyup-threshold-microvolt.
Example:
@ -47,3 +53,15 @@ Example:
press-threshold-microvolt = <500000>;
};
};
+--------------------------------+------------------------+
| 2.000.000 <= value | no key pressed |
+--------------------------------+------------------------+
| 1.500.000 <= value < 2.000.000 | KEY_VOLUMEUP pressed |
+--------------------------------+------------------------+
| 1.000.000 <= value < 1.500.000 | KEY_VOLUMEDOWN pressed |
+--------------------------------+------------------------+
| 500.000 <= value < 1.000.000 | KEY_ENTER pressed |
+--------------------------------+------------------------+
| value < 500.000 | no key pressed |
+--------------------------------+------------------------+
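The table can be read as: the pressed key is the one with the largest
press-threshold-microvolt at or below the measured voltage, and no key is
considered pressed once the voltage reaches keyup-threshold-microvolt.
A small C sketch of that lookup, using the keycodes and values of this
hypothetical example (an illustration only, not the adc-keys driver):

    struct adc_key {
            int keycode;
            unsigned int press_threshold_uv;
    };

    static const struct adc_key keys[] = {
            { 115 /* KEY_VOLUMEUP   */, 1500000 },
            { 114 /* KEY_VOLUMEDOWN */, 1000000 },
            {  28 /* KEY_ENTER      */,  500000 },
    };
    static const unsigned int keyup_threshold_uv = 2000000;

    /* Returns the pressed keycode, or 0 when no key is considered pressed. */
    static int adc_value_to_key(unsigned int value_uv)
    {
            unsigned int best = 0;
            unsigned int i;
            int code = 0;

            if (value_uv >= keyup_threshold_uv)
                    return 0;       /* at or above keyup-threshold: all keys up */

            for (i = 0; i < sizeof(keys) / sizeof(keys[0]); i++) {
                    if (value_uv >= keys[i].press_threshold_uv &&
                        keys[i].press_threshold_uv >= best) {
                            best = keys[i].press_threshold_uv;
                            code = keys[i].keycode;
                    }
            }
            return code;            /* 0 when below every press threshold */
    }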


@ -26,6 +26,7 @@ properties:
- goodix,gt927
- goodix,gt9271
- goodix,gt928
- goodix,gt9286
- goodix,gt967
reg:


@ -586,6 +586,14 @@ without significant effort.
The advantage of mounting with the "volatile" option is that all forms of
sync calls to the upper filesystem are omitted.
In order to avoid giving a false sense of safety, the syncfs (and fsync)
semantics of volatile mounts are slightly different than that of the rest of
VFS. If any writeback error occurs on the upperdir's filesystem after a
volatile mount takes place, all sync functions will return an error. Once this
condition is reached, the filesystem will not recover, and every subsequent sync
call will return an error, even if the upperdir has not experienced a new error
since the last sync call.
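For example, a userspace check of this behaviour could look like the
sketch below; the mount point "/mnt/overlay" is a placeholder for a
volatile overlay mount, and once the first error has been reported every
later syncfs() or fsync() on that mount is expected to fail as well::

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* Placeholder path for an overlay mounted with -o volatile. */
            int fd = open("/mnt/overlay", O_RDONLY | O_DIRECTORY);

            if (fd < 0)
                    return 1;

            if (syncfs(fd) < 0) {
                    /*
                     * A writeback error hit the upperdir at some point;
                     * from here on every sync call on this mount keeps
                     * failing, even without any new error.
                     */
                    perror("syncfs");
            }
            close(fd);
            return 0;
    }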
When overlay is mounted with "volatile" option, the directory
"$workdir/work/incompat/volatile" is created. During next mount, overlay
checks for this directory and refuses to mount if present. This is a strong


@ -11,16 +11,13 @@ compiler [1]_. They are useful for runtime instrumentation and static analysis.
We can analyse, change and add further code during compilation via
callbacks [2]_, GIMPLE [3]_, IPA [4]_ and RTL passes [5]_.
The GCC plugin infrastructure of the kernel supports all gcc versions from
4.5 to 6.0, building out-of-tree modules, cross-compilation and building in a
separate directory.
Plugin source files have to be compilable by both a C and a C++ compiler as well
because gcc versions 4.5 and 4.6 are compiled by a C compiler,
gcc-4.7 can be compiled by a C or a C++ compiler,
and versions 4.8+ can only be compiled by a C++ compiler.
The GCC plugin infrastructure of the kernel supports building out-of-tree
modules, cross-compilation and building in a separate directory.
Plugin source files have to be compilable by a C++ compiler.
Currently the GCC plugin infrastructure supports only the x86, arm, arm64 and
powerpc architectures.
Currently the GCC plugin infrastructure supports only some architectures.
Grep "select HAVE_GCC_PLUGINS" to find out which architectures support
GCC plugins.
This infrastructure was ported from grsecurity [6]_ and PaX [7]_.
@ -47,20 +44,13 @@ Files
This is a compatibility header for GCC plugins.
It should be always included instead of individual gcc headers.
**$(src)/scripts/gcc-plugin.sh**
This script checks the availability of the included headers in
gcc-common.h and chooses the proper host compiler to build the plugins
(gcc-4.7 can be built by either gcc or g++).
**$(src)/scripts/gcc-plugins/gcc-generate-gimple-pass.h,
$(src)/scripts/gcc-plugins/gcc-generate-ipa-pass.h,
$(src)/scripts/gcc-plugins/gcc-generate-simple_ipa-pass.h,
$(src)/scripts/gcc-plugins/gcc-generate-rtl-pass.h**
These headers automatically generate the registration structures for
GIMPLE, SIMPLE_IPA, IPA and RTL passes. They support all gcc versions
from 4.5 to 6.0.
GIMPLE, SIMPLE_IPA, IPA and RTL passes.
They should be preferred to creating the structures by hand.
@ -68,21 +58,25 @@ Usage
=====
You must install the gcc plugin headers for your gcc version,
e.g., on Ubuntu for gcc-4.9::
e.g., on Ubuntu for gcc-10::
apt-get install gcc-4.9-plugin-dev
apt-get install gcc-10-plugin-dev
Or on Fedora::
dnf install gcc-plugin-devel
Enable a GCC plugin based feature in the kernel config::
Enable the GCC plugin infrastructure and some plugin(s) you want to use
in the kernel config::
CONFIG_GCC_PLUGIN_CYC_COMPLEXITY = y
CONFIG_GCC_PLUGINS=y
CONFIG_GCC_PLUGIN_CYC_COMPLEXITY=y
CONFIG_GCC_PLUGIN_LATENT_ENTROPY=y
...
To compile only the plugin(s)::
To compile the minimum tool set including the plugin(s)::
make gcc-plugins
make scripts
or just run the kernel make and compile the whole kernel with
the cyclomatic complexity GCC plugin.
@ -91,7 +85,8 @@ the cyclomatic complexity GCC plugin.
4. How to add a new GCC plugin
==============================
The GCC plugins are in $(src)/scripts/gcc-plugins/. You can use a file or a directory
here. It must be added to $(src)/scripts/gcc-plugins/Makefile,
$(src)/scripts/Makefile.gcc-plugins and $(src)/arch/Kconfig.
The GCC plugins are in scripts/gcc-plugins/. You need to put plugin source files
right under scripts/gcc-plugins/. Creating subdirectories is not supported.
It must be added to scripts/gcc-plugins/Makefile, scripts/Makefile.gcc-plugins
and a relevant Kconfig file.
See the cyc_complexity_plugin.c (CONFIG_GCC_PLUGIN_CYC_COMPLEXITY) GCC plugin.
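For orientation, a bare-bones plugin source file might look like the
sketch below. This is an assumed minimal shape, not cyc_complexity_plugin.c
itself: it only registers its version and help strings, and it still needs
the Makefile and Kconfig hooks described above. Real plugins would also
register passes via the gcc-generate-*-pass.h helpers::

    /* Hypothetical scripts/gcc-plugins/example_plugin.c */
    #include "gcc-common.h"

    __visible int plugin_is_GPL_compatible;

    static struct plugin_info example_plugin_info = {
            .version        = "20210210",
            .help           = "example plugin that only registers itself\n",
    };

    __visible int plugin_init(struct plugin_name_args *plugin_info,
                              struct plugin_gcc_version *version)
    {
            /* Refuse to load into a gcc we were not built against. */
            if (!plugin_default_version_check(version, &gcc_version))
                    return 1;

            register_callback(plugin_info->base_name, PLUGIN_INFO, NULL,
                              &example_plugin_info);
            return 0;
    }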


@ -63,6 +63,50 @@ They can be enabled individually. The full list of the parameters: ::
Currently, the integrated assembler is disabled by default. You can pass
``LLVM_IAS=1`` to enable it.
Supported Architectures
-----------------------
LLVM does not target all of the architectures that Linux supports, and
just because a target is supported in LLVM does not mean that the kernel
will build or work without any issues. Below is a general summary of
architectures that currently work with ``CC=clang`` or ``LLVM=1``. The level
of support corresponds to "S" values in the MAINTAINERS file. If an
architecture is not present, it either means that LLVM does not target
it or there are known issues. Using the latest stable version of LLVM or
even the development tree will generally yield the best results.
An architecture's ``defconfig`` is generally expected to work well, but
certain configurations may have problems that have not been uncovered
yet. Bug reports are always welcome at the issue tracker below!
.. list-table::
:widths: 10 10 10
:header-rows: 1
* - Architecture
- Level of support
- ``make`` command
* - arm
- Supported
- ``LLVM=1``
* - arm64
- Supported
- ``LLVM=1``
* - mips
- Maintained
- ``CC=clang``
* - powerpc
- Maintained
- ``CC=clang``
* - riscv
- Maintained
- ``CC=clang``
* - s390
- Maintained
- ``CC=clang``
* - x86
- Supported
- ``LLVM=1``
Getting Help
------------


@ -755,7 +755,7 @@ more details, with real examples.
bits on the scripts nonetheless.
Kbuild provides variables $(CONFIG_SHELL), $(AWK), $(PERL),
$(PYTHON) and $(PYTHON3) to refer to interpreters for the respective
and $(PYTHON3) to refer to interpreters for the respective
scripts.
Example::


@ -37,8 +37,10 @@ call L2.
Running nested VMX
------------------
The nested VMX feature is disabled by default. It can be enabled by giving
the "nested=1" option to the kvm-intel module.
The nested VMX feature is enabled by default since Linux kernel v4.20. For
older Linux kernels, it can be enabled by giving the "nested=1" option to the
kvm-intel module.
No modifications are required to user space (qemu). However, qemu's default
emulated CPU type (qemu64) does not list the "VMX" CPU feature, so it must be


@ -74,7 +74,7 @@ few:
Enabling "nested" (x86)
-----------------------
From Linux kernel v4.19 onwards, the ``nested`` KVM parameter is enabled
From Linux kernel v4.20 onwards, the ``nested`` KVM parameter is enabled
by default for Intel and AMD. (Though your Linux distribution might
override this default.)
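As a quick sanity check from userspace, something like the hedged snippet
below reads the parameter back; the sysfs path assumes the kvm_intel
module (kvm_amd on AMD hosts), and the file is absent when the module is
not loaded::

    #include <stdio.h>

    int main(void)
    {
            /* kvm_intel assumed; use kvm_amd on AMD hosts. */
            FILE *f = fopen("/sys/module/kvm_intel/parameters/nested", "r");
            char val = '?';

            if (!f) {
                    perror("nested parameter not available");
                    return 1;
            }
            if (fread(&val, 1, 1, f) == 1)
                    printf("nested: %c\n", val); /* typically 'Y' or '1' when on */
            fclose(f);
            return 0;
    }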


@ -2616,8 +2616,8 @@ S: Maintained
F: drivers/power/reset/keystone-reset.c
ARM/TEXAS INSTRUMENTS K3 ARCHITECTURE
M: Tero Kristo <t-kristo@ti.com>
M: Nishanth Menon <nm@ti.com>
M: Tero Kristo <kristo@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
F: Documentation/devicetree/bindings/arm/ti/k3.yaml
@ -4336,7 +4336,7 @@ S: Maintained
F: .clang-format
CLANG/LLVM BUILD SUPPORT
M: Nathan Chancellor <natechancellor@gmail.com>
M: Nathan Chancellor <nathan@kernel.org>
M: Nick Desaulniers <ndesaulniers@google.com>
L: clang-built-linux@googlegroups.com
S: Supported
@ -6506,9 +6506,9 @@ S: Maintained
F: drivers/edac/skx_*.[ch]
EDAC-TI
M: Tero Kristo <t-kristo@ti.com>
M: Tero Kristo <kristo@kernel.org>
L: linux-edac@vger.kernel.org
S: Maintained
S: Odd Fixes
F: drivers/edac/ti_edac.c
EDIROL UA-101/UA-1000 DRIVER
@ -9591,7 +9591,7 @@ F: Documentation/hwmon/k8temp.rst
F: drivers/hwmon/k8temp.c
KASAN
M: Andrey Ryabinin <aryabinin@virtuozzo.com>
M: Andrey Ryabinin <ryabinin.a.a@gmail.com>
R: Alexander Potapenko <glider@google.com>
R: Dmitry Vyukov <dvyukov@google.com>
L: kasan-dev@googlegroups.com
@ -17613,7 +17613,7 @@ F: include/linux/dma/k3-psil.h
TEXAS INSTRUMENTS' SYSTEM CONTROL INTERFACE (TISCI) PROTOCOL DRIVER
M: Nishanth Menon <nm@ti.com>
M: Tero Kristo <t-kristo@ti.com>
M: Tero Kristo <kristo@kernel.org>
M: Santosh Shilimkar <ssantosh@kernel.org>
L: linux-arm-kernel@lists.infradead.org
S: Maintained
@ -17757,9 +17757,9 @@ S: Maintained
F: drivers/clk/clk-cdce706.c
TI CLOCK DRIVER
M: Tero Kristo <t-kristo@ti.com>
M: Tero Kristo <kristo@kernel.org>
L: linux-omap@vger.kernel.org
S: Maintained
S: Odd Fixes
F: drivers/clk/ti/
F: include/linux/clk/ti.h


@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 11
SUBLEVEL = 0
EXTRAVERSION = -rc6
EXTRAVERSION = -rc7
NAME = Kleptomaniac Octopus
# *DOCUMENTATION*
@ -452,7 +452,6 @@ AWK = awk
INSTALLKERNEL := installkernel
DEPMOD = depmod
PERL = perl
PYTHON = python
PYTHON3 = python3
CHECK = sparse
BASH = bash
@ -508,7 +507,7 @@ CLANG_FLAGS :=
export ARCH SRCARCH CONFIG_SHELL BASH HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE LD CC
export CPP AR NM STRIP OBJCOPY OBJDUMP READELF PAHOLE RESOLVE_BTFIDS LEX YACC AWK INSTALLKERNEL
export PERL PYTHON PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX
export PERL PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX
export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ ZSTD
export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE
@ -813,10 +812,12 @@ KBUILD_CFLAGS += -ftrivial-auto-var-init=zero
KBUILD_CFLAGS += -enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang
endif
DEBUG_CFLAGS :=
# Workaround for GCC versions < 5.0
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61801
ifdef CONFIG_CC_IS_GCC
DEBUG_CFLAGS := $(call cc-ifversion, -lt, 0500, $(call cc-option, -fno-var-tracking-assignments))
DEBUG_CFLAGS += $(call cc-ifversion, -lt, 0500, $(call cc-option, -fno-var-tracking-assignments))
endif
ifdef CONFIG_DEBUG_INFO
@ -949,12 +950,6 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=designated-init)
# change __FILE__ to the relative path from the srctree
KBUILD_CPPFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=)
# ensure -fcf-protection is disabled when using retpoline as it is
# incompatible with -mindirect-branch=thunk-extern
ifdef CONFIG_RETPOLINE
KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
endif
# include additional Makefiles when needed
include-y := scripts/Makefile.extrawarn
include-$(CONFIG_KASAN) += scripts/Makefile.kasan


@ -326,9 +326,6 @@
clocks = <&xtal_32k>, <&xtal>;
clock-names = "xtal_32k", "xtal";
assigned-clocks = <&clk LPC32XX_CLK_HCLK_PLL>;
assigned-clock-rates = <208000000>;
};
};


@ -114,7 +114,7 @@
gpio-sck = <&gpio1 12 GPIO_ACTIVE_HIGH>;
gpio-miso = <&gpio1 18 GPIO_ACTIVE_HIGH>;
gpio-mosi = <&gpio1 20 GPIO_ACTIVE_HIGH>;
cs-gpios = <&gpio1 19 GPIO_ACTIVE_HIGH>;
cs-gpios = <&gpio1 19 GPIO_ACTIVE_LOW>;
num-chipselects = <1>;
/* lcd panel */
@ -124,7 +124,6 @@
spi-max-frequency = <100000>;
spi-cpol;
spi-cpha;
spi-cs-high;
backlight= <&backlight>;
label = "lcd";


@ -16,8 +16,13 @@
debounce-interval = <10>;
};
/*
* We use pad 0x4a100116 abe_dmic_din3.gpio_122 as the irq instead
* of the gpio interrupt to avoid lost events in deeper idle states.
*/
slider {
label = "Keypad Slide";
interrupts-extended = <&omap4_pmx_core 0xd6>;
gpios = <&gpio4 26 GPIO_ACTIVE_HIGH>; /* gpio122 */
linux,input-type = <EV_SW>;
linux,code = <SW_KEYPAD_SLIDE>;


@ -33,9 +33,9 @@
* during TX anyway and that it only controls drive enable DE
* line. Hence, the RX is always enabled here.
*/
rs485-rx-en {
rs485-rx-en-hog {
gpio-hog;
gpios = <8 GPIO_ACTIVE_HIGH>;
gpios = <8 0>;
output-low;
line-name = "rs485-rx-en";
};
@ -61,9 +61,9 @@
* order to reset the Hub when USB bus is powered down, but
* so far there is no such functionality.
*/
usb-hub {
usb-hub-hog {
gpio-hog;
gpios = <2 GPIO_ACTIVE_HIGH>;
gpios = <2 0>;
output-high;
line-name = "usb-hub-reset";
};
@ -87,6 +87,12 @@
};
};
&i2c4 {
touchscreen@49 {
status = "disabled";
};
};
&i2c5 { /* TP7/TP8 */
pinctrl-names = "default";
pinctrl-0 = <&i2c5_pins_a>;
@ -104,7 +110,7 @@
* are used for on-board microSD slot instead.
*/
/delete-property/broken-cd;
cd-gpios = <&gpioi 10 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>;
cd-gpios = <&gpioi 10 GPIO_ACTIVE_HIGH>;
disable-wp;
};


@ -43,9 +43,9 @@
* in order to turn on port power when USB bus is powered up, but so
* far there is no such functionality.
*/
usb-port-power {
usb-port-power-hog {
gpio-hog;
gpios = <13 GPIO_ACTIVE_LOW>;
gpios = <13 0>;
output-low;
line-name = "usb-port-power";
};


@ -390,7 +390,8 @@
pinctrl-0 = <&sdmmc1_b4_pins_a &sdmmc1_dir_pins_a>;
pinctrl-1 = <&sdmmc1_b4_od_pins_a &sdmmc1_dir_pins_a>;
pinctrl-2 = <&sdmmc1_b4_sleep_pins_a &sdmmc1_dir_sleep_pins_a>;
broken-cd;
cd-gpios = <&gpiog 1 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>;
disable-wp;
st,sig-dir;
st,neg-edge;
st,use-ckin;


@ -110,7 +110,7 @@
pinctrl-names = "default";
pinctrl-0 = <&gmac_rgmii_pins>;
phy-handle = <&phy1>;
phy-mode = "rgmii";
phy-mode = "rgmii-id";
phy-supply = <&reg_gmac_3v3>;
status = "okay";
};


@ -0,0 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ARM_KEXEC_INTERNAL_H
#define _ARM_KEXEC_INTERNAL_H
struct kexec_relocate_data {
unsigned long kexec_start_address;
unsigned long kexec_indirection_page;
unsigned long kexec_mach_type;
unsigned long kexec_r2;
};
#endif


@ -149,7 +149,34 @@
.align
99: .word .
#if defined(ZIMAGE)
.word . + 4
/*
* Storage for the state maintained by the macro.
*
* In the kernel proper, this data is located in arch/arm/mach-tegra/tegra.c.
* That's because this header is included from multiple files, and we only
* want a single copy of the data. In particular, the UART probing code above
* assumes it's running using physical addresses. This is true when this file
* is included from head.o, but not when included from debug.o. So we need
* to share the probe results between the two copies, rather than having
* to re-run the probing again later.
*
* In the decompressor, we put the storage right here, since common.c
* isn't included in the decompressor build. This storage data gets put in
* .text even though it's really data, since .data is discarded from the
* decompressor. Luckily, .text is writeable in the decompressor, unless
* CONFIG_ZBOOT_ROM. That dependency is handled in arch/arm/Kconfig.debug.
*/
/* Debug UART initialization required */
.word 1
/* Debug UART physical address */
.word 0
/* Debug UART virtual address */
.word 0
#else
.word tegra_uart_config
#endif
.ltorg
/* Load previously selected UART address */
@ -189,30 +216,3 @@
.macro waituarttxrdy,rd,rx
.endm
/*
* Storage for the state maintained by the macros above.
*
* In the kernel proper, this data is located in arch/arm/mach-tegra/tegra.c.
* That's because this header is included from multiple files, and we only
* want a single copy of the data. In particular, the UART probing code above
* assumes it's running using physical addresses. This is true when this file
* is included from head.o, but not when included from debug.o. So we need
* to share the probe results between the two copies, rather than having
* to re-run the probing again later.
*
* In the decompressor, we put the symbol/storage right here, since common.c
* isn't included in the decompressor build. This symbol gets put in .text
* even though it's really data, since .data is discarded from the
* decompressor. Luckily, .text is writeable in the decompressor, unless
* CONFIG_ZBOOT_ROM. That dependency is handled in arch/arm/Kconfig.debug.
*/
#if defined(ZIMAGE)
tegra_uart_config:
/* Debug UART initialization required */
.word 1
/* Debug UART physical address */
.word 0
/* Debug UART virtual address */
.word 0
#endif


@ -12,6 +12,7 @@
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
#include <asm/kexec-internal.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/mach/arch.h>
@ -170,5 +171,9 @@ int main(void)
DEFINE(MPU_RGN_PRBAR, offsetof(struct mpu_rgn, prbar));
DEFINE(MPU_RGN_PRLAR, offsetof(struct mpu_rgn, prlar));
#endif
DEFINE(KEXEC_START_ADDR, offsetof(struct kexec_relocate_data, kexec_start_address));
DEFINE(KEXEC_INDIR_PAGE, offsetof(struct kexec_relocate_data, kexec_indirection_page));
DEFINE(KEXEC_MACH_TYPE, offsetof(struct kexec_relocate_data, kexec_mach_type));
DEFINE(KEXEC_R2, offsetof(struct kexec_relocate_data, kexec_r2));
return 0;
}


@ -13,6 +13,7 @@
#include <linux/of_fdt.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/kexec-internal.h>
#include <asm/fncpy.h>
#include <asm/mach-types.h>
#include <asm/smp_plat.h>
@ -22,11 +23,6 @@
extern void relocate_new_kernel(void);
extern const unsigned int relocate_new_kernel_size;
extern unsigned long kexec_start_address;
extern unsigned long kexec_indirection_page;
extern unsigned long kexec_mach_type;
extern unsigned long kexec_boot_atags;
static atomic_t waiting_for_crash_ipi;
/*
@ -159,6 +155,7 @@ void (*kexec_reinit)(void);
void machine_kexec(struct kimage *image)
{
unsigned long page_list, reboot_entry_phys;
struct kexec_relocate_data *data;
void (*reboot_entry)(void);
void *reboot_code_buffer;
@ -174,18 +171,17 @@ void machine_kexec(struct kimage *image)
reboot_code_buffer = page_address(image->control_code_page);
/* Prepare parameters for reboot_code_buffer*/
set_kernel_text_rw();
kexec_start_address = image->start;
kexec_indirection_page = page_list;
kexec_mach_type = machine_arch_type;
kexec_boot_atags = image->arch.kernel_r2;
/* copy our kernel relocation code to the control code page */
reboot_entry = fncpy(reboot_code_buffer,
&relocate_new_kernel,
relocate_new_kernel_size);
data = reboot_code_buffer + relocate_new_kernel_size;
data->kexec_start_address = image->start;
data->kexec_indirection_page = page_list;
data->kexec_mach_type = machine_arch_type;
data->kexec_r2 = image->arch.kernel_r2;
/* get the identity mapping physical address for the reboot code */
reboot_entry_phys = virt_to_idmap(reboot_entry);


@ -5,14 +5,16 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/kexec.h>
.align 3 /* not needed for this code, but keeps fncpy() happy */
ENTRY(relocate_new_kernel)
ldr r0,kexec_indirection_page
ldr r1,kexec_start_address
adr r7, relocate_new_kernel_end
ldr r0, [r7, #KEXEC_INDIR_PAGE]
ldr r1, [r7, #KEXEC_START_ADDR]
/*
* If there is no indirection page (we are doing crashdumps)
@ -57,34 +59,16 @@ ENTRY(relocate_new_kernel)
2:
/* Jump to relocated kernel */
mov lr,r1
mov r0,#0
ldr r1,kexec_mach_type
ldr r2,kexec_boot_atags
ARM( ret lr )
THUMB( bx lr )
.align
.globl kexec_start_address
kexec_start_address:
.long 0x0
.globl kexec_indirection_page
kexec_indirection_page:
.long 0x0
.globl kexec_mach_type
kexec_mach_type:
.long 0x0
/* phy addr of the atags for the new kernel */
.globl kexec_boot_atags
kexec_boot_atags:
.long 0x0
mov lr, r1
mov r0, #0
ldr r1, [r7, #KEXEC_MACH_TYPE]
ldr r2, [r7, #KEXEC_R2]
ARM( ret lr )
THUMB( bx lr )
ENDPROC(relocate_new_kernel)
.align 3
relocate_new_kernel_end:
.globl relocate_new_kernel_size


@ -693,18 +693,20 @@ struct page *get_signal_page(void)
addr = page_address(page);
/* Poison the entire page */
memset32(addr, __opcode_to_mem_arm(0xe7fddef1),
PAGE_SIZE / sizeof(u32));
/* Give the signal return code some randomness */
offset = 0x200 + (get_random_int() & 0x7fc);
signal_return_offset = offset;
/*
* Copy signal return handlers into the vector page, and
* set sigreturn to be a pointer to these.
*/
/* Copy signal return handlers into the page */
memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
ptr = (unsigned long)addr + offset;
flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
/* Flush out all instructions in this page */
ptr = (unsigned long)addr;
flush_icache_range(ptr, ptr + PAGE_SIZE);
return page;
}


@ -65,15 +65,15 @@ dc21285_read_config(struct pci_bus *bus, unsigned int devfn, int where,
if (addr)
switch (size) {
case 1:
asm("ldrb %0, [%1, %2]"
asm volatile("ldrb %0, [%1, %2]"
: "=r" (v) : "r" (addr), "r" (where) : "cc");
break;
case 2:
asm("ldrh %0, [%1, %2]"
asm volatile("ldrh %0, [%1, %2]"
: "=r" (v) : "r" (addr), "r" (where) : "cc");
break;
case 4:
asm("ldr %0, [%1, %2]"
asm volatile("ldr %0, [%1, %2]"
: "=r" (v) : "r" (addr), "r" (where) : "cc");
break;
}
@ -99,17 +99,17 @@ dc21285_write_config(struct pci_bus *bus, unsigned int devfn, int where,
if (addr)
switch (size) {
case 1:
asm("strb %0, [%1, %2]"
asm volatile("strb %0, [%1, %2]"
: : "r" (value), "r" (addr), "r" (where)
: "cc");
break;
case 2:
asm("strh %0, [%1, %2]"
asm volatile("strh %0, [%1, %2]"
: : "r" (value), "r" (addr), "r" (where)
: "cc");
break;
case 4:
asm("str %0, [%1, %2]"
asm volatile("str %0, [%1, %2]"
: : "r" (value), "r" (addr), "r" (where)
: "cc");
break;


@ -203,6 +203,8 @@ static int osk_tps_setup(struct i2c_client *client, void *context)
*/
gpio_request(OSK_TPS_GPIO_USB_PWR_EN, "n_vbus_en");
gpio_direction_output(OSK_TPS_GPIO_USB_PWR_EN, 1);
/* Free the GPIO again as the driver will request it */
gpio_free(OSK_TPS_GPIO_USB_PWR_EN);
/* Set GPIO 2 high so LED D3 is off by default */
tps65010_set_gpio_out_value(GPIO2, HIGH);


@ -17,11 +17,10 @@ config ARCH_OMAP3
bool "TI OMAP3"
depends on ARCH_MULTI_V7
select ARCH_OMAP2PLUS
select ARM_CPU_SUSPEND if PM
select ARM_CPU_SUSPEND
select OMAP_HWMOD
select OMAP_INTERCONNECT
select PM_OPP if PM
select PM if CPU_IDLE
select PM_OPP
select SOC_HAS_OMAP2_SDRC
select ARM_ERRATA_430973
@ -30,7 +29,7 @@ config ARCH_OMAP4
depends on ARCH_MULTI_V7
select ARCH_OMAP2PLUS
select ARCH_NEEDS_CPU_IDLE_COUPLED if SMP
select ARM_CPU_SUSPEND if PM
select ARM_CPU_SUSPEND
select ARM_ERRATA_720789
select ARM_GIC
select HAVE_ARM_SCU if SMP
@ -40,7 +39,7 @@ config ARCH_OMAP4
select OMAP_INTERCONNECT_BARRIER
select PL310_ERRATA_588369 if CACHE_L2X0
select PL310_ERRATA_727915 if CACHE_L2X0
select PM_OPP if PM
select PM_OPP
select PM if CPU_IDLE
select ARM_ERRATA_754322
select ARM_ERRATA_775420
@ -50,7 +49,7 @@ config SOC_OMAP5
bool "TI OMAP5"
depends on ARCH_MULTI_V7
select ARCH_OMAP2PLUS
select ARM_CPU_SUSPEND if PM
select ARM_CPU_SUSPEND
select ARM_GIC
select HAVE_ARM_SCU if SMP
select HAVE_ARM_ARCH_TIMER
@ -58,14 +57,14 @@ config SOC_OMAP5
select OMAP_HWMOD
select OMAP_INTERCONNECT
select OMAP_INTERCONNECT_BARRIER
select PM_OPP if PM
select PM_OPP
select ZONE_DMA if ARM_LPAE
config SOC_AM33XX
bool "TI AM33XX"
depends on ARCH_MULTI_V7
select ARCH_OMAP2PLUS
select ARM_CPU_SUSPEND if PM
select ARM_CPU_SUSPEND
config SOC_AM43XX
bool "TI AM43x"
@ -79,13 +78,13 @@ config SOC_AM43XX
select ARM_ERRATA_754322
select ARM_ERRATA_775420
select OMAP_INTERCONNECT
select ARM_CPU_SUSPEND if PM
select ARM_CPU_SUSPEND
config SOC_DRA7XX
bool "TI DRA7XX"
depends on ARCH_MULTI_V7
select ARCH_OMAP2PLUS
select ARM_CPU_SUSPEND if PM
select ARM_CPU_SUSPEND
select ARM_GIC
select HAVE_ARM_SCU if SMP
select HAVE_ARM_ARCH_TIMER
@ -94,7 +93,7 @@ config SOC_DRA7XX
select OMAP_HWMOD
select OMAP_INTERCONNECT
select OMAP_INTERCONNECT_BARRIER
select PM_OPP if PM
select PM_OPP
select ZONE_DMA if ARM_LPAE
select PINCTRL_TI_IODELAY if OF && PINCTRL
@ -112,9 +111,11 @@ config ARCH_OMAP2PLUS
select OMAP_DM_TIMER
select OMAP_GPMC
select PINCTRL
select PM_GENERIC_DOMAINS if PM
select PM_GENERIC_DOMAINS_OF if PM
select PM
select PM_GENERIC_DOMAINS
select PM_GENERIC_DOMAINS_OF
select RESET_CONTROLLER
select SIMPLE_PM_BUS
select SOC_BUS
select TI_SYSC
select OMAP_IRQCHIP
@ -140,7 +141,6 @@ config ARCH_OMAP2PLUS_TYPICAL
select I2C_OMAP
select MENELAUS if ARCH_OMAP2
select NEON if CPU_V7
select PM
select REGULATOR
select REGULATOR_FIXED_VOLTAGE
select TWL4030_CORE if ARCH_OMAP3 || ARCH_OMAP4


@ -151,10 +151,10 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
(cx->mpu_logic_state == PWRDM_POWER_OFF);
/* Enter broadcast mode for periodic timers */
tick_broadcast_enable();
RCU_NONIDLE(tick_broadcast_enable());
/* Enter broadcast mode for one-shot timers */
tick_broadcast_enter();
RCU_NONIDLE(tick_broadcast_enter());
/*
* Call idle CPU PM enter notifier chain so that
@ -166,7 +166,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
if (dev->cpu == 0) {
pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
RCU_NONIDLE(omap_set_pwrdm_state(mpu_pd, cx->mpu_state));
/*
* Call idle CPU cluster PM enter notifier chain
@ -178,7 +178,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
index = 0;
cx = state_ptr + index;
pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
RCU_NONIDLE(omap_set_pwrdm_state(mpu_pd, cx->mpu_state));
mpuss_can_lose_context = 0;
}
}
@ -194,9 +194,9 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
mpuss_can_lose_context)
gic_dist_disable();
clkdm_deny_idle(cpu_clkdm[1]);
omap_set_pwrdm_state(cpu_pd[1], PWRDM_POWER_ON);
clkdm_allow_idle(cpu_clkdm[1]);
RCU_NONIDLE(clkdm_deny_idle(cpu_clkdm[1]));
RCU_NONIDLE(omap_set_pwrdm_state(cpu_pd[1], PWRDM_POWER_ON));
RCU_NONIDLE(clkdm_allow_idle(cpu_clkdm[1]));
if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) &&
mpuss_can_lose_context) {
@ -222,7 +222,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
cpu_pm_exit();
cpu_pm_out:
tick_broadcast_exit();
RCU_NONIDLE(tick_broadcast_exit());
fail:
cpuidle_coupled_parallel_barrier(dev, &abort_barrier);


@ -522,6 +522,7 @@ static struct of_dev_auxdata omap_auxdata_lookup[] = {
&dra7_ipu1_dsp_iommu_pdata),
#endif
/* Common auxdata */
OF_DEV_AUXDATA("simple-pm-bus", 0, NULL, omap_auxdata_lookup),
OF_DEV_AUXDATA("ti,sysc", 0, NULL, &ti_sysc_pdata),
OF_DEV_AUXDATA("pinctrl-single", 0, NULL, &pcs_pdata),
OF_DEV_AUXDATA("ti,omap-prm-inst", 0, NULL, &ti_prm_pdata),


@ -280,8 +280,6 @@
"timing-adjustment";
rx-fifo-depth = <4096>;
tx-fifo-depth = <2048>;
resets = <&reset RESET_ETHERNET>;
reset-names = "stmmaceth";
power-domains = <&pwrc PWRC_AXG_ETHERNET_MEM_ID>;
status = "disabled";
};


@ -224,8 +224,6 @@
"timing-adjustment";
rx-fifo-depth = <4096>;
tx-fifo-depth = <2048>;
resets = <&reset RESET_ETHERNET>;
reset-names = "stmmaceth";
status = "disabled";
mdio0: mdio {
@ -2390,7 +2388,7 @@
interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
dr_mode = "host";
snps,dis_u2_susphy_quirk;
snps,quirk-frame-length-adjustment;
snps,quirk-frame-length-adjustment = <0x20>;
snps,parkmode-disable-ss-quirk;
};
};


@ -13,7 +13,6 @@
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/power/meson-gxbb-power.h>
#include <dt-bindings/reset/amlogic,meson-gxbb-reset.h>
#include <dt-bindings/thermal/thermal.h>
/ {
@ -576,8 +575,6 @@
interrupt-names = "macirq";
rx-fifo-depth = <4096>;
tx-fifo-depth = <2048>;
resets = <&reset RESET_ETHERNET>;
reset-names = "stmmaceth";
power-domains = <&pwrc PWRC_GXBB_ETHERNET_MEM_ID>;
status = "disabled";
};


@ -52,7 +52,7 @@
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
gpio = <&gpio_ao GPIOAO_3 GPIO_ACTIVE_HIGH>;
gpio = <&gpio_ao GPIOAO_3 GPIO_OPEN_DRAIN>;
enable-active-high;
regulator-always-on;
};


@ -385,7 +385,7 @@
dcfg: dcfg@1ee0000 {
compatible = "fsl,ls1046a-dcfg", "syscon";
reg = <0x0 0x1ee0000 0x0 0x10000>;
reg = <0x0 0x1ee0000 0x0 0x1000>;
big-endian;
};


@ -415,7 +415,9 @@
&gcc {
protected-clocks = <GCC_QSPI_CORE_CLK>,
<GCC_QSPI_CORE_CLK_SRC>,
<GCC_QSPI_CNOC_PERIPH_AHB_CLK>;
<GCC_QSPI_CNOC_PERIPH_AHB_CLK>,
<GCC_LPASS_Q6_AXI_CLK>,
<GCC_LPASS_SWAY_CLK>;
};
&gpu {


@ -302,7 +302,9 @@
&gcc {
protected-clocks = <GCC_QSPI_CORE_CLK>,
<GCC_QSPI_CORE_CLK_SRC>,
<GCC_QSPI_CNOC_PERIPH_AHB_CLK>;
<GCC_QSPI_CNOC_PERIPH_AHB_CLK>,
<GCC_LPASS_Q6_AXI_CLK>,
<GCC_LPASS_SWAY_CLK>;
};
&gpu {
@ -320,6 +322,8 @@
&i2c3 {
status = "okay";
clock-frequency = <400000>;
/* Overwrite pinctrl-0 from sdm845.dtsi */
pinctrl-0 = <&qup_i2c3_default &i2c3_hid_active>;
tsel: hid@15 {
compatible = "hid-over-i2c";
@ -327,9 +331,6 @@
hid-descr-addr = <0x1>;
interrupts-extended = <&tlmm 37 IRQ_TYPE_LEVEL_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&i2c3_hid_active>;
};
tsc2: hid@2c {
@ -338,11 +339,6 @@
hid-descr-addr = <0x20>;
interrupts-extended = <&tlmm 37 IRQ_TYPE_LEVEL_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&i2c3_hid_active>;
status = "disabled";
};
};


@ -1097,7 +1097,7 @@
vopl_mmu: iommu@ff470f00 {
compatible = "rockchip,iommu";
reg = <0x0 0xff470f00 0x0 0x100>;
interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "vopl_mmu";
clocks = <&cru ACLK_VOPL>, <&cru HCLK_VOPL>;
clock-names = "aclk", "iface";


@ -114,6 +114,10 @@
cpu-supply = <&vdd_arm>;
};
&display_subsystem {
status = "disabled";
};
&gmac2io {
assigned-clocks = <&cru SCLK_MAC2IO>, <&cru SCLK_MAC2IO_EXT>;
assigned-clock-parents = <&gmac_clk>, <&gmac_clk>;


@ -790,7 +790,6 @@
&pcie0 {
bus-scan-delay-ms = <1000>;
ep-gpios = <&gpio2 RK_PD4 GPIO_ACTIVE_HIGH>;
max-link-speed = <2>;
num-lanes = <4>;
pinctrl-names = "default";
pinctrl-0 = <&pcie_clkreqn_cpm>;


@ -234,6 +234,7 @@
reg = <0x0 0xf8000000 0x0 0x2000000>,
<0x0 0xfd000000 0x0 0x1000000>;
reg-names = "axi-base", "apb-base";
device_type = "pci";
#address-cells = <3>;
#size-cells = <2>;
#interrupt-cells = <1>;
@ -252,7 +253,6 @@
<0 0 0 2 &pcie0_intc 1>,
<0 0 0 3 &pcie0_intc 2>,
<0 0 0 4 &pcie0_intc 3>;
linux,pci-domain = <0>;
max-link-speed = <1>;
msi-map = <0x0 &its 0x0 0x1000>;
phys = <&pcie_phy 0>, <&pcie_phy 1>,
@ -1278,7 +1278,6 @@
compatible = "rockchip,rk3399-vdec";
reg = <0x0 0xff660000 0x0 0x400>;
interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH 0>;
interrupt-names = "vdpu";
clocks = <&cru ACLK_VDU>, <&cru HCLK_VDU>,
<&cru SCLK_VDU_CA>, <&cru SCLK_VDU_CORE>;
clock-names = "axi", "ahb", "cabac", "core";


@ -1076,7 +1076,7 @@ CONFIG_INTERCONNECT=y
CONFIG_INTERCONNECT_QCOM=y
CONFIG_INTERCONNECT_QCOM_MSM8916=m
CONFIG_INTERCONNECT_QCOM_OSM_L3=m
CONFIG_INTERCONNECT_QCOM_SDM845=m
CONFIG_INTERCONNECT_QCOM_SDM845=y
CONFIG_INTERCONNECT_QCOM_SM8150=m
CONFIG_INTERCONNECT_QCOM_SM8250=m
CONFIG_EXT2_FS=y


@ -251,9 +251,9 @@ static inline const void *__tag_set(const void *addr, u8 tag)
* lives in the [PAGE_OFFSET, PAGE_END) interval at the bottom of the
* kernel's TTBR1 address range.
*/
#define __is_lm_address(addr) (((u64)(addr) ^ PAGE_OFFSET) < (PAGE_END - PAGE_OFFSET))
#define __is_lm_address(addr) (((u64)(addr) - PAGE_OFFSET) < (PAGE_END - PAGE_OFFSET))
#define __lm_to_phys(addr) (((addr) & ~PAGE_OFFSET) + PHYS_OFFSET)
#define __lm_to_phys(addr) (((addr) - PAGE_OFFSET) + PHYS_OFFSET)
#define __kimg_to_phys(addr) ((addr) - kimage_voffset)
#define __virt_to_phys_nodebug(x) ({ \
@ -332,7 +332,7 @@ static inline void *phys_to_virt(phys_addr_t x)
#endif /* !CONFIG_SPARSEMEM_VMEMMAP || CONFIG_DEBUG_VIRTUAL */
#define virt_addr_valid(addr) ({ \
__typeof__(addr) __addr = addr; \
__typeof__(addr) __addr = __tag_reset(addr); \
__is_lm_address(__addr) && pfn_valid(virt_to_pfn(__addr)); \
})


@ -47,6 +47,8 @@ __invalid:
b .
/*
* Only uses x0..x3 so as to not clobber callee-saved SMCCC registers.
*
* x0: SMCCC function ID
* x1: struct kvm_nvhe_init_params PA
*/
@ -70,9 +72,9 @@ __do_hyp_init:
eret
1: mov x0, x1
mov x4, lr
bl ___kvm_hyp_init
mov lr, x4
mov x3, lr
bl ___kvm_hyp_init // Clobbers x0..x2
mov lr, x3
/* Hello, World! */
mov x0, #SMCCC_RET_SUCCESS
@ -82,8 +84,8 @@ SYM_CODE_END(__kvm_hyp_init)
/*
* Initialize the hypervisor in EL2.
*
* Only uses x0..x3 so as to not clobber callee-saved SMCCC registers
* and leave x4 for the caller.
* Only uses x0..x2 so as to not clobber callee-saved SMCCC registers
* and leave x3 for the caller.
*
* x0: struct kvm_nvhe_init_params PA
*/
@ -112,9 +114,9 @@ alternative_else_nop_endif
/*
* Set the PS bits in TCR_EL2.
*/
ldr x1, [x0, #NVHE_INIT_TCR_EL2]
tcr_compute_pa_size x1, #TCR_EL2_PS_SHIFT, x2, x3
msr tcr_el2, x1
ldr x0, [x0, #NVHE_INIT_TCR_EL2]
tcr_compute_pa_size x0, #TCR_EL2_PS_SHIFT, x1, x2
msr tcr_el2, x0
isb
@ -193,7 +195,7 @@ SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu)
/* Enable MMU, set vectors and stack. */
mov x0, x28
bl ___kvm_hyp_init // Clobbers x0..x3
bl ___kvm_hyp_init // Clobbers x0..x2
/* Leave idmap. */
mov x0, x29


@ -9,7 +9,7 @@
phys_addr_t __virt_to_phys(unsigned long x)
{
WARN(!__is_lm_address(x),
WARN(!__is_lm_address(__tag_reset(x)),
"virt_to_phys used for non-linear address: %pK (%pS)\n",
(void *)x,
(void *)x);


@ -69,7 +69,7 @@ vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)
unwcheck: vmlinux
-$(Q)READELF=$(READELF) $(PYTHON) $(srctree)/arch/ia64/scripts/unwcheck.py $<
-$(Q)READELF=$(READELF) $(PYTHON3) $(srctree)/arch/ia64/scripts/unwcheck.py $<
archclean:


@ -1,4 +1,4 @@
#!/usr/bin/env python
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0
#
# Usage: unwcheck.py FILE


@ -51,7 +51,7 @@ obj-y += ptrace/
obj-$(CONFIG_PPC64) += setup_64.o \
paca.o nvram_64.o note.o syscall_64.o
obj-$(CONFIG_COMPAT) += sys_ppc32.o signal_32.o
obj-$(CONFIG_VDSO32) += vdso32/
obj-$(CONFIG_VDSO32) += vdso32_wrapper.o
obj-$(CONFIG_PPC_WATCHDOG) += watchdog.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_PPC_DAWR) += dawr.o
@ -60,7 +60,7 @@ obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o
obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o
obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o
obj-$(CONFIG_PPC_BARRIER_NOSPEC) += security.o
obj-$(CONFIG_PPC64) += vdso64/
obj-$(CONFIG_PPC64) += vdso64_wrapper.o
obj-$(CONFIG_ALTIVEC) += vecemu.o
obj-$(CONFIG_PPC_BOOK3S_IDLE) += idle_book3s.o
procfs-y := proc_powerpc.o


@ -30,7 +30,7 @@ CC32FLAGS += -m32
KBUILD_CFLAGS := $(filter-out -mcmodel=medium -mabi=elfv1 -mabi=elfv2 -mcall-aixdesc,$(KBUILD_CFLAGS))
endif
targets := $(obj-vdso32) vdso32.so.dbg
targets := $(obj-vdso32) vdso32.so.dbg vgettimeofday.o
obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32))
GCOV_PROFILE := n
@ -46,9 +46,6 @@ obj-y += vdso32_wrapper.o
targets += vdso32.lds
CPPFLAGS_vdso32.lds += -P -C -Upowerpc
# Force dependency (incbin is bad)
$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so.dbg
# link rule for the .so file, .lds has to be first
$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) $(obj)/vgettimeofday.o FORCE
$(call if_changed,vdso32ld_and_check)


@ -17,7 +17,7 @@ endif
# Build rules
targets := $(obj-vdso64) vdso64.so.dbg
targets := $(obj-vdso64) vdso64.so.dbg vgettimeofday.o
obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64))
GCOV_PROFILE := n
@ -29,15 +29,9 @@ ccflags-y := -shared -fno-common -fno-builtin -nostdlib \
-Wl,-soname=linux-vdso64.so.1 -Wl,--hash-style=both
asflags-y := -D__VDSO64__ -s
obj-y += vdso64_wrapper.o
targets += vdso64.lds
CPPFLAGS_vdso64.lds += -P -C -U$(ARCH)
$(obj)/vgettimeofday.o: %.o: %.c FORCE
# Force dependency (incbin is bad)
$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so.dbg
# link rule for the .so file, .lds has to be first
$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) $(obj)/vgettimeofday.o FORCE
$(call if_changed,vdso64ld_and_check)


@ -15,11 +15,20 @@
.text
/*
* __kernel_start_sigtramp_rt64 and __kernel_sigtramp_rt64 together
* are one function split in two parts. The kernel jumps to the former
* and the signal handler indirectly (by blr) returns to the latter.
* __kernel_sigtramp_rt64 needs to point to the return address so
* glibc can correctly identify the trampoline stack frame.
*/
.balign 8
.balign IFETCH_ALIGN_BYTES
V_FUNCTION_BEGIN(__kernel_sigtramp_rt64)
V_FUNCTION_BEGIN(__kernel_start_sigtramp_rt64)
.Lsigrt_start:
bctrl /* call the handler */
V_FUNCTION_END(__kernel_start_sigtramp_rt64)
V_FUNCTION_BEGIN(__kernel_sigtramp_rt64)
addi r1, r1, __SIGNAL_FRAMESIZE
li r0,__NR_rt_sigreturn
sc


@ -131,4 +131,4 @@ VERSION
/*
* Make the sigreturn code visible to the kernel.
*/
VDSO_sigtramp_rt64 = __kernel_sigtramp_rt64;
VDSO_sigtramp_rt64 = __kernel_start_sigtramp_rt64;


@ -818,13 +818,15 @@ void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
break;
if (rev) {
/* reverse 32 bytes */
buf.d[0] = byterev_8(reg->d[3]);
buf.d[1] = byterev_8(reg->d[2]);
buf.d[2] = byterev_8(reg->d[1]);
buf.d[3] = byterev_8(reg->d[0]);
reg = &buf;
union vsx_reg buf32[2];
buf32[0].d[0] = byterev_8(reg[1].d[1]);
buf32[0].d[1] = byterev_8(reg[1].d[0]);
buf32[1].d[0] = byterev_8(reg[0].d[1]);
buf32[1].d[1] = byterev_8(reg[0].d[0]);
memcpy(mem, buf32, size);
} else {
memcpy(mem, reg, size);
}
memcpy(mem, reg, size);
break;
case 16:
/* stxv, stxvx, stxvl, stxvll */


@ -252,8 +252,10 @@ choice
default MAXPHYSMEM_128GB if 64BIT && CMODEL_MEDANY
config MAXPHYSMEM_1GB
depends on 32BIT
bool "1GiB"
config MAXPHYSMEM_2GB
depends on 64BIT && CMODEL_MEDLOW
bool "2GiB"
config MAXPHYSMEM_128GB
depends on 64BIT && CMODEL_MEDANY


@ -135,7 +135,10 @@ extern phys_addr_t __phys_addr_symbol(unsigned long x);
#endif /* __ASSEMBLY__ */
#define virt_addr_valid(vaddr) (pfn_valid(virt_to_pfn(vaddr)))
#define virt_addr_valid(vaddr) ({ \
unsigned long _addr = (unsigned long)vaddr; \
(unsigned long)(_addr) >= PAGE_OFFSET && pfn_valid(virt_to_pfn(_addr)); \
})
#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC


@ -32,14 +32,14 @@ bool kernel_page_present(struct page *page);
#endif /* __ASSEMBLY__ */
#ifdef CONFIG_ARCH_HAS_STRICT_KERNEL_RWX
#ifdef CONFIG_STRICT_KERNEL_RWX
#ifdef CONFIG_64BIT
#define SECTION_ALIGN (1 << 21)
#else
#define SECTION_ALIGN (1 << 22)
#endif
#else /* !CONFIG_ARCH_HAS_STRICT_KERNEL_RWX */
#else /* !CONFIG_STRICT_KERNEL_RWX */
#define SECTION_ALIGN L1_CACHE_BYTES
#endif /* CONFIG_ARCH_HAS_STRICT_KERNEL_RWX */
#endif /* CONFIG_STRICT_KERNEL_RWX */
#endif /* _ASM_RISCV_SET_MEMORY_H */


@ -293,6 +293,8 @@ void free_initmem(void)
unsigned long init_begin = (unsigned long)__init_begin;
unsigned long init_end = (unsigned long)__init_end;
set_memory_rw_nx(init_begin, (init_end - init_begin) >> PAGE_SHIFT);
if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
set_memory_rw_nx(init_begin, (init_end - init_begin) >> PAGE_SHIFT);
free_initmem_default(POISON_FREE_INITMEM);
}


@ -15,7 +15,6 @@ config UML
select HAVE_DEBUG_KMEMLEAK
select HAVE_DEBUG_BUGVERBOSE
select NO_DMA
select ARCH_HAS_SET_MEMORY
select GENERIC_IRQ_SHOW
select GENERIC_CPU_DEVICES
select HAVE_GCC_PLUGINS


@ -375,11 +375,11 @@ break_loop:
file = NULL;
backing_file = strsep(&str, ",:");
if (*backing_file == '\0')
if (backing_file && *backing_file == '\0')
backing_file = NULL;
serial = strsep(&str, ",:");
if (*serial == '\0')
if (serial && *serial == '\0')
serial = NULL;
if (backing_file && ubd_dev->no_cow) {
@ -1241,7 +1241,7 @@ static int __init ubd_driver_init(void){
/* Letting ubd=sync be like using ubd#s= instead of ubd#= is
* enough. So use anyway the io thread. */
}
stack = alloc_stack(0);
stack = alloc_stack(0, 0);
io_pid = start_io_thread(stack + PAGE_SIZE - sizeof(void *),
&thread_fd);
if(io_pid < 0){


@ -1084,6 +1084,7 @@ static void virtio_uml_release_dev(struct device *d)
}
os_close_file(vu_dev->sock);
kfree(vu_dev);
}
/* Platform device */
@ -1097,7 +1098,7 @@ static int virtio_uml_probe(struct platform_device *pdev)
if (!pdata)
return -EINVAL;
vu_dev = devm_kzalloc(&pdev->dev, sizeof(*vu_dev), GFP_KERNEL);
vu_dev = kzalloc(sizeof(*vu_dev), GFP_KERNEL);
if (!vu_dev)
return -ENOMEM;


@ -5,7 +5,7 @@
#define ioremap ioremap
static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
{
return (void __iomem *)(unsigned long)offset;
return NULL;
}
#define iounmap iounmap


@ -55,15 +55,12 @@ extern unsigned long end_iomem;
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define __PAGE_KERNEL_EXEC \
(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define __PAGE_KERNEL_RO \
(_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO)
/*
* The i386 can't do page protection for execute, and considers that the same


@ -1 +0,0 @@
#include <asm-generic/set_memory.h>


@ -19,7 +19,7 @@ extern int kmalloc_ok;
#define UML_ROUND_UP(addr) \
((((unsigned long) addr) + PAGE_SIZE - 1) & PAGE_MASK)
extern unsigned long alloc_stack(int atomic);
extern unsigned long alloc_stack(int order, int atomic);
extern void free_stack(unsigned long stack, int order);
struct pt_regs;


@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/kmsg_dump.h>
#include <linux/console.h>
#include <linux/string.h>
#include <shared/init.h>
#include <shared/kern.h>
#include <os.h>
@ -16,8 +17,12 @@ static void kmsg_dumper_stdout(struct kmsg_dumper *dumper,
if (!console_trylock())
return;
for_each_console(con)
break;
for_each_console(con) {
if(strcmp(con->name, "tty") == 0 &&
(con->flags & (CON_ENABLED | CON_CONSDEV)) != 0) {
break;
}
}
console_unlock();


@ -32,7 +32,6 @@
#include <os.h>
#include <skas.h>
#include <linux/time-internal.h>
#include <asm/set_memory.h>
/*
* This is a per-cpu array. A processor only modifies its entry and it only
@ -63,18 +62,16 @@ void free_stack(unsigned long stack, int order)
free_pages(stack, order);
}
unsigned long alloc_stack(int atomic)
unsigned long alloc_stack(int order, int atomic)
{
unsigned long addr;
unsigned long page;
gfp_t flags = GFP_KERNEL;
if (atomic)
flags = GFP_ATOMIC;
addr = __get_free_pages(flags, 1);
page = __get_free_pages(flags, order);
set_memory_ro(addr, 1);
return addr + PAGE_SIZE;
return page;
}
static inline void set_current(struct task_struct *task)


@ -535,6 +535,31 @@ invalid_number:
return 1;
}
static void time_travel_set_start(void)
{
if (time_travel_start_set)
return;
switch (time_travel_mode) {
case TT_MODE_EXTERNAL:
time_travel_start = time_travel_ext_req(UM_TIMETRAVEL_GET_TOD, -1);
/* controller gave us the *current* time, so adjust by that */
time_travel_ext_get_time();
time_travel_start -= time_travel_time;
break;
case TT_MODE_INFCPU:
case TT_MODE_BASIC:
if (!time_travel_start_set)
time_travel_start = os_persistent_clock_emulation();
break;
case TT_MODE_OFF:
/* we just read the host clock with os_persistent_clock_emulation() */
break;
}
time_travel_start_set = true;
}
#else /* CONFIG_UML_TIME_TRAVEL_SUPPORT */
#define time_travel_start_set 0
#define time_travel_start 0
@ -553,6 +578,10 @@ static void time_travel_set_interval(unsigned long long interval)
{
}
static inline void time_travel_set_start(void)
{
}
/* fail link if this actually gets used */
extern u64 time_travel_ext_req(u32 op, u64 time);
@ -731,6 +760,8 @@ void read_persistent_clock64(struct timespec64 *ts)
{
long long nsecs;
time_travel_set_start();
if (time_travel_mode != TT_MODE_OFF)
nsecs = time_travel_start + time_travel_time;
else
@ -742,25 +773,6 @@ void read_persistent_clock64(struct timespec64 *ts)
void __init time_init(void)
{
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
switch (time_travel_mode) {
case TT_MODE_EXTERNAL:
time_travel_start = time_travel_ext_req(UM_TIMETRAVEL_GET_TOD, -1);
/* controller gave us the *current* time, so adjust by that */
time_travel_ext_get_time();
time_travel_start -= time_travel_time;
break;
case TT_MODE_INFCPU:
case TT_MODE_BASIC:
if (!time_travel_start_set)
time_travel_start = os_persistent_clock_emulation();
break;
case TT_MODE_OFF:
/* we just read the host clock with os_persistent_clock_emulation() */
break;
}
#endif
timer_set_signal_handler();
late_time_init = um_timer_setup;
}


@ -608,57 +608,3 @@ void force_flush_all(void)
vma = vma->vm_next;
}
}
struct page_change_data {
unsigned int set_mask, clear_mask;
};
static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
{
struct page_change_data *cdata = data;
pte_t pte = READ_ONCE(*ptep);
pte_clear_bits(pte, cdata->clear_mask);
pte_set_bits(pte, cdata->set_mask);
set_pte(ptep, pte);
return 0;
}
static int change_memory(unsigned long start, unsigned long pages,
unsigned int set_mask, unsigned int clear_mask)
{
unsigned long size = pages * PAGE_SIZE;
struct page_change_data data;
int ret;
data.set_mask = set_mask;
data.clear_mask = clear_mask;
ret = apply_to_page_range(&init_mm, start, size, change_page_range,
&data);
flush_tlb_kernel_range(start, start + size);
return ret;
}
int set_memory_ro(unsigned long addr, int numpages)
{
return change_memory(addr, numpages, 0, _PAGE_RW);
}
int set_memory_rw(unsigned long addr, int numpages)
{
return change_memory(addr, numpages, _PAGE_RW, 0);
}
int set_memory_nx(unsigned long addr, int numpages)
{
return -EOPNOTSUPP;
}
int set_memory_x(unsigned long addr, int numpages)
{
return -EOPNOTSUPP;
}


@ -26,7 +26,8 @@
#include <mem_user.h>
#include <os.h>
#define DEFAULT_COMMAND_LINE "root=98:0"
#define DEFAULT_COMMAND_LINE_ROOT "root=98:0"
#define DEFAULT_COMMAND_LINE_CONSOLE "console=tty"
/* Changed in add_arg and setup_arch, which run before SMP is started */
static char __initdata command_line[COMMAND_LINE_SIZE] = { 0 };
@ -109,7 +110,8 @@ unsigned long end_vm;
int ncpus = 1;
/* Set in early boot */
static int have_root __initdata = 0;
static int have_root __initdata;
static int have_console __initdata;
/* Set in uml_mem_setup and modified in linux_main */
long long physmem_size = 32 * 1024 * 1024;
@ -161,6 +163,17 @@ __uml_setup("debug", no_skas_debug_setup,
" this flag is not needed to run gdb on UML in skas mode\n\n"
);
static int __init uml_console_setup(char *line, int *add)
{
have_console = 1;
return 0;
}
__uml_setup("console=", uml_console_setup,
"console=<preferred console>\n"
" Specify the preferred console output driver\n\n"
);
static int __init Usage(char *line, int *add)
{
const char **p;
@ -264,7 +277,10 @@ int __init linux_main(int argc, char **argv)
add_arg(argv[i]);
}
if (have_root == 0)
add_arg(DEFAULT_COMMAND_LINE);
add_arg(DEFAULT_COMMAND_LINE_ROOT);
if (have_console == 0)
add_arg(DEFAULT_COMMAND_LINE_CONSOLE);
host_task_size = os_get_top_address();
/*


@ -45,7 +45,7 @@ int run_helper(void (*pre_exec)(void *), void *pre_data, char **argv)
unsigned long stack, sp;
int pid, fds[2], ret, n;
stack = alloc_stack(__cant_sleep());
stack = alloc_stack(0, __cant_sleep());
if (stack == 0)
return -ENOMEM;
@ -116,7 +116,7 @@ int run_helper_thread(int (*proc)(void *), void *arg, unsigned int flags,
unsigned long stack, sp;
int pid, status, err;
stack = alloc_stack(__cant_sleep());
stack = alloc_stack(0, __cant_sleep());
if (stack == 0)
return -ENOMEM;


@ -104,5 +104,18 @@ long long os_nsecs(void)
*/
void os_idle_sleep(void)
{
pause();
struct itimerspec its;
sigset_t set, old;
/* block SIGALRM while we analyze the timer state */
sigemptyset(&set);
sigaddset(&set, SIGALRM);
sigprocmask(SIG_BLOCK, &set, &old);
/* check the timer, and if it'll fire then wait for it */
timer_gettime(event_high_res_timer, &its);
if (its.it_value.tv_sec || its.it_value.tv_nsec)
sigsuspend(&old);
/* either way, restore the signal mask */
sigprocmask(SIG_UNBLOCK, &set, NULL);
}


@ -120,6 +120,9 @@ else
KBUILD_CFLAGS += -mno-red-zone
KBUILD_CFLAGS += -mcmodel=kernel
# Intel CET isn't enabled in the kernel
KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
endif
ifdef CONFIG_X86_X32


@ -197,16 +197,6 @@ static inline bool apic_needs_pit(void) { return true; }
#endif /* !CONFIG_X86_LOCAL_APIC */
#ifdef CONFIG_X86_X2APIC
/*
* Make previous memory operations globally visible before
* sending the IPI through x2apic wrmsr. We need a serializing instruction or
* mfence for this.
*/
static inline void x2apic_wrmsr_fence(void)
{
asm volatile("mfence" : : : "memory");
}
static inline void native_apic_msr_write(u32 reg, u32 v)
{
if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR ||


@ -84,4 +84,22 @@ do { \
#include <asm-generic/barrier.h>
/*
* Make previous memory operations globally visible before
* a WRMSR.
*
* MFENCE makes writes visible, but only affects load/store
* instructions. WRMSR is unfortunately not a load/store
* instruction and is unaffected by MFENCE. The LFENCE ensures
* that the WRMSR is not reordered.
*
* Most WRMSRs are full serializing instructions themselves and
* do not require this barrier. This is only required for the
* IA32_TSC_DEADLINE and X2APIC MSRs.
*/
static inline void weak_wrmsr_fence(void)
{
asm volatile("mfence; lfence" : : : "memory");
}
#endif /* _ASM_X86_BARRIER_H */


@ -43,8 +43,6 @@ static __always_inline void arch_check_user_regs(struct pt_regs *regs)
}
#define arch_check_user_regs arch_check_user_regs
#define ARCH_SYSCALL_EXIT_WORK (_TIF_SINGLESTEP)
static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
unsigned long ti_work)
{


@ -41,6 +41,7 @@
#include <asm/perf_event.h>
#include <asm/x86_init.h>
#include <linux/atomic.h>
#include <asm/barrier.h>
#include <asm/mpspec.h>
#include <asm/i8259.h>
#include <asm/proto.h>
@ -477,6 +478,9 @@ static int lapic_next_deadline(unsigned long delta,
{
u64 tsc;
/* This MSR is special and need a special fence: */
weak_wrmsr_fence();
tsc = rdtsc();
wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR));
return 0;


@ -29,7 +29,8 @@ static void x2apic_send_IPI(int cpu, int vector)
{
u32 dest = per_cpu(x86_cpu_to_logical_apicid, cpu);
x2apic_wrmsr_fence();
/* x2apic MSRs are special and need a special fence: */
weak_wrmsr_fence();
__x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL);
}
@ -41,7 +42,8 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
unsigned long flags;
u32 dest;
x2apic_wrmsr_fence();
/* x2apic MSRs are special and need a special fence: */
weak_wrmsr_fence();
local_irq_save(flags);
tmpmsk = this_cpu_cpumask_var_ptr(ipi_mask);


@ -43,7 +43,8 @@ static void x2apic_send_IPI(int cpu, int vector)
{
u32 dest = per_cpu(x86_cpu_to_apicid, cpu);
x2apic_wrmsr_fence();
/* x2apic MSRs are special and need a special fence: */
weak_wrmsr_fence();
__x2apic_send_IPI_dest(dest, vector, APIC_DEST_PHYSICAL);
}
@ -54,7 +55,8 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
unsigned long this_cpu;
unsigned long flags;
x2apic_wrmsr_fence();
/* x2apic MSRs are special and need a special fence: */
weak_wrmsr_fence();
local_irq_save(flags);
@ -125,7 +127,8 @@ void __x2apic_send_IPI_shorthand(int vector, u32 which)
{
unsigned long cfg = __prepare_ICR(which, vector, 0);
x2apic_wrmsr_fence();
/* x2apic MSRs are special and need a special fence: */
weak_wrmsr_fence();
native_x2apic_icr_write(cfg, 0);
}


@ -1159,6 +1159,7 @@ static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, 1),
X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, 1),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, 1),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, 1),
{}
};


@ -269,6 +269,20 @@ static inline bool within_cpu_entry(unsigned long addr, unsigned long end)
CPU_ENTRY_AREA_TOTAL_SIZE))
return true;
/*
* When FSGSBASE is enabled, paranoid_entry() fetches the per-CPU
* GSBASE value via __per_cpu_offset or pcpu_unit_offsets.
*/
#ifdef CONFIG_SMP
if (within_area(addr, end, (unsigned long)__per_cpu_offset,
sizeof(unsigned long) * nr_cpu_ids))
return true;
#else
if (within_area(addr, end, (unsigned long)&pcpu_unit_offsets,
sizeof(pcpu_unit_offsets)))
return true;
#endif
for_each_possible_cpu(cpu) {
/* The original rw GDT is being used after load_direct_gdt() */
if (within_area(addr, end, (unsigned long)get_cpu_gdt_rw(cpu),
@ -293,6 +307,14 @@ static inline bool within_cpu_entry(unsigned long addr, unsigned long end)
(unsigned long)&per_cpu(cpu_tlbstate, cpu),
sizeof(struct tlb_state)))
return true;
/*
* When in guest (X86_FEATURE_HYPERVISOR), local_db_save()
* will read per-cpu cpu_dr7 before clear dr7 register.
*/
if (within_area(addr, end, (unsigned long)&per_cpu(cpu_dr7, cpu),
sizeof(cpu_dr7)))
return true;
}
return false;
@ -491,15 +513,12 @@ static int hw_breakpoint_handler(struct die_args *args)
struct perf_event *bp;
unsigned long *dr6_p;
unsigned long dr6;
bool bpx;
/* The DR6 value is pointed by args->err */
dr6_p = (unsigned long *)ERR_PTR(args->err);
dr6 = *dr6_p;
/* If it's a single step, TRAP bits are random */
if (dr6 & DR_STEP)
return NOTIFY_DONE;
/* Do an early return if no trap bits are set in DR6 */
if ((dr6 & DR_TRAP_BITS) == 0)
return NOTIFY_DONE;
@ -509,28 +528,29 @@ static int hw_breakpoint_handler(struct die_args *args)
if (likely(!(dr6 & (DR_TRAP0 << i))))
continue;
/*
* The counter may be concurrently released but that can only
* occur from a call_rcu() path. We can then safely fetch
* the breakpoint, use its callback, touch its counter
* while we are in an rcu_read_lock() path.
*/
rcu_read_lock();
bp = this_cpu_read(bp_per_reg[i]);
if (!bp)
continue;
bpx = bp->hw.info.type == X86_BREAKPOINT_EXECUTE;
/*
* TF and data breakpoints are traps and can be merged, however
* instruction breakpoints are faults and will be raised
* separately.
*
* However DR6 can indicate both TF and instruction
* breakpoints. In that case take TF as that has precedence and
* delay the instruction breakpoint for the next exception.
*/
if (bpx && (dr6 & DR_STEP))
continue;
/*
* Reset the 'i'th TRAP bit in dr6 to denote completion of
* exception handling
*/
(*dr6_p) &= ~(DR_TRAP0 << i);
/*
* bp can be NULL due to lazy debug register switching
* or due to concurrent perf counter removing.
*/
if (!bp) {
rcu_read_unlock();
break;
}
perf_bp_event(bp, args->regs);
@ -538,11 +558,10 @@ static int hw_breakpoint_handler(struct die_args *args)
* Set up resume flag to avoid breakpoint recursion when
* returning back to origin.
*/
if (bp->hw.info.type == X86_BREAKPOINT_EXECUTE)
if (bpx)
args->regs->flags |= X86_EFLAGS_RF;
rcu_read_unlock();
}
/*
* Further processing in do_debug() is needed for a) user-space
* breakpoints (to generate signals) and b) when the system has


@ -660,6 +660,17 @@ static void __init trim_platform_memory_ranges(void)
static void __init trim_bios_range(void)
{
/*
* A special case is the first 4Kb of memory;
* This is a BIOS owned area, not kernel ram, but generally
* not listed as such in the E820 table.
*
* This typically reserves additional memory (64KiB by default)
* since some BIOSes are known to corrupt low memory. See the
* Kconfig help text for X86_RESERVE_LOW.
*/
e820__range_update(0, PAGE_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED);
/*
* special case: Some BIOSes report the PC BIOS
* area (640Kb -> 1Mb) as RAM even though it is not.
@ -717,15 +728,6 @@ early_param("reservelow", parse_reservelow);
static void __init trim_low_memory_range(void)
{
/*
* A special case is the first 4Kb of memory;
* This is a BIOS owned area, not kernel ram, but generally
* not listed as such in the E820 table.
*
* This typically reserves additional memory (64KiB by default)
* since some BIOSes are known to corrupt low memory. See the
* Kconfig help text for X86_RESERVE_LOW.
*/
memblock_reserve(0, ALIGN(reserve_low, PAGE_SIZE));
}


@ -1833,6 +1833,7 @@ void arch_set_max_freq_ratio(bool turbo_disabled)
arch_max_freq_ratio = turbo_disabled ? SCHED_CAPACITY_SCALE :
arch_turbo_freq_ratio;
}
EXPORT_SYMBOL_GPL(arch_set_max_freq_ratio);
static bool turbo_disabled(void)
{


@ -127,12 +127,17 @@ static int enable_single_step(struct task_struct *child)
regs->flags |= X86_EFLAGS_TF;
/*
* Always set TIF_SINGLESTEP - this guarantees that
* we single-step system calls etc.. This will also
* Always set TIF_SINGLESTEP. This will also
* cause us to set TF when returning to user mode.
*/
set_tsk_thread_flag(child, TIF_SINGLESTEP);
/*
* Ensure that a trap is triggered once stepping out of a system
* call prior to executing any user instruction.
*/
set_task_syscall_work(child, SYSCALL_EXIT_TRAP);
oflags = regs->flags;
/* Set TF on the kernel stack.. */
@ -230,6 +235,7 @@ void user_disable_single_step(struct task_struct *child)
/* Always clear TIF_SINGLESTEP... */
clear_tsk_thread_flag(child, TIF_SINGLESTEP);
clear_task_syscall_work(child, SYSCALL_EXIT_TRAP);
/* But touch TF only if it was set by us.. */
if (test_and_clear_tsk_thread_flag(child, TIF_FORCED_TF))


@ -321,7 +321,7 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
if (cpuid->nent < vcpu->arch.cpuid_nent)
goto out;
r = -EFAULT;
if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
if (copy_to_user(entries, vcpu->arch.cpuid_entries,
vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
goto out;
return 0;
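
The copy_to_user() fix above drops the '&': vcpu->arch.cpuid_entries is a dynamically allocated pointer here rather than an embedded array, so taking the address of the member reads the pointer field itself instead of the entries it points to. A minimal user-space sketch (hypothetical struct and field names, not from the kernel) of why the two expressions stop being interchangeable once the member becomes a pointer:

#include <stdio.h>
#include <stdlib.h>

struct entry { int leaf; };

/* Hypothetical mirror of the two layouts: embedded array vs. heap pointer */
struct holder_array   { struct entry entries[4]; };
struct holder_pointer { struct entry *entries; };

int main(void)
{
	struct holder_array   a = { { { 0 } } };
	struct holder_pointer p = { calloc(4, sizeof(struct entry)) };

	/* Embedded array: both expressions name the same address, so a copy
	 * from &a.entries happens to read the right bytes. */
	printf("array:   %p %p\n", (void *)a.entries, (void *)&a.entries);

	/* Pointer member: &p.entries is the address of the pointer itself,
	 * so copying from it would read the struct, not the entries. */
	printf("pointer: %p %p\n", (void *)p.entries, (void *)&p.entries);

	free(p.entries);
	return 0;
}

Compiled as plain C, the first pair of addresses prints identical values while the second pair does not, which is exactly the mismatch the one-character fix removes.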


@ -2879,6 +2879,8 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
*reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
(u32)msr_data;
if (efer & EFER_LMA)
ctxt->mode = X86EMUL_MODE_PROT64;
return X86EMUL_CONTINUE;
}


@ -1049,8 +1049,8 @@ bool kvm_tdp_mmu_slot_set_dirty(struct kvm *kvm, struct kvm_memory_slot *slot)
}
/*
* Clear non-leaf entries (and free associated page tables) which could
* be replaced by large mappings, for GFNs within the slot.
* Clear leaf entries which could be replaced by large mappings, for
* GFNs within the slot.
*/
static void zap_collapsible_spte_range(struct kvm *kvm,
struct kvm_mmu_page *root,
@ -1062,7 +1062,7 @@ static void zap_collapsible_spte_range(struct kvm *kvm,
tdp_root_for_each_pte(iter, root, start, end) {
if (!is_shadow_present_pte(iter.old_spte) ||
is_last_spte(iter.old_spte, iter.level))
!is_last_spte(iter.old_spte, iter.level))
continue;
pfn = spte_to_pfn(iter.old_spte);


@ -231,6 +231,7 @@ static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
{
struct kvm_vcpu *vcpu = &svm->vcpu;
bool vmcb12_lma;
if ((vmcb12->save.efer & EFER_SVME) == 0)
@ -244,18 +245,10 @@ static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
vmcb12_lma = (vmcb12->save.efer & EFER_LME) && (vmcb12->save.cr0 & X86_CR0_PG);
if (!vmcb12_lma) {
if (vmcb12->save.cr4 & X86_CR4_PAE) {
if (vmcb12->save.cr3 & MSR_CR3_LEGACY_PAE_RESERVED_MASK)
return false;
} else {
if (vmcb12->save.cr3 & MSR_CR3_LEGACY_RESERVED_MASK)
return false;
}
} else {
if (vmcb12_lma) {
if (!(vmcb12->save.cr4 & X86_CR4_PAE) ||
!(vmcb12->save.cr0 & X86_CR0_PE) ||
(vmcb12->save.cr3 & MSR_CR3_LONG_MBZ_MASK))
(vmcb12->save.cr3 & vcpu->arch.cr3_lm_rsvd_bits))
return false;
}
if (!kvm_is_valid_cr4(&svm->vcpu, vmcb12->save.cr4))


@ -342,6 +342,8 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
unsigned long first, last;
int ret;
lockdep_assert_held(&kvm->lock);
if (ulen == 0 || uaddr + ulen < uaddr)
return ERR_PTR(-EINVAL);
@ -1119,12 +1121,20 @@ int svm_register_enc_region(struct kvm *kvm,
if (!region)
return -ENOMEM;
mutex_lock(&kvm->lock);
region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
if (IS_ERR(region->pages)) {
ret = PTR_ERR(region->pages);
mutex_unlock(&kvm->lock);
goto e_free;
}
region->uaddr = range->addr;
region->size = range->size;
list_add_tail(&region->list, &sev->regions_list);
mutex_unlock(&kvm->lock);
/*
* The guest may change the memory encryption attribute from C=0 -> C=1
* or vice versa for this memory range. Lets make sure caches are
@ -1133,13 +1143,6 @@ int svm_register_enc_region(struct kvm *kvm,
*/
sev_clflush_pages(region->pages, region->npages);
region->uaddr = range->addr;
region->size = range->size;
mutex_lock(&kvm->lock);
list_add_tail(&region->list, &sev->regions_list);
mutex_unlock(&kvm->lock);
return ret;
e_free:


@ -454,6 +454,11 @@ static int has_svm(void)
return 0;
}
if (sev_active()) {
pr_info("KVM is unsupported when running as an SEV guest\n");
return 0;
}
return 1;
}


@ -403,9 +403,6 @@ static inline bool gif_set(struct vcpu_svm *svm)
}
/* svm.c */
#define MSR_CR3_LEGACY_RESERVED_MASK 0xfe7U
#define MSR_CR3_LEGACY_PAE_RESERVED_MASK 0x7U
#define MSR_CR3_LONG_MBZ_MASK 0xfff0000000000000U
#define MSR_INVALID 0xffffffffU
extern int sev;


@ -6860,11 +6860,20 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
switch (index) {
case MSR_IA32_TSX_CTRL:
/*
* No need to pass TSX_CTRL_CPUID_CLEAR through, so
* let's avoid changing CPUID bits under the host
* kernel's feet.
* TSX_CTRL_CPUID_CLEAR is handled in the CPUID
* interception. Keep the host value unchanged to avoid
* changing CPUID bits under the host kernel's feet.
*
* hle=0, rtm=0, tsx_ctrl=1 can be found with some
* combinations of new kernel and old userspace. If
* those guests run on a tsx=off host, do allow guests
* to use TSX_CTRL, but do not change the value on the
* host so that TSX remains always disabled.
*/
vmx->guest_uret_msrs[j].mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
if (boot_cpu_has(X86_FEATURE_RTM))
vmx->guest_uret_msrs[j].mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
else
vmx->guest_uret_msrs[j].mask = 0;
break;
default:
vmx->guest_uret_msrs[j].mask = -1ull;


@ -1394,16 +1394,24 @@ static u64 kvm_get_arch_capabilities(void)
if (!boot_cpu_has_bug(X86_BUG_MDS))
data |= ARCH_CAP_MDS_NO;
/*
* On TAA affected systems:
* - nothing to do if TSX is disabled on the host.
* - we emulate TSX_CTRL if present on the host.
* This lets the guest use VERW to clear CPU buffers.
*/
if (!boot_cpu_has(X86_FEATURE_RTM))
data &= ~(ARCH_CAP_TAA_NO | ARCH_CAP_TSX_CTRL_MSR);
else if (!boot_cpu_has_bug(X86_BUG_TAA))
if (!boot_cpu_has(X86_FEATURE_RTM)) {
/*
* If RTM=0 because the kernel has disabled TSX, the host might
* have TAA_NO or TSX_CTRL. Clear TAA_NO (the guest sees RTM=0
* and therefore knows that there cannot be TAA) but keep
* TSX_CTRL: some buggy userspaces leave it set on tsx=on hosts,
* and we want to allow migrating those guests to tsx=off hosts.
*/
data &= ~ARCH_CAP_TAA_NO;
} else if (!boot_cpu_has_bug(X86_BUG_TAA)) {
data |= ARCH_CAP_TAA_NO;
} else {
/*
* Nothing to do here; we emulate TSX_CTRL if present on the
* host so the guest can choose between disabling TSX or
* using VERW to clear CPU buffers.
*/
}
return data;
}
@ -9616,6 +9624,8 @@ static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
*/
if (!(sregs->cr4 & X86_CR4_PAE) || !(sregs->efer & EFER_LMA))
return false;
if (sregs->cr3 & vcpu->arch.cr3_lm_rsvd_bits)
return false;
} else {
/*
* Not in 64-bit mode: EFER.LMA is clear and the code
@ -9993,6 +10003,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
fx_init(vcpu);
vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
vcpu->arch.cr3_lm_rsvd_bits = rsvd_bits(cpuid_maxphyaddr(vcpu), 63);
vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT;
@ -10494,7 +10505,7 @@ void __user * __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
return 0;
old_npages = slot->npages;
hva = 0;
hva = slot->userspace_addr;
}
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {


@ -425,6 +425,8 @@ bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);
__reserved_bits |= X86_CR4_UMIP; \
if (!__cpu_has(__c, X86_FEATURE_VMX)) \
__reserved_bits |= X86_CR4_VMXE; \
if (!__cpu_has(__c, X86_FEATURE_PCID)) \
__reserved_bits |= X86_CR4_PCIDE; \
__reserved_bits; \
})


@ -382,6 +382,7 @@ bool sev_active(void)
{
return sev_status & MSR_AMD64_SEV_ENABLED;
}
EXPORT_SYMBOL_GPL(sev_active);
/* Needs to be called from non-instrumentable code */
bool noinstr sev_es_active(void)


@ -115,31 +115,12 @@ void efi_sync_low_kernel_mappings(void)
pud_t *pud_k, *pud_efi;
pgd_t *efi_pgd = efi_mm.pgd;
/*
* We can share all PGD entries apart from the one entry that
* covers the EFI runtime mapping space.
*
* Make sure the EFI runtime region mappings are guaranteed to
* only span a single PGD entry and that the entry also maps
* other important kernel regions.
*/
MAYBE_BUILD_BUG_ON(pgd_index(EFI_VA_END) != pgd_index(MODULES_END));
MAYBE_BUILD_BUG_ON((EFI_VA_START & PGDIR_MASK) !=
(EFI_VA_END & PGDIR_MASK));
pgd_efi = efi_pgd + pgd_index(PAGE_OFFSET);
pgd_k = pgd_offset_k(PAGE_OFFSET);
num_entries = pgd_index(EFI_VA_END) - pgd_index(PAGE_OFFSET);
memcpy(pgd_efi, pgd_k, sizeof(pgd_t) * num_entries);
/*
* As with PGDs, we share all P4D entries apart from the one entry
* that covers the EFI runtime mapping space.
*/
BUILD_BUG_ON(p4d_index(EFI_VA_END) != p4d_index(MODULES_END));
BUILD_BUG_ON((EFI_VA_START & P4D_MASK) != (EFI_VA_END & P4D_MASK));
pgd_efi = efi_pgd + pgd_index(EFI_VA_END);
pgd_k = pgd_offset_k(EFI_VA_END);
p4d_efi = p4d_offset(pgd_efi, 0);


@ -6332,13 +6332,13 @@ static unsigned int bfq_update_depths(struct bfq_data *bfqd,
* limit 'something'.
*/
/* no more than 50% of tags for async I/O */
bfqd->word_depths[0][0] = max(bt->sb.depth >> 1, 1U);
bfqd->word_depths[0][0] = max((1U << bt->sb.shift) >> 1, 1U);
/*
* no more than 75% of tags for sync writes (25% extra tags
* w.r.t. async I/O, to prevent async I/O from starving sync
* writes)
*/
bfqd->word_depths[0][1] = max((bt->sb.depth * 3) >> 2, 1U);
bfqd->word_depths[0][1] = max(((1U << bt->sb.shift) * 3) >> 2, 1U);
/*
* In-word depths in case some bfq_queue is being weight-
@ -6348,9 +6348,9 @@ static unsigned int bfq_update_depths(struct bfq_data *bfqd,
* shortage.
*/
/* no more than ~18% of tags for async I/O */
bfqd->word_depths[1][0] = max((bt->sb.depth * 3) >> 4, 1U);
bfqd->word_depths[1][0] = max(((1U << bt->sb.shift) * 3) >> 4, 1U);
/* no more than ~37% of tags for sync writes (~20% extra tags) */
bfqd->word_depths[1][1] = max((bt->sb.depth * 6) >> 4, 1U);
bfqd->word_depths[1][1] = max(((1U << bt->sb.shift) * 6) >> 4, 1U);
for (i = 0; i < 2; i++)
for (j = 0; j < 2; j++)
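
The bfq hunk above derives the shallow-depth limits from the per-word size (1U << bt->sb.shift) instead of the total map depth (bt->sb.depth). A small arithmetic sketch of the resulting limits, assuming an illustrative shift of 6 (64 tags per word; the value is not taken from the patch, and the kernel additionally clamps each result to at least 1 via max()):

#include <stdio.h>

int main(void)
{
	unsigned int shift = 6;           /* assumed: 64 tags per sbitmap word */
	unsigned int word  = 1U << shift;

	/* Mirrors the four limits computed in bfq_update_depths() */
	printf("async, normal        : %u\n", word >> 1);       /* 50%  -> 32 */
	printf("sync write, normal   : %u\n", (word * 3) >> 2); /* 75%  -> 48 */
	printf("async, weight-raised : %u\n", (word * 3) >> 4); /* ~18% -> 12 */
	printf("sync wr, weight-raised: %u\n", (word * 6) >> 4); /* ~37% -> 24 */
	return 0;
}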

Some files were not shown because too many files have changed in this diff.