
Merge 4.13-rc2 into char-misc-next

We want the char/misc driver fixes in here as well to handle future
changes.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Greg Kroah-Hartman 2017-07-23 19:58:30 -07:00
commit 24a81a2c25
449 changed files with 11822 additions and 4476 deletions

View File

@@ -11,6 +11,7 @@ Required properties:
 - reg-names: Names of the registers.
     "amac_base": Address and length of the GMAC registers
     "idm_base": Address and length of the GMAC IDM registers
+        (required for NSP and Northstar2)
     "nicpm_base": Address and length of the NIC Port Manager
         registers (required for Northstar2)
 - interrupts: Interrupt number

View File

@ -1,24 +0,0 @@
Broadcom GMAC Ethernet Controller Device Tree Bindings
-------------------------------------------------------------
Required properties:
- compatible: "brcm,bgmac-nsp"
- reg: Address and length of the GMAC registers,
Address and length of the GMAC IDM registers
- reg-names: Names of the registers. Must have both "gmac_base" and
"idm_base"
- interrupts: Interrupt number
Optional properties:
- mac-address: See ethernet.txt file in the same directory
Examples:
gmac0: ethernet@18022000 {
compatible = "brcm,bgmac-nsp";
reg = <0x18022000 0x1000>,
<0x18110000 0x1000>;
reg-names = "gmac_base", "idm_base";
interrupts = <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};

View File

@@ -4,7 +4,7 @@ Required properties:
 - compatible: Should be one of the following.
   - "rockchip,rk3066a-efuse" - for RK3066a SoCs.
   - "rockchip,rk3188-efuse" - for RK3188 SoCs.
-  - "rockchip,rk322x-efuse" - for RK322x SoCs.
+  - "rockchip,rk3228-efuse" - for RK3228 SoCs.
   - "rockchip,rk3288-efuse" - for RK3288 SoCs.
   - "rockchip,rk3399-efuse" - for RK3399 SoCs.
 - reg: Should contain the registers location and exact eFuse size

View File

@@ -1,13 +1,20 @@
-* Broadcom Digital Timing Engine(DTE) based PTP clock driver
+* Broadcom Digital Timing Engine(DTE) based PTP clock
 
 Required properties:
-- compatible: should be "brcm,ptp-dte"
+- compatible: should contain the core compatibility string
+              and the SoC compatibility string. The SoC
+              compatibility string is to handle SoC specific
+              hardware differences.
+              Core compatibility string:
+                 "brcm,ptp-dte"
+              SoC compatibility strings:
+                 "brcm,iproc-ptp-dte" - for iproc based SoC's
 
 - reg: address and length of the DTE block's NCO registers
 
 Example:
 
-ptp_dte: ptp_dte@180af650 {
-	compatible = "brcm,ptp-dte";
+ptp: ptp-dte@180af650 {
+	compatible = "brcm,iproc-ptp-dte", "brcm,ptp-dte";
 	reg = <0x180af650 0x10>;
 	status = "okay";
 };

View File

@@ -9,7 +9,6 @@ Optional properties:
 - fsl,irda-mode : Indicate the uart supports irda mode
 - fsl,dte-mode : Indicate the uart works in DTE mode. The uart works
                  in DCE mode by default.
-- fsl,dma-size : Indicate the size of the DMA buffer and its periods
 
 Please check Documentation/devicetree/bindings/serial/serial.txt
 for the complete list of generic properties.
@@ -29,5 +28,4 @@ uart1: serial@73fbc000 {
 	interrupts = <31>;
 	uart-has-rtscts;
 	fsl,dte-mode;
-	fsl,dma-size = <1024 4>;
 };

File diff suppressed because it is too large

View File

@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 13
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*

View File

@@ -100,7 +100,7 @@
 #define TIOCGPKT	_IOR('T', 0x38, int) /* Get packet mode state */
 #define TIOCGPTLCK	_IOR('T', 0x39, int) /* Get Pty lock state */
 #define TIOCGEXCL	_IOR('T', 0x40, int) /* Get exclusive mode state */
-#define TIOCGPTPEER	_IOR('T', 0x41, int) /* Safely open the slave */
+#define TIOCGPTPEER	_IO('T', 0x41) /* Safely open the slave */
 #define TIOCSERCONFIG	0x5453
 #define TIOCSERGWILD	0x5454
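
The same one-line change recurs in the per-architecture ioctls.h headers below: TIOCGPTPEER takes its argument by value (the O_* flags for the new descriptor) and returns a file descriptor, so it never copies an int through a user pointer, and encoding a read direction plus payload size with _IOR was misleading. A rough userspace sketch of the call, assuming a libc new enough to expose the TIOCGPTPEER constant:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>

int main(void)
{
	int master = open("/dev/ptmx", O_RDWR | O_NOCTTY);

	if (master < 0 || grantpt(master) < 0 || unlockpt(master) < 0)
		return 1;

	/* The argument is a plain flags value, not a pointer; the result
	 * is a new fd for the slave side, opened through the master so
	 * the /dev/pts/N pathname never has to be trusted. */
	int slave = ioctl(master, TIOCGPTPEER, O_RDWR | O_NOCTTY);

	if (slave < 0)
		perror("TIOCGPTPEER");
	return slave < 0;
}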

View File

@@ -37,7 +37,7 @@ do {							\
 		".pushsection .rodata.str, \"aMS\", %progbits, 1\n" \
 		"2:\t.asciz " #__file "\n" \
 		".popsection\n" \
-		".pushsection __bug_table,\"a\"\n" \
+		".pushsection __bug_table,\"aw\"\n" \
 		".align 2\n" \
 		"3:\t.word 1b, 2b\n" \
 		"\t.hword " #__line ", 0\n" \

View File

@@ -116,7 +116,7 @@ struct cpu_cache_fns {
 	void (*dma_unmap_area)(const void *, size_t, int);
 
 	void (*dma_flush_range)(const void *, const void *);
-};
+} __no_randomize_layout;
 
 /*
  * Select the calling method

View File

@@ -36,7 +36,7 @@
 #ifdef CONFIG_GENERIC_BUG
 
 #define __BUG_ENTRY(flags) \
-		".pushsection __bug_table,\"a\"\n\t" \
+		".pushsection __bug_table,\"aw\"\n\t" \
 		".align 2\n\t" \
 		"0:	.long 1f - 0b\n\t" \
 _BUGVERBOSE_LOCATION(__FILE__, __LINE__) \

View File

@@ -21,7 +21,7 @@
 #define _BUG_OR_WARN(flags) \
 	asm volatile( \
 		"1:	.hword	%0\n" \
-		"	.section __bug_table,\"a\",@progbits\n" \
+		"	.section __bug_table,\"aw\",@progbits\n" \
 		"2:	.long	1b\n" \
 		"	.long	%1\n" \
 		"	.short	%2\n" \
@@ -38,7 +38,7 @@
 #define _BUG_OR_WARN(flags) \
 	asm volatile( \
 		"1:	.hword	%0\n" \
-		"	.section __bug_table,\"a\",@progbits\n" \
+		"	.section __bug_table,\"aw\",@progbits\n" \
 		"2:	.long	1b\n" \
 		"	.short	%1\n" \
 		"	.org	2b + %2\n" \

View File

@@ -44,8 +44,7 @@ flat_get_relocate_addr (unsigned long relval)
 	return relval & 0x03ffffff; /* Mask out top 6 bits */
 }
 
-static inline int flat_set_persistent(unsigned long relval,
-				      unsigned long *persistent)
+static inline int flat_set_persistent(u32 relval, u32 *persistent)
 {
 	int type = (relval >> 26) & 7;
 	if (type == 3) {

View File

@@ -32,7 +32,7 @@ unsigned long bfin_get_addr_from_rp(u32 *ptr,
 		break;
 
 	case FLAT_BFIN_RELOC_TYPE_32_BIT:
-		pr_debug("*ptr = %lx", get_unaligned(ptr));
+		pr_debug("*ptr = %x", get_unaligned(ptr));
 		val = get_unaligned(ptr);
 		break;
 
@@ -77,7 +77,7 @@ void bfin_put_addr_at_rp(u32 *ptr, u32 addr, u32 relval)
 	case FLAT_BFIN_RELOC_TYPE_32_BIT:
 		put_unaligned(addr, ptr);
-		pr_debug("new ptr =%lx", get_unaligned(ptr));
+		pr_debug("new ptr =%x", get_unaligned(ptr));
 		break;
 	}
 }

View File

@@ -24,7 +24,7 @@ static inline int flat_get_addr_from_rp(u32 __user *rp, u32 relval, u32 flags,
 					u32 *addr, u32 *persistent)
 {
 	u32 val = get_unaligned((__force u32 *)rp);
-	if (!(flags & FLAT_FLAG_GOTPIC)
+	if (!(flags & FLAT_FLAG_GOTPIC))
 		val &= 0x00ffffff;
 	*addr = val;
 	return 0;

View File

@@ -30,8 +30,7 @@ static inline int flat_put_addr_at_rp(u32 __user *rp, u32 addr, u32 rel)
 }
 
 #define flat_get_relocate_addr(rel)	(rel)
 
-static inline int flat_set_persistent(unsigned long relval,
-				      unsigned long *persistent)
+static inline int flat_set_persistent(u32 relval, u32 *persistent)
 {
 	return 0;
 }

View File

@@ -91,7 +91,7 @@
 #define TIOCGPKT	_IOR('T', 0x38, int) /* Get packet mode state */
 #define TIOCGPTLCK	_IOR('T', 0x39, int) /* Get Pty lock state */
 #define TIOCGEXCL	_IOR('T', 0x40, int) /* Get exclusive mode state */
-#define TIOCGPTPEER	_IOR('T', 0x41, int) /* Safely open the slave */
+#define TIOCGPTPEER	_IO('T', 0x41) /* Safely open the slave */
 
 /* I hope the range from 0x5480 on is free ... */
 #define TIOCSCTTY	0x5480	/* become controlling tty */

View File

@@ -21,7 +21,7 @@ do {					\
 	asm volatile(				\
 		"	syscall 15		\n"	\
 		"0:				\n"	\
-		"	.section __bug_table,\"a\"	\n"	\
+		"	.section __bug_table,\"aw\"	\n"	\
 		"	.long 0b,%0,%1		\n"	\
 		"	.previous		\n"	\
 		:				\

View File

@@ -27,7 +27,7 @@
 	do {							\
 		asm volatile("\n"				\
 			     "1:\t" PARISC_BUG_BREAK_ASM "\n"	\
-			     "\t.pushsection __bug_table,\"a\"\n" \
+			     "\t.pushsection __bug_table,\"aw\"\n" \
 			     "2:\t" ASM_WORD_INSN "1b, %c0\n"	\
 			     "\t.short %c1, %c2\n"		\
 			     "\t.org 2b+%c3\n"			\
@@ -50,7 +50,7 @@
 	do {							\
 		asm volatile("\n"				\
 			     "1:\t" PARISC_BUG_BREAK_ASM "\n"	\
-			     "\t.pushsection __bug_table,\"a\"\n" \
+			     "\t.pushsection __bug_table,\"aw\"\n" \
 			     "2:\t" ASM_WORD_INSN "1b, %c0\n"	\
 			     "\t.short %c1, %c2\n"		\
 			     "\t.org 2b+%c3\n"			\
@@ -64,7 +64,7 @@
 	do {							\
 		asm volatile("\n"				\
 			     "1:\t" PARISC_BUG_BREAK_ASM "\n"	\
-			     "\t.pushsection __bug_table,\"a\"\n" \
+			     "\t.pushsection __bug_table,\"aw\"\n" \
 			     "2:\t" ASM_WORD_INSN "1b\n"	\
 			     "\t.short %c0\n"			\
 			     "\t.org 2b+%c1\n"			\

View File

@@ -60,7 +60,7 @@
 #define TIOCGPKT	_IOR('T', 0x38, int) /* Get packet mode state */
 #define TIOCGPTLCK	_IOR('T', 0x39, int) /* Get Pty lock state */
 #define TIOCGEXCL	_IOR('T', 0x40, int) /* Get exclusive mode state */
-#define TIOCGPTPEER	_IOR('T', 0x41, int) /* Safely open the slave */
+#define TIOCGPTPEER	_IO('T', 0x41) /* Safely open the slave */
 
 #define FIONCLEX	0x5450  /* these numbers need to be adjusted. */
 #define FIOCLEX		0x5451

View File

@@ -91,6 +91,7 @@ static inline int hash__pgd_bad(pgd_t pgd)
 }
 #ifdef CONFIG_STRICT_KERNEL_RWX
 extern void hash__mark_rodata_ro(void);
+extern void hash__mark_initmem_nx(void);
 #endif
 
 extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,

View File

@@ -1192,5 +1192,6 @@ static inline const int pud_pfn(pud_t pud)
 	BUILD_BUG();
 	return 0;
 }
+
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */

View File

@@ -118,6 +118,7 @@
 #ifdef CONFIG_STRICT_KERNEL_RWX
 extern void radix__mark_rodata_ro(void);
+extern void radix__mark_initmem_nx(void);
 #endif
 
 static inline unsigned long __radix_pte_update(pte_t *ptep, unsigned long clr,

View File

@@ -18,7 +18,7 @@
 #include <asm/asm-offsets.h>
 #ifdef CONFIG_DEBUG_BUGVERBOSE
 .macro EMIT_BUG_ENTRY addr,file,line,flags
-	.section __bug_table,"a"
+	.section __bug_table,"aw"
 5001:	PPC_LONG \addr, 5002f
 	.short \line, \flags
 	.org 5001b+BUG_ENTRY_SIZE
@@ -29,7 +29,7 @@
 .endm
 #else
 .macro EMIT_BUG_ENTRY addr,file,line,flags
-	.section __bug_table,"a"
+	.section __bug_table,"aw"
 5001:	PPC_LONG \addr
 	.short \flags
 	.org 5001b+BUG_ENTRY_SIZE
@@ -42,14 +42,14 @@
    sizeof(struct bug_entry), respectively */
 #ifdef CONFIG_DEBUG_BUGVERBOSE
 #define _EMIT_BUG_ENTRY \
-	".section __bug_table,\"a\"\n" \
+	".section __bug_table,\"aw\"\n" \
 	"2:\t" PPC_LONG "1b, %0\n" \
 	"\t.short %1, %2\n" \
 	".org 2b+%3\n" \
 	".previous\n"
 #else
 #define _EMIT_BUG_ENTRY \
-	".section __bug_table,\"a\"\n" \
+	".section __bug_table,\"aw\"\n" \
 	"2:\t" PPC_LONG "1b\n" \
 	"\t.short %2\n" \
 	".org 2b+%3\n" \

View File

@@ -80,6 +80,13 @@ unsigned long vmalloc_to_phys(void *vmalloc_addr);
 void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
 void pgtable_cache_init(void);
 
+#ifdef CONFIG_STRICT_KERNEL_RWX
+void mark_initmem_nx(void);
+#else
+static inline void mark_initmem_nx(void) { }
+#endif
+
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_POWERPC_PGTABLE_H */

View File

@@ -100,7 +100,7 @@
 #define TIOCGPKT	_IOR('T', 0x38, int) /* Get packet mode state */
 #define TIOCGPTLCK	_IOR('T', 0x39, int) /* Get Pty lock state */
 #define TIOCGEXCL	_IOR('T', 0x40, int) /* Get exclusive mode state */
-#define TIOCGPTPEER	_IOR('T', 0x41, int) /* Safely open the slave */
+#define TIOCGPTPEER	_IO('T', 0x41) /* Safely open the slave */
 #define TIOCSERCONFIG	0x5453
 #define TIOCSERGWILD	0x5454

View File

@@ -824,7 +824,7 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
  * r3 volatile parameter and return value for status
  * r4-r10 volatile input and output value
  * r11 volatile hypercall number and output value
- * r12 volatile
+ * r12 volatile input and output value
  * r13-r31 nonvolatile
  * LR nonvolatile
  * CTR volatile
@@ -834,25 +834,26 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
  * Other registers nonvolatile
  *
  * The intersection of volatile registers that don't contain possible
- * inputs is: r12, cr0, xer, ctr. We may use these as scratch regs
- * upon entry without saving.
+ * inputs is: cr0, xer, ctr. We may use these as scratch regs upon entry
+ * without saving, though xer is not a good idea to use, as hardware may
+ * interpret some bits so it may be costly to change them.
  */
 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
 	/*
 	 * There is a little bit of juggling to get syscall and hcall
-	 * working well. Save r10 in ctr to be restored in case it is a
-	 * hcall.
+	 * working well. Save r13 in ctr to avoid using SPRG scratch
+	 * register.
 	 *
 	 * Userspace syscalls have already saved the PPR, hcalls must save
 	 * it before setting HMT_MEDIUM.
 	 */
 #define SYSCALL_KVMTEST \
-	mr	r12,r13; \
+	mtctr	r13; \
 	GET_PACA(r13); \
-	mtctr	r10; \
+	std	r10,PACA_EXGEN+EX_R10(r13); \
 	KVMTEST_PR(0xc00); /* uses r10, branch to do_kvm_0xc00_system_call */ \
 	HMT_MEDIUM; \
-	mr	r9,r12; \
+	mfctr	r9;
 
 #else
 #define SYSCALL_KVMTEST \
@@ -935,8 +936,8 @@ EXC_VIRT_END(system_call, 0x4c00, 0x100)
  * This is a hcall, so register convention is as above, with these
  * differences:
  * r13 = PACA
- * r12 = orig r13
- * ctr = orig r10
+ * ctr = orig r13
+ * orig r10 saved in PACA
  */
 TRAMP_KVM_BEGIN(do_kvm_0xc00)
 	/*
@@ -944,14 +945,13 @@ TRAMP_KVM_BEGIN(do_kvm_0xc00)
 	 * HMT_MEDIUM. That allows the KVM code to save that value into the
 	 * guest state (it is the guest's PPR value).
 	 */
-	OPT_GET_SPR(r0, SPRN_PPR, CPU_FTR_HAS_PPR)
+	OPT_GET_SPR(r10, SPRN_PPR, CPU_FTR_HAS_PPR)
 	HMT_MEDIUM
-	OPT_SAVE_REG_TO_PACA(PACA_EXGEN+EX_PPR, r0, CPU_FTR_HAS_PPR)
+	OPT_SAVE_REG_TO_PACA(PACA_EXGEN+EX_PPR, r10, CPU_FTR_HAS_PPR)
 	mfctr	r10
-	SET_SCRATCH0(r12)
+	SET_SCRATCH0(r10)
 	std	r9,PACA_EXGEN+EX_R9(r13)
 	mfcr	r9
-	std	r10,PACA_EXGEN+EX_R10(r13)
 	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)
 #endif

View File

@@ -30,6 +30,7 @@
  * Use unused space in the interrupt stack to save and restore
  * registers for winkle support.
  */
+#define _MMCR0	GPR0
 #define _SDR1	GPR3
 #define _PTCR	GPR3
 #define _RPR	GPR4
@@ -272,6 +273,14 @@ power_enter_stop:
 	b	pnv_wakeup_noloss
 
 .Lhandle_esl_ec_set:
+	/*
+	 * POWER9 DD2 can incorrectly set PMAO when waking up after a
+	 * state-loss idle. Saving and restoring MMCR0 over idle is a
+	 * workaround.
+	 */
+	mfspr	r4,SPRN_MMCR0
+	std	r4,_MMCR0(r1)
+
 	/*
 	 * Check if the requested state is a deep idle state.
 	 */
@@ -450,10 +459,14 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
 pnv_restore_hyp_resource_arch300:
 	/*
 	 * Workaround for POWER9, if we lost resources, the ERAT
-	 * might have been mixed up and needs flushing.
+	 * might have been mixed up and needs flushing. We also need
+	 * to reload MMCR0 (see comment above).
 	 */
 	blt	cr3,1f
 	PPC_INVALIDATE_ERAT
+	ld	r1,PACAR1(r13)
+	ld	r4,_MMCR0(r1)
+	mtspr	SPRN_MMCR0,r4
 1:
 	/*
 	 * POWER ISA 3. Use PSSCR to determine if we

View File

@@ -402,6 +402,7 @@ void __init mem_init(void)
 void free_initmem(void)
 {
 	ppc_md.progress = ppc_printk_progress;
+	mark_initmem_nx();
 	free_initmem_default(POISON_FREE_INITMEM);
 }

View File

@@ -425,33 +425,51 @@ int hash__has_transparent_hugepage(void)
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 #ifdef CONFIG_STRICT_KERNEL_RWX
-void hash__mark_rodata_ro(void)
+static bool hash__change_memory_range(unsigned long start, unsigned long end,
+				      unsigned long newpp)
 {
-	unsigned long start = (unsigned long)_stext;
-	unsigned long end = (unsigned long)__init_begin;
 	unsigned long idx;
 	unsigned int step, shift;
-	unsigned long newpp = PP_RXXX;
 
 	shift = mmu_psize_defs[mmu_linear_psize].shift;
 	step = 1 << shift;
 
-	start = ((start + step - 1) >> shift) << shift;
-	end = (end >> shift) << shift;
+	start = ALIGN_DOWN(start, step);
+	end = ALIGN(end, step); // aligns up
 
-	pr_devel("marking ro start %lx, end %lx, step %x\n",
-		 start, end, step);
+	if (start >= end)
+		return false;
 
-	if (start == end) {
-		pr_warn("could not set rodata ro, relocate the start"
-			" of the kernel to a 0x%x boundary\n", step);
-		return;
-	}
+	pr_debug("Changing page protection on range 0x%lx-0x%lx, to 0x%lx, step 0x%x\n",
+		 start, end, newpp, step);
 
 	for (idx = start; idx < end; idx += step)
 		/* Not sure if we can do much with the return value */
 		mmu_hash_ops.hpte_updateboltedpp(newpp, idx, mmu_linear_psize,
 							mmu_kernel_ssize);
+
+	return true;
+}
+
+void hash__mark_rodata_ro(void)
+{
+	unsigned long start, end;
+
+	start = (unsigned long)_stext;
+	end = (unsigned long)__init_begin;
+
+	WARN_ON(!hash__change_memory_range(start, end, PP_RXXX));
+}
+
+void hash__mark_initmem_nx(void)
+{
+	unsigned long start, end, pp;
+
+	start = (unsigned long)__init_begin;
+	end = (unsigned long)__init_end;
+
+	pp = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL));
+
+	WARN_ON(!hash__change_memory_range(start, end, pp));
 }
 #endif

View File

@@ -112,10 +112,9 @@ set_the_pte:
 }
 
 #ifdef CONFIG_STRICT_KERNEL_RWX
-void radix__mark_rodata_ro(void)
+void radix__change_memory_range(unsigned long start, unsigned long end,
+				unsigned long clear)
 {
-	unsigned long start = (unsigned long)_stext;
-	unsigned long end = (unsigned long)__init_begin;
 	unsigned long idx;
 	pgd_t *pgdp;
 	pud_t *pudp;
@@ -125,7 +124,8 @@ void radix__change_memory_range(unsigned long start, unsigned long end,
 	start = ALIGN_DOWN(start, PAGE_SIZE);
 	end = PAGE_ALIGN(end); // aligns up
 
-	pr_devel("marking ro start %lx, end %lx\n", start, end);
+	pr_debug("Changing flags on range %lx-%lx removing 0x%lx\n",
+		 start, end, clear);
 
 	for (idx = start; idx < end; idx += PAGE_SIZE) {
 		pgdp = pgd_offset_k(idx);
@@ -147,11 +147,29 @@ void radix__change_memory_range(unsigned long start, unsigned long end,
 		if (!ptep)
 			continue;
 update_the_pte:
-		radix__pte_update(&init_mm, idx, ptep, _PAGE_WRITE, 0, 0);
+		radix__pte_update(&init_mm, idx, ptep, clear, 0, 0);
 	}
 
 	radix__flush_tlb_kernel_range(start, end);
 }
+
+void radix__mark_rodata_ro(void)
+{
+	unsigned long start, end;
+
+	start = (unsigned long)_stext;
+	end = (unsigned long)__init_begin;
+
+	radix__change_memory_range(start, end, _PAGE_WRITE);
+}
+
+void radix__mark_initmem_nx(void)
+{
+	unsigned long start = (unsigned long)__init_begin;
+	unsigned long end = (unsigned long)__init_end;
+
+	radix__change_memory_range(start, end, _PAGE_EXEC);
+}
 #endif /* CONFIG_STRICT_KERNEL_RWX */
 
 static inline void __meminit print_mapping(unsigned long start,
static inline void __meminit print_mapping(unsigned long start, static inline void __meminit print_mapping(unsigned long start,

View File

@@ -505,4 +505,12 @@ void mark_rodata_ro(void)
 	else
 		hash__mark_rodata_ro();
 }
+
+void mark_initmem_nx(void)
+{
+	if (radix_enabled())
+		radix__mark_initmem_nx();
+	else
+		hash__mark_initmem_nx();
+}
 #endif

View File

@@ -78,7 +78,7 @@ void opal_configure_cores(void)
 	 *  ie. Host hash supports hash guests
 	 *      Host radix supports hash/radix guests
 	 */
-	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+	if (early_cpu_has_feature(CPU_FTR_ARCH_300)) {
 		reinit_flags |= OPAL_REINIT_CPUS_MMU_HASH;
 		if (early_radix_enabled())
 			reinit_flags |= OPAL_REINIT_CPUS_MMU_RADIX;

View File

@@ -14,7 +14,7 @@
 		".section .rodata.str,\"aMS\",@progbits,1\n" \
 		"2:	.asciz	\""__FILE__"\"\n" \
 		".previous\n" \
-		".section __bug_table,\"a\"\n" \
+		".section __bug_table,\"aw\"\n" \
 		"3:	.long	1b-3b,2b-3b\n" \
 		"	.short	%0,%1\n" \
 		"	.org	3b+%2\n" \
@@ -30,7 +30,7 @@
 	asm volatile( \
 		"0:	j	0b+2\n" \
 		"1:\n" \
-		".section __bug_table,\"a\"\n" \
+		".section __bug_table,\"aw\"\n" \
 		"2:	.long	1b-2b\n" \
 		"	.short	%0\n" \
 		"	.org	2b+%1\n" \

View File

@@ -24,14 +24,14 @@
  */
 #ifdef CONFIG_DEBUG_BUGVERBOSE
 #define _EMIT_BUG_ENTRY \
-	"\t.pushsection __bug_table,\"a\"\n" \
+	"\t.pushsection __bug_table,\"aw\"\n" \
 	"2:\t.long 1b, %O1\n" \
 	"\t.short %O2, %O3\n" \
 	"\t.org 2b+%O4\n" \
 	"\t.popsection\n"
 #else
 #define _EMIT_BUG_ENTRY \
-	"\t.pushsection __bug_table,\"a\"\n" \
+	"\t.pushsection __bug_table,\"aw\"\n" \
 	"2:\t.long 1b\n" \
 	"\t.short %O3\n" \
 	"\t.org 2b+%O4\n" \

View File

@@ -93,7 +93,7 @@
 #define TIOCGPKT	_IOR('T', 0x38, int) /* Get packet mode state */
 #define TIOCGPTLCK	_IOR('T', 0x39, int) /* Get Pty lock state */
 #define TIOCGEXCL	_IOR('T', 0x40, int) /* Get exclusive mode state */
-#define TIOCGPTPEER	_IOR('T', 0x41, int) /* Safely open the slave */
+#define TIOCGPTPEER	_IO('T', 0x41) /* Safely open the slave */
 #define TIOCSERCONFIG	_IO('T', 83) /* 0x5453 */
 #define TIOCSERGWILD	_IOR('T', 84,  int) /* 0x5454 */

View File

@@ -54,6 +54,7 @@ extern struct trap_per_cpu trap_block[NR_CPUS];
 void init_cur_cpu_trap(struct thread_info *);
 void setup_tba(void);
 extern int ncpus_probed;
+extern u64 cpu_mondo_counter[NR_CPUS];
 
 unsigned long real_hard_smp_processor_id(void);

View File

@@ -88,7 +88,7 @@
 #define TIOCGPTN	_IOR('t', 134, unsigned int) /* Get Pty Number */
 #define TIOCSPTLCK	_IOW('t', 135, int) /* Lock/unlock PTY */
 #define TIOCSIG		_IOW('t', 136, int) /* Generate signal on Pty slave */
-#define TIOCGPTPEER	_IOR('t', 137, int) /* Safely open the slave */
+#define TIOCGPTPEER	_IO('t', 137) /* Safely open the slave */
 
 /* Little f */
 #define FIOCLEX		_IO('f', 1)

View File

@@ -673,12 +673,14 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
 static int dma_4v_supported(struct device *dev, u64 device_mask)
 {
 	struct iommu *iommu = dev->archdata.iommu;
-	u64 dma_addr_mask;
+	u64 dma_addr_mask = iommu->dma_addr_mask;
 
-	if (device_mask > DMA_BIT_MASK(32) && iommu->atu)
-		dma_addr_mask = iommu->atu->dma_addr_mask;
-	else
-		dma_addr_mask = iommu->dma_addr_mask;
+	if (device_mask > DMA_BIT_MASK(32)) {
+		if (iommu->atu)
+			dma_addr_mask = iommu->atu->dma_addr_mask;
+		else
+			return 0;
+	}
 
 	if ((device_mask & dma_addr_mask) == dma_addr_mask)
 		return 1;
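
For context, a hedged sketch of the negotiation this function feeds into: a driver on a sun4v machine asks for a wide DMA mask, and with the fix the 64-bit request now fails cleanly when no ATU is present instead of being validated against the wrong (32-bit IOMMU) mask. The probe function name here is illustrative:

#include <linux/dma-mapping.h>

static int example_probe_dma(struct device *dev)
{
	/* dma_4v_supported() decides this: masks wider than 32 bits are
	 * only claimed as supported when an ATU exists. */
	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		return 0;

	/* Fall back to 32-bit addressing, served by the legacy IOMMU. */
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}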

View File

@@ -622,22 +622,48 @@ retry:
 	}
 }
 
-/* Multi-cpu list version.  */
+#define	CPU_MONDO_COUNTER(cpuid)	(cpu_mondo_counter[cpuid])
+#define	MONDO_USEC_WAIT_MIN		2
+#define	MONDO_USEC_WAIT_MAX		100
+#define	MONDO_RETRY_LIMIT		500000
+
+/* Multi-cpu list version.
+ *
+ * Deliver xcalls to 'cnt' number of cpus in 'cpu_list'.
+ * Sometimes not all cpus receive the mondo, requiring us to re-send
+ * the mondo until all cpus have received, or cpus are truly stuck
+ * unable to receive mondo, and we timeout.
+ * Occasionally a target cpu strand is borrowed briefly by hypervisor to
+ * perform guest service, such as PCIe error handling. Consider the
+ * service time, 1 second overall wait is reasonable for 1 cpu.
+ * Here two in-between mondo check wait time are defined: 2 usec for
+ * single cpu quick turn around and up to 100usec for large cpu count.
+ * Deliver mondo to large number of cpus could take longer, we adjusts
+ * the retry count as long as target cpus are making forward progress.
+ */
 static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
 {
-	int retries, this_cpu, prev_sent, i, saw_cpu_error;
+	int this_cpu, tot_cpus, prev_sent, i, rem;
+	int usec_wait, retries, tot_retries;
+	u16 first_cpu = 0xffff;
+	unsigned long xc_rcvd = 0;
 	unsigned long status;
+	int ecpuerror_id = 0;
+	int enocpu_id = 0;
 	u16 *cpu_list;
+	u16 cpu;
 
 	this_cpu = smp_processor_id();
 	cpu_list = __va(tb->cpu_list_pa);
-
-	saw_cpu_error = 0;
-	retries = 0;
+	usec_wait = cnt * MONDO_USEC_WAIT_MIN;
+	if (usec_wait > MONDO_USEC_WAIT_MAX)
+		usec_wait = MONDO_USEC_WAIT_MAX;
+	retries = tot_retries = 0;
+	tot_cpus = cnt;
 	prev_sent = 0;
 
 	do {
-		int forward_progress, n_sent;
+		int n_sent, mondo_delivered, target_cpu_busy;
 
 		status = sun4v_cpu_mondo_send(cnt,
 					      tb->cpu_list_pa,
@@ -645,94 +671,113 @@ static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
 
 		/* HV_EOK means all cpus received the xcall, we're done.  */
 		if (likely(status == HV_EOK))
-			break;
+			goto xcall_done;
+
+		/* If not these non-fatal errors, panic */
+		if (unlikely((status != HV_EWOULDBLOCK) &&
+		    (status != HV_ECPUERROR) &&
+		    (status != HV_ENOCPU)))
+			goto fatal_errors;
 
 		/* First, see if we made any forward progress.
+		 *
+		 * Go through the cpu_list, count the target cpus that have
+		 * received our mondo (n_sent), and those that did not (rem).
+		 * Re-pack cpu_list with the cpus remain to be retried in the
+		 * front - this simplifies tracking the truly stalled cpus.
 		 *
 		 * The hypervisor indicates successful sends by setting
 		 * cpu list entries to the value 0xffff.
+		 *
+		 * EWOULDBLOCK means some target cpus did not receive the
+		 * mondo and retry usually helps.
+		 *
+		 * ECPUERROR means at least one target cpu is in error state,
+		 * it's usually safe to skip the faulty cpu and retry.
+		 *
+		 * ENOCPU means one of the target cpu doesn't belong to the
+		 * domain, perhaps offlined which is unexpected, but not
+		 * fatal and it's okay to skip the offlined cpu.
 		 */
+		rem = 0;
 		n_sent = 0;
 		for (i = 0; i < cnt; i++) {
-			if (likely(cpu_list[i] == 0xffff))
+			cpu = cpu_list[i];
+			if (likely(cpu == 0xffff)) {
 				n_sent++;
+			} else if ((status == HV_ECPUERROR) &&
+				(sun4v_cpu_state(cpu) == HV_CPU_STATE_ERROR)) {
+				ecpuerror_id = cpu + 1;
+			} else if (status == HV_ENOCPU && !cpu_online(cpu)) {
+				enocpu_id = cpu + 1;
+			} else {
+				cpu_list[rem++] = cpu;
+			}
 		}
 
-		forward_progress = 0;
-		if (n_sent > prev_sent)
-			forward_progress = 1;
+		/* No cpu remained, we're done. */
+		if (rem == 0)
+			break;
 
+		/* Otherwise, update the cpu count for retry. */
+		cnt = rem;
+
+		/* Record the overall number of mondos received by the
+		 * first of the remaining cpus.
+		 */
+		if (first_cpu != cpu_list[0]) {
+			first_cpu = cpu_list[0];
+			xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
+		}
+
+		/* Was any mondo delivered successfully? */
+		mondo_delivered = (n_sent > prev_sent);
 		prev_sent = n_sent;
 
-		/* If we get a HV_ECPUERROR, then one or more of the cpus
-		 * in the list are in error state.  Use the cpu_state()
-		 * hypervisor call to find out which cpus are in error state.
+		/* or, was any target cpu busy processing other mondos? */
+		target_cpu_busy = (xc_rcvd < CPU_MONDO_COUNTER(first_cpu));
+		xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
+
+		/* Retry count is for no progress. If we're making progress,
+		 * reset the retry count.
 		 */
-		if (unlikely(status == HV_ECPUERROR)) {
-			for (i = 0; i < cnt; i++) {
-				long err;
-				u16 cpu;
-
-				cpu = cpu_list[i];
-				if (cpu == 0xffff)
-					continue;
-
-				err = sun4v_cpu_state(cpu);
-				if (err == HV_CPU_STATE_ERROR) {
-					saw_cpu_error = (cpu + 1);
-					cpu_list[i] = 0xffff;
-				}
-			}
-		} else if (unlikely(status != HV_EWOULDBLOCK))
-			goto fatal_mondo_error;
-
-		/* Don't bother rewriting the CPU list, just leave the
-		 * 0xffff and non-0xffff entries in there and the
-		 * hypervisor will do the right thing.
-		 *
-		 * Only advance timeout state if we didn't make any
-		 * forward progress.
-		 */
-		if (unlikely(!forward_progress)) {
-			if (unlikely(++retries > 10000))
-				goto fatal_mondo_timeout;
-
-			/* Delay a little bit to let other cpus catch up
-			 * on their cpu mondo queue work.
-			 */
-			udelay(2 * cnt);
+		if (likely(mondo_delivered || target_cpu_busy)) {
+			tot_retries += retries;
+			retries = 0;
+		} else if (unlikely(retries > MONDO_RETRY_LIMIT)) {
+			goto fatal_mondo_timeout;
 		}
+
+		/* Delay a little bit to let other cpus catch up on
+		 * their cpu mondo queue work.
+		 */
+		if (!mondo_delivered)
+			udelay(usec_wait);
+
+		retries++;
 	} while (1);
 
-	if (unlikely(saw_cpu_error))
-		goto fatal_mondo_cpu_error;
+xcall_done:
+	if (unlikely(ecpuerror_id > 0)) {
+		pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) was in error state\n",
+			this_cpu, ecpuerror_id - 1);
+	} else if (unlikely(enocpu_id > 0)) {
+		pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) does not belong to the domain\n",
+			this_cpu, enocpu_id - 1);
+	}
 
 	return;
 
-fatal_mondo_cpu_error:
-	printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
-	       "(including %d) were in error state\n",
-	       this_cpu, saw_cpu_error - 1);
-	return;
+fatal_errors:
+	/* fatal errors include bad alignment, etc */
+	pr_crit("CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) mondo_block_pa(%lx)\n",
+		this_cpu, tot_cpus, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
+	panic("Unexpected SUN4V mondo error %lu\n", status);
 
 fatal_mondo_timeout:
-	printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
-	       " progress after %d retries.\n",
-	       this_cpu, retries);
-	goto dump_cpu_list_and_out;
-
-fatal_mondo_error:
-	printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
-	       this_cpu, status);
-	printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
-	       "mondo_block_pa(%lx)\n",
-	       this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
-
-dump_cpu_list_and_out:
-	printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
-	for (i = 0; i < cnt; i++)
-		printk("%u ", cpu_list[i]);
-	printk("]\n");
+	/* some cpus being non-responsive to the cpu mondo */
+	pr_crit("CPU[%d]: SUN4V mondo timeout, cpu(%d) made no forward progress after %d retries. Total target cpus(%d).\n",
+		this_cpu, first_cpu, (tot_retries + retries), tot_cpus);
+	panic("SUN4V mondo timeout panic\n");
 }
 
 static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);
static void (*xcall_deliver_impl)(struct trap_per_cpu *, int); static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);

View File

@@ -26,6 +26,21 @@ sun4v_cpu_mondo:
 	ldxa	[%g0] ASI_SCRATCHPAD, %g4
 	sub	%g4, TRAP_PER_CPU_FAULT_INFO, %g4
 
+	/* Get smp_processor_id() into %g3 */
+	sethi	%hi(trap_block), %g5
+	or	%g5, %lo(trap_block), %g5
+	sub	%g4, %g5, %g3
+	srlx	%g3, TRAP_BLOCK_SZ_SHIFT, %g3
+
+	/* Increment cpu_mondo_counter[smp_processor_id()] */
+	sethi	%hi(cpu_mondo_counter), %g5
+	or	%g5, %lo(cpu_mondo_counter), %g5
+	sllx	%g3, 3, %g3
+	add	%g5, %g3, %g5
+	ldx	[%g5], %g3
+	add	%g3, 1, %g3
+	stx	%g3, [%g5]
+
 	/* Get CPU mondo queue base phys address into %g7.  */
 	ldx	[%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7

View File

@@ -2733,6 +2733,7 @@ void do_getpsr(struct pt_regs *regs)
 	}
 }
 
+u64 cpu_mondo_counter[NR_CPUS] = {0};
 struct trap_per_cpu trap_block[NR_CPUS];
 EXPORT_SYMBOL(trap_block);

View File

@@ -348,6 +348,7 @@ config X86_DEBUG_FPU
 config PUNIT_ATOM_DEBUG
 	tristate "ATOM Punit debug driver"
+	depends on PCI
 	select DEBUG_FS
 	select IOSF_MBI
 	---help---

View File

@@ -73,12 +73,13 @@ UBSAN_SANITIZE := n
 $(obj)/bzImage: asflags-y  := $(SVGA_MODE)
 
 quiet_cmd_image = BUILD   $@
+silent_redirect_image = >/dev/null
 cmd_image = $(obj)/tools/build $(obj)/setup.bin $(obj)/vmlinux.bin \
-			       $(obj)/zoffset.h $@
+			       $(obj)/zoffset.h $@ $($(quiet)redirect_image)
 
 $(obj)/bzImage: $(obj)/setup.bin $(obj)/vmlinux.bin $(obj)/tools/build FORCE
 	$(call if_changed,image)
-	@echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
+	@$(kecho) 'Kernel: $@ is ready' ' (#'`cat .version`')'
 
 OBJCOPYFLAGS_vmlinux.bin := -O binary -R .note -R .comment -S
 $(obj)/vmlinux.bin: $(obj)/compressed/vmlinux FORCE

View File

@@ -1,5 +1,4 @@
 # CONFIG_64BIT is not set
-CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
@@ -125,7 +124,6 @@ CONFIG_NF_CONNTRACK_IPV4=y
 CONFIG_IP_NF_IPTABLES=y
 CONFIG_IP_NF_FILTER=y
 CONFIG_IP_NF_TARGET_REJECT=y
-CONFIG_IP_NF_TARGET_ULOG=y
 CONFIG_NF_NAT=y
 CONFIG_IP_NF_TARGET_MASQUERADE=y
 CONFIG_IP_NF_MANGLE=y
@@ -255,7 +253,6 @@ CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_UHCI_HCD=y
 CONFIG_USB_PRINTER=y
 CONFIG_USB_STORAGE=y
-CONFIG_USB_LIBUSUAL=y
 CONFIG_EDAC=y
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_HCTOSYS is not set

View File

@ -1,4 +1,3 @@
CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set # CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y CONFIG_POSIX_MQUEUE=y
@ -124,7 +123,6 @@ CONFIG_NF_CONNTRACK_IPV4=y
CONFIG_IP_NF_IPTABLES=y CONFIG_IP_NF_IPTABLES=y
CONFIG_IP_NF_FILTER=y CONFIG_IP_NF_FILTER=y
CONFIG_IP_NF_TARGET_REJECT=y CONFIG_IP_NF_TARGET_REJECT=y
CONFIG_IP_NF_TARGET_ULOG=y
CONFIG_NF_NAT=y CONFIG_NF_NAT=y
CONFIG_IP_NF_TARGET_MASQUERADE=y CONFIG_IP_NF_TARGET_MASQUERADE=y
CONFIG_IP_NF_MANGLE=y CONFIG_IP_NF_MANGLE=y
@ -251,7 +249,6 @@ CONFIG_USB_OHCI_HCD=y
CONFIG_USB_UHCI_HCD=y CONFIG_USB_UHCI_HCD=y
CONFIG_USB_PRINTER=y CONFIG_USB_PRINTER=y
CONFIG_USB_STORAGE=y CONFIG_USB_STORAGE=y
CONFIG_USB_LIBUSUAL=y
CONFIG_EDAC=y CONFIG_EDAC=y
CONFIG_RTC_CLASS=y CONFIG_RTC_CLASS=y
# CONFIG_RTC_HCTOSYS is not set # CONFIG_RTC_HCTOSYS is not set

View File

@@ -191,8 +191,8 @@ static void release_pmc_hardware(void) {}
 
 static bool check_hw_exists(void)
 {
-	u64 val, val_fail, val_new= ~0;
-	int i, reg, reg_fail, ret = 0;
+	u64 val, val_fail = -1, val_new= ~0;
+	int i, reg, reg_fail = -1, ret = 0;
 	int bios_fail = 0;
 	int reg_safe = -1;

View File

@@ -1708,6 +1708,120 @@ static __initconst const u64 glm_hw_cache_extra_regs
 	},
 };
 
+static __initconst const u64 glp_hw_cache_event_ids
+				[PERF_COUNT_HW_CACHE_MAX]
+				[PERF_COUNT_HW_CACHE_OP_MAX]
+				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+	[C(L1D)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */
+			[C(RESULT_MISS)]	= 0x0,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */
+			[C(RESULT_MISS)]	= 0x0,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= 0x0,
+			[C(RESULT_MISS)]	= 0x0,
+		},
+	},
+	[C(L1I)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= 0x0380,	/* ICACHE.ACCESSES */
+			[C(RESULT_MISS)]	= 0x0280,	/* ICACHE.MISSES */
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= -1,
+			[C(RESULT_MISS)]	= -1,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= 0x0,
+			[C(RESULT_MISS)]	= 0x0,
+		},
+	},
+	[C(LL)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
+			[C(RESULT_MISS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
+			[C(RESULT_MISS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= 0x0,
+			[C(RESULT_MISS)]	= 0x0,
+		},
+	},
+	[C(DTLB)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */
+			[C(RESULT_MISS)]	= 0xe08,	/* DTLB_LOAD_MISSES.WALK_COMPLETED */
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */
+			[C(RESULT_MISS)]	= 0xe49,	/* DTLB_STORE_MISSES.WALK_COMPLETED */
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= 0x0,
+			[C(RESULT_MISS)]	= 0x0,
+		},
+	},
+	[C(ITLB)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= 0x00c0,	/* INST_RETIRED.ANY_P */
+			[C(RESULT_MISS)]	= 0x0481,	/* ITLB.MISS */
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= -1,
+			[C(RESULT_MISS)]	= -1,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= -1,
+			[C(RESULT_MISS)]	= -1,
+		},
+	},
+	[C(BPU)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= 0x00c4,	/* BR_INST_RETIRED.ALL_BRANCHES */
+			[C(RESULT_MISS)]	= 0x00c5,	/* BR_MISP_RETIRED.ALL_BRANCHES */
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= -1,
+			[C(RESULT_MISS)]	= -1,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= -1,
+			[C(RESULT_MISS)]	= -1,
+		},
+	},
+};
+
+static __initconst const u64 glp_hw_cache_extra_regs
+				[PERF_COUNT_HW_CACHE_MAX]
+				[PERF_COUNT_HW_CACHE_OP_MAX]
+				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+	[C(LL)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= GLM_DEMAND_READ|
+						  GLM_LLC_ACCESS,
+			[C(RESULT_MISS)]	= GLM_DEMAND_READ|
+						  GLM_LLC_MISS,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= GLM_DEMAND_WRITE|
+						  GLM_LLC_ACCESS,
+			[C(RESULT_MISS)]	= GLM_DEMAND_WRITE|
+						  GLM_LLC_MISS,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= 0x0,
+			[C(RESULT_MISS)]	= 0x0,
+		},
+	},
+};
+
 #define KNL_OT_L2_HITE		BIT_ULL(19) /* Other Tile L2 Hit */
 #define KNL_OT_L2_HITF		BIT_ULL(20) /* Other Tile L2 Hit */
 #define KNL_MCDRAM_LOCAL	BIT_ULL(21)
@@ -3016,6 +3130,9 @@ static int hsw_hw_config(struct perf_event *event)
 	return 0;
 }
 
+static struct event_constraint counter0_constraint =
+			INTEL_ALL_EVENT_CONSTRAINT(0, 0x1);
+
 static struct event_constraint counter2_constraint =
 			EVENT_CONSTRAINT(0, 0x4, 0);
 
@@ -3037,6 +3154,21 @@ hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
 	return c;
 }
 
+static struct event_constraint *
+glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+			  struct perf_event *event)
+{
+	struct event_constraint *c;
+
+	/* :ppp means to do reduced skid PEBS which is PMC0 only. */
+	if (event->attr.precise_ip == 3)
+		return &counter0_constraint;
+
+	c = intel_get_event_constraints(cpuc, idx, event);
+
+	return c;
+}
+
 /*
  * Broadwell:
  *
@@ -3265,10 +3397,8 @@ static void intel_pmu_cpu_dying(int cpu)
 static void intel_pmu_sched_task(struct perf_event_context *ctx,
 				 bool sched_in)
 {
-	if (x86_pmu.pebs_active)
-		intel_pmu_pebs_sched_task(ctx, sched_in);
-	if (x86_pmu.lbr_nr)
-		intel_pmu_lbr_sched_task(ctx, sched_in);
+	intel_pmu_pebs_sched_task(ctx, sched_in);
+	intel_pmu_lbr_sched_task(ctx, sched_in);
 }
 
 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
@@ -3838,6 +3968,32 @@ __init int intel_pmu_init(void)
 		pr_cont("Goldmont events, ");
 		break;
 
+	case INTEL_FAM6_ATOM_GEMINI_LAKE:
+		memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
+		       sizeof(hw_cache_event_ids));
+		memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs,
+		       sizeof(hw_cache_extra_regs));
+
+		intel_pmu_lbr_init_skl();
+
+		x86_pmu.event_constraints = intel_slm_event_constraints;
+		x86_pmu.pebs_constraints = intel_glp_pebs_event_constraints;
+		x86_pmu.extra_regs = intel_glm_extra_regs;
+		/*
+		 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
+		 * for precise cycles.
+		 */
+		x86_pmu.pebs_aliases = NULL;
+		x86_pmu.pebs_prec_dist = true;
+		x86_pmu.lbr_pt_coexist = true;
+		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
+		x86_pmu.get_event_constraints = glp_get_event_constraints;
+		x86_pmu.cpu_events = glm_events_attrs;
+		/* Goldmont Plus has 4-wide pipeline */
+		event_attr_td_total_slots_scale_glm.event_str = "4";
+		pr_cont("Goldmont plus events, ");
+		break;
+
 	case INTEL_FAM6_WESTMERE:
 	case INTEL_FAM6_WESTMERE_EP:
 	case INTEL_FAM6_WESTMERE_EX:
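
A note on the glp_get_event_constraints() path above: perf's ":ppp" event modifier maps to perf_event_attr.precise_ip = 3, and on Goldmont Plus that reduced-skid PEBS mode is only available on general-purpose counter 0, which is exactly what the counter0 constraint encodes. A minimal userspace sketch of requesting it (roughly what perf record -e cycles:ppp sets up):

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.precise_ip = 3;		/* ":ppp" - maximum skid reduction */
	attr.sample_period = 100003;
	attr.exclude_kernel = 1;

	/* perf_event_open() has no glibc wrapper; invoke it directly. */
	long fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);

	return fd < 0;
}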

View File

@@ -40,16 +40,16 @@
  * Model specific counters:
  *	MSR_CORE_C1_RES: CORE C1 Residency Counter
  *			 perf code: 0x00
- *			 Available model: SLM,AMT
+ *			 Available model: SLM,AMT,GLM
  *			 Scope: Core (each processor core has a MSR)
  *	MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
  *			       perf code: 0x01
- *			       Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL
+ *			       Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,GLM
  *			       Scope: Core
  *	MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter
  *			       perf code: 0x02
  *			       Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW
- *						SKL,KNL
+ *						SKL,KNL,GLM
  *			       Scope: Core
  *	MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
  *			       perf code: 0x03
@@ -57,16 +57,17 @@
  *			       Scope: Core
  *	MSR_PKG_C2_RESIDENCY: Package C2 Residency Counter.
  *			      perf code: 0x00
- *			      Available model: SNB,IVB,HSW,BDW,SKL,KNL
+ *			      Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM
  *			      Scope: Package (physical package)
  *	MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter.
  *			      perf code: 0x01
  *			      Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL
+ *					       GLM
  *			      Scope: Package (physical package)
  *	MSR_PKG_C6_RESIDENCY: Package C6 Residency Counter.
  *			      perf code: 0x02
  *			      Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW
- *					       SKL,KNL
+ *					       SKL,KNL,GLM
  *			      Scope: Package (physical package)
  *	MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter.
  *			      perf code: 0x03
@@ -82,7 +83,7 @@
  *			      Scope: Package (physical package)
  *	MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
  *			       perf code: 0x06
- *			       Available model: HSW ULT only
+ *			       Available model: HSW ULT, GLM
  *			       Scope: Package (physical package)
  *
  */
@@ -504,6 +505,17 @@ static const struct cstate_model knl_cstates __initconst = {
 };
 
+static const struct cstate_model glm_cstates __initconst = {
+	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
+				  BIT(PERF_CSTATE_CORE_C3_RES) |
+				  BIT(PERF_CSTATE_CORE_C6_RES),
+
+	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
+				  BIT(PERF_CSTATE_PKG_C3_RES) |
+				  BIT(PERF_CSTATE_PKG_C6_RES) |
+				  BIT(PERF_CSTATE_PKG_C10_RES),
+};
+
 #define X86_CSTATES_MODEL(model, states) \
 	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long) &(states) }
 
@@ -546,6 +558,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
 	X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNL, knl_cstates),
 	X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNM, knl_cstates),
+
+	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT, glm_cstates),
 	{ },
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);

View File

@@ -606,12 +606,6 @@ static inline void intel_pmu_drain_pebs_buffer(void)
 	x86_pmu.drain_pebs(&regs);
 }
 
-void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in)
-{
-	if (!sched_in)
-		intel_pmu_drain_pebs_buffer();
-}
-
 /*
  * PEBS
  */
@@ -651,6 +645,12 @@ struct event_constraint intel_glm_pebs_event_constraints[] = {
 	EVENT_CONSTRAINT_END
 };
 
+struct event_constraint intel_glp_pebs_event_constraints[] = {
+	/* Allow all events as PEBS with no flags */
+	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
+	EVENT_CONSTRAINT_END
+};
+
 struct event_constraint intel_nehalem_pebs_event_constraints[] = {
 	INTEL_PLD_CONSTRAINT(0x100b, 0xf),	/* MEM_INST_RETIRED.* */
 	INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf),	/* MEM_UNCORE_RETIRED.* */
@@ -816,6 +816,14 @@ static inline bool pebs_needs_sched_cb(struct cpu_hw_events *cpuc)
 	return cpuc->n_pebs && (cpuc->n_pebs == cpuc->n_large_pebs);
 }
 
+void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in)
+{
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+
+	if (!sched_in && pebs_needs_sched_cb(cpuc))
+		intel_pmu_drain_pebs_buffer();
+}
+
 static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
 {
 	struct debug_store *ds = cpuc->ds;
@@ -889,6 +897,8 @@ void intel_pmu_pebs_enable(struct perf_event *event)
 	if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
 		ds->pebs_event_reset[hwc->idx] =
 			(u64)(-hwc->sample_period) & x86_pmu.cntval_mask;
+	} else {
+		ds->pebs_event_reset[hwc->idx] = 0;
 	}
 }

View File

@@ -380,8 +380,12 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
 
 void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
 {
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct x86_perf_task_context *task_ctx;
 
+	if (!cpuc->lbr_users)
+		return;
+
 	/*
 	 * If LBR callstack feature is enabled and the stack was saved when
 	 * the task was scheduled out, restore the stack. Otherwise flush

View File

@@ -879,6 +879,8 @@ extern struct event_constraint intel_slm_pebs_event_constraints[];
 
 extern struct event_constraint intel_glm_pebs_event_constraints[];
 
+extern struct event_constraint intel_glp_pebs_event_constraints[];
+
 extern struct event_constraint intel_nehalem_pebs_event_constraints[];
 
 extern struct event_constraint intel_westmere_pebs_event_constraints[];

View File

@@ -35,7 +35,7 @@
 #define _BUG_FLAGS(ins, flags) \
 do { \
 	asm volatile("1:\t" ins "\n" \
-		     ".pushsection __bug_table,\"a\"\n" \
+		     ".pushsection __bug_table,\"aw\"\n" \
 		     "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \
 		     "\t" __BUG_REL(%c0) "\t# bug_entry::file\n" \
 		     "\t.word %c1" "\t# bug_entry::line\n" \
@@ -52,7 +52,7 @@ do { \
 #define _BUG_FLAGS(ins, flags) \
 do { \
 	asm volatile("1:\t" ins "\n" \
-		     ".pushsection __bug_table,\"a\"\n" \
+		     ".pushsection __bug_table,\"aw\"\n" \
 		     "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \
 		     "\t.word %c0" "\t# bug_entry::flags\n" \
 		     "\t.org 2b+%c1\n" \

View File

@@ -328,13 +328,13 @@ static inline unsigned type in##bwl##_p(int port) \
 static inline void outs##bwl(int port, const void *addr, unsigned long count) \
 { \
 	asm volatile("rep; outs" #bwl \
-		     : "+S"(addr), "+c"(count) : "d"(port)); \
+		     : "+S"(addr), "+c"(count) : "d"(port) : "memory"); \
 } \
 \
 static inline void ins##bwl(int port, void *addr, unsigned long count) \
 { \
 	asm volatile("rep; ins" #bwl \
-		     : "+D"(addr), "+c"(count) : "d"(port)); \
+		     : "+D"(addr), "+c"(count) : "d"(port) : "memory"); \
 }
 
 BUILDIO(b, b, char)
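
The added "memory" clobber matters because the compiler only sees the pointer value as an asm input; nothing tells it the instruction dereferences the buffer. Without the clobber it may reorder or drop stores that fill the buffer before an outs, or keep stale cached loads after an ins. A hedged stand-alone illustration of the same constraint pattern:

#include <stdint.h>

static inline void outsb_sketch(uint16_t port, const void *addr,
				unsigned long count)
{
	/* "memory" forces prior stores to *addr to be completed before
	 * the asm runs, and discards cached loads afterwards. */
	asm volatile("rep; outsb"
		     : "+S"(addr), "+c"(count)
		     : "d"(port)
		     : "memory");
}

void send_command(uint16_t port)
{
	uint8_t buf[4] = { 0x12, 0x34, 0x56, 0x78 };

	/* Without the clobber, the optimizer could legally sink this
	 * initialization past the asm, which never names buf's bytes. */
	outsb_sketch(port, buf, sizeof(buf));
}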

View File

@@ -52,10 +52,10 @@ typedef u8 kprobe_opcode_t;
 #define flush_insn_slot(p)	do { } while (0)
 
 /* optinsn template addresses */
-extern __visible kprobe_opcode_t optprobe_template_entry;
-extern __visible kprobe_opcode_t optprobe_template_val;
-extern __visible kprobe_opcode_t optprobe_template_call;
-extern __visible kprobe_opcode_t optprobe_template_end;
+extern __visible kprobe_opcode_t optprobe_template_entry[];
+extern __visible kprobe_opcode_t optprobe_template_val[];
+extern __visible kprobe_opcode_t optprobe_template_call[];
+extern __visible kprobe_opcode_t optprobe_template_end[];
 #define MAX_OPTIMIZED_LENGTH (MAX_INSN_SIZE + RELATIVE_ADDR_SIZE)
 #define MAX_OPTINSN_SIZE \
 	(((unsigned long)&optprobe_template_end - \

View File

@@ -293,7 +293,7 @@ static inline unsigned long __get_current_cr3_fast(void)
 	unsigned long cr3 = __pa(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd);
 
 	/* For now, be very restrictive about when this can be called. */
-	VM_WARN_ON(in_nmi() || !in_atomic());
+	VM_WARN_ON(in_nmi() || preemptible());
 
 	VM_BUG_ON(cr3 != __read_cr3());
 	return cr3;

View File

@@ -84,7 +84,7 @@ struct pv_init_ops {
 	 */
 	unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
 			  unsigned long addr, unsigned len);
-};
+} __no_randomize_layout;
 
 struct pv_lazy_ops {
@@ -92,12 +92,12 @@ struct pv_lazy_ops {
 	void (*enter)(void);
 	void (*leave)(void);
 	void (*flush)(void);
-};
+} __no_randomize_layout;
 
 struct pv_time_ops {
 	unsigned long long (*sched_clock)(void);
 	unsigned long long (*steal_clock)(int cpu);
-};
+} __no_randomize_layout;
 
 struct pv_cpu_ops {
 	/* hooks for various privileged instructions */
@@ -176,7 +176,7 @@ struct pv_cpu_ops {
 
 	void (*start_context_switch)(struct task_struct *prev);
 	void (*end_context_switch)(struct task_struct *next);
-};
+} __no_randomize_layout;
 
 struct pv_irq_ops {
 	/*
@@ -199,7 +199,7 @@ struct pv_irq_ops {
 #ifdef CONFIG_X86_64
 	void (*adjust_exception_frame)(void);
 #endif
-};
+} __no_randomize_layout;
 
 struct pv_mmu_ops {
 	unsigned long (*read_cr2)(void);
@@ -305,7 +305,7 @@ struct pv_mmu_ops {
 	   an mfn.  We can tell which is which from the index. */
 	void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
 			   phys_addr_t phys, pgprot_t flags);
-};
+} __no_randomize_layout;
 
 struct arch_spinlock;
 #ifdef CONFIG_SMP
@@ -322,7 +322,7 @@ struct pv_lock_ops {
 	void (*kick)(int cpu);
 
 	struct paravirt_callee_save vcpu_is_preempted;
-};
+} __no_randomize_layout;
 
 /* This contains all the paravirt structures: we get a convenient
  * number for each function using the offset which we use to indicate
@@ -334,7 +334,7 @@ struct paravirt_patch_template {
 	struct pv_irq_ops pv_irq_ops;
 	struct pv_mmu_ops pv_mmu_ops;
 	struct pv_lock_ops pv_lock_ops;
-};
+} __no_randomize_layout;
 
 extern struct pv_info pv_info;
 extern struct pv_init_ops pv_init_ops;


@@ -129,7 +129,7 @@ struct cpuinfo_x86 {
 	/* Index into per_cpu list: */
 	u16 cpu_index;
 	u32 microcode;
-};
+} __randomize_layout;
 struct cpuid_regs {
 	u32 eax, ebx, ecx, edx;


@@ -346,6 +346,14 @@ static void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
 	int pin;
 	struct mpc_intsrc mp_irq;
+	/*
+	 * Check bus_irq boundary.
+	 */
+	if (bus_irq >= NR_IRQS_LEGACY) {
+		pr_warn("Invalid bus_irq %u for legacy override\n", bus_irq);
+		return;
+	}
 	/*
 	 * Convert 'gsi' to 'ioapic.pin'.
 	 */


@@ -2093,7 +2093,7 @@ static inline void __init check_timer(void)
 		int idx;
 		idx = find_irq_entry(apic1, pin1, mp_INT);
 		if (idx != -1 && irq_trigger(idx))
-			unmask_ioapic_irq(irq_get_chip_data(0));
+			unmask_ioapic_irq(irq_get_irq_data(0));
 	}
 	irq_domain_deactivate_irq(irq_data);
 	irq_domain_activate_irq(irq_data);


@@ -134,6 +134,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
 		n = K6_BUG_LOOP;
 		f_vide = vide;
+		OPTIMIZER_HIDE_VAR(f_vide);
 		d = rdtsc();
 		while (n--)
 			f_vide();


@@ -235,8 +235,7 @@ static void __init dtb_add_ioapic(struct device_node *dn)
 	ret = of_address_to_resource(dn, 0, &r);
 	if (ret) {
-		printk(KERN_ERR "Can't obtain address from node %s.\n",
-		       dn->full_name);
+		printk(KERN_ERR "Can't obtain address from device node %pOF.\n", dn);
 		return;
 	}
 	mp_register_ioapic(++ioapic_id, r.start, gsi_top, &cfg);


@@ -22,7 +22,7 @@ config KVM
 	depends on HAVE_KVM
 	depends on HIGH_RES_TIMERS
 	# for TASKSTATS/TASK_DELAY_ACCT:
-	depends on NET
+	depends on NET && MULTIUSER
 	select PREEMPT_NOTIFIERS
 	select MMU_NOTIFIER
 	select ANON_INODES


@@ -649,9 +649,10 @@ void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
 			}
 			if ((stimer->config & HV_STIMER_ENABLE) &&
-			    stimer->count)
-				stimer_start(stimer);
-			else
+			    stimer->count) {
+				if (!stimer->msg_pending)
+					stimer_start(stimer);
+			} else
 				stimer_cleanup(stimer);
 		}
 	}


@@ -198,7 +198,8 @@ struct loaded_vmcs {
 	struct vmcs *vmcs;
 	struct vmcs *shadow_vmcs;
 	int cpu;
-	int launched;
+	bool launched;
+	bool nmi_known_unmasked;
 	struct list_head loaded_vmcss_on_cpu_link;
 };
@@ -2326,6 +2327,11 @@ static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
 	__vmx_load_host_state(to_vmx(vcpu));
 }
+static bool emulation_required(struct kvm_vcpu *vcpu)
+{
+	return emulate_invalid_guest_state && !guest_state_valid(vcpu);
+}
 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);
 /*
@@ -2363,6 +2369,8 @@ static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
 static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 {
+	unsigned long old_rflags = vmx_get_rflags(vcpu);
 	__set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
 	to_vmx(vcpu)->rflags = rflags;
 	if (to_vmx(vcpu)->rmode.vm86_active) {
@@ -2370,6 +2378,9 @@ static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 		rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
 	}
 	vmcs_writel(GUEST_RFLAGS, rflags);
+	if ((old_rflags ^ to_vmx(vcpu)->rflags) & X86_EFLAGS_VM)
+		to_vmx(vcpu)->emulation_required = emulation_required(vcpu);
 }
 static u32 vmx_get_pkru(struct kvm_vcpu *vcpu)
@@ -3857,11 +3868,6 @@ static __init int alloc_kvm_area(void)
 	return 0;
 }
-static bool emulation_required(struct kvm_vcpu *vcpu)
-{
-	return emulate_invalid_guest_state && !guest_state_valid(vcpu);
-}
 static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg,
 			  struct kvm_segment *save)
 {
@@ -5510,10 +5516,8 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	if (!is_guest_mode(vcpu)) {
-		++vcpu->stat.nmi_injections;
-		vmx->nmi_known_unmasked = false;
-	}
+	++vcpu->stat.nmi_injections;
+	vmx->loaded_vmcs->nmi_known_unmasked = false;
 	if (vmx->rmode.vm86_active) {
 		if (kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0) != EMULATE_DONE)
@@ -5527,16 +5531,21 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
 static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
 {
-	if (to_vmx(vcpu)->nmi_known_unmasked)
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	bool masked;
+	if (vmx->loaded_vmcs->nmi_known_unmasked)
 		return false;
-	return vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
+	masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
+	vmx->loaded_vmcs->nmi_known_unmasked = !masked;
+	return masked;
 }
 static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	vmx->nmi_known_unmasked = !masked;
+	vmx->loaded_vmcs->nmi_known_unmasked = !masked;
 	if (masked)
 		vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
 			      GUEST_INTR_STATE_NMI);
@@ -8736,7 +8745,7 @@ static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
 	idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
-	if (vmx->nmi_known_unmasked)
+	if (vmx->loaded_vmcs->nmi_known_unmasked)
 		return;
 	/*
 	 * Can't use vmx->exit_intr_info since we're not sure what
@@ -8760,7 +8769,7 @@ static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
 		vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
 			      GUEST_INTR_STATE_NMI);
 	else
-		vmx->nmi_known_unmasked =
+		vmx->loaded_vmcs->nmi_known_unmasked =
 			!(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
 			  & GUEST_INTR_STATE_NMI);
 }
@@ -10488,6 +10497,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 {
 	struct vmcs12 *vmcs12;
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
 	u32 exit_qual;
 	int ret;
@@ -10512,6 +10522,12 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	 * for misconfigurations which will anyway be caught by the processor
 	 * when using the merged vmcs02.
 	 */
+	if (interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS) {
+		nested_vmx_failValid(vcpu,
+				     VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS);
+		goto out;
+	}
 	if (vmcs12->launch_state == launch) {
 		nested_vmx_failValid(vcpu,
 				     launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS


@@ -597,8 +597,8 @@ bool pdptrs_changed(struct kvm_vcpu *vcpu)
 		      (unsigned long *)&vcpu->arch.regs_avail))
 		return true;
-	gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT;
-	offset = (kvm_read_cr3(vcpu) & ~31u) & (PAGE_SIZE - 1);
+	gfn = (kvm_read_cr3(vcpu) & ~31ul) >> PAGE_SHIFT;
+	offset = (kvm_read_cr3(vcpu) & ~31ul) & (PAGE_SIZE - 1);
 	r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
 				       PFERR_USER_MASK | PFERR_WRITE_MASK);
 	if (r < 0)


@@ -5,8 +5,8 @@
 #DEBUG = -DDEBUGGING
 DEBUG =
 PARANOID = -DPARANOID
-EXTRA_CFLAGS := $(PARANOID) $(DEBUG) -fno-builtin $(MATH_EMULATION)
-EXTRA_AFLAGS := $(PARANOID)
+ccflags-y += $(PARANOID) $(DEBUG) -fno-builtin $(MATH_EMULATION)
+asflags-y += $(PARANOID)
 # From 'C' language sources:
 C_OBJS =fpu_entry.o errors.o \


@@ -157,7 +157,7 @@ extern u_char const data_sizes_16[32];
 #define signbyte(a) (((u_char *)(a))[9])
 #define getsign(a) (signbyte(a) & 0x80)
-#define setsign(a,b) { if (b) signbyte(a) |= 0x80; else signbyte(a) &= 0x7f; }
+#define setsign(a,b) { if ((b) != 0) signbyte(a) |= 0x80; else signbyte(a) &= 0x7f; }
 #define copysign(a,b) { if (getsign(a)) signbyte(b) |= 0x80; \
 			else signbyte(b) &= 0x7f; }
 #define changesign(a) { signbyte(a) ^= 0x80; }


@@ -168,7 +168,7 @@ static int compare(FPU_REG const *b, int tagb)
 /* This function requires that st(0) is not empty */
 int FPU_compare_st_data(FPU_REG const *loaded_data, u_char loaded_tag)
 {
-	int f = 0, c;
+	int f, c;
 	c = compare(loaded_data, loaded_tag);
@@ -189,12 +189,12 @@ int FPU_compare_st_data(FPU_REG const *loaded_data, u_char loaded_tag)
 	case COMP_No_Comp:
 		f = SW_C3 | SW_C2 | SW_C0;
 		break;
-#ifdef PARANOID
 	default:
+#ifdef PARANOID
 		EXCEPTION(EX_INTERNAL | 0x121);
+#endif /* PARANOID */
 		f = SW_C3 | SW_C2 | SW_C0;
 		break;
-#endif /* PARANOID */
 	}
 	setcc(f);
 	if (c & COMP_Denormal) {
@@ -205,7 +205,7 @@ int FPU_compare_st_data(FPU_REG const *loaded_data, u_char loaded_tag)
 static int compare_st_st(int nr)
 {
-	int f = 0, c;
+	int f, c;
 	FPU_REG *st_ptr;
 	if (!NOT_EMPTY(0) || !NOT_EMPTY(nr)) {
@@ -235,12 +235,12 @@ static int compare_st_st(int nr)
 	case COMP_No_Comp:
 		f = SW_C3 | SW_C2 | SW_C0;
 		break;
-#ifdef PARANOID
 	default:
+#ifdef PARANOID
 		EXCEPTION(EX_INTERNAL | 0x122);
+#endif /* PARANOID */
 		f = SW_C3 | SW_C2 | SW_C0;
 		break;
-#endif /* PARANOID */
 	}
 	setcc(f);
 	if (c & COMP_Denormal) {
@@ -283,12 +283,12 @@ static int compare_i_st_st(int nr)
 	case COMP_No_Comp:
 		f = X86_EFLAGS_ZF | X86_EFLAGS_PF | X86_EFLAGS_CF;
 		break;
-#ifdef PARANOID
 	default:
+#ifdef PARANOID
 		EXCEPTION(EX_INTERNAL | 0x122);
+#endif /* PARANOID */
 		f = 0;
 		break;
-#endif /* PARANOID */
 	}
 	FPU_EFLAGS = (FPU_EFLAGS & ~(X86_EFLAGS_ZF | X86_EFLAGS_PF | X86_EFLAGS_CF)) | f;
 	if (c & COMP_Denormal) {


@@ -38,8 +38,10 @@ static void __init *max7315_platform_data(void *info)
 	 */
 	strcpy(i2c_info->type, "max7315");
 	if (nr++) {
-		sprintf(base_pin_name, "max7315_%d_base", nr);
-		sprintf(intr_pin_name, "max7315_%d_int", nr);
+		snprintf(base_pin_name, sizeof(base_pin_name),
+			 "max7315_%d_base", nr);
+		snprintf(intr_pin_name, sizeof(intr_pin_name),
+			 "max7315_%d_int", nr);
 	} else {
 		strcpy(base_pin_name, "max7315_base");
 		strcpy(intr_pin_name, "max7315_int");


@@ -40,7 +40,6 @@ static int timeout_base_ns[] = {
 static int timeout_us;
 static bool nobau = true;
 static int nobau_perm;
-static cycles_t congested_cycles;
 /* tunables: */
 static int max_concurr = MAX_BAU_CONCURRENT;
@@ -829,10 +828,10 @@ static void record_send_stats(cycles_t time1, cycles_t time2,
 	if ((completion_status == FLUSH_COMPLETE) && (try == 1)) {
 		bcp->period_requests++;
 		bcp->period_time += elapsed;
-		if ((elapsed > congested_cycles) &&
+		if ((elapsed > usec_2_cycles(bcp->cong_response_us)) &&
 		    (bcp->period_requests > bcp->cong_reps) &&
 		    ((bcp->period_time / bcp->period_requests) >
-		     congested_cycles)) {
+		     usec_2_cycles(bcp->cong_response_us))) {
 			stat->s_congested++;
 			disable_for_period(bcp, stat);
 		}
@@ -2222,14 +2221,17 @@ static int __init uv_bau_init(void)
 	else if (is_uv1_hub())
 		ops = uv1_bau_ops;
+	nuvhubs = uv_num_possible_blades();
+	if (nuvhubs < 2) {
+		pr_crit("UV: BAU disabled - insufficient hub count\n");
+		goto err_bau_disable;
+	}
 	for_each_possible_cpu(cur_cpu) {
 		mask = &per_cpu(uv_flush_tlb_mask, cur_cpu);
 		zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cur_cpu));
 	}
-	nuvhubs = uv_num_possible_blades();
-	congested_cycles = usec_2_cycles(congested_respns_us);
 	uv_base_pnode = 0x7fffffff;
 	for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
 		cpus = uv_blade_nr_possible_cpus(uvhub);
@@ -2242,9 +2244,8 @@ static int __init uv_bau_init(void)
 	enable_timeouts();
 	if (init_per_cpu(nuvhubs, uv_base_pnode)) {
-		set_bau_off();
-		nobau_perm = 1;
-		return 0;
+		pr_crit("UV: BAU disabled - per CPU init failed\n");
+		goto err_bau_disable;
 	}
 	vector = UV_BAU_MESSAGE;
@@ -2270,6 +2271,16 @@ static int __init uv_bau_init(void)
 	}
 	return 0;
+err_bau_disable:
+	for_each_possible_cpu(cur_cpu)
+		free_cpumask_var(per_cpu(uv_flush_tlb_mask, cur_cpu));
+	set_bau_off();
+	nobau_perm = 1;
+	return -EINVAL;
 }
 core_initcall(uv_bau_init);
 fs_initcall(uv_ptc_init);
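
Editor's note: the uv_bau_init() rework above consolidates every failure path into a single err_bau_disable label that frees the per-CPU masks before permanently disabling the BAU. A minimal user-space sketch of that goto-unwind idiom (hypothetical names, not the kernel code):

    #include <stdlib.h>

    /* Each failure jumps to one label that releases everything
     * allocated so far, so cleanup lives in exactly one place. */
    static int init_units(int **slots, int n)
    {
        int i;

        for (i = 0; i < n; i++) {
            slots[i] = malloc(sizeof(int));
            if (!slots[i])
                goto err_unwind;
        }
        return 0;

    err_unwind:
        while (i-- > 0)
            free(slots[i]);
        return -1;
    }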


@@ -19,6 +19,7 @@
 #include <linux/irq_work.h>
 #include <linux/tick.h>
 #include <linux/nmi.h>
+#include <linux/cpuhotplug.h>
 #include <asm/paravirt.h>
 #include <asm/desc.h>
@@ -413,7 +414,7 @@ static void xen_pv_play_dead(void) /* used only with HOTPLUG_CPU */
 	 */
 	tick_nohz_idle_enter();
-	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
+	cpuhp_online_idle(CPUHP_AP_ONLINE_IDLE);
 }
 #else /* !CONFIG_HOTPLUG_CPU */


@@ -309,7 +309,6 @@ static irqreturn_t xen_timer_interrupt(int irq, void *dev_id)
 void xen_teardown_timer(int cpu)
 {
 	struct clock_event_device *evt;
-	BUG_ON(cpu == 0);
 	evt = &per_cpu(xen_clock_events, cpu).evt;
 	if (evt->irq >= 0) {


@@ -105,7 +105,7 @@
 #define TIOCGPKT	_IOR('T', 0x38, int) /* Get packet mode state */
 #define TIOCGPTLCK	_IOR('T', 0x39, int) /* Get Pty lock state */
 #define TIOCGEXCL	_IOR('T', 0x40, int) /* Get exclusive mode state */
-#define TIOCGPTPEER	_IOR('T', 0x41, int) /* Safely open the slave */
+#define TIOCGPTPEER	_IO('T', 0x41) /* Safely open the slave */
 #define TIOCSERCONFIG	_IO('T', 83)
 #define TIOCSERGWILD	_IOR('T', 84, int)


@@ -4299,6 +4299,9 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
 			bfq_bfqq_expire(bfqd, bfqq, false,
 					BFQQE_NO_MORE_REQUESTS);
 	}
+	if (!bfqd->rq_in_driver)
+		bfq_schedule_dispatch(bfqd);
 }
 static void bfq_put_rq_priv_body(struct bfq_queue *bfqq)


@@ -52,7 +52,7 @@ struct bfq_entity;
 struct bfq_service_tree {
 	/* tree for active entities (i.e., those backlogged) */
 	struct rb_root active;
-	/* tree for idle entities (i.e., not backlogged, with V <= F_i)*/
+	/* tree for idle entities (i.e., not backlogged, with V < F_i)*/
 	struct rb_root idle;
 	/* idle entity with minimum F_i */


@@ -1297,7 +1297,7 @@ static void bfq_update_vtime(struct bfq_service_tree *st, u64 new_value)
  *
  * This function searches the first schedulable entity, starting from the
  * root of the tree and going on the left every time on this side there is
- * a subtree with at least one eligible (start >= vtime) entity. The path on
+ * a subtree with at least one eligible (start <= vtime) entity. The path on
  * the right is followed only if a) the left subtree contains no eligible
  * entities and b) no eligible entity has been found yet.
  */


@@ -3160,6 +3160,8 @@ static struct acpi_driver acpi_nfit_driver = {
 static __init int nfit_init(void)
 {
+	int ret;
 	BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
 	BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
 	BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
@@ -3187,8 +3189,14 @@ static __init int nfit_init(void)
 		return -ENOMEM;
 	nfit_mce_register();
+	ret = acpi_bus_register_driver(&acpi_nfit_driver);
+	if (ret) {
+		nfit_mce_unregister();
+		destroy_workqueue(nfit_wq);
+	}
-	return acpi_bus_register_driver(&acpi_nfit_driver);
+	return ret;
 }
 static __exit void nfit_exit(void)


@@ -1613,7 +1613,7 @@ static int zatm_init_one(struct pci_dev *pci_dev,
 	ret = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
 	if (ret < 0)
-		goto out_disable;
+		goto out_release;
 	zatm_dev->pci_dev = pci_dev;
 	dev->dev_data = zatm_dev;


@@ -1222,8 +1222,6 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
 	spin_unlock_irq(&dev->power.lock);
-	dev_pm_domain_set(dev, &genpd->domain);
 	return gpd_data;
 err_free:
@@ -1237,8 +1235,6 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
 static void genpd_free_dev_data(struct device *dev,
 				struct generic_pm_domain_data *gpd_data)
 {
-	dev_pm_domain_set(dev, NULL);
 	spin_lock_irq(&dev->power.lock);
 	dev->power.subsys_data->domain_data = NULL;
@@ -1275,6 +1271,8 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
 	if (ret)
 		goto out;
+	dev_pm_domain_set(dev, &genpd->domain);
 	genpd->device_count++;
 	genpd->max_off_time_changed = true;
@@ -1336,6 +1334,8 @@ static int genpd_remove_device(struct generic_pm_domain *genpd,
 	if (genpd->detach_dev)
 		genpd->detach_dev(genpd, dev);
+	dev_pm_domain_set(dev, NULL);
 	list_del_init(&pdd->list_node);
 	genpd_unlock(genpd);


@@ -1,7 +1,7 @@
 /*
  * Register map access API - W1 (1-Wire) support
  *
- * Copyright (C) 2017 OAO Radioavionica
+ * Copyright (c) 2017 Radioavionica Corporation
  * Author: Alex A. Mihaylov <minimumlaw@rambler.ru>
  *
  * This program is free software; you can redistribute it and/or modify
@@ -11,7 +11,7 @@
 #include <linux/regmap.h>
 #include <linux/module.h>
-#include "../../w1/w1.h"
+#include <linux/w1.h>
 #include "internal.h"


@@ -626,7 +626,6 @@ static void recv_work(struct work_struct *work)
 	struct nbd_device *nbd = args->nbd;
 	struct nbd_config *config = nbd->config;
 	struct nbd_cmd *cmd;
-	int ret = 0;
 	while (1) {
 		cmd = nbd_read_stat(nbd, args->index);
@@ -636,7 +635,6 @@ static void recv_work(struct work_struct *work)
 			mutex_lock(&nsock->tx_lock);
 			nbd_mark_nsock_dead(nbd, nsock, 1);
 			mutex_unlock(&nsock->tx_lock);
-			ret = PTR_ERR(cmd);
 			break;
 		}


@@ -41,8 +41,16 @@ static __init int timer_irq_init(struct device_node *np,
 	struct timer_of *to = container_of(of_irq, struct timer_of, of_irq);
 	struct clock_event_device *clkevt = &to->clkevt;
-	of_irq->irq = of_irq->name ? of_irq_get_byname(np, of_irq->name):
-		irq_of_parse_and_map(np, of_irq->index);
+	if (of_irq->name) {
+		of_irq->irq = ret = of_irq_get_byname(np, of_irq->name);
+		if (ret < 0) {
+			pr_err("Failed to get interrupt %s for %s\n",
+			       of_irq->name, np->full_name);
+			return ret;
+		}
+	} else {
+		of_irq->irq = irq_of_parse_and_map(np, of_irq->index);
+	}
 	if (!of_irq->irq) {
 		pr_err("Failed to map interrupt for %s\n", np->full_name);
 		return -EINVAL;


@@ -225,6 +225,9 @@ struct global_params {
  * @vid: Stores VID limits for this CPU
  * @pid: Stores PID parameters for this CPU
  * @last_sample_time: Last Sample time
+ * @aperf_mperf_shift: Number of clock cycles after aperf, merf is incremented
+ *			This shift is a multiplier to mperf delta to
+ *			calculate CPU busy.
  * @prev_aperf: Last APERF value read from APERF MSR
  * @prev_mperf: Last MPERF value read from MPERF MSR
  * @prev_tsc: Last timestamp counter (TSC) value
@@ -259,6 +262,7 @@ struct cpudata {
 	u64 last_update;
 	u64 last_sample_time;
+	u64 aperf_mperf_shift;
 	u64 prev_aperf;
 	u64 prev_mperf;
 	u64 prev_tsc;
@@ -321,6 +325,7 @@ struct pstate_funcs {
 	int (*get_min)(void);
 	int (*get_turbo)(void);
 	int (*get_scaling)(void);
+	int (*get_aperf_mperf_shift)(void);
 	u64 (*get_val)(struct cpudata*, int pstate);
 	void (*get_vid)(struct cpudata *);
 	void (*update_util)(struct update_util_data *data, u64 time,
@@ -1486,6 +1491,11 @@ static u64 core_get_val(struct cpudata *cpudata, int pstate)
 	return val;
 }
+static int knl_get_aperf_mperf_shift(void)
+{
+	return 10;
+}
 static int knl_get_turbo_pstate(void)
 {
 	u64 value;
@@ -1543,6 +1553,9 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
 	cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
 	cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+	if (pstate_funcs.get_aperf_mperf_shift)
+		cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();
 	if (pstate_funcs.get_vid)
 		pstate_funcs.get_vid(cpu);
@@ -1616,7 +1629,8 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
 	int32_t busy_frac, boost;
 	int target, avg_pstate;
-	busy_frac = div_fp(sample->mperf, sample->tsc);
+	busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift,
+			   sample->tsc);
 	boost = cpu->iowait_boost;
 	cpu->iowait_boost >>= 1;
@@ -1675,7 +1689,8 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
 		sample_ratio = div_fp(pid_params.sample_rate_ns, duration_ns);
 		perf_scaled = mul_fp(perf_scaled, sample_ratio);
 	} else {
-		sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc);
+		sample_ratio = div_fp(100 * (cpu->sample.mperf << cpu->aperf_mperf_shift),
+				      cpu->sample.tsc);
 		if (sample_ratio < int_tofp(1))
 			perf_scaled = 0;
 	}
@@ -1807,6 +1822,7 @@ static const struct pstate_funcs knl_funcs = {
 	.get_max_physical = core_get_max_pstate_physical,
 	.get_min = core_get_min_pstate,
 	.get_turbo = knl_get_turbo_pstate,
+	.get_aperf_mperf_shift = knl_get_aperf_mperf_shift,
 	.get_scaling = core_get_scaling,
 	.get_val = core_get_val,
 	.update_util = intel_pstate_update_util_pid,
@@ -2403,6 +2419,7 @@ static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
 	pstate_funcs.get_val = funcs->get_val;
 	pstate_funcs.get_vid = funcs->get_vid;
 	pstate_funcs.update_util = funcs->update_util;
+	pstate_funcs.get_aperf_mperf_shift = funcs->get_aperf_mperf_shift;
 	intel_pstate_use_acpi_profile();
 }
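
Editor's note: the aperf_mperf_shift hook above exists because Knights Landing advances MPERF only once per 1024 TSC cycles (hence knl_get_aperf_mperf_shift() returning 10), so the raw mperf delta understates utilization. A rough sketch of the corrected busy calculation with plain integers instead of the driver's fixed-point helpers (tsc_delta assumed nonzero):

    #include <stdint.h>

    /* Scale the slow-ticking mperf delta by 2^shift so it is
     * comparable with the TSC delta; shift is 0 on most CPUs. */
    static uint64_t busy_percent(uint64_t mperf_delta, uint64_t tsc_delta,
                                 unsigned int shift)
    {
        return (100 * (mperf_delta << shift)) / tsc_delta;
    }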


@@ -21,5 +21,5 @@ struct dax_region *alloc_dax_region(struct device *parent,
 		int region_id, struct resource *res, unsigned int align,
 		void *addr, unsigned long flags);
 struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
-		struct resource *res, int count);
+		int id, struct resource *res, int count);
 #endif /* __DEVICE_DAX_H__ */


@@ -529,7 +529,8 @@ static void dev_dax_release(struct device *dev)
 	struct dax_region *dax_region = dev_dax->region;
 	struct dax_device *dax_dev = dev_dax->dax_dev;
-	ida_simple_remove(&dax_region->ida, dev_dax->id);
+	if (dev_dax->id >= 0)
+		ida_simple_remove(&dax_region->ida, dev_dax->id);
 	dax_region_put(dax_region);
 	put_dax(dax_dev);
 	kfree(dev_dax);
@@ -559,7 +560,7 @@ static void unregister_dev_dax(void *dev)
 }
 struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
-		struct resource *res, int count)
+		int id, struct resource *res, int count)
 {
 	struct device *parent = dax_region->dev;
 	struct dax_device *dax_dev;
@@ -567,7 +568,10 @@ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
 	struct inode *inode;
 	struct device *dev;
 	struct cdev *cdev;
-	int rc = 0, i;
+	int rc, i;
+	if (!count)
+		return ERR_PTR(-EINVAL);
 	dev_dax = kzalloc(sizeof(*dev_dax) + sizeof(*res) * count, GFP_KERNEL);
 	if (!dev_dax)
@@ -587,10 +591,16 @@ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
 	if (i < count)
 		goto err_id;
-	dev_dax->id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
-	if (dev_dax->id < 0) {
-		rc = dev_dax->id;
-		goto err_id;
+	if (id < 0) {
+		id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
+		dev_dax->id = id;
+		if (id < 0) {
+			rc = id;
+			goto err_id;
+		}
+	} else {
+		/* region provider owns @id lifetime */
+		dev_dax->id = -1;
 	}
 	/*
@@ -598,8 +608,10 @@ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
 	 * device outside of mmap of the resulting character device.
 	 */
 	dax_dev = alloc_dax(dev_dax, NULL, NULL);
-	if (!dax_dev)
+	if (!dax_dev) {
+		rc = -ENOMEM;
 		goto err_dax;
+	}
 	/* from here on we're committed to teardown via dax_dev_release() */
 	dev = &dev_dax->dev;
@@ -620,7 +632,7 @@ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
 	dev->parent = parent;
 	dev->groups = dax_attribute_groups;
 	dev->release = dev_dax_release;
-	dev_set_name(dev, "dax%d.%d", dax_region->id, dev_dax->id);
+	dev_set_name(dev, "dax%d.%d", dax_region->id, id);
 	rc = cdev_device_add(cdev, dev);
 	if (rc) {
@@ -636,7 +648,8 @@ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
 	return dev_dax;
 err_dax:
-	ida_simple_remove(&dax_region->ida, dev_dax->id);
+	if (dev_dax->id >= 0)
+		ida_simple_remove(&dax_region->ida, dev_dax->id);
 err_id:
 	kfree(dev_dax);


@@ -58,13 +58,12 @@ static void dax_pmem_percpu_kill(void *data)
 static int dax_pmem_probe(struct device *dev)
 {
-	int rc;
 	void *addr;
 	struct resource res;
+	int rc, id, region_id;
 	struct nd_pfn_sb *pfn_sb;
 	struct dev_dax *dev_dax;
 	struct dax_pmem *dax_pmem;
-	struct nd_region *nd_region;
 	struct nd_namespace_io *nsio;
 	struct dax_region *dax_region;
 	struct nd_namespace_common *ndns;
@@ -123,14 +122,17 @@ static int dax_pmem_probe(struct device *dev)
 	/* adjust the dax_region resource to the start of data */
 	res.start += le64_to_cpu(pfn_sb->dataoff);
-	nd_region = to_nd_region(dev->parent);
-	dax_region = alloc_dax_region(dev, nd_region->id, &res,
+	rc = sscanf(dev_name(&ndns->dev), "namespace%d.%d", &region_id, &id);
+	if (rc != 2)
+		return -EINVAL;
+	dax_region = alloc_dax_region(dev, region_id, &res,
 			le32_to_cpu(pfn_sb->align), addr, PFN_DEV|PFN_MAP);
 	if (!dax_region)
 		return -ENOMEM;
 	/* TODO: support for subdividing a dax region... */
-	dev_dax = devm_create_dev_dax(dax_region, &res, 1);
+	dev_dax = devm_create_dev_dax(dax_region, id, &res, 1);
 	/* child dev_dax instances now own the lifetime of the dax_region */
 	dax_region_put(dax_region);


@@ -75,11 +75,6 @@ int dma_fence_signal_locked(struct dma_fence *fence)
 	if (WARN_ON(!fence))
 		return -EINVAL;
-	if (!ktime_to_ns(fence->timestamp)) {
-		fence->timestamp = ktime_get();
-		smp_mb__before_atomic();
-	}
 	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
 		ret = -EINVAL;
@@ -87,8 +82,11 @@ int dma_fence_signal_locked(struct dma_fence *fence)
 		 * we might have raced with the unlocked dma_fence_signal,
 		 * still run through all callbacks
 		 */
-	} else
+	} else {
+		fence->timestamp = ktime_get();
+		set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
 		trace_dma_fence_signaled(fence);
+	}
 	list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
 		list_del_init(&cur->node);
@@ -115,14 +113,11 @@ int dma_fence_signal(struct dma_fence *fence)
 	if (!fence)
 		return -EINVAL;
-	if (!ktime_to_ns(fence->timestamp)) {
-		fence->timestamp = ktime_get();
-		smp_mb__before_atomic();
-	}
 	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
 		return -EINVAL;
+	fence->timestamp = ktime_get();
+	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
 	trace_dma_fence_signaled(fence);
 	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {


@@ -84,7 +84,7 @@ static void sync_print_fence(struct seq_file *s,
 		   show ? "_" : "",
 		   sync_status_str(status));
-	if (status) {
+	if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags)) {
 		struct timespec64 ts64 =
 			ktime_to_timespec64(fence->timestamp);


@@ -391,7 +391,13 @@ static void sync_fill_fence_info(struct dma_fence *fence,
 		sizeof(info->driver_name));
 	info->status = dma_fence_get_status(fence);
-	info->timestamp_ns = ktime_to_ns(fence->timestamp);
+	while (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
+	       !test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
+		cpu_relax();
+	info->timestamp_ns =
+		test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ?
+		ktime_to_ns(fence->timestamp) :
+		ktime_set(0, 0);
 }
 static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
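
Editor's note: this read side pairs with the dma-fence change earlier in the diff. The signaling path claims SIGNALED_BIT first, then writes fence->timestamp and sets TIMESTAMP_BIT, so a reader that has already observed SIGNALED_BIT spins briefly until the timestamp is published. A user-space sketch of that publication pattern with C11 atomics (names here are hypothetical):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct stamped {
        long long timestamp_ns;
        atomic_bool signaled;
        atomic_bool timestamp_valid;
    };

    static void signal_stamped(struct stamped *s, long long now_ns)
    {
        /* claim the signal first, as dma_fence_signal() does ... */
        atomic_store_explicit(&s->signaled, true, memory_order_relaxed);
        s->timestamp_ns = now_ns;
        /* ... then publish the timestamp with release ordering */
        atomic_store_explicit(&s->timestamp_valid, true, memory_order_release);
    }

    static long long read_timestamp(struct stamped *s)
    {
        if (!atomic_load_explicit(&s->signaled, memory_order_acquire))
            return 0;   /* not signaled: no timestamp yet */
        while (!atomic_load_explicit(&s->timestamp_valid, memory_order_acquire))
            ;           /* brief spin, like cpu_relax() above */
        return s->timestamp_ns;
    }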


@@ -532,7 +532,7 @@ static inline uint32_t fsi_smode_sid(int x)
 	return (x & FSI_SMODE_SID_MASK) << FSI_SMODE_SID_SHIFT;
 }
-static const uint32_t fsi_slave_smode(int id)
+static uint32_t fsi_slave_smode(int id)
 {
 	return FSI_SMODE_WSC | FSI_SMODE_ECRC
 		| fsi_smode_sid(id)
@@ -883,17 +883,16 @@ struct bus_type fsi_bus_type = {
 };
 EXPORT_SYMBOL_GPL(fsi_bus_type);
-static int fsi_init(void)
+static int __init fsi_init(void)
 {
 	return bus_register(&fsi_bus_type);
 }
+postcore_initcall(fsi_init);
 static void fsi_exit(void)
 {
 	bus_unregister(&fsi_bus_type);
 }
-module_init(fsi_init);
 module_exit(fsi_exit);
 module_param(discard_errors, int, 0664);
 MODULE_LICENSE("GPL");


@@ -101,7 +101,6 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 	if (adev->kfd) {
 		struct kgd2kfd_shared_resources gpu_resources = {
 			.compute_vmid_bitmap = 0xFF00,
-			.num_mec = adev->gfx.mec.num_mec,
 			.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
 			.num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe
 		};
@@ -122,7 +121,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 		/* According to linux/bitmap.h we shouldn't use bitmap_clear if
 		 * nbits is not compile time constant */
-		last_valid_bit = adev->gfx.mec.num_mec
+		last_valid_bit = 1 /* only first MEC can have compute queues */
 				* adev->gfx.mec.num_pipe_per_mec
 				* adev->gfx.mec.num_queue_per_pipe;
 		for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)


@@ -226,10 +226,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 	kfd->shared_resources = *gpu_resources;
-	/* We only use the first MEC */
-	if (kfd->shared_resources.num_mec > 1)
-		kfd->shared_resources.num_mec = 1;
 	/* calculate max size of mqds needed for queues */
 	size = max_num_of_queues_per_device *
 			kfd->device_info->mqd_size_aligned;


@@ -77,13 +77,6 @@ static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
 	return false;
 }
-unsigned int get_mec_num(struct device_queue_manager *dqm)
-{
-	BUG_ON(!dqm || !dqm->dev);
-	return dqm->dev->shared_resources.num_mec;
-}
 unsigned int get_queues_num(struct device_queue_manager *dqm)
 {
 	BUG_ON(!dqm || !dqm->dev);


@@ -180,7 +180,6 @@ void device_queue_manager_init_cik(struct device_queue_manager_asic_ops *ops);
 void device_queue_manager_init_vi(struct device_queue_manager_asic_ops *ops);
 void program_sh_mem_settings(struct device_queue_manager *dqm,
 					struct qcm_process_device *qpd);
-unsigned int get_mec_num(struct device_queue_manager *dqm);
 unsigned int get_queues_num(struct device_queue_manager *dqm);
 unsigned int get_queues_per_pipe(struct device_queue_manager *dqm);
 unsigned int get_pipes_per_mec(struct device_queue_manager *dqm);


@@ -63,9 +63,6 @@ struct kgd2kfd_shared_resources {
 	/* Bit n == 1 means VMID n is available for KFD. */
 	unsigned int compute_vmid_bitmap;
-	/* number of mec available from the hardware */
-	uint32_t num_mec;
 	/* number of pipes per mec */
 	uint32_t num_pipe_per_mec;


@@ -330,6 +330,13 @@ static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
 			return false;
 		}
+		/*
+		 * ignore out-of-order messages or messages that are part of a
+		 * failed transaction
+		 */
+		if (!recv_hdr.somt && !msg->have_somt)
+			return false;
 		/* get length contained in this portion */
 		msg->curchunk_len = recv_hdr.msg_len;
 		msg->curchunk_hdrlen = hdrlen;
@@ -2164,7 +2171,7 @@ out_unlock:
 }
 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
-static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
+static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
 {
 	int len;
 	u8 replyblock[32];
@@ -2179,12 +2186,12 @@ static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
 			       replyblock, len);
 	if (ret != len) {
 		DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
-		return;
+		return false;
 	}
 	ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
 	if (!ret) {
 		DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
-		return;
+		return false;
 	}
 	replylen = msg->curchunk_len + msg->curchunk_hdrlen;
@@ -2196,21 +2203,32 @@ static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
 		ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
 				       replyblock, len);
 		if (ret != len) {
-			DRM_DEBUG_KMS("failed to read a chunk\n");
+			DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
+				      len, ret);
+			return false;
 		}
 		ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
-		if (ret == false)
+		if (!ret) {
 			DRM_DEBUG_KMS("failed to build sideband msg\n");
+			return false;
+		}
 		curreply += len;
 		replylen -= len;
 	}
+	return true;
 }
 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
 {
 	int ret = 0;
-	drm_dp_get_one_sb_msg(mgr, false);
+	if (!drm_dp_get_one_sb_msg(mgr, false)) {
+		memset(&mgr->down_rep_recv, 0,
+		       sizeof(struct drm_dp_sideband_msg_rx));
+		return 0;
+	}
 	if (mgr->down_rep_recv.have_eomt) {
 		struct drm_dp_sideband_msg_tx *txmsg;
@@ -2266,7 +2284,12 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
 {
 	int ret = 0;
-	drm_dp_get_one_sb_msg(mgr, true);
+	if (!drm_dp_get_one_sb_msg(mgr, true)) {
+		memset(&mgr->up_req_recv, 0,
+		       sizeof(struct drm_dp_sideband_msg_rx));
+		return 0;
+	}
 	if (mgr->up_req_recv.have_eomt) {
 		struct drm_dp_sideband_msg_req_body msg;
@@ -2318,7 +2341,9 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
 			DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
 		}
-		drm_dp_put_mst_branch_device(mstb);
+		if (mstb)
+			drm_dp_put_mst_branch_device(mstb);
 		memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
 	}
 	return ret;


@@ -54,7 +54,7 @@ static const uint32_t ipu_plane_formats[] = {
 	DRM_FORMAT_RGBA8888,
 	DRM_FORMAT_RGBX8888,
 	DRM_FORMAT_BGRA8888,
-	DRM_FORMAT_BGRA8888,
+	DRM_FORMAT_BGRX8888,
 	DRM_FORMAT_UYVY,
 	DRM_FORMAT_VYUY,
 	DRM_FORMAT_YUYV,


@@ -237,7 +237,7 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
 	/* port@1 is the output port */
 	ret = drm_of_find_panel_or_bridge(np, 1, 0, &imxpd->panel, &imxpd->bridge);
-	if (ret)
+	if (ret && ret != -ENODEV)
 		return ret;
 	imxpd->dev = dev;


@@ -184,7 +184,6 @@ void radeon_kfd_device_init(struct radeon_device *rdev)
 	if (rdev->kfd) {
 		struct kgd2kfd_shared_resources gpu_resources = {
 			.compute_vmid_bitmap = 0xFF00,
-			.num_mec = 1,
 			.num_pipe_per_mec = 4,
 			.num_queue_per_pipe = 8
 		};

Some files were not shown because too many files have changed in this diff.