
Merge commit 'v2.6.36-rc7' into spi/next

Grant Likely 2010-10-12 21:38:02 -06:00
commit 492c032bec
167 changed files with 994 additions and 564 deletions


@ -3554,12 +3554,12 @@ E: cvance@nai.com
D: portions of the Linux Security Module (LSM) framework and security modules
N: Petr Vandrovec
E: vandrove@vc.cvut.cz
E: petr@vandrovec.name
D: Small contributions to ncpfs
D: Matrox framebuffer driver
S: Chudenicka 8
S: 10200 Prague 10, Hostivar
S: Czech Republic
S: 21513 Conradia Ct
S: Cupertino, CA 95014
S: USA
N: Thibaut Varene
E: T-Bone@parisc-linux.org


@ -962,6 +962,13 @@ W: http://www.fluff.org/ben/linux/
S: Maintained
F: arch/arm/mach-s3c6410/
ARM/S5P ARM ARCHITECTURES
M: Kukjin Kim <kgene.kim@samsung.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-s5p*/
ARM/SHMOBILE ARM ARCHITECTURE
M: Paul Mundt <lethal@linux-sh.org>
M: Magnus Damm <magnus.damm@gmail.com>
@ -3781,9 +3788,8 @@ W: http://www.syskonnect.com
S: Supported
MATROX FRAMEBUFFER DRIVER
M: Petr Vandrovec <vandrove@vc.cvut.cz>
L: linux-fbdev@vger.kernel.org
S: Maintained
S: Orphan
F: drivers/video/matrox/matroxfb_*
F: include/linux/matroxfb.h
@ -3970,8 +3976,8 @@ S: Maintained
F: drivers/net/natsemi.c
NCP FILESYSTEM
M: Petr Vandrovec <vandrove@vc.cvut.cz>
S: Maintained
M: Petr Vandrovec <petr@vandrovec.name>
S: Odd Fixes
F: fs/ncpfs/
NCR DUAL 700 SCSI DRIVER (MICROCHANNEL)


@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 36
EXTRAVERSION = -rc6
EXTRAVERSION = -rc7
NAME = Sheep on Meth
# *DOCUMENTATION*


@ -48,7 +48,7 @@ SYSCALL_DEFINE2(osf_sigprocmask, int, how, unsigned long, newmask)
sigset_t mask;
unsigned long res;
siginitset(&mask, newmask & ~_BLOCKABLE);
siginitset(&mask, newmask & _BLOCKABLE);
res = sigprocmask(how, &mask, &oldmask);
if (!res) {
force_successful_syscall_return();


@ -102,6 +102,7 @@ static int op_create_counter(int cpu, int event)
if (IS_ERR(pevent)) {
ret = PTR_ERR(pevent);
} else if (pevent->state != PERF_EVENT_STATE_ACTIVE) {
perf_event_release_kernel(pevent);
pr_warning("oprofile: failed to enable event %d "
"on CPU %d\n", event, cpu);
ret = -EBUSY;
@ -365,6 +366,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
ret = init_driverfs();
if (ret) {
kfree(counter_config);
counter_config = NULL;
return ret;
}
@ -402,7 +404,6 @@ void oprofile_arch_exit(void)
struct perf_event *event;
if (*perf_events) {
exit_driverfs();
for_each_possible_cpu(cpu) {
for (id = 0; id < perf_num_counters; ++id) {
event = perf_events[cpu][id];
@ -413,8 +414,10 @@ void oprofile_arch_exit(void)
}
}
if (counter_config)
if (counter_config) {
kfree(counter_config);
exit_driverfs();
}
}
#else
int __init oprofile_arch_init(struct oprofile_operations *ops)


@ -33,7 +33,7 @@ config OMAP_DEBUG_DEVICES
config OMAP_DEBUG_LEDS
bool
depends on OMAP_DEBUG_DEVICES
default y if LEDS
default y if LEDS_CLASS
config OMAP_RESET_CLOCKS
bool "Reset unused clocks during boot"


@ -156,7 +156,7 @@ static irqreturn_t omap_mcbsp_rx_irq_handler(int irq, void *dev_id)
/* Writing zero to RSYNC_ERR clears the IRQ */
MCBSP_WRITE(mcbsp_rx, SPCR1, MCBSP_READ_CACHE(mcbsp_rx, SPCR1));
} else {
complete(&mcbsp_rx->tx_irq_completion);
complete(&mcbsp_rx->rx_irq_completion);
}
return IRQ_HANDLED;


@ -314,10 +314,9 @@ int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
vfree(module->arch.syminfo);
module->arch.syminfo = NULL;
return module_bug_finalize(hdr, sechdrs, module);
return 0;
}
void module_arch_cleanup(struct module *module)
{
module_bug_cleanup(module);
}


@ -112,10 +112,9 @@ int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
{
return module_bug_finalize(hdr, sechdrs, me);
return 0;
}
void module_arch_cleanup(struct module *mod)
{
module_bug_cleanup(mod);
}


@ -162,7 +162,7 @@ static void mac_init_asc( void )
void mac_mksound( unsigned int freq, unsigned int length )
{
__u32 cfreq = ( freq << 5 ) / 468;
__u32 flags;
unsigned long flags;
int i;
if ( mac_special_bell == NULL )
@ -224,7 +224,7 @@ static void mac_nosound( unsigned long ignored )
*/
static void mac_quadra_start_bell( unsigned int freq, unsigned int length, unsigned int volume )
{
__u32 flags;
unsigned long flags;
/* if the bell is already ringing, ring longer */
if ( mac_bell_duration > 0 )
@ -271,7 +271,7 @@ static void mac_quadra_start_bell( unsigned int freq, unsigned int length, unsig
static void mac_quadra_ring_bell( unsigned long ignored )
{
int i, count = mac_asc_samplespersec / HZ;
__u32 flags;
unsigned long flags;
/*
* we neither want a sound buffer overflow nor underflow, so we need to match


@ -13,6 +13,7 @@ config MIPS
select HAVE_KPROBES
select HAVE_KRETPROBES
select RTC_LIB if !MACH_LOONGSON
select GENERIC_ATOMIC64 if !64BIT
mainmenu "Linux/MIPS Kernel Configuration"
@ -1646,8 +1647,16 @@ config MIPS_MT_SMP
select SYS_SUPPORTS_SMP
select SMP_UP
help
This is a kernel model which is also known a VSMP or lately
has been marketesed into SMVP.
This is a kernel model which is known a VSMP but lately has been
marketesed into SMVP.
Virtual SMP uses the processor's VPEs to implement virtual
processors. In currently available configuration of the 34K processor
this allows for a dual processor. Both processors will share the same
primary caches; each will obtain the half of the TLB for it's own
exclusive use. For a layman this model can be described as similar to
what Intel calls Hyperthreading.
For further information see http://www.linux-mips.org/wiki/34K#VSMP
config MIPS_MT_SMTC
bool "SMTC: Use all TCs on all VPEs for SMP"
@ -1664,6 +1673,14 @@ config MIPS_MT_SMTC
help
This is a kernel model which is known a SMTC or lately has been
marketesed into SMVP.
is presenting the available TC's of the core as processors to Linux.
On currently available 34K processors this means a Linux system will
see up to 5 processors. The implementation of the SMTC kernel differs
significantly from VSMP and cannot efficiently coexist in the same
kernel binary so the choice between VSMP and SMTC is a compile time
decision.
For further information see http://www.linux-mips.org/wiki/34K#SMTC
endchoice


@ -43,7 +43,7 @@ int prom_argc;
char **prom_argv;
char **prom_envp;
void prom_init_cmdline(void)
void __init prom_init_cmdline(void)
{
int i;
@ -104,7 +104,7 @@ static inline void str2eaddr(unsigned char *ea, unsigned char *str)
}
}
int prom_get_ethernet_addr(char *ethernet_addr)
int __init prom_get_ethernet_addr(char *ethernet_addr)
{
char *ethaddr_str;
@ -123,7 +123,6 @@ int prom_get_ethernet_addr(char *ethernet_addr)
return 0;
}
EXPORT_SYMBOL(prom_get_ethernet_addr);
void __init prom_free_prom_memory(void)
{


@ -59,7 +59,7 @@ $(obj)/piggy.o: $(obj)/dummy.o $(obj)/vmlinux.bin.z FORCE
hostprogs-y := calc_vmlinuz_load_addr
VMLINUZ_LOAD_ADDRESS = $(shell $(obj)/calc_vmlinuz_load_addr \
$(objtree)/$(KBUILD_IMAGE) $(VMLINUX_LOAD_ADDRESS))
$(obj)/vmlinux.bin $(VMLINUX_LOAD_ADDRESS))
vmlinuzobjs-y += $(obj)/piggy.o


@ -83,3 +83,7 @@ config ARCH_SPARSEMEM_ENABLE
def_bool y
select SPARSEMEM_STATIC
depends on CPU_CAVIUM_OCTEON
config CAVIUM_OCTEON_HELPER
def_bool y
depends on OCTEON_ETHERNET || PCI


@ -41,7 +41,7 @@ static int cnmips_cu2_call(struct notifier_block *nfb, unsigned long action,
return NOTIFY_OK; /* Let default notifier send signals */
}
static int cnmips_cu2_setup(void)
static int __init cnmips_cu2_setup(void)
{
return cu2_notifier(cnmips_cu2_call, 0);
}


@ -11,4 +11,4 @@
obj-y += cvmx-bootmem.o cvmx-l2c.o cvmx-sysinfo.o octeon-model.o
obj-$(CONFIG_PCI) += cvmx-helper-errata.o cvmx-helper-jtag.o
obj-$(CONFIG_CAVIUM_OCTEON_HELPER) += cvmx-helper-errata.o cvmx-helper-jtag.o


@ -782,6 +782,10 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
*/
#define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
#else /* !CONFIG_64BIT */
#include <asm-generic/atomic64.h>
#endif /* CONFIG_64BIT */
/*


@ -24,7 +24,7 @@ extern int cu2_notifier_call_chain(unsigned long val, void *v);
#define cu2_notifier(fn, pri) \
({ \
static struct notifier_block fn##_nb __cpuinitdata = { \
static struct notifier_block fn##_nb = { \
.notifier_call = fn, \
.priority = pri \
}; \


@ -321,6 +321,7 @@ struct gic_intrmask_regs {
*/
struct gic_intr_map {
unsigned int cpunum; /* Directed to this CPU */
#define GIC_UNUSED 0xdead /* Dummy data */
unsigned int pin; /* Directed to this Pin */
unsigned int polarity; /* Polarity : +/- */
unsigned int trigtype; /* Trigger : Edge/Levl */


@ -1,6 +1,6 @@
#ifndef __ASM_MACH_TX49XX_KMALLOC_H
#define __ASM_MACH_TX49XX_KMALLOC_H
#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
#endif /* __ASM_MACH_TX49XX_KMALLOC_H */


@ -88,9 +88,6 @@
#define GIC_EXT_INTR(x) x
/* Dummy data */
#define X 0xdead
/* External Interrupts used for IPI */
#define GIC_IPI_EXT_INTR_RESCHED_VPE0 16
#define GIC_IPI_EXT_INTR_CALLFNC_VPE0 17


@ -150,6 +150,20 @@ typedef struct { unsigned long pgprot; } pgprot_t;
((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET)
#endif
#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))
/*
* RELOC_HIDE was originally added by 6007b903dfe5f1d13e0c711ac2894bdd4a61b1ad
* (lmo) rsp. 8431fd094d625b94d364fe393076ccef88e6ce18 (kernel.org). The
* discussion can be found in lkml posting
* <a2ebde260608230500o3407b108hc03debb9da6e62c@mail.gmail.com> which is
* archived at http://lists.linuxcoding.com/kernel/2006-q3/msg17360.html
*
* It is unclear if the misscompilations mentioned in
* http://lkml.org/lkml/2010/8/8/138 also affect MIPS so we keep this one
* until GCC 3.x has been retired before we can apply
* https://patchwork.linux-mips.org/patch/1541/
*/
#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)


@ -146,7 +146,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
#define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
/* work to do on interrupt/exception return */
#define _TIF_WORK_MASK (0x0000ffef & ~_TIF_SECCOMP)
#define _TIF_WORK_MASK (0x0000ffef & \
~(_TIF_SECCOMP | _TIF_SYSCALL_AUDIT))
/* work to do on any return to u-space */
#define _TIF_ALLWORK_MASK (0x8000ffff & ~_TIF_SECCOMP)


@ -356,16 +356,19 @@
#define __NR_perf_event_open (__NR_Linux + 333)
#define __NR_accept4 (__NR_Linux + 334)
#define __NR_recvmmsg (__NR_Linux + 335)
#define __NR_fanotify_init (__NR_Linux + 336)
#define __NR_fanotify_mark (__NR_Linux + 337)
#define __NR_prlimit64 (__NR_Linux + 338)
/*
* Offset of the last Linux o32 flavoured syscall
*/
#define __NR_Linux_syscalls 335
#define __NR_Linux_syscalls 338
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
#define __NR_O32_Linux 4000
#define __NR_O32_Linux_syscalls 335
#define __NR_O32_Linux_syscalls 338
#if _MIPS_SIM == _MIPS_SIM_ABI64
@ -668,16 +671,19 @@
#define __NR_perf_event_open (__NR_Linux + 292)
#define __NR_accept4 (__NR_Linux + 293)
#define __NR_recvmmsg (__NR_Linux + 294)
#define __NR_fanotify_init (__NR_Linux + 295)
#define __NR_fanotify_mark (__NR_Linux + 296)
#define __NR_prlimit64 (__NR_Linux + 297)
/*
* Offset of the last Linux 64-bit flavoured syscall
*/
#define __NR_Linux_syscalls 294
#define __NR_Linux_syscalls 297
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
#define __NR_64_Linux 5000
#define __NR_64_Linux_syscalls 294
#define __NR_64_Linux_syscalls 297
#if _MIPS_SIM == _MIPS_SIM_NABI32
@ -985,16 +991,19 @@
#define __NR_accept4 (__NR_Linux + 297)
#define __NR_recvmmsg (__NR_Linux + 298)
#define __NR_getdents64 (__NR_Linux + 299)
#define __NR_fanotify_init (__NR_Linux + 300)
#define __NR_fanotify_mark (__NR_Linux + 301)
#define __NR_prlimit64 (__NR_Linux + 302)
/*
* Offset of the last N32 flavoured syscall
*/
#define __NR_Linux_syscalls 299
#define __NR_Linux_syscalls 302
#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
#define __NR_N32_Linux 6000
#define __NR_N32_Linux_syscalls 299
#define __NR_N32_Linux_syscalls 302
#ifdef __KERNEL__


@ -7,7 +7,6 @@
#include <asm/io.h>
#include <asm/gic.h>
#include <asm/gcmpregs.h>
#include <asm/mips-boards/maltaint.h>
#include <asm/irq.h>
#include <linux/hardirq.h>
#include <asm-generic/bitops/find.h>
@ -131,7 +130,7 @@ static int gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
int i;
irq -= _irqbase;
pr_debug(KERN_DEBUG "%s(%d) called\n", __func__, irq);
pr_debug("%s(%d) called\n", __func__, irq);
cpumask_and(&tmp, cpumask, cpu_online_mask);
if (cpus_empty(tmp))
return -1;
@ -222,7 +221,7 @@ static void __init gic_basic_init(int numintrs, int numvpes,
/* Setup specifics */
for (i = 0; i < mapsize; i++) {
cpu = intrmap[i].cpunum;
if (cpu == X)
if (cpu == GIC_UNUSED)
continue;
if (cpu == 0 && i != 0 && intrmap[i].flags == 0)
continue;


@ -283,7 +283,7 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
struct pt_regs *regs = args->regs;
int trap = (regs->cp0_cause & 0x7c) >> 2;
/* Userpace events, ignore. */
/* Userspace events, ignore. */
if (user_mode(regs))
return NOTIFY_DONE;


@ -251,7 +251,7 @@ void sp_work_handle_request(void)
memset(&tz, 0, sizeof(tz));
if ((ret.retval = sp_syscall(__NR_gettimeofday, (int)&tv,
(int)&tz, 0, 0)) == 0)
ret.retval = tv.tv_sec;
ret.retval = tv.tv_sec;
break;
case MTSP_SYSCALL_EXIT:


@ -341,3 +341,10 @@ asmlinkage long sys32_lookup_dcookie(u32 a0, u32 a1, char __user *buf,
{
return sys_lookup_dcookie(merge_64(a0, a1), buf, len);
}
SYSCALL_DEFINE6(32_fanotify_mark, int, fanotify_fd, unsigned int, flags,
u64, a3, u64, a4, int, dfd, const char __user *, pathname)
{
return sys_fanotify_mark(fanotify_fd, flags, merge_64(a3, a4),
dfd, pathname);
}


@ -583,7 +583,10 @@ einval: li v0, -ENOSYS
sys sys_rt_tgsigqueueinfo 4
sys sys_perf_event_open 5
sys sys_accept4 4
sys sys_recvmmsg 5
sys sys_recvmmsg 5 /* 4335 */
sys sys_fanotify_init 2
sys sys_fanotify_mark 6
sys sys_prlimit64 4
.endm
/* We pre-compute the number of _instruction_ bytes needed to


@ -416,9 +416,12 @@ sys_call_table:
PTR sys_pipe2
PTR sys_inotify_init1
PTR sys_preadv
PTR sys_pwritev /* 5390 */
PTR sys_pwritev /* 5290 */
PTR sys_rt_tgsigqueueinfo
PTR sys_perf_event_open
PTR sys_accept4
PTR sys_recvmmsg
PTR sys_recvmmsg
PTR sys_fanotify_init /* 5295 */
PTR sys_fanotify_mark
PTR sys_prlimit64
.size sys_call_table,.-sys_call_table


@ -419,5 +419,8 @@ EXPORT(sysn32_call_table)
PTR sys_perf_event_open
PTR sys_accept4
PTR compat_sys_recvmmsg
PTR sys_getdents
PTR sys_getdents64
PTR sys_fanotify_init /* 6300 */
PTR sys_fanotify_mark
PTR sys_prlimit64
.size sysn32_call_table,.-sysn32_call_table


@ -538,5 +538,8 @@ sys_call_table:
PTR compat_sys_rt_tgsigqueueinfo
PTR sys_perf_event_open
PTR sys_accept4
PTR compat_sys_recvmmsg
PTR compat_sys_recvmmsg /* 4335 */
PTR sys_fanotify_init
PTR sys_32_fanotify_mark
PTR sys_prlimit64
.size sys_call_table,.-sys_call_table


@ -44,27 +44,39 @@ static inline int cpu_is_noncoherent_r10000(struct device *dev)
static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
gfp_t dma_flag;
/* ignore region specifiers */
gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
#ifdef CONFIG_ZONE_DMA
#ifdef CONFIG_ISA
if (dev == NULL)
gfp |= __GFP_DMA;
else if (dev->coherent_dma_mask < DMA_BIT_MASK(24))
gfp |= __GFP_DMA;
dma_flag = __GFP_DMA;
else
#endif
#ifdef CONFIG_ZONE_DMA32
#if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
gfp |= __GFP_DMA32;
dma_flag = __GFP_DMA;
else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
dma_flag = __GFP_DMA32;
else
#endif
;
#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
dma_flag = __GFP_DMA32;
else
#endif
#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
dma_flag = __GFP_DMA;
else
#endif
dma_flag = 0;
/* Don't invoke OOM killer */
gfp |= __GFP_NORETRY;
return gfp;
return gfp | dma_flag;
}
void *dma_alloc_noncoherent(struct device *dev, size_t size,


@ -30,7 +30,7 @@
#define tc_lsize 32
extern unsigned long icache_way_size, dcache_way_size;
unsigned long tcache_size;
static unsigned long tcache_size;
#include <asm/r4kcache.h>


@ -385,6 +385,8 @@ static int __initdata msc_nr_eicirqs = ARRAY_SIZE(msc_eicirqmap);
*/
#define GIC_CPU_NMI GIC_MAP_TO_NMI_MSK
#define X GIC_UNUSED
static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = {
{ X, X, X, X, 0 },
{ X, X, X, X, 0 },
@ -404,6 +406,7 @@ static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = {
{ X, X, X, X, 0 },
/* The remainder of this table is initialised by fill_ipi_map */
};
#undef X
/*
* GCMP needs to be detected before any SMP initialisation


@ -118,7 +118,7 @@ static int __init rc32434_pcibridge_init(void)
if (!((pcicvalue == PCIM_H_EA) ||
(pcicvalue == PCIM_H_IA_FIX) ||
(pcicvalue == PCIM_H_IA_RR))) {
pr_err(KERN_ERR "PCI init error!!!\n");
pr_err("PCI init error!!!\n");
/* Not in Host Mode, return ERROR */
return -1;
}


@ -22,29 +22,19 @@
*/
#include <linux/kernel.h>
#include <asm/processor.h>
#include <asm/reboot.h>
#include <glb.h>
void pnx8550_machine_restart(char *command)
{
char head[] = "************* Machine restart *************";
char foot[] = "*******************************************";
printk("\n\n");
printk("%s\n", head);
if (command != NULL)
printk("* %s\n", command);
printk("%s\n", foot);
PNX8550_RST_CTL = PNX8550_RST_DO_SW_RST;
}
void pnx8550_machine_halt(void)
{
printk("*** Machine halt. (Not implemented) ***\n");
}
void pnx8550_machine_power_off(void)
{
printk("*** Machine power off. (Not implemented) ***\n");
while (1) {
if (cpu_wait)
cpu_wait();
}
}


@ -44,7 +44,6 @@
extern void __init board_setup(void);
extern void pnx8550_machine_restart(char *);
extern void pnx8550_machine_halt(void);
extern void pnx8550_machine_power_off(void);
extern struct resource ioport_resource;
extern struct resource iomem_resource;
extern char *prom_getcmdline(void);
@ -100,7 +99,7 @@ void __init plat_mem_setup(void)
_machine_restart = pnx8550_machine_restart;
_machine_halt = pnx8550_machine_halt;
pm_power_off = pnx8550_machine_power_off;
pm_power_off = pnx8550_machine_halt;
/* Clear the Global 2 Register, PCI Inta Output Enable Registers
Bit 1:Enable DAC Powerdown


@ -206,7 +206,7 @@ int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
{
return module_bug_finalize(hdr, sechdrs, me);
return 0;
}
/*
@ -214,5 +214,4 @@ int module_finalize(const Elf_Ehdr *hdr,
*/
void module_arch_cleanup(struct module *mod)
{
module_bug_cleanup(mod);
}


@ -54,13 +54,30 @@ EXPORT_SYMBOL(flush_icache_page);
void flush_icache_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_MN10300_CACHE_WBACK
unsigned long addr, size, off;
unsigned long addr, size, base, off;
struct page *page;
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *ppte, pte;
if (end > 0x80000000UL) {
/* addresses above 0xa0000000 do not go through the cache */
if (end > 0xa0000000UL) {
end = 0xa0000000UL;
if (start >= end)
return;
}
/* kernel addresses between 0x80000000 and 0x9fffffff do not
* require page tables, so we just map such addresses directly */
base = (start >= 0x80000000UL) ? start : 0x80000000UL;
mn10300_dcache_flush_range(base, end);
if (base == start)
goto invalidate;
end = base;
}
for (; start < end; start += size) {
/* work out how much of the page to flush */
off = start & (PAGE_SIZE - 1);
@ -104,6 +121,7 @@ void flush_icache_range(unsigned long start, unsigned long end)
}
#endif
invalidate:
mn10300_icache_inv();
}
EXPORT_SYMBOL(flush_icache_range);


@ -941,11 +941,10 @@ int module_finalize(const Elf_Ehdr *hdr,
nsyms = newptr - (Elf_Sym *)symhdr->sh_addr;
DEBUGP("NEW num_symtab %lu\n", nsyms);
symhdr->sh_size = nsyms * sizeof(Elf_Sym);
return module_bug_finalize(hdr, sechdrs, me);
return 0;
}
void module_arch_cleanup(struct module *mod)
{
deregister_unwind_table(mod);
module_bug_cleanup(mod);
}


@ -63,11 +63,6 @@ int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs, struct module *me)
{
const Elf_Shdr *sect;
int err;
err = module_bug_finalize(hdr, sechdrs, me);
if (err)
return err;
/* Apply feature fixups */
sect = find_section(hdr, sechdrs, "__ftr_fixup");
@ -101,5 +96,4 @@ int module_finalize(const Elf_Ehdr *hdr,
void module_arch_cleanup(struct module *mod)
{
module_bug_cleanup(mod);
}


@ -57,7 +57,7 @@ static struct clk *mpc5121_clk_get(struct device *dev, const char *id)
int id_match = 0;
if (dev == NULL || id == NULL)
return NULL;
return clk;
mutex_lock(&clocks_mutex);
list_for_each_entry(p, &clocks, node) {


@ -99,7 +99,7 @@ static void __init efika_pcisetup(void)
if (bus_range == NULL || len < 2 * sizeof(int)) {
printk(KERN_WARNING EFIKA_PLATFORM_NAME
": Can't get bus-range for %s\n", pcictrl->full_name);
return;
goto out_put;
}
if (bus_range[1] == bus_range[0])
@ -111,12 +111,12 @@ static void __init efika_pcisetup(void)
printk(" controlled by %s\n", pcictrl->full_name);
printk("\n");
hose = pcibios_alloc_controller(of_node_get(pcictrl));
hose = pcibios_alloc_controller(pcictrl);
if (!hose) {
printk(KERN_WARNING EFIKA_PLATFORM_NAME
": Can't allocate PCI controller structure for %s\n",
pcictrl->full_name);
return;
goto out_put;
}
hose->first_busno = bus_range[0];
@ -124,6 +124,9 @@ static void __init efika_pcisetup(void)
hose->ops = &rtas_pci_ops;
pci_process_bridge_OF_ranges(hose, pcictrl, 0);
return;
out_put:
of_node_put(pcictrl);
}
#else


@ -325,12 +325,16 @@ int mpc5200_psc_ac97_gpio_reset(int psc_number)
clrbits32(&simple_gpio->simple_dvo, sync | out);
clrbits8(&wkup_gpio->wkup_dvo, reset);
/* wait at lease 1 us */
udelay(2);
/* wait for 1 us */
udelay(1);
/* Deassert reset */
setbits8(&wkup_gpio->wkup_dvo, reset);
/* wait at least 200ns */
/* 7 ~= (200ns * timebase) / ns2sec */
__delay(7);
/* Restore pin-muxing */
out_be32(&simple_gpio->port_config, mux);


@ -407,10 +407,9 @@ int module_finalize(const Elf_Ehdr *hdr,
{
vfree(me->arch.syminfo);
me->arch.syminfo = NULL;
return module_bug_finalize(hdr, sechdrs, me);
return 0;
}
void module_arch_cleanup(struct module *mod)
{
module_bug_cleanup(mod);
}


@ -149,13 +149,11 @@ int module_finalize(const Elf_Ehdr *hdr,
int ret = 0;
ret |= module_dwarf_finalize(hdr, sechdrs, me);
ret |= module_bug_finalize(hdr, sechdrs, me);
return ret;
}
void module_arch_cleanup(struct module *mod)
{
module_bug_cleanup(mod);
module_dwarf_cleanup(mod);
}


@ -255,18 +255,6 @@ static void uml_net_tx_timeout(struct net_device *dev)
netif_wake_queue(dev);
}
static int uml_net_set_mac(struct net_device *dev, void *addr)
{
struct uml_net_private *lp = netdev_priv(dev);
struct sockaddr *hwaddr = addr;
spin_lock_irq(&lp->lock);
eth_mac_addr(dev, hwaddr->sa_data);
spin_unlock_irq(&lp->lock);
return 0;
}
static int uml_net_change_mtu(struct net_device *dev, int new_mtu)
{
dev->mtu = new_mtu;
@ -373,7 +361,7 @@ static const struct net_device_ops uml_netdev_ops = {
.ndo_start_xmit = uml_net_start_xmit,
.ndo_set_multicast_list = uml_net_set_multicast_list,
.ndo_tx_timeout = uml_net_tx_timeout,
.ndo_set_mac_address = uml_net_set_mac,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = uml_net_change_mtu,
.ndo_validate_addr = eth_validate_addr,
};
@ -472,7 +460,8 @@ static void eth_configure(int n, void *init, char *mac,
((*transport->user->init)(&lp->user, dev) != 0))
goto out_unregister;
eth_mac_addr(dev, device->mac);
/* don't use eth_mac_addr, it will not work here */
memcpy(dev->dev_addr, device->mac, ETH_ALEN);
dev->mtu = transport->user->mtu;
dev->netdev_ops = &uml_netdev_ops;
dev->ethtool_ops = &uml_net_ethtool_ops;


@ -61,7 +61,7 @@ struct cstate_entry {
unsigned int ecx;
} states[ACPI_PROCESSOR_MAX_POWER];
};
static struct cstate_entry *cpu_cstate_entry; /* per CPU ptr */
static struct cstate_entry __percpu *cpu_cstate_entry; /* per CPU ptr */
static short mwait_supported[ACPI_PROCESSOR_MAX_POWER];


@ -306,14 +306,19 @@ void arch_init_copy_chip_data(struct irq_desc *old_desc,
old_cfg = old_desc->chip_data;
memcpy(cfg, old_cfg, sizeof(struct irq_cfg));
cfg->vector = old_cfg->vector;
cfg->move_in_progress = old_cfg->move_in_progress;
cpumask_copy(cfg->domain, old_cfg->domain);
cpumask_copy(cfg->old_domain, old_cfg->old_domain);
init_copy_irq_2_pin(old_cfg, cfg, node);
}
static void free_irq_cfg(struct irq_cfg *old_cfg)
static void free_irq_cfg(struct irq_cfg *cfg)
{
kfree(old_cfg);
free_cpumask_var(cfg->domain);
free_cpumask_var(cfg->old_domain);
kfree(cfg);
}
void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc)


@ -545,7 +545,7 @@ void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
}
}
static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
{
u32 tfms, xlvl;
u32 ebx;


@ -33,5 +33,6 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[],
*const __x86_cpu_dev_end[];
extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
extern void get_cpu_cap(struct cpuinfo_x86 *c);
#endif


@ -368,16 +368,22 @@ static int __init pcc_cpufreq_do_osc(acpi_handle *handle)
return -ENODEV;
out_obj = output.pointer;
if (out_obj->type != ACPI_TYPE_BUFFER)
return -ENODEV;
if (out_obj->type != ACPI_TYPE_BUFFER) {
ret = -ENODEV;
goto out_free;
}
errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0);
if (errors)
return -ENODEV;
if (errors) {
ret = -ENODEV;
goto out_free;
}
supported = *((u32 *)(out_obj->buffer.pointer + 4));
if (!(supported & 0x1))
return -ENODEV;
if (!(supported & 0x1)) {
ret = -ENODEV;
goto out_free;
}
out_free:
kfree(output.pointer);


@ -39,6 +39,7 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
c->cpuid_level = cpuid_eax(0);
get_cpu_cap(c);
}
}


@ -660,8 +660,12 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
int overflow;
if (!test_bit(idx, cpuc->active_mask))
if (!test_bit(idx, cpuc->active_mask)) {
/* catch in-flight IRQs */
if (__test_and_clear_bit(idx, cpuc->running))
handled++;
continue;
}
event = cpuc->events[idx];
hwc = &event->hw;


@ -506,7 +506,7 @@ static int hpet_assign_irq(struct hpet_dev *dev)
{
unsigned int irq;
irq = create_irq();
irq = create_irq_nr(0, -1);
if (!irq)
return -EINVAL;


@ -239,11 +239,10 @@ int module_finalize(const Elf_Ehdr *hdr,
apply_paravirt(pseg, pseg + para->sh_size);
}
return module_bug_finalize(hdr, sechdrs, me);
return 0;
}
void module_arch_cleanup(struct module *mod)
{
alternatives_smp_module_del(mod);
module_bug_cleanup(mod);
}


@ -674,6 +674,7 @@ static int __init ppro_init(char **cpu_type)
case 0x0f:
case 0x16:
case 0x17:
case 0x1d:
*cpu_type = "i386/core_2";
break;
case 0x1a:


@ -489,8 +489,9 @@ static void xen_hvm_setup_cpu_clockevents(void)
__init void xen_hvm_init_time_ops(void)
{
/* vector callback is needed otherwise we cannot receive interrupts
* on cpu > 0 */
if (!xen_have_vector_callback && num_present_cpus() > 1)
* on cpu > 0 and at this point we don't know how many cpus are
* available */
if (!xen_have_vector_callback)
return;
if (!xen_feature(XENFEAT_hvm_safe_pvclock)) {
printk(KERN_INFO "Xen doesn't support pvclock on HVM,"


@ -105,7 +105,7 @@ config ACPI_EC_DEBUGFS
Be aware that using this interface can confuse your Embedded
Controller in a way that a normal reboot is not enough. You then
have to power of your system, and remove the laptop battery for
have to power off your system, and remove the laptop battery for
some seconds.
An Embedded Controller typically is available on laptops and reads
sensor values like battery state and temperature.


@ -382,31 +382,32 @@ static void acpi_pad_remove_sysfs(struct acpi_device *device)
device_remove_file(&device->dev, &dev_attr_rrtime);
}
/* Query firmware how many CPUs should be idle */
static int acpi_pad_pur(acpi_handle handle, int *num_cpus)
/*
* Query firmware how many CPUs should be idle
* return -1 on failure
*/
static int acpi_pad_pur(acpi_handle handle)
{
struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
union acpi_object *package;
int rev, num, ret = -EINVAL;
int num = -1;
if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PUR", NULL, &buffer)))
return -EINVAL;
return num;
if (!buffer.length || !buffer.pointer)
return -EINVAL;
return num;
package = buffer.pointer;
if (package->type != ACPI_TYPE_PACKAGE || package->package.count != 2)
goto out;
rev = package->package.elements[0].integer.value;
num = package->package.elements[1].integer.value;
if (rev != 1 || num < 0)
goto out;
*num_cpus = num;
ret = 0;
out:
if (package->type == ACPI_TYPE_PACKAGE &&
package->package.count == 2 &&
package->package.elements[0].integer.value == 1) /* rev 1 */
num = package->package.elements[1].integer.value;
kfree(buffer.pointer);
return ret;
return num;
}
/* Notify firmware how many CPUs are idle */
@ -433,7 +434,8 @@ static void acpi_pad_handle_notify(acpi_handle handle)
uint32_t idle_cpus;
mutex_lock(&isolated_cpus_lock);
if (acpi_pad_pur(handle, &num_cpus)) {
num_cpus = acpi_pad_pur(handle);
if (num_cpus < 0) {
mutex_unlock(&isolated_cpus_lock);
return;
}


@ -854,6 +854,7 @@ struct acpi_bit_register_info {
ACPI_BITMASK_POWER_BUTTON_STATUS | \
ACPI_BITMASK_SLEEP_BUTTON_STATUS | \
ACPI_BITMASK_RT_CLOCK_STATUS | \
ACPI_BITMASK_PCIEXP_WAKE_DISABLE | \
ACPI_BITMASK_WAKE_STATUS)
#define ACPI_BITMASK_TIMER_ENABLE 0x0001


@ -109,7 +109,7 @@ void acpi_ex_enter_interpreter(void)
*
* DESCRIPTION: Reacquire the interpreter execution region from within the
* interpreter code. Failure to enter the interpreter region is a
* fatal system error. Used in conjuction with
* fatal system error. Used in conjunction with
* relinquish_interpreter
*
******************************************************************************/


@ -149,7 +149,7 @@ acpi_rs_move_data(void *destination, void *source, u16 item_count, u8 move_type)
/*
* 16-, 32-, and 64-bit cases must use the move macros that perform
* endian conversion and/or accomodate hardware that cannot perform
* endian conversion and/or accommodate hardware that cannot perform
* misaligned memory transfers
*/
case ACPI_RSC_MOVE16:


@ -34,6 +34,6 @@ config ACPI_APEI_ERST_DEBUG
depends on ACPI_APEI
help
ERST is a way provided by APEI to save and retrieve hardware
error infomation to and from a persistent store. Enable this
error information to and from a persistent store. Enable this
if you want to debugging and testing the ERST kernel support
and firmware implementation.


@ -445,11 +445,15 @@ EXPORT_SYMBOL_GPL(apei_resources_sub);
int apei_resources_request(struct apei_resources *resources,
const char *desc)
{
struct apei_res *res, *res_bak;
struct apei_res *res, *res_bak = NULL;
struct resource *r;
int rc;
apei_resources_sub(resources, &apei_resources_all);
rc = apei_resources_sub(resources, &apei_resources_all);
if (rc)
return rc;
rc = -EINVAL;
list_for_each_entry(res, &resources->iomem, list) {
r = request_mem_region(res->start, res->end - res->start,
desc);
@ -475,7 +479,11 @@ int apei_resources_request(struct apei_resources *resources,
}
}
apei_resources_merge(&apei_resources_all, resources);
rc = apei_resources_merge(&apei_resources_all, resources);
if (rc) {
pr_err(APEI_PFX "Fail to merge resources!\n");
goto err_unmap_ioport;
}
return 0;
err_unmap_ioport:
@ -491,12 +499,13 @@ err_unmap_iomem:
break;
release_mem_region(res->start, res->end - res->start);
}
return -EINVAL;
return rc;
}
EXPORT_SYMBOL_GPL(apei_resources_request);
void apei_resources_release(struct apei_resources *resources)
{
int rc;
struct apei_res *res;
list_for_each_entry(res, &resources->iomem, list)
@ -504,7 +513,9 @@ void apei_resources_release(struct apei_resources *resources)
list_for_each_entry(res, &resources->ioport, list)
release_region(res->start, res->end - res->start);
apei_resources_sub(&apei_resources_all, resources);
rc = apei_resources_sub(&apei_resources_all, resources);
if (rc)
pr_err(APEI_PFX "Fail to sub resources!\n");
}
EXPORT_SYMBOL_GPL(apei_resources_release);


@ -426,7 +426,9 @@ DEFINE_SIMPLE_ATTRIBUTE(error_inject_fops, NULL,
static int einj_check_table(struct acpi_table_einj *einj_tab)
{
if (einj_tab->header_length != sizeof(struct acpi_table_einj))
if ((einj_tab->header_length !=
(sizeof(struct acpi_table_einj) - sizeof(einj_tab->header)))
&& (einj_tab->header_length != sizeof(struct acpi_table_einj)))
return -EINVAL;
if (einj_tab->header.length < sizeof(struct acpi_table_einj))
return -EINVAL;


@ -2,7 +2,7 @@
* APEI Error Record Serialization Table debug support
*
* ERST is a way provided by APEI to save and retrieve hardware error
* infomation to and from a persistent store. This file provide the
* information to and from a persistent store. This file provide the
* debugging/testing support for ERST kernel support and firmware
* implementation.
*
@ -111,11 +111,13 @@ retry:
goto out;
}
if (len > erst_dbg_buf_len) {
kfree(erst_dbg_buf);
void *p;
rc = -ENOMEM;
erst_dbg_buf = kmalloc(len, GFP_KERNEL);
if (!erst_dbg_buf)
p = kmalloc(len, GFP_KERNEL);
if (!p)
goto out;
kfree(erst_dbg_buf);
erst_dbg_buf = p;
erst_dbg_buf_len = len;
goto retry;
}
@ -150,11 +152,13 @@ static ssize_t erst_dbg_write(struct file *filp, const char __user *ubuf,
if (mutex_lock_interruptible(&erst_dbg_mutex))
return -EINTR;
if (usize > erst_dbg_buf_len) {
kfree(erst_dbg_buf);
void *p;
rc = -ENOMEM;
erst_dbg_buf = kmalloc(usize, GFP_KERNEL);
if (!erst_dbg_buf)
p = kmalloc(usize, GFP_KERNEL);
if (!p)
goto out;
kfree(erst_dbg_buf);
erst_dbg_buf = p;
erst_dbg_buf_len = usize;
}
rc = copy_from_user(erst_dbg_buf, ubuf, usize);


@ -2,7 +2,7 @@
* APEI Error Record Serialization Table support
*
* ERST is a way provided by APEI to save and retrieve hardware error
* infomation to and from a persistent store.
* information to and from a persistent store.
*
* For more information about ERST, please refer to ACPI Specification
* version 4.0, section 17.4.
@ -266,13 +266,30 @@ static int erst_exec_move_data(struct apei_exec_context *ctx,
{
int rc;
u64 offset;
void *src, *dst;
/* ioremap does not work in interrupt context */
if (in_interrupt()) {
pr_warning(ERST_PFX
"MOVE_DATA can not be used in interrupt context");
return -EBUSY;
}
rc = __apei_exec_read_register(entry, &offset);
if (rc)
return rc;
memmove((void *)ctx->dst_base + offset,
(void *)ctx->src_base + offset,
ctx->var2);
src = ioremap(ctx->src_base + offset, ctx->var2);
if (!src)
return -ENOMEM;
dst = ioremap(ctx->dst_base + offset, ctx->var2);
if (!dst)
return -ENOMEM;
memmove(dst, src, ctx->var2);
iounmap(src);
iounmap(dst);
return 0;
}
@ -750,7 +767,9 @@ __setup("erst_disable", setup_erst_disable);
static int erst_check_table(struct acpi_table_erst *erst_tab)
{
if (erst_tab->header_length != sizeof(struct acpi_table_erst))
if ((erst_tab->header_length !=
(sizeof(struct acpi_table_erst) - sizeof(erst_tab->header)))
&& (erst_tab->header_length != sizeof(struct acpi_table_einj)))
return -EINVAL;
if (erst_tab->header.length < sizeof(struct acpi_table_erst))
return -EINVAL;


@ -302,7 +302,7 @@ static int __devinit ghes_probe(struct platform_device *ghes_dev)
struct ghes *ghes = NULL;
int rc = -EINVAL;
generic = ghes_dev->dev.platform_data;
generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data;
if (!generic->enabled)
return -ENODEV;

View File

@ -137,20 +137,23 @@ static int hest_parse_ghes_count(struct acpi_hest_header *hest_hdr, void *data)
static int hest_parse_ghes(struct acpi_hest_header *hest_hdr, void *data)
{
struct acpi_hest_generic *generic;
struct platform_device *ghes_dev;
struct ghes_arr *ghes_arr = data;
int rc;
if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR)
return 0;
generic = (struct acpi_hest_generic *)hest_hdr;
if (!generic->enabled)
if (!((struct acpi_hest_generic *)hest_hdr)->enabled)
return 0;
ghes_dev = platform_device_alloc("GHES", hest_hdr->source_id);
if (!ghes_dev)
return -ENOMEM;
ghes_dev->dev.platform_data = generic;
rc = platform_device_add_data(ghes_dev, &hest_hdr, sizeof(void *));
if (rc)
goto err;
rc = platform_device_add(ghes_dev);
if (rc)
goto err;


@ -142,7 +142,7 @@ static void __iomem *acpi_pre_map(phys_addr_t paddr,
list_add_tail_rcu(&map->list, &acpi_iomaps);
spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
return vaddr + (paddr - pg_off);
return map->vaddr + (paddr - map->paddr);
err_unmap:
iounmap(vaddr);
return NULL;


@ -273,7 +273,6 @@ static enum power_supply_property energy_battery_props[] = {
POWER_SUPPLY_PROP_CYCLE_COUNT,
POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_POWER_NOW,
POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
POWER_SUPPLY_PROP_ENERGY_FULL,


@ -183,6 +183,8 @@ static int __init dmi_disable_osi_vista(const struct dmi_system_id *d)
{
printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
acpi_osi_setup("!Windows 2006");
acpi_osi_setup("!Windows 2006 SP1");
acpi_osi_setup("!Windows 2006 SP2");
return 0;
}
static int __init dmi_disable_osi_win7(const struct dmi_system_id *d)
@ -226,6 +228,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
},
},
{
.callback = dmi_disable_osi_vista,
.ident = "Toshiba Satellite L355",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Satellite L355"),
},
},
{
.callback = dmi_disable_osi_win7,
.ident = "ASUS K50IJ",
.matches = {
@ -233,6 +243,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_NAME, "K50IJ"),
},
},
{
.callback = dmi_disable_osi_vista,
.ident = "Toshiba P305D",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P305D"),
},
},
/*
* BIOS invocation of _OSI(Linux) is almost always a BIOS bug.


@ -55,7 +55,7 @@ EXPORT_SYMBOL(acpi_root_dir);
static int set_power_nocheck(const struct dmi_system_id *id)
{
printk(KERN_NOTICE PREFIX "%s detected - "
"disable power check in power transistion\n", id->ident);
"disable power check in power transition\n", id->ident);
acpi_power_nocheck = 1;
return 0;
}
@ -80,23 +80,15 @@ static int set_copy_dsdt(const struct dmi_system_id *id)
static struct dmi_system_id dsdt_dmi_table[] __initdata = {
/*
* Insyde BIOS on some TOSHIBA machines corrupt the DSDT.
* Invoke DSDT corruption work-around on all Toshiba Satellite.
* https://bugzilla.kernel.org/show_bug.cgi?id=14679
*/
{
.callback = set_copy_dsdt,
.ident = "TOSHIBA Satellite A505",
.ident = "TOSHIBA Satellite",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "Satellite A505"),
},
},
{
.callback = set_copy_dsdt,
.ident = "TOSHIBA Satellite L505D",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L505D"),
DMI_MATCH(DMI_PRODUCT_NAME, "Satellite"),
},
},
{}
@ -1027,7 +1019,7 @@ static int __init acpi_init(void)
/*
* If the laptop falls into the DMI check table, the power state check
* will be disabled in the course of device power transistion.
* will be disabled in the course of device power transition.
*/
dmi_check_system(power_nocheck_dmi_table);


@ -369,7 +369,9 @@ static void __exit acpi_fan_exit(void)
acpi_bus_unregister_driver(&acpi_fan_driver);
#ifdef CONFIG_ACPI_PROCFS
remove_proc_entry(ACPI_FAN_CLASS, acpi_root_dir);
#endif
return;
}


@ -28,12 +28,6 @@ static int set_no_mwait(const struct dmi_system_id *id)
}
static struct dmi_system_id __cpuinitdata processor_idle_dmi_table[] = {
{
set_no_mwait, "IFL91 board", {
DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
DMI_MATCH(DMI_SYS_VENDOR, "ZEPTO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "3215W"),
DMI_MATCH(DMI_BOARD_NAME, "IFL91") }, NULL},
{
set_no_mwait, "Extensa 5220", {
DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),


@ -850,7 +850,7 @@ static int __init acpi_processor_init(void)
printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
acpi_idle_driver.name);
} else {
printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s",
printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s\n",
cpuidle_get_driver()->name);
}


@ -447,8 +447,8 @@ int acpi_processor_notify_smm(struct module *calling_module)
if (!try_module_get(calling_module))
return -EINVAL;
/* is_done is set to negative if an error occured,
* and to postitive if _no_ error occured, but SMM
/* is_done is set to negative if an error occurred,
* and to postitive if _no_ error occurred, but SMM
* was already notified. This avoids double notification
* which might lead to unexpected results...
*/


@ -363,6 +363,12 @@ static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
return 0;
}
static int __init init_nvs_nosave(const struct dmi_system_id *d)
{
acpi_nvs_nosave();
return 0;
}
static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
{
.callback = init_old_suspend_ordering,
@ -397,6 +403,22 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
},
},
{
.callback = init_nvs_nosave,
.ident = "Sony Vaio VGN-SR11M",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"),
},
},
{
.callback = init_nvs_nosave,
.ident = "Everex StepNote Series",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"),
},
},
{},
};
#endif /* CONFIG_SUSPEND */


@ -100,7 +100,7 @@ static const struct acpi_dlevel acpi_debug_levels[] = {
ACPI_DEBUG_INIT(ACPI_LV_EVENTS),
};
static int param_get_debug_layer(char *buffer, struct kernel_param *kp)
static int param_get_debug_layer(char *buffer, const struct kernel_param *kp)
{
int result = 0;
int i;
@ -128,7 +128,7 @@ static int param_get_debug_layer(char *buffer, struct kernel_param *kp)
return result;
}
static int param_get_debug_level(char *buffer, struct kernel_param *kp)
static int param_get_debug_level(char *buffer, const struct kernel_param *kp)
{
int result = 0;
int i;
@ -149,10 +149,18 @@ static int param_get_debug_level(char *buffer, struct kernel_param *kp)
return result;
}
module_param_call(debug_layer, param_set_uint, param_get_debug_layer,
&acpi_dbg_layer, 0644);
module_param_call(debug_level, param_set_uint, param_get_debug_level,
&acpi_dbg_level, 0644);
static struct kernel_param_ops param_ops_debug_layer = {
.set = param_set_uint,
.get = param_get_debug_layer,
};
static struct kernel_param_ops param_ops_debug_level = {
.set = param_set_uint,
.get = param_get_debug_level,
};
module_param_cb(debug_layer, &param_ops_debug_layer, &acpi_dbg_layer, 0644);
module_param_cb(debug_level, &param_ops_debug_level, &acpi_dbg_level, 0644);
static char trace_method_name[6];
module_param_string(trace_method_name, trace_method_name, 6, 0644);


@ -59,8 +59,8 @@ acpi_backlight_cap_match(acpi_handle handle, u32 level, void *context,
"support\n"));
*cap |= ACPI_VIDEO_BACKLIGHT;
if (ACPI_FAILURE(acpi_get_handle(handle, "_BQC", &h_dummy)))
printk(KERN_WARNING FW_BUG PREFIX "ACPI brightness "
"control misses _BQC function\n");
printk(KERN_WARNING FW_BUG PREFIX "No _BQC method, "
"cannot determine initial brightness\n");
/* We have backlight support, no need to scan further */
return AE_CTRL_TERMINATE;
}


@ -80,7 +80,7 @@
* Limiting Performance Impact
* ---------------------------
* C states, especially those with large exit latencies, can have a real
* noticable impact on workloads, which is not acceptable for most sysadmins,
* noticeable impact on workloads, which is not acceptable for most sysadmins,
* and in addition, less performance has a power price of its own.
*
* As a general rule of thumb, menu assumes that the following heuristic


@ -580,7 +580,6 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
sh_chan = to_sh_chan(chan);
param = chan->private;
slave_addr = param->config->addr;
/* Someone calling slave DMA on a public channel? */
if (!param || !sg_len) {
@ -589,6 +588,8 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
return NULL;
}
slave_addr = param->config->addr;
/*
* if (param != NULL), this is a successfully requested slave channel,
* therefore param->config != NULL too.


@ -1140,6 +1140,7 @@ static struct mcidev_sysfs_attribute i7core_udimm_counters_attrs[] = {
ATTR_COUNTER(0),
ATTR_COUNTER(1),
ATTR_COUNTER(2),
{ .attr = { .name = NULL } }
};
static struct mcidev_sysfs_group i7core_udimm_counters = {


@ -148,7 +148,7 @@ int drm_gem_object_init(struct drm_device *dev,
return -ENOMEM;
kref_init(&obj->refcount);
kref_init(&obj->handlecount);
atomic_set(&obj->handle_count, 0);
obj->size = size;
atomic_inc(&dev->object_count);
@ -462,28 +462,6 @@ drm_gem_object_free(struct kref *kref)
}
EXPORT_SYMBOL(drm_gem_object_free);
/**
* Called after the last reference to the object has been lost.
* Must be called without holding struct_mutex
*
* Frees the object
*/
void
drm_gem_object_free_unlocked(struct kref *kref)
{
struct drm_gem_object *obj = (struct drm_gem_object *) kref;
struct drm_device *dev = obj->dev;
if (dev->driver->gem_free_object_unlocked != NULL)
dev->driver->gem_free_object_unlocked(obj);
else if (dev->driver->gem_free_object != NULL) {
mutex_lock(&dev->struct_mutex);
dev->driver->gem_free_object(obj);
mutex_unlock(&dev->struct_mutex);
}
}
EXPORT_SYMBOL(drm_gem_object_free_unlocked);
static void drm_gem_object_ref_bug(struct kref *list_kref)
{
BUG();
@ -496,12 +474,8 @@ static void drm_gem_object_ref_bug(struct kref *list_kref)
* called before drm_gem_object_free or we'll be touching
* freed memory
*/
void
drm_gem_object_handle_free(struct kref *kref)
void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
struct drm_gem_object *obj = container_of(kref,
struct drm_gem_object,
handlecount);
struct drm_device *dev = obj->dev;
/* Remove any name for this object */
@ -528,6 +502,10 @@ void drm_gem_vm_open(struct vm_area_struct *vma)
struct drm_gem_object *obj = vma->vm_private_data;
drm_gem_object_reference(obj);
mutex_lock(&obj->dev->struct_mutex);
drm_vm_open_locked(vma);
mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);
@ -535,7 +513,10 @@ void drm_gem_vm_close(struct vm_area_struct *vma)
{
struct drm_gem_object *obj = vma->vm_private_data;
drm_gem_object_unreference_unlocked(obj);
mutex_lock(&obj->dev->struct_mutex);
drm_vm_close_locked(vma);
drm_gem_object_unreference(obj);
mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);


@ -255,7 +255,7 @@ int drm_gem_one_name_info(int id, void *ptr, void *data)
seq_printf(m, "%6d %8zd %7d %8d\n",
obj->name, obj->size,
atomic_read(&obj->handlecount.refcount),
atomic_read(&obj->handle_count),
atomic_read(&obj->refcount.refcount));
return 0;
}


@ -433,6 +433,25 @@ static void drm_vm_open(struct vm_area_struct *vma)
mutex_unlock(&dev->struct_mutex);
}
void drm_vm_close_locked(struct vm_area_struct *vma)
{
struct drm_file *priv = vma->vm_file->private_data;
struct drm_device *dev = priv->minor->dev;
struct drm_vma_entry *pt, *temp;
DRM_DEBUG("0x%08lx,0x%08lx\n",
vma->vm_start, vma->vm_end - vma->vm_start);
atomic_dec(&dev->vma_count);
list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
if (pt->vma == vma) {
list_del(&pt->head);
kfree(pt);
break;
}
}
}
/**
* \c close method for all virtual memory types.
*
@ -445,20 +464,9 @@ static void drm_vm_close(struct vm_area_struct *vma)
{
struct drm_file *priv = vma->vm_file->private_data;
struct drm_device *dev = priv->minor->dev;
struct drm_vma_entry *pt, *temp;
DRM_DEBUG("0x%08lx,0x%08lx\n",
vma->vm_start, vma->vm_end - vma->vm_start);
atomic_dec(&dev->vma_count);
mutex_lock(&dev->struct_mutex);
list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
if (pt->vma == vma) {
list_del(&pt->head);
kfree(pt);
break;
}
}
drm_vm_close_locked(vma);
mutex_unlock(&dev->struct_mutex);
}


@ -116,7 +116,7 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
static const struct file_operations i810_buffer_fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
.unlocked_ioctl = i810_ioctl,
.mmap = i810_mmap_buffers,
.fasync = drm_fasync,
};


@ -118,7 +118,7 @@ static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
static const struct file_operations i830_buffer_fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
.unlocked_ioctl = i830_ioctl,
.mmap = i830_mmap_buffers,
.fasync = drm_fasync,
};


@ -1787,9 +1787,9 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
}
}
div_u64(diff, diff1);
diff = div_u64(diff, diff1);
ret = ((m * diff) + c);
div_u64(ret, 10);
ret = div_u64(ret, 10);
dev_priv->last_count1 = total_count;
dev_priv->last_time1 = now;
@ -1858,7 +1858,7 @@ void i915_update_gfx_val(struct drm_i915_private *dev_priv)
/* More magic constants... */
diff = diff * 1181;
div_u64(diff, diffms * 10);
diff = div_u64(diff, diffms * 10);
dev_priv->gfx_power = diff;
}


@ -136,14 +136,12 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
return -ENOMEM;
ret = drm_gem_handle_create(file_priv, obj, &handle);
/* drop reference from allocate - handle holds it now */
drm_gem_object_unreference_unlocked(obj);
if (ret) {
drm_gem_object_unreference_unlocked(obj);
return ret;
}
/* Sink the floating reference from kref_init(handlecount) */
drm_gem_object_handle_unreference_unlocked(obj);
args->handle = handle;
return 0;
}
@ -471,14 +469,17 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
obj_priv = to_intel_bo(obj);
/* Bounds check source.
*
* XXX: This could use review for overflow issues...
*/
if (args->offset > obj->size || args->size > obj->size ||
args->offset + args->size > obj->size) {
drm_gem_object_unreference_unlocked(obj);
return -EINVAL;
/* Bounds check source. */
if (args->offset > obj->size || args->size > obj->size - args->offset) {
ret = -EINVAL;
goto err;
}
if (!access_ok(VERIFY_WRITE,
(char __user *)(uintptr_t)args->data_ptr,
args->size)) {
ret = -EFAULT;
goto err;
}
if (i915_gem_object_needs_bit17_swizzle(obj)) {
@ -490,8 +491,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
file_priv);
}
err:
drm_gem_object_unreference_unlocked(obj);
return ret;
}
@ -580,8 +581,6 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
if (!access_ok(VERIFY_READ, user_data, remain))
return -EFAULT;
mutex_lock(&dev->struct_mutex);
@ -934,14 +933,17 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
obj_priv = to_intel_bo(obj);
/* Bounds check destination.
*
* XXX: This could use review for overflow issues...
*/
if (args->offset > obj->size || args->size > obj->size ||
args->offset + args->size > obj->size) {
drm_gem_object_unreference_unlocked(obj);
return -EINVAL;
/* Bounds check destination. */
if (args->offset > obj->size || args->size > obj->size - args->offset) {
ret = -EINVAL;
goto err;
}
if (!access_ok(VERIFY_READ,
(char __user *)(uintptr_t)args->data_ptr,
args->size)) {
ret = -EFAULT;
goto err;
}
/* We can only do the GTT pwrite on untiled buffers, as otherwise
@ -975,8 +977,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
DRM_INFO("pwrite failed %d\n", ret);
#endif
err:
drm_gem_object_unreference_unlocked(obj);
return ret;
}
@ -3258,6 +3260,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
(int) reloc->offset,
reloc->read_domains,
reloc->write_domain);
drm_gem_object_unreference(target_obj);
i915_gem_object_unpin(obj);
return -EINVAL;
}
if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||


@ -93,7 +93,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct list_head eviction_list, unwind_list;
struct drm_i915_gem_object *obj_priv, *tmp_obj_priv;
struct drm_i915_gem_object *obj_priv;
struct list_head *render_iter, *bsd_iter;
int ret = 0;
@ -175,39 +175,34 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
return -ENOSPC;
found:
/* drm_mm doesn't allow any other other operations while
* scanning, therefore store to be evicted objects on a
* temporary list. */
INIT_LIST_HEAD(&eviction_list);
list_for_each_entry_safe(obj_priv, tmp_obj_priv,
&unwind_list, evict_list) {
while (!list_empty(&unwind_list)) {
obj_priv = list_first_entry(&unwind_list,
struct drm_i915_gem_object,
evict_list);
if (drm_mm_scan_remove_block(obj_priv->gtt_space)) {
/* drm_mm doesn't allow any other other operations while
* scanning, therefore store to be evicted objects on a
* temporary list. */
list_move(&obj_priv->evict_list, &eviction_list);
} else
drm_gem_object_unreference(&obj_priv->base);
}
/* Unbinding will emit any required flushes */
list_for_each_entry_safe(obj_priv, tmp_obj_priv,
&eviction_list, evict_list) {
#if WATCH_LRU
DRM_INFO("%s: evicting %p\n", __func__, &obj_priv->base);
#endif
ret = i915_gem_object_unbind(&obj_priv->base);
if (ret)
return ret;
continue;
}
list_del(&obj_priv->evict_list);
drm_gem_object_unreference(&obj_priv->base);
}
/* The just created free hole should be on the top of the free stack
* maintained by drm_mm, so this BUG_ON actually executes in O(1).
* Furthermore all accessed data has just recently been used, so it
* should be really fast, too. */
BUG_ON(!drm_mm_search_free(&dev_priv->mm.gtt_space, min_size,
alignment, 0));
/* Unbinding will emit any required flushes */
while (!list_empty(&eviction_list)) {
obj_priv = list_first_entry(&eviction_list,
struct drm_i915_gem_object,
evict_list);
if (ret == 0)
ret = i915_gem_object_unbind(&obj_priv->base);
list_del(&obj_priv->evict_list);
drm_gem_object_unreference(&obj_priv->base);
}
return 0;
return ret;
}
int


@ -1013,8 +1013,8 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
DRM_DEBUG_KMS("vblank wait timed out\n");
}
/**
* intel_wait_for_vblank_off - wait for vblank after disabling a pipe
/*
* intel_wait_for_pipe_off - wait for pipe to turn off
* @dev: drm device
* @pipe: pipe to wait for
*
@ -1022,25 +1022,39 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
* spinning on the vblank interrupt status bit, since we won't actually
* see an interrupt when the pipe is disabled.
*
* So this function waits for the display line value to settle (it
* usually ends up stopping at the start of the next frame).
* On Gen4 and above:
* wait for the pipe register state bit to turn off
*
* Otherwise:
* wait for the display line value to settle (it usually
* ends up stopping at the start of the next frame).
*
*/
void intel_wait_for_vblank_off(struct drm_device *dev, int pipe)
static void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL);
unsigned long timeout = jiffies + msecs_to_jiffies(100);
u32 last_line;
/* Wait for the display line to settle */
do {
last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK;
mdelay(5);
} while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) &&
time_after(timeout, jiffies));
if (INTEL_INFO(dev)->gen >= 4) {
int pipeconf_reg = (pipe == 0 ? PIPEACONF : PIPEBCONF);
if (time_after(jiffies, timeout))
DRM_DEBUG_KMS("vblank wait timed out\n");
/* Wait for the Pipe State to go off */
if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0,
100, 0))
DRM_DEBUG_KMS("pipe_off wait timed out\n");
} else {
u32 last_line;
int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL);
unsigned long timeout = jiffies + msecs_to_jiffies(100);
/* Wait for the display line to settle */
do {
last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK;
mdelay(5);
} while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) &&
time_after(timeout, jiffies));
if (time_after(jiffies, timeout))
DRM_DEBUG_KMS("pipe_off wait timed out\n");
}
}
/* Parameters have changed, update FBC info */
@ -2328,13 +2342,13 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_READ(dspbase_reg);
}
/* Wait for vblank for the disable to take effect */
intel_wait_for_vblank_off(dev, pipe);
/* Don't disable pipe A or pipe A PLLs if needed */
if (pipeconf_reg == PIPEACONF &&
(dev_priv->quirks & QUIRK_PIPEA_FORCE))
(dev_priv->quirks & QUIRK_PIPEA_FORCE)) {
/* Wait for vblank for the disable to take effect */
intel_wait_for_vblank(dev, pipe);
goto skip_pipe_off;
}
/* Next, disable display pipes */
temp = I915_READ(pipeconf_reg);
@ -2343,8 +2357,8 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_READ(pipeconf_reg);
}
/* Wait for vblank for the disable to take effect. */
intel_wait_for_vblank_off(dev, pipe);
/* Wait for the pipe to turn off */
intel_wait_for_pipe_off(dev, pipe);
temp = I915_READ(dpll_reg);
if ((temp & DPLL_VCO_ENABLE) != 0) {


@ -1138,18 +1138,14 @@ static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
uint32_t dp_reg_value,
uint8_t dp_train_pat,
uint8_t train_set[4],
bool first)
uint8_t train_set[4])
{
struct drm_device *dev = intel_dp->base.enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc);
int ret;
I915_WRITE(intel_dp->output_reg, dp_reg_value);
POSTING_READ(intel_dp->output_reg);
if (first)
intel_wait_for_vblank(dev, intel_crtc->pipe);
intel_dp_aux_native_write_1(intel_dp,
DP_TRAINING_PATTERN_SET,
@ -1174,10 +1170,15 @@ intel_dp_link_train(struct intel_dp *intel_dp)
uint8_t voltage;
bool clock_recovery = false;
bool channel_eq = false;
bool first = true;
int tries;
u32 reg;
uint32_t DP = intel_dp->DP;
struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc);
/* Enable output, wait for it to become active */
I915_WRITE(intel_dp->output_reg, intel_dp->DP);
POSTING_READ(intel_dp->output_reg);
intel_wait_for_vblank(dev, intel_crtc->pipe);
/* Write the link configuration data */
intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
@ -1210,9 +1211,8 @@ intel_dp_link_train(struct intel_dp *intel_dp)
reg = DP | DP_LINK_TRAIN_PAT_1;
if (!intel_dp_set_link_train(intel_dp, reg,
DP_TRAINING_PATTERN_1, train_set, first))
DP_TRAINING_PATTERN_1, train_set))
break;
first = false;
/* Set training pattern 1 */
udelay(100);
@ -1266,8 +1266,7 @@ intel_dp_link_train(struct intel_dp *intel_dp)
/* channel eq pattern */
if (!intel_dp_set_link_train(intel_dp, reg,
DP_TRAINING_PATTERN_2, train_set,
false))
DP_TRAINING_PATTERN_2, train_set))
break;
udelay(400);


@ -229,7 +229,6 @@ extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
struct drm_crtc *crtc);
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern void intel_wait_for_vblank_off(struct drm_device *dev, int pipe);
extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,


@ -237,8 +237,10 @@ int intel_fbdev_destroy(struct drm_device *dev,
drm_fb_helper_fini(&ifbdev->helper);
drm_framebuffer_cleanup(&ifb->base);
if (ifb->obj)
if (ifb->obj) {
drm_gem_object_handle_unreference(ifb->obj);
drm_gem_object_unreference(ifb->obj);
}
return 0;
}


@ -352,6 +352,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
if (nouveau_fb->nvbo) {
nouveau_bo_unmap(nouveau_fb->nvbo);
drm_gem_object_handle_unreference_unlocked(nouveau_fb->nvbo->gem);
drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
nouveau_fb->nvbo = NULL;
}


@ -167,11 +167,9 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
goto out;
ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
/* drop reference from allocate - handle holds it now */
drm_gem_object_unreference_unlocked(nvbo->gem);
out:
drm_gem_object_handle_unreference_unlocked(nvbo->gem);
if (ret)
drm_gem_object_unreference_unlocked(nvbo->gem);
return ret;
}


@ -79,6 +79,7 @@ nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
mutex_lock(&dev->struct_mutex);
nouveau_bo_unpin(chan->notifier_bo);
mutex_unlock(&dev->struct_mutex);
drm_gem_object_handle_unreference_unlocked(chan->notifier_bo->gem);
drm_gem_object_unreference_unlocked(chan->notifier_bo->gem);
drm_mm_takedown(&chan->notifier_heap);
}

Some files were not shown because too many files have changed in this diff Show More