
Merge commit 'origin/master' into next

Manual merge of:
	drivers/char/hvc_console.c
	drivers/char/hvc_console.h
Benjamin Herrenschmidt 2010-02-26 14:41:00 +11:00
commit 874f2f997d
286 changed files with 8635 additions and 3381 deletions


@@ -69,7 +69,6 @@ av_permissions.h
bbootsect
bin2c
binkernel.spec
binoffset
bootsect
bounds.h
bsetup


@@ -199,6 +199,10 @@ and is between 256 and 4096 characters. It is defined in the file
acpi_display_output=video
See above.
acpi_early_pdc_eval [HW,ACPI] Evaluate processor _PDC methods
early. Needed on some platforms to properly
initialize the EC.
acpi_irq_balance [HW,ACPI]
ACPI will balance active IRQs
default in APIC mode
@@ -311,6 +315,11 @@ and is between 256 and 4096 characters. It is defined in the file
aic79xx= [HW,SCSI]
See Documentation/scsi/aic79xx.txt.
alignment= [KNL,ARM]
Allow the default userspace alignment fault handler
behaviour to be specified. Bit 0 enables warnings,
bit 1 enables fixups, and bit 2 sends a segfault.
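For illustration (values derived from the bit definitions above, not
part of this diff): alignment=3 enables both warnings and fixups,
while alignment=4 delivers a segfault on each unaligned userspace
access.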
amd_iommu= [HW,X86-64]
Pass parameters to the AMD IOMMU driver in the system.
Possible values are:


@@ -34,7 +34,6 @@
#include <sys/uio.h>
#include <termios.h>
#include <getopt.h>
#include <zlib.h>
#include <assert.h>
#include <sched.h>
#include <limits.h>


@@ -1074,10 +1074,10 @@ regen_max_retry - INTEGER
Default: 5
max_addresses - INTEGER
Number of maximum addresses per interface. 0 disables limitation.
It is recommended not set too large value (or 0) because it would
be too easy way to crash kernel to allow to create too much of
autoconfigured addresses.
Maximum number of autoconfigured addresses per interface. Setting
to zero disables the limitation. It is not recommended to set this
value too large (or to zero) because it would be an easy way to
crash the kernel by allowing too many addresses to be created.
Default: 16
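(Runtime example, interface name assumed: for eth0 this limit is
exposed as /proc/sys/net/ipv6/conf/eth0/max_addresses, i.e. sysctl
net.ipv6.conf.eth0.max_addresses.)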
disable_ipv6 - BOOLEAN


@@ -616,10 +616,10 @@ M: Richard Purdie <rpurdie@rpsys.net>
S: Maintained
ARM/CORTINA SYSTEMS GEMINI ARM ARCHITECTURE
M: Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
M: Paulius Zaleckas <paulius.zaleckas@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
T: git git://gitorious.org/linux-gemini/mainline.git
S: Maintained
S: Odd Fixes
F: arch/arm/mach-gemini/
ARM/EBSA110 MACHINE SUPPORT
@@ -641,9 +641,9 @@ T: topgit git://git.openezx.org/openezx.git
F: arch/arm/mach-pxa/ezx.c
ARM/FARADAY FA526 PORT
M: Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
M: Paulius Zaleckas <paulius.zaleckas@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
S: Odd Fixes
F: arch/arm/mm/*-fa*
ARM/FOOTBRIDGE ARCHITECTURE
@@ -1733,10 +1733,9 @@ F: include/linux/tfrc.h
F: net/dccp/
DECnet NETWORK LAYER
M: Christine Caulfield <christine.caulfield@googlemail.com>
W: http://linux-decnet.sourceforge.net
L: linux-decnet-user@lists.sourceforge.net
S: Maintained
S: Orphan
F: Documentation/networking/decnet.txt
F: net/decnet/
@@ -2394,6 +2393,12 @@ L: linuxppc-dev@ozlabs.org
S: Odd Fixes
F: drivers/char/hvc_*
VIRTIO CONSOLE DRIVER
M: Amit Shah <amit.shah@redhat.com>
L: virtualization@lists.linux-foundation.org
S: Maintained
F: drivers/char/virtio_console.c
GSPCA FINEPIX SUBDRIVER
M: Frank Zago <frank@zago.net>
L: linux-media@vger.kernel.org
@@ -3490,9 +3495,9 @@ S: Maintained
F: drivers/net/wireless/libertas/
MARVELL MV643XX ETHERNET DRIVER
M: Lennert Buytenhek <buytenh@marvell.com>
M: Lennert Buytenhek <buytenh@wantstofly.org>
L: netdev@vger.kernel.org
S: Supported
S: Maintained
F: drivers/net/mv643xx_eth.*
F: include/linux/mv643xx.h


@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 33
EXTRAVERSION = -rc8
EXTRAVERSION =
NAME = Man-Eating Seals of Antiquity
# *DOCUMENTATION*


@@ -42,7 +42,8 @@
#endif
#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020)
defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \
defined(CONFIG_CPU_ARM1026)
# define MULTI_CACHE 1
#endif


@@ -102,6 +102,7 @@ struct cpu_cache_fns cpu_cache;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache;
EXPORT_SYMBOL(outer_cache);
#endif
struct stack {


@@ -86,7 +86,7 @@ static int gpio_set_irq_type(unsigned int irq, unsigned int type)
unsigned int reg_both, reg_level, reg_type;
reg_type = __raw_readl(base + GPIO_INT_TYPE);
reg_level = __raw_readl(base + GPIO_INT_BOTH_EDGE);
reg_level = __raw_readl(base + GPIO_INT_LEVEL);
reg_both = __raw_readl(base + GPIO_INT_BOTH_EDGE);
switch (type) {
@@ -117,7 +117,7 @@ static int gpio_set_irq_type(unsigned int irq, unsigned int type)
}
__raw_writel(reg_type, base + GPIO_INT_TYPE);
__raw_writel(reg_level, base + GPIO_INT_BOTH_EDGE);
__raw_writel(reg_level, base + GPIO_INT_LEVEL);
__raw_writel(reg_both, base + GPIO_INT_BOTH_EDGE);
gpio_ack_irq(irq);


@@ -961,16 +961,14 @@ static void __init omap_mux_init_list(struct omap_mux *superset)
while (superset->reg_offset != OMAP_MUX_TERMINATOR) {
struct omap_mux *entry;
#ifndef CONFIG_OMAP_MUX
/* Skip pins that are not muxed as GPIO by bootloader */
if (!OMAP_MODE_GPIO(omap_mux_read(superset->reg_offset))) {
#ifdef CONFIG_OMAP_MUX
if (!superset->muxnames || !superset->muxnames[0]) {
superset++;
continue;
}
#endif
#if defined(CONFIG_OMAP_MUX) && defined(CONFIG_DEBUG_FS)
if (!superset->muxnames || !superset->muxnames[0]) {
#else
/* Skip pins that are not muxed as GPIO by bootloader */
if (!OMAP_MODE_GPIO(omap_mux_read(superset->reg_offset))) {
superset++;
continue;
}


@@ -11,6 +11,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/moduleparam.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -77,6 +78,8 @@ static unsigned long ai_dword;
static unsigned long ai_multi;
static int ai_usermode;
core_param(alignment, ai_usermode, int, 0600);
#define UM_WARN (1 << 0)
#define UM_FIXUP (1 << 1)
#define UM_SIGNAL (1 << 2)
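/*
 * Illustrative mapping, not part of this diff: with core_param() above,
 * booting with alignment=2 starts ai_usermode as UM_FIXUP (silent
 * fixups), while alignment=5 is UM_WARN | UM_SIGNAL. The 0600 permission
 * also exposes the same knob at runtime via
 * /sys/module/kernel/parameters/alignment.
 */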


@@ -12,7 +12,7 @@
#
# http://www.arm.linux.org.uk/developer/machines/?action=new
#
# Last update: Thu Jan 28 22:15:54 2010
# Last update: Sat Feb 20 14:16:15 2010
#
# machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number
#
@@ -2257,7 +2257,7 @@ oratisalog MACH_ORATISALOG ORATISALOG 2268
oratismadi MACH_ORATISMADI ORATISMADI 2269
oratisot16 MACH_ORATISOT16 ORATISOT16 2270
oratisdesk MACH_ORATISDESK ORATISDESK 2271
v2_ca9 MACH_V2P_CA9 V2P_CA9 2272
vexpress MACH_VEXPRESS VEXPRESS 2272
sintexo MACH_SINTEXO SINTEXO 2273
cm3389 MACH_CM3389 CM3389 2274
omap3_cio MACH_OMAP3_CIO OMAP3_CIO 2275
@@ -2636,3 +2636,45 @@ hw90240 MACH_HW90240 HW90240 2648
dm365_leopard MACH_DM365_LEOPARD DM365_LEOPARD 2649
mityomapl138 MACH_MITYOMAPL138 MITYOMAPL138 2650
scat110 MACH_SCAT110 SCAT110 2651
acer_a1 MACH_ACER_A1 ACER_A1 2652
cmcontrol MACH_CMCONTROL CMCONTROL 2653
pelco_lamar MACH_PELCO_LAMAR PELCO_LAMAR 2654
rfp43 MACH_RFP43 RFP43 2655
sk86r0301 MACH_SK86R0301 SK86R0301 2656
ctpxa MACH_CTPXA CTPXA 2657
epb_arm9_a MACH_EPB_ARM9_A EPB_ARM9_A 2658
guruplug MACH_GURUPLUG GURUPLUG 2659
spear310 MACH_SPEAR310 SPEAR310 2660
spear320 MACH_SPEAR320 SPEAR320 2661
robotx MACH_ROBOTX ROBOTX 2662
lsxhl MACH_LSXHL LSXHL 2663
smartlite MACH_SMARTLITE SMARTLITE 2664
cws2 MACH_CWS2 CWS2 2665
m619 MACH_M619 M619 2666
smartview MACH_SMARTVIEW SMARTVIEW 2667
lsa_salsa MACH_LSA_SALSA LSA_SALSA 2668
kizbox MACH_KIZBOX KIZBOX 2669
htccharmer MACH_HTCCHARMER HTCCHARMER 2670
guf_neso_lt MACH_GUF_NESO_LT GUF_NESO_LT 2671
pm9g45 MACH_PM9G45 PM9G45 2672
htcpanther MACH_HTCPANTHER HTCPANTHER 2673
htcpanther_cdma MACH_HTCPANTHER_CDMA HTCPANTHER_CDMA 2674
reb01 MACH_REB01 REB01 2675
aquila MACH_AQUILA AQUILA 2676
spark_sls_hw2 MACH_SPARK_SLS_HW2 SPARK_SLS_HW2 2677
sheeva_esata MACH_ESATA_SHEEVAPLUG ESATA_SHEEVAPLUG 2678
surf7x30 MACH_SURF7X30 SURF7X30 2679
micro2440 MACH_MICRO2440 MICRO2440 2680
am2440 MACH_AM2440 AM2440 2681
tq2440 MACH_TQ2440 TQ2440 2682
lpc2478oem MACH_LPC2478OEM LPC2478OEM 2683
ak880x MACH_AK880X AK880X 2684
cobra3530 MACH_COBRA3530 COBRA3530 2685
pmppb MACH_PMPPB PMPPB 2686
u6715 MACH_U6715 U6715 2687
axar1500_sender MACH_AXAR1500_SENDER AXAR1500_SENDER 2688
g30_dvb MACH_G30_DVB G30_DVB 2689
vc088x MACH_VC088X VC088X 2690
mioa702 MACH_MIOA702 MIOA702 2691
hpmin MACH_HPMIN HPMIN 2692
ak880xak MACH_AK880XAK AK880XAK 2693


@@ -94,6 +94,7 @@ ia64_acpi_release_global_lock (unsigned int *lock)
#define acpi_noirq 0 /* ACPI always enabled on IA64 */
#define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */
#define acpi_strict 1 /* no ACPI spec workarounds on IA64 */
#define acpi_ht 0 /* no HT-only mode on IA64 */
#endif
#define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */
static inline void disable_acpi(void) { }


@@ -71,7 +71,7 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second);
DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
EXPORT_PER_CPU_SYMBOL(__sn_hub_info);
DEFINE_PER_CPU(short [MAX_COMPACT_NODES], __sn_cnodeid_to_nasid);
DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]);
EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid);
DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda);


@@ -130,6 +130,7 @@ config CMDLINE_FORCE
config OF
def_bool y
select OF_FLATTREE
config PROC_DEVICETREE
bool "Support for device tree in /proc"


@@ -217,7 +217,7 @@ static inline void __iomem *__ioremap(phys_addr_t address, unsigned long size,
* Little endian
*/
#define out_le32(a, v) __raw_writel(__cpu_to_le32(v), (a));
#define out_le32(a, v) __raw_writel(__cpu_to_le32(v), (a))
#define out_le16(a, v) __raw_writew(__cpu_to_le16(v), (a))
#define in_le32(a) __le32_to_cpu(__raw_readl(a))


@@ -26,31 +26,11 @@
#include <asm/irq.h>
#include <asm/atomic.h>
#define OF_ROOT_NODE_ADDR_CELLS_DEFAULT 1
#define OF_ROOT_NODE_SIZE_CELLS_DEFAULT 1
#define of_compat_cmp(s1, s2, l) strncasecmp((s1), (s2), (l))
#define of_prop_cmp(s1, s2) strcmp((s1), (s2))
#define of_node_cmp(s1, s2) strcasecmp((s1), (s2))
extern struct device_node *of_chosen;
#define HAVE_ARCH_DEVTREE_FIXUPS
extern struct device_node *allnodes; /* temporary while merging */
extern rwlock_t devtree_lock; /* temporary while merging */
/* For updating the device tree at runtime */
extern void of_attach_node(struct device_node *);
extern void of_detach_node(struct device_node *);
/* Other Prototypes */
extern int early_uartlite_console(void);
extern struct resource *request_OF_resource(struct device_node *node,
int index, const char *name_postfix);
extern int release_OF_resource(struct device_node *node, int index);
/*
* OF address retreival & translation
*/


@@ -172,16 +172,15 @@ do { \
/* It is used only first parameter for OP - for wic, wdc */
#define CACHE_RANGE_LOOP_1(start, end, line_length, op) \
do { \
int step = -line_length; \
int count = end - start; \
BUG_ON(count <= 0); \
int volatile temp; \
BUG_ON(end - start <= 0); \
\
__asm__ __volatile__ (" 1: addk %0, %0, %1; \
" #op " %0, r0; \
bgtid %1, 1b; \
addk %1, %1, %2; \
" : : "r" (start), "r" (count), \
"r" (step) : "memory"); \
__asm__ __volatile__ (" 1: " #op " %1, r0; \
cmpu %0, %1, %2; \
bgtid %0, 1b; \
addk %1, %1, %3; \
" : : "r" (temp), "r" (start), "r" (end),\
"r" (line_length) : "memory"); \
} while (0);
static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
@@ -313,16 +312,6 @@ static void __invalidate_dcache_all_wb(void)
pr_debug("%s\n", __func__);
CACHE_ALL_LOOP2(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
wdc.clear)
#if 0
unsigned int i;
pr_debug("%s\n", __func__);
/* Just loop through cache size and invalidate it */
for (i = 0; i < cpuinfo.dcache_size; i += cpuinfo.dcache_line_length)
__invalidate_dcache(0, i);
#endif
}
static void __invalidate_dcache_range_wb(unsigned long start,


@@ -185,7 +185,7 @@ EXPORT_SYMBOL(of_find_device_by_node);
static int of_dev_phandle_match(struct device *dev, void *data)
{
phandle *ph = data;
return to_of_device(dev)->node->linux_phandle == *ph;
return to_of_device(dev)->node->phandle == *ph;
}
struct of_device *of_find_device_by_phandle(phandle ph)

File diff suppressed because it is too large


@@ -141,6 +141,14 @@ static __init void prom_init_mem(void)
break;
}
/* Ignoring the last page when ddr size is 128M. Cached
* accesses to last page is causing the processor to prefetch
* using address above 128M stepping out of the ddr address
* space.
*/
if (mem == 0x8000000)
mem -= 0x1000;
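/* Arithmetic note: 0x8000000 is 128 MiB and 0x1000 is one 4 KiB page,
 * so this trims exactly the last page of a 128 MiB layout. */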
add_memory_region(0, mem, BOOT_MEM_RAM);
}


@@ -1,5 +1,6 @@
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>


@@ -18,7 +18,6 @@ config PARISC
select BUG
select HAVE_PERF_EVENTS
select GENERIC_ATOMIC64 if !64BIT
select HAVE_ARCH_TRACEHOOK
help
The PA-RISC microprocessor is designed by Hewlett-Packard and used
in many of their workstations & servers (HP9000 700 and 800 series,


@@ -18,7 +18,6 @@
#include <asm/io.h>
#include <asm/system.h>
#include <asm/cache.h> /* for L1_CACHE_BYTES */
#include <asm/superio.h>
#define DEBUG_RESOURCES 0
@@ -123,6 +122,10 @@ static int __init pcibios_init(void)
} else {
printk(KERN_WARNING "pci_bios != NULL but init() is!\n");
}
/* Set the CLS for PCI as early as possible. */
pci_cache_line_size = pci_dfl_cache_line_size;
return 0;
}
@@ -171,7 +174,7 @@ void pcibios_set_master(struct pci_dev *dev)
** upper byte is PCI_LATENCY_TIMER.
*/
pci_write_config_word(dev, PCI_CACHE_LINE_SIZE,
(0x80 << 8) | (L1_CACHE_BYTES / sizeof(u32)));
(0x80 << 8) | pci_cache_line_size);
}


@@ -173,6 +173,7 @@ config PPC_OF
config OF
def_bool y
select OF_FLATTREE
config PPC_UDBG_16550
bool


@@ -23,21 +23,8 @@
#include <asm/irq.h>
#include <asm/atomic.h>
#define OF_ROOT_NODE_ADDR_CELLS_DEFAULT 1
#define OF_ROOT_NODE_SIZE_CELLS_DEFAULT 1
#define of_compat_cmp(s1, s2, l) strcasecmp((s1), (s2))
#define of_prop_cmp(s1, s2) strcmp((s1), (s2))
#define of_node_cmp(s1, s2) strcasecmp((s1), (s2))
extern struct device_node *of_chosen;
#define HAVE_ARCH_DEVTREE_FIXUPS
/* For updating the device tree at runtime */
extern void of_attach_node(struct device_node *);
extern void of_detach_node(struct device_node *);
#ifdef CONFIG_PPC32
/*
* PCI <-> OF matching functions
@@ -52,11 +39,6 @@ extern struct device_node* pci_device_to_OF_node(struct pci_dev *);
extern void pci_create_OF_bus_map(void);
#endif
extern struct resource *request_OF_resource(struct device_node* node,
int index, const char* name_postfix);
extern int release_OF_resource(struct device_node* node, int index);
/*
* OF address retreival & translation
*/


@@ -214,7 +214,7 @@ EXPORT_SYMBOL(of_find_device_by_node);
static int of_dev_phandle_match(struct device *dev, void *data)
{
phandle *ph = data;
return to_of_device(dev)->node->linux_phandle == *ph;
return to_of_device(dev)->node->phandle == *ph;
}
struct of_device *of_find_device_by_phandle(phandle ph)


@@ -224,7 +224,7 @@ long sys_pciconfig_iobase(long which, unsigned long in_bus,
* G5 machines... So when something asks for bus 0 io base
* (bus 0 is HT root), we return the AGP one instead.
*/
if (in_bus == 0 && machine_is_compatible("MacRISC4")) {
if (in_bus == 0 && of_machine_is_compatible("MacRISC4")) {
struct device_node *agp;
agp = of_find_compatible_node(NULL, NULL, "u3-agp");

File diff suppressed because it is too large


@@ -341,7 +341,8 @@ static void __init mpc85xx_mds_pic_init(void)
}
mpic = mpic_alloc(np, r.start,
MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN |
MPIC_BROKEN_FRR_NIRQS,
0, 256, " OpenPIC ");
BUG_ON(mpic == NULL);
of_node_put(np);


@@ -80,8 +80,8 @@ static void xes_mpc85xx_configure_l2(void __iomem *l2_base)
printk(KERN_INFO "xes_mpc85xx: Enabling L2 as cache\n");
ctl = MPC85xx_L2CTL_L2E | MPC85xx_L2CTL_L2I;
if (machine_is_compatible("MPC8540") ||
machine_is_compatible("MPC8560"))
if (of_machine_is_compatible("MPC8540") ||
of_machine_is_compatible("MPC8560"))
/*
* Assume L2 SRAM is used fully for cache, so set
* L2BLKSZ (bits 4:5) to match L2SIZ (bits 2:3).


@@ -48,7 +48,7 @@ static int __init cbe_powerbutton_init(void)
int ret = 0;
struct input_dev *dev;
if (!machine_is_compatible("IBM,CBPLUS-1.0")) {
if (!of_machine_is_compatible("IBM,CBPLUS-1.0")) {
printk(KERN_ERR "%s: Not a cell blade.\n", __func__);
ret = -ENODEV;
goto out;


@@ -255,7 +255,7 @@ static int __init cbe_sysreset_init(void)
{
struct cbe_pmd_regs __iomem *regs;
sysreset_hack = machine_is_compatible("IBM,CBPLUS-1.0");
sysreset_hack = of_machine_is_compatible("IBM,CBPLUS-1.0");
if (!sysreset_hack)
return 0;


@@ -457,7 +457,7 @@ neighbour_spu(int cbe, struct device_node *target, struct device_node *avoid)
continue;
vic_handles = of_get_property(spu_dn, "vicinity", &lenp);
for (i=0; i < (lenp / sizeof(phandle)); i++) {
if (vic_handles[i] == target->linux_phandle)
if (vic_handles[i] == target->phandle)
return spu;
}
}
@@ -499,7 +499,7 @@ static void init_affinity_node(int cbe)
if (strcmp(name, "spe") == 0) {
spu = devnode_spu(cbe, vic_dn);
avoid_ph = last_spu_dn->linux_phandle;
avoid_ph = last_spu_dn->phandle;
} else {
/*
* "mic-tm" and "bif0" nodes do not have
@@ -514,7 +514,7 @@ static void init_affinity_node(int cbe)
last_spu->has_mem_affinity = 1;
spu->has_mem_affinity = 1;
}
avoid_ph = vic_dn->linux_phandle;
avoid_ph = vic_dn->phandle;
}
list_add_tail(&spu->aff_list, &last_spu->aff_list);


@@ -304,8 +304,8 @@ static struct cpufreq_driver pas_cpufreq_driver = {
static int __init pas_cpufreq_init(void)
{
if (!machine_is_compatible("PA6T-1682M") &&
!machine_is_compatible("pasemi,pwrficient"))
if (!of_machine_is_compatible("PA6T-1682M") &&
!of_machine_is_compatible("pasemi,pwrficient"))
return -ENODEV;
return cpufreq_register_driver(&pas_cpufreq_driver);


@@ -657,31 +657,31 @@ static int __init pmac_cpufreq_setup(void)
cur_freq = (*value) / 1000;
/* Check for 7447A based MacRISC3 */
if (machine_is_compatible("MacRISC3") &&
if (of_machine_is_compatible("MacRISC3") &&
of_get_property(cpunode, "dynamic-power-step", NULL) &&
PVR_VER(mfspr(SPRN_PVR)) == 0x8003) {
pmac_cpufreq_init_7447A(cpunode);
/* Check for other MacRISC3 machines */
} else if (machine_is_compatible("PowerBook3,4") ||
machine_is_compatible("PowerBook3,5") ||
machine_is_compatible("MacRISC3")) {
} else if (of_machine_is_compatible("PowerBook3,4") ||
of_machine_is_compatible("PowerBook3,5") ||
of_machine_is_compatible("MacRISC3")) {
pmac_cpufreq_init_MacRISC3(cpunode);
/* Else check for iBook2 500/600 */
} else if (machine_is_compatible("PowerBook4,1")) {
} else if (of_machine_is_compatible("PowerBook4,1")) {
hi_freq = cur_freq;
low_freq = 400000;
set_speed_proc = pmu_set_cpu_speed;
is_pmu_based = 1;
}
/* Else check for TiPb 550 */
else if (machine_is_compatible("PowerBook3,3") && cur_freq == 550000) {
else if (of_machine_is_compatible("PowerBook3,3") && cur_freq == 550000) {
hi_freq = cur_freq;
low_freq = 500000;
set_speed_proc = pmu_set_cpu_speed;
is_pmu_based = 1;
}
/* Else check for TiPb 400 & 500 */
else if (machine_is_compatible("PowerBook3,2")) {
else if (of_machine_is_compatible("PowerBook3,2")) {
/* We only know about the 400 MHz and the 500Mhz model
* they both have 300 MHz as low frequency
*/


@@ -398,11 +398,11 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpus)
int rc = -ENODEV;
/* Check supported platforms */
if (machine_is_compatible("PowerMac8,1") ||
machine_is_compatible("PowerMac8,2") ||
machine_is_compatible("PowerMac9,1"))
if (of_machine_is_compatible("PowerMac8,1") ||
of_machine_is_compatible("PowerMac8,2") ||
of_machine_is_compatible("PowerMac9,1"))
use_volts_smu = 1;
else if (machine_is_compatible("PowerMac11,2"))
else if (of_machine_is_compatible("PowerMac11,2"))
use_volts_vdnap = 1;
else
return -ENODEV;
@@ -729,9 +729,9 @@ static int __init g5_cpufreq_init(void)
return -ENODEV;
}
if (machine_is_compatible("PowerMac7,2") ||
machine_is_compatible("PowerMac7,3") ||
machine_is_compatible("RackMac3,1"))
if (of_machine_is_compatible("PowerMac7,2") ||
of_machine_is_compatible("PowerMac7,3") ||
of_machine_is_compatible("RackMac3,1"))
rc = g5_pm72_cpufreq_init(cpus);
#ifdef CONFIG_PMAC_SMU
else


@@ -2426,7 +2426,7 @@ static int __init probe_motherboard(void)
}
}
for(i=0; i<ARRAY_SIZE(pmac_mb_defs); i++) {
if (machine_is_compatible(pmac_mb_defs[i].model_string)) {
if (of_machine_is_compatible(pmac_mb_defs[i].model_string)) {
pmac_mb = pmac_mb_defs[i];
goto found;
}


@@ -842,7 +842,7 @@ struct pmf_function *__pmf_find_function(struct device_node *target,
list_for_each_entry(func, &dev->functions, link) {
if (name && strcmp(name, func->name))
continue;
if (func->phandle && target->node != func->phandle)
if (func->phandle && target->phandle != func->phandle)
continue;
if ((func->flags & flags) == 0)
continue;


@@ -693,9 +693,9 @@ static void __init smp_core99_setup(int ncpus)
#ifdef CONFIG_PPC64
/* i2c based HW sync on some G5s */
if (machine_is_compatible("PowerMac7,2") ||
machine_is_compatible("PowerMac7,3") ||
machine_is_compatible("RackMac3,1"))
if (of_machine_is_compatible("PowerMac7,2") ||
of_machine_is_compatible("PowerMac7,3") ||
of_machine_is_compatible("RackMac3,1"))
smp_core99_setup_i2c_hwsync(ncpus);
/* pfunc based HW sync on recent G5s */
@@ -713,7 +713,7 @@ static void __init smp_core99_setup(int ncpus)
#else /* CONFIG_PPC64 */
/* GPIO based HW sync on ppc32 Core99 */
if (pmac_tb_freeze == NULL && !machine_is_compatible("MacRISC4")) {
if (pmac_tb_freeze == NULL && !of_machine_is_compatible("MacRISC4")) {
struct device_node *cpu;
const u32 *tbprop = NULL;
@@ -750,7 +750,7 @@ static void __init smp_core99_setup(int ncpus)
#endif
/* 32 bits SMP can't NAP */
if (!machine_is_compatible("MacRISC4"))
if (!of_machine_is_compatible("MacRISC4"))
powersave_nap = 0;
}
@@ -852,7 +852,7 @@ static void __devinit smp_core99_setup_cpu(int cpu_nr)
/* If we didn't start the second CPU, we must take
* it off the bus
*/
if (machine_is_compatible("MacRISC4") &&
if (of_machine_is_compatible("MacRISC4") &&
num_online_cpus() < 2)
g5_phy_disable_cpu1();
#endif /* CONFIG_PPC64 */


@@ -317,9 +317,9 @@ void __init pmac_calibrate_decr(void)
* calibration. That's better since the VIA itself seems
* to be slightly off. --BenH
*/
if (!machine_is_compatible("MacRISC2") &&
!machine_is_compatible("MacRISC3") &&
!machine_is_compatible("MacRISC4"))
if (!of_machine_is_compatible("MacRISC2") &&
!of_machine_is_compatible("MacRISC3") &&
!of_machine_is_compatible("MacRISC4"))
if (via_calibrate_decr())
return;
@@ -328,7 +328,7 @@ void __init pmac_calibrate_decr(void)
* probably implement calibration based on the KL timer on these
* machines anyway... -BenH
*/
if (machine_is_compatible("PowerMac3,5"))
if (of_machine_is_compatible("PowerMac3,5"))
if (via_calibrate_decr())
return;
#endif


@@ -132,9 +132,9 @@ void udbg_scc_init(int force_scc)
scc_inittab[1] = in_8(sccc);
out_8(sccc, 12);
scc_inittab[3] = in_8(sccc);
} else if (machine_is_compatible("RackMac1,1")
|| machine_is_compatible("RackMac1,2")
|| machine_is_compatible("MacRISC4")) {
} else if (of_machine_is_compatible("RackMac1,1")
|| of_machine_is_compatible("RackMac1,2")
|| of_machine_is_compatible("MacRISC4")) {
/* Xserves and G5s default to 57600 */
scc_inittab[1] = 0;
scc_inittab[3] = 0;


@@ -56,9 +56,9 @@ static inline void grackle_set_loop_snoop(struct pci_controller *bp, int enable)
void __init setup_grackle(struct pci_controller *hose)
{
setup_indirect_pci(hose, 0xfec00000, 0xfee00000, 0);
if (machine_is_compatible("PowerMac1,1"))
if (of_machine_is_compatible("PowerMac1,1"))
ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS);
if (machine_is_compatible("AAPL,PowerBook1998"))
if (of_machine_is_compatible("AAPL,PowerBook1998"))
grackle_set_loop_snoop(hose, 1);
#if 0 /* Disabled for now, HW problems ??? */
grackle_set_stg(hose, 1);


@@ -53,8 +53,8 @@ struct stat {
ino_t st_ino;
mode_t st_mode;
short st_nlink;
uid_t st_uid;
gid_t st_gid;
uid16_t st_uid;
gid16_t st_gid;
unsigned short st_rdev;
off_t st_size;
time_t st_atime;


@@ -59,7 +59,7 @@ static int __cpu_find_by(int (*compare)(int, int, void *), void *compare_arg,
cur_inst = 0;
for_each_node_by_type(dp, "cpu") {
int err = check_cpu_node(dp->node, &cur_inst,
int err = check_cpu_node(dp->phandle, &cur_inst,
compare, compare_arg,
prom_node, mid);
if (!err) {


@@ -11,6 +11,10 @@ static inline bool kstack_valid(struct thread_info *tp, unsigned long sp)
{
unsigned long base = (unsigned long) tp;
/* Stack pointer must be 16-byte aligned. */
if (sp & (16UL - 1))
return false;
if (sp >= (base + sizeof(struct thread_info)) &&
sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf)))
return true;


@@ -105,7 +105,7 @@ static unsigned long of_bus_sbus_get_flags(const u32 *addr, unsigned long flags)
static int of_bus_ambapp_match(struct device_node *np)
{
return !strcmp(np->name, "ambapp");
return !strcmp(np->type, "ambapp");
}
static void of_bus_ambapp_count_cells(struct device_node *child,
@@ -433,7 +433,7 @@ build_resources:
if (!parent)
dev_set_name(&op->dev, "root");
else
dev_set_name(&op->dev, "%08x", dp->node);
dev_set_name(&op->dev, "%08x", dp->phandle);
if (of_device_register(op)) {
printk("%s: Could not register of device.\n",


@@ -676,7 +676,7 @@ static struct of_device * __init scan_one_device(struct device_node *dp,
if (!parent)
dev_set_name(&op->dev, "root");
else
dev_set_name(&op->dev, "%08x", dp->node);
dev_set_name(&op->dev, "%08x", dp->phandle);
if (of_device_register(op)) {
printk("%s: Could not register of device.\n",


@@ -247,6 +247,7 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
struct pci_bus *bus, int devfn)
{
struct dev_archdata *sd;
struct pci_slot *slot;
struct of_device *op;
struct pci_dev *dev;
const char *type;
@@ -286,6 +287,11 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
dev->dev.bus = &pci_bus_type;
dev->devfn = devfn;
dev->multifunction = 0; /* maybe a lie? */
set_pcie_port_type(dev);
list_for_each_entry(slot, &dev->bus->slots, list)
if (PCI_SLOT(dev->devfn) == slot->number)
dev->slot = slot;
dev->vendor = of_getintprop_default(node, "vendor-id", 0xffff);
dev->device = of_getintprop_default(node, "device-id", 0xffff);
@@ -322,6 +328,7 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
dev->current_state = 4; /* unknown power state */
dev->error_state = pci_channel_io_normal;
dev->dma_mask = 0xffffffff;
if (!strcmp(node->name, "pci")) {
/* a PCI-PCI bridge */


@@ -4,9 +4,6 @@
#include <linux/spinlock.h>
#include <asm/prom.h>
extern struct device_node *allnodes; /* temporary while merging */
extern rwlock_t devtree_lock; /* temporary while merging */
extern void * prom_early_alloc(unsigned long size);
extern void irq_trans_init(struct device_node *dp);


@@ -37,18 +37,6 @@ EXPORT_SYMBOL(of_console_path);
char *of_console_options;
EXPORT_SYMBOL(of_console_options);
struct device_node *of_find_node_by_phandle(phandle handle)
{
struct device_node *np;
for (np = allnodes; np; np = np->allnext)
if (np->node == handle)
break;
return np;
}
EXPORT_SYMBOL(of_find_node_by_phandle);
int of_getintprop_default(struct device_node *np, const char *name, int def)
{
struct property *prop;
@@ -89,7 +77,7 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len
void *old_val = prop->value;
int ret;
ret = prom_setprop(dp->node, name, val, len);
ret = prom_setprop(dp->phandle, name, val, len);
err = -EINVAL;
if (ret >= 0) {
@@ -236,7 +224,7 @@ static struct device_node * __init prom_create_node(phandle node,
dp->name = get_one_property(node, "name");
dp->type = get_one_property(node, "device_type");
dp->node = node;
dp->phandle = node;
dp->properties = build_prop_list(node);
@@ -313,7 +301,7 @@ void __init prom_build_devicetree(void)
nextp = &allnodes->allnext;
allnodes->child = prom_build_tree(allnodes,
prom_getchild(allnodes->node),
prom_getchild(allnodes->phandle),
&nextp);
of_console_init();


@@ -370,7 +370,7 @@ static int __cpuinit smp_boot_one_cpu(unsigned int cpu)
} else {
struct device_node *dp = of_find_node_by_cpuid(cpu);
prom_startcpu(dp->node, entry, cookie);
prom_startcpu(dp->phandle, entry, cookie);
}
for (timeout = 0; timeout < 50000; timeout++) {


@@ -191,10 +191,12 @@ tsb_dtlb_load:
tsb_itlb_load:
/* Executable bit must be set. */
661: andcc %g5, _PAGE_EXEC_4U, %g0
.section .sun4v_1insn_patch, "ax"
661: sethi %hi(_PAGE_EXEC_4U), %g4
andcc %g5, %g4, %g0
.section .sun4v_2insn_patch, "ax"
.word 661b
andcc %g5, _PAGE_EXEC_4V, %g0
nop
.previous
be,pn %xcc, tsb_do_fault


@@ -450,6 +450,8 @@ struct thread_struct {
struct perf_event *ptrace_bps[HBP_NUM];
/* Debug status used for traps, single steps, etc... */
unsigned long debugreg6;
/* Keep track of the exact dr7 value set by the user */
unsigned long ptrace_dr7;
/* Fault info: */
unsigned long cr2;
unsigned long trap_no;


@@ -1342,14 +1342,6 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Workstation W8000"),
},
},
{
.callback = force_acpi_ht,
.ident = "ASUS P2B-DS",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
DMI_MATCH(DMI_BOARD_NAME, "P2B-DS"),
},
},
{
.callback = force_acpi_ht,
.ident = "ASUS CUR-DLS",


@@ -212,25 +212,6 @@ static int arch_check_va_in_kernelspace(unsigned long va, u8 hbp_len)
return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}
/*
* Store a breakpoint's encoded address, length, and type.
*/
static int arch_store_info(struct perf_event *bp)
{
struct arch_hw_breakpoint *info = counter_arch_bp(bp);
/*
* For kernel-addresses, either the address or symbol name can be
* specified.
*/
if (info->name)
info->address = (unsigned long)
kallsyms_lookup_name(info->name);
if (info->address)
return 0;
return -EINVAL;
}
int arch_bp_generic_fields(int x86_len, int x86_type,
int *gen_len, int *gen_type)
{
@@ -362,10 +343,13 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp,
return ret;
}
ret = arch_store_info(bp);
if (ret < 0)
return ret;
/*
* For kernel-addresses, either the address or symbol name can be
* specified.
*/
if (info->name)
info->address = (unsigned long)
kallsyms_lookup_name(info->name);
/*
* Check that the low-order bits of the address are appropriate
* for the alignment implied by len.


@@ -702,7 +702,7 @@ static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
} else if (n == 6) {
val = thread->debugreg6;
} else if (n == 7) {
val = ptrace_get_dr7(thread->ptrace_bps);
val = thread->ptrace_dr7;
}
return val;
}
@@ -778,8 +778,11 @@ int ptrace_set_debugreg(struct task_struct *tsk, int n, unsigned long val)
return rc;
}
/* All that's left is DR7 */
if (n == 7)
if (n == 7) {
rc = ptrace_write_dr7(tsk, val);
if (!rc)
thread->ptrace_dr7 = val;
}
ret_path:
return rc;


@@ -1147,7 +1147,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
*/
static inline bool queue_should_plug(struct request_queue *q)
{
return !(blk_queue_nonrot(q) && blk_queue_queuing(q));
return !(blk_queue_nonrot(q) && blk_queue_tagged(q));
}
static int __make_request(struct request_queue *q, struct bio *bio)
@@ -1859,15 +1859,8 @@ void blk_dequeue_request(struct request *rq)
* and to it is freed is accounted as io that is in progress at
* the driver side.
*/
if (blk_account_rq(rq)) {
if (blk_account_rq(rq))
q->in_flight[rq_is_sync(rq)]++;
/*
* Mark this device as supporting hardware queuing, if
* we have more IOs in flight than 4.
*/
if (!blk_queue_queuing(q) && queue_in_flight(q) > 4)
set_bit(QUEUE_FLAG_CQ, &q->queue_flags);
}
}
/**


@@ -935,6 +935,7 @@ static int dock_add(acpi_handle handle)
struct platform_device *dd;
id = dock_station_count;
memset(&ds, 0, sizeof(ds));
dd = platform_device_register_data(NULL, "dock", id, &ds, sizeof(ds));
if (IS_ERR(dd))
return PTR_ERR(dd);


@@ -110,6 +110,14 @@ static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
(void *)2},
{ set_max_cstate, "Pavilion zv5000", {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")},
(void *)1},
{ set_max_cstate, "Asus L8400B", {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
(void *)1},
{},
};
@@ -872,12 +880,14 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
return(acpi_idle_enter_c1(dev, state));
local_irq_disable();
current_thread_info()->status &= ~TS_POLLING;
/*
* TS_POLLING-cleared state must be visible before we test
* NEED_RESCHED:
*/
smp_mb();
if (cx->entry_method != ACPI_CSTATE_FFH) {
current_thread_info()->status &= ~TS_POLLING;
/*
* TS_POLLING-cleared state must be visible before we test
* NEED_RESCHED:
*/
smp_mb();
}
if (unlikely(need_resched())) {
current_thread_info()->status |= TS_POLLING;
@@ -957,12 +967,14 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
}
local_irq_disable();
current_thread_info()->status &= ~TS_POLLING;
/*
* TS_POLLING-cleared state must be visible before we test
* NEED_RESCHED:
*/
smp_mb();
if (cx->entry_method != ACPI_CSTATE_FFH) {
current_thread_info()->status &= ~TS_POLLING;
/*
* TS_POLLING-cleared state must be visible before we test
* NEED_RESCHED:
*/
smp_mb();
}
if (unlikely(need_resched())) {
current_thread_info()->status |= TS_POLLING;


@@ -125,6 +125,8 @@ acpi_processor_eval_pdc(acpi_handle handle, struct acpi_object_list *pdc_in)
return status;
}
static int early_pdc_done;
void acpi_processor_set_pdc(acpi_handle handle)
{
struct acpi_object_list *obj_list;
@@ -132,6 +134,9 @@ void acpi_processor_set_pdc(acpi_handle handle)
if (arch_has_acpi_pdc() == false)
return;
if (early_pdc_done)
return;
obj_list = acpi_processor_alloc_pdc();
if (!obj_list)
return;
@@ -151,6 +156,13 @@ static int set_early_pdc_optin(const struct dmi_system_id *id)
return 0;
}
static int param_early_pdc_optin(char *s)
{
early_pdc_optin = 1;
return 1;
}
__setup("acpi_early_pdc_eval", param_early_pdc_optin);
static struct dmi_system_id __cpuinitdata early_pdc_optin_table[] = {
{
set_early_pdc_optin, "HP Envy", {
@@ -192,4 +204,6 @@ void __init acpi_early_processor_set_pdc(void)
acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX,
early_init_pdc, NULL, NULL, NULL);
early_pdc_done = 1;
}


@@ -413,7 +413,11 @@ static int acpi_processor_get_performance_info(struct acpi_processor *pr)
if (result)
goto update_bios;
return 0;
/* We need to call _PPC once when cpufreq starts */
if (ignore_ppc != 1)
result = acpi_processor_get_platform_limit(pr);
return result;
/*
* Having _PPC but missing frequencies (_PSS, _PCT) is a very good hint that


@@ -1336,9 +1336,25 @@ static int acpi_bus_scan(acpi_handle handle, struct acpi_bus_ops *ops,
if (child)
*child = device;
return 0;
if (device)
return 0;
else
return -ENODEV;
}
/*
* acpi_bus_add and acpi_bus_start
*
* scan a given ACPI tree and (probably recently hot-plugged)
* create and add or starts found devices.
*
* If no devices were found -ENODEV is returned which does not
* mean that this is a real error, there just have been no suitable
* ACPI objects in the table trunk from which the kernel could create
* a device and add/start an appropriate driver.
*/
int
acpi_bus_add(struct acpi_device **child,
struct acpi_device *parent, acpi_handle handle, int type)
@@ -1348,8 +1364,7 @@ acpi_bus_add(struct acpi_device **child,
memset(&ops, 0, sizeof(ops));
ops.acpi_op_add = 1;
acpi_bus_scan(handle, &ops, child);
return 0;
return acpi_bus_scan(handle, &ops, child);
}
EXPORT_SYMBOL(acpi_bus_add);
@@ -1357,11 +1372,13 @@ int acpi_bus_start(struct acpi_device *device)
{
struct acpi_bus_ops ops;
if (!device)
return -EINVAL;
memset(&ops, 0, sizeof(ops));
ops.acpi_op_start = 1;
acpi_bus_scan(device->handle, &ops, NULL);
return 0;
return acpi_bus_scan(device->handle, &ops, NULL);
}
EXPORT_SYMBOL(acpi_bus_start);


@@ -213,7 +213,7 @@ acpi_table_parse_entries(char *id,
unsigned long table_end;
acpi_size tbl_size;
if (acpi_disabled)
if (acpi_disabled && !acpi_ht)
return -ENODEV;
if (!handler)
@@ -280,7 +280,7 @@ int __init acpi_table_parse(char *id, acpi_table_handler handler)
struct acpi_table_header *table = NULL;
acpi_size tbl_size;
if (acpi_disabled)
if (acpi_disabled && !acpi_ht)
return -ENODEV;
if (!handler)


@@ -3082,8 +3082,16 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ahci_save_initial_config(pdev, hpriv);
/* prepare host */
if (hpriv->cap & HOST_CAP_NCQ)
pi.flags |= ATA_FLAG_NCQ | ATA_FLAG_FPDMA_AA;
if (hpriv->cap & HOST_CAP_NCQ) {
pi.flags |= ATA_FLAG_NCQ;
/* Auto-activate optimization is supposed to be supported on
all AHCI controllers indicating NCQ support, but it seems
to be broken at least on some NVIDIA MCP79 chipsets.
Until we get info on which NVIDIA chipsets don't have this
issue, if any, disable AA on all NVIDIA AHCIs. */
if (pdev->vendor != PCI_VENDOR_ID_NVIDIA)
pi.flags |= ATA_FLAG_FPDMA_AA;
}
if (hpriv->cap & HOST_CAP_PMP)
pi.flags |= ATA_FLAG_PMP;


@@ -59,6 +59,8 @@ static void class_release(struct kobject *kobj)
else
pr_debug("class '%s' does not have a release() function, "
"be careful\n", class->name);
kfree(cp);
}
static struct sysfs_ops class_sysfs_ops = {


@@ -243,10 +243,12 @@ static int index_to_minor(int index)
static int __devinit virtblk_probe(struct virtio_device *vdev)
{
struct virtio_blk *vblk;
struct request_queue *q;
int err;
u64 cap;
u32 v;
u32 blk_size, sg_elems;
u32 v, blk_size, sg_elems, opt_io_size;
u16 min_io_size;
u8 physical_block_exp, alignment_offset;
if (index_to_minor(index) >= 1 << MINORBITS)
return -ENOSPC;
@@ -293,13 +295,13 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
goto out_mempool;
}
vblk->disk->queue = blk_init_queue(do_virtblk_request, &vblk->lock);
if (!vblk->disk->queue) {
q = vblk->disk->queue = blk_init_queue(do_virtblk_request, &vblk->lock);
if (!q) {
err = -ENOMEM;
goto out_put_disk;
}
vblk->disk->queue->queuedata = vblk;
q->queuedata = vblk;
if (index < 26) {
sprintf(vblk->disk->disk_name, "vd%c", 'a' + index % 26);
@@ -323,10 +325,10 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
/* If barriers are supported, tell block layer that queue is ordered */
if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH))
blk_queue_ordered(vblk->disk->queue, QUEUE_ORDERED_DRAIN_FLUSH,
blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH,
virtblk_prepare_flush);
else if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER))
blk_queue_ordered(vblk->disk->queue, QUEUE_ORDERED_TAG, NULL);
blk_queue_ordered(q, QUEUE_ORDERED_TAG, NULL);
/* If disk is read-only in the host, the guest should obey */
if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
@@ -345,14 +347,14 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
set_capacity(vblk->disk, cap);
/* We can handle whatever the host told us to handle. */
blk_queue_max_phys_segments(vblk->disk->queue, vblk->sg_elems-2);
blk_queue_max_hw_segments(vblk->disk->queue, vblk->sg_elems-2);
blk_queue_max_phys_segments(q, vblk->sg_elems-2);
blk_queue_max_hw_segments(q, vblk->sg_elems-2);
/* No need to bounce any requests */
blk_queue_bounce_limit(vblk->disk->queue, BLK_BOUNCE_ANY);
blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
/* No real sector limit. */
blk_queue_max_sectors(vblk->disk->queue, -1U);
blk_queue_max_sectors(q, -1U);
/* Host can optionally specify maximum segment size and number of
* segments. */
@@ -360,16 +362,45 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
offsetof(struct virtio_blk_config, size_max),
&v);
if (!err)
blk_queue_max_segment_size(vblk->disk->queue, v);
blk_queue_max_segment_size(q, v);
else
blk_queue_max_segment_size(vblk->disk->queue, -1U);
blk_queue_max_segment_size(q, -1U);
/* Host can optionally specify the block size of the device */
err = virtio_config_val(vdev, VIRTIO_BLK_F_BLK_SIZE,
offsetof(struct virtio_blk_config, blk_size),
&blk_size);
if (!err)
blk_queue_logical_block_size(vblk->disk->queue, blk_size);
blk_queue_logical_block_size(q, blk_size);
else
blk_size = queue_logical_block_size(q);
/* Use topology information if available */
err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
offsetof(struct virtio_blk_config, physical_block_exp),
&physical_block_exp);
if (!err && physical_block_exp)
blk_queue_physical_block_size(q,
blk_size * (1 << physical_block_exp));
err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
offsetof(struct virtio_blk_config, alignment_offset),
&alignment_offset);
if (!err && alignment_offset)
blk_queue_alignment_offset(q, blk_size * alignment_offset);
err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
offsetof(struct virtio_blk_config, min_io_size),
&min_io_size);
if (!err && min_io_size)
blk_queue_io_min(q, blk_size * min_io_size);
err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
offsetof(struct virtio_blk_config, opt_io_size),
&opt_io_size);
if (!err && opt_io_size)
blk_queue_io_opt(q, blk_size * opt_io_size);
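/* Worked example with assumed values: blk_size = 512 and
 * physical_block_exp = 3 give a 512 << 3 = 4096 byte physical block;
 * min_io_size and opt_io_size are likewise expressed in logical blocks
 * and scaled by blk_size before being passed to the block layer. */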
add_disk(vblk->disk);
return 0;
@@ -412,7 +443,7 @@ static struct virtio_device_id id_table[] = {
static unsigned int features[] = {
VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX,
VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_FLUSH
VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY
};
/*


@@ -666,6 +666,14 @@ config VIRTIO_CONSOLE
help
Virtio console for use with lguest and other hypervisors.
Also serves as a general-purpose serial device for data
transfer between the guest and host. Character devices at
/dev/vportNpn will be created when corresponding ports are
found, where N is the device number and n is the port number
within that device. If specified by the host, a sysfs
attribute called 'name' will be populated with a name for
the port which can be used by udev scripts to create a
symlink to the device.
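# A udev rule using the 'name' attribute described above might look like
# this (illustrative sketch; exact syntax depends on the udev version):
#   KERNEL=="vport*", ATTRS{name}=="?*", SYMLINK+="virtio-ports/$attr{name}"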
config HVCS
tristate "IBM Hypervisor Virtual Console Server support"


@@ -99,7 +99,7 @@ static int hvc_beat_config(char *p)
static int __init hvc_beat_console_init(void)
{
if (hvc_beat_useit && machine_is_compatible("Beat")) {
if (hvc_beat_useit && of_machine_is_compatible("Beat")) {
hvc_instantiate(0, 0, &hvc_beat_get_put_ops);
}
return 0;

File diff suppressed because it is too large


@@ -21,7 +21,7 @@
#define DRV_NAME "cs5535-clockevt"
static int timer_irq = CONFIG_CS5535_MFGPT_DEFAULT_IRQ;
static int timer_irq;
module_param_named(irq, timer_irq, int, 0644);
MODULE_PARM_DESC(irq, "Which IRQ to use for the clock source MFGPT ticks.");


@@ -598,6 +598,50 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
return mode;
}
/*
* EDID is delightfully ambiguous about how interlaced modes are to be
* encoded. Our internal representation is of frame height, but some
* HDTV detailed timings are encoded as field height.
*
* The format list here is from CEA, in frame size. Technically we
* should be checking refresh rate too. Whatever.
*/
static void
drm_mode_do_interlace_quirk(struct drm_display_mode *mode,
struct detailed_pixel_timing *pt)
{
int i;
static const struct {
int w, h;
} cea_interlaced[] = {
{ 1920, 1080 },
{ 720, 480 },
{ 1440, 480 },
{ 2880, 480 },
{ 720, 576 },
{ 1440, 576 },
{ 2880, 576 },
};
static const int n_sizes =
sizeof(cea_interlaced)/sizeof(cea_interlaced[0]);
if (!(pt->misc & DRM_EDID_PT_INTERLACED))
return;
for (i = 0; i < n_sizes; i++) {
if ((mode->hdisplay == cea_interlaced[i].w) &&
(mode->vdisplay == cea_interlaced[i].h / 2)) {
mode->vdisplay *= 2;
mode->vsync_start *= 2;
mode->vsync_end *= 2;
mode->vtotal *= 2;
mode->vtotal |= 1;
}
}
mode->flags |= DRM_MODE_FLAG_INTERLACE;
}
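/* Worked example of the quirk above: a detailed timing advertising
 * 1920x540 interlaced matches the { 1920, 1080 } entry, so vdisplay is
 * doubled to 1080, vsync_start/vsync_end/vtotal are doubled, and vtotal
 * is made odd, recovering frame-height timings from field-height ones. */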
/**
* drm_mode_detailed - create a new mode from an EDID detailed timing section
* @dev: DRM device (needed to create new mode)
@@ -680,8 +724,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
drm_mode_set_name(mode);
if (pt->misc & DRM_EDID_PT_INTERLACED)
mode->flags |= DRM_MODE_FLAG_INTERLACE;
drm_mode_do_interlace_quirk(mode, pt);
if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;


@@ -176,6 +176,8 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
static int i915_drm_freeze(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
pci_save_state(dev->pdev);
/* If KMS is active, we do the leavevt stuff here */
@@ -191,17 +193,12 @@ static int i915_drm_freeze(struct drm_device *dev)
i915_save_state(dev);
return 0;
}
static void i915_drm_suspend(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
intel_opregion_free(dev, 1);
/* Modeset on resume, not lid events */
dev_priv->modeset_on_lid = 0;
return 0;
}
static int i915_suspend(struct drm_device *dev, pm_message_t state)
@@ -221,8 +218,6 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
if (error)
return error;
i915_drm_suspend(dev);
if (state.event == PM_EVENT_SUSPEND) {
/* Shut down the device */
pci_disable_device(dev->pdev);
@@ -237,6 +232,10 @@ static int i915_drm_thaw(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int error = 0;
i915_restore_state(dev);
intel_opregion_init(dev, 1);
/* KMS EnterVT equivalent */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
mutex_lock(&dev->struct_mutex);
@@ -263,10 +262,6 @@ static int i915_resume(struct drm_device *dev)
pci_set_master(dev->pdev);
i915_restore_state(dev);
intel_opregion_init(dev, 1);
return i915_drm_thaw(dev);
}
@@ -423,8 +418,6 @@ static int i915_pm_suspend(struct device *dev)
if (error)
return error;
i915_drm_suspend(drm_dev);
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3hot);
@@ -464,13 +457,8 @@ static int i915_pm_poweroff(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
int error;
error = i915_drm_freeze(drm_dev);
if (!error)
i915_drm_suspend(drm_dev);
return error;
return i915_drm_freeze(drm_dev);
}
const struct dev_pm_ops i915_pm_ops = {


@@ -636,6 +636,13 @@ static const struct dmi_system_id bad_lid_status[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"),
},
},
{
.ident = "Clevo M5x0N",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."),
DMI_MATCH(DMI_BOARD_NAME, "M5x0N"),
},
},
{ }
};


@@ -5861,13 +5861,12 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nvbios *bios = &dev_priv->VBIOS;
struct init_exec iexec = { true, false };
unsigned long flags;
spin_lock_irqsave(&bios->lock, flags);
mutex_lock(&bios->lock);
bios->display.output = dcbent;
parse_init_table(bios, table, &iexec);
bios->display.output = NULL;
spin_unlock_irqrestore(&bios->lock, flags);
mutex_unlock(&bios->lock);
}
static bool NVInitVBIOS(struct drm_device *dev)
@@ -5876,7 +5875,7 @@ static bool NVInitVBIOS(struct drm_device *dev)
struct nvbios *bios = &dev_priv->VBIOS;
memset(bios, 0, sizeof(struct nvbios));
spin_lock_init(&bios->lock);
mutex_init(&bios->lock);
bios->dev = dev;
if (!NVShadowVBIOS(dev, bios->data))


@@ -205,7 +205,7 @@ struct nvbios {
struct drm_device *dev;
struct nouveau_bios_info pub;
spinlock_t lock;
struct mutex lock;
uint8_t data[NV_PROM_SIZE];
unsigned int length;


@@ -583,6 +583,7 @@ struct drm_nouveau_private {
uint64_t vm_end;
struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
int vm_vram_pt_nr;
uint64_t vram_sys_base;
/* the mtrr covering the FB */
int fb_mtrr;


@@ -285,53 +285,50 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
uint32_t flags, uint64_t phys)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj **pgt;
unsigned psz, pfl, pages;
struct nouveau_gpuobj *pgt;
unsigned block;
int i;
if (virt >= dev_priv->vm_gart_base &&
(virt + size) < (dev_priv->vm_gart_base + dev_priv->vm_gart_size)) {
psz = 12;
pgt = &dev_priv->gart_info.sg_ctxdma;
pfl = 0x21;
virt -= dev_priv->vm_gart_base;
} else
if (virt >= dev_priv->vm_vram_base &&
(virt + size) < (dev_priv->vm_vram_base + dev_priv->vm_vram_size)) {
psz = 16;
pgt = dev_priv->vm_vram_pt;
pfl = 0x01;
virt -= dev_priv->vm_vram_base;
} else {
NV_ERROR(dev, "Invalid address: 0x%16llx-0x%16llx\n",
virt, virt + size - 1);
return -EINVAL;
virt = ((virt - dev_priv->vm_vram_base) >> 16) << 1;
size = (size >> 16) << 1;
phys |= ((uint64_t)flags << 32);
phys |= 1;
if (dev_priv->vram_sys_base) {
phys += dev_priv->vram_sys_base;
phys |= 0x30;
}
pages = size >> psz;
dev_priv->engine.instmem.prepare_access(dev, true);
if (flags & 0x80000000) {
while (pages--) {
struct nouveau_gpuobj *pt = pgt[virt >> 29];
unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1;
while (size) {
unsigned offset_h = upper_32_bits(phys);
unsigned offset_l = lower_32_bits(phys);
unsigned pte, end;
nv_wo32(dev, pt, pte++, 0x00000000);
nv_wo32(dev, pt, pte++, 0x00000000);
virt += (1 << psz);
for (i = 7; i >= 0; i--) {
block = 1 << (i + 1);
if (size >= block && !(virt & (block - 1)))
break;
}
} else {
while (pages--) {
struct nouveau_gpuobj *pt = pgt[virt >> 29];
unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1;
unsigned offset_h = upper_32_bits(phys) & 0xff;
unsigned offset_l = lower_32_bits(phys);
offset_l |= (i << 7);
nv_wo32(dev, pt, pte++, offset_l | pfl);
nv_wo32(dev, pt, pte++, offset_h | flags);
phys += block << 15;
size -= block;
phys += (1 << psz);
virt += (1 << psz);
while (block) {
pgt = dev_priv->vm_vram_pt[virt >> 14];
pte = virt & 0x3ffe;
end = pte + block;
if (end > 16384)
end = 16384;
block -= (end - pte);
virt += (end - pte);
while (pte < end) {
nv_wo32(dev, pgt, pte++, offset_l);
nv_wo32(dev, pgt, pte++, offset_h);
}
}
}
dev_priv->engine.instmem.finish_access(dev);
@@ -356,7 +353,41 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
void
nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
{
nv50_mem_vm_bind_linear(dev, virt, size, 0x80000000, 0);
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *pgt;
unsigned pages, pte, end;
virt -= dev_priv->vm_vram_base;
pages = (size >> 16) << 1;
dev_priv->engine.instmem.prepare_access(dev, true);
while (pages) {
pgt = dev_priv->vm_vram_pt[virt >> 29];
pte = (virt & 0x1ffe0000ULL) >> 15;
end = pte + pages;
if (end > 16384)
end = 16384;
pages -= (end - pte);
virt += (end - pte) << 15;
while (pte < end)
nv_wo32(dev, pgt, pte++, 0);
}
dev_priv->engine.instmem.finish_access(dev);
nv_wr32(dev, 0x100c80, 0x00050001);
if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
return;
}
nv_wr32(dev, 0x100c80, 0x00000001);
if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
}
}
/*


@@ -119,7 +119,7 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
uint8_t saved_seq1, saved_pi, saved_rpc1;
uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode;
uint8_t saved_palette0[3], saved_palette_mask;
uint32_t saved_rtest_ctrl, saved_rgen_ctrl;
int i;
@@ -135,6 +135,9 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
/* only implemented for head A for now */
NVSetOwner(dev, 0);
saved_cr_mode = NVReadVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX);
NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode | 0x80);
saved_seq1 = NVReadVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX);
NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1 & ~0x20);
@@ -203,6 +206,7 @@ out:
NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi);
NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1);
NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1);
NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode);
if (blue == 0x18) {
NV_INFO(dev, "Load detected on head A\n");


@@ -579,6 +579,8 @@ static void nv17_tv_restore(struct drm_encoder *encoder)
nouveau_encoder(encoder)->restore.output);
nv17_tv_state_load(dev, &to_tv_enc(encoder)->saved_state);
nouveau_encoder(encoder)->last_dpms = NV_DPMS_CLEARED;
}
static int nv17_tv_create_resources(struct drm_encoder *encoder,


@@ -76,6 +76,11 @@ nv50_instmem_init(struct drm_device *dev)
for (i = 0x1700; i <= 0x1710; i += 4)
priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i);
if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac)
dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10) << 12;
else
dev_priv->vram_sys_base = 0;
/* Reserve the last MiB of VRAM, we should probably try to avoid
* setting up the below tables over the top of the VBIOS image at
* some point.
@@ -172,16 +177,28 @@
* We map the entire fake channel into the start of the PRAMIN BAR
*/
ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000,
0, &priv->pramin_pt);
0, &priv->pramin_pt);
if (ret)
return ret;
for (i = 0, v = c_offset; i < pt_size; i += 8, v += 0x1000) {
if (v < (c_offset + c_size))
BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v | 1);
else
BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000009);
v = c_offset | 1;
if (dev_priv->vram_sys_base) {
v += dev_priv->vram_sys_base;
v |= 0x30;
}
i = 0;
while (v < dev_priv->vram_sys_base + c_offset + c_size) {
BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v);
BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
v += 0x1000;
i += 8;
}
while (i < pt_size) {
BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000000);
BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
i += 8;
}
BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63);
@@ -416,7 +433,9 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
uint32_t pte, pte_end, vram;
struct nouveau_gpuobj *pramin_pt = priv->pramin_pt->gpuobj;
uint32_t pte, pte_end;
uint64_t vram;
if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
return -EINVAL;
@@ -424,20 +443,24 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
NV_DEBUG(dev, "st=0x%0llx sz=0x%0llx\n",
gpuobj->im_pramin->start, gpuobj->im_pramin->size);
pte = (gpuobj->im_pramin->start >> 12) << 3;
pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
pte = (gpuobj->im_pramin->start >> 12) << 1;
pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
vram = gpuobj->im_backing_start;
NV_DEBUG(dev, "pramin=0x%llx, pte=%d, pte_end=%d\n",
gpuobj->im_pramin->start, pte, pte_end);
NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start);
vram |= 1;
if (dev_priv->vram_sys_base) {
vram += dev_priv->vram_sys_base;
vram |= 0x30;
}
dev_priv->engine.instmem.prepare_access(dev, true);
while (pte < pte_end) {
nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, vram | 1);
nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000);
pte += 8;
nv_wo32(dev, pramin_pt, pte++, lower_32_bits(vram));
nv_wo32(dev, pramin_pt, pte++, upper_32_bits(vram));
vram += NV50_INSTMEM_PAGE_SIZE;
}
dev_priv->engine.instmem.finish_access(dev);
@@ -470,14 +493,13 @@ nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
if (gpuobj->im_bound == 0)
return -EINVAL;
pte = (gpuobj->im_pramin->start >> 12) << 3;
pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
pte = (gpuobj->im_pramin->start >> 12) << 1;
pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
dev_priv->engine.instmem.prepare_access(dev, true);
while (pte < pte_end) {
nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, 0x00000009);
nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000);
pte += 8;
nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
}
dev_priv->engine.instmem.finish_access(dev);


@@ -643,7 +643,7 @@ static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
uint8_t count = U8((*ptr)++);
SDEBUG(" count: %d\n", count);
if (arg == ATOM_UNIT_MICROSEC)
schedule_timeout_uninterruptible(usecs_to_jiffies(count));
udelay(count);
else
schedule_timeout_uninterruptible(msecs_to_jiffies(count));
}


@@ -543,9 +543,6 @@ int r600_vb_ib_get(struct radeon_device *rdev)
void r600_vb_ib_put(struct radeon_device *rdev)
{
radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
mutex_lock(&rdev->ib_pool.mutex);
list_add_tail(&rdev->r600_blit.vb_ib->list, &rdev->ib_pool.scheduled_ibs);
mutex_unlock(&rdev->ib_pool.mutex);
radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
}

View File

@ -1428,9 +1428,12 @@ static void r700_gfx_init(struct drm_device *dev,
gb_tiling_config |= R600_BANK_SWAPS(1);
backend_map = r700_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
dev_priv->r600_max_backends,
(0xff << dev_priv->r600_max_backends) & 0xff);
if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV740)
backend_map = 0x28;
else
backend_map = r700_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
dev_priv->r600_max_backends,
(0xff << dev_priv->r600_max_backends) & 0xff);
gb_tiling_config |= R600_BACKEND_MAP(backend_map);
cc_gc_shader_pipe_config =

View File

@ -96,6 +96,7 @@ extern int radeon_audio;
* symbol;
*/
#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
/* RADEON_IB_POOL_SIZE must be a power of 2 */
#define RADEON_IB_POOL_SIZE 16
#define RADEON_DEBUGFS_MAX_NUM_FILES 32
#define RADEONFB_CONN_LIMIT 4
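A minimal sketch of why the comment requires a power of two: the reworked IB allocator (see the radeon_ring.c hunks below) walks the pool with a bitwise AND, which only wraps correctly for power-of-two sizes. POOL_SIZE and next_slot() are illustrative names:

#define POOL_SIZE 16	/* must be a power of 2, like RADEON_IB_POOL_SIZE */

static unsigned int next_slot(unsigned int i)
{
	/* Equivalent to (i + 1) % POOL_SIZE, but only for power-of-two
	 * sizes; this is the wrap-around the allocator relies on. */
	return (i + 1) & (POOL_SIZE - 1);
}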
@ -363,11 +364,12 @@ void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
*/
struct radeon_ib {
struct list_head list;
unsigned long idx;
unsigned idx;
uint64_t gpu_addr;
struct radeon_fence *fence;
uint32_t *ptr;
uint32_t *ptr;
uint32_t length_dw;
bool free;
};
/*
@ -377,10 +379,9 @@ struct radeon_ib {
struct radeon_ib_pool {
struct mutex mutex;
struct radeon_bo *robj;
struct list_head scheduled_ibs;
struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
bool ready;
DECLARE_BITMAP(alloc_bm, RADEON_IB_POOL_SIZE);
unsigned head_id;
};
struct radeon_cp {

View File

@ -206,6 +206,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
*connector_type = DRM_MODE_CONNECTOR_DVID;
}
/* Asrock RS600 board lists the DVI port as HDMI */
if ((dev->pdev->device == 0x7941) &&
(dev->pdev->subsystem_vendor == 0x1849) &&
(dev->pdev->subsystem_device == 0x7941)) {
if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
(supported_device == ATOM_DEVICE_DFP3_SUPPORT))
*connector_type = DRM_MODE_CONNECTOR_DVID;
}
/* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */
if ((dev->pdev->device == 0x7941) &&
(dev->pdev->subsystem_vendor == 0x147b) &&

View File

@ -1279,47 +1279,47 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
rdev->mode_info.connector_table = radeon_connector_table;
if (rdev->mode_info.connector_table == CT_NONE) {
#ifdef CONFIG_PPC_PMAC
if (machine_is_compatible("PowerBook3,3")) {
if (of_machine_is_compatible("PowerBook3,3")) {
/* powerbook with VGA */
rdev->mode_info.connector_table = CT_POWERBOOK_VGA;
} else if (machine_is_compatible("PowerBook3,4") ||
machine_is_compatible("PowerBook3,5")) {
} else if (of_machine_is_compatible("PowerBook3,4") ||
of_machine_is_compatible("PowerBook3,5")) {
/* powerbook with internal tmds */
rdev->mode_info.connector_table = CT_POWERBOOK_INTERNAL;
} else if (machine_is_compatible("PowerBook5,1") ||
machine_is_compatible("PowerBook5,2") ||
machine_is_compatible("PowerBook5,3") ||
machine_is_compatible("PowerBook5,4") ||
machine_is_compatible("PowerBook5,5")) {
} else if (of_machine_is_compatible("PowerBook5,1") ||
of_machine_is_compatible("PowerBook5,2") ||
of_machine_is_compatible("PowerBook5,3") ||
of_machine_is_compatible("PowerBook5,4") ||
of_machine_is_compatible("PowerBook5,5")) {
/* powerbook with external single link tmds (sil164) */
rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL;
} else if (machine_is_compatible("PowerBook5,6")) {
} else if (of_machine_is_compatible("PowerBook5,6")) {
/* powerbook with external dual or single link tmds */
rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL;
} else if (machine_is_compatible("PowerBook5,7") ||
machine_is_compatible("PowerBook5,8") ||
machine_is_compatible("PowerBook5,9")) {
} else if (of_machine_is_compatible("PowerBook5,7") ||
of_machine_is_compatible("PowerBook5,8") ||
of_machine_is_compatible("PowerBook5,9")) {
/* PowerBook6,2 ? */
/* powerbook with external dual link tmds (sil1178?) */
rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL;
} else if (machine_is_compatible("PowerBook4,1") ||
machine_is_compatible("PowerBook4,2") ||
machine_is_compatible("PowerBook4,3") ||
machine_is_compatible("PowerBook6,3") ||
machine_is_compatible("PowerBook6,5") ||
machine_is_compatible("PowerBook6,7")) {
} else if (of_machine_is_compatible("PowerBook4,1") ||
of_machine_is_compatible("PowerBook4,2") ||
of_machine_is_compatible("PowerBook4,3") ||
of_machine_is_compatible("PowerBook6,3") ||
of_machine_is_compatible("PowerBook6,5") ||
of_machine_is_compatible("PowerBook6,7")) {
/* ibook */
rdev->mode_info.connector_table = CT_IBOOK;
} else if (machine_is_compatible("PowerMac4,4")) {
} else if (of_machine_is_compatible("PowerMac4,4")) {
/* emac */
rdev->mode_info.connector_table = CT_EMAC;
} else if (machine_is_compatible("PowerMac10,1")) {
} else if (of_machine_is_compatible("PowerMac10,1")) {
/* mini with internal tmds */
rdev->mode_info.connector_table = CT_MINI_INTERNAL;
} else if (machine_is_compatible("PowerMac10,2")) {
} else if (of_machine_is_compatible("PowerMac10,2")) {
/* mini with external tmds */
rdev->mode_info.connector_table = CT_MINI_EXTERNAL;
} else if (machine_is_compatible("PowerMac12,1")) {
} else if (of_machine_is_compatible("PowerMac12,1")) {
/* PowerMac8,1 ? */
/* imac g5 isight */
rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT;

View File

@ -780,7 +780,7 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
* connected and the DVI port disconnected. If the edid doesn't
* say HDMI, vice versa.
*/
if (radeon_connector->shared_ddc && connector_status_connected) {
if (radeon_connector->shared_ddc && (ret == connector_status_connected)) {
struct drm_device *dev = connector->dev;
struct drm_connector *list_connector;
struct radeon_connector *list_radeon_connector;
@ -1060,8 +1060,7 @@ radeon_add_atom_connector(struct drm_device *dev,
return;
}
if (radeon_connector->ddc_bus && i2c_bus->valid) {
if (memcmp(&radeon_connector->ddc_bus->rec, i2c_bus,
sizeof(struct radeon_i2c_bus_rec)) == 0) {
if (radeon_connector->ddc_bus->rec.i2c_id == i2c_bus->i2c_id) {
radeon_connector->shared_ddc = true;
shared_ddc = true;
}
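The first hunk above fixes an always-true test: connector_status_connected is a non-zero enum constant, so using it bare instead of comparing ret against it reduced the condition to shared_ddc alone. A standalone illustration with hypothetical names:

#include <stdio.h>

enum status { status_connected = 1, status_disconnected = 2 };

int main(void)
{
	enum status ret = status_disconnected;

	if (status_connected)		/* old test: a bare constant, always true */
		puts("fires even though ret is disconnected");
	if (ret == status_connected)	/* fixed test: compares the result */
		puts("never printed in this case");
	return 0;
}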

View File

@ -86,7 +86,7 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
&p->validated);
}
}
return radeon_bo_list_validate(&p->validated, p->ib->fence);
return radeon_bo_list_validate(&p->validated);
}
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
@ -189,12 +189,10 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
unsigned i;
if (error && parser->ib) {
radeon_bo_list_unvalidate(&parser->validated,
parser->ib->fence);
} else {
radeon_bo_list_unreserve(&parser->validated);
if (!error && parser->ib) {
radeon_bo_list_fence(&parser->validated, parser->ib->fence);
}
radeon_bo_list_unreserve(&parser->validated);
for (i = 0; i < parser->nrelocs; i++) {
if (parser->relocs[i].gobj) {
mutex_lock(&parser->rdev->ddev->struct_mutex);

View File

@ -106,9 +106,10 @@
* 1.29- R500 3D cmd buffer support
* 1.30- Add support for occlusion queries
* 1.31- Add support for num Z pipes from GET_PARAM
* 1.32- fixes for rv740 setup
*/
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 31
#define DRIVER_MINOR 32
#define DRIVER_PATCHLEVEL 0
enum radeon_cp_microcode_version {

View File

@ -306,11 +306,10 @@ void radeon_bo_list_unreserve(struct list_head *head)
}
}
int radeon_bo_list_validate(struct list_head *head, void *fence)
int radeon_bo_list_validate(struct list_head *head)
{
struct radeon_bo_list *lobj;
struct radeon_bo *bo;
struct radeon_fence *old_fence = NULL;
int r;
r = radeon_bo_list_reserve(head);
@ -334,32 +333,27 @@ int radeon_bo_list_validate(struct list_head *head, void *fence)
}
lobj->gpu_offset = radeon_bo_gpu_offset(bo);
lobj->tiling_flags = bo->tiling_flags;
if (fence) {
old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
bo->tbo.sync_obj = radeon_fence_ref(fence);
bo->tbo.sync_obj_arg = NULL;
}
if (old_fence) {
radeon_fence_unref(&old_fence);
}
}
return 0;
}
void radeon_bo_list_unvalidate(struct list_head *head, void *fence)
void radeon_bo_list_fence(struct list_head *head, void *fence)
{
struct radeon_bo_list *lobj;
struct radeon_fence *old_fence;
struct radeon_bo *bo;
struct radeon_fence *old_fence = NULL;
if (fence)
list_for_each_entry(lobj, head, list) {
old_fence = to_radeon_fence(lobj->bo->tbo.sync_obj);
if (old_fence == fence) {
lobj->bo->tbo.sync_obj = NULL;
radeon_fence_unref(&old_fence);
}
list_for_each_entry(lobj, head, list) {
bo = lobj->bo;
spin_lock(&bo->tbo.lock);
old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
bo->tbo.sync_obj = radeon_fence_ref(fence);
bo->tbo.sync_obj_arg = NULL;
spin_unlock(&bo->tbo.lock);
if (old_fence) {
radeon_fence_unref(&old_fence);
}
radeon_bo_list_unreserve(head);
}
}
int radeon_bo_fbdev_mmap(struct radeon_bo *bo,

View File

@ -156,8 +156,8 @@ extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
struct list_head *head);
extern int radeon_bo_list_reserve(struct list_head *head);
extern void radeon_bo_list_unreserve(struct list_head *head);
extern int radeon_bo_list_validate(struct list_head *head, void *fence);
extern void radeon_bo_list_unvalidate(struct list_head *head, void *fence);
extern int radeon_bo_list_validate(struct list_head *head);
extern void radeon_bo_list_fence(struct list_head *head, void *fence);
extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
struct vm_area_struct *vma);
extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,

View File

@ -41,68 +41,55 @@ int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
{
struct radeon_fence *fence;
struct radeon_ib *nib;
unsigned long i;
int r = 0;
int r = 0, i, c;
*ib = NULL;
r = radeon_fence_create(rdev, &fence);
if (r) {
DRM_ERROR("failed to create fence for new IB\n");
dev_err(rdev->dev, "failed to create fence for new IB\n");
return r;
}
mutex_lock(&rdev->ib_pool.mutex);
i = find_first_zero_bit(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
if (i < RADEON_IB_POOL_SIZE) {
set_bit(i, rdev->ib_pool.alloc_bm);
rdev->ib_pool.ibs[i].length_dw = 0;
*ib = &rdev->ib_pool.ibs[i];
mutex_unlock(&rdev->ib_pool.mutex);
goto out;
for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
i &= (RADEON_IB_POOL_SIZE - 1);
if (rdev->ib_pool.ibs[i].free) {
nib = &rdev->ib_pool.ibs[i];
break;
}
}
if (list_empty(&rdev->ib_pool.scheduled_ibs)) {
/* we go do nothings here */
if (nib == NULL) {
/* This should never happen: it means all IBs are allocated
* and none has been scheduled yet. Return EBUSY so that
* userspace can retry the ioctl and hopefully have better
* luck next time.
*/
dev_err(rdev->dev, "no free indirect buffer!\n");
mutex_unlock(&rdev->ib_pool.mutex);
DRM_ERROR("all IB allocated none scheduled.\n");
r = -EINVAL;
goto out;
radeon_fence_unref(&fence);
return -EBUSY;
}
/* get the first ib on the scheduled list */
nib = list_entry(rdev->ib_pool.scheduled_ibs.next,
struct radeon_ib, list);
if (nib->fence == NULL) {
/* we go do nothings here */
rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
nib->free = false;
if (nib->fence) {
mutex_unlock(&rdev->ib_pool.mutex);
DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx);
r = -EINVAL;
goto out;
}
mutex_unlock(&rdev->ib_pool.mutex);
r = radeon_fence_wait(nib->fence, false);
if (r) {
DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx,
(unsigned long)nib->gpu_addr, nib->length_dw);
DRM_ERROR("radeon: GPU lockup detected, fail to get a IB\n");
goto out;
r = radeon_fence_wait(nib->fence, false);
if (r) {
dev_err(rdev->dev, "error waiting fence of IB(%u:0x%016lX:%u)\n",
nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
mutex_lock(&rdev->ib_pool.mutex);
nib->free = true;
mutex_unlock(&rdev->ib_pool.mutex);
radeon_fence_unref(&fence);
return r;
}
mutex_lock(&rdev->ib_pool.mutex);
}
radeon_fence_unref(&nib->fence);
nib->fence = fence;
nib->length_dw = 0;
/* scheduled list is accessed here */
mutex_lock(&rdev->ib_pool.mutex);
list_del(&nib->list);
INIT_LIST_HEAD(&nib->list);
mutex_unlock(&rdev->ib_pool.mutex);
*ib = nib;
out:
if (r) {
radeon_fence_unref(&fence);
} else {
(*ib)->fence = fence;
}
return r;
return 0;
}
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
@ -113,19 +100,10 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
if (tmp == NULL) {
return;
}
mutex_lock(&rdev->ib_pool.mutex);
if (!list_empty(&tmp->list) && !radeon_fence_signaled(tmp->fence)) {
/* IB is scheduled & not signaled don't do anythings */
mutex_unlock(&rdev->ib_pool.mutex);
return;
}
list_del(&tmp->list);
INIT_LIST_HEAD(&tmp->list);
if (tmp->fence)
if (!tmp->fence->emited)
radeon_fence_unref(&tmp->fence);
tmp->length_dw = 0;
clear_bit(tmp->idx, rdev->ib_pool.alloc_bm);
mutex_lock(&rdev->ib_pool.mutex);
tmp->free = true;
mutex_unlock(&rdev->ib_pool.mutex);
}
@ -135,7 +113,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
if (!ib->length_dw || !rdev->cp.ready) {
/* TODO: nothing in the IB to report. */
DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx);
DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
return -EINVAL;
}
@ -148,7 +126,8 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
radeon_ring_ib_execute(rdev, ib);
radeon_fence_emit(rdev, ib->fence);
mutex_lock(&rdev->ib_pool.mutex);
list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs);
/* once scheduled, an IB is considered free again and is protected by its fence */
ib->free = true;
mutex_unlock(&rdev->ib_pool.mutex);
radeon_ring_unlock_commit(rdev);
return 0;
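Taken together with the allocation loop above, the free flag implements fence-guarded recycling: scheduling marks the slot free immediately, and the next radeon_ib_get() that picks it waits on its fence before reuse. A hypothetical sketch of that lifecycle (the fence type and wait call are stand-ins, not driver API):

#include <stdbool.h>

struct dummy_fence;					/* stand-in for struct radeon_fence */
void dummy_fence_wait(struct dummy_fence *fence);	/* stand-in for radeon_fence_wait() */

struct ib_slot {
	bool free;			/* slot may be handed out again */
	struct dummy_fence *fence;	/* guards the slot's previous contents */
};

/* What the allocator does once it finds a slot with free == true. */
static void reuse_slot(struct ib_slot *slot)
{
	if (slot->fence)
		dummy_fence_wait(slot->fence);	/* GPU is done with the old IB */
	slot->free = false;			/* slot now belongs to the caller */
}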
@ -164,7 +143,6 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
if (rdev->ib_pool.robj)
return 0;
/* Allocate 1M object buffer */
INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
true, RADEON_GEM_DOMAIN_GTT,
&rdev->ib_pool.robj);
@ -195,9 +173,9 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
rdev->ib_pool.ibs[i].ptr = ptr + offset;
rdev->ib_pool.ibs[i].idx = i;
rdev->ib_pool.ibs[i].length_dw = 0;
INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].list);
rdev->ib_pool.ibs[i].free = true;
}
bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
rdev->ib_pool.head_id = 0;
rdev->ib_pool.ready = true;
DRM_INFO("radeon: ib pool ready.\n");
if (radeon_debugfs_ib_init(rdev)) {
@ -214,7 +192,6 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
return;
}
mutex_lock(&rdev->ib_pool.mutex);
bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
if (rdev->ib_pool.robj) {
r = radeon_bo_reserve(rdev->ib_pool.robj, false);
if (likely(r == 0)) {
@ -363,7 +340,7 @@ static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
if (ib == NULL) {
return 0;
}
seq_printf(m, "IB %04lu\n", ib->idx);
seq_printf(m, "IB %04u\n", ib->idx);
seq_printf(m, "IB fence %p\n", ib->fence);
seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
for (i = 0; i < ib->length_dw; i++) {

View File

@ -549,9 +549,12 @@ static void rv770_gpu_init(struct radeon_device *rdev)
gb_tiling_config |= BANK_SWAPS(1);
backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes,
rdev->config.rv770.max_backends,
(0xff << rdev->config.rv770.max_backends) & 0xff);
if (rdev->family == CHIP_RV740)
backend_map = 0x28;
else
backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes,
rdev->config.rv770.max_backends,
(0xff << rdev->config.rv770.max_backends) & 0xff);
gb_tiling_config |= BACKEND_MAP(backend_map);
cc_gc_shader_pipe_config =

View File

@ -196,14 +196,15 @@ EXPORT_SYMBOL(ttm_tt_populate);
#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
enum ttm_caching_state c_state)
enum ttm_caching_state c_old,
enum ttm_caching_state c_new)
{
int ret = 0;
if (PageHighMem(p))
return 0;
if (get_page_memtype(p) != -1) {
if (c_old != tt_cached) {
/* p isn't in the default caching state, set it to
* writeback first to free its current memtype. */
@ -212,16 +213,17 @@ static inline int ttm_tt_set_page_caching(struct page *p,
return ret;
}
if (c_state == tt_wc)
if (c_new == tt_wc)
ret = set_memory_wc((unsigned long) page_address(p), 1);
else if (c_state == tt_uncached)
else if (c_new == tt_uncached)
ret = set_pages_uc(p, 1);
return ret;
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
enum ttm_caching_state c_state)
enum ttm_caching_state c_old,
enum ttm_caching_state c_new)
{
return 0;
}
@ -254,7 +256,9 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm,
for (i = 0; i < ttm->num_pages; ++i) {
cur_page = ttm->pages[i];
if (likely(cur_page != NULL)) {
ret = ttm_tt_set_page_caching(cur_page, c_state);
ret = ttm_tt_set_page_caching(cur_page,
ttm->caching_state,
c_state);
if (unlikely(ret != 0))
goto out_err;
}
@ -268,7 +272,7 @@ out_err:
for (j = 0; j < i; ++j) {
cur_page = ttm->pages[j];
if (likely(cur_page != NULL)) {
(void)ttm_tt_set_page_caching(cur_page,
(void)ttm_tt_set_page_caching(cur_page, c_state,
ttm->caching_state);
}
}

View File

@ -348,22 +348,19 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
*/
DRM_INFO("It appears like vesafb is loaded. "
"Ignore above error if any. Entering stealth mode.\n");
"Ignore above error if any.\n");
ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
if (unlikely(ret != 0)) {
DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
goto out_no_device;
}
vmw_kms_init(dev_priv);
vmw_overlay_init(dev_priv);
} else {
ret = vmw_request_device(dev_priv);
if (unlikely(ret != 0))
goto out_no_device;
vmw_kms_init(dev_priv);
vmw_overlay_init(dev_priv);
vmw_fb_init(dev_priv);
}
ret = vmw_request_device(dev_priv);
if (unlikely(ret != 0))
goto out_no_device;
vmw_kms_init(dev_priv);
vmw_overlay_init(dev_priv);
vmw_fb_init(dev_priv);
dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
register_pm_notifier(&dev_priv->pm_nb);
@ -406,17 +403,15 @@ static int vmw_driver_unload(struct drm_device *dev)
unregister_pm_notifier(&dev_priv->pm_nb);
if (!dev_priv->stealth) {
vmw_fb_close(dev_priv);
vmw_kms_close(dev_priv);
vmw_overlay_close(dev_priv);
vmw_release_device(dev_priv);
pci_release_regions(dev->pdev);
} else {
vmw_kms_close(dev_priv);
vmw_overlay_close(dev_priv);
vmw_fb_close(dev_priv);
vmw_kms_close(dev_priv);
vmw_overlay_close(dev_priv);
vmw_release_device(dev_priv);
if (dev_priv->stealth)
pci_release_region(dev->pdev, 2);
}
else
pci_release_regions(dev->pdev);
if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
drm_irq_uninstall(dev_priv->dev);
if (dev->devname == vmw_devname)
@ -585,11 +580,6 @@ static int vmw_master_set(struct drm_device *dev,
int ret = 0;
DRM_INFO("Master set.\n");
if (dev_priv->stealth) {
ret = vmw_request_device(dev_priv);
if (unlikely(ret != 0))
return ret;
}
if (active) {
BUG_ON(active != &dev_priv->fbdev_master);
@ -649,18 +639,11 @@ static void vmw_master_drop(struct drm_device *dev,
ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
if (dev_priv->stealth) {
ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
if (unlikely(ret != 0))
DRM_ERROR("Unable to clean VRAM on master drop.\n");
vmw_release_device(dev_priv);
}
dev_priv->active_master = &dev_priv->fbdev_master;
ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
ttm_vt_unlock(&dev_priv->fbdev_master.lock);
if (!dev_priv->stealth)
vmw_fb_on(dev_priv);
vmw_fb_on(dev_priv);
}

View File

@ -182,25 +182,19 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
}
static int vmw_cmd_dma(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGAGuestPtr *ptr,
struct vmw_dma_buffer **vmw_bo_p)
{
uint32_t handle;
struct vmw_dma_buffer *vmw_bo = NULL;
struct ttm_buffer_object *bo;
struct vmw_surface *srf = NULL;
struct vmw_dma_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdSurfaceDMA dma;
} *cmd;
uint32_t handle = ptr->gmrId;
struct vmw_relocation *reloc;
int ret;
uint32_t cur_validate_node;
struct ttm_validate_buffer *val_buf;
int ret;
cmd = container_of(header, struct vmw_dma_cmd, header);
handle = cmd->dma.guest.ptr.gmrId;
ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use GMR region.\n");
@ -209,14 +203,14 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
bo = &vmw_bo->base;
if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
DRM_ERROR("Max number of DMA commands per submission"
DRM_ERROR("Max number relocations per submission"
" exceeded\n");
ret = -EINVAL;
goto out_no_reloc;
}
reloc = &sw_context->relocs[sw_context->cur_reloc++];
reloc->location = &cmd->dma.guest.ptr;
reloc->location = ptr;
cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) {
@ -234,7 +228,89 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
list_add_tail(&val_buf->head, &sw_context->validate_nodes);
++sw_context->cur_val_buf;
}
*vmw_bo_p = vmw_bo;
return 0;
out_no_reloc:
vmw_dmabuf_unreference(&vmw_bo);
vmw_bo_p = NULL;
return ret;
}
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_dma_buffer *vmw_bo;
struct vmw_query_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdEndQuery q;
} *cmd;
int ret;
cmd = container_of(header, struct vmw_query_cmd, header);
ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
if (unlikely(ret != 0))
return ret;
ret = vmw_translate_guest_ptr(dev_priv, sw_context,
&cmd->q.guestResult,
&vmw_bo);
if (unlikely(ret != 0))
return ret;
vmw_dmabuf_unreference(&vmw_bo);
return 0;
}
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_dma_buffer *vmw_bo;
struct vmw_query_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdWaitForQuery q;
} *cmd;
int ret;
cmd = container_of(header, struct vmw_query_cmd, header);
ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
if (unlikely(ret != 0))
return ret;
ret = vmw_translate_guest_ptr(dev_priv, sw_context,
&cmd->q.guestResult,
&vmw_bo);
if (unlikely(ret != 0))
return ret;
vmw_dmabuf_unreference(&vmw_bo);
return 0;
}
static int vmw_cmd_dma(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_dma_buffer *vmw_bo = NULL;
struct ttm_buffer_object *bo;
struct vmw_surface *srf = NULL;
struct vmw_dma_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdSurfaceDMA dma;
} *cmd;
int ret;
cmd = container_of(header, struct vmw_dma_cmd, header);
ret = vmw_translate_guest_ptr(dev_priv, sw_context,
&cmd->dma.guest.ptr,
&vmw_bo);
if (unlikely(ret != 0))
return ret;
bo = &vmw_bo->base;
ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
cmd->dma.host.sid, &srf);
if (ret) {
@ -379,8 +455,8 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
&vmw_cmd_blt_surf_screen_check)

View File

@ -559,6 +559,9 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
info->pixmap.scan_align = 1;
#endif
info->aperture_base = vmw_priv->vram_start;
info->aperture_size = vmw_priv->vram_size;
/*
* Dirty & Deferred IO
*/

View File

@ -961,7 +961,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
remaining -= 7;
pr_devel("client 0x%p called 'target'\n", priv);
/* if target is default */
if (!strncmp(kbuf, "default", 7))
if (!strncmp(curr_pos, "default", 7))
pdev = pci_dev_get(vga_default_device());
else {
if (!vga_pci_str_to_vars(curr_pos, remaining,
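The fix above is a classic parse-cursor bug: the code advances curr_pos past the "target " keyword but then compared against the start of the buffer. A standalone illustration:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *kbuf = "target default";
	const char *curr_pos = kbuf + 7;	/* advanced past "target " */

	/* strncmp(kbuf, "default", 7) would compare "target " and fail */
	if (!strncmp(curr_pos, "default", 7))
		puts("default target selected");
	return 0;
}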

View File

@ -55,6 +55,12 @@ source "drivers/hid/usbhid/Kconfig"
menu "Special HID drivers"
depends on HID
config HID_3M_PCT
tristate "3M PCT"
depends on USB_HID
---help---
Support for 3M PCT touch screens.
config HID_A4TECH
tristate "A4 tech" if EMBEDDED
depends on USB_HID
@ -183,6 +189,23 @@ config LOGIRUMBLEPAD2_FF
Say Y here if you want to enable force feedback support for Logitech
Rumblepad 2 devices.
config LOGIG940_FF
bool "Logitech Flight System G940 force feedback support"
depends on HID_LOGITECH
select INPUT_FF_MEMLESS
help
Say Y here if you want to enable force feedback support for Logitech
Flight System G940 devices.
config HID_MAGICMOUSE
tristate "Apple MagicMouse multi-touch support"
depends on BT_HIDP
---help---
Support for the Apple Magic Mouse multi-touch features.
Say Y here if you want support for the multi-touch features of the
Apple Wireless "Magic" Mouse.
config HID_MICROSOFT
tristate "Microsoft" if EMBEDDED
depends on USB_HID
@ -190,6 +213,12 @@ config HID_MICROSOFT
---help---
Support for Microsoft devices that are not fully compliant with HID standard.
config HID_MOSART
tristate "MosArt"
depends on USB_HID
---help---
Support for MosArt dual-touch panels.
config HID_MONTEREY
tristate "Monterey" if EMBEDDED
depends on USB_HID
@ -198,11 +227,17 @@ config HID_MONTEREY
Support for Monterey Genius KB29E.
config HID_NTRIG
tristate "NTrig" if EMBEDDED
tristate "NTrig"
depends on USB_HID
---help---
Support for N-Trig touch screen.
config HID_ORTEK
tristate "Ortek" if EMBEDDED
depends on USB_HID
default !EMBEDDED
---help---
Support for N-Trig touch screen.
Support for Ortek WKB-2000 wireless keyboard + mouse trackpad.
config HID_PANTHERLORD
tristate "Pantherlord support" if EMBEDDED
@ -227,6 +262,12 @@ config HID_PETALYNX
---help---
Support for Petalynx Maxter remote control.
config HID_QUANTA
tristate "Quanta Optical Touch"
depends on USB_HID
---help---
Support for Quanta Optical Touch dual-touch panels.
config HID_SAMSUNG
tristate "Samsung" if EMBEDDED
depends on USB_HID
@ -241,6 +282,12 @@ config HID_SONY
---help---
Support for Sony PS3 controller.
config HID_STANTUM
tristate "Stantum"
depends on USB_HID
---help---
Support for Stantum multitouch panel.
config HID_SUNPLUS
tristate "Sunplus" if EMBEDDED
depends on USB_HID
@ -305,9 +352,8 @@ config THRUSTMASTER_FF
Rumble Force or Force Feedback Wheel.
config HID_WACOM
tristate "Wacom Bluetooth devices support" if EMBEDDED
tristate "Wacom Bluetooth devices support"
depends on BT_HIDP
default !EMBEDDED
---help---
Support for Wacom Graphire Bluetooth tablet.

Some files were not shown because too many files have changed in this diff.