alistair23-linux/arch/x86/kernel/devicetree.c

/*
 * Architecture specific OF callbacks.
 */
#include <linux/bootmem.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/of_pci.h>
#include <linux/initrd.h>
#include <asm/hpet.h>
#include <asm/apic.h>
#include <asm/pci_x86.h>

__initdata u64 initial_dtb;
char __initdata cmd_line[COMMAND_LINE_SIZE];
int __initdata of_ioapic;

unsigned long pci_address_to_pio(phys_addr_t address)
{
	/*
	 * The ioport address can be directly used by inX / outX
	 */
	BUG_ON(address >= (1 << 16));
	return (unsigned long)address;
}
EXPORT_SYMBOL_GPL(pci_address_to_pio);
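
/*
 * Hooks for the generic FDT code.  On x86 the command line and the memory
 * map come from boot_params and the e820 table, so the /chosen and /memory
 * scan callbacks must never be reached; early DT allocations are served
 * from bootmem below MAX_DMA_ADDRESS.
 */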
void __init early_init_dt_scan_chosen_arch(unsigned long node)
{
	BUG();
}

void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
	BUG();
}

void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
{
	return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS));
}
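
/*
 * Record an initrd handed over via the /chosen node.  The FDT core passes
 * physical addresses; convert them to virtual ones for the initrd code.
 */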
#ifdef CONFIG_BLK_DEV_INITRD
void __init early_init_dt_setup_initrd_arch(unsigned long start,
					    unsigned long end)
{
	initrd_start = (unsigned long)__va(start);
	initrd_end = (unsigned long)__va(end);
	initrd_below_start_ok = 1;
}
#endif
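
/*
 * Remember where the boot loader put the device tree blob.  The payload
 * starts right after the setup_data header; it is mapped and copied later
 * by x86_flattree_get_config().
 */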
void __init add_dtb(u64 data)
{
	initial_dtb = data + offsetof(struct setup_data, data);
}

/*
 * CE4100 ids. Will be moved to machine_device_initcall() once we have it.
 */
static struct of_device_id __initdata ce4100_ids[] = {
	{ .compatible = "intel,ce4100-cp", },
	{ .compatible = "isa", },
	{ .compatible = "pci", },
	{},
};

static int __init add_bus_probe(void)
{
	if (!of_have_populated_dt())
		return 0;

	return of_platform_bus_probe(NULL, ce4100_ids, NULL);
}
module_init(add_bus_probe);

#ifdef CONFIG_PCI
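
/*
 * Find the device tree node for a PCI host bridge by matching the bus
 * number against the first cell of each "pci" node's "bus-range" property.
 */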
struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
{
	struct device_node *np;

	for_each_node_by_type(np, "pci") {
		const void *prop;
		unsigned int bus_min;

		prop = of_get_property(np, "bus-range", NULL);
		if (!prop)
			continue;
		bus_min = be32_to_cpup(prop);
		if (bus->number == bus_min)
			return np;
	}
	return NULL;
}
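
/*
 * Route a device's legacy INTx pin through the device tree interrupt map
 * and install the resulting virtual IRQ in dev->irq.
 */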
static int x86_of_pci_irq_enable(struct pci_dev *dev)
{
	struct of_irq oirq;
	u32 virq;
	int ret;
	u8 pin;

	ret = pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
	if (ret)
		return ret;
	if (!pin)
		return 0;

	ret = of_irq_map_pci(dev, &oirq);
	if (ret)
		return ret;

	virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
				     oirq.size);
	if (virq == 0)
		return -EINVAL;
	dev->irq = virq;
	return 0;
}

static void x86_of_pci_irq_disable(struct pci_dev *dev)
{
}

void x86_of_pci_init(void)
{
	pcibios_enable_irq = x86_of_pci_irq_enable;
	pcibios_disable_irq = x86_of_pci_irq_disable;
}
#endif
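
/*
 * Read the HPET base address from the CE4100 device tree node and hand it
 * to the HPET driver via hpet_address.
 */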
static void __init dtb_setup_hpet(void)
{
#ifdef CONFIG_HPET_TIMER
	struct device_node *dn;
	struct resource r;
	int ret;

	dn = of_find_compatible_node(NULL, NULL, "intel,ce4100-hpet");
	if (!dn)
		return;
	ret = of_address_to_resource(dn, 0, &r);
	if (ret) {
		WARN_ON(1);
		return;
	}
	hpet_address = r.start;
#endif
}
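
/*
 * Set up the local APIC from the device tree: force-enable it if the boot
 * loader left it disabled, then register its address and the boot CPU.
 */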
static void __init dtb_lapic_setup(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	struct device_node *dn;
	struct resource r;
	int ret;

	dn = of_find_compatible_node(NULL, NULL, "intel,ce4100-lapic");
	if (!dn)
		return;

	ret = of_address_to_resource(dn, 0, &r);
	if (WARN_ON(ret))
		return;

	/* Did the boot loader setup the local APIC ? */
	if (!cpu_has_apic) {
		if (apic_force_enable(r.start))
			return;
	}
	smp_found_config = 1;
	pic_mode = 1;
	register_lapic_address(r.start);
	generic_processor_info(boot_cpu_physical_apicid,
			       GET_APIC_VERSION(apic_read(APIC_LVR)));
#endif
}

#ifdef CONFIG_X86_IO_APIC
static unsigned int ioapic_id;
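
/* Register one IO-APIC described in the device tree with the MP code. */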
static void __init dtb_add_ioapic(struct device_node *dn)
{
	struct resource r;
	int ret;

	ret = of_address_to_resource(dn, 0, &r);
	if (ret) {
		printk(KERN_ERR "Can't obtain address from node %s.\n",
				dn->full_name);
		return;
	}
	mp_register_ioapic(++ioapic_id, r.start, gsi_top);
}

static void __init dtb_ioapic_setup(void)
{
	struct device_node *dn;

	for_each_compatible_node(dn, NULL, "intel,ce4100-ioapic")
		dtb_add_ioapic(dn);

	if (nr_ioapics) {
		of_ioapic = 1;
		return;
	}
	printk(KERN_ERR "Error: No information about IO-APIC in OF.\n");
}
#else
static void __init dtb_ioapic_setup(void) {}
#endif

static void __init dtb_apic_setup(void)
{
	dtb_lapic_setup();
	dtb_ioapic_setup();
}

#ifdef CONFIG_OF_FLATTREE
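
/*
 * Map the flattened device tree passed in via setup_data, copy it into
 * memory that stays around and unflatten it into struct device_node form.
 */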
static void __init x86_flattree_get_config(void)
{
	u32 size, map_len;
	void *new_dtb;

	if (!initial_dtb)
		return;

	map_len = max(PAGE_SIZE - (initial_dtb & ~PAGE_MASK),
			(u64)sizeof(struct boot_param_header));

	initial_boot_params = early_memremap(initial_dtb, map_len);
	size = be32_to_cpu(initial_boot_params->totalsize);
	if (map_len < size) {
		early_iounmap(initial_boot_params, map_len);
		initial_boot_params = early_memremap(initial_dtb, size);
		map_len = size;
	}

	new_dtb = alloc_bootmem(size);
	memcpy(new_dtb, initial_boot_params, size);
	early_iounmap(initial_boot_params, map_len);
	initial_boot_params = new_dtb;

	/* root level address cells */
	of_scan_flat_dt(early_init_dt_scan_root, NULL);

	unflatten_device_tree();
}
#else
static inline void x86_flattree_get_config(void) { }
#endif
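
/*
 * Main device tree entry point for x86: pull in the flattened tree, if
 * any, and configure the HPET and the APICs from it.
 */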
void __init x86_dtb_init(void)
{
	x86_flattree_get_config();

	if (!of_have_populated_dt())
		return;

	dtb_setup_hpet();
	dtb_apic_setup();
}

#ifdef CONFIG_X86_IO_APIC
struct of_ioapic_type {
	u32 out_type;
	u32 trigger;
	u32 polarity;
};
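
/*
 * Translation from the second interrupt specifier cell to IO-APIC trigger
 * mode and polarity.
 */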
static struct of_ioapic_type of_ioapic_type[] =
{
	{
		.out_type = IRQ_TYPE_EDGE_RISING,
		.trigger = IOAPIC_EDGE,
		.polarity = 1,
	},
	{
		.out_type = IRQ_TYPE_LEVEL_LOW,
		.trigger = IOAPIC_LEVEL,
		.polarity = 0,
	},
	{
		.out_type = IRQ_TYPE_LEVEL_HIGH,
		.trigger = IOAPIC_LEVEL,
		.polarity = 1,
	},
	{
		.out_type = IRQ_TYPE_EDGE_FALLING,
		.trigger = IOAPIC_EDGE,
		.polarity = 0,
	},
};
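
/*
 * irq_domain .xlate callback: cell 0 is the IO-APIC pin, cell 1 indexes
 * the table above.  Program the pin's trigger/polarity and report the
 * hardware irq number and linux irq type.
 */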
static int ioapic_xlate(struct irq_domain *domain,
			struct device_node *controller,
			const u32 *intspec, u32 intsize,
			irq_hw_number_t *out_hwirq, u32 *out_type)
{
	struct io_apic_irq_attr attr;
	struct of_ioapic_type *it;
	u32 line, idx;
	int rc;

	if (WARN_ON(intsize < 2))
		return -EINVAL;

	line = intspec[0];

	if (intspec[1] >= ARRAY_SIZE(of_ioapic_type))
		return -EINVAL;

	it = &of_ioapic_type[intspec[1]];

	idx = (u32) domain->host_data;
	set_io_apic_irq_attr(&attr, idx, line, it->trigger, it->polarity);

	rc = io_apic_setup_irq_pin_once(irq_find_mapping(domain, line),
					cpu_to_node(0), &attr);
	if (rc)
		return rc;

	*out_hwirq = line;
	*out_type = it->out_type;
	return 0;
}

const struct irq_domain_ops ioapic_irq_domain_ops = {
	.xlate = ioapic_xlate,
};
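
/*
 * Create a linear irq domain covering an IO-APIC's GSI range.  The boot
 * IO-APIC (GSI base 0) keeps the preallocated legacy IRQ descriptors and
 * becomes the default host for interrupt mappings.
 */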
static void dt_add_ioapic_domain(unsigned int ioapic_num,
		struct device_node *np)
{
	struct irq_domain *id;
	struct mp_ioapic_gsi *gsi_cfg;
	int ret;
	int num;

	gsi_cfg = mp_ioapic_gsi_routing(ioapic_num);
	num = gsi_cfg->gsi_end - gsi_cfg->gsi_base + 1;

	id = irq_domain_add_linear(np, num, &ioapic_irq_domain_ops,
			(void *)ioapic_num);
	BUG_ON(!id);
	if (gsi_cfg->gsi_base == 0) {
		/*
		 * The first NR_IRQS_LEGACY irq descs are allocated in
		 * early_irq_init() and need just a mapping. The
		 * remaining irqs need both. All of them are preallocated
		 * and assigned so we can keep the 1:1 mapping which the ioapic
		 * is having.
		 */
		irq_domain_associate_many(id, 0, 0, NR_IRQS_LEGACY);

		if (num > NR_IRQS_LEGACY) {
			ret = irq_create_strict_mappings(id, NR_IRQS_LEGACY,
					NR_IRQS_LEGACY, num - NR_IRQS_LEGACY);
			if (ret)
				pr_err("Error creating mapping for the "
						"remaining IRQs: %d\n", ret);
		}
		irq_set_default_host(id);
	} else {
		ret = irq_create_strict_mappings(id, gsi_cfg->gsi_base, 0, num);
		if (ret)
			pr_err("Error creating IRQ mapping: %d\n", ret);
	}
}
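
/*
 * Match a device tree IO-APIC node to one of the registered IO-APICs by
 * its MMIO address and attach an irq domain to it.
 */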
static void __init ioapic_add_ofnode(struct device_node *np)
{
	struct resource r;
	int i, ret;

	ret = of_address_to_resource(np, 0, &r);
	if (ret) {
		printk(KERN_ERR "Failed to obtain address for %s\n",
				np->full_name);
		return;
	}

	for (i = 0; i < nr_ioapics; i++) {
		if (r.start == mpc_ioapic_addr(i)) {
			dt_add_ioapic_domain(i, np);
			return;
		}
	}

	printk(KERN_ERR "IOxAPIC at %s is not registered.\n", np->full_name);
}
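
/*
 * Walk all nodes with an "interrupt-controller" property and attach irq
 * domains to the CE4100 IO-APICs among them.
 */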
void __init x86_add_irq_domains(void)
{
	struct device_node *dp;

	if (!of_have_populated_dt())
		return;

	for_each_node_with_property(dp, "interrupt-controller") {
		if (of_device_is_compatible(dp, "intel,ce4100-ioapic"))
			ioapic_add_ofnode(dp);
	}
}
#else
void __init x86_add_irq_domains(void) { }
#endif