
ia64 for v5.4 - big change here is removal of support for SGI Altix

-----BEGIN PGP SIGNATURE-----
 
 iQIcBAABAgAGBQJdf64MAAoJEKurIx+X31iBB20P/07o93sBT92SiA2/ety9sLqV
 BGJmEdw7gyb9WVbUip6s71FIEKZw4foCGkqDiX+lr5Fw2A9tiK7LmFgTLi4LLwg+
 syhYZ1y5/mwBI4FLlJudKjQdFZjr/n7DNlz4H67woE2kK+FyRsOKEaFUhuR8+0rC
 mKJBKtIGnoIOPG06PT1k5qfdpzlreCFoWdIhjO55LfDgZnnDiMaX5h0vcBQ9xgCp
 xGV0n/f7+qn4pzB4hGvNV209Sdgv2V4t77bHNvyXlJrM5Hqzafo5MzFgEJv+fRqJ
 2RnkWVhwctfbid/2ggf2aAsYnMK3GigEaOCsYW2oWJESVUQhxIi3ndF/Jt9fraZv
 ZouD7G/s64P5lUQuCT9JnKGzJrSgxvkd37049AZ4pFVc2MzLC6o6dyyP8pu5ARe8
 T0shFik3+gsml2US/vSUzxvrg1saRQjl9E/AJ0RTZ8oyP4FNnFmkJf38qj3a0L0k
 ILFYscM5q7WPggoDA/m6F96tLGhdK/sKjDzrADjEh2dIvn4woqoEJSDn+rXuP+Gm
 UOj1v8mILZCqvOAmc9IkGCkPUlbrmNV/1FYh5+GWudtillEaD82vjSqm+jnVbfXD
 REvHlR/kxCSj1gg/+nk+NFdZCkW3xETOcTZohhDkR7du2mHjTwBMZ2YRPrqoX4c8
 VZA57Mrqm5Uk5601qYRl
 =L5e+
 -----END PGP SIGNATURE-----

Merge tag 'please-pull-ia64_for_5.4' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux

Pull ia64 updates from Tony Luck:
 "The big change here is removal of support for SGI Altix"

* tag 'please-pull-ia64_for_5.4' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux: (33 commits)
  genirq: remove the is_affinity_mask_valid hook
  ia64: remove CONFIG_SWIOTLB ifdefs
  ia64: remove support for machvecs
  ia64: move the screen_info setup to common code
  ia64: move the ROOT_DEV setup to common code
  ia64: rework iommu probing
  ia64: remove the unused sn_coherency_id symbol
  ia64: remove the SGI UV simulator support
  ia64: remove the zx1 swiotlb machvec
  ia64: remove CONFIG_ACPI ifdefs
  ia64: remove CONFIG_PCI ifdefs
  ia64: remove the hpsim platform
  ia64: remove now unused machvec indirections
  ia64: remove support for the SGI SN2 platform
  drivers: remove the SGI SN2 IOC4 base support
  drivers: remove the SGI SN2 IOC3 base support
  qla2xxx: remove SGI SN2 support
  qla1280: remove SGI SN2 support
  misc/sgi-xp: remove SGI SN2 support
  char/mspec: remove SGI SN2 support
  ...
Linus Torvalds 2019-09-16 15:32:01 -07:00
commit 76f0f227cf
231 changed files with 349 additions and 40676 deletions


@@ -2378,7 +2378,7 @@
machvec= [IA-64] Force the use of a particular machine-vector
(machvec) in a generic kernel.
Example: machvec=hpzx1_swiotlb
Example: machvec=hpzx1
machtype= [Loongson] Share the same kernel image file between different
yeeloong laptop.


@@ -1,49 +0,0 @@
====================================
SGI IOC4 PCI (multi function) device
====================================
The SGI IOC4 PCI device is a bit of a strange beast, so some notes on
it are in order.
First, even though the IOC4 performs multiple functions, such as an
IDE controller, a serial controller, a PS/2 keyboard/mouse controller,
and an external interrupt mechanism, it's not implemented as a
multifunction device. The consequence of this from a software
standpoint is that all these functions share a single IRQ, and
they can't all register to own the same PCI device ID. To make
matters a bit worse, some of the register blocks (and even registers
themselves) present in IOC4 are mixed-purpose between these several
functions, meaning that there's no clear "owning" device driver.
The solution is to organize the IOC4 driver into several independent
drivers, "ioc4", "sgiioc4", and "ioc4_serial". Note that there is no
PS/2 controller driver as this functionality has never been wired up
on a shipping IO card.
ioc4
====
This is the core (or shim) driver for IOC4. It is responsible for
initializing the basic functionality of the chip, and allocating
the PCI resources that are shared between the IOC4 functions.
This driver also provides registration functions that the other
IOC4 drivers can call to make their presence known. Each driver
needs to provide a probe and remove function, which are invoked
by the core driver at appropriate times. The interface of these
IOC4 function probe and remove operations isn't precisely the same
as PCI device probe and remove operations, but is logically the
same operation.
sgiioc4
=======
This is the IDE driver for IOC4. Its name isn't very descriptive
simply for historical reasons (it used to be the only IOC4 driver
component). There's not much to say about it other than it hooks
up to the ioc4 driver via the appropriate registration, probe, and
remove functions.
ioc4_serial
===========
This is the serial driver for IOC4. There's not much to say about it
other than it hooks up to the ioc4 driver via the appropriate registration,
probe, and remove functions.
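
As a rough sketch of the registration pattern described above, a function
driver hooks into the core driver through a registration call and a pair of
probe/remove callbacks. The struct and function names below (ioc4_submodule,
ioc4_register_submodule, ioc4_driver_data) are assumptions for illustration
and may not match the removed IOC4 headers exactly.

#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>

struct ioc4_driver_data;	/* per-card state owned by the core "ioc4" driver */

/* A sub-driver describes itself to the core driver with a structure like this. */
struct ioc4_submodule {
	struct list_head is_list;
	char *is_name;
	struct module *is_owner;
	int (*is_probe)(struct ioc4_driver_data *idd);	/* called for each IOC4 card found */
	int (*is_remove)(struct ioc4_driver_data *idd);	/* called when a card goes away */
};

/* Registration entry points assumed to be exported by the core driver. */
extern int ioc4_register_submodule(struct ioc4_submodule *is);
extern void ioc4_unregister_submodule(struct ioc4_submodule *is);

static int example_probe(struct ioc4_driver_data *idd)
{
	/* claim this function's register block on the newly found card */
	return 0;
}

static int example_remove(struct ioc4_driver_data *idd)
{
	/* undo whatever example_probe() set up */
	return 0;
}

static struct ioc4_submodule example_submodule = {
	.is_name   = "ioc4_example",
	.is_owner  = THIS_MODULE,
	.is_probe  = example_probe,
	.is_remove = example_remove,
};

static int __init example_init(void)
{
	return ioc4_register_submodule(&example_submodule);
}

static void __exit example_exit(void)
{
	ioc4_unregister_submodule(&example_submodule);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

The sgiioc4 and ioc4_serial drivers described above followed this same shape,
with their probe routines claiming the IDE and serial register blocks respectively.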


@@ -8413,12 +8413,6 @@ L: linux-mips@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/sgi/ioc3-eth.c
IOC3 SERIAL DRIVER
M: Pat Gefre <pfg@sgi.com>
L: linux-serial@vger.kernel.org
S: Maintained
F: drivers/tty/serial/ioc3_serial.c
IOMAP FILESYSTEM LIBRARY
M: Christoph Hellwig <hch@infradead.org>
M: Darrick J. Wong <darrick.wong@oracle.com>


@@ -10,12 +10,14 @@ config IA64
bool
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
select ACPI if (!IA64_HP_SIM)
select ARCH_SUPPORTS_ACPI if (!IA64_HP_SIM)
select ACPI
select ACPI_NUMA if NUMA
select ARCH_SUPPORTS_ACPI
select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
select FORCE_PCI if (!IA64_HP_SIM)
select FORCE_PCI
select PCI_DOMAINS if PCI
select PCI_MSI
select PCI_SYSCALL if PCI
select HAVE_UNSTABLE_SCHED_CLOCK
select HAVE_EXIT_THREAD
@@ -30,8 +32,8 @@ config IA64
select HAVE_ARCH_TRACEHOOK
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_VIRT_CPU_ACCOUNTING
select ARCH_HAS_DMA_COHERENT_TO_PFN if SWIOTLB
select ARCH_HAS_SYNC_DMA_FOR_CPU if SWIOTLB
select ARCH_HAS_DMA_COHERENT_TO_PFN
select ARCH_HAS_SYNC_DMA_FOR_CPU
select VIRT_TO_BUS
select GENERIC_IRQ_PROBE
select GENERIC_PENDING_IRQ if SMP
@@ -45,6 +47,7 @@ config IA64
select ARCH_THREAD_STACK_ALLOCATOR
select ARCH_CLOCKSOURCE_DATA
select GENERIC_TIME_VSYSCALL
select SWIOTLB
select SYSCTL_ARCH_UNALIGN_NO_WARN
select HAVE_MOD_ARCH_SPECIFIC
select MODULES_USE_ELF_RELA
@@ -52,6 +55,7 @@ config IA64
select HAVE_ARCH_AUDITSYSCALL
select NEED_DMA_MAP_STATE
select NEED_SG_DMA_LENGTH
select NUMA if !FLATMEM
default y
help
The Itanium Processor Family is Intel's 64-bit successor to
@@ -66,7 +70,6 @@ config 64BIT
config ZONE_DMA32
def_bool y
depends on !IA64_SGI_SN2
config QUICKLIST
bool
@@ -120,87 +123,6 @@ config AUDIT_ARCH
bool
default y
choice
prompt "System type"
default IA64_GENERIC
config IA64_GENERIC
bool "generic"
select NUMA
select ACPI_NUMA
select SWIOTLB
select PCI_MSI
help
This selects the system type of your hardware. A "generic" kernel
will run on any supported IA-64 system. However, if you configure
a kernel for your specific system, it will be faster and smaller.
generic For any supported IA-64 system
DIG-compliant For DIG ("Developer's Interface Guide") compliant systems
DIG+Intel+IOMMU For DIG systems with Intel IOMMU
HP-zx1/sx1000 For HP systems
HP-zx1/sx1000+swiotlb For HP systems with (broken) DMA-constrained devices.
SGI-SN2 For SGI Altix systems
SGI-UV For SGI UV systems
Ski-simulator For the HP simulator <http://www.hpl.hp.com/research/linux/ski/>
If you don't know what to do, choose "generic".
config IA64_DIG
bool "DIG-compliant"
select SWIOTLB
config IA64_DIG_VTD
bool "DIG+Intel+IOMMU"
select INTEL_IOMMU
select PCI_MSI
config IA64_HP_ZX1
bool "HP-zx1/sx1000"
help
Build a kernel that runs on HP zx1 and sx1000 systems. This adds
support for the HP I/O MMU.
config IA64_HP_ZX1_SWIOTLB
bool "HP-zx1/sx1000 with software I/O TLB"
select SWIOTLB
help
Build a kernel that runs on HP zx1 and sx1000 systems even when they
have broken PCI devices which cannot DMA to full 32 bits. Apart
from support for the HP I/O MMU, this includes support for the software
I/O TLB, which allows supporting the broken devices at the expense of
wasting some kernel memory (about 2MB by default).
config IA64_SGI_SN2
bool "SGI-SN2"
select NUMA
select ACPI_NUMA
help
Selecting this option will optimize the kernel for use on sn2 based
systems, but the resulting kernel binary will not run on other
types of ia64 systems. If you have an SGI Altix system, it's safe
to select this option. If in doubt, select ia64 generic support
instead.
config IA64_SGI_UV
bool "SGI-UV"
select NUMA
select ACPI_NUMA
select SWIOTLB
help
Selecting this option will optimize the kernel for use on UV based
systems, but the resulting kernel binary will not run on other
types of ia64 systems. If you have an SGI UV system, it's safe
to select this option. If in doubt, select ia64 generic support
instead.
config IA64_HP_SIM
bool "Ski-simulator"
select SWIOTLB
depends on !PM
endchoice
choice
prompt "Processor type"
default ITANIUM
@@ -252,14 +174,7 @@ config IA64_PAGE_SIZE_64KB
endchoice
if IA64_HP_SIM
config HZ
default 32
endif
if !IA64_HP_SIM
source "kernel/Kconfig.hz"
endif
config IA64_BRL_EMU
bool
@@ -272,17 +187,26 @@ config IA64_L1_CACHE_SHIFT
default "7" if MCKINLEY
default "6" if ITANIUM
config IA64_SGI_UV
bool "SGI-UV support"
help
Selecting this option will add specific support for running on SGI
UV based systems. If you have an SGI UV system or are building a
distro kernel, select this option.
config IA64_HP_SBA_IOMMU
bool "HP SBA IOMMU support"
default y
help
Say Y here to add support for the SBA IOMMU found on HP zx1 and
sx1000 systems. If you're unsure, answer Y.
config IA64_CYCLONE
bool "Cyclone (EXA) Time Source support"
help
Say Y here to enable support for IBM EXA Cyclone time source.
If you're unsure, answer N.
config IOSAPIC
bool
depends on !IA64_HP_SIM
default y
config FORCE_MAX_ZONEORDER
int "MAX_ORDER (11 - 17)" if !HUGETLB_PAGE
range 11 17 if !HUGETLB_PAGE
@@ -381,14 +305,12 @@ config ARCH_SPARSEMEM_ENABLE
select SPARSEMEM_VMEMMAP_ENABLE
config ARCH_DISCONTIGMEM_DEFAULT
def_bool y if (IA64_SGI_SN2 || IA64_GENERIC || IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB)
def_bool y
depends on ARCH_DISCONTIGMEM_ENABLE
config NUMA
bool "NUMA support"
depends on !IA64_HP_SIM && !FLATMEM
default y if IA64_SGI_SN2
select ACPI_NUMA if ACPI
depends on !FLATMEM
help
Say Y to compile the kernel to support NUMA (Non-Uniform Memory
Access). This option is for configuring high-end multiprocessor
@@ -409,7 +331,7 @@ config NODES_SHIFT
config VIRTUAL_MEM_MAP
bool "Virtual mem map"
depends on !SPARSEMEM
default y if !IA64_HP_SIM
default y
help
Say Y to compile the kernel with support for a virtual mem map.
This code also only takes effect if a memory hole of greater than
@@ -472,9 +394,6 @@ config IA64_MC_ERR_INJECT
If you're unsure, do not select this option.
config SGI_SN
def_bool y if (IA64_SGI_SN2 || IA64_GENERIC)
config IA64_ESI
bool "ESI (Extensible SAL Interface) support"
help
@@ -493,11 +412,9 @@ config IA64_HP_AML_NFW
the "force" module parameter, e.g., with the "aml_nfw.force"
kernel command line option.
source "drivers/sn/Kconfig"
config KEXEC
bool "kexec system call"
depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
depends on !SMP || HOTPLUG_CPU
select KEXEC_CORE
help
kexec is a system call that implements the ability to shutdown your
@@ -515,7 +432,7 @@ config KEXEC
config CRASH_DUMP
bool "kernel crash dumps"
depends on IA64_MCA_RECOVERY && !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
depends on IA64_MCA_RECOVERY && (!SMP || HOTPLUG_CPU)
help
Generate crash dump after being started by kexec.
@@ -537,8 +454,6 @@ endif
endmenu
source "arch/ia64/hp/sim/Kconfig"
config MSPEC
tristate "Memory special operations driver"
depends on IA64


@@ -14,7 +14,7 @@ config IA64_GRANULE_16MB
config IA64_GRANULE_64MB
bool "64MB"
depends on !(IA64_GENERIC || IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_SGI_SN2)
depends on BROKEN
endchoice


@@ -39,29 +39,23 @@ $(error Sorry, you need a newer version of the assember, one that is built from
ftp://ftp.hpl.hp.com/pub/linux-ia64/gas-030124.tar.gz)
endif
quiet_cmd_gzip = GZIP $@
cmd_gzip = cat $(real-prereqs) | gzip -n -f -9 > $@
quiet_cmd_objcopy = OBJCOPY $@
cmd_objcopy = $(OBJCOPY) $(OBJCOPYFLAGS) $(OBJCOPYFLAGS_$(@F)) $< $@
KBUILD_CFLAGS += $(cflags-y)
head-y := arch/ia64/kernel/head.o
libs-y += arch/ia64/lib/
core-y += arch/ia64/kernel/ arch/ia64/mm/
core-$(CONFIG_IA64_DIG) += arch/ia64/dig/
core-$(CONFIG_IA64_DIG_VTD) += arch/ia64/dig/
core-$(CONFIG_IA64_GENERIC) += arch/ia64/dig/
core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/
core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/
core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/
core-$(CONFIG_IA64_SGI_UV) += arch/ia64/uv/
drivers-$(CONFIG_PCI) += arch/ia64/pci/
drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/
drivers-$(CONFIG_IA64_HP_ZX1) += arch/ia64/hp/common/ arch/ia64/hp/zx1/
drivers-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/hp/common/ arch/ia64/hp/zx1/
drivers-$(CONFIG_IA64_GENERIC) += arch/ia64/hp/common/ arch/ia64/hp/zx1/ arch/ia64/hp/sim/ arch/ia64/sn/ arch/ia64/uv/
drivers-y += arch/ia64/pci/ arch/ia64/hp/common/
drivers-$(CONFIG_OPROFILE) += arch/ia64/oprofile/
boot := arch/ia64/hp/sim/boot
PHONY += boot compressed check
PHONY += compressed check
all: compressed unwcheck
@@ -69,22 +63,21 @@ compressed: vmlinux.gz
vmlinuz: vmlinux.gz
vmlinux.gz: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $@
vmlinux.gz: vmlinux.bin FORCE
$(call if_changed,gzip)
vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)
unwcheck: vmlinux
-$(Q)READELF=$(READELF) $(PYTHON) $(srctree)/arch/ia64/scripts/unwcheck.py $<
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
archheaders:
$(Q)$(MAKE) $(build)=arch/ia64/kernel/syscalls all
CLEAN_FILES += vmlinux.gz bootloader
boot: lib/lib.a vmlinux
$(Q)$(MAKE) $(build)=$(boot) $@
CLEAN_FILES += vmlinux.gz
install: vmlinux.gz
sh $(srctree)/arch/ia64/install.sh $(KERNELRELEASE) $< System.map "$(INSTALL_PATH)"
@@ -92,7 +85,6 @@ install: vmlinux.gz
define archhelp
echo '* compressed - Build compressed kernel image'
echo ' install - Install compressed kernel image'
echo ' boot - Build vmlinux and bootloader for Ski simulator'
echo '* unwcheck - Check vmlinux for invalid unwind info'
endef


@@ -7,7 +7,6 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_SGI_PARTITION=y
CONFIG_IA64_DIG=y
CONFIG_SMP=y
CONFIG_NR_CPUS=2
CONFIG_PREEMPT=y


@@ -44,14 +44,12 @@ CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_SGI_IOC4=y
CONFIG_SGI_XP=m
CONFIG_IDE=y
CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_GENERIC=y
CONFIG_BLK_DEV_CMD64X=y
CONFIG_BLK_DEV_PIIX=y
CONFIG_BLK_DEV_SGIIOC4=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=m
@@ -90,16 +88,11 @@ CONFIG_IGB=y
# CONFIG_SERIO_SERPORT is not set
CONFIG_GAMEPORT=m
CONFIG_SERIAL_NONSTANDARD=y
CONFIG_SGI_SNSC=y
CONFIG_SGI_TIOCX=y
CONFIG_SGI_MBCS=m
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=6
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
CONFIG_SERIAL_SGI_L1_CONSOLE=y
CONFIG_SERIAL_SGI_IOC4=y
# CONFIG_HW_RANDOM is not set
CONFIG_EFI_RTC=y
CONFIG_RAW_DRIVER=m
@@ -107,7 +100,6 @@ CONFIG_HPET=y
CONFIG_AGP=m
CONFIG_AGP_I460=m
CONFIG_AGP_HP_ZX1=m
CONFIG_AGP_SGI_TIOCA=m
CONFIG_DRM=m
CONFIG_DRM_TDFX=m
CONFIG_DRM_R128=m


@@ -19,7 +19,6 @@ CONFIG_SPARSEMEM_MANUAL=y
CONFIG_IA64_MCA_RECOVERY=y
CONFIG_PERFMON=y
CONFIG_IA64_PALINFO=y
CONFIG_SGI_IOC3=y
CONFIG_EFI_VARS=y
CONFIG_BINFMT_MISC=m
CONFIG_ACPI_BUTTON=m
@@ -37,14 +36,12 @@ CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_SGI_IOC4=y
CONFIG_IDE=y
CONFIG_BLK_DEV_IDECD=y
CONFIG_IDE_GENERIC=y
CONFIG_BLK_DEV_GENERIC=y
CONFIG_BLK_DEV_CMD64X=y
CONFIG_BLK_DEV_PIIX=y
CONFIG_BLK_DEV_SGIIOC4=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
@@ -79,17 +76,11 @@ CONFIG_E1000=y
# CONFIG_SERIO_SERPORT is not set
CONFIG_GAMEPORT=m
CONFIG_SERIAL_NONSTANDARD=y
CONFIG_SGI_SNSC=y
CONFIG_SGI_TIOCX=y
CONFIG_SGI_MBCS=m
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=6
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
CONFIG_SERIAL_SGI_L1_CONSOLE=y
CONFIG_SERIAL_SGI_IOC4=y
CONFIG_SERIAL_SGI_IOC3=y
# CONFIG_HW_RANDOM is not set
CONFIG_EFI_RTC=y
CONFIG_RAW_DRIVER=m
@@ -97,7 +88,6 @@ CONFIG_HPET=y
CONFIG_AGP=m
CONFIG_AGP_I460=m
CONFIG_AGP_HP_ZX1=m
CONFIG_AGP_SGI_TIOCA=m
CONFIG_DRM=m
CONFIG_DRM_TDFX=m
CONFIG_DRM_R128=m


@@ -1,52 +0,0 @@
CONFIG_SYSVIPC=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=16
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_IA64_HP_SIM=y
CONFIG_MCKINLEY=y
CONFIG_IA64_PAGE_SIZE_64KB=y
CONFIG_SMP=y
CONFIG_NR_CPUS=64
CONFIG_PREEMPT=y
CONFIG_IA64_PALINFO=m
CONFIG_EFI_VARS=y
CONFIG_BINFMT_MISC=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
# CONFIG_IPV6 is not set
# CONFIG_STANDALONE is not set
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
CONFIG_SCSI_SPI_ATTRS=y
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO_I8042 is not set
# CONFIG_LEGACY_PTYS is not set
CONFIG_EFI_RTC=y
# CONFIG_VGA_CONSOLE is not set
CONFIG_HP_SIMETH=y
CONFIG_HP_SIMSERIAL=y
CONFIG_HP_SIMSERIAL_CONSOLE=y
CONFIG_HP_SIMSCSI=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_FS_XATTR is not set
CONFIG_PROC_KCORE=y
CONFIG_HUGETLBFS=y
CONFIG_NFS_FS=y
CONFIG_NFSD=y
CONFIG_NFSD_V3=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_MUTEXES=y


@@ -12,7 +12,6 @@ CONFIG_MODULE_SRCVERSION_ALL=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
CONFIG_SGI_PARTITION=y
CONFIG_IA64_DIG=y
CONFIG_MCKINLEY=y
CONFIG_IA64_PAGE_SIZE_64KB=y
CONFIG_IA64_CYCLONE=y


@@ -4,7 +4,6 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_IA64_HP_ZX1=y
CONFIG_MCKINLEY=y
CONFIG_SMP=y
CONFIG_NR_CPUS=16


@@ -1,15 +0,0 @@
# SPDX-License-Identifier: GPL-2.0
#
# ia64/platform/dig/Makefile
#
# Copyright (C) 1999 Silicon Graphics, Inc.
# Copyright (C) Srinivasa Thirumalachar (sprasad@engr.sgi.com)
#
obj-y := setup.o
ifeq ($(CONFIG_INTEL_IOMMU), y)
obj-$(CONFIG_IA64_GENERIC) += machvec.o machvec_vtd.o
else
obj-$(CONFIG_IA64_GENERIC) += machvec.o
endif


@@ -1,3 +0,0 @@
#define MACHVEC_PLATFORM_NAME dig
#define MACHVEC_PLATFORM_HEADER <asm/machvec_dig.h>
#include <asm/machvec_init.h>


@@ -1,3 +0,0 @@
#define MACHVEC_PLATFORM_NAME dig_vtd
#define MACHVEC_PLATFORM_HEADER <asm/machvec_dig_vtd.h>
#include <asm/machvec_init.h>


@@ -1,71 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Platform dependent support for DIG64 platforms.
*
* Copyright (C) 1999 Intel Corp.
* Copyright (C) 1999, 2001 Hewlett-Packard Co
* Copyright (C) 1999, 2001, 2003 David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 1999 VA Linux Systems
* Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
* Copyright (C) 1999 Vijay Chander <vijay@engr.sgi.com>
*/
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/kdev_t.h>
#include <linux/string.h>
#include <linux/screen_info.h>
#include <linux/console.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/root_dev.h>
#include <asm/io.h>
#include <asm/machvec.h>
#include <asm/setup.h>
void __init
dig_setup (char **cmdline_p)
{
unsigned int orig_x, orig_y, num_cols, num_rows, font_height;
/*
* Default to /dev/sda2. This assumes that the EFI partition
* is physical disk 1 partition 1 and the Linux root disk is
* physical disk 1 partition 2.
*/
ROOT_DEV = Root_SDA2; /* default to second partition on first drive */
#ifdef CONFIG_SMP
init_smp_config();
#endif
memset(&screen_info, 0, sizeof(screen_info));
if (!ia64_boot_param->console_info.num_rows
|| !ia64_boot_param->console_info.num_cols)
{
printk(KERN_WARNING "dig_setup: warning: invalid screen-info, guessing 80x25\n");
orig_x = 0;
orig_y = 0;
num_cols = 80;
num_rows = 25;
font_height = 16;
} else {
orig_x = ia64_boot_param->console_info.orig_x;
orig_y = ia64_boot_param->console_info.orig_y;
num_cols = ia64_boot_param->console_info.num_cols;
num_rows = ia64_boot_param->console_info.num_rows;
font_height = 400 / num_rows;
}
screen_info.orig_x = orig_x;
screen_info.orig_y = orig_y;
screen_info.orig_video_cols = num_cols;
screen_info.orig_video_lines = num_rows;
screen_info.orig_video_points = font_height;
screen_info.orig_video_mode = 3; /* XXX fake */
screen_info.orig_video_isVGA = 1; /* XXX fake */
screen_info.orig_video_ega_bx = 3; /* XXX fake */
}


@@ -6,7 +6,5 @@
# Copyright (C) Alex Williamson (alex_williamson@hp.com)
#
obj-y := sba_iommu.o
obj-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += hwsw_iommu.o
obj-$(CONFIG_IA64_GENERIC) += hwsw_iommu.o
obj-$(CONFIG_IA64_HP_SBA_IOMMU) += sba_iommu.o
obj-$(CONFIG_IA64_HP_AML_NFW) += aml_nfw.o


@@ -1,60 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
* Contributed by David Mosberger-Tang <davidm@hpl.hp.com>
*
* This is a pseudo I/O MMU which dispatches to the hardware I/O MMU
* whenever possible. We assume that the hardware I/O MMU requires
* full 32-bit addressability, as is the case, e.g., for HP zx1-based
* systems (there, the I/O MMU window is mapped at 3-4GB). If a
* device doesn't provide full 32-bit addressability, we fall back on
* the sw I/O TLB. This is good enough to let us support broken
* hardware such as soundcards which have a DMA engine that can
* address only 28 bits.
*/
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>
#include <linux/export.h>
#include <asm/machvec.h>
extern const struct dma_map_ops sba_dma_ops;
/* swiotlb declarations & definitions: */
extern int swiotlb_late_init_with_default_size (size_t size);
/*
* Note: we need to make the determination of whether or not to use
* the sw I/O TLB based purely on the device structure. Anything else
* would be unreliable or would be too intrusive.
*/
static inline int use_swiotlb(struct device *dev)
{
return dev && dev->dma_mask &&
!sba_dma_ops.dma_supported(dev, *dev->dma_mask);
}
const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
{
if (use_swiotlb(dev))
return NULL;
return &sba_dma_ops;
}
EXPORT_SYMBOL(hwsw_dma_get_ops);
void __init
hwsw_init (void)
{
/* default to a smallish 2MB sw I/O TLB */
if (swiotlb_late_init_with_default_size (2 * (1<<20)) != 0) {
#ifdef CONFIG_IA64_GENERIC
/* Better to have normal DMA than panic */
printk(KERN_WARNING "%s: Failed to initialize software I/O TLB,"
" reverting to hpzx1 platform vector\n", __func__);
machvec_init("hpzx1");
#else
panic("Unable to initialize software I/O TLB services");
#endif
}
}


@@ -35,6 +35,7 @@
#include <linux/iommu-helper.h>
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>
#include <linux/swiotlb.h>
#include <asm/delay.h> /* ia64_get_itc() */
#include <asm/io.h>
@@ -43,8 +44,6 @@
#include <asm/acpi-ext.h>
extern int swiotlb_late_init_with_default_size (size_t size);
#define PFX "IOC: "
/*
@@ -251,12 +250,8 @@ static SBA_INLINE void sba_free_range(struct ioc *, dma_addr_t, size_t);
static u64 prefetch_spill_page;
#endif
#ifdef CONFIG_PCI
# define GET_IOC(dev) ((dev_is_pci(dev)) \
#define GET_IOC(dev) ((dev_is_pci(dev)) \
? ((struct ioc *) PCI_CONTROLLER(to_pci_dev(dev))->iommu) : NULL)
#else
# define GET_IOC(dev) NULL
#endif
/*
** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up
@@ -1741,9 +1736,7 @@ ioc_sac_init(struct ioc *ioc)
controller->iommu = ioc;
sac->sysdata = controller;
sac->dma_mask = 0xFFFFFFFFUL;
#ifdef CONFIG_PCI
sac->dev.bus = &pci_bus_type;
#endif
ioc->sac_only_dev = sac;
}
@@ -2062,27 +2055,33 @@ static int __init acpi_sba_ioc_init_acpi(void)
/* This has to run before acpi_scan_init(). */
arch_initcall(acpi_sba_ioc_init_acpi);
static int sba_dma_supported (struct device *dev, u64 mask)
{
/* make sure it's at least 32bit capable */
return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL);
}
static const struct dma_map_ops sba_dma_ops = {
.alloc = sba_alloc_coherent,
.free = sba_free_coherent,
.map_page = sba_map_page,
.unmap_page = sba_unmap_page,
.map_sg = sba_map_sg_attrs,
.unmap_sg = sba_unmap_sg_attrs,
.dma_supported = sba_dma_supported,
};
static int __init
sba_init(void)
{
if (!ia64_platform_is("hpzx1") && !ia64_platform_is("hpzx1_swiotlb"))
return 0;
#if defined(CONFIG_IA64_GENERIC)
/* If we are booting a kdump kernel, the sba_iommu will
* cause devices that were not shutdown properly to MCA
* as soon as they are turned back on. Our only option for
* a successful kdump kernel boot is to use the swiotlb.
/*
* If we are booting a kdump kernel, the sba_iommu will cause devices
* that were not shutdown properly to MCA as soon as they are turned
* back on. Our only option for a successful kdump kernel boot is to
* use swiotlb.
*/
if (is_kdump_kernel()) {
dma_ops = NULL;
if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
panic("Unable to initialize software I/O TLB:"
" Try machvec=dig boot option");
machvec_init("dig");
if (is_kdump_kernel())
return 0;
}
#endif
/*
* ioc_found should be populated by the acpi_sba_ioc_handler's .attach()
@@ -2091,43 +2090,18 @@ sba_init(void)
while (ioc_found)
acpi_sba_ioc_add(ioc_found);
if (!ioc_list) {
#ifdef CONFIG_IA64_GENERIC
/*
* If we didn't find something sba_iommu can claim, we
* need to setup the swiotlb and switch to the dig machvec.
*/
dma_ops = NULL;
if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
panic("Unable to find SBA IOMMU or initialize "
"software I/O TLB: Try machvec=dig boot option");
machvec_init("dig");
#else
panic("Unable to find SBA IOMMU: Try a generic or DIG kernel");
#endif
if (!ioc_list)
return 0;
}
#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_HP_ZX1_SWIOTLB)
/*
* hpzx1_swiotlb needs to have a fairly small swiotlb bounce
* buffer setup to support devices with smaller DMA masks than
* sba_iommu can handle.
*/
if (ia64_platform_is("hpzx1_swiotlb")) {
extern void hwsw_init(void);
hwsw_init();
}
#endif
#ifdef CONFIG_PCI
{
struct pci_bus *b = NULL;
while ((b = pci_find_next_bus(b)) != NULL)
sba_connect_bus(b);
}
#endif
/* no need for swiotlb with the iommu */
swiotlb_exit();
dma_ops = &sba_dma_ops;
#ifdef CONFIG_PROC_FS
ioc_proc_init();
@@ -2144,12 +2118,6 @@ nosbagart(char *str)
return 1;
}
static int sba_dma_supported (struct device *dev, u64 mask)
{
/* make sure it's at least 32bit capable */
return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL);
}
__setup("nosbagart", nosbagart);
static int __init
@@ -2174,18 +2142,3 @@ sba_page_override(char *str)
}
__setup("sbapagesize=",sba_page_override);
const struct dma_map_ops sba_dma_ops = {
.alloc = sba_alloc_coherent,
.free = sba_free_coherent,
.map_page = sba_map_page,
.unmap_page = sba_unmap_page,
.map_sg = sba_map_sg_attrs,
.unmap_sg = sba_unmap_sg_attrs,
.dma_supported = sba_dma_supported,
};
void sba_dma_init(void)
{
dma_ops = &sba_dma_ops;
}


@@ -1,23 +0,0 @@
# SPDX-License-Identifier: GPL-2.0
menu "HP Simulator drivers"
depends on IA64_HP_SIM || IA64_GENERIC
config HP_SIMETH
bool "Simulated Ethernet "
depends on NET
config HP_SIMSERIAL
bool "Simulated serial driver support"
depends on TTY
config HP_SIMSERIAL_CONSOLE
bool "Console for HP simulator"
depends on HP_SIMSERIAL
config HP_SIMSCSI
bool "Simulated SCSI disk"
depends on SCSI=y
endmenu


@@ -1,17 +0,0 @@
# SPDX-License-Identifier: GPL-2.0
#
# ia64/platform/hp/sim/Makefile
#
# Copyright (C) 2002 Hewlett-Packard Co.
# David Mosberger-Tang <davidm@hpl.hp.com>
# Copyright (C) 1999 Silicon Graphics, Inc.
# Copyright (C) Srinivasa Thirumalachar (sprasad@engr.sgi.com)
#
obj-y := hpsim_irq.o hpsim_setup.o hpsim.o
obj-$(CONFIG_IA64_GENERIC) += hpsim_machvec.o
obj-$(CONFIG_HP_SIMETH) += simeth.o
obj-$(CONFIG_HP_SIMSERIAL) += simserial.o
obj-$(CONFIG_HP_SIMSERIAL_CONSOLE) += hpsim_console.o
obj-$(CONFIG_HP_SIMSCSI) += simscsi.o


@@ -1,37 +0,0 @@
#
# ia64/boot/Makefile
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 1998, 2003 by David Mosberger-Tang <davidm@hpl.hp.com>
#
targets-$(CONFIG_IA64_HP_SIM) += bootloader
targets := vmlinux.bin vmlinux.gz $(targets-y)
quiet_cmd_cptotop = LN $@
cmd_cptotop = ln -f $< $@
vmlinux.gz: $(obj)/vmlinux.gz $(addprefix $(obj)/,$(targets-y))
$(call cmd,cptotop)
@echo ' Kernel: $@ is ready'
boot: bootloader
bootloader: $(obj)/bootloader
$(call cmd,cptotop)
$(obj)/vmlinux.gz: $(obj)/vmlinux.bin FORCE
$(call if_changed,gzip)
$(obj)/vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)
LDFLAGS_bootloader = -static -T
$(obj)/bootloader: $(src)/bootloader.lds $(obj)/bootloader.o $(obj)/boot_head.o $(obj)/fw-emu.o \
lib/lib.a arch/ia64/lib/lib.a FORCE
$(call if_changed,ld)


@@ -1,165 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 1998-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <asm/asmmacro.h>
#include <asm/pal.h>
.bss
.align 16
stack_mem:
.skip 16834
.text
/* This needs to be defined because lib/string.c:strlcat() calls it in case of error... */
GLOBAL_ENTRY(printk)
break 0
END(printk)
GLOBAL_ENTRY(_start)
.prologue
.save rp, r0
.body
movl gp = __gp
movl sp = stack_mem+16384-16
bsw.1
br.call.sptk.many rp=start_bootloader
0: nop 0 /* dummy nop to make unwinding work */
END(_start)
/*
* Set a break point on this function so that symbols are available to set breakpoints in
* the kernel being debugged.
*/
GLOBAL_ENTRY(debug_break)
br.ret.sptk.many b0
END(debug_break)
GLOBAL_ENTRY(ssc)
.regstk 5,0,0,0
mov r15=in4
break 0x80001
br.ret.sptk.many b0
END(ssc)
GLOBAL_ENTRY(jmp_to_kernel)
.regstk 2,0,0,0
mov r28=in0
mov b7=in1
br.sptk.few b7
END(jmp_to_kernel)
/*
* r28 contains the index of the PAL function
* r29--31 the args
* Return values in ret0--3 (r8--11)
*/
GLOBAL_ENTRY(pal_emulator_static)
mov r8=-1
mov r9=256
;;
cmp.gtu p6,p7=r9,r28 /* r28 <= 255? */
(p6) br.cond.sptk.few static
;;
mov r9=512
;;
cmp.gtu p6,p7=r9,r28
(p6) br.cond.sptk.few stacked
;;
static: cmp.eq p6,p7=PAL_PTCE_INFO,r28
(p7) br.cond.sptk.few 1f
;;
mov r8=0 /* status = 0 */
movl r9=0x100000000 /* tc.base */
movl r10=0x0000000200000003 /* count[0], count[1] */
movl r11=0x1000000000002000 /* stride[0], stride[1] */
br.cond.sptk.few rp
1: cmp.eq p6,p7=PAL_FREQ_RATIOS,r28
(p7) br.cond.sptk.few 1f
mov r8=0 /* status = 0 */
movl r9 =0x100000064 /* proc_ratio (1/100) */
movl r10=0x100000100 /* bus_ratio<<32 (1/256) */
movl r11=0x100000064 /* itc_ratio<<32 (1/100) */
;;
1: cmp.eq p6,p7=PAL_RSE_INFO,r28
(p7) br.cond.sptk.few 1f
mov r8=0 /* status = 0 */
mov r9=96 /* num phys stacked */
mov r10=0 /* hints */
mov r11=0
br.cond.sptk.few rp
1: cmp.eq p6,p7=PAL_CACHE_FLUSH,r28 /* PAL_CACHE_FLUSH */
(p7) br.cond.sptk.few 1f
mov r9=ar.lc
movl r8=524288 /* flush 512k million cache lines (16MB) */
;;
mov ar.lc=r8
movl r8=0xe000000000000000
;;
.loop: fc r8
add r8=32,r8
br.cloop.sptk.few .loop
sync.i
;;
srlz.i
;;
mov ar.lc=r9
mov r8=r0
;;
1: cmp.eq p6,p7=PAL_PERF_MON_INFO,r28
(p7) br.cond.sptk.few 1f
mov r8=0 /* status = 0 */
movl r9 =0x08122f04 /* generic=4 width=47 retired=8 cycles=18 */
mov r10=0 /* reserved */
mov r11=0 /* reserved */
mov r16=0xffff /* implemented PMC */
mov r17=0x3ffff /* implemented PMD */
add r18=8,r29 /* second index */
;;
st8 [r29]=r16,16 /* store implemented PMC */
st8 [r18]=r0,16 /* clear remaining bits */
;;
st8 [r29]=r0,16 /* clear remaining bits */
st8 [r18]=r0,16 /* clear remaining bits */
;;
st8 [r29]=r17,16 /* store implemented PMD */
st8 [r18]=r0,16 /* clear remaining bits */
mov r16=0xf0 /* cycles count capable PMC */
;;
st8 [r29]=r0,16 /* clear remaining bits */
st8 [r18]=r0,16 /* clear remaining bits */
mov r17=0xf0 /* retired bundles capable PMC */
;;
st8 [r29]=r16,16 /* store cycles capable */
st8 [r18]=r0,16 /* clear remaining bits */
;;
st8 [r29]=r0,16 /* clear remaining bits */
st8 [r18]=r0,16 /* clear remaining bits */
;;
st8 [r29]=r17,16 /* store retired bundle capable */
st8 [r18]=r0,16 /* clear remaining bits */
;;
st8 [r29]=r0,16 /* clear remaining bits */
st8 [r18]=r0,16 /* clear remaining bits */
;;
1: cmp.eq p6,p7=PAL_VM_SUMMARY,r28
(p7) br.cond.sptk.few 1f
mov r8=0 /* status = 0 */
movl r9=0x2044040020F1865 /* num_tc_levels=2, num_unique_tcs=4 */
/* max_itr_entry=64, max_dtr_entry=64 */
/* hash_tag_id=2, max_pkr=15 */
/* key_size=24, phys_add_size=50, vw=1 */
movl r10=0x183C /* rid_size=24, impl_va_msb=60 */
;;
1: cmp.eq p6,p7=PAL_MEM_ATTRIB,r28
(p7) br.cond.sptk.few 1f
mov r8=0 /* status = 0 */
mov r9=0x80|0x01 /* NatPage|WB */
;;
1: br.cond.sptk.few rp
stacked:
br.ret.sptk.few rp
END(pal_emulator_static)


@@ -1,175 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* arch/ia64/hp/sim/boot/bootloader.c
*
* Loads an ELF kernel.
*
* Copyright (C) 1998-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Stephane Eranian <eranian@hpl.hp.com>
*
* 01/07/99 S.Eranian modified to pass command line arguments to kernel
*/
struct task_struct; /* forward declaration for elf.h */
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/elf.h>
#include <asm/intrinsics.h>
#include <asm/pal.h>
#include <asm/pgtable.h>
#include <asm/sal.h>
#include "ssc.h"
struct disk_req {
unsigned long addr;
unsigned len;
};
struct disk_stat {
int fd;
unsigned count;
};
extern void jmp_to_kernel (unsigned long bp, unsigned long e_entry);
extern struct ia64_boot_param *sys_fw_init (const char *args, int arglen);
extern void debug_break (void);
static void
cons_write (const char *buf)
{
unsigned long ch;
while ((ch = *buf++) != '\0') {
ssc(ch, 0, 0, 0, SSC_PUTCHAR);
if (ch == '\n')
ssc('\r', 0, 0, 0, SSC_PUTCHAR);
}
}
#define MAX_ARGS 32
void
start_bootloader (void)
{
static char mem[4096];
static char buffer[1024];
unsigned long off;
int fd, i;
struct disk_req req;
struct disk_stat stat;
struct elfhdr *elf;
struct elf_phdr *elf_phdr; /* program header */
unsigned long e_entry, e_phoff, e_phnum;
register struct ia64_boot_param *bp;
char *kpath, *args;
long arglen = 0;
ssc(0, 0, 0, 0, SSC_CONSOLE_INIT);
/*
* S.Eranian: extract the commandline argument from the simulator
*
* The expected format is as follows:
*
* kernelname args...
*
* Both are optional but you can't have the second one without the first.
*/
arglen = ssc((long) buffer, 0, 0, 0, SSC_GET_ARGS);
kpath = "vmlinux";
args = buffer;
if (arglen > 0) {
kpath = buffer;
while (*args != ' ' && *args != '\0')
++args, --arglen;
if (*args == ' ')
*args++ = '\0', --arglen;
}
if (arglen <= 0) {
args = "";
arglen = 1;
}
fd = ssc((long) kpath, 1, 0, 0, SSC_OPEN);
if (fd < 0) {
cons_write(kpath);
cons_write(": file not found, reboot now\n");
for(;;);
}
stat.fd = fd;
off = 0;
req.len = sizeof(mem);
req.addr = (long) mem;
ssc(fd, 1, (long) &req, off, SSC_READ);
ssc((long) &stat, 0, 0, 0, SSC_WAIT_COMPLETION);
elf = (struct elfhdr *) mem;
if (elf->e_ident[0] == 0x7f && strncmp(elf->e_ident + 1, "ELF", 3) != 0) {
cons_write("not an ELF file\n");
return;
}
if (elf->e_type != ET_EXEC) {
cons_write("not an ELF executable\n");
return;
}
if (!elf_check_arch(elf)) {
cons_write("kernel not for this processor\n");
return;
}
e_entry = elf->e_entry;
e_phnum = elf->e_phnum;
e_phoff = elf->e_phoff;
cons_write("loading ");
cons_write(kpath);
cons_write("...\n");
for (i = 0; i < e_phnum; ++i) {
req.len = sizeof(*elf_phdr);
req.addr = (long) mem;
ssc(fd, 1, (long) &req, e_phoff, SSC_READ);
ssc((long) &stat, 0, 0, 0, SSC_WAIT_COMPLETION);
if (stat.count != sizeof(*elf_phdr)) {
cons_write("failed to read phdr\n");
return;
}
e_phoff += sizeof(*elf_phdr);
elf_phdr = (struct elf_phdr *) mem;
if (elf_phdr->p_type != PT_LOAD)
continue;
req.len = elf_phdr->p_filesz;
req.addr = __pa(elf_phdr->p_paddr);
ssc(fd, 1, (long) &req, elf_phdr->p_offset, SSC_READ);
ssc((long) &stat, 0, 0, 0, SSC_WAIT_COMPLETION);
memset((char *)__pa(elf_phdr->p_paddr) + elf_phdr->p_filesz, 0,
elf_phdr->p_memsz - elf_phdr->p_filesz);
}
ssc(fd, 0, 0, 0, SSC_CLOSE);
cons_write("starting kernel...\n");
/* fake an I/O base address: */
ia64_setreg(_IA64_REG_AR_KR0, 0xffffc000000UL);
bp = sys_fw_init(args, arglen);
ssc(0, (long) kpath, 0, 0, SSC_LOAD_SYMBOLS);
debug_break();
jmp_to_kernel((unsigned long) bp, e_entry);
cons_write("kernel returned!\n");
ssc(-1, 0, 0, 0, SSC_EXIT);
}


@@ -1,67 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
OUTPUT_FORMAT("elf64-ia64-little")
OUTPUT_ARCH(ia64)
ENTRY(_start)
SECTIONS
{
/* Read-only sections, merged into text segment: */
. = 0x100000;
_text = .;
.text : { *(__ivt_section) *(.text) }
_etext = .;
/* Global data */
_data = .;
.rodata : { *(.rodata) *(.rodata.*) }
.data : { *(.data) *(.gnu.linkonce.d*) CONSTRUCTORS }
__gp = ALIGN (8) + 0x200000;
.got : { *(.got.plt) *(.got) }
/* We want the small data sections together, so single-instruction offsets
can access them all, and initialized data all before uninitialized, so
we can shorten the on-disk segment size. */
.sdata : { *(.sdata) }
_edata = .;
__bss_start = .;
.sbss : { *(.sbss) *(.scommon) }
.bss : { *(.bss) *(COMMON) }
. = ALIGN(64 / 8);
__bss_stop = .;
_end = . ;
/* Stabs debugging sections. */
.stab 0 : { *(.stab) }
.stabstr 0 : { *(.stabstr) }
.stab.excl 0 : { *(.stab.excl) }
.stab.exclstr 0 : { *(.stab.exclstr) }
.stab.index 0 : { *(.stab.index) }
.stab.indexstr 0 : { *(.stab.indexstr) }
.comment 0 : { *(.comment) }
/* DWARF debug sections.
Symbols in the DWARF debugging sections are relative to the beginning
of the section so we begin them at 0. */
/* DWARF 1 */
.debug 0 : { *(.debug) }
.line 0 : { *(.line) }
/* GNU DWARF 1 extensions */
.debug_srcinfo 0 : { *(.debug_srcinfo) }
.debug_sfnames 0 : { *(.debug_sfnames) }
/* DWARF 1.1 and DWARF 2 */
.debug_aranges 0 : { *(.debug_aranges) }
.debug_pubnames 0 : { *(.debug_pubnames) }
/* DWARF 2 */
.debug_info 0 : { *(.debug_info) }
.debug_abbrev 0 : { *(.debug_abbrev) }
.debug_line 0 : { *(.debug_line) }
.debug_frame 0 : { *(.debug_frame) }
.debug_str 0 : { *(.debug_str) }
.debug_loc 0 : { *(.debug_loc) }
.debug_macinfo 0 : { *(.debug_macinfo) }
/* SGI/MIPS DWARF 2 extensions */
.debug_weaknames 0 : { *(.debug_weaknames) }
.debug_funcnames 0 : { *(.debug_funcnames) }
.debug_typenames 0 : { *(.debug_typenames) }
.debug_varnames 0 : { *(.debug_varnames) }
/* These must appear regardless of . */
}


@@ -1,374 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* PAL & SAL emulation.
*
* Copyright (C) 1998-2001 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#ifdef CONFIG_PCI
# include <linux/pci.h>
#endif
#include <linux/efi.h>
#include <asm/io.h>
#include <asm/pal.h>
#include <asm/sal.h>
#include <asm/setup.h>
#include "ssc.h"
#define MB (1024*1024UL)
#define SIMPLE_MEMMAP 1
#if SIMPLE_MEMMAP
# define NUM_MEM_DESCS 4
#else
# define NUM_MEM_DESCS 16
#endif
static char fw_mem[( sizeof(struct ia64_boot_param)
+ sizeof(efi_system_table_t)
+ sizeof(efi_runtime_services_t)
+ 1*sizeof(efi_config_table_t)
+ sizeof(struct ia64_sal_systab)
+ sizeof(struct ia64_sal_desc_entry_point)
+ NUM_MEM_DESCS*(sizeof(efi_memory_desc_t))
+ 1024)] __attribute__ ((aligned (8)));
#define SECS_PER_HOUR (60 * 60)
#define SECS_PER_DAY (SECS_PER_HOUR * 24)
/* Compute the `struct tm' representation of *T,
offset OFFSET seconds east of UTC,
and store year, yday, mon, mday, wday, hour, min, sec into *TP.
Return nonzero if successful. */
int
offtime (unsigned long t, efi_time_t *tp)
{
const unsigned short int __mon_yday[2][13] =
{
/* Normal years. */
{ 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 },
/* Leap years. */
{ 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 }
};
long int days, rem, y;
const unsigned short int *ip;
days = t / SECS_PER_DAY;
rem = t % SECS_PER_DAY;
while (rem < 0) {
rem += SECS_PER_DAY;
--days;
}
while (rem >= SECS_PER_DAY) {
rem -= SECS_PER_DAY;
++days;
}
tp->hour = rem / SECS_PER_HOUR;
rem %= SECS_PER_HOUR;
tp->minute = rem / 60;
tp->second = rem % 60;
/* January 1, 1970 was a Thursday. */
y = 1970;
# define DIV(a, b) ((a) / (b) - ((a) % (b) < 0))
# define LEAPS_THRU_END_OF(y) (DIV (y, 4) - DIV (y, 100) + DIV (y, 400))
# define __isleap(year) \
((year) % 4 == 0 && ((year) % 100 != 0 || (year) % 400 == 0))
while (days < 0 || days >= (__isleap (y) ? 366 : 365)) {
/* Guess a corrected year, assuming 365 days per year. */
long int yg = y + days / 365 - (days % 365 < 0);
/* Adjust DAYS and Y to match the guessed year. */
days -= ((yg - y) * 365 + LEAPS_THRU_END_OF (yg - 1)
- LEAPS_THRU_END_OF (y - 1));
y = yg;
}
tp->year = y;
ip = __mon_yday[__isleap(y)];
for (y = 11; days < (long int) ip[y]; --y)
continue;
days -= ip[y];
tp->month = y + 1;
tp->day = days + 1;
return 1;
}
extern void pal_emulator_static (void);
/* Macro to emulate SAL call using legacy IN and OUT calls to CF8, CFC etc.. */
#define BUILD_CMD(addr) ((0x80000000 | (addr)) & ~3)
#define REG_OFFSET(addr) (0x00000000000000FF & (addr))
#define DEVICE_FUNCTION(addr) (0x000000000000FF00 & (addr))
#define BUS_NUMBER(addr) (0x0000000000FF0000 & (addr))
static efi_status_t
fw_efi_get_time (efi_time_t *tm, efi_time_cap_t *tc)
{
#if defined(CONFIG_IA64_HP_SIM) || defined(CONFIG_IA64_GENERIC)
struct {
int tv_sec; /* must be 32bits to work */
int tv_usec;
} tv32bits;
ssc((unsigned long) &tv32bits, 0, 0, 0, SSC_GET_TOD);
memset(tm, 0, sizeof(*tm));
offtime(tv32bits.tv_sec, tm);
if (tc)
memset(tc, 0, sizeof(*tc));
#else
# error Not implemented yet...
#endif
return EFI_SUCCESS;
}
static void
efi_reset_system (int reset_type, efi_status_t status, unsigned long data_size, efi_char16_t *data)
{
#if defined(CONFIG_IA64_HP_SIM) || defined(CONFIG_IA64_GENERIC)
ssc(status, 0, 0, 0, SSC_EXIT);
#else
# error Not implemented yet...
#endif
}
static efi_status_t
efi_unimplemented (void)
{
return EFI_UNSUPPORTED;
}
static struct sal_ret_values
sal_emulator (long index, unsigned long in1, unsigned long in2,
unsigned long in3, unsigned long in4, unsigned long in5,
unsigned long in6, unsigned long in7)
{
long r9 = 0;
long r10 = 0;
long r11 = 0;
long status;
/*
* Don't do a "switch" here since that gives us code that
* isn't self-relocatable.
*/
status = 0;
if (index == SAL_FREQ_BASE) {
if (in1 == SAL_FREQ_BASE_PLATFORM)
r9 = 200000000;
else if (in1 == SAL_FREQ_BASE_INTERVAL_TIMER) {
/*
* Is this supposed to be the cr.itc frequency
* or something platform specific? The SAL
* doc ain't exactly clear on this...
*/
r9 = 700000000;
} else if (in1 == SAL_FREQ_BASE_REALTIME_CLOCK)
r9 = 1;
else
status = -1;
} else if (index == SAL_SET_VECTORS) {
;
} else if (index == SAL_GET_STATE_INFO) {
;
} else if (index == SAL_GET_STATE_INFO_SIZE) {
;
} else if (index == SAL_CLEAR_STATE_INFO) {
;
} else if (index == SAL_MC_RENDEZ) {
;
} else if (index == SAL_MC_SET_PARAMS) {
;
} else if (index == SAL_CACHE_FLUSH) {
;
} else if (index == SAL_CACHE_INIT) {
;
#ifdef CONFIG_PCI
} else if (index == SAL_PCI_CONFIG_READ) {
/*
* in1 contains the PCI configuration address and in2
* the size of the read. The value that is read is
* returned via the general register r9.
*/
outl(BUILD_CMD(in1), 0xCF8);
if (in2 == 1) /* Reading byte */
r9 = inb(0xCFC + ((REG_OFFSET(in1) & 3)));
else if (in2 == 2) /* Reading word */
r9 = inw(0xCFC + ((REG_OFFSET(in1) & 2)));
else /* Reading dword */
r9 = inl(0xCFC);
status = PCIBIOS_SUCCESSFUL;
} else if (index == SAL_PCI_CONFIG_WRITE) {
/*
* in1 contains the PCI configuration address, in2 the
* size of the write, and in3 the actual value to be
* written out.
*/
outl(BUILD_CMD(in1), 0xCF8);
if (in2 == 1) /* Writing byte */
outb(in3, 0xCFC + ((REG_OFFSET(in1) & 3)));
else if (in2 == 2) /* Writing word */
outw(in3, 0xCFC + ((REG_OFFSET(in1) & 2)));
else /* Writing dword */
outl(in3, 0xCFC);
status = PCIBIOS_SUCCESSFUL;
#endif /* CONFIG_PCI */
} else if (index == SAL_UPDATE_PAL) {
;
} else {
status = -1;
}
return ((struct sal_ret_values) {status, r9, r10, r11});
}
struct ia64_boot_param *
sys_fw_init (const char *args, int arglen)
{
efi_system_table_t *efi_systab;
efi_runtime_services_t *efi_runtime;
efi_config_table_t *efi_tables;
struct ia64_sal_systab *sal_systab;
efi_memory_desc_t *efi_memmap, *md;
unsigned long *pal_desc, *sal_desc;
struct ia64_sal_desc_entry_point *sal_ed;
struct ia64_boot_param *bp;
unsigned char checksum = 0;
char *cp, *cmd_line;
int i = 0;
# define MAKE_MD(typ, attr, start, end) \
do { \
md = efi_memmap + i++; \
md->type = typ; \
md->pad = 0; \
md->phys_addr = start; \
md->virt_addr = 0; \
md->num_pages = (end - start) >> 12; \
md->attribute = attr; \
} while (0)
memset(fw_mem, 0, sizeof(fw_mem));
pal_desc = (unsigned long *) &pal_emulator_static;
sal_desc = (unsigned long *) &sal_emulator;
cp = fw_mem;
efi_systab = (void *) cp; cp += sizeof(*efi_systab);
efi_runtime = (void *) cp; cp += sizeof(*efi_runtime);
efi_tables = (void *) cp; cp += sizeof(*efi_tables);
sal_systab = (void *) cp; cp += sizeof(*sal_systab);
sal_ed = (void *) cp; cp += sizeof(*sal_ed);
efi_memmap = (void *) cp; cp += NUM_MEM_DESCS*sizeof(*efi_memmap);
bp = (void *) cp; cp += sizeof(*bp);
cmd_line = (void *) cp;
if (args) {
if (arglen >= 1024)
arglen = 1023;
memcpy(cmd_line, args, arglen);
} else {
arglen = 0;
}
cmd_line[arglen] = '\0';
memset(efi_systab, 0, sizeof(*efi_systab));
efi_systab->hdr.signature = EFI_SYSTEM_TABLE_SIGNATURE;
efi_systab->hdr.revision = ((1 << 16) | 00);
efi_systab->hdr.headersize = sizeof(efi_systab->hdr);
efi_systab->fw_vendor = __pa("H\0e\0w\0l\0e\0t\0t\0-\0P\0a\0c\0k\0a\0r\0d\0\0");
efi_systab->fw_revision = 1;
efi_systab->runtime = (void *) __pa(efi_runtime);
efi_systab->nr_tables = 1;
efi_systab->tables = __pa(efi_tables);
efi_runtime->hdr.signature = EFI_RUNTIME_SERVICES_SIGNATURE;
efi_runtime->hdr.revision = EFI_RUNTIME_SERVICES_REVISION;
efi_runtime->hdr.headersize = sizeof(efi_runtime->hdr);
efi_runtime->get_time = (void *)__pa(&fw_efi_get_time);
efi_runtime->set_time = (void *)__pa(&efi_unimplemented);
efi_runtime->get_wakeup_time = (void *)__pa(&efi_unimplemented);
efi_runtime->set_wakeup_time = (void *)__pa(&efi_unimplemented);
efi_runtime->set_virtual_address_map = (void *)__pa(&efi_unimplemented);
efi_runtime->get_variable = (void *)__pa(&efi_unimplemented);
efi_runtime->get_next_variable = (void *)__pa(&efi_unimplemented);
efi_runtime->set_variable = (void *)__pa(&efi_unimplemented);
efi_runtime->get_next_high_mono_count = (void *)__pa(&efi_unimplemented);
efi_runtime->reset_system = (void *)__pa(&efi_reset_system);
efi_tables->guid = SAL_SYSTEM_TABLE_GUID;
efi_tables->table = __pa(sal_systab);
/* fill in the SAL system table: */
memcpy(sal_systab->signature, "SST_", 4);
sal_systab->size = sizeof(*sal_systab);
sal_systab->sal_rev_minor = 1;
sal_systab->sal_rev_major = 0;
sal_systab->entry_count = 1;
#ifdef CONFIG_IA64_GENERIC
strcpy(sal_systab->oem_id, "Generic");
strcpy(sal_systab->product_id, "IA-64 system");
#endif
#ifdef CONFIG_IA64_HP_SIM
strcpy(sal_systab->oem_id, "Hewlett-Packard");
strcpy(sal_systab->product_id, "HP-simulator");
#endif
/* fill in an entry point: */
sal_ed->type = SAL_DESC_ENTRY_POINT;
sal_ed->pal_proc = __pa(pal_desc[0]);
sal_ed->sal_proc = __pa(sal_desc[0]);
sal_ed->gp = __pa(sal_desc[1]);
for (cp = (char *) sal_systab; cp < (char *) efi_memmap; ++cp)
checksum += *cp;
sal_systab->checksum = -checksum;
#if SIMPLE_MEMMAP
/* simulate free memory at physical address zero */
MAKE_MD(EFI_BOOT_SERVICES_DATA, EFI_MEMORY_WB, 0*MB, 1*MB);
MAKE_MD(EFI_PAL_CODE, EFI_MEMORY_WB, 1*MB, 2*MB);
MAKE_MD(EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_WB, 2*MB, 130*MB);
MAKE_MD(EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_WB, 4096*MB, 4128*MB);
#else
MAKE_MD( 4, 0x9, 0x0000000000000000, 0x0000000000001000);
MAKE_MD( 7, 0x9, 0x0000000000001000, 0x000000000008a000);
MAKE_MD( 4, 0x9, 0x000000000008a000, 0x00000000000a0000);
MAKE_MD( 5, 0x8000000000000009, 0x00000000000c0000, 0x0000000000100000);
MAKE_MD( 7, 0x9, 0x0000000000100000, 0x0000000004400000);
MAKE_MD( 2, 0x9, 0x0000000004400000, 0x0000000004be5000);
MAKE_MD( 7, 0x9, 0x0000000004be5000, 0x000000007f77e000);
MAKE_MD( 6, 0x8000000000000009, 0x000000007f77e000, 0x000000007fb94000);
MAKE_MD( 6, 0x8000000000000009, 0x000000007fb94000, 0x000000007fb95000);
MAKE_MD( 6, 0x8000000000000009, 0x000000007fb95000, 0x000000007fc00000);
MAKE_MD(13, 0x8000000000000009, 0x000000007fc00000, 0x000000007fc3a000);
MAKE_MD( 7, 0x9, 0x000000007fc3a000, 0x000000007fea0000);
MAKE_MD( 5, 0x8000000000000009, 0x000000007fea0000, 0x000000007fea8000);
MAKE_MD( 7, 0x9, 0x000000007fea8000, 0x000000007feab000);
MAKE_MD( 5, 0x8000000000000009, 0x000000007feab000, 0x000000007ffff000);
MAKE_MD( 7, 0x9, 0x00000000ff400000, 0x0000000104000000);
#endif
bp->efi_systab = __pa(&fw_mem);
bp->efi_memmap = __pa(efi_memmap);
bp->efi_memmap_size = NUM_MEM_DESCS*sizeof(efi_memory_desc_t);
bp->efi_memdesc_size = sizeof(efi_memory_desc_t);
bp->efi_memdesc_version = 1;
bp->command_line = __pa(cmd_line);
bp->console_info.num_cols = 80;
bp->console_info.num_rows = 25;
bp->console_info.orig_x = 0;
bp->console_info.orig_y = 24;
bp->fpswa = 0;
return bp;
}


@@ -1,36 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 1998-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Stephane Eranian <eranian@hpl.hp.com>
*/
#ifndef ssc_h
#define ssc_h
/* Simulator system calls: */
#define SSC_CONSOLE_INIT 20
#define SSC_GETCHAR 21
#define SSC_PUTCHAR 31
#define SSC_OPEN 50
#define SSC_CLOSE 51
#define SSC_READ 52
#define SSC_WRITE 53
#define SSC_GET_COMPLETION 54
#define SSC_WAIT_COMPLETION 55
#define SSC_CONNECT_INTERRUPT 58
#define SSC_GENERATE_INTERRUPT 59
#define SSC_SET_PERIODIC_INTERRUPT 60
#define SSC_GET_RTC 65
#define SSC_EXIT 66
#define SSC_LOAD_SYMBOLS 69
#define SSC_GET_TOD 74
#define SSC_GET_ARGS 75
/*
* Simulator system call.
*/
extern long ssc (long arg0, long arg1, long arg2, long arg3, int nr);
#endif /* ssc_h */


@@ -1,11 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <asm/asmmacro.h>
/*
* Simulator system call.
*/
GLOBAL_ENTRY(ia64_ssc)
mov r15=r36
break 0x80001
br.ret.sptk.many rp
END(ia64_ssc)


@@ -1,77 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Platform dependent support for HP simulator.
*
* Copyright (C) 1998, 1999, 2002 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 1999 Vijay Chander <vijay@engr.sgi.com>
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/tty.h>
#include <linux/kdev_t.h>
#include <linux/console.h>
#include <asm/delay.h>
#include <asm/irq.h>
#include <asm/pal.h>
#include <asm/machvec.h>
#include <asm/pgtable.h>
#include <asm/sal.h>
#include <asm/hpsim.h>
#include "hpsim_ssc.h"
static int simcons_init (struct console *, char *);
static void simcons_write (struct console *, const char *, unsigned);
static struct tty_driver *simcons_console_device (struct console *, int *);
static struct console hpsim_cons = {
.name = "simcons",
.write = simcons_write,
.device = simcons_console_device,
.setup = simcons_init,
.flags = CON_PRINTBUFFER,
.index = -1,
};
static int
simcons_init (struct console *cons, char *options)
{
return 0;
}
static void
simcons_write (struct console *cons, const char *buf, unsigned count)
{
unsigned long ch;
while (count-- > 0) {
ch = *buf++;
ia64_ssc(ch, 0, 0, 0, SSC_PUTCHAR);
if (ch == '\n')
ia64_ssc('\r', 0, 0, 0, SSC_PUTCHAR);
}
}
static struct tty_driver *simcons_console_device (struct console *c, int *index)
{
*index = c->index;
return hp_simserial_driver;
}
int simcons_register(void)
{
if (!ia64_platform_is("hpsim"))
return 1;
if (hpsim_cons.flags & CON_ENABLED)
return 1;
register_console(&hpsim_cons);
return 0;
}


@@ -1,76 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Platform dependent support for HP simulator.
*
* Copyright (C) 1998-2001 Hewlett-Packard Co
* Copyright (C) 1998-2001 David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/irq.h>
#include "hpsim_ssc.h"
static unsigned int
hpsim_irq_startup(struct irq_data *data)
{
return 0;
}
static void
hpsim_irq_noop(struct irq_data *data)
{
}
static int
hpsim_set_affinity_noop(struct irq_data *d, const struct cpumask *b, bool f)
{
return 0;
}
static struct irq_chip irq_type_hp_sim = {
.name = "hpsim",
.irq_startup = hpsim_irq_startup,
.irq_shutdown = hpsim_irq_noop,
.irq_enable = hpsim_irq_noop,
.irq_disable = hpsim_irq_noop,
.irq_ack = hpsim_irq_noop,
.irq_set_affinity = hpsim_set_affinity_noop,
};
static void hpsim_irq_set_chip(int irq)
{
struct irq_chip *chip = irq_get_chip(irq);
if (chip == &no_irq_chip)
irq_set_chip(irq, &irq_type_hp_sim);
}
static void hpsim_connect_irq(int intr, int irq)
{
ia64_ssc(intr, irq, 0, 0, SSC_CONNECT_INTERRUPT);
}
int hpsim_get_irq(int intr)
{
int irq = assign_irq_vector(AUTO_ASSIGN);
if (irq >= 0) {
hpsim_irq_set_chip(irq);
irq_set_handler(irq, handle_simple_irq);
hpsim_connect_irq(intr, irq);
}
return irq;
}
void __init
hpsim_irq_init (void)
{
int i;
for_each_active_irq(i)
hpsim_irq_set_chip(i);
}


@@ -1,3 +0,0 @@
#define MACHVEC_PLATFORM_NAME hpsim
#define MACHVEC_PLATFORM_HEADER <asm/machvec_hpsim.h>
#include <asm/machvec_init.h>


@@ -1,41 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Platform dependent support for HP simulator.
*
* Copyright (C) 1998, 1999, 2002 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 1999 Vijay Chander <vijay@engr.sgi.com>
*/
#include <linux/console.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/param.h>
#include <linux/root_dev.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/delay.h>
#include <asm/irq.h>
#include <asm/pal.h>
#include <asm/machvec.h>
#include <asm/pgtable.h>
#include <asm/sal.h>
#include <asm/hpsim.h>
#include "hpsim_ssc.h"
void
ia64_ctl_trace (long on)
{
ia64_ssc(on, 0, 0, 0, SSC_CTL_TRACE);
}
void __init
hpsim_setup (char **cmdline_p)
{
ROOT_DEV = Root_SDA1; /* default to first SCSI drive */
simcons_register();
}


@@ -1,37 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Platform dependent support for HP simulator.
*
* Copyright (C) 1998, 1999 Hewlett-Packard Co
* Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 1999 Vijay Chander <vijay@engr.sgi.com>
*/
#ifndef _IA64_PLATFORM_HPSIM_SSC_H
#define _IA64_PLATFORM_HPSIM_SSC_H
/* Simulator system calls: */
#define SSC_CONSOLE_INIT 20
#define SSC_GETCHAR 21
#define SSC_PUTCHAR 31
#define SSC_CONNECT_INTERRUPT 58
#define SSC_GENERATE_INTERRUPT 59
#define SSC_SET_PERIODIC_INTERRUPT 60
#define SSC_GET_RTC 65
#define SSC_EXIT 66
#define SSC_LOAD_SYMBOLS 69
#define SSC_GET_TOD 74
#define SSC_CTL_TRACE 76
#define SSC_NETDEV_PROBE 100
#define SSC_NETDEV_SEND 101
#define SSC_NETDEV_RECV 102
#define SSC_NETDEV_ATTACH 103
#define SSC_NETDEV_DETACH 104
/*
* Simulator system call.
*/
extern long ia64_ssc (long arg0, long arg1, long arg2, long arg3, int nr);
#endif /* _IA64_PLATFORM_HPSIM_SSC_H */


@@ -1,510 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Simulated Ethernet Driver
*
* Copyright (C) 1999-2001, 2003 Hewlett-Packard Co
* Stephane Eranian <eranian@hpl.hp.com>
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/in.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_ether.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/notifier.h>
#include <linux/bitops.h>
#include <asm/irq.h>
#include <asm/hpsim.h>
#include "hpsim_ssc.h"
#define SIMETH_RECV_MAX 10
/*
* Maximum possible received frame for Ethernet.
* We preallocate an sk_buff of that size to avoid costly
* memcpy for temporary buffer into sk_buff. We do basically
* what's done in other drivers, like eepro with a ring.
* The difference is, of course, that we don't have real DMA !!!
*/
#define SIMETH_FRAME_SIZE ETH_FRAME_LEN
#define NETWORK_INTR 8
struct simeth_local {
struct net_device_stats stats;
int simfd; /* descriptor in the simulator */
};
static int simeth_probe1(void);
static int simeth_open(struct net_device *dev);
static int simeth_close(struct net_device *dev);
static int simeth_tx(struct sk_buff *skb, struct net_device *dev);
static int simeth_rx(struct net_device *dev);
static struct net_device_stats *simeth_get_stats(struct net_device *dev);
static irqreturn_t simeth_interrupt(int irq, void *dev_id);
static void set_multicast_list(struct net_device *dev);
static int simeth_device_event(struct notifier_block *this,unsigned long event, void *ptr);
static char *simeth_version="0.3";
/*
* This variable is used to establish a mapping between the Linux/ia64 kernel
* and the host linux kernel.
*
* As of today, we support only one card, even though most of the code
* is ready for many more. The mapping is then:
* linux/ia64 -> linux/x86
* eth0 -> eth1
*
* In the future, with some string operations, we could easily support up
* to 10 cards (0-9).
*
* The default mapping can be changed on the kernel command line by
* specifying simeth=ethX (or whatever string you want).
*/
static char *simeth_device="eth0"; /* default host interface to use */
static volatile unsigned int card_count; /* how many cards "found" so far */
static int simeth_debug; /* set to 1 to get debug information */
/*
* Used to catch IFF_UP & IFF_DOWN events
*/
static struct notifier_block simeth_dev_notifier = {
simeth_device_event,
NULL
};
/*
* Function used when using a kernel command line option.
*
* Format: simeth=interface_name (like eth0)
*/
static int __init
simeth_setup(char *str)
{
simeth_device = str;
return 1;
}
__setup("simeth=", simeth_setup);
/*
* Function used to probe for simeth devices when not installed
* as a loadable module
*/
int __init
simeth_probe (void)
{
int r;
printk(KERN_INFO "simeth: v%s\n", simeth_version);
r = simeth_probe1();
if (r == 0) register_netdevice_notifier(&simeth_dev_notifier);
return r;
}
static inline int
netdev_probe(char *name, unsigned char *ether)
{
return ia64_ssc(__pa(name), __pa(ether), 0,0, SSC_NETDEV_PROBE);
}
static inline int
netdev_attach(int fd, int irq, unsigned int ipaddr)
{
/* this puts the host interface in the right mode (start interrupting) */
return ia64_ssc(fd, ipaddr, 0,0, SSC_NETDEV_ATTACH);
}
static inline int
netdev_detach(int fd)
{
/*
* inactivate the host interface (don't interrupt anymore) */
return ia64_ssc(fd, 0,0,0, SSC_NETDEV_DETACH);
}
static inline int
netdev_send(int fd, unsigned char *buf, unsigned int len)
{
return ia64_ssc(fd, __pa(buf), len, 0, SSC_NETDEV_SEND);
}
static inline int
netdev_read(int fd, unsigned char *buf, unsigned int len)
{
return ia64_ssc(fd, __pa(buf), len, 0, SSC_NETDEV_RECV);
}
static const struct net_device_ops simeth_netdev_ops = {
.ndo_open = simeth_open,
.ndo_stop = simeth_close,
.ndo_start_xmit = simeth_tx,
.ndo_get_stats = simeth_get_stats,
.ndo_set_rx_mode = set_multicast_list, /* not yet used */
};
/*
* Function shared with module code, so cannot be in init section
*
* So far this function "detects" only one card (test_&_set) but could
* be extended easily.
*
* Return:
* - -ENODEV if no device is found
* - -ENOMEM if no more memory is available
* - 0 otherwise
*/
static int
simeth_probe1(void)
{
unsigned char mac_addr[ETH_ALEN];
struct simeth_local *local;
struct net_device *dev;
int fd, err, rc;
/*
* XXX Fix me
* let's support just one card for now
*/
if (test_and_set_bit(0, &card_count))
return -ENODEV;
/*
* check with the simulator for the device
*/
fd = netdev_probe(simeth_device, mac_addr);
if (fd == -1)
return -ENODEV;
dev = alloc_etherdev(sizeof(struct simeth_local));
if (!dev)
return -ENOMEM;
memcpy(dev->dev_addr, mac_addr, sizeof(mac_addr));
local = netdev_priv(dev);
local->simfd = fd; /* keep track of underlying file descriptor */
dev->netdev_ops = &simeth_netdev_ops;
err = register_netdev(dev);
if (err) {
free_netdev(dev);
return err;
}
/*
* attach the interrupt in the simulator; this does not enable interrupts
* until a netdev_attach() is called
*/
if ((rc = hpsim_get_irq(NETWORK_INTR)) < 0)
panic("%s: out of interrupt vectors!\n", __func__);
dev->irq = rc;
printk(KERN_INFO "%s: hosteth=%s simfd=%d, HwAddr=%pm, IRQ %d\n",
dev->name, simeth_device, local->simfd, dev->dev_addr, dev->irq);
return 0;
}
/*
* actually binds the device to an interrupt vector
*/
static int
simeth_open(struct net_device *dev)
{
if (request_irq(dev->irq, simeth_interrupt, 0, "simeth", dev)) {
printk(KERN_WARNING "simeth: unable to get IRQ %d.\n", dev->irq);
return -EAGAIN;
}
netif_start_queue(dev);
return 0;
}
/* copied from lapbether.c */
static __inline__ int dev_is_ethdev(struct net_device *dev)
{
return ( dev->type == ARPHRD_ETHER && strncmp(dev->name, "dummy", 5));
}
/*
* Handler for IFF_UP or IFF_DOWN
*
* The reason for this is that we don't want to be interrupted when the
* interface is down. There is no way to disconnect in the simulator. Instead
* we use this function to shut down packet processing in the frame filter
* in the simulator. Thus no interrupts are generated
*
*
* That's also the place where we pass the IP address of this device to the
* simulator so that we can start filtering packets for it
*
* There may be a better way of doing this, but I don't know which yet.
*/
static int
simeth_device_event(struct notifier_block *this,unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct simeth_local *local;
struct in_device *in_dev;
struct in_ifaddr **ifap = NULL;
struct in_ifaddr *ifa = NULL;
int r;
if ( ! dev ) {
printk(KERN_WARNING "simeth_device_event dev=0\n");
return NOTIFY_DONE;
}
if (dev_net(dev) != &init_net)
return NOTIFY_DONE;
if ( event != NETDEV_UP && event != NETDEV_DOWN ) return NOTIFY_DONE;
/*
* Check whether or not it's for an ethernet device
*
* XXX Fixme: This works only as long as we support one
* type of ethernet device.
*/
if ( !dev_is_ethdev(dev) ) return NOTIFY_DONE;
if ((in_dev=dev->ip_ptr) != NULL) {
for (ifap=&in_dev->ifa_list; (ifa=*ifap) != NULL; ifap=&ifa->ifa_next)
if (strcmp(dev->name, ifa->ifa_label) == 0) break;
}
if ( ifa == NULL ) {
printk(KERN_ERR "simeth_open: can't find device %s's ifa\n", dev->name);
return NOTIFY_DONE;
}
printk(KERN_INFO "simeth_device_event: %s ipaddr=0x%x\n",
dev->name, ntohl(ifa->ifa_local));
/*
* XXX Fix me
* if the device was up, and we're simply reconfiguring it, not sure
* we get DOWN then UP.
*/
local = netdev_priv(dev);
/* now do it for real */
r = event == NETDEV_UP ?
netdev_attach(local->simfd, dev->irq, ntohl(ifa->ifa_local)):
netdev_detach(local->simfd);
printk(KERN_INFO "simeth: netdev_attach/detach: event=%s ->%d\n",
event == NETDEV_UP ? "attach":"detach", r);
return NOTIFY_DONE;
}
static int
simeth_close(struct net_device *dev)
{
netif_stop_queue(dev);
free_irq(dev->irq, dev);
return 0;
}
/*
* Only used for debug
*/
static void
frame_print(unsigned char *from, unsigned char *frame, int len)
{
int i;
printk("%s: (%d) %02x", from, len, frame[0] & 0xff);
for(i=1; i < 6; i++ ) {
printk(":%02x", frame[i] &0xff);
}
printk(" %2x", frame[6] &0xff);
for(i=7; i < 12; i++ ) {
printk(":%02x", frame[i] &0xff);
}
printk(" [%02x%02x]\n", frame[12], frame[13]);
for(i=14; i < len; i++ ) {
printk("%02x ", frame[i] &0xff);
if ( (i%10)==0) printk("\n");
}
printk("\n");
}
/*
* Function used to transmit a frame; the very last one on the path before
* going to the simulator.
*/
static int
simeth_tx(struct sk_buff *skb, struct net_device *dev)
{
struct simeth_local *local = netdev_priv(dev);
#if 0
/* ensure we have at least ETH_ZLEN bytes (min frame size) */
unsigned int length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
/* Where do the extra padding bytes come from in the skbuff? */
#else
/* the real driver in the host system is going to take care of that
* or maybe it's the NIC itself.
*/
unsigned int length = skb->len;
#endif
local->stats.tx_bytes += skb->len;
local->stats.tx_packets++;
if (simeth_debug > 5) frame_print("simeth_tx", skb->data, length);
netdev_send(local->simfd, skb->data, length);
/*
* we are synchronous on write, so we don't simulate a
* transmit complete interrupt, thus we don't need to arm a tx
*/
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
static inline struct sk_buff *
make_new_skb(struct net_device *dev)
{
struct sk_buff *nskb;
/*
* The +2 is used to make sure that the IP header is nicely
* aligned (on a 4-byte boundary, I assume: 14+2=16)
*/
nskb = dev_alloc_skb(SIMETH_FRAME_SIZE + 2);
if ( nskb == NULL ) {
printk(KERN_NOTICE "%s: memory squeeze. dropping packet.\n", dev->name);
return NULL;
}
skb_reserve(nskb, 2); /* Align IP on 16 byte boundaries */
skb_put(nskb,SIMETH_FRAME_SIZE);
return nskb;
}
/*
* called from interrupt handler to process a received frame
*/
static int
simeth_rx(struct net_device *dev)
{
struct simeth_local *local;
struct sk_buff *skb;
int len;
int rcv_count = SIMETH_RECV_MAX;
local = netdev_priv(dev);
/*
* the loop concept has been borrowed from other drivers;
* it looks to me like it's a throttling thing to avoid pushing too many
* packets at one time into the stack, making sure we can process them
* upstream and make forward progress overall
*/
do {
if ( (skb=make_new_skb(dev)) == NULL ) {
printk(KERN_NOTICE "%s: memory squeeze. dropping packet.\n", dev->name);
local->stats.rx_dropped++;
return 0;
}
/*
* Read only one frame at a time
*/
len = netdev_read(local->simfd, skb->data, SIMETH_FRAME_SIZE);
if ( len == 0 ) {
if ( simeth_debug > 0 ) printk(KERN_WARNING "%s: count=%d netdev_read=0\n",
dev->name, SIMETH_RECV_MAX-rcv_count);
break;
}
#if 0
/*
* XXX Fix me
* Should really do a csum+copy here
*/
skb_copy_to_linear_data(skb, frame, len);
#endif
skb->protocol = eth_type_trans(skb, dev);
if ( simeth_debug > 6 ) frame_print("simeth_rx", skb->data, len);
/*
* push the packet up & trigger software interrupt
*/
netif_rx(skb);
local->stats.rx_packets++;
local->stats.rx_bytes += len;
} while ( --rcv_count );
return len; /* 0 = nothing left to read, otherwise, we can try again */
}
/*
* Interrupt handler (Yes, we can do it too !!!)
*/
static irqreturn_t
simeth_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
/*
* very simple loop because we get interrupts only when receiving
*/
while (simeth_rx(dev));
return IRQ_HANDLED;
}
static struct net_device_stats *
simeth_get_stats(struct net_device *dev)
{
struct simeth_local *local = netdev_priv(dev);
return &local->stats;
}
/* fake multicast ability */
static void
set_multicast_list(struct net_device *dev)
{
printk(KERN_WARNING "%s: set_multicast_list called\n", dev->name);
}
__initcall(simeth_probe);

View File

@ -1,373 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Simulated SCSI driver.
*
* Copyright (C) 1999, 2001-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Stephane Eranian <eranian@hpl.hp.com>
*
* 02/01/15 David Mosberger Updated for v2.5.1
* 99/12/18 David Mosberger Added support for READ10/WRITE10 needed by linux v2.3.33
*/
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <asm/irq.h>
#include "hpsim_ssc.h"
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#define DEBUG_SIMSCSI 0
#define SIMSCSI_REQ_QUEUE_LEN 64
#define DEFAULT_SIMSCSI_ROOT "/var/ski-disks/sd"
/* Simulator system calls: */
#define SSC_OPEN 50
#define SSC_CLOSE 51
#define SSC_READ 52
#define SSC_WRITE 53
#define SSC_GET_COMPLETION 54
#define SSC_WAIT_COMPLETION 55
#define SSC_WRITE_ACCESS 2
#define SSC_READ_ACCESS 1
#if DEBUG_SIMSCSI
int simscsi_debug;
# define DBG simscsi_debug
#else
# define DBG 0
#endif
static struct Scsi_Host *host;
static void simscsi_interrupt (unsigned long val);
static DECLARE_TASKLET(simscsi_tasklet, simscsi_interrupt, 0);
struct disk_req {
unsigned long addr;
unsigned len;
};
struct disk_stat {
int fd;
unsigned count;
};
static int desc[16] = {
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
};
static struct queue_entry {
struct scsi_cmnd *sc;
} queue[SIMSCSI_REQ_QUEUE_LEN];
static int rd, wr;
static atomic_t num_reqs = ATOMIC_INIT(0);
/* base name for default disks */
static char *simscsi_root = DEFAULT_SIMSCSI_ROOT;
#define MAX_ROOT_LEN 128
/*
* used to set up a new base for disk images;
* to use /foo/bar/disk[a-z] as disk images,
* you have to specify simscsi=/foo/bar/disk on the command line
*/
static int __init
simscsi_setup (char *s)
{
/* XXX Fix me we may need to strcpy() ? */
if (strlen(s) > MAX_ROOT_LEN) {
printk(KERN_ERR "simscsi_setup: prefix too long---using default %s\n",
simscsi_root);
} else
simscsi_root = s;
return 1;
}
__setup("simscsi=", simscsi_setup);
static void
simscsi_interrupt (unsigned long val)
{
struct scsi_cmnd *sc;
while ((sc = queue[rd].sc) != NULL) {
atomic_dec(&num_reqs);
queue[rd].sc = NULL;
if (DBG)
printk("simscsi_interrupt: done with %u\n",
sc->request->tag);
(*sc->scsi_done)(sc);
rd = (rd + 1) % SIMSCSI_REQ_QUEUE_LEN;
}
}
static int
simscsi_biosparam (struct scsi_device *sdev, struct block_device *n,
sector_t capacity, int ip[])
{
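/* fake a 64-head, 32-sectors-per-track geometry: 64 * 32 = 2048 sectors per
* cylinder, so cylinders = capacity / 2048 = capacity >> 11 */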
ip[0] = 64; /* heads */
ip[1] = 32; /* sectors */
ip[2] = capacity >> 11; /* cylinders */
return 0;
}
static void
simscsi_sg_readwrite (struct scsi_cmnd *sc, int mode, unsigned long offset)
{
int i;
struct scatterlist *sl;
struct disk_stat stat;
struct disk_req req;
stat.fd = desc[sc->device->id];
scsi_for_each_sg(sc, sl, scsi_sg_count(sc), i) {
req.addr = __pa(sg_virt(sl));
req.len = sl->length;
if (DBG)
printk("simscsi_sg_%s @ %lx (off %lx) use_sg=%d len=%d\n",
mode == SSC_READ ? "read":"write", req.addr, offset,
scsi_sg_count(sc) - i, sl->length);
ia64_ssc(stat.fd, 1, __pa(&req), offset, mode);
ia64_ssc(__pa(&stat), 0, 0, 0, SSC_WAIT_COMPLETION);
/* should not happen in our case */
if (stat.count != req.len) {
sc->result = DID_ERROR << 16;
return;
}
offset += sl->length;
}
sc->result = GOOD;
}
/*
* function handling both READ_6/WRITE_6 (non-scatter/gather mode)
* commands.
* Added 02/26/99 S.Eranian
*/
static void
simscsi_readwrite6 (struct scsi_cmnd *sc, int mode)
{
unsigned long offset;
offset = (((sc->cmnd[1] & 0x1f) << 16) | (sc->cmnd[2] << 8) | sc->cmnd[3])*512;
simscsi_sg_readwrite(sc, mode, offset);
}
static size_t
simscsi_get_disk_size (int fd)
{
struct disk_stat stat;
size_t bit, sectors = 0;
struct disk_req req;
char buf[512];
/*
* This is a bit kludgey: the simulator doesn't provide a
* direct way of determining the disk size, so we do a binary
* search, assuming a maximum disk size of 128GB.
*/
for (bit = (128UL << 30)/512; bit != 0; bit >>= 1) {
req.addr = __pa(&buf);
req.len = sizeof(buf);
ia64_ssc(fd, 1, __pa(&req), ((sectors | bit) - 1)*512, SSC_READ);
stat.fd = fd;
ia64_ssc(__pa(&stat), 0, 0, 0, SSC_WAIT_COMPLETION);
if (stat.count == sizeof(buf))
sectors |= bit;
}
return sectors - 1; /* return last valid sector number */
}
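A minimal stand-alone sketch of the probing binary search used by simscsi_get_disk_size() above (illustration only; sector_readable() and probe_disk_sectors() are invented names standing in for the SSC_READ/SSC_WAIT_COMPLETION probe):

/* Illustrative sketch only: sector_readable() and probe_disk_sectors() are
 * invented names, not part of the driver above. */
#include <stdbool.h>

#define EXAMPLE_MAX_SECTORS ((128ULL << 30) / 512)	/* assume at most 128GB, as above */

extern bool sector_readable(unsigned long long sector);	/* hypothetical probe */

static unsigned long long probe_disk_sectors(void)
{
	unsigned long long sectors = 0, bit;

	/* test one bit of the sector count at a time, highest bit first */
	for (bit = EXAMPLE_MAX_SECTORS; bit != 0; bit >>= 1) {
		/* keep the bit if sector (sectors | bit) - 1 can be read */
		if (sector_readable((sectors | bit) - 1))
			sectors |= bit;
	}
	return sectors;		/* number of readable sectors */
}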
static void
simscsi_readwrite10 (struct scsi_cmnd *sc, int mode)
{
unsigned long offset;
offset = (((unsigned long)sc->cmnd[2] << 24)
| ((unsigned long)sc->cmnd[3] << 16)
| ((unsigned long)sc->cmnd[4] << 8)
| ((unsigned long)sc->cmnd[5] << 0))*512UL;
simscsi_sg_readwrite(sc, mode, offset);
}
static int
simscsi_queuecommand_lck (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
{
unsigned int target_id = sc->device->id;
char fname[MAX_ROOT_LEN+16];
size_t disk_size;
char *buf;
char localbuf[36];
#if DEBUG_SIMSCSI
register long sp asm ("sp");
if (DBG)
printk("simscsi_queuecommand: target=%d,cmnd=%u,sc=%u,sp=%lx,done=%p\n",
target_id, sc->cmnd[0], sc->request->tag, sp, done);
#endif
sc->result = DID_BAD_TARGET << 16;
sc->scsi_done = done;
if (target_id <= 15 && sc->device->lun == 0) {
switch (sc->cmnd[0]) {
case INQUIRY:
if (scsi_bufflen(sc) < 35) {
break;
}
sprintf (fname, "%s%c", simscsi_root, 'a' + target_id);
desc[target_id] = ia64_ssc(__pa(fname), SSC_READ_ACCESS|SSC_WRITE_ACCESS,
0, 0, SSC_OPEN);
if (desc[target_id] < 0) {
/* disk doesn't exist... */
break;
}
buf = localbuf;
buf[0] = 0; /* magnetic disk */
buf[1] = 0; /* not a removable medium */
buf[2] = 2; /* SCSI-2 compliant device */
buf[3] = 2; /* SCSI-2 response data format */
buf[4] = 31; /* additional length (bytes) */
buf[5] = 0; /* reserved */
buf[6] = 0; /* reserved */
buf[7] = 0; /* various flags */
memcpy(buf + 8, "HP SIMULATED DISK 0.00", 28);
scsi_sg_copy_from_buffer(sc, buf, 36);
sc->result = GOOD;
break;
case TEST_UNIT_READY:
sc->result = GOOD;
break;
case READ_6:
if (desc[target_id] < 0 )
break;
simscsi_readwrite6(sc, SSC_READ);
break;
case READ_10:
if (desc[target_id] < 0 )
break;
simscsi_readwrite10(sc, SSC_READ);
break;
case WRITE_6:
if (desc[target_id] < 0)
break;
simscsi_readwrite6(sc, SSC_WRITE);
break;
case WRITE_10:
if (desc[target_id] < 0)
break;
simscsi_readwrite10(sc, SSC_WRITE);
break;
case READ_CAPACITY:
if (desc[target_id] < 0 || scsi_bufflen(sc) < 8) {
break;
}
buf = localbuf;
disk_size = simscsi_get_disk_size(desc[target_id]);
buf[0] = (disk_size >> 24) & 0xff;
buf[1] = (disk_size >> 16) & 0xff;
buf[2] = (disk_size >> 8) & 0xff;
buf[3] = (disk_size >> 0) & 0xff;
/* set block size of 512 bytes: */
buf[4] = 0;
buf[5] = 0;
buf[6] = 2;
buf[7] = 0;
scsi_sg_copy_from_buffer(sc, buf, 8);
sc->result = GOOD;
break;
case MODE_SENSE:
case MODE_SENSE_10:
/* sd.c uses this to determine whether the disk does write-caching. */
scsi_sg_copy_from_buffer(sc, (char *)empty_zero_page,
PAGE_SIZE);
sc->result = GOOD;
break;
case START_STOP:
printk(KERN_ERR "START_STOP\n");
break;
default:
panic("simscsi: unknown SCSI command %u\n", sc->cmnd[0]);
}
}
if (sc->result == DID_BAD_TARGET) {
sc->result |= DRIVER_SENSE << 24;
sc->sense_buffer[0] = 0x70;
sc->sense_buffer[2] = 0x00;
}
if (atomic_read(&num_reqs) >= SIMSCSI_REQ_QUEUE_LEN) {
panic("Attempt to queue command while command is pending!!");
}
atomic_inc(&num_reqs);
queue[wr].sc = sc;
wr = (wr + 1) % SIMSCSI_REQ_QUEUE_LEN;
tasklet_schedule(&simscsi_tasklet);
return 0;
}
static DEF_SCSI_QCMD(simscsi_queuecommand)
static int
simscsi_host_reset (struct scsi_cmnd *sc)
{
printk(KERN_ERR "simscsi_host_reset: not implemented\n");
return 0;
}
static struct scsi_host_template driver_template = {
.name = "simulated SCSI host adapter",
.proc_name = "simscsi",
.queuecommand = simscsi_queuecommand,
.eh_host_reset_handler = simscsi_host_reset,
.bios_param = simscsi_biosparam,
.can_queue = SIMSCSI_REQ_QUEUE_LEN,
.this_id = -1,
.sg_tablesize = SG_ALL,
.max_sectors = 1024,
.cmd_per_lun = SIMSCSI_REQ_QUEUE_LEN,
.dma_boundary = PAGE_SIZE - 1,
};
static int __init
simscsi_init(void)
{
int error;
host = scsi_host_alloc(&driver_template, 0);
if (!host)
return -ENOMEM;
error = scsi_add_host(host, NULL);
if (error)
goto free_host;
scsi_scan_host(host);
return 0;
free_host:
scsi_host_put(host);
return error;
}
device_initcall(simscsi_init);

View File

@ -1,521 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Simulated Serial Driver (fake serial)
*
* This driver is mostly used for bringup purposes and will go away.
* It has a strong dependency on the system console. All output
* is rerouted to the same facility as the one used by printk, which in our
* case means the sys_sim.c console (goes via the simulator).
*
* Copyright (C) 1999-2000, 2002-2003 Hewlett-Packard Co
* Stephane Eranian <eranian@hpl.hp.com>
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/major.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/circ_buf.h>
#include <linux/console.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/serial.h>
#include <linux/sysrq.h>
#include <linux/uaccess.h>
#include <asm/hpsim.h>
#include "hpsim_ssc.h"
#undef SIMSERIAL_DEBUG /* define this to get some debug information */
#define KEYBOARD_INTR 3 /* must match with simulator! */
#define NR_PORTS 1 /* only one port for now */
struct serial_state {
struct tty_port port;
struct circ_buf xmit;
int irq;
int x_char;
};
static struct serial_state rs_table[NR_PORTS];
struct tty_driver *hp_simserial_driver;
static struct console *console;
static void receive_chars(struct tty_port *port)
{
unsigned char ch;
static unsigned char seen_esc = 0;
while ( (ch = ia64_ssc(0, 0, 0, 0, SSC_GETCHAR)) ) {
if (ch == 27 && seen_esc == 0) {
seen_esc = 1;
continue;
} else if (seen_esc == 1 && ch == 'O') {
seen_esc = 2;
continue;
} else if (seen_esc == 2) {
if (ch == 'P') /* F1 */
show_state();
#ifdef CONFIG_MAGIC_SYSRQ
if (ch == 'S') { /* F4 */
do {
ch = ia64_ssc(0, 0, 0, 0, SSC_GETCHAR);
} while (!ch);
handle_sysrq(ch);
}
#endif
seen_esc = 0;
continue;
}
seen_esc = 0;
if (tty_insert_flip_char(port, ch, TTY_NORMAL) == 0)
break;
}
tty_flip_buffer_push(port);
}
/*
* This is the serial driver's interrupt routine for a single port
*/
static irqreturn_t rs_interrupt_single(int irq, void *dev_id)
{
struct serial_state *info = dev_id;
receive_chars(&info->port);
return IRQ_HANDLED;
}
/*
* -------------------------------------------------------------------
* Here ends the serial interrupt routines.
* -------------------------------------------------------------------
*/
static int rs_put_char(struct tty_struct *tty, unsigned char ch)
{
struct serial_state *info = tty->driver_data;
unsigned long flags;
if (!info->xmit.buf)
return 0;
local_irq_save(flags);
if (CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE) == 0) {
local_irq_restore(flags);
return 0;
}
info->xmit.buf[info->xmit.head] = ch;
info->xmit.head = (info->xmit.head + 1) & (SERIAL_XMIT_SIZE-1);
local_irq_restore(flags);
return 1;
}
static void transmit_chars(struct tty_struct *tty, struct serial_state *info,
int *intr_done)
{
int count;
unsigned long flags;
local_irq_save(flags);
if (info->x_char) {
char c = info->x_char;
console->write(console, &c, 1);
info->x_char = 0;
goto out;
}
if (info->xmit.head == info->xmit.tail || tty->stopped) {
#ifdef SIMSERIAL_DEBUG
printk("transmit_chars: head=%d, tail=%d, stopped=%d\n",
info->xmit.head, info->xmit.tail, tty->stopped);
#endif
goto out;
}
/*
* We removed the loop and try to do it in two chunks. We need
* 2 operations maximum because it's a ring buffer.
*
* First from the tail towards the end of the buffer if possible,
* then from the beginning of the buffer as far as necessary.
*/
count = min(CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE),
SERIAL_XMIT_SIZE - info->xmit.tail);
console->write(console, info->xmit.buf+info->xmit.tail, count);
info->xmit.tail = (info->xmit.tail+count) & (SERIAL_XMIT_SIZE-1);
/*
* We have more at the beginning of the buffer
*/
count = CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
if (count) {
console->write(console, info->xmit.buf, count);
info->xmit.tail += count;
}
out:
local_irq_restore(flags);
}
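A minimal stand-alone sketch of the "two chunks maximum" drain described in the transmit_chars() comment above (illustration only; ring_drain() and write_fn are invented names standing in for the circ_buf bookkeeping and console->write() used by the driver):

/* Illustrative sketch only: ring_drain() and write_fn are invented names. */
#include <stddef.h>

#define RING_SIZE 4096	/* must be a power of two, like SERIAL_XMIT_SIZE */

struct ring {
	unsigned char buf[RING_SIZE];
	unsigned int head;	/* producer index */
	unsigned int tail;	/* consumer index */
};

static void ring_drain(struct ring *r,
		       void (*write_fn)(const unsigned char *, size_t))
{
	/* chunk 1: from the tail up to head, or up to the end of the buffer */
	size_t count = (r->head - r->tail) & (RING_SIZE - 1);
	size_t to_end = RING_SIZE - r->tail;
	size_t first = count < to_end ? count : to_end;

	if (first) {
		write_fn(r->buf + r->tail, first);
		r->tail = (r->tail + first) & (RING_SIZE - 1);
	}

	/* chunk 2: whatever wrapped around to the start of the buffer */
	count = (r->head - r->tail) & (RING_SIZE - 1);
	if (count) {
		write_fn(r->buf + r->tail, count);	/* tail is 0 here, after the wrap */
		r->tail = (r->tail + count) & (RING_SIZE - 1);
	}
}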
static void rs_flush_chars(struct tty_struct *tty)
{
struct serial_state *info = tty->driver_data;
if (info->xmit.head == info->xmit.tail || tty->stopped ||
!info->xmit.buf)
return;
transmit_chars(tty, info, NULL);
}
static int rs_write(struct tty_struct * tty,
const unsigned char *buf, int count)
{
struct serial_state *info = tty->driver_data;
int c, ret = 0;
unsigned long flags;
if (!info->xmit.buf)
return 0;
local_irq_save(flags);
while (1) {
c = CIRC_SPACE_TO_END(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
if (count < c)
c = count;
if (c <= 0) {
break;
}
memcpy(info->xmit.buf + info->xmit.head, buf, c);
info->xmit.head = ((info->xmit.head + c) &
(SERIAL_XMIT_SIZE-1));
buf += c;
count -= c;
ret += c;
}
local_irq_restore(flags);
/*
* Hey, we transmit directly from here in our case
*/
if (CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE) &&
!tty->stopped)
transmit_chars(tty, info, NULL);
return ret;
}
static int rs_write_room(struct tty_struct *tty)
{
struct serial_state *info = tty->driver_data;
return CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
}
static int rs_chars_in_buffer(struct tty_struct *tty)
{
struct serial_state *info = tty->driver_data;
return CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
}
static void rs_flush_buffer(struct tty_struct *tty)
{
struct serial_state *info = tty->driver_data;
unsigned long flags;
local_irq_save(flags);
info->xmit.head = info->xmit.tail = 0;
local_irq_restore(flags);
tty_wakeup(tty);
}
/*
* This function is used to send a high-priority XON/XOFF character to
* the device
*/
static void rs_send_xchar(struct tty_struct *tty, char ch)
{
struct serial_state *info = tty->driver_data;
info->x_char = ch;
if (ch) {
/*
* I guess we could call console->write() directly but
* let's not do that for now.
*/
transmit_chars(tty, info, NULL);
}
}
/*
* ------------------------------------------------------------
* rs_throttle()
*
* This routine is called by the upper-layer tty layer to signal that
* incoming characters should be throttled.
* ------------------------------------------------------------
*/
static void rs_throttle(struct tty_struct * tty)
{
if (I_IXOFF(tty))
rs_send_xchar(tty, STOP_CHAR(tty));
printk(KERN_INFO "simrs_throttle called\n");
}
static void rs_unthrottle(struct tty_struct * tty)
{
struct serial_state *info = tty->driver_data;
if (I_IXOFF(tty)) {
if (info->x_char)
info->x_char = 0;
else
rs_send_xchar(tty, START_CHAR(tty));
}
printk(KERN_INFO "simrs_unthrottle called\n");
}
static int rs_setserial(struct tty_struct *tty, struct serial_struct *ss)
{
return 0;
}
static int rs_getserial(struct tty_struct *tty, struct serial_struct *ss)
{
return 0;
}
static int rs_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
{
if ((cmd != TIOCSERCONFIG) && (cmd != TIOCMIWAIT)) {
if (tty_io_error(tty))
return -EIO;
}
switch (cmd) {
case TIOCMIWAIT:
return 0;
case TIOCSERCONFIG:
case TIOCSERGETLSR: /* Get line status register */
return -EINVAL;
}
return -ENOIOCTLCMD;
}
/*
* This routine will shut down a serial port; interrupts are disabled, and
* DTR is dropped if the hangup on close termio flag is on.
*/
static void shutdown(struct tty_port *port)
{
struct serial_state *info = container_of(port, struct serial_state,
port);
unsigned long flags;
local_irq_save(flags);
if (info->irq)
free_irq(info->irq, info);
if (info->xmit.buf) {
free_page((unsigned long) info->xmit.buf);
info->xmit.buf = NULL;
}
local_irq_restore(flags);
}
static void rs_close(struct tty_struct *tty, struct file * filp)
{
struct serial_state *info = tty->driver_data;
tty_port_close(&info->port, tty, filp);
}
static void rs_hangup(struct tty_struct *tty)
{
struct serial_state *info = tty->driver_data;
rs_flush_buffer(tty);
tty_port_hangup(&info->port);
}
static int activate(struct tty_port *port, struct tty_struct *tty)
{
struct serial_state *state = container_of(port, struct serial_state,
port);
unsigned long flags, page;
int retval = 0;
page = get_zeroed_page(GFP_KERNEL);
if (!page)
return -ENOMEM;
local_irq_save(flags);
if (state->xmit.buf)
free_page(page);
else
state->xmit.buf = (unsigned char *) page;
if (state->irq) {
retval = request_irq(state->irq, rs_interrupt_single, 0,
"simserial", state);
if (retval)
goto errout;
}
state->xmit.head = state->xmit.tail = 0;
errout:
local_irq_restore(flags);
return retval;
}
/*
* This routine is called whenever a serial port is opened. It
* enables interrupts for a serial port, linking its async structure into
* the IRQ chain. It also performs the serial-specific
* initialization for the tty structure.
*/
static int rs_open(struct tty_struct *tty, struct file * filp)
{
struct serial_state *info = rs_table + tty->index;
struct tty_port *port = &info->port;
tty->driver_data = info;
port->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
/*
* figure out which console to use (should be one already)
*/
console = console_drivers;
while (console) {
if ((console->flags & CON_ENABLED) && console->write) break;
console = console->next;
}
return tty_port_open(port, tty, filp);
}
/*
* /proc fs routines....
*/
static int rs_proc_show(struct seq_file *m, void *v)
{
int i;
seq_printf(m, "simserinfo:1.0\n");
for (i = 0; i < NR_PORTS; i++)
seq_printf(m, "%d: uart:16550 port:3F8 irq:%d\n",
i, rs_table[i].irq);
return 0;
}
static const struct tty_operations hp_ops = {
.open = rs_open,
.close = rs_close,
.write = rs_write,
.put_char = rs_put_char,
.flush_chars = rs_flush_chars,
.write_room = rs_write_room,
.chars_in_buffer = rs_chars_in_buffer,
.flush_buffer = rs_flush_buffer,
.ioctl = rs_ioctl,
.throttle = rs_throttle,
.unthrottle = rs_unthrottle,
.send_xchar = rs_send_xchar,
.set_serial = rs_setserial,
.get_serial = rs_getserial,
.hangup = rs_hangup,
.proc_show = rs_proc_show,
};
static const struct tty_port_operations hp_port_ops = {
.activate = activate,
.shutdown = shutdown,
};
static int __init simrs_init(void)
{
struct serial_state *state;
int retval;
if (!ia64_platform_is("hpsim"))
return -ENODEV;
hp_simserial_driver = alloc_tty_driver(NR_PORTS);
if (!hp_simserial_driver)
return -ENOMEM;
printk(KERN_INFO "SimSerial driver with no serial options enabled\n");
/* Initialize the tty_driver structure */
hp_simserial_driver->driver_name = "simserial";
hp_simserial_driver->name = "ttyS";
hp_simserial_driver->major = TTY_MAJOR;
hp_simserial_driver->minor_start = 64;
hp_simserial_driver->type = TTY_DRIVER_TYPE_SERIAL;
hp_simserial_driver->subtype = SERIAL_TYPE_NORMAL;
hp_simserial_driver->init_termios = tty_std_termios;
hp_simserial_driver->init_termios.c_cflag =
B9600 | CS8 | CREAD | HUPCL | CLOCAL;
hp_simserial_driver->flags = TTY_DRIVER_REAL_RAW;
tty_set_operations(hp_simserial_driver, &hp_ops);
state = rs_table;
tty_port_init(&state->port);
state->port.ops = &hp_port_ops;
state->port.close_delay = 0; /* XXX really 0? */
retval = hpsim_get_irq(KEYBOARD_INTR);
if (retval < 0) {
printk(KERN_ERR "%s: out of interrupt vectors!\n",
__func__);
goto err_free_tty;
}
state->irq = retval;
/* the port is imaginary */
printk(KERN_INFO "ttyS0 at 0x03f8 (irq = %d) is a 16550\n", state->irq);
tty_port_link_device(&state->port, hp_simserial_driver, 0);
retval = tty_register_driver(hp_simserial_driver);
if (retval) {
printk(KERN_ERR "Couldn't register simserial driver\n");
goto err_free_tty;
}
return 0;
err_free_tty:
put_tty_driver(hp_simserial_driver);
tty_port_destroy(&state->port);
return retval;
}
#ifndef MODULE
__initcall(simrs_init);
#endif

View File

@ -1,9 +0,0 @@
# SPDX-License-Identifier: GPL-2.0-only
#
# ia64/hp/zx1/Makefile
#
# Copyright (C) 2002 Hewlett Packard
# Copyright (C) Alex Williamson (alex_williamson@hp.com)
#
obj-$(CONFIG_IA64_GENERIC) += hpzx1_machvec.o hpzx1_swiotlb_machvec.o

View File

@ -1,3 +0,0 @@
#define MACHVEC_PLATFORM_NAME hpzx1
#define MACHVEC_PLATFORM_HEADER <asm/machvec_hpzx1.h>
#include <asm/machvec_init.h>

View File

@ -1,3 +0,0 @@
#define MACHVEC_PLATFORM_NAME hpzx1_swiotlb
#define MACHVEC_PLATFORM_HEADER <asm/machvec_hpzx1_swiotlb.h>
#include <asm/machvec_init.h>

View File

@ -17,7 +17,7 @@
#include <linux/numa.h>
#include <asm/numa.h>
#ifdef CONFIG_ACPI
extern int acpi_lapic;
#define acpi_disabled 0 /* ACPI always enabled on IA64 */
#define acpi_noirq 0 /* ACPI always enabled on IA64 */
@ -28,34 +28,10 @@ static inline bool acpi_has_cpu_in_madt(void)
{
return !!acpi_lapic;
}
#endif
#define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */
static inline void disable_acpi(void) { }
#ifdef CONFIG_IA64_GENERIC
const char *acpi_get_sysname (void);
#else
static inline const char *acpi_get_sysname (void)
{
# if defined (CONFIG_IA64_HP_SIM)
return "hpsim";
# elif defined (CONFIG_IA64_HP_ZX1)
return "hpzx1";
# elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB)
return "hpzx1_swiotlb";
# elif defined (CONFIG_IA64_SGI_SN2)
return "sn2";
# elif defined (CONFIG_IA64_SGI_UV)
return "uv";
# elif defined (CONFIG_IA64_DIG)
return "dig";
# elif defined(CONFIG_IA64_DIG_VTD)
return "dig_vtd";
# else
# error Unknown platform. Fix acpi.c.
# endif
}
#endif
int acpi_request_vector (u32 int_type);
int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);

View File

@ -6,17 +6,11 @@
* Copyright (C) 2003-2004 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <asm/machvec.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
extern const struct dma_map_ops *dma_ops;
extern struct ia64_machine_vector ia64_mv;
extern void set_iommu_machvec(void);
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
return platform_dma_get_ops(NULL);
return dma_ops;
}
#endif /* _ASM_IA64_DMA_MAPPING_H */

View File

@ -12,11 +12,7 @@
extern unsigned long MAX_DMA_ADDRESS;
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
# define isa_dma_bridge_buggy (0)
#endif
extern int isa_dma_bridge_buggy;
#define free_dma(x)

View File

@ -1,17 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASMIA64_HPSIM_H
#define _ASMIA64_HPSIM_H
#ifndef CONFIG_HP_SIMSERIAL_CONSOLE
static inline int simcons_register(void) { return 1; }
#else
int simcons_register(void);
#endif
struct tty_driver;
extern struct tty_driver *hp_simserial_driver;
extern int hpsim_get_irq(int intr);
void ia64_ctl_trace(long on);
#endif

View File

@ -12,7 +12,6 @@
#include <linux/types.h>
#include <linux/profile.h>
#include <asm/machvec.h>
#include <asm/ptrace.h>
#include <asm/smp.h>
@ -56,7 +55,7 @@ typedef u8 ia64_vector;
extern int ia64_first_device_vector;
extern int ia64_last_device_vector;
#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined (CONFIG_IA64_DIG))
#ifdef CONFIG_SMP
/* Reserve a lower-priority vector than the device vectors for the "move IRQ" IPI */
#define IA64_IRQ_MOVE_VECTOR 0x30 /* "move IRQ" IPI */
#define IA64_DEF_FIRST_DEVICE_VECTOR 0x31
@ -127,7 +126,7 @@ extern void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect)
extern void ia64_native_register_percpu_irq (ia64_vector vec, struct irqaction *action);
extern void destroy_and_reserve_irq (unsigned int irq);
#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG))
#ifdef CONFIG_SMP
extern int irq_prepare_move(int irq, int cpu);
extern void irq_complete_move(unsigned int irq);
#else
@ -137,25 +136,9 @@ static inline void irq_complete_move(unsigned int irq) {}
static inline void ia64_native_resend_irq(unsigned int vector)
{
platform_send_ipi(smp_processor_id(), vector, IA64_IPI_DM_INT, 0);
ia64_send_ipi(smp_processor_id(), vector, IA64_IPI_DM_INT, 0);
}
/*
* Default implementations for the irq-descriptor API:
*/
#ifndef CONFIG_IA64_GENERIC
static inline ia64_vector __ia64_irq_to_vector(int irq)
{
return irq_cfg[irq].vector;
}
static inline unsigned int
__ia64_local_vector_to_irq (ia64_vector vec)
{
return __this_cpu_read(vector_irq[vec]);
}
#endif
/*
* Next follows the irq descriptor interface. On IA-64, each CPU supports 256 interrupt
* vectors. On smaller systems, there is a one-to-one correspondence between interrupt
@ -170,7 +153,7 @@ __ia64_local_vector_to_irq (ia64_vector vec)
static inline ia64_vector
irq_to_vector (int irq)
{
return platform_irq_to_vector(irq);
return irq_cfg[irq].vector;
}
/*
@ -181,7 +164,7 @@ irq_to_vector (int irq)
static inline unsigned int
local_vector_to_irq (ia64_vector vec)
{
return platform_local_vector_to_irq(vec);
return __this_cpu_read(vector_irq[vec]);
}
#endif /* _ASM_IA64_HW_IRQ_H */

View File

@ -71,7 +71,6 @@ extern unsigned int num_io_spaces;
#define HAVE_ARCH_PIO_SIZE
#include <asm/intrinsics.h>
#include <asm/machvec.h>
#include <asm/page.h>
#include <asm-generic/iomap.h>
@ -129,25 +128,6 @@ __ia64_mk_io_addr (unsigned long port)
return (void *) (space->mmio_base | offset);
}
#define __ia64_inb ___ia64_inb
#define __ia64_inw ___ia64_inw
#define __ia64_inl ___ia64_inl
#define __ia64_outb ___ia64_outb
#define __ia64_outw ___ia64_outw
#define __ia64_outl ___ia64_outl
#define __ia64_readb ___ia64_readb
#define __ia64_readw ___ia64_readw
#define __ia64_readl ___ia64_readl
#define __ia64_readq ___ia64_readq
#define __ia64_readb_relaxed ___ia64_readb
#define __ia64_readw_relaxed ___ia64_readw
#define __ia64_readl_relaxed ___ia64_readl
#define __ia64_readq_relaxed ___ia64_readq
#define __ia64_writeb ___ia64_writeb
#define __ia64_writew ___ia64_writew
#define __ia64_writel ___ia64_writel
#define __ia64_writeq ___ia64_writeq
/*
* For the in/out routines, we need to do "mf.a" _after_ doing the I/O access to ensure
* that the access has completed before executing other I/O accesses. Since we're doing
@ -156,8 +136,8 @@ __ia64_mk_io_addr (unsigned long port)
* during optimization, which is why we use "volatile" pointers.
*/
static inline unsigned int
___ia64_inb (unsigned long port)
#define inb inb
static inline unsigned int inb(unsigned long port)
{
volatile unsigned char *addr = __ia64_mk_io_addr(port);
unsigned char ret;
@ -167,8 +147,8 @@ ___ia64_inb (unsigned long port)
return ret;
}
static inline unsigned int
___ia64_inw (unsigned long port)
#define inw inw
static inline unsigned int inw(unsigned long port)
{
volatile unsigned short *addr = __ia64_mk_io_addr(port);
unsigned short ret;
@ -178,8 +158,8 @@ ___ia64_inw (unsigned long port)
return ret;
}
static inline unsigned int
___ia64_inl (unsigned long port)
#define inl inl
static inline unsigned int inl(unsigned long port)
{
volatile unsigned int *addr = __ia64_mk_io_addr(port);
unsigned int ret;
@ -189,8 +169,8 @@ ___ia64_inl (unsigned long port)
return ret;
}
static inline void
___ia64_outb (unsigned char val, unsigned long port)
#define outb outb
static inline void outb(unsigned char val, unsigned long port)
{
volatile unsigned char *addr = __ia64_mk_io_addr(port);
@ -198,8 +178,8 @@ ___ia64_outb (unsigned char val, unsigned long port)
__ia64_mf_a();
}
static inline void
___ia64_outw (unsigned short val, unsigned long port)
#define outw outw
static inline void outw(unsigned short val, unsigned long port)
{
volatile unsigned short *addr = __ia64_mk_io_addr(port);
@ -207,8 +187,8 @@ ___ia64_outw (unsigned short val, unsigned long port)
__ia64_mf_a();
}
static inline void
___ia64_outl (unsigned int val, unsigned long port)
#define outl outl
static inline void outl(unsigned int val, unsigned long port)
{
volatile unsigned int *addr = __ia64_mk_io_addr(port);
@ -216,199 +196,63 @@ ___ia64_outl (unsigned int val, unsigned long port)
__ia64_mf_a();
}
static inline void
__insb (unsigned long port, void *dst, unsigned long count)
#define insb insb
static inline void insb(unsigned long port, void *dst, unsigned long count)
{
unsigned char *dp = dst;
while (count--)
*dp++ = platform_inb(port);
*dp++ = inb(port);
}
static inline void
__insw (unsigned long port, void *dst, unsigned long count)
#define insw insw
static inline void insw(unsigned long port, void *dst, unsigned long count)
{
unsigned short *dp = dst;
while (count--)
put_unaligned(platform_inw(port), dp++);
put_unaligned(inw(port), dp++);
}
static inline void
__insl (unsigned long port, void *dst, unsigned long count)
#define insl insl
static inline void insl(unsigned long port, void *dst, unsigned long count)
{
unsigned int *dp = dst;
while (count--)
put_unaligned(platform_inl(port), dp++);
put_unaligned(inl(port), dp++);
}
static inline void
__outsb (unsigned long port, const void *src, unsigned long count)
#define outsb outsb
static inline void outsb(unsigned long port, const void *src,
unsigned long count)
{
const unsigned char *sp = src;
while (count--)
platform_outb(*sp++, port);
outb(*sp++, port);
}
static inline void
__outsw (unsigned long port, const void *src, unsigned long count)
#define outsw outsw
static inline void outsw(unsigned long port, const void *src,
unsigned long count)
{
const unsigned short *sp = src;
while (count--)
platform_outw(get_unaligned(sp++), port);
outw(get_unaligned(sp++), port);
}
static inline void
__outsl (unsigned long port, const void *src, unsigned long count)
#define outsl outsl
static inline void outsl(unsigned long port, const void *src,
unsigned long count)
{
const unsigned int *sp = src;
while (count--)
platform_outl(get_unaligned(sp++), port);
outl(get_unaligned(sp++), port);
}
/*
* Unfortunately, some platforms are broken and do not follow the IA-64 architecture
* specification regarding legacy I/O support. Thus, we have to make these operations
* platform dependent...
*/
#define __inb platform_inb
#define __inw platform_inw
#define __inl platform_inl
#define __outb platform_outb
#define __outw platform_outw
#define __outl platform_outl
#define inb(p) __inb(p)
#define inw(p) __inw(p)
#define inl(p) __inl(p)
#define insb(p,d,c) __insb(p,d,c)
#define insw(p,d,c) __insw(p,d,c)
#define insl(p,d,c) __insl(p,d,c)
#define outb(v,p) __outb(v,p)
#define outw(v,p) __outw(v,p)
#define outl(v,p) __outl(v,p)
#define outsb(p,s,c) __outsb(p,s,c)
#define outsw(p,s,c) __outsw(p,s,c)
#define outsl(p,s,c) __outsl(p,s,c)
/*
* The addresses passed to these functions are already ioremap()ed.
*
* We need these to be machine vectors since some platforms don't provide
* DMA coherence via PIO reads (PCI drivers and the spec imply that this is
* a good idea). Writes are ok though for all existing ia64 platforms (and
* hopefully it'll stay that way).
*/
static inline unsigned char
___ia64_readb (const volatile void __iomem *addr)
{
return *(volatile unsigned char __force *)addr;
}
static inline unsigned short
___ia64_readw (const volatile void __iomem *addr)
{
return *(volatile unsigned short __force *)addr;
}
static inline unsigned int
___ia64_readl (const volatile void __iomem *addr)
{
return *(volatile unsigned int __force *) addr;
}
static inline unsigned long
___ia64_readq (const volatile void __iomem *addr)
{
return *(volatile unsigned long __force *) addr;
}
static inline void
__writeb (unsigned char val, volatile void __iomem *addr)
{
*(volatile unsigned char __force *) addr = val;
}
static inline void
__writew (unsigned short val, volatile void __iomem *addr)
{
*(volatile unsigned short __force *) addr = val;
}
static inline void
__writel (unsigned int val, volatile void __iomem *addr)
{
*(volatile unsigned int __force *) addr = val;
}
static inline void
__writeq (unsigned long val, volatile void __iomem *addr)
{
*(volatile unsigned long __force *) addr = val;
}
#define __readb platform_readb
#define __readw platform_readw
#define __readl platform_readl
#define __readq platform_readq
#define __readb_relaxed platform_readb_relaxed
#define __readw_relaxed platform_readw_relaxed
#define __readl_relaxed platform_readl_relaxed
#define __readq_relaxed platform_readq_relaxed
#define readb(a) __readb((a))
#define readw(a) __readw((a))
#define readl(a) __readl((a))
#define readq(a) __readq((a))
#define readb_relaxed(a) __readb_relaxed((a))
#define readw_relaxed(a) __readw_relaxed((a))
#define readl_relaxed(a) __readl_relaxed((a))
#define readq_relaxed(a) __readq_relaxed((a))
#define __raw_readb readb
#define __raw_readw readw
#define __raw_readl readl
#define __raw_readq readq
#define __raw_readb_relaxed readb_relaxed
#define __raw_readw_relaxed readw_relaxed
#define __raw_readl_relaxed readl_relaxed
#define __raw_readq_relaxed readq_relaxed
#define writeb(v,a) __writeb((v), (a))
#define writew(v,a) __writew((v), (a))
#define writel(v,a) __writel((v), (a))
#define writeq(v,a) __writeq((v), (a))
#define writeb_relaxed(v,a) __writeb((v), (a))
#define writew_relaxed(v,a) __writew((v), (a))
#define writel_relaxed(v,a) __writel((v), (a))
#define writeq_relaxed(v,a) __writeq((v), (a))
#define __raw_writeb writeb
#define __raw_writew writew
#define __raw_writel writel
#define __raw_writeq writeq
#ifndef inb_p
# define inb_p inb
#endif
#ifndef inw_p
# define inw_p inw
#endif
#ifndef inl_p
# define inl_p inl
#endif
#ifndef outb_p
# define outb_p outb
#endif
#ifndef outw_p
# define outw_p outw
#endif
#ifndef outl_p
# define outl_p outl
#endif
# ifdef __KERNEL__
extern void __iomem * ioremap(unsigned long offset, unsigned long size);

View File

@ -13,6 +13,5 @@ extern int iommu_detected;
#define no_iommu (1)
#define iommu_detected (0)
#endif
extern void machvec_init(const char *name);
#endif

View File

@ -52,8 +52,6 @@
#ifndef __ASSEMBLY__
#ifdef CONFIG_IOSAPIC
#define NR_IOSAPICS 256
#define iosapic_pcat_compat_init ia64_native_iosapic_pcat_compat_init
@ -103,16 +101,6 @@ extern int __init iosapic_register_platform_intr (u32 int_type,
#ifdef CONFIG_NUMA
extern void map_iosapic_to_node (unsigned int, int);
#endif
#else
#define iosapic_system_init(pcat_compat) do { } while (0)
#define iosapic_init(address,gsi_base) (-EINVAL)
#define iosapic_remove(gsi_base) (-ENODEV)
#define iosapic_register_intr(gsi,polarity,trigger) (gsi)
#define iosapic_unregister_intr(irq) do { } while (0)
#define iosapic_override_isa_irq(isa_irq,gsi,polarity,trigger) do { } while (0)
#define iosapic_register_platform_intr(type,gsi,pmi,eid,id, \
polarity,trigger) (gsi)
#endif
# endif /* !__ASSEMBLY__ */
#endif /* __ASM_IA64_IOSAPIC_H */

View File

@ -28,9 +28,6 @@ irq_canonicalize (int irq)
}
extern void set_irq_affinity_info (unsigned int irq, int dest, int redir);
bool is_affinity_mask_valid(const struct cpumask *cpumask);
#define is_affinity_mask_valid is_affinity_mask_valid
int create_irq(void);
void destroy_irq(unsigned int irq);

View File

@ -1,347 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Machine vector for IA-64.
*
* Copyright (C) 1999 Silicon Graphics, Inc.
* Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com>
* Copyright (C) Vijay Chander <vijay@engr.sgi.com>
* Copyright (C) 1999-2001, 2003-2004 Hewlett-Packard Co.
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#ifndef _ASM_IA64_MACHVEC_H
#define _ASM_IA64_MACHVEC_H
#include <linux/types.h>
/* forward declarations: */
struct device;
struct pt_regs;
struct scatterlist;
struct page;
struct mm_struct;
struct pci_bus;
struct task_struct;
struct pci_dev;
struct msi_desc;
typedef void ia64_mv_setup_t (char **);
typedef void ia64_mv_cpu_init_t (void);
typedef void ia64_mv_irq_init_t (void);
typedef void ia64_mv_send_ipi_t (int, int, int, int);
typedef void ia64_mv_timer_interrupt_t (int, void *);
typedef void ia64_mv_global_tlb_purge_t (struct mm_struct *, unsigned long, unsigned long, unsigned long);
typedef u8 ia64_mv_irq_to_vector (int);
typedef unsigned int ia64_mv_local_vector_to_irq (u8);
typedef char *ia64_mv_pci_get_legacy_mem_t (struct pci_bus *);
typedef int ia64_mv_pci_legacy_read_t (struct pci_bus *, u16 port, u32 *val,
u8 size);
typedef int ia64_mv_pci_legacy_write_t (struct pci_bus *, u16 port, u32 val,
u8 size);
typedef void ia64_mv_migrate_t(struct task_struct * task);
typedef void ia64_mv_pci_fixup_bus_t (struct pci_bus *);
typedef void ia64_mv_kernel_launch_event_t(void);
/* DMA-mapping interface: */
typedef void ia64_mv_dma_init (void);
typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
/*
* WARNING: The legacy I/O space is _architected_. Platforms are
* expected to follow this architected model (see Section 10.7 in the
* IA-64 Architecture Software Developer's Manual). Unfortunately,
* some broken machines do not follow that model, which is why we have
* to make the inX/outX operations part of the machine vector.
* Platform designers should follow the architected model whenever
* possible.
*/
typedef unsigned int ia64_mv_inb_t (unsigned long);
typedef unsigned int ia64_mv_inw_t (unsigned long);
typedef unsigned int ia64_mv_inl_t (unsigned long);
typedef void ia64_mv_outb_t (unsigned char, unsigned long);
typedef void ia64_mv_outw_t (unsigned short, unsigned long);
typedef void ia64_mv_outl_t (unsigned int, unsigned long);
typedef void ia64_mv_mmiowb_t (void);
typedef unsigned char ia64_mv_readb_t (const volatile void __iomem *);
typedef unsigned short ia64_mv_readw_t (const volatile void __iomem *);
typedef unsigned int ia64_mv_readl_t (const volatile void __iomem *);
typedef unsigned long ia64_mv_readq_t (const volatile void __iomem *);
typedef unsigned char ia64_mv_readb_relaxed_t (const volatile void __iomem *);
typedef unsigned short ia64_mv_readw_relaxed_t (const volatile void __iomem *);
typedef unsigned int ia64_mv_readl_relaxed_t (const volatile void __iomem *);
typedef unsigned long ia64_mv_readq_relaxed_t (const volatile void __iomem *);
typedef int ia64_mv_setup_msi_irq_t (struct pci_dev *pdev, struct msi_desc *);
typedef void ia64_mv_teardown_msi_irq_t (unsigned int irq);
static inline void
machvec_noop (void)
{
}
static inline void
machvec_noop_task (struct task_struct *task)
{
}
static inline void
machvec_noop_bus (struct pci_bus *bus)
{
}
extern void machvec_setup (char **);
extern void machvec_timer_interrupt (int, void *);
# if defined (CONFIG_IA64_HP_SIM)
# include <asm/machvec_hpsim.h>
# elif defined (CONFIG_IA64_DIG)
# include <asm/machvec_dig.h>
# elif defined(CONFIG_IA64_DIG_VTD)
# include <asm/machvec_dig_vtd.h>
# elif defined (CONFIG_IA64_HP_ZX1)
# include <asm/machvec_hpzx1.h>
# elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB)
# include <asm/machvec_hpzx1_swiotlb.h>
# elif defined (CONFIG_IA64_SGI_SN2)
# include <asm/machvec_sn2.h>
# elif defined (CONFIG_IA64_SGI_UV)
# include <asm/machvec_uv.h>
# elif defined (CONFIG_IA64_GENERIC)
# ifdef MACHVEC_PLATFORM_HEADER
# include MACHVEC_PLATFORM_HEADER
# else
# define ia64_platform_name ia64_mv.name
# define platform_setup ia64_mv.setup
# define platform_cpu_init ia64_mv.cpu_init
# define platform_irq_init ia64_mv.irq_init
# define platform_send_ipi ia64_mv.send_ipi
# define platform_timer_interrupt ia64_mv.timer_interrupt
# define platform_global_tlb_purge ia64_mv.global_tlb_purge
# define platform_dma_init ia64_mv.dma_init
# define platform_dma_get_ops ia64_mv.dma_get_ops
# define platform_irq_to_vector ia64_mv.irq_to_vector
# define platform_local_vector_to_irq ia64_mv.local_vector_to_irq
# define platform_pci_get_legacy_mem ia64_mv.pci_get_legacy_mem
# define platform_pci_legacy_read ia64_mv.pci_legacy_read
# define platform_pci_legacy_write ia64_mv.pci_legacy_write
# define platform_inb ia64_mv.inb
# define platform_inw ia64_mv.inw
# define platform_inl ia64_mv.inl
# define platform_outb ia64_mv.outb
# define platform_outw ia64_mv.outw
# define platform_outl ia64_mv.outl
# define platform_mmiowb ia64_mv.mmiowb
# define platform_readb ia64_mv.readb
# define platform_readw ia64_mv.readw
# define platform_readl ia64_mv.readl
# define platform_readq ia64_mv.readq
# define platform_readb_relaxed ia64_mv.readb_relaxed
# define platform_readw_relaxed ia64_mv.readw_relaxed
# define platform_readl_relaxed ia64_mv.readl_relaxed
# define platform_readq_relaxed ia64_mv.readq_relaxed
# define platform_migrate ia64_mv.migrate
# define platform_setup_msi_irq ia64_mv.setup_msi_irq
# define platform_teardown_msi_irq ia64_mv.teardown_msi_irq
# define platform_pci_fixup_bus ia64_mv.pci_fixup_bus
# define platform_kernel_launch_event ia64_mv.kernel_launch_event
# endif
/* __attribute__((__aligned__(16))) is required to make the size of the
* structure a multiple of 16 bytes.
* This will fill up the holes created because of section 3.3.1 in the
* Software Conventions guide.
*/
struct ia64_machine_vector {
const char *name;
ia64_mv_setup_t *setup;
ia64_mv_cpu_init_t *cpu_init;
ia64_mv_irq_init_t *irq_init;
ia64_mv_send_ipi_t *send_ipi;
ia64_mv_timer_interrupt_t *timer_interrupt;
ia64_mv_global_tlb_purge_t *global_tlb_purge;
ia64_mv_dma_init *dma_init;
ia64_mv_dma_get_ops *dma_get_ops;
ia64_mv_irq_to_vector *irq_to_vector;
ia64_mv_local_vector_to_irq *local_vector_to_irq;
ia64_mv_pci_get_legacy_mem_t *pci_get_legacy_mem;
ia64_mv_pci_legacy_read_t *pci_legacy_read;
ia64_mv_pci_legacy_write_t *pci_legacy_write;
ia64_mv_inb_t *inb;
ia64_mv_inw_t *inw;
ia64_mv_inl_t *inl;
ia64_mv_outb_t *outb;
ia64_mv_outw_t *outw;
ia64_mv_outl_t *outl;
ia64_mv_mmiowb_t *mmiowb;
ia64_mv_readb_t *readb;
ia64_mv_readw_t *readw;
ia64_mv_readl_t *readl;
ia64_mv_readq_t *readq;
ia64_mv_readb_relaxed_t *readb_relaxed;
ia64_mv_readw_relaxed_t *readw_relaxed;
ia64_mv_readl_relaxed_t *readl_relaxed;
ia64_mv_readq_relaxed_t *readq_relaxed;
ia64_mv_migrate_t *migrate;
ia64_mv_setup_msi_irq_t *setup_msi_irq;
ia64_mv_teardown_msi_irq_t *teardown_msi_irq;
ia64_mv_pci_fixup_bus_t *pci_fixup_bus;
ia64_mv_kernel_launch_event_t *kernel_launch_event;
} __attribute__((__aligned__(16))); /* align attrib? see above comment */
#define MACHVEC_INIT(name) \
{ \
#name, \
platform_setup, \
platform_cpu_init, \
platform_irq_init, \
platform_send_ipi, \
platform_timer_interrupt, \
platform_global_tlb_purge, \
platform_dma_init, \
platform_dma_get_ops, \
platform_irq_to_vector, \
platform_local_vector_to_irq, \
platform_pci_get_legacy_mem, \
platform_pci_legacy_read, \
platform_pci_legacy_write, \
platform_inb, \
platform_inw, \
platform_inl, \
platform_outb, \
platform_outw, \
platform_outl, \
platform_mmiowb, \
platform_readb, \
platform_readw, \
platform_readl, \
platform_readq, \
platform_readb_relaxed, \
platform_readw_relaxed, \
platform_readl_relaxed, \
platform_readq_relaxed, \
platform_migrate, \
platform_setup_msi_irq, \
platform_teardown_msi_irq, \
platform_pci_fixup_bus, \
platform_kernel_launch_event \
}
extern struct ia64_machine_vector ia64_mv;
extern void machvec_init (const char *name);
extern void machvec_init_from_cmdline(const char *cmdline);
# else
# error Unknown configuration. Update arch/ia64/include/asm/machvec.h.
# endif /* CONFIG_IA64_GENERIC */
extern void swiotlb_dma_init(void);
extern const struct dma_map_ops *dma_get_ops(struct device *);
/*
* Define default versions so we can extend machvec for new platforms without having
* to update the machvec files for all existing platforms.
*/
#ifndef platform_setup
# define platform_setup machvec_setup
#endif
#ifndef platform_cpu_init
# define platform_cpu_init machvec_noop
#endif
#ifndef platform_irq_init
# define platform_irq_init machvec_noop
#endif
#ifndef platform_send_ipi
# define platform_send_ipi ia64_send_ipi /* default to architected version */
#endif
#ifndef platform_timer_interrupt
# define platform_timer_interrupt machvec_timer_interrupt
#endif
#ifndef platform_global_tlb_purge
# define platform_global_tlb_purge ia64_global_tlb_purge /* default to architected version */
#endif
#ifndef platform_kernel_launch_event
# define platform_kernel_launch_event machvec_noop
#endif
#ifndef platform_dma_init
# define platform_dma_init swiotlb_dma_init
#endif
#ifndef platform_dma_get_ops
# define platform_dma_get_ops dma_get_ops
#endif
#ifndef platform_irq_to_vector
# define platform_irq_to_vector __ia64_irq_to_vector
#endif
#ifndef platform_local_vector_to_irq
# define platform_local_vector_to_irq __ia64_local_vector_to_irq
#endif
#ifndef platform_pci_get_legacy_mem
# define platform_pci_get_legacy_mem ia64_pci_get_legacy_mem
#endif
#ifndef platform_pci_legacy_read
# define platform_pci_legacy_read ia64_pci_legacy_read
extern int ia64_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size);
#endif
#ifndef platform_pci_legacy_write
# define platform_pci_legacy_write ia64_pci_legacy_write
extern int ia64_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size);
#endif
#ifndef platform_inb
# define platform_inb __ia64_inb
#endif
#ifndef platform_inw
# define platform_inw __ia64_inw
#endif
#ifndef platform_inl
# define platform_inl __ia64_inl
#endif
#ifndef platform_outb
# define platform_outb __ia64_outb
#endif
#ifndef platform_outw
# define platform_outw __ia64_outw
#endif
#ifndef platform_outl
# define platform_outl __ia64_outl
#endif
#ifndef platform_mmiowb
# define platform_mmiowb __ia64_mmiowb
#endif
#ifndef platform_readb
# define platform_readb __ia64_readb
#endif
#ifndef platform_readw
# define platform_readw __ia64_readw
#endif
#ifndef platform_readl
# define platform_readl __ia64_readl
#endif
#ifndef platform_readq
# define platform_readq __ia64_readq
#endif
#ifndef platform_readb_relaxed
# define platform_readb_relaxed __ia64_readb_relaxed
#endif
#ifndef platform_readw_relaxed
# define platform_readw_relaxed __ia64_readw_relaxed
#endif
#ifndef platform_readl_relaxed
# define platform_readl_relaxed __ia64_readl_relaxed
#endif
#ifndef platform_readq_relaxed
# define platform_readq_relaxed __ia64_readq_relaxed
#endif
#ifndef platform_migrate
# define platform_migrate machvec_noop_task
#endif
#ifndef platform_setup_msi_irq
# define platform_setup_msi_irq ((ia64_mv_setup_msi_irq_t*)NULL)
#endif
#ifndef platform_teardown_msi_irq
# define platform_teardown_msi_irq ((ia64_mv_teardown_msi_irq_t*)NULL)
#endif
#ifndef platform_pci_fixup_bus
# define platform_pci_fixup_bus machvec_noop_bus
#endif
#endif /* _ASM_IA64_MACHVEC_H */
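A compressed sketch of the dual-use pattern this header implemented (illustration only, with invented names frob, example_mv and the BUILD_* symbols): a platform-specific kernel resolves platform_* names at compile time, a generic kernel routes them through a machine vector chosen at boot, and anything not overridden falls back to the architected default through an #ifndef chain like the one above.

/* Illustrative sketch only: frob, example_mv and the BUILD_* symbols are
 * invented for this example. */
typedef void example_frob_t(void);

struct example_machine_vector {
	const char *name;
	example_frob_t *frob;
};

extern struct example_machine_vector example_mv;	/* generic kernel: filled in at boot */
extern void my_platform_frob(void);			/* one platform's own implementation */
extern void generic_frob(void);				/* architected default */

#ifdef BUILD_GENERIC_EXAMPLE
# define platform_frob	(*example_mv.frob)	/* resolved at run time via the vector */
#elif defined(BUILD_MY_PLATFORM_EXAMPLE)
# define platform_frob	my_platform_frob	/* resolved at compile time */
#endif

/* machvec.h-style fallback for anything the platform did not override */
#ifndef platform_frob
# define platform_frob	generic_frob
#endif

void do_frob(void)
{
	platform_frob();	/* the same call site works in all three cases */
}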

View File

@ -1,17 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_MACHVEC_DIG_h
#define _ASM_IA64_MACHVEC_DIG_h
extern ia64_mv_setup_t dig_setup;
/*
* This stuff has dual use!
*
* For a generic kernel, the macros are used to initialize the
* platform's machvec structure. When compiling a non-generic kernel,
* the macros are used directly.
*/
#define ia64_platform_name "dig"
#define platform_setup dig_setup
#endif /* _ASM_IA64_MACHVEC_DIG_h */

View File

@ -1,19 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_MACHVEC_DIG_VTD_h
#define _ASM_IA64_MACHVEC_DIG_VTD_h
extern ia64_mv_setup_t dig_setup;
extern ia64_mv_dma_init pci_iommu_alloc;
/*
* This stuff has dual use!
*
* For a generic kernel, the macros are used to initialize the
* platform's machvec structure. When compiling a non-generic kernel,
* the macros are used directly.
*/
#define ia64_platform_name "dig_vtd"
#define platform_setup dig_setup
#define platform_dma_init pci_iommu_alloc
#endif /* _ASM_IA64_MACHVEC_DIG_VTD_h */

View File

@ -1,19 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_MACHVEC_HPSIM_h
#define _ASM_IA64_MACHVEC_HPSIM_h
extern ia64_mv_setup_t hpsim_setup;
extern ia64_mv_irq_init_t hpsim_irq_init;
/*
* This stuff has dual use!
*
* For a generic kernel, the macros are used to initialize the
* platform's machvec structure. When compiling a non-generic kernel,
* the macros are used directly.
*/
#define ia64_platform_name "hpsim"
#define platform_setup hpsim_setup
#define platform_irq_init hpsim_irq_init
#endif /* _ASM_IA64_MACHVEC_HPSIM_h */


@ -1,19 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_MACHVEC_HPZX1_h
#define _ASM_IA64_MACHVEC_HPZX1_h
extern ia64_mv_setup_t dig_setup;
extern ia64_mv_dma_init sba_dma_init;
/*
* This stuff has dual use!
*
* For a generic kernel, the macros are used to initialize the
* platform's machvec structure. When compiling a non-generic kernel,
* the macros are used directly.
*/
#define ia64_platform_name "hpzx1"
#define platform_setup dig_setup
#define platform_dma_init sba_dma_init
#endif /* _ASM_IA64_MACHVEC_HPZX1_h */


@ -1,20 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h
#define _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h
extern ia64_mv_setup_t dig_setup;
extern ia64_mv_dma_get_ops hwsw_dma_get_ops;
/*
* This stuff has dual use!
*
* For a generic kernel, the macros are used to initialize the
* platform's machvec structure. When compiling a non-generic kernel,
* the macros are used directly.
*/
#define ia64_platform_name "hpzx1_swiotlb"
#define platform_setup dig_setup
#define platform_dma_init machvec_noop
#define platform_dma_get_ops hwsw_dma_get_ops
#endif /* _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h */


@ -1,35 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <asm/iommu.h>
#include <asm/machvec.h>
extern ia64_mv_send_ipi_t ia64_send_ipi;
extern ia64_mv_global_tlb_purge_t ia64_global_tlb_purge;
extern ia64_mv_irq_to_vector __ia64_irq_to_vector;
extern ia64_mv_local_vector_to_irq __ia64_local_vector_to_irq;
extern ia64_mv_pci_get_legacy_mem_t ia64_pci_get_legacy_mem;
extern ia64_mv_pci_legacy_read_t ia64_pci_legacy_read;
extern ia64_mv_pci_legacy_write_t ia64_pci_legacy_write;
extern ia64_mv_inb_t __ia64_inb;
extern ia64_mv_inw_t __ia64_inw;
extern ia64_mv_inl_t __ia64_inl;
extern ia64_mv_outb_t __ia64_outb;
extern ia64_mv_outw_t __ia64_outw;
extern ia64_mv_outl_t __ia64_outl;
extern ia64_mv_mmiowb_t __ia64_mmiowb;
extern ia64_mv_readb_t __ia64_readb;
extern ia64_mv_readw_t __ia64_readw;
extern ia64_mv_readl_t __ia64_readl;
extern ia64_mv_readq_t __ia64_readq;
extern ia64_mv_readb_t __ia64_readb_relaxed;
extern ia64_mv_readw_t __ia64_readw_relaxed;
extern ia64_mv_readl_t __ia64_readl_relaxed;
extern ia64_mv_readq_t __ia64_readq_relaxed;
#define MACHVEC_HELPER(name) \
struct ia64_machine_vector machvec_##name __attribute__ ((unused, __section__ (".machvec"))) \
= MACHVEC_INIT(name);
#define MACHVEC_DEFINE(name) MACHVEC_HELPER(name)
MACHVEC_DEFINE(MACHVEC_PLATFORM_NAME)
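For a build where MACHVEC_PLATFORM_NAME expands to, say, dig, the two helper macros above expand to roughly the following, dropping a fully populated vector into the .machvec section:

struct ia64_machine_vector machvec_dig
        __attribute__ ((unused, __section__ (".machvec"))) = MACHVEC_INIT(dig);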


@ -1,114 +0,0 @@
/*
* Copyright (c) 2002-2003,2006 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
* Further, this software is distributed without any warranty that it is
* free of the rightful claim of any third person regarding infringement
* or the like. Any license provided herein, whether implied or
* otherwise, applies only to this software file. Patent licenses, if
* any, provided herein do not apply to combinations of this program with
* other software, or any other product whatsoever.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* For further information regarding this notice, see:
*
* http://oss.sgi.com/projects/GenInfo/NoticeExplan
*/
#ifndef _ASM_IA64_MACHVEC_SN2_H
#define _ASM_IA64_MACHVEC_SN2_H
extern ia64_mv_setup_t sn_setup;
extern ia64_mv_cpu_init_t sn_cpu_init;
extern ia64_mv_irq_init_t sn_irq_init;
extern ia64_mv_send_ipi_t sn2_send_IPI;
extern ia64_mv_timer_interrupt_t sn_timer_interrupt;
extern ia64_mv_global_tlb_purge_t sn2_global_tlb_purge;
extern ia64_mv_irq_to_vector sn_irq_to_vector;
extern ia64_mv_local_vector_to_irq sn_local_vector_to_irq;
extern ia64_mv_pci_get_legacy_mem_t sn_pci_get_legacy_mem;
extern ia64_mv_pci_legacy_read_t sn_pci_legacy_read;
extern ia64_mv_pci_legacy_write_t sn_pci_legacy_write;
extern ia64_mv_inb_t __sn_inb;
extern ia64_mv_inw_t __sn_inw;
extern ia64_mv_inl_t __sn_inl;
extern ia64_mv_outb_t __sn_outb;
extern ia64_mv_outw_t __sn_outw;
extern ia64_mv_outl_t __sn_outl;
extern ia64_mv_mmiowb_t __sn_mmiowb;
extern ia64_mv_readb_t __sn_readb;
extern ia64_mv_readw_t __sn_readw;
extern ia64_mv_readl_t __sn_readl;
extern ia64_mv_readq_t __sn_readq;
extern ia64_mv_readb_t __sn_readb_relaxed;
extern ia64_mv_readw_t __sn_readw_relaxed;
extern ia64_mv_readl_t __sn_readl_relaxed;
extern ia64_mv_readq_t __sn_readq_relaxed;
extern ia64_mv_dma_init sn_dma_init;
extern ia64_mv_migrate_t sn_migrate;
extern ia64_mv_kernel_launch_event_t sn_kernel_launch_event;
extern ia64_mv_setup_msi_irq_t sn_setup_msi_irq;
extern ia64_mv_teardown_msi_irq_t sn_teardown_msi_irq;
extern ia64_mv_pci_fixup_bus_t sn_pci_fixup_bus;
/*
* This stuff has dual use!
*
* For a generic kernel, the macros are used to initialize the
* platform's machvec structure. When compiling a non-generic kernel,
* the macros are used directly.
*/
#define ia64_platform_name "sn2"
#define platform_setup sn_setup
#define platform_cpu_init sn_cpu_init
#define platform_irq_init sn_irq_init
#define platform_send_ipi sn2_send_IPI
#define platform_timer_interrupt sn_timer_interrupt
#define platform_global_tlb_purge sn2_global_tlb_purge
#define platform_pci_fixup sn_pci_fixup
#define platform_inb __sn_inb
#define platform_inw __sn_inw
#define platform_inl __sn_inl
#define platform_outb __sn_outb
#define platform_outw __sn_outw
#define platform_outl __sn_outl
#define platform_mmiowb __sn_mmiowb
#define platform_readb __sn_readb
#define platform_readw __sn_readw
#define platform_readl __sn_readl
#define platform_readq __sn_readq
#define platform_readb_relaxed __sn_readb_relaxed
#define platform_readw_relaxed __sn_readw_relaxed
#define platform_readl_relaxed __sn_readl_relaxed
#define platform_readq_relaxed __sn_readq_relaxed
#define platform_irq_to_vector sn_irq_to_vector
#define platform_local_vector_to_irq sn_local_vector_to_irq
#define platform_pci_get_legacy_mem sn_pci_get_legacy_mem
#define platform_pci_legacy_read sn_pci_legacy_read
#define platform_pci_legacy_write sn_pci_legacy_write
#define platform_dma_init sn_dma_init
#define platform_migrate sn_migrate
#define platform_kernel_launch_event sn_kernel_launch_event
#ifdef CONFIG_PCI_MSI
#define platform_setup_msi_irq sn_setup_msi_irq
#define platform_teardown_msi_irq sn_teardown_msi_irq
#else
#define platform_setup_msi_irq ((ia64_mv_setup_msi_irq_t*)NULL)
#define platform_teardown_msi_irq ((ia64_mv_teardown_msi_irq_t*)NULL)
#endif
#define platform_pci_fixup_bus sn_pci_fixup_bus
#include <asm/sn/io.h>
#endif /* _ASM_IA64_MACHVEC_SN2_H */


@ -1,26 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* SGI UV Core Functions
*
* Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_MACHVEC_UV_H
#define _ASM_IA64_MACHVEC_UV_H
extern ia64_mv_setup_t uv_setup;
/*
* This stuff has dual use!
*
* For a generic kernel, the macros are used to initialize the
* platform's machvec structure. When compiling a non-generic kernel,
* the macros are used directly.
*/
#define ia64_platform_name "uv"
#define platform_setup uv_setup
#endif /* _ASM_IA64_MACHVEC_UV_H */


@ -3,22 +3,14 @@
#ifndef _ASM_IA64_MMIOWB_H
#define _ASM_IA64_MMIOWB_H
#include <asm/machvec.h>
/**
* ___ia64_mmiowb - I/O write barrier
* mmiowb - I/O write barrier
*
* Ensure ordering of I/O space writes. This will make sure that writes
* following the barrier will arrive after all previous writes. For most
* ia64 platforms, this is a simple 'mf.a' instruction.
*/
static inline void ___ia64_mmiowb(void)
{
ia64_mfa();
}
#define __ia64_mmiowb ___ia64_mmiowb
#define mmiowb() platform_mmiowb()
#define mmiowb() ia64_mfa()
#include <asm-generic/mmiowb.h>
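The hunk above folds the old platform_mmiowb() hook down to a bare ia64_mfa(). The barrier keeps its usual job; a sketch of the classic driver pattern it exists for, with dev_lock, dev_regs and CTRL as illustrative names:

spin_lock(&dev_lock);
writel(cmd, dev_regs + CTRL);   /* posted MMIO write */
mmiowb();                       /* order the write before the unlock */
spin_unlock(&dev_lock);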


@ -27,16 +27,9 @@ static inline int pfn_to_nid(unsigned long pfn)
return nid;
}
#ifdef CONFIG_IA64_DIG /* DIG systems are small */
# define MAX_PHYSNODE_ID 8
# define NR_NODE_MEMBLKS (MAX_NUMNODES * 8)
#else /* sn2 is the biggest case, so we use that if !DIG */
# define MAX_PHYSNODE_ID 2048
# define NR_NODE_MEMBLKS (MAX_NUMNODES * 4)
#endif
#else /* CONFIG_NUMA */
# define NR_NODE_MEMBLKS (MAX_NUMNODES * 4)
#define MAX_PHYSNODE_ID 2048
#endif /* CONFIG_NUMA */
#define NR_NODE_MEMBLKS (MAX_NUMNODES * 4)
#endif /* _ASM_IA64_MMZONE_H */


@ -39,9 +39,9 @@ extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
struct vm_area_struct *vma,
enum pci_mmap_state mmap_state);
#define pci_get_legacy_mem platform_pci_get_legacy_mem
#define pci_legacy_read platform_pci_legacy_read
#define pci_legacy_write platform_pci_legacy_write
char *pci_get_legacy_mem(struct pci_bus *bus);
int pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size);
int pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size);
struct pci_controller {
struct acpi_device *companion;
@ -69,7 +69,4 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
return channel ? isa_irq_to_vector(15) : isa_irq_to_vector(14);
}
#ifdef CONFIG_INTEL_IOMMU
extern void pci_iommu_alloc(void);
#endif
#endif /* _ASM_IA64_PCI_H */


@ -679,8 +679,6 @@ enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_FORCE_MWAIT,
void default_idle(void);
#define ia64_platform_is(x) (strcmp(x, ia64_platform_name) == 0)
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_IA64_PROCESSOR_H */
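ia64_platform_is() survives as a plain string compare against ia64_platform_name, so code that keyed platform-specific paths off the machvec name keeps working. An illustrative check (do_dig_quirk is not a real kernel function):

if (ia64_platform_is("dig"))
        do_dig_quirk();         /* illustrative platform hook */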


@ -1,15 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2006 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_ACPI_H
#define _ASM_IA64_SN_ACPI_H
extern int sn_acpi_rev;
#define SN_ACPI_BASE_SUPPORT() (sn_acpi_rev >= 0x20101)
#endif /* _ASM_IA64_SN_ACPI_H */


@ -1,299 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 1992-1999,2001-2005 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_ADDRS_H
#define _ASM_IA64_SN_ADDRS_H
#include <asm/percpu.h>
#include <asm/sn/types.h>
#include <asm/sn/arch.h>
#include <asm/sn/pda.h>
/*
* Memory/SHUB Address Format:
* +-+---------+--+--------------+
* |0| NASID |AS| NodeOffset |
* +-+---------+--+--------------+
*
* NASID: (low NASID bit is 0) Memory and SHUB MMRs
* AS: 2-bit Address Space Identifier. Used only if low NASID bit is 0
* 00: Local Resources and MMR space
* Top bit of NodeOffset
* 0: Local resources space
* node id:
* 0: IA64/NT compatibility space
* 2: Local MMR Space
* 4: Local memory, regardless of local node id
* 1: Global MMR space
* 01: GET space.
* 10: AMO space.
* 11: Cacheable memory space.
*
* NodeOffset: byte offset
*
*
* TIO address format:
* +-+----------+--+--------------+
* |0| NASID |AS| Nodeoffset |
* +-+----------+--+--------------+
*
* NASID: (low NASID bit is 1) TIO
* AS: 2-bit Chiplet Identifier
* 00: TIO LB (Indicates TIO MMR access.)
* 01: TIO ICE (indicates coretalk space access.)
*
* NodeOffset: top bit must be set.
*
*
* Note that in both of the above address formats, the low
* NASID bit indicates if the reference is to the SHUB or TIO MMRs.
*/
/*
* Define basic shift & mask constants for manipulating NASIDs and AS values.
*/
#define NASID_BITMASK (sn_hub_info->nasid_bitmask)
#define NASID_SHIFT (sn_hub_info->nasid_shift)
#define AS_SHIFT (sn_hub_info->as_shift)
#define AS_BITMASK 0x3UL
#define NASID_MASK ((u64)NASID_BITMASK << NASID_SHIFT)
#define AS_MASK ((u64)AS_BITMASK << AS_SHIFT)
/*
* AS values. These are the same on both SHUB1 & SHUB2.
*/
#define AS_GET_VAL 1UL
#define AS_AMO_VAL 2UL
#define AS_CAC_VAL 3UL
#define AS_GET_SPACE (AS_GET_VAL << AS_SHIFT)
#define AS_AMO_SPACE (AS_AMO_VAL << AS_SHIFT)
#define AS_CAC_SPACE (AS_CAC_VAL << AS_SHIFT)
/*
* Virtual Mode Local & Global MMR space.
*/
#define SH1_LOCAL_MMR_OFFSET 0x8000000000UL
#define SH2_LOCAL_MMR_OFFSET 0x0200000000UL
#define LOCAL_MMR_OFFSET (is_shub2() ? SH2_LOCAL_MMR_OFFSET : SH1_LOCAL_MMR_OFFSET)
#define LOCAL_MMR_SPACE (__IA64_UNCACHED_OFFSET | LOCAL_MMR_OFFSET)
#define LOCAL_PHYS_MMR_SPACE (RGN_BASE(RGN_HPAGE) | LOCAL_MMR_OFFSET)
#define SH1_GLOBAL_MMR_OFFSET 0x0800000000UL
#define SH2_GLOBAL_MMR_OFFSET 0x0300000000UL
#define GLOBAL_MMR_OFFSET (is_shub2() ? SH2_GLOBAL_MMR_OFFSET : SH1_GLOBAL_MMR_OFFSET)
#define GLOBAL_MMR_SPACE (__IA64_UNCACHED_OFFSET | GLOBAL_MMR_OFFSET)
/*
* Physical mode addresses
*/
#define GLOBAL_PHYS_MMR_SPACE (RGN_BASE(RGN_HPAGE) | GLOBAL_MMR_OFFSET)
/*
* Clear region & AS bits.
*/
#define TO_PHYS_MASK (~(RGN_BITS | AS_MASK))
/*
* Misc NASID manipulation.
*/
#define NASID_SPACE(n) ((u64)(n) << NASID_SHIFT)
#define REMOTE_ADDR(n,a) (NASID_SPACE(n) | (a))
#define NODE_OFFSET(x) ((x) & (NODE_ADDRSPACE_SIZE - 1))
#define NODE_ADDRSPACE_SIZE (1UL << AS_SHIFT)
#define NASID_GET(x) (int) (((u64) (x) >> NASID_SHIFT) & NASID_BITMASK)
#define LOCAL_MMR_ADDR(a) (LOCAL_MMR_SPACE | (a))
#define GLOBAL_MMR_ADDR(n,a) (GLOBAL_MMR_SPACE | REMOTE_ADDR(n,a))
#define GLOBAL_MMR_PHYS_ADDR(n,a) (GLOBAL_PHYS_MMR_SPACE | REMOTE_ADDR(n,a))
#define GLOBAL_CAC_ADDR(n,a) (CAC_BASE | REMOTE_ADDR(n,a))
#define CHANGE_NASID(n,x) ((void *)(((u64)(x) & ~NASID_MASK) | NASID_SPACE(n)))
#define IS_TIO_NASID(n) ((n) & 1)
/* non-II mmr's start at top of big window space (4G) */
#define BWIN_TOP 0x0000000100000000UL
/*
* general address defines
*/
#define CAC_BASE (PAGE_OFFSET | AS_CAC_SPACE)
#define AMO_BASE (__IA64_UNCACHED_OFFSET | AS_AMO_SPACE)
#define AMO_PHYS_BASE (RGN_BASE(RGN_HPAGE) | AS_AMO_SPACE)
#define GET_BASE (PAGE_OFFSET | AS_GET_SPACE)
/*
* Convert Memory addresses between various addressing modes.
*/
#define TO_PHYS(x) (TO_PHYS_MASK & (x))
#define TO_CAC(x) (CAC_BASE | TO_PHYS(x))
#ifdef CONFIG_SGI_SN
#define TO_AMO(x) (AMO_BASE | TO_PHYS(x))
#define TO_GET(x) (GET_BASE | TO_PHYS(x))
#else
#define TO_AMO(x) ({ BUG(); x; })
#define TO_GET(x) ({ BUG(); x; })
#endif
/*
* Convert from processor physical address to II/TIO physical address:
* II - squeeze out the AS bits
* TIO- requires a chiplet id in bits 38-39. For DMA to memory,
* the chiplet id is zero. If we implement TIO-TIO dma, we might need
* to insert a chiplet id into this macro. However, it is our belief
* right now that this chiplet id will be ICE, which is also zero.
*/
#define SH1_TIO_PHYS_TO_DMA(x) \
((((u64)(NASID_GET(x))) << 40) | NODE_OFFSET(x))
#define SH2_NETWORK_BANK_OFFSET(x) \
((u64)(x) & ((1UL << (sn_hub_info->nasid_shift - 4)) -1))
#define SH2_NETWORK_BANK_SELECT(x) \
((((u64)(x) & (0x3UL << (sn_hub_info->nasid_shift - 4))) \
>> (sn_hub_info->nasid_shift - 4)) << 36)
#define SH2_NETWORK_ADDRESS(x) \
(SH2_NETWORK_BANK_OFFSET(x) | SH2_NETWORK_BANK_SELECT(x))
#define SH2_TIO_PHYS_TO_DMA(x) \
(((u64)(NASID_GET(x)) << 40) | SH2_NETWORK_ADDRESS(x))
#define PHYS_TO_TIODMA(x) \
(is_shub1() ? SH1_TIO_PHYS_TO_DMA(x) : SH2_TIO_PHYS_TO_DMA(x))
#define PHYS_TO_DMA(x) \
((((u64)(x) & NASID_MASK) >> 2) | NODE_OFFSET(x))
/*
* Macros to test for address type.
*/
#define IS_AMO_ADDRESS(x) (((u64)(x) & (RGN_BITS | AS_MASK)) == AMO_BASE)
#define IS_AMO_PHYS_ADDRESS(x) (((u64)(x) & (RGN_BITS | AS_MASK)) == AMO_PHYS_BASE)
/*
* The following definitions pertain to the IO special address
* space. They define the location of the big and little windows
* of any given node.
*/
#define BWIN_SIZE_BITS 29 /* big window size: 512M */
#define TIO_BWIN_SIZE_BITS 30 /* big window size: 1G */
#define NODE_SWIN_BASE(n, w) ((w == 0) ? NODE_BWIN_BASE((n), SWIN0_BIGWIN) \
: RAW_NODE_SWIN_BASE(n, w))
#define TIO_SWIN_BASE(n, w) (TIO_IO_BASE(n) + \
((u64) (w) << TIO_SWIN_SIZE_BITS))
#define NODE_IO_BASE(n) (GLOBAL_MMR_SPACE | NASID_SPACE(n))
#define TIO_IO_BASE(n) (__IA64_UNCACHED_OFFSET | NASID_SPACE(n))
#define BWIN_SIZE (1UL << BWIN_SIZE_BITS)
#define NODE_BWIN_BASE0(n) (NODE_IO_BASE(n) + BWIN_SIZE)
#define NODE_BWIN_BASE(n, w) (NODE_BWIN_BASE0(n) + ((u64) (w) << BWIN_SIZE_BITS))
#define RAW_NODE_SWIN_BASE(n, w) (NODE_IO_BASE(n) + ((u64) (w) << SWIN_SIZE_BITS))
#define BWIN_WIDGET_MASK 0x7
#define BWIN_WINDOWNUM(x) (((x) >> BWIN_SIZE_BITS) & BWIN_WIDGET_MASK)
#define SH1_IS_BIG_WINDOW_ADDR(x) ((x) & BWIN_TOP)
#define TIO_BWIN_WINDOW_SELECT_MASK 0x7
#define TIO_BWIN_WINDOWNUM(x) (((x) >> TIO_BWIN_SIZE_BITS) & TIO_BWIN_WINDOW_SELECT_MASK)
#define TIO_HWIN_SHIFT_BITS 33
#define TIO_HWIN(x) (NODE_OFFSET(x) >> TIO_HWIN_SHIFT_BITS)
/*
* The following definitions pertain to the IO special address
* space. They define the location of the big and little windows
* of any given node.
*/
#define SWIN_SIZE_BITS 24
#define SWIN_WIDGET_MASK 0xF
#define TIO_SWIN_SIZE_BITS 28
#define TIO_SWIN_SIZE (1UL << TIO_SWIN_SIZE_BITS)
#define TIO_SWIN_WIDGET_MASK 0x3
/*
* Convert smallwindow address to xtalk address.
*
* 'addr' can be a physical or virtual address, but will be converted
* to Xtalk address in the range 0 -> SWINZ_SIZEMASK
*/
#define SWIN_WIDGETNUM(x) (((x) >> SWIN_SIZE_BITS) & SWIN_WIDGET_MASK)
#define TIO_SWIN_WIDGETNUM(x) (((x) >> TIO_SWIN_SIZE_BITS) & TIO_SWIN_WIDGET_MASK)
/*
* The following macros produce the correct base virtual address for
* the hub registers. The REMOTE_HUB_* macro produce
* the address for the specified hub's registers. The intent is
* that the appropriate PI, MD, NI, or II register would be substituted
* for x.
*
* WARNING:
* When certain Hub chip workarounds are defined, it's not sufficient
* to dereference the *_HUB_ADDR() macros. You should instead use
* HUB_L() and HUB_S() if you must deal with pointers to hub registers.
* Otherwise, the recommended approach is to use *_HUB_L() and *_HUB_S().
* They're always safe.
*/
/* Shub1 TIO & MMR addressing macros */
#define SH1_TIO_IOSPACE_ADDR(n,x) \
GLOBAL_MMR_ADDR(n,x)
#define SH1_REMOTE_BWIN_MMR(n,x) \
GLOBAL_MMR_ADDR(n,x)
#define SH1_REMOTE_SWIN_MMR(n,x) \
(NODE_SWIN_BASE(n,1) + 0x800000UL + (x))
#define SH1_REMOTE_MMR(n,x) \
(SH1_IS_BIG_WINDOW_ADDR(x) ? SH1_REMOTE_BWIN_MMR(n,x) : \
SH1_REMOTE_SWIN_MMR(n,x))
/* Shub1 TIO & MMR addressing macros */
#define SH2_TIO_IOSPACE_ADDR(n,x) \
((__IA64_UNCACHED_OFFSET | REMOTE_ADDR(n,x) | 1UL << (NASID_SHIFT - 2)))
#define SH2_REMOTE_MMR(n,x) \
GLOBAL_MMR_ADDR(n,x)
/* TIO & MMR addressing macros that work on both shub1 & shub2 */
#define TIO_IOSPACE_ADDR(n,x) \
((u64 *)(is_shub1() ? SH1_TIO_IOSPACE_ADDR(n,x) : \
SH2_TIO_IOSPACE_ADDR(n,x)))
#define SH_REMOTE_MMR(n,x) \
(is_shub1() ? SH1_REMOTE_MMR(n,x) : SH2_REMOTE_MMR(n,x))
#define REMOTE_HUB_ADDR(n,x) \
(IS_TIO_NASID(n) ? ((volatile u64*)TIO_IOSPACE_ADDR(n,x)) : \
((volatile u64*)SH_REMOTE_MMR(n,x)))
#define HUB_L(x) (*((volatile typeof(*x) *)x))
#define HUB_S(x,d) (*((volatile typeof(*x) *)x) = (d))
#define REMOTE_HUB_L(n, a) HUB_L(REMOTE_HUB_ADDR((n), (a)))
#define REMOTE_HUB_S(n, a, d) HUB_S(REMOTE_HUB_ADDR((n), (a)), (d))
/*
* Coretalk address breakdown
*/
#define CTALK_NASID_SHFT 40
#define CTALK_NASID_MASK (0x3FFFULL << CTALK_NASID_SHFT)
#define CTALK_CID_SHFT 38
#define CTALK_CID_MASK (0x3ULL << CTALK_CID_SHFT)
#define CTALK_NODE_OFFSET 0x3FFFFFFFFF
#endif /* _ASM_IA64_SN_ADDRS_H */
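Pulling the address-format pieces above together, a sketch of touching an MMR on a remote node through the safe accessors the header recommends; nasid and SH_SOME_MMR are illustrative:

u64 v = REMOTE_HUB_L(nasid, SH_SOME_MMR);       /* build the global MMR address and load it */
REMOTE_HUB_S(nasid, SH_SOME_MMR, v | 0x1UL);    /* write back with bit 0 set */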


@ -1,86 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* SGI specific setup.
*
* Copyright (C) 1995-1997,1999,2001-2005 Silicon Graphics, Inc. All rights reserved.
* Copyright (C) 1999 Ralf Baechle (ralf@gnu.org)
*/
#ifndef _ASM_IA64_SN_ARCH_H
#define _ASM_IA64_SN_ARCH_H
#include <linux/numa.h>
#include <asm/types.h>
#include <asm/percpu.h>
#include <asm/sn/types.h>
#include <asm/sn/sn_cpuid.h>
/*
* This is the maximum number of NUMALINK nodes that can be part of a single
* SSI kernel. This number includes C-brick, M-bricks, and TIOs. Nodes in
* remote partitions are NOT included in this number.
* The number of compact nodes cannot exceed size of a coherency domain.
* The purpose of this define is to specify a node count that includes
* all C/M/TIO nodes in an SSI system.
*
* SGI systems can currently support up to 256 C/M nodes plus additional TIO nodes.
*
* Note: ACPI20 has an architectural limit of 256 nodes. When we upgrade
* to ACPI3.0, this limit will be removed. The notion of "compact nodes"
* should be deleted and TIOs should be included in MAX_NUMNODES.
*/
#define MAX_TIO_NODES MAX_NUMNODES
#define MAX_COMPACT_NODES (MAX_NUMNODES + MAX_TIO_NODES)
/*
* Maximum number of nodes in all partitions and in all coherency domains.
* This is the total number of nodes accessible in the numalink fabric. It
* includes all C & M bricks, plus all TIOs.
*
* This value is also the value of the maximum number of NASIDs in the numalink
* fabric.
*/
#define MAX_NUMALINK_NODES 16384
/*
* The following defines attributes of the HUB chip. These attributes are
* frequently referenced. They are kept in the per-cpu data areas of each cpu.
* They are kept together in a struct to minimize cache misses.
*/
struct sn_hub_info_s {
u8 shub2;
u8 nasid_shift;
u8 as_shift;
u8 shub_1_1_found;
u16 nasid_bitmask;
};
DECLARE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
#define sn_hub_info this_cpu_ptr(&__sn_hub_info)
#define is_shub2() (sn_hub_info->shub2)
#define is_shub1() (sn_hub_info->shub2 == 0)
/*
* Use this macro to test if shub 1.1 wars should be enabled
*/
#define enable_shub_wars_1_1() (sn_hub_info->shub_1_1_found)
/*
* Compact node ID to nasid mappings kept in the per-cpu data areas of each
* cpu.
*/
DECLARE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]);
#define sn_cnodeid_to_nasid this_cpu_ptr(&__sn_cnodeid_to_nasid[0])
extern u8 sn_partition_id;
extern u8 sn_system_size;
extern u8 sn_sharing_domain_size;
extern u8 sn_region_size;
extern void sn_flush_all_caches(long addr, long bytes);
extern bool sn_cpu_disable_allowed(int cpu);
#endif /* _ASM_IA64_SN_ARCH_H */
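A sketch of how the per-cpu tables above were consumed, translating a compact node id to its NASID and branching on the SHUB revision; cnode and the setup_shub*_node() helpers are illustrative:

nasid_t nasid = sn_cnodeid_to_nasid[cnode];     /* per-cpu cnode -> nasid table */
if (is_shub2())
        setup_shub2_node(nasid);                /* illustrative */
else
        setup_shub1_node(nasid);                /* illustrative */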


@ -1,236 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2000-2007 Silicon Graphics, Inc. All Rights Reserved.
*/
#ifndef _ASM_IA64_SN_BTE_H
#define _ASM_IA64_SN_BTE_H
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <asm/sn/pda.h>
#include <asm/sn/types.h>
#include <asm/sn/shub_mmr.h>
struct nodepda_s;
#define IBCT_NOTIFY (0x1UL << 4)
#define IBCT_ZFIL_MODE (0x1UL << 0)
/* #define BTE_DEBUG */
/* #define BTE_DEBUG_VERBOSE */
#ifdef BTE_DEBUG
# define BTE_PRINTK(x) printk x /* Terse */
# ifdef BTE_DEBUG_VERBOSE
# define BTE_PRINTKV(x) printk x /* Verbose */
# else
# define BTE_PRINTKV(x)
# endif /* BTE_DEBUG_VERBOSE */
#else
# define BTE_PRINTK(x)
# define BTE_PRINTKV(x)
#endif /* BTE_DEBUG */
/* BTE status register only supports 16 bits for length field */
#define BTE_LEN_BITS (16)
#define BTE_LEN_MASK ((1 << BTE_LEN_BITS) - 1)
#define BTE_MAX_XFER (BTE_LEN_MASK << L1_CACHE_SHIFT)
/* Define hardware */
#define BTES_PER_NODE (is_shub2() ? 4 : 2)
#define MAX_BTES_PER_NODE 4
#define BTE2OFF_CTRL 0
#define BTE2OFF_SRC (SH2_BT_ENG_SRC_ADDR_0 - SH2_BT_ENG_CSR_0)
#define BTE2OFF_DEST (SH2_BT_ENG_DEST_ADDR_0 - SH2_BT_ENG_CSR_0)
#define BTE2OFF_NOTIFY (SH2_BT_ENG_NOTIF_ADDR_0 - SH2_BT_ENG_CSR_0)
#define BTE_BASE_ADDR(interface) \
(is_shub2() ? (interface == 0) ? SH2_BT_ENG_CSR_0 : \
(interface == 1) ? SH2_BT_ENG_CSR_1 : \
(interface == 2) ? SH2_BT_ENG_CSR_2 : \
SH2_BT_ENG_CSR_3 \
: (interface == 0) ? IIO_IBLS0 : IIO_IBLS1)
#define BTE_SOURCE_ADDR(base) \
(is_shub2() ? base + (BTE2OFF_SRC/8) \
: base + (BTEOFF_SRC/8))
#define BTE_DEST_ADDR(base) \
(is_shub2() ? base + (BTE2OFF_DEST/8) \
: base + (BTEOFF_DEST/8))
#define BTE_CTRL_ADDR(base) \
(is_shub2() ? base + (BTE2OFF_CTRL/8) \
: base + (BTEOFF_CTRL/8))
#define BTE_NOTIF_ADDR(base) \
(is_shub2() ? base + (BTE2OFF_NOTIFY/8) \
: base + (BTEOFF_NOTIFY/8))
/* Define hardware modes */
#define BTE_NOTIFY IBCT_NOTIFY
#define BTE_NORMAL BTE_NOTIFY
#define BTE_ZERO_FILL (BTE_NOTIFY | IBCT_ZFIL_MODE)
/* Use a reserved bit to let the caller specify a wait for any BTE */
#define BTE_WACQUIRE 0x4000
/* Use the BTE on the node with the destination memory */
#define BTE_USE_DEST (BTE_WACQUIRE << 1)
/* Use any available BTE interface on any node for the transfer */
#define BTE_USE_ANY (BTE_USE_DEST << 1)
/* macro to force the IBCT0 value valid */
#define BTE_VALID_MODE(x) ((x) & (IBCT_NOTIFY | IBCT_ZFIL_MODE))
#define BTE_ACTIVE (IBLS_BUSY | IBLS_ERROR)
#define BTE_WORD_AVAILABLE (IBLS_BUSY << 1)
#define BTE_WORD_BUSY (~BTE_WORD_AVAILABLE)
/*
* Some macros to simplify reading.
* Start with macros to locate the BTE control registers.
*/
#define BTE_LNSTAT_LOAD(_bte) \
HUB_L(_bte->bte_base_addr)
#define BTE_LNSTAT_STORE(_bte, _x) \
HUB_S(_bte->bte_base_addr, (_x))
#define BTE_SRC_STORE(_bte, _x) \
({ \
u64 __addr = ((_x) & ~AS_MASK); \
if (is_shub2()) \
__addr = SH2_TIO_PHYS_TO_DMA(__addr); \
HUB_S(_bte->bte_source_addr, __addr); \
})
#define BTE_DEST_STORE(_bte, _x) \
({ \
u64 __addr = ((_x) & ~AS_MASK); \
if (is_shub2()) \
__addr = SH2_TIO_PHYS_TO_DMA(__addr); \
HUB_S(_bte->bte_destination_addr, __addr); \
})
#define BTE_CTRL_STORE(_bte, _x) \
HUB_S(_bte->bte_control_addr, (_x))
#define BTE_NOTIF_STORE(_bte, _x) \
({ \
u64 __addr = ia64_tpa((_x) & ~AS_MASK); \
if (is_shub2()) \
__addr = SH2_TIO_PHYS_TO_DMA(__addr); \
HUB_S(_bte->bte_notify_addr, __addr); \
})
#define BTE_START_TRANSFER(_bte, _len, _mode) \
is_shub2() ? BTE_CTRL_STORE(_bte, IBLS_BUSY | (_mode << 24) | _len) \
: BTE_LNSTAT_STORE(_bte, _len); \
BTE_CTRL_STORE(_bte, _mode)
/* Possible results from bte_copy and bte_unaligned_copy */
/* The following error codes map into the BTE hardware codes
* IIO_ICRB_ECODE_* (in shubio.h). The hardware uses
* an error code of 0 (IIO_ICRB_ECODE_DERR), but we want zero
* to mean BTE_SUCCESS, so add one (BTEFAIL_OFFSET) to the error
* codes to give the following error codes.
*/
#define BTEFAIL_OFFSET 1
typedef enum {
BTE_SUCCESS, /* 0 is success */
BTEFAIL_DIR, /* Directory error due to IIO access*/
BTEFAIL_POISON, /* poison error on IO access (write to poison page) */
BTEFAIL_WERR, /* Write error (ie WINV to a Read only line) */
BTEFAIL_ACCESS, /* access error (protection violation) */
BTEFAIL_PWERR, /* Partial Write Error */
BTEFAIL_PRERR, /* Partial Read Error */
BTEFAIL_TOUT, /* CRB Time out */
BTEFAIL_XTERR, /* Incoming xtalk pkt had error bit */
BTEFAIL_NOTAVAIL, /* BTE not available */
} bte_result_t;
#define BTEFAIL_SH2_RESP_SHORT 0x1 /* bit 000001 */
#define BTEFAIL_SH2_RESP_LONG 0x2 /* bit 000010 */
#define BTEFAIL_SH2_RESP_DSP 0x4 /* bit 000100 */
#define BTEFAIL_SH2_RESP_ACCESS 0x8 /* bit 001000 */
#define BTEFAIL_SH2_CRB_TO 0x10 /* bit 010000 */
#define BTEFAIL_SH2_NACK_LIMIT 0x20 /* bit 100000 */
#define BTEFAIL_SH2_ALL 0x3F /* bit 111111 */
#define BTE_ERR_BITS 0x3FUL
#define BTE_ERR_SHIFT 36
#define BTE_ERR_MASK (BTE_ERR_BITS << BTE_ERR_SHIFT)
#define BTE_ERROR_RETRY(value) \
(is_shub2() ? (value != BTEFAIL_SH2_CRB_TO) \
: (value != BTEFAIL_TOUT))
/*
* On shub1 BTE_ERR_MASK will always be false, so no need for is_shub2()
*/
#define BTE_SHUB2_ERROR(_status) \
((_status & BTE_ERR_MASK) \
? (((_status >> BTE_ERR_SHIFT) & BTE_ERR_BITS) | IBLS_ERROR) \
: _status)
#define BTE_GET_ERROR_STATUS(_status) \
(BTE_SHUB2_ERROR(_status) & ~IBLS_ERROR)
#define BTE_VALID_SH2_ERROR(value) \
((value >= BTEFAIL_SH2_RESP_SHORT) && (value <= BTEFAIL_SH2_ALL))
/*
* Structure defining a bte. An instance of this
* structure is created in the nodepda for each
* bte on that node (as defined by BTES_PER_NODE)
* This structure contains everything necessary
* to work with a BTE.
*/
struct bteinfo_s {
volatile u64 notify ____cacheline_aligned;
u64 *bte_base_addr ____cacheline_aligned;
u64 *bte_source_addr;
u64 *bte_destination_addr;
u64 *bte_control_addr;
u64 *bte_notify_addr;
spinlock_t spinlock;
cnodeid_t bte_cnode; /* cnode */
int bte_error_count; /* Number of errors encountered */
int bte_num; /* 0 --> BTE0, 1 --> BTE1 */
int cleanup_active; /* Interface is locked for cleanup */
volatile bte_result_t bh_error; /* error while processing */
volatile u64 *most_rcnt_na;
struct bteinfo_s *btes_to_try[MAX_BTES_PER_NODE];
};
/*
* Function prototypes (functions defined in bte.c, used elsewhere)
*/
extern bte_result_t bte_copy(u64, u64, u64, u64, void *);
extern bte_result_t bte_unaligned_copy(u64, u64, u64, u64);
extern void bte_error_handler(struct nodepda_s *);
#define bte_zero(dest, len, mode, notification) \
bte_copy(0, dest, len, ((mode) | BTE_ZERO_FILL), notification)
/*
* The following is the preferred way of calling bte_unaligned_copy
* If the copy is fully cache line aligned, then bte_copy is
* used instead. Since bte_copy is inlined, this saves a call
* stack. NOTE: bte_copy is called synchronously and does block
* until the transfer is complete. In order to get the asynch
* version of bte_copy, you must perform this check yourself.
*/
#define BTE_UNALIGNED_COPY(src, dest, len, mode) \
(((len & (L1_CACHE_BYTES - 1)) || \
(src & (L1_CACHE_BYTES - 1)) || \
(dest & (L1_CACHE_BYTES - 1))) ? \
bte_unaligned_copy(src, dest, len, mode) : \
bte_copy(src, dest, len, mode, NULL))
#endif /* _ASM_IA64_SN_BTE_H */
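A sketch of the synchronous block-transfer interface declared above: copy one cache-aligned page, check the result code, then reuse the same primitive through the bte_zero() wrapper. src_phys and dst_phys are illustrative physical addresses:

bte_result_t rc;

rc = bte_copy(src_phys, dst_phys, PAGE_SIZE, BTE_NORMAL, NULL);
if (rc != BTE_SUCCESS)
        printk(KERN_ERR "bte_copy failed: %d\n", rc);

rc = bte_zero(dst_phys, PAGE_SIZE, 0, NULL);    /* zero-fill via the same engine */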


@ -1,28 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
*/
/*
* This file contains definitions for accessing a platform supported high resolution
* clock. The clock is monotonically increasing and can be accessed from any node
* in the system. The clock is synchronized across nodes - all nodes see the
* same value.
*
* RTC_COUNTER_ADDR - contains the address of the counter
*
*/
#ifndef _ASM_IA64_SN_CLKSUPPORT_H
#define _ASM_IA64_SN_CLKSUPPORT_H
extern unsigned long sn_rtc_cycles_per_second;
#define RTC_COUNTER_ADDR ((long *)LOCAL_MMR_ADDR(SH_RTC))
#define rtc_time() (*RTC_COUNTER_ADDR)
#endif /* _ASM_IA64_SN_CLKSUPPORT_H */
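A sketch of timing an interval with the globally synchronized RTC exposed above; overflow handling is ignored in this sketch:

unsigned long t0 = rtc_time();
/* ... work being timed ... */
unsigned long cycles = rtc_time() - t0;
unsigned long usecs  = cycles * 1000000UL / sn_rtc_cycles_per_second;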


@ -1,132 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_GEO_H
#define _ASM_IA64_SN_GEO_H
/* The geoid_t implementation below is based loosely on the pcfg_t
implementation in sys/SN/promcfg.h. */
/* Type declarations */
/* Size of a geoid_t structure (must be before decl. of geoid_u) */
#define GEOID_SIZE 8 /* Would 16 be better? The size can
be different on different platforms. */
#define MAX_SLOTS 0xf /* slots per module */
#define MAX_SLABS 0xf /* slabs per slot */
typedef unsigned char geo_type_t;
/* Fields common to all substructures */
typedef struct geo_common_s {
moduleid_t module; /* The module (box) this h/w lives in */
geo_type_t type; /* What type of h/w is named by this geoid_t */
slabid_t slab:4; /* slab (ASIC), 0 .. 15 within slot */
slotid_t slot:4; /* slot (Blade), 0 .. 15 within module */
} geo_common_t;
/* Additional fields for particular types of hardware */
typedef struct geo_node_s {
geo_common_t common; /* No additional fields needed */
} geo_node_t;
typedef struct geo_rtr_s {
geo_common_t common; /* No additional fields needed */
} geo_rtr_t;
typedef struct geo_iocntl_s {
geo_common_t common; /* No additional fields needed */
} geo_iocntl_t;
typedef struct geo_pcicard_s {
geo_iocntl_t common;
char bus; /* Bus/widget number */
char slot; /* PCI slot number */
} geo_pcicard_t;
/* Subcomponents of a node */
typedef struct geo_cpu_s {
geo_node_t node;
char slice; /* Which CPU on the node */
} geo_cpu_t;
typedef struct geo_mem_s {
geo_node_t node;
char membus; /* The memory bus on the node */
char memslot; /* The memory slot on the bus */
} geo_mem_t;
typedef union geoid_u {
geo_common_t common;
geo_node_t node;
geo_iocntl_t iocntl;
geo_pcicard_t pcicard;
geo_rtr_t rtr;
geo_cpu_t cpu;
geo_mem_t mem;
char padsize[GEOID_SIZE];
} geoid_t;
/* Preprocessor macros */
#define GEO_MAX_LEN 48 /* max. formatted length, plus some pad:
module/001c07/slab/5/node/memory/2/slot/4 */
/* Values for geo_type_t */
#define GEO_TYPE_INVALID 0
#define GEO_TYPE_MODULE 1
#define GEO_TYPE_NODE 2
#define GEO_TYPE_RTR 3
#define GEO_TYPE_IOCNTL 4
#define GEO_TYPE_IOCARD 5
#define GEO_TYPE_CPU 6
#define GEO_TYPE_MEM 7
#define GEO_TYPE_MAX (GEO_TYPE_MEM+1)
/* Parameter for hwcfg_format_geoid_compt() */
#define GEO_COMPT_MODULE 1
#define GEO_COMPT_SLAB 2
#define GEO_COMPT_IOBUS 3
#define GEO_COMPT_IOSLOT 4
#define GEO_COMPT_CPU 5
#define GEO_COMPT_MEMBUS 6
#define GEO_COMPT_MEMSLOT 7
#define GEO_INVALID_STR "<invalid>"
#define INVALID_NASID ((nasid_t)-1)
#define INVALID_CNODEID ((cnodeid_t)-1)
#define INVALID_PNODEID ((pnodeid_t)-1)
#define INVALID_SLAB (slabid_t)-1
#define INVALID_SLOT (slotid_t)-1
#define INVALID_MODULE ((moduleid_t)-1)
static inline slabid_t geo_slab(geoid_t g)
{
return (g.common.type == GEO_TYPE_INVALID) ?
INVALID_SLAB : g.common.slab;
}
static inline slotid_t geo_slot(geoid_t g)
{
return (g.common.type == GEO_TYPE_INVALID) ?
INVALID_SLOT : g.common.slot;
}
static inline moduleid_t geo_module(geoid_t g)
{
return (g.common.type == GEO_TYPE_INVALID) ?
INVALID_MODULE : g.common.module;
}
extern geoid_t cnodeid_get_geoid(cnodeid_t cnode);
#endif /* _ASM_IA64_SN_GEO_H */
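A sketch of pulling location fields out of a node's geoid with the accessors above; cnode is illustrative and the integer format specifiers assume the usual small integer typedefs:

geoid_t g = cnodeid_get_geoid(cnode);

if (geo_module(g) != INVALID_MODULE)
        pr_info("node %d: module %d slot %d slab %d\n",
                cnode, geo_module(g), geo_slot(g), geo_slab(g));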


@ -9,60 +9,7 @@
#ifndef _ASM_IA64_SN_INTR_H
#define _ASM_IA64_SN_INTR_H
#include <linux/rcupdate.h>
#include <asm/sn/types.h>
#define SGI_UART_VECTOR 0xe9
/* Reserved IRQs : Note, not to exceed IA64_SN2_FIRST_DEVICE_VECTOR */
#define SGI_XPC_ACTIVATE 0x30
#define SGI_II_ERROR 0x31
#define SGI_XBOW_ERROR 0x32
#define SGI_PCIASIC_ERROR 0x33
#define SGI_ACPI_SCI_INT 0x34
#define SGI_TIOCA_ERROR 0x35
#define SGI_TIO_ERROR 0x36
#define SGI_TIOCX_ERROR 0x37
#define SGI_MMTIMER_VECTOR 0x38
#define SGI_XPC_NOTIFY 0xe7
#define IA64_SN2_FIRST_DEVICE_VECTOR 0x3c
#define IA64_SN2_LAST_DEVICE_VECTOR 0xe6
#define SN2_IRQ_RESERVED 0x1
#define SN2_IRQ_CONNECTED 0x2
#define SN2_IRQ_SHARED 0x4
// The SN PROM irq struct
struct sn_irq_info {
struct sn_irq_info *irq_next; /* deprecated DO NOT USE */
short irq_nasid; /* Nasid IRQ is assigned to */
int irq_slice; /* slice IRQ is assigned to */
int irq_cpuid; /* kernel logical cpuid */
int irq_irq; /* the IRQ number */
int irq_int_bit; /* Bridge interrupt pin */
/* <0 means MSI */
u64 irq_xtalkaddr; /* xtalkaddr IRQ is sent to */
int irq_bridge_type;/* pciio asic type (pciio.h) */
void *irq_bridge; /* bridge generating irq */
void *irq_pciioinfo; /* associated pciio_info_t */
int irq_last_intr; /* For Shub lb lost intr WAR */
int irq_cookie; /* unique cookie */
int irq_flags; /* flags */
int irq_share_cnt; /* num devices sharing IRQ */
struct list_head list; /* list of sn_irq_info structs */
struct rcu_head rcu; /* rcu callback list */
};
extern void sn_send_IPI_phys(int, long, int, int);
extern u64 sn_intr_alloc(nasid_t, int,
struct sn_irq_info *,
int, nasid_t, int);
extern void sn_intr_free(nasid_t, int, struct sn_irq_info *);
extern struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *, nasid_t, int);
extern void sn_set_err_irq_affinity(unsigned int);
extern struct list_head **sn_irq_lh;
#define CPU_VECTOR_TO_IRQ(cpuid,vector) (vector)
#endif /* _ASM_IA64_SN_INTR_H */


@ -1,274 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_SN_IO_H
#define _ASM_SN_IO_H
#include <linux/compiler.h>
#include <asm/intrinsics.h>
extern void * sn_io_addr(unsigned long port) __attribute_const__; /* Forward definition */
extern void __sn_mmiowb(void); /* Forward definition */
extern int num_cnodes;
#define __sn_mf_a() ia64_mfa()
extern void sn_dma_flush(unsigned long);
#define __sn_inb ___sn_inb
#define __sn_inw ___sn_inw
#define __sn_inl ___sn_inl
#define __sn_outb ___sn_outb
#define __sn_outw ___sn_outw
#define __sn_outl ___sn_outl
#define __sn_readb ___sn_readb
#define __sn_readw ___sn_readw
#define __sn_readl ___sn_readl
#define __sn_readq ___sn_readq
#define __sn_readb_relaxed ___sn_readb_relaxed
#define __sn_readw_relaxed ___sn_readw_relaxed
#define __sn_readl_relaxed ___sn_readl_relaxed
#define __sn_readq_relaxed ___sn_readq_relaxed
/*
* Convenience macros for setting/clearing bits using the above accessors
*/
#define __sn_setq_relaxed(addr, val) \
writeq((__sn_readq_relaxed(addr) | (val)), (addr))
#define __sn_clrq_relaxed(addr, val) \
writeq((__sn_readq_relaxed(addr) & ~(val)), (addr))
/*
* The following routines are SN Platform specific, called when
* a reference is made to inX/outX set macros. SN Platform
* inX set of macros ensures that posted DMA writes on the
* Bridge are flushed.
*
* The routines should be self-explanatory.
*/
static inline unsigned int
___sn_inb (unsigned long port)
{
volatile unsigned char *addr;
unsigned char ret = -1;
if ((addr = sn_io_addr(port))) {
ret = *addr;
__sn_mf_a();
sn_dma_flush((unsigned long)addr);
}
return ret;
}
static inline unsigned int
___sn_inw (unsigned long port)
{
volatile unsigned short *addr;
unsigned short ret = -1;
if ((addr = sn_io_addr(port))) {
ret = *addr;
__sn_mf_a();
sn_dma_flush((unsigned long)addr);
}
return ret;
}
static inline unsigned int
___sn_inl (unsigned long port)
{
volatile unsigned int *addr;
unsigned int ret = -1;
if ((addr = sn_io_addr(port))) {
ret = *addr;
__sn_mf_a();
sn_dma_flush((unsigned long)addr);
}
return ret;
}
static inline void
___sn_outb (unsigned char val, unsigned long port)
{
volatile unsigned char *addr;
if ((addr = sn_io_addr(port))) {
*addr = val;
__sn_mmiowb();
}
}
static inline void
___sn_outw (unsigned short val, unsigned long port)
{
volatile unsigned short *addr;
if ((addr = sn_io_addr(port))) {
*addr = val;
__sn_mmiowb();
}
}
static inline void
___sn_outl (unsigned int val, unsigned long port)
{
volatile unsigned int *addr;
if ((addr = sn_io_addr(port))) {
*addr = val;
__sn_mmiowb();
}
}
/*
* The following routines are SN Platform specific, called when
* a reference is made to readX/writeX set macros. SN Platform
* readX set of macros ensures that posted DMA writes on the
* Bridge are flushed.
*
* The routines should be self-explanatory.
*/
static inline unsigned char
___sn_readb (const volatile void __iomem *addr)
{
unsigned char val;
val = *(volatile unsigned char __force *)addr;
__sn_mf_a();
sn_dma_flush((unsigned long)addr);
return val;
}
static inline unsigned short
___sn_readw (const volatile void __iomem *addr)
{
unsigned short val;
val = *(volatile unsigned short __force *)addr;
__sn_mf_a();
sn_dma_flush((unsigned long)addr);
return val;
}
static inline unsigned int
___sn_readl (const volatile void __iomem *addr)
{
unsigned int val;
val = *(volatile unsigned int __force *)addr;
__sn_mf_a();
sn_dma_flush((unsigned long)addr);
return val;
}
static inline unsigned long
___sn_readq (const volatile void __iomem *addr)
{
unsigned long val;
val = *(volatile unsigned long __force *)addr;
__sn_mf_a();
sn_dma_flush((unsigned long)addr);
return val;
}
/*
* For generic and SN2 kernels, we have a set of fast access
* PIO macros. These macros are provided on SN Platform
* because the normal inX and readX macros perform an
* additional task of flushing posted DMA requests on the Bridge.
*
* These routines should be self-explanatory.
*/
static inline unsigned int
sn_inb_fast (unsigned long port)
{
volatile unsigned char *addr = (unsigned char *)port;
unsigned char ret;
ret = *addr;
__sn_mf_a();
return ret;
}
static inline unsigned int
sn_inw_fast (unsigned long port)
{
volatile unsigned short *addr = (unsigned short *)port;
unsigned short ret;
ret = *addr;
__sn_mf_a();
return ret;
}
static inline unsigned int
sn_inl_fast (unsigned long port)
{
volatile unsigned int *addr = (unsigned int *)port;
unsigned int ret;
ret = *addr;
__sn_mf_a();
return ret;
}
static inline unsigned char
___sn_readb_relaxed (const volatile void __iomem *addr)
{
return *(volatile unsigned char __force *)addr;
}
static inline unsigned short
___sn_readw_relaxed (const volatile void __iomem *addr)
{
return *(volatile unsigned short __force *)addr;
}
static inline unsigned int
___sn_readl_relaxed (const volatile void __iomem *addr)
{
return *(volatile unsigned int __force *) addr;
}
static inline unsigned long
___sn_readq_relaxed (const volatile void __iomem *addr)
{
return *(volatile unsigned long __force *) addr;
}
struct pci_dev;
static inline int
sn_pci_set_vchan(struct pci_dev *pci_dev, unsigned long *addr, int vchan)
{
if (vchan > 1) {
return -1;
}
if (!(*addr >> 32)) /* Using a mask here would be cleaner */
return 0; /* but this generates better code */
if (vchan == 1) {
/* Set Bit 57 */
*addr |= (1UL << 57);
} else {
/* Clear Bit 57 */
*addr &= ~(1UL << 57);
}
return 0;
}
#endif /* _ASM_SN_IO_H */
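The practical difference between the plain and _relaxed accessors above: the plain read adds an mf.a and a bridge DMA flush, while the relaxed variant is a bare load. A sketch, with dev_regs and STATUS as illustrative names:

u32 ordered = ___sn_readl(dev_regs + STATUS);           /* mf.a + sn_dma_flush() */
u32 relaxed = ___sn_readl_relaxed(dev_regs + STATUS);   /* plain load, no flush */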


@ -1,242 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2005 Silicon Graphics, Inc.
*/
#ifndef IA64_SN_IOC3_H
#define IA64_SN_IOC3_H
/* serial port register map */
struct ioc3_serialregs {
uint32_t sscr;
uint32_t stpir;
uint32_t stcir;
uint32_t srpir;
uint32_t srcir;
uint32_t srtr;
uint32_t shadow;
};
/* SUPERIO uart register map */
struct ioc3_uartregs {
char iu_lcr;
union {
char iir; /* read only */
char fcr; /* write only */
} u3;
union {
char ier; /* DLAB == 0 */
char dlm; /* DLAB == 1 */
} u2;
union {
char rbr; /* read only, DLAB == 0 */
char thr; /* write only, DLAB == 0 */
char dll; /* DLAB == 1 */
} u1;
char iu_scr;
char iu_msr;
char iu_lsr;
char iu_mcr;
};
#define iu_rbr u1.rbr
#define iu_thr u1.thr
#define iu_dll u1.dll
#define iu_ier u2.ier
#define iu_dlm u2.dlm
#define iu_iir u3.iir
#define iu_fcr u3.fcr
struct ioc3_sioregs {
char fill[0x170];
struct ioc3_uartregs uartb;
struct ioc3_uartregs uarta;
};
/* PCI IO/mem space register map */
struct ioc3 {
uint32_t pci_id;
uint32_t pci_scr;
uint32_t pci_rev;
uint32_t pci_lat;
uint32_t pci_addr;
uint32_t pci_err_addr_l;
uint32_t pci_err_addr_h;
uint32_t sio_ir;
/* these registers are read-only for general kernel code. To
* modify them use the functions in ioc3.c
*/
uint32_t sio_ies;
uint32_t sio_iec;
uint32_t sio_cr;
uint32_t int_out;
uint32_t mcr;
uint32_t gpcr_s;
uint32_t gpcr_c;
uint32_t gpdr;
uint32_t gppr[9];
char fill[0x4c];
/* serial port registers */
uint32_t sbbr_h;
uint32_t sbbr_l;
struct ioc3_serialregs port_a;
struct ioc3_serialregs port_b;
char fill1[0x1ff10];
/* superio registers */
struct ioc3_sioregs sregs;
};
/* These don't exist on the ioc3 serial card... */
#define eier fill1[8]
#define eisr fill1[4]
#define PCI_LAT 0xc /* Latency Timer */
#define PCI_SCR_DROP_MODE_EN 0x00008000 /* drop pios on parity err */
#define UARTA_BASE 0x178
#define UARTB_BASE 0x170
/* bitmasks for serial RX status byte */
#define RXSB_OVERRUN 0x01 /* char(s) lost */
#define RXSB_PAR_ERR 0x02 /* parity error */
#define RXSB_FRAME_ERR 0x04 /* framing error */
#define RXSB_BREAK 0x08 /* break character */
#define RXSB_CTS 0x10 /* state of CTS */
#define RXSB_DCD 0x20 /* state of DCD */
#define RXSB_MODEM_VALID 0x40 /* DCD, CTS and OVERRUN are valid */
#define RXSB_DATA_VALID 0x80 /* FRAME_ERR PAR_ERR & BREAK valid */
/* bitmasks for serial TX control byte */
#define TXCB_INT_WHEN_DONE 0x20 /* interrupt after this byte is sent */
#define TXCB_INVALID 0x00 /* byte is invalid */
#define TXCB_VALID 0x40 /* byte is valid */
#define TXCB_MCR 0x80 /* data<7:0> to modem cntrl register */
#define TXCB_DELAY 0xc0 /* delay data<7:0> mSec */
/* bitmasks for SBBR_L */
#define SBBR_L_SIZE 0x00000001 /* 0 1KB rings, 1 4KB rings */
/* bitmasks for SSCR_<A:B> */
#define SSCR_RX_THRESHOLD 0x000001ff /* hiwater mark */
#define SSCR_TX_TIMER_BUSY 0x00010000 /* TX timer in progress */
#define SSCR_HFC_EN 0x00020000 /* h/w flow cntrl enabled */
#define SSCR_RX_RING_DCD 0x00040000 /* postRX record on delta-DCD */
#define SSCR_RX_RING_CTS 0x00080000 /* postRX record on delta-CTS */
#define SSCR_HIGH_SPD 0x00100000 /* 4X speed */
#define SSCR_DIAG 0x00200000 /* bypass clock divider */
#define SSCR_RX_DRAIN 0x08000000 /* drain RX buffer to memory */
#define SSCR_DMA_EN 0x10000000 /* enable ring buffer DMA */
#define SSCR_DMA_PAUSE 0x20000000 /* pause DMA */
#define SSCR_PAUSE_STATE 0x40000000 /* set when PAUSE takes effect*/
#define SSCR_RESET 0x80000000 /* reset DMA channels */
/* all producer/consumer pointers are the same bitfield */
#define PROD_CONS_PTR_4K 0x00000ff8 /* for 4K buffers */
#define PROD_CONS_PTR_1K 0x000003f8 /* for 1K buffers */
#define PROD_CONS_PTR_OFF 3
/* bitmasks for SRCIR_<A:B> */
#define SRCIR_ARM 0x80000000 /* arm RX timer */
/* bitmasks for SHADOW_<A:B> */
#define SHADOW_DR 0x00000001 /* data ready */
#define SHADOW_OE 0x00000002 /* overrun error */
#define SHADOW_PE 0x00000004 /* parity error */
#define SHADOW_FE 0x00000008 /* framing error */
#define SHADOW_BI 0x00000010 /* break interrupt */
#define SHADOW_THRE 0x00000020 /* transmit holding reg empty */
#define SHADOW_TEMT 0x00000040 /* transmit shift reg empty */
#define SHADOW_RFCE 0x00000080 /* char in RX fifo has error */
#define SHADOW_DCTS 0x00010000 /* delta clear to send */
#define SHADOW_DDCD 0x00080000 /* delta data carrier detect */
#define SHADOW_CTS 0x00100000 /* clear to send */
#define SHADOW_DCD 0x00800000 /* data carrier detect */
#define SHADOW_DTR 0x01000000 /* data terminal ready */
#define SHADOW_RTS 0x02000000 /* request to send */
#define SHADOW_OUT1 0x04000000 /* 16550 OUT1 bit */
#define SHADOW_OUT2 0x08000000 /* 16550 OUT2 bit */
#define SHADOW_LOOP 0x10000000 /* loopback enabled */
/* bitmasks for SRTR_<A:B> */
#define SRTR_CNT 0x00000fff /* reload value for RX timer */
#define SRTR_CNT_VAL 0x0fff0000 /* current value of RX timer */
#define SRTR_CNT_VAL_SHIFT 16
#define SRTR_HZ 16000 /* SRTR clock frequency */
/* bitmasks for SIO_IR, SIO_IEC and SIO_IES */
#define SIO_IR_SA_TX_MT 0x00000001 /* Serial port A TX empty */
#define SIO_IR_SA_RX_FULL 0x00000002 /* port A RX buf full */
#define SIO_IR_SA_RX_HIGH 0x00000004 /* port A RX hiwat */
#define SIO_IR_SA_RX_TIMER 0x00000008 /* port A RX timeout */
#define SIO_IR_SA_DELTA_DCD 0x00000010 /* port A delta DCD */
#define SIO_IR_SA_DELTA_CTS 0x00000020 /* port A delta CTS */
#define SIO_IR_SA_INT 0x00000040 /* port A pass-thru intr */
#define SIO_IR_SA_TX_EXPLICIT 0x00000080 /* port A explicit TX thru */
#define SIO_IR_SA_MEMERR 0x00000100 /* port A PCI error */
#define SIO_IR_SB_TX_MT 0x00000200
#define SIO_IR_SB_RX_FULL 0x00000400
#define SIO_IR_SB_RX_HIGH 0x00000800
#define SIO_IR_SB_RX_TIMER 0x00001000
#define SIO_IR_SB_DELTA_DCD 0x00002000
#define SIO_IR_SB_DELTA_CTS 0x00004000
#define SIO_IR_SB_INT 0x00008000
#define SIO_IR_SB_TX_EXPLICIT 0x00010000
#define SIO_IR_SB_MEMERR 0x00020000
#define SIO_IR_PP_INT 0x00040000 /* P port pass-thru intr */
#define SIO_IR_PP_INTA 0x00080000 /* PP context A thru */
#define SIO_IR_PP_INTB 0x00100000 /* PP context B thru */
#define SIO_IR_PP_MEMERR 0x00200000 /* PP PCI error */
#define SIO_IR_KBD_INT 0x00400000 /* kbd/mouse intr */
#define SIO_IR_RT_INT 0x08000000 /* RT output pulse */
#define SIO_IR_GEN_INT1 0x10000000 /* RT input pulse */
#define SIO_IR_GEN_INT_SHIFT 28
/* per device interrupt masks */
#define SIO_IR_SA (SIO_IR_SA_TX_MT | \
SIO_IR_SA_RX_FULL | \
SIO_IR_SA_RX_HIGH | \
SIO_IR_SA_RX_TIMER | \
SIO_IR_SA_DELTA_DCD | \
SIO_IR_SA_DELTA_CTS | \
SIO_IR_SA_INT | \
SIO_IR_SA_TX_EXPLICIT | \
SIO_IR_SA_MEMERR)
#define SIO_IR_SB (SIO_IR_SB_TX_MT | \
SIO_IR_SB_RX_FULL | \
SIO_IR_SB_RX_HIGH | \
SIO_IR_SB_RX_TIMER | \
SIO_IR_SB_DELTA_DCD | \
SIO_IR_SB_DELTA_CTS | \
SIO_IR_SB_INT | \
SIO_IR_SB_TX_EXPLICIT | \
SIO_IR_SB_MEMERR)
#define SIO_IR_PP (SIO_IR_PP_INT | SIO_IR_PP_INTA | \
SIO_IR_PP_INTB | SIO_IR_PP_MEMERR)
#define SIO_IR_RT (SIO_IR_RT_INT | SIO_IR_GEN_INT1)
/* bitmasks for SIO_CR */
#define SIO_CR_CMD_PULSE_SHIFT 15
#define SIO_CR_SER_A_BASE_SHIFT 1
#define SIO_CR_SER_B_BASE_SHIFT 8
#define SIO_CR_ARB_DIAG 0x00380000 /* cur !enet PCI request (ro) */
#define SIO_CR_ARB_DIAG_TXA 0x00000000
#define SIO_CR_ARB_DIAG_RXA 0x00080000
#define SIO_CR_ARB_DIAG_TXB 0x00100000
#define SIO_CR_ARB_DIAG_RXB 0x00180000
#define SIO_CR_ARB_DIAG_PP 0x00200000
#define SIO_CR_ARB_DIAG_IDLE 0x00400000 /* 0 -> active request (ro) */
/* defs for some of the generic I/O pins */
#define GPCR_PHY_RESET 0x20 /* pin is output to PHY reset */
#define GPCR_UARTB_MODESEL 0x40 /* pin is output to port B mode sel */
#define GPCR_UARTA_MODESEL 0x80 /* pin is output to port A mode sel */
#define GPPR_PHY_RESET_PIN 5 /* GIO pin controlling phy reset */
#define GPPR_UARTB_MODESEL_PIN 6 /* GIO pin cntrling uartb modeselect */
#define GPPR_UARTA_MODESEL_PIN 7 /* GIO pin cntrling uarta modeselect */
#endif /* IA64_SN_IOC3_H */
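A sketch of reaching the SUPERIO UART A line-status register through the map above once the IOC3 PCI BAR has been ioremapped; ioc3_regs and handle_rx are illustrative:

struct ioc3 __iomem *ioc3_regs;         /* illustrative: ioremap of the IOC3 BAR */
unsigned char lsr = readb(&ioc3_regs->sregs.uarta.iu_lsr);
if (lsr & 0x01)                         /* 16550 LSR data-ready bit */
        handle_rx();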


@ -1,246 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Derived from IRIX <sys/SN/klconfig.h>.
*
* Copyright (C) 1992-1997,1999,2001-2004 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (C) 1999 by Ralf Baechle
*/
#ifndef _ASM_IA64_SN_KLCONFIG_H
#define _ASM_IA64_SN_KLCONFIG_H
/*
* The KLCONFIG structures store info about the various BOARDs found
* during Hardware Discovery. In addition, it stores info about the
* components found on the BOARDs.
*/
typedef s32 klconf_off_t;
/* Functions/macros needed to use this structure */
typedef struct kl_config_hdr {
char pad[20];
klconf_off_t ch_board_info; /* the link list of boards */
char pad0[88];
} kl_config_hdr_t;
#define NODE_OFFSET_TO_LBOARD(nasid,off) (lboard_t*)(GLOBAL_CAC_ADDR((nasid), (off)))
/*
* The KLCONFIG area is organized as a LINKED LIST of BOARDs. A BOARD
* can be either 'LOCAL' or 'REMOTE'. LOCAL means it is attached to
* the LOCAL/current NODE. REMOTE means it is attached to a different
* node.(TBD - Need a way to treat ROUTER boards.)
*
* There are 2 different structures to represent these boards -
* lboard - Local board, rboard - remote board. These 2 structures
* can be arbitrarily mixed in the LINKED LIST of BOARDs. (Refer
* Figure below). The first byte of the rboard or lboard structure
* is used to find out its type - no unions are used.
* If it is a lboard, then the config info of this board will be found
* on the local node. (LOCAL NODE BASE + offset value gives pointer to
* the structure.
* If it is a rboard, the local structure contains the node number
* and the offset of the beginning of the LINKED LIST on the remote node.
* The details of the hardware on a remote node can be built locally,
* if required, by reading the LINKED LIST on the remote node and
* ignoring all the rboards on that node.
*
* The local node uses the REMOTE NODE NUMBER + OFFSET to point to the
* First board info on the remote node. The remote node list is
* traversed as the local list, using the REMOTE BASE ADDRESS and not
* the local base address and ignoring all rboard values.
*
*
KLCONFIG
+------------+ +------------+ +------------+ +------------+
| lboard | +-->| lboard | +-->| rboard | +-->| lboard |
+------------+ | +------------+ | +------------+ | +------------+
| board info | | | board info | | |errinfo,bptr| | | board info |
+------------+ | +------------+ | +------------+ | +------------+
| offset |--+ | offset |--+ | offset |--+ |offset=NULL |
+------------+ +------------+ +------------+ +------------+
+------------+
| board info |
+------------+ +--------------------------------+
| compt 1 |------>| type, rev, diaginfo, size ... | (CPU)
+------------+ +--------------------------------+
| compt 2 |--+
+------------+ | +--------------------------------+
| ... | +--->| type, rev, diaginfo, size ... | (MEM_BANK)
+------------+ +--------------------------------+
| errinfo |--+
+------------+ | +--------------------------------+
+--->|r/l brd errinfo,compt err flags |
+--------------------------------+
*
* Each BOARD consists of COMPONENTs and the BOARD structure has
* pointers (offsets) to its COMPONENT structure.
* The COMPONENT structure has version info, size and speed info, revision,
* error info and the NIC info. This structure can accommodate any
* BOARD with arbitrary COMPONENT composition.
*
* The ERRORINFO part of each BOARD has error information
* that describes errors about the BOARD itself. It also has flags to
* indicate the COMPONENT(s) on the board that have errors. The error
* information specific to the COMPONENT is present in the respective
* COMPONENT structure.
*
* The ERRORINFO structure is also treated like a COMPONENT, ie. the
* BOARD has pointers(offset) to the ERRORINFO structure. The rboard
* structure also has a pointer to the ERRORINFO structure. This is
* the place to store ERRORINFO about a REMOTE NODE, if the HUB on
* that NODE is not working or if the REMOTE MEMORY is BAD. In cases where
* only the CPU of the REMOTE NODE is disabled, the ERRORINFO pointer can
* be a NODE NUMBER, REMOTE OFFSET combination, pointing to error info
* which is present on the REMOTE NODE.(TBD)
* REMOTE ERRINFO can be stored on any of the nearest nodes
* or on all the nearest nodes.(TBD)
* Like BOARD structures, REMOTE ERRINFO structures can be built locally
* using the rboard errinfo pointer.
*
* In order to get useful information from this Data organization, a set of
* interface routines are provided (TBD). The important thing to remember while
* manipulating the structures, is that, the NODE number information should
* be used. If the NODE is non-zero (remote) then each offset should
* be added to the REMOTE BASE ADDR else it should be added to the LOCAL BASE ADDR.
* This includes offsets for BOARDS, COMPONENTS and ERRORINFO.
*
* Note that these structures do not provide much info about connectivity.
* That info will be part of HWGRAPH, which is an extension of the cfg_t
* data structure. (ref IP27prom/cfg.h) It has to be extended to include
* the IO part of the Network(TBD).
*
* The data structures below define the above concepts.
*/
/*
* BOARD classes
*/
#define KLCLASS_MASK 0xf0
#define KLCLASS_NONE 0x00
#define KLCLASS_NODE 0x10 /* CPU, Memory and HUB board */
#define KLCLASS_CPU KLCLASS_NODE
#define KLCLASS_IO 0x20 /* BaseIO, 4 ch SCSI, ethernet, FDDI
and the non-graphics widget boards */
#define KLCLASS_ROUTER 0x30 /* Router board */
#define KLCLASS_MIDPLANE 0x40 /* We need to treat this as a board
so that we can record error info */
#define KLCLASS_IOBRICK 0x70 /* IP35 iobrick */
#define KLCLASS_MAX 8 /* Bump this if a new CLASS is added */
#define KLCLASS(_x) ((_x) & KLCLASS_MASK)
/*
* board types
*/
#define KLTYPE_MASK 0x0f
#define KLTYPE(_x) ((_x) & KLTYPE_MASK)
#define KLTYPE_SNIA (KLCLASS_CPU | 0x1)
#define KLTYPE_TIO (KLCLASS_CPU | 0x2)
#define KLTYPE_ROUTER (KLCLASS_ROUTER | 0x1)
#define KLTYPE_META_ROUTER (KLCLASS_ROUTER | 0x3)
#define KLTYPE_REPEATER_ROUTER (KLCLASS_ROUTER | 0x4)
#define KLTYPE_IOBRICK_XBOW (KLCLASS_MIDPLANE | 0x2)
#define KLTYPE_IOBRICK (KLCLASS_IOBRICK | 0x0)
#define KLTYPE_NBRICK (KLCLASS_IOBRICK | 0x4)
#define KLTYPE_PXBRICK (KLCLASS_IOBRICK | 0x6)
#define KLTYPE_IXBRICK (KLCLASS_IOBRICK | 0x7)
#define KLTYPE_CGBRICK (KLCLASS_IOBRICK | 0x8)
#define KLTYPE_OPUSBRICK (KLCLASS_IOBRICK | 0x9)
#define KLTYPE_SABRICK (KLCLASS_IOBRICK | 0xa)
#define KLTYPE_IABRICK (KLCLASS_IOBRICK | 0xb)
#define KLTYPE_PABRICK (KLCLASS_IOBRICK | 0xc)
#define KLTYPE_GABRICK (KLCLASS_IOBRICK | 0xd)
/*
* board structures
*/
#define MAX_COMPTS_PER_BRD 24
typedef struct lboard_s {
klconf_off_t brd_next_any; /* Next BOARD */
unsigned char struct_type; /* type of structure, local or remote */
unsigned char brd_type; /* type+class */
unsigned char brd_sversion; /* version of this structure */
unsigned char brd_brevision; /* board revision */
unsigned char brd_promver; /* board prom version, if any */
unsigned char brd_flags; /* Enabled, Disabled etc */
unsigned char brd_slot; /* slot number */
unsigned short brd_debugsw; /* Debug switches */
geoid_t brd_geoid; /* geo id */
partid_t brd_partition; /* Partition number */
unsigned short brd_diagval; /* diagnostic value */
unsigned short brd_diagparm; /* diagnostic parameter */
unsigned char brd_inventory; /* inventory history */
unsigned char brd_numcompts; /* Number of components */
nic_t brd_nic; /* Number in CAN */
nasid_t brd_nasid; /* passed parameter */
klconf_off_t brd_compts[MAX_COMPTS_PER_BRD]; /* pointers to COMPONENTS */
klconf_off_t brd_errinfo; /* Board's error information */
struct lboard_s *brd_parent; /* Logical parent for this brd */
char pad0[4];
unsigned char brd_confidence; /* confidence that the board is bad */
nasid_t brd_owner; /* who owns this board */
unsigned char brd_nic_flags; /* To handle 8 more NICs */
char pad1[24]; /* future expansion */
char brd_name[32];
nasid_t brd_next_same_host; /* host of next brd w/same nasid */
klconf_off_t brd_next_same; /* Next BOARD with same nasid */
} lboard_t;
/*
* Generic info structure. This stores common info about a
* component.
*/
typedef struct klinfo_s { /* Generic info */
unsigned char struct_type; /* type of this structure */
unsigned char struct_version; /* version of this structure */
unsigned char flags; /* Enabled, disabled etc */
unsigned char revision; /* component revision */
unsigned short diagval; /* result of diagnostics */
unsigned short diagparm; /* diagnostic parameter */
unsigned char inventory; /* previous inventory status */
unsigned short partid; /* widget part number */
nic_t nic; /* Must be aligned properly */
unsigned char physid; /* physical id of component */
unsigned int virtid; /* virtual id as seen by system */
unsigned char widid; /* Widget id - if applicable */
nasid_t nasid; /* node number - from parent */
char pad1; /* pad out structure. */
char pad2; /* pad out structure. */
void *data;
klconf_off_t errinfo; /* component specific errors */
unsigned short pad3; /* pci fields have moved over to */
unsigned short pad4; /* klbri_t */
} klinfo_t ;
static inline lboard_t *find_lboard_next(lboard_t * brd)
{
if (brd && brd->brd_next_any)
return NODE_OFFSET_TO_LBOARD(NASID_GET(brd), brd->brd_next_any);
return NULL;
}
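A minimal sketch of how a caller might walk the board chain with find_lboard_next(); the starting board (for example, the first board of a node) comes from platform code that is only assumed here:

/* Sketch: count the boards reachable from 'start' via brd_next_any. */
static int count_boards(lboard_t *start)
{
	lboard_t *brd;
	int n = 0;

	for (brd = start; brd; brd = find_lboard_next(brd))
		n++;
	return n;
}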
#endif /* _ASM_IA64_SN_KLCONFIG_H */

View File

@ -1,51 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992-1997,2000-2004 Silicon Graphics, Inc. All Rights Reserved.
*/
#ifndef _ASM_IA64_SN_L1_H
#define _ASM_IA64_SN_L1_H
/* brick type response codes */
#define L1_BRICKTYPE_PX 0x23 /* # */
#define L1_BRICKTYPE_PE 0x25 /* % */
#define L1_BRICKTYPE_N_p0 0x26 /* & */
#define L1_BRICKTYPE_IP45 0x34 /* 4 */
#define L1_BRICKTYPE_IP41 0x35 /* 5 */
#define L1_BRICKTYPE_TWISTER 0x36 /* 6 */ /* IP53 & ROUTER */
#define L1_BRICKTYPE_IX 0x3d /* = */
#define L1_BRICKTYPE_IP34 0x61 /* a */
#define L1_BRICKTYPE_GA 0x62 /* b */
#define L1_BRICKTYPE_C 0x63 /* c */
#define L1_BRICKTYPE_OPUS_TIO 0x66 /* f */
#define L1_BRICKTYPE_I 0x69 /* i */
#define L1_BRICKTYPE_N 0x6e /* n */
#define L1_BRICKTYPE_OPUS 0x6f /* o */
#define L1_BRICKTYPE_P 0x70 /* p */
#define L1_BRICKTYPE_R 0x72 /* r */
#define L1_BRICKTYPE_CHI_CG 0x76 /* v */
#define L1_BRICKTYPE_X 0x78 /* x */
#define L1_BRICKTYPE_X2 0x79 /* y */
#define L1_BRICKTYPE_SA 0x5e /* ^ */
#define L1_BRICKTYPE_PA 0x6a /* j */
#define L1_BRICKTYPE_IA 0x6b /* k */
#define L1_BRICKTYPE_ATHENA 0x2b /* + */
#define L1_BRICKTYPE_DAYTONA 0x7a /* z */
#define L1_BRICKTYPE_1932 0x2c /* . */
#define L1_BRICKTYPE_191010 0x2e /* , */
/* board type response codes */
#define L1_BOARDTYPE_IP69 0x0100 /* CA */
#define L1_BOARDTYPE_IP63 0x0200 /* CB */
#define L1_BOARDTYPE_BASEIO 0x0300 /* IB */
#define L1_BOARDTYPE_PCIE2SLOT 0x0400 /* IC */
#define L1_BOARDTYPE_PCIX3SLOT 0x0500 /* ID */
#define L1_BOARDTYPE_PCIXPCIE4SLOT 0x0600 /* IE */
#define L1_BOARDTYPE_ABACUS 0x0700 /* AB */
#define L1_BOARDTYPE_DAYTONA 0x0800 /* AD */
#define L1_BOARDTYPE_INVAL (-1) /* invalid brick type */
#endif /* _ASM_IA64_SN_L1_H */

View File

@ -1,33 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
* Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_LEDS_H
#define _ASM_IA64_SN_LEDS_H
#include <asm/sn/addrs.h>
#include <asm/sn/pda.h>
#include <asm/sn/shub_mmr.h>
#define LED0 (LOCAL_MMR_ADDR(SH_REAL_JUNK_BUS_LED0))
#define LED_CPU_SHIFT 16
#define LED_CPU_HEARTBEAT 0x01
#define LED_CPU_ACTIVITY 0x02
#define LED_ALWAYS_SET 0x00
/*
* Basic macros for flashing the LEDS on an SGI SN.
*/
static __inline__ void
set_led_bits(u8 value, u8 mask)
{
pda->led_state = (pda->led_state & ~mask) | (value & mask);
*pda->led_address = (short) pda->led_state;
}
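For example, a heartbeat tick might flip the heartbeat bit as in the sketch below (the surrounding timer and heartbeat bookkeeping is omitted and assumed):

/* Sketch: toggle the CPU heartbeat LED for the current CPU. */
static void heartbeat_tick(void)
{
	static u8 on;

	on ^= LED_CPU_HEARTBEAT;
	set_led_bits(on, LED_CPU_HEARTBEAT);
}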
#endif /* _ASM_IA64_SN_LEDS_H */

View File

@ -1,127 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_MODULE_H
#define _ASM_IA64_SN_MODULE_H
/* parameter for format_module_id() */
#define MODULE_FORMAT_BRIEF 1
#define MODULE_FORMAT_LONG 2
#define MODULE_FORMAT_LCD 3
/*
* Module id format
*
* 31-16 Rack ID (encoded class, group, number - 16-bit unsigned int)
* 15-8 Brick type (8-bit ascii character)
* 7-0 Bay (brick position in rack (0-63) - 8-bit unsigned int)
*
*/
/*
* Macros for getting the brick type
*/
#define MODULE_BTYPE_MASK 0xff00
#define MODULE_BTYPE_SHFT 8
#define MODULE_GET_BTYPE(_m) (((_m) & MODULE_BTYPE_MASK) >> MODULE_BTYPE_SHFT)
#define MODULE_BT_TO_CHAR(_b) ((char)(_b))
#define MODULE_GET_BTCHAR(_m) (MODULE_BT_TO_CHAR(MODULE_GET_BTYPE(_m)))
/*
* Macros for getting the rack ID.
*/
#define MODULE_RACK_MASK 0xffff0000
#define MODULE_RACK_SHFT 16
#define MODULE_GET_RACK(_m) (((_m) & MODULE_RACK_MASK) >> MODULE_RACK_SHFT)
/*
* Macros for getting the brick position
*/
#define MODULE_BPOS_MASK 0x00ff
#define MODULE_BPOS_SHFT 0
#define MODULE_GET_BPOS(_m) (((_m) & MODULE_BPOS_MASK) >> MODULE_BPOS_SHFT)
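A short worked example of the field extraction above; a plain u32 stands in for moduleid_t (defined in the arch type headers), which the bit layout implies is at least 32 bits wide:

/* Sketch: pull the three fields back out of a module id.  Per the layout
 * above, a value with rack 0x0123, brick type 'I' and bay 2 decodes as
 * rack == 0x0123, MODULE_GET_BTCHAR() == 'I', bay == 2. */
static void decode_module_id(u32 m, u16 *rack, char *btype, u8 *bay)
{
	*rack  = MODULE_GET_RACK(m);
	*btype = MODULE_GET_BTCHAR(m);
	*bay   = MODULE_GET_BPOS(m);
}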
/*
* Macros for encoding and decoding rack IDs
* A rack number consists of three parts:
* class (0==CPU/mixed, 1==I/O), group, number
*
* Rack number is stored just as it is displayed on the screen:
* a 3-decimal-digit number.
*/
#define RACK_CLASS_DVDR 100
#define RACK_GROUP_DVDR 10
#define RACK_NUM_DVDR 1
#define RACK_CREATE_RACKID(_c, _g, _n) ((_c) * RACK_CLASS_DVDR + \
(_g) * RACK_GROUP_DVDR + (_n) * RACK_NUM_DVDR)
#define RACK_GET_CLASS(_r) ((_r) / RACK_CLASS_DVDR)
#define RACK_GET_GROUP(_r) (((_r) - RACK_GET_CLASS(_r) * \
RACK_CLASS_DVDR) / RACK_GROUP_DVDR)
#define RACK_GET_NUM(_r) (((_r) - RACK_GET_CLASS(_r) * \
RACK_CLASS_DVDR - RACK_GET_GROUP(_r) * \
RACK_GROUP_DVDR) / RACK_NUM_DVDR)
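And, as a quick sanity check on the decimal rack-number encoding (values invented for illustration):

/* An I/O-class rack, group 2, number 3: */
static const int example_rackid = RACK_CREATE_RACKID(1, 2, 3);	/* == 123 */
/* RACK_GET_CLASS(123) == 1, RACK_GET_GROUP(123) == 2, RACK_GET_NUM(123) == 3 */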
/*
* Macros for encoding and decoding rack IDs
* A rack number consists of three parts:
* class 1 bit, 0==CPU/mixed, 1==I/O
* group 2 bits for CPU/mixed, 3 bits for I/O
* number 3 bits for CPU/mixed, 2 bits for I/O (1 based)
*/
#define RACK_GROUP_BITS(_r) (RACK_GET_CLASS(_r) ? 3 : 2)
#define RACK_NUM_BITS(_r) (RACK_GET_CLASS(_r) ? 2 : 3)
#define RACK_CLASS_MASK(_r) 0x20
#define RACK_CLASS_SHFT(_r) 5
#define RACK_ADD_CLASS(_r, _c) \
((_r) |= (_c) << RACK_CLASS_SHFT(_r) & RACK_CLASS_MASK(_r))
#define RACK_GROUP_SHFT(_r) RACK_NUM_BITS(_r)
#define RACK_GROUP_MASK(_r) \
( (((unsigned)1<<RACK_GROUP_BITS(_r)) - 1) << RACK_GROUP_SHFT(_r) )
#define RACK_ADD_GROUP(_r, _g) \
((_r) |= (_g) << RACK_GROUP_SHFT(_r) & RACK_GROUP_MASK(_r))
#define RACK_NUM_SHFT(_r) 0
#define RACK_NUM_MASK(_r) \
( (((unsigned)1<<RACK_NUM_BITS(_r)) - 1) << RACK_NUM_SHFT(_r) )
#define RACK_ADD_NUM(_r, _n) \
((_r) |= ((_n) - 1) << RACK_NUM_SHFT(_r) & RACK_NUM_MASK(_r))
/*
* Brick type definitions
*/
#define MAX_BRICK_TYPES 256 /* brick type is stored as uchar */
extern char brick_types[];
#define MODULE_CBRICK 0
#define MODULE_RBRICK 1
#define MODULE_IBRICK 2
#define MODULE_KBRICK 3
#define MODULE_XBRICK 4
#define MODULE_DBRICK 5
#define MODULE_PBRICK 6
#define MODULE_NBRICK 7
#define MODULE_PEBRICK 8
#define MODULE_PXBRICK 9
#define MODULE_IXBRICK 10
#define MODULE_CGBRICK 11
#define MODULE_OPUSBRICK 12
#define MODULE_SABRICK 13 /* TIO BringUp Brick */
#define MODULE_IABRICK 14
#define MODULE_PABRICK 15
#define MODULE_GABRICK 16
#define MODULE_OPUS_TIO 17 /* OPUS TIO Riser */
extern char brick_types[];
extern void format_module_id(char *, moduleid_t, int);
#endif /* _ASM_IA64_SN_MODULE_H */

View File

@ -1,59 +0,0 @@
/*
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2001-2008 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_MSPEC_H
#define _ASM_IA64_SN_MSPEC_H
#define FETCHOP_VAR_SIZE 64 /* 64 byte per fetchop variable */
#define FETCHOP_LOAD 0
#define FETCHOP_INCREMENT 8
#define FETCHOP_DECREMENT 16
#define FETCHOP_CLEAR 24
#define FETCHOP_STORE 0
#define FETCHOP_AND 24
#define FETCHOP_OR 32
#define FETCHOP_CLEAR_CACHE 56
#define FETCHOP_LOAD_OP(addr, op) ( \
*(volatile long *)((char*) (addr) + (op)))
#define FETCHOP_STORE_OP(addr, op, x) ( \
*(volatile long *)((char*) (addr) + (op)) = (long) (x))
#ifdef __KERNEL__
/*
* Each Atomic Memory Operation (amo, formerly known as fetchop)
* variable is 64 bytes long. The first 8 bytes are used. The
* remaining 56 bytes are unaddressable due to the operation taking
* that portion of the address.
*
* NOTE: The amo structure _MUST_ be placed in either the first or second
* half of the cache line. The cache line _MUST NOT_ be used for anything
* other than additional amo entries. This is because there are two
* addresses which reference the same physical cache line. One will
* be a cached entry with the memory type bits all set. This address
* may be loaded into processor cache. The amo will be referenced
 * uncached via the special memory type. If any portion of the
* cached cache-line is modified, when that line is flushed, it will
* overwrite the uncached value in physical memory and lead to
* inconsistency.
*/
struct amo {
u64 variable;
u64 unused[7];
};
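A hedged sketch of the fetchop access pattern described above; how the uncached alias of &amo->variable is obtained (a TO_AMO()-style conversion) is platform code and only assumed here:

/* Sketch: atomic fetch-and-increment on an amo variable.  'amo_va' must be
 * the uncached alias of &amo->variable. */
static long amo_fetch_inc(volatile long *amo_va)
{
	/* The operation is encoded in the low address bits: a plain load from
	 * (addr + FETCHOP_INCREMENT) returns the old value and increments the
	 * variable in memory. */
	return FETCHOP_LOAD_OP(amo_va, FETCHOP_INCREMENT);
}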
#endif /* __KERNEL__ */
#endif /* _ASM_IA64_SN_MSPEC_H */

View File

@ -1,82 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_NODEPDA_H
#define _ASM_IA64_SN_NODEPDA_H
#include <asm/irq.h>
#include <asm/sn/arch.h>
#include <asm/sn/intr.h>
#include <asm/sn/bte.h>
/*
* NUMA Node-Specific Data structures are defined in this file.
* In particular, this is the location of the node PDA.
* A pointer to the right node PDA is saved in each CPU PDA.
*/
/*
* Node-specific data structure.
*
* One of these structures is allocated on each node of a NUMA system.
*
* This structure provides a convenient way of keeping together
* all per-node data structures.
*/
struct phys_cpuid {
short nasid;
char subnode;
char slice;
};
struct nodepda_s {
void *pdinfo; /* Platform-dependent per-node info */
/*
* The BTEs on this node are shared by the local cpus
*/
struct bteinfo_s bte_if[MAX_BTES_PER_NODE]; /* Virtual Interface */
struct timer_list bte_recovery_timer;
spinlock_t bte_recovery_lock;
/*
* Array of pointers to the nodepdas for each node.
*/
struct nodepda_s *pernode_pdaindr[MAX_COMPACT_NODES];
/*
* Array of physical cpu identifiers. Indexed by cpuid.
*/
struct phys_cpuid phys_cpuid[NR_CPUS];
spinlock_t ptc_lock ____cacheline_aligned_in_smp;
};
typedef struct nodepda_s nodepda_t;
/*
* Access Functions for node PDA.
* Since there is one nodepda for each node, we need a convenient mechanism
* to access these nodepdas without cluttering code with #ifdefs.
* The next set of definitions provides this.
* Routines are expected to use
*
* sn_nodepda - to access node PDA for the node on which code is running
* NODEPDA(cnodeid) - to access node PDA for cnodeid
*/
DECLARE_PER_CPU(struct nodepda_s *, __sn_nodepda);
#define sn_nodepda __this_cpu_read(__sn_nodepda)
#define NODEPDA(cnodeid) (sn_nodepda->pernode_pdaindr[cnodeid])
/*
* Check if given a compact node id the corresponding node has all the
* cpus disabled.
*/
#define is_headless_node(cnodeid) (nr_cpus_node(cnodeid) == 0)
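A brief sketch of the recommended access pattern, using only the macros just defined (the compact-node count is passed in rather than derived here):

/* Sketch: visit the node PDA of every node that still has CPUs. */
static void visit_nodepdas(int num_cnodes)
{
	struct nodepda_s *npda;
	int cnode;

	for (cnode = 0; cnode < num_cnodes; cnode++) {
		if (is_headless_node(cnode))
			continue;
		npda = NODEPDA(cnode);
		(void)npda;	/* e.g. look at npda->bte_if[] or phys_cpuid[] */
	}
}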
#endif /* _ASM_IA64_SN_NODEPDA_H */

View File

@ -1,150 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992-1997,2000-2006 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H
#define _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H
#include <asm/sn/intr.h>
#include <asm/sn/pcibus_provider_defs.h>
/* Workarounds */
#define PV907516 (1 << 1) /* TIOCP: Don't write the write buffer flush reg */
#define BUSTYPE_MASK 0x1
/* Macros given a pcibus structure */
#define IS_PCIX(ps) ((ps)->pbi_bridge_mode & BUSTYPE_MASK)
#define IS_PCI_BRIDGE_ASIC(asic) (asic == PCIIO_ASIC_TYPE_PIC || \
asic == PCIIO_ASIC_TYPE_TIOCP)
#define IS_PIC_SOFT(ps) (ps->pbi_bridge_type == PCIBR_BRIDGETYPE_PIC)
#define IS_TIOCP_SOFT(ps) (ps->pbi_bridge_type == PCIBR_BRIDGETYPE_TIOCP)
/*
* The different PCI Bridge types supported on the SGI Altix platforms
*/
#define PCIBR_BRIDGETYPE_UNKNOWN -1
#define PCIBR_BRIDGETYPE_PIC 2
#define PCIBR_BRIDGETYPE_TIOCP 3
/*
* Bridge 64bit Direct Map Attributes
*/
#define PCI64_ATTR_PREF (1ull << 59)
#define PCI64_ATTR_PREC (1ull << 58)
#define PCI64_ATTR_VIRTUAL (1ull << 57)
#define PCI64_ATTR_BAR (1ull << 56)
#define PCI64_ATTR_SWAP (1ull << 55)
#define PCI64_ATTR_VIRTUAL1 (1ull << 54)
#define PCI32_LOCAL_BASE 0
#define PCI32_MAPPED_BASE 0x40000000
#define PCI32_DIRECT_BASE 0x80000000
#define IS_PCI32_MAPPED(x) ((u64)(x) < PCI32_DIRECT_BASE && \
(u64)(x) >= PCI32_MAPPED_BASE)
#define IS_PCI32_DIRECT(x) ((u64)(x) >= PCI32_MAPPED_BASE)
/*
 * Bridge PMU Address Translation Entry Attributes
*/
#define PCI32_ATE_V (0x1 << 0)
#define PCI32_ATE_CO (0x1 << 1) /* PIC ASIC ONLY */
#define PCI32_ATE_PIO (0x1 << 1) /* TIOCP ASIC ONLY */
#define PCI32_ATE_MSI (0x1 << 2)
#define PCI32_ATE_PREF (0x1 << 3)
#define PCI32_ATE_BAR (0x1 << 4)
#define PCI32_ATE_ADDR_SHFT 12
#define MINIMAL_ATES_REQUIRED(addr, size) \
(IOPG(IOPGOFF(addr) + (size) - 1) == IOPG((size) - 1))
#define MINIMAL_ATE_FLAG(addr, size) \
(MINIMAL_ATES_REQUIRED((u64)addr, size) ? 1 : 0)
/* bit 29 of the pci address is the SWAP bit */
#define ATE_SWAPSHIFT 29
#define ATE_SWAP_ON(x) ((x) |= (1 << ATE_SWAPSHIFT))
#define ATE_SWAP_OFF(x) ((x) &= ~(1 << ATE_SWAPSHIFT))
/*
* I/O page size
*/
#if PAGE_SIZE < 16384
#define IOPFNSHIFT 12 /* 4K per mapped page */
#else
#define IOPFNSHIFT 14 /* 16K per mapped page */
#endif
#define IOPGSIZE (1 << IOPFNSHIFT)
#define IOPG(x) ((x) >> IOPFNSHIFT)
#define IOPGOFF(x) ((x) & (IOPGSIZE-1))
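To make the I/O-page arithmetic above concrete, a small hedged example (the numbers assume the 16K I/O page case, i.e. IOPFNSHIFT == 14):

/* Sketch: a 4K transfer that starts 2K below a 16K I/O page boundary
 * spills into a second page, so a "minimal" single-ATE mapping won't do. */
static int example_is_minimal(void)
{
	u64 addr = 0x3800;		/* IOPGOFF(addr) == 0x3800 */
	size_t size = 0x1000;		/* 4K */

	return MINIMAL_ATE_FLAG(addr, size);	/* == 0 for these values */
}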
#define PCIBR_DEV_SWAP_DIR (1ull << 19)
#define PCIBR_CTRL_PAGE_SIZE (0x1 << 21)
/*
* PMU resources.
*/
struct ate_resource{
u64 *ate;
u64 num_ate;
u64 lowest_free_index;
};
struct pcibus_info {
struct pcibus_bussoft pbi_buscommon; /* common header */
u32 pbi_moduleid;
short pbi_bridge_type;
short pbi_bridge_mode;
struct ate_resource pbi_int_ate_resource;
u64 pbi_int_ate_size;
u64 pbi_dir_xbase;
char pbi_hub_xid;
u64 pbi_devreg[8];
u32 pbi_valid_devices;
u32 pbi_enabled_devices;
spinlock_t pbi_lock;
};
extern int pcibr_init_provider(void);
extern void *pcibr_bus_fixup(struct pcibus_bussoft *, struct pci_controller *);
extern dma_addr_t pcibr_dma_map(struct pci_dev *, unsigned long, size_t, int type);
extern dma_addr_t pcibr_dma_map_consistent(struct pci_dev *, unsigned long, size_t, int type);
extern void pcibr_dma_unmap(struct pci_dev *, dma_addr_t, int);
/*
* prototypes for the bridge asic register access routines in pcibr_reg.c
*/
extern void pcireg_control_bit_clr(struct pcibus_info *, u64);
extern void pcireg_control_bit_set(struct pcibus_info *, u64);
extern u64 pcireg_tflush_get(struct pcibus_info *);
extern u64 pcireg_intr_status_get(struct pcibus_info *);
extern void pcireg_intr_enable_bit_clr(struct pcibus_info *, u64);
extern void pcireg_intr_enable_bit_set(struct pcibus_info *, u64);
extern void pcireg_intr_addr_addr_set(struct pcibus_info *, int, u64);
extern void pcireg_force_intr_set(struct pcibus_info *, int);
extern u64 pcireg_wrb_flush_get(struct pcibus_info *, int);
extern void pcireg_int_ate_set(struct pcibus_info *, int, u64);
extern u64 __iomem * pcireg_int_ate_addr(struct pcibus_info *, int);
extern void pcibr_force_interrupt(struct sn_irq_info *sn_irq_info);
extern void pcibr_change_devices_irq(struct sn_irq_info *sn_irq_info);
extern int pcibr_ate_alloc(struct pcibus_info *, int);
extern void pcibr_ate_free(struct pcibus_info *, int);
extern void ate_write(struct pcibus_info *, int, int, u64);
extern int sal_pcibr_slot_enable(struct pcibus_info *soft, int device,
void *resp, char **ssdt);
extern int sal_pcibr_slot_disable(struct pcibus_info *soft, int device,
int action, void *resp);
extern u16 sn_ioboard_to_pci_bus(struct pci_bus *pci_bus);
#endif

View File

@ -1,68 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H
#define _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H
/*
* SN pci asic types. Do not ever renumber these or reuse values. The
* values must agree with what prom thinks they are.
*/
#define PCIIO_ASIC_TYPE_UNKNOWN 0
#define PCIIO_ASIC_TYPE_PPB 1
#define PCIIO_ASIC_TYPE_PIC 2
#define PCIIO_ASIC_TYPE_TIOCP 3
#define PCIIO_ASIC_TYPE_TIOCA 4
#define PCIIO_ASIC_TYPE_TIOCE 5
#define PCIIO_ASIC_MAX_TYPES 6
/*
* Common pciio bus provider data. There should be one of these as the
* first field in any pciio based provider soft structure (e.g. pcibr_soft
* tioca_soft, etc).
*/
struct pcibus_bussoft {
u32 bs_asic_type; /* chipset type */
u32 bs_xid; /* xwidget id */
u32 bs_persist_busnum; /* Persistent Bus Number */
u32 bs_persist_segment; /* Segment Number */
u64 bs_legacy_io; /* legacy io pio addr */
u64 bs_legacy_mem; /* legacy mem pio addr */
u64 bs_base; /* widget base */
struct xwidget_info *bs_xwidget_info;
};
struct pci_controller;
/*
* SN pci bus indirection
*/
struct sn_pcibus_provider {
dma_addr_t (*dma_map)(struct pci_dev *, unsigned long, size_t, int flags);
dma_addr_t (*dma_map_consistent)(struct pci_dev *, unsigned long, size_t, int flags);
void (*dma_unmap)(struct pci_dev *, dma_addr_t, int);
void * (*bus_fixup)(struct pcibus_bussoft *, struct pci_controller *);
void (*force_interrupt)(struct sn_irq_info *);
void (*target_interrupt)(struct sn_irq_info *);
};
/*
* Flags used by the map interfaces
* bits 3:0 specifies format of passed in address
* bit 4 specifies that address is to be used for MSI
*/
#define SN_DMA_ADDRTYPE(x) ((x) & 0xf)
#define SN_DMA_ADDR_PHYS 1 /* address is phys memory */
#define SN_DMA_ADDR_XIO 2 /* address is an xio address. */
#define SN_DMA_MSI 0x10 /* Bus address is to be used for MSI */
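A hedged sketch of how a caller composes the flags word for the dma_map hook above; looking up the right sn_pcibus_provider for the device is assumed to happen elsewhere:

/* Sketch: map a physical buffer so the resulting bus address can be used
 * for MSI. */
static dma_addr_t map_phys_for_msi(struct sn_pcibus_provider *prov,
				   struct pci_dev *pdev,
				   unsigned long phys_addr, size_t size)
{
	return prov->dma_map(pdev, phys_addr, size,
			     SN_DMA_ADDR_PHYS | SN_DMA_MSI);
}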
extern struct sn_pcibus_provider *sn_pci_provider[];
#endif /* _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H */

View File

@ -1,85 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2006 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_PCI_PCIDEV_H
#define _ASM_IA64_SN_PCI_PCIDEV_H
#include <linux/pci.h>
/*
* In ia64, pci_dev->sysdata must be a *pci_controller. To provide access to
* the pcidev_info structs for all devices under a controller, we keep a
* list of pcidev_info under pci_controller->platform_data.
*/
struct sn_platform_data {
void *provider_soft;
struct list_head pcidev_info;
};
#define SN_PLATFORM_DATA(busdev) \
((struct sn_platform_data *)(PCI_CONTROLLER(busdev)->platform_data))
#define SN_PCIDEV_INFO(dev) sn_pcidev_info_get(dev)
/*
* Given a pci_bus, return the sn pcibus_bussoft struct. Note that
* this only works for root busses, not for busses represented by PPB's.
*/
#define SN_PCIBUS_BUSSOFT(pci_bus) \
((struct pcibus_bussoft *)(SN_PLATFORM_DATA(pci_bus)->provider_soft))
#define SN_PCIBUS_BUSSOFT_INFO(pci_bus) \
((struct pcibus_info *)(SN_PLATFORM_DATA(pci_bus)->provider_soft))
/*
* Given a struct pci_dev, return the sn pcibus_bussoft struct. Note
 * that this is not equivalent to SN_PCIBUS_BUSSOFT(pci_dev->bus) due
 * to possible PPB's in the path.
*/
#define SN_PCIDEV_BUSSOFT(pci_dev) \
(SN_PCIDEV_INFO(pci_dev)->pdi_host_pcidev_info->pdi_pcibus_info)
#define SN_PCIDEV_BUSPROVIDER(pci_dev) \
(SN_PCIDEV_INFO(pci_dev)->pdi_provider)
#define PCIIO_BUS_NONE 255 /* bus 255 reserved */
#define PCIIO_SLOT_NONE 255
#define PCIIO_FUNC_NONE 255
#define PCIIO_VENDOR_ID_NONE (-1)
struct pcidev_info {
u64 pdi_pio_mapped_addr[7]; /* 6 BARs PLUS 1 ROM */
u64 pdi_slot_host_handle; /* Bus and devfn Host pci_dev */
struct pcibus_bussoft *pdi_pcibus_info; /* Kernel common bus soft */
struct pcidev_info *pdi_host_pcidev_info; /* Kernel Host pci_dev */
struct pci_dev *pdi_linux_pcidev; /* Kernel pci_dev */
struct sn_irq_info *pdi_sn_irq_info;
struct sn_pcibus_provider *pdi_provider; /* sn pci ops */
struct pci_dev *host_pci_dev; /* host bus link */
struct list_head pdi_list; /* List of pcidev_info */
};
extern void sn_irq_fixup(struct pci_dev *pci_dev,
struct sn_irq_info *sn_irq_info);
extern void sn_irq_unfixup(struct pci_dev *pci_dev);
extern struct pcidev_info * sn_pcidev_info_get(struct pci_dev *);
extern void sn_bus_fixup(struct pci_bus *);
extern void sn_acpi_bus_fixup(struct pci_bus *);
extern void sn_common_bus_fixup(struct pci_bus *, struct pcibus_bussoft *);
extern void sn_bus_store_sysdata(struct pci_dev *dev);
extern void sn_bus_free_sysdata(void);
extern void sn_generate_path(struct pci_bus *pci_bus, char *address);
extern void sn_io_slot_fixup(struct pci_dev *);
extern void sn_acpi_slot_fixup(struct pci_dev *);
extern void sn_pci_fixup_slot(struct pci_dev *dev, struct pcidev_info *,
struct sn_irq_info *);
extern void sn_pci_unfixup_slot(struct pci_dev *dev);
extern void sn_irq_lh_init(void);
#endif /* _ASM_IA64_SN_PCI_PCIDEV_H */

View File

@ -1,68 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_PDA_H
#define _ASM_IA64_SN_PDA_H
#include <linux/cache.h>
#include <asm/percpu.h>
/*
* CPU-specific data structure.
*
* One of these structures is allocated for each cpu of a NUMA system.
*
* This structure provides a convenient way of keeping together
* all SN per-cpu data structures.
*/
typedef struct pda_s {
/*
* Support for SN LEDs
*/
volatile short *led_address;
u8 led_state;
u8 hb_state; /* supports blinking heartbeat leds */
unsigned int hb_count;
unsigned int idle_flag;
volatile unsigned long *bedrock_rev_id;
volatile unsigned long *pio_write_status_addr;
unsigned long pio_write_status_val;
volatile unsigned long *pio_shub_war_cam_addr;
unsigned long sn_in_service_ivecs[4];
int sn_lb_int_war_ticks;
int sn_last_irq;
int sn_first_irq;
} pda_t;
#define CACHE_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
/*
* PDA
* Per-cpu private data area for each cpu. The PDA is located immediately after
 * the IA64 cpu_data area. A full page is allocated for the cpu_data area for each
 * cpu but only a small amount of the page is actually used. We put the SNIA PDA
 * in the same page as the cpu_data area. Note that there is a check in the setup
 * code to verify that we don't overflow the page.
 *
 * Seems like we should cache-line align the pda so that any changes in the
 * size of the cpu_data area don't change cache layout. Should we align to a 32, 64,
 * 128 or 512 byte boundary? Each has merits. For now, pick 128, but this should be revisited later.
*/
DECLARE_PER_CPU(struct pda_s, pda_percpu);
#define pda (&__ia64_per_cpu_var(pda_percpu))
#define pdacpu(cpu) (&per_cpu(pda_percpu, cpu))
#endif /* _ASM_IA64_SN_PDA_H */

View File

@ -1,261 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_PCI_PIC_H
#define _ASM_IA64_SN_PCI_PIC_H
/*
* PIC AS DEVICE ZERO
* ------------------
*
* PIC handles PCI/X busses. PCI/X requires that the 'bridge' (i.e. PIC)
* be designated as 'device 0'. That is a departure from earlier SGI
* PCI bridges. Because of that we use config space 1 to access the
* config space of the first actual PCI device on the bus.
* Here's what the PIC manual says:
*
* The current PCI-X bus specification now defines that the parent
 * host's bus bridge (PIC for example) must be device 0 on bus 0. PIC
* reduced the total number of devices from 8 to 4 and removed the
* device registers and windows, now only supporting devices 0,1,2, and
* 3. PIC did leave all 8 configuration space windows. The reason was
 * there was nothing to gain by removing them. Herein lies the problem.
* The device numbering we do using 0 through 3 is unrelated to the device
* numbering which PCI-X requires in configuration space. In the past we
 * correlated config space and our device space 0 <-> 0, 1 <-> 1, etc.
 * PCI-X requires we start at 1, not 0, and currently the PX brick
* does associate our:
*
* device 0 with configuration space window 1,
* device 1 with configuration space window 2,
* device 2 with configuration space window 3,
* device 3 with configuration space window 4.
*
 * The net effect is that all config space accesses are off-by-one with
 * respect to other per-slot accesses on the PIC.
* Here is a table that shows some of that:
*
* Internal Slot#
* |
* | 0 1 2 3
* ----------|---------------------------------------
* config | 0x21000 0x22000 0x23000 0x24000
* |
* even rrb | 0[0] n/a 1[0] n/a [] == implied even/odd
* |
* odd rrb | n/a 0[1] n/a 1[1]
* |
* int dev | 00 01 10 11
* |
* ext slot# | 1 2 3 4
* ----------|---------------------------------------
*/
#define PIC_ATE_TARGETID_SHFT 8
#define PIC_HOST_INTR_ADDR 0x0000FFFFFFFFFFFFUL
#define PIC_PCI64_ATTR_TARG_SHFT 60
/*****************************************************************************
*********************** PIC MMR structure mapping ***************************
*****************************************************************************/
/* NOTE: PIC WAR. PV#854697. PIC does not allow writes just to [31:0]
* of a 64-bit register. When writing PIC registers, always write the
* entire 64 bits.
*/
struct pic {
/* 0x000000-0x00FFFF -- Local Registers */
/* 0x000000-0x000057 -- Standard Widget Configuration */
u64 p_wid_id; /* 0x000000 */
u64 p_wid_stat; /* 0x000008 */
u64 p_wid_err_upper; /* 0x000010 */
u64 p_wid_err_lower; /* 0x000018 */
#define p_wid_err p_wid_err_lower
u64 p_wid_control; /* 0x000020 */
u64 p_wid_req_timeout; /* 0x000028 */
u64 p_wid_int_upper; /* 0x000030 */
u64 p_wid_int_lower; /* 0x000038 */
#define p_wid_int p_wid_int_lower
u64 p_wid_err_cmdword; /* 0x000040 */
u64 p_wid_llp; /* 0x000048 */
u64 p_wid_tflush; /* 0x000050 */
/* 0x000058-0x00007F -- Bridge-specific Widget Configuration */
u64 p_wid_aux_err; /* 0x000058 */
u64 p_wid_resp_upper; /* 0x000060 */
u64 p_wid_resp_lower; /* 0x000068 */
#define p_wid_resp p_wid_resp_lower
u64 p_wid_tst_pin_ctrl; /* 0x000070 */
u64 p_wid_addr_lkerr; /* 0x000078 */
/* 0x000080-0x00008F -- PMU & MAP */
u64 p_dir_map; /* 0x000080 */
u64 _pad_000088; /* 0x000088 */
/* 0x000090-0x00009F -- SSRAM */
u64 p_map_fault; /* 0x000090 */
u64 _pad_000098; /* 0x000098 */
/* 0x0000A0-0x0000AF -- Arbitration */
u64 p_arb; /* 0x0000A0 */
u64 _pad_0000A8; /* 0x0000A8 */
/* 0x0000B0-0x0000BF -- Number In A Can or ATE Parity Error */
u64 p_ate_parity_err; /* 0x0000B0 */
u64 _pad_0000B8; /* 0x0000B8 */
/* 0x0000C0-0x0000FF -- PCI/GIO */
u64 p_bus_timeout; /* 0x0000C0 */
u64 p_pci_cfg; /* 0x0000C8 */
u64 p_pci_err_upper; /* 0x0000D0 */
u64 p_pci_err_lower; /* 0x0000D8 */
#define p_pci_err p_pci_err_lower
u64 _pad_0000E0[4]; /* 0x0000{E0..F8} */
/* 0x000100-0x0001FF -- Interrupt */
u64 p_int_status; /* 0x000100 */
u64 p_int_enable; /* 0x000108 */
u64 p_int_rst_stat; /* 0x000110 */
u64 p_int_mode; /* 0x000118 */
u64 p_int_device; /* 0x000120 */
u64 p_int_host_err; /* 0x000128 */
u64 p_int_addr[8]; /* 0x0001{30,,,68} */
u64 p_err_int_view; /* 0x000170 */
u64 p_mult_int; /* 0x000178 */
u64 p_force_always[8]; /* 0x0001{80,,,B8} */
u64 p_force_pin[8]; /* 0x0001{C0,,,F8} */
/* 0x000200-0x000298 -- Device */
u64 p_device[4]; /* 0x0002{00,,,18} */
u64 _pad_000220[4]; /* 0x0002{20,,,38} */
u64 p_wr_req_buf[4]; /* 0x0002{40,,,58} */
u64 _pad_000260[4]; /* 0x0002{60,,,78} */
u64 p_rrb_map[2]; /* 0x0002{80,,,88} */
#define p_even_resp p_rrb_map[0] /* 0x000280 */
#define p_odd_resp p_rrb_map[1] /* 0x000288 */
u64 p_resp_status; /* 0x000290 */
u64 p_resp_clear; /* 0x000298 */
u64 _pad_0002A0[12]; /* 0x0002{A0..F8} */
/* 0x000300-0x0003F8 -- Buffer Address Match Registers */
struct {
u64 upper; /* 0x0003{00,,,F0} */
u64 lower; /* 0x0003{08,,,F8} */
} p_buf_addr_match[16];
/* 0x000400-0x0005FF -- Performance Monitor Registers (even only) */
struct {
u64 flush_w_touch; /* 0x000{400,,,5C0} */
u64 flush_wo_touch; /* 0x000{408,,,5C8} */
u64 inflight; /* 0x000{410,,,5D0} */
u64 prefetch; /* 0x000{418,,,5D8} */
u64 total_pci_retry; /* 0x000{420,,,5E0} */
u64 max_pci_retry; /* 0x000{428,,,5E8} */
u64 max_latency; /* 0x000{430,,,5F0} */
u64 clear_all; /* 0x000{438,,,5F8} */
} p_buf_count[8];
/* 0x000600-0x0009FF -- PCI/X registers */
u64 p_pcix_bus_err_addr; /* 0x000600 */
u64 p_pcix_bus_err_attr; /* 0x000608 */
u64 p_pcix_bus_err_data; /* 0x000610 */
u64 p_pcix_pio_split_addr; /* 0x000618 */
u64 p_pcix_pio_split_attr; /* 0x000620 */
u64 p_pcix_dma_req_err_attr; /* 0x000628 */
u64 p_pcix_dma_req_err_addr; /* 0x000630 */
u64 p_pcix_timeout; /* 0x000638 */
u64 _pad_000640[120]; /* 0x000{640,,,9F8} */
/* 0x000A00-0x000BFF -- PCI/X Read&Write Buffer */
struct {
u64 p_buf_addr; /* 0x000{A00,,,AF0} */
u64 p_buf_attr; /* 0X000{A08,,,AF8} */
} p_pcix_read_buf_64[16];
struct {
u64 p_buf_addr; /* 0x000{B00,,,BE0} */
u64 p_buf_attr; /* 0x000{B08,,,BE8} */
u64 p_buf_valid; /* 0x000{B10,,,BF0} */
u64 __pad1; /* 0x000{B18,,,BF8} */
} p_pcix_write_buf_64[8];
/* End of Local Registers -- Start of Address Map space */
char _pad_000c00[0x010000 - 0x000c00];
/* 0x010000-0x011fff -- Internal ATE RAM (Auto Parity Generation) */
u64 p_int_ate_ram[1024]; /* 0x010000-0x011fff */
/* 0x012000-0x013fff -- Internal ATE RAM (Manual Parity Generation) */
u64 p_int_ate_ram_mp[1024]; /* 0x012000-0x013fff */
char _pad_014000[0x18000 - 0x014000];
/* 0x18000-0x197F8 -- PIC Write Request Ram */
u64 p_wr_req_lower[256]; /* 0x18000 - 0x187F8 */
u64 p_wr_req_upper[256]; /* 0x18800 - 0x18FF8 */
u64 p_wr_req_parity[256]; /* 0x19000 - 0x197F8 */
char _pad_019800[0x20000 - 0x019800];
/* 0x020000-0x027FFF -- PCI Device Configuration Spaces */
union {
u8 c[0x1000 / 1]; /* 0x02{0000,,,7FFF} */
u16 s[0x1000 / 2]; /* 0x02{0000,,,7FFF} */
u32 l[0x1000 / 4]; /* 0x02{0000,,,7FFF} */
u64 d[0x1000 / 8]; /* 0x02{0000,,,7FFF} */
union {
u8 c[0x100 / 1];
u16 s[0x100 / 2];
u32 l[0x100 / 4];
u64 d[0x100 / 8];
} f[8];
} p_type0_cfg_dev[8]; /* 0x02{0000,,,7FFF} */
/* 0x028000-0x028FFF -- PCI Type 1 Configuration Space */
union {
u8 c[0x1000 / 1]; /* 0x028000-0x029000 */
u16 s[0x1000 / 2]; /* 0x028000-0x029000 */
u32 l[0x1000 / 4]; /* 0x028000-0x029000 */
u64 d[0x1000 / 8]; /* 0x028000-0x029000 */
union {
u8 c[0x100 / 1];
u16 s[0x100 / 2];
u32 l[0x100 / 4];
u64 d[0x100 / 8];
} f[8];
} p_type1_cfg; /* 0x028000-0x029000 */
char _pad_029000[0x030000-0x029000];
/* 0x030000-0x030007 -- PCI Interrupt Acknowledge Cycle */
union {
u8 c[8 / 1];
u16 s[8 / 2];
u32 l[8 / 4];
u64 d[8 / 8];
} p_pci_iack; /* 0x030000-0x030007 */
char _pad_030007[0x040000-0x030008];
/* 0x040000-0x040007 -- PCIX Special Cycle */
union {
u8 c[8 / 1];
u16 s[8 / 2];
u32 l[8 / 4];
u64 d[8 / 8];
} p_pcix_cycle; /* 0x040000-0x040007 */
};
#endif /* _ASM_IA64_SN_PCI_PIC_H */

View File

@ -1,28 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2002-2006 Silicon Graphics, Inc. All Rights Reserved.
*/
#ifndef _ASM_IA64_SN_RW_MMR_H
#define _ASM_IA64_SN_RW_MMR_H
/*
 * This file declares routines that access MMRs via uncached physical addresses.
* pio_phys_read_mmr - read an MMR
* pio_phys_write_mmr - write an MMR
* pio_atomic_phys_write_mmrs - atomically write 1 or 2 MMRs with psr.ic=0
* Second MMR will be skipped if address is NULL
*
* Addresses passed to these routines should be uncached physical addresses
 * i.e., 0x80000....
*/
extern long pio_phys_read_mmr(volatile long *mmr);
extern void pio_phys_write_mmr(volatile long *mmr, long val);
extern void pio_atomic_phys_write_mmrs(volatile long *mmr1, long val1, volatile long *mmr2, long val2);
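For instance, a read-modify-write of an MMR through these helpers could look like the sketch below (the caller is assumed to supply the uncached physical address):

/* Sketch: set bits in an MMR via the uncached-physical access routines. */
static void mmr_set_bits(volatile long *mmr, long bits)
{
	long val = pio_phys_read_mmr(mmr);

	pio_phys_write_mmr(mmr, val | bits);
}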
#endif /* _ASM_IA64_SN_RW_MMR_H */

View File

@ -1,502 +0,0 @@
/*
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2001-2005 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_SHUB_MMR_H
#define _ASM_IA64_SN_SHUB_MMR_H
/* ==================================================================== */
/* Register "SH_IPI_INT" */
/* SHub Inter-Processor Interrupt Registers */
/* ==================================================================== */
#define SH1_IPI_INT __IA64_UL_CONST(0x0000000110000380)
#define SH2_IPI_INT __IA64_UL_CONST(0x0000000010000380)
/* SH_IPI_INT_TYPE */
/* Description: Type of Interrupt: 0=INT, 2=PMI, 4=NMI, 5=INIT */
#define SH_IPI_INT_TYPE_SHFT 0
#define SH_IPI_INT_TYPE_MASK __IA64_UL_CONST(0x0000000000000007)
/* SH_IPI_INT_AGT */
/* Description: Agent, must be 0 for SHub */
#define SH_IPI_INT_AGT_SHFT 3
#define SH_IPI_INT_AGT_MASK __IA64_UL_CONST(0x0000000000000008)
/* SH_IPI_INT_PID */
/* Description: Processor ID, same setting as on targeted McKinley */
#define SH_IPI_INT_PID_SHFT 4
#define SH_IPI_INT_PID_MASK __IA64_UL_CONST(0x00000000000ffff0)
/* SH_IPI_INT_BASE */
/* Description: Optional interrupt vector area, 2MB aligned */
#define SH_IPI_INT_BASE_SHFT 21
#define SH_IPI_INT_BASE_MASK __IA64_UL_CONST(0x0003ffffffe00000)
/* SH_IPI_INT_IDX */
/* Description: Targeted McKinley interrupt vector */
#define SH_IPI_INT_IDX_SHFT 52
#define SH_IPI_INT_IDX_MASK __IA64_UL_CONST(0x0ff0000000000000)
/* SH_IPI_INT_SEND */
/* Description: Send Interrupt Message to PI. This generates a pulse */
#define SH_IPI_INT_SEND_SHFT 63
#define SH_IPI_INT_SEND_MASK __IA64_UL_CONST(0x8000000000000000)
/* ==================================================================== */
/* Register "SH_EVENT_OCCURRED" */
/* SHub Interrupt Event Occurred */
/* ==================================================================== */
#define SH1_EVENT_OCCURRED __IA64_UL_CONST(0x0000000110010000)
#define SH1_EVENT_OCCURRED_ALIAS __IA64_UL_CONST(0x0000000110010008)
#define SH2_EVENT_OCCURRED __IA64_UL_CONST(0x0000000010010000)
#define SH2_EVENT_OCCURRED_ALIAS __IA64_UL_CONST(0x0000000010010008)
/* ==================================================================== */
/* Register "SH_PI_CAM_CONTROL" */
/* CRB CAM MMR Access Control */
/* ==================================================================== */
#define SH1_PI_CAM_CONTROL __IA64_UL_CONST(0x0000000120050300)
/* ==================================================================== */
/* Register "SH_SHUB_ID" */
/* SHub ID Number */
/* ==================================================================== */
#define SH1_SHUB_ID __IA64_UL_CONST(0x0000000110060580)
#define SH1_SHUB_ID_REVISION_SHFT 28
#define SH1_SHUB_ID_REVISION_MASK __IA64_UL_CONST(0x00000000f0000000)
/* ==================================================================== */
/* Register "SH_RTC" */
/* Real-time Clock */
/* ==================================================================== */
#define SH1_RTC __IA64_UL_CONST(0x00000001101c0000)
#define SH2_RTC __IA64_UL_CONST(0x00000002101c0000)
#define SH_RTC_MASK __IA64_UL_CONST(0x007fffffffffffff)
/* ==================================================================== */
/* Register "SH_PIO_WRITE_STATUS_0|1" */
/* PIO Write Status for CPU 0 & 1 */
/* ==================================================================== */
#define SH1_PIO_WRITE_STATUS_0 __IA64_UL_CONST(0x0000000120070200)
#define SH1_PIO_WRITE_STATUS_1 __IA64_UL_CONST(0x0000000120070280)
#define SH2_PIO_WRITE_STATUS_0 __IA64_UL_CONST(0x0000000020070200)
#define SH2_PIO_WRITE_STATUS_1 __IA64_UL_CONST(0x0000000020070280)
#define SH2_PIO_WRITE_STATUS_2 __IA64_UL_CONST(0x0000000020070300)
#define SH2_PIO_WRITE_STATUS_3 __IA64_UL_CONST(0x0000000020070380)
/* SH_PIO_WRITE_STATUS_0_WRITE_DEADLOCK */
/* Description: Deadlock response detected */
#define SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_SHFT 1
#define SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_MASK \
__IA64_UL_CONST(0x0000000000000002)
/* SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT */
/* Description: Count of currently pending PIO writes */
#define SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_SHFT 56
#define SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK \
__IA64_UL_CONST(0x3f00000000000000)
/* ==================================================================== */
/* Register "SH_PIO_WRITE_STATUS_0_ALIAS" */
/* ==================================================================== */
#define SH1_PIO_WRITE_STATUS_0_ALIAS __IA64_UL_CONST(0x0000000120070208)
#define SH2_PIO_WRITE_STATUS_0_ALIAS __IA64_UL_CONST(0x0000000020070208)
/* ==================================================================== */
/* Register "SH_EVENT_OCCURRED" */
/* SHub Interrupt Event Occurred */
/* ==================================================================== */
/* SH_EVENT_OCCURRED_UART_INT */
/* Description: Pending Junk Bus UART Interrupt */
#define SH_EVENT_OCCURRED_UART_INT_SHFT 20
#define SH_EVENT_OCCURRED_UART_INT_MASK __IA64_UL_CONST(0x0000000000100000)
/* SH_EVENT_OCCURRED_IPI_INT */
/* Description: Pending IPI Interrupt */
#define SH_EVENT_OCCURRED_IPI_INT_SHFT 28
#define SH_EVENT_OCCURRED_IPI_INT_MASK __IA64_UL_CONST(0x0000000010000000)
/* SH_EVENT_OCCURRED_II_INT0 */
/* Description: Pending II 0 Interrupt */
#define SH_EVENT_OCCURRED_II_INT0_SHFT 29
#define SH_EVENT_OCCURRED_II_INT0_MASK __IA64_UL_CONST(0x0000000020000000)
/* SH_EVENT_OCCURRED_II_INT1 */
/* Description: Pending II 1 Interrupt */
#define SH_EVENT_OCCURRED_II_INT1_SHFT 30
#define SH_EVENT_OCCURRED_II_INT1_MASK __IA64_UL_CONST(0x0000000040000000)
/* SH2_EVENT_OCCURRED_EXTIO_INT2 */
/* Description: Pending SHUB 2 EXT IO INT2 */
#define SH2_EVENT_OCCURRED_EXTIO_INT2_SHFT 33
#define SH2_EVENT_OCCURRED_EXTIO_INT2_MASK __IA64_UL_CONST(0x0000000200000000)
/* SH2_EVENT_OCCURRED_EXTIO_INT3 */
/* Description: Pending SHUB 2 EXT IO INT3 */
#define SH2_EVENT_OCCURRED_EXTIO_INT3_SHFT 34
#define SH2_EVENT_OCCURRED_EXTIO_INT3_MASK __IA64_UL_CONST(0x0000000400000000)
#define SH_ALL_INT_MASK \
(SH_EVENT_OCCURRED_UART_INT_MASK | SH_EVENT_OCCURRED_IPI_INT_MASK | \
SH_EVENT_OCCURRED_II_INT0_MASK | SH_EVENT_OCCURRED_II_INT1_MASK | \
SH2_EVENT_OCCURRED_EXTIO_INT2_MASK | \
SH2_EVENT_OCCURRED_EXTIO_INT3_MASK)
/* ==================================================================== */
/* LEDS */
/* ==================================================================== */
#define SH1_REAL_JUNK_BUS_LED0 0x7fed00000UL
#define SH1_REAL_JUNK_BUS_LED1 0x7fed10000UL
#define SH1_REAL_JUNK_BUS_LED2 0x7fed20000UL
#define SH1_REAL_JUNK_BUS_LED3 0x7fed30000UL
#define SH2_REAL_JUNK_BUS_LED0 0xf0000000UL
#define SH2_REAL_JUNK_BUS_LED1 0xf0010000UL
#define SH2_REAL_JUNK_BUS_LED2 0xf0020000UL
#define SH2_REAL_JUNK_BUS_LED3 0xf0030000UL
/* ==================================================================== */
/* Register "SH1_PTC_0" */
/* Purge Translation Cache Message Configuration Information */
/* ==================================================================== */
#define SH1_PTC_0 __IA64_UL_CONST(0x00000001101a0000)
/* SH1_PTC_0_A */
/* Description: Type */
#define SH1_PTC_0_A_SHFT 0
/* SH1_PTC_0_PS */
/* Description: Page Size */
#define SH1_PTC_0_PS_SHFT 2
/* SH1_PTC_0_RID */
/* Description: Region ID */
#define SH1_PTC_0_RID_SHFT 8
/* SH1_PTC_0_START */
/* Description: Start */
#define SH1_PTC_0_START_SHFT 63
/* ==================================================================== */
/* Register "SH1_PTC_1" */
/* Purge Translation Cache Message Configuration Information */
/* ==================================================================== */
#define SH1_PTC_1 __IA64_UL_CONST(0x00000001101a0080)
/* SH1_PTC_1_START */
/* Description: PTC_1 Start */
#define SH1_PTC_1_START_SHFT 63
/* ==================================================================== */
/* Register "SH2_PTC" */
/* Purge Translation Cache Message Configuration Information */
/* ==================================================================== */
#define SH2_PTC __IA64_UL_CONST(0x0000000170000000)
/* SH2_PTC_A */
/* Description: Type */
#define SH2_PTC_A_SHFT 0
/* SH2_PTC_PS */
/* Description: Page Size */
#define SH2_PTC_PS_SHFT 2
/* SH2_PTC_RID */
/* Description: Region ID */
#define SH2_PTC_RID_SHFT 4
/* SH2_PTC_START */
/* Description: Start */
#define SH2_PTC_START_SHFT 63
/* SH2_PTC_ADDR_RID */
/* Description: Region ID */
#define SH2_PTC_ADDR_SHFT 4
#define SH2_PTC_ADDR_MASK __IA64_UL_CONST(0x1ffffffffffff000)
/* ==================================================================== */
/* Register "SH_RTC1_INT_CONFIG" */
/* SHub RTC 1 Interrupt Config Registers */
/* ==================================================================== */
#define SH1_RTC1_INT_CONFIG __IA64_UL_CONST(0x0000000110001480)
#define SH2_RTC1_INT_CONFIG __IA64_UL_CONST(0x0000000010001480)
#define SH_RTC1_INT_CONFIG_MASK __IA64_UL_CONST(0x0ff3ffffffefffff)
#define SH_RTC1_INT_CONFIG_INIT __IA64_UL_CONST(0x0000000000000000)
/* SH_RTC1_INT_CONFIG_TYPE */
/* Description: Type of Interrupt: 0=INT, 2=PMI, 4=NMI, 5=INIT */
#define SH_RTC1_INT_CONFIG_TYPE_SHFT 0
#define SH_RTC1_INT_CONFIG_TYPE_MASK __IA64_UL_CONST(0x0000000000000007)
/* SH_RTC1_INT_CONFIG_AGT */
/* Description: Agent, must be 0 for SHub */
#define SH_RTC1_INT_CONFIG_AGT_SHFT 3
#define SH_RTC1_INT_CONFIG_AGT_MASK __IA64_UL_CONST(0x0000000000000008)
/* SH_RTC1_INT_CONFIG_PID */
/* Description: Processor ID, same setting as on targeted McKinley */
#define SH_RTC1_INT_CONFIG_PID_SHFT 4
#define SH_RTC1_INT_CONFIG_PID_MASK __IA64_UL_CONST(0x00000000000ffff0)
/* SH_RTC1_INT_CONFIG_BASE */
/* Description: Optional interrupt vector area, 2MB aligned */
#define SH_RTC1_INT_CONFIG_BASE_SHFT 21
#define SH_RTC1_INT_CONFIG_BASE_MASK __IA64_UL_CONST(0x0003ffffffe00000)
/* SH_RTC1_INT_CONFIG_IDX */
/* Description: Targeted McKinley interrupt vector */
#define SH_RTC1_INT_CONFIG_IDX_SHFT 52
#define SH_RTC1_INT_CONFIG_IDX_MASK __IA64_UL_CONST(0x0ff0000000000000)
/* ==================================================================== */
/* Register "SH_RTC1_INT_ENABLE" */
/* SHub RTC 1 Interrupt Enable Registers */
/* ==================================================================== */
#define SH1_RTC1_INT_ENABLE __IA64_UL_CONST(0x0000000110001500)
#define SH2_RTC1_INT_ENABLE __IA64_UL_CONST(0x0000000010001500)
#define SH_RTC1_INT_ENABLE_MASK __IA64_UL_CONST(0x0000000000000001)
#define SH_RTC1_INT_ENABLE_INIT __IA64_UL_CONST(0x0000000000000000)
/* SH_RTC1_INT_ENABLE_RTC1_ENABLE */
/* Description: Enable RTC 1 Interrupt */
#define SH_RTC1_INT_ENABLE_RTC1_ENABLE_SHFT 0
#define SH_RTC1_INT_ENABLE_RTC1_ENABLE_MASK \
__IA64_UL_CONST(0x0000000000000001)
/* ==================================================================== */
/* Register "SH_RTC2_INT_CONFIG" */
/* SHub RTC 2 Interrupt Config Registers */
/* ==================================================================== */
#define SH1_RTC2_INT_CONFIG __IA64_UL_CONST(0x0000000110001580)
#define SH2_RTC2_INT_CONFIG __IA64_UL_CONST(0x0000000010001580)
#define SH_RTC2_INT_CONFIG_MASK __IA64_UL_CONST(0x0ff3ffffffefffff)
#define SH_RTC2_INT_CONFIG_INIT __IA64_UL_CONST(0x0000000000000000)
/* SH_RTC2_INT_CONFIG_TYPE */
/* Description: Type of Interrupt: 0=INT, 2=PMI, 4=NMI, 5=INIT */
#define SH_RTC2_INT_CONFIG_TYPE_SHFT 0
#define SH_RTC2_INT_CONFIG_TYPE_MASK __IA64_UL_CONST(0x0000000000000007)
/* SH_RTC2_INT_CONFIG_AGT */
/* Description: Agent, must be 0 for SHub */
#define SH_RTC2_INT_CONFIG_AGT_SHFT 3
#define SH_RTC2_INT_CONFIG_AGT_MASK __IA64_UL_CONST(0x0000000000000008)
/* SH_RTC2_INT_CONFIG_PID */
/* Description: Processor ID, same setting as on targeted McKinley */
#define SH_RTC2_INT_CONFIG_PID_SHFT 4
#define SH_RTC2_INT_CONFIG_PID_MASK __IA64_UL_CONST(0x00000000000ffff0)
/* SH_RTC2_INT_CONFIG_BASE */
/* Description: Optional interrupt vector area, 2MB aligned */
#define SH_RTC2_INT_CONFIG_BASE_SHFT 21
#define SH_RTC2_INT_CONFIG_BASE_MASK __IA64_UL_CONST(0x0003ffffffe00000)
/* SH_RTC2_INT_CONFIG_IDX */
/* Description: Targeted McKinley interrupt vector */
#define SH_RTC2_INT_CONFIG_IDX_SHFT 52
#define SH_RTC2_INT_CONFIG_IDX_MASK __IA64_UL_CONST(0x0ff0000000000000)
/* ==================================================================== */
/* Register "SH_RTC2_INT_ENABLE" */
/* SHub RTC 2 Interrupt Enable Registers */
/* ==================================================================== */
#define SH1_RTC2_INT_ENABLE __IA64_UL_CONST(0x0000000110001600)
#define SH2_RTC2_INT_ENABLE __IA64_UL_CONST(0x0000000010001600)
#define SH_RTC2_INT_ENABLE_MASK __IA64_UL_CONST(0x0000000000000001)
#define SH_RTC2_INT_ENABLE_INIT __IA64_UL_CONST(0x0000000000000000)
/* SH_RTC2_INT_ENABLE_RTC2_ENABLE */
/* Description: Enable RTC 2 Interrupt */
#define SH_RTC2_INT_ENABLE_RTC2_ENABLE_SHFT 0
#define SH_RTC2_INT_ENABLE_RTC2_ENABLE_MASK \
__IA64_UL_CONST(0x0000000000000001)
/* ==================================================================== */
/* Register "SH_RTC3_INT_CONFIG" */
/* SHub RTC 3 Interrupt Config Registers */
/* ==================================================================== */
#define SH1_RTC3_INT_CONFIG __IA64_UL_CONST(0x0000000110001680)
#define SH2_RTC3_INT_CONFIG __IA64_UL_CONST(0x0000000010001680)
#define SH_RTC3_INT_CONFIG_MASK __IA64_UL_CONST(0x0ff3ffffffefffff)
#define SH_RTC3_INT_CONFIG_INIT __IA64_UL_CONST(0x0000000000000000)
/* SH_RTC3_INT_CONFIG_TYPE */
/* Description: Type of Interrupt: 0=INT, 2=PMI, 4=NMI, 5=INIT */
#define SH_RTC3_INT_CONFIG_TYPE_SHFT 0
#define SH_RTC3_INT_CONFIG_TYPE_MASK __IA64_UL_CONST(0x0000000000000007)
/* SH_RTC3_INT_CONFIG_AGT */
/* Description: Agent, must be 0 for SHub */
#define SH_RTC3_INT_CONFIG_AGT_SHFT 3
#define SH_RTC3_INT_CONFIG_AGT_MASK __IA64_UL_CONST(0x0000000000000008)
/* SH_RTC3_INT_CONFIG_PID */
/* Description: Processor ID, same setting as on targeted McKinley */
#define SH_RTC3_INT_CONFIG_PID_SHFT 4
#define SH_RTC3_INT_CONFIG_PID_MASK __IA64_UL_CONST(0x00000000000ffff0)
/* SH_RTC3_INT_CONFIG_BASE */
/* Description: Optional interrupt vector area, 2MB aligned */
#define SH_RTC3_INT_CONFIG_BASE_SHFT 21
#define SH_RTC3_INT_CONFIG_BASE_MASK __IA64_UL_CONST(0x0003ffffffe00000)
/* SH_RTC3_INT_CONFIG_IDX */
/* Description: Targeted McKinley interrupt vector */
#define SH_RTC3_INT_CONFIG_IDX_SHFT 52
#define SH_RTC3_INT_CONFIG_IDX_MASK __IA64_UL_CONST(0x0ff0000000000000)
/* ==================================================================== */
/* Register "SH_RTC3_INT_ENABLE" */
/* SHub RTC 3 Interrupt Enable Registers */
/* ==================================================================== */
#define SH1_RTC3_INT_ENABLE __IA64_UL_CONST(0x0000000110001700)
#define SH2_RTC3_INT_ENABLE __IA64_UL_CONST(0x0000000010001700)
#define SH_RTC3_INT_ENABLE_MASK __IA64_UL_CONST(0x0000000000000001)
#define SH_RTC3_INT_ENABLE_INIT __IA64_UL_CONST(0x0000000000000000)
/* SH_RTC3_INT_ENABLE_RTC3_ENABLE */
/* Description: Enable RTC 3 Interrupt */
#define SH_RTC3_INT_ENABLE_RTC3_ENABLE_SHFT 0
#define SH_RTC3_INT_ENABLE_RTC3_ENABLE_MASK \
__IA64_UL_CONST(0x0000000000000001)
/* SH_EVENT_OCCURRED_RTC1_INT */
/* Description: Pending RTC 1 Interrupt */
#define SH_EVENT_OCCURRED_RTC1_INT_SHFT 24
#define SH_EVENT_OCCURRED_RTC1_INT_MASK __IA64_UL_CONST(0x0000000001000000)
/* SH_EVENT_OCCURRED_RTC2_INT */
/* Description: Pending RTC 2 Interrupt */
#define SH_EVENT_OCCURRED_RTC2_INT_SHFT 25
#define SH_EVENT_OCCURRED_RTC2_INT_MASK __IA64_UL_CONST(0x0000000002000000)
/* SH_EVENT_OCCURRED_RTC3_INT */
/* Description: Pending RTC 3 Interrupt */
#define SH_EVENT_OCCURRED_RTC3_INT_SHFT 26
#define SH_EVENT_OCCURRED_RTC3_INT_MASK __IA64_UL_CONST(0x0000000004000000)
/* ==================================================================== */
/* Register "SH_IPI_ACCESS" */
/* CPU interrupt Access Permission Bits */
/* ==================================================================== */
#define SH1_IPI_ACCESS __IA64_UL_CONST(0x0000000110060480)
#define SH2_IPI_ACCESS0 __IA64_UL_CONST(0x0000000010060c00)
#define SH2_IPI_ACCESS1 __IA64_UL_CONST(0x0000000010060c80)
#define SH2_IPI_ACCESS2 __IA64_UL_CONST(0x0000000010060d00)
#define SH2_IPI_ACCESS3 __IA64_UL_CONST(0x0000000010060d80)
/* ==================================================================== */
/* Register "SH_INT_CMPB" */
/* RTC Compare Value for Processor B */
/* ==================================================================== */
#define SH1_INT_CMPB __IA64_UL_CONST(0x00000001101b0080)
#define SH2_INT_CMPB __IA64_UL_CONST(0x00000000101b0080)
#define SH_INT_CMPB_MASK __IA64_UL_CONST(0x007fffffffffffff)
#define SH_INT_CMPB_INIT __IA64_UL_CONST(0x0000000000000000)
/* SH_INT_CMPB_REAL_TIME_CMPB */
/* Description: Real Time Clock Compare */
#define SH_INT_CMPB_REAL_TIME_CMPB_SHFT 0
#define SH_INT_CMPB_REAL_TIME_CMPB_MASK __IA64_UL_CONST(0x007fffffffffffff)
/* ==================================================================== */
/* Register "SH_INT_CMPC" */
/* RTC Compare Value for Processor C */
/* ==================================================================== */
#define SH1_INT_CMPC __IA64_UL_CONST(0x00000001101b0100)
#define SH2_INT_CMPC __IA64_UL_CONST(0x00000000101b0100)
#define SH_INT_CMPC_MASK __IA64_UL_CONST(0x007fffffffffffff)
#define SH_INT_CMPC_INIT __IA64_UL_CONST(0x0000000000000000)
/* SH_INT_CMPC_REAL_TIME_CMPC */
/* Description: Real Time Clock Compare */
#define SH_INT_CMPC_REAL_TIME_CMPC_SHFT 0
#define SH_INT_CMPC_REAL_TIME_CMPC_MASK __IA64_UL_CONST(0x007fffffffffffff)
/* ==================================================================== */
/* Register "SH_INT_CMPD" */
/* RTC Compare Value for Processor D */
/* ==================================================================== */
#define SH1_INT_CMPD __IA64_UL_CONST(0x00000001101b0180)
#define SH2_INT_CMPD __IA64_UL_CONST(0x00000000101b0180)
#define SH_INT_CMPD_MASK __IA64_UL_CONST(0x007fffffffffffff)
#define SH_INT_CMPD_INIT __IA64_UL_CONST(0x0000000000000000)
/* SH_INT_CMPD_REAL_TIME_CMPD */
/* Description: Real Time Clock Compare */
#define SH_INT_CMPD_REAL_TIME_CMPD_SHFT 0
#define SH_INT_CMPD_REAL_TIME_CMPD_MASK __IA64_UL_CONST(0x007fffffffffffff)
/* ==================================================================== */
/* Register "SH_MD_DQLP_MMR_DIR_PRIVEC0" */
/* privilege vector for acc=0 */
/* ==================================================================== */
#define SH1_MD_DQLP_MMR_DIR_PRIVEC0 __IA64_UL_CONST(0x0000000100030300)
/* ==================================================================== */
/* Register "SH_MD_DQRP_MMR_DIR_PRIVEC0" */
/* privilege vector for acc=0 */
/* ==================================================================== */
#define SH1_MD_DQRP_MMR_DIR_PRIVEC0 __IA64_UL_CONST(0x0000000100050300)
/* ==================================================================== */
/* Some MMRs are functionally identical (or close enough) on both SHUB1 */
/* and SHUB2 that it makes sense to define a generic name for the MMR. */
/* It is acceptable to use (for example) SH_IPI_INT to reference */
/* the IPI MMR. The value of SH_IPI_INT is determined at runtime based */
/* on the type of the SHUB. Do not use these #defines in performance */
/* critical code or loops - there is a small performance penalty. */
/* ==================================================================== */
#define shubmmr(a,b) (is_shub2() ? a##2_##b : a##1_##b)
#define SH_REAL_JUNK_BUS_LED0 shubmmr(SH, REAL_JUNK_BUS_LED0)
#define SH_IPI_INT shubmmr(SH, IPI_INT)
#define SH_EVENT_OCCURRED shubmmr(SH, EVENT_OCCURRED)
#define SH_EVENT_OCCURRED_ALIAS shubmmr(SH, EVENT_OCCURRED_ALIAS)
#define SH_RTC shubmmr(SH, RTC)
#define SH_RTC1_INT_CONFIG shubmmr(SH, RTC1_INT_CONFIG)
#define SH_RTC1_INT_ENABLE shubmmr(SH, RTC1_INT_ENABLE)
#define SH_RTC2_INT_CONFIG shubmmr(SH, RTC2_INT_CONFIG)
#define SH_RTC2_INT_ENABLE shubmmr(SH, RTC2_INT_ENABLE)
#define SH_RTC3_INT_CONFIG shubmmr(SH, RTC3_INT_CONFIG)
#define SH_RTC3_INT_ENABLE shubmmr(SH, RTC3_INT_ENABLE)
#define SH_INT_CMPB shubmmr(SH, INT_CMPB)
#define SH_INT_CMPC shubmmr(SH, INT_CMPC)
#define SH_INT_CMPD shubmmr(SH, INT_CMPD)
/* ========================================================================== */
/* Register "SH2_BT_ENG_CSR_0" */
/* Engine 0 Control and Status Register */
/* ========================================================================== */
#define SH2_BT_ENG_CSR_0 __IA64_UL_CONST(0x0000000030040000)
#define SH2_BT_ENG_SRC_ADDR_0 __IA64_UL_CONST(0x0000000030040080)
#define SH2_BT_ENG_DEST_ADDR_0 __IA64_UL_CONST(0x0000000030040100)
#define SH2_BT_ENG_NOTIF_ADDR_0 __IA64_UL_CONST(0x0000000030040180)
/* ========================================================================== */
/* BTE interfaces 1-3 */
/* ========================================================================== */
#define SH2_BT_ENG_CSR_1 __IA64_UL_CONST(0x0000000030050000)
#define SH2_BT_ENG_CSR_2 __IA64_UL_CONST(0x0000000030060000)
#define SH2_BT_ENG_CSR_3 __IA64_UL_CONST(0x0000000030070000)
#endif /* _ASM_IA64_SN_SHUB_MMR_H */

File diff suppressed because it is too large

View File

@ -1,25 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
* Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_SIMULATOR_H
#define _ASM_IA64_SN_SIMULATOR_H
#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_SGI_UV)
#define SNMAGIC 0xaeeeeeee8badbeefL
#define IS_MEDUSA() ({long sn; asm("mov %0=cpuid[%1]" : "=r"(sn) : "r"(2)); sn == SNMAGIC;})
#define SIMULATOR_SLEEP() asm("nop.i 0x8beef")
#define IS_RUNNING_ON_SIMULATOR() (sn_prom_type)
#define IS_RUNNING_ON_FAKE_PROM() (sn_prom_type == 2)
extern int sn_prom_type; /* 0=hardware, 1=medusa/realprom, 2=medusa/fakeprom */
#else
#define IS_MEDUSA() 0
#define SIMULATOR_SLEEP()
#define IS_RUNNING_ON_SIMULATOR() 0
#endif
#endif /* _ASM_IA64_SN_SIMULATOR_H */

View File

@ -1,242 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2004 Silicon Graphics, Inc. All rights reserved.
*
* Data types used by the SN_SAL_HWPERF_OP SAL call for monitoring
* SGI Altix node and router hardware
*
* Mark Goodwin <markgw@sgi.com> Mon Aug 30 12:23:46 EST 2004
*/
#ifndef SN_HWPERF_H
#define SN_HWPERF_H
/*
* object structure. SN_HWPERF_ENUM_OBJECTS and SN_HWPERF_GET_CPU_INFO
* return an array of these. Do not change this without also
* changing the corresponding SAL code.
*/
#define SN_HWPERF_MAXSTRING 128
struct sn_hwperf_object_info {
u32 id;
union {
struct {
u64 this_part:1;
u64 is_shared:1;
} fields;
struct {
u64 flags;
u64 reserved;
} b;
} f;
char name[SN_HWPERF_MAXSTRING];
char location[SN_HWPERF_MAXSTRING];
u32 ports;
};
#define sn_hwp_this_part f.fields.this_part
#define sn_hwp_is_shared f.fields.is_shared
#define sn_hwp_flags f.b.flags
/* macros for object classification */
#define SN_HWPERF_IS_NODE(x) ((x) && strstr((x)->name, "SHub"))
#define SN_HWPERF_IS_NODE_SHUB2(x) ((x) && strstr((x)->name, "SHub 2."))
#define SN_HWPERF_IS_IONODE(x) ((x) && strstr((x)->name, "TIO"))
#define SN_HWPERF_IS_NL3ROUTER(x) ((x) && strstr((x)->name, "NL3Router"))
#define SN_HWPERF_IS_NL4ROUTER(x) ((x) && strstr((x)->name, "NL4Router"))
#define SN_HWPERF_IS_OLDROUTER(x) ((x) && strstr((x)->name, "Router"))
#define SN_HWPERF_IS_ROUTER(x) (SN_HWPERF_IS_NL3ROUTER(x) || \
SN_HWPERF_IS_NL4ROUTER(x) || \
SN_HWPERF_IS_OLDROUTER(x))
#define SN_HWPERF_FOREIGN(x) ((x) && !(x)->sn_hwp_this_part && !(x)->sn_hwp_is_shared)
#define SN_HWPERF_SAME_OBJTYPE(x,y) ((SN_HWPERF_IS_NODE(x) && SN_HWPERF_IS_NODE(y)) ||\
(SN_HWPERF_IS_IONODE(x) && SN_HWPERF_IS_IONODE(y)) ||\
(SN_HWPERF_IS_ROUTER(x) && SN_HWPERF_IS_ROUTER(y)))
/* numa port structure, SN_HWPERF_ENUM_PORTS returns an array of these */
struct sn_hwperf_port_info {
u32 port;
u32 conn_id;
u32 conn_port;
};
/* for HWPERF_{GET,SET}_MMRS */
struct sn_hwperf_data {
u64 addr;
u64 data;
};
/* user ioctl() argument, see below */
struct sn_hwperf_ioctl_args {
u64 arg; /* argument, usually an object id */
u64 sz; /* size of transfer */
void *ptr; /* pointer to source/target */
u32 v0; /* second return value */
};
/*
* For SN_HWPERF_{GET,SET}_MMRS and SN_HWPERF_OBJECT_DISTANCE,
* sn_hwperf_ioctl_args.arg can be used to specify a CPU on which
* to call SAL, and whether to use an interprocessor interrupt
* or task migration in order to do so. If the CPU specified is
* SN_HWPERF_ARG_ANY_CPU, then the current CPU will be used.
*/
#define SN_HWPERF_ARG_ANY_CPU 0x7fffffffUL
#define SN_HWPERF_ARG_CPU_MASK 0x7fffffff00000000ULL
#define SN_HWPERF_ARG_USE_IPI_MASK 0x8000000000000000ULL
#define SN_HWPERF_ARG_OBJID_MASK 0x00000000ffffffffULL
/*
* ioctl requests on the "sn_hwperf" misc device that call SAL.
*/
#define SN_HWPERF_OP_MEM_COPYIN 0x1000
#define SN_HWPERF_OP_MEM_COPYOUT 0x2000
#define SN_HWPERF_OP_MASK 0x0fff
/*
* Determine mem requirement.
* arg don't care
* sz 8
* p pointer to u64 integer
*/
#define SN_HWPERF_GET_HEAPSIZE 1
/*
* Install mem for SAL drvr
* arg don't care
* sz sizeof buffer pointed to by p
* p pointer to buffer for scratch area
*/
#define SN_HWPERF_INSTALL_HEAP 2
/*
* Determine number of objects
* arg don't care
* sz 8
* p pointer to u64 integer
*/
#define SN_HWPERF_OBJECT_COUNT (10|SN_HWPERF_OP_MEM_COPYOUT)
/*
* Determine object "distance", relative to a cpu. This operation can
* execute on a designated logical cpu number, using either an IPI or
* via task migration. If the cpu number is SN_HWPERF_ARG_ANY_CPU, then
* the current CPU is used. See the SN_HWPERF_ARG_* macros above.
*
* arg bitmap of IPI flag, cpu number and object id
* sz 8
* p pointer to u64 integer
*/
#define SN_HWPERF_OBJECT_DISTANCE (11|SN_HWPERF_OP_MEM_COPYOUT)
/*
* Enumerate objects. Special case if sz == 8, returns the required
* buffer size.
* arg don't care
* sz sizeof buffer pointed to by p
* p pointer to array of struct sn_hwperf_object_info
*/
#define SN_HWPERF_ENUM_OBJECTS (12|SN_HWPERF_OP_MEM_COPYOUT)
/*
* Enumerate NumaLink ports for an object. Special case if sz == 8,
* returns the required buffer size.
* arg object id
* sz sizeof buffer pointed to by p
* p pointer to array of struct sn_hwperf_port_info
*/
#define SN_HWPERF_ENUM_PORTS (13|SN_HWPERF_OP_MEM_COPYOUT)
/*
* SET/GET memory mapped registers. These operations can execute
* on a designated logical cpu number, using either an IPI or via
* task migration. If the cpu number is SN_HWPERF_ARG_ANY_CPU, then
* the current CPU is used. See the SN_HWPERF_ARG_* macros above.
*
* arg bitmap of ipi flag, cpu number and object id
* sz sizeof buffer pointed to by p
* p pointer to array of struct sn_hwperf_data
*/
#define SN_HWPERF_SET_MMRS (14|SN_HWPERF_OP_MEM_COPYIN)
#define SN_HWPERF_GET_MMRS (15|SN_HWPERF_OP_MEM_COPYOUT| \
SN_HWPERF_OP_MEM_COPYIN)
/*
* Lock a shared object
* arg object id
* sz don't care
* p don't care
*/
#define SN_HWPERF_ACQUIRE 16
/*
* Unlock a shared object
* arg object id
* sz don't care
* p don't care
*/
#define SN_HWPERF_RELEASE 17
/*
* Break a lock on a shared object
* arg object id
* sz don't care
* p don't care
*/
#define SN_HWPERF_FORCE_RELEASE 18
/*
* ioctl requests on "sn_hwperf" that do not call SAL
*/
/*
* get cpu info as an array of hwperf_object_info_t.
* id is logical CPU number, name is description, location
* is geoid (e.g. 001c04#1c). Special case if sz == 8,
* returns the required buffer size.
*
* arg don't care
* sz sizeof buffer pointed to by p
* p pointer to array of struct sn_hwperf_object_info
*/
#define SN_HWPERF_GET_CPU_INFO (100|SN_HWPERF_OP_MEM_COPYOUT)
/*
* Given an object id, return its node number (aka cnode).
* arg object id
* sz 8
* p pointer to u64 integer
*/
#define SN_HWPERF_GET_OBJ_NODE (101|SN_HWPERF_OP_MEM_COPYOUT)
/*
* Given a node number (cnode), return its nasid.
* arg ordinal node number (aka cnodeid)
* sz 8
* p pointer to u64 integer
*/
#define SN_HWPERF_GET_NODE_NASID (102|SN_HWPERF_OP_MEM_COPYOUT)
/*
* Given a node id, determine the id of the nearest node with CPUs
* and the id of the nearest node that has memory. The argument
* node would normally be a "headless" node, e.g. an "IO node".
* Return 0 on success.
*/
extern int sn_hwperf_get_nearest_node(cnodeid_t node,
cnodeid_t *near_mem, cnodeid_t *near_cpu);
/* return codes */
#define SN_HWPERF_OP_OK 0
#define SN_HWPERF_OP_NOMEM 1
#define SN_HWPERF_OP_NO_PERM 2
#define SN_HWPERF_OP_IO_ERROR 3
#define SN_HWPERF_OP_BUSY 4
#define SN_HWPERF_OP_RECONFIGURE 253
#define SN_HWPERF_OP_INVAL 254
int sn_topology_open(struct inode *inode, struct file *file);
int sn_topology_release(struct inode *inode, struct file *file);
#endif /* SN_HWPERF_H */

@ -1,132 +0,0 @@
/*
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_SN_CPUID_H
#define _ASM_IA64_SN_SN_CPUID_H
#include <linux/smp.h>
#include <asm/sn/addrs.h>
#include <asm/sn/pda.h>
#include <asm/intrinsics.h>
/*
* Functions for converting between cpuids, nodeids and NASIDs.
*
* These are for SGI platforms only.
*
*/
/*
* Definitions of terms (these definitions are for IA64 ONLY. Other architectures
* use cpuid/cpunum quite differently):
*
* CPUID - a number in range of 0..NR_CPUS-1 that uniquely identifies
* the cpu. The value cpuid has no significance on IA64 other than
* the boot cpu is 0.
* smp_processor_id() returns the cpuid of the current cpu.
*
* CPU_PHYSICAL_ID (also known as HARD_PROCESSOR_ID)
* This is the same as 31:24 of the processor LID register
* hard_smp_processor_id()- cpu_physical_id of current processor
* cpu_physical_id(cpuid) - convert a <cpuid> to a <physical_cpuid>
* cpu_logical_id(phy_id) - convert a <physical_cpuid> to a <cpuid>
* * not real efficient - don't use in perf critical code
*
* SLICE - a number in the range of 0 - 3 (typically) that represents the
* cpu number on a brick.
*
* SUBNODE - (almost obsolete) the number of the FSB that a cpu is
* connected to. This is also the same as the PI number. Usually 0 or 1.
*
* NOTE!!!: the value of the bits in the cpu physical id (SAPICid or LID) of a cpu has no
* significance. The SAPIC id (LID) is a 16-bit cookie that has meaning only to the PROM.
*
*
* The macros convert between cpu physical ids & slice/nasid/cnodeid.
* These terms are described below:
*
*
* Brick
* ----- ----- ----- ----- CPU
* | 0 | | 1 | | 0 | | 1 | SLICE
* ----- ----- ----- -----
* | | | |
* | | | |
* 0 | | 2 0 | | 2 FSB SLOT
* ------- -------
* | |
* | |
* | |
* ------------ -------------
* | | | |
* | SHUB | | SHUB | NASID (0..MAX_NASIDS)
* | |----- | | CNODEID (0..num_compact_nodes-1)
* | | | |
* | | | |
* ------------ -------------
* | |
*
*
*/
#define get_node_number(addr) NASID_GET(addr)
/*
* NOTE: on non-MP systems, only cpuid 0 exists
*/
extern short physical_node_map[]; /* indexed by nasid to get cnode */
/*
* Macros for retrieving info about current cpu
*/
#define get_nasid() (sn_nodepda->phys_cpuid[smp_processor_id()].nasid)
#define get_subnode() (sn_nodepda->phys_cpuid[smp_processor_id()].subnode)
#define get_slice() (sn_nodepda->phys_cpuid[smp_processor_id()].slice)
#define get_cnode() (sn_nodepda->phys_cpuid[smp_processor_id()].cnode)
#define get_sapicid() ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff)
/*
* Macros for retrieving info about an arbitrary cpu
* cpuid - logical cpu id
*/
#define cpuid_to_nasid(cpuid) (sn_nodepda->phys_cpuid[cpuid].nasid)
#define cpuid_to_subnode(cpuid) (sn_nodepda->phys_cpuid[cpuid].subnode)
#define cpuid_to_slice(cpuid) (sn_nodepda->phys_cpuid[cpuid].slice)
/*
* Don't use the following in performance-critical code. They require scans
* of potentially large tables.
*/
extern int nasid_slice_to_cpuid(int, int);
/*
* cnodeid_to_nasid - convert a cnodeid to a NASID
*/
#define cnodeid_to_nasid(cnodeid) (sn_cnodeid_to_nasid[cnodeid])
/*
* nasid_to_cnodeid - convert a NASID to a cnodeid
*/
#define nasid_to_cnodeid(nasid) (physical_node_map[nasid])
/*
* partition_coherence_id - get the coherence ID of the current partition
*/
extern u8 sn_coherency_id;
#define partition_coherence_id() (sn_coherency_id)
#endif /* _ASM_IA64_SN_SN_CPUID_H */

@ -1,58 +0,0 @@
#ifndef _ASM_IA64_SN_FEATURE_SETS_H
#define _ASM_IA64_SN_FEATURE_SETS_H
/*
* SN PROM Features
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2005-2006 Silicon Graphics, Inc. All rights reserved.
*/
/* --------------------- PROM Features -----------------------------*/
extern int sn_prom_feature_available(int id);
#define MAX_PROM_FEATURE_SETS 2
/*
* The following defines features that may or may not be supported by the
* current PROM. The OS uses sn_prom_feature_available(feature) to test for
* the presence of a PROM feature. Down rev (old) PROMs will always test
* "false" for new features.
*
* Use:
* if (sn_prom_feature_available(PRF_XXX))
* ...
*/
#define PRF_PAL_CACHE_FLUSH_SAFE 0
#define PRF_DEVICE_FLUSH_LIST 1
#define PRF_HOTPLUG_SUPPORT 2
#define PRF_CPU_DISABLE_SUPPORT 3
/* --------------------- OS Features -------------------------------*/
/*
* The following defines OS features that are optionally present in
* the operating system.
* During boot, PROM is notified of these features via a series of calls:
*
* ia64_sn_set_os_feature(feature1);
*
* Once enabled, a feature cannot be disabled.
*
* By default, features are disabled unless explicitly enabled.
*
* These defines must be kept in sync with the corresponding
* PROM definitions in feature_sets.h.
*/
#define OSF_MCA_SLV_TO_OS_INIT_SLV 0
#define OSF_FEAT_LOG_SBES 1
#define OSF_ACPI_ENABLE 2
#define OSF_PCISEGMENT_ENABLE 3
#endif /* _ASM_IA64_SN_FEATURE_SETS_H */

File diff suppressed because it is too large

@ -1,596 +0,0 @@
#ifndef _ASM_IA64_SN_TIO_TIOCA_H
#define _ASM_IA64_SN_TIO_TIOCA_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2003-2005 Silicon Graphics, Inc. All rights reserved.
*/
#define TIOCA_PART_NUM 0xE020
#define TIOCA_MFGR_NUM 0x24
#define TIOCA_REV_A 0x1
/*
* Register layout for TIO:CA. See below for bitmasks for each register.
*/
struct tioca {
u64 ca_id; /* 0x000000 */
u64 ca_control1; /* 0x000008 */
u64 ca_control2; /* 0x000010 */
u64 ca_status1; /* 0x000018 */
u64 ca_status2; /* 0x000020 */
u64 ca_gart_aperature; /* 0x000028 */
u64 ca_gfx_detach; /* 0x000030 */
u64 ca_inta_dest_addr; /* 0x000038 */
u64 ca_intb_dest_addr; /* 0x000040 */
u64 ca_err_int_dest_addr; /* 0x000048 */
u64 ca_int_status; /* 0x000050 */
u64 ca_int_status_alias; /* 0x000058 */
u64 ca_mult_error; /* 0x000060 */
u64 ca_mult_error_alias; /* 0x000068 */
u64 ca_first_error; /* 0x000070 */
u64 ca_int_mask; /* 0x000078 */
u64 ca_crm_pkterr_type; /* 0x000080 */
u64 ca_crm_pkterr_type_alias; /* 0x000088 */
u64 ca_crm_ct_error_detail_1; /* 0x000090 */
u64 ca_crm_ct_error_detail_2; /* 0x000098 */
u64 ca_crm_tnumto; /* 0x0000A0 */
u64 ca_gart_err; /* 0x0000A8 */
u64 ca_pcierr_type; /* 0x0000B0 */
u64 ca_pcierr_addr; /* 0x0000B8 */
u64 ca_pad_0000C0[3]; /* 0x0000{C0..D0} */
u64 ca_pci_rd_buf_flush; /* 0x0000D8 */
u64 ca_pci_dma_addr_extn; /* 0x0000E0 */
u64 ca_agp_dma_addr_extn; /* 0x0000E8 */
u64 ca_force_inta; /* 0x0000F0 */
u64 ca_force_intb; /* 0x0000F8 */
u64 ca_debug_vector_sel; /* 0x000100 */
u64 ca_debug_mux_core_sel; /* 0x000108 */
u64 ca_debug_mux_pci_sel; /* 0x000110 */
u64 ca_debug_domain_sel; /* 0x000118 */
u64 ca_pad_000120[28]; /* 0x0001{20..F8} */
u64 ca_gart_ptr_table; /* 0x200 */
u64 ca_gart_tlb_addr[8]; /* 0x2{08..40} */
};
/*
* Mask/shift definitions for TIO:CA registers. The convention here is
* to mainly use the names as they appear in the "TIO AEGIS Programmers'
* Reference" with a CA_ prefix added. Some exceptions were made to fix
* duplicate field names or to generalize fields that are common to
* different registers (ca_debug_mux_core_sel and ca_debug_mux_pci_sel for
* example).
*
* Fields consisting of a single bit have a single #define to mask the
* bit. Fields consisting of multiple bits
* have two declarations: one to mask the proper bits in a register, and
* a second with the suffix "_SHFT" to identify how far the mask needs to
* be shifted right to get its base value.
*/
/* ==== ca_control1 */
#define CA_SYS_BIG_END (1ull << 0)
#define CA_DMA_AGP_SWAP (1ull << 1)
#define CA_DMA_PCI_SWAP (1ull << 2)
#define CA_PIO_IO_SWAP (1ull << 3)
#define CA_PIO_MEM_SWAP (1ull << 4)
#define CA_GFX_WR_SWAP (1ull << 5)
#define CA_AGP_FW_ENABLE (1ull << 6)
#define CA_AGP_CAL_CYCLE (0x7ull << 7)
#define CA_AGP_CAL_CYCLE_SHFT 7
#define CA_AGP_CAL_PRSCL_BYP (1ull << 10)
#define CA_AGP_INIT_CAL_ENB (1ull << 11)
#define CA_INJ_ADDR_PERR (1ull << 12)
#define CA_INJ_DATA_PERR (1ull << 13)
/* bits 15:14 unused */
#define CA_PCIM_IO_NBE_AD (0x7ull << 16)
#define CA_PCIM_IO_NBE_AD_SHFT 16
#define CA_PCIM_FAST_BTB_ENB (1ull << 19)
/* bits 23:20 unused */
#define CA_PIO_ADDR_OFFSET (0xffull << 24)
#define CA_PIO_ADDR_OFFSET_SHFT 24
/* bits 35:32 unused */
#define CA_AGPDMA_OP_COMBDELAY (0x1full << 36)
#define CA_AGPDMA_OP_COMBDELAY_SHFT 36
/* bit 41 unused */
#define CA_AGPDMA_OP_ENB_COMBDELAY (1ull << 42)
#define CA_PCI_INT_LPCNT (0xffull << 44)
#define CA_PCI_INT_LPCNT_SHFT 44
/* bits 63:52 unused */
/* ==== ca_control2 */
#define CA_AGP_LATENCY_TO (0xffull << 0)
#define CA_AGP_LATENCY_TO_SHFT 0
#define CA_PCI_LATENCY_TO (0xffull << 8)
#define CA_PCI_LATENCY_TO_SHFT 8
#define CA_PCI_MAX_RETRY (0x3ffull << 16)
#define CA_PCI_MAX_RETRY_SHFT 16
/* bits 27:26 unused */
#define CA_RT_INT_EN (0x3ull << 28)
#define CA_RT_INT_EN_SHFT 28
#define CA_MSI_INT_ENB (1ull << 30)
#define CA_PCI_ARB_ERR_ENB (1ull << 31)
#define CA_GART_MEM_PARAM (0x3ull << 32)
#define CA_GART_MEM_PARAM_SHFT 32
#define CA_GART_RD_PREFETCH_ENB (1ull << 34)
#define CA_GART_WR_PREFETCH_ENB (1ull << 35)
#define CA_GART_FLUSH_TLB (1ull << 36)
/* bits 39:37 unused */
#define CA_CRM_TNUMTO_PERIOD (0x1fffull << 40)
#define CA_CRM_TNUMTO_PERIOD_SHFT 40
/* bits 55:53 unused */
#define CA_CRM_TNUMTO_ENB (1ull << 56)
#define CA_CRM_PRESCALER_BYP (1ull << 57)
/* bits 59:58 unused */
#define CA_CRM_MAX_CREDIT (0x7ull << 60)
#define CA_CRM_MAX_CREDIT_SHFT 60
/* bit 63 unused */
/* ==== ca_status1 */
#define CA_CORELET_ID (0x3ull << 0)
#define CA_CORELET_ID_SHFT 0
#define CA_INTA_N (1ull << 2)
#define CA_INTB_N (1ull << 3)
#define CA_CRM_CREDIT_AVAIL (0x7ull << 4)
#define CA_CRM_CREDIT_AVAIL_SHFT 4
/* bit 7 unused */
#define CA_CRM_SPACE_AVAIL (0x7full << 8)
#define CA_CRM_SPACE_AVAIL_SHFT 8
/* bit 15 unused */
#define CA_GART_TLB_VAL (0xffull << 16)
#define CA_GART_TLB_VAL_SHFT 16
/* bits 63:24 unused */
/* ==== ca_status2 */
#define CA_GFX_CREDIT_AVAIL (0xffull << 0)
#define CA_GFX_CREDIT_AVAIL_SHFT 0
#define CA_GFX_OPQ_AVAIL (0xffull << 8)
#define CA_GFX_OPQ_AVAIL_SHFT 8
#define CA_GFX_WRBUFF_AVAIL (0xffull << 16)
#define CA_GFX_WRBUFF_AVAIL_SHFT 16
#define CA_ADMA_OPQ_AVAIL (0xffull << 24)
#define CA_ADMA_OPQ_AVAIL_SHFT 24
#define CA_ADMA_WRBUFF_AVAIL (0xffull << 32)
#define CA_ADMA_WRBUFF_AVAIL_SHFT 32
#define CA_ADMA_RDBUFF_AVAIL (0x7full << 40)
#define CA_ADMA_RDBUFF_AVAIL_SHFT 40
#define CA_PCI_PIO_OP_STAT (1ull << 47)
#define CA_PDMA_OPQ_AVAIL (0xfull << 48)
#define CA_PDMA_OPQ_AVAIL_SHFT 48
#define CA_PDMA_WRBUFF_AVAIL (0xfull << 52)
#define CA_PDMA_WRBUFF_AVAIL_SHFT 52
#define CA_PDMA_RDBUFF_AVAIL (0x3ull << 56)
#define CA_PDMA_RDBUFF_AVAIL_SHFT 56
/* bits 63:58 unused */
/* ==== ca_gart_aperature */
#define CA_GART_AP_ENB_AGP (1ull << 0)
#define CA_GART_PAGE_SIZE (1ull << 1)
#define CA_GART_AP_ENB_PCI (1ull << 2)
/* bits 11:3 unused */
#define CA_GART_AP_SIZE (0x3ffull << 12)
#define CA_GART_AP_SIZE_SHFT 12
#define CA_GART_AP_BASE (0x3ffffffffffull << 22)
#define CA_GART_AP_BASE_SHFT 22
/* ==== ca_inta_dest_addr
==== ca_intb_dest_addr
==== ca_err_int_dest_addr */
/* bits 2:0 unused */
#define CA_INT_DEST_ADDR (0x7ffffffffffffull << 3)
#define CA_INT_DEST_ADDR_SHFT 3
/* bits 55:54 unused */
#define CA_INT_DEST_VECT (0xffull << 56)
#define CA_INT_DEST_VECT_SHFT 56
/* ==== ca_int_status */
/* ==== ca_int_status_alias */
/* ==== ca_mult_error */
/* ==== ca_mult_error_alias */
/* ==== ca_first_error */
/* ==== ca_int_mask */
#define CA_PCI_ERR (1ull << 0)
/* bits 3:1 unused */
#define CA_GART_FETCH_ERR (1ull << 4)
#define CA_GFX_WR_OVFLW (1ull << 5)
#define CA_PIO_REQ_OVFLW (1ull << 6)
#define CA_CRM_PKTERR (1ull << 7)
#define CA_CRM_DVERR (1ull << 8)
#define CA_TNUMTO (1ull << 9)
#define CA_CXM_RSP_CRED_OVFLW (1ull << 10)
#define CA_CXM_REQ_CRED_OVFLW (1ull << 11)
#define CA_PIO_INVALID_ADDR (1ull << 12)
#define CA_PCI_ARB_TO (1ull << 13)
#define CA_AGP_REQ_OFLOW (1ull << 14)
#define CA_SBA_TYPE1_ERR (1ull << 15)
/* bit 16 unused */
#define CA_INTA (1ull << 17)
#define CA_INTB (1ull << 18)
#define CA_MULT_INTA (1ull << 19)
#define CA_MULT_INTB (1ull << 20)
#define CA_GFX_CREDIT_OVFLW (1ull << 21)
/* bits 63:22 unused */
/* ==== ca_crm_pkterr_type */
/* ==== ca_crm_pkterr_type_alias */
#define CA_CRM_PKTERR_SBERR_HDR (1ull << 0)
#define CA_CRM_PKTERR_DIDN (1ull << 1)
#define CA_CRM_PKTERR_PACTYPE (1ull << 2)
#define CA_CRM_PKTERR_INV_TNUM (1ull << 3)
#define CA_CRM_PKTERR_ADDR_RNG (1ull << 4)
#define CA_CRM_PKTERR_ADDR_ALGN (1ull << 5)
#define CA_CRM_PKTERR_HDR_PARAM (1ull << 6)
#define CA_CRM_PKTERR_CW_ERR (1ull << 7)
#define CA_CRM_PKTERR_SBERR_NH (1ull << 8)
#define CA_CRM_PKTERR_EARLY_TERM (1ull << 9)
#define CA_CRM_PKTERR_EARLY_TAIL (1ull << 10)
#define CA_CRM_PKTERR_MSSNG_TAIL (1ull << 11)
#define CA_CRM_PKTERR_MSSNG_HDR (1ull << 12)
/* bits 15:13 unused */
#define CA_FIRST_CRM_PKTERR_SBERR_HDR (1ull << 16)
#define CA_FIRST_CRM_PKTERR_DIDN (1ull << 17)
#define CA_FIRST_CRM_PKTERR_PACTYPE (1ull << 18)
#define CA_FIRST_CRM_PKTERR_INV_TNUM (1ull << 19)
#define CA_FIRST_CRM_PKTERR_ADDR_RNG (1ull << 20)
#define CA_FIRST_CRM_PKTERR_ADDR_ALGN (1ull << 21)
#define CA_FIRST_CRM_PKTERR_HDR_PARAM (1ull << 22)
#define CA_FIRST_CRM_PKTERR_CW_ERR (1ull << 23)
#define CA_FIRST_CRM_PKTERR_SBERR_NH (1ull << 24)
#define CA_FIRST_CRM_PKTERR_EARLY_TERM (1ull << 25)
#define CA_FIRST_CRM_PKTERR_EARLY_TAIL (1ull << 26)
#define CA_FIRST_CRM_PKTERR_MSSNG_TAIL (1ull << 27)
#define CA_FIRST_CRM_PKTERR_MSSNG_HDR (1ull << 28)
/* bits 63:29 unused */
/* ==== ca_crm_ct_error_detail_1 */
#define CA_PKT_TYPE (0xfull << 0)
#define CA_PKT_TYPE_SHFT 0
#define CA_SRC_ID (0x3ull << 4)
#define CA_SRC_ID_SHFT 4
#define CA_DATA_SZ (0x3ull << 6)
#define CA_DATA_SZ_SHFT 6
#define CA_TNUM (0xffull << 8)
#define CA_TNUM_SHFT 8
#define CA_DW_DATA_EN (0xffull << 16)
#define CA_DW_DATA_EN_SHFT 16
#define CA_GFX_CRED (0xffull << 24)
#define CA_GFX_CRED_SHFT 24
#define CA_MEM_RD_PARAM (0x3ull << 32)
#define CA_MEM_RD_PARAM_SHFT 32
#define CA_PIO_OP (1ull << 34)
#define CA_CW_ERR (1ull << 35)
/* bits 62:36 unused */
#define CA_VALID (1ull << 63)
/* ==== ca_crm_ct_error_detail_2 */
/* bits 2:0 unused */
#define CA_PKT_ADDR (0x1fffffffffffffull << 3)
#define CA_PKT_ADDR_SHFT 3
/* bits 63:56 unused */
/* ==== ca_crm_tnumto */
#define CA_CRM_TNUMTO_VAL (0xffull << 0)
#define CA_CRM_TNUMTO_VAL_SHFT 0
#define CA_CRM_TNUMTO_WR (1ull << 8)
/* bits 63:9 unused */
/* ==== ca_gart_err */
#define CA_GART_ERR_SOURCE (0x3ull << 0)
#define CA_GART_ERR_SOURCE_SHFT 0
/* bits 3:2 unused */
#define CA_GART_ERR_ADDR (0xfffffffffull << 4)
#define CA_GART_ERR_ADDR_SHFT 4
/* bits 63:40 unused */
/* ==== ca_pcierr_type */
#define CA_PCIERR_DATA (0xffffffffull << 0)
#define CA_PCIERR_DATA_SHFT 0
#define CA_PCIERR_ENB (0xfull << 32)
#define CA_PCIERR_ENB_SHFT 32
#define CA_PCIERR_CMD (0xfull << 36)
#define CA_PCIERR_CMD_SHFT 36
#define CA_PCIERR_A64 (1ull << 40)
#define CA_PCIERR_SLV_SERR (1ull << 41)
#define CA_PCIERR_SLV_WR_PERR (1ull << 42)
#define CA_PCIERR_SLV_RD_PERR (1ull << 43)
#define CA_PCIERR_MST_SERR (1ull << 44)
#define CA_PCIERR_MST_WR_PERR (1ull << 45)
#define CA_PCIERR_MST_RD_PERR (1ull << 46)
#define CA_PCIERR_MST_MABT (1ull << 47)
#define CA_PCIERR_MST_TABT (1ull << 48)
#define CA_PCIERR_MST_RETRY_TOUT (1ull << 49)
#define CA_PCIERR_TYPES \
(CA_PCIERR_A64|CA_PCIERR_SLV_SERR| \
CA_PCIERR_SLV_WR_PERR|CA_PCIERR_SLV_RD_PERR| \
CA_PCIERR_MST_SERR|CA_PCIERR_MST_WR_PERR|CA_PCIERR_MST_RD_PERR| \
CA_PCIERR_MST_MABT|CA_PCIERR_MST_TABT|CA_PCIERR_MST_RETRY_TOUT)
/* bits 63:50 unused */
/* ==== ca_pci_dma_addr_extn */
#define CA_UPPER_NODE_OFFSET (0x3full << 0)
#define CA_UPPER_NODE_OFFSET_SHFT 0
/* bits 7:6 unused */
#define CA_CHIPLET_ID (0x3ull << 8)
#define CA_CHIPLET_ID_SHFT 8
/* bits 11:10 unused */
#define CA_PCI_DMA_NODE_ID (0xffffull << 12)
#define CA_PCI_DMA_NODE_ID_SHFT 12
/* bits 27:26 unused */
#define CA_PCI_DMA_PIO_MEM_TYPE (1ull << 28)
/* bits 63:29 unused */
/* ==== ca_agp_dma_addr_extn */
/* bits 19:0 unused */
#define CA_AGP_DMA_NODE_ID (0xffffull << 20)
#define CA_AGP_DMA_NODE_ID_SHFT 20
/* bits 27:26 unused */
#define CA_AGP_DMA_PIO_MEM_TYPE (1ull << 28)
/* bits 63:29 unused */
/* ==== ca_debug_vector_sel */
#define CA_DEBUG_MN_VSEL (0xfull << 0)
#define CA_DEBUG_MN_VSEL_SHFT 0
#define CA_DEBUG_PP_VSEL (0xfull << 4)
#define CA_DEBUG_PP_VSEL_SHFT 4
#define CA_DEBUG_GW_VSEL (0xfull << 8)
#define CA_DEBUG_GW_VSEL_SHFT 8
#define CA_DEBUG_GT_VSEL (0xfull << 12)
#define CA_DEBUG_GT_VSEL_SHFT 12
#define CA_DEBUG_PD_VSEL (0xfull << 16)
#define CA_DEBUG_PD_VSEL_SHFT 16
#define CA_DEBUG_AD_VSEL (0xfull << 20)
#define CA_DEBUG_AD_VSEL_SHFT 20
#define CA_DEBUG_CX_VSEL (0xfull << 24)
#define CA_DEBUG_CX_VSEL_SHFT 24
#define CA_DEBUG_CR_VSEL (0xfull << 28)
#define CA_DEBUG_CR_VSEL_SHFT 28
#define CA_DEBUG_BA_VSEL (0xfull << 32)
#define CA_DEBUG_BA_VSEL_SHFT 32
#define CA_DEBUG_PE_VSEL (0xfull << 36)
#define CA_DEBUG_PE_VSEL_SHFT 36
#define CA_DEBUG_BO_VSEL (0xfull << 40)
#define CA_DEBUG_BO_VSEL_SHFT 40
#define CA_DEBUG_BI_VSEL (0xfull << 44)
#define CA_DEBUG_BI_VSEL_SHFT 44
#define CA_DEBUG_AS_VSEL (0xfull << 48)
#define CA_DEBUG_AS_VSEL_SHFT 48
#define CA_DEBUG_PS_VSEL (0xfull << 52)
#define CA_DEBUG_PS_VSEL_SHFT 52
#define CA_DEBUG_PM_VSEL (0xfull << 56)
#define CA_DEBUG_PM_VSEL_SHFT 56
/* bits 63:60 unused */
/* ==== ca_debug_mux_core_sel */
/* ==== ca_debug_mux_pci_sel */
#define CA_DEBUG_MSEL0 (0x7ull << 0)
#define CA_DEBUG_MSEL0_SHFT 0
/* bit 3 unused */
#define CA_DEBUG_NSEL0 (0x7ull << 4)
#define CA_DEBUG_NSEL0_SHFT 4
/* bit 7 unused */
#define CA_DEBUG_MSEL1 (0x7ull << 8)
#define CA_DEBUG_MSEL1_SHFT 8
/* bit 11 unused */
#define CA_DEBUG_NSEL1 (0x7ull << 12)
#define CA_DEBUG_NSEL1_SHFT 12
/* bit 15 unused */
#define CA_DEBUG_MSEL2 (0x7ull << 16)
#define CA_DEBUG_MSEL2_SHFT 16
/* bit 19 unused */
#define CA_DEBUG_NSEL2 (0x7ull << 20)
#define CA_DEBUG_NSEL2_SHFT 20
/* bit 23 unused */
#define CA_DEBUG_MSEL3 (0x7ull << 24)
#define CA_DEBUG_MSEL3_SHFT 24
/* bit 27 unused */
#define CA_DEBUG_NSEL3 (0x7ull << 28)
#define CA_DEBUG_NSEL3_SHFT 28
/* bit 31 unused */
#define CA_DEBUG_MSEL4 (0x7ull << 32)
#define CA_DEBUG_MSEL4_SHFT 32
/* bit 35 unused */
#define CA_DEBUG_NSEL4 (0x7ull << 36)
#define CA_DEBUG_NSEL4_SHFT 36
/* bit 39 unused */
#define CA_DEBUG_MSEL5 (0x7ull << 40)
#define CA_DEBUG_MSEL5_SHFT 40
/* bit 43 unused */
#define CA_DEBUG_NSEL5 (0x7ull << 44)
#define CA_DEBUG_NSEL5_SHFT 44
/* bit 47 unused */
#define CA_DEBUG_MSEL6 (0x7ull << 48)
#define CA_DEBUG_MSEL6_SHFT 48
/* bit 51 unused */
#define CA_DEBUG_NSEL6 (0x7ull << 52)
#define CA_DEBUG_NSEL6_SHFT 52
/* bit 55 unused */
#define CA_DEBUG_MSEL7 (0x7ull << 56)
#define CA_DEBUG_MSEL7_SHFT 56
/* bit 59 unused */
#define CA_DEBUG_NSEL7 (0x7ull << 60)
#define CA_DEBUG_NSEL7_SHFT 60
/* bit 63 unused */
/* ==== ca_debug_domain_sel */
#define CA_DEBUG_DOMAIN_L (1ull << 0)
#define CA_DEBUG_DOMAIN_H (1ull << 1)
/* bits 63:2 unused */
/* ==== ca_gart_ptr_table */
#define CA_GART_PTR_VAL (1ull << 0)
/* bits 11:1 unused */
#define CA_GART_PTR_ADDR (0xfffffffffffull << 12)
#define CA_GART_PTR_ADDR_SHFT 12
/* bits 63:56 unused */
/* ==== ca_gart_tlb_addr[0-7] */
#define CA_GART_TLB_ADDR (0xffffffffffffffull << 0)
#define CA_GART_TLB_ADDR_SHFT 0
/* bits 62:56 unused */
#define CA_GART_TLB_ENTRY_VAL (1ull << 63)
/*
* PIO address space ranges for TIO:CA
*/
/* CA internal registers */
#define CA_PIO_ADMIN 0x00000000
#define CA_PIO_ADMIN_LEN 0x00010000
/* GFX Write Buffer - Diagnostics */
#define CA_PIO_GFX 0x00010000
#define CA_PIO_GFX_LEN 0x00010000
/* AGP DMA Write Buffer - Diagnostics */
#define CA_PIO_AGP_DMAWRITE 0x00020000
#define CA_PIO_AGP_DMAWRITE_LEN 0x00010000
/* AGP DMA READ Buffer - Diagnostics */
#define CA_PIO_AGP_DMAREAD 0x00030000
#define CA_PIO_AGP_DMAREAD_LEN 0x00010000
/* PCI Config Type 0 */
#define CA_PIO_PCI_TYPE0_CONFIG 0x01000000
#define CA_PIO_PCI_TYPE0_CONFIG_LEN 0x01000000
/* PCI Config Type 1 */
#define CA_PIO_PCI_TYPE1_CONFIG 0x02000000
#define CA_PIO_PCI_TYPE1_CONFIG_LEN 0x01000000
/* PCI I/O Cycles - mapped to PCI Address 0x00000000-0x04ffffff */
#define CA_PIO_PCI_IO 0x03000000
#define CA_PIO_PCI_IO_LEN 0x05000000
/* PCI MEM Cycles - mapped to PCI with CA_PIO_ADDR_OFFSET of ca_control1 */
/* use Fast Write if enabled and coretalk packet type is a GFX request */
#define CA_PIO_PCI_MEM_OFFSET 0x08000000
#define CA_PIO_PCI_MEM_OFFSET_LEN 0x08000000
/* PCI MEM Cycles - mapped to PCI Address 0x00000000-0xbfffffff */
/* use Fast Write if enabled and coretalk packet type is a GFX request */
#define CA_PIO_PCI_MEM 0x40000000
#define CA_PIO_PCI_MEM_LEN 0xc0000000
/*
* DMA space
*
* The CA aperature (i.e. bus address range) mapped by the GART is segmented into
* two parts. The lower portion of the aperature is used for mapping 32 bit
* PCI addresses, which are managed by the dma interfaces in this file. The
* upper portion of the aperature is used for mapping 48 bit AGP addresses.
* The AGP portion of the aperature is managed by the agpgart_be.c driver
* in drivers/linux/agp. There are ca-specific hooks in that driver to
* manipulate the gart, but management of the AGP portion of the aperature
* is the responsibility of that driver.
*
* CA allows three main types of DMA mapping:
*
* PCI 64-bit Managed by this driver
* PCI 32-bit Managed by this driver
* AGP 48-bit Managed by hooks in the /dev/agpgart driver
*
* All of the above can optionally be remapped through the GART. The following
* table lists the combinations of addressing types and GART remapping that
* is currently supported by the driver (h/w supports all, s/w limits this):
*
* PCI64 PCI32 AGP48
* GART no yes yes
* Direct yes yes no
*
* GART remapping of PCI64 is not done because there is no need to. The
* 64 bit PCI address holds all of the information necessary to target any
* memory in the system.
*
* AGP48 is always mapped through the GART. Management of the AGP48 portion
* of the aperature is the responsibility of code in the agpgart_be driver.
*
* The non-64 bit bus address space will currently be partitioned like this:
*
* 0xffff_ffff_ffff +--------
* | AGP48 direct
* | Space managed by this driver
* CA_AGP_DIRECT_BASE +--------
* | AGP GART mapped (gfx aperature)
* | Space managed by /dev/agpgart driver
* | This range is exposed to the agpgart
* | driver as the "graphics aperature"
* CA_AGP_MAPPED_BASE +-----
* | PCI GART mapped
* | Space managed by this driver
* CA_PCI32_MAPPED_BASE +----
* | PCI32 direct
* | Space managed by this driver
* 0xC000_0000 +--------
* (CA_PCI32_DIRECT_BASE)
*
* The bus address range CA_PCI32_MAPPED_BASE through CA_AGP_DIRECT_BASE
* is what we call the CA aperature. Addresses falling in this range will
* be remapped using the GART.
*
* The bus address range CA_AGP_MAPPED_BASE through CA_AGP_DIRECT_BASE
* is what we call the graphics aperature. This is a subset of the CA
* aperature and is under the control of the agpgart_be driver.
*
* CA_PCI32_MAPPED_BASE, CA_AGP_MAPPED_BASE, and CA_AGP_DIRECT_BASE are
* somewhat arbitrary values. The known constraints on choosing these is:
*
* 1) CA_AGP_DIRECT_BASE-CA_PCI32_MAPPED_BASE+1 (the CA aperature size)
* must be one of the values supported by the ca_gart_aperature register.
* Currently valid values are: 4MB through 4096MB in powers of 2 increments
*
* 2) CA_AGP_DIRECT_BASE-CA_AGP_MAPPED_BASE+1 (the gfx aperature size)
* must be in MB units since that's what the agpgart driver assumes.
*/
/*
* Define Bus DMA ranges. These are configurable (see constraints above)
* and will probably need tuning based on experience.
*/
/*
* 11/24/03
* CA has an addressing glitch w.r.t. PCI direct 32 bit DMA that makes it
* generally unusable. The problem is that for PCI direct 32
* DMA's, all 32 bits of the bus address are used to form the lower 32 bits
* of the coretalk address, and coretalk bits 38:32 come from a register.
* Since only PCI bus addresses 0xC0000000-0xFFFFFFFF (1GB) are available
* for DMA (the rest is allocated to PIO), host node addresses need to be
* such that their lower 32 bits fall in the 0xC0000000-0xffffffff range
* as well. So there can be no PCI32 direct DMA below 3GB!! For this
* reason we set the CA_PCI32_DIRECT_SIZE to 0 which essentially makes
* tioca_dma_direct32() a noop but preserves the code flow should this issue
* be fixed in a respin.
*
* For now, all PCI32 DMA's must be mapped through the GART.
*/
#define CA_PCI32_DIRECT_BASE 0xC0000000UL /* BASE not configurable */
#define CA_PCI32_DIRECT_SIZE 0x00000000UL /* 0 MB */
#define CA_PCI32_MAPPED_BASE 0xC0000000UL
#define CA_PCI32_MAPPED_SIZE 0x40000000UL /* 1GB */
#define CA_AGP_MAPPED_BASE 0x80000000UL
#define CA_AGP_MAPPED_SIZE 0x40000000UL /* 1GB */
#define CA_AGP_DIRECT_BASE 0x40000000UL /* 2GB */
#define CA_AGP_DIRECT_SIZE 0x40000000UL
#define CA_APERATURE_BASE (CA_AGP_MAPPED_BASE)
#define CA_APERATURE_SIZE (CA_AGP_MAPPED_SIZE+CA_PCI32_MAPPED_SIZE)
#endif /* _ASM_IA64_SN_TIO_TIOCA_H */

@ -1,207 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2003-2005 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_TIO_CA_AGP_PROVIDER_H
#define _ASM_IA64_SN_TIO_CA_AGP_PROVIDER_H
#include <asm/sn/tioca.h>
/*
* WAR enables
* Defines for individual WARs. Each is a bitmask of applicable
* part revision numbers. (1 << 1) == rev A, (1 << 2) == rev B,
* (3 << 1) == (rev A or rev B), etc
*/
#define TIOCA_WAR_ENABLED(pv, tioca_common) \
((1 << tioca_common->ca_rev) & pv)
/* TIO:ICE:FRZ:Freezer loses a PIO data ucred on PIO RD RSP with CW error */
#define PV907908 (1 << 1)
/* ATI config space problems after BIOS execution starts */
#define PV908234 (1 << 1)
/* CA:AGPDMA write request data mismatch with ABC1CL merge */
#define PV895469 (1 << 1)
/* TIO:CA TLB invalidate of written GART entries possibly not occurring in CA*/
#define PV910244 (1 << 1)
struct tioca_dmamap {
struct list_head cad_list; /* headed by ca_list */
dma_addr_t cad_dma_addr; /* Linux dma handle */
uint cad_gart_entry; /* start entry in ca_gart_pagemap */
uint cad_gart_size; /* #entries for this map */
};
/*
* Kernel only fields. Prom may look at this stuff for debugging only.
* Access this structure through the ca_kernel_private ptr.
*/
struct tioca_common;
struct tioca_kernel {
struct tioca_common *ca_common; /* tioca this belongs to */
struct list_head ca_list; /* list of all ca's */
struct list_head ca_dmamaps;
spinlock_t ca_lock; /* Kernel lock */
cnodeid_t ca_closest_node;
struct list_head *ca_devices; /* bus->devices */
/*
* General GART stuff
*/
u64 ca_ap_size; /* size of aperature in bytes */
u32 ca_gart_entries; /* # u64 entries in gart */
u32 ca_ap_pagesize; /* aperature page size in bytes */
u64 ca_ap_bus_base; /* bus address of CA aperature */
u64 ca_gart_size; /* gart size in bytes */
u64 *ca_gart; /* gart table vaddr */
u64 ca_gart_coretalk_addr; /* gart coretalk addr */
u8 ca_gart_iscoherent; /* used in tioca_tlbflush */
/* PCI GART convenience values */
u64 ca_pciap_base; /* pci aperature bus base address */
u64 ca_pciap_size; /* pci aperature size (bytes) */
u64 ca_pcigart_base; /* gfx GART bus base address */
u64 *ca_pcigart; /* gfx GART vm address */
u32 ca_pcigart_entries;
u32 ca_pcigart_start; /* PCI start index in ca_gart */
void *ca_pcigart_pagemap;
/* AGP GART convenience values */
u64 ca_gfxap_base; /* gfx aperature bus base address */
u64 ca_gfxap_size; /* gfx aperature size (bytes) */
u64 ca_gfxgart_base; /* gfx GART bus base address */
u64 *ca_gfxgart; /* gfx GART vm address */
u32 ca_gfxgart_entries;
u32 ca_gfxgart_start; /* agpgart start index in ca_gart */
};
/*
* Common tioca info shared between kernel and prom
*
* DO NOT CHANGE THIS STRUCT WITHOUT MAKING CORRESPONDING CHANGES
* TO THE PROM VERSION.
*/
struct tioca_common {
struct pcibus_bussoft ca_common; /* common pciio header */
u32 ca_rev;
u32 ca_closest_nasid;
u64 ca_prom_private;
u64 ca_kernel_private;
};
/**
* tioca_paddr_to_gart - Convert an SGI coretalk address to a CA GART entry
* @paddr: page address to convert
*
* Convert a system [coretalk] address to a GART entry. GART entries are
* formed using the following:
*
* data = ( (1<<63) | ( (REMAP_NODE_ID << 40) | (MD_CHIPLET_ID << 38) |
* (REMAP_SYS_ADDR) ) >> 12 )
*
* DATA written to 1 GART TABLE Entry in system memory is remapped system
* addr for 1 page
*
* The data is for coretalk address format right shifted 12 bits with a
* valid bit.
*
* GART_TABLE_ENTRY [ 25:0 ] -- REMAP_SYS_ADDRESS[37:12].
* GART_TABLE_ENTRY [ 27:26 ] -- SHUB MD chiplet id.
* GART_TABLE_ENTRY [ 41:28 ] -- REMAP_NODE_ID.
* GART_TABLE_ENTRY [ 63 ] -- Valid Bit
*/
static inline u64
tioca_paddr_to_gart(unsigned long paddr)
{
/*
* We are assuming right now that paddr already has the correct
* format since the address from xtalk_dmaXXX should already have
* NODE_ID, CHIPLET_ID, and SYS_ADDR in the correct locations.
*/
return ((paddr) >> 12) | (1UL << 63);
}
/**
* tioca_physpage_to_gart - Map a host physical page for SGI CA based DMA
* @page_addr: system page address to map
*/
static inline unsigned long
tioca_physpage_to_gart(u64 page_addr)
{
u64 coretalk_addr;
coretalk_addr = PHYS_TO_TIODMA(page_addr);
if (!coretalk_addr) {
return 0;
}
return tioca_paddr_to_gart(coretalk_addr);
}
/**
* tioca_tlbflush - invalidate cached SGI CA GART TLB entries
* @tioca_kernel: CA context
*
* Invalidate tlb entries for a given CA GART. Main complexity is to account
* for revA bug.
*/
static inline void
tioca_tlbflush(struct tioca_kernel *tioca_kernel)
{
volatile u64 tmp;
volatile struct tioca __iomem *ca_base;
struct tioca_common *tioca_common;
tioca_common = tioca_kernel->ca_common;
ca_base = (struct tioca __iomem *)tioca_common->ca_common.bs_base;
/*
* Explicit flushes not needed if GART is in cached mode
*/
if (tioca_kernel->ca_gart_iscoherent) {
if (TIOCA_WAR_ENABLED(PV910244, tioca_common)) {
/*
* PV910244: RevA CA needs explicit flushes.
* Need to put GART into uncached mode before
* flushing otherwise the explicit flush is ignored.
*
* Alternate WAR would be to leave GART cached and
* touch every CL aligned GART entry.
*/
__sn_clrq_relaxed(&ca_base->ca_control2, CA_GART_MEM_PARAM);
__sn_setq_relaxed(&ca_base->ca_control2, CA_GART_FLUSH_TLB);
__sn_setq_relaxed(&ca_base->ca_control2,
(0x2ull << CA_GART_MEM_PARAM_SHFT));
tmp = __sn_readq_relaxed(&ca_base->ca_control2);
}
return;
}
/*
* Gart in uncached mode ... need an explicit flush.
*/
__sn_setq_relaxed(&ca_base->ca_control2, CA_GART_FLUSH_TLB);
tmp = __sn_readq_relaxed(&ca_base->ca_control2);
}
extern u32 tioca_gart_found;
extern struct list_head tioca_list;
extern int tioca_init_provider(void);
extern void tioca_fastwrite_enable(struct tioca_kernel *tioca_kern);
#endif /* _ASM_IA64_SN_TIO_CA_AGP_PROVIDER_H */

@ -1,760 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2003-2005 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef __ASM_IA64_SN_TIOCE_H__
#define __ASM_IA64_SN_TIOCE_H__
/* CE ASIC part & mfgr information */
#define TIOCE_PART_NUM 0xCE00
#define TIOCE_SRC_ID 0x01
#define TIOCE_REV_A 0x1
/* CE Virtual PPB Vendor/Device IDs */
#define CE_VIRT_PPB_VENDOR_ID 0x10a9
#define CE_VIRT_PPB_DEVICE_ID 0x4002
/* CE Host Bridge Vendor/Device IDs */
#define CE_HOST_BRIDGE_VENDOR_ID 0x10a9
#define CE_HOST_BRIDGE_DEVICE_ID 0x4001
#define TIOCE_NUM_M40_ATES 4096
#define TIOCE_NUM_M3240_ATES 2048
#define TIOCE_NUM_PORTS 2
/*
* Register layout for TIOCE. MMR offsets are shown at the far right of the
* structure definition.
*/
typedef volatile struct tioce {
/*
* ADMIN : Administration Registers
*/
u64 ce_adm_id; /* 0x000000 */
u64 ce_pad_000008; /* 0x000008 */
u64 ce_adm_dyn_credit_status; /* 0x000010 */
u64 ce_adm_last_credit_status; /* 0x000018 */
u64 ce_adm_credit_limit; /* 0x000020 */
u64 ce_adm_force_credit; /* 0x000028 */
u64 ce_adm_control; /* 0x000030 */
u64 ce_adm_mmr_chn_timeout; /* 0x000038 */
u64 ce_adm_ssp_ure_timeout; /* 0x000040 */
u64 ce_adm_ssp_dre_timeout; /* 0x000048 */
u64 ce_adm_ssp_debug_sel; /* 0x000050 */
u64 ce_adm_int_status; /* 0x000058 */
u64 ce_adm_int_status_alias; /* 0x000060 */
u64 ce_adm_int_mask; /* 0x000068 */
u64 ce_adm_int_pending; /* 0x000070 */
u64 ce_adm_force_int; /* 0x000078 */
u64 ce_adm_ure_ups_buf_barrier_flush; /* 0x000080 */
u64 ce_adm_int_dest[15]; /* 0x000088 -- 0x0000F8 */
u64 ce_adm_error_summary; /* 0x000100 */
u64 ce_adm_error_summary_alias; /* 0x000108 */
u64 ce_adm_error_mask; /* 0x000110 */
u64 ce_adm_first_error; /* 0x000118 */
u64 ce_adm_error_overflow; /* 0x000120 */
u64 ce_adm_error_overflow_alias; /* 0x000128 */
u64 ce_pad_000130[2]; /* 0x000130 -- 0x000138 */
u64 ce_adm_tnum_error; /* 0x000140 */
u64 ce_adm_mmr_err_detail; /* 0x000148 */
u64 ce_adm_msg_sram_perr_detail; /* 0x000150 */
u64 ce_adm_bap_sram_perr_detail; /* 0x000158 */
u64 ce_adm_ce_sram_perr_detail; /* 0x000160 */
u64 ce_adm_ce_credit_oflow_detail; /* 0x000168 */
u64 ce_adm_tx_link_idle_max_timer; /* 0x000170 */
u64 ce_adm_pcie_debug_sel; /* 0x000178 */
u64 ce_pad_000180[16]; /* 0x000180 -- 0x0001F8 */
u64 ce_adm_pcie_debug_sel_top; /* 0x000200 */
u64 ce_adm_pcie_debug_lat_sel_lo_top; /* 0x000208 */
u64 ce_adm_pcie_debug_lat_sel_hi_top; /* 0x000210 */
u64 ce_adm_pcie_debug_trig_sel_top; /* 0x000218 */
u64 ce_adm_pcie_debug_trig_lat_sel_lo_top; /* 0x000220 */
u64 ce_adm_pcie_debug_trig_lat_sel_hi_top; /* 0x000228 */
u64 ce_adm_pcie_trig_compare_top; /* 0x000230 */
u64 ce_adm_pcie_trig_compare_en_top; /* 0x000238 */
u64 ce_adm_ssp_debug_sel_top; /* 0x000240 */
u64 ce_adm_ssp_debug_lat_sel_lo_top; /* 0x000248 */
u64 ce_adm_ssp_debug_lat_sel_hi_top; /* 0x000250 */
u64 ce_adm_ssp_debug_trig_sel_top; /* 0x000258 */
u64 ce_adm_ssp_debug_trig_lat_sel_lo_top; /* 0x000260 */
u64 ce_adm_ssp_debug_trig_lat_sel_hi_top; /* 0x000268 */
u64 ce_adm_ssp_trig_compare_top; /* 0x000270 */
u64 ce_adm_ssp_trig_compare_en_top; /* 0x000278 */
u64 ce_pad_000280[48]; /* 0x000280 -- 0x0003F8 */
u64 ce_adm_bap_ctrl; /* 0x000400 */
u64 ce_pad_000408[127]; /* 0x000408 -- 0x0007F8 */
u64 ce_msg_buf_data63_0[35]; /* 0x000800 -- 0x000918 */
u64 ce_pad_000920[29]; /* 0x000920 -- 0x0009F8 */
u64 ce_msg_buf_data127_64[35]; /* 0x000A00 -- 0x000B18 */
u64 ce_pad_000B20[29]; /* 0x000B20 -- 0x000BF8 */
u64 ce_msg_buf_parity[35]; /* 0x000C00 -- 0x000D18 */
u64 ce_pad_000D20[29]; /* 0x000D20 -- 0x000DF8 */
u64 ce_pad_000E00[576]; /* 0x000E00 -- 0x001FF8 */
/*
* LSI : LSI's PCI Express Link Registers (Link#1 and Link#2)
* Link#1 MMRs start at 0x002000, Link#2 MMRs at 0x003000
* NOTE: the comment offsets at far right: let 'z' = {2 or 3}
*/
#define ce_lsi(link_num) ce_lsi[link_num-1]
struct ce_lsi_reg {
u64 ce_lsi_lpu_id; /* 0x00z000 */
u64 ce_lsi_rst; /* 0x00z008 */
u64 ce_lsi_dbg_stat; /* 0x00z010 */
u64 ce_lsi_dbg_cfg; /* 0x00z018 */
u64 ce_lsi_ltssm_ctrl; /* 0x00z020 */
u64 ce_lsi_lk_stat; /* 0x00z028 */
u64 ce_pad_00z030[2]; /* 0x00z030 -- 0x00z038 */
u64 ce_lsi_int_and_stat; /* 0x00z040 */
u64 ce_lsi_int_mask; /* 0x00z048 */
u64 ce_pad_00z050[22]; /* 0x00z050 -- 0x00z0F8 */
u64 ce_lsi_lk_perf_cnt_sel; /* 0x00z100 */
u64 ce_pad_00z108; /* 0x00z108 */
u64 ce_lsi_lk_perf_cnt_ctrl; /* 0x00z110 */
u64 ce_pad_00z118; /* 0x00z118 */
u64 ce_lsi_lk_perf_cnt1; /* 0x00z120 */
u64 ce_lsi_lk_perf_cnt1_test; /* 0x00z128 */
u64 ce_lsi_lk_perf_cnt2; /* 0x00z130 */
u64 ce_lsi_lk_perf_cnt2_test; /* 0x00z138 */
u64 ce_pad_00z140[24]; /* 0x00z140 -- 0x00z1F8 */
u64 ce_lsi_lk_lyr_cfg; /* 0x00z200 */
u64 ce_lsi_lk_lyr_status; /* 0x00z208 */
u64 ce_lsi_lk_lyr_int_stat; /* 0x00z210 */
u64 ce_lsi_lk_ly_int_stat_test; /* 0x00z218 */
u64 ce_lsi_lk_ly_int_stat_mask; /* 0x00z220 */
u64 ce_pad_00z228[3]; /* 0x00z228 -- 0x00z238 */
u64 ce_lsi_fc_upd_ctl; /* 0x00z240 */
u64 ce_pad_00z248[3]; /* 0x00z248 -- 0x00z258 */
u64 ce_lsi_flw_ctl_upd_to_timer; /* 0x00z260 */
u64 ce_lsi_flw_ctl_upd_timer0; /* 0x00z268 */
u64 ce_lsi_flw_ctl_upd_timer1; /* 0x00z270 */
u64 ce_pad_00z278[49]; /* 0x00z278 -- 0x00z3F8 */
u64 ce_lsi_freq_nak_lat_thrsh; /* 0x00z400 */
u64 ce_lsi_ack_nak_lat_tmr; /* 0x00z408 */
u64 ce_lsi_rply_tmr_thr; /* 0x00z410 */
u64 ce_lsi_rply_tmr; /* 0x00z418 */
u64 ce_lsi_rply_num_stat; /* 0x00z420 */
u64 ce_lsi_rty_buf_max_addr; /* 0x00z428 */
u64 ce_lsi_rty_fifo_ptr; /* 0x00z430 */
u64 ce_lsi_rty_fifo_rd_wr_ptr; /* 0x00z438 */
u64 ce_lsi_rty_fifo_cred; /* 0x00z440 */
u64 ce_lsi_seq_cnt; /* 0x00z448 */
u64 ce_lsi_ack_sent_seq_num; /* 0x00z450 */
u64 ce_lsi_seq_cnt_fifo_max_addr; /* 0x00z458 */
u64 ce_lsi_seq_cnt_fifo_ptr; /* 0x00z460 */
u64 ce_lsi_seq_cnt_rd_wr_ptr; /* 0x00z468 */
u64 ce_lsi_tx_lk_ts_ctl; /* 0x00z470 */
u64 ce_pad_00z478; /* 0x00z478 */
u64 ce_lsi_mem_addr_ctl; /* 0x00z480 */
u64 ce_lsi_mem_d_ld0; /* 0x00z488 */
u64 ce_lsi_mem_d_ld1; /* 0x00z490 */
u64 ce_lsi_mem_d_ld2; /* 0x00z498 */
u64 ce_lsi_mem_d_ld3; /* 0x00z4A0 */
u64 ce_lsi_mem_d_ld4; /* 0x00z4A8 */
u64 ce_pad_00z4B0[2]; /* 0x00z4B0 -- 0x00z4B8 */
u64 ce_lsi_rty_d_cnt; /* 0x00z4C0 */
u64 ce_lsi_seq_buf_cnt; /* 0x00z4C8 */
u64 ce_lsi_seq_buf_bt_d; /* 0x00z4D0 */
u64 ce_pad_00z4D8; /* 0x00z4D8 */
u64 ce_lsi_ack_lat_thr; /* 0x00z4E0 */
u64 ce_pad_00z4E8[3]; /* 0x00z4E8 -- 0x00z4F8 */
u64 ce_lsi_nxt_rcv_seq_1_cntr; /* 0x00z500 */
u64 ce_lsi_unsp_dllp_rcvd; /* 0x00z508 */
u64 ce_lsi_rcv_lk_ts_ctl; /* 0x00z510 */
u64 ce_pad_00z518[29]; /* 0x00z518 -- 0x00z5F8 */
u64 ce_lsi_phy_lyr_cfg; /* 0x00z600 */
u64 ce_pad_00z608; /* 0x00z608 */
u64 ce_lsi_phy_lyr_int_stat; /* 0x00z610 */
u64 ce_lsi_phy_lyr_int_stat_test; /* 0x00z618 */
u64 ce_lsi_phy_lyr_int_mask; /* 0x00z620 */
u64 ce_pad_00z628[11]; /* 0x00z628 -- 0x00z678 */
u64 ce_lsi_rcv_phy_cfg; /* 0x00z680 */
u64 ce_lsi_rcv_phy_stat1; /* 0x00z688 */
u64 ce_lsi_rcv_phy_stat2; /* 0x00z690 */
u64 ce_lsi_rcv_phy_stat3; /* 0x00z698 */
u64 ce_lsi_rcv_phy_int_stat; /* 0x00z6A0 */
u64 ce_lsi_rcv_phy_int_stat_test; /* 0x00z6A8 */
u64 ce_lsi_rcv_phy_int_mask; /* 0x00z6B0 */
u64 ce_pad_00z6B8[9]; /* 0x00z6B8 -- 0x00z6F8 */
u64 ce_lsi_tx_phy_cfg; /* 0x00z700 */
u64 ce_lsi_tx_phy_stat; /* 0x00z708 */
u64 ce_lsi_tx_phy_int_stat; /* 0x00z710 */
u64 ce_lsi_tx_phy_int_stat_test; /* 0x00z718 */
u64 ce_lsi_tx_phy_int_mask; /* 0x00z720 */
u64 ce_lsi_tx_phy_stat2; /* 0x00z728 */
u64 ce_pad_00z730[10]; /* 0x00z730 -- 0x00z77F */
u64 ce_lsi_ltssm_cfg1; /* 0x00z780 */
u64 ce_lsi_ltssm_cfg2; /* 0x00z788 */
u64 ce_lsi_ltssm_cfg3; /* 0x00z790 */
u64 ce_lsi_ltssm_cfg4; /* 0x00z798 */
u64 ce_lsi_ltssm_cfg5; /* 0x00z7A0 */
u64 ce_lsi_ltssm_stat1; /* 0x00z7A8 */
u64 ce_lsi_ltssm_stat2; /* 0x00z7B0 */
u64 ce_lsi_ltssm_int_stat; /* 0x00z7B8 */
u64 ce_lsi_ltssm_int_stat_test; /* 0x00z7C0 */
u64 ce_lsi_ltssm_int_mask; /* 0x00z7C8 */
u64 ce_lsi_ltssm_stat_wr_en; /* 0x00z7D0 */
u64 ce_pad_00z7D8[5]; /* 0x00z7D8 -- 0x00z7F8 */
u64 ce_lsi_gb_cfg1; /* 0x00z800 */
u64 ce_lsi_gb_cfg2; /* 0x00z808 */
u64 ce_lsi_gb_cfg3; /* 0x00z810 */
u64 ce_lsi_gb_cfg4; /* 0x00z818 */
u64 ce_lsi_gb_stat; /* 0x00z820 */
u64 ce_lsi_gb_int_stat; /* 0x00z828 */
u64 ce_lsi_gb_int_stat_test; /* 0x00z830 */
u64 ce_lsi_gb_int_mask; /* 0x00z838 */
u64 ce_lsi_gb_pwr_dn1; /* 0x00z840 */
u64 ce_lsi_gb_pwr_dn2; /* 0x00z848 */
u64 ce_pad_00z850[246]; /* 0x00z850 -- 0x00zFF8 */
} ce_lsi[2];
u64 ce_pad_004000[10]; /* 0x004000 -- 0x004048 */
/*
* CRM: Coretalk Receive Module Registers
*/
u64 ce_crm_debug_mux; /* 0x004050 */
u64 ce_pad_004058; /* 0x004058 */
u64 ce_crm_ssp_err_cmd_wrd; /* 0x004060 */
u64 ce_crm_ssp_err_addr; /* 0x004068 */
u64 ce_crm_ssp_err_syn; /* 0x004070 */
u64 ce_pad_004078[499]; /* 0x004078 -- 0x005008 */
/*
* CXM: Coretalk Xmit Module Registers
*/
u64 ce_cxm_dyn_credit_status; /* 0x005010 */
u64 ce_cxm_last_credit_status; /* 0x005018 */
u64 ce_cxm_credit_limit; /* 0x005020 */
u64 ce_cxm_force_credit; /* 0x005028 */
u64 ce_cxm_disable_bypass; /* 0x005030 */
u64 ce_pad_005038[3]; /* 0x005038 -- 0x005048 */
u64 ce_cxm_debug_mux; /* 0x005050 */
u64 ce_pad_005058[501]; /* 0x005058 -- 0x005FF8 */
/*
* DTL: Downstream Transaction Layer Regs (Link#1 and Link#2)
* DTL: Link#1 MMRs start at 0x006000, Link#2 MMRs at 0x008000
* DTL: the comment offsets at far right: let 'y' = {6 or 8}
*
* UTL: Upstream Transaction Layer Regs (Link#1 and Link#2)
* UTL: Link#1 MMRs start at 0x007000, Link#2 MMRs at 0x009000
* UTL: the comment offsets at far right: let 'z' = {7 or 9}
*/
#define ce_dtl(link_num) ce_dtl_utl[link_num-1]
#define ce_utl(link_num) ce_dtl_utl[link_num-1]
struct ce_dtl_utl_reg {
/* DTL */
u64 ce_dtl_dtdr_credit_limit; /* 0x00y000 */
u64 ce_dtl_dtdr_credit_force; /* 0x00y008 */
u64 ce_dtl_dyn_credit_status; /* 0x00y010 */
u64 ce_dtl_dtl_last_credit_stat; /* 0x00y018 */
u64 ce_dtl_dtl_ctrl; /* 0x00y020 */
u64 ce_pad_00y028[5]; /* 0x00y028 -- 0x00y048 */
u64 ce_dtl_debug_sel; /* 0x00y050 */
u64 ce_pad_00y058[501]; /* 0x00y058 -- 0x00yFF8 */
/* UTL */
u64 ce_utl_utl_ctrl; /* 0x00z000 */
u64 ce_utl_debug_sel; /* 0x00z008 */
u64 ce_pad_00z010[510]; /* 0x00z010 -- 0x00zFF8 */
} ce_dtl_utl[2];
u64 ce_pad_00A000[514]; /* 0x00A000 -- 0x00B008 */
/*
* URE: Upstream Request Engine
*/
u64 ce_ure_dyn_credit_status; /* 0x00B010 */
u64 ce_ure_last_credit_status; /* 0x00B018 */
u64 ce_ure_credit_limit; /* 0x00B020 */
u64 ce_pad_00B028; /* 0x00B028 */
u64 ce_ure_control; /* 0x00B030 */
u64 ce_ure_status; /* 0x00B038 */
u64 ce_pad_00B040[2]; /* 0x00B040 -- 0x00B048 */
u64 ce_ure_debug_sel; /* 0x00B050 */
u64 ce_ure_pcie_debug_sel; /* 0x00B058 */
u64 ce_ure_ssp_err_cmd_wrd; /* 0x00B060 */
u64 ce_ure_ssp_err_addr; /* 0x00B068 */
u64 ce_ure_page_map; /* 0x00B070 */
u64 ce_ure_dir_map[TIOCE_NUM_PORTS]; /* 0x00B078 */
u64 ce_ure_pipe_sel1; /* 0x00B088 */
u64 ce_ure_pipe_mask1; /* 0x00B090 */
u64 ce_ure_pipe_sel2; /* 0x00B098 */
u64 ce_ure_pipe_mask2; /* 0x00B0A0 */
u64 ce_ure_pcie1_credits_sent; /* 0x00B0A8 */
u64 ce_ure_pcie1_credits_used; /* 0x00B0B0 */
u64 ce_ure_pcie1_credit_limit; /* 0x00B0B8 */
u64 ce_ure_pcie2_credits_sent; /* 0x00B0C0 */
u64 ce_ure_pcie2_credits_used; /* 0x00B0C8 */
u64 ce_ure_pcie2_credit_limit; /* 0x00B0D0 */
u64 ce_ure_pcie_force_credit; /* 0x00B0D8 */
u64 ce_ure_rd_tnum_val; /* 0x00B0E0 */
u64 ce_ure_rd_tnum_rsp_rcvd; /* 0x00B0E8 */
u64 ce_ure_rd_tnum_esent_timer; /* 0x00B0F0 */
u64 ce_ure_rd_tnum_error; /* 0x00B0F8 */
u64 ce_ure_rd_tnum_first_cl; /* 0x00B100 */
u64 ce_ure_rd_tnum_link_buf; /* 0x00B108 */
u64 ce_ure_wr_tnum_val; /* 0x00B110 */
u64 ce_ure_sram_err_addr0; /* 0x00B118 */
u64 ce_ure_sram_err_addr1; /* 0x00B120 */
u64 ce_ure_sram_err_addr2; /* 0x00B128 */
u64 ce_ure_sram_rd_addr0; /* 0x00B130 */
u64 ce_ure_sram_rd_addr1; /* 0x00B138 */
u64 ce_ure_sram_rd_addr2; /* 0x00B140 */
u64 ce_ure_sram_wr_addr0; /* 0x00B148 */
u64 ce_ure_sram_wr_addr1; /* 0x00B150 */
u64 ce_ure_sram_wr_addr2; /* 0x00B158 */
u64 ce_ure_buf_flush10; /* 0x00B160 */
u64 ce_ure_buf_flush11; /* 0x00B168 */
u64 ce_ure_buf_flush12; /* 0x00B170 */
u64 ce_ure_buf_flush13; /* 0x00B178 */
u64 ce_ure_buf_flush20; /* 0x00B180 */
u64 ce_ure_buf_flush21; /* 0x00B188 */
u64 ce_ure_buf_flush22; /* 0x00B190 */
u64 ce_ure_buf_flush23; /* 0x00B198 */
u64 ce_ure_pcie_control1; /* 0x00B1A0 */
u64 ce_ure_pcie_control2; /* 0x00B1A8 */
u64 ce_pad_00B1B0[458]; /* 0x00B1B0 -- 0x00BFF8 */
/* Upstream Data Buffer, Port1 */
struct ce_ure_maint_ups_dat1_data {
u64 data63_0[512]; /* 0x00C000 -- 0x00CFF8 */
u64 data127_64[512]; /* 0x00D000 -- 0x00DFF8 */
u64 parity[512]; /* 0x00E000 -- 0x00EFF8 */
} ce_ure_maint_ups_dat1;
/* Upstream Header Buffer, Port1 */
struct ce_ure_maint_ups_hdr1_data {
u64 data63_0[512]; /* 0x00F000 -- 0x00FFF8 */
u64 data127_64[512]; /* 0x010000 -- 0x010FF8 */
u64 parity[512]; /* 0x011000 -- 0x011FF8 */
} ce_ure_maint_ups_hdr1;
/* Upstream Data Buffer, Port2 */
struct ce_ure_maint_ups_dat2_data {
u64 data63_0[512]; /* 0x012000 -- 0x012FF8 */
u64 data127_64[512]; /* 0x013000 -- 0x013FF8 */
u64 parity[512]; /* 0x014000 -- 0x014FF8 */
} ce_ure_maint_ups_dat2;
/* Upstream Header Buffer, Port2 */
struct ce_ure_maint_ups_hdr2_data {
u64 data63_0[512]; /* 0x015000 -- 0x015FF8 */
u64 data127_64[512]; /* 0x016000 -- 0x016FF8 */
u64 parity[512]; /* 0x017000 -- 0x017FF8 */
} ce_ure_maint_ups_hdr2;
/* Downstream Data Buffer */
struct ce_ure_maint_dns_dat_data {
u64 data63_0[512]; /* 0x018000 -- 0x018FF8 */
u64 data127_64[512]; /* 0x019000 -- 0x019FF8 */
u64 parity[512]; /* 0x01A000 -- 0x01AFF8 */
} ce_ure_maint_dns_dat;
/* Downstream Header Buffer */
struct ce_ure_maint_dns_hdr_data {
u64 data31_0[64]; /* 0x01B000 -- 0x01B1F8 */
u64 data95_32[64]; /* 0x01B200 -- 0x01B3F8 */
u64 parity[64]; /* 0x01B400 -- 0x01B5F8 */
} ce_ure_maint_dns_hdr;
/* RCI Buffer Data */
struct ce_ure_maint_rci_data {
u64 data41_0[64]; /* 0x01B600 -- 0x01B7F8 */
u64 data69_42[64]; /* 0x01B800 -- 0x01B9F8 */
} ce_ure_maint_rci;
/* Response Queue */
u64 ce_ure_maint_rspq[64]; /* 0x01BA00 -- 0x01BBF8 */
u64 ce_pad_01C000[4224]; /* 0x01BC00 -- 0x023FF8 */
/* Admin Build-a-Packet Buffer */
struct ce_adm_maint_bap_buf_data {
u64 data63_0[258]; /* 0x024000 -- 0x024808 */
u64 data127_64[258]; /* 0x024810 -- 0x025018 */
u64 parity[258]; /* 0x025020 -- 0x025828 */
} ce_adm_maint_bap_buf;
u64 ce_pad_025830[5370]; /* 0x025830 -- 0x02FFF8 */
/* URE: 40bit PMU ATE Buffer */ /* 0x030000 -- 0x037FF8 */
u64 ce_ure_ate40[TIOCE_NUM_M40_ATES];
/* URE: 32/40bit PMU ATE Buffer */ /* 0x038000 -- 0x03BFF8 */
u64 ce_ure_ate3240[TIOCE_NUM_M3240_ATES];
u64 ce_pad_03C000[2050]; /* 0x03C000 -- 0x040008 */
/*
* DRE: Down Stream Request Engine
*/
u64 ce_dre_dyn_credit_status1; /* 0x040010 */
u64 ce_dre_dyn_credit_status2; /* 0x040018 */
u64 ce_dre_last_credit_status1; /* 0x040020 */
u64 ce_dre_last_credit_status2; /* 0x040028 */
u64 ce_dre_credit_limit1; /* 0x040030 */
u64 ce_dre_credit_limit2; /* 0x040038 */
u64 ce_dre_force_credit1; /* 0x040040 */
u64 ce_dre_force_credit2; /* 0x040048 */
u64 ce_dre_debug_mux1; /* 0x040050 */
u64 ce_dre_debug_mux2; /* 0x040058 */
u64 ce_dre_ssp_err_cmd_wrd; /* 0x040060 */
u64 ce_dre_ssp_err_addr; /* 0x040068 */
u64 ce_dre_comp_err_cmd_wrd; /* 0x040070 */
u64 ce_dre_comp_err_addr; /* 0x040078 */
u64 ce_dre_req_status; /* 0x040080 */
u64 ce_dre_config1; /* 0x040088 */
u64 ce_dre_config2; /* 0x040090 */
u64 ce_dre_config_req_status; /* 0x040098 */
u64 ce_pad_0400A0[12]; /* 0x0400A0 -- 0x0400F8 */
u64 ce_dre_dyn_fifo; /* 0x040100 */
u64 ce_pad_040108[3]; /* 0x040108 -- 0x040118 */
u64 ce_dre_last_fifo; /* 0x040120 */
u64 ce_pad_040128[27]; /* 0x040128 -- 0x0401F8 */
/* DRE Downstream Head Queue */
struct ce_dre_maint_ds_head_queue {
u64 data63_0[32]; /* 0x040200 -- 0x0402F8 */
u64 data127_64[32]; /* 0x040300 -- 0x0403F8 */
u64 parity[32]; /* 0x040400 -- 0x0404F8 */
} ce_dre_maint_ds_head_q;
u64 ce_pad_040500[352]; /* 0x040500 -- 0x040FF8 */
/* DRE Downstream Data Queue */
struct ce_dre_maint_ds_data_queue {
u64 data63_0[256]; /* 0x041000 -- 0x0417F8 */
u64 ce_pad_041800[256]; /* 0x041800 -- 0x041FF8 */
u64 data127_64[256]; /* 0x042000 -- 0x0427F8 */
u64 ce_pad_042800[256]; /* 0x042800 -- 0x042FF8 */
u64 parity[256]; /* 0x043000 -- 0x0437F8 */
u64 ce_pad_043800[256]; /* 0x043800 -- 0x043FF8 */
} ce_dre_maint_ds_data_q;
/* DRE URE Upstream Response Queue */
struct ce_dre_maint_ure_us_rsp_queue {
u64 data63_0[8]; /* 0x044000 -- 0x044038 */
u64 ce_pad_044040[24]; /* 0x044040 -- 0x0440F8 */
u64 data127_64[8]; /* 0x044100 -- 0x044138 */
u64 ce_pad_044140[24]; /* 0x044140 -- 0x0441F8 */
u64 parity[8]; /* 0x044200 -- 0x044238 */
u64 ce_pad_044240[24]; /* 0x044240 -- 0x0442F8 */
} ce_dre_maint_ure_us_rsp_q;
u64 ce_dre_maint_us_wrt_rsp[32];/* 0x044300 -- 0x0443F8 */
u64 ce_end_of_struct; /* 0x044400 */
} tioce_t;
/* ce_lsiX_gb_cfg1 register bit masks & shifts */
#define CE_LSI_GB_CFG1_RXL0S_THS_SHFT 0
#define CE_LSI_GB_CFG1_RXL0S_THS_MASK (0xffULL << 0)
#define CE_LSI_GB_CFG1_RXL0S_SMP_SHFT 8
#define CE_LSI_GB_CFG1_RXL0S_SMP_MASK (0xfULL << 8)
#define CE_LSI_GB_CFG1_RXL0S_ADJ_SHFT 12
#define CE_LSI_GB_CFG1_RXL0S_ADJ_MASK (0x7ULL << 12)
#define CE_LSI_GB_CFG1_RXL0S_FLT_SHFT 15
#define CE_LSI_GB_CFG1_RXL0S_FLT_MASK (0x1ULL << 15)
#define CE_LSI_GB_CFG1_LPBK_SEL_SHFT 16
#define CE_LSI_GB_CFG1_LPBK_SEL_MASK (0x3ULL << 16)
#define CE_LSI_GB_CFG1_LPBK_EN_SHFT 18
#define CE_LSI_GB_CFG1_LPBK_EN_MASK (0x1ULL << 18)
#define CE_LSI_GB_CFG1_RVRS_LB_SHFT 19
#define CE_LSI_GB_CFG1_RVRS_LB_MASK (0x1ULL << 19)
#define CE_LSI_GB_CFG1_RVRS_CLK_SHFT 20
#define CE_LSI_GB_CFG1_RVRS_CLK_MASK (0x3ULL << 20)
#define CE_LSI_GB_CFG1_SLF_TS_SHFT 24
#define CE_LSI_GB_CFG1_SLF_TS_MASK (0xfULL << 24)
/* ce_adm_int_mask/ce_adm_int_status register bit defines */
#define CE_ADM_INT_CE_ERROR_SHFT 0
#define CE_ADM_INT_LSI1_IP_ERROR_SHFT 1
#define CE_ADM_INT_LSI2_IP_ERROR_SHFT 2
#define CE_ADM_INT_PCIE_ERROR_SHFT 3
#define CE_ADM_INT_PORT1_HOTPLUG_EVENT_SHFT 4
#define CE_ADM_INT_PORT2_HOTPLUG_EVENT_SHFT 5
#define CE_ADM_INT_PCIE_PORT1_DEV_A_SHFT 6
#define CE_ADM_INT_PCIE_PORT1_DEV_B_SHFT 7
#define CE_ADM_INT_PCIE_PORT1_DEV_C_SHFT 8
#define CE_ADM_INT_PCIE_PORT1_DEV_D_SHFT 9
#define CE_ADM_INT_PCIE_PORT2_DEV_A_SHFT 10
#define CE_ADM_INT_PCIE_PORT2_DEV_B_SHFT 11
#define CE_ADM_INT_PCIE_PORT2_DEV_C_SHFT 12
#define CE_ADM_INT_PCIE_PORT2_DEV_D_SHFT 13
#define CE_ADM_INT_PCIE_MSG_SHFT 14 /*see int_dest_14*/
#define CE_ADM_INT_PCIE_MSG_SLOT_0_SHFT 14
#define CE_ADM_INT_PCIE_MSG_SLOT_1_SHFT 15
#define CE_ADM_INT_PCIE_MSG_SLOT_2_SHFT 16
#define CE_ADM_INT_PCIE_MSG_SLOT_3_SHFT 17
#define CE_ADM_INT_PORT1_PM_PME_MSG_SHFT 22
#define CE_ADM_INT_PORT2_PM_PME_MSG_SHFT 23
/* ce_adm_force_int register bit defines */
#define CE_ADM_FORCE_INT_PCIE_PORT1_DEV_A_SHFT 0
#define CE_ADM_FORCE_INT_PCIE_PORT1_DEV_B_SHFT 1
#define CE_ADM_FORCE_INT_PCIE_PORT1_DEV_C_SHFT 2
#define CE_ADM_FORCE_INT_PCIE_PORT1_DEV_D_SHFT 3
#define CE_ADM_FORCE_INT_PCIE_PORT2_DEV_A_SHFT 4
#define CE_ADM_FORCE_INT_PCIE_PORT2_DEV_B_SHFT 5
#define CE_ADM_FORCE_INT_PCIE_PORT2_DEV_C_SHFT 6
#define CE_ADM_FORCE_INT_PCIE_PORT2_DEV_D_SHFT 7
#define CE_ADM_FORCE_INT_ALWAYS_SHFT 8
/* ce_adm_int_dest register bit masks & shifts */
#define INTR_VECTOR_SHFT 56
/* ce_adm_error_mask and ce_adm_error_summary register bit masks */
#define CE_ADM_ERR_CRM_SSP_REQ_INVALID (0x1ULL << 0)
#define CE_ADM_ERR_SSP_REQ_HEADER (0x1ULL << 1)
#define CE_ADM_ERR_SSP_RSP_HEADER (0x1ULL << 2)
#define CE_ADM_ERR_SSP_PROTOCOL_ERROR (0x1ULL << 3)
#define CE_ADM_ERR_SSP_SBE (0x1ULL << 4)
#define CE_ADM_ERR_SSP_MBE (0x1ULL << 5)
#define CE_ADM_ERR_CXM_CREDIT_OFLOW (0x1ULL << 6)
#define CE_ADM_ERR_DRE_SSP_REQ_INVAL (0x1ULL << 7)
#define CE_ADM_ERR_SSP_REQ_LONG (0x1ULL << 8)
#define CE_ADM_ERR_SSP_REQ_OFLOW (0x1ULL << 9)
#define CE_ADM_ERR_SSP_REQ_SHORT (0x1ULL << 10)
#define CE_ADM_ERR_SSP_REQ_SIDEBAND (0x1ULL << 11)
#define CE_ADM_ERR_SSP_REQ_ADDR_ERR (0x1ULL << 12)
#define CE_ADM_ERR_SSP_REQ_BAD_BE (0x1ULL << 13)
#define CE_ADM_ERR_PCIE_COMPL_TIMEOUT (0x1ULL << 14)
#define CE_ADM_ERR_PCIE_UNEXP_COMPL (0x1ULL << 15)
#define CE_ADM_ERR_PCIE_ERR_COMPL (0x1ULL << 16)
#define CE_ADM_ERR_DRE_CREDIT_OFLOW (0x1ULL << 17)
#define CE_ADM_ERR_DRE_SRAM_PE (0x1ULL << 18)
#define CE_ADM_ERR_SSP_RSP_INVALID (0x1ULL << 19)
#define CE_ADM_ERR_SSP_RSP_LONG (0x1ULL << 20)
#define CE_ADM_ERR_SSP_RSP_SHORT (0x1ULL << 21)
#define CE_ADM_ERR_SSP_RSP_SIDEBAND (0x1ULL << 22)
#define CE_ADM_ERR_URE_SSP_RSP_UNEXP (0x1ULL << 23)
#define CE_ADM_ERR_URE_SSP_WR_REQ_TIMEOUT (0x1ULL << 24)
#define CE_ADM_ERR_URE_SSP_RD_REQ_TIMEOUT (0x1ULL << 25)
#define CE_ADM_ERR_URE_ATE3240_PAGE_FAULT (0x1ULL << 26)
#define CE_ADM_ERR_URE_ATE40_PAGE_FAULT (0x1ULL << 27)
#define CE_ADM_ERR_URE_CREDIT_OFLOW (0x1ULL << 28)
#define CE_ADM_ERR_URE_SRAM_PE (0x1ULL << 29)
#define CE_ADM_ERR_ADM_SSP_RSP_UNEXP (0x1ULL << 30)
#define CE_ADM_ERR_ADM_SSP_REQ_TIMEOUT (0x1ULL << 31)
#define CE_ADM_ERR_MMR_ACCESS_ERROR (0x1ULL << 32)
#define CE_ADM_ERR_MMR_ADDR_ERROR (0x1ULL << 33)
#define CE_ADM_ERR_ADM_CREDIT_OFLOW (0x1ULL << 34)
#define CE_ADM_ERR_ADM_SRAM_PE (0x1ULL << 35)
#define CE_ADM_ERR_DTL1_MIN_PDATA_CREDIT_ERR (0x1ULL << 36)
#define CE_ADM_ERR_DTL1_INF_COMPL_CRED_UPDT_ERR (0x1ULL << 37)
#define CE_ADM_ERR_DTL1_INF_POSTED_CRED_UPDT_ERR (0x1ULL << 38)
#define CE_ADM_ERR_DTL1_INF_NPOSTED_CRED_UPDT_ERR (0x1ULL << 39)
#define CE_ADM_ERR_DTL1_COMP_HD_CRED_MAX_ERR (0x1ULL << 40)
#define CE_ADM_ERR_DTL1_COMP_D_CRED_MAX_ERR (0x1ULL << 41)
#define CE_ADM_ERR_DTL1_NPOSTED_HD_CRED_MAX_ERR (0x1ULL << 42)
#define CE_ADM_ERR_DTL1_NPOSTED_D_CRED_MAX_ERR (0x1ULL << 43)
#define CE_ADM_ERR_DTL1_POSTED_HD_CRED_MAX_ERR (0x1ULL << 44)
#define CE_ADM_ERR_DTL1_POSTED_D_CRED_MAX_ERR (0x1ULL << 45)
#define CE_ADM_ERR_DTL2_MIN_PDATA_CREDIT_ERR (0x1ULL << 46)
#define CE_ADM_ERR_DTL2_INF_COMPL_CRED_UPDT_ERR (0x1ULL << 47)
#define CE_ADM_ERR_DTL2_INF_POSTED_CRED_UPDT_ERR (0x1ULL << 48)
#define CE_ADM_ERR_DTL2_INF_NPOSTED_CRED_UPDT_ERR (0x1ULL << 49)
#define CE_ADM_ERR_DTL2_COMP_HD_CRED_MAX_ERR (0x1ULL << 50)
#define CE_ADM_ERR_DTL2_COMP_D_CRED_MAX_ERR (0x1ULL << 51)
#define CE_ADM_ERR_DTL2_NPOSTED_HD_CRED_MAX_ERR (0x1ULL << 52)
#define CE_ADM_ERR_DTL2_NPOSTED_D_CRED_MAX_ERR (0x1ULL << 53)
#define CE_ADM_ERR_DTL2_POSTED_HD_CRED_MAX_ERR (0x1ULL << 54)
#define CE_ADM_ERR_DTL2_POSTED_D_CRED_MAX_ERR (0x1ULL << 55)
#define CE_ADM_ERR_PORT1_PCIE_COR_ERR (0x1ULL << 56)
#define CE_ADM_ERR_PORT1_PCIE_NFAT_ERR (0x1ULL << 57)
#define CE_ADM_ERR_PORT1_PCIE_FAT_ERR (0x1ULL << 58)
#define CE_ADM_ERR_PORT2_PCIE_COR_ERR (0x1ULL << 59)
#define CE_ADM_ERR_PORT2_PCIE_NFAT_ERR (0x1ULL << 60)
#define CE_ADM_ERR_PORT2_PCIE_FAT_ERR (0x1ULL << 61)
/* ce_adm_ure_ups_buf_barrier_flush register bit masks and shifts */
#define FLUSH_SEL_PORT1_PIPE0_SHFT 0
#define FLUSH_SEL_PORT1_PIPE1_SHFT 4
#define FLUSH_SEL_PORT1_PIPE2_SHFT 8
#define FLUSH_SEL_PORT1_PIPE3_SHFT 12
#define FLUSH_SEL_PORT2_PIPE0_SHFT 16
#define FLUSH_SEL_PORT2_PIPE1_SHFT 20
#define FLUSH_SEL_PORT2_PIPE2_SHFT 24
#define FLUSH_SEL_PORT2_PIPE3_SHFT 28
/* ce_dre_config1 register bit masks and shifts */
#define CE_DRE_RO_ENABLE (0x1ULL << 0)
#define CE_DRE_DYN_RO_ENABLE (0x1ULL << 1)
#define CE_DRE_SUP_CONFIG_COMP_ERROR (0x1ULL << 2)
#define CE_DRE_SUP_IO_COMP_ERROR (0x1ULL << 3)
#define CE_DRE_ADDR_MODE_SHFT 4
/* ce_dre_config_req_status register bit masks */
#define CE_DRE_LAST_CONFIG_COMPLETION (0x7ULL << 0)
#define CE_DRE_DOWNSTREAM_CONFIG_ERROR (0x1ULL << 3)
#define CE_DRE_CONFIG_COMPLETION_VALID (0x1ULL << 4)
#define CE_DRE_CONFIG_REQUEST_ACTIVE (0x1ULL << 5)
/* ce_ure_control register bit masks & shifts */
#define CE_URE_RD_MRG_ENABLE (0x1ULL << 0)
#define CE_URE_WRT_MRG_ENABLE1 (0x1ULL << 4)
#define CE_URE_WRT_MRG_ENABLE2 (0x1ULL << 5)
#define CE_URE_WRT_MRG_TIMER_SHFT 12
#define CE_URE_WRT_MRG_TIMER_MASK (0x7FFULL << CE_URE_WRT_MRG_TIMER_SHFT)
#define CE_URE_WRT_MRG_TIMER(x) (((u64)(x) << \
CE_URE_WRT_MRG_TIMER_SHFT) & \
CE_URE_WRT_MRG_TIMER_MASK)
#define CE_URE_RSPQ_BYPASS_DISABLE (0x1ULL << 24)
#define CE_URE_UPS_DAT1_PAR_DISABLE (0x1ULL << 32)
#define CE_URE_UPS_HDR1_PAR_DISABLE (0x1ULL << 33)
#define CE_URE_UPS_DAT2_PAR_DISABLE (0x1ULL << 34)
#define CE_URE_UPS_HDR2_PAR_DISABLE (0x1ULL << 35)
#define CE_URE_ATE_PAR_DISABLE (0x1ULL << 36)
#define CE_URE_RCI_PAR_DISABLE (0x1ULL << 37)
#define CE_URE_RSPQ_PAR_DISABLE (0x1ULL << 38)
#define CE_URE_DNS_DAT_PAR_DISABLE (0x1ULL << 39)
#define CE_URE_DNS_HDR_PAR_DISABLE (0x1ULL << 40)
#define CE_URE_MALFORM_DISABLE (0x1ULL << 44)
#define CE_URE_UNSUP_DISABLE (0x1ULL << 45)
/* ce_ure_page_map register bit masks & shifts */
#define CE_URE_ATE3240_ENABLE (0x1ULL << 0)
#define CE_URE_ATE40_ENABLE (0x1ULL << 1)
#define CE_URE_PAGESIZE_SHFT 4
#define CE_URE_PAGESIZE_MASK (0x7ULL << CE_URE_PAGESIZE_SHFT)
#define CE_URE_4K_PAGESIZE (0x0ULL << CE_URE_PAGESIZE_SHFT)
#define CE_URE_16K_PAGESIZE (0x1ULL << CE_URE_PAGESIZE_SHFT)
#define CE_URE_64K_PAGESIZE (0x2ULL << CE_URE_PAGESIZE_SHFT)
#define CE_URE_128K_PAGESIZE (0x3ULL << CE_URE_PAGESIZE_SHFT)
#define CE_URE_256K_PAGESIZE (0x4ULL << CE_URE_PAGESIZE_SHFT)
/* ce_ure_pipe_sel register bit masks & shifts */
#define PKT_TRAFIC_SHRT 16
#define BUS_SRC_ID_SHFT 8
#define DEV_SRC_ID_SHFT 3
#define FNC_SRC_ID_SHFT 0
#define CE_URE_TC_MASK (0x07ULL << PKT_TRAFIC_SHRT)
#define CE_URE_BUS_MASK (0xFFULL << BUS_SRC_ID_SHFT)
#define CE_URE_DEV_MASK (0x1FULL << DEV_SRC_ID_SHFT)
#define CE_URE_FNC_MASK (0x07ULL << FNC_SRC_ID_SHFT)
#define CE_URE_PIPE_BUS(b) (((u64)(b) << BUS_SRC_ID_SHFT) & \
CE_URE_BUS_MASK)
#define CE_URE_PIPE_DEV(d) (((u64)(d) << DEV_SRC_ID_SHFT) & \
CE_URE_DEV_MASK)
#define CE_URE_PIPE_FNC(f) (((u64)(f) << FNC_SRC_ID_SHFT) & \
CE_URE_FNC_MASK)
#define CE_URE_SEL1_SHFT 0
#define CE_URE_SEL2_SHFT 20
#define CE_URE_SEL3_SHFT 40
#define CE_URE_SEL1_MASK (0x7FFFFULL << CE_URE_SEL1_SHFT)
#define CE_URE_SEL2_MASK (0x7FFFFULL << CE_URE_SEL2_SHFT)
#define CE_URE_SEL3_MASK (0x7FFFFULL << CE_URE_SEL3_SHFT)
/* ce_ure_pipe_mask register bit masks & shifts */
#define CE_URE_MASK1_SHFT 0
#define CE_URE_MASK2_SHFT 20
#define CE_URE_MASK3_SHFT 40
#define CE_URE_MASK1_MASK (0x7FFFFULL << CE_URE_MASK1_SHFT)
#define CE_URE_MASK2_MASK (0x7FFFFULL << CE_URE_MASK2_SHFT)
#define CE_URE_MASK3_MASK (0x7FFFFULL << CE_URE_MASK3_SHFT)
/* ce_ure_pcie_control1 register bit masks & shifts */
#define CE_URE_SI (0x1ULL << 0)
#define CE_URE_ELAL_SHFT 4
#define CE_URE_ELAL_MASK (0x7ULL << CE_URE_ELAL_SHFT)
#define CE_URE_ELAL_SET(n) (((u64)(n) << CE_URE_ELAL_SHFT) & \
CE_URE_ELAL_MASK)
#define CE_URE_ELAL1_SHFT 8
#define CE_URE_ELAL1_MASK (0x7ULL << CE_URE_ELAL1_SHFT)
#define CE_URE_ELAL1_SET(n) (((u64)(n) << CE_URE_ELAL1_SHFT) & \
CE_URE_ELAL1_MASK)
#define CE_URE_SCC (0x1ULL << 12)
#define CE_URE_PN1_SHFT 16
#define CE_URE_PN1_MASK (0xFFULL << CE_URE_PN1_SHFT)
#define CE_URE_PN2_SHFT 24
#define CE_URE_PN2_MASK (0xFFULL << CE_URE_PN2_SHFT)
#define CE_URE_PN1_SET(n) (((u64)(n) << CE_URE_PN1_SHFT) & \
CE_URE_PN1_MASK)
#define CE_URE_PN2_SET(n) (((u64)(n) << CE_URE_PN2_SHFT) & \
CE_URE_PN2_MASK)
/* ce_ure_pcie_control2 register bit masks & shifts */
#define CE_URE_ABP (0x1ULL << 0)
#define CE_URE_PCP (0x1ULL << 1)
#define CE_URE_MSP (0x1ULL << 2)
#define CE_URE_AIP (0x1ULL << 3)
#define CE_URE_PIP (0x1ULL << 4)
#define CE_URE_HPS (0x1ULL << 5)
#define CE_URE_HPC (0x1ULL << 6)
#define CE_URE_SPLV_SHFT 7
#define CE_URE_SPLV_MASK (0xFFULL << CE_URE_SPLV_SHFT)
#define CE_URE_SPLV_SET(n) (((u64)(n) << CE_URE_SPLV_SHFT) & \
CE_URE_SPLV_MASK)
#define CE_URE_SPLS_SHFT 15
#define CE_URE_SPLS_MASK (0x3ULL << CE_URE_SPLS_SHFT)
#define CE_URE_SPLS_SET(n) (((u64)(n) << CE_URE_SPLS_SHFT) & \
CE_URE_SPLS_MASK)
#define CE_URE_PSN1_SHFT 19
#define CE_URE_PSN1_MASK (0x1FFFULL << CE_URE_PSN1_SHFT)
#define CE_URE_PSN2_SHFT 32
#define CE_URE_PSN2_MASK (0x1FFFULL << CE_URE_PSN2_SHFT)
#define CE_URE_PSN1_SET(n) (((u64)(n) << CE_URE_PSN1_SHFT) & \
CE_URE_PSN1_MASK)
#define CE_URE_PSN2_SET(n) (((u64)(n) << CE_URE_PSN2_SHFT) & \
CE_URE_PSN2_MASK)
/*
* PIO address space ranges for CE
*/
/* Local CE Registers Space */
#define CE_PIO_MMR 0x00000000
#define CE_PIO_MMR_LEN 0x04000000
/* PCI Compatible Config Space */
#define CE_PIO_CONFIG_SPACE 0x04000000
#define CE_PIO_CONFIG_SPACE_LEN 0x04000000
/* PCI I/O Space Alias */
#define CE_PIO_IO_SPACE_ALIAS 0x08000000
#define CE_PIO_IO_SPACE_ALIAS_LEN 0x08000000
/* PCI Enhanced Config Space */
#define CE_PIO_E_CONFIG_SPACE 0x10000000
#define CE_PIO_E_CONFIG_SPACE_LEN 0x10000000
/* PCI I/O Space */
#define CE_PIO_IO_SPACE 0x100000000
#define CE_PIO_IO_SPACE_LEN 0x100000000
/* PCI MEM Space */
#define CE_PIO_MEM_SPACE 0x200000000
#define CE_PIO_MEM_SPACE_LEN TIO_HWIN_SIZE
/*
* CE PCI Enhanced Config Space shifts & masks
*/
#define CE_E_CONFIG_BUS_SHFT 20
#define CE_E_CONFIG_BUS_MASK (0xFF << CE_E_CONFIG_BUS_SHFT)
#define CE_E_CONFIG_DEVICE_SHFT 15
#define CE_E_CONFIG_DEVICE_MASK (0x1F << CE_E_CONFIG_DEVICE_SHFT)
#define CE_E_CONFIG_FUNC_SHFT 12
#define CE_E_CONFIG_FUNC_MASK (0x7 << CE_E_CONFIG_FUNC_SHFT)
#endif /* __ASM_IA64_SN_TIOCE_H__ */
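The CE_E_CONFIG_* shifts and masks above pack PCI bus/device/function numbers into an offset inside the enhanced config-space window (CE_PIO_E_CONFIG_SPACE). A minimal sketch of composing such an offset follows; ce_ecfg_offset() is a hypothetical helper added here for illustration, and the low 12 bits are assumed to carry the register offset within the function's config space:

static inline unsigned long
ce_ecfg_offset(unsigned int bus, unsigned int dev,
	       unsigned int fn, unsigned int reg)
{
	return ((bus << CE_E_CONFIG_BUS_SHFT)    & CE_E_CONFIG_BUS_MASK) |
	       ((dev << CE_E_CONFIG_DEVICE_SHFT) & CE_E_CONFIG_DEVICE_MASK) |
	       ((fn  << CE_E_CONFIG_FUNC_SHFT)   & CE_E_CONFIG_FUNC_MASK) |
	       (reg & 0xfff);	/* assumed: 4K of config space per function */
}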

View File

@ -1,63 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2003-2005 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_CE_PROVIDER_H
#define _ASM_IA64_SN_CE_PROVIDER_H
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/tioce.h>
/*
* Common TIOCE structure shared between the prom and kernel
*
* DO NOT CHANGE THIS STRUCT WITHOUT MAKING CORRESPONDING CHANGES TO THE
* PROM VERSION.
*/
struct tioce_common {
struct pcibus_bussoft ce_pcibus; /* common pciio header */
u32 ce_rev;
u64 ce_kernel_private;
u64 ce_prom_private;
};
struct tioce_kernel {
struct tioce_common *ce_common;
spinlock_t ce_lock;
struct list_head ce_dmamap_list;
u64 ce_ate40_shadow[TIOCE_NUM_M40_ATES];
u64 ce_ate3240_shadow[TIOCE_NUM_M3240_ATES];
u32 ce_ate3240_pagesize;
u8 ce_port1_secondary;
/* per-port resources */
struct {
int dirmap_refcnt;
u64 dirmap_shadow;
} ce_port[TIOCE_NUM_PORTS];
};
struct tioce_dmamap {
struct list_head ce_dmamap_list; /* headed by tioce_kernel */
u32 refcnt;
u64 nbytes; /* # bytes mapped */
u64 ct_start; /* coretalk start address */
u64 pci_start; /* bus start address */
u64 __iomem *ate_hw;/* hw ptr of first ate in map */
u64 *ate_shadow; /* shadow ptr of first ate */
u16 ate_count; /* # ate's in the map */
};
extern int tioce_init_provider(void);
#endif /* __ASM_IA64_SN_CE_PROVIDER_H */

View File

@ -1,257 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2003-2005 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_PCI_TIOCP_H
#define _ASM_IA64_SN_PCI_TIOCP_H
#define TIOCP_HOST_INTR_ADDR 0x003FFFFFFFFFFFFFUL
#define TIOCP_PCI64_CMDTYPE_MEM (0x1ull << 60)
#define TIOCP_PCI64_CMDTYPE_MSI (0x3ull << 60)
/*****************************************************************************
*********************** TIOCP MMR structure mapping ***************************
*****************************************************************************/
struct tiocp{
/* 0x000000-0x00FFFF -- Local Registers */
/* 0x000000-0x000057 -- (Legacy Widget Space) Configuration */
u64 cp_id; /* 0x000000 */
u64 cp_stat; /* 0x000008 */
u64 cp_err_upper; /* 0x000010 */
u64 cp_err_lower; /* 0x000018 */
#define cp_err cp_err_lower
u64 cp_control; /* 0x000020 */
u64 cp_req_timeout; /* 0x000028 */
u64 cp_intr_upper; /* 0x000030 */
u64 cp_intr_lower; /* 0x000038 */
#define cp_intr cp_intr_lower
u64 cp_err_cmdword; /* 0x000040 */
u64 _pad_000048; /* 0x000048 */
u64 cp_tflush; /* 0x000050 */
/* 0x000058-0x00007F -- Bridge-specific Configuration */
u64 cp_aux_err; /* 0x000058 */
u64 cp_resp_upper; /* 0x000060 */
u64 cp_resp_lower; /* 0x000068 */
#define cp_resp cp_resp_lower
u64 cp_tst_pin_ctrl; /* 0x000070 */
u64 cp_addr_lkerr; /* 0x000078 */
/* 0x000080-0x00008F -- PMU & MAP */
u64 cp_dir_map; /* 0x000080 */
u64 _pad_000088; /* 0x000088 */
/* 0x000090-0x00009F -- SSRAM */
u64 cp_map_fault; /* 0x000090 */
u64 _pad_000098; /* 0x000098 */
/* 0x0000A0-0x0000AF -- Arbitration */
u64 cp_arb; /* 0x0000A0 */
u64 _pad_0000A8; /* 0x0000A8 */
/* 0x0000B0-0x0000BF -- Number In A Can or ATE Parity Error */
u64 cp_ate_parity_err; /* 0x0000B0 */
u64 _pad_0000B8; /* 0x0000B8 */
/* 0x0000C0-0x0000FF -- PCI/GIO */
u64 cp_bus_timeout; /* 0x0000C0 */
u64 cp_pci_cfg; /* 0x0000C8 */
u64 cp_pci_err_upper; /* 0x0000D0 */
u64 cp_pci_err_lower; /* 0x0000D8 */
#define cp_pci_err cp_pci_err_lower
u64 _pad_0000E0[4]; /* 0x0000{E0..F8} */
/* 0x000100-0x0001FF -- Interrupt */
u64 cp_int_status; /* 0x000100 */
u64 cp_int_enable; /* 0x000108 */
u64 cp_int_rst_stat; /* 0x000110 */
u64 cp_int_mode; /* 0x000118 */
u64 cp_int_device; /* 0x000120 */
u64 cp_int_host_err; /* 0x000128 */
u64 cp_int_addr[8]; /* 0x0001{30,,,68} */
u64 cp_err_int_view; /* 0x000170 */
u64 cp_mult_int; /* 0x000178 */
u64 cp_force_always[8]; /* 0x0001{80,,,B8} */
u64 cp_force_pin[8]; /* 0x0001{C0,,,F8} */
/* 0x000200-0x000298 -- Device */
u64 cp_device[4]; /* 0x0002{00,,,18} */
u64 _pad_000220[4]; /* 0x0002{20,,,38} */
u64 cp_wr_req_buf[4]; /* 0x0002{40,,,58} */
u64 _pad_000260[4]; /* 0x0002{60,,,78} */
u64 cp_rrb_map[2]; /* 0x0002{80,,,88} */
#define cp_even_resp cp_rrb_map[0] /* 0x000280 */
#define cp_odd_resp cp_rrb_map[1] /* 0x000288 */
u64 cp_resp_status; /* 0x000290 */
u64 cp_resp_clear; /* 0x000298 */
u64 _pad_0002A0[12]; /* 0x0002{A0..F8} */
/* 0x000300-0x0003F8 -- Buffer Address Match Registers */
struct {
u64 upper; /* 0x0003{00,,,F0} */
u64 lower; /* 0x0003{08,,,F8} */
} cp_buf_addr_match[16];
/* 0x000400-0x0005FF -- Performance Monitor Registers (even only) */
struct {
u64 flush_w_touch; /* 0x000{400,,,5C0} */
u64 flush_wo_touch; /* 0x000{408,,,5C8} */
u64 inflight; /* 0x000{410,,,5D0} */
u64 prefetch; /* 0x000{418,,,5D8} */
u64 total_pci_retry; /* 0x000{420,,,5E0} */
u64 max_pci_retry; /* 0x000{428,,,5E8} */
u64 max_latency; /* 0x000{430,,,5F0} */
u64 clear_all; /* 0x000{438,,,5F8} */
} cp_buf_count[8];
/* 0x000600-0x0009FF -- PCI/X registers */
u64 cp_pcix_bus_err_addr; /* 0x000600 */
u64 cp_pcix_bus_err_attr; /* 0x000608 */
u64 cp_pcix_bus_err_data; /* 0x000610 */
u64 cp_pcix_pio_split_addr; /* 0x000618 */
u64 cp_pcix_pio_split_attr; /* 0x000620 */
u64 cp_pcix_dma_req_err_attr; /* 0x000628 */
u64 cp_pcix_dma_req_err_addr; /* 0x000630 */
u64 cp_pcix_timeout; /* 0x000638 */
u64 _pad_000640[24]; /* 0x000{640,,,6F8} */
/* 0x000700-0x000737 -- Debug Registers */
u64 cp_ct_debug_ctl; /* 0x000700 */
u64 cp_br_debug_ctl; /* 0x000708 */
u64 cp_mux3_debug_ctl; /* 0x000710 */
u64 cp_mux4_debug_ctl; /* 0x000718 */
u64 cp_mux5_debug_ctl; /* 0x000720 */
u64 cp_mux6_debug_ctl; /* 0x000728 */
u64 cp_mux7_debug_ctl; /* 0x000730 */
u64 _pad_000738[89]; /* 0x000{738,,,9F8} */
/* 0x000A00-0x000BFF -- PCI/X Read&Write Buffer */
struct {
u64 cp_buf_addr; /* 0x000{A00,,,AF0} */
u64 cp_buf_attr; /* 0x000{A08,,,AF8} */
} cp_pcix_read_buf_64[16];
struct {
u64 cp_buf_addr; /* 0x000{B00,,,BE0} */
u64 cp_buf_attr; /* 0x000{B08,,,BE8} */
u64 cp_buf_valid; /* 0x000{B10,,,BF0} */
u64 __pad1; /* 0x000{B18,,,BF8} */
} cp_pcix_write_buf_64[8];
/* End of Local Registers -- Start of Address Map space */
char _pad_000c00[0x010000 - 0x000c00];
/* 0x010000-0x011FF8 -- Internal ATE RAM (Auto Parity Generation) */
u64 cp_int_ate_ram[1024]; /* 0x010000-0x011FF8 */
char _pad_012000[0x14000 - 0x012000];
/* 0x014000-0x015FF8 -- Internal ATE RAM (Manual Parity Generation) */
u64 cp_int_ate_ram_mp[1024]; /* 0x014000-0x015FF8 */
char _pad_016000[0x18000 - 0x016000];
/* 0x18000-0x197F8 -- TIOCP Write Request Ram */
u64 cp_wr_req_lower[256]; /* 0x18000 - 0x187F8 */
u64 cp_wr_req_upper[256]; /* 0x18800 - 0x18FF8 */
u64 cp_wr_req_parity[256]; /* 0x19000 - 0x197F8 */
char _pad_019800[0x1C000 - 0x019800];
/* 0x1C000-0x1EFF8 -- TIOCP Read Response Ram */
u64 cp_rd_resp_lower[512]; /* 0x1C000 - 0x1CFF8 */
u64 cp_rd_resp_upper[512]; /* 0x1D000 - 0x1DFF8 */
u64 cp_rd_resp_parity[512]; /* 0x1E000 - 0x1EFF8 */
char _pad_01F000[0x20000 - 0x01F000];
/* 0x020000-0x021FFF -- Host Device (CP) Configuration Space (not used) */
char _pad_020000[0x021000 - 0x20000];
/* 0x021000-0x027FFF -- PCI Device Configuration Spaces */
union {
u8 c[0x1000 / 1]; /* 0x02{0000,,,7FFF} */
u16 s[0x1000 / 2]; /* 0x02{0000,,,7FFF} */
u32 l[0x1000 / 4]; /* 0x02{0000,,,7FFF} */
u64 d[0x1000 / 8]; /* 0x02{0000,,,7FFF} */
union {
u8 c[0x100 / 1];
u16 s[0x100 / 2];
u32 l[0x100 / 4];
u64 d[0x100 / 8];
} f[8];
} cp_type0_cfg_dev[7]; /* 0x02{1000,,,7FFF} */
/* 0x028000-0x028FFF -- PCI Type 1 Configuration Space */
union {
u8 c[0x1000 / 1]; /* 0x028000-0x029000 */
u16 s[0x1000 / 2]; /* 0x028000-0x029000 */
u32 l[0x1000 / 4]; /* 0x028000-0x029000 */
u64 d[0x1000 / 8]; /* 0x028000-0x029000 */
union {
u8 c[0x100 / 1];
u16 s[0x100 / 2];
u32 l[0x100 / 4];
u64 d[0x100 / 8];
} f[8];
} cp_type1_cfg; /* 0x028000-0x029000 */
char _pad_029000[0x030000-0x029000];
/* 0x030000-0x030007 -- PCI Interrupt Acknowledge Cycle */
union {
u8 c[8 / 1];
u16 s[8 / 2];
u32 l[8 / 4];
u64 d[8 / 8];
} cp_pci_iack; /* 0x030000-0x030007 */
char _pad_030007[0x040000-0x030008];
/* 0x040000-0x040007 -- PCIX Special Cycle */
union {
u8 c[8 / 1];
u16 s[8 / 2];
u32 l[8 / 4];
u64 d[8 / 8];
} cp_pcix_cycle; /* 0x040000-0x040007 */
char _pad_040007[0x200000-0x040008];
/* 0x200000-0x7FFFFF -- PCI/GIO Device Spaces */
union {
u8 c[0x100000 / 1];
u16 s[0x100000 / 2];
u32 l[0x100000 / 4];
u64 d[0x100000 / 8];
} cp_devio_raw[6]; /* 0x200000-0x7FFFFF */
#define cp_devio(n) cp_devio_raw[((n)<2)?(n*2):(n+2)]
char _pad_800000[0xA00000-0x800000];
/* 0xA00000-0xBFFFFF -- PCI/GIO Device Spaces w/flush */
union {
u8 c[0x100000 / 1];
u16 s[0x100000 / 2];
u32 l[0x100000 / 4];
u64 d[0x100000 / 8];
} cp_devio_raw_flush[6]; /* 0xA00000-0xBFFFFF */
#define cp_devio_flush(n) cp_devio_raw_flush[((n)<2)?(n*2):(n+2)]
};
#endif /* _ASM_IA64_SN_PCI_TIOCP_H */
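The cp_devio(n) macro above maps logical device numbers 0-3 onto cp_devio_raw windows 0, 2, 4 and 5. As an illustration only (tiocp_devio_read32() is hypothetical; a real driver would use properly __iomem-annotated pointers), a PIO read of one 32-bit word from a device's window could look like:

#include <linux/io.h>		/* readl() */

static u32 tiocp_devio_read32(struct tiocp *cp, int dev, unsigned int off)
{
	/* cp_devio(dev) expands to cp_devio_raw[0, 2, 4 or 5] for dev 0..3 */
	return readl(&cp->cp_devio(dev).l[off / 4]);
}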

View File

@ -1,72 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2005 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_TIO_TIOCX_H
#define _ASM_IA64_SN_TIO_TIOCX_H
#ifdef __KERNEL__
struct cx_id_s {
unsigned int part_num;
unsigned int mfg_num;
int nasid;
};
struct cx_dev {
struct cx_id_s cx_id;
int bt; /* board/blade type */
void *soft; /* driver specific */
struct hubdev_info *hubdev;
struct device dev;
struct cx_drv *driver;
};
struct cx_device_id {
unsigned int part_num;
unsigned int mfg_num;
};
struct cx_drv {
char *name;
const struct cx_device_id *id_table;
struct device_driver driver;
int (*probe) (struct cx_dev * dev, const struct cx_device_id * id);
int (*remove) (struct cx_dev * dev);
};
/* create DMA address by stripping AS bits */
#define TIOCX_DMA_ADDR(a) (u64)((u64)(a) & 0xffffcfffffffffUL)
#define TIOCX_TO_TIOCX_DMA_ADDR(a) (u64)(((u64)(a) & 0xfffffffff) | \
((((u64)(a)) & 0xffffc000000000UL) <<2))
#define TIO_CE_ASIC_PARTNUM 0xce00
#define TIOCX_CORELET 3
/* These are taken from tio_mmr_as.h */
#define TIO_ICE_FRZ_CFG TIO_MMR_ADDR_MOD(0x00000000b0008100UL)
#define TIO_ICE_PMI_TX_CFG TIO_MMR_ADDR_MOD(0x00000000b000b100UL)
#define TIO_ICE_PMI_TX_DYN_CREDIT_STAT_CB3 TIO_MMR_ADDR_MOD(0x00000000b000be18UL)
#define TIO_ICE_PMI_TX_DYN_CREDIT_STAT_CB3_CREDIT_CNT_MASK 0x000000000000000fUL
#define to_cx_dev(n) container_of(n, struct cx_dev, dev)
#define to_cx_driver(drv) container_of(drv, struct cx_drv, driver)
extern struct sn_irq_info *tiocx_irq_alloc(nasid_t, int, int, nasid_t, int);
extern void tiocx_irq_free(struct sn_irq_info *);
extern int cx_device_unregister(struct cx_dev *);
extern int cx_device_register(nasid_t, int, int, struct hubdev_info *, int);
extern int cx_driver_unregister(struct cx_drv *);
extern int cx_driver_register(struct cx_drv *);
extern u64 tiocx_dma_addr(u64 addr);
extern u64 tiocx_swin_base(int nasid);
extern void tiocx_mmr_store(int nasid, u64 offset, u64 value);
extern u64 tiocx_mmr_load(int nasid, u64 offset);
#endif // __KERNEL__
#endif // _ASM_IA64_SN_TIO_TIOCX_H
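The cx_drv/cx_dev interfaces above were how TIOCX client drivers bound to corelets. A sketch of a minimal client, assuming those interfaces; the "my_cx" names and the zero mfg_num are illustrative placeholders, not values from the original header:

static const struct cx_device_id my_cx_ids[] = {
	{ .part_num = TIO_CE_ASIC_PARTNUM, .mfg_num = 0 },
	{ 0, 0 }	/* terminator */
};

static int my_cx_probe(struct cx_dev *dev, const struct cx_device_id *id)
{
	return 0;	/* claim the corelet */
}

static int my_cx_remove(struct cx_dev *dev)
{
	return 0;
}

static struct cx_drv my_cx_driver = {
	.name		= "my_cx",
	.id_table	= my_cx_ids,
	.probe		= my_cx_probe,
	.remove		= my_cx_remove,
};

/* registered with cx_driver_register(&my_cx_driver) at module init and
 * torn down with cx_driver_unregister(&my_cx_driver) at exit */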

View File

@ -1,26 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1999,2001-2003 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (C) 1999 by Ralf Baechle
*/
#ifndef _ASM_IA64_SN_TYPES_H
#define _ASM_IA64_SN_TYPES_H
#include <linux/types.h>
typedef unsigned long cpuid_t;
typedef signed short nasid_t; /* node id in numa-as-id space */
typedef signed char partid_t; /* partition ID type */
typedef unsigned int moduleid_t; /* user-visible module number type */
typedef unsigned int cmoduleid_t; /* kernel compact module id type */
typedef unsigned char slotid_t; /* slot (blade) within module */
typedef unsigned char slabid_t; /* slab (asic) within slot */
typedef u64 nic_t;
typedef unsigned long iopaddr_t;
typedef unsigned long paddr_t;
typedef short cnodeid_t;
#endif /* _ASM_IA64_SN_TYPES_H */

View File

@ -69,7 +69,6 @@ extern void ia64_load_extra (struct task_struct *task);
if (unlikely((current->thread.flags & IA64_THREAD_MIGRATION) && \
(task_cpu(current) != \
task_thread_info(current)->last_cpu))) { \
platform_migrate(current); \
task_thread_info(current)->last_cpu = task_cpu(current); \
} \
} while (0)

View File

@ -45,7 +45,6 @@
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/machvec.h>
#include <asm-generic/tlb.h>

View File

@ -2,12 +2,29 @@
#ifndef _ASM_IA64_UV_UV_H
#define _ASM_IA64_UV_UV_H
#include <asm/sn/simulator.h>
#ifdef CONFIG_IA64_SGI_UV
extern bool ia64_is_uv;
static inline int is_uv_system(void)
{
/* temporary support for running on hardware simulator */
return IS_MEDUSA() || ia64_platform_is("uv");
return ia64_is_uv;
}
void __init uv_probe_system_type(void);
void __init uv_setup(char **cmdline_p);
#else /* CONFIG_IA64_SGI_UV */
static inline int is_uv_system(void)
{
return false;
}
static inline void __init uv_probe_system_type(void)
{
}
static inline void __init uv_setup(char **cmdline_p)
{
}
#endif /* CONFIG_IA64_SGI_UV */
#endif /* _ASM_IA64_UV_UV_H */
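The rewritten uv.h above follows the pattern used throughout this series: provide empty inline stubs when CONFIG_IA64_SGI_UV is off so that callers need no #ifdefs. A sketch of how generic setup code can then use the hooks unconditionally (example_setup_arch() is an illustrative name, not the real arch/ia64 function):

void __init example_setup_arch(char **cmdline_p)
{
	uv_probe_system_type();		/* presumably sets ia64_is_uv on UV hardware */

	if (is_uv_system())
		uv_setup(cmdline_p);	/* stubbed out when UV support is not built in */
}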

View File

@ -10,15 +10,14 @@ endif
extra-y := head.o vmlinux.lds
obj-y := entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \
irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o \
irq_lsapic.o ivt.o pal.o patch.o process.o perfmon.o ptrace.o sal.o \
salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \
unwind.o mca.o mca_asm.o topology.o dma-mapping.o
unwind.o mca.o mca_asm.o topology.o dma-mapping.o iosapic.o acpi.o \
acpi-ext.o
obj-$(CONFIG_ACPI) += acpi.o acpi-ext.o
obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o
obj-$(CONFIG_IA64_PALINFO) += palinfo.o
obj-$(CONFIG_IOSAPIC) += iosapic.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_SMP) += smp.o smpboot.o
obj-$(CONFIG_NUMA) += numa.o
@ -31,7 +30,7 @@ obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o
obj-$(CONFIG_AUDIT) += audit.o
obj-$(CONFIG_PCI_MSI) += msi_ia64.o
obj-y += msi_ia64.o
mca_recovery-y += mca_drv.o mca_drv_asm.o
obj-$(CONFIG_IA64_MC_ERR_INJECT)+= err_inject.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o

Some files were not shown because too many files have changed in this diff