alistair23-linux/include/acpi/platform/aclinuxex.h
Lv Zheng 174cc7187e ACPICA: Tables: Back port acpi_get_table_with_size() and early_acpi_os_unmap_memory() from Linux kernel
ACPICA commit cac6790954d4d752a083e6122220b8a22febcd07

This patch backports the Linux acpi_get_table_with_size() and
early_acpi_os_unmap_memory() APIs into ACPICA upstream to reduce divergences.

These two APIs have been used by Linux as table management APIs for a long
time, and they carry a hidden requirement: tables mapped during the early
boot stage must be unmapped again before the early stage ends.

During the early stage, tables are handled by the following sequence:
 acpi_get_table_with_size();
 parse the table
 early_acpi_os_unmap_memory();
During the late stage, tables are handled by the following sequence:
 acpi_get_table();
 parse the table
Linux uses acpi_gbl_permanent_mmap to distinguish the early stage and the
late stage.
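For illustration, a typical early-stage consumer (modeled on the kernel's
acpi_table_parse(); the MADT signature and the parse_madt() handler are
placeholders, not part of this patch) looks roughly like this:

 struct acpi_table_header *table = NULL;
 acpi_size tbl_size;
 acpi_status status;

 status = acpi_get_table_with_size(ACPI_SIG_MADT, 0, &table, &tbl_size);
 if (ACPI_SUCCESS(status)) {
         parse_madt(table);                      /* placeholder parser */
         early_acpi_os_unmap_memory(table, tbl_size);
 }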

The reason acpi_get_table_with_size() was introduced is that ACPICA remembers
the early-mapped pointer returned by acpi_get_table(), and Linux cannot
prevent ACPICA from reusing that stale early mapping during the late stage,
because ACPICA provides no inverse of acpi_get_table() that would make it
forget the early-mapped pointer.

How, then, can ACPICA satisfy the early/late stage requirement? Inside
ACPICA, tables are kept in the "INSTALLED" state during the early stage and
are deliberately not transitioned to the "VALIDATED" state until the late
stage. The same logic is therefore already implemented inside ACPICA, just in
a different way. The only gap is that this facility is not exposed to OSPMs
as an external API.

The gap can therefore be closed by providing an inverse of acpi_get_table()
in ACPICA, so that the two Linux sequences can be combined into one:
 acpi_get_table();
 parse the table
 acpi_put_table();
To fit the current Linux code more easily, acpi_get_table() and
acpi_put_table() are implemented in a usage-counting style:
 1. When the usage count of the table increases from 0 to 1, the table is
    mapped and .Pointer is set to the mapping address (VALIDATED);
 2. When the usage count of the table decreases from 1 to 0, .Pointer is
    cleared and the mapping address is unmapped (INVALIDATED).
This lets the new APIs be deployed to Linux with minimal effort: it is enough
to invoke acpi_get_table() from acpi_get_table_with_size() and acpi_put_table()
from early_acpi_os_unmap_memory(), as sketched below. Lv Zheng.
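
A minimal sketch of what such thin wrappers could look like on the Linux side
(illustrative only, not the actual follow-up Linux patch; the .length-based
size and the __init annotation are assumptions):

 acpi_status
 acpi_get_table_with_size(char *signature, u32 instance,
                          struct acpi_table_header **out_table,
                          acpi_size *tbl_size)
 {
         /* Usage count 0 -> 1: the table is mapped and *out_table is set */
         acpi_status status = acpi_get_table(signature, instance, out_table);

         if (ACPI_SUCCESS(status))
                 *tbl_size = (*out_table)->length;
         return status;
 }

 void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
 {
         /* Usage count 1 -> 0: the early mapping is dropped again */
         acpi_put_table((struct acpi_table_header *)virt);
 }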

Link: https://github.com/acpica/acpica/commit/cac67909
Signed-off-by: Lv Zheng <lv.zheng@intel.com>
Signed-off-by: Bob Moore <robert.moore@intel.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2016-12-21 02:36:38 +01:00


/******************************************************************************
 *
 * Name: aclinuxex.h - Extra OS specific defines, etc. for Linux
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2016, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#ifndef __ACLINUXEX_H__
#define __ACLINUXEX_H__

#ifdef __KERNEL__

#ifndef ACPI_USE_NATIVE_DIVIDE

#ifndef ACPI_DIV_64_BY_32
#define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \
        do { \
                u64 (__n) = ((u64) n_hi) << 32 | (n_lo); \
                (r32) = do_div ((__n), (d32)); \
                (q32) = (u32) (__n); \
        } while (0)
#endif

#ifndef ACPI_SHIFT_RIGHT_64
#define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \
        do { \
                (n_lo) >>= 1; \
                (n_lo) |= (((n_hi) & 1) << 31); \
                (n_hi) >>= 1; \
        } while (0)
#endif

#endif

/*
 * Overrides for in-kernel ACPICA
 */
acpi_status ACPI_INIT_FUNCTION acpi_os_initialize(void);

acpi_status acpi_os_terminate(void);

/*
 * The irqs_disabled() check is for resume from RAM.
 * Interrupts are off during resume, just like they are for boot.
 * However, boot has (system_state != SYSTEM_RUNNING)
 * to quiet __might_sleep() in kmalloc() and resume does not.
 */
static inline void *acpi_os_allocate(acpi_size size)
{
        return kmalloc(size, irqs_disabled()? GFP_ATOMIC : GFP_KERNEL);
}

static inline void *acpi_os_allocate_zeroed(acpi_size size)
{
        return kzalloc(size, irqs_disabled()? GFP_ATOMIC : GFP_KERNEL);
}

static inline void acpi_os_free(void *memory)
{
        kfree(memory);
}

static inline void *acpi_os_acquire_object(acpi_cache_t * cache)
{
        return kmem_cache_zalloc(cache,
                                 irqs_disabled()? GFP_ATOMIC : GFP_KERNEL);
}

static inline acpi_thread_id acpi_os_get_thread_id(void)
{
        return (acpi_thread_id) (unsigned long)current;
}

/*
 * When lockdep is enabled, the spin_lock_init() macro stringifies its
 * argument and uses that as a name for the lock in debugging.
 * By executing spin_lock_init() in a macro the key changes from "lock" for
 * all locks to the name of the argument of acpi_os_create_lock(), which
 * prevents lockdep from reporting false positives for ACPICA locks.
 */
#define acpi_os_create_lock(__handle) \
        ({ \
                spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \
                if (lock) { \
                        *(__handle) = lock; \
                        spin_lock_init(*(__handle)); \
                } \
                lock ? AE_OK : AE_NO_MEMORY; \
        })

static inline u8 acpi_os_readable(void *pointer, acpi_size length)
{
        return TRUE;
}

static inline acpi_status acpi_os_initialize_command_signals(void)
{
        return AE_OK;
}

static inline void acpi_os_terminate_command_signals(void)
{
        return;
}

/*
 * OSL interfaces added by Linux
 */

#endif                          /* __KERNEL__ */

#endif                          /* __ACLINUXEX_H__ */