alistair23-linux/arch/arm/mach-shmobile/pm-rcar.c
Linus Torvalds 12f03ee606 libnvdimm for 4.3

Merge tag 'libnvdimm-for-4.3' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm

Pull libnvdimm updates from Dan Williams:
 "This update has successfully completed a 0day-kbuild run and has
  appeared in a linux-next release.  The changes outside of the typical
  drivers/nvdimm/ and drivers/acpi/nfit.[ch] paths are related to the
  removal of IORESOURCE_CACHEABLE, the introduction of memremap(), and
  the introduction of ZONE_DEVICE + devm_memremap_pages().

  Summary:

   - Introduce ZONE_DEVICE and devm_memremap_pages() as a generic
     mechanism for adding device-driver-discovered memory regions to the
     kernel's direct map.

     This facility is used by the pmem driver to enable pfn_to_page()
     operations on the page frames returned by DAX ('direct_access' in
     'struct block_device_operations').

     For now, the 'memmap' allocation for these "device" pages comes
     from "System RAM".  Support for allocating the memmap from device
     memory will arrive in a later kernel (a usage sketch follows this
     summary).

   - Introduce memremap() to replace usages of ioremap_cache() and
     ioremap_wt().  memremap() drops the __iomem annotation for these
     mappings, since the memory they target has no I/O side effects.
     The replacement of ioremap_cache() with memremap() is limited to
     the pmem driver to ease merging the API change in v4.3 (a
     before/after sketch follows this summary).

     Completion of the conversion is targeted for v4.4.

   - Similar to the usage of memcpy_to_pmem() + wmb_pmem() in the pmem
     driver, update the VFS DAX implementation and PMEM API to provide
     persistence guarantees for kernel operations on a DAX mapping (a
     sketch of the pattern follows the commit list below).

   - Convert the ACPI NFIT 'BLK' driver to map the block apertures as
     cacheable to improve performance.

   - Miscellaneous updates and fixes to libnvdimm including support for
     issuing "address range scrub" commands, clarifying the optimal
     'sector size' of pmem devices, a clarification of the usage of the
     ACPI '_STA' (status) property for DIMM devices, and other minor
     fixes"

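And a hypothetical before/after of the memremap() conversion from the second
item: the same cacheable mapping, minus the __iomem annotation; 'offset' and
'size' are placeholders, and MEMREMAP_WB requests an ordinary write-back
mapping:

#include <linux/io.h>

static void *example_map_wb(resource_size_t offset, size_t size)
{
	/* before: void __iomem *p = ioremap_cache(offset, size); */
	return memremap(offset, size, MEMREMAP_WB);
}

Teardown correspondingly pairs memunmap() with memremap(), where iounmap()
was used before.
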
* tag 'libnvdimm-for-4.3' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm: (34 commits)
  libnvdimm, pmem: direct map legacy pmem by default
  libnvdimm, pmem: 'struct page' for pmem
  libnvdimm, pfn: 'struct page' provider infrastructure
  x86, pmem: clarify that ARCH_HAS_PMEM_API implies PMEM mapped WB
  add devm_memremap_pages
  mm: ZONE_DEVICE for "device memory"
  mm: move __phys_to_pfn and __pfn_to_phys to asm/generic/memory_model.h
  dax: drop size parameter to ->direct_access()
  nd_blk: change aperture mapping from WC to WB
  nvdimm: change to use generic kvfree()
  pmem, dax: have direct_access use __pmem annotation
  dax: update I/O path to do proper PMEM flushing
  pmem: add copy_from_iter_pmem() and clear_pmem()
  pmem, x86: clean up conditional pmem includes
  pmem: remove layer when calling arch_has_wmb_pmem()
  pmem, x86: move x86 PMEM API to new pmem.h header
  libnvdimm, e820: make CONFIG_X86_PMEM_LEGACY a tristate option
  pmem: switch to devm_ allocations
  devres: add devm_memremap
  libnvdimm, btt: write and validate parent_uuid
  ...
2015-09-08 14:35:59 -07:00
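
For the persistence item quoted above, a sketch of the store-then-fence
pattern the DAX path adopts in this merge; the wrapper is hypothetical, and
the calls assume an architecture providing ARCH_HAS_PMEM_API:

/*
 * Hypothetical helper: copy into a __pmem-annotated mapping, then
 * fence so the prior stores are durable on the media.
 */
#include <linux/pmem.h>

static void example_persist(void __pmem *dst, const void *src, size_t n)
{
	memcpy_to_pmem(dst, src, n);	/* arch-aware copy to pmem */
	wmb_pmem();			/* commit prior pmem stores */
}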


/*
 * R-Car SYSC Power management support
 *
 * Copyright (C) 2014 Magnus Damm
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/io.h>

#include "pm-rcar.h"
/* SYSC Common */
#define SYSCSR			0x00	/* SYSC Status Register */
#define SYSCISR			0x04	/* Interrupt Status Register */
#define SYSCISCR		0x08	/* Interrupt Status Clear Register */
#define SYSCIER			0x0c	/* Interrupt Enable Register */
#define SYSCIMR			0x10	/* Interrupt Mask Register */

/* SYSC Status Register */
#define SYSCSR_PONENB		1	/* Ready for power resume requests */
#define SYSCSR_POFFENB		0	/* Ready for power shutoff requests */

/*
 * Power Control Register Offsets inside the register block for each domain
 * Note: The "CR" registers for ARM cores exist on H1 only
 *	 Use WFI to power off, CPG/APMU to resume ARM cores on R-Car Gen2
 */
#define PWRSR_OFFS		0x00	/* Power Status Register */
#define PWROFFCR_OFFS		0x04	/* Power Shutoff Control Register */
#define PWROFFSR_OFFS		0x08	/* Power Shutoff Status Register */
#define PWRONCR_OFFS		0x0c	/* Power Resume Control Register */
#define PWRONSR_OFFS		0x10	/* Power Resume Status Register */
#define PWRER_OFFS		0x14	/* Power Shutoff/Resume Error */

#define SYSCSR_RETRIES		100
#define SYSCSR_DELAY_US		1

#define PWRER_RETRIES		100
#define PWRER_DELAY_US		1

#define SYSCISR_RETRIES		1000
#define SYSCISR_DELAY_US	1

static void __iomem *rcar_sysc_base;
static DEFINE_SPINLOCK(rcar_sysc_lock); /* SMP CPUs + I/O devices */
static int rcar_sysc_pwr_on_off(const struct rcar_sysc_ch *sysc_ch, bool on)
{
	unsigned int sr_bit, reg_offs;
	int k;

	if (on) {
		sr_bit = SYSCSR_PONENB;
		reg_offs = PWRONCR_OFFS;
	} else {
		sr_bit = SYSCSR_POFFENB;
		reg_offs = PWROFFCR_OFFS;
	}

	/* Wait until SYSC is ready to accept a power request */
	for (k = 0; k < SYSCSR_RETRIES; k++) {
		if (ioread32(rcar_sysc_base + SYSCSR) & BIT(sr_bit))
			break;

		udelay(SYSCSR_DELAY_US);
	}

	if (k == SYSCSR_RETRIES)
		return -EAGAIN;

	/* Submit power shutoff or power resume request */
	iowrite32(BIT(sysc_ch->chan_bit),
		  rcar_sysc_base + sysc_ch->chan_offs + reg_offs);

	return 0;
}
static int rcar_sysc_power(const struct rcar_sysc_ch *sysc_ch, bool on)
{
	unsigned int isr_mask = BIT(sysc_ch->isr_bit);
	unsigned int chan_mask = BIT(sysc_ch->chan_bit);
	unsigned int status;
	unsigned long flags;
	int ret = 0;
	int k;

	spin_lock_irqsave(&rcar_sysc_lock, flags);

	iowrite32(isr_mask, rcar_sysc_base + SYSCISCR);

	/* Submit power shutoff or resume request until it is accepted */
	for (k = 0; k < PWRER_RETRIES; k++) {
		ret = rcar_sysc_pwr_on_off(sysc_ch, on);
		if (ret)
			goto out;

		status = ioread32(rcar_sysc_base +
				  sysc_ch->chan_offs + PWRER_OFFS);
		if (!(status & chan_mask))
			break;

		udelay(PWRER_DELAY_US);
	}

	if (k == PWRER_RETRIES) {
		ret = -EIO;
		goto out;
	}

	/* Wait until the power shutoff or resume request has completed */
	for (k = 0; k < SYSCISR_RETRIES; k++) {
		if (ioread32(rcar_sysc_base + SYSCISR) & isr_mask)
			break;

		udelay(SYSCISR_DELAY_US);
	}

	if (k == SYSCISR_RETRIES)
		ret = -EIO;

	iowrite32(isr_mask, rcar_sysc_base + SYSCISCR);

out:
	spin_unlock_irqrestore(&rcar_sysc_lock, flags);

	pr_debug("sysc power domain %d: %08x -> %d\n",
		 sysc_ch->isr_bit, ioread32(rcar_sysc_base + SYSCISR), ret);
	return ret;
}
/* Request shutoff of the power domain described by sysc_ch */
int rcar_sysc_power_down(const struct rcar_sysc_ch *sysc_ch)
{
	return rcar_sysc_power(sysc_ch, false);
}

/* Request resume of the power domain described by sysc_ch */
int rcar_sysc_power_up(const struct rcar_sysc_ch *sysc_ch)
{
	return rcar_sysc_power(sysc_ch, true);
}

/* A set channel bit in the Power Status Register means "powered off" */
bool rcar_sysc_power_is_off(const struct rcar_sysc_ch *sysc_ch)
{
	unsigned int st;

	st = ioread32(rcar_sysc_base + sysc_ch->chan_offs + PWRSR_OFFS);
	if (st & BIT(sysc_ch->chan_bit))
		return true;

	return false;
}

/* Map the SYSC block; must run before any other rcar_sysc_*() call */
void __iomem *rcar_sysc_init(phys_addr_t base)
{
	rcar_sysc_base = ioremap_nocache(base, PAGE_SIZE);
	if (!rcar_sysc_base)
		panic("unable to ioremap R-Car SYSC hardware block\n");

	return rcar_sysc_base;
}
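
For context, a hedged usage sketch of this file's API, not part of pm-rcar.c
itself: struct rcar_sysc_ch and the function prototypes come from the
accompanying pm-rcar.h, while the channel values and the caller below are
illustrative placeholders (real ones live in the per-SoC pm-*.c files):

#include "pm-rcar.h"

/* Placeholder power-domain channel; field values are illustrative only */
static const struct rcar_sysc_ch example_ch = {
	.chan_offs = 0x180,	/* offset of this domain's register block */
	.chan_bit = 4,		/* request bit within PWRONCR/PWROFFCR */
	.isr_bit = 12,		/* completion bit within SYSCISR */
};

static int example_resume_domain(phys_addr_t sysc_base)
{
	rcar_sysc_init(sysc_base);	/* map the SYSC block once at boot */

	if (rcar_sysc_power_is_off(&example_ch))
		return rcar_sysc_power_up(&example_ch);

	return 0;
}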