Merge commit 'gcl/next' into next

Benjamin Herrenschmidt 2009-12-09 17:10:22 +11:00
commit 8c82da5e24
17 changed files with 1609 additions and 320 deletions


@ -103,7 +103,22 @@ fsl,mpc5200-gpt nodes
---------------------
On the mpc5200 and 5200b, GPT0 has a watchdog timer function. If the board
design supports the internal wdt, then the device node for GPT0 should
include the empty property 'fsl,has-wdt'.
include the empty property 'fsl,has-wdt'. Note that this does not activate
the watchdog. The timer will function as a GPT if the timer API is used, and
it will function as a watchdog if the watchdog device is used. The watchdog
mode has priority over the GPT mode, i.e. if the watchdog is activated, any
GPT API call to this timer will fail with -EBUSY.
If you add the property
fsl,wdt-on-boot = <n>;
GPT0 will be marked as an in-use watchdog, i.e. every GPT access to it will be
blocked. If n>0, the watchdog is started with a timeout of n seconds. If n=0,
the configuration of the watchdog is not touched. This is useful in two cases:
- just mark GPT0 as a watchdog, blocking GPT accesses, and configure it later;
- do not touch a configuration assigned by the boot loader, which supervises
the boot process itself.
The watchdog will respect the CONFIG_WATCHDOG_NOWAYOUT option.
An mpc5200-gpt can be used as a single line GPIO controller. To do so,
add the following properties to the gpt node:


@ -276,6 +276,53 @@ extern int mpc52xx_set_psc_clkdiv(int psc_id, int clkdiv);
extern unsigned int mpc52xx_get_xtal_freq(struct device_node *node);
extern void mpc52xx_restart(char *cmd);
/* mpc52xx_gpt.c */
struct mpc52xx_gpt_priv;
extern struct mpc52xx_gpt_priv *mpc52xx_gpt_from_irq(int irq);
extern int mpc52xx_gpt_start_timer(struct mpc52xx_gpt_priv *gpt, u64 period,
int continuous);
extern u64 mpc52xx_gpt_timer_period(struct mpc52xx_gpt_priv *gpt);
extern int mpc52xx_gpt_stop_timer(struct mpc52xx_gpt_priv *gpt);
/* mpc52xx_lpbfifo.c */
#define MPC52XX_LPBFIFO_FLAG_READ (0)
#define MPC52XX_LPBFIFO_FLAG_WRITE (1<<0)
#define MPC52XX_LPBFIFO_FLAG_NO_INCREMENT (1<<1)
#define MPC52XX_LPBFIFO_FLAG_NO_DMA (1<<2)
#define MPC52XX_LPBFIFO_FLAG_POLL_DMA (1<<3)
struct mpc52xx_lpbfifo_request {
struct list_head list;
/* localplus bus address */
unsigned int cs;
size_t offset;
/* Memory address */
void *data;
phys_addr_t data_phys;
/* Details of transfer */
size_t size;
size_t pos; /* current position of transfer */
int flags;
/* What to do when finished */
void (*callback)(struct mpc52xx_lpbfifo_request *);
void *priv; /* Driver private data */
/* statistics */
int irq_count;
int irq_ticks;
u8 last_byte;
int buffer_not_done_cnt;
};
extern int mpc52xx_lpbfifo_submit(struct mpc52xx_lpbfifo_request *req);
extern void mpc52xx_lpbfifo_abort(struct mpc52xx_lpbfifo_request *req);
extern void mpc52xx_lpbfifo_poll(void);
/* mpc52xx_pic.c */
extern void mpc52xx_init_irq(void);
extern unsigned int mpc52xx_get_irq(void);
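As a rough usage sketch (not part of this commit; the function name is made up, error handling is trimmed, and the virq is assumed to come from irq_of_parse_and_map() on the gpt node), the GPT timer API declared above could be driven like this:

#include <linux/kernel.h>
#include <asm/mpc52xx.h>

/* Hypothetical client: run a 10 ms one-shot timer on the GPT that owns 'virq'. */
static int example_use_gpt(int virq)
{
	struct mpc52xx_gpt_priv *gpt = mpc52xx_gpt_from_irq(virq);
	int rc;

	if (!gpt)
		return -ENODEV;

	rc = mpc52xx_gpt_start_timer(gpt, 10000000ULL, 0); /* 10 ms in ns, one-shot */
	if (rc)
		return rc; /* e.g. -EBUSY if gpt0 is owned by the watchdog */

	pr_info("programmed period: %llu ns\n",
		(unsigned long long)mpc52xx_gpt_timer_period(gpt));
	return mpc52xx_gpt_stop_timer(gpt);
}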


@ -161,7 +161,7 @@ void _memcpy_fromio(void *dest, const volatile void __iomem *src,
dest++;
n--;
}
while(n > 4) {
while(n >= 4) {
*((u32 *)dest) = *((volatile u32 *)vsrc);
eieio();
vsrc += 4;
@ -190,7 +190,7 @@ void _memcpy_toio(volatile void __iomem *dest, const void *src, unsigned long n)
vdest++;
n--;
}
while(n > 4) {
while(n >= 4) {
*((volatile u32 *)vdest) = *((volatile u32 *)src);
src += 4;
vdest += 4;


@ -62,3 +62,8 @@ config PPC_MPC5200_GPIO
select GENERIC_GPIO
help
Enable gpiolib support for mpc5200 based boards
config PPC_MPC5200_LPBFIFO
tristate "MPC5200 LocalPlus bus FIFO driver"
depends on PPC_MPC52xx
select PPC_BESTCOMM_GEN_BD


@ -15,3 +15,4 @@ ifeq ($(CONFIG_PPC_LITE5200),y)
endif
obj-$(CONFIG_PPC_MPC5200_GPIO) += mpc52xx_gpio.o
obj-$(CONFIG_PPC_MPC5200_LPBFIFO) += mpc52xx_lpbfifo.o


@ -16,8 +16,14 @@
* output signals or measure input signals.
*
* This driver supports the GPIO and IRQ controller functions of the GPT
* device. Timer functions are not yet supported, nor is the watchdog
* timer.
* device. Timer functions are not yet supported.
*
* The timer gpt0 can be used as a watchdog (wdt). If the wdt mode is used,
* this prevents the use of any gpt function on gpt0 (i.e. such calls will fail
* with -EBUSY). Thus, the safety wdt function always has precedence over the
* gpt function. If the kernel has been compiled with CONFIG_WATCHDOG_NOWAYOUT,
* this means that gpt0 is locked in wdt mode until the next reboot - this
* may be a requirement in safety applications.
*
* To use the GPIO function, the following two properties must be added
* to the device tree node for the gpt device (typically in the .dts file
@ -46,17 +52,24 @@
* the output mode. This driver does not change the output mode setting.
*/
#include <linux/device.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/kernel.h>
#include <linux/watchdog.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <asm/div64.h>
#include <asm/mpc52xx.h>
MODULE_DESCRIPTION("Freescale MPC52xx gpt driver");
MODULE_AUTHOR("Sascha Hauer, Grant Likely");
MODULE_AUTHOR("Sascha Hauer, Grant Likely, Albrecht Dreß");
MODULE_LICENSE("GPL");
/**
@ -66,18 +79,27 @@ MODULE_LICENSE("GPL");
* @lock: spinlock to coordinate between different functions.
* @of_gc: of_gpio_chip instance structure; used when GPIO is enabled
* @irqhost: Pointer to irq_host instance; used when IRQ mode is supported
* @wdt_mode: only relevant for gpt0: bit 0 (MPC52xx_GPT_CAN_WDT) indicates
* if the gpt may be used as wdt, bit 1 (MPC52xx_GPT_IS_WDT) indicates
* if the timer is actively used as wdt which blocks gpt functions
*/
struct mpc52xx_gpt_priv {
struct list_head list; /* List of all GPT devices */
struct device *dev;
struct mpc52xx_gpt __iomem *regs;
spinlock_t lock;
struct irq_host *irqhost;
u32 ipb_freq;
u8 wdt_mode;
#if defined(CONFIG_GPIOLIB)
struct of_gpio_chip of_gc;
#endif
};
LIST_HEAD(mpc52xx_gpt_list);
DEFINE_MUTEX(mpc52xx_gpt_list_mutex);
#define MPC52xx_GPT_MODE_MS_MASK (0x07)
#define MPC52xx_GPT_MODE_MS_IC (0x01)
#define MPC52xx_GPT_MODE_MS_OC (0x02)
@ -88,15 +110,25 @@ struct mpc52xx_gpt_priv {
#define MPC52xx_GPT_MODE_GPIO_OUT_LOW (0x20)
#define MPC52xx_GPT_MODE_GPIO_OUT_HIGH (0x30)
#define MPC52xx_GPT_MODE_COUNTER_ENABLE (0x1000)
#define MPC52xx_GPT_MODE_CONTINUOUS (0x0400)
#define MPC52xx_GPT_MODE_OPEN_DRAIN (0x0200)
#define MPC52xx_GPT_MODE_IRQ_EN (0x0100)
#define MPC52xx_GPT_MODE_WDT_EN (0x8000)
#define MPC52xx_GPT_MODE_ICT_MASK (0x030000)
#define MPC52xx_GPT_MODE_ICT_RISING (0x010000)
#define MPC52xx_GPT_MODE_ICT_FALLING (0x020000)
#define MPC52xx_GPT_MODE_ICT_TOGGLE (0x030000)
#define MPC52xx_GPT_MODE_WDT_PING (0xa5)
#define MPC52xx_GPT_STATUS_IRQMASK (0x000f)
#define MPC52xx_GPT_CAN_WDT (1 << 0)
#define MPC52xx_GPT_IS_WDT (1 << 1)
/* ---------------------------------------------------------------------
* Cascaded interrupt controller hooks
*/
@ -190,7 +222,7 @@ static int mpc52xx_gpt_irq_xlate(struct irq_host *h, struct device_node *ct,
dev_dbg(gpt->dev, "%s: flags=%i\n", __func__, intspec[0]);
if ((intsize < 1) || (intspec[0] < 1) || (intspec[0] > 3)) {
if ((intsize < 1) || (intspec[0] > 3)) {
dev_err(gpt->dev, "bad irq specifier in %s\n", ct->full_name);
return -EINVAL;
}
@ -211,13 +243,11 @@ mpc52xx_gpt_irq_setup(struct mpc52xx_gpt_priv *gpt, struct device_node *node)
{
int cascade_virq;
unsigned long flags;
/* Only setup cascaded IRQ if device tree claims the GPT is
* an interrupt controller */
if (!of_find_property(node, "interrupt-controller", NULL))
return;
u32 mode;
cascade_virq = irq_of_parse_and_map(node, 0);
if (!cascade_virq)
return;
gpt->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR, 1,
&mpc52xx_gpt_irq_ops, -1);
@ -227,14 +257,16 @@ mpc52xx_gpt_irq_setup(struct mpc52xx_gpt_priv *gpt, struct device_node *node)
}
gpt->irqhost->host_data = gpt;
set_irq_data(cascade_virq, gpt);
set_irq_chained_handler(cascade_virq, mpc52xx_gpt_irq_cascade);
/* Set to Input Capture mode */
/* If the GPT is currently disabled, then change it to be in Input
* Capture mode. If the mode is non-zero, then the pin could be
* already in use for something. */
spin_lock_irqsave(&gpt->lock, flags);
clrsetbits_be32(&gpt->regs->mode, MPC52xx_GPT_MODE_MS_MASK,
MPC52xx_GPT_MODE_MS_IC);
mode = in_be32(&gpt->regs->mode);
if ((mode & MPC52xx_GPT_MODE_MS_MASK) == 0)
out_be32(&gpt->regs->mode, mode | MPC52xx_GPT_MODE_MS_IC);
spin_unlock_irqrestore(&gpt->lock, flags);
dev_dbg(gpt->dev, "%s() complete. virq=%i\n", __func__, cascade_virq);
@ -335,6 +367,354 @@ static void
mpc52xx_gpt_gpio_setup(struct mpc52xx_gpt_priv *p, struct device_node *np) { }
#endif /* defined(CONFIG_GPIOLIB) */
/***********************************************************************
* Timer API
*/
/**
* mpc52xx_gpt_from_irq - Return the GPT device associated with an IRQ number
* @irq: irq of timer.
*/
struct mpc52xx_gpt_priv *mpc52xx_gpt_from_irq(int irq)
{
struct mpc52xx_gpt_priv *gpt;
struct list_head *pos;
/* Iterate over the list of timers looking for a matching device */
mutex_lock(&mpc52xx_gpt_list_mutex);
list_for_each(pos, &mpc52xx_gpt_list) {
gpt = container_of(pos, struct mpc52xx_gpt_priv, list);
if (gpt->irqhost && irq == irq_linear_revmap(gpt->irqhost, 0)) {
mutex_unlock(&mpc52xx_gpt_list_mutex);
return gpt;
}
}
mutex_unlock(&mpc52xx_gpt_list_mutex);
return NULL;
}
EXPORT_SYMBOL(mpc52xx_gpt_from_irq);
static int mpc52xx_gpt_do_start(struct mpc52xx_gpt_priv *gpt, u64 period,
int continuous, int as_wdt)
{
u32 clear, set;
u64 clocks;
u32 prescale;
unsigned long flags;
clear = MPC52xx_GPT_MODE_MS_MASK | MPC52xx_GPT_MODE_CONTINUOUS;
set = MPC52xx_GPT_MODE_MS_GPIO | MPC52xx_GPT_MODE_COUNTER_ENABLE;
if (as_wdt) {
clear |= MPC52xx_GPT_MODE_IRQ_EN;
set |= MPC52xx_GPT_MODE_WDT_EN;
} else if (continuous)
set |= MPC52xx_GPT_MODE_CONTINUOUS;
/* Determine the number of clocks in the requested period. 64 bit
* arithmetic is done here to preserve the precision until the value
* is scaled back down into the u32 range. Period is in 'ns', bus
* frequency is in Hz. */
clocks = period * (u64)gpt->ipb_freq;
do_div(clocks, 1000000000); /* Scale it down to ns range */
/* This device cannot handle a clock count greater than 32 bits */
if (clocks > 0xffffffff)
return -EINVAL;
/* Calculate the prescaler and count values from the clocks value.
* 'clocks' is the number of clock ticks in the period. The timer
* has 16 bit precision and a 16 bit prescaler. Prescaler is
* calculated by integer dividing the clocks by 0x10000 (shifting
* down 16 bits) to obtain the smallest possible divisor for clocks
* to get a 16 bit count value.
*
* Note: the prescale register is '1' based, not '0' based. ie. a
* value of '1' means divide the clock by one. 0xffff divides the
* clock by 0xffff. '0x0000' does not divide by zero, but wraps
* around and divides by 0x10000. That is why prescale must be
* a u32 variable, not a u16, for this calculation. */
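/* Illustrative example (numbers assumed, not part of this commit): with a
 * 33 MHz IPB clock and a requested period of 1 s (10^9 ns), clocks is
 * 33,000,000; prescale = (33,000,000 >> 16) + 1 = 504; and
 * 33,000,000 / 504 = 65476, which fits in the 16 bit count field. */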
prescale = (clocks >> 16) + 1;
do_div(clocks, prescale);
if (clocks > 0xffff) {
pr_err("calculation error; prescale:%x clocks:%llx\n",
prescale, clocks);
return -EINVAL;
}
/* Set and enable the timer, reject an attempt to use a wdt as gpt */
spin_lock_irqsave(&gpt->lock, flags);
if (as_wdt)
gpt->wdt_mode |= MPC52xx_GPT_IS_WDT;
else if ((gpt->wdt_mode & MPC52xx_GPT_IS_WDT) != 0) {
spin_unlock_irqrestore(&gpt->lock, flags);
return -EBUSY;
}
out_be32(&gpt->regs->count, prescale << 16 | clocks);
clrsetbits_be32(&gpt->regs->mode, clear, set);
spin_unlock_irqrestore(&gpt->lock, flags);
return 0;
}
/**
* mpc52xx_gpt_start_timer - Set and enable the GPT timer
* @gpt: Pointer to gpt private data structure
* @period: period of timer in ns; max. ~130s @ 33MHz IPB clock
* @continuous: set to 1 to make timer continuous free running
*
* An interrupt will be generated every time the timer fires
*/
int mpc52xx_gpt_start_timer(struct mpc52xx_gpt_priv *gpt, u64 period,
int continuous)
{
return mpc52xx_gpt_do_start(gpt, period, continuous, 0);
}
EXPORT_SYMBOL(mpc52xx_gpt_start_timer);
/**
* mpc52xx_gpt_stop_timer - Stop a gpt
* @gpt: Pointer to gpt private data structure
*
* Returns an error if attempting to stop a wdt
*/
int mpc52xx_gpt_stop_timer(struct mpc52xx_gpt_priv *gpt)
{
unsigned long flags;
/* reject the operation if the timer is used as watchdog (gpt 0 only) */
spin_lock_irqsave(&gpt->lock, flags);
if ((gpt->wdt_mode & MPC52xx_GPT_IS_WDT) != 0) {
spin_unlock_irqrestore(&gpt->lock, flags);
return -EBUSY;
}
clrbits32(&gpt->regs->mode, MPC52xx_GPT_MODE_COUNTER_ENABLE);
spin_unlock_irqrestore(&gpt->lock, flags);
return 0;
}
EXPORT_SYMBOL(mpc52xx_gpt_stop_timer);
/**
* mpc52xx_gpt_timer_period - Read the timer period
* @gpt: Pointer to gpt private data structure
*
* Returns the timer period in ns
*/
u64 mpc52xx_gpt_timer_period(struct mpc52xx_gpt_priv *gpt)
{
u64 period;
u64 prescale;
unsigned long flags;
spin_lock_irqsave(&gpt->lock, flags);
period = in_be32(&gpt->regs->count);
spin_unlock_irqrestore(&gpt->lock, flags);
prescale = period >> 16;
period &= 0xffff;
if (prescale == 0)
prescale = 0x10000;
period = period * prescale * 1000000000ULL;
do_div(period, (u64)gpt->ipb_freq);
return period;
}
EXPORT_SYMBOL(mpc52xx_gpt_timer_period);
#if defined(CONFIG_MPC5200_WDT)
/***********************************************************************
* Watchdog API for gpt0
*/
#define WDT_IDENTITY "mpc52xx watchdog on GPT0"
/* wdt_is_active stores whether or not the /dev/watchdog device is opened */
static unsigned long wdt_is_active;
/* wdt-capable gpt */
static struct mpc52xx_gpt_priv *mpc52xx_gpt_wdt;
/* low-level wdt functions */
static inline void mpc52xx_gpt_wdt_ping(struct mpc52xx_gpt_priv *gpt_wdt)
{
unsigned long flags;
spin_lock_irqsave(&gpt_wdt->lock, flags);
out_8((u8 *) &gpt_wdt->regs->mode, MPC52xx_GPT_MODE_WDT_PING);
spin_unlock_irqrestore(&gpt_wdt->lock, flags);
}
/* wdt misc device api */
static ssize_t mpc52xx_wdt_write(struct file *file, const char __user *data,
size_t len, loff_t *ppos)
{
struct mpc52xx_gpt_priv *gpt_wdt = file->private_data;
mpc52xx_gpt_wdt_ping(gpt_wdt);
return 0;
}
static struct watchdog_info mpc5200_wdt_info = {
.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
.identity = WDT_IDENTITY,
};
static long mpc52xx_wdt_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct mpc52xx_gpt_priv *gpt_wdt = file->private_data;
int __user *data = (int __user *)arg;
int timeout;
u64 real_timeout;
int ret = 0;
switch (cmd) {
case WDIOC_GETSUPPORT:
ret = copy_to_user(data, &mpc5200_wdt_info,
sizeof(mpc5200_wdt_info));
if (ret)
ret = -EFAULT;
break;
case WDIOC_GETSTATUS:
case WDIOC_GETBOOTSTATUS:
ret = put_user(0, data);
break;
case WDIOC_KEEPALIVE:
mpc52xx_gpt_wdt_ping(gpt_wdt);
break;
case WDIOC_SETTIMEOUT:
ret = get_user(timeout, data);
if (ret)
break;
real_timeout = (u64) timeout * 1000000000ULL;
ret = mpc52xx_gpt_do_start(gpt_wdt, real_timeout, 0, 1);
if (ret)
break;
/* fall through and return the timeout */
case WDIOC_GETTIMEOUT:
/* we need to round here so as to avoid e.g. the following
* situation:
* - timeout requested is 1 second;
* - real timeout @33MHz is 999997090ns
* - the int divide by 10^9 will return 0.
*/
real_timeout =
mpc52xx_gpt_timer_period(gpt_wdt) + 500000000ULL;
do_div(real_timeout, 1000000000ULL);
timeout = (int) real_timeout;
ret = put_user(timeout, data);
break;
default:
ret = -ENOTTY;
}
return ret;
}
static int mpc52xx_wdt_open(struct inode *inode, struct file *file)
{
int ret;
/* sanity check */
if (!mpc52xx_gpt_wdt)
return -ENODEV;
/* /dev/watchdog can only be opened once */
if (test_and_set_bit(0, &wdt_is_active))
return -EBUSY;
/* Set and activate the watchdog with 30 seconds timeout */
ret = mpc52xx_gpt_do_start(mpc52xx_gpt_wdt, 30ULL * 1000000000ULL,
0, 1);
if (ret) {
clear_bit(0, &wdt_is_active);
return ret;
}
file->private_data = mpc52xx_gpt_wdt;
return nonseekable_open(inode, file);
}
static int mpc52xx_wdt_release(struct inode *inode, struct file *file)
{
/* note: releasing the wdt in NOWAYOUT-mode does not stop it */
#if !defined(CONFIG_WATCHDOG_NOWAYOUT)
struct mpc52xx_gpt_priv *gpt_wdt = file->private_data;
unsigned long flags;
spin_lock_irqsave(&gpt_wdt->lock, flags);
clrbits32(&gpt_wdt->regs->mode,
MPC52xx_GPT_MODE_COUNTER_ENABLE | MPC52xx_GPT_MODE_WDT_EN);
gpt_wdt->wdt_mode &= ~MPC52xx_GPT_IS_WDT;
spin_unlock_irqrestore(&gpt_wdt->lock, flags);
#endif
clear_bit(0, &wdt_is_active);
return 0;
}
static const struct file_operations mpc52xx_wdt_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.write = mpc52xx_wdt_write,
.unlocked_ioctl = mpc52xx_wdt_ioctl,
.open = mpc52xx_wdt_open,
.release = mpc52xx_wdt_release,
};
static struct miscdevice mpc52xx_wdt_miscdev = {
.minor = WATCHDOG_MINOR,
.name = "watchdog",
.fops = &mpc52xx_wdt_fops,
};
static int __devinit mpc52xx_gpt_wdt_init(void)
{
int err;
/* try to register the watchdog misc device */
err = misc_register(&mpc52xx_wdt_miscdev);
if (err)
pr_err("%s: cannot register watchdog device\n", WDT_IDENTITY);
else
pr_info("%s: watchdog device registered\n", WDT_IDENTITY);
return err;
}
static int mpc52xx_gpt_wdt_setup(struct mpc52xx_gpt_priv *gpt,
const u32 *period)
{
u64 real_timeout;
/* remember the gpt for the wdt operation */
mpc52xx_gpt_wdt = gpt;
/* configure the wdt if the device tree contained a timeout */
if (!period || *period == 0)
return 0;
real_timeout = (u64) *period * 1000000000ULL;
if (mpc52xx_gpt_do_start(gpt, real_timeout, 0, 1))
dev_warn(gpt->dev, "starting as wdt failed\n");
else
dev_info(gpt->dev, "watchdog set to %us timeout\n", *period);
return 0;
}
#else
static int __devinit mpc52xx_gpt_wdt_init(void)
{
return 0;
}
#define mpc52xx_gpt_wdt_setup(x, y) (0)
#endif /* CONFIG_MPC5200_WDT */
/* ---------------------------------------------------------------------
* of_platform bus binding code
*/
@ -349,6 +729,7 @@ static int __devinit mpc52xx_gpt_probe(struct of_device *ofdev,
spin_lock_init(&gpt->lock);
gpt->dev = &ofdev->dev;
gpt->ipb_freq = mpc5xxx_get_bus_frequency(ofdev->node);
gpt->regs = of_iomap(ofdev->node, 0);
if (!gpt->regs) {
kfree(gpt);
@ -360,6 +741,26 @@ static int __devinit mpc52xx_gpt_probe(struct of_device *ofdev,
mpc52xx_gpt_gpio_setup(gpt, ofdev->node);
mpc52xx_gpt_irq_setup(gpt, ofdev->node);
mutex_lock(&mpc52xx_gpt_list_mutex);
list_add(&gpt->list, &mpc52xx_gpt_list);
mutex_unlock(&mpc52xx_gpt_list_mutex);
/* check if this device could be a watchdog */
if (of_get_property(ofdev->node, "fsl,has-wdt", NULL) ||
of_get_property(ofdev->node, "has-wdt", NULL)) {
const u32 *on_boot_wdt;
gpt->wdt_mode = MPC52xx_GPT_CAN_WDT;
on_boot_wdt = of_get_property(ofdev->node, "fsl,wdt-on-boot",
NULL);
if (on_boot_wdt) {
dev_info(gpt->dev, "used as watchdog\n");
gpt->wdt_mode |= MPC52xx_GPT_IS_WDT;
} else
dev_info(gpt->dev, "can function as watchdog\n");
mpc52xx_gpt_wdt_setup(gpt, on_boot_wdt);
}
return 0;
}
@ -394,3 +795,4 @@ static int __init mpc52xx_gpt_init(void)
/* Make sure GPIOs and IRQs get set up before anyone tries to use them */
subsys_initcall(mpc52xx_gpt_init);
device_initcall(mpc52xx_gpt_wdt_init);
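For reference, a minimal user-space sketch (not part of this commit) of exercising the watchdog through the standard /dev/watchdog interface that the misc device above implements; it relies only on the ioctls defined in <linux/watchdog.h>:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int timeout = 10; /* seconds */
	int fd = open("/dev/watchdog", O_WRONLY);

	if (fd < 0)
		return 1;
	ioctl(fd, WDIOC_SETTIMEOUT, &timeout); /* driver writes back the rounded value */
	ioctl(fd, WDIOC_KEEPALIVE, 0);         /* ping the watchdog */
	close(fd); /* stops the wdt unless CONFIG_WATCHDOG_NOWAYOUT is set */
	return 0;
}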


@ -0,0 +1,560 @@
/*
* LocalPlus Bus FIFO driver for the Freescale MPC52xx.
*
* Copyright (C) 2009 Secret Lab Technologies Ltd.
*
* This file is released under the GPLv2
*
* Todo:
* - Add support for multiple requests to be queued.
*/
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/spinlock.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/mpc52xx.h>
#include <asm/time.h>
#include <sysdev/bestcomm/bestcomm.h>
#include <sysdev/bestcomm/bestcomm_priv.h>
#include <sysdev/bestcomm/gen_bd.h>
MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
MODULE_DESCRIPTION("MPC5200 LocalPlus FIFO device driver");
MODULE_LICENSE("GPL");
#define LPBFIFO_REG_PACKET_SIZE (0x00)
#define LPBFIFO_REG_START_ADDRESS (0x04)
#define LPBFIFO_REG_CONTROL (0x08)
#define LPBFIFO_REG_ENABLE (0x0C)
#define LPBFIFO_REG_BYTES_DONE_STATUS (0x14)
#define LPBFIFO_REG_FIFO_DATA (0x40)
#define LPBFIFO_REG_FIFO_STATUS (0x44)
#define LPBFIFO_REG_FIFO_CONTROL (0x48)
#define LPBFIFO_REG_FIFO_ALARM (0x4C)
struct mpc52xx_lpbfifo {
struct device *dev;
phys_addr_t regs_phys;
void __iomem *regs;
int irq;
spinlock_t lock;
struct bcom_task *bcom_tx_task;
struct bcom_task *bcom_rx_task;
struct bcom_task *bcom_cur_task;
/* Current state data */
struct mpc52xx_lpbfifo_request *req;
int dma_irqs_enabled;
};
/* The MPC5200 has only one fifo, so only need one instance structure */
static struct mpc52xx_lpbfifo lpbfifo;
/**
* mpc52xx_lpbfifo_kick - Trigger the next block of data to be transferred
*/
static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req)
{
size_t transfer_size = req->size - req->pos;
struct bcom_bd *bd;
void __iomem *reg;
u32 *data;
int i;
int bit_fields;
int dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA);
int write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE;
int poll_dma = req->flags & MPC52XX_LPBFIFO_FLAG_POLL_DMA;
/* Set and clear the reset bits; the User Manual recommends this as good practice */
out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);
/* set master enable bit */
out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x00000001);
if (!dma) {
/* While the FIFO can be setup for transfer sizes as large as
* 16M-1, the FIFO itself is only 512 bytes deep and it does
* not generate interrupts for FIFO full events (only transfer
* complete will raise an IRQ). Therefore when not using
* Bestcomm to drive the FIFO it needs to either be polled, or
* transfers need to be constrained to the size of the fifo.
*
* This driver restricts the size of the transfer
*/
if (transfer_size > 512)
transfer_size = 512;
/* Load the FIFO with data */
if (write) {
reg = lpbfifo.regs + LPBFIFO_REG_FIFO_DATA;
data = req->data + req->pos;
for (i = 0; i < transfer_size; i += 4)
out_be32(reg, *data++);
}
/* Unmask both error and completion irqs */
out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x00000301);
} else {
/* Choose the correct direction
*
* Configure the watermarks so DMA will always complete correctly.
* It may be worth experimenting with the ALARM value to see if
* there is a performance impact. However, if it is wrong there
* is a risk of DMA not transferring the last chunk of data
*/
if (write) {
out_be32(lpbfifo.regs + LPBFIFO_REG_FIFO_ALARM, 0x1e4);
out_8(lpbfifo.regs + LPBFIFO_REG_FIFO_CONTROL, 7);
lpbfifo.bcom_cur_task = lpbfifo.bcom_tx_task;
} else {
out_be32(lpbfifo.regs + LPBFIFO_REG_FIFO_ALARM, 0x1ff);
out_8(lpbfifo.regs + LPBFIFO_REG_FIFO_CONTROL, 0);
lpbfifo.bcom_cur_task = lpbfifo.bcom_rx_task;
if (poll_dma) {
if (lpbfifo.dma_irqs_enabled) {
disable_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task));
lpbfifo.dma_irqs_enabled = 0;
}
} else {
if (!lpbfifo.dma_irqs_enabled) {
enable_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task));
lpbfifo.dma_irqs_enabled = 1;
}
}
}
bd = bcom_prepare_next_buffer(lpbfifo.bcom_cur_task);
bd->status = transfer_size;
if (!write) {
/*
* In the DMA read case, the DMA doesn't complete,
* possibly due to incorrect watermarks in the ALARM
* and CONTROL regs. For now instead of trying to
* determine the right watermarks that will make this
* work, just increase the number of bytes the FIFO is
* expecting.
*
* When submitting another operation, the FIFO will get
* reset, so the condition of the FIFO waiting for a
* non-existent 4 bytes will get cleared.
*/
transfer_size += 4; /* BLECH! */
}
bd->data[0] = req->data_phys + req->pos;
bcom_submit_next_buffer(lpbfifo.bcom_cur_task, NULL);
/* error irq & master enabled bit */
bit_fields = 0x00000201;
/* Unmask irqs */
if (write && (!poll_dma))
bit_fields |= 0x00000100; /* completion irq too */
out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, bit_fields);
}
/* Set transfer size, width, chip select and READ mode */
out_be32(lpbfifo.regs + LPBFIFO_REG_START_ADDRESS,
req->offset + req->pos);
out_be32(lpbfifo.regs + LPBFIFO_REG_PACKET_SIZE, transfer_size);
bit_fields = req->cs << 24 | 0x000008;
if (!write)
bit_fields |= 0x010000; /* read mode */
out_be32(lpbfifo.regs + LPBFIFO_REG_CONTROL, bit_fields);
/* Kick it off */
out_8(lpbfifo.regs + LPBFIFO_REG_PACKET_SIZE, 0x01);
if (dma)
bcom_enable(lpbfifo.bcom_cur_task);
}
/**
* mpc52xx_lpbfifo_irq - IRQ handler for LPB FIFO
*
* On transmit, the dma completion irq triggers before the fifo completion
* triggers. Handle the dma completion here instead of the LPB FIFO Bestcomm
* task completion irq because everything is not really done until the LPB FIFO
* completion irq triggers.
*
* In other words:
* For DMA, on receive, the "Fat Lady" is the bestcomm completion irq. On
* transmit, the fifo completion irq is the "Fat Lady". The opera (or in this
* case the DMA/FIFO operation) is not finished until the "Fat Lady" sings.
*
* Reasons for entering this routine:
* 1) PIO mode rx and tx completion irq
* 2) DMA interrupt mode tx completion irq
* 3) DMA polled mode tx
*
* Exit conditions:
* 1) Transfer aborted
* 2) FIFO complete without DMA; more data to do
* 3) FIFO complete without DMA; all data transferred
* 4) FIFO complete using DMA
*
* Condition 1 can occur regardless of whether or not DMA is used.
* It requires executing the callback to report the error and exiting
* immediately.
*
* Condition 2 requires programming the FIFO with the next block of data
*
* Condition 3 requires executing the callback to report completion
*
* Condition 4 means the same as 3, except that we also retrieve the bcom
* buffer so DMA doesn't get clogged up.
*
* To make things trickier, the spinlock must be dropped before
* executing the callback, otherwise we could end up with a deadlock
* or nested spinlock condition. The out path is non-trivial, so
* extra fiddling is done to make sure all paths lead to the same
* outbound code.
*/
static irqreturn_t mpc52xx_lpbfifo_irq(int irq, void *dev_id)
{
struct mpc52xx_lpbfifo_request *req;
u32 status = in_8(lpbfifo.regs + LPBFIFO_REG_BYTES_DONE_STATUS);
void __iomem *reg;
u32 *data;
int count, i;
int do_callback = 0;
u32 ts;
unsigned long flags;
int dma, write, poll_dma;
spin_lock_irqsave(&lpbfifo.lock, flags);
ts = get_tbl();
req = lpbfifo.req;
if (!req) {
spin_unlock_irqrestore(&lpbfifo.lock, flags);
pr_err("bogus LPBFIFO IRQ\n");
return IRQ_HANDLED;
}
dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA);
write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE;
poll_dma = req->flags & MPC52XX_LPBFIFO_FLAG_POLL_DMA;
if (dma && !write) {
spin_unlock_irqrestore(&lpbfifo.lock, flags);
pr_err("bogus LPBFIFO IRQ (dma and not writting)\n");
return IRQ_HANDLED;
}
if ((status & 0x01) == 0) {
goto out;
}
/* check abort bit */
if (status & 0x10) {
out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);
do_callback = 1;
goto out;
}
/* Read result from hardware */
count = in_be32(lpbfifo.regs + LPBFIFO_REG_BYTES_DONE_STATUS);
count &= 0x00ffffff;
if (!dma && !write) {
/* copy the data out of the FIFO */
reg = lpbfifo.regs + LPBFIFO_REG_FIFO_DATA;
data = req->data + req->pos;
for (i = 0; i < count; i += 4)
*data++ = in_be32(reg);
}
/* Update transfer position and count */
req->pos += count;
/* Decide what to do next */
if (req->size - req->pos)
mpc52xx_lpbfifo_kick(req); /* more work to do */
else
do_callback = 1;
out:
/* Clear the IRQ */
out_8(lpbfifo.regs + LPBFIFO_REG_BYTES_DONE_STATUS, 0x01);
if (dma && (status & 0x11)) {
/*
* Count the DMA as complete only when the FIFO completion
* status or abort bits are set.
*
* (status & 0x01) should always be the case except sometimes
* when using polled DMA.
*
* (status & 0x10) {transfer aborted}: This case needs more
* testing.
*/
bcom_retrieve_buffer(lpbfifo.bcom_cur_task, &status, NULL);
}
req->last_byte = ((u8 *)req->data)[req->size - 1];
/* When the do_callback flag is set, it means the transfer is finished
* so set the FIFO as idle */
if (do_callback)
lpbfifo.req = NULL;
if (irq != 0) /* don't increment on polled case */
req->irq_count++;
req->irq_ticks += get_tbl() - ts;
spin_unlock_irqrestore(&lpbfifo.lock, flags);
/* Spinlock is released; it is now safe to call the callback */
if (do_callback && req->callback)
req->callback(req);
return IRQ_HANDLED;
}
/**
* mpc52xx_lpbfifo_bcom_irq - IRQ handler for LPB FIFO Bestcomm task
*
* Only used when receiving data.
*/
static irqreturn_t mpc52xx_lpbfifo_bcom_irq(int irq, void *dev_id)
{
struct mpc52xx_lpbfifo_request *req;
unsigned long flags;
u32 status;
u32 ts;
spin_lock_irqsave(&lpbfifo.lock, flags);
ts = get_tbl();
req = lpbfifo.req;
if (!req || (req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA)) {
spin_unlock_irqrestore(&lpbfifo.lock, flags);
return IRQ_HANDLED;
}
if (irq != 0) /* don't increment on polled case */
req->irq_count++;
if (!bcom_buffer_done(lpbfifo.bcom_cur_task)) {
spin_unlock_irqrestore(&lpbfifo.lock, flags);
req->buffer_not_done_cnt++;
if ((req->buffer_not_done_cnt % 1000) == 0)
pr_err("transfer stalled\n");
return IRQ_HANDLED;
}
bcom_retrieve_buffer(lpbfifo.bcom_cur_task, &status, NULL);
req->last_byte = ((u8 *)req->data)[req->size - 1];
req->pos = status & 0x00ffffff;
/* Mark the FIFO as idle */
lpbfifo.req = NULL;
/* Release the lock before calling out to the callback. */
req->irq_ticks += get_tbl() - ts;
spin_unlock_irqrestore(&lpbfifo.lock, flags);
if (req->callback)
req->callback(req);
return IRQ_HANDLED;
}
/**
* mpc52xx_lpbfifo_poll - Poll for DMA completion
*/
void mpc52xx_lpbfifo_poll(void)
{
struct mpc52xx_lpbfifo_request *req = lpbfifo.req;
int dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA);
int write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE;
/*
* For more information, see comments on the "Fat Lady"
*/
if (dma && write)
mpc52xx_lpbfifo_irq(0, NULL);
else
mpc52xx_lpbfifo_bcom_irq(0, NULL);
}
EXPORT_SYMBOL(mpc52xx_lpbfifo_poll);
/**
* mpc52xx_lpbfifo_submit - Submit an LPB FIFO transfer request.
* @req: Pointer to request structure
*/
int mpc52xx_lpbfifo_submit(struct mpc52xx_lpbfifo_request *req)
{
unsigned long flags;
if (!lpbfifo.regs)
return -ENODEV;
spin_lock_irqsave(&lpbfifo.lock, flags);
/* If the req pointer is already set, then a transfer is in progress */
if (lpbfifo.req) {
spin_unlock_irqrestore(&lpbfifo.lock, flags);
return -EBUSY;
}
/* Setup the transfer */
lpbfifo.req = req;
req->irq_count = 0;
req->irq_ticks = 0;
req->buffer_not_done_cnt = 0;
req->pos = 0;
mpc52xx_lpbfifo_kick(req);
spin_unlock_irqrestore(&lpbfifo.lock, flags);
return 0;
}
EXPORT_SYMBOL(mpc52xx_lpbfifo_submit);
void mpc52xx_lpbfifo_abort(struct mpc52xx_lpbfifo_request *req)
{
unsigned long flags;
spin_lock_irqsave(&lpbfifo.lock, flags);
if (lpbfifo.req == req) {
/* Put it into reset and clear the state */
bcom_gen_bd_rx_reset(lpbfifo.bcom_rx_task);
bcom_gen_bd_tx_reset(lpbfifo.bcom_tx_task);
out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);
lpbfifo.req = NULL;
}
spin_unlock_irqrestore(&lpbfifo.lock, flags);
}
EXPORT_SYMBOL(mpc52xx_lpbfifo_abort);
static int __devinit
mpc52xx_lpbfifo_probe(struct of_device *op, const struct of_device_id *match)
{
struct resource res;
int rc = -ENOMEM;
if (lpbfifo.dev != NULL)
return -ENOSPC;
lpbfifo.irq = irq_of_parse_and_map(op->node, 0);
if (!lpbfifo.irq)
return -ENODEV;
if (of_address_to_resource(op->node, 0, &res))
return -ENODEV;
lpbfifo.regs_phys = res.start;
lpbfifo.regs = of_iomap(op->node, 0);
if (!lpbfifo.regs)
return -ENOMEM;
spin_lock_init(&lpbfifo.lock);
/* Put FIFO into reset */
out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);
/* Register the interrupt handler */
rc = request_irq(lpbfifo.irq, mpc52xx_lpbfifo_irq, 0,
"mpc52xx-lpbfifo", &lpbfifo);
if (rc)
goto err_irq;
/* Request the Bestcomm receive (fifo --> memory) task and IRQ */
lpbfifo.bcom_rx_task =
bcom_gen_bd_rx_init(2, res.start + LPBFIFO_REG_FIFO_DATA,
BCOM_INITIATOR_SCLPC, BCOM_IPR_SCLPC,
16*1024*1024);
if (!lpbfifo.bcom_rx_task)
goto err_bcom_rx;
rc = request_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task),
mpc52xx_lpbfifo_bcom_irq, 0,
"mpc52xx-lpbfifo-rx", &lpbfifo);
if (rc)
goto err_bcom_rx_irq;
/* Request the Bestcomm transmit (memory --> fifo) task and IRQ */
lpbfifo.bcom_tx_task =
bcom_gen_bd_tx_init(2, res.start + LPBFIFO_REG_FIFO_DATA,
BCOM_INITIATOR_SCLPC, BCOM_IPR_SCLPC);
if (!lpbfifo.bcom_tx_task)
goto err_bcom_tx;
lpbfifo.dev = &op->dev;
return 0;
err_bcom_tx:
free_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task), &lpbfifo);
err_bcom_rx_irq:
bcom_gen_bd_rx_release(lpbfifo.bcom_rx_task);
err_bcom_rx:
err_irq:
iounmap(lpbfifo.regs);
lpbfifo.regs = NULL;
dev_err(&op->dev, "mpc52xx_lpbfifo_probe() failed\n");
return -ENODEV;
}
static int __devexit mpc52xx_lpbfifo_remove(struct of_device *op)
{
if (lpbfifo.dev != &op->dev)
return 0;
/* Put FIFO in reset */
out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);
/* Release the bestcomm transmit task */
free_irq(bcom_get_task_irq(lpbfifo.bcom_tx_task), &lpbfifo);
bcom_gen_bd_tx_release(lpbfifo.bcom_tx_task);
/* Release the bestcomm receive task */
free_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task), &lpbfifo);
bcom_gen_bd_rx_release(lpbfifo.bcom_rx_task);
free_irq(lpbfifo.irq, &lpbfifo);
iounmap(lpbfifo.regs);
lpbfifo.regs = NULL;
lpbfifo.dev = NULL;
return 0;
}
static struct of_device_id mpc52xx_lpbfifo_match[] __devinitconst = {
{ .compatible = "fsl,mpc5200-lpbfifo", },
{},
};
static struct of_platform_driver mpc52xx_lpbfifo_driver = {
.owner = THIS_MODULE,
.name = "mpc52xx-lpbfifo",
.match_table = mpc52xx_lpbfifo_match,
.probe = mpc52xx_lpbfifo_probe,
.remove = __devexit_p(mpc52xx_lpbfifo_remove),
};
/***********************************************************************
* Module init/exit
*/
static int __init mpc52xx_lpbfifo_init(void)
{
pr_debug("Registering LocalPlus bus FIFO driver\n");
return of_register_platform_driver(&mpc52xx_lpbfifo_driver);
}
module_init(mpc52xx_lpbfifo_init);
static void __exit mpc52xx_lpbfifo_exit(void)
{
pr_debug("Unregistering LocalPlus bus FIFO driver\n");
of_unregister_platform_driver(&mpc52xx_lpbfifo_driver);
}
module_exit(mpc52xx_lpbfifo_exit);
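As a hedged sketch of how a client might use the request interface above (the chip select number, offset, and buffer handling are made up for illustration; the request must stay valid until the callback runs):

#include <asm/mpc52xx.h>

/* Hypothetical caller: write 512 bytes to LocalPlus chip select 1 at offset 0
 * in PIO mode, and get notified through the callback when it completes. */
static void example_lpb_done(struct mpc52xx_lpbfifo_request *req)
{
	pr_info("lpbfifo: %zu byte transfer complete\n", req->size);
}

static int example_lpb_write(void *buf)
{
	static struct mpc52xx_lpbfifo_request req;

	req.cs = 1;       /* LocalPlus chip select (assumed) */
	req.offset = 0;   /* offset within that chip select */
	req.data = buf;
	req.size = 512;
	req.flags = MPC52XX_LPBFIFO_FLAG_WRITE | MPC52XX_LPBFIFO_FLAG_NO_DMA;
	req.callback = example_lpb_done;

	return mpc52xx_lpbfifo_submit(&req); /* -EBUSY if a transfer is pending */
}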


@ -22,6 +22,8 @@
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
MODULE_LICENSE("GPL");
enum {
CD_GPIO = 0,
WP_GPIO,


@ -133,6 +133,14 @@ config SPI_LM70_LLP
which interfaces to an LM70 temperature sensor using
a parallel port.
config SPI_MPC52xx
tristate "Freescale MPC52xx SPI (non-PSC) controller support"
depends on PPC_MPC52xx && SPI
select SPI_MASTER_OF
help
This driver supports the MPC52xx SPI controller in master SPI
mode.
config SPI_MPC52xx_PSC
tristate "Freescale MPC52xx PSC SPI controller"
depends on PPC_MPC52xx && EXPERIMENTAL


@ -25,6 +25,7 @@ obj-$(CONFIG_SPI_OMAP24XX) += omap2_mcspi.o
obj-$(CONFIG_SPI_ORION) += orion_spi.o
obj-$(CONFIG_SPI_PL022) += amba-pl022.o
obj-$(CONFIG_SPI_MPC52xx_PSC) += mpc52xx_psc_spi.o
obj-$(CONFIG_SPI_MPC52xx) += mpc52xx_spi.o
obj-$(CONFIG_SPI_MPC8xxx) += spi_mpc8xxx.o
obj-$(CONFIG_SPI_PPC4xx) += spi_ppc4xx.o
obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o


@ -17,6 +17,7 @@
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/of_platform.h>
#include <linux/of_spi.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/io.h>
@ -313,11 +314,13 @@ static int mpc52xx_psc_spi_port_config(int psc_id, struct mpc52xx_psc_spi *mps)
struct mpc52xx_psc __iomem *psc = mps->psc;
struct mpc52xx_psc_fifo __iomem *fifo = mps->fifo;
u32 mclken_div;
int ret = 0;
int ret;
/* default sysclk is 512MHz */
mclken_div = (mps->sysclk ? mps->sysclk : 512000000) / MCLK;
mpc52xx_set_psc_clkdiv(psc_id, mclken_div);
ret = mpc52xx_set_psc_clkdiv(psc_id, mclken_div);
if (ret)
return ret;
/* Reset the PSC into a known state */
out_8(&psc->command, MPC52xx_PSC_RST_RX);
@ -341,7 +344,7 @@ static int mpc52xx_psc_spi_port_config(int psc_id, struct mpc52xx_psc_spi *mps)
mps->bits_per_word = 8;
return ret;
return 0;
}
static irqreturn_t mpc52xx_psc_spi_isr(int irq, void *dev_id)
@ -410,8 +413,10 @@ static int __init mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr,
goto free_master;
ret = mpc52xx_psc_spi_port_config(master->bus_num, mps);
if (ret < 0)
if (ret < 0) {
dev_err(dev, "can't configure PSC! Is it capable of SPI?\n");
goto free_irq;
}
spin_lock_init(&mps->lock);
init_completion(&mps->done);
@ -464,10 +469,11 @@ static int __init mpc52xx_psc_spi_of_probe(struct of_device *op,
const u32 *regaddr_p;
u64 regaddr64, size64;
s16 id = -1;
int rc;
regaddr_p = of_get_address(op->node, 0, &size64, NULL);
if (!regaddr_p) {
printk(KERN_ERR "Invalid PSC address\n");
dev_err(&op->dev, "Invalid PSC address\n");
return -EINVAL;
}
regaddr64 = of_translate_address(op->node, regaddr_p);
@ -478,15 +484,18 @@ static int __init mpc52xx_psc_spi_of_probe(struct of_device *op,
psc_nump = of_get_property(op->node, "cell-index", NULL);
if (!psc_nump || *psc_nump > 5) {
printk(KERN_ERR "mpc52xx_psc_spi: Device node %s has invalid "
"cell-index property\n", op->node->full_name);
dev_err(&op->dev, "Invalid cell-index property\n");
return -EINVAL;
}
id = *psc_nump + 1;
}
return mpc52xx_psc_spi_do_probe(&op->dev, (u32)regaddr64, (u32)size64,
rc = mpc52xx_psc_spi_do_probe(&op->dev, (u32)regaddr64, (u32)size64,
irq_of_parse_and_map(op->node, 0), id);
if (rc == 0)
of_register_spi_devices(dev_get_drvdata(&op->dev), op->node);
return rc;
}
static int __exit mpc52xx_psc_spi_of_remove(struct of_device *op)

drivers/spi/mpc52xx_spi.c (new file, 520 lines)

@ -0,0 +1,520 @@
/*
* MPC52xx SPI bus driver.
*
* Copyright (C) 2008 Secret Lab Technologies Ltd.
*
* This file is released under the GPLv2
*
* This is the driver for the MPC5200's dedicated SPI controller.
*
* Note: this driver does not support the MPC5200 PSC in SPI mode. For
* that driver see drivers/spi/mpc52xx_psc_spi.c
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/spi/spi.h>
#include <linux/spi/mpc52xx_spi.h>
#include <linux/of_spi.h>
#include <linux/io.h>
#include <asm/time.h>
#include <asm/mpc52xx.h>
MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
MODULE_DESCRIPTION("MPC52xx SPI (non-PSC) Driver");
MODULE_LICENSE("GPL");
/* Register offsets */
#define SPI_CTRL1 0x00
#define SPI_CTRL1_SPIE (1 << 7)
#define SPI_CTRL1_SPE (1 << 6)
#define SPI_CTRL1_MSTR (1 << 4)
#define SPI_CTRL1_CPOL (1 << 3)
#define SPI_CTRL1_CPHA (1 << 2)
#define SPI_CTRL1_SSOE (1 << 1)
#define SPI_CTRL1_LSBFE (1 << 0)
#define SPI_CTRL2 0x01
#define SPI_BRR 0x04
#define SPI_STATUS 0x05
#define SPI_STATUS_SPIF (1 << 7)
#define SPI_STATUS_WCOL (1 << 6)
#define SPI_STATUS_MODF (1 << 4)
#define SPI_DATA 0x09
#define SPI_PORTDATA 0x0d
#define SPI_DATADIR 0x10
/* FSM state return values */
#define FSM_STOP 0 /* Nothing more for the state machine to */
/* do. If something interesting happens */
/* then an IRQ will be received */
#define FSM_POLL 1 /* need to poll for completion, an IRQ is */
/* not expected */
#define FSM_CONTINUE 2 /* Keep iterating the state machine */
/* Driver internal data */
struct mpc52xx_spi {
struct spi_master *master;
u32 sysclk;
void __iomem *regs;
int irq0; /* MODF irq */
int irq1; /* SPIF irq */
int ipb_freq;
/* Statistics */
int msg_count;
int wcol_count;
int wcol_ticks;
u32 wcol_tx_timestamp;
int modf_count;
int byte_count;
struct list_head queue; /* queue of pending messages */
spinlock_t lock;
struct work_struct work;
/* Details of current transfer (length, and buffer pointers) */
struct spi_message *message; /* current message */
struct spi_transfer *transfer; /* current transfer */
int (*state)(int irq, struct mpc52xx_spi *ms, u8 status, u8 data);
int len;
int timestamp;
u8 *rx_buf;
const u8 *tx_buf;
int cs_change;
};
/*
* CS control function
*/
static void mpc52xx_spi_chipsel(struct mpc52xx_spi *ms, int value)
{
out_8(ms->regs + SPI_PORTDATA, value ? 0 : 0x08);
}
/*
* Start a new transfer. This is called both by the idle state
* for the first transfer in a message, and by the wait state when the
* previous transfer in a message is complete.
*/
static void mpc52xx_spi_start_transfer(struct mpc52xx_spi *ms)
{
ms->rx_buf = ms->transfer->rx_buf;
ms->tx_buf = ms->transfer->tx_buf;
ms->len = ms->transfer->len;
/* Activate the chip select */
if (ms->cs_change)
mpc52xx_spi_chipsel(ms, 1);
ms->cs_change = ms->transfer->cs_change;
/* Write out the first byte */
ms->wcol_tx_timestamp = get_tbl();
if (ms->tx_buf)
out_8(ms->regs + SPI_DATA, *ms->tx_buf++);
else
out_8(ms->regs + SPI_DATA, 0);
}
/* Forward declaration of state handlers */
static int mpc52xx_spi_fsmstate_transfer(int irq, struct mpc52xx_spi *ms,
u8 status, u8 data);
static int mpc52xx_spi_fsmstate_wait(int irq, struct mpc52xx_spi *ms,
u8 status, u8 data);
/*
* IDLE state
*
* No transfers are in progress; if another transfer is pending then retrieve
* it and kick it off. Otherwise, stop processing the state machine
*/
static int
mpc52xx_spi_fsmstate_idle(int irq, struct mpc52xx_spi *ms, u8 status, u8 data)
{
struct spi_device *spi;
int spr, sppr;
u8 ctrl1;
if (status && (irq != NO_IRQ))
dev_err(&ms->master->dev, "spurious irq, status=0x%.2x\n",
status);
/* Check if there is another transfer waiting. */
if (list_empty(&ms->queue))
return FSM_STOP;
/* get the head of the queue */
ms->message = list_first_entry(&ms->queue, struct spi_message, queue);
list_del_init(&ms->message->queue);
/* Setup the controller parameters */
ctrl1 = SPI_CTRL1_SPIE | SPI_CTRL1_SPE | SPI_CTRL1_MSTR;
spi = ms->message->spi;
if (spi->mode & SPI_CPHA)
ctrl1 |= SPI_CTRL1_CPHA;
if (spi->mode & SPI_CPOL)
ctrl1 |= SPI_CTRL1_CPOL;
if (spi->mode & SPI_LSB_FIRST)
ctrl1 |= SPI_CTRL1_LSBFE;
out_8(ms->regs + SPI_CTRL1, ctrl1);
/* Setup the controller speed */
/* minimum divider is '2'. Also, add '1' to force rounding the
* divider up. */
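/* Illustrative example (assumed numbers): with ipb_freq = 33 MHz and
 * max_speed_hz = 1 MHz, sppr starts at 17; the loop below reduces it to 5
 * with spr = 2, and after the final decrement the register is written as
 * sppr = 4, spr = 2 (SPI_BRR = 0x42). */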
sppr = ((ms->ipb_freq / ms->message->spi->max_speed_hz) + 1) >> 1;
spr = 0;
if (sppr < 1)
sppr = 1;
while (((sppr - 1) & ~0x7) != 0) {
sppr = (sppr + 1) >> 1; /* add '1' to force rounding up */
spr++;
}
sppr--; /* sppr quantity in register is offset by 1 */
if (spr > 7) {
/* Don't overrun limits of SPI baudrate register */
spr = 7;
sppr = 7;
}
out_8(ms->regs + SPI_BRR, sppr << 4 | spr); /* Set speed */
ms->cs_change = 1;
ms->transfer = container_of(ms->message->transfers.next,
struct spi_transfer, transfer_list);
mpc52xx_spi_start_transfer(ms);
ms->state = mpc52xx_spi_fsmstate_transfer;
return FSM_CONTINUE;
}
/*
* TRANSFER state
*
* In the middle of a transfer. If the SPI core has completed processing
* a byte, then read out the received data and write out the next byte
* (unless this transfer is finished; in which case go on to the wait
* state)
*/
static int mpc52xx_spi_fsmstate_transfer(int irq, struct mpc52xx_spi *ms,
u8 status, u8 data)
{
if (!status)
return ms->irq0 ? FSM_STOP : FSM_POLL;
if (status & SPI_STATUS_WCOL) {
/* The SPI controller is stoopid. At slower speeds, it may
* raise the SPIF flag before the state machine is actually
* finished, which causes a collision (internal to the state
* machine only). The manual recommends inserting a delay
* between receiving the interrupt and sending the next byte,
* but it can also be worked around simply by retrying the
* transfer which is what we do here. */
ms->wcol_count++;
ms->wcol_ticks += get_tbl() - ms->wcol_tx_timestamp;
ms->wcol_tx_timestamp = get_tbl();
data = 0;
if (ms->tx_buf)
data = *(ms->tx_buf-1);
out_8(ms->regs + SPI_DATA, data); /* try again */
return FSM_CONTINUE;
} else if (status & SPI_STATUS_MODF) {
ms->modf_count++;
dev_err(&ms->master->dev, "mode fault\n");
mpc52xx_spi_chipsel(ms, 0);
ms->message->status = -EIO;
ms->message->complete(ms->message->context);
ms->state = mpc52xx_spi_fsmstate_idle;
return FSM_CONTINUE;
}
/* Read data out of the spi device */
ms->byte_count++;
if (ms->rx_buf)
*ms->rx_buf++ = data;
/* Is the transfer complete? */
ms->len--;
if (ms->len == 0) {
ms->timestamp = get_tbl();
ms->timestamp += ms->transfer->delay_usecs * tb_ticks_per_usec;
ms->state = mpc52xx_spi_fsmstate_wait;
return FSM_CONTINUE;
}
/* Write out the next byte */
ms->wcol_tx_timestamp = get_tbl();
if (ms->tx_buf)
out_8(ms->regs + SPI_DATA, *ms->tx_buf++);
else
out_8(ms->regs + SPI_DATA, 0);
return FSM_CONTINUE;
}
/*
* WAIT state
*
* A transfer has completed; need to wait for the delay period to complete
* before starting the next transfer
*/
static int
mpc52xx_spi_fsmstate_wait(int irq, struct mpc52xx_spi *ms, u8 status, u8 data)
{
if (status && irq)
dev_err(&ms->master->dev, "spurious irq, status=0x%.2x\n",
status);
if (((int)get_tbl()) - ms->timestamp < 0)
return FSM_POLL;
ms->message->actual_length += ms->transfer->len;
/* Check if there is another transfer in this message. If there
* aren't then deactivate CS, notify sender, and drop back to idle
* to start the next message. */
if (ms->transfer->transfer_list.next == &ms->message->transfers) {
ms->msg_count++;
mpc52xx_spi_chipsel(ms, 0);
ms->message->status = 0;
ms->message->complete(ms->message->context);
ms->state = mpc52xx_spi_fsmstate_idle;
return FSM_CONTINUE;
}
/* There is another transfer; kick it off */
if (ms->cs_change)
mpc52xx_spi_chipsel(ms, 0);
ms->transfer = container_of(ms->transfer->transfer_list.next,
struct spi_transfer, transfer_list);
mpc52xx_spi_start_transfer(ms);
ms->state = mpc52xx_spi_fsmstate_transfer;
return FSM_CONTINUE;
}
/**
* mpc52xx_spi_fsm_process - Finite State Machine iteration function
* @irq: irq number that triggered the FSM or 0 for polling
* @ms: pointer to mpc52xx_spi driver data
*/
static void mpc52xx_spi_fsm_process(int irq, struct mpc52xx_spi *ms)
{
int rc = FSM_CONTINUE;
u8 status, data;
while (rc == FSM_CONTINUE) {
/* Interrupt cleared by read of STATUS followed by
* read of DATA registers */
status = in_8(ms->regs + SPI_STATUS);
data = in_8(ms->regs + SPI_DATA);
rc = ms->state(irq, ms, status, data);
}
if (rc == FSM_POLL)
schedule_work(&ms->work);
}
/**
* mpc52xx_spi_irq - IRQ handler
*/
static irqreturn_t mpc52xx_spi_irq(int irq, void *_ms)
{
struct mpc52xx_spi *ms = _ms;
spin_lock(&ms->lock);
mpc52xx_spi_fsm_process(irq, ms);
spin_unlock(&ms->lock);
return IRQ_HANDLED;
}
/**
* mpc52xx_spi_wq - Workqueue function for polling the state machine
*/
static void mpc52xx_spi_wq(struct work_struct *work)
{
struct mpc52xx_spi *ms = container_of(work, struct mpc52xx_spi, work);
unsigned long flags;
spin_lock_irqsave(&ms->lock, flags);
mpc52xx_spi_fsm_process(0, ms);
spin_unlock_irqrestore(&ms->lock, flags);
}
/*
* spi_master ops
*/
static int mpc52xx_spi_setup(struct spi_device *spi)
{
if (spi->bits_per_word % 8)
return -EINVAL;
if (spi->mode & ~(SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST))
return -EINVAL;
if (spi->chip_select >= spi->master->num_chipselect)
return -EINVAL;
return 0;
}
static int mpc52xx_spi_transfer(struct spi_device *spi, struct spi_message *m)
{
struct mpc52xx_spi *ms = spi_master_get_devdata(spi->master);
unsigned long flags;
m->actual_length = 0;
m->status = -EINPROGRESS;
spin_lock_irqsave(&ms->lock, flags);
list_add_tail(&m->queue, &ms->queue);
spin_unlock_irqrestore(&ms->lock, flags);
schedule_work(&ms->work);
return 0;
}
/*
* OF Platform Bus Binding
*/
static int __devinit mpc52xx_spi_probe(struct of_device *op,
const struct of_device_id *match)
{
struct spi_master *master;
struct mpc52xx_spi *ms;
void __iomem *regs;
int rc;
/* MMIO registers */
dev_dbg(&op->dev, "probing mpc5200 SPI device\n");
regs = of_iomap(op->node, 0);
if (!regs)
return -ENODEV;
/* initialize the device */
out_8(regs+SPI_CTRL1, SPI_CTRL1_SPIE | SPI_CTRL1_SPE | SPI_CTRL1_MSTR);
out_8(regs + SPI_CTRL2, 0x0);
out_8(regs + SPI_DATADIR, 0xe); /* Set output pins */
out_8(regs + SPI_PORTDATA, 0x8); /* Deassert /SS signal */
/* Clear the status register and re-read it to check for a MODF
* failure. This driver cannot currently handle multiple masters
* on the SPI bus. This fault will also occur if the SPI signals
* are not connected to any pins (port_config setting) */
in_8(regs + SPI_STATUS);
in_8(regs + SPI_DATA);
if (in_8(regs + SPI_STATUS) & SPI_STATUS_MODF) {
dev_err(&op->dev, "mode fault; is port_config correct?\n");
rc = -EIO;
goto err_init;
}
dev_dbg(&op->dev, "allocating spi_master struct\n");
master = spi_alloc_master(&op->dev, sizeof *ms);
if (!master) {
rc = -ENOMEM;
goto err_alloc;
}
master->bus_num = -1;
master->num_chipselect = 1;
master->setup = mpc52xx_spi_setup;
master->transfer = mpc52xx_spi_transfer;
dev_set_drvdata(&op->dev, master);
ms = spi_master_get_devdata(master);
ms->master = master;
ms->regs = regs;
ms->irq0 = irq_of_parse_and_map(op->node, 0);
ms->irq1 = irq_of_parse_and_map(op->node, 1);
ms->state = mpc52xx_spi_fsmstate_idle;
ms->ipb_freq = mpc5xxx_get_bus_frequency(op->node);
spin_lock_init(&ms->lock);
INIT_LIST_HEAD(&ms->queue);
INIT_WORK(&ms->work, mpc52xx_spi_wq);
/* Decide if interrupts can be used */
if (ms->irq0 && ms->irq1) {
rc = request_irq(ms->irq0, mpc52xx_spi_irq, IRQF_SAMPLE_RANDOM,
"mpc5200-spi-modf", ms);
rc |= request_irq(ms->irq1, mpc52xx_spi_irq, IRQF_SAMPLE_RANDOM,
"mpc5200-spi-spiF", ms);
if (rc) {
free_irq(ms->irq0, ms);
free_irq(ms->irq1, ms);
ms->irq0 = ms->irq1 = 0;
}
} else {
/* operate in polled mode */
ms->irq0 = ms->irq1 = 0;
}
if (!ms->irq0)
dev_info(&op->dev, "using polled mode\n");
dev_dbg(&op->dev, "registering spi_master struct\n");
rc = spi_register_master(master);
if (rc)
goto err_register;
of_register_spi_devices(master, op->node);
dev_info(&ms->master->dev, "registered MPC5200 SPI bus\n");
return rc;
err_register:
dev_err(&ms->master->dev, "initialization failed\n");
spi_master_put(master);
err_alloc:
err_init:
iounmap(regs);
return rc;
}
static int __devexit mpc52xx_spi_remove(struct of_device *op)
{
struct spi_master *master = dev_get_drvdata(&op->dev);
struct mpc52xx_spi *ms = spi_master_get_devdata(master);
free_irq(ms->irq0, ms);
free_irq(ms->irq1, ms);
spi_unregister_master(master);
spi_master_put(master);
iounmap(ms->regs);
return 0;
}
static struct of_device_id mpc52xx_spi_match[] __devinitdata = {
{ .compatible = "fsl,mpc5200-spi", },
{}
};
MODULE_DEVICE_TABLE(of, mpc52xx_spi_match);
static struct of_platform_driver mpc52xx_spi_of_driver = {
.owner = THIS_MODULE,
.name = "mpc52xx-spi",
.match_table = mpc52xx_spi_match,
.probe = mpc52xx_spi_probe,
.remove = __exit_p(mpc52xx_spi_remove),
};
static int __init mpc52xx_spi_init(void)
{
return of_register_platform_driver(&mpc52xx_spi_of_driver);
}
module_init(mpc52xx_spi_init);
static void __exit mpc52xx_spi_exit(void)
{
of_unregister_platform_driver(&mpc52xx_spi_of_driver);
}
module_exit(mpc52xx_spi_exit);


@ -148,7 +148,8 @@ static int xilinx_spi_setup_transfer(struct spi_device *spi,
{
u8 bits_per_word;
bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word;
bits_per_word = (t && t->bits_per_word)
? t->bits_per_word : spi->bits_per_word;
if (bits_per_word != 8) {
dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n",
__func__, bits_per_word);


@ -861,8 +861,10 @@ config GEF_WDT
Watchdog timer found in a number of GE Fanuc single board computers.
config MPC5200_WDT
tristate "MPC5200 Watchdog Timer"
bool "MPC52xx Watchdog Timer"
depends on PPC_MPC52xx
help
Use General Purpose Timer (GPT) 0 on the MPC5200 as Watchdog.
config 8xxx_WDT
tristate "MPC8xxx Platform Watchdog Timer"


@ -118,7 +118,6 @@ obj-$(CONFIG_TXX9_WDT) += txx9wdt.o
# POWERPC Architecture
obj-$(CONFIG_GEF_WDT) += gef_wdt.o
obj-$(CONFIG_MPC5200_WDT) += mpc5200_wdt.o
obj-$(CONFIG_8xxx_WDT) += mpc8xxx_wdt.o
obj-$(CONFIG_MV64X60_WDT) += mv64x60_wdt.o
obj-$(CONFIG_PIKA_WDT) += pika_wdt.o


@ -1,293 +0,0 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/of_platform.h>
#include <linux/uaccess.h>
#include <asm/mpc52xx.h>
#define GPT_MODE_WDT (1 << 15)
#define GPT_MODE_CE (1 << 12)
#define GPT_MODE_MS_TIMER (0x4)
struct mpc5200_wdt {
unsigned count; /* timer ticks before watchdog kicks in */
long ipb_freq;
struct miscdevice miscdev;
struct resource mem;
struct mpc52xx_gpt __iomem *regs;
spinlock_t io_lock;
};
/* is_active stores wether or not the /dev/watchdog device is opened */
static unsigned long is_active;
/* misc devices don't provide a way, to get back to 'dev' or 'miscdev' from
* file operations, which sucks. But there can be max 1 watchdog anyway, so...
*/
static struct mpc5200_wdt *wdt_global;
/* helper to calculate timeout in timer counts */
static void mpc5200_wdt_set_timeout(struct mpc5200_wdt *wdt, int timeout)
{
/* use biggest prescaler of 64k */
wdt->count = (wdt->ipb_freq + 0xffff) / 0x10000 * timeout;
if (wdt->count > 0xffff)
wdt->count = 0xffff;
}
/* return timeout in seconds (calculated from timer count) */
static int mpc5200_wdt_get_timeout(struct mpc5200_wdt *wdt)
{
return wdt->count * 0x10000 / wdt->ipb_freq;
}
/* watchdog operations */
static int mpc5200_wdt_start(struct mpc5200_wdt *wdt)
{
spin_lock(&wdt->io_lock);
/* disable */
out_be32(&wdt->regs->mode, 0);
/* set timeout, with maximum prescaler */
out_be32(&wdt->regs->count, 0x0 | wdt->count);
/* enable watchdog */
out_be32(&wdt->regs->mode, GPT_MODE_CE | GPT_MODE_WDT |
GPT_MODE_MS_TIMER);
spin_unlock(&wdt->io_lock);
return 0;
}
static int mpc5200_wdt_ping(struct mpc5200_wdt *wdt)
{
spin_lock(&wdt->io_lock);
/* writing A5 to OCPW resets the watchdog */
out_be32(&wdt->regs->mode, 0xA5000000 |
(0xffffff & in_be32(&wdt->regs->mode)));
spin_unlock(&wdt->io_lock);
return 0;
}
static int mpc5200_wdt_stop(struct mpc5200_wdt *wdt)
{
spin_lock(&wdt->io_lock);
/* disable */
out_be32(&wdt->regs->mode, 0);
spin_unlock(&wdt->io_lock);
return 0;
}
/* file operations */
static ssize_t mpc5200_wdt_write(struct file *file, const char __user *data,
size_t len, loff_t *ppos)
{
struct mpc5200_wdt *wdt = file->private_data;
mpc5200_wdt_ping(wdt);
return 0;
}
static struct watchdog_info mpc5200_wdt_info = {
.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
.identity = "mpc5200 watchdog on GPT0",
};
static long mpc5200_wdt_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct mpc5200_wdt *wdt = file->private_data;
int __user *data = (int __user *)arg;
int timeout;
int ret = 0;
switch (cmd) {
case WDIOC_GETSUPPORT:
ret = copy_to_user(data, &mpc5200_wdt_info,
sizeof(mpc5200_wdt_info));
if (ret)
ret = -EFAULT;
break;
case WDIOC_GETSTATUS:
case WDIOC_GETBOOTSTATUS:
ret = put_user(0, data);
break;
case WDIOC_KEEPALIVE:
mpc5200_wdt_ping(wdt);
break;
case WDIOC_SETTIMEOUT:
ret = get_user(timeout, data);
if (ret)
break;
mpc5200_wdt_set_timeout(wdt, timeout);
mpc5200_wdt_start(wdt);
/* fall through and return the timeout */
case WDIOC_GETTIMEOUT:
timeout = mpc5200_wdt_get_timeout(wdt);
ret = put_user(timeout, data);
break;
default:
ret = -ENOTTY;
}
return ret;
}
static int mpc5200_wdt_open(struct inode *inode, struct file *file)
{
/* /dev/watchdog can only be opened once */
if (test_and_set_bit(0, &is_active))
return -EBUSY;
/* Set and activate the watchdog */
mpc5200_wdt_set_timeout(wdt_global, 30);
mpc5200_wdt_start(wdt_global);
file->private_data = wdt_global;
return nonseekable_open(inode, file);
}
static int mpc5200_wdt_release(struct inode *inode, struct file *file)
{
#if WATCHDOG_NOWAYOUT == 0
struct mpc5200_wdt *wdt = file->private_data;
mpc5200_wdt_stop(wdt);
wdt->count = 0; /* == disabled */
#endif
clear_bit(0, &is_active);
return 0;
}
static const struct file_operations mpc5200_wdt_fops = {
.owner = THIS_MODULE,
.write = mpc5200_wdt_write,
.unlocked_ioctl = mpc5200_wdt_ioctl,
.open = mpc5200_wdt_open,
.release = mpc5200_wdt_release,
};
/* module operations */
static int mpc5200_wdt_probe(struct of_device *op,
const struct of_device_id *match)
{
struct mpc5200_wdt *wdt;
int err;
const void *has_wdt;
int size;
has_wdt = of_get_property(op->node, "has-wdt", NULL);
if (!has_wdt)
has_wdt = of_get_property(op->node, "fsl,has-wdt", NULL);
if (!has_wdt)
return -ENODEV;
wdt = kzalloc(sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
wdt->ipb_freq = mpc5xxx_get_bus_frequency(op->node);
err = of_address_to_resource(op->node, 0, &wdt->mem);
if (err)
goto out_free;
size = wdt->mem.end - wdt->mem.start + 1;
if (!request_mem_region(wdt->mem.start, size, "mpc5200_wdt")) {
err = -ENODEV;
goto out_free;
}
wdt->regs = ioremap(wdt->mem.start, size);
if (!wdt->regs) {
err = -ENODEV;
goto out_release;
}
dev_set_drvdata(&op->dev, wdt);
spin_lock_init(&wdt->io_lock);
wdt->miscdev = (struct miscdevice) {
.minor = WATCHDOG_MINOR,
.name = "watchdog",
.fops = &mpc5200_wdt_fops,
.parent = &op->dev,
};
wdt_global = wdt;
err = misc_register(&wdt->miscdev);
if (!err)
return 0;
iounmap(wdt->regs);
out_release:
release_mem_region(wdt->mem.start, size);
out_free:
kfree(wdt);
return err;
}
static int mpc5200_wdt_remove(struct of_device *op)
{
struct mpc5200_wdt *wdt = dev_get_drvdata(&op->dev);
mpc5200_wdt_stop(wdt);
misc_deregister(&wdt->miscdev);
iounmap(wdt->regs);
release_mem_region(wdt->mem.start, wdt->mem.end - wdt->mem.start + 1);
kfree(wdt);
return 0;
}
static int mpc5200_wdt_suspend(struct of_device *op, pm_message_t state)
{
struct mpc5200_wdt *wdt = dev_get_drvdata(&op->dev);
mpc5200_wdt_stop(wdt);
return 0;
}
static int mpc5200_wdt_resume(struct of_device *op)
{
struct mpc5200_wdt *wdt = dev_get_drvdata(&op->dev);
if (wdt->count)
mpc5200_wdt_start(wdt);
return 0;
}
static int mpc5200_wdt_shutdown(struct of_device *op)
{
struct mpc5200_wdt *wdt = dev_get_drvdata(&op->dev);
mpc5200_wdt_stop(wdt);
return 0;
}
static struct of_device_id mpc5200_wdt_match[] = {
{ .compatible = "mpc5200-gpt", },
{ .compatible = "fsl,mpc5200-gpt", },
{},
};
static struct of_platform_driver mpc5200_wdt_driver = {
.owner = THIS_MODULE,
.name = "mpc5200-gpt-wdt",
.match_table = mpc5200_wdt_match,
.probe = mpc5200_wdt_probe,
.remove = mpc5200_wdt_remove,
.suspend = mpc5200_wdt_suspend,
.resume = mpc5200_wdt_resume,
.shutdown = mpc5200_wdt_shutdown,
};
static int __init mpc5200_wdt_init(void)
{
return of_register_platform_driver(&mpc5200_wdt_driver);
}
static void __exit mpc5200_wdt_exit(void)
{
of_unregister_platform_driver(&mpc5200_wdt_driver);
}
module_init(mpc5200_wdt_init);
module_exit(mpc5200_wdt_exit);
MODULE_AUTHOR("Domen Puncer <domen.puncer@telargo.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);


@ -0,0 +1,10 @@
#ifndef INCLUDE_MPC5200_SPI_H
#define INCLUDE_MPC5200_SPI_H
extern void mpc52xx_spi_set_premessage_hook(struct spi_master *master,
void (*hook)(struct spi_message *m,
void *context),
void *hook_context);
#endif
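A hedged sketch of how board-support code might install the hook declared above (the hook body, the function names, and the NULL context are hypothetical; only the setter's declaration is shown in this commit):

#include <linux/spi/spi.h>
#include <linux/spi/mpc52xx_spi.h>

/* Hypothetical hook: runs before each SPI message is handled by the driver. */
static void example_premessage_hook(struct spi_message *m, void *context)
{
	/* e.g. set up a board-specific mux or chip-select GPIO using 'context' */
}

static void example_install_hook(struct spi_master *master)
{
	mpc52xx_spi_set_premessage_hook(master, example_premessage_hook, NULL);
}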