diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 763c60d3d277..82692068d3cb 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -113,7 +113,7 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
 		 * OR if there is masked interrupt which hasn't been Acked,
 		 * it'll be ignored in irq handler, then may introduce irq storm
 		 */
-		if (d->mask_buf[i] && d->chip->ack_base) {
+		if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
 			reg = d->chip->ack_base +
 				(i * map->reg_stride * d->irq_reg_stride);
 			ret = regmap_write(map, reg, d->mask_buf[i]);
@@ -271,7 +271,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
 	for (i = 0; i < data->chip->num_regs; i++) {
 		data->status_buf[i] &= ~data->mask_buf[i];
 
-		if (data->status_buf[i] && chip->ack_base) {
+		if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
 			reg = chip->ack_base +
 				(i * map->reg_stride * data->irq_reg_stride);
 			ret = regmap_write(map, reg, data->status_buf[i]);
@@ -448,7 +448,7 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
 			goto err_alloc;
 		}
 
-		if (d->status_buf[i] && chip->ack_base) {
+		if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
 			reg = chip->ack_base +
 				(i * map->reg_stride * d->irq_reg_stride);
 			ret = regmap_write(map, reg,
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index c2e002100949..6a19515f8a45 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1514,21 +1514,49 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
 {
 	int ret = 0, i;
 	size_t val_bytes = map->format.val_bytes;
-	void *wval;
 
-	if (!map->bus)
-		return -EINVAL;
-	if (!map->format.parse_inplace)
+	if (map->bus && !map->format.parse_inplace)
 		return -EINVAL;
 	if (reg % map->reg_stride)
 		return -EINVAL;
 
 	map->lock(map->lock_arg);
+	/*
+	 * Some devices don't support bulk write, for
+	 * them we have a series of single write operations.
+	 */
+	if (!map->bus || map->use_single_rw) {
+		for (i = 0; i < val_count; i++) {
+			unsigned int ival;
 
-	/* No formatting is require if val_byte is 1 */
-	if (val_bytes == 1) {
-		wval = (void *)val;
+			switch (val_bytes) {
+			case 1:
+				ival = *(u8 *)(val + (i * val_bytes));
+				break;
+			case 2:
+				ival = *(u16 *)(val + (i * val_bytes));
+				break;
+			case 4:
+				ival = *(u32 *)(val + (i * val_bytes));
+				break;
+#ifdef CONFIG_64BIT
+			case 8:
+				ival = *(u64 *)(val + (i * val_bytes));
+				break;
+#endif
+			default:
+				ret = -EINVAL;
+				goto out;
+			}
+
+			ret = _regmap_write(map, reg + (i * map->reg_stride),
+					ival);
+			if (ret != 0)
+				goto out;
+		}
 	} else {
+		void *wval;
+
 		wval = kmemdup(val, val_count * val_bytes, GFP_KERNEL);
 		if (!wval) {
 			ret = -ENOMEM;
@@ -1537,27 +1565,11 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
 		}
 		for (i = 0; i < val_count * val_bytes; i += val_bytes)
 			map->format.parse_inplace(wval + i);
-	}
-	/*
-	 * Some devices does not support bulk write, for
-	 * them we have a series of single write operations.
-	 */
-	if (map->use_single_rw) {
-		for (i = 0; i < val_count; i++) {
-			ret = _regmap_raw_write(map,
-						reg + (i * map->reg_stride),
-						val + (i * val_bytes),
-						val_bytes);
-			if (ret != 0)
-				goto out;
-		}
-	} else {
+
 		ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
-	}
 
-	if (val_bytes != 1)
 		kfree(wval);
-
+	}
 out:
 	map->unlock(map->lock_arg);
 	return ret;
@@ -1897,14 +1909,10 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
 	size_t val_bytes = map->format.val_bytes;
 	bool vol = regmap_volatile_range(map, reg, val_count);
 
-	if (!map->bus)
-		return -EINVAL;
-	if (!map->format.parse_inplace)
-		return -EINVAL;
 	if (reg % map->reg_stride)
 		return -EINVAL;
 
-	if (vol || map->cache_type == REGCACHE_NONE) {
+	if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
 		/*
 		 * Some devices does not support bulk read, for
 		 * them we have a series of single read operations.
@@ -2173,6 +2181,10 @@ int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
 	int i, ret;
 	bool bypass;
 
+	if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
+		      num_regs))
+		return 0;
+
 	map->lock(map->lock_arg);
 
 	bypass = map->cache_bypass;
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index e55907804d39..4149f1a9b003 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -122,9 +122,9 @@ typedef void (*regmap_unlock)(void *);
  *	  volatile_table (see below) is not, the check is performed on
  *	  such table (a register is volatile if it belongs to one of
  *	  the ranges specified by volatile_table).
- * @precious_reg: Optional callback returning true if the rgister
+ * @precious_reg: Optional callback returning true if the register
  *	  should not be read outside of a call from the driver
- *	  (eg, a clear on read interrupt status register). If this
+ *	  (e.g., a clear on read interrupt status register). If this
  *	  field is NULL but precious_table (see below) is not, the
  *	  check is performed on such table (a register is precious if
  *	  it belongs to one of the ranges specified by precious_table).
@@ -136,9 +136,9 @@
  *	  are not overridden).
  * @reg_read: Optional callback that if filled will be used to perform
  *	  all the reads from the registers. Should only be provided for
- *	  devices whos read operation cannot be represented as a simple read
- *	  operation on a bus such as SPI, I2C, etc. Most of the devices do
- *	  not need this.
+ *	  devices whose read operation cannot be represented as a simple
+ *	  read operation on a bus such as SPI, I2C, etc. Most of the
+ *	  devices do not need this.
  * @reg_write: Same as above for writing.
 * @fast_io: Register IO is fast. Use a spinlock instead of a mutex
 *	  to perform locking. This field is ignored if custom lock/unlock
@@ -497,11 +497,13 @@
  *
  * @status_base: Base status register address.
  * @mask_base:   Base mask register address.
- * @ack_base:    Base ack address. If zero then the chip is clear on read.
+ * @ack_base:    Base ack address. If zero then the chip is clear on read.
+ *               Using zero value is possible with @use_ack bit.
  * @wake_base:   Base address for wake enables. If zero unsupported.
  * @irq_reg_stride:  Stride to use for chips where registers are not contiguous.
  * @init_ack_masked: Ack all masked interrupts once during initalization.
  * @mask_invert: Inverted mask register: cleared bits are masked out.
+ * @use_ack: Use @ack register even if it is zero.
  * @wake_invert: Inverted wake register: cleared bits are wake enabled.
  * @runtime_pm:  Hold a runtime PM lock on the device when accessing it.
  *
@@ -520,6 +522,7 @@ struct regmap_irq_chip {
 	unsigned int irq_reg_stride;
 	bool init_ack_masked:1;
 	bool mask_invert:1;
+	bool use_ack:1;
 	bool wake_invert:1;
 	bool runtime_pm:1;
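Usage sketch (illustration only, not part of the patch): with @use_ack set, the ack writes in regmap_irq_sync_unlock(), regmap_irq_thread() and regmap_add_irq_chip() are issued even though ack_base is 0, which previously always meant "clear on read". The foo_* names, register addresses and IRQ layout below are invented.

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/regmap.h>

/* Hypothetical chip: two interrupt sources sharing one status/mask/ack register. */
static const struct regmap_irq foo_irqs[] = {
	{ .reg_offset = 0, .mask = BIT(0) },	/* alarm */
	{ .reg_offset = 0, .mask = BIT(1) },	/* gpio  */
};

static const struct regmap_irq_chip foo_irq_chip = {
	.name		= "foo",
	.status_base	= 0x02,
	.mask_base	= 0x04,
	.ack_base	= 0x00,	/* the ack register really sits at address 0 */
	.use_ack	= true,	/* without this, the ack write would be skipped */
	.num_regs	= 1,
	.irqs		= foo_irqs,
	.num_irqs	= ARRAY_SIZE(foo_irqs),
};

/* Called from probe with a valid regmap and IRQ line. */
static int foo_setup_irq(struct regmap *map, int irq,
			 struct regmap_irq_chip_data **irq_data)
{
	return regmap_add_irq_chip(map, irq, IRQF_TRIGGER_LOW | IRQF_ONESHOT,
				   0, &foo_irq_chip, irq_data);
}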
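Similarly, after the regmap.c changes regmap_bulk_write() no longer returns -EINVAL for "no-bus" regmaps (those driven by reg_read()/reg_write() callbacks); for them, as for use_single_rw devices, it falls back to one _regmap_write() per register. A hypothetical caller, assuming a 16-bit no-bus regmap; the register address and values are made up:

#include <linux/kernel.h>
#include <linux/regmap.h>
#include <linux/types.h>

static int foo_load_coeffs(struct regmap *map)
{
	static const u16 coeffs[] = { 0x0102, 0x0304, 0x0506, 0x0708 };

	/* Expanded internally into a series of single register writes. */
	return regmap_bulk_write(map, 0x10, coeffs, ARRAY_SIZE(coeffs));
}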