
Merge pull request #208 from zandrey/5.4-2.2.x-imx

Update 5.4-2.2.x-imx to v5.4.87
5.4-rM2-2.2.x-imx-squashed
Otavio Salvador 2021-01-09 18:57:11 -03:00 committed by GitHub
commit 192a369760
90 changed files with 773 additions and 350 deletions

View File

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 86
SUBLEVEL = 87
EXTRAVERSION =
NAME = Kleptomaniac Octopus

View File

@ -217,15 +217,34 @@ static __inline__ void __clear_bit_unlock(int nr, volatile unsigned long *addr)
*/
static __inline__ int fls(unsigned int x)
{
return 32 - __builtin_clz(x);
int lz;
if (__builtin_constant_p(x))
return x ? 32 - __builtin_clz(x) : 0;
asm("cntlzw %0,%1" : "=r" (lz) : "r" (x));
return 32 - lz;
}
#include <asm-generic/bitops/builtin-__fls.h>
/*
* 64-bit can do this using one cntlzd (count leading zeroes doubleword)
* instruction; for 32-bit we use the generic version, which does two
* 32-bit fls calls.
*/
#ifdef CONFIG_PPC64
static __inline__ int fls64(__u64 x)
{
return 64 - __builtin_clzll(x);
int lz;
if (__builtin_constant_p(x))
return x ? 64 - __builtin_clzll(x) : 0;
asm("cntlzd %0,%1" : "=r" (lz) : "r" (x));
return 64 - lz;
}
#else
#include <asm-generic/bitops/fls64.h>
#endif
#ifdef CONFIG_PPC64
unsigned int __arch_hweight8(unsigned int w);
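
The motivation for this hunk is that GCC's __builtin_clz()/__builtin_clzll() are undefined for a zero argument, while fls(0)/fls64(0) must return 0; the patch keeps the builtin only for compile-time constants (where the zero case is now checked explicitly) and otherwise uses cntlzw/cntlzd, whose result for 0 is a well-defined 32/64. A minimal userspace sketch of the zero handling, assuming GCC builtin semantics:

    #include <assert.h>

    /* Userspace model of the fixed fls(): __builtin_clz(0) is undefined, so
     * the zero case must be handled before the builtin is used. (The kernel's
     * non-constant path uses the cntlzw instruction instead, which returns 32
     * for a zero input, making "32 - lz" come out as 0 naturally.) */
    static int fls_model(unsigned int x)
    {
        return x ? 32 - __builtin_clz(x) : 0;
    }

    int main(void)
    {
        assert(fls_model(0) == 0);            /* old code: undefined behaviour */
        assert(fls_model(1) == 1);
        assert(fls_model(0x80000000u) == 32);
        return 0;
    }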

View File

@ -191,7 +191,7 @@ static int mpic_msgr_probe(struct platform_device *dev)
/* IO map the message register block. */
of_address_to_resource(np, 0, &rsrc);
msgr_block_addr = ioremap(rsrc.start, resource_size(&rsrc));
msgr_block_addr = devm_ioremap(&dev->dev, rsrc.start, resource_size(&rsrc));
if (!msgr_block_addr) {
dev_err(&dev->dev, "Failed to iomap MPIC message registers");
return -EFAULT;

View File

@ -47,18 +47,25 @@
/* Max request size is determined by sector mask - 32K */
#define UBD_MAX_REQUEST (8 * sizeof(long))
struct io_desc {
char *buffer;
unsigned long length;
unsigned long sector_mask;
unsigned long long cow_offset;
unsigned long bitmap_words[2];
};
struct io_thread_req {
struct request *req;
int fds[2];
unsigned long offsets[2];
unsigned long long offset;
unsigned long length;
char *buffer;
int sectorsize;
unsigned long sector_mask;
unsigned long long cow_offset;
unsigned long bitmap_words[2];
int error;
int desc_cnt;
/* io_desc has to be the last element of the struct */
struct io_desc io_desc[];
};
@ -524,12 +531,7 @@ static void ubd_handler(void)
blk_queue_max_write_zeroes_sectors(io_req->req->q, 0);
blk_queue_flag_clear(QUEUE_FLAG_DISCARD, io_req->req->q);
}
if ((io_req->error) || (io_req->buffer == NULL))
blk_mq_end_request(io_req->req, io_req->error);
else {
if (!blk_update_request(io_req->req, io_req->error, io_req->length))
__blk_mq_end_request(io_req->req, io_req->error);
}
blk_mq_end_request(io_req->req, io_req->error);
kfree(io_req);
}
}
@ -945,6 +947,7 @@ static int ubd_add(int n, char **error_out)
blk_queue_write_cache(ubd_dev->queue, true, false);
blk_queue_max_segments(ubd_dev->queue, MAX_SG);
blk_queue_segment_boundary(ubd_dev->queue, PAGE_SIZE - 1);
err = ubd_disk_register(UBD_MAJOR, ubd_dev->size, n, &ubd_gendisk[n]);
if(err){
*error_out = "Failed to register device";
@ -1288,37 +1291,74 @@ static void cowify_bitmap(__u64 io_offset, int length, unsigned long *cow_mask,
*cow_offset += bitmap_offset;
}
static void cowify_req(struct io_thread_req *req, unsigned long *bitmap,
static void cowify_req(struct io_thread_req *req, struct io_desc *segment,
unsigned long offset, unsigned long *bitmap,
__u64 bitmap_offset, __u64 bitmap_len)
{
__u64 sector = req->offset >> SECTOR_SHIFT;
__u64 sector = offset >> SECTOR_SHIFT;
int i;
if (req->length > (sizeof(req->sector_mask) * 8) << SECTOR_SHIFT)
if (segment->length > (sizeof(segment->sector_mask) * 8) << SECTOR_SHIFT)
panic("Operation too long");
if (req_op(req->req) == REQ_OP_READ) {
for (i = 0; i < req->length >> SECTOR_SHIFT; i++) {
for (i = 0; i < segment->length >> SECTOR_SHIFT; i++) {
if(ubd_test_bit(sector + i, (unsigned char *) bitmap))
ubd_set_bit(i, (unsigned char *)
&req->sector_mask);
&segment->sector_mask);
}
} else {
cowify_bitmap(offset, segment->length, &segment->sector_mask,
&segment->cow_offset, bitmap, bitmap_offset,
segment->bitmap_words, bitmap_len);
}
else cowify_bitmap(req->offset, req->length, &req->sector_mask,
&req->cow_offset, bitmap, bitmap_offset,
req->bitmap_words, bitmap_len);
}
static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,
u64 off, struct bio_vec *bvec)
static void ubd_map_req(struct ubd *dev, struct io_thread_req *io_req,
struct request *req)
{
struct ubd *dev = hctx->queue->queuedata;
struct io_thread_req *io_req;
int ret;
struct bio_vec bvec;
struct req_iterator iter;
int i = 0;
unsigned long byte_offset = io_req->offset;
int op = req_op(req);
io_req = kmalloc(sizeof(struct io_thread_req), GFP_ATOMIC);
if (op == REQ_OP_WRITE_ZEROES || op == REQ_OP_DISCARD) {
io_req->io_desc[0].buffer = NULL;
io_req->io_desc[0].length = blk_rq_bytes(req);
} else {
rq_for_each_segment(bvec, req, iter) {
BUG_ON(i >= io_req->desc_cnt);
io_req->io_desc[i].buffer =
page_address(bvec.bv_page) + bvec.bv_offset;
io_req->io_desc[i].length = bvec.bv_len;
i++;
}
}
if (dev->cow.file) {
for (i = 0; i < io_req->desc_cnt; i++) {
cowify_req(io_req, &io_req->io_desc[i], byte_offset,
dev->cow.bitmap, dev->cow.bitmap_offset,
dev->cow.bitmap_len);
byte_offset += io_req->io_desc[i].length;
}
}
}
static struct io_thread_req *ubd_alloc_req(struct ubd *dev, struct request *req,
int desc_cnt)
{
struct io_thread_req *io_req;
int i;
io_req = kmalloc(sizeof(*io_req) +
(desc_cnt * sizeof(struct io_desc)),
GFP_ATOMIC);
if (!io_req)
return -ENOMEM;
return NULL;
io_req->req = req;
if (dev->cow.file)
@ -1326,26 +1366,41 @@ static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,
else
io_req->fds[0] = dev->fd;
io_req->error = 0;
if (bvec != NULL) {
io_req->buffer = page_address(bvec->bv_page) + bvec->bv_offset;
io_req->length = bvec->bv_len;
} else {
io_req->buffer = NULL;
io_req->length = blk_rq_bytes(req);
}
io_req->sectorsize = SECTOR_SIZE;
io_req->fds[1] = dev->fd;
io_req->cow_offset = -1;
io_req->offset = off;
io_req->sector_mask = 0;
io_req->offset = (u64) blk_rq_pos(req) << SECTOR_SHIFT;
io_req->offsets[0] = 0;
io_req->offsets[1] = dev->cow.data_offset;
if (dev->cow.file)
cowify_req(io_req, dev->cow.bitmap,
dev->cow.bitmap_offset, dev->cow.bitmap_len);
for (i = 0 ; i < desc_cnt; i++) {
io_req->io_desc[i].sector_mask = 0;
io_req->io_desc[i].cow_offset = -1;
}
return io_req;
}
static int ubd_submit_request(struct ubd *dev, struct request *req)
{
int segs = 0;
struct io_thread_req *io_req;
int ret;
int op = req_op(req);
if (op == REQ_OP_FLUSH)
segs = 0;
else if (op == REQ_OP_WRITE_ZEROES || op == REQ_OP_DISCARD)
segs = 1;
else
segs = blk_rq_nr_phys_segments(req);
io_req = ubd_alloc_req(dev, req, segs);
if (!io_req)
return -ENOMEM;
io_req->desc_cnt = segs;
if (segs)
ubd_map_req(dev, io_req, req);
ret = os_write_file(thread_fd, &io_req, sizeof(io_req));
if (ret != sizeof(io_req)) {
@ -1356,22 +1411,6 @@ static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,
return ret;
}
static int queue_rw_req(struct blk_mq_hw_ctx *hctx, struct request *req)
{
struct req_iterator iter;
struct bio_vec bvec;
int ret;
u64 off = (u64)blk_rq_pos(req) << SECTOR_SHIFT;
rq_for_each_segment(bvec, req, iter) {
ret = ubd_queue_one_vec(hctx, req, off, &bvec);
if (ret < 0)
return ret;
off += bvec.bv_len;
}
return 0;
}
static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@ -1384,17 +1423,12 @@ static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
spin_lock_irq(&ubd_dev->lock);
switch (req_op(req)) {
/* operations with no lentgth/offset arguments */
case REQ_OP_FLUSH:
ret = ubd_queue_one_vec(hctx, req, 0, NULL);
break;
case REQ_OP_READ:
case REQ_OP_WRITE:
ret = queue_rw_req(hctx, req);
break;
case REQ_OP_DISCARD:
case REQ_OP_WRITE_ZEROES:
ret = ubd_queue_one_vec(hctx, req, (u64)blk_rq_pos(req) << 9, NULL);
ret = ubd_submit_request(ubd_dev, req);
break;
default:
WARN_ON_ONCE(1);
@ -1482,22 +1516,22 @@ static int map_error(int error_code)
* will result in unpredictable behaviour and/or crashes.
*/
static int update_bitmap(struct io_thread_req *req)
static int update_bitmap(struct io_thread_req *req, struct io_desc *segment)
{
int n;
if(req->cow_offset == -1)
if (segment->cow_offset == -1)
return map_error(0);
n = os_pwrite_file(req->fds[1], &req->bitmap_words,
sizeof(req->bitmap_words), req->cow_offset);
if (n != sizeof(req->bitmap_words))
n = os_pwrite_file(req->fds[1], &segment->bitmap_words,
sizeof(segment->bitmap_words), segment->cow_offset);
if (n != sizeof(segment->bitmap_words))
return map_error(-n);
return map_error(0);
}
static void do_io(struct io_thread_req *req)
static void do_io(struct io_thread_req *req, struct io_desc *desc)
{
char *buf = NULL;
unsigned long len;
@ -1512,21 +1546,20 @@ static void do_io(struct io_thread_req *req)
return;
}
nsectors = req->length / req->sectorsize;
nsectors = desc->length / req->sectorsize;
start = 0;
do {
bit = ubd_test_bit(start, (unsigned char *) &req->sector_mask);
bit = ubd_test_bit(start, (unsigned char *) &desc->sector_mask);
end = start;
while((end < nsectors) &&
(ubd_test_bit(end, (unsigned char *)
&req->sector_mask) == bit))
(ubd_test_bit(end, (unsigned char *) &desc->sector_mask) == bit))
end++;
off = req->offset + req->offsets[bit] +
start * req->sectorsize;
len = (end - start) * req->sectorsize;
if (req->buffer != NULL)
buf = &req->buffer[start * req->sectorsize];
if (desc->buffer != NULL)
buf = &desc->buffer[start * req->sectorsize];
switch (req_op(req->req)) {
case REQ_OP_READ:
@ -1566,7 +1599,8 @@ static void do_io(struct io_thread_req *req)
start = end;
} while(start < nsectors);
req->error = update_bitmap(req);
req->offset += len;
req->error = update_bitmap(req, desc);
}
/* Changed in start_io_thread, which is serialized by being called only
@ -1599,8 +1633,13 @@ int io_thread(void *arg)
}
for (count = 0; count < n/sizeof(struct io_thread_req *); count++) {
struct io_thread_req *req = (*io_req_buffer)[count];
int i;
io_count++;
do_io((*io_req_buffer)[count]);
for (i = 0; !req->error && i < req->desc_cnt; i++)
do_io(req, &(req->io_desc[i]));
}
written = 0;
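
The rework replaces the per-bvec submission path with a single io_thread_req carrying a flexible array of per-segment io_desc entries, which is why io_desc must stay the last struct member and why the allocation sizes the trailing array explicitly (note the return type also changed, so allocation failure is now signalled by NULL rather than -ENOMEM). A minimal userspace sketch of that allocation pattern, with model names:

    #include <stdlib.h>
    #include <string.h>

    struct io_desc_model {
        char *buffer;
        unsigned long length;
    };

    struct io_thread_req_model {
        int error;
        int desc_cnt;
        struct io_desc_model io_desc[];   /* flexible array member: must be last */
    };

    static struct io_thread_req_model *alloc_req_model(int desc_cnt)
    {
        size_t sz = sizeof(struct io_thread_req_model) +
                    desc_cnt * sizeof(struct io_desc_model);
        struct io_thread_req_model *req = malloc(sz);

        if (!req)
            return NULL;                  /* NULL, not -ENOMEM, as in the patch */
        memset(req, 0, sz);
        req->desc_cnt = desc_cnt;
        return req;
    }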

View File

@ -154,6 +154,20 @@ static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
return x86_stepping(best->eax);
}
static inline bool guest_has_spec_ctrl_msr(struct kvm_vcpu *vcpu)
{
return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) ||
guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) ||
guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD));
}
static inline bool guest_has_pred_cmd_msr(struct kvm_vcpu *vcpu)
{
return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB));
}
static inline bool supports_cpuid_fault(struct kvm_vcpu *vcpu)
{
return vcpu->arch.msr_platform_info & MSR_PLATFORM_INFO_CPUID_FAULT;

View File

@ -4233,8 +4233,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_IA32_SPEC_CTRL:
if (!msr_info->host_initiated &&
!guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) &&
!guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
!guest_has_spec_ctrl_msr(vcpu))
return 1;
msr_info->data = svm->spec_ctrl;
@ -4318,16 +4317,13 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
break;
case MSR_IA32_SPEC_CTRL:
if (!msr->host_initiated &&
!guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) &&
!guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
!guest_has_spec_ctrl_msr(vcpu))
return 1;
/* The STIBP bit doesn't fault even if it's not advertised */
if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
return 1;
svm->spec_ctrl = data;
if (!data)
break;
@ -4346,18 +4342,17 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
break;
case MSR_IA32_PRED_CMD:
if (!msr->host_initiated &&
!guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB))
!guest_has_pred_cmd_msr(vcpu))
return 1;
if (data & ~PRED_CMD_IBPB)
return 1;
if (!boot_cpu_has(X86_FEATURE_IBPB))
return 1;
if (!data)
break;
wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
if (is_guest_mode(vcpu))
break;
set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
break;
case MSR_AMD64_VIRT_SPEC_CTRL:

View File

@ -1788,7 +1788,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_IA32_SPEC_CTRL:
if (!msr_info->host_initiated &&
!guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
!guest_has_spec_ctrl_msr(vcpu))
return 1;
msr_info->data = to_vmx(vcpu)->spec_ctrl;
@ -1971,15 +1971,13 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_IA32_SPEC_CTRL:
if (!msr_info->host_initiated &&
!guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
!guest_has_spec_ctrl_msr(vcpu))
return 1;
/* The STIBP bit doesn't fault even if it's not advertised */
if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
return 1;
vmx->spec_ctrl = data;
if (!data)
break;
@ -2001,12 +1999,13 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_IA32_PRED_CMD:
if (!msr_info->host_initiated &&
!guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
!guest_has_pred_cmd_msr(vcpu))
return 1;
if (data & ~PRED_CMD_IBPB)
return 1;
if (!boot_cpu_has(X86_FEATURE_IBPB))
return 1;
if (!data)
break;

View File

@ -10369,6 +10369,28 @@ bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_arch_no_poll);
u64 kvm_spec_ctrl_valid_bits(struct kvm_vcpu *vcpu)
{
uint64_t bits = SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD;
/* The STIBP bit doesn't fault even if it's not advertised */
if (!guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
!guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS))
bits &= ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP);
if (!boot_cpu_has(X86_FEATURE_SPEC_CTRL) &&
!boot_cpu_has(X86_FEATURE_AMD_IBRS))
bits &= ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP);
if (!guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL_SSBD) &&
!guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
bits &= ~SPEC_CTRL_SSBD;
if (!boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
!boot_cpu_has(X86_FEATURE_AMD_SSBD))
bits &= ~SPEC_CTRL_SSBD;
return bits;
}
EXPORT_SYMBOL_GPL(kvm_spec_ctrl_valid_bits);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
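
As a concrete illustration of the masking logic above: with an assumed guest that advertises only AMD_SSBD and a host with full SPEC_CTRL support, only the SSBD bit survives. A hedged userspace sketch, with bit positions matching the x86 MSR definitions:

    #include <stdbool.h>
    #include <stdio.h>

    #define SPEC_CTRL_IBRS  (1ULL << 0)
    #define SPEC_CTRL_STIBP (1ULL << 1)
    #define SPEC_CTRL_SSBD  (1ULL << 2)

    int main(void)
    {
        /* Assumed CPUID state: guest has AMD_SSBD only, host has everything. */
        bool guest_ibrs_stibp = false, host_ibrs_stibp = true;
        bool guest_ssbd = true, host_ssbd = true;
        unsigned long long bits =
            SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD;

        if (!guest_ibrs_stibp)
            bits &= ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP);
        if (!host_ibrs_stibp)
            bits &= ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP);
        if (!guest_ssbd)
            bits &= ~SPEC_CTRL_SSBD;
        if (!host_ssbd)
            bits &= ~SPEC_CTRL_SSBD;

        printf("valid bits = %#llx\n", bits); /* prints 0x4: only SSBD writable */
        return 0;
    }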

View File

@ -368,5 +368,6 @@ static inline bool kvm_pat_valid(u64 data)
void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu);
void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu);
u64 kvm_spec_ctrl_valid_bits(struct kvm_vcpu *vcpu);
#endif

View File

@ -67,6 +67,10 @@ int blk_pre_runtime_suspend(struct request_queue *q)
WARN_ON_ONCE(q->rpm_status != RPM_ACTIVE);
spin_lock_irq(&q->queue_lock);
q->rpm_status = RPM_SUSPENDING;
spin_unlock_irq(&q->queue_lock);
/*
* Increase the pm_only counter before checking whether any
* non-PM blk_queue_enter() calls are in progress to avoid that any
@ -89,15 +93,14 @@ int blk_pre_runtime_suspend(struct request_queue *q)
/* Switch q_usage_counter back to per-cpu mode. */
blk_mq_unfreeze_queue(q);
spin_lock_irq(&q->queue_lock);
if (ret < 0)
if (ret < 0) {
spin_lock_irq(&q->queue_lock);
q->rpm_status = RPM_ACTIVE;
pm_runtime_mark_last_busy(q->dev);
else
q->rpm_status = RPM_SUSPENDING;
spin_unlock_irq(&q->queue_lock);
spin_unlock_irq(&q->queue_lock);
if (ret)
blk_clear_pm_only(q);
}
return ret;
}

View File

@ -2,8 +2,7 @@
#include <linux/vmalloc.h>
#include "null_blk.h"
/* zone_size in MBs to sectors. */
#define ZONE_SIZE_SHIFT 11
#define MB_TO_SECTS(mb) (((sector_t)mb * SZ_1M) >> SECTOR_SHIFT)
static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
{
@ -12,7 +11,7 @@ static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
int null_zone_init(struct nullb_device *dev)
{
sector_t dev_size = (sector_t)dev->size * 1024 * 1024;
sector_t dev_capacity_sects;
sector_t sector = 0;
unsigned int i;
@ -25,9 +24,12 @@ int null_zone_init(struct nullb_device *dev)
return -EINVAL;
}
dev->zone_size_sects = dev->zone_size << ZONE_SIZE_SHIFT;
dev->nr_zones = dev_size >>
(SECTOR_SHIFT + ilog2(dev->zone_size_sects));
dev_capacity_sects = MB_TO_SECTS(dev->size);
dev->zone_size_sects = MB_TO_SECTS(dev->zone_size);
dev->nr_zones = dev_capacity_sects >> ilog2(dev->zone_size_sects);
if (dev_capacity_sects & (dev->zone_size_sects - 1))
dev->nr_zones++;
dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct blk_zone),
GFP_KERNEL | __GFP_ZERO);
if (!dev->zones)
@ -55,7 +57,10 @@ int null_zone_init(struct nullb_device *dev)
struct blk_zone *zone = &dev->zones[i];
zone->start = zone->wp = sector;
zone->len = dev->zone_size_sects;
if (zone->start + dev->zone_size_sects > dev_capacity_sects)
zone->len = dev_capacity_sects - zone->start;
else
zone->len = dev->zone_size_sects;
zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
zone->cond = BLK_ZONE_COND_EMPTY;
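
The arithmetic change handles device capacities that are not a multiple of the zone size: nr_zones is rounded up and the final zone is shortened to the remaining capacity. A worked example with assumed sizes (600 MB device, 256 MB zones; the kernel shifts by ilog2() since zone sizes are powers of two, division is used here for clarity):

    #include <stdio.h>

    #define SECTOR_SHIFT 9
    #define SZ_1M (1024 * 1024)
    #define MB_TO_SECTS(mb) (((unsigned long long)(mb) * SZ_1M) >> SECTOR_SHIFT)

    int main(void)
    {
        unsigned long long cap = MB_TO_SECTS(600);  /* 1228800 sectors */
        unsigned long long zsz = MB_TO_SECTS(256);  /*  524288 sectors */
        unsigned int nr = cap / zsz;                /* 2 full zones */

        if (cap % zsz)                              /* capacity not zone-aligned */
            nr++;                                   /* 3 zones total */
        printf("nr_zones=%u, last zone=%llu sectors\n",
               nr, cap - (nr - 1ULL) * zsz);        /* 3, 180224 (~88 MB) */
        return 0;
    }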

View File

@ -250,8 +250,12 @@ static int h5_close(struct hci_uart *hu)
if (h5->vnd && h5->vnd->close)
h5->vnd->close(h5);
if (!hu->serdev)
kfree(h5);
if (hu->serdev)
serdev_device_close(hu->serdev);
kfree_skb(h5->rx_skb);
kfree(h5);
h5 = NULL;
return 0;
}

View File

@ -1673,9 +1673,11 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
atslave = kzalloc(sizeof(*atslave), GFP_KERNEL);
if (!atslave)
atslave = kmalloc(sizeof(*atslave), GFP_KERNEL);
if (!atslave) {
put_device(&dmac_pdev->dev);
return NULL;
}
atslave->cfg = ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW;
/*
@ -1704,8 +1706,11 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
atslave->dma_dev = &dmac_pdev->dev;
chan = dma_request_channel(mask, at_dma_filter, atslave);
if (!chan)
if (!chan) {
put_device(&dmac_pdev->dev);
kfree(atslave);
return NULL;
}
atchan = to_at_dma_chan(chan);
atchan->per_if = dma_spec->args[0] & 0xff;
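
Both hunks enforce the same rule: the device reference taken when the platform device was looked up must be dropped on every subsequent failure path, and the slave structure must be freed once the channel request fails. A minimal sketch of that error-path discipline, with hypothetical names:

    #include <stdlib.h>

    struct dev_model { int refcnt; };

    static void get_dev(struct dev_model *d) { d->refcnt++; }
    static void put_dev(struct dev_model *d) { d->refcnt--; }

    static void *xlate_model(struct dev_model *dmac_pdev)
    {
        void *atslave, *chan = NULL;

        get_dev(dmac_pdev);              /* reference taken by the lookup */
        atslave = malloc(16);            /* stands in for the kmalloc() */
        if (!atslave)
            goto err_put;                /* fix 1: drop the ref on alloc failure */
        /* chan = dma_request_channel(...); */
        if (!chan)
            goto err_free;               /* fix 2: drop the ref and free atslave */
        return chan;

    err_free:
        free(atslave);
    err_put:
        put_dev(dmac_pdev);
        return NULL;
    }

    int main(void) { struct dev_model d = { 0 }; return xlate_model(&d) != NULL; }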

View File

@ -1434,8 +1434,7 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
drm_connector_update_edid_property(connector,
aconnector->edid);
aconnector->num_modes = drm_add_edid_modes(connector, aconnector->edid);
drm_connector_list_update(connector);
drm_add_edid_modes(connector, aconnector->edid);
if (aconnector->dc_link->aux_mode)
drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,

View File

@ -2492,7 +2492,7 @@ int i3c_master_register(struct i3c_master_controller *master,
ret = i3c_master_bus_init(master);
if (ret)
goto err_put_dev;
goto err_destroy_wq;
ret = device_add(&master->dev);
if (ret)
@ -2523,6 +2523,9 @@ err_del_dev:
err_cleanup_bus:
i3c_master_bus_cleanup(master);
err_destroy_wq:
destroy_workqueue(master->wq);
err_put_dev:
put_device(&master->dev);

View File

@ -7,6 +7,13 @@
struct bmi160_data {
struct regmap *regmap;
struct iio_trigger *trig;
/*
* Ensure natural alignment for timestamp if present.
* Max length needed: 2 * 3 channels + 4 bytes padding + 8 byte ts.
* If fewer channels are enabled, less space may be needed, as
* long as the timestamp is still aligned to 8 bytes.
*/
__le16 buf[12] __aligned(8);
};
extern const struct regmap_config bmi160_regmap_config;

View File

@ -411,8 +411,6 @@ static irqreturn_t bmi160_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct bmi160_data *data = iio_priv(indio_dev);
__le16 buf[12];
/* 2 sens x 3 axis x __le16 + 2 x __le16 pad + 4 x __le16 tstamp */
int i, ret, j = 0, base = BMI160_REG_DATA_MAGN_XOUT_L;
__le16 sample;
@ -422,10 +420,10 @@ static irqreturn_t bmi160_trigger_handler(int irq, void *p)
&sample, sizeof(sample));
if (ret)
goto done;
buf[j++] = sample;
data->buf[j++] = sample;
}
iio_push_to_buffers_with_timestamp(indio_dev, buf, pf->timestamp);
iio_push_to_buffers_with_timestamp(indio_dev, data->buf, pf->timestamp);
done:
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
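
The point of moving the buffer into bmi160_data with __aligned(8) is that iio_push_to_buffers_with_timestamp() stores an s64 timestamp in the tail of the sample buffer, so the buffer must be at least 8-byte aligned, which the old on-stack array never guaranteed. A small sketch of the resulting layout, under those assumptions:

    #include <stdint.h>
    #include <stdio.h>

    struct bmi160_buf_model {
        /* 2 sensors x 3 axes of __le16 samples plus padding: 24 bytes, so the
         * 8-byte timestamp written into the tail lands on an aligned boundary. */
        uint16_t buf[12] __attribute__((aligned(8)));
    };

    int main(void)
    {
        printf("size=%zu align=%zu\n",
               sizeof(struct bmi160_buf_model),
               _Alignof(struct bmi160_buf_model));  /* size=24 align=8 */
        return 0;
    }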

View File

@ -533,6 +533,15 @@ static int verity_verify_io(struct dm_verity_io *io)
return 0;
}
/*
* Skip verity work in response to I/O error when system is shutting down.
*/
static inline bool verity_is_system_shutting_down(void)
{
return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
|| system_state == SYSTEM_RESTART;
}
/*
* End one "io" structure with a given error.
*/
@ -560,7 +569,8 @@ static void verity_end_io(struct bio *bio)
{
struct dm_verity_io *io = bio->bi_private;
if (bio->bi_status && !verity_fec_is_enabled(io->v)) {
if (bio->bi_status &&
(!verity_fec_is_enabled(io->v) || verity_is_system_shutting_down())) {
verity_finish_io(io, bio->bi_status);
return;
}

View File

@ -1145,7 +1145,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
struct md_rdev *err_rdev = NULL;
gfp_t gfp = GFP_NOIO;
if (r10_bio->devs[slot].rdev) {
if (slot >= 0 && r10_bio->devs[slot].rdev) {
/*
* This is an error retry, but we cannot
* safely dereference the rdev in the r10_bio,
@ -1510,6 +1510,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio, int sectors)
r10_bio->mddev = mddev;
r10_bio->sector = bio->bi_iter.bi_sector;
r10_bio->state = 0;
r10_bio->read_slot = -1;
memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * conf->copies);
if (bio_data_dir(bio) == READ)

View File

@ -182,7 +182,7 @@ out_rel_fw:
static int gp8psk_power_ctrl(struct dvb_usb_device *d, int onoff)
{
u8 status, buf;
u8 status = 0, buf;
int gp_product_id = le16_to_cpu(d->udev->descriptor.idProduct);
if (onoff) {

View File

@ -743,7 +743,7 @@ static int vmci_ctx_get_chkpt_doorbells(struct vmci_ctx *context,
return VMCI_ERROR_MORE_DATA;
}
dbells = kmalloc(data_size, GFP_ATOMIC);
dbells = kzalloc(data_size, GFP_ATOMIC);
if (!dbells)
return VMCI_ERROR_NO_MEM;

View File

@ -317,10 +317,6 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
buf += ret;
}
if (req->ooblen)
memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
req->ooblen);
return 0;
}

View File

@ -877,6 +877,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
memset(adhoc_start->ssid, 0, IEEE80211_MAX_SSID_LEN);
if (req_ssid->ssid_len > IEEE80211_MAX_SSID_LEN)
req_ssid->ssid_len = IEEE80211_MAX_SSID_LEN;
memcpy(adhoc_start->ssid, req_ssid->ssid, req_ssid->ssid_len);
mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: SSID = %s\n",

View File

@ -379,8 +379,10 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
device_init_wakeup(&adev->dev, true);
ldata->rtc = devm_rtc_allocate_device(&adev->dev);
if (IS_ERR(ldata->rtc))
return PTR_ERR(ldata->rtc);
if (IS_ERR(ldata->rtc)) {
ret = PTR_ERR(ldata->rtc);
goto out;
}
ldata->rtc->ops = ops;

View File

@ -276,7 +276,7 @@ static void __init sun6i_rtc_clk_init(struct device_node *node,
300000000);
if (IS_ERR(rtc->int_osc)) {
pr_crit("Couldn't register the internal oscillator\n");
return;
goto err;
}
parents[0] = clk_hw_get_name(rtc->int_osc);
@ -292,7 +292,7 @@ static void __init sun6i_rtc_clk_init(struct device_node *node,
rtc->losc = clk_register(NULL, &rtc->hw);
if (IS_ERR(rtc->losc)) {
pr_crit("Couldn't register the LOSC clock\n");
return;
goto err_register;
}
of_property_read_string_index(node, "clock-output-names", 1,
@ -303,7 +303,7 @@ static void __init sun6i_rtc_clk_init(struct device_node *node,
&rtc->lock);
if (IS_ERR(rtc->ext_losc)) {
pr_crit("Couldn't register the LOSC external gate\n");
return;
goto err_register;
}
clk_data->num = 2;
@ -316,6 +316,8 @@ static void __init sun6i_rtc_clk_init(struct device_node *node,
of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
return;
err_register:
clk_hw_unregister_fixed_rate(rtc->int_osc);
err:
kfree(clk_data);
}

View File

@ -4,6 +4,7 @@ config SCSI_CXGB4_ISCSI
depends on PCI && INET && (IPV6 || IPV6=n)
depends on THERMAL || !THERMAL
depends on ETHERNET
depends on TLS || TLS=n
select NET_VENDOR_CHELSIO
select CHELSIO_T4
select CHELSIO_LIB

View File

@ -320,6 +320,7 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
unsigned long state)
{
struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
int ret;
/* Request state should be less than max_level */
if (WARN_ON(state > cpufreq_cdev->max_level))
@ -329,10 +330,12 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
if (cpufreq_cdev->cpufreq_state == state)
return 0;
cpufreq_cdev->cpufreq_state = state;
ret = freq_qos_update_request(&cpufreq_cdev->qos_req,
cpufreq_cdev->freq_table[state].frequency);
if (ret > 0)
cpufreq_cdev->cpufreq_state = state;
return freq_qos_update_request(&cpufreq_cdev->qos_req,
cpufreq_cdev->freq_table[state].frequency);
return ret;
}
/**

View File

@ -114,8 +114,6 @@ static void vfio_pci_probe_mmaps(struct vfio_pci_device *vdev)
int bar;
struct vfio_pci_dummy_resource *dummy_res;
INIT_LIST_HEAD(&vdev->dummy_resources_list);
for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
res = vdev->pdev->resource + bar;
@ -1606,6 +1604,7 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mutex_init(&vdev->igate);
spin_lock_init(&vdev->irqlock);
mutex_init(&vdev->ioeventfds_lock);
INIT_LIST_HEAD(&vdev->dummy_resources_list);
INIT_LIST_HEAD(&vdev->ioeventfds_list);
mutex_init(&vdev->vma_lock);
INIT_LIST_HEAD(&vdev->vma_list);

View File

@ -351,7 +351,7 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
info->si_lasti = (le32_to_cpu(bfs_sb->s_start) - BFS_BSIZE) / sizeof(struct bfs_inode) + BFS_ROOT_INO - 1;
if (info->si_lasti == BFS_MAX_LASTI)
printf("WARNING: filesystem %s was created with 512 inodes, the real maximum is 511, mounting anyway\n", s->s_id);
printf("NOTE: filesystem %s was created with 512 inodes, the real maximum is 511, mounting anyway\n", s->s_id);
else if (info->si_lasti > BFS_MAX_LASTI) {
printf("Impossible last inode number %lu > %d on %s\n", info->si_lasti, BFS_MAX_LASTI, s->s_id);
goto out1;

View File

@ -1256,6 +1256,7 @@ static int cluster_pages_for_defrag(struct inode *inode,
u64 page_end;
u64 page_cnt;
u64 start = (u64)start_index << PAGE_SHIFT;
u64 search_start;
int ret;
int i;
int i_done;
@ -1352,6 +1353,40 @@ again:
lock_extent_bits(&BTRFS_I(inode)->io_tree,
page_start, page_end - 1, &cached_state);
/*
* When defragmenting we skip ranges that have holes or inline extents,
* (check should_defrag_range()), to avoid unnecessary IO and wasting
* space. At btrfs_defrag_file(), we check if a range should be defragged
* before locking the inode and then, if it should, we trigger a sync
* page cache readahead - we lock the inode only after that to avoid
* blocking for too long other tasks that possibly want to operate on
* other file ranges. But before we were able to get the inode lock,
* some other task may have punched a hole in the range, or we may have
* now an inline extent, in which case we should not defrag. So check
* for that here, where we have the inode and the range locked, and bail
* out if that happened.
*/
search_start = page_start;
while (search_start < page_end) {
struct extent_map *em;
em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, search_start,
page_end - search_start, 0);
if (IS_ERR(em)) {
ret = PTR_ERR(em);
goto out_unlock_range;
}
if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
free_extent_map(em);
/* Ok, 0 means we did not defrag anything */
ret = 0;
goto out_unlock_range;
}
search_start = extent_map_end(em);
free_extent_map(em);
}
clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start,
page_end - 1, EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
EXTENT_DEFRAG, 0, 0, &cached_state);
@ -1382,6 +1417,10 @@ again:
btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT);
extent_changeset_free(data_reserved);
return i_done;
out_unlock_range:
unlock_extent_cached(&BTRFS_I(inode)->io_tree,
page_start, page_end - 1, &cached_state);
out:
for (i = 0; i < i_done; i++) {
unlock_page(pages[i]);

View File

@ -23,6 +23,9 @@
#define FSCRYPT_CONTEXT_V1 1
#define FSCRYPT_CONTEXT_V2 2
/* Keep this in sync with include/uapi/linux/fscrypt.h */
#define FSCRYPT_MODE_MAX FSCRYPT_MODE_ADIANTUM
struct fscrypt_context_v1 {
u8 version; /* FSCRYPT_CONTEXT_V1 */
u8 contents_encryption_mode;
@ -387,7 +390,7 @@ struct fscrypt_master_key {
spinlock_t mk_decrypted_inodes_lock;
/* Per-mode tfms for DIRECT_KEY policies, allocated on-demand */
struct crypto_skcipher *mk_mode_keys[__FSCRYPT_MODE_MAX + 1];
struct crypto_skcipher *mk_mode_keys[FSCRYPT_MODE_MAX + 1];
} __randomize_layout;

View File

@ -58,8 +58,8 @@ int __fscrypt_prepare_link(struct inode *inode, struct inode *dir,
if (err)
return err;
/* ... in case we looked up ciphertext name before key was added */
if (dentry->d_flags & DCACHE_ENCRYPTED_NAME)
/* ... in case we looked up no-key name before key was added */
if (fscrypt_is_nokey_name(dentry))
return -ENOKEY;
if (!fscrypt_has_permitted_context(dir, inode))
@ -83,9 +83,9 @@ int __fscrypt_prepare_rename(struct inode *old_dir, struct dentry *old_dentry,
if (err)
return err;
/* ... in case we looked up ciphertext name(s) before key was added */
if ((old_dentry->d_flags | new_dentry->d_flags) &
DCACHE_ENCRYPTED_NAME)
/* ... in case we looked up no-key name(s) before key was added */
if (fscrypt_is_nokey_name(old_dentry) ||
fscrypt_is_nokey_name(new_dentry))
return -ENOKEY;
if (old_dir != new_dir) {

View File

@ -55,6 +55,8 @@ static struct fscrypt_mode *
select_encryption_mode(const union fscrypt_policy *policy,
const struct inode *inode)
{
BUILD_BUG_ON(ARRAY_SIZE(available_modes) != FSCRYPT_MODE_MAX + 1);
if (S_ISREG(inode->i_mode))
return &available_modes[fscrypt_policy_contents_mode(policy)];

View File

@ -55,7 +55,8 @@ bool fscrypt_supported_policy(const union fscrypt_policy *policy_u,
return false;
}
if (policy->flags & ~FSCRYPT_POLICY_FLAGS_VALID) {
if (policy->flags & ~(FSCRYPT_POLICY_FLAGS_PAD_MASK |
FSCRYPT_POLICY_FLAG_DIRECT_KEY)) {
fscrypt_warn(inode,
"Unsupported encryption flags (0x%02x)",
policy->flags);
@ -76,7 +77,8 @@ bool fscrypt_supported_policy(const union fscrypt_policy *policy_u,
return false;
}
if (policy->flags & ~FSCRYPT_POLICY_FLAGS_VALID) {
if (policy->flags & ~(FSCRYPT_POLICY_FLAGS_PAD_MASK |
FSCRYPT_POLICY_FLAG_DIRECT_KEY)) {
fscrypt_warn(inode,
"Unsupported encryption flags (0x%02x)",
policy->flags);

View File

@ -1009,8 +1009,8 @@ EXPORT_SYMBOL(read_code);
/*
* Maps the mm_struct mm into the current task struct.
* On success, this function returns with the mutex
* exec_update_mutex locked.
* On success, this function returns with exec_update_lock
* held for writing.
*/
static int exec_mmap(struct mm_struct *mm)
{
@ -1023,7 +1023,7 @@ static int exec_mmap(struct mm_struct *mm)
old_mm = current->mm;
exec_mm_release(tsk, old_mm);
ret = mutex_lock_killable(&tsk->signal->exec_update_mutex);
ret = down_write_killable(&tsk->signal->exec_update_lock);
if (ret)
return ret;
@ -1038,7 +1038,7 @@ static int exec_mmap(struct mm_struct *mm)
down_read(&old_mm->mmap_sem);
if (unlikely(old_mm->core_state)) {
up_read(&old_mm->mmap_sem);
mutex_unlock(&tsk->signal->exec_update_mutex);
up_write(&tsk->signal->exec_update_lock);
return -EINTR;
}
}
@ -1450,7 +1450,7 @@ static void free_bprm(struct linux_binprm *bprm)
free_arg_pages(bprm);
if (bprm->cred) {
if (bprm->called_exec_mmap)
mutex_unlock(&current->signal->exec_update_mutex);
up_write(&current->signal->exec_update_lock);
mutex_unlock(&current->signal->cred_guard_mutex);
abort_creds(bprm->cred);
}
@ -1500,7 +1500,7 @@ void install_exec_creds(struct linux_binprm *bprm)
* credentials; any time after this it may be unlocked.
*/
security_bprm_committed_creds(bprm);
mutex_unlock(&current->signal->exec_update_mutex);
up_write(&current->signal->exec_update_lock);
mutex_unlock(&current->signal->cred_guard_mutex);
}
EXPORT_SYMBOL(install_exec_creds);
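
The conversion from exec_update_mutex to the exec_update_lock rw_semaphore lets the /proc readers further down in this series (lock_trace(), do_io_accounting()) run concurrently with one another, while exec still gets exclusive access. A userspace analogue of the resulting scheme, assuming a pthread rwlock model:

    #include <pthread.h>

    static pthread_rwlock_t exec_update_lock = PTHREAD_RWLOCK_INITIALIZER;

    static void exec_mmap_model(void)       /* exec path: exclusive writer */
    {
        pthread_rwlock_wrlock(&exec_update_lock);
        /* ... swap mm and commit new credentials ... */
        pthread_rwlock_unlock(&exec_update_lock);
    }

    static int lock_trace_model(void)       /* fs/proc path: shared reader */
    {
        return pthread_rwlock_rdlock(&exec_update_lock);
    }

    static void unlock_trace_model(void)
    {
        pthread_rwlock_unlock(&exec_update_lock);
    }

    int main(void)
    {
        exec_mmap_model();
        if (lock_trace_model() == 0)
            unlock_trace_model();
        return 0;
    }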

View File

@ -2192,6 +2192,9 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
if (!dentry->d_name.len)
return -EINVAL;
if (fscrypt_is_nokey_name(dentry))
return -ENOKEY;
#ifdef CONFIG_UNICODE
if (ext4_has_strict_mode(sbi) && IS_CASEFOLDED(dir) &&
sbi->s_encoding && utf8_validate(sbi->s_encoding, &dentry->d_name))

View File

@ -455,19 +455,17 @@ static bool system_going_down(void)
static void ext4_handle_error(struct super_block *sb)
{
journal_t *journal = EXT4_SB(sb)->s_journal;
if (test_opt(sb, WARN_ON_ERROR))
WARN_ON_ONCE(1);
if (sb_rdonly(sb))
if (sb_rdonly(sb) || test_opt(sb, ERRORS_CONT))
return;
if (!test_opt(sb, ERRORS_CONT)) {
journal_t *journal = EXT4_SB(sb)->s_journal;
EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
if (journal)
jbd2_journal_abort(journal, -EIO);
}
EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
if (journal)
jbd2_journal_abort(journal, -EIO);
/*
* We force ERRORS_RO behavior when system is rebooting. Otherwise we
* could panic during 'reboot -f' as the underlying device got already

View File

@ -1596,7 +1596,7 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
goto out;
}
if (NM_I(sbi)->dirty_nat_cnt == 0 &&
if (NM_I(sbi)->nat_cnt[DIRTY_NAT] == 0 &&
SIT_I(sbi)->dirty_sentries == 0 &&
prefree_segments(sbi) == 0) {
f2fs_flush_sit_entries(sbi, cpc);

View File

@ -107,8 +107,8 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->node_pages = NODE_MAPPING(sbi)->nrpages;
if (sbi->meta_inode)
si->meta_pages = META_MAPPING(sbi)->nrpages;
si->nats = NM_I(sbi)->nat_cnt;
si->dirty_nats = NM_I(sbi)->dirty_nat_cnt;
si->nats = NM_I(sbi)->nat_cnt[TOTAL_NAT];
si->dirty_nats = NM_I(sbi)->nat_cnt[DIRTY_NAT];
si->sits = MAIN_SEGS(sbi);
si->dirty_sits = SIT_I(sbi)->dirty_sentries;
si->free_nids = NM_I(sbi)->nid_cnt[FREE_NID];
@ -254,9 +254,10 @@ get_cache:
si->cache_mem += (NM_I(sbi)->nid_cnt[FREE_NID] +
NM_I(sbi)->nid_cnt[PREALLOC_NID]) *
sizeof(struct free_nid);
si->cache_mem += NM_I(sbi)->nat_cnt * sizeof(struct nat_entry);
si->cache_mem += NM_I(sbi)->dirty_nat_cnt *
sizeof(struct nat_entry_set);
si->cache_mem += NM_I(sbi)->nat_cnt[TOTAL_NAT] *
sizeof(struct nat_entry);
si->cache_mem += NM_I(sbi)->nat_cnt[DIRTY_NAT] *
sizeof(struct nat_entry_set);
si->cache_mem += si->inmem_pages * sizeof(struct inmem_pages);
for (i = 0; i < MAX_INO_ENTRY; i++)
si->cache_mem += sbi->im[i].ino_num * sizeof(struct ino_entry);

View File

@ -797,6 +797,13 @@ enum nid_state {
MAX_NID_STATE,
};
enum nat_state {
TOTAL_NAT,
DIRTY_NAT,
RECLAIMABLE_NAT,
MAX_NAT_STATE,
};
struct f2fs_nm_info {
block_t nat_blkaddr; /* base disk address of NAT */
nid_t max_nid; /* maximum possible node ids */
@ -812,8 +819,7 @@ struct f2fs_nm_info {
struct rw_semaphore nat_tree_lock; /* protect nat_tree_lock */
struct list_head nat_entries; /* cached nat entry list (clean) */
spinlock_t nat_list_lock; /* protect clean nat entry list */
unsigned int nat_cnt; /* the # of cached nat entries */
unsigned int dirty_nat_cnt; /* total num of nat entries in set */
unsigned int nat_cnt[MAX_NAT_STATE]; /* the # of cached nat entries */
unsigned int nat_blocks; /* # of nat blocks */
/* free node ids management */
@ -2998,6 +3004,8 @@ bool f2fs_empty_dir(struct inode *dir);
static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
{
if (fscrypt_is_nokey_name(dentry))
return -ENOKEY;
return f2fs_do_add_link(d_inode(dentry->d_parent), &dentry->d_name,
inode, inode->i_ino, inode->i_mode);
}
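
The counter rework keeps the invariant TOTAL_NAT = DIRTY_NAT + RECLAIMABLE_NAT updated at every transition, instead of deriving the reclaimable count by subtraction as the old shrinker did (nat_cnt - dirty_nat_cnt, clamped at zero). A compact sketch of the transitions, assuming the enum from this hunk:

    enum nat_state_model { TOTAL_NAT, DIRTY_NAT, RECLAIMABLE_NAT, MAX_NAT_STATE };

    static unsigned int nat_cnt[MAX_NAT_STATE];

    static void nat_add(void)         /* __init_nat_entry() */
    {
        nat_cnt[TOTAL_NAT]++;
        nat_cnt[RECLAIMABLE_NAT]++;
    }

    static void nat_set_dirty(void)   /* __set_nat_cache_dirty() */
    {
        nat_cnt[DIRTY_NAT]++;
        nat_cnt[RECLAIMABLE_NAT]--;
    }

    static void nat_clear_dirty(void) /* __clear_nat_cache_dirty() */
    {
        nat_cnt[DIRTY_NAT]--;
        nat_cnt[RECLAIMABLE_NAT]++;
    }

    static void nat_del(void)         /* __del_from_nat_cache() */
    {
        nat_cnt[TOTAL_NAT]--;
        nat_cnt[RECLAIMABLE_NAT]--;
    }

    int main(void)
    {
        nat_add();
        nat_set_dirty();
        nat_clear_dirty();
        nat_del();
        return nat_cnt[TOTAL_NAT];    /* 0: invariant holds */
    }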

View File

@ -62,8 +62,8 @@ bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
sizeof(struct free_nid)) >> PAGE_SHIFT;
res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
} else if (type == NAT_ENTRIES) {
mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >>
PAGE_SHIFT;
mem_size = (nm_i->nat_cnt[TOTAL_NAT] *
sizeof(struct nat_entry)) >> PAGE_SHIFT;
res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
if (excess_cached_nats(sbi))
res = false;
@ -177,7 +177,8 @@ static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
list_add_tail(&ne->list, &nm_i->nat_entries);
spin_unlock(&nm_i->nat_list_lock);
nm_i->nat_cnt++;
nm_i->nat_cnt[TOTAL_NAT]++;
nm_i->nat_cnt[RECLAIMABLE_NAT]++;
return ne;
}
@ -207,7 +208,8 @@ static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
nm_i->nat_cnt--;
nm_i->nat_cnt[TOTAL_NAT]--;
nm_i->nat_cnt[RECLAIMABLE_NAT]--;
__free_nat_entry(e);
}
@ -253,7 +255,8 @@ static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
if (get_nat_flag(ne, IS_DIRTY))
goto refresh_list;
nm_i->dirty_nat_cnt++;
nm_i->nat_cnt[DIRTY_NAT]++;
nm_i->nat_cnt[RECLAIMABLE_NAT]--;
set_nat_flag(ne, IS_DIRTY, true);
refresh_list:
spin_lock(&nm_i->nat_list_lock);
@ -273,7 +276,8 @@ static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
set_nat_flag(ne, IS_DIRTY, false);
set->entry_cnt--;
nm_i->dirty_nat_cnt--;
nm_i->nat_cnt[DIRTY_NAT]--;
nm_i->nat_cnt[RECLAIMABLE_NAT]++;
}
static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
@ -2881,14 +2885,17 @@ int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
LIST_HEAD(sets);
int err = 0;
/* during unmount, let's flush nat_bits before checking dirty_nat_cnt */
/*
* during unmount, let's flush nat_bits before checking
* nat_cnt[DIRTY_NAT].
*/
if (enabled_nat_bits(sbi, cpc)) {
down_write(&nm_i->nat_tree_lock);
remove_nats_in_journal(sbi);
up_write(&nm_i->nat_tree_lock);
}
if (!nm_i->dirty_nat_cnt)
if (!nm_i->nat_cnt[DIRTY_NAT])
return 0;
down_write(&nm_i->nat_tree_lock);
@ -2899,7 +2906,8 @@ int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
* into nat entry set.
*/
if (enabled_nat_bits(sbi, cpc) ||
!__has_cursum_space(journal, nm_i->dirty_nat_cnt, NAT_JOURNAL))
!__has_cursum_space(journal,
nm_i->nat_cnt[DIRTY_NAT], NAT_JOURNAL))
remove_nats_in_journal(sbi);
while ((found = __gang_lookup_nat_set(nm_i,
@ -3023,7 +3031,6 @@ static int init_node_manager(struct f2fs_sb_info *sbi)
F2FS_RESERVED_NODE_NUM;
nm_i->nid_cnt[FREE_NID] = 0;
nm_i->nid_cnt[PREALLOC_NID] = 0;
nm_i->nat_cnt = 0;
nm_i->ram_thresh = DEF_RAM_THRESHOLD;
nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
@ -3160,7 +3167,7 @@ void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
__del_from_nat_cache(nm_i, natvec[idx]);
}
}
f2fs_bug_on(sbi, nm_i->nat_cnt);
f2fs_bug_on(sbi, nm_i->nat_cnt[TOTAL_NAT]);
/* destroy nat set cache */
nid = 0;

View File

@ -123,13 +123,13 @@ static inline void raw_nat_from_node_info(struct f2fs_nat_entry *raw_ne,
static inline bool excess_dirty_nats(struct f2fs_sb_info *sbi)
{
return NM_I(sbi)->dirty_nat_cnt >= NM_I(sbi)->max_nid *
return NM_I(sbi)->nat_cnt[DIRTY_NAT] >= NM_I(sbi)->max_nid *
NM_I(sbi)->dirty_nats_ratio / 100;
}
static inline bool excess_cached_nats(struct f2fs_sb_info *sbi)
{
return NM_I(sbi)->nat_cnt >= DEF_NAT_CACHE_THRESHOLD;
return NM_I(sbi)->nat_cnt[TOTAL_NAT] >= DEF_NAT_CACHE_THRESHOLD;
}
static inline bool excess_dirty_nodes(struct f2fs_sb_info *sbi)

View File

@ -18,9 +18,7 @@ static unsigned int shrinker_run_no;
static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
{
long count = NM_I(sbi)->nat_cnt - NM_I(sbi)->dirty_nat_cnt;
return count > 0 ? count : 0;
return NM_I(sbi)->nat_cnt[RECLAIMABLE_NAT];
}
static unsigned long __count_free_nids(struct f2fs_sb_info *sbi)

View File

@ -2523,7 +2523,6 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
block_t total_sections, blocks_per_seg;
struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
(bh->b_data + F2FS_SUPER_OFFSET);
unsigned int blocksize;
size_t crc_offset = 0;
__u32 crc = 0;
@ -2557,10 +2556,10 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
}
/* Currently, support only 4KB block size */
blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
if (blocksize != F2FS_BLKSIZE) {
f2fs_info(sbi, "Invalid blocksize (%u), supports only 4KB",
blocksize);
if (le32_to_cpu(raw_super->log_blocksize) != F2FS_BLKSIZE_BITS) {
f2fs_info(sbi, "Invalid log_blocksize (%u), supports only %u",
le32_to_cpu(raw_super->log_blocksize),
F2FS_BLKSIZE_BITS);
return -EFSCORRUPTED;
}

View File

@ -779,9 +779,10 @@ void send_sigio(struct fown_struct *fown, int fd, int band)
{
struct task_struct *p;
enum pid_type type;
unsigned long flags;
struct pid *pid;
read_lock(&fown->lock);
read_lock_irqsave(&fown->lock, flags);
type = fown->pid_type;
pid = fown->pid;
@ -802,7 +803,7 @@ void send_sigio(struct fown_struct *fown, int fd, int band)
read_unlock(&tasklist_lock);
}
out_unlock_fown:
read_unlock(&fown->lock);
read_unlock_irqrestore(&fown->lock, flags);
}
static void send_sigurg_to_task(struct task_struct *p,
@ -817,9 +818,10 @@ int send_sigurg(struct fown_struct *fown)
struct task_struct *p;
enum pid_type type;
struct pid *pid;
unsigned long flags;
int ret = 0;
read_lock(&fown->lock);
read_lock_irqsave(&fown->lock, flags);
type = fown->pid_type;
pid = fown->pid;
@ -842,7 +844,7 @@ int send_sigurg(struct fown_struct *fown)
read_unlock(&tasklist_lock);
}
out_unlock_fown:
read_unlock(&fown->lock);
read_unlock_irqrestore(&fown->lock, flags);
return ret;
}

View File

@ -19,6 +19,9 @@ struct posix_acl *fuse_get_acl(struct inode *inode, int type)
void *value = NULL;
struct posix_acl *acl;
if (fuse_is_bad(inode))
return ERR_PTR(-EIO);
if (!fc->posix_acl || fc->no_getxattr)
return NULL;
@ -53,6 +56,9 @@ int fuse_set_acl(struct inode *inode, struct posix_acl *acl, int type)
const char *name;
int ret;
if (fuse_is_bad(inode))
return -EIO;
if (!fc->posix_acl || fc->no_setxattr)
return -EOPNOTSUPP;

View File

@ -201,7 +201,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
int ret;
inode = d_inode_rcu(entry);
if (inode && is_bad_inode(inode))
if (inode && fuse_is_bad(inode))
goto invalid;
else if (time_before64(fuse_dentry_time(entry), get_jiffies_64()) ||
(flags & LOOKUP_REVAL)) {
@ -386,6 +386,9 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
bool outarg_valid = true;
bool locked;
if (fuse_is_bad(dir))
return ERR_PTR(-EIO);
locked = fuse_lock_inode(dir);
err = fuse_lookup_name(dir->i_sb, get_node_id(dir), &entry->d_name,
&outarg, &inode);
@ -529,6 +532,9 @@ static int fuse_atomic_open(struct inode *dir, struct dentry *entry,
struct fuse_conn *fc = get_fuse_conn(dir);
struct dentry *res = NULL;
if (fuse_is_bad(dir))
return -EIO;
if (d_in_lookup(entry)) {
res = fuse_lookup(dir, entry, 0);
if (IS_ERR(res))
@ -577,6 +583,9 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_args *args,
int err;
struct fuse_forget_link *forget;
if (fuse_is_bad(dir))
return -EIO;
forget = fuse_alloc_forget();
if (!forget)
return -ENOMEM;
@ -704,6 +713,9 @@ static int fuse_unlink(struct inode *dir, struct dentry *entry)
struct fuse_conn *fc = get_fuse_conn(dir);
FUSE_ARGS(args);
if (fuse_is_bad(dir))
return -EIO;
args.opcode = FUSE_UNLINK;
args.nodeid = get_node_id(dir);
args.in_numargs = 1;
@ -740,6 +752,9 @@ static int fuse_rmdir(struct inode *dir, struct dentry *entry)
struct fuse_conn *fc = get_fuse_conn(dir);
FUSE_ARGS(args);
if (fuse_is_bad(dir))
return -EIO;
args.opcode = FUSE_RMDIR;
args.nodeid = get_node_id(dir);
args.in_numargs = 1;
@ -818,6 +833,9 @@ static int fuse_rename2(struct inode *olddir, struct dentry *oldent,
struct fuse_conn *fc = get_fuse_conn(olddir);
int err;
if (fuse_is_bad(olddir))
return -EIO;
if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE))
return -EINVAL;
@ -953,7 +971,7 @@ static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
if (!err) {
if (fuse_invalid_attr(&outarg.attr) ||
(inode->i_mode ^ outarg.attr.mode) & S_IFMT) {
make_bad_inode(inode);
fuse_make_bad(inode);
err = -EIO;
} else {
fuse_change_attributes(inode, &outarg.attr,
@ -1155,6 +1173,9 @@ static int fuse_permission(struct inode *inode, int mask)
bool refreshed = false;
int err = 0;
if (fuse_is_bad(inode))
return -EIO;
if (!fuse_allow_current_process(fc))
return -EACCES;
@ -1250,7 +1271,7 @@ static const char *fuse_get_link(struct dentry *dentry, struct inode *inode,
int err;
err = -EIO;
if (is_bad_inode(inode))
if (fuse_is_bad(inode))
goto out_err;
if (fc->cache_symlinks)
@ -1298,7 +1319,7 @@ static int fuse_dir_fsync(struct file *file, loff_t start, loff_t end,
struct fuse_conn *fc = get_fuse_conn(inode);
int err;
if (is_bad_inode(inode))
if (fuse_is_bad(inode))
return -EIO;
if (fc->no_fsyncdir)
@ -1575,7 +1596,7 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
if (fuse_invalid_attr(&outarg.attr) ||
(inode->i_mode ^ outarg.attr.mode) & S_IFMT) {
make_bad_inode(inode);
fuse_make_bad(inode);
err = -EIO;
goto error;
}
@ -1631,6 +1652,9 @@ static int fuse_setattr(struct dentry *entry, struct iattr *attr)
struct file *file = (attr->ia_valid & ATTR_FILE) ? attr->ia_file : NULL;
int ret;
if (fuse_is_bad(inode))
return -EIO;
if (!fuse_allow_current_process(get_fuse_conn(inode)))
return -EACCES;
@ -1689,6 +1713,9 @@ static int fuse_getattr(const struct path *path, struct kstat *stat,
struct inode *inode = d_inode(path->dentry);
struct fuse_conn *fc = get_fuse_conn(inode);
if (fuse_is_bad(inode))
return -EIO;
if (!fuse_allow_current_process(fc))
return -EACCES;

View File

@ -222,6 +222,9 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
fc->atomic_o_trunc &&
fc->writeback_cache;
if (fuse_is_bad(inode))
return -EIO;
err = generic_file_open(inode, file);
if (err)
return err;
@ -443,7 +446,7 @@ static int fuse_flush(struct file *file, fl_owner_t id)
FUSE_ARGS(args);
int err;
if (is_bad_inode(inode))
if (fuse_is_bad(inode))
return -EIO;
if (fc->no_flush)
@ -506,7 +509,7 @@ static int fuse_fsync(struct file *file, loff_t start, loff_t end,
struct fuse_conn *fc = get_fuse_conn(inode);
int err;
if (is_bad_inode(inode))
if (fuse_is_bad(inode))
return -EIO;
inode_lock(inode);
@ -830,7 +833,7 @@ static int fuse_readpage(struct file *file, struct page *page)
int err;
err = -EIO;
if (is_bad_inode(inode))
if (fuse_is_bad(inode))
goto out;
err = fuse_do_readpage(file, page);
@ -973,7 +976,7 @@ static int fuse_readpages(struct file *file, struct address_space *mapping,
int err;
err = -EIO;
if (is_bad_inode(inode))
if (fuse_is_bad(inode))
goto out;
data.file = file;
@ -1569,7 +1572,7 @@ static ssize_t fuse_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
struct file *file = iocb->ki_filp;
struct fuse_file *ff = file->private_data;
if (is_bad_inode(file_inode(file)))
if (fuse_is_bad(file_inode(file)))
return -EIO;
if (!(ff->open_flags & FOPEN_DIRECT_IO))
@ -1583,7 +1586,7 @@ static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
struct file *file = iocb->ki_filp;
struct fuse_file *ff = file->private_data;
if (is_bad_inode(file_inode(file)))
if (fuse_is_bad(file_inode(file)))
return -EIO;
if (!(ff->open_flags & FOPEN_DIRECT_IO))
@ -2133,7 +2136,7 @@ static int fuse_writepages(struct address_space *mapping,
int err;
err = -EIO;
if (is_bad_inode(inode))
if (fuse_is_bad(inode))
goto out;
data.inode = inode;
@ -2911,7 +2914,7 @@ long fuse_ioctl_common(struct file *file, unsigned int cmd,
if (!fuse_allow_current_process(fc))
return -EACCES;
if (is_bad_inode(inode))
if (fuse_is_bad(inode))
return -EIO;
return fuse_do_ioctl(file, cmd, arg, flags);

View File

@ -158,6 +158,8 @@ enum {
FUSE_I_INIT_RDPLUS,
/** An operation changing file size is in progress */
FUSE_I_SIZE_UNSTABLE,
/* Bad inode */
FUSE_I_BAD,
};
struct fuse_conn;
@ -787,6 +789,16 @@ static inline u64 fuse_get_attr_version(struct fuse_conn *fc)
return atomic64_read(&fc->attr_version);
}
static inline void fuse_make_bad(struct inode *inode)
{
set_bit(FUSE_I_BAD, &get_fuse_inode(inode)->state);
}
static inline bool fuse_is_bad(struct inode *inode)
{
return unlikely(test_bit(FUSE_I_BAD, &get_fuse_inode(inode)->state));
}
/** Device operations */
extern const struct file_operations fuse_dev_operations;

View File

@ -115,7 +115,7 @@ static void fuse_evict_inode(struct inode *inode)
fuse_queue_forget(fc, fi->forget, fi->nodeid, fi->nlookup);
fi->forget = NULL;
}
if (S_ISREG(inode->i_mode) && !is_bad_inode(inode)) {
if (S_ISREG(inode->i_mode) && !fuse_is_bad(inode)) {
WARN_ON(!list_empty(&fi->write_files));
WARN_ON(!list_empty(&fi->queued_writes));
}
@ -306,7 +306,7 @@ struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
unlock_new_inode(inode);
} else if ((inode->i_mode ^ attr->mode) & S_IFMT) {
/* Inode has changed type, any I/O on the old should fail */
make_bad_inode(inode);
fuse_make_bad(inode);
iput(inode);
goto retry;
}

View File

@ -207,7 +207,7 @@ retry:
dput(dentry);
goto retry;
}
if (is_bad_inode(inode)) {
if (fuse_is_bad(inode)) {
dput(dentry);
return -EIO;
}
@ -568,7 +568,7 @@ int fuse_readdir(struct file *file, struct dir_context *ctx)
struct inode *inode = file_inode(file);
int err;
if (is_bad_inode(inode))
if (fuse_is_bad(inode))
return -EIO;
mutex_lock(&ff->readdir.lock);

View File

@ -113,6 +113,9 @@ ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size)
struct fuse_getxattr_out outarg;
ssize_t ret;
if (fuse_is_bad(inode))
return -EIO;
if (!fuse_allow_current_process(fc))
return -EACCES;
@ -178,6 +181,9 @@ static int fuse_xattr_get(const struct xattr_handler *handler,
struct dentry *dentry, struct inode *inode,
const char *name, void *value, size_t size)
{
if (fuse_is_bad(inode))
return -EIO;
return fuse_getxattr(inode, name, value, size);
}
@ -186,6 +192,9 @@ static int fuse_xattr_set(const struct xattr_handler *handler,
const char *name, const void *value, size_t size,
int flags)
{
if (fuse_is_bad(inode))
return -EIO;
if (!value)
return fuse_removexattr(inode, name);

View File

@ -38,6 +38,7 @@ struct jffs2_mount_opts {
* users. This is implemented simply by means of not allowing the
* latter users to write to the file system if the amount if the
* available space is less then 'rp_size'. */
bool set_rp_size;
unsigned int rp_size;
};

View File

@ -88,7 +88,7 @@ static int jffs2_show_options(struct seq_file *s, struct dentry *root)
if (opts->override_compr)
seq_printf(s, ",compr=%s", jffs2_compr_name(opts->compr));
if (opts->rp_size)
if (opts->set_rp_size)
seq_printf(s, ",rp_size=%u", opts->rp_size / 1024);
return 0;
@ -208,11 +208,8 @@ static int jffs2_parse_param(struct fs_context *fc, struct fs_parameter *param)
case Opt_rp_size:
if (result.uint_32 > UINT_MAX / 1024)
return invalf(fc, "jffs2: rp_size unrepresentable");
opt = result.uint_32 * 1024;
if (opt > c->mtd->size)
return invalf(fc, "jffs2: Too large reserve pool specified, max is %llu KB",
c->mtd->size / 1024);
c->mount_opts.rp_size = opt;
c->mount_opts.rp_size = result.uint_32 * 1024;
c->mount_opts.set_rp_size = true;
break;
default:
return -EINVAL;
@ -231,8 +228,10 @@ static inline void jffs2_update_mount_opts(struct fs_context *fc)
c->mount_opts.override_compr = new_c->mount_opts.override_compr;
c->mount_opts.compr = new_c->mount_opts.compr;
}
if (new_c->mount_opts.rp_size)
if (new_c->mount_opts.set_rp_size) {
c->mount_opts.set_rp_size = new_c->mount_opts.set_rp_size;
c->mount_opts.rp_size = new_c->mount_opts.rp_size;
}
mutex_unlock(&c->alloc_sem);
}
@ -272,6 +271,10 @@ static int jffs2_fill_super(struct super_block *sb, struct fs_context *fc)
c->mtd = sb->s_mtd;
c->os_priv = sb;
if (c->mount_opts.rp_size > c->mtd->size)
return invalf(fc, "jffs2: Too large reserve pool specified, max is %llu KB",
c->mtd->size / 1024);
/* Initialize JFFS2 superblock locks, the further initialization will
* be done later */
mutex_init(&c->alloc_sem);

View File

@ -156,10 +156,10 @@ static inline void mnt_add_count(struct mount *mnt, int n)
/*
* vfsmount lock must be held for write
*/
unsigned int mnt_get_count(struct mount *mnt)
int mnt_get_count(struct mount *mnt)
{
#ifdef CONFIG_SMP
unsigned int count = 0;
int count = 0;
int cpu;
for_each_possible_cpu(cpu) {
@ -1123,6 +1123,7 @@ static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);
static void mntput_no_expire(struct mount *mnt)
{
LIST_HEAD(list);
int count;
rcu_read_lock();
if (likely(READ_ONCE(mnt->mnt_ns))) {
@ -1146,7 +1147,9 @@ static void mntput_no_expire(struct mount *mnt)
*/
smp_mb();
mnt_add_count(mnt, -1);
if (mnt_get_count(mnt)) {
count = mnt_get_count(mnt);
if (count != 0) {
WARN_ON(count < 0);
rcu_read_unlock();
unlock_mount_hash();
return;

View File

@ -96,7 +96,7 @@ static void nfs4_evict_inode(struct inode *inode)
nfs_inode_return_delegation_noreclaim(inode);
/* Note that above delegreturn would trigger pnfs return-on-close */
pnfs_return_layout(inode);
pnfs_destroy_layout(NFS_I(inode));
pnfs_destroy_layout_final(NFS_I(inode));
/* First call standard NFS clear_inode() code */
nfs_clear_inode(inode);
}

View File

@ -294,6 +294,7 @@ void
pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
{
struct inode *inode;
unsigned long i_state;
if (!lo)
return;
@ -304,8 +305,12 @@ pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
if (!list_empty(&lo->plh_segs))
WARN_ONCE(1, "NFS: BUG unfreed layout segments.\n");
pnfs_detach_layout_hdr(lo);
i_state = inode->i_state;
spin_unlock(&inode->i_lock);
pnfs_free_layout_hdr(lo);
/* Notify pnfs_destroy_layout_final() that we're done */
if (i_state & (I_FREEING | I_CLEAR))
wake_up_var(lo);
}
}
@ -723,8 +728,7 @@ pnfs_free_lseg_list(struct list_head *free_me)
}
}
void
pnfs_destroy_layout(struct nfs_inode *nfsi)
static struct pnfs_layout_hdr *__pnfs_destroy_layout(struct nfs_inode *nfsi)
{
struct pnfs_layout_hdr *lo;
LIST_HEAD(tmp_list);
@ -742,9 +746,34 @@ pnfs_destroy_layout(struct nfs_inode *nfsi)
pnfs_put_layout_hdr(lo);
} else
spin_unlock(&nfsi->vfs_inode.i_lock);
return lo;
}
void pnfs_destroy_layout(struct nfs_inode *nfsi)
{
__pnfs_destroy_layout(nfsi);
}
EXPORT_SYMBOL_GPL(pnfs_destroy_layout);
static bool pnfs_layout_removed(struct nfs_inode *nfsi,
struct pnfs_layout_hdr *lo)
{
bool ret;
spin_lock(&nfsi->vfs_inode.i_lock);
ret = nfsi->layout != lo;
spin_unlock(&nfsi->vfs_inode.i_lock);
return ret;
}
void pnfs_destroy_layout_final(struct nfs_inode *nfsi)
{
struct pnfs_layout_hdr *lo = __pnfs_destroy_layout(nfsi);
if (lo)
wait_var_event(lo, pnfs_layout_removed(nfsi, lo));
}
static bool
pnfs_layout_add_bulk_destroy_list(struct inode *inode,
struct list_head *layout_list)
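
The wait added here pairs wake_up_var() in pnfs_put_layout_hdr() (fired only when the inode is being freed, per the I_FREEING | I_CLEAR check) with wait_var_event() in pnfs_destroy_layout_final(), so inode eviction cannot complete while another reference still points at the layout header. A userspace analogue of that handshake, assuming a pthread condition-variable model:

    #include <pthread.h>
    #include <stddef.h>

    static pthread_mutex_t i_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  lo_gone = PTHREAD_COND_INITIALIZER;
    static void *layout;                     /* stands in for nfsi->layout */

    static void put_layout_hdr_model(void *lo)
    {
        pthread_mutex_lock(&i_lock);
        if (layout == lo)
            layout = NULL;                   /* pnfs_detach_layout_hdr() */
        pthread_mutex_unlock(&i_lock);
        pthread_cond_broadcast(&lo_gone);    /* wake_up_var(lo) */
    }

    static void destroy_layout_final_model(void *lo)
    {
        pthread_mutex_lock(&i_lock);
        while (layout == lo)                 /* wait_var_event(lo, removed) */
            pthread_cond_wait(&lo_gone, &i_lock);
        pthread_mutex_unlock(&i_lock);
    }

    int main(void)
    {
        int hdr;
        layout = &hdr;
        put_layout_hdr_model(&hdr);          /* normally runs in another task */
        destroy_layout_final_model(&hdr);    /* returns at once: already gone */
        return 0;
    }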

View File

@ -255,6 +255,7 @@ struct pnfs_layout_segment *pnfs_layout_process(struct nfs4_layoutget *lgp);
void pnfs_layoutget_free(struct nfs4_layoutget *lgp);
void pnfs_free_lseg_list(struct list_head *tmp_list);
void pnfs_destroy_layout(struct nfs_inode *);
void pnfs_destroy_layout_final(struct nfs_inode *);
void pnfs_destroy_all_layouts(struct nfs_client *);
int pnfs_destroy_layouts_byfsid(struct nfs_client *clp,
struct nfs_fsid *fsid,
@ -651,6 +652,10 @@ static inline void pnfs_destroy_layout(struct nfs_inode *nfsi)
{
}
static inline void pnfs_destroy_layout_final(struct nfs_inode *nfsi)
{
}
static inline struct pnfs_layout_segment *
pnfs_get_lseg(struct pnfs_layout_segment *lseg)
{

View File

@ -44,7 +44,7 @@ int propagate_mount_busy(struct mount *, int);
void propagate_mount_unlock(struct mount *);
void mnt_release_group_id(struct mount *);
int get_dominating_id(struct mount *mnt, const struct path *root);
unsigned int mnt_get_count(struct mount *mnt);
int mnt_get_count(struct mount *mnt);
void mnt_set_mountpoint(struct mount *, struct mountpoint *,
struct mount *);
void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp,

View File

@ -403,11 +403,11 @@ print0:
static int lock_trace(struct task_struct *task)
{
int err = mutex_lock_killable(&task->signal->exec_update_mutex);
int err = down_read_killable(&task->signal->exec_update_lock);
if (err)
return err;
if (!ptrace_may_access(task, PTRACE_MODE_ATTACH_FSCREDS)) {
mutex_unlock(&task->signal->exec_update_mutex);
up_read(&task->signal->exec_update_lock);
return -EPERM;
}
return 0;
@ -415,7 +415,7 @@ static int lock_trace(struct task_struct *task)
static void unlock_trace(struct task_struct *task)
{
mutex_unlock(&task->signal->exec_update_mutex);
up_read(&task->signal->exec_update_lock);
}
#ifdef CONFIG_STACKTRACE
@ -2769,7 +2769,7 @@ static int do_io_accounting(struct task_struct *task, struct seq_file *m, int wh
unsigned long flags;
int result;
result = mutex_lock_killable(&task->signal->exec_update_mutex);
result = down_read_killable(&task->signal->exec_update_lock);
if (result)
return result;
@ -2805,7 +2805,7 @@ static int do_io_accounting(struct task_struct *task, struct seq_file *m, int wh
result = 0;
out_unlock:
mutex_unlock(&task->signal->exec_update_mutex);
up_read(&task->signal->exec_update_lock);
return result;
}

View File

@ -62,7 +62,7 @@ static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
memset(buf, 0, info->dqi_usable_bs);
return sb->s_op->quota_read(sb, info->dqi_type, buf,
info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits);
}
static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
@ -71,7 +71,7 @@ static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
ssize_t ret;
ret = sb->s_op->quota_write(sb, info->dqi_type, buf,
info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits);
if (ret != info->dqi_usable_bs) {
quota_error(sb, "dquota write failed");
if (ret >= 0)
@ -284,7 +284,7 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
blk);
goto out_buf;
}
dquot->dq_off = (blk << info->dqi_blocksize_bits) +
dquot->dq_off = ((loff_t)blk << info->dqi_blocksize_bits) +
sizeof(struct qt_disk_dqdbheader) +
i * info->dqi_entry_size;
kfree(buf);
@ -559,7 +559,7 @@ static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
ret = -EIO;
goto out_buf;
} else {
ret = (blk << info->dqi_blocksize_bits) + sizeof(struct
ret = ((loff_t)blk << info->dqi_blocksize_bits) + sizeof(struct
qt_disk_dqdbheader) + i * info->dqi_entry_size;
}
out_buf:
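These quota hunks all fix the same overflow: blk is a 32-bit uint, so blk << info->dqi_blocksize_bits is evaluated in 32 bits and wraps once the quota file grows past 4 GiB; casting to loff_t first widens the shift. A standalone userspace demonstration (illustrative values; on Linux loff_t is a 64-bit long long):

#include <stdio.h>

int main(void)
{
	unsigned int blk = 5000000;		/* block index past 2^22 */
	unsigned int bits = 10;			/* quota tree block-size bits */

	unsigned int wrapped = blk << bits;	/* 32-bit shift: wraps */
	long long widened = (long long)blk << bits;

	printf("wrapped: %u\n", wrapped);	/* 825032704 */
	printf("widened: %lld\n", widened);	/* 5120000000 */
	return 0;
}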

View File

@ -454,6 +454,12 @@ static int is_leaf(char *buf, int blocksize, struct buffer_head *bh)
"(second one): %h", ih);
return 0;
}
if (is_direntry_le_ih(ih) && (ih_item_len(ih) < (ih_entry_count(ih) * IH_SIZE))) {
reiserfs_warning(NULL, "reiserfs-5093",
"item entry count seems wrong %h",
ih);
return 0;
}
prev_location = ih_location(ih);
}

View File

@ -278,6 +278,15 @@ done:
return d_splice_alias(inode, dentry);
}
static int ubifs_prepare_create(struct inode *dir, struct dentry *dentry,
struct fscrypt_name *nm)
{
if (fscrypt_is_nokey_name(dentry))
return -ENOKEY;
return fscrypt_setup_filename(dir, &dentry->d_name, 0, nm);
}
static int ubifs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
bool excl)
{
@ -301,7 +310,7 @@ static int ubifs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
if (err)
return err;
err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
err = ubifs_prepare_create(dir, dentry, &nm);
if (err)
goto out_budg;
@ -961,7 +970,7 @@ static int ubifs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
if (err)
return err;
err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
err = ubifs_prepare_create(dir, dentry, &nm);
if (err)
goto out_budg;
@ -1046,7 +1055,7 @@ static int ubifs_mknod(struct inode *dir, struct dentry *dentry,
return err;
}
err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
err = ubifs_prepare_create(dir, dentry, &nm);
if (err) {
kfree(dev);
goto out_budg;
@ -1130,7 +1139,7 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry,
if (err)
return err;
err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
err = ubifs_prepare_create(dir, dentry, &nm);
if (err)
goto out_budg;

View File

@ -100,6 +100,35 @@ static inline void fscrypt_handle_d_move(struct dentry *dentry)
dentry->d_flags &= ~DCACHE_ENCRYPTED_NAME;
}
/**
* fscrypt_is_nokey_name() - test whether a dentry is a no-key name
* @dentry: the dentry to check
*
* This returns true if the dentry is a no-key dentry. A no-key dentry is a
* dentry that was created in an encrypted directory that hasn't had its
* encryption key added yet. Such dentries may be either positive or negative.
*
* When a filesystem is asked to create a new filename in an encrypted directory
* and the new filename's dentry is a no-key dentry, it must fail the operation
* with ENOKEY. This includes ->create(), ->mkdir(), ->mknod(), ->symlink(),
* ->rename(), and ->link(). (However, ->rename() and ->link() are already
* handled by fscrypt_prepare_rename() and fscrypt_prepare_link().)
*
* This is necessary because creating a filename requires the directory's
* encryption key, but just checking for the key on the directory inode during
* the final filesystem operation doesn't guarantee that the key was available
* during the preceding dentry lookup. And the key must have already been
* available during the dentry lookup in order for it to have been checked
* whether the filename already exists in the directory and for the new file's
* dentry not to be invalidated due to it incorrectly having the no-key flag.
*
* Return: %true if the dentry is a no-key name
*/
static inline bool fscrypt_is_nokey_name(const struct dentry *dentry)
{
return dentry->d_flags & DCACHE_ENCRYPTED_NAME;
}
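The consumers are visible in the ubifs hunks earlier in this diff; as a sketch, a filesystem's create-path wrapper (hypothetical name, mirroring ubifs_prepare_create()) reduces to:

static int example_prepare_create(struct inode *dir, struct dentry *dentry,
				  struct fscrypt_name *nm)
{
	/* Fail before any filesystem state changes if the name is keyless. */
	if (fscrypt_is_nokey_name(dentry))
		return -ENOKEY;
	return fscrypt_setup_filename(dir, &dentry->d_name, 0, nm);
}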
/* crypto.c */
extern void fscrypt_enqueue_decrypt_work(struct work_struct *);
extern struct fscrypt_ctx *fscrypt_get_ctx(gfp_t);
@ -290,6 +319,11 @@ static inline void fscrypt_handle_d_move(struct dentry *dentry)
{
}
static inline bool fscrypt_is_nokey_name(const struct dentry *dentry)
{
return false;
}
/* crypto.c */
static inline void fscrypt_enqueue_decrypt_work(struct work_struct *work)
{

View File

@ -21,61 +21,61 @@
})
/* acceptable for old filesystems */
static inline bool old_valid_dev(dev_t dev)
static __always_inline bool old_valid_dev(dev_t dev)
{
return MAJOR(dev) < 256 && MINOR(dev) < 256;
}
static inline u16 old_encode_dev(dev_t dev)
static __always_inline u16 old_encode_dev(dev_t dev)
{
return (MAJOR(dev) << 8) | MINOR(dev);
}
static inline dev_t old_decode_dev(u16 val)
static __always_inline dev_t old_decode_dev(u16 val)
{
return MKDEV((val >> 8) & 255, val & 255);
}
static inline u32 new_encode_dev(dev_t dev)
static __always_inline u32 new_encode_dev(dev_t dev)
{
unsigned major = MAJOR(dev);
unsigned minor = MINOR(dev);
return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12);
}
static inline dev_t new_decode_dev(u32 dev)
static __always_inline dev_t new_decode_dev(u32 dev)
{
unsigned major = (dev & 0xfff00) >> 8;
unsigned minor = (dev & 0xff) | ((dev >> 12) & 0xfff00);
return MKDEV(major, minor);
}
static inline u64 huge_encode_dev(dev_t dev)
static __always_inline u64 huge_encode_dev(dev_t dev)
{
return new_encode_dev(dev);
}
static inline dev_t huge_decode_dev(u64 dev)
static __always_inline dev_t huge_decode_dev(u64 dev)
{
return new_decode_dev(dev);
}
static inline int sysv_valid_dev(dev_t dev)
static __always_inline int sysv_valid_dev(dev_t dev)
{
return MAJOR(dev) < (1<<14) && MINOR(dev) < (1<<18);
}
static inline u32 sysv_encode_dev(dev_t dev)
static __always_inline u32 sysv_encode_dev(dev_t dev)
{
return MINOR(dev) | (MAJOR(dev) << 18);
}
static inline unsigned sysv_major(u32 dev)
static __always_inline unsigned sysv_major(u32 dev)
{
return (dev >> 18) & 0x3fff;
}
static inline unsigned sysv_minor(u32 dev)
static __always_inline unsigned sysv_minor(u32 dev)
{
return dev & 0x3ffff;
}
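This hunk only strengthens the annotations from inline to __always_inline; the encodings themselves are unchanged. For reference, a userspace re-implementation round-tripping a device number beyond the old 8-bit major/minor limits (illustrative; mirrors the kernel's 12-bit major / 20-bit minor split):

#include <assert.h>
#include <stdio.h>

#define MINORBITS	20
#define MINORMASK	((1u << MINORBITS) - 1)
#define MAJOR(dev)	((unsigned int)((dev) >> MINORBITS))
#define MINOR(dev)	((unsigned int)((dev) & MINORMASK))
#define MKDEV(ma, mi)	(((ma) << MINORBITS) | (mi))

static unsigned int demo_encode(unsigned int dev)
{
	unsigned int major = MAJOR(dev), minor = MINOR(dev);

	return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12);
}

static unsigned int demo_decode(unsigned int dev)
{
	unsigned int major = (dev & 0xfff00) >> 8;
	unsigned int minor = (dev & 0xff) | ((dev >> 12) & 0xfff00);

	return MKDEV(major, minor);
}

int main(void)
{
	unsigned int dev = MKDEV(300u, 70000u);	/* exceeds old_encode_dev limits */

	assert(demo_decode(demo_encode(dev)) == dev);
	printf("0x%x encodes to 0x%x and round-trips\n", dev, demo_encode(dev));
	return 0;
}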

View File

@ -1288,6 +1288,7 @@ static inline int of_get_available_child_count(const struct device_node *np)
#define _OF_DECLARE(table, name, compat, fn, fn_type) \
static const struct of_device_id __of_table_##name \
__used __section(__##table##_of_table) \
__aligned(__alignof__(struct of_device_id)) \
= { .compatible = compat, \
.data = (fn == (fn_type)NULL) ? fn : fn }
#else

View File

@ -125,6 +125,7 @@ static inline int rwsem_is_contended(struct rw_semaphore *sem)
* lock for reading
*/
extern void down_read(struct rw_semaphore *sem);
extern int __must_check down_read_interruptible(struct rw_semaphore *sem);
extern int __must_check down_read_killable(struct rw_semaphore *sem);
/*
@ -173,6 +174,7 @@ extern void downgrade_write(struct rw_semaphore *sem);
* See Documentation/locking/lockdep-design.rst for more details.)
*/
extern void down_read_nested(struct rw_semaphore *sem, int subclass);
extern int __must_check down_read_killable_nested(struct rw_semaphore *sem, int subclass);
extern void down_write_nested(struct rw_semaphore *sem, int subclass);
extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass);
extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock);
@ -193,6 +195,7 @@ extern void down_read_non_owner(struct rw_semaphore *sem);
extern void up_read_non_owner(struct rw_semaphore *sem);
#else
# define down_read_nested(sem, subclass) down_read(sem)
# define down_read_killable_nested(sem, subclass) down_read_killable(sem)
# define down_write_nest_lock(sem, nest_lock) down_write(sem)
# define down_write_nested(sem, subclass) down_write(sem)
# define down_write_killable_nested(sem, subclass) down_write_killable(sem)

View File

@ -226,12 +226,13 @@ struct signal_struct {
* credential calculations
* (notably. ptrace)
* Deprecated do not use in new code.
* Use exec_update_mutex instead.
*/
struct mutex exec_update_mutex; /* Held while task_struct is being
* updated during exec, and may have
* inconsistent permissions.
* Use exec_update_lock instead.
*/
struct rw_semaphore exec_update_lock; /* Held while task_struct is
* being updated during exec,
* and may have inconsistent
* permissions.
*/
} __randomize_layout;
/*

View File

@ -28,4 +28,9 @@
#define _BITUL(x) (_UL(1) << (x))
#define _BITULL(x) (_ULL(1) << (x))
#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#endif /* _UAPI_LINUX_CONST_H */
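These three macros move here from the UAPI <linux/kernel.h> (whose copy is removed in a later hunk) so the exported headers below can include the lightweight <linux/const.h> instead. What they compute, checked at compile time (userspace; __typeof__ assumes GNU C):

#include <assert.h>

#define __ALIGN_KERNEL(x, a)		__ALIGN_KERNEL_MASK(x, (__typeof__(x))(a) - 1)
#define __ALIGN_KERNEL_MASK(x, mask)	(((x) + (mask)) & ~(mask))
#define __KERNEL_DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static_assert(__ALIGN_KERNEL(10u, 8u) == 16u, "rounds up to the next boundary");
static_assert(__ALIGN_KERNEL(16u, 8u) == 16u, "aligned input is unchanged");
static_assert(__KERNEL_DIV_ROUND_UP(10u, 8u) == 2u, "ceiling division");

int main(void) { return 0; }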

View File

@ -14,7 +14,7 @@
#ifndef _UAPI_LINUX_ETHTOOL_H
#define _UAPI_LINUX_ETHTOOL_H
#include <linux/kernel.h>
#include <linux/const.h>
#include <linux/types.h>
#include <linux/if_ether.h>

View File

@ -17,7 +17,6 @@
#define FSCRYPT_POLICY_FLAGS_PAD_32 0x03
#define FSCRYPT_POLICY_FLAGS_PAD_MASK 0x03
#define FSCRYPT_POLICY_FLAG_DIRECT_KEY 0x04
#define FSCRYPT_POLICY_FLAGS_VALID 0x07
/* Encryption algorithms */
#define FSCRYPT_MODE_AES_256_XTS 1
@ -25,7 +24,7 @@
#define FSCRYPT_MODE_AES_128_CBC 5
#define FSCRYPT_MODE_AES_128_CTS 6
#define FSCRYPT_MODE_ADIANTUM 9
#define __FSCRYPT_MODE_MAX 9
/* If adding a mode number > 9, update FSCRYPT_MODE_MAX in fscrypt_private.h */
/*
* Legacy policy version; ad-hoc KDF and no key verification.
@ -162,7 +161,7 @@ struct fscrypt_get_key_status_arg {
#define FS_POLICY_FLAGS_PAD_32 FSCRYPT_POLICY_FLAGS_PAD_32
#define FS_POLICY_FLAGS_PAD_MASK FSCRYPT_POLICY_FLAGS_PAD_MASK
#define FS_POLICY_FLAG_DIRECT_KEY FSCRYPT_POLICY_FLAG_DIRECT_KEY
#define FS_POLICY_FLAGS_VALID FSCRYPT_POLICY_FLAGS_VALID
#define FS_POLICY_FLAGS_VALID 0x07 /* contains old flags only */
#define FS_ENCRYPTION_MODE_INVALID 0 /* never used */
#define FS_ENCRYPTION_MODE_AES_256_XTS FSCRYPT_MODE_AES_256_XTS
#define FS_ENCRYPTION_MODE_AES_256_GCM 2 /* never used */

View File

@ -3,13 +3,6 @@
#define _UAPI_LINUX_KERNEL_H
#include <linux/sysinfo.h>
/*
* 'kernel.h' contains some often-used function prototypes etc
*/
#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#include <linux/const.h>
#endif /* _UAPI_LINUX_KERNEL_H */

View File

@ -21,7 +21,7 @@
#define _UAPI_LINUX_LIGHTNVM_H
#ifdef __KERNEL__
#include <linux/kernel.h>
#include <linux/const.h>
#include <linux/ioctl.h>
#else /* __KERNEL__ */
#include <stdio.h>

View File

@ -2,7 +2,7 @@
#ifndef _UAPI__LINUX_MROUTE6_H
#define _UAPI__LINUX_MROUTE6_H
#include <linux/kernel.h>
#include <linux/const.h>
#include <linux/types.h>
#include <linux/sockios.h>
#include <linux/in6.h> /* For struct sockaddr_in6. */

View File

@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI_X_TABLES_H
#define _UAPI_X_TABLES_H
#include <linux/kernel.h>
#include <linux/const.h>
#include <linux/types.h>
#define XT_FUNCTION_MAXNAMELEN 30

View File

@ -2,7 +2,7 @@
#ifndef _UAPI__LINUX_NETLINK_H
#define _UAPI__LINUX_NETLINK_H
#include <linux/kernel.h>
#include <linux/const.h>
#include <linux/socket.h> /* for __kernel_sa_family_t */
#include <linux/types.h>

View File

@ -23,7 +23,7 @@
#ifndef _UAPI_LINUX_SYSCTL_H
#define _UAPI_LINUX_SYSCTL_H
#include <linux/kernel.h>
#include <linux/const.h>
#include <linux/types.h>
#include <linux/compiler.h>

View File

@ -26,7 +26,7 @@ static struct signal_struct init_signals = {
.multiprocess = HLIST_HEAD_INIT,
.rlim = INIT_RLIMITS,
.cred_guard_mutex = __MUTEX_INITIALIZER(init_signals.cred_guard_mutex),
.exec_update_mutex = __MUTEX_INITIALIZER(init_signals.exec_update_mutex),
.exec_update_lock = __RWSEM_INITIALIZER(init_signals.exec_update_lock),
#ifdef CONFIG_POSIX_TIMERS
.posix_timers = LIST_HEAD_INIT(init_signals.posix_timers),
.cputimer = {

View File

@ -914,6 +914,8 @@ int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
opt = fs_parse(fc, &cgroup1_fs_parameters, param, &result);
if (opt == -ENOPARAM) {
if (strcmp(param->key, "source") == 0) {
if (fc->source)
return invalf(fc, "Multiple sources not supported");
fc->source = param->string;
param->string = NULL;
return 0;
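With the check in place, a second "source" parameter is rejected with EINVAL instead of overwriting (and leaking) the first. A hypothetical userspace probe through the new mount API (assumes headers that define SYS_fsopen/SYS_fsconfig, i.e. kernel 5.2+, and CAP_SYS_ADMIN):

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#define FSCONFIG_SET_STRING 1	/* from <linux/mount.h> */

int main(void)
{
	int fd = syscall(SYS_fsopen, "cgroup", 0);

	if (fd < 0) {
		perror("fsopen");
		return 1;
	}
	/* The first source is accepted ... */
	syscall(SYS_fsconfig, fd, FSCONFIG_SET_STRING, "source", "a", 0);
	/* ... a second one now fails with EINVAL on patched kernels. */
	if (syscall(SYS_fsconfig, fd, FSCONFIG_SET_STRING, "source", "b", 0) < 0)
		perror("second source");
	close(fd);
	return 0;
}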

View File

@ -1254,7 +1254,7 @@ static void put_ctx(struct perf_event_context *ctx)
* function.
*
* Lock order:
* exec_update_mutex
* exec_update_lock
* task_struct::perf_event_mutex
* perf_event_context::mutex
* perf_event::child_mutex;
@ -11001,24 +11001,6 @@ SYSCALL_DEFINE5(perf_event_open,
goto err_task;
}
if (task) {
err = mutex_lock_interruptible(&task->signal->exec_update_mutex);
if (err)
goto err_task;
/*
* Reuse ptrace permission checks for now.
*
* We must hold exec_update_mutex across this and any potential
* perf_install_in_context() call for this new event to
* serialize against exec() altering our credentials (and the
* perf_event_exit_task() that could imply).
*/
err = -EACCES;
if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
goto err_cred;
}
if (flags & PERF_FLAG_PID_CGROUP)
cgroup_fd = pid;
@ -11026,7 +11008,7 @@ SYSCALL_DEFINE5(perf_event_open,
NULL, NULL, cgroup_fd);
if (IS_ERR(event)) {
err = PTR_ERR(event);
goto err_cred;
goto err_task;
}
if (is_sampling_event(event)) {
@ -11145,6 +11127,24 @@ SYSCALL_DEFINE5(perf_event_open,
goto err_context;
}
if (task) {
err = down_read_interruptible(&task->signal->exec_update_lock);
if (err)
goto err_file;
/*
* Preserve ptrace permission check for backwards compatibility.
*
* We must hold exec_update_lock across this and any potential
* perf_install_in_context() call for this new event to
* serialize against exec() altering our credentials (and the
* perf_event_exit_task() that could imply).
*/
err = -EACCES;
if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
goto err_cred;
}
if (move_group) {
gctx = __perf_event_ctx_lock_double(group_leader, ctx);
@ -11298,7 +11298,7 @@ SYSCALL_DEFINE5(perf_event_open,
mutex_unlock(&ctx->mutex);
if (task) {
mutex_unlock(&task->signal->exec_update_mutex);
up_read(&task->signal->exec_update_lock);
put_task_struct(task);
}
@ -11320,7 +11320,10 @@ err_locked:
if (move_group)
perf_event_ctx_unlock(group_leader, gctx);
mutex_unlock(&ctx->mutex);
/* err_file: */
err_cred:
if (task)
up_read(&task->signal->exec_update_lock);
err_file:
fput(event_file);
err_context:
perf_unpin_context(ctx);
@ -11332,9 +11335,6 @@ err_alloc:
*/
if (!event_file)
free_event(event);
err_cred:
if (task)
mutex_unlock(&task->signal->exec_update_mutex);
err_task:
if (task)
put_task_struct(task);
@ -11639,7 +11639,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
/*
* When a child task exits, feed back event values to parent events.
*
* Can be called with exec_update_mutex held when called from
* Can be called with exec_update_lock held when called from
* install_exec_creds().
*/
void perf_event_exit_task(struct task_struct *child)

View File

@ -1221,7 +1221,7 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
struct mm_struct *mm;
int err;
err = mutex_lock_killable(&task->signal->exec_update_mutex);
err = down_read_killable(&task->signal->exec_update_lock);
if (err)
return ERR_PTR(err);
@ -1231,7 +1231,7 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
mmput(mm);
mm = ERR_PTR(-EACCES);
}
mutex_unlock(&task->signal->exec_update_mutex);
up_read(&task->signal->exec_update_lock);
return mm;
}
@ -1586,7 +1586,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
sig->oom_score_adj_min = current->signal->oom_score_adj_min;
mutex_init(&sig->cred_guard_mutex);
mutex_init(&sig->exec_update_mutex);
init_rwsem(&sig->exec_update_lock);
return 0;
}

View File

@ -75,25 +75,25 @@ get_file_raw_ptr(struct task_struct *task, unsigned int idx)
return file;
}
static void kcmp_unlock(struct mutex *m1, struct mutex *m2)
static void kcmp_unlock(struct rw_semaphore *l1, struct rw_semaphore *l2)
{
if (likely(m2 != m1))
mutex_unlock(m2);
mutex_unlock(m1);
if (likely(l2 != l1))
up_read(l2);
up_read(l1);
}
static int kcmp_lock(struct mutex *m1, struct mutex *m2)
static int kcmp_lock(struct rw_semaphore *l1, struct rw_semaphore *l2)
{
int err;
if (m2 > m1)
swap(m1, m2);
if (l2 > l1)
swap(l1, l2);
err = mutex_lock_killable(m1);
if (!err && likely(m1 != m2)) {
err = mutex_lock_killable_nested(m2, SINGLE_DEPTH_NESTING);
err = down_read_killable(l1);
if (!err && likely(l1 != l2)) {
err = down_read_killable_nested(l2, SINGLE_DEPTH_NESTING);
if (err)
mutex_unlock(m1);
up_read(l1);
}
return err;
@ -173,8 +173,8 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
/*
* One should have enough rights to inspect task details.
*/
ret = kcmp_lock(&task1->signal->exec_update_mutex,
&task2->signal->exec_update_mutex);
ret = kcmp_lock(&task1->signal->exec_update_lock,
&task2->signal->exec_update_lock);
if (ret)
goto err;
if (!ptrace_may_access(task1, PTRACE_MODE_READ_REALCREDS) ||
@ -229,8 +229,8 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
}
err_unlock:
kcmp_unlock(&task1->signal->exec_update_mutex,
&task2->signal->exec_update_mutex);
kcmp_unlock(&task1->signal->exec_update_lock,
&task2->signal->exec_update_lock);
err:
put_task_struct(task1);
put_task_struct(task2);

View File

@ -1348,6 +1348,18 @@ inline void __down_read(struct rw_semaphore *sem)
}
}
static inline int __down_read_interruptible(struct rw_semaphore *sem)
{
if (!rwsem_read_trylock(sem)) {
if (IS_ERR(rwsem_down_read_slowpath(sem, TASK_INTERRUPTIBLE)))
return -EINTR;
DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
} else {
rwsem_set_reader_owned(sem);
}
return 0;
}
static inline int __down_read_killable(struct rw_semaphore *sem)
{
if (!rwsem_read_trylock(sem)) {
@ -1498,6 +1510,20 @@ void __sched down_read(struct rw_semaphore *sem)
}
EXPORT_SYMBOL(down_read);
int __sched down_read_interruptible(struct rw_semaphore *sem)
{
might_sleep();
rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_interruptible)) {
rwsem_release(&sem->dep_map, 1, _RET_IP_);
return -EINTR;
}
return 0;
}
EXPORT_SYMBOL(down_read_interruptible);
int __sched down_read_killable(struct rw_semaphore *sem)
{
might_sleep();
@ -1608,6 +1634,20 @@ void down_read_nested(struct rw_semaphore *sem, int subclass)
}
EXPORT_SYMBOL(down_read_nested);
int down_read_killable_nested(struct rw_semaphore *sem, int subclass)
{
might_sleep();
rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
rwsem_release(&sem->dep_map, 1, _RET_IP_);
return -EINTR;
}
return 0;
}
EXPORT_SYMBOL(down_read_killable_nested);
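Together with the header hunk above, this completes the read-side API: down_read_interruptible() backs off for any pending signal, down_read_killable() only for fatal ones, and the _nested variant passes lockdep an explicit subclass (kcmp, earlier in this diff, needs it to take two exec_update_locks at once). A usage sketch with a hypothetical caller:

static int read_shared_state(struct rw_semaphore *sem)
{
	int err = down_read_interruptible(sem);

	if (err)
		return err;	/* -EINTR; callers usually restart the syscall */

	/* ... read the state protected by sem ... */

	up_read(sem);
	return 0;
}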
void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest)
{
might_sleep();

View File

@ -1863,7 +1863,6 @@ static int mod_sysfs_init(struct module *mod)
if (err)
mod_kobject_put(mod);
/* delay uevent until full sysfs population */
out:
return err;
}
@ -1900,7 +1899,6 @@ static int mod_sysfs_setup(struct module *mod,
add_sect_attrs(mod, info);
add_notes_attrs(mod, info);
kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
return 0;
out_unreg_modinfo_attrs:
@ -3608,6 +3606,9 @@ static noinline int do_init_module(struct module *mod)
blocking_notifier_call_chain(&module_notify_list,
MODULE_STATE_LIVE, mod);
/* Delay uevent until module has finished its init routine */
kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
/*
* We need to finish all async code before the module init sequence
* is done. This has potential to deadlock. For example, a newly
@ -3953,6 +3954,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
MODULE_STATE_GOING, mod);
klp_module_going(mod);
bug_cleanup:
mod->state = MODULE_STATE_GOING;
/* module_bug_cleanup needs module_mutex protection */
mutex_lock(&module_mutex);
module_bug_cleanup(mod);

View File

@ -916,13 +916,6 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
*/
if (tick_do_timer_cpu == cpu)
return false;
/*
* Boot safety: make sure the timekeeping duty has been
* assigned before entering dyntick-idle mode,
* tick_do_timer_cpu is TICK_DO_TIMER_BOOT
*/
if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_BOOT))
return false;
/* Should not happen for nohz-full */
if (WARN_ON_ONCE(tick_do_timer_cpu == TICK_DO_TIMER_NONE))

View File

@ -1597,6 +1597,21 @@ free_sched:
return err;
}
static void taprio_reset(struct Qdisc *sch)
{
struct taprio_sched *q = qdisc_priv(sch);
struct net_device *dev = qdisc_dev(sch);
int i;
hrtimer_cancel(&q->advance_timer);
if (q->qdiscs) {
for (i = 0; i < dev->num_tx_queues && q->qdiscs[i]; i++)
qdisc_reset(q->qdiscs[i]);
}
sch->qstats.backlog = 0;
sch->q.qlen = 0;
}
static void taprio_destroy(struct Qdisc *sch)
{
struct taprio_sched *q = qdisc_priv(sch);
@ -1607,7 +1622,6 @@ static void taprio_destroy(struct Qdisc *sch)
list_del(&q->taprio_list);
spin_unlock(&taprio_list_lock);
hrtimer_cancel(&q->advance_timer);
taprio_disable_offload(dev, q, NULL);
@ -1954,6 +1968,7 @@ static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
.init = taprio_init,
.change = taprio_change,
.destroy = taprio_destroy,
.reset = taprio_reset,
.peek = taprio_peek,
.dequeue = taprio_dequeue,
.enqueue = taprio_enqueue,

View File

@ -717,8 +717,13 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
runtime->boundary *= 2;
/* clear the buffer for avoiding possible kernel info leaks */
if (runtime->dma_area && !substream->ops->copy_user)
memset(runtime->dma_area, 0, runtime->dma_bytes);
if (runtime->dma_area && !substream->ops->copy_user) {
size_t size = runtime->dma_bytes;
if (runtime->info & SNDRV_PCM_INFO_MMAP)
size = PAGE_ALIGN(size);
memset(runtime->dma_area, 0, size);
}
snd_pcm_timer_resolution_change(substream);
snd_pcm_set_state(substream, SNDRV_PCM_STATE_SETUP);

View File

@ -72,11 +72,21 @@ static inline unsigned short snd_rawmidi_file_flags(struct file *file)
}
}
static inline int snd_rawmidi_ready(struct snd_rawmidi_substream *substream)
static inline bool __snd_rawmidi_ready(struct snd_rawmidi_runtime *runtime)
{
return runtime->avail >= runtime->avail_min;
}
static bool snd_rawmidi_ready(struct snd_rawmidi_substream *substream)
{
struct snd_rawmidi_runtime *runtime = substream->runtime;
unsigned long flags;
bool ready;
return runtime->avail >= runtime->avail_min;
spin_lock_irqsave(&runtime->lock, flags);
ready = __snd_rawmidi_ready(runtime);
spin_unlock_irqrestore(&runtime->lock, flags);
return ready;
}
static inline int snd_rawmidi_ready_append(struct snd_rawmidi_substream *substream,
@ -945,7 +955,7 @@ int snd_rawmidi_receive(struct snd_rawmidi_substream *substream,
if (result > 0) {
if (runtime->event)
schedule_work(&runtime->event_work);
else if (snd_rawmidi_ready(substream))
else if (__snd_rawmidi_ready(runtime))
wake_up(&runtime->sleep);
}
spin_unlock_irqrestore(&runtime->lock, flags);
@ -1024,7 +1034,7 @@ static ssize_t snd_rawmidi_read(struct file *file, char __user *buf, size_t coun
result = 0;
while (count > 0) {
spin_lock_irq(&runtime->lock);
while (!snd_rawmidi_ready(substream)) {
while (!__snd_rawmidi_ready(runtime)) {
wait_queue_entry_t wait;
if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
@ -1041,9 +1051,11 @@ static ssize_t snd_rawmidi_read(struct file *file, char __user *buf, size_t coun
return -ENODEV;
if (signal_pending(current))
return result > 0 ? result : -ERESTARTSYS;
if (!runtime->avail)
return result > 0 ? result : -EIO;
spin_lock_irq(&runtime->lock);
if (!runtime->avail) {
spin_unlock_irq(&runtime->lock);
return result > 0 ? result : -EIO;
}
}
spin_unlock_irq(&runtime->lock);
count1 = snd_rawmidi_kernel_read1(substream,
@ -1181,7 +1193,7 @@ int __snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int coun
runtime->avail += count;
substream->bytes += count;
if (count > 0) {
if (runtime->drain || snd_rawmidi_ready(substream))
if (runtime->drain || __snd_rawmidi_ready(runtime))
wake_up(&runtime->sleep);
}
return count;
@ -1370,9 +1382,11 @@ static ssize_t snd_rawmidi_write(struct file *file, const char __user *buf,
return -ENODEV;
if (signal_pending(current))
return result > 0 ? result : -ERESTARTSYS;
if (!runtime->avail && !timeout)
return result > 0 ? result : -EIO;
spin_lock_irq(&runtime->lock);
if (!runtime->avail && !timeout) {
spin_unlock_irq(&runtime->lock);
return result > 0 ? result : -EIO;
}
}
spin_unlock_irq(&runtime->lock);
count1 = snd_rawmidi_kernel_write1(substream, buf, NULL, count);
@ -1452,6 +1466,7 @@ static void snd_rawmidi_proc_info_read(struct snd_info_entry *entry,
struct snd_rawmidi *rmidi;
struct snd_rawmidi_substream *substream;
struct snd_rawmidi_runtime *runtime;
unsigned long buffer_size, avail, xruns;
rmidi = entry->private_data;
snd_iprintf(buffer, "%s\n\n", rmidi->name);
@ -1470,13 +1485,16 @@ static void snd_rawmidi_proc_info_read(struct snd_info_entry *entry,
" Owner PID : %d\n",
pid_vnr(substream->pid));
runtime = substream->runtime;
spin_lock_irq(&runtime->lock);
buffer_size = runtime->buffer_size;
avail = runtime->avail;
spin_unlock_irq(&runtime->lock);
snd_iprintf(buffer,
" Mode : %s\n"
" Buffer size : %lu\n"
" Avail : %lu\n",
runtime->oss ? "OSS compatible" : "native",
(unsigned long) runtime->buffer_size,
(unsigned long) runtime->avail);
buffer_size, avail);
}
}
}
@ -1494,13 +1512,16 @@ static void snd_rawmidi_proc_info_read(struct snd_info_entry *entry,
" Owner PID : %d\n",
pid_vnr(substream->pid));
runtime = substream->runtime;
spin_lock_irq(&runtime->lock);
buffer_size = runtime->buffer_size;
avail = runtime->avail;
xruns = runtime->xruns;
spin_unlock_irq(&runtime->lock);
snd_iprintf(buffer,
" Buffer size : %lu\n"
" Avail : %lu\n"
" Overruns : %lu\n",
(unsigned long) runtime->buffer_size,
(unsigned long) runtime->avail,
(unsigned long) runtime->xruns);
buffer_size, avail, xruns);
}
}
}

View File

@ -26,10 +26,10 @@ struct snd_seq_queue {
struct snd_seq_timer *timer; /* time keeper for this queue */
int owner; /* client that 'owns' the timer */
unsigned int locked:1, /* timer is only accessible by owner if set */
klocked:1, /* kernel lock (after START) */
check_again:1,
check_blocked:1;
bool locked; /* timer is only accessible by owner if set */
bool klocked; /* kernel lock (after START) */
bool check_again; /* concurrent access happened during check */
bool check_blocked; /* queue being checked */
unsigned int flags; /* status flags */
unsigned int info_flags; /* info for sync */
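Replacing the 1-bit bitfields with bool is a data-race fix: all four flags shared one storage word, so setting any of them is a read-modify-write of the whole word, and concurrent updates to different flags could be lost. Distinct bool members occupy separate memory locations. Sketched (kernel bool, one byte each):

struct flags_packed {
	unsigned int a:1, b:1;	/* one word: writing a rewrites b's storage */
};

struct flags_separate {
	bool a;			/* separate bytes: independent stores are safe */
	bool b;
};

/*
 * CPU0 doing p->a = 1 while CPU1 does p->b = 1 can leave flags_packed
 * with only one bit set (a lost update); with flags_separate both stores
 * survive, at the cost of a byte per flag.
 */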

View File

@ -28,4 +28,9 @@
#define _BITUL(x) (_UL(1) << (x))
#define _BITULL(x) (_ULL(1) << (x))
#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#endif /* _UAPI_LINUX_CONST_H */