
netfilter: ipset: Fix "INFO: rcu detected stall in hash_xxx" reports

commit f66ee0410b upstream.

In the case of huge hash:* types of sets, processing the whole set under
the protection of its single spinlock could take too long.

There were four places where the whole hash table of the set was processed
from bucket to bucket while holding the spinlock:

- During the resize of a set, the original set was locked to exclude kernel
  side add/del element operations (userspace add/del is excluded by the
  nfnetlink mutex). The original set is actually just read during the
  resize, so the spinlocking is replaced with RCU locking of regions.
  As a consequence, kernel side add/del operations can now run in parallel
  with the resize. In order not to lose those operations, a backlog is
  added and replayed after the successful resize (see the second sketch
  below the list).
- Garbage collection of timed out entries was also protected by the spinlock.
  In order not to hold the lock too long, region locking is introduced and a
  single region is processed in one gc run (see the first sketch below the
  list). Also, the simple timer based gc is replaced with a workqueue based
  solution. The internal book-keeping (number of elements, size of
  extensions) is moved to region level due to the region locking.
- Adding elements: when the max number of elements is reached, the gc
  was called to evict the timed out entries. The new approach is that the gc
  is called just for the matching region, assuming that if the region
  (proportionally) seems to be full, then the whole set is, too. We could
  scan the other regions to check every entry under RCU locking, but for
  huge sets that would slow down adding elements.
- Listing the set header data: when the set was defined with timeout
  support, the garbage collector was called to clean up the timed out
  entries and thus get correct element numbers and set size values. Now the
  set is scanned counting the non-timed-out entries instead, without
  actually running the gc for the whole set.
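
A minimal sketch of the region locking idea, not the patch itself: the
buckets of the hash table are split into fixed-size regions, each with its
own spinlock and book-keeping, so the gc (and the "region looks full" check
at add time) only ever takes one region lock. All toy_* names and the
region size below are made up for illustration:

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#define TOY_REGION_SHIFT	10	/* 2^10 buckets per region */

struct toy_region {
	spinlock_t lock;	/* protects the buckets of this region only */
	u32 elements;		/* per-region element count */
	size_t ext_size;	/* per-region size of dynamic extensions */
};

struct toy_set {
	u32 nbuckets;
	struct toy_region *regions;	/* nbuckets >> TOY_REGION_SHIFT regions */
	struct delayed_work gc_work;	/* workqueue based gc instead of a timer */
};

static inline u32 toy_region_of(u32 bucket)
{
	return bucket >> TOY_REGION_SHIFT;
}

/* One gc pass handles a single region, so no lock is held for long
 * even if the whole set is huge. */
static void toy_gc_one_region(struct toy_set *set, u32 r)
{
	u32 first = r << TOY_REGION_SHIFT;
	u32 last = min_t(u32, set->nbuckets, first + (1U << TOY_REGION_SHIFT));
	struct toy_region *region = &set->regions[r];
	u32 b;

	spin_lock_bh(&region->lock);
	for (b = first; b < last; b++) {
		/* Walk bucket b, evict timed out entries and update
		 * region->elements and region->ext_size accordingly.
		 */
	}
	spin_unlock_bh(&region->lock);
}

Adding elements uses the same granularity: if the matching region looks
proportionally full, the gc is run for that one region only, and listing
the header data just counts the non-timed-out entries under region locks.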
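
The resize backlog from the first bullet, again as a hedged sketch rather
than the real code: while the old table is read under RCU, racing kernel
side add/del operations are queued, and the queue is replayed into the new
table once the resize has succeeded. The toy_* names are illustrative; the
real patch stores the full element and its extensions in the backlog entry:

#include <linux/list.h>
#include <linux/slab.h>

struct toy_backlog_entry {
	struct list_head list;
	bool is_add;		/* queued IPSET_ADD or IPSET_DEL */
};

/* Called from the kernel side add/del path while a resize is in flight:
 * record the operation instead of touching the table being rehashed. */
static int toy_backlog_record(struct list_head *backlog, bool is_add, gfp_t gfp)
{
	struct toy_backlog_entry *e = kzalloc(sizeof(*e), gfp);

	if (!e)
		return -ENOMEM;
	e->is_add = is_add;
	list_add_tail(&e->list, backlog);
	return 0;
}

/* After the new table is published, replay the queued operations so that
 * no add/del racing with the resize is lost. */
static void toy_backlog_replay(struct list_head *backlog)
{
	struct toy_backlog_entry *e, *n;

	list_for_each_entry_safe(e, n, backlog, list) {
		/* re-run the recorded add/del against the resized table */
		list_del(&e->list);
		kfree(e);
	}
}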

Thanks to Florian Westphal for helping me to solve the SOFTIRQ-safe ->
SOFTIRQ-unsafe lock order issues while working on the patch.

Reported-by: syzbot+4b0e9d4ff3cf117837e5@syzkaller.appspotmail.com
Reported-by: syzbot+c27b8d5010f45c666ed1@syzkaller.appspotmail.com
Reported-by: syzbot+68a806795ac89df3aa1c@syzkaller.appspotmail.com
Fixes: 23c42a403a ("netfilter: ipset: Introduction of new commands and protocol version 7")
Signed-off-by: Jozsef Kadlecsik <kadlec@netfilter.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Jozsef Kadlecsik 2020-02-11 23:20:43 +01:00 committed by Greg Kroah-Hartman
parent 7eaf718b83
commit 5dd9488ae4
3 changed files with 470 additions and 204 deletions

--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h

@@ -121,6 +121,7 @@ struct ip_set_ext {
 	u32 timeout;
 	u8 packets_op;
 	u8 bytes_op;
+	bool target;
 };
 
 struct ip_set;
@@ -187,6 +188,14 @@ struct ip_set_type_variant {
 	/* Return true if "b" set is the same as "a"
 	 * according to the create set parameters */
 	bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
+	/* Region-locking is used */
+	bool region_lock;
+};
+
+struct ip_set_region {
+	spinlock_t lock;	/* Region lock */
+	size_t ext_size;	/* Size of the dynamic extensions */
+	u32 elements;		/* Number of elements vs timeout */
 };
 
 /* The core set type structure */
@@ -681,7 +690,7 @@ ip_set_init_skbinfo(struct ip_set_skbinfo *skbinfo,
 }
 
 #define IP_SET_INIT_KEXT(skb, opt, set)			\
-	{ .bytes = (skb)->len, .packets = 1,		\
+	{ .bytes = (skb)->len, .packets = 1, .target = true,\
 	  .timeout = ip_set_adt_opt_timeout(opt, set) }
 
 #define IP_SET_INIT_UEXT(set)				\

--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c

@@ -557,6 +557,20 @@ ip_set_rcu_get(struct net *net, ip_set_id_t index)
 	return set;
 }
 
+static inline void
+ip_set_lock(struct ip_set *set)
+{
+	if (!set->variant->region_lock)
+		spin_lock_bh(&set->lock);
+}
+
+static inline void
+ip_set_unlock(struct ip_set *set)
+{
+	if (!set->variant->region_lock)
+		spin_unlock_bh(&set->lock);
+}
+
 int
 ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
 	    const struct xt_action_param *par, struct ip_set_adt_opt *opt)
@@ -578,9 +592,9 @@ ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
 	if (ret == -EAGAIN) {
 		/* Type requests element to be completed */
 		pr_debug("element must be completed, ADD is triggered\n");
-		spin_lock_bh(&set->lock);
+		ip_set_lock(set);
 		set->variant->kadt(set, skb, par, IPSET_ADD, opt);
-		spin_unlock_bh(&set->lock);
+		ip_set_unlock(set);
 		ret = 1;
 	} else {
 		/* --return-nomatch: invert matched element */
@@ -609,9 +623,9 @@ ip_set_add(ip_set_id_t index, const struct sk_buff *skb,
 	    !(opt->family == set->family || set->family == NFPROTO_UNSPEC))
 		return -IPSET_ERR_TYPE_MISMATCH;
 
-	spin_lock_bh(&set->lock);
+	ip_set_lock(set);
 	ret = set->variant->kadt(set, skb, par, IPSET_ADD, opt);
-	spin_unlock_bh(&set->lock);
+	ip_set_unlock(set);
 
 	return ret;
 }
@@ -631,9 +645,9 @@ ip_set_del(ip_set_id_t index, const struct sk_buff *skb,
 	    !(opt->family == set->family || set->family == NFPROTO_UNSPEC))
 		return -IPSET_ERR_TYPE_MISMATCH;
 
-	spin_lock_bh(&set->lock);
+	ip_set_lock(set);
 	ret = set->variant->kadt(set, skb, par, IPSET_DEL, opt);
-	spin_unlock_bh(&set->lock);
+	ip_set_unlock(set);
 
 	return ret;
 }
@@ -1098,9 +1112,9 @@ ip_set_flush_set(struct ip_set *set)
 {
 	pr_debug("set: %s\n", set->name);
 
-	spin_lock_bh(&set->lock);
+	ip_set_lock(set);
 	set->variant->flush(set);
-	spin_unlock_bh(&set->lock);
+	ip_set_unlock(set);
 }
 
 static int ip_set_flush(struct net *net, struct sock *ctnl, struct sk_buff *skb,
@@ -1523,9 +1537,9 @@ call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set,
 	bool eexist = flags & IPSET_FLAG_EXIST, retried = false;
 
 	do {
-		spin_lock_bh(&set->lock);
+		ip_set_lock(set);
 		ret = set->variant->uadt(set, tb, adt, &lineno, flags, retried);
-		spin_unlock_bh(&set->lock);
+		ip_set_unlock(set);
 		retried = true;
 	} while (ret == -EAGAIN &&
 		 set->variant->resize &&
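
With variant->region_lock set, the ip_set_lock()/ip_set_unlock() wrappers
above intentionally become no-ops, so a region locking set type has to take
its own per-region lock inside its kadt/uadt handlers. A hedged sketch of
that division of labour, reusing the illustrative toy_* types from the
first sketch further up:

static int toy_kadt_add(struct toy_set *set, u32 bucket)
{
	struct toy_region *region = &set->regions[toy_region_of(bucket)];
	int ret = 0;

	/* The core no longer takes set->lock for this set type;
	 * the region lock is the only serialization needed here. */
	spin_lock_bh(&region->lock);
	/* insert into bucket, bump region->elements on success */
	spin_unlock_bh(&region->lock);
	return ret;
}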

net/netfilter/ipset/ip_set_hash_gen.h: file diff suppressed because it is too large.