bpf, lpm: Make locking RT friendly

The LPM trie map cannot be used in contexts like perf, kprobes and tracing
as this map type dynamically allocates memory.

The memory allocation happens with a raw spinlock held. On a PREEMPT_RT
enabled kernel a raw spinlock is a truly spinning lock, which disables
preemption and interrupts.

As RT does not allow memory allocation from such a section for various
reasons, convert the raw spinlock to a regular spinlock.

On an RT enabled kernel these locks are substituted by 'sleeping' spinlocks
which provide the proper protection but keep the code preemptible.

On a non-RT kernel, regular spinlocks map to raw spinlocks, i.e. this does
not cause any functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200224145644.602129531@linutronix.de
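
For illustration only, here is a minimal sketch of the pattern this patch
enables. The struct example_trie and example_update() below are hypothetical
names, not part of the patch; the sketch merely contrasts the spinlock_t
semantics described above with the previous raw_spinlock_t behavior:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical struct mirroring the lpm_trie locking layout. */
struct example_trie {
	spinlock_t lock;	/* sleeping lock on PREEMPT_RT, raw spinlock otherwise */
	size_t n_entries;
};

static int example_update(struct example_trie *t)
{
	unsigned long flags;
	void *node;

	spin_lock_irqsave(&t->lock, flags);
	/*
	 * On PREEMPT_RT this critical section stays preemptible, so the
	 * allocation below is legitimate. Under raw_spin_lock_irqsave()
	 * the section would run with preemption and interrupts disabled,
	 * where RT does not allow memory allocation.
	 */
	node = kmalloc(16, GFP_ATOMIC);
	if (node)
		t->n_entries++;
	spin_unlock_irqrestore(&t->lock, flags);

	if (!node)
		return -ENOMEM;
	kfree(node);	/* example only: a real trie would link the node in */
	return 0;
}

The trade-off is deliberate: on PREEMPT_RT, spinlock_t is backed by an
rtmutex-based sleeping lock, while non-RT builds compile it down to the raw
variant, so they see identical behavior.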
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -34,7 +34,7 @@ struct lpm_trie {
 	size_t				n_entries;
 	size_t				max_prefixlen;
 	size_t				data_size;
-	raw_spinlock_t			lock;
+	spinlock_t			lock;
 };
 
 /* This trie implements a longest prefix match algorithm that can be used to
@@ -315,7 +315,7 @@ static int trie_update_elem(struct bpf_map *map,
 	if (key->prefixlen > trie->max_prefixlen)
 		return -EINVAL;
 
-	raw_spin_lock_irqsave(&trie->lock, irq_flags);
+	spin_lock_irqsave(&trie->lock, irq_flags);
 
 	/* Allocate and fill a new node */
@@ -422,7 +422,7 @@ out:
 		kfree(im_node);
 	}
 
-	raw_spin_unlock_irqrestore(&trie->lock, irq_flags);
+	spin_unlock_irqrestore(&trie->lock, irq_flags);
 
 	return ret;
 }
@@ -442,7 +442,7 @@ static int trie_delete_elem(struct bpf_map *map, void *_key)
 	if (key->prefixlen > trie->max_prefixlen)
 		return -EINVAL;
 
-	raw_spin_lock_irqsave(&trie->lock, irq_flags);
+	spin_lock_irqsave(&trie->lock, irq_flags);
 
 	/* Walk the tree looking for an exact key/length match and keeping
 	 * track of the path we traverse. We will need to know the node
@@ -518,7 +518,7 @@ static int trie_delete_elem(struct bpf_map *map, void *_key)
 	kfree_rcu(node, rcu);
 
 out:
-	raw_spin_unlock_irqrestore(&trie->lock, irq_flags);
+	spin_unlock_irqrestore(&trie->lock, irq_flags);
 
 	return ret;
 }
@@ -575,7 +575,7 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr)
 	if (ret)
 		goto out_err;
 
-	raw_spin_lock_init(&trie->lock);
+	spin_lock_init(&trie->lock);
 
 	return &trie->map;
 out_err: