| From 0409d723b90c4918cda35753a90ff34043b58419 Mon Sep 17 00:00:00 2001 |
| From: Sasha Levin <sashal@kernel.org> |
| Date: Mon, 11 Sep 2023 15:28:14 +0200 |
| Subject: bpf: Avoid deadlock when using queue and stack maps from NMI |
| MIME-Version: 1.0 |
| Content-Type: text/plain; charset=UTF-8 |
| Content-Transfer-Encoding: 8bit |
| |
| From: Toke Høiland-Jørgensen <toke@redhat.com> |
| |
| [ Upstream commit a34a9f1a19afe9c60ca0ea61dfeee63a1c2baac8 ] |
| |
| Syzbot discovered that the queue and stack maps can deadlock if they are |
| being used from a BPF program that can be called from NMI context (such as |
| one that is attached to a perf HW counter event). To fix this, add an |
| in_nmi() check and use raw_spin_trylock() in NMI context, erroring out if |
| grabbing the lock fails. |
| |
| Fixes: f1a2e44a3aec ("bpf: add queue and stack maps") |
| Reported-by: Hsin-Wei Hung <hsinweih@uci.edu> |
| Tested-by: Hsin-Wei Hung <hsinweih@uci.edu> |
| Co-developed-by: Hsin-Wei Hung <hsinweih@uci.edu> |
| Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com> |
| Link: https://lore.kernel.org/r/20230911132815.717240-1-toke@redhat.com |
| Signed-off-by: Alexei Starovoitov <ast@kernel.org> |
| Signed-off-by: Sasha Levin <sashal@kernel.org> |
| --- |
| kernel/bpf/queue_stack_maps.c | 21 ++++++++++++++++++--- |
| 1 file changed, 18 insertions(+), 3 deletions(-) |
| |
| diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c |
| index 8a5e060de63bc..a8fe640318c6c 100644 |
| --- a/kernel/bpf/queue_stack_maps.c |
| +++ b/kernel/bpf/queue_stack_maps.c |
| @@ -102,7 +102,12 @@ static int __queue_map_get(struct bpf_map *map, void *value, bool delete) |
| int err = 0; |
| void *ptr; |
| |
| - raw_spin_lock_irqsave(&qs->lock, flags); |
| + if (in_nmi()) { |
| + if (!raw_spin_trylock_irqsave(&qs->lock, flags)) |
| + return -EBUSY; |
| + } else { |
| + raw_spin_lock_irqsave(&qs->lock, flags); |
| + } |
| |
| if (queue_stack_map_is_empty(qs)) { |
| memset(value, 0, qs->map.value_size); |
| @@ -132,7 +137,12 @@ static int __stack_map_get(struct bpf_map *map, void *value, bool delete) |
| void *ptr; |
| u32 index; |
| |
| - raw_spin_lock_irqsave(&qs->lock, flags); |
| + if (in_nmi()) { |
| + if (!raw_spin_trylock_irqsave(&qs->lock, flags)) |
| + return -EBUSY; |
| + } else { |
| + raw_spin_lock_irqsave(&qs->lock, flags); |
| + } |
| |
| if (queue_stack_map_is_empty(qs)) { |
| memset(value, 0, qs->map.value_size); |
| @@ -197,7 +207,12 @@ static int queue_stack_map_push_elem(struct bpf_map *map, void *value, |
| if (flags & BPF_NOEXIST || flags > BPF_EXIST) |
| return -EINVAL; |
| |
| - raw_spin_lock_irqsave(&qs->lock, irq_flags); |
| + if (in_nmi()) { |
| + if (!raw_spin_trylock_irqsave(&qs->lock, irq_flags)) |
| + return -EBUSY; |
| + } else { |
| + raw_spin_lock_irqsave(&qs->lock, irq_flags); |
| + } |
| |
| if (queue_stack_map_is_full(qs)) { |
| if (!replace) { |
| -- |
| 2.40.1 |
| |