From 0476a3a1b6a6e2bef2a70de0807cf9e05193a0bd Mon Sep 17 00:00:00 2001
From: Sasha Levin <sashal@kernel.org>
Date: Wed, 4 Jun 2025 15:39:38 -0400
Subject: RDMA: hfi1: fix possible divide-by-zero in find_hw_thread_mask()

From: Yury Norov [NVIDIA] <yury.norov@gmail.com>

[ Upstream commit 59f7d2138591ef8f0e4e4ab5f1ab674e8181ad3a ]

The function divides the number of online CPUs by num_core_siblings, and
only afterwards checks the divisor against zero. This opens up the
possibility of a divide-by-zero runtime error. Fix it by moving the
check before the division. This also saves one indentation level.
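
For illustration, a minimal standalone sketch of the pattern being fixed
(hypothetical helper names, not the driver's code): because the division
sits in the initializer, it has already executed by the time the zero
check runs, so hoisting the check above the division removes the fault.

  #include <stdio.h>

  /* Hypothetical sketch, not the hfi1 code: the zero check runs only
   * after the division in the initializer has already executed. */
  static unsigned int cores_buggy(unsigned int cpus, unsigned int siblings)
  {
          unsigned int n = cpus / siblings; /* faults when siblings == 0 */

          if (siblings == 0) /* guard arrives too late */
                  return 0;
          return n;
  }

  /* Fixed ordering, mirroring the patch: bail out before dividing. */
  static unsigned int cores_fixed(unsigned int cpus, unsigned int siblings)
  {
          if (siblings == 0)
                  return 0;
          return cpus / siblings;
  }

  int main(void)
  {
          /* prints "8 0": safe divisor for the buggy helper, zero divisor
           * handled gracefully by the fixed one */
          printf("%u %u\n", cores_buggy(16, 2), cores_fixed(16, 0));
          return 0;
  }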

Signed-off-by: Yury Norov [NVIDIA] <yury.norov@gmail.com>
Link: https://patch.msgid.link/20250604193947.11834-3-yury.norov@gmail.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/infiniband/hw/hfi1/affinity.c | 44 +++++++++++++++------------
 1 file changed, 24 insertions(+), 20 deletions(-)

diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index 77ee77d4000f..7382b85c72a6 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -966,31 +966,35 @@ static void find_hw_thread_mask(uint hw_thread_no, cpumask_var_t hw_thread_mask,
 				struct hfi1_affinity_node_list *affinity)
 {
 	int possible, curr_cpu, i;
-	uint num_cores_per_socket = node_affinity.num_online_cpus /
+	uint num_cores_per_socket;
+
+	cpumask_copy(hw_thread_mask, &affinity->proc.mask);
+
+	if (affinity->num_core_siblings == 0)
+		return;
+
+	num_cores_per_socket = node_affinity.num_online_cpus /
 			affinity->num_core_siblings /
 			node_affinity.num_online_nodes;
 
-	cpumask_copy(hw_thread_mask, &affinity->proc.mask);
-	if (affinity->num_core_siblings > 0) {
-		/* Removing other siblings not needed for now */
-		possible = cpumask_weight(hw_thread_mask);
-		curr_cpu = cpumask_first(hw_thread_mask);
-		for (i = 0;
-		     i < num_cores_per_socket * node_affinity.num_online_nodes;
-		     i++)
-			curr_cpu = cpumask_next(curr_cpu, hw_thread_mask);
-
-		for (; i < possible; i++) {
-			cpumask_clear_cpu(curr_cpu, hw_thread_mask);
-			curr_cpu = cpumask_next(curr_cpu, hw_thread_mask);
-		}
+	/* Removing other siblings not needed for now */
+	possible = cpumask_weight(hw_thread_mask);
+	curr_cpu = cpumask_first(hw_thread_mask);
+	for (i = 0;
+	     i < num_cores_per_socket * node_affinity.num_online_nodes;
+	     i++)
+		curr_cpu = cpumask_next(curr_cpu, hw_thread_mask);
 
-		/* Identifying correct HW threads within physical cores */
-		cpumask_shift_left(hw_thread_mask, hw_thread_mask,
-				   num_cores_per_socket *
-				   node_affinity.num_online_nodes *
-				   hw_thread_no);
+	for (; i < possible; i++) {
+		cpumask_clear_cpu(curr_cpu, hw_thread_mask);
+		curr_cpu = cpumask_next(curr_cpu, hw_thread_mask);
 	}
+
+	/* Identifying correct HW threads within physical cores */
+	cpumask_shift_left(hw_thread_mask, hw_thread_mask,
+			   num_cores_per_socket *
+			   node_affinity.num_online_nodes *
+			   hw_thread_no);
 }
 
 int hfi1_get_proc_affinity(int node)
-- 
2.39.5
