From 8aaf8528ead045578cf1dfd17946dc6e83b06ac6 Mon Sep 17 00:00:00 2001
From: Eric Dumazet <edumazet@google.com>
Date: Thu, 10 Oct 2019 20:17:45 -0700
Subject: [PATCH] tcp: annotate sk->sk_sndbuf lockless reads

commit e292f05e0df73f9fcc93329663936e1ded97a988 upstream.

For the sake of tcp_poll(), there are a few places where we fetch
sk->sk_sndbuf while this field can change from IRQ or another CPU.

We need to add READ_ONCE() annotations, and also make sure write
sides use the corresponding WRITE_ONCE() to avoid store tearing.

Note that other transports probably need similar fixes.
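
Purely as illustration (this is not part of the applied diff; the
fake_sock/fake_wspace/fake_moderate_sndbuf names are made up for the
example, and READ_ONCE()/WRITE_ONCE() are approximated with volatile
casts rather than the real <linux/compiler.h> definitions), here is a
stand-alone GNU C sketch of the reader/writer annotation pattern:

  #include <stdio.h>

  /* Userspace approximation of the kernel macros: force a single,
   * whole-width access so the compiler cannot tear or re-issue it. */
  #define READ_ONCE(x)		(*(const volatile typeof(x) *)&(x))
  #define WRITE_ONCE(x, v)	(*(volatile typeof(x) *)&(x) = (v))

  struct fake_sock {
  	int sk_sndbuf;
  	int sk_wmem_queued;
  };

  /* Reader side (think tcp_poll()): sk_sndbuf may change under us,
   * so annotate the lockless load. */
  static int fake_wspace(const struct fake_sock *sk)
  {
  	return READ_ONCE(sk->sk_sndbuf) - sk->sk_wmem_queued;
  }

  /* Writer side (think sk_stream_moderate_sndbuf()): compute the new
   * value locally, then publish it with a single annotated store so a
   * concurrent lockless reader never sees a partial write. */
  static void fake_moderate_sndbuf(struct fake_sock *sk, int min_sndbuf)
  {
  	int val = sk->sk_sndbuf;

  	if (sk->sk_wmem_queued >> 1 < val)
  		val = sk->sk_wmem_queued >> 1;
  	if (val < min_sndbuf)
  		val = min_sndbuf;
  	WRITE_ONCE(sk->sk_sndbuf, val);
  }

  int main(void)
  {
  	struct fake_sock sk = { .sk_sndbuf = 4096, .sk_wmem_queued = 1024 };

  	fake_moderate_sndbuf(&sk, 2048);
  	printf("sndbuf=%d wspace=%d\n", sk.sk_sndbuf, fake_wspace(&sk));
  	return 0;
  }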

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>

diff --git a/include/net/sock.h b/include/net/sock.h
index 83b34f326849..a961062fc31e 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -888,7 +888,7 @@ static inline int sk_stream_min_wspace(const struct sock *sk)
 
 static inline int sk_stream_wspace(const struct sock *sk)
 {
-	return sk->sk_sndbuf - sk->sk_wmem_queued;
+	return READ_ONCE(sk->sk_sndbuf) - sk->sk_wmem_queued;
 }
 
 void sk_stream_write_space(struct sock *sk);
@@ -1212,7 +1212,7 @@ static inline void sk_refcnt_debug_release(const struct sock *sk)
 
 static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
 {
-	if (sk->sk_wmem_queued >= sk->sk_sndbuf)
+	if (sk->sk_wmem_queued >= READ_ONCE(sk->sk_sndbuf))
 		return false;
 
 	return sk->sk_prot->stream_memory_free ?
@@ -2225,10 +2225,14 @@ static inline void sk_wake_async(const struct sock *sk, int how, int band)
 
 static inline void sk_stream_moderate_sndbuf(struct sock *sk)
 {
-	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
-		sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
-		sk->sk_sndbuf = max_t(u32, sk->sk_sndbuf, SOCK_MIN_SNDBUF);
-	}
+	u32 val;
+
+	if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
+		return;
+
+	val = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
+
+	WRITE_ONCE(sk->sk_sndbuf, max_t(u32, val, SOCK_MIN_SNDBUF));
 }
 
 struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
@@ -2261,7 +2265,7 @@ bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
  */
 static inline bool sock_writeable(const struct sock *sk)
 {
-	return refcount_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1);
+	return refcount_read(&sk->sk_wmem_alloc) < (READ_ONCE(sk->sk_sndbuf) >> 1);
 }
 
 static inline gfp_t gfp_any(void)
diff --git a/net/core/filter.c b/net/core/filter.c
index 1f3f603c7ec3..c7d24235b6a2 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -4252,7 +4252,8 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
 		case SO_SNDBUF:
 			val = min_t(u32, val, sysctl_wmem_max);
 			sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
-			sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
+			WRITE_ONCE(sk->sk_sndbuf,
+				   max_t(int, val * 2, SOCK_MIN_SNDBUF));
 			break;
 		case SO_MAX_PACING_RATE: /* 32bit version */
 			if (val != ~0U)
diff --git a/net/core/sock.c b/net/core/sock.c
index 28e4062343c9..3d4f768e4495 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -785,7 +785,8 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
 		 */
 		val = min_t(int, val, INT_MAX / 2);
 		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
-		sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
+		WRITE_ONCE(sk->sk_sndbuf,
+			   max_t(int, val * 2, SOCK_MIN_SNDBUF));
 		/* Wake up sending tasks if we upped the value. */
 		sk->sk_write_space(sk);
 		break;
@@ -2085,8 +2086,10 @@ EXPORT_SYMBOL(sock_i_ino);
 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
 {
-	if (force || refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
+	if (force ||
+	    refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf)) {
 		struct sk_buff *skb = alloc_skb(size, priority);
+
 		if (skb) {
 			skb_set_owner_w(skb, sk);
 			return skb;
@@ -2187,7 +2190,7 @@ static long sock_wait_for_wmem(struct sock *sk, long timeo)
 			break;
 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
-		if (refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
+		if (refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf))
 			break;
 		if (sk->sk_shutdown & SEND_SHUTDOWN)
 			break;
@@ -2222,7 +2225,7 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
 		if (sk->sk_shutdown & SEND_SHUTDOWN)
 			goto failure;
 
-		if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf)
+		if (sk_wmem_alloc_get(sk) < READ_ONCE(sk->sk_sndbuf))
 			break;
 
 		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
@@ -2803,7 +2806,7 @@ static void sock_def_write_space(struct sock *sk)
 	/* Do not wake up a writer until he can make "significant"
 	 * progress. --DaveM
 	 */
-	if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
+	if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= READ_ONCE(sk->sk_sndbuf)) {
 		wq = rcu_dereference(sk->sk_wq);
 		if (skwq_has_sleeper(wq))
 			wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
@@ -3203,7 +3206,7 @@ void sk_get_meminfo(const struct sock *sk, u32 *mem)
 	mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk);
 	mem[SK_MEMINFO_RCVBUF] = READ_ONCE(sk->sk_rcvbuf);
 	mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk);
-	mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf;
+	mem[SK_MEMINFO_SNDBUF] = READ_ONCE(sk->sk_sndbuf);
 	mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
 	mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
 	mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index c6d122ae2866..7051fb3d2ada 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -450,7 +450,7 @@ void tcp_init_sock(struct sock *sk)
 
 	icsk->icsk_sync_mss = tcp_sync_mss;
 
-	sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[1];
+	WRITE_ONCE(sk->sk_sndbuf, sock_net(sk)->ipv4.sysctl_tcp_wmem[1]);
 	WRITE_ONCE(sk->sk_rcvbuf, sock_net(sk)->ipv4.sysctl_tcp_rmem[1]);
 
 	sk_sockets_allocated_inc(sk);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index d66186de8f3d..c709f691b9b5 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -359,7 +359,8 @@ static void tcp_sndbuf_expand(struct sock *sk)
 	sndmem *= nr_segs * per_mss;
 
 	if (sk->sk_sndbuf < sndmem)
-		sk->sk_sndbuf = min(sndmem, sock_net(sk)->ipv4.sysctl_tcp_wmem[2]);
+		WRITE_ONCE(sk->sk_sndbuf,
+			   min(sndmem, sock_net(sk)->ipv4.sysctl_tcp_wmem[2]));
 }
 
 /* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
-- 
2.7.4
