From 00dd38c57708215712f438a0846cd7a765dba405 Mon Sep 17 00:00:00 2001
From: David Howells <dhowells@redhat.com>
Date: Wed, 18 Dec 2019 16:38:49 +0000
Subject: [PATCH] rxrpc: Unlock new call in rxrpc_new_incoming_call() rather
 than the caller

commit f33121cbe91973a08e68e4bde8c3f7e6e4e351c1 upstream.

Move the unlock and the ping transmission for a new incoming call into
rxrpc_new_incoming_call() rather than doing them in the caller.  This
makes it clearer what's going on.
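
In outline, the tail of rxrpc_new_incoming_call() now reads roughly as
follows (a condensed sketch of the hunks below, not the verbatim source;
the allocation and error paths above this point are elided):

	spin_unlock(&rx->incoming_lock);

	rxrpc_send_ping(call, skb);		/* previously done by the caller */
	mutex_unlock(&call->user_mutex);	/* previously done by the caller */

	_leave(" = %p{%d}", call, call->debug_id);
	return call;

no_call:
	spin_unlock(&rx->incoming_lock);
	_leave(" = NULL [%u]", skb->mark);
	return NULL;

The caller, rxrpc_input_packet(), is left with only the NULL check and no
longer has to send the ping or release the call's user_mutex itself.
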
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: David Howells <dhowells@redhat.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
cc: Ingo Molnar <mingo@redhat.com>
cc: Will Deacon <will@kernel.org>
cc: Davidlohr Bueso <dave@stgolabs.net>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
---
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index 00c095d74145..0a8dab33d3fc 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -240,6 +240,22 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
}

/*
+ * Ping the other end to fill our RTT cache and to retrieve the rwind
+ * and MTU parameters.
+ */
+static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb)
+{
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+ ktime_t now = skb->tstamp;
+
+ if (call->peer->rtt_usage < 3 ||
+ ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
+ rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
+ true, true,
+ rxrpc_propose_ack_ping_for_params);
+}
+
+/*
* Allocate a new incoming call from the prealloc pool, along with a connection
* and a peer as necessary.
*/
@@ -345,9 +361,7 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
skb->priority = RX_INVALID_OPERATION;
- _leave(" = NULL [close]");
- call = NULL;
- goto out;
+ goto no_call;
}

/* The peer, connection and call may all have sprung into existence due
@@ -360,9 +374,7 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
call = rxrpc_alloc_incoming_call(rx, local, peer, conn, skb);
if (!call) {
skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
- _leave(" = NULL [busy]");
- call = NULL;
- goto out;
+ goto no_call;
}

trace_rxrpc_receive(call, rxrpc_receive_incoming,
@@ -431,10 +443,18 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
*/
rxrpc_put_call(call, rxrpc_call_put);

- _leave(" = %p{%d}", call, call->debug_id);
-out:
spin_unlock(&rx->incoming_lock);
+
+ rxrpc_send_ping(call, skb);
+ mutex_unlock(&call->user_mutex);
+
+ _leave(" = %p{%d}", call, call->debug_id);
return call;
+
+no_call:
+ spin_unlock(&rx->incoming_lock);
+ _leave(" = NULL [%u]", skb->mark);
+ return NULL;
}

/*
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index dd47d465d1d3..49e7f31c9506 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -193,22 +193,6 @@ static void rxrpc_congestion_management(struct rxrpc_call *call,
}

/*
- * Ping the other end to fill our RTT cache and to retrieve the rwind
- * and MTU parameters.
- */
-static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb)
-{
- struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- ktime_t now = skb->tstamp;
-
- if (call->peer->rtt_usage < 3 ||
- ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
- rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
- true, true,
- rxrpc_propose_ack_ping_for_params);
-}
-
-/*
* Apply a hard ACK by advancing the Tx window.
*/
static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
@@ -1369,8 +1353,6 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
call = rxrpc_new_incoming_call(local, rx, skb);
if (!call)
goto reject_packet;
- rxrpc_send_ping(call, skb);
- mutex_unlock(&call->user_mutex);
}

rxrpc_input_call_packet(call, skb);
--
2.7.4