| From a4cedf217f200329fa8983535019c3b407de668a Mon Sep 17 00:00:00 2001 |
| From: Haiyang Zhang <haiyangz@microsoft.com> |
| Date: Thu, 28 Mar 2019 19:40:36 +0000 |
| Subject: hv_netvsc: Fix unwanted wakeup after tx_disable |
| |
| [ Upstream commit 1b704c4a1ba95574832e730f23817b651db2aa59 ] |
| |
| After queue stopped, the wakeup mechanism may wake it up again |
| when ring buffer usage is lower than a threshold. This may cause |
| send path panic on NULL pointer when we stopped all tx queues in |
| netvsc_detach and start removing the netvsc device. |
| |
| This patch fixes it by adding a tx_disable flag to prevent unwanted |
| queue wakeup. |
| |
| Fixes: 7b2ee50c0cd5 ("hv_netvsc: common detach logic") |
| Reported-by: Mohammed Gamal <mgamal@redhat.com> |
| Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com> |
| Signed-off-by: David S. Miller <davem@davemloft.net> |
| Signed-off-by: Sasha Levin <sashal@kernel.org> |
| --- |
| drivers/net/hyperv/hyperv_net.h | 1 + |
| drivers/net/hyperv/netvsc.c | 6 ++++-- |
| drivers/net/hyperv/netvsc_drv.c | 32 ++++++++++++++++++++++++++------ |
| 3 files changed, 31 insertions(+), 8 deletions(-) |
| |
| diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h |
| index e859ae2e42d5..49f41b64077b 100644 |
| --- a/drivers/net/hyperv/hyperv_net.h |
| +++ b/drivers/net/hyperv/hyperv_net.h |
| @@ -987,6 +987,7 @@ struct netvsc_device { |
| |
| wait_queue_head_t wait_drain; |
| bool destroy; |
| + bool tx_disable; /* if true, do not wake up queue again */ |
| |
| /* Receive buffer allocated by us but manages by NetVSP */ |
| void *recv_buf; |
| diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c |
| index 813d195bbd57..e0dce373cdd9 100644 |
| --- a/drivers/net/hyperv/netvsc.c |
| +++ b/drivers/net/hyperv/netvsc.c |
| @@ -110,6 +110,7 @@ static struct netvsc_device *alloc_net_device(void) |
| |
| init_waitqueue_head(&net_device->wait_drain); |
| net_device->destroy = false; |
| + net_device->tx_disable = false; |
| |
| net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; |
| net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; |
| @@ -719,7 +720,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev, |
| } else { |
| struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx); |
| |
| - if (netif_tx_queue_stopped(txq) && |
| + if (netif_tx_queue_stopped(txq) && !net_device->tx_disable && |
| (hv_get_avail_to_write_percent(&channel->outbound) > |
| RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) { |
| netif_tx_wake_queue(txq); |
| @@ -874,7 +875,8 @@ static inline int netvsc_send_pkt( |
| } else if (ret == -EAGAIN) { |
| netif_tx_stop_queue(txq); |
| ndev_ctx->eth_stats.stop_queue++; |
| - if (atomic_read(&nvchan->queue_sends) < 1) { |
| + if (atomic_read(&nvchan->queue_sends) < 1 && |
| + !net_device->tx_disable) { |
| netif_tx_wake_queue(txq); |
| ndev_ctx->eth_stats.wake_queue++; |
| ret = -ENOSPC; |
| diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c |
| index cf4897043e83..b20fb0fb595b 100644 |
| --- a/drivers/net/hyperv/netvsc_drv.c |
| +++ b/drivers/net/hyperv/netvsc_drv.c |
| @@ -109,6 +109,15 @@ static void netvsc_set_rx_mode(struct net_device *net) |
| rcu_read_unlock(); |
| } |
| |
| +static void netvsc_tx_enable(struct netvsc_device *nvscdev, |
| + struct net_device *ndev) |
| +{ |
| + nvscdev->tx_disable = false; |
| + virt_wmb(); /* ensure queue wake up mechanism is on */ |
| + |
| + netif_tx_wake_all_queues(ndev); |
| +} |
| + |
| static int netvsc_open(struct net_device *net) |
| { |
| struct net_device_context *ndev_ctx = netdev_priv(net); |
| @@ -129,7 +138,7 @@ static int netvsc_open(struct net_device *net) |
| rdev = nvdev->extension; |
| if (!rdev->link_state) { |
| netif_carrier_on(net); |
| - netif_tx_wake_all_queues(net); |
| + netvsc_tx_enable(nvdev, net); |
| } |
| |
| if (vf_netdev) { |
| @@ -184,6 +193,17 @@ static int netvsc_wait_until_empty(struct netvsc_device *nvdev) |
| } |
| } |
| |
| +static void netvsc_tx_disable(struct netvsc_device *nvscdev, |
| + struct net_device *ndev) |
| +{ |
| + if (nvscdev) { |
| + nvscdev->tx_disable = true; |
| + virt_wmb(); /* ensure txq will not wake up after stop */ |
| + } |
| + |
| + netif_tx_disable(ndev); |
| +} |
| + |
| static int netvsc_close(struct net_device *net) |
| { |
| struct net_device_context *net_device_ctx = netdev_priv(net); |
| @@ -192,7 +212,7 @@ static int netvsc_close(struct net_device *net) |
| struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); |
| int ret; |
| |
| - netif_tx_disable(net); |
| + netvsc_tx_disable(nvdev, net); |
| |
| /* No need to close rndis filter if it is removed already */ |
| if (!nvdev) |
| @@ -920,7 +940,7 @@ static int netvsc_detach(struct net_device *ndev, |
| |
| /* If device was up (receiving) then shutdown */ |
| if (netif_running(ndev)) { |
| - netif_tx_disable(ndev); |
| + netvsc_tx_disable(nvdev, ndev); |
| |
| ret = rndis_filter_close(nvdev); |
| if (ret) { |
| @@ -1908,7 +1928,7 @@ static void netvsc_link_change(struct work_struct *w) |
| if (rdev->link_state) { |
| rdev->link_state = false; |
| netif_carrier_on(net); |
| - netif_tx_wake_all_queues(net); |
| + netvsc_tx_enable(net_device, net); |
| } else { |
| notify = true; |
| } |
| @@ -1918,7 +1938,7 @@ static void netvsc_link_change(struct work_struct *w) |
| if (!rdev->link_state) { |
| rdev->link_state = true; |
| netif_carrier_off(net); |
| - netif_tx_stop_all_queues(net); |
| + netvsc_tx_disable(net_device, net); |
| } |
| kfree(event); |
| break; |
| @@ -1927,7 +1947,7 @@ static void netvsc_link_change(struct work_struct *w) |
| if (!rdev->link_state) { |
| rdev->link_state = true; |
| netif_carrier_off(net); |
| - netif_tx_stop_all_queues(net); |
| + netvsc_tx_disable(net_device, net); |
| event->event = RNDIS_STATUS_MEDIA_CONNECT; |
| spin_lock_irqsave(&ndev_ctx->lock, flags); |
| list_add(&event->list, &ndev_ctx->reconfig_events); |
| -- |
| 2.19.1 |
| |