
Commit 5f50d79

jasowang authored and Sasha Levin committed
virtio-net: fix the race between refill work and close
[ Upstream commit 5a15912 ]

We try to prevent the refill work from enabling NAPI by calling
cancel_delayed_work_sync(). This is insufficient, since we don't
disable the source of the refill work scheduling: a NAPI poll
callback that runs after cancel_delayed_work_sync() can schedule the
refill work, which can then re-enable NAPI, leading to a
use-after-free [1].

Since the work itself can enable NAPI, we can't simply disable NAPI
before calling cancel_delayed_work_sync(). Fix this by introducing a
dedicated boolean that controls whether the work may be scheduled
from NAPI at all.

[1]
==================================================================
BUG: KASAN: use-after-free in refill_work+0x43/0xd4
Read of size 2 at addr ffff88810562c92e by task kworker/2:1/42

CPU: 2 PID: 42 Comm: kworker/2:1 Not tainted 5.19.0-rc1+ torvalds#480
Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.16.0-0-gd239552ce722-prebuilt.qemu.org 04/01/2014
Workqueue: events refill_work
Call Trace:
 <TASK>
 dump_stack_lvl+0x34/0x44
 print_report.cold+0xbb/0x6ac
 ? _printk+0xad/0xde
 ? refill_work+0x43/0xd4
 kasan_report+0xa8/0x130
 ? refill_work+0x43/0xd4
 refill_work+0x43/0xd4
 process_one_work+0x43d/0x780
 worker_thread+0x2a0/0x6f0
 ? process_one_work+0x780/0x780
 kthread+0x167/0x1a0
 ? kthread_exit+0x50/0x50
 ret_from_fork+0x22/0x30
 </TASK>
...

Fixes: b2baed6 ("virtio_net: set/cancel work on ndo_open/ndo_stop")
Signed-off-by: Jason Wang <[email protected]>
Acked-by: Michael S. Tsirkin <[email protected]>
Reviewed-by: Xuan Zhuo <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
Signed-off-by: Sasha Levin <[email protected]>
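Distilled to its essentials, the fix is a common kernel teardown pattern: a boolean guarded by a spinlock gates every attempt to (re)arm the work, and the close path clears the flag before the synchronous cancel. The sketch below is a minimal illustration of that pattern under those assumptions, not the literal driver code; struct demo_dev and the demo_* helpers are invented names for this example.

/* Minimal sketch of the "gate the work, then cancel it" pattern used
 * by this fix. demo_dev and the demo_* helpers are illustrative names
 * only; the real code lives in drivers/net/virtio_net.c below.
 */
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_dev {
	struct delayed_work refill;
	spinlock_t refill_lock;
	bool refill_enabled;	/* may the work be (re)scheduled? */
};

/* Bring-up (process context): initialize once, then open the gate. */
static void demo_open(struct demo_dev *d, work_func_t fn)
{
	INIT_DELAYED_WORK(&d->refill, fn);
	spin_lock_init(&d->refill_lock);

	spin_lock_bh(&d->refill_lock);
	d->refill_enabled = true;
	spin_unlock_bh(&d->refill_lock);
}

/* Hot path (NAPI poll, softirq context): only arm the work while the
 * gate is open. Plain spin_lock() suffices here because BHs are
 * already disabled.
 */
static void demo_try_schedule_refill(struct demo_dev *d)
{
	spin_lock(&d->refill_lock);
	if (d->refill_enabled)
		schedule_delayed_work(&d->refill, 0);
	spin_unlock(&d->refill_lock);
}

/* Teardown (process context): close the gate first, then cancel. Once
 * the flag is cleared, no poll callback can queue new work, so after
 * cancel_delayed_work_sync() returns the work can never run again.
 */
static void demo_close(struct demo_dev *d)
{
	spin_lock_bh(&d->refill_lock);
	d->refill_enabled = false;
	spin_unlock_bh(&d->refill_lock);

	cancel_delayed_work_sync(&d->refill);
}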
1 parent: c7b9244

1 file changed: drivers/net/virtio_net.c (34 additions, 3 deletions)
@@ -213,9 +213,15 @@ struct virtnet_info {
 	/* Packet virtio header size */
 	u8 hdr_len;
 
-	/* Work struct for refilling if we run low on memory. */
+	/* Work struct for delayed refilling if we run low on memory. */
 	struct delayed_work refill;
 
+	/* Is delayed refill enabled? */
+	bool refill_enabled;
+
+	/* The lock to synchronize the access to refill_enabled */
+	spinlock_t refill_lock;
+
 	/* Work struct for config space updates */
 	struct work_struct config_work;
 
@@ -319,6 +325,20 @@ static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
 	return p;
 }
 
+static void enable_delayed_refill(struct virtnet_info *vi)
+{
+	spin_lock_bh(&vi->refill_lock);
+	vi->refill_enabled = true;
+	spin_unlock_bh(&vi->refill_lock);
+}
+
+static void disable_delayed_refill(struct virtnet_info *vi)
+{
+	spin_lock_bh(&vi->refill_lock);
+	vi->refill_enabled = false;
+	spin_unlock_bh(&vi->refill_lock);
+}
+
 static void virtqueue_napi_schedule(struct napi_struct *napi,
 				    struct virtqueue *vq)
 {
@@ -1388,8 +1408,12 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
 	}
 
 	if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
-		if (!try_fill_recv(vi, rq, GFP_ATOMIC))
-			schedule_delayed_work(&vi->refill, 0);
+		if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
+			spin_lock(&vi->refill_lock);
+			if (vi->refill_enabled)
+				schedule_delayed_work(&vi->refill, 0);
+			spin_unlock(&vi->refill_lock);
+		}
 	}
 
 	u64_stats_update_begin(&rq->stats.syncp);
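Note the lock flavor in the hunk above: virtnet_receive() runs from NAPI poll, i.e. in softirq context with BHs already disabled, so plain spin_lock() is sufficient. The enable/disable helpers run in process context and therefore take spin_lock_bh(), which keeps a softirq on the same CPU from interrupting the critical section and deadlocking on the lock.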
@@ -1508,6 +1532,8 @@ static int virtnet_open(struct net_device *dev)
 	struct virtnet_info *vi = netdev_priv(dev);
 	int i, err;
 
+	enable_delayed_refill(vi);
+
 	for (i = 0; i < vi->max_queue_pairs; i++) {
 		if (i < vi->curr_queue_pairs)
 			/* Make sure we have some buffers: if oom use wq. */
@@ -1878,6 +1904,8 @@ static int virtnet_close(struct net_device *dev)
 	struct virtnet_info *vi = netdev_priv(dev);
 	int i;
 
+	/* Make sure NAPI doesn't schedule refill work */
+	disable_delayed_refill(vi);
 	/* Make sure refill_work doesn't re-enable napi! */
 	cancel_delayed_work_sync(&vi->refill);
 
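The ordering in virtnet_close() is the heart of the fix: disable_delayed_refill() closes the gate before cancel_delayed_work_sync() runs, so once the cancel returns, no NAPI poll callback can queue another refill that would re-enable NAPI behind our back.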
@@ -2417,6 +2445,8 @@ static int virtnet_restore_up(struct virtio_device *vdev)
 
 	virtio_device_ready(vdev);
 
+	enable_delayed_refill(vi);
+
 	if (netif_running(vi->dev)) {
 		err = virtnet_open(vi->dev);
 		if (err)
@@ -3140,6 +3170,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 	vdev->priv = vi;
 
 	INIT_WORK(&vi->config_work, virtnet_config_changed_work);
+	spin_lock_init(&vi->refill_lock);
 
 	/* If we can receive ANY GSO packets, we must allocate large ones. */
 	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
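Finally, spin_lock_init() sits early in virtnet_probe(), alongside the other work initialization and before the netdev is registered, so the lock is valid by the time the first NAPI poll (and thus the first locked check of refill_enabled) can possibly run.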
