@@ -2847,6 +2847,12 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
 	if (nr_pages < 2)
 		nr_pages = 2;
 
+	/*
+	 * Keep CPUs from coming online while resizing to synchronize
+	 * with new per CPU buffers being created.
+	 */
+	guard(cpus_read_lock)();
+
 	/* prevent another thread from changing buffer sizes */
 	mutex_lock(&buffer->mutex);
 	atomic_inc(&buffer->resizing);
@@ -2891,7 +2897,6 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
 			cond_resched();
 		}
 
-		cpus_read_lock();
 		/*
 		 * Fire off all the required work handlers
 		 * We can't schedule on offline CPUs, but it's not necessary
@@ -2931,7 +2936,6 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
 			cpu_buffer->nr_pages_to_update = 0;
 		}
 
-		cpus_read_unlock();
 	} else {
 		cpu_buffer = buffer->buffers[cpu_id];
 
@@ -2959,8 +2963,6 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
 			goto out_err;
 		}
 
-		cpus_read_lock();
-
 		/* Can't run something on an offline CPU. */
 		if (!cpu_online(cpu_id))
 			rb_update_pages(cpu_buffer);
@@ -2979,7 +2981,6 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
 		}
 
 		cpu_buffer->nr_pages_to_update = 0;
-		cpus_read_unlock();
 	}
 
  out:
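
The change relies on the scope-based lock guards from include/linux/cleanup.h: guard(cpus_read_lock)() takes cpus_read_lock() at the declaration and releases it automatically on every exit from the enclosing scope, which is why the per-branch cpus_read_lock()/cpus_read_unlock() pairs above can simply be dropped, and why (per the added comment) CPUs are now kept from coming online for the whole resize. As a rough illustration of the mechanism only: the kernel helpers are built on the compiler's cleanup attribute, and the standalone userspace sketch below uses that attribute directly. The names my_lock, MY_GUARD and resize_something are made up for this sketch and are not kernel APIs; build with something like gcc -pthread.

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t my_lock = PTHREAD_MUTEX_INITIALIZER;

/* Called when a variable marked __attribute__((cleanup)) leaves scope. */
static void my_guard_release(int *unused)
{
	(void)unused;
	pthread_mutex_unlock(&my_lock);
	printf("lock dropped automatically\n");
}

/* Hypothetical macro mimicking the shape of guard() from linux/cleanup.h. */
#define MY_GUARD() \
	__attribute__((cleanup(my_guard_release))) int _guard = \
		(pthread_mutex_lock(&my_lock), 0)

static int resize_something(int fail)
{
	MY_GUARD();	/* lock held from here until the function returns */

	if (fail)
		return -1;	/* early return: unlock still happens, no explicit call */

	printf("resized while holding the lock\n");
	return 0;
}

int main(void)
{
	resize_something(0);
	resize_something(1);
	return 0;
}

The practical effect in ring_buffer_resize() is the same shape: the lock is taken once near the top of the function, and every return path, including the out and out_err exits, drops it without any explicit unlock.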