@@ -218,8 +218,7 @@ struct eventpoll {
 	struct file *file;
 
 	/* used to optimize loop detection check */
-	struct list_head visited_list_link;
-	int visited;
+	u64 gen;
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
 	/* used to track busy poll napi_id */
@@ -274,6 +273,8 @@ static long max_user_watches __read_mostly;
  */
 static DEFINE_MUTEX(epmutex);
 
+static u64 loop_check_gen = 0;
+
 /* Used to check for epoll file descriptor inclusion loops */
 static struct nested_calls poll_loop_ncalls;
 
@@ -283,9 +284,6 @@ static struct kmem_cache *epi_cache __read_mostly;
 /* Slab cache used to allocate "struct eppoll_entry" */
 static struct kmem_cache *pwq_cache __read_mostly;
 
-/* Visited nodes during ep_loop_check(), so we can unset them when we finish */
-static LIST_HEAD(visited_list);
-
 /*
  * List of files with newly added links, where we may need to limit the number
  * of emanating paths. Protected by the epmutex.
@@ -1971,13 +1969,12 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
 	struct epitem *epi;
 
 	mutex_lock_nested(&ep->mtx, call_nests + 1);
-	ep->visited = 1;
-	list_add(&ep->visited_list_link, &visited_list);
+	ep->gen = loop_check_gen;
 	for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
 		epi = rb_entry(rbp, struct epitem, rbn);
 		if (unlikely(is_file_epoll(epi->ffd.file))) {
 			ep_tovisit = epi->ffd.file->private_data;
-			if (ep_tovisit->visited)
+			if (ep_tovisit->gen == loop_check_gen)
 				continue;
 			error = ep_call_nested(&poll_loop_ncalls,
 					ep_loop_check_proc, epi->ffd.file,
@@ -2018,18 +2015,8 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
  */
 static int ep_loop_check(struct eventpoll *ep, struct file *file)
 {
-	int ret;
-	struct eventpoll *ep_cur, *ep_next;
-
-	ret = ep_call_nested(&poll_loop_ncalls,
+	return ep_call_nested(&poll_loop_ncalls,
 			ep_loop_check_proc, file, ep, current);
-	/* clear visited list */
-	list_for_each_entry_safe(ep_cur, ep_next, &visited_list,
-			visited_list_link) {
-		ep_cur->visited = 0;
-		list_del(&ep_cur->visited_list_link);
-	}
-	return ret;
 }
 
 static void clear_tfile_check_list(void)
@@ -2199,6 +2186,7 @@ int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
 	error = epoll_mutex_lock(&epmutex, 0, nonblock);
 	if (error)
 		goto error_tgt_fput;
+	loop_check_gen++;
 	full_check = 1;
 	if (is_file_epoll(tf.file)) {
 		error = -ELOOP;
@@ -2262,6 +2250,7 @@ int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
 error_tgt_fput:
 	if (full_check) {
 		clear_tfile_check_list();
+		loop_check_gen++;
 		mutex_unlock(&epmutex);
 	}
 