@@ -1181,14 +1181,10 @@ static void __io_commit_cqring(struct io_ring_ctx *ctx)
11811181 }
11821182}
11831183
1184- /*
1185- * Returns true if we need to defer file table putting. This can only happen
1186- * from the error path with REQ_F_COMP_LOCKED set.
1187- */
1188- static bool io_req_clean_work (struct io_kiocb * req )
1184+ static void io_req_clean_work (struct io_kiocb * req )
11891185{
11901186 if (!(req -> flags & REQ_F_WORK_INITIALIZED ))
1191- return false ;
1187+ return ;
11921188
11931189 req -> flags &= ~REQ_F_WORK_INITIALIZED ;
11941190
@@ -1207,9 +1203,6 @@ static bool io_req_clean_work(struct io_kiocb *req)
12071203 if (req -> work .fs ) {
12081204 struct fs_struct * fs = req -> work .fs ;
12091205
1210- if (req -> flags & REQ_F_COMP_LOCKED )
1211- return true;
1212-
12131206 spin_lock (& req -> work .fs -> lock );
12141207 if (-- fs -> users )
12151208 fs = NULL ;
@@ -1218,8 +1211,6 @@ static bool io_req_clean_work(struct io_kiocb *req)
12181211 free_fs_struct (fs );
12191212 req -> work .fs = NULL ;
12201213 }
1221-
1222- return false;
12231214}
12241215
12251216static void io_prep_async_work (struct io_kiocb * req )
@@ -1699,7 +1690,7 @@ static inline void io_put_file(struct io_kiocb *req, struct file *file,
16991690 fput (file );
17001691}
17011692
1702- static bool io_dismantle_req (struct io_kiocb * req )
1693+ static void io_dismantle_req (struct io_kiocb * req )
17031694{
17041695 io_clean_op (req );
17051696
@@ -1708,7 +1699,7 @@ static bool io_dismantle_req(struct io_kiocb *req)
17081699 if (req -> file )
17091700 io_put_file (req , req -> file , (req -> flags & REQ_F_FIXED_FILE ));
17101701
1711- return io_req_clean_work (req );
1702+ io_req_clean_work (req );
17121703}
17131704
17141705static void __io_free_req_finish (struct io_kiocb * req )
@@ -1731,21 +1722,15 @@ static void __io_free_req_finish(struct io_kiocb *req)
/*
 * task_work callback (installed on a callback_head embedded in the request):
 * runs in task context, where it is safe to drop the resources the request
 * holds.  After this patch it simply delegates to io_dismantle_req() — which
 * releases req->file and, via io_req_clean_work(), the fs_struct reference —
 * instead of open-coding only the fs_struct put as it did before.  It then
 * completes the free with __io_free_req_finish().
 */
static void io_req_task_file_table_put(struct callback_head *cb)
{
	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);

	io_dismantle_req(req);
	__io_free_req_finish(req);
}
17451729
17461730static void __io_free_req (struct io_kiocb * req )
17471731{
1748- if (!io_dismantle_req (req )) {
1732+ if (!(req -> flags & REQ_F_COMP_LOCKED )) {
1733+ io_dismantle_req (req );
17491734 __io_free_req_finish (req );
17501735 } else {
17511736 int ret ;
@@ -2057,7 +2042,7 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
20572042 }
20582043 rb -> task_refs ++ ;
20592044
2060- WARN_ON_ONCE ( io_dismantle_req (req ) );
2045+ io_dismantle_req (req );
20612046 rb -> reqs [rb -> to_free ++ ] = req ;
20622047 if (unlikely (rb -> to_free == ARRAY_SIZE (rb -> reqs )))
20632048 __io_req_free_batch_flush (req -> ctx , rb );