@@ -18,14 +18,10 @@ static int cachefiles_ondemand_fd_release(struct inode *inode,
 	info->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED;
 	cachefiles_ondemand_set_object_close(object);
 
-	/*
-	 * Flush all pending READ requests since their completion depends on
-	 * anon_fd.
-	 */
-	xas_for_each(&xas, req, ULONG_MAX) {
+	/* Only flush CACHEFILES_REQ_NEW marked req to avoid race with daemon_read */
+	xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) {
 		if (req->msg.object_id == object_id &&
-		    req->msg.opcode == CACHEFILES_OP_READ) {
-			req->error = -EIO;
+		    req->msg.opcode == CACHEFILES_OP_CLOSE) {
 			complete(&req->done);
 			xas_store(&xas, NULL);
 		}
@@ -179,6 +175,7 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
 	trace_cachefiles_ondemand_copen(req->object, id, size);
 
 	cachefiles_ondemand_set_object_open(req->object);
+	wake_up_all(&cache->daemon_pollwq);
 
 out:
 	complete(&req->done);
@@ -222,7 +219,6 @@ static int cachefiles_ondemand_get_fd(struct cachefiles_req *req)
 
 	load = (void *)req->msg.data;
 	load->fd = fd;
-	req->msg.object_id = object_id;
 	object->ondemand->ondemand_id = object_id;
 
 	cachefiles_get_unbind_pincount(cache);
@@ -238,6 +234,43 @@ static int cachefiles_ondemand_get_fd(struct cachefiles_req *req)
 	return ret;
 }
 
+static void ondemand_object_worker(struct work_struct *work)
+{
+	struct cachefiles_ondemand_info *info =
+		container_of(work, struct cachefiles_ondemand_info, ondemand_work);
+
+	cachefiles_ondemand_init_object(info->object);
+}
+
+/*
+ * If there are any inflight or subsequent READ requests on the
+ * closed object, reopen it.
+ * Skip read requests whose related object is reopening.
+ */
+static struct cachefiles_req *cachefiles_ondemand_select_req(struct xa_state *xas,
+							     unsigned long xa_max)
+{
+	struct cachefiles_req *req;
+	struct cachefiles_object *object;
+	struct cachefiles_ondemand_info *info;
+
+	xas_for_each_marked(xas, req, xa_max, CACHEFILES_REQ_NEW) {
+		if (req->msg.opcode != CACHEFILES_OP_READ)
+			return req;
+		object = req->object;
+		info = object->ondemand;
+		if (cachefiles_ondemand_object_is_close(object)) {
+			cachefiles_ondemand_set_object_reopening(object);
+			queue_work(fscache_wq, &info->ondemand_work);
+			continue;
+		}
+		if (cachefiles_ondemand_object_is_reopening(object))
+			continue;
+		return req;
+	}
+	return NULL;
+}
+
 ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
 					char __user *_buffer, size_t buflen)
 {
@@ -248,16 +281,16 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
 	int ret = 0;
 	XA_STATE(xas, &cache->reqs, cache->req_id_next);
 
+	xa_lock(&cache->reqs);
 	/*
 	 * Cyclically search for a request that has not ever been processed,
 	 * to prevent requests from being processed repeatedly, and make
 	 * request distribution fair.
 	 */
-	xa_lock(&cache->reqs);
-	req = xas_find_marked(&xas, UINT_MAX, CACHEFILES_REQ_NEW);
+	req = cachefiles_ondemand_select_req(&xas, ULONG_MAX);
 	if (!req && cache->req_id_next > 0) {
 		xas_set(&xas, 0);
-		req = xas_find_marked(&xas, cache->req_id_next - 1, CACHEFILES_REQ_NEW);
+		req = cachefiles_ondemand_select_req(&xas, cache->req_id_next - 1);
 	}
 	if (!req) {
 		xa_unlock(&cache->reqs);
@@ -277,14 +310,18 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
 	xa_unlock(&cache->reqs);
 
 	id = xas.xa_index;
-	msg->msg_id = id;
 
 	if (msg->opcode == CACHEFILES_OP_OPEN) {
 		ret = cachefiles_ondemand_get_fd(req);
-		if (ret)
+		if (ret) {
+			cachefiles_ondemand_set_object_close(req->object);
 			goto error;
+		}
 	}
 
+	msg->msg_id = id;
+	msg->object_id = req->object->ondemand->ondemand_id;
+
 	if (copy_to_user(_buffer, msg, n) != 0) {
 		ret = -EFAULT;
 		goto err_put_fd;
@@ -317,19 +354,23 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
 					void *private)
 {
 	struct cachefiles_cache *cache = object->volume->cache;
-	struct cachefiles_req *req;
+	struct cachefiles_req *req = NULL;
 	XA_STATE(xas, &cache->reqs, 0);
 	int ret;
 
 	if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
 		return 0;
 
-	if (test_bit(CACHEFILES_DEAD, &cache->flags))
-		return -EIO;
+	if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
+		ret = -EIO;
+		goto out;
+	}
 
 	req = kzalloc(sizeof(*req) + data_len, GFP_KERNEL);
-	if (!req)
-		return -ENOMEM;
+	if (!req) {
+		ret = -ENOMEM;
+		goto out;
+	}
 
 	req->object = object;
 	init_completion(&req->done);
@@ -367,7 +408,7 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
 		/* coupled with the barrier in cachefiles_flush_reqs() */
 		smp_mb();
 
-		if (opcode != CACHEFILES_OP_OPEN &&
+		if (opcode == CACHEFILES_OP_CLOSE &&
 		    !cachefiles_ondemand_object_is_open(object)) {
 			WARN_ON_ONCE(object->ondemand->ondemand_id == 0);
 			xas_unlock(&xas);
@@ -392,7 +433,15 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
 	wake_up_all(&cache->daemon_pollwq);
 	wait_for_completion(&req->done);
 	ret = req->error;
+	kfree(req);
+	return ret;
 out:
+	/* Reset the object to close state in error handling path.
+	 * If error occurs after creating the anonymous fd,
+	 * cachefiles_ondemand_fd_release() will set object to close.
+	 */
+	if (opcode == CACHEFILES_OP_OPEN)
+		cachefiles_ondemand_set_object_close(object);
 	kfree(req);
 	return ret;
 }
@@ -439,7 +488,6 @@ static int cachefiles_ondemand_init_close_req(struct cachefiles_req *req,
 	if (!cachefiles_ondemand_object_is_open(object))
 		return -ENOENT;
 
-	req->msg.object_id = object->ondemand->ondemand_id;
 	trace_cachefiles_ondemand_close(object, &req->msg);
 	return 0;
 }
@@ -455,16 +503,7 @@ static int cachefiles_ondemand_init_read_req(struct cachefiles_req *req,
 	struct cachefiles_object *object = req->object;
 	struct cachefiles_read *load = (void *)req->msg.data;
 	struct cachefiles_read_ctx *read_ctx = private;
-	int object_id = object->ondemand->ondemand_id;
-
-	/* Stop enqueuing requests when daemon has closed anon_fd. */
-	if (!cachefiles_ondemand_object_is_open(object)) {
-		WARN_ON_ONCE(object_id == 0);
-		pr_info_once("READ: anonymous fd closed prematurely.\n");
-		return -EIO;
-	}
 
-	req->msg.object_id = object_id;
 	load->off = read_ctx->off;
 	load->len = read_ctx->len;
 	trace_cachefiles_ondemand_read(object, &req->msg, load);
@@ -513,6 +552,7 @@ int cachefiles_ondemand_init_obj_info(struct cachefiles_object *object,
 		return -ENOMEM;
 
 	object->ondemand->object = object;
+	INIT_WORK(&object->ondemand->ondemand_work, ondemand_object_worker);
 	return 0;
 }
 
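For reference, the cyclic scan that cachefiles_ondemand_daemon_read() performs over the CACHEFILES_REQ_NEW mark — search from cache->req_id_next toward the end of the index space, then wrap once to 0 — can be illustrated with a small userspace analogue. This is only a sketch of the fairness technique, not kernel code: the slot array and pick_new_req() below are hypothetical stand-ins for the XArray and its marked iteration.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define NSLOTS 8

struct slot {
	bool is_new;	/* stand-in for the CACHEFILES_REQ_NEW mark */
	int id;
};

/*
 * Scan for a "new" slot starting at *next, wrapping once to index 0,
 * so low-numbered requests cannot starve high-numbered ones. Clears
 * the mark and advances *next past the pick, mirroring how the daemon
 * read path updates cache->req_id_next after selecting a request.
 */
static struct slot *pick_new_req(struct slot *slots, size_t n, size_t *next)
{
	for (size_t i = 0; i < n; i++) {
		size_t idx = (*next + i) % n;

		if (slots[idx].is_new) {
			slots[idx].is_new = false;
			*next = (idx + 1) % n;
			return &slots[idx];
		}
	}
	return NULL;
}

int main(void)
{
	struct slot slots[NSLOTS] = {
		[1] = { .is_new = true, .id = 1 },
		[5] = { .is_new = true, .id = 5 },
	};
	size_t next = 3;	/* pretend slot 3 was handed out last */
	struct slot *s;

	/* Starting at index 3, slot 5 is picked before the scan wraps to slot 1. */
	while ((s = pick_new_req(slots, NSLOTS, &next)))
		printf("picked request %d\n", s->id);
	return 0;
}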