
Commit f6e8d01

htejun authored and axboe committed
block: add io_context->active_ref
Currently ioc->nr_tasks is used to decide two things: whether an ioc is done issuing IOs and whether it's shared by multiple tasks. This patch separates out the first into ioc->active_ref, which is acquired and released using {get|put}_io_context_active() respectively.

This will be used to associate bios with a given task. This patch doesn't introduce any visible behavior change.

Signed-off-by: Tejun Heo <[email protected]>
Cc: Vivek Goyal <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
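To make the split concrete, here is a minimal user-space model of the two-counter scheme in plain C. The field names mirror those used by this patch, but the helpers (model_get_active/model_put_active) are only an illustrative sketch of the idea, not kernel code:

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative model only; field names follow include/linux/iocontext.h. */
struct io_context_model {
	atomic_long refcount;   /* keeps the structure itself alive */
	atomic_int  active_ref; /* may this ioc still issue IOs? */
	atomic_int  nr_tasks;   /* now only: how many tasks share it? */
};

/* mirrors get_io_context_active(): pin both lifetime and IO-issuing ability */
static void model_get_active(struct io_context_model *ioc)
{
	atomic_fetch_add(&ioc->refcount, 1);
	atomic_fetch_add(&ioc->active_ref, 1);
}

/* mirrors put_io_context_active(): dropping the last active ref means no further IOs */
static void model_put_active(struct io_context_model *ioc)
{
	if (atomic_fetch_sub(&ioc->active_ref, 1) == 1)
		printf("last active reference dropped: ioscheds would be notified\n");
	atomic_fetch_sub(&ioc->refcount, 1); /* stands in for put_io_context() */
}

int main(void)
{
	struct io_context_model ioc;

	/* as in create_task_io_context(): one ref, one active ref, one task */
	atomic_init(&ioc.refcount, 1);
	atomic_init(&ioc.active_ref, 1);
	atomic_init(&ioc.nr_tasks, 1);

	model_get_active(&ioc);  /* e.g. something pinning the ioc for IO */
	model_put_active(&ioc);
	model_put_active(&ioc);  /* the owning task exits */
	return 0;
}

The point of the separation is that refcount keeps the structure alive for anyone still holding a pointer, while active_ref answers the narrower question "can this ioc still issue IO?", leaving nr_tasks to mean only "how many tasks share this ioc".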
1 parent 3d48749 commit f6e8d01

3 files changed: +47 −15 lines

block/blk-ioc.c

Lines changed: 25 additions & 11 deletions
@@ -149,20 +149,20 @@ void put_io_context(struct io_context *ioc)
 }
 EXPORT_SYMBOL(put_io_context);
 
-/* Called by the exiting task */
-void exit_io_context(struct task_struct *task)
+/**
+ * put_io_context_active - put active reference on ioc
+ * @ioc: ioc of interest
+ *
+ * Undo get_io_context_active(). If active reference reaches zero after
+ * put, @ioc can never issue further IOs and ioscheds are notified.
+ */
+void put_io_context_active(struct io_context *ioc)
 {
-	struct io_context *ioc;
-	struct io_cq *icq;
 	struct hlist_node *n;
 	unsigned long flags;
+	struct io_cq *icq;
 
-	task_lock(task);
-	ioc = task->io_context;
-	task->io_context = NULL;
-	task_unlock(task);
-
-	if (!atomic_dec_and_test(&ioc->nr_tasks)) {
+	if (!atomic_dec_and_test(&ioc->active_ref)) {
 		put_io_context(ioc);
 		return;
 	}
@@ -191,6 +191,20 @@ void exit_io_context(struct task_struct *task)
 	put_io_context(ioc);
 }
 
+/* Called by the exiting task */
+void exit_io_context(struct task_struct *task)
+{
+	struct io_context *ioc;
+
+	task_lock(task);
+	ioc = task->io_context;
+	task->io_context = NULL;
+	task_unlock(task);
+
+	atomic_dec(&ioc->nr_tasks);
+	put_io_context_active(ioc);
+}
+
 /**
  * ioc_clear_queue - break any ioc association with the specified queue
  * @q: request_queue being cleared
@@ -223,7 +237,7 @@ int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
 
 	/* initialize */
 	atomic_long_set(&ioc->refcount, 1);
-	atomic_set(&ioc->nr_tasks, 1);
+	atomic_set(&ioc->active_ref, 1);
 	spin_lock_init(&ioc->lock);
 	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
 	INIT_HLIST_HEAD(&ioc->icq_list);

block/cfq-iosched.c

Lines changed: 2 additions & 2 deletions
@@ -1865,7 +1865,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 	 * task has exited, don't wait
 	 */
 	cic = cfqd->active_cic;
-	if (!cic || !atomic_read(&cic->icq.ioc->nr_tasks))
+	if (!cic || !atomic_read(&cic->icq.ioc->active_ref))
 		return;
 
 	/*
@@ -2841,7 +2841,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 
 	if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
 		enable_idle = 0;
-	else if (!atomic_read(&cic->icq.ioc->nr_tasks) ||
+	else if (!atomic_read(&cic->icq.ioc->active_ref) ||
 		 !cfqd->cfq_slice_idle ||
 		 (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
 		enable_idle = 0;

include/linux/iocontext.h

Lines changed: 20 additions & 2 deletions
@@ -100,6 +100,7 @@ struct io_cq {
  */
 struct io_context {
 	atomic_long_t refcount;
+	atomic_t active_ref;
 	atomic_t nr_tasks;
 
 	/* all the fields below are protected by this lock */
@@ -120,17 +121,34 @@ struct io_context {
 	struct work_struct release_work;
 };
 
-static inline void ioc_task_link(struct io_context *ioc)
+/**
+ * get_io_context_active - get active reference on ioc
+ * @ioc: ioc of interest
+ *
+ * Only iocs with active reference can issue new IOs. This function
+ * acquires an active reference on @ioc. The caller must already have an
+ * active reference on @ioc.
+ */
+static inline void get_io_context_active(struct io_context *ioc)
 {
 	WARN_ON_ONCE(atomic_long_read(&ioc->refcount) <= 0);
-	WARN_ON_ONCE(atomic_read(&ioc->nr_tasks) <= 0);
+	WARN_ON_ONCE(atomic_read(&ioc->active_ref) <= 0);
 	atomic_long_inc(&ioc->refcount);
+	atomic_inc(&ioc->active_ref);
+}
+
+static inline void ioc_task_link(struct io_context *ioc)
+{
+	get_io_context_active(ioc);
+
+	WARN_ON_ONCE(atomic_read(&ioc->nr_tasks) <= 0);
 	atomic_inc(&ioc->nr_tasks);
 }
 
 struct task_struct;
 #ifdef CONFIG_BLOCK
 void put_io_context(struct io_context *ioc);
+void put_io_context_active(struct io_context *ioc);
 void exit_io_context(struct task_struct *task);
 struct io_context *get_task_io_context(struct task_struct *task,
 				       gfp_t gfp_flags, int node);
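As a usage sketch, the intended pairing of the new helpers might look like the following hypothetical caller (the function is an assumption for illustration, not part of this commit): anything that wants to issue IO against an ioc pins its active reference around the work.

/* Hypothetical example only: pin @ioc's ability to issue IO while working
 * on a task's behalf. get_io_context_active() also takes a regular
 * reference; put_io_context_active() drops that reference again via
 * put_io_context() and notifies the ioscheds when the active count
 * reaches zero.
 */
static void issue_io_on_behalf_of(struct io_context *ioc)
{
	get_io_context_active(ioc);
	/* ... submit IO associated with @ioc ... */
	put_io_context_active(ioc);
}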
