Skip to content

Commit 9c90c6a

Browse files
Authored by axboe (Jens Axboe)
and committed by Xiaoguang Wang
io_uring: async task poll trigger cleanup
to #28736503

commit 3106725 upstream.

If the request is still hashed in io_async_task_func(), then it cannot
have been canceled and it's pointless to check. So save that check.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Xiaoguang Wang <xiaoguang.wang@linux.alibaba.com>
Acked-by: Joseph Qi <joseph.qi@linux.alibaba.com>
1 parent 1d6d088 commit 9c90c6a

File tree

1 file changed

+16
-17
lines changed

1 file changed

+16
-17
lines changed

fs/io_uring.c

Lines changed: 16 additions & 17 deletions
Original file line number | Diff line number | Diff line change
@@ -4114,7 +4114,7 @@ static void io_async_task_func(struct callback_head *cb)
41144114
struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
41154115
struct async_poll *apoll = req->apoll;
41164116
struct io_ring_ctx *ctx = req->ctx;
4117-
bool canceled;
4117+
bool canceled = false;
41184118

41194119
trace_io_uring_task_run(req->ctx, req->opcode, req->user_data);
41204120

@@ -4123,34 +4123,33 @@ static void io_async_task_func(struct callback_head *cb)
41234123
return;
41244124
}
41254125

4126-
if (hash_hashed(&req->hash_node))
4126+
/* If req is still hashed, it cannot have been canceled. Don't check. */
4127+
if (hash_hashed(&req->hash_node)) {
41274128
hash_del(&req->hash_node);
4128-
4129-
canceled = READ_ONCE(apoll->poll.canceled);
4130-
if (canceled) {
4131-
io_cqring_fill_event(req, -ECANCELED);
4132-
io_commit_cqring(ctx);
4129+
} else {
4130+
canceled = READ_ONCE(apoll->poll.canceled);
4131+
if (canceled) {
4132+
io_cqring_fill_event(req, -ECANCELED);
4133+
io_commit_cqring(ctx);
4134+
}
41334135
}
41344136

41354137
spin_unlock_irq(&ctx->completion_lock);
41364138

41374139
/* restore ->work in case we need to retry again */
41384140
memcpy(&req->work, &apoll->work, sizeof(req->work));
4141+
kfree(apoll);
41394142

4140-
if (canceled) {
4141-
kfree(apoll);
4143+
if (!canceled) {
4144+
__set_current_state(TASK_RUNNING);
4145+
mutex_lock(&ctx->uring_lock);
4146+
__io_queue_sqe(req, NULL);
4147+
mutex_unlock(&ctx->uring_lock);
4148+
} else {
41424149
io_cqring_ev_posted(ctx);
41434150
req_set_fail_links(req);
41444151
io_double_put_req(req);
4145-
return;
41464152
}
4147-
4148-
__set_current_state(TASK_RUNNING);
4149-
mutex_lock(&ctx->uring_lock);
4150-
__io_queue_sqe(req, NULL);
4151-
mutex_unlock(&ctx->uring_lock);
4152-
4153-
kfree(apoll);
41544153
}
41554154

41564155
static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,

0 commit comments

Comments
 (0)