Skip to content

Commit 7e2014f

Browse files
isilence authored and Xiaoguang Wang committed
io_uring: remove custom ->func handlers
to #28736503 commit ac45abc upstream In preparation of getting rid of work.func, this removes almost all custom instances of it, leaving only io_wq_submit_work() and io_link_work_cb(). And the last one will be dealt later. Nothing fancy, just routinely remove *_finish() function and inline what's left. E.g. remove io_fsync_finish() + inline __io_fsync() into io_fsync(). As no users of io_req_cancelled() are left, delete it as well. The patch adds extra switch lookup on cold-ish path, but that's overweighted by nice diffstat and other benefits of the following patches. Signed-off-by: Pavel Begunkov <asml.silence@gmail.com> Signed-off-by: Jens Axboe <axboe@kernel.dk> Signed-off-by: Xiaoguang Wang <xiaoguang.wang@linux.alibaba.com> Acked-by: Joseph Qi <joseph.qi@linux.alibaba.com>
1 parent c6ed507 commit 7e2014f

File tree

1 file changed

+27
-115
lines changed

1 file changed

+27
-115
lines changed

fs/io_uring.c

Lines changed: 27 additions & 115 deletions
Original file line number · Diff line number · Diff line change
@@ -2889,77 +2889,25 @@ static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
28892889
return 0;
28902890
}
28912891

2892-
static bool io_req_cancelled(struct io_kiocb *req)
2893-
{
2894-
if (req->work.flags & IO_WQ_WORK_CANCEL) {
2895-
req_set_fail_links(req);
2896-
io_cqring_add_event(req, -ECANCELED);
2897-
io_put_req(req);
2898-
return true;
2899-
}
2900-
2901-
return false;
2902-
}
2903-
2904-
static void __io_fsync(struct io_kiocb *req)
2892+
static int io_fsync(struct io_kiocb *req, bool force_nonblock)
29052893
{
29062894
loff_t end = req->sync.off + req->sync.len;
29072895
int ret;
29082896

2897+
/* fsync always requires a blocking context */
2898+
if (force_nonblock)
2899+
return -EAGAIN;
2900+
29092901
ret = vfs_fsync_range(req->file, req->sync.off,
29102902
end > 0 ? end : LLONG_MAX,
29112903
req->sync.flags & IORING_FSYNC_DATASYNC);
29122904
if (ret < 0)
29132905
req_set_fail_links(req);
29142906
io_cqring_add_event(req, ret);
29152907
io_put_req(req);
2916-
}
2917-
2918-
static void io_fsync_finish(struct io_wq_work **workptr)
2919-
{
2920-
struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
2921-
2922-
if (io_req_cancelled(req))
2923-
return;
2924-
__io_fsync(req);
2925-
io_steal_work(req, workptr);
2926-
}
2927-
2928-
static int io_fsync(struct io_kiocb *req, bool force_nonblock)
2929-
{
2930-
/* fsync always requires a blocking context */
2931-
if (force_nonblock) {
2932-
req->work.func = io_fsync_finish;
2933-
return -EAGAIN;
2934-
}
2935-
__io_fsync(req);
29362908
return 0;
29372909
}
29382910

2939-
static void __io_fallocate(struct io_kiocb *req)
2940-
{
2941-
int ret;
2942-
2943-
current->signal->rlim[RLIMIT_FSIZE].rlim_cur = req->fsize;
2944-
ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
2945-
req->sync.len);
2946-
current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
2947-
if (ret < 0)
2948-
req_set_fail_links(req);
2949-
io_cqring_add_event(req, ret);
2950-
io_put_req(req);
2951-
}
2952-
2953-
static void io_fallocate_finish(struct io_wq_work **workptr)
2954-
{
2955-
struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
2956-
2957-
if (io_req_cancelled(req))
2958-
return;
2959-
__io_fallocate(req);
2960-
io_steal_work(req, workptr);
2961-
}
2962-
29632911
static int io_fallocate_prep(struct io_kiocb *req,
29642912
const struct io_uring_sqe *sqe)
29652913
{
@@ -2977,13 +2925,20 @@ static int io_fallocate_prep(struct io_kiocb *req,
29772925

29782926
static int io_fallocate(struct io_kiocb *req, bool force_nonblock)
29792927
{
2928+
int ret;
2929+
29802930
/* fallocate always requiring blocking context */
2981-
if (force_nonblock) {
2982-
req->work.func = io_fallocate_finish;
2931+
if (force_nonblock)
29832932
return -EAGAIN;
2984-
}
29852933

2986-
__io_fallocate(req);
2934+
current->signal->rlim[RLIMIT_FSIZE].rlim_cur = req->fsize;
2935+
ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
2936+
req->sync.len);
2937+
current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
2938+
if (ret < 0)
2939+
req_set_fail_links(req);
2940+
io_cqring_add_event(req, ret);
2941+
io_put_req(req);
29872942
return 0;
29882943
}
29892944

@@ -3481,38 +3436,20 @@ static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
34813436
return 0;
34823437
}
34833438

3484-
static void __io_sync_file_range(struct io_kiocb *req)
3439+
static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock)
34853440
{
34863441
int ret;
34873442

3443+
/* sync_file_range always requires a blocking context */
3444+
if (force_nonblock)
3445+
return -EAGAIN;
3446+
34883447
ret = sync_file_range(req->file, req->sync.off, req->sync.len,
34893448
req->sync.flags);
34903449
if (ret < 0)
34913450
req_set_fail_links(req);
34923451
io_cqring_add_event(req, ret);
34933452
io_put_req(req);
3494-
}
3495-
3496-
3497-
static void io_sync_file_range_finish(struct io_wq_work **workptr)
3498-
{
3499-
struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
3500-
3501-
if (io_req_cancelled(req))
3502-
return;
3503-
__io_sync_file_range(req);
3504-
io_steal_work(req, workptr);
3505-
}
3506-
3507-
static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock)
3508-
{
3509-
/* sync_file_range always requires a blocking context */
3510-
if (force_nonblock) {
3511-
req->work.func = io_sync_file_range_finish;
3512-
return -EAGAIN;
3513-
}
3514-
3515-
__io_sync_file_range(req);
35163453
return 0;
35173454
}
35183455

@@ -3934,52 +3871,27 @@ static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
39343871
return 0;
39353872
}
39363873

3937-
static int __io_accept(struct io_kiocb *req, bool force_nonblock)
3874+
static int io_accept(struct io_kiocb *req, bool force_nonblock)
39383875
{
39393876
struct io_accept *accept = &req->accept;
3940-
unsigned file_flags;
3877+
unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
39413878
int ret;
39423879

3943-
file_flags = force_nonblock ? O_NONBLOCK : 0;
39443880
ret = __sys_accept4_file(req->file, file_flags, accept->addr,
39453881
accept->addr_len, accept->flags,
39463882
accept->nofile);
39473883
if (ret == -EAGAIN && force_nonblock)
39483884
return -EAGAIN;
3949-
if (ret == -ERESTARTSYS)
3950-
ret = -EINTR;
3951-
if (ret < 0)
3885+
if (ret < 0) {
3886+
if (ret == -ERESTARTSYS)
3887+
ret = -EINTR;
39523888
req_set_fail_links(req);
3889+
}
39533890
io_cqring_add_event(req, ret);
39543891
io_put_req(req);
39553892
return 0;
39563893
}
39573894

3958-
static void io_accept_finish(struct io_wq_work **workptr)
3959-
{
3960-
struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
3961-
3962-
if (io_req_cancelled(req))
3963-
return;
3964-
__io_accept(req, false);
3965-
io_steal_work(req, workptr);
3966-
}
3967-
3968-
static int io_accept(struct io_kiocb *req, bool force_nonblock)
3969-
{
3970-
int ret;
3971-
3972-
if (req->file->f_flags & O_NONBLOCK)
3973-
req->flags |= REQ_F_NOWAIT;
3974-
3975-
ret = __io_accept(req, force_nonblock);
3976-
if (ret == -EAGAIN && force_nonblock) {
3977-
req->work.func = io_accept_finish;
3978-
return -EAGAIN;
3979-
}
3980-
return 0;
3981-
}
3982-
39833895
static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
39843896
{
39853897
struct io_connect *conn = &req->connect;

0 commit comments

Comments (0)