Mirror of https://github.com/torvalds/linux.git, synced 2024-11-01 04:53:36 +01:00
45500dc4e0
io-wq will retry iopoll even when it fails with -EAGAIN. If that
races with task exit, which sets TIF_NOTIFY_SIGNAL for all of its workers,
those workers can spin indefinitely, retrying iopoll again and
again and failing each time on some allocation / wait / etc. Don't
keep spinning if the io-wq is dying.
Fixes: 561fb04a6a ("io_uring: replace workqueue usage with io-wq")
Cc: stable@vger.kernel.org
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
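
The fix gives workers a way to notice teardown. Below is a minimal sketch of that shape, not the verbatim kernel patch: the retry loop around issuing a request (io_issue_sqe() stands in for the real io_uring issue path, and the function name is illustrative) additionally polls io_wq_worker_stopped(), which is declared in the header that follows.

/*
 * Sketch of the io-wq work handler's retry loop (illustrative, not the
 * exact io_uring/io_uring.c code).
 */
static void io_wq_submit_work_sketch(struct io_kiocb *req, unsigned int issue_flags)
{
	int ret;

	do {
		ret = io_issue_sqe(req, issue_flags);
		if (ret != -EAGAIN)
			break;
		/*
		 * On task exit, TIF_NOTIFY_SIGNAL stays set for every worker,
		 * so -EAGAIN may never clear; bail out once the wq is dying
		 * rather than spinning on allocation / wait failures forever.
		 */
		if (io_wq_worker_stopped())
			break;
		cond_resched();
	} while (1);
}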
84 lines · 2.1 KiB · C
#ifndef INTERNAL_IO_WQ_H
#define INTERNAL_IO_WQ_H

#include <linux/refcount.h>
#include <linux/io_uring_types.h>

struct io_wq;

enum {
	IO_WQ_WORK_CANCEL	= 1,
	IO_WQ_WORK_HASHED	= 2,
	IO_WQ_WORK_UNBOUND	= 4,
	IO_WQ_WORK_CONCURRENT	= 16,

	IO_WQ_HASH_SHIFT	= 24,	/* upper 8 bits are used for hash key */
};

enum io_wq_cancel {
	IO_WQ_CANCEL_OK,	/* cancelled before started */
	IO_WQ_CANCEL_RUNNING,	/* found, running, and attempted cancelled */
	IO_WQ_CANCEL_NOTFOUND,	/* work not found */
};

typedef struct io_wq_work *(free_work_fn)(struct io_wq_work *);
typedef void (io_wq_work_fn)(struct io_wq_work *);

struct io_wq_hash {
	refcount_t refs;
	unsigned long map;
	struct wait_queue_head wait;
};

static inline void io_wq_put_hash(struct io_wq_hash *hash)
{
	if (refcount_dec_and_test(&hash->refs))
		kfree(hash);
}

struct io_wq_data {
	struct io_wq_hash *hash;
	struct task_struct *task;
	io_wq_work_fn *do_work;
	free_work_fn *free_work;
};

struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
void io_wq_exit_start(struct io_wq *wq);
void io_wq_put_and_exit(struct io_wq *wq);

void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
void io_wq_hash_work(struct io_wq_work *work, void *val);

int io_wq_cpu_affinity(struct io_uring_task *tctx, cpumask_var_t mask);
int io_wq_max_workers(struct io_wq *wq, int *new_count);
bool io_wq_worker_stopped(void);

static inline bool io_wq_is_hashed(struct io_wq_work *work)
{
	return work->flags & IO_WQ_WORK_HASHED;
}

typedef bool (work_cancel_fn)(struct io_wq_work *, void *);

enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
					void *data, bool cancel_all);

#if defined(CONFIG_IO_WQ)
extern void io_wq_worker_sleeping(struct task_struct *);
extern void io_wq_worker_running(struct task_struct *);
#else
static inline void io_wq_worker_sleeping(struct task_struct *tsk)
{
}
static inline void io_wq_worker_running(struct task_struct *tsk)
{
}
#endif

static inline bool io_wq_current_is_worker(void)
{
	return in_task() && (current->flags & PF_IO_WORKER) &&
		current->worker_private;
}
#endif
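
For orientation, a hedged sketch of how an in-kernel user might wire this API together. The my_do_work, my_free_work, and my_setup_wq names are hypothetical; the struct io_wq_data fields and function signatures are the ones declared above.

/* Hypothetical callbacks; only the signatures are dictated by the header. */
static void my_do_work(struct io_wq_work *work)
{
	/* Execute one queued work item. */
}

static struct io_wq_work *my_free_work(struct io_wq_work *work)
{
	/* Release the item; return a linked follow-up work item, or NULL. */
	return NULL;
}

static struct io_wq *my_setup_wq(struct io_wq_hash *hash)
{
	struct io_wq_data data = {
		.hash      = hash,
		.task      = current,
		.do_work   = my_do_work,
		.free_work = my_free_work,
	};

	/* Up to 8 bounded workers; io_wq_create() returns an ERR_PTR on failure. */
	return io_wq_create(8, &data);
}

Teardown is the two-step io_wq_exit_start() followed by io_wq_put_and_exit(); once the exit has started, io_wq_worker_stopped() reports the wq as dying to its workers, which is what the commit above leans on to stop the spinning.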