diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index a1692dad52dbff237ac1f7707c092f550d4b1d3b..0482087b7c64580d285e14c03ed3368a92dde89f 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1428,25 +1428,30 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
 		 * forever, while the workqueue is stuck trying to acquire the
 		 * very same mutex.
 		 */
-		if (wq_list_empty(&ctx->iopoll_list)) {
-			u32 tail = ctx->cached_cq_tail;
-
-			mutex_unlock(&ctx->uring_lock);
-			ret = io_run_task_work_ctx(ctx);
-			mutex_lock(&ctx->uring_lock);
-			if (ret < 0)
-				break;
-
-			/* some requests don't go through iopoll_list */
-			if (tail != ctx->cached_cq_tail ||
-			    wq_list_empty(&ctx->iopoll_list))
-				break;
-		}
-
-		if (task_work_pending(current)) {
-			mutex_unlock(&ctx->uring_lock);
-			io_run_task_work();
-			mutex_lock(&ctx->uring_lock);
+		if (wq_list_empty(&ctx->iopoll_list) ||
+		    io_task_work_pending(ctx)) {
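+			/*
+			 * Run any deferred (DEFER_TASKRUN) work first; we
+			 * are called with the uring_lock held, hence the
+			 * 'true' locked argument.
+			 */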
+			if (!llist_empty(&ctx->work_llist))
+				__io_run_local_work(ctx, true);
+			if (task_work_pending(current) ||
+			    wq_list_empty(&ctx->iopoll_list)) {
+				u32 tail = ctx->cached_cq_tail;
+
+				mutex_unlock(&ctx->uring_lock);
+				ret = io_run_task_work();
+				mutex_lock(&ctx->uring_lock);
+
+				if (ret < 0)
+					break;
+
+				/* some requests don't go through iopoll_list */
+				if (tail != ctx->cached_cq_tail ||
+				    wq_list_empty(&ctx->iopoll_list))
+					break;
+			}
 		}
 		ret = io_do_iopoll(ctx, !min);
 		if (ret < 0)
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 0f90d1dfa42b982cc4c8161d1f078f9a1997bbb3..9d89425292b72ca502457033f6e8d08283ddc096 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -236,6 +236,16 @@ static inline int io_run_task_work(void)
 	return 0;
 }
 
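+/*
+ * Returns true if any task work is pending for this ring: signal-based
+ * task_work for the current task, or deferred work on the ring's local
+ * list.
+ */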
+static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
+{
+	return test_thread_flag(TIF_NOTIFY_SIGNAL) ||
+		!llist_empty(&ctx->work_llist);
+}
+
 static inline int io_run_task_work_ctx(struct io_ring_ctx *ctx)
 {
 	int ret = 0;