author    Mark Brown <broonie@kernel.org>  2025-05-01 14:43:44 +0900
committer Mark Brown <broonie@kernel.org>  2025-05-01 14:43:44 +0900
commit    844af9911a5d1dc41f3478dc312a404b38cbc83b
tree      d6aef04723f2265235c3d7fb846e522269bacbab
parent    7f91f012c1df07af6b915d1f8cece202774bb50e
parent    cce34d113e2a592806abcdc02c7f8513775d8b20
ASoC: stm32: sai: fix kernel rate configuration
Merge series from Olivier Moysan <olivier.moysan@foss.st.com>: This patchset adds checks on kernel minimum rate requirements. This avoids potential clock rate misconfiguration when setting the kernel frequency on STM32MP2 SoCs.
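As context for the series description, a minimal sketch of such a minimum-rate guard is shown below. The STM32_SAI_MIN_KERNEL_RATE constant and the stm32_sai_set_kernel_rate() helper are hypothetical names used purely for illustration; clk_set_rate() is the real common clock framework call, but nothing here is taken from the actual patches.

#include <linux/clk.h>
#include <linux/errno.h>

/* Hypothetical floor for the SAI kernel clock; illustrative value only. */
#define STM32_SAI_MIN_KERNEL_RATE	2048000 /* Hz */

/*
 * Refuse kernel clock rates below the assumed SoC minimum before
 * programming the clock, instead of silently misconfiguring it.
 */
static int stm32_sai_set_kernel_rate(struct clk *sai_ck, unsigned long rate)
{
	if (rate < STM32_SAI_MIN_KERNEL_RATE)
		return -EINVAL;

	return clk_set_rate(sai_ck, rate);
}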
Diffstat (limited to 'io_uring/io_uring.c')
-rw-r--r--  io_uring/io_uring.c  |  24 +++++++++++++++---------
1 file changed, 15 insertions(+), 9 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index c6209fe44cb1..a2b256e96d5d 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -872,10 +872,15 @@ bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags)
 	lockdep_assert(!io_wq_current_is_worker());
 	lockdep_assert_held(&ctx->uring_lock);
 
-	__io_cq_lock(ctx);
-	posted = io_fill_cqe_aux(ctx, req->cqe.user_data, res, cflags);
+	if (!ctx->lockless_cq) {
+		spin_lock(&ctx->completion_lock);
+		posted = io_fill_cqe_aux(ctx, req->cqe.user_data, res, cflags);
+		spin_unlock(&ctx->completion_lock);
+	} else {
+		posted = io_fill_cqe_aux(ctx, req->cqe.user_data, res, cflags);
+	}
+
 	ctx->submit_state.cq_flush = true;
-	__io_cq_unlock_post(ctx);
 	return posted;
 }
 
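The hunk above open-codes what the __io_cq_lock()/__io_cq_unlock_post() pair used to do: the completion lock is taken around io_fill_cqe_aux() only when the ring is not in lockless-CQ mode, and instead of committing and waking immediately the function now just marks cq_flush so the deferred completions are flushed later from the submit path. A rough userspace sketch of that conditional-locking shape, with made-up names standing in for the io_uring internals, might look like:

#include <pthread.h>
#include <stdbool.h>

struct ring {
	pthread_mutex_t completion_lock;
	bool lockless_cq;	/* single poster: no lock needed */
	bool cq_flush;		/* deferred completions need a flush */
	unsigned int cq_tail;
};

/* Stand-in for io_fill_cqe_aux(): append one completion entry. */
static bool fill_cqe(struct ring *r)
{
	r->cq_tail++;
	return true;
}

static bool post_cqe(struct ring *r)
{
	bool posted;

	if (!r->lockless_cq) {
		pthread_mutex_lock(&r->completion_lock);
		posted = fill_cqe(r);
		pthread_mutex_unlock(&r->completion_lock);
	} else {
		posted = fill_cqe(r);
	}

	/* Defer the flush/wakeup; the caller's submit path handles it. */
	r->cq_flush = true;
	return posted;
}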
@@ -1078,21 +1083,22 @@ static __cold void __io_fallback_tw(struct llist_node *node, bool sync)
 	while (node) {
 		req = container_of(node, struct io_kiocb, io_task_work.node);
 		node = node->next;
-		if (sync && last_ctx != req->ctx) {
+		if (last_ctx != req->ctx) {
 			if (last_ctx) {
-				flush_delayed_work(&last_ctx->fallback_work);
+				if (sync)
+					flush_delayed_work(&last_ctx->fallback_work);
 				percpu_ref_put(&last_ctx->refs);
 			}
 			last_ctx = req->ctx;
 			percpu_ref_get(&last_ctx->refs);
 		}
-		if (llist_add(&req->io_task_work.node,
-			      &req->ctx->fallback_llist))
-			schedule_delayed_work(&req->ctx->fallback_work, 1);
+		if (llist_add(&req->io_task_work.node, &last_ctx->fallback_llist))
+			schedule_delayed_work(&last_ctx->fallback_work, 1);
 	}
 
 	if (last_ctx) {
-		flush_delayed_work(&last_ctx->fallback_work);
+		if (sync)
+			flush_delayed_work(&last_ctx->fallback_work);
 		percpu_ref_put(&last_ctx->refs);
 	}
 }
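The second hunk decouples the per-context bookkeeping from the sync case: a percpu reference is now taken and dropped on every context transition, requests are queued to last_ctx (which now always tracks req->ctx), and only the flush_delayed_work() call stays gated on sync. A self-contained sketch of that grouping pattern, with simplified stand-ins for the llist/workqueue machinery, is:

#include <stdbool.h>
#include <stddef.h>

struct ctx { int refs; int queued; };
struct item { struct ctx *ctx; struct item *next; };

/*
 * Walk a mixed-context list, taking/dropping one reference per context
 * transition; "flush" only when the caller asked for sync behaviour.
 */
static void fallback_walk(struct item *node, bool sync)
{
	struct ctx *last_ctx = NULL;

	while (node) {
		struct item *req = node;

		node = node->next;
		if (last_ctx != req->ctx) {
			if (last_ctx) {
				if (sync)
					last_ctx->queued = 0;	/* flush_delayed_work() */
				last_ctx->refs--;		/* percpu_ref_put() */
			}
			last_ctx = req->ctx;
			last_ctx->refs++;			/* percpu_ref_get() */
		}
		last_ctx->queued++;	/* llist_add() + schedule_delayed_work() */
	}

	if (last_ctx) {
		if (sync)
			last_ctx->queued = 0;
		last_ctx->refs--;
	}
}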