author     Miquel Raynal <miquel.raynal@bootlin.com>   2025-01-24 10:52:35 +0100
committer  Miquel Raynal <miquel.raynal@bootlin.com>   2025-01-24 10:52:35 +0100
commit     0ddeb4fe9d3b501c2c6a3522325d88ee166e02ea
tree       c7eb9bcfdd32baf6ef11e330c0f72807e3c0116d /kernel/workqueue.c
parent     b44574c7da71e03792de51daf4d5fa5435a64a54
parent     98b34d52004b5a35db1c1b2c2133f52d67bede0f
Merge tag 'nand/for-6.14' into mtd/next
* Raw NAND changes
A new controller driver, from Nuvoton, has been merged.
Bastien Curutchet has contributed a series improving the Davinci
controller driver, both in terms of code organization and performance.
The binding has also been converted to YAML, received a new OOB layout,
and now supports on-die ECC engines.
The Qualcomm controller driver has been deeply cleaned up in order to
extract parts of the code into a file shared with the Qualcomm SPI memory
controller.
Aside from these main changes, the Cadence binding has been converted to
YAML, the brcmnand controller driver has received a small fix, and a few
more minor changes have also made their way in.
* SPI NAND changes
The SPI NAND subsystem has seen a great improvement with the advent of
DTR operations (DDR operations, which may be extended to the address
cycles); a rough sketch of such an operation follows this list. The first
vendor driver to benefit from these improvements is the Winbond driver.
A new manufacturer driver has been added for SkyHigh devices, which
brings a new constraint for the core: it is impossible to disable the
on-die ECC engine.
A Foresee device is also now supported.
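
As a rough, hypothetical illustration of what a DTR read could look like when
described with the generic spi-mem API (the opcode, phase lengths and helper
name below are made up for the example and are not taken from the Winbond
driver merged here), the command phase stays single rate while the address,
dummy and data cycles are marked as double transfer rate:

	#include <linux/errno.h>
	#include <linux/spi/spi-mem.h>

	/* Hypothetical DTR page-read helper, for illustration only. */
	static int example_dtr_read(struct spi_mem *mem, u32 addr,
				    void *buf, size_t len)
	{
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(0x0d, 1),
				   SPI_MEM_OP_ADDR(3, addr, 1),
				   SPI_MEM_OP_DUMMY(1, 1),
				   SPI_MEM_OP_DATA_IN(len, buf, 1));

		/* Run the address, dummy and data phases at double rate. */
		op.addr.dtr = true;
		op.dummy.dtr = true;
		op.data.dtr = true;

		/* Let the controller driver veto operations it cannot do. */
		if (!spi_mem_supports_op(mem, &op))
			return -EOPNOTSUPP;

		return spi_mem_exec_op(mem, &op);
	}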
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	23
1 file changed, 14 insertions, 9 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 8b07576814a5..f7d8fc204579 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3680,23 +3680,27 @@ void workqueue_softirq_dead(unsigned int cpu)
  * check_flush_dependency - check for flush dependency sanity
  * @target_wq: workqueue being flushed
  * @target_work: work item being flushed (NULL for workqueue flushes)
+ * @from_cancel: are we called from the work cancel path
  *
  * %current is trying to flush the whole @target_wq or @target_work on it.
- * If @target_wq doesn't have %WQ_MEM_RECLAIM, verify that %current is not
- * reclaiming memory or running on a workqueue which doesn't have
- * %WQ_MEM_RECLAIM as that can break forward-progress guarantee leading to
- * a deadlock.
+ * If this is not the cancel path (which implies work being flushed is either
+ * already running, or will not be at all), check if @target_wq doesn't have
+ * %WQ_MEM_RECLAIM and verify that %current is not reclaiming memory or running
+ * on a workqueue which doesn't have %WQ_MEM_RECLAIM as that can break forward-
+ * progress guarantee leading to a deadlock.
  */
 static void check_flush_dependency(struct workqueue_struct *target_wq,
-				   struct work_struct *target_work)
+				   struct work_struct *target_work,
+				   bool from_cancel)
 {
-	work_func_t target_func = target_work ? target_work->func : NULL;
+	work_func_t target_func;
 	struct worker *worker;
 
-	if (target_wq->flags & WQ_MEM_RECLAIM)
+	if (from_cancel || target_wq->flags & WQ_MEM_RECLAIM)
 		return;
 
 	worker = current_wq_worker();
+	target_func = target_work ? target_work->func : NULL;
 
 	WARN_ONCE(current->flags & PF_MEMALLOC,
 		  "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%ps",
@@ -3980,7 +3984,7 @@ void __flush_workqueue(struct workqueue_struct *wq)
 		list_add_tail(&this_flusher.list, &wq->flusher_overflow);
 	}
 
-	check_flush_dependency(wq, NULL);
+	check_flush_dependency(wq, NULL, false);
 
 	mutex_unlock(&wq->mutex);
 
@@ -4155,7 +4159,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
 	}
 
 	wq = pwq->wq;
-	check_flush_dependency(wq, work);
+	check_flush_dependency(wq, work, from_cancel);
 
 	insert_wq_barrier(pwq, barr, work, worker);
 	raw_spin_unlock_irq(&pool->lock);
@@ -5641,6 +5645,7 @@ static void wq_adjust_max_active(struct workqueue_struct *wq)
 	} while (activated);
 }
 
+__printf(1, 0)
 static struct workqueue_struct *__alloc_workqueue(const char *fmt,
 						  unsigned int flags,
 						  int max_active, va_list args)
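
For context, here is a minimal, hypothetical out-of-tree module sketch (none of
these names or workqueues appear in the commit; they only illustrate the rule
check_flush_dependency() enforces): flushing a !WQ_MEM_RECLAIM work item from a
WQ_MEM_RECLAIM worker triggers the warning, while cancelling the same work item
is now exempt because the cancelled work is either already running or will
never run at all.

	/* flushdep_demo.c - hypothetical illustration, not part of this commit */
	#include <linux/module.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *reclaim_wq;	/* WQ_MEM_RECLAIM */
	static struct workqueue_struct *plain_wq;	/* no WQ_MEM_RECLAIM */

	static void plain_fn(struct work_struct *work)
	{
		/* Some non-critical background work. */
	}
	static DECLARE_WORK(plain_work, plain_fn);

	static void reclaim_fn(struct work_struct *work)
	{
		/*
		 * Flushing a !WQ_MEM_RECLAIM work item from a WQ_MEM_RECLAIM
		 * worker can deadlock under memory pressure, so
		 * check_flush_dependency() emits a WARN_ONCE here.
		 */
		flush_work(&plain_work);

		/*
		 * Cancelling is different: the work is either already running
		 * or will never run, so with from_cancel == true the check is
		 * skipped and no warning is emitted.
		 */
		cancel_work_sync(&plain_work);
	}
	static DECLARE_WORK(reclaim_work, reclaim_fn);

	static int __init flushdep_demo_init(void)
	{
		reclaim_wq = alloc_workqueue("flushdep_reclaim", WQ_MEM_RECLAIM, 0);
		plain_wq = alloc_workqueue("flushdep_plain", 0, 0);
		if (!reclaim_wq || !plain_wq)
			goto err;

		queue_work(plain_wq, &plain_work);
		queue_work(reclaim_wq, &reclaim_work);
		return 0;

	err:
		if (plain_wq)
			destroy_workqueue(plain_wq);
		if (reclaim_wq)
			destroy_workqueue(reclaim_wq);
		return -ENOMEM;
	}

	static void __exit flushdep_demo_exit(void)
	{
		cancel_work_sync(&reclaim_work);
		cancel_work_sync(&plain_work);
		destroy_workqueue(plain_wq);
		destroy_workqueue(reclaim_wq);
	}

	module_init(flushdep_demo_init);
	module_exit(flushdep_demo_exit);
	MODULE_LICENSE("GPL");
	MODULE_DESCRIPTION("Sketch of the flush-vs-cancel dependency check");

Before this change, the cancel_work_sync() call in reclaim_fn() above would
have produced the same WARN_ONCE as the flush_work() call, even though
cancellation cannot create the flush-dependency deadlock the check is meant to
catch.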