Diffstat (limited to 'fs/bcachefs/clock.c')
-rw-r--r--  fs/bcachefs/clock.c  138
1 file changed, 63 insertions(+), 75 deletions(-)
diff --git a/fs/bcachefs/clock.c b/fs/bcachefs/clock.c
index 363644451106..8e9264b5a84e 100644
--- a/fs/bcachefs/clock.c
+++ b/fs/bcachefs/clock.c
@@ -6,44 +6,45 @@
#include <linux/kthread.h>
#include <linux/preempt.h>
-static inline long io_timer_cmp(io_timer_heap *h,
- struct io_timer *l,
- struct io_timer *r)
+static inline bool io_timer_cmp(const void *l, const void *r, void __always_unused *args)
{
- return l->expire - r->expire;
+ struct io_timer **_l = (struct io_timer **)l;
+ struct io_timer **_r = (struct io_timer **)r;
+
+ return (*_l)->expire < (*_r)->expire;
}
+static const struct min_heap_callbacks callbacks = {
+ .less = io_timer_cmp,
+ .swp = NULL,
+};
+
void bch2_io_timer_add(struct io_clock *clock, struct io_timer *timer)
{
- size_t i;
-
spin_lock(&clock->timer_lock);
- if (time_after_eq((unsigned long) atomic64_read(&clock->now),
- timer->expire)) {
+ if (time_after_eq64((u64) atomic64_read(&clock->now), timer->expire)) {
spin_unlock(&clock->timer_lock);
timer->fn(timer);
return;
}
- for (i = 0; i < clock->timers.used; i++)
+ for (size_t i = 0; i < clock->timers.nr; i++)
if (clock->timers.data[i] == timer)
goto out;
- BUG_ON(!heap_add(&clock->timers, timer, io_timer_cmp, NULL));
+ BUG_ON(!min_heap_push(&clock->timers, &timer, &callbacks, NULL));
out:
spin_unlock(&clock->timer_lock);
}
void bch2_io_timer_del(struct io_clock *clock, struct io_timer *timer)
{
- size_t i;
-
spin_lock(&clock->timer_lock);
- for (i = 0; i < clock->timers.used; i++)
+ for (size_t i = 0; i < clock->timers.nr; i++)
if (clock->timers.data[i] == timer) {
- heap_del(&clock->timers, i, io_timer_cmp, NULL);
+ min_heap_del(&clock->timers, i, &callbacks, NULL);
break;
}
@@ -52,7 +53,6 @@ void bch2_io_timer_del(struct io_clock *clock, struct io_timer *timer)
struct io_clock_wait {
struct io_timer io_timer;
- struct timer_list cpu_timer;
struct task_struct *task;
int expired;
};
@@ -66,105 +66,93 @@ static void io_clock_wait_fn(struct io_timer *timer)
wake_up_process(wait->task);
}
-static void io_clock_cpu_timeout(struct timer_list *timer)
-{
- struct io_clock_wait *wait = container_of(timer,
- struct io_clock_wait, cpu_timer);
-
- wait->expired = 1;
- wake_up_process(wait->task);
-}
-
-void bch2_io_clock_schedule_timeout(struct io_clock *clock, unsigned long until)
+void bch2_io_clock_schedule_timeout(struct io_clock *clock, u64 until)
{
- struct io_clock_wait wait;
+ struct io_clock_wait wait = {
+ .io_timer.expire = until,
+ .io_timer.fn = io_clock_wait_fn,
+ .io_timer.fn2 = (void *) _RET_IP_,
+ .task = current,
+ };
- /* XXX: calculate sleep time rigorously */
- wait.io_timer.expire = until;
- wait.io_timer.fn = io_clock_wait_fn;
- wait.task = current;
- wait.expired = 0;
bch2_io_timer_add(clock, &wait.io_timer);
-
schedule();
-
bch2_io_timer_del(clock, &wait.io_timer);
}
-void bch2_kthread_io_clock_wait(struct io_clock *clock,
- unsigned long io_until,
- unsigned long cpu_timeout)
+unsigned long bch2_kthread_io_clock_wait_once(struct io_clock *clock,
+ u64 io_until, unsigned long cpu_timeout)
{
bool kthread = (current->flags & PF_KTHREAD) != 0;
- struct io_clock_wait wait;
+ struct io_clock_wait wait = {
+ .io_timer.expire = io_until,
+ .io_timer.fn = io_clock_wait_fn,
+ .io_timer.fn2 = (void *) _RET_IP_,
+ .task = current,
+ };
- wait.io_timer.expire = io_until;
- wait.io_timer.fn = io_clock_wait_fn;
- wait.task = current;
- wait.expired = 0;
bch2_io_timer_add(clock, &wait.io_timer);
- timer_setup_on_stack(&wait.cpu_timer, io_clock_cpu_timeout, 0);
-
- if (cpu_timeout != MAX_SCHEDULE_TIMEOUT)
- mod_timer(&wait.cpu_timer, cpu_timeout + jiffies);
-
- do {
- set_current_state(TASK_INTERRUPTIBLE);
- if (kthread && kthread_should_stop())
- break;
-
- if (wait.expired)
- break;
-
- schedule();
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (!(kthread && kthread_should_stop())) {
+ cpu_timeout = schedule_timeout(cpu_timeout);
try_to_freeze();
- } while (0);
+ }
__set_current_state(TASK_RUNNING);
- del_timer_sync(&wait.cpu_timer);
- destroy_timer_on_stack(&wait.cpu_timer);
bch2_io_timer_del(clock, &wait.io_timer);
+ return cpu_timeout;
}
-static struct io_timer *get_expired_timer(struct io_clock *clock,
- unsigned long now)
+void bch2_kthread_io_clock_wait(struct io_clock *clock,
+ u64 io_until, unsigned long cpu_timeout)
{
- struct io_timer *ret = NULL;
+ bool kthread = (current->flags & PF_KTHREAD) != 0;
- spin_lock(&clock->timer_lock);
+ while (!(kthread && kthread_should_stop()) &&
+ cpu_timeout &&
+ atomic64_read(&clock->now) < io_until)
+ cpu_timeout = bch2_kthread_io_clock_wait_once(clock, io_until, cpu_timeout);
+}
- if (clock->timers.used &&
- time_after_eq(now, clock->timers.data[0]->expire))
- heap_pop(&clock->timers, ret, io_timer_cmp, NULL);
+static struct io_timer *get_expired_timer(struct io_clock *clock, u64 now)
+{
+ struct io_timer *ret = NULL;
- spin_unlock(&clock->timer_lock);
+ if (clock->timers.nr &&
+ time_after_eq64(now, clock->timers.data[0]->expire)) {
+ ret = *min_heap_peek(&clock->timers);
+ min_heap_pop(&clock->timers, &callbacks, NULL);
+ }
return ret;
}
-void __bch2_increment_clock(struct io_clock *clock, unsigned sectors)
+void __bch2_increment_clock(struct io_clock *clock, u64 sectors)
{
struct io_timer *timer;
- unsigned long now = atomic64_add_return(sectors, &clock->now);
+ u64 now = atomic64_add_return(sectors, &clock->now);
+ spin_lock(&clock->timer_lock);
while ((timer = get_expired_timer(clock, now)))
timer->fn(timer);
+ spin_unlock(&clock->timer_lock);
}
void bch2_io_timers_to_text(struct printbuf *out, struct io_clock *clock)
{
- unsigned long now;
- unsigned i;
-
out->atomic++;
spin_lock(&clock->timer_lock);
- now = atomic64_read(&clock->now);
+ u64 now = atomic64_read(&clock->now);
+
+ printbuf_tabstop_push(out, 40);
+ prt_printf(out, "current time:\t%llu\n", now);
- for (i = 0; i < clock->timers.used; i++)
- prt_printf(out, "%ps:\t%li\n",
+ for (unsigned i = 0; i < clock->timers.nr; i++)
+ prt_printf(out, "%ps %ps:\t%llu\n",
clock->timers.data[i]->fn,
- clock->timers.data[i]->expire - now);
+ clock->timers.data[i]->fn2,
+ clock->timers.data[i]->expire);
spin_unlock(&clock->timer_lock);
--out->atomic;
}
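
Side note (not part of the patch): a minimal sketch of the generic linux/min_heap.h pattern this diff switches to, using the same calls seen above (a min_heap_callbacks comparator plus min_heap_push()/min_heap_peek()/min_heap_pop()). The sample_timer struct, sample_timer_heap type, and sketch_next_expiry() helper are made-up names for illustration; bcachefs declares its real io_timer_heap type elsewhere (clock_types.h), outside this diff.

	/*
	 * Illustrative sketch only, not part of the patch: the min_heap API
	 * used by the converted clock.c above, applied to a simplified
	 * timer type.  Names here are hypothetical.
	 */
	#include <linux/min_heap.h>
	#include <linux/types.h>

	struct sample_timer {
		u64 expire;
	};

	/* heap of timer pointers, smallest ->expire at the root */
	typedef DEFINE_MIN_HEAP(struct sample_timer *, sample_timer_heap) sample_timer_heap;

	static bool sample_timer_less(const void *l, const void *r, void __always_unused *args)
	{
		struct sample_timer **_l = (struct sample_timer **)l;
		struct sample_timer **_r = (struct sample_timer **)r;

		return (*_l)->expire < (*_r)->expire;
	}

	static const struct min_heap_callbacks sample_callbacks = {
		.less	= sample_timer_less,
		.swp	= NULL,		/* NULL: use the default element swap */
	};

	/* Push a timer, then return (and pop) the soonest-expiring one. */
	static struct sample_timer *sketch_next_expiry(sample_timer_heap *heap,
						       struct sample_timer *t)
	{
		/* min_heap_push() takes a pointer to the element, i.e. &t here */
		if (!min_heap_push(heap, &t, &sample_callbacks, NULL))
			return NULL;	/* heap full */

		struct sample_timer *next = *min_heap_peek(heap);

		min_heap_pop(heap, &sample_callbacks, NULL);
		return next;
	}

Note that the comparator receives pointers to heap slots, so for a heap of pointers it dereferences twice; that is why io_timer_cmp() in the patch casts its arguments to struct io_timer ** before comparing ->expire.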