diff --git a/core/sched.cc b/core/sched.cc
index 31e4c242d520311b7c6d5d3a025c642c8bb92967..424708b7714fe44c813a1c90eed3c446f5721ac5 100644
--- a/core/sched.cc
+++ b/core/sched.cc
@@ -27,6 +27,7 @@ __thread char* percpu_base;
 extern char _percpu_start[], _percpu_end[];
 
 using namespace osv;
+using namespace osv::clock::literals;
 
 void cancel_this_thread_alarm();
 
@@ -61,7 +62,7 @@ inter_processor_interrupt wakeup_ipi{[] {}};
 // In particular, it can be seen that if a thread has been monopolizing the
 // CPU, and a long-sleeping thread wakes up (or new thread is created),
 // the new thread will get to run for ln2*tau. (ln2 is roughly 0.7).
-constexpr s64 tau = 200_ms;
+constexpr thread_runtime::duration tau = 200_ms;
 
 // "thyst" controls the hysteresis algorithm which temporarily gives a
 // running thread some extra runtime before preempting it. We subtract thyst
@@ -69,23 +70,23 @@ constexpr s64 tau = 200_ms;
 // out. In particular, it can be shown that when two cpu-busy threads at equal
 // priority compete, they will alternate at time-slices of 2*thyst; Also,
 // the distance between two preemption interrupts cannot be lower than thyst.
-constexpr s64 thyst = 2_ms;
+constexpr thread_runtime::duration thyst = 2_ms;
 
-constexpr s64 context_switch_penalty = 10_us;
+constexpr thread_runtime::duration context_switch_penalty = 10_us;
 
 constexpr float cmax = 0x1P63;
 constexpr float cinitial = 0x1P-63;
 
-static inline float exp_tau(s64 t) {
+static inline float exp_tau(thread_runtime::duration t) {
     // return expf((float)t/(float)tau);
     // Approximate e^x as much faster 1+x for x<0.001 (the error is O(x^2)).
     // Further speed up by comparing and adding integers as much as we can:
-    static constexpr int m = tau / 1000;
-    static constexpr float invtau = 1.0f / tau;
-    if (t < m && t > -m)
-        return (tau + t) * invtau;
+    static constexpr int m = tau.count() / 1000;
+    static constexpr float invtau = 1.0f / tau.count();
+    if (t.count() < m && t.count() > -m)
+        return (tau.count() + t.count()) * invtau;
     else
-        return expf(t * invtau);
+        return expf(t.count() * invtau);
 }
 
 // fastlog2() is an approximation of log2, designed for speed over accuracy
@@ -109,7 +110,7 @@ static inline float taulog(float f) {
     // where it's fine to overshoot, even significantly, the correct time
     // because a thread running a bit too much will "pay" in runtime.
     // We multiply by 1.01 to ensure overshoot, not undershoot.
-    static constexpr float tau2 = tau * 0.69314718f * 1.01;
+    static constexpr float tau2 = tau.count() * 0.69314718f * 1.01;
     return tau2 * fastlog2(f);
 }
 
@@ -157,7 +158,7 @@ cpu::cpu(unsigned _id)
 
 void cpu::init_idle_thread()
 {
-    running_since = clock::get()->time();
+    running_since = osv::clock::uptime::now();
     idle_thread = new thread([this] { idle(); }, thread::attr().pin(this));
     idle_thread->set_priority(thread::priority_idle);
 }
@@ -195,8 +196,8 @@ void cpu::reschedule_from_interrupt(bool preempt)
 
     need_reschedule = false;
     handle_incoming_wakeups();
-    auto now = clock::get()->time();
 
+    auto now = osv::clock::uptime::now();
     auto interval = now - running_since;
     running_since = now;
-    if (interval <= 0) {
+    if (interval.count() <= 0) {
@@ -414,7 +415,7 @@ void cpu::load_balance()
     notifier::fire();
     timer tmr(*thread::current());
     while (true) {
-        tmr.set(clock::get()->time() + 100_ms);
+        tmr.set(osv::clock::uptime::now() + 100_ms);
         thread::wait_until([&] { return tmr.expired(); });
         if (runqueue.empty()) {
             continue;
@@ -1201,7 +1202,7 @@ void thread_runtime::update_after_sleep()
     _renormalize_count = cpu_renormalize_count;
 }
 
-void thread_runtime::ran_for(s64 time)
+void thread_runtime::ran_for(thread_runtime::duration time)
 {
     assert (_priority > 0);
-    assert (time >= 0);
+    assert (time.count() >= 0);
@@ -1299,19 +1300,20 @@ void thread_runtime::add_context_switch_penalty()
 
 }
 
-s64 thread_runtime::time_until(runtime_t target_local_runtime) const
+thread_runtime::duration
+thread_runtime::time_until(runtime_t target_local_runtime) const
 {
     if (_priority == inf) {
-        return -1;
+        return thread_runtime::duration(-1);
     }
     if (target_local_runtime == inf) {
-        return -1;
+        return thread_runtime::duration(-1);
     }
     auto ret = taulog(runtime_t(1) +
             (target_local_runtime - _Rtt) / _priority / cpu::current()->c);
-    if (ret > (runtime_t)std::numeric_limits<s64>::max())
-        return -1;
-    return (s64) ret;
+    if (ret > thread_runtime::duration::max().count())
+        return thread_runtime::duration(-1);
+    return thread_runtime::duration((thread_runtime::duration::rep) ret);
 }
 
 }
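
To make the intent of the core/sched.cc hunks easier to follow, here is a minimal standalone sketch of the same pattern using plain std::chrono rather than OSv's <osv/clock.hh> types: scheduler constants become typed durations, and the code drops to .count() only where a value enters floating-point math (the e^x ~ 1+x fast path of exp_tau). The literals and helper below are illustrative only and are not part of the patch.

    // Standalone sketch (plain std::chrono, not <osv/clock.hh>): scheduler
    // constants as typed durations, with .count() used only where the value
    // enters floating-point math, mirroring the exp_tau() change above.
    #include <chrono>
    #include <cmath>
    #include <cstdio>

    using duration = std::chrono::nanoseconds;  // stand-in for thread_runtime::duration
    using namespace std::chrono_literals;       // 200ms, 50us, ... (std literals)

    constexpr duration tau = 200ms;             // decay constant, as in sched.cc

    // e^(t/tau), approximated by 1 + t/tau when |t/tau| < 0.001.
    static inline float exp_tau(duration t) {
        static constexpr int m = tau.count() / 1000;
        static constexpr float invtau = 1.0f / tau.count();
        if (t.count() < m && t.count() > -m)
            return (tau.count() + t.count()) * invtau;  // == 1 + t/tau
        else
            return std::exp(t.count() * invtau);
    }

    int main() {
        // 50us is well inside the fast path; 500ms takes the exp() branch.
        std::printf("%f %f\n", exp_tau(50us), exp_tau(500ms));
        return 0;
    }
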
diff --git a/include/osv/sched.hh b/include/osv/sched.hh
index 330d76566566e12b620d99338e928486c94f6427..8f3187afab6fcf60d443df943c20a61af6431b38 100644
--- a/include/osv/sched.hh
+++ b/include/osv/sched.hh
@@ -229,6 +229,7 @@ private:
 // https://docs.google.com/document/d/1W7KCxOxP-1Fy5EyF2lbJGE2WuKmu5v0suYqoHas1jRM
 class thread_runtime {
 public:
+    using duration = osv::clock::uptime::duration;
     // Get the thread's CPU-local runtime, a number used to sort the runqueue
     // on this CPU (lowest runtime will be run first). Local runtime cannot be
     // compared between different CPUs - see export_runtime().
@@ -248,7 +249,7 @@ public:
     // nanoseconds at the current priority.
     // Remember that the run queue is ordered by local runtime, so never call
     // ran_for() or hysteresis_*() on a thread which is already in the queue.
-    void ran_for(s64 time);
+    void ran_for(duration time);
     // Temporarily decrease the running thread's runtime to provide hysteresis
     // (avoid switching threads quickly after deciding on one).
     // Use hysteresis_run_start() when switching to a thread, and
@@ -260,7 +261,7 @@ public:
     // time (in nanoseconds) it would take until ran_for(time) would bring our
-    // thread to the given target. Returns -1 if the time is too long to
-    // express in s64.
+    // thread to the given target. Returns a negative duration if the time
+    // is too long to express in the duration type.
-    s64 time_until(runtime_t target_local_runtime) const;
+    duration time_until(runtime_t target_local_runtime) const;
 
     void set_priority(runtime_t priority) {
         _priority = priority;
@@ -521,7 +522,7 @@ private:
     std::atomic<bool> _interrupted;
     std::function<void ()> _cleanup;
     std::vector<std::unique_ptr<char[]>> _tls;
-    u64 _total_cpu_time = 0;
+    thread_runtime::duration _total_cpu_time {0};
     void destroy();
     friend class thread_ref_guard;
     friend void thread_main_c(thread* t);
@@ -536,7 +537,7 @@ private:
     friend void init(std::function<void ()> cont);
 public:
     std::atomic<thread *> _joiner;
-    u64 thread_clock() { return _total_cpu_time; }
+    thread_runtime::duration thread_clock() { return _total_cpu_time; }
     bi::set_member_hook<> _runqueue_link;
     // see cpu class
     lockless_queue_link<thread> _wakeup_link;
@@ -614,7 +615,7 @@ struct cpu : private timer_base::client {
     cpu_set incoming_wakeups_mask;
     incoming_wakeup_queue* incoming_wakeups;
     thread* terminating_thread;
-    s64 running_since;
+    osv::clock::uptime::time_point running_since;
     char* percpu_base;
     static cpu* current();
     void init_on_cpu();
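
As a rough illustration of how the duration-typed declarations in sched.hh are meant to be consumed, the sketch below shows a hypothetical caller handling time_until()'s negative-duration sentinel and converting a duration-typed per-thread clock for reporting. The function names here are invented for the example and do not exist in OSv.

    // Hypothetical caller of the duration-typed API declared above; the
    // helper names are invented for illustration and are not OSv code.
    #include <chrono>
    #include <cstdio>

    using duration = std::chrono::nanoseconds;  // stand-in for thread_runtime::duration

    // time_until() returns duration(-1) when the target runtime is effectively
    // unreachable; a caller would skip arming a preemption timer in that case.
    void arm_preemption_timer(duration delay) {
        if (delay.count() < 0) {
            return;  // "never": nothing to arm
        }
        std::printf("arming timer in %lld ns\n", (long long)delay.count());
    }

    int main() {
        arm_preemption_timer(duration(-1));                  // sentinel: skipped
        arm_preemption_timer(std::chrono::milliseconds(2));  // a 2 ms slice

        // A duration-typed per-thread clock (cf. thread_clock()) converts
        // cleanly for reporting:
        duration total_cpu_time{1500000};  // 1.5 ms of CPU time, in ns
        auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(total_cpu_time);
        std::printf("total: %lld ms\n", (long long)ms.count());
        return 0;
    }
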
diff --git a/libc/time.cc b/libc/time.cc
index 87b9fbbe5651c66e0e4da1923b1b66133863bc95..f5c718c98de0f1ab7bf088998a1e4d0912beee23 100644
--- a/libc/time.cc
+++ b/libc/time.cc
@@ -44,14 +44,6 @@ int usleep(useconds_t usec)
     return 0;
 }
 
-// Temporary until all clock primitives functions on std::chrono
-static void fill_ts(s64 time, struct timespec *ts)
-{
-    ts->tv_sec  =  time / 1000000000;
-    ts->tv_nsec =  time % 1000000000;
-}
-
-
 // Convenient inline function for converting std::chrono::duration,
 // of a clock with any period, into the classic Posix "struct timespec":
 template <class Rep, class Period>