From dbbc32efd7f2edcb81ecf8f6b9180db35d5bce29 Mon Sep 17 00:00:00 2001 From: RobotMan2412 Date: Wed, 28 Aug 2024 17:10:50 +0200 Subject: [PATCH] Timer test --- common/badgelib | 2 +- kernel/CMakeLists.txt | 1 - kernel/cpu/riscv/include/cpu/riscv_pmp.h | 1 + kernel/cpu/riscv/src/scheduler.c | 4 +- kernel/include/badgelib/hwtimer.h | 49 +++ kernel/include/badgelib/time.h | 49 +-- kernel/include/scheduler/isr.h | 9 - kernel/include/scheduler/scheduler.h | 34 ++- kernel/include/scheduler/types.h | 7 +- kernel/include/smp.h | 2 + kernel/port/esp32c6/include/port/hardware.h | 20 -- kernel/port/esp32c6/include/port/interrupt.h | 4 +- kernel/port/esp32c6/src/interrupt.c | 27 +- kernel/port/esp32c6/src/smp.c | 6 + kernel/port/esp32p4/include/port/hardware.h | 8 - kernel/port/esp32p4/include/port/interrupt.h | 6 +- kernel/port/esp32p4/src/interrupt.c | 53 ++-- kernel/port/esp32p4/src/smp.c | 8 +- kernel/port/esp_common/CMakeLists.txt | 1 + kernel/port/esp_common/src/hwtimer.c | 213 +++++++++++++ kernel/port/esp_common/src/i2c.c | 2 +- kernel/port/esp_common/src/time.c | 306 ++++++++----------- kernel/src/badgelib/mutex.c | 6 +- kernel/src/freestanding/int_routines.c | 37 +++ kernel/src/freestanding/string.c | 4 +- kernel/src/housekeeping.c | 2 +- kernel/src/main.c | 81 ++--- kernel/src/process/process.c | 2 +- kernel/src/process/sighandler.c | 4 +- kernel/src/process/syscall_impl.c | 2 +- kernel/src/scheduler/scheduler.c | 245 ++++++++++----- kernel/src/scheduler/syscall_impl.c | 9 - 32 files changed, 746 insertions(+), 458 deletions(-) create mode 100644 kernel/include/badgelib/hwtimer.h create mode 100644 kernel/port/esp_common/src/hwtimer.c delete mode 100644 kernel/src/scheduler/syscall_impl.c diff --git a/common/badgelib b/common/badgelib index a35dc97..73b26a5 160000 --- a/common/badgelib +++ b/common/badgelib @@ -1 +1 @@ -Subproject commit a35dc97036c5f634ff075df494485dc0c565a983 +Subproject commit 73b26a5c86c91171770047db00f627714c5493b2 diff --git 
a/kernel/CMakeLists.txt b/kernel/CMakeLists.txt index 09f697c..959ab17 100644 --- a/kernel/CMakeLists.txt +++ b/kernel/CMakeLists.txt @@ -93,7 +93,6 @@ add_executable(${target} ${CMAKE_CURRENT_LIST_DIR}/src/process/syscall_util.c ${CMAKE_CURRENT_LIST_DIR}/src/scheduler/scheduler.c - ${CMAKE_CURRENT_LIST_DIR}/src/scheduler/syscall_impl.c ${CMAKE_CURRENT_LIST_DIR}/src/housekeeping.c ${CMAKE_CURRENT_LIST_DIR}/src/main.c diff --git a/kernel/cpu/riscv/include/cpu/riscv_pmp.h b/kernel/cpu/riscv/include/cpu/riscv_pmp.h index 7022ee2..e80f052 100644 --- a/kernel/cpu/riscv/include/cpu/riscv_pmp.h +++ b/kernel/cpu/riscv/include/cpu/riscv_pmp.h @@ -6,6 +6,7 @@ // This file expects RISCV_PMP_REGION_COUNT to be defined by "port/hardware.h". #include "assertions.h" +#include "attributes.h" #include "meta.h" #include "port/hardware.h" #include "port/hardware_allocation.h" diff --git a/kernel/cpu/riscv/src/scheduler.c b/kernel/cpu/riscv/src/scheduler.c index e678987..e6dd38d 100644 --- a/kernel/cpu/riscv/src/scheduler.c +++ b/kernel/cpu/riscv/src/scheduler.c @@ -59,7 +59,7 @@ void sched_raise_from_isr(sched_thread_t *thread, bool syscall, void *entry_poin // Requests the scheduler to prepare a switch from kernel to userland for a user thread. // Resumes the userland thread where it left off. void sched_lower_from_isr() { - sched_thread_t *thread = sched_current_thread_unsafe(); + sched_thread_t *thread = sched_current_thread(); process_t *process = thread->process; assert_dev_drop(!(thread->flags & THREAD_KERNEL) && (thread->flags & THREAD_PRIVILEGED)); atomic_fetch_and(&thread->flags, ~THREAD_PRIVILEGED); @@ -144,7 +144,7 @@ bool sched_signal_enter(size_t handler_vaddr, size_t return_vaddr, int signum) { // Exits a signal handler in the current thread. // Returns false if the process cannot be resumed. 
bool sched_signal_exit() { - sched_thread_t *thread = sched_current_thread_unsafe(); + sched_thread_t *thread = sched_current_thread(); if (!(atomic_fetch_and(&thread->flags, ~THREAD_SIGHANDLER) & THREAD_SIGHANDLER)) { return false; } diff --git a/kernel/include/badgelib/hwtimer.h b/kernel/include/badgelib/hwtimer.h new file mode 100644 index 0000000..43ee020 --- /dev/null +++ b/kernel/include/badgelib/hwtimer.h @@ -0,0 +1,49 @@ + +// SPDX-License-Identifier: MIT + +#pragma once + +#include "attributes.h" + +#include +#include +#include + +#define FREQUENCY_HZ_MIN INT32_MIN +#define FREQUENCY_HZ_MAX INT32_MAX +#define TIMER_VALUE_MIN INT64_MIN +#define TIMER_VALUE_MAX INT64_MAX + +typedef int32_t frequency_hz_t; +typedef int64_t timer_value_t; + +// Initialise timer and watchdog subsystem. +void timer_init(); +// Get the number of hardware timers. +int timer_count() CONST; +// Get the IRQ number for a timer. +int timer_get_irq(int timerno); +// Set the counting frequency of a hardware timer. +void timer_set_freq(int timerno, frequency_hz_t frequency); + +// Configure and enable timer alarm. +void timer_alarm_config(int timerno, timer_value_t threshold, bool reset_on_alarm); +// Disable timer alarm. +void timer_alarm_disable(int timerno); +// Get the current value of timer. +timer_value_t timer_value_get(int timerno); +// Set the current value of timer. +void timer_value_set(int timerno, timer_value_t value); +// Enable the timer counting. +void timer_start(int timerno); +// Disable the timer counting. +void timer_stop(int timerno); + +// Check whether timer has interrupts enabled. +bool timer_int_enabled(int timerno); +// Enable / disable timer interrupts. +void timer_int_enable(int timerno, bool enable); +// Check whether timer interrupt had fired. +bool timer_int_pending(int timerno); +// Clear timer interrupt. 
+void timer_int_clear(int timerno); diff --git a/kernel/include/badgelib/time.h b/kernel/include/badgelib/time.h index 07b2ed5..2dbd0f0 100644 --- a/kernel/include/badgelib/time.h +++ b/kernel/include/badgelib/time.h @@ -3,57 +3,24 @@ #pragma once -#include "attributes.h" -#include "port/hardware_allocation.h" - -#include #include #include #include -#define FREQUENCY_HZ_MIN INT32_MIN -#define FREQUENCY_HZ_MAX INT32_MAX #define TIMESTAMP_US_MIN INT64_MIN #define TIMESTAMP_US_MAX INT64_MAX -#define TIMER_VALUE_MIN INT64_MIN -#define TIMER_VALUE_MAX INT64_MAX -typedef int32_t frequency_hz_t; typedef int64_t timestamp_us_t; -typedef int64_t timer_value_t; + +typedef void (*timer_fn_t)(void *cookie); // Initialise timer and watchdog subsystem. -void time_init(); +void time_init(); // Sets the alarm time when the next task switch should occur. -void time_set_next_task_switch(timestamp_us_t timestamp); - -// Get the number of hardware timers. -#define timer_count() (2) -// Set the counting frequency of a hardware timer. -void timer_set_freq(int timerno, frequency_hz_t frequency); - - -// Configure and enable timer alarm. -void timer_alarm_config(int timerno, timer_value_t threshold, bool reset_on_alarm); -// Disable timer alarm. -void timer_alarm_disable(int timerno); -// Get the current value of timer. -timer_value_t timer_value_get(int timerno); -// Set the current value of timer. -void timer_value_set(int timerno, timer_value_t value); -// Enable the timer counting. -void timer_start(int timerno); -// Disable the timer counting. -void timer_stop(int timerno); - -// Check whether timer has interrupts enabled. -bool timer_int_enabled(int timerno); -// Enable / disable timer interrupts. -void timer_int_enable(int timerno, bool enable); -// Check whether timer interrupt had fired. -bool timer_int_pending(int timerno); -// Clear timer interrupt. 
-void timer_int_clear(int timerno); - +void time_set_next_task_switch(timestamp_us_t timestamp); +// Attach a task to a timer interrupt. +int64_t time_add_async_task(timestamp_us_t timestamp, timer_fn_t task, void *cookie); +// Cancel a task created with `time_add_async_task`. +bool time_cancel_async_task(int64_t taskno); // Get current time in microseconds. timestamp_us_t time_us(); diff --git a/kernel/include/scheduler/isr.h b/kernel/include/scheduler/isr.h index 3b63f2c..fcb28cb 100644 --- a/kernel/include/scheduler/isr.h +++ b/kernel/include/scheduler/isr.h @@ -3,14 +3,5 @@ #pragma once -#include "attributes.h" -#include "badge_err.h" - -#include -#include -#include - - - // Requests the scheduler to prepare a switch from inside an interrupt routine. void sched_request_switch_from_isr(); diff --git a/kernel/include/scheduler/scheduler.h b/kernel/include/scheduler/scheduler.h index 5c4eaf5..5149b3a 100644 --- a/kernel/include/scheduler/scheduler.h +++ b/kernel/include/scheduler/scheduler.h @@ -54,6 +54,14 @@ void sched_exec() NORETURN; // Exit the scheduler and subsequenty shut down the CPU. void sched_exit(int cpu); + +// Returns the current thread ID. +tid_t sched_current_tid(); +// Returns the current thread struct. +sched_thread_t *sched_current_thread(); +// Returns the associated thread struct. +sched_thread_t *sched_get_thread(tid_t thread); + // Create a new suspended userland thread. // If `kernel_stack_bottom` is NULL, the scheduler will allocate a stack. tid_t thread_new_user( @@ -65,26 +73,28 @@ tid_t thread_new_kernel(badge_err_t *ec, char const *name, sched_entry_t entry_p // Do not wait for thread to be joined; clean up immediately. void thread_detach(badge_err_t *ec, tid_t thread); -// Pauses execution of a user thread. -void thread_suspend(badge_err_t *ec, tid_t thread); +// Explicitly yield to the scheduler; the scheduler may run other threads without waiting for preemption. +// Use this function to reduce the CPU time used by a thread. 
+void thread_yield(void); +// Sleep for an amount of microseconds. +void thread_sleep(timestamp_us_t delay); + +// Pauses execution of a thread. +// If `suspend_kernel` is false, the thread won't be suspended until it enters user mode. +void thread_suspend(badge_err_t *ec, tid_t thread, bool suspend_kernel); // Resumes a previously suspended thread or starts it. void thread_resume(badge_err_t *ec, tid_t thread); // Resumes a previously suspended thread or starts it. // Immediately schedules the thread instead of putting it in the queue first. void thread_resume_now(badge_err_t *ec, tid_t thread); +// Resumes a previously suspended thread or starts it from an ISR. +void thread_resume_from_isr(badge_err_t *ec, tid_t thread); +// Resumes a previously suspended thread or starts it from an ISR. +// Immediately schedules the thread instead of putting it in the queue first. +void thread_resume_now_from_isr(badge_err_t *ec, tid_t thread); // Returns whether a thread is running; it is neither suspended nor has it exited. bool thread_is_running(badge_err_t *ec, tid_t thread); -// Returns the current thread ID. -tid_t sched_current_tid(); -// Returns the current thread struct. -sched_thread_t *sched_current_thread(); -// Returns the associated thread struct. -sched_thread_t *sched_get_thread(tid_t thread); - -// Explicitly yield to the scheduler; the scheduler may run other threads without waiting for preemption. -// Use this function to reduce the CPU time used by a thread. -void sched_yield(void); // Exits the current thread. // If the thread is detached, resources will be cleaned up. void thread_exit(int code) NORETURN; diff --git a/kernel/include/scheduler/types.h b/kernel/include/scheduler/types.h index 97b6ce6..28d3488 100644 --- a/kernel/include/scheduler/types.h +++ b/kernel/include/scheduler/types.h @@ -42,8 +42,10 @@ #define THREAD_STARTNOW (1 << 6) // The thread should be suspended. 
#define THREAD_SUSPENDING (1 << 7) +// The thread should be suspended even if it is a kernel thread. +#define THREAD_KSUSPEND (1 << 8) // The thread has exited and is awaiting join. -#define THREAD_EXITED (1 << 8) +#define THREAD_EXITED (1 << 9) // The scheduler is starting on this CPU. #define SCHED_STARTING (1 << 0) @@ -107,6 +109,3 @@ struct sched_cpulocal_t { // Idle thread. sched_thread_t idle_thread; }; - -// Returns the current thread without using a critical section. -sched_thread_t *sched_current_thread_unsafe(); diff --git a/kernel/include/smp.h b/kernel/include/smp.h index c543465..879176f 100644 --- a/kernel/include/smp.h +++ b/kernel/include/smp.h @@ -29,3 +29,5 @@ bool smp_poweroff(); bool smp_pause(); // Resume another CPU, if supported. bool smp_resume(int cpu); +// Whether a CPU can be powered off at runtime. +bool smp_can_poweroff(int cpu); diff --git a/kernel/port/esp32c6/include/port/hardware.h b/kernel/port/esp32c6/include/port/hardware.h index 5e1d521..b3da9db 100644 --- a/kernel/port/esp32c6/include/port/hardware.h +++ b/kernel/port/esp32c6/include/port/hardware.h @@ -27,26 +27,6 @@ #define RISCV_VT_TCAUSE_MASK 31 -/* ==== SOC INFO ==== */ - -// Number of timer groups. -#define ESP_TIMG_COUNT 2 -// Number of timers per timer group. -#define ESP_TIMG_TIMER_COUNT 1 -// Speed of RC_FAST_CLK in hertz. -#define ESP_FREQ_RC_FAST_CLK 20000000 -// Speed of XTAL_CLK in hertz. -#define ESP_FREQ_XTAL_CLK 40000000 -// Speed of RC32K_CLK in hertz. -#define ESP_FREQ_RC32K_CLK 32000 -// Speed of XTAL32K_CLK in hertz. -#define ESP_FREQ_XTAL32K_CLK 32000 -// Speed of RC_SLOW_CLK in hertz. -#define ESP_FREQ_RC_SLOW_CLK 150000 -// Speed of OSC_SLOW_CLK in hertz. -#define ESP_FREQ_OSC_SLOW_CLK 32000 - - /* ==== PERIPHERAL MAP ==== */ // Base address of peripherals. 
diff --git a/kernel/port/esp32c6/include/port/interrupt.h b/kernel/port/esp32c6/include/port/interrupt.h index 7c6c25e..b7de3f8 100644 --- a/kernel/port/esp32c6/include/port/interrupt.h +++ b/kernel/port/esp32c6/include/port/interrupt.h @@ -9,5 +9,5 @@ #define EXT_IRQ_COUNT ETS_MAX_INTR_SOURCE -// Set the external interrupt signal for CPU0 timer IRQs. -void set_cpu0_timer_irq(int timer_irq); +// Set the external interrupt signal for CPU timer IRQs. +void set_cpu_timer_irq(int cpu, int timer_irq); diff --git a/kernel/port/esp32c6/src/interrupt.c b/kernel/port/esp32c6/src/interrupt.c index 0fbe4b7..1de7659 100644 --- a/kernel/port/esp32c6/src/interrupt.c +++ b/kernel/port/esp32c6/src/interrupt.c @@ -84,18 +84,20 @@ void riscv_interrupt_handler() { return; } - // Check pending interrupts. for (int i = 0; i < ETS_MAX_INTR_SOURCE / 32; i++) { uint32_t pending = INTMTX.status[i]; - int lsb_pos = __builtin_clz(pending); - int irq = i * 32 + lsb_pos; - if (irq_ch_is_enabled(irq)) { - // Jump to ISR. - if (isr_table[mcause]) { - isr_table[mcause](irq); - } else { - logkf_from_isr(LOG_FATAL, "Unhandled interrupt %{d}", mcause); - panic_abort(); + while (pending) { + int lsb_pos = __builtin_ctz(pending); + pending ^= 1 << lsb_pos; + int irq = i * 32 + lsb_pos; + if (irq && irq_ch_is_enabled(irq)) { + // Jump to ISR. + if (isr_table[irq]) { + isr_table[irq](irq); + } else { + logkf_from_isr(LOG_FATAL, "Unhandled interrupt %{d}", irq); + panic_abort(); + } } } } @@ -104,7 +106,8 @@ void riscv_interrupt_handler() { // Set the external interrupt signal for CPU0 timer IRQs. -void set_cpu0_timer_irq(int timer_irq) { +void set_cpu_timer_irq(int cpu, int timer_irq) { + (void)cpu; INTMTX.route[timer_irq] = TIMER_IRQ_CH; } @@ -123,7 +126,7 @@ void irq_ch_disable(int irq) { // Query whether the IRQ is enabled. 
bool irq_ch_is_enabled(int irq) { assert_dev_drop(irq > 0 && irq < ETS_MAX_INTR_SOURCE); - return INTMTX.route[irq] != 0; + return INTMTX.route[irq] == EXT_IRQ_CH; } // Set the interrupt service routine for an interrupt on this CPU. diff --git a/kernel/port/esp32c6/src/smp.c b/kernel/port/esp32c6/src/smp.c index 8804898..0a766a7 100644 --- a/kernel/port/esp32c6/src/smp.c +++ b/kernel/port/esp32c6/src/smp.c @@ -49,3 +49,9 @@ bool smp_resume(int cpu) { (void)cpu; return false; } + +// Whether a CPU can be powered off at runtime. +bool smp_can_poweroff(int cpu) { + (void)cpu; + return false; +} diff --git a/kernel/port/esp32p4/include/port/hardware.h b/kernel/port/esp32p4/include/port/hardware.h index 77f3b13..c9495cd 100644 --- a/kernel/port/esp32p4/include/port/hardware.h +++ b/kernel/port/esp32p4/include/port/hardware.h @@ -33,11 +33,3 @@ #define RISCV_VT_ICAUSE_MASK 63 // Bitmask for trap cause. #define RISCV_VT_TCAUSE_MASK 31 - - -/* ==== SOC INFO ==== */ - -// Number of timer groups. -#define ESP_TIMG_COUNT 2 -// Number of timers per timer group. -#define ESP_TIMG_TIMER_COUNT 1 diff --git a/kernel/port/esp32p4/include/port/interrupt.h b/kernel/port/esp32p4/include/port/interrupt.h index 7966b1e..86a6168 100644 --- a/kernel/port/esp32p4/include/port/interrupt.h +++ b/kernel/port/esp32p4/include/port/interrupt.h @@ -8,7 +8,5 @@ #define EXT_IRQ_COUNT ETS_MAX_INTR_SOURCE -// Set the external interrupt signal for CPU0 timer IRQs. -void set_cpu0_timer_irq(int timer_irq); -// Set the external interrupt signal for CPU1 timer IRQs. -void set_cpu1_timer_irq(int timer_irq); +// Set the external interrupt signal for CPU timer IRQs. 
+void set_cpu_timer_irq(int cpu, int timer_irq); diff --git a/kernel/port/esp32p4/src/interrupt.c b/kernel/port/esp32p4/src/interrupt.c index 154d06b..5bb1e85 100644 --- a/kernel/port/esp32p4/src/interrupt.c +++ b/kernel/port/esp32p4/src/interrupt.c @@ -39,7 +39,9 @@ extern clic_ctl_dev_t CLIC_CTL; #define IRQ_GROUPS ((ETS_MAX_INTR_SOURCE + 31) / 32) // Interrupt claiming bitmask. -static atomic_int claim_mask[IRQ_GROUPS] = {1, 0, 0, 0}; +static atomic_int claim_mask[IRQ_GROUPS] = {0}; +// Interrupt enabled bitmask +static atomic_int enable_mask[IRQ_GROUPS] = {0}; // Get INTMTX for this CPU. static inline intmtx_t *intmtx_local() CONST; @@ -103,36 +105,28 @@ void irq_init() { // Enable the IRQ. void irq_ch_enable(int irq) { - assert_dev_drop(irq > 0 && irq < ETS_MAX_INTR_SOURCE); + assert_dev_drop(irq >= 0 && irq < ETS_MAX_INTR_SOURCE); + atomic_fetch_or(&enable_mask[irq / 32], 1 << (irq % 32)); INTMTX0.map[irq].map = EXT_IRQ_CH; INTMTX1.map[irq].map = EXT_IRQ_CH; } // Disable the IRQ. void irq_ch_disable(int irq) { - assert_dev_drop(irq > 0 && irq < ETS_MAX_INTR_SOURCE); + assert_dev_drop(irq >= 0 && irq < ETS_MAX_INTR_SOURCE); + atomic_fetch_and(&enable_mask[irq / 32], ~(1 << (irq % 32))); INTMTX0.map[irq].map = 0; INTMTX1.map[irq].map = 0; } -// Set the external interrupt signal for CPU0 timer IRQs. -void set_cpu0_timer_irq(int irq) { - assert_dev_drop(irq > 0 && irq < ETS_MAX_INTR_SOURCE); - INTMTX0.map[irq].map = TIMER_IRQ_CH; - CLIC_CTL.irq_ctl[TIMER_IRQ_CH] = (clic_int_ctl_reg_t){ - .pending = false, - .enable = true, - .attr_shv = false, - .attr_mode = 3, - .attr_trig = false, - .ctl = 127, - }; -} - -// Set the external interrupt signal for CPU1 timer IRQs. -void set_cpu1_timer_irq(int irq) { - assert_dev_drop(irq > 0 && irq < ETS_MAX_INTR_SOURCE); - INTMTX1.map[irq].map = TIMER_IRQ_CH; +// Set the external interrupt signal for CPU timer IRQs. 
+void set_cpu_timer_irq(int cpu, int irq) { + assert_dev_drop(irq >= 0 && irq < ETS_MAX_INTR_SOURCE); + if (cpu) { + INTMTX1.map[irq].map = TIMER_IRQ_CH; + } else { + INTMTX0.map[irq].map = TIMER_IRQ_CH; + } CLIC_CTL.irq_ctl[TIMER_IRQ_CH] = (clic_int_ctl_reg_t){ .pending = false, .enable = true, @@ -145,13 +139,13 @@ void set_cpu1_timer_irq(int irq) { // Query whether the IRQ is enabled. bool irq_ch_is_enabled(int irq) { - assert_dev_drop(irq > 0 && irq < ETS_MAX_INTR_SOURCE); - return intmtx_local()->map[irq].val != 0; + assert_dev_drop(irq >= 0 && irq < ETS_MAX_INTR_SOURCE); + return (enable_mask[irq / 32] >> (irq % 32)) & 1; } // Set the interrupt service routine for an interrupt on this CPU. void irq_ch_set_isr(int irq, isr_t isr) { - assert_dev_drop(irq > 0 && irq < ETS_MAX_INTR_SOURCE); + assert_dev_drop(irq >= 0 && irq < ETS_MAX_INTR_SOURCE); isr_table[irq] = isr; } @@ -169,13 +163,14 @@ void riscv_interrupt_handler() { intmtx_t *intmtx = intmtx_local(); for (size_t i = 0; i < IRQ_GROUPS; i++) { - uint32_t pending = intmtx->pending[i]; + uint32_t pending = intmtx->pending[i] & atomic_load(&enable_mask[i]); while (pending) { - int lsb_pos = __builtin_clz(pending); - uint32_t lsb_mask = 1 << lsb_pos; - int prev = atomic_fetch_or(&claim_mask[i], lsb_mask); + int lsb_pos = __builtin_ctz(pending); + uint32_t lsb_mask = 1 << lsb_pos; + pending ^= lsb_mask; + int irq = i * 32 + lsb_pos; + int prev = atomic_fetch_or(&claim_mask[i], lsb_mask); if (!(prev & lsb_mask)) { - int irq = i * 32 + lsb_pos; if (!isr_table[irq]) { logkf(LOG_FATAL, "Unhandled interrupt #%{u32;d}", irq); panic_abort(); diff --git a/kernel/port/esp32p4/src/smp.c b/kernel/port/esp32p4/src/smp.c index dfdeb61..f2ad6d7 100644 --- a/kernel/port/esp32p4/src/smp.c +++ b/kernel/port/esp32p4/src/smp.c @@ -92,9 +92,6 @@ bool smp_poweroff() { // Pause this CPU, if supported. 
bool smp_pause() { - if (smp_cur_cpu() == 0) { - return false; - } cpu_utility_ll_stall_cpu(smp_cur_cpu()); return true; } @@ -104,3 +101,8 @@ bool smp_resume(int cpu) { cpu_utility_ll_unstall_cpu(cpu); return true; } + +// Whether a CPU can be powered off at runtime. +bool smp_can_poweroff(int cpu) { + return cpu == 1; +} diff --git a/kernel/port/esp_common/CMakeLists.txt b/kernel/port/esp_common/CMakeLists.txt index 18c7895..99abd50 100644 --- a/kernel/port/esp_common/CMakeLists.txt +++ b/kernel/port/esp_common/CMakeLists.txt @@ -6,6 +6,7 @@ cmake_minimum_required(VERSION 3.10.0) set(port_src ${port_src} ${CMAKE_CURRENT_LIST_DIR}/src/gpio.c ${CMAKE_CURRENT_LIST_DIR}/src/i2c.c + ${CMAKE_CURRENT_LIST_DIR}/src/hwtimer.c ${CMAKE_CURRENT_LIST_DIR}/src/time.c ) set(port_include ${port_include} diff --git a/kernel/port/esp_common/src/hwtimer.c b/kernel/port/esp_common/src/hwtimer.c new file mode 100644 index 0000000..9ffee10 --- /dev/null +++ b/kernel/port/esp_common/src/hwtimer.c @@ -0,0 +1,213 @@ + +// SPDX-License-Identifier: MIT + +#include "hwtimer.h" + +#include "assertions.h" +#include "interrupt.h" +#include "log.h" +#include "port/hardware.h" +#include "port/hardware_allocation.h" +#include "scheduler/isr.h" +#include "smp.h" + +// NOLINTBEGIN +#define __DECLARE_RCC_RC_ATOMIC_ENV 0 +#define __DECLARE_RCC_ATOMIC_ENV 0 +// NOLINTEND + +#include +#include +#include +#include +#include + +#ifdef CONFIG_TARGET_esp32c6 +#include +#endif +#ifdef CONFIG_TARGET_esp32p4 +#include +#endif + +#define GET_TIMER_INFO(timerno) \ + assert_dev_drop((timerno) >= 0 && (timerno) < SOC_TIMER_GROUPS * SOC_TIMER_GROUP_TIMERS_PER_GROUP); \ + timg_dev_t *timg = (timerno) / SOC_TIMER_GROUP_TIMERS_PER_GROUP ? &TIMERG1 : &TIMERG0; \ + int timer = (timerno) % SOC_TIMER_GROUP_TIMERS_PER_GROUP; + + + +// Initialise timer and watchdog subsystem. +void timer_init() { +#ifdef CONFIG_TARGET_esp32c6 + // Power up timers. 
+ PCR.timergroup0_conf.tg0_rst_en = false; + PCR.timergroup0_conf.tg0_clk_en = true; + PCR.timergroup0_timer_clk_conf.tg0_timer_clk_sel = 0; + PCR.timergroup0_timer_clk_conf.tg0_timer_clk_en = true; + PCR.timergroup0_wdt_clk_conf.tg0_wdt_clk_sel = 0; + PCR.timergroup0_wdt_clk_conf.tg0_wdt_clk_en = true; +#endif +#ifdef CONFIG_TARGET_esp32p4 + HP_SYS_CLKRST.peri_clk_ctrl20.reg_timergrp0_t0_clk_en = true; + HP_SYS_CLKRST.peri_clk_ctrl20.reg_timergrp0_t1_clk_en = true; + HP_SYS_CLKRST.peri_clk_ctrl21.reg_timergrp1_t0_clk_en = true; + HP_SYS_CLKRST.peri_clk_ctrl21.reg_timergrp1_t1_clk_en = true; +#endif + TIMERG0.regclk.clk_en = false; + TIMERG1.regclk.clk_en = false; + + // Turn off watchdogs. + LP_WDT.wprotect.val = 0x50D83AA1; + LP_WDT.config0.val = 0; + TIMERG0.wdtwprotect.val = 0x50D83AA1; + TIMERG0.wdtconfig0.val = 0; + TIMERG1.wdtwprotect.val = 0x50D83AA1; + TIMERG1.wdtconfig0.val = 0; + TIMERG0.int_ena_timers.val = 0; + TIMERG1.int_ena_timers.val = 0; +} + + + +// Get the number of hardware timers. +int timer_count() { + return SOC_TIMER_GROUP_TOTAL_TIMERS; +} + +// Get the IRQ number for a timer. +int timer_get_irq(int timerno) { +#ifdef CONFIG_TARGET_esp32p4 + switch (timerno) { + default: return -1; + case 0: return ETS_TG0_T0_INTR_SOURCE; + case 1: return ETS_TG0_T1_INTR_SOURCE; + case 2: return ETS_TG1_T0_INTR_SOURCE; + case 3: return ETS_TG1_T1_INTR_SOURCE; + } +#endif +#ifdef CONFIG_TARGET_esp32c6 + switch (timerno) { + default: return -1; + case 0: return ETS_TG1_T0_LEVEL_INTR_SOURCE; + case 1: return ETS_TG0_T0_LEVEL_INTR_SOURCE; + } +#endif +} + +// Set timer frequency. 
+void timer_set_freq(int timerno, frequency_hz_t freq) { + GET_TIMER_INFO(timerno) + frequency_hz_t base_freq; +#ifdef CONFIG_TARGET_esp32p4 + uint32_t clksrc; + switch (timerno) { + case 0: clksrc = HP_SYS_CLKRST.peri_clk_ctrl20.reg_timergrp0_t0_src_sel; break; + case 1: clksrc = HP_SYS_CLKRST.peri_clk_ctrl20.reg_timergrp0_t1_src_sel; break; + case 2: clksrc = HP_SYS_CLKRST.peri_clk_ctrl21.reg_timergrp1_t0_src_sel; break; + case 3: clksrc = HP_SYS_CLKRST.peri_clk_ctrl21.reg_timergrp1_t1_src_sel; break; + } +#endif +#ifdef CONFIG_TARGET_esp32c6 + uint32_t clksrc; + if (timerno / SOC_TIMER_GROUP_TIMERS_PER_GROUP) { + clksrc = PCR.timergroup1_timer_clk_conf.tg1_timer_clk_sel; + } else { + clksrc = PCR.timergroup0_timer_clk_conf.tg0_timer_clk_sel; + } +#endif + switch (clksrc) { + case 0: base_freq = XTAL_CLK_FREQ; break; + case 1: base_freq = 80000000; break; + case 2: base_freq = SOC_CLK_RC_FAST_FREQ_APPROX; break; + default: __builtin_unreachable(); + } + + uint32_t divider = base_freq / freq; + if (divider < 1) { + logkf(LOG_WARN, "Timer clock divider unreachable: %{u32;d}", divider); + divider = 1; + } else if (divider > 32767) { + logkf(LOG_WARN, "Timer clock divider unreachable: %{u32;d}", divider); + divider = 32767; + } + timg->hw_timer[timer].config.tx_divider = divider; +} + +// Start timer. +void timer_start(int timerno) { + GET_TIMER_INFO(timerno) + timg->hw_timer[timer].config.tx_divcnt_rst = true; + timg->hw_timer[timer].config.tx_increase = true; + timg->hw_timer[timer].config.tx_en = true; +} + +// Stop timer. +void timer_stop(int timerno) { + GET_TIMER_INFO(timerno) + timg->hw_timer[timer].config.tx_en = false; +} + +// Configure timer alarm. 
+void timer_alarm_config(int timerno, int64_t threshold, bool reset_on_alarm) { + GET_TIMER_INFO(timerno) + timg_txconfig_reg_t tmp = timg->hw_timer[timer].config; + timg->hw_timer[timer].config.tx_alarm_en = false; + timg->hw_timer[timer].alarmlo.val = threshold; + timg->hw_timer[timer].alarmhi.val = threshold >> 32; + tmp.tx_autoreload = reset_on_alarm; + tmp.tx_alarm_en = true; + timg->hw_timer[timer].config = tmp; +} + +// Disable timer alarm. +void timer_alarm_disable(int timerno) { + GET_TIMER_INFO(timerno) + timg->hw_timer[timer].config.tx_alarm_en = false; +} + +// Get timer value. +int64_t timer_value_get(int timerno) { + GET_TIMER_INFO(timerno) + uint32_t lo = timg->hw_timer[timer].lo.val; + timg->hw_timer[timer].update.val = true; + for (int div = 256; lo == timg->hw_timer[timer].lo.val && div; div--) continue; + return ((int64_t)timg->hw_timer[timer].hi.val << 32) | timg->hw_timer[timer].lo.val; +} + +// Set timer value. +void timer_value_set(int timerno, int64_t time) { + GET_TIMER_INFO(timerno) + timg->hw_timer[timer].loadlo.val = time; + timg->hw_timer[timer].loadhi.val = time >> 32; + timg->hw_timer[timer].load.val = true; +} + + + +// Check whether timer has interrupts enabled. +bool timer_int_enabled(int timerno) { + GET_TIMER_INFO(timerno) + return (timg->int_ena_timers.val >> timer) & 1; +} + +// Enable / disable timer interrupts. +void timer_int_enable(int timerno, bool enable) { + GET_TIMER_INFO(timerno) + if (enable) { + timg->int_ena_timers.val |= 1 << timer; + } else { + timg->int_ena_timers.val &= ~(1 << timer); + } +} + +// Check whether timer interrupt had fired. +bool timer_int_pending(int timerno) { + GET_TIMER_INFO(timerno) + return (timg->int_raw_timers.val >> timer) & 1; +} + +// Clear timer interrupt. 
+void timer_int_clear(int timerno) { + GET_TIMER_INFO(timerno) + timg->int_clr_timers.val = 1 << timer; +} diff --git a/kernel/port/esp_common/src/i2c.c b/kernel/port/esp_common/src/i2c.c index 33bde27..085a844 100644 --- a/kernel/port/esp_common/src/i2c.c +++ b/kernel/port/esp_common/src/i2c.c @@ -400,7 +400,7 @@ static size_t i2c_sync_trans(badge_err_t *ec, int i2c_num, i2c_fsm_cmd_t *cmd, s badge_err_set(ec, ELOC_I2C, ECAUSE_TIMEOUT); return i2c_ctx[i2c_num].trans_bytes; } - sched_yield(); + thread_yield(); } logkf(LOG_DEBUG, "SR: %{u32;x}", I2C0.sr.val); diff --git a/kernel/port/esp_common/src/time.c b/kernel/port/esp_common/src/time.c index 45c293b..64bcff8 100644 --- a/kernel/port/esp_common/src/time.c +++ b/kernel/port/esp_common/src/time.c @@ -3,34 +3,24 @@ #include "time.h" +#include "arrays.h" #include "assertions.h" +#include "config.h" +#include "hwtimer.h" #include "interrupt.h" -#include "log.h" -#include "port/hardware.h" -#include "port/hardware_allocation.h" +#include "mutex.h" #include "scheduler/isr.h" #include "smp.h" -// NOLINTBEGIN -#define __DECLARE_RCC_RC_ATOMIC_ENV 0 -#define __DECLARE_RCC_ATOMIC_ENV 0 -// NOLINTEND - -#include -#include -#include - -#ifdef CONFIG_TARGET_esp32c6 -#include +#ifdef CONFIG_TARGET_esp32p4 +// Timer used for task list items. +#define TT_TIMER 2 +#else +// Timer used for task list items. +#define TT_TIMER 1 #endif -#define GET_TIMER_INFO(timerno) \ - assert_dev_drop((timerno) >= 0 && (timerno) < ESP_TIMG_COUNT * ESP_TIMG_TIMER_COUNT); \ - timg_dev_t *timg = (timerno) / ESP_TIMG_TIMER_COUNT ? &TIMERG1 : &TIMERG0; \ - int timer = (timerno) % ESP_TIMG_TIMER_COUNT; - - // Callback to the timer driver for when a timer alarm fires. void timer_isr_timer_alarm() { @@ -40,183 +30,153 @@ void timer_isr_timer_alarm() { sched_request_switch_from_isr(); } -// Initialise timer and watchdog subsystem. -void time_init() { -#ifdef CONFIG_TARGET_esp32c6 - // Power up timers. 
- PCR.timergroup0_conf.tg0_rst_en = false; - PCR.timergroup0_conf.tg0_clk_en = true; - PCR.timergroup0_timer_clk_conf.tg0_timer_clk_sel = 0; - PCR.timergroup0_timer_clk_conf.tg0_timer_clk_en = true; - PCR.timergroup0_wdt_clk_conf.tg0_wdt_clk_sel = 0; - PCR.timergroup0_wdt_clk_conf.tg0_wdt_clk_en = true; -#endif - TIMERG0.regclk.clk_en = true; - TIMERG1.regclk.clk_en = true; - - // Turn off watchdogs. - LP_WDT.wprotect.val = 0x50D83AA1; - LP_WDT.config0.val = 0; - TIMERG0.wdtwprotect.val = 0x50D83AA1; - TIMERG0.wdtconfig0.val = 0; - TIMERG1.wdtwprotect.val = 0x50D83AA1; - TIMERG1.wdtconfig0.val = 0; - TIMERG0.int_ena_timers.val = 0; - TIMERG1.int_ena_timers.val = 0; - // Configure system timers. - timer_stop(0); - timer_value_set(0, 0); - timer_alarm_disable(0); - timer_int_clear(0); - timer_int_enable(0, true); - timer_set_freq(0, 1000000); -#ifdef CONFIG_TARGET_esp32p4 - timer_stop(1); - timer_value_set(1, 0); - timer_alarm_disable(1); - timer_int_clear(1); - timer_int_enable(1, true); - timer_set_freq(1, 1000000); -#endif - - // Configure timer interrupts. -#ifdef CONFIG_TARGET_esp32c6 - set_cpu0_timer_irq(ETS_TG0_T0_LEVEL_INTR_SOURCE); -#endif -#ifdef CONFIG_TARGET_esp32p4 - set_cpu0_timer_irq(ETS_TG0_T0_INTR_SOURCE); - set_cpu1_timer_irq(ETS_TG1_T0_INTR_SOURCE); -#endif - // Start timers at close to the same time. - timer_start(0); -#ifdef CONFIG_TARGET_esp32p4 - timer_start(1); -#endif -} - -// Sets the alarm time when the next task switch should occur. -void time_set_next_task_switch(timestamp_us_t timestamp) { - int cpu = smp_cur_cpu(); - timer_alarm_config(cpu, timestamp, false); +// Timer task list entry. +typedef struct { + // Task ID. + int64_t taskno; + // Task timestamp; + timestamp_us_t time; + // Task function. + timer_fn_t callback; + // Task cookie. + void *cookie; +} timertask_t; + +// Timer task mutex. +static mutex_t tt_mtx = MUTEX_T_INIT; +// Next timer task ID. +static int64_t next_taskno = 1; +// Timer task list capacity. 
+static size_t tt_list_cap; +// Timer task list length. +static size_t tt_list_len; +// Timer task list. +static timertask_t *tt_list; +// Current limit of task list timer. +static timestamp_us_t tt_limit = TIMESTAMP_US_MAX; + +// Sort `timertask_t` by timestamp. +int tt_cmp(void const *a, void const *b) { + timertask_t const *a_ptr = a; + timertask_t const *b_ptr = b; + if (a_ptr->time < b_ptr->time) { + return -1; + } else if (a_ptr->time > b_ptr->time) { + return 1; + } else { + return 0; + } } -// Get current time in microseconds. -timestamp_us_t time_us() { - return timer_value_get(smp_cur_cpu()); -} +// Timer task ISR. +void tt_isr(int irq) { + (void)irq; + mutex_acquire_from_isr(NULL, &tt_mtx, TIMESTAMP_US_MAX); + // Disable alarm. + timer_alarm_disable(TT_TIMER); + timer_int_clear(TT_TIMER); + // Consume all tasks for this timestamp. + timestamp_us_t now = time_us(); + size_t i; + for (i = 0; i < tt_list_len && tt_list[i].time <= now; i++) { + tt_list[i].callback(tt_list[i].cookie); + } + array_lencap_remove_n(&tt_list, sizeof(timertask_t), &tt_list_len, &tt_list_cap, NULL, 0, i); -// Set timer frequency. -void timer_set_freq(int timerno, frequency_hz_t freq) { - GET_TIMER_INFO(timerno) - frequency_hz_t base_freq; -#ifdef CONFIG_TARGET_esp32p4 - // TODO: Determine what selects timer clock source. - base_freq = 40000000; -#endif -#ifdef CONFIG_TARGET_esp32c6 - uint32_t clksrc; - if (timerno) { - clksrc = PCR.timergroup1_timer_clk_conf.tg1_timer_clk_sel; + if (tt_list_len) { + // Set next timer. 
+ tt_limit = tt_list[0].time; + timer_alarm_config(TT_TIMER, tt_limit, false); } else { - clksrc = PCR.timergroup0_timer_clk_conf.tg0_timer_clk_sel; - } - switch (clksrc) { - case 0: base_freq = ESP_FREQ_XTAL_CLK; break; - case 1: base_freq = 80000000; break; - case 2: base_freq = ESP_FREQ_RC_FAST_CLK; break; - default: __builtin_unreachable(); + tt_limit = TIMESTAMP_US_MAX; } -#endif - uint32_t divider = base_freq / freq; - if (divider < 1) { - logkf(LOG_WARN, "Timer clock divider unreachable: %{u32;d}", divider); - divider = 1; - } else if (divider > 32767) { - logkf(LOG_WARN, "Timer clock divider unreachable: %{u32;d}", divider); - divider = 32767; - } - timg->hw_timer[timer].config.tx_divider = divider; + mutex_release_from_isr(NULL, &tt_mtx); } -// Start timer. -void timer_start(int timerno) { - GET_TIMER_INFO(timerno) - timg->hw_timer[timer].config.tx_divcnt_rst = false; - timg->hw_timer[timer].config.tx_increase = true; - timg->hw_timer[timer].config.tx_en = true; -} -// Stop timer. -void timer_stop(int timerno) { - GET_TIMER_INFO(timerno) - timg->hw_timer[timer].config.tx_en = false; -} -// Configure timer alarm. -void timer_alarm_config(int timerno, int64_t threshold, bool reset_on_alarm) { - GET_TIMER_INFO(timerno) - timg->hw_timer[timer].alarmlo.val = threshold; - timg->hw_timer[timer].alarmhi.val = threshold >> 32; - timg_txconfig_reg_t tmp = timg->hw_timer[timer].config; - tmp.tx_autoreload = reset_on_alarm; - tmp.tx_alarm_en = true; - timg->hw_timer[timer].config = tmp; -} +// Initialise timer and watchdog subsystem. +void time_init() { + timer_init(); -// Disable timer alarm. -void timer_alarm_disable(int timerno) { - GET_TIMER_INFO(timerno) - timg->hw_timer[timer].config.tx_alarm_en = false; -} + // Configure system timers. + for (int i = 0; i <= TT_TIMER; i++) { + timer_stop(i); + timer_value_set(i, 0); + timer_alarm_disable(i); + timer_int_clear(i); + timer_int_enable(i, true); + timer_set_freq(i, 1000000); + } -// Get timer value. 
-int64_t timer_value_get(int timerno) { - GET_TIMER_INFO(timerno) - uint32_t lo = timg->hw_timer[timer].lo.val; - timg->hw_timer->update.val = true; - for (int div = 32; lo == timg->hw_timer[timer].lo.val && div; div--) continue; - return ((int64_t)timg->hw_timer[timer].hi.val << 32) | timg->hw_timer[timer].lo.val; -} + // Assign timer IRQs. + for (int i = 0; i < TT_TIMER; i++) { + set_cpu_timer_irq(i, timer_get_irq(i)); + } + int tt_irq = timer_get_irq(TT_TIMER); + irq_ch_set_isr(tt_irq, tt_isr); + irq_ch_enable(tt_irq); -// Set timer value. -void timer_value_set(int timerno, int64_t time) { - GET_TIMER_INFO(timerno) - timg->hw_timer[timer].loadlo.val = time; - timg->hw_timer[timer].loadhi.val = time >> 32; - timg->hw_timer[timer].load.val = true; + // Start timers at close to the same time. + for (int i = 0; i <= TT_TIMER; i++) { + timer_start(i); + } } - - -// Check whether timer has interrupts enabled. -bool timer_int_enabled(int timerno) { - GET_TIMER_INFO(timerno) - return (timg->int_ena_timers.val >> timer) & 1; +// Sets the alarm time when the next task switch should occur. +void time_set_next_task_switch(timestamp_us_t timestamp) { + timer_alarm_config(smp_cur_cpu(), timestamp, false); } -// Enable / disable timer interrupts. -void timer_int_enable(int timerno, bool enable) { - GET_TIMER_INFO(timerno) - if (enable) { - timg->int_ena_timers.val |= 1 << timer; - } else { - timg->int_ena_timers.val &= ~(1 << timer); +// Attach a task to a timer interrupt. 
+int64_t time_add_async_task(timestamp_us_t timestamp, timer_fn_t taskfn, void *cookie) { + if (timestamp <= 0) { + return 0; + } + bool ie = irq_disable(); + mutex_acquire_from_isr(NULL, &tt_mtx, TIMESTAMP_US_MAX); + timertask_t task = { + .callback = taskfn, + .cookie = cookie, + .time = timestamp, + .taskno = next_taskno, + }; + int64_t taskno = 0; + if (array_lencap_sorted_insert(&tt_list, sizeof(timertask_t), &tt_list_len, &tt_list_cap, &task, tt_cmp)) { + taskno = next_taskno; + next_taskno++; } + if (tt_limit > timestamp) { + tt_limit = timestamp; + timer_alarm_config(TT_TIMER, tt_limit, false); + } + mutex_release_from_isr(NULL, &tt_mtx); + irq_enable_if(ie); + return taskno; } -// Check whether timer interrupt had fired. -bool timer_int_pending(int timerno) { - GET_TIMER_INFO(timerno) - return (timg->int_raw_timers.val >> timer) & 1; +// Cancel a task created with `time_add_async_task`. +bool time_cancel_async_task(int64_t taskno) { + bool ie = irq_disable(); + bool success = false; + mutex_acquire_from_isr(NULL, &tt_mtx, TIMESTAMP_US_MAX); + for (size_t i = 0; i < tt_list_len; i++) { + if (tt_list[i].taskno == taskno) { + array_lencap_remove(&tt_list, sizeof(timertask_t), &tt_list_len, &tt_list_cap, NULL, i); + success = true; + } + } + mutex_release_from_isr(NULL, &tt_mtx); + irq_enable_if(ie); + return success; } -// Clear timer interrupt. -void timer_int_clear(int timerno) { - GET_TIMER_INFO(timerno) - timg->int_clr_timers.val = 1 << timer; +// Get current time in microseconds. 
+timestamp_us_t time_us() { + return timer_value_get(smp_cur_cpu()); } diff --git a/kernel/src/badgelib/mutex.c b/kernel/src/badgelib/mutex.c index 22792df..8168134 100644 --- a/kernel/src/badgelib/mutex.c +++ b/kernel/src/badgelib/mutex.c @@ -25,7 +25,7 @@ static inline bool await_swap_atomic_int( } else if (from_isr) { isr_pause(); } else { - sched_yield(); + thread_yield(); } } while (time_us() < timeout); return false; @@ -43,7 +43,7 @@ static inline bool } else if (from_isr) { isr_pause(); } else { - sched_yield(); + thread_yield(); } } while (time_us() < timeout); return false; @@ -61,7 +61,7 @@ static inline bool } else if (from_isr) { isr_pause(); } else { - sched_yield(); + thread_yield(); } } } diff --git a/kernel/src/freestanding/int_routines.c b/kernel/src/freestanding/int_routines.c index b701b97..3645380 100644 --- a/kernel/src/freestanding/int_routines.c +++ b/kernel/src/freestanding/int_routines.c @@ -217,6 +217,7 @@ int __clzdi2(uint64_t a) { return hash_table[(a * 0x0218a392cd3d5dbf) >> 58]; } #ifdef do_ti_math +int __clzti2(__uint128_t a) __attribute__((weak)); int __clzti2(__uint128_t a) { static uint8_t const hash_table[128] = { 0, 127, 126, 120, 125, 113, 119, 106, 124, 99, 112, 92, 118, 85, 105, 78, 123, 95, 98, 71, 111, 64, @@ -235,4 +236,40 @@ int __clzti2(__uint128_t a) { } #endif +int __ctzsi2(uint32_t a) __attribute__((weak)); +int __ctzsi2(uint32_t a) { + static uint8_t const hash_table[32] = { + 0, 1, 23, 2, 29, 24, 19, 3, 30, 27, 25, 11, 20, 8, 4, 13, + 31, 22, 28, 18, 26, 10, 7, 12, 21, 17, 9, 6, 16, 5, 15, 14, + }; + a &= ~(a - 1); + return hash_table[(a * 0x076be629) >> 27]; +} +int __ctzdi2(uint64_t a) __attribute__((weak)); +int __ctzdi2(uint64_t a) { + static uint8_t const hash_table[64] = { + 0, 1, 2, 7, 3, 13, 8, 19, 4, 25, 14, 28, 9, 34, 20, 40, 5, 17, 26, 38, 15, 46, + 29, 48, 10, 31, 35, 54, 21, 50, 41, 57, 63, 6, 12, 18, 24, 27, 33, 39, 16, 37, 45, 47, + 30, 53, 49, 56, 62, 11, 23, 32, 36, 44, 52, 55, 61, 22, 43, 51, 60, 
42, 59, 58, + }; + a &= ~(a - 1); + return hash_table[(a * 0x0218a392cd3d5dbf) >> 58]; +} +#ifdef do_ti_math +int __ctzti2(__uint128_t a) __attribute__((weak)); +int __ctzti2(__uint128_t a) { + static uint8_t const hash_table[128] = { + 0, 1, 2, 8, 3, 15, 9, 22, 4, 29, 16, 36, 10, 43, 23, 50, 5, 33, 30, 57, 17, 64, + 37, 71, 11, 60, 44, 78, 24, 85, 51, 92, 6, 20, 34, 48, 31, 69, 58, 90, 18, 67, 65, 99, + 38, 101, 72, 106, 12, 40, 61, 82, 45, 103, 79, 113, 25, 74, 86, 116, 52, 108, 93, 120, 127, 7, + 14, 21, 28, 35, 42, 49, 32, 56, 63, 70, 59, 77, 84, 91, 19, 47, 68, 89, 66, 98, 100, 105, + 39, 81, 102, 112, 73, 115, 107, 119, 126, 13, 27, 41, 55, 62, 76, 83, 46, 88, 97, 104, 80, 111, + 114, 118, 125, 26, 54, 75, 87, 96, 110, 117, 124, 53, 95, 109, 123, 94, 122, 121, + }; + a &= ~(a - 1); + __uint128_t mul = ((__uint128_t)0x0106143891634793 << 64) | 0x2a5cd9d3ead7b77f; + return hash_table[(a * mul) >> 121]; +} +#endif + // NOLINTEND diff --git a/kernel/src/freestanding/string.c b/kernel/src/freestanding/string.c index f134205..97a245d 100644 --- a/kernel/src/freestanding/string.c +++ b/kernel/src/freestanding/string.c @@ -24,8 +24,8 @@ size_t strnlen(char const *string, size_t max_len) { // Implementation of the mem_copy loop with variable access size. #define MEM_COPY_IMPL(type, alignment, dest, src, size) \ { \ - type *dest_ptr = (dest); /* NOLINT*/ \ - type const *src_ptr = (src); /* NOLINT*/ \ + type *dest_ptr = (dest); /* NOLINT */ \ + type const *src_ptr = (src); /* NOLINT */ \ size_t _size = (size) / (alignment); \ if ((dest) < (src)) { \ /* Forward iteration. 
*/ \ diff --git a/kernel/src/housekeeping.c b/kernel/src/housekeeping.c index c5ab14c..5d83229 100644 --- a/kernel/src/housekeeping.c +++ b/kernel/src/housekeeping.c @@ -80,7 +80,7 @@ int hk_thread_func(void *ignored) { } mutex_release(NULL, &hk_mtx); - sched_yield(); + thread_yield(); } } diff --git a/kernel/src/main.c b/kernel/src/main.c index e6d358c..3601a93 100644 --- a/kernel/src/main.c +++ b/kernel/src/main.c @@ -43,42 +43,7 @@ static void kernel_init(); static void userland_init(); static void userland_shutdown(); static void kernel_shutdown(); - -// Manages the kernel's lifetime after basic runtime initialization. -static void kernel_lifetime_func() { - // Start the kernel services. - kernel_init(); - // Start other CPUs. - sched_start_altcpus(); - // Start userland. - userland_init(); - - // The boot process is now complete, this thread will wait until a shutdown is issued. - int shutdown_mode; - do { - sched_yield(); - shutdown_mode = atomic_load(&kernel_shutdown_mode); - } while (shutdown_mode == 0); - - // Shut down the userland. - userland_shutdown(); - // Tie up loose ends. - kernel_shutdown(); - // Power off. - if (kernel_shutdown_mode == 2) { - logkf(LOG_INFO, "Restarting"); - port_poweroff(true); - } else { - logkf(LOG_INFO, "Powering off"); - port_poweroff(false); - } -} - -// Shutdown system call implementation. -void syscall_sys_shutdown(bool is_reboot) { - logk(LOG_INFO, is_reboot ? "Reboot requested" : "Shutdown requested"); - atomic_store(&kernel_shutdown_mode, 1 + is_reboot); -} +static void kernel_lifetime_func(); @@ -120,6 +85,43 @@ void basic_runtime_init() { } +// Manages the kernel's lifetime after basic runtime initialization. +static void kernel_lifetime_func() { + // Start the kernel services. + kernel_init(); + // Start other CPUs. + sched_start_altcpus(); + // Start userland. + userland_init(); + + // The boot process is now complete, this thread will wait until a shutdown is issued. 
+ int shutdown_mode; + do { + thread_yield(); + shutdown_mode = atomic_load(&kernel_shutdown_mode); + } while (shutdown_mode == 0); + + // Shut down the userland. + userland_shutdown(); + // Tie up loose ends. + kernel_shutdown(); + // Power off. + if (kernel_shutdown_mode == 2) { + logkf(LOG_INFO, "Restarting"); + port_poweroff(true); + } else { + logkf(LOG_INFO, "Powering off"); + port_poweroff(false); + } +} + +// Shutdown system call implementation. +void syscall_sys_shutdown(bool is_reboot) { + logk(LOG_INFO, is_reboot ? "Reboot requested" : "Shutdown requested"); + atomic_store(&kernel_shutdown_mode, 1 + is_reboot); +} + + // After basic runtime initialization, the booting CPU core continues here. // This finishes the initialization of all kernel systems, resources and services. @@ -131,6 +133,9 @@ static void kernel_init() { // Full hardware initialization. port_init(); + logk(LOG_DEBUG, "Waiting for a second"); + thread_sleep(1000000); + // Temporary filesystem image. fs_mount(&ec, FS_TYPE_RAMFS, NULL, "/", 0); badge_err_assert_always(&ec); @@ -167,7 +172,7 @@ static void userland_shutdown() { proc_signal_all(SIGHUP); // Wait for one second to give them time. timestamp_us_t lim = time_us() + 1000000; - while (time_us() < lim && proc_has_noninit()) sched_yield(); + while (time_us() < lim && proc_has_noninit()) thread_yield(); if (proc_has_noninit()) { // Forcibly terminate all processes. @@ -185,7 +190,7 @@ static void userland_shutdown() { if (!(proc_getflags(NULL, 1) & PROC_RUNNING)) { return; } - sched_yield(); + thread_yield(); } // If init didn't stop by this point we're probably out of luck. 
diff --git a/kernel/src/process/process.c b/kernel/src/process/process.c index 07a016c..ac63814 100644 --- a/kernel/src/process/process.c +++ b/kernel/src/process/process.c @@ -477,7 +477,7 @@ void proc_suspend(process_t *process, tid_t current) { mutex_acquire(NULL, &process->mtx, TIMESTAMP_US_MAX); for (size_t i = 0; i < process->threads_len; i++) { if (process->threads[i] != current) { - thread_suspend(NULL, process->threads[i]); + thread_suspend(NULL, process->threads[i], false); } } mutex_release(NULL, &process->mtx); diff --git a/kernel/src/process/sighandler.c b/kernel/src/process/sighandler.c index 867d5c5..b49f9f7 100644 --- a/kernel/src/process/sighandler.c +++ b/kernel/src/process/sighandler.c @@ -50,7 +50,7 @@ static inline void memmap_info(process_t *const proc, size_t vaddr) { // Runs the appropriate handler for a signal. static void run_sighandler(int signum, uint64_t cause) { - sched_thread_t *thread = sched_current_thread_unsafe(); + sched_thread_t *thread = sched_current_thread(); process_t *const proc = thread->process; // Check for signal handler. if (signum == SIGKILL || proc->sighandlers[signum] == SIG_DFL) { @@ -103,7 +103,7 @@ void proc_signal_handler() { // If the thread is already running a signal handler, the process is killed. static void trap_signal_handler(int signum, uint64_t cause) NORETURN; static void trap_signal_handler(int signum, uint64_t cause) { - sched_thread_t *thread = sched_current_thread_unsafe(); + sched_thread_t *thread = sched_current_thread(); process_t *const proc = thread->process; int current = sched_is_sighandler(); if (current) { diff --git a/kernel/src/process/syscall_impl.c b/kernel/src/process/syscall_impl.c index 757a2a2..5bc215d 100644 --- a/kernel/src/process/syscall_impl.c +++ b/kernel/src/process/syscall_impl.c @@ -201,7 +201,7 @@ NOASAN int syscall_proc_waitpid(int pid, int *wstatus, int options) { // No children with matching PIDs exist. 
return -ECHILD; } - sched_yield(); + thread_yield(); } // Nothing found in non-blocking wait. diff --git a/kernel/src/scheduler/scheduler.c b/kernel/src/scheduler/scheduler.c index 21e8693..42a6a4f 100644 --- a/kernel/src/scheduler/scheduler.c +++ b/kernel/src/scheduler/scheduler.c @@ -41,23 +41,10 @@ static mutex_t unused_mtx = MUTEX_T_INIT; // Pool of unused thread handles. static dlist_t dead_threads; -// Compare the ID of `sched_thread_t *` to an `int`. -static int tid_int_cmp(void const *a, void const *b) { - sched_thread_t *thread = *(sched_thread_t **)a; - tid_t tid = (tid_t)(ptrdiff_t)b; - return thread->id - tid; -} - -// Find a thread by TID. -static sched_thread_t *find_thread(tid_t tid) { - array_binsearch_t res = array_binsearch(threads, sizeof(void *), threads_len, (void *)(ptrdiff_t)tid, tid_int_cmp); - return res.found ? threads[res.index] : NULL; -} - // Set the context switch to a certain thread. -static inline void set_switch(sched_cpulocal_t *info, sched_thread_t *thread) { +static void set_switch(sched_cpulocal_t *info, sched_thread_t *thread) { int pflags = thread->process ? atomic_load(&thread->process->flags) : 0; int tflags = atomic_load(&thread->flags); @@ -255,7 +242,7 @@ void sched_request_switch_from_isr() { } // Account thread time usage. - sched_thread_t *cur_thread = sched_current_thread_unsafe(); + sched_thread_t *cur_thread = sched_current_thread(); if (cur_thread) { timestamp_us_t used = now - info->last_preempt; cur_thread->timeusage.cycle_time += used; @@ -310,9 +297,17 @@ void sched_request_switch_from_isr() { dlist_append(&dead_threads, &thread->node); assert_dev_keep(mutex_release_from_isr(NULL, &unused_mtx)); - } else if (!(flags & THREAD_PRIVILEGED) && (flags & THREAD_SUSPENDING)) { - // Userspace thread being suspended. 
- atomic_fetch_and(&thread->flags, ~(THREAD_RUNNING | THREAD_SUSPENDING)); + } else if (((flags & THREAD_KSUSPEND) || !(flags & THREAD_PRIVILEGED)) && (flags & THREAD_SUSPENDING)) { + // Userspace and/or kernel thread being suspended. + int newval; + do { + if (!((flags & THREAD_KSUSPEND) || !(flags & THREAD_PRIVILEGED)) || !(flags & THREAD_SUSPENDING)) { + // Suspend cancelled; set as switch target. + set_switch(info, thread); + return; + } + newval = flags & ~(THREAD_RUNNING | THREAD_KSUSPEND | THREAD_SUSPENDING); + } while (!atomic_compare_exchange_strong(&thread->flags, &flags, newval)); } else { // Runnable thread found; perform context switch. @@ -327,14 +322,37 @@ void sched_request_switch_from_isr() { set_switch(info, &info->idle_thread); } + + +// Compare the ID of `sched_thread_t *` to an `int`. +static int tid_int_cmp(void const *a, void const *b) { + sched_thread_t *thread = *(sched_thread_t **)a; + tid_t tid = (tid_t)(ptrdiff_t)b; + return thread->id - tid; +} + +// Find a thread by TID. +static sched_thread_t *find_thread(tid_t tid) { + array_binsearch_t res = array_binsearch(threads, sizeof(void *), threads_len, (void *)(ptrdiff_t)tid, tid_int_cmp); + return res.found ? threads[res.index] : NULL; +} + // Scheduler housekeeping. static void sched_housekeeping(int taskno, void *arg) { (void)taskno; (void)arg; - assert_dev_keep(mutex_acquire(NULL, &threads_mtx, TIMESTAMP_US_MAX)); + + // Acquire the mutex with interrupts disabled without blocking other threads. + while (1) { + irq_disable(); + if (mutex_acquire_from_isr(NULL, &threads_mtx, 500)) { + break; + } + irq_enable(); + thread_yield(); + } // Get list of dead threads. 
- irq_disable(); assert_dev_keep(mutex_acquire_from_isr(NULL, &unused_mtx, TIMESTAMP_US_MAX)); dlist_t tmp = DLIST_EMPTY; sched_thread_t *node = (void *)dead_threads.head; @@ -347,7 +365,6 @@ static void sched_housekeeping(int taskno, void *arg) { node = next; } assert_dev_keep(mutex_release_from_isr(NULL, &unused_mtx)); - irq_enable(); // Clean up all dead threads. while (tmp.len) { @@ -360,19 +377,19 @@ static void sched_housekeeping(int taskno, void *arg) { array_binsearch(threads, sizeof(void *), threads_len, (void *)(ptrdiff_t)thread->id, tid_int_cmp); assert_dev_drop(res.found); array_lencap_remove(&threads, sizeof(void *), &threads_len, &threads_cap, NULL, res.index); + free(thread); } - assert_dev_keep(mutex_release(NULL, &threads_mtx)); + assert_dev_keep(mutex_release_from_isr(NULL, &threads_mtx)); + irq_enable(); } - - // Idle function ran when a CPU has no threads. static void idle_func(void *arg) { (void)arg; while (1) { isr_pause(); - sched_yield(); + thread_yield(); } } @@ -453,6 +470,26 @@ void sched_exit(int cpu) { } + +// Returns the current thread ID. +tid_t sched_current_tid() { + return isr_ctx_get()->thread->id; +} + +// Returns the current thread struct. +sched_thread_t *sched_current_thread() { + return isr_ctx_get()->thread; +} + +// Returns the associated thread struct. +sched_thread_t *sched_get_thread(tid_t tid) { + assert_always(mutex_acquire_shared(NULL, &threads_mtx, TIMESTAMP_US_MAX)); + sched_thread_t *thread = find_thread(tid); + assert_always(mutex_release_shared(NULL, &threads_mtx)); + return thread; +} + + // Create a new suspended userland thread. tid_t thread_new_user( badge_err_t *ec, char const *name, process_t *process, size_t user_entrypoint, size_t user_arg, int priority @@ -580,24 +617,90 @@ void thread_detach(badge_err_t *ec, tid_t tid) { } -// Pauses execution of a user thread. 
-void thread_suspend(badge_err_t *ec, tid_t tid) { - assert_always(mutex_acquire_shared(NULL, &threads_mtx, TIMESTAMP_US_MAX)); - sched_thread_t *thread = find_thread(tid); +// Explicitly yield to the scheduler; the scheduler may run other threads without waiting for preemption. +// Use this function to reduce the CPU time used by a thread. +void thread_yield() { + irq_disable(); + sched_request_switch_from_isr(); + isr_context_switch(); +} + +// Resume a thread from a timer ISR. +static void thread_resume_from_timer(void *cookie) { + tid_t tid = (tid_t)(long)cookie; + thread_resume_now_from_isr(NULL, tid); +} + +// Set thread wakeup timer. +static void thread_set_wake_time(timestamp_us_t time) { + sched_thread_t *thread = sched_current_thread(); + time_add_async_task(time, thread_resume_from_timer, (void *)(long)thread->id); +} + +// Sleep for an amount of microseconds. +void thread_sleep(timestamp_us_t delay) { + // Set the sleep timer; the kernel thread will yield and wake up later. + thread_set_wake_time(time_us() + delay); + thread_yield(); +} + +// Implementation of thread yield system call. +void syscall_thread_yield() { + thread_yield(); +} + +// Implementation of usleep system call. +void syscall_thread_sleep(timestamp_us_t delay) { + // Set the sleep timer; the thread will drop to user mode and then pause. + thread_set_wake_time(time_us() + delay); +} + + +// Pauses execution of a thread. +// If `suspend_kernel` is false, the thread won't be suspended until it enters user mode. +void thread_suspend(badge_err_t *ec, tid_t tid, bool suspend_kernel) { + sched_thread_t *self = sched_current_thread(); + sched_thread_t *thread; + + if (tid == self->id) { + // If suspending self, disable IRQs to guard suspension. + irq_disable(); + thread = self; + } else { + // If suspending another thread, acquire mutex to guard existence. 
+ assert_always(mutex_acquire_shared(NULL, &threads_mtx, TIMESTAMP_US_MAX)); + thread = find_thread(tid); + } + if (thread) { - if (thread->flags & THREAD_KERNEL) { + if ((thread->flags & THREAD_KERNEL) && !suspend_kernel) { badge_err_set(ec, ELOC_THREADS, ECAUSE_ILLEGAL); } else { - int exp; + int setfl = THREAD_SUSPENDING + suspend_kernel * THREAD_KSUSPEND; + int exp = atomic_load(&thread->flags); do { - exp = atomic_load(&thread->flags); - } while (!atomic_compare_exchange_strong(&thread->flags, &exp, exp | THREAD_SUSPENDING)); + if (!(exp & THREAD_RUNNING)) { + break; + } + } while (!atomic_compare_exchange_strong(&thread->flags, &exp, exp | setfl)); badge_err_set_ok(ec); } } else { badge_err_set(ec, ELOC_THREADS, ECAUSE_NOTFOUND); } - assert_always(mutex_release_shared(NULL, &threads_mtx)); + + if (tid == self->id) { + if (suspend_kernel) { + // Yield to suspend and implicitly re-enable IRQs. + thread_yield(); + } else { + // Re-enable IRQs and wait for the drop to user mode to suspend. + irq_enable(); + } + } else { + // If suspending another thread, release mutex. + assert_always(mutex_release_shared(NULL, &threads_mtx)); + } } // Try to mark a thread as running if a thread is allowed to be resumed. @@ -613,35 +716,54 @@ static bool thread_try_mark_running(sched_thread_t *thread, bool now) { nextval |= THREAD_STARTNOW; } } while (!atomic_compare_exchange_strong(&thread->flags, &cur, nextval)); - return true; + return !(cur & THREAD_RUNNING); } // Resumes a previously suspended thread or starts it. 
-static void thread_resume_impl(badge_err_t *ec, tid_t tid, bool now) { - assert_always(mutex_acquire_shared(NULL, &threads_mtx, TIMESTAMP_US_MAX)); +static void thread_resume_impl(badge_err_t *ec, tid_t tid, bool now, bool from_isr) { + if (from_isr) { + assert_always(mutex_acquire_shared_from_isr(NULL, &threads_mtx, TIMESTAMP_US_MAX)); + } else { + assert_always(mutex_acquire_shared(NULL, &threads_mtx, TIMESTAMP_US_MAX)); + } sched_thread_t *thread = find_thread(tid); if (thread) { if (thread_try_mark_running(thread, now)) { - irq_disable(); - thread_handoff(thread, smp_cur_cpu(), true, __INT_MAX__); - irq_enable(); + irq_disable_if(!from_isr); + thread_handoff(thread, smp_cur_cpu(), true, 0); + irq_enable_if(!from_isr); } badge_err_set_ok(ec); } else { badge_err_set(ec, ELOC_THREADS, ECAUSE_NOTFOUND); } - assert_always(mutex_release_shared(NULL, &threads_mtx)); + if (from_isr) { + assert_always(mutex_release_shared_from_isr(NULL, &threads_mtx)); + } else { + assert_always(mutex_release_shared(NULL, &threads_mtx)); + } } // Resumes a previously suspended thread or starts it. void thread_resume(badge_err_t *ec, tid_t tid) { - thread_resume_impl(ec, tid, false); + thread_resume_impl(ec, tid, false, false); } // Resumes a previously suspended thread or starts it. // Immediately schedules the thread instead of putting it in the queue first. void thread_resume_now(badge_err_t *ec, tid_t tid) { - thread_resume_impl(ec, tid, true); + thread_resume_impl(ec, tid, true, false); +} + +// Resumes a previously suspended thread or starts it from an ISR. +void thread_resume_from_isr(badge_err_t *ec, tid_t tid) { + thread_resume_impl(ec, tid, false, true); +} + +// Resumes a previously suspended thread or starts it from an ISR. +// Immediately schedules the thread instead of putting it in the queue first. 
+void thread_resume_now_from_isr(badge_err_t *ec, tid_t tid) { + thread_resume_impl(ec, tid, true, true); } // Returns whether a thread is running; it is neither suspended nor has it exited. @@ -660,41 +782,6 @@ bool thread_is_running(badge_err_t *ec, tid_t tid) { } -// Returns the current thread ID. -tid_t sched_current_tid() { - return isr_ctx_get()->thread->id; -} - -// Returns the current thread struct. -sched_thread_t *sched_current_thread() { - irq_disable(); - sched_thread_t *thread = isr_ctx_get()->thread; - irq_enable(); - return thread; -} - -// Returns the current thread without using a critical section. -sched_thread_t *sched_current_thread_unsafe() { - return isr_ctx_get()->thread; -} - -// Returns the associated thread struct. -sched_thread_t *sched_get_thread(tid_t tid) { - assert_always(mutex_acquire_shared(NULL, &threads_mtx, TIMESTAMP_US_MAX)); - sched_thread_t *thread = find_thread(tid); - assert_always(mutex_release_shared(NULL, &threads_mtx)); - return thread; -} - - -// Explicitly yield to the scheduler; the scheduler may run other threads without waiting for preemption. -// Use this function to reduce the CPU time used by a thread. -void sched_yield() { - irq_disable(); - sched_request_switch_from_isr(); - isr_context_switch(); -} - // Exits the current thread. // If the thread is detached, resources will be cleaned up. void thread_exit(int code) { @@ -723,6 +810,6 @@ void thread_join(tid_t tid) { return; } assert_always(mutex_release_shared(NULL, &threads_mtx)); - sched_yield(); + thread_yield(); } } diff --git a/kernel/src/scheduler/syscall_impl.c b/kernel/src/scheduler/syscall_impl.c deleted file mode 100644 index 834e20b..0000000 --- a/kernel/src/scheduler/syscall_impl.c +++ /dev/null @@ -1,9 +0,0 @@ - -// SPDX-License-Identifier: MIT - -#include "scheduler/scheduler.h" -#include "syscall.h" - -void syscall_thread_yield() { - sched_yield(); -}