diff --git a/arch/arm/core/cortex_a_r/CMakeLists.txt b/arch/arm/core/cortex_a_r/CMakeLists.txt
index d4e18a614f0a..7d18e0e610d8 100644
--- a/arch/arm/core/cortex_a_r/CMakeLists.txt
+++ b/arch/arm/core/cortex_a_r/CMakeLists.txt
@@ -24,4 +24,4 @@ zephyr_library_sources_ifdef(CONFIG_SEMIHOST semihost.c)
 zephyr_library_sources_ifdef(CONFIG_THREAD_LOCAL_STORAGE __aeabi_read_tp.S)
 zephyr_library_sources_ifdef(CONFIG_ARCH_CACHE cache.c)
 zephyr_library_sources_ifdef(CONFIG_USE_SWITCH switch.S)
-zephyr_library_sources_ifndef(CONFIG_USE_SWITCH swap.c swap_helper.S exc_exit.S)
+zephyr_library_sources_ifndef(CONFIG_USE_SWITCH swap_helper.S exc_exit.S)
diff --git a/arch/arm/core/cortex_a_r/swap.c b/arch/arm/core/cortex_a_r/swap.c
deleted file mode 100644
index cf123e8ed932..000000000000
--- a/arch/arm/core/cortex_a_r/swap.c
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (c) 2018 Linaro, Limited
- *
- * SPDX-License-Identifier: Apache-2.0
- */
-
-#include
-#include
-
-#include
-
-/* The 'key' actually represents the BASEPRI register
- * prior to disabling interrupts via the BASEPRI mechanism.
- *
- * arch_swap() itself does not do much.
- */
-int arch_swap(unsigned int key)
-{
-	/* store off key and return value */
-	arch_current_thread()->arch.basepri = key;
-	arch_current_thread()->arch.swap_return_value = -EAGAIN;
-
-	z_arm_cortex_r_svc();
-	irq_unlock(key);
-
-	/* Context switch is performed here. Returning implies the
-	 * thread has been context-switched-in again.
-	 */
-	return arch_current_thread()->arch.swap_return_value;
-}
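Both the file deleted above and the inline replacement added further down implement the same contract: the caller enters with interrupts locked, arch_swap() consumes the lock key, switches the thread out, and returns only after the thread has been switched back in, yielding either the preset -EAGAIN or a value posted by a waker through arch_thread_return_value_set(). A minimal caller-side sketch, with a hypothetical pend_current_thread() standing in for Zephyr's scheduler internals (arch_swap() itself is kernel-internal, documented in kernel_arch_interface.h):

#include <zephyr/kernel.h>

/* Hypothetical helper, not a Zephyr API: removes the running thread
 * from the ready queue so the swap picks another thread.
 */
extern void pend_current_thread(void);

static int blocking_primitive(void)
{
	unsigned int key = irq_lock();

	pend_current_thread();

	/* Switch out.  Execution resumes here only once this thread is
	 * switched back in; the return value is -EAGAIN unless a waker
	 * posted one via arch_thread_return_value_set().
	 */
	return arch_swap(key);
}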
diff --git a/arch/arm/core/cortex_m/CMakeLists.txt b/arch/arm/core/cortex_m/CMakeLists.txt
index b220e6c81e84..05723811929a 100644
--- a/arch/arm/core/cortex_m/CMakeLists.txt
+++ b/arch/arm/core/cortex_m/CMakeLists.txt
@@ -11,7 +11,6 @@ zephyr_library_sources(
   scb.c
   thread_abort.c
   vector_table.S
-  swap.c
   swap_helper.S
   irq_manage.c
   prep_c.c
diff --git a/arch/arm/core/cortex_m/swap.c b/arch/arm/core/cortex_m/swap.c
deleted file mode 100644
index 72eade765596..000000000000
--- a/arch/arm/core/cortex_m/swap.c
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2018 Linaro, Limited
- *
- * SPDX-License-Identifier: Apache-2.0
- */
-
-#include
-#include
-
-#include
-
-/* The 'key' actually represents the BASEPRI register
- * prior to disabling interrupts via the BASEPRI mechanism.
- *
- * arch_swap() itself does not do much.
- *
- * It simply stores the intlock key (the BASEPRI value) parameter into
- * current->basepri, and then triggers a PendSV exception, which does
- * the heavy lifting of context switching.
-
- * This is the only place we have to save BASEPRI since the other paths to
- * z_arm_pendsv all come from handling an interrupt, which means we know the
- * interrupts were not locked: in that case the BASEPRI value is 0.
- *
- * Given that arch_swap() is called to effect a cooperative context switch,
- * only the caller-saved integer registers need to be saved in the thread of the
- * outgoing thread. This is all performed by the hardware, which stores it in
- * its exception stack frame, created when handling the z_arm_pendsv exception.
- *
- * On ARMv6-M, the intlock key is represented by the PRIMASK register,
- * as BASEPRI is not available.
- */
-int arch_swap(unsigned int key)
-{
-	/* store off key and return value */
-	arch_current_thread()->arch.basepri = key;
-	arch_current_thread()->arch.swap_return_value = -EAGAIN;
-
-	/* set pending bit to make sure we will take a PendSV exception */
-	SCB->ICSR |= SCB_ICSR_PENDSVSET_Msk;
-
-	/* clear mask or enable all irqs to take a pendsv */
-	irq_unlock(0);
-
-	/* Context switch is performed here. Returning implies the
-	 * thread has been context-switched-in again.
-	 */
-	return arch_current_thread()->arch.swap_return_value;
-}
diff --git a/arch/arm/include/cortex_a_r/kernel_arch_func.h b/arch/arm/include/cortex_a_r/kernel_arch_func.h
index ecd467f3c91e..9ac2b2a1d908 100644
--- a/arch/arm/include/cortex_a_r/kernel_arch_func.h
+++ b/arch/arm/include/cortex_a_r/kernel_arch_func.h
@@ -37,6 +37,21 @@ static ALWAYS_INLINE void arch_kernel_init(void)
 
 #ifndef CONFIG_USE_SWITCH
 
+static ALWAYS_INLINE int arch_swap(unsigned int key)
+{
+	/* store off key and return value */
+	arch_current_thread()->arch.basepri = key;
+	arch_current_thread()->arch.swap_return_value = -EAGAIN;
+
+	z_arm_cortex_r_svc();
+	irq_unlock(key);
+
+	/* Context switch is performed here. Returning implies the
+	 * thread has been context-switched-in again.
+	 */
+	return arch_current_thread()->arch.swap_return_value;
+}
+
 static ALWAYS_INLINE void
 arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
 {
diff --git a/arch/arm/include/cortex_m/kernel_arch_func.h b/arch/arm/include/cortex_m/kernel_arch_func.h
index bb79e3941066..9183eb691b14 100644
--- a/arch/arm/include/cortex_m/kernel_arch_func.h
+++ b/arch/arm/include/cortex_m/kernel_arch_func.h
@@ -84,6 +84,25 @@ extern FUNC_NORETURN void z_arm_userspace_enter(k_thread_entry_t user_entry,
 
 extern void z_arm_fatal_error(unsigned int reason, const struct arch_esf *esf);
 
+static ALWAYS_INLINE int arch_swap(unsigned int key)
+{
+	/* store off key and return value */
+	arch_current_thread()->arch.basepri = key;
+	arch_current_thread()->arch.swap_return_value = -EAGAIN;
+
+	/* set pending bit to make sure we will take a PendSV exception */
+	SCB->ICSR |= SCB_ICSR_PENDSVSET_Msk;
+
+	/* clear mask or enable all irqs to take a pendsv */
+	irq_unlock(0);
+
+	/* Context switch is performed here. Returning implies the
+	 * thread has been context-switched-in again.
+	 */
+	return arch_current_thread()->arch.swap_return_value;
+}
+
+
 #endif /* _ASMLANGUAGE */
 
 #ifdef __cplusplus
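The Cortex-M version works because PendSV is configured at the lowest exception priority: setting PENDSVSET inside the critical section only marks the exception pending, and it is actually taken once irq_unlock(0) unmasks interrupts, so the unlock doubles as the switch point. The same trigger pattern in isolation, assuming a bare CMSIS environment (the header name below is an assumption and varies by project):

#include <cmsis_core.h>	/* assumption: CMSIS core header providing SCB */

void pend_context_switch(void)
{
	/* Mark PendSV pending; at lowest priority it is taken only after
	 * all other exceptions have returned and interrupts are unmasked.
	 */
	SCB->ICSR |= SCB_ICSR_PENDSVSET_Msk;

	/* Make sure the ICSR write has taken effect before running any
	 * code that assumes the context switch will happen.
	 */
	__DSB();
	__ISB();
}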
diff --git a/arch/mips/include/kernel_arch_func.h b/arch/mips/include/kernel_arch_func.h
index 7c35d1bf864a..63ed7a65cf26 100644
--- a/arch/mips/include/kernel_arch_func.h
+++ b/arch/mips/include/kernel_arch_func.h
@@ -47,6 +47,8 @@ static inline bool arch_is_in_isr(void)
 	return _current_cpu->nested != 0U;
 }
 
+int arch_swap(unsigned int key);
+
 #ifdef CONFIG_IRQ_OFFLOAD
 void z_irq_do_offload(void);
 #endif
diff --git a/arch/nios2/include/kernel_arch_func.h b/arch/nios2/include/kernel_arch_func.h
index 464ba32a7a73..c325ea49b49b 100644
--- a/arch/nios2/include/kernel_arch_func.h
+++ b/arch/nios2/include/kernel_arch_func.h
@@ -51,6 +51,8 @@ static inline bool arch_is_in_isr(void)
 	return _kernel.cpus[0].nested != 0U;
 }
 
+int arch_swap(unsigned int key);
+
 #ifdef CONFIG_IRQ_OFFLOAD
 void z_irq_do_offload(void);
 #endif
diff --git a/arch/posix/include/kernel_arch_func.h b/arch/posix/include/kernel_arch_func.h
index 98289d5d7c68..ceba8a850934 100644
--- a/arch/posix/include/kernel_arch_func.h
+++ b/arch/posix/include/kernel_arch_func.h
@@ -42,6 +42,8 @@ static inline bool arch_is_in_isr(void)
 	return _kernel.cpus[0].nested != 0U;
 }
 
+int arch_swap(unsigned int key);
+
 #endif /* _ASMLANGUAGE */
 
 #endif /* ZEPHYR_ARCH_POSIX_INCLUDE_KERNEL_ARCH_FUNC_H_ */
diff --git a/arch/x86/include/ia32/kernel_arch_func.h b/arch/x86/include/ia32/kernel_arch_func.h
index 878281c7ba89..686bc18989b7 100644
--- a/arch/x86/include/ia32/kernel_arch_func.h
+++ b/arch/x86/include/ia32/kernel_arch_func.h
@@ -37,6 +37,8 @@ arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
 
 extern void arch_cpu_atomic_idle(unsigned int key);
 
+int arch_swap(unsigned int key);
+
 /* ASM code to fiddle with registers to enable the MMU with PAE paging */
 void z_x86_enable_paging(void);
 
diff --git a/kernel/include/kernel_arch_interface.h b/kernel/include/kernel_arch_interface.h
index 0b7504c5b9ec..05629c26a5f6 100644
--- a/kernel/include/kernel_arch_interface.h
+++ b/kernel/include/kernel_arch_interface.h
@@ -129,7 +129,10 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
  * location, which must be updated.
  */
 static inline void arch_switch(void *switch_to, void **switched_from);
-#else
+#endif /* CONFIG_USE_SWITCH */
+
+#if !defined(CONFIG_USE_SWITCH) || defined(__DOXYGEN__)
+#if defined(__DOXYGEN__)
 /**
  * Cooperatively context switch
  *
@@ -143,6 +146,7 @@ static inline void arch_switch(void *switch_to, void **switched_from);
  * blocking operation.
  */
 int arch_swap(unsigned int key);
+#endif /* __DOXYGEN__ */
 
 /**
  * Set the return value for the specified thread.
@@ -154,7 +158,7 @@ int arch_swap(unsigned int key);
  */
 static ALWAYS_INLINE void
 arch_thread_return_value_set(struct k_thread *thread, unsigned int value);
-#endif /* CONFIG_USE_SWITCH */
+#endif /* !CONFIG_USE_SWITCH || __DOXYGEN__ */
 
 #ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
 /**
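Because the generic arch_swap() prototype is now visible only to Doxygen, every architecture that still defines the function out of line (MIPS, Nios II, POSIX, and x86 IA-32 above) must carry its own declaration; without it the definitions would have no visible prototype and builds with -Wmissing-prototypes would warn. Condensing the three hunks, the resulting guard structure in kernel_arch_interface.h is:

#ifdef CONFIG_USE_SWITCH
static inline void arch_switch(void *switch_to, void **switched_from);
#endif /* CONFIG_USE_SWITCH */

#if !defined(CONFIG_USE_SWITCH) || defined(__DOXYGEN__)
#if defined(__DOXYGEN__)
/* Documentation stub only; real builds get arch_swap() either as a
 * static inline from the arch's kernel_arch_func.h or as a per-arch
 * prototype with an out-of-line definition.
 */
int arch_swap(unsigned int key);
#endif /* __DOXYGEN__ */

static ALWAYS_INLINE void
arch_thread_return_value_set(struct k_thread *thread, unsigned int value);
#endif /* !CONFIG_USE_SWITCH || __DOXYGEN__ */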
diff --git a/tests/arch/arm/arm_thread_swap/src/arm_thread_arch.c b/tests/arch/arm/arm_thread_swap/src/arm_thread_arch.c
index 433a6bb9d9a1..6a06c226063a 100644
--- a/tests/arch/arm/arm_thread_swap/src/arm_thread_arch.c
+++ b/tests/arch/arm/arm_thread_swap/src/arm_thread_arch.c
@@ -429,6 +429,13 @@ static void alt_thread_entry(void *p1, void *p2, void *p3)
 		"Alternative thread: switch flag not false on thread exit\n");
 }
 
+#if !defined(CONFIG_NO_OPTIMIZATIONS)
+static int __noinline arch_swap_wrapper(void)
+{
+	return arch_swap(BASEPRI_MODIFIED_1);
+}
+#endif
+
 ZTEST(arm_thread_swap, test_arm_thread_swap)
 {
 	int test_flag;
@@ -609,9 +616,11 @@ ZTEST(arm_thread_swap, test_arm_thread_swap)
 
 	/* Fake a different irq_unlock key when performing swap.
 	 * This will be verified by the alternative test thread.
	 *
	 * Force an out-of-line call to arch_swap() so the compiler cannot
	 * clobber the callee-saved registers now that arch_swap() is inlined.
 	 */
-	register int swap_return_val __asm__("r0") =
-		arch_swap(BASEPRI_MODIFIED_1);
+	register int swap_return_val __asm__("r0") = arch_swap_wrapper();
 
 #endif /* CONFIG_NO_OPTIMIZATIONS */
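The wrapper matters because the test verifies that r4-r11 hold known values across the swap, which the AAPCS only guarantees across a genuine call boundary (a callee may clobber just r0-r3 and r12); with arch_swap() now a static inline there is no such boundary left, so the test restores one with __noinline, Zephyr's macro for the GCC/Clang noinline attribute. A standalone illustration of the property being relied on:

/* Illustration only, not part of the patch.  Across the real call to
 * opaque() the compiler must keep 'live' in a callee-saved register
 * (r4-r11) or spill it to the stack, because the callee may clobber
 * only r0-r3 and r12.  If opaque() were inlined, the compiler could
 * shuffle r4-r11 freely, and a test inspecting those registers after
 * the call could not rely on their contents.
 */
extern int opaque(void);	/* out-of-line: forces a call boundary */

int caller(int live)
{
	int ret = opaque();

	return ret + live;
}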