From b25702e3d788a8f50d6a13831f52e9fc6701de7a Mon Sep 17 00:00:00 2001
From: Han-Jiang277
Date: Mon, 13 Oct 2025 23:02:27 +0800
Subject: [PATCH 01/23] feat: Add adapter's C bindings and CMSIS SysTick

---
 .gitignore | 2 +
 adapter/BUILD.gn | 22 +
 adapter/cmsis/BUILD.gn | 75 ++
 adapter/cmsis/cmsis_header/BUILD.gn | 68 ++
 adapter/cmsis/cmsis_header/RTOS1/cmsis_os.h | 677 ++++++++++++++++
 adapter/cmsis/cmsis_header/RTOS2/cmsis_os2.h | 756 ++++++++++++++++++
 adapter/cmsis/cmsis_header/RTOS2/os_tick.h | 80 ++
 adapter/cmsis/cmsis_header/RTOS2/wrapper.h | 16 +
 adapter/cmsis/cmsis_header/src/lib.rs | 23 +
 adapter/cmsis/src/lib.rs | 87 ++
 adapter/cmsis/src/os1/mod.rs | 15 +
 adapter/cmsis/src/os1/tick.rs | 42 +
 adapter/cmsis/src/os2/mod.rs | 15 +
 adapter/cmsis/src/os2/tick.rs | 69 ++
 adapter/coverage.checker | 3 +
 adapter/unittests.checker | 3 +
 kconfig/config/.config | 61 ++
 kconfig/config/Kconfig | 21 +
 .../config/qemu_mps2_an385/coverage/defconfig | 7 +
 .../config/qemu_mps2_an385/debug/defconfig | 7 +
 .../config/qemu_mps2_an385/release/defconfig | 7 +
 .../config/qemu_mps3_an547/debug/defconfig | 8 +
 .../config/qemu_mps3_an547/release/defconfig | 8 +
 kernel/src/arch/aarch64/mod.rs | 2 +-
 kernel/src/arch/arm/mod.rs | 4 +-
 kernel/src/arch/mod.rs | 6 +-
 kernel/src/arch/riscv64/mod.rs | 2 +-
 kernel/src/lib.rs | 4 +-
 kernel/src/support.rs | 2 +-
 kernel/src/sync/event_flags.rs | 2 +-
 kernel/src/time/mod.rs | 2 +-
 31 files changed, 2084 insertions(+), 12 deletions(-)
 create mode 100644 adapter/BUILD.gn
 create mode 100644 adapter/cmsis/BUILD.gn
 create mode 100644 adapter/cmsis/cmsis_header/BUILD.gn
 create mode 100644 adapter/cmsis/cmsis_header/RTOS1/cmsis_os.h
 create mode 100644 adapter/cmsis/cmsis_header/RTOS2/cmsis_os2.h
 create mode 100644 adapter/cmsis/cmsis_header/RTOS2/os_tick.h
 create mode 100644 adapter/cmsis/cmsis_header/RTOS2/wrapper.h
 create mode 100644 adapter/cmsis/cmsis_header/src/lib.rs
 create mode 100644 adapter/cmsis/src/lib.rs
 create mode 100644 adapter/cmsis/src/os1/mod.rs
 create mode 100644 adapter/cmsis/src/os1/tick.rs
 create mode 100644 adapter/cmsis/src/os2/mod.rs
 create mode 100644 adapter/cmsis/src/os2/tick.rs
 create mode 100644 adapter/coverage.checker
 create mode 100644 adapter/unittests.checker
 create mode 100644 kconfig/config/.config

diff --git a/.gitignore b/.gitignore
index 9138c8e3..1baa32c6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,3 +2,5 @@ boards/src/bindings.rs
 target
 *.rej
 *.orig
+*.config
+*.config.old
diff --git a/adapter/BUILD.gn b/adapter/BUILD.gn
new file mode 100644
index 00000000..2b98242f
--- /dev/null
+++ b/adapter/BUILD.gn
@@ -0,0 +1,22 @@
+# Copyright (c) 2025 vivo Mobile Communication Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +import("//build/templates/build_template.gni") +import("//build/toolchain/blueos.gni") +import("//kernel/common_crate_rustflags.gni") + +group("check_adapter") { + testonly = true + deps = [ "cmsis:run_cmsis_unittest($libc_toolchain)" ] +} diff --git a/adapter/cmsis/BUILD.gn b/adapter/cmsis/BUILD.gn new file mode 100644 index 00000000..dc83ae8c --- /dev/null +++ b/adapter/cmsis/BUILD.gn @@ -0,0 +1,75 @@ +# Copyright (c) 2025 vivo Mobile Communication Co., Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import("//build/templates/build_template.gni") +import("//build/toolchain/blueos.gni") +import("//kernel/common_crate_rustflags.gni") + +adapter_deps = [ + "cmsis_header:cmsis_os", + "cmsis_header:cmsis_os2", + "//kernel/infra:blueos_infra", + "//kernel/kconfig:blueos_kconfig", + "//kernel/kernel:blueos", +] + +build_rust("cmsis_adapter") { + sources = [ "src/lib.rs" ] + edition = "2021" + proc_macro_deps = [ "//external/rust-delegate/v0.13.3:delegate" ] + deps = adapter_deps + configs += [ "//kernel/kconfig:kconfigs" ] +} + +build_rust("cmsis_adapter_unittest") { + testonly = true + crate_name = "cmsis_adapter_unittest" + crate_type = "bin" + sources = [ "src/lib.rs" ] + edition = "2021" + proc_macro_deps = [ + "//external/rust-delegate/v0.13.3:delegate", + "//kernel/test_harness:blueos_test_macro", + ] + deps = adapter_deps + deps += [ + ":cmsis_adapter", + "//external/semihosting/v0.1.20:semihosting", + ] + configs += [ "//kernel/kconfig:kconfigs" ] + rustflags = common_crate_rustflags + rustflags += test_image_rustflags +} + +gen_qemu_runner("cmsis_unittest_runner") { + testonly = true + img = ":cmsis_adapter_unittest" + qemu = "$qemu_exe" + machine = "$machine" + qemu_args = qemu_extra_args + block_img = "cmsis_unittest_block.img" + block_args = qemu_block_args + semihosting = true +} + +run_qemu_check("run_cmsis_unittest") { + testonly = true + runner = ":cmsis_unittest_runner" + if (coverage) { + img = ":cmsis_adapter_unittest" + checker = "//kernel/adapter/coverage.checker" + } else { + checker = "//kernel/adapter/unittests.checker" + } +} \ No newline at end of file diff --git a/adapter/cmsis/cmsis_header/BUILD.gn b/adapter/cmsis/cmsis_header/BUILD.gn new file mode 100644 index 00000000..865a9d3a --- /dev/null +++ b/adapter/cmsis/cmsis_header/BUILD.gn @@ -0,0 +1,68 @@ +# Copyright (c) 2025 vivo Mobile Communication Co., Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import("//build/templates/build_template.gni") + +bindgen("cmsis_os2_header") { + bindgen_flags = [ + "--use-core", + "--ctypes-prefix", + "core::ffi", + ] + sources = [ "RTOS2/wrapper.h" ] + inputs = [ + "RTOS2/cmsis_os2.h", + "RTOS2/os_tick.h", + ] + cflags = [ + "--target=$llvm_target", + "-fshort-enums", + "-fsigned-char", + "-nostdlib", + "-isystem", + "$sysroot_path/include", + ] +} + +bindgen("cmsis_os_header") { + bindgen_flags = [ + "--use-core", + "--ctypes-prefix", + "core::ffi", + ] + sources = [ "RTOS1/cmsis_os.h" ] + cflags = [ + "--target=$llvm_target", + "-fshort-enums", + "-fsigned-char", + "-isystem", + "$sysroot_path/include", + ] +} + +build_rust("cmsis_os") { + crate_type = "rlib" + sources = [ "src/lib.rs" ] + deps = [ ":cmsis_os_header" ] + output_file = get_target_outputs(":cmsis_os_header") + rustenv = [ "BINDGEN_DIR=" + rebase_path(output_file[0]) ] +} + +build_rust("cmsis_os2") { + crate_type = "rlib" + sources = [ "src/lib.rs" ] + deps = [ ":cmsis_os2_header" ] + output_file = get_target_outputs(":cmsis_os2_header") + rustenv = [ "BINDGEN_DIR=" + rebase_path(output_file[0]) ] +} diff --git a/adapter/cmsis/cmsis_header/RTOS1/cmsis_os.h b/adapter/cmsis/cmsis_header/RTOS1/cmsis_os.h new file mode 100644 index 00000000..11ce59ca --- /dev/null +++ b/adapter/cmsis/cmsis_header/RTOS1/cmsis_os.h @@ -0,0 +1,677 @@ +/* ---------------------------------------------------------------------- + * $Date: 5. February 2013 + * $Revision: V1.02 + * + * Project: CMSIS-RTOS API + * Title: cmsis_os.h RTX header file + * + * Version 0.02 + * Initial Proposal Phase + * Version 0.03 + * osKernelStart added, optional feature: main started as thread + * osSemaphores have standard behavior + * osTimerCreate does not start the timer, added osTimerStart + * osThreadPass is renamed to osThreadYield + * Version 1.01 + * Support for C++ interface + * - const attribute removed from the osXxxxDef_t typedef's + * - const attribute added to the osXxxxDef macros + * Added: osTimerDelete, osMutexDelete, osSemaphoreDelete + * Added: osKernelInitialize + * Version 1.02 + * Control functions for short timeouts in microsecond resolution: + * Added: osKernelSysTick, osKernelSysTickFrequency, osKernelSysTickMicroSec + * Removed: osSignalGet + *---------------------------------------------------------------------------- + * + * Copyright (c) 2013-2017 ARM LIMITED. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *---------------------------------------------------------------------------*/ + + + #ifndef _CMSIS_OS_H + #define _CMSIS_OS_H + + #define osCMSIS 0x10002U ///< CMSIS-RTOS API version (main [31:16] .sub [15:0]) + + #define osCMSIS_RTX ((4<<16)|82) ///< RTOS identification and version (main [31:16] .sub [15:0]) + + #define osKernelSystemId "RTX V4.82" ///< RTOS identification string + + + #define osFeature_MainThread 1 ///< main can be thread + #define osFeature_Pool 1 ///< Memory Pools available + #define osFeature_MailQ 1 ///< Mail Queues available + #define osFeature_MessageQ 1 ///< Message Queues available + #define osFeature_Signals 16 ///< 16 Signal Flags available per thread + #define osFeature_Semaphore 65535 ///< Maximum count for \ref osSemaphoreCreate function + #define osFeature_Wait 0 ///< osWait not available + #define osFeature_SysTick 1 ///< osKernelSysTick functions available + + #if defined(__CC_ARM) + #define os_InRegs __value_in_regs // Compiler specific: force struct in registers + #else + #define os_InRegs + #endif + + #if defined(__CC_ARM) + #define __NO_RETURN __declspec(noreturn) + #elif defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #define __NO_RETURN __attribute__((noreturn)) + #elif defined(__GNUC__) || defined(__clang__) + #define __NO_RETURN __attribute__((noreturn)) + #elif defined(__ICCARM__) + #define __NO_RETURN __noreturn + #else + #define __NO_RETURN + #endif + + #include + #include + + #ifdef __cplusplus + extern "C" + { + #endif + + + // ==== Enumeration, structures, defines ==== + + /// Priority used for thread control. + typedef enum { + osPriorityIdle = -3, ///< priority: idle (lowest) + osPriorityLow = -2, ///< priority: low + osPriorityBelowNormal = -1, ///< priority: below normal + osPriorityNormal = 0, ///< priority: normal (default) + osPriorityAboveNormal = +1, ///< priority: above normal + osPriorityHigh = +2, ///< priority: high + osPriorityRealtime = +3, ///< priority: realtime (highest) + osPriorityError = 0x84, ///< system cannot determine priority or thread has illegal priority + os_priority_reserved = 0x7FFFFFFF ///< prevent from enum down-size compiler optimization. + } osPriority; + + /// Timeout value. + #define osWaitForever 0xFFFFFFFFU ///< wait forever timeout value + + /// Status code values returned by CMSIS-RTOS functions. + typedef enum { + osOK = 0, ///< function completed; no error or event occurred. + osEventSignal = 0x08, ///< function completed; signal event occurred. + osEventMessage = 0x10, ///< function completed; message event occurred. + osEventMail = 0x20, ///< function completed; mail event occurred. + osEventTimeout = 0x40, ///< function completed; timeout occurred. + osErrorParameter = 0x80, ///< parameter error: a mandatory parameter was missing or specified an incorrect object. + osErrorResource = 0x81, ///< resource not available: a specified resource was not available. + osErrorTimeoutResource = 0xC1, ///< resource not available within given time: a specified resource was not available within the timeout period. + osErrorISR = 0x82, ///< not allowed in ISR context: the function cannot be called from interrupt service routines. + osErrorISRRecursive = 0x83, ///< function called multiple times from ISR with same object. + osErrorPriority = 0x84, ///< system cannot determine priority or thread has illegal priority. + osErrorNoMemory = 0x85, ///< system is out of memory: it was impossible to allocate or reserve memory for the operation. 
+ osErrorValue = 0x86, ///< value of a parameter is out of range. + osErrorOS = 0xFF, ///< unspecified RTOS error: run-time error but no other error message fits. + os_status_reserved = 0x7FFFFFFF ///< prevent from enum down-size compiler optimization. + } osStatus; + + + /// Timer type value for the timer definition. + typedef enum { + osTimerOnce = 0, ///< one-shot timer + osTimerPeriodic = 1 ///< repeating timer + } os_timer_type; + + /// Entry point of a thread. + typedef void (*os_pthread) (void const *argument); + + /// Entry point of a timer call back function. + typedef void (*os_ptimer) (void const *argument); + + // >>> the following data type definitions may shall adapted towards a specific RTOS + + /// Thread ID identifies the thread (pointer to a thread control block). + typedef struct os_thread_cb *osThreadId; + + /// Timer ID identifies the timer (pointer to a timer control block). + typedef struct os_timer_cb *osTimerId; + + /// Mutex ID identifies the mutex (pointer to a mutex control block). + typedef struct os_mutex_cb *osMutexId; + + /// Semaphore ID identifies the semaphore (pointer to a semaphore control block). + typedef struct os_semaphore_cb *osSemaphoreId; + + /// Pool ID identifies the memory pool (pointer to a memory pool control block). + typedef struct os_pool_cb *osPoolId; + + /// Message ID identifies the message queue (pointer to a message queue control block). + typedef struct os_messageQ_cb *osMessageQId; + + /// Mail ID identifies the mail queue (pointer to a mail queue control block). + typedef struct os_mailQ_cb *osMailQId; + + + /// Thread Definition structure contains startup information of a thread. + typedef struct os_thread_def { + os_pthread pthread; ///< start address of thread function + osPriority tpriority; ///< initial thread priority + uint32_t instances; ///< maximum number of instances of that thread function + uint32_t stacksize; ///< stack size requirements in bytes; 0 is default stack size + } osThreadDef_t; + + /// Timer Definition structure contains timer parameters. + typedef struct os_timer_def { + os_ptimer ptimer; ///< start address of a timer function + void *timer; ///< pointer to internal data + } osTimerDef_t; + + /// Mutex Definition structure contains setup information for a mutex. + typedef struct os_mutex_def { + void *mutex; ///< pointer to internal data + } osMutexDef_t; + + /// Semaphore Definition structure contains setup information for a semaphore. + typedef struct os_semaphore_def { + void *semaphore; ///< pointer to internal data + } osSemaphoreDef_t; + + /// Definition structure for memory block allocation. + typedef struct os_pool_def { + uint32_t pool_sz; ///< number of items (elements) in the pool + uint32_t item_sz; ///< size of an item + void *pool; ///< pointer to memory for pool + } osPoolDef_t; + + /// Definition structure for message queue. + typedef struct os_messageQ_def { + uint32_t queue_sz; ///< number of elements in the queue + void *pool; ///< memory array for messages + } osMessageQDef_t; + + /// Definition structure for mail queue. + typedef struct os_mailQ_def { + uint32_t queue_sz; ///< number of elements in the queue + uint32_t item_sz; ///< size of an item + void *pool; ///< memory array for mail + } osMailQDef_t; + + /// Event structure contains detailed information about an event. 
+ typedef struct { + osStatus status; ///< status code: event or error information + union { + uint32_t v; ///< message as 32-bit value + void *p; ///< message or mail as void pointer + int32_t signals; ///< signal flags + } value; ///< event value + union { + osMailQId mail_id; ///< mail id obtained by \ref osMailCreate + osMessageQId message_id; ///< message id obtained by \ref osMessageCreate + } def; ///< event definition + } osEvent; + + + // ==== Kernel Control Functions ==== + + /// Initialize the RTOS Kernel for creating objects. + /// \return status code that indicates the execution status of the function. + osStatus osKernelInitialize (void); + + /// Start the RTOS Kernel. + /// \return status code that indicates the execution status of the function. + osStatus osKernelStart (void); + + /// Check if the RTOS kernel is already started. + /// \return 0 RTOS is not started, 1 RTOS is started. + int32_t osKernelRunning(void); + + #if (defined (osFeature_SysTick) && (osFeature_SysTick != 0)) // System Timer available + + /// \cond INTERNAL_VARIABLES + extern uint32_t const os_tickfreq; + extern uint16_t const os_tickus_i; + extern uint16_t const os_tickus_f; + /// \endcond + + /// Get the RTOS kernel system timer counter. + /// \return RTOS kernel system timer as 32-bit value + uint32_t osKernelSysTick (void); + + /// The RTOS kernel system timer frequency in Hz. + /// \note Reflects the system timer setting and is typically defined in a configuration file. + #define osKernelSysTickFrequency os_tickfreq + + /// Convert a microseconds value to a RTOS kernel system timer value. + /// \param microsec time value in microseconds. + /// \return time value normalized to the \ref osKernelSysTickFrequency + #define osKernelSysTickMicroSec(microsec) (((uint64_t)microsec * (osKernelSysTickFrequency)) / 1000000) + /* + #define osKernelSysTickMicroSec(microsec) ((microsec * os_tickus_i) + ((microsec * os_tickus_f) >> 16)) + */ + + #endif // System Timer available + + // ==== Thread Management ==== + + /// Create a Thread Definition with function, priority, and stack requirements. + /// \param name name of the thread function. + /// \param priority initial priority of the thread function. + /// \param instances number of possible thread instances. + /// \param stacksz stack size (in bytes) requirements for the thread function. + /// macro body is implementation specific in every CMSIS-RTOS. + #if defined (osObjectsExternal) // object is external + #define osThreadDef(name, priority, instances, stacksz) \ + extern const osThreadDef_t os_thread_def_##name + #else // define the object + #define osThreadDef(name, priority, instances, stacksz) \ + const osThreadDef_t os_thread_def_##name = \ + { (name), (priority), (instances), (stacksz) } + #endif + + /// Access a Thread definition. + /// \param name name of the thread definition object. + /// macro body is implementation specific in every CMSIS-RTOS. + #define osThread(name) \ + &os_thread_def_##name + + /// Create a thread and add it to Active Threads and set it to state READY. + /// \param[in] thread_def thread definition referenced with \ref osThread. + /// \param[in] argument pointer that is passed to the thread function as start argument. + /// \return thread ID for reference by other functions or NULL in case of error. + osThreadId osThreadCreate (const osThreadDef_t *thread_def, void *argument); + + /// Return the thread ID of the current running thread. + /// \return thread ID for reference by other functions or NULL in case of error. 
+ osThreadId osThreadGetId (void); + + /// Terminate execution of a thread and remove it from Active Threads. + /// \param[in] thread_id thread ID obtained by \ref osThreadCreate or \ref osThreadGetId. + /// \return status code that indicates the execution status of the function. + osStatus osThreadTerminate (osThreadId thread_id); + + /// Pass control to next thread that is in state \b READY. + /// \return status code that indicates the execution status of the function. + osStatus osThreadYield (void); + + /// Change priority of an active thread. + /// \param[in] thread_id thread ID obtained by \ref osThreadCreate or \ref osThreadGetId. + /// \param[in] priority new priority value for the thread function. + /// \return status code that indicates the execution status of the function. + osStatus osThreadSetPriority (osThreadId thread_id, osPriority priority); + + /// Get current priority of an active thread. + /// \param[in] thread_id thread ID obtained by \ref osThreadCreate or \ref osThreadGetId. + /// \return current priority value of the thread function. + osPriority osThreadGetPriority (osThreadId thread_id); + + + // ==== Generic Wait Functions ==== + + /// Wait for Timeout (Time Delay). + /// \param[in] millisec \ref CMSIS_RTOS_TimeOutValue "Time delay" value + /// \return status code that indicates the execution status of the function. + osStatus osDelay (uint32_t millisec); + + #if (defined (osFeature_Wait) && (osFeature_Wait != 0)) // Generic Wait available + + /// Wait for Signal, Message, Mail, or Timeout. + /// \param[in] millisec \ref CMSIS_RTOS_TimeOutValue or 0 in case of no time-out + /// \return event that contains signal, message, or mail information or error code. + os_InRegs osEvent osWait (uint32_t millisec); + + #endif // Generic Wait available + + + // ==== Timer Management Functions ==== + /// Define a Timer object. + /// \param name name of the timer object. + /// \param function name of the timer call back function. + #if defined (osObjectsExternal) // object is external + #define osTimerDef(name, function) \ + extern const osTimerDef_t os_timer_def_##name + #else // define the object + #define osTimerDef(name, function) \ + uint32_t os_timer_cb_##name[6]; \ + const osTimerDef_t os_timer_def_##name = \ + { (function), (os_timer_cb_##name) } + #endif + + /// Access a Timer definition. + /// \param name name of the timer object. + #define osTimer(name) \ + &os_timer_def_##name + + /// Create a timer. + /// \param[in] timer_def timer object referenced with \ref osTimer. + /// \param[in] type osTimerOnce for one-shot or osTimerPeriodic for periodic behavior. + /// \param[in] argument argument to the timer call back function. + /// \return timer ID for reference by other functions or NULL in case of error. + osTimerId osTimerCreate (const osTimerDef_t *timer_def, os_timer_type type, void *argument); + + /// Start or restart a timer. + /// \param[in] timer_id timer ID obtained by \ref osTimerCreate. + /// \param[in] millisec \ref CMSIS_RTOS_TimeOutValue "Time delay" value of the timer. + /// \return status code that indicates the execution status of the function. + osStatus osTimerStart (osTimerId timer_id, uint32_t millisec); + + /// Stop the timer. + /// \param[in] timer_id timer ID obtained by \ref osTimerCreate. + /// \return status code that indicates the execution status of the function. + osStatus osTimerStop (osTimerId timer_id); + + /// Delete a timer that was created by \ref osTimerCreate. + /// \param[in] timer_id timer ID obtained by \ref osTimerCreate. 
+ /// \return status code that indicates the execution status of the function. + osStatus osTimerDelete (osTimerId timer_id); + + + // ==== Signal Management ==== + + /// Set the specified Signal Flags of an active thread. + /// \param[in] thread_id thread ID obtained by \ref osThreadCreate or \ref osThreadGetId. + /// \param[in] signals specifies the signal flags of the thread that should be set. + /// \return previous signal flags of the specified thread or 0x80000000 in case of incorrect parameters. + int32_t osSignalSet (osThreadId thread_id, int32_t signals); + + /// Clear the specified Signal Flags of an active thread. + /// \param[in] thread_id thread ID obtained by \ref osThreadCreate or \ref osThreadGetId. + /// \param[in] signals specifies the signal flags of the thread that shall be cleared. + /// \return previous signal flags of the specified thread or 0x80000000 in case of incorrect parameters or call from ISR. + int32_t osSignalClear (osThreadId thread_id, int32_t signals); + + /// Wait for one or more Signal Flags to become signaled for the current \b RUNNING thread. + /// \param[in] signals wait until all specified signal flags set or 0 for any single signal flag. + /// \param[in] millisec \ref CMSIS_RTOS_TimeOutValue or 0 in case of no time-out. + /// \return event flag information or error code. + #if defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #define osSignalWait __osSignalWait + osEvent __osSignalWait (int32_t signals, uint32_t millisec); + #else + os_InRegs osEvent osSignalWait (int32_t signals, uint32_t millisec); + #endif + + + // ==== Mutex Management ==== + + /// Define a Mutex. + /// \param name name of the mutex object. + #if defined (osObjectsExternal) // object is external + #define osMutexDef(name) \ + extern const osMutexDef_t os_mutex_def_##name + #else // define the object + #define osMutexDef(name) \ + uint32_t os_mutex_cb_##name[4] = { 0 }; \ + const osMutexDef_t os_mutex_def_##name = { (os_mutex_cb_##name) } + #endif + + /// Access a Mutex definition. + /// \param name name of the mutex object. + #define osMutex(name) \ + &os_mutex_def_##name + + /// Create and Initialize a Mutex object. + /// \param[in] mutex_def mutex definition referenced with \ref osMutex. + /// \return mutex ID for reference by other functions or NULL in case of error. + osMutexId osMutexCreate (const osMutexDef_t *mutex_def); + + /// Wait until a Mutex becomes available. + /// \param[in] mutex_id mutex ID obtained by \ref osMutexCreate. + /// \param[in] millisec \ref CMSIS_RTOS_TimeOutValue or 0 in case of no time-out. + /// \return status code that indicates the execution status of the function. + osStatus osMutexWait (osMutexId mutex_id, uint32_t millisec); + + /// Release a Mutex that was obtained by \ref osMutexWait. + /// \param[in] mutex_id mutex ID obtained by \ref osMutexCreate. + /// \return status code that indicates the execution status of the function. + osStatus osMutexRelease (osMutexId mutex_id); + + /// Delete a Mutex that was created by \ref osMutexCreate. + /// \param[in] mutex_id mutex ID obtained by \ref osMutexCreate. + /// \return status code that indicates the execution status of the function. + osStatus osMutexDelete (osMutexId mutex_id); + + + // ==== Semaphore Management Functions ==== + + #if (defined (osFeature_Semaphore) && (osFeature_Semaphore != 0)) // Semaphore available + + /// Define a Semaphore object. + /// \param name name of the semaphore object. 
+ #if defined (osObjectsExternal) // object is external + #define osSemaphoreDef(name) \ + extern const osSemaphoreDef_t os_semaphore_def_##name + #else // define the object + #define osSemaphoreDef(name) \ + uint32_t os_semaphore_cb_##name[2] = { 0 }; \ + const osSemaphoreDef_t os_semaphore_def_##name = { (os_semaphore_cb_##name) } + #endif + + /// Access a Semaphore definition. + /// \param name name of the semaphore object. + #define osSemaphore(name) \ + &os_semaphore_def_##name + + /// Create and Initialize a Semaphore object used for managing resources. + /// \param[in] semaphore_def semaphore definition referenced with \ref osSemaphore. + /// \param[in] count number of available resources. + /// \return semaphore ID for reference by other functions or NULL in case of error. + osSemaphoreId osSemaphoreCreate (const osSemaphoreDef_t *semaphore_def, int32_t count); + + /// Wait until a Semaphore token becomes available. + /// \param[in] semaphore_id semaphore object referenced with \ref osSemaphoreCreate. + /// \param[in] millisec \ref CMSIS_RTOS_TimeOutValue or 0 in case of no time-out. + /// \return number of available tokens, or -1 in case of incorrect parameters. + int32_t osSemaphoreWait (osSemaphoreId semaphore_id, uint32_t millisec); + + /// Release a Semaphore token. + /// \param[in] semaphore_id semaphore object referenced with \ref osSemaphoreCreate. + /// \return status code that indicates the execution status of the function. + osStatus osSemaphoreRelease (osSemaphoreId semaphore_id); + + /// Delete a Semaphore that was created by \ref osSemaphoreCreate. + /// \param[in] semaphore_id semaphore object referenced with \ref osSemaphoreCreate. + /// \return status code that indicates the execution status of the function. + osStatus osSemaphoreDelete (osSemaphoreId semaphore_id); + + #endif // Semaphore available + + + // ==== Memory Pool Management Functions ==== + + #if (defined (osFeature_Pool) && (osFeature_Pool != 0)) // Memory Pool Management available + + /// \brief Define a Memory Pool. + /// \param name name of the memory pool. + /// \param no maximum number of blocks (objects) in the memory pool. + /// \param type data type of a single block (object). + #if defined (osObjectsExternal) // object is external + #define osPoolDef(name, no, type) \ + extern const osPoolDef_t os_pool_def_##name + #else // define the object + #define osPoolDef(name, no, type) \ + uint32_t os_pool_m_##name[3+((sizeof(type)+3)/4)*(no)]; \ + const osPoolDef_t os_pool_def_##name = \ + { (no), sizeof(type), (os_pool_m_##name) } + #endif + + /// \brief Access a Memory Pool definition. + /// \param name name of the memory pool + #define osPool(name) \ + &os_pool_def_##name + + /// Create and Initialize a memory pool. + /// \param[in] pool_def memory pool definition referenced with \ref osPool. + /// \return memory pool ID for reference by other functions or NULL in case of error. + osPoolId osPoolCreate (const osPoolDef_t *pool_def); + + /// Allocate a memory block from a memory pool. + /// \param[in] pool_id memory pool ID obtain referenced with \ref osPoolCreate. + /// \return address of the allocated memory block or NULL in case of no memory available. + void *osPoolAlloc (osPoolId pool_id); + + /// Allocate a memory block from a memory pool and set memory block to zero. + /// \param[in] pool_id memory pool ID obtain referenced with \ref osPoolCreate. + /// \return address of the allocated memory block or NULL in case of no memory available. 
+ void *osPoolCAlloc (osPoolId pool_id); + + /// Return an allocated memory block back to a specific memory pool. + /// \param[in] pool_id memory pool ID obtain referenced with \ref osPoolCreate. + /// \param[in] block address of the allocated memory block that is returned to the memory pool. + /// \return status code that indicates the execution status of the function. + osStatus osPoolFree (osPoolId pool_id, void *block); + + #endif // Memory Pool Management available + + + // ==== Message Queue Management Functions ==== + + #if (defined (osFeature_MessageQ) && (osFeature_MessageQ != 0)) // Message Queues available + + /// \brief Create a Message Queue Definition. + /// \param name name of the queue. + /// \param queue_sz maximum number of messages in the queue. + /// \param type data type of a single message element (for debugger). + #if defined (osObjectsExternal) // object is external + #define osMessageQDef(name, queue_sz, type) \ + extern const osMessageQDef_t os_messageQ_def_##name + #else // define the object + #define osMessageQDef(name, queue_sz, type) \ + uint32_t os_messageQ_q_##name[4+(queue_sz)] = { 0 }; \ + const osMessageQDef_t os_messageQ_def_##name = \ + { (queue_sz), (os_messageQ_q_##name) } + #endif + + /// \brief Access a Message Queue Definition. + /// \param name name of the queue + #define osMessageQ(name) \ + &os_messageQ_def_##name + + /// Create and Initialize a Message Queue. + /// \param[in] queue_def queue definition referenced with \ref osMessageQ. + /// \param[in] thread_id thread ID (obtained by \ref osThreadCreate or \ref osThreadGetId) or NULL. + /// \return message queue ID for reference by other functions or NULL in case of error. + osMessageQId osMessageCreate (const osMessageQDef_t *queue_def, osThreadId thread_id); + + /// Put a Message to a Queue. + /// \param[in] queue_id message queue ID obtained with \ref osMessageCreate. + /// \param[in] info message information. + /// \param[in] millisec \ref CMSIS_RTOS_TimeOutValue or 0 in case of no time-out. + /// \return status code that indicates the execution status of the function. + osStatus osMessagePut (osMessageQId queue_id, uint32_t info, uint32_t millisec); + + /// Get a Message or Wait for a Message from a Queue. + /// \param[in] queue_id message queue ID obtained with \ref osMessageCreate. + /// \param[in] millisec \ref CMSIS_RTOS_TimeOutValue or 0 in case of no time-out. + /// \return event information that includes status code. + #if defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #define osMessageGet __osMessageGet + osEvent __osMessageGet (osMessageQId queue_id, uint32_t millisec); + #else + os_InRegs osEvent osMessageGet (osMessageQId queue_id, uint32_t millisec); + #endif + + #endif // Message Queues available + + + // ==== Mail Queue Management Functions ==== + + #if (defined (osFeature_MailQ) && (osFeature_MailQ != 0)) // Mail Queues available + + /// \brief Create a Mail Queue Definition. 
+ /// \param name name of the queue + /// \param queue_sz maximum number of messages in queue + /// \param type data type of a single message element + #if defined (osObjectsExternal) // object is external + #define osMailQDef(name, queue_sz, type) \ + extern const osMailQDef_t os_mailQ_def_##name + #else // define the object + #define osMailQDef(name, queue_sz, type) \ + uint32_t os_mailQ_q_##name[4+(queue_sz)] = { 0 }; \ + uint32_t os_mailQ_m_##name[3+((sizeof(type)+3)/4)*(queue_sz)]; \ + void * os_mailQ_p_##name[2] = { (os_mailQ_q_##name), os_mailQ_m_##name }; \ + const osMailQDef_t os_mailQ_def_##name = \ + { (queue_sz), sizeof(type), (os_mailQ_p_##name) } + #endif + + /// \brief Access a Mail Queue Definition. + /// \param name name of the queue + #define osMailQ(name) \ + &os_mailQ_def_##name + + /// Create and Initialize mail queue. + /// \param[in] queue_def reference to the mail queue definition obtain with \ref osMailQ + /// \param[in] thread_id thread ID (obtained by \ref osThreadCreate or \ref osThreadGetId) or NULL. + /// \return mail queue ID for reference by other functions or NULL in case of error. + osMailQId osMailCreate (const osMailQDef_t *queue_def, osThreadId thread_id); + + /// Allocate a memory block from a mail. + /// \param[in] queue_id mail queue ID obtained with \ref osMailCreate. + /// \param[in] millisec \ref CMSIS_RTOS_TimeOutValue or 0 in case of no time-out + /// \return pointer to memory block that can be filled with mail or NULL in case of error. + void *osMailAlloc (osMailQId queue_id, uint32_t millisec); + + /// Allocate a memory block from a mail and set memory block to zero. + /// \param[in] queue_id mail queue ID obtained with \ref osMailCreate. + /// \param[in] millisec \ref CMSIS_RTOS_TimeOutValue or 0 in case of no time-out + /// \return pointer to memory block that can be filled with mail or NULL in case of error. + void *osMailCAlloc (osMailQId queue_id, uint32_t millisec); + + /// Put a mail to a queue. + /// \param[in] queue_id mail queue ID obtained with \ref osMailCreate. + /// \param[in] mail memory block previously allocated with \ref osMailAlloc or \ref osMailCAlloc. + /// \return status code that indicates the execution status of the function. + osStatus osMailPut (osMailQId queue_id, void *mail); + + /// Get a mail from a queue. + /// \param[in] queue_id mail queue ID obtained with \ref osMailCreate. + /// \param[in] millisec \ref CMSIS_RTOS_TimeOutValue or 0 in case of no time-out + /// \return event that contains mail information or error code. + #if defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #define osMailGet __osMailGet + osEvent __osMailGet (osMailQId queue_id, uint32_t millisec); + #else + os_InRegs osEvent osMailGet (osMailQId queue_id, uint32_t millisec); + #endif + + /// Free a memory block from a mail. + /// \param[in] queue_id mail queue ID obtained with \ref osMailCreate. + /// \param[in] mail pointer to the memory block that was obtained with \ref osMailGet. + /// \return status code that indicates the execution status of the function. + osStatus osMailFree (osMailQId queue_id, void *mail); + + #endif // Mail Queues available + + + // ==== RTX Extensions ==== + + /// Suspend the RTX task scheduler. + /// \return number of ticks, for how long the system can sleep or power-down. + uint32_t os_suspend (void); + + /// Resume the RTX task scheduler. + /// \param[in] sleep_time specifies how long the system was in sleep or power-down mode. 
void os_resume (uint32_t sleep_time);
+
+ /// OS idle demon (running when no other thread is ready to run).
+ __NO_RETURN void os_idle_demon (void);
+
+ /// OS error callback (called when a runtime error is detected).
+ /// \param[in] error_code actual error code that has been detected.
+ __NO_RETURN void os_error (uint32_t error_code);
+
+
+ #ifdef __cplusplus
+ }
+ #endif
+
+ #endif // _CMSIS_OS_H
\ No newline at end of file
diff --git a/adapter/cmsis/cmsis_header/RTOS2/cmsis_os2.h b/adapter/cmsis/cmsis_header/RTOS2/cmsis_os2.h
new file mode 100644
index 00000000..514232e3
--- /dev/null
+++ b/adapter/cmsis/cmsis_header/RTOS2/cmsis_os2.h
@@ -0,0 +1,756 @@
+/*
+ * Copyright (c) 2013-2020 Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ----------------------------------------------------------------------
+ *
+ * $Date: 12. June 2020
+ * $Revision: V2.1.3
+ *
+ * Project: CMSIS-RTOS2 API
+ * Title: cmsis_os2.h header file
+ *
+ * Version 2.1.3
+ * Additional functions allowed to be called from Interrupt Service Routines:
+ * - osThreadGetId
+ * Version 2.1.2
+ * Additional functions allowed to be called from Interrupt Service Routines:
+ * - osKernelGetInfo, osKernelGetState
+ * Version 2.1.1
+ * Additional functions allowed to be called from Interrupt Service Routines:
+ * - osKernelGetTickCount, osKernelGetTickFreq
+ * Changed Kernel Tick type to uint32_t:
+ * - updated: osKernelGetTickCount, osDelayUntil
+ * Version 2.1.0
+ * Support for critical and uncritical sections (nesting safe):
+ * - updated: osKernelLock, osKernelUnlock
+ * - added: osKernelRestoreLock
+ * Updated Thread and Event Flags:
+ * - changed flags parameter and return type from int32_t to uint32_t
+ * Version 2.0.0
+ * Initial Release
+ *---------------------------------------------------------------------------*/
+
+ #ifndef CMSIS_OS2_H_
+ #define CMSIS_OS2_H_
+
+ #ifndef __NO_RETURN
+ #if defined(__CC_ARM)
+ #define __NO_RETURN __declspec(noreturn)
+ #elif defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
+ #define __NO_RETURN __attribute__((__noreturn__))
+ #elif defined(__GNUC__) || defined(__clang__)
+ #define __NO_RETURN __attribute__((__noreturn__))
+ #elif defined(__ICCARM__)
+ #define __NO_RETURN __noreturn
+ #else
+ #define __NO_RETURN
+ #endif
+ #endif
+
+ #include <stdint.h>
+ #include <stddef.h>
+
+ #ifdef __cplusplus
+ extern "C"
+ {
+ #endif
+
+
+ // ==== Enumerations, structures, defines ====
+
+ /// Version information.
+ typedef struct {
+   uint32_t api; ///< API version (major.minor.rev: mmnnnrrrr dec).
+   uint32_t kernel; ///< Kernel version (major.minor.rev: mmnnnrrrr dec).
+ } osVersion_t;
+
+ /// Kernel state.
+ typedef enum {
+   osKernelInactive = 0, ///< Inactive.
+   osKernelReady = 1, ///< Ready.
+   osKernelRunning = 2, ///< Running.
+   osKernelLocked = 3, ///< Locked.
+   osKernelSuspended = 4, ///< Suspended.
+   osKernelError = -1, ///< Error.
+   osKernelReserved = 0x7FFFFFFF ///< Prevents enum down-size compiler optimization.
+ } osKernelState_t; + + /// Thread state. + typedef enum { + osThreadInactive = 0, ///< Inactive. + osThreadReady = 1, ///< Ready. + osThreadRunning = 2, ///< Running. + osThreadBlocked = 3, ///< Blocked. + osThreadTerminated = 4, ///< Terminated. + osThreadError = -1, ///< Error. + osThreadReserved = 0x7FFFFFFF ///< Prevents enum down-size compiler optimization. + } osThreadState_t; + + /// Priority values. + typedef enum { + osPriorityNone = 0, ///< No priority (not initialized). + osPriorityIdle = 1, ///< Reserved for Idle thread. + osPriorityLow = 8, ///< Priority: low + osPriorityLow1 = 8+1, ///< Priority: low + 1 + osPriorityLow2 = 8+2, ///< Priority: low + 2 + osPriorityLow3 = 8+3, ///< Priority: low + 3 + osPriorityLow4 = 8+4, ///< Priority: low + 4 + osPriorityLow5 = 8+5, ///< Priority: low + 5 + osPriorityLow6 = 8+6, ///< Priority: low + 6 + osPriorityLow7 = 8+7, ///< Priority: low + 7 + osPriorityBelowNormal = 16, ///< Priority: below normal + osPriorityBelowNormal1 = 16+1, ///< Priority: below normal + 1 + osPriorityBelowNormal2 = 16+2, ///< Priority: below normal + 2 + osPriorityBelowNormal3 = 16+3, ///< Priority: below normal + 3 + osPriorityBelowNormal4 = 16+4, ///< Priority: below normal + 4 + osPriorityBelowNormal5 = 16+5, ///< Priority: below normal + 5 + osPriorityBelowNormal6 = 16+6, ///< Priority: below normal + 6 + osPriorityBelowNormal7 = 16+7, ///< Priority: below normal + 7 + osPriorityNormal = 24, ///< Priority: normal + osPriorityNormal1 = 24+1, ///< Priority: normal + 1 + osPriorityNormal2 = 24+2, ///< Priority: normal + 2 + osPriorityNormal3 = 24+3, ///< Priority: normal + 3 + osPriorityNormal4 = 24+4, ///< Priority: normal + 4 + osPriorityNormal5 = 24+5, ///< Priority: normal + 5 + osPriorityNormal6 = 24+6, ///< Priority: normal + 6 + osPriorityNormal7 = 24+7, ///< Priority: normal + 7 + osPriorityAboveNormal = 32, ///< Priority: above normal + osPriorityAboveNormal1 = 32+1, ///< Priority: above normal + 1 + osPriorityAboveNormal2 = 32+2, ///< Priority: above normal + 2 + osPriorityAboveNormal3 = 32+3, ///< Priority: above normal + 3 + osPriorityAboveNormal4 = 32+4, ///< Priority: above normal + 4 + osPriorityAboveNormal5 = 32+5, ///< Priority: above normal + 5 + osPriorityAboveNormal6 = 32+6, ///< Priority: above normal + 6 + osPriorityAboveNormal7 = 32+7, ///< Priority: above normal + 7 + osPriorityHigh = 40, ///< Priority: high + osPriorityHigh1 = 40+1, ///< Priority: high + 1 + osPriorityHigh2 = 40+2, ///< Priority: high + 2 + osPriorityHigh3 = 40+3, ///< Priority: high + 3 + osPriorityHigh4 = 40+4, ///< Priority: high + 4 + osPriorityHigh5 = 40+5, ///< Priority: high + 5 + osPriorityHigh6 = 40+6, ///< Priority: high + 6 + osPriorityHigh7 = 40+7, ///< Priority: high + 7 + osPriorityRealtime = 48, ///< Priority: realtime + osPriorityRealtime1 = 48+1, ///< Priority: realtime + 1 + osPriorityRealtime2 = 48+2, ///< Priority: realtime + 2 + osPriorityRealtime3 = 48+3, ///< Priority: realtime + 3 + osPriorityRealtime4 = 48+4, ///< Priority: realtime + 4 + osPriorityRealtime5 = 48+5, ///< Priority: realtime + 5 + osPriorityRealtime6 = 48+6, ///< Priority: realtime + 6 + osPriorityRealtime7 = 48+7, ///< Priority: realtime + 7 + osPriorityISR = 56, ///< Reserved for ISR deferred thread. + osPriorityError = -1, ///< System cannot determine priority or illegal priority. + osPriorityReserved = 0x7FFFFFFF ///< Prevents enum down-size compiler optimization. + } osPriority_t; + + /// Entry point of a thread. 
+ typedef void (*osThreadFunc_t) (void *argument); + + /// Timer callback function. + typedef void (*osTimerFunc_t) (void *argument); + + /// Timer type. + typedef enum { + osTimerOnce = 0, ///< One-shot timer. + osTimerPeriodic = 1 ///< Repeating timer. + } osTimerType_t; + + // Timeout value. + #define osWaitForever 0xFFFFFFFFU ///< Wait forever timeout value. + + // Flags options (\ref osThreadFlagsWait and \ref osEventFlagsWait). + #define osFlagsWaitAny 0x00000000U ///< Wait for any flag (default). + #define osFlagsWaitAll 0x00000001U ///< Wait for all flags. + #define osFlagsNoClear 0x00000002U ///< Do not clear flags which have been specified to wait for. + + // Flags errors (returned by osThreadFlagsXxxx and osEventFlagsXxxx). + #define osFlagsError 0x80000000U ///< Error indicator. + #define osFlagsErrorUnknown 0xFFFFFFFFU ///< osError (-1). + #define osFlagsErrorTimeout 0xFFFFFFFEU ///< osErrorTimeout (-2). + #define osFlagsErrorResource 0xFFFFFFFDU ///< osErrorResource (-3). + #define osFlagsErrorParameter 0xFFFFFFFCU ///< osErrorParameter (-4). + #define osFlagsErrorISR 0xFFFFFFFAU ///< osErrorISR (-6). + + // Thread attributes (attr_bits in \ref osThreadAttr_t). + #define osThreadDetached 0x00000000U ///< Thread created in detached mode (default) + #define osThreadJoinable 0x00000001U ///< Thread created in joinable mode + + // Mutex attributes (attr_bits in \ref osMutexAttr_t). + #define osMutexRecursive 0x00000001U ///< Recursive mutex. + #define osMutexPrioInherit 0x00000002U ///< Priority inherit protocol. + #define osMutexRobust 0x00000008U ///< Robust mutex. + + /// Status code values returned by CMSIS-RTOS functions. + typedef enum { + osOK = 0, ///< Operation completed successfully. + osError = -1, ///< Unspecified RTOS error: run-time error but no other error message fits. + osErrorTimeout = -2, ///< Operation not completed within the timeout period. + osErrorResource = -3, ///< Resource not available. + osErrorParameter = -4, ///< Parameter error. + osErrorNoMemory = -5, ///< System is out of memory: it was impossible to allocate or reserve memory for the operation. + osErrorISR = -6, ///< Not allowed in ISR context: the function cannot be called from interrupt service routines. + osStatusReserved = 0x7FFFFFFF ///< Prevents enum down-size compiler optimization. + } osStatus_t; + + + /// \details Thread ID identifies the thread. + typedef void *osThreadId_t; + + /// \details Timer ID identifies the timer. + typedef void *osTimerId_t; + + /// \details Event Flags ID identifies the event flags. + typedef void *osEventFlagsId_t; + + /// \details Mutex ID identifies the mutex. + typedef void *osMutexId_t; + + /// \details Semaphore ID identifies the semaphore. + typedef void *osSemaphoreId_t; + + /// \details Memory Pool ID identifies the memory pool. + typedef void *osMemoryPoolId_t; + + /// \details Message Queue ID identifies the message queue. + typedef void *osMessageQueueId_t; + + + #ifndef TZ_MODULEID_T + #define TZ_MODULEID_T + /// \details Data type that identifies secure software modules called by a process. + typedef uint32_t TZ_ModuleId_t; + #endif + + + /// Attributes structure for thread. 
+ typedef struct { + const char *name; ///< name of the thread + uint32_t attr_bits; ///< attribute bits + void *cb_mem; ///< memory for control block + uint32_t cb_size; ///< size of provided memory for control block + void *stack_mem; ///< memory for stack + uint32_t stack_size; ///< size of stack + osPriority_t priority; ///< initial thread priority (default: osPriorityNormal) + TZ_ModuleId_t tz_module; ///< TrustZone module identifier + uint32_t reserved; ///< reserved (must be 0) + } osThreadAttr_t; + + /// Attributes structure for timer. + typedef struct { + const char *name; ///< name of the timer + uint32_t attr_bits; ///< attribute bits + void *cb_mem; ///< memory for control block + uint32_t cb_size; ///< size of provided memory for control block + } osTimerAttr_t; + + /// Attributes structure for event flags. + typedef struct { + const char *name; ///< name of the event flags + uint32_t attr_bits; ///< attribute bits + void *cb_mem; ///< memory for control block + uint32_t cb_size; ///< size of provided memory for control block + } osEventFlagsAttr_t; + + /// Attributes structure for mutex. + typedef struct { + const char *name; ///< name of the mutex + uint32_t attr_bits; ///< attribute bits + void *cb_mem; ///< memory for control block + uint32_t cb_size; ///< size of provided memory for control block + } osMutexAttr_t; + + /// Attributes structure for semaphore. + typedef struct { + const char *name; ///< name of the semaphore + uint32_t attr_bits; ///< attribute bits + void *cb_mem; ///< memory for control block + uint32_t cb_size; ///< size of provided memory for control block + } osSemaphoreAttr_t; + + /// Attributes structure for memory pool. + typedef struct { + const char *name; ///< name of the memory pool + uint32_t attr_bits; ///< attribute bits + void *cb_mem; ///< memory for control block + uint32_t cb_size; ///< size of provided memory for control block + void *mp_mem; ///< memory for data storage + uint32_t mp_size; ///< size of provided memory for data storage + } osMemoryPoolAttr_t; + + /// Attributes structure for message queue. + typedef struct { + const char *name; ///< name of the message queue + uint32_t attr_bits; ///< attribute bits + void *cb_mem; ///< memory for control block + uint32_t cb_size; ///< size of provided memory for control block + void *mq_mem; ///< memory for data storage + uint32_t mq_size; ///< size of provided memory for data storage + } osMessageQueueAttr_t; + + + // ==== Kernel Management Functions ==== + + /// Initialize the RTOS Kernel. + /// \return status code that indicates the execution status of the function. + osStatus_t osKernelInitialize (void); + + /// Get RTOS Kernel Information. + /// \param[out] version pointer to buffer for retrieving version information. + /// \param[out] id_buf pointer to buffer for retrieving kernel identification string. + /// \param[in] id_size size of buffer for kernel identification string. + /// \return status code that indicates the execution status of the function. + osStatus_t osKernelGetInfo (osVersion_t *version, char *id_buf, uint32_t id_size); + + /// Get the current RTOS Kernel state. + /// \return current RTOS Kernel state. + osKernelState_t osKernelGetState (void); + + /// Start the RTOS Kernel scheduler. + /// \return status code that indicates the execution status of the function. + osStatus_t osKernelStart (void); + + /// Lock the RTOS Kernel scheduler. + /// \return previous lock state (1 - locked, 0 - not locked, error code if negative). 
+ int32_t osKernelLock (void); + + /// Unlock the RTOS Kernel scheduler. + /// \return previous lock state (1 - locked, 0 - not locked, error code if negative). + int32_t osKernelUnlock (void); + + /// Restore the RTOS Kernel scheduler lock state. + /// \param[in] lock lock state obtained by \ref osKernelLock or \ref osKernelUnlock. + /// \return new lock state (1 - locked, 0 - not locked, error code if negative). + int32_t osKernelRestoreLock (int32_t lock); + + /// Suspend the RTOS Kernel scheduler. + /// \return time in ticks, for how long the system can sleep or power-down. + uint32_t osKernelSuspend (void); + + /// Resume the RTOS Kernel scheduler. + /// \param[in] sleep_ticks time in ticks for how long the system was in sleep or power-down mode. + void osKernelResume (uint32_t sleep_ticks); + + /// Get the RTOS kernel tick count. + /// \return RTOS kernel current tick count. + uint32_t osKernelGetTickCount (void); + + /// Get the RTOS kernel tick frequency. + /// \return frequency of the kernel tick in hertz, i.e. kernel ticks per second. + uint32_t osKernelGetTickFreq (void); + + /// Get the RTOS kernel system timer count. + /// \return RTOS kernel current system timer count as 32-bit value. + uint32_t osKernelGetSysTimerCount (void); + + /// Get the RTOS kernel system timer frequency. + /// \return frequency of the system timer in hertz, i.e. timer ticks per second. + uint32_t osKernelGetSysTimerFreq (void); + + + // ==== Thread Management Functions ==== + + /// Create a thread and add it to Active Threads. + /// \param[in] func thread function. + /// \param[in] argument pointer that is passed to the thread function as start argument. + /// \param[in] attr thread attributes; NULL: default values. + /// \return thread ID for reference by other functions or NULL in case of error. + osThreadId_t osThreadNew (osThreadFunc_t func, void *argument, const osThreadAttr_t *attr); + + /// Get name of a thread. + /// \param[in] thread_id thread ID obtained by \ref osThreadNew or \ref osThreadGetId. + /// \return name as null-terminated string. + const char *osThreadGetName (osThreadId_t thread_id); + + /// Return the thread ID of the current running thread. + /// \return thread ID for reference by other functions or NULL in case of error. + osThreadId_t osThreadGetId (void); + + /// Get current thread state of a thread. + /// \param[in] thread_id thread ID obtained by \ref osThreadNew or \ref osThreadGetId. + /// \return current thread state of the specified thread. + osThreadState_t osThreadGetState (osThreadId_t thread_id); + + /// Get stack size of a thread. + /// \param[in] thread_id thread ID obtained by \ref osThreadNew or \ref osThreadGetId. + /// \return stack size in bytes. + uint32_t osThreadGetStackSize (osThreadId_t thread_id); + + /// Get available stack space of a thread based on stack watermark recording during execution. + /// \param[in] thread_id thread ID obtained by \ref osThreadNew or \ref osThreadGetId. + /// \return remaining stack space in bytes. + uint32_t osThreadGetStackSpace (osThreadId_t thread_id); + + /// Change priority of a thread. + /// \param[in] thread_id thread ID obtained by \ref osThreadNew or \ref osThreadGetId. + /// \param[in] priority new priority value for the thread function. + /// \return status code that indicates the execution status of the function. + osStatus_t osThreadSetPriority (osThreadId_t thread_id, osPriority_t priority); + + /// Get current priority of a thread. 
+ /// \param[in] thread_id thread ID obtained by \ref osThreadNew or \ref osThreadGetId. + /// \return current priority value of the specified thread. + osPriority_t osThreadGetPriority (osThreadId_t thread_id); + + /// Pass control to next thread that is in state \b READY. + /// \return status code that indicates the execution status of the function. + osStatus_t osThreadYield (void); + + /// Suspend execution of a thread. + /// \param[in] thread_id thread ID obtained by \ref osThreadNew or \ref osThreadGetId. + /// \return status code that indicates the execution status of the function. + osStatus_t osThreadSuspend (osThreadId_t thread_id); + + /// Resume execution of a thread. + /// \param[in] thread_id thread ID obtained by \ref osThreadNew or \ref osThreadGetId. + /// \return status code that indicates the execution status of the function. + osStatus_t osThreadResume (osThreadId_t thread_id); + + /// Detach a thread (thread storage can be reclaimed when thread terminates). + /// \param[in] thread_id thread ID obtained by \ref osThreadNew or \ref osThreadGetId. + /// \return status code that indicates the execution status of the function. + osStatus_t osThreadDetach (osThreadId_t thread_id); + + /// Wait for specified thread to terminate. + /// \param[in] thread_id thread ID obtained by \ref osThreadNew or \ref osThreadGetId. + /// \return status code that indicates the execution status of the function. + osStatus_t osThreadJoin (osThreadId_t thread_id); + + /// Terminate execution of current running thread. + __NO_RETURN void osThreadExit (void); + + /// Terminate execution of a thread. + /// \param[in] thread_id thread ID obtained by \ref osThreadNew or \ref osThreadGetId. + /// \return status code that indicates the execution status of the function. + osStatus_t osThreadTerminate (osThreadId_t thread_id); + + /// Get number of active threads. + /// \return number of active threads. + uint32_t osThreadGetCount (void); + + /// Enumerate active threads. + /// \param[out] thread_array pointer to array for retrieving thread IDs. + /// \param[in] array_items maximum number of items in array for retrieving thread IDs. + /// \return number of enumerated threads. + uint32_t osThreadEnumerate (osThreadId_t *thread_array, uint32_t array_items); + + + // ==== Thread Flags Functions ==== + + /// Set the specified Thread Flags of a thread. + /// \param[in] thread_id thread ID obtained by \ref osThreadNew or \ref osThreadGetId. + /// \param[in] flags specifies the flags of the thread that shall be set. + /// \return thread flags after setting or error code if highest bit set. + uint32_t osThreadFlagsSet (osThreadId_t thread_id, uint32_t flags); + + /// Clear the specified Thread Flags of current running thread. + /// \param[in] flags specifies the flags of the thread that shall be cleared. + /// \return thread flags before clearing or error code if highest bit set. + uint32_t osThreadFlagsClear (uint32_t flags); + + /// Get the current Thread Flags of current running thread. + /// \return current thread flags. + uint32_t osThreadFlagsGet (void); + + /// Wait for one or more Thread Flags of the current running thread to become signaled. + /// \param[in] flags specifies the flags to wait for. + /// \param[in] options specifies flags options (osFlagsXxxx). + /// \param[in] timeout \ref CMSIS_RTOS_TimeOutValue or 0 in case of no time-out. + /// \return thread flags before clearing or error code if highest bit set. 
+ uint32_t osThreadFlagsWait (uint32_t flags, uint32_t options, uint32_t timeout); + + + // ==== Generic Wait Functions ==== + + /// Wait for Timeout (Time Delay). + /// \param[in] ticks \ref CMSIS_RTOS_TimeOutValue "time ticks" value + /// \return status code that indicates the execution status of the function. + osStatus_t osDelay (uint32_t ticks); + + /// Wait until specified time. + /// \param[in] ticks absolute time in ticks + /// \return status code that indicates the execution status of the function. + osStatus_t osDelayUntil (uint32_t ticks); + + + // ==== Timer Management Functions ==== + + /// Create and Initialize a timer. + /// \param[in] func function pointer to callback function. + /// \param[in] type \ref osTimerOnce for one-shot or \ref osTimerPeriodic for periodic behavior. + /// \param[in] argument argument to the timer callback function. + /// \param[in] attr timer attributes; NULL: default values. + /// \return timer ID for reference by other functions or NULL in case of error. + osTimerId_t osTimerNew (osTimerFunc_t func, osTimerType_t type, void *argument, const osTimerAttr_t *attr); + + /// Get name of a timer. + /// \param[in] timer_id timer ID obtained by \ref osTimerNew. + /// \return name as null-terminated string. + const char *osTimerGetName (osTimerId_t timer_id); + + /// Start or restart a timer. + /// \param[in] timer_id timer ID obtained by \ref osTimerNew. + /// \param[in] ticks \ref CMSIS_RTOS_TimeOutValue "time ticks" value of the timer. + /// \return status code that indicates the execution status of the function. + osStatus_t osTimerStart (osTimerId_t timer_id, uint32_t ticks); + + /// Stop a timer. + /// \param[in] timer_id timer ID obtained by \ref osTimerNew. + /// \return status code that indicates the execution status of the function. + osStatus_t osTimerStop (osTimerId_t timer_id); + + /// Check if a timer is running. + /// \param[in] timer_id timer ID obtained by \ref osTimerNew. + /// \return 0 not running, 1 running. + uint32_t osTimerIsRunning (osTimerId_t timer_id); + + /// Delete a timer. + /// \param[in] timer_id timer ID obtained by \ref osTimerNew. + /// \return status code that indicates the execution status of the function. + osStatus_t osTimerDelete (osTimerId_t timer_id); + + + // ==== Event Flags Management Functions ==== + + /// Create and Initialize an Event Flags object. + /// \param[in] attr event flags attributes; NULL: default values. + /// \return event flags ID for reference by other functions or NULL in case of error. + osEventFlagsId_t osEventFlagsNew (const osEventFlagsAttr_t *attr); + + /// Get name of an Event Flags object. + /// \param[in] ef_id event flags ID obtained by \ref osEventFlagsNew. + /// \return name as null-terminated string. + const char *osEventFlagsGetName (osEventFlagsId_t ef_id); + + /// Set the specified Event Flags. + /// \param[in] ef_id event flags ID obtained by \ref osEventFlagsNew. + /// \param[in] flags specifies the flags that shall be set. + /// \return event flags after setting or error code if highest bit set. + uint32_t osEventFlagsSet (osEventFlagsId_t ef_id, uint32_t flags); + + /// Clear the specified Event Flags. + /// \param[in] ef_id event flags ID obtained by \ref osEventFlagsNew. + /// \param[in] flags specifies the flags that shall be cleared. + /// \return event flags before clearing or error code if highest bit set. + uint32_t osEventFlagsClear (osEventFlagsId_t ef_id, uint32_t flags); + + /// Get the current Event Flags. 
+ /// \param[in] ef_id event flags ID obtained by \ref osEventFlagsNew. + /// \return current event flags. + uint32_t osEventFlagsGet (osEventFlagsId_t ef_id); + + /// Wait for one or more Event Flags to become signaled. + /// \param[in] ef_id event flags ID obtained by \ref osEventFlagsNew. + /// \param[in] flags specifies the flags to wait for. + /// \param[in] options specifies flags options (osFlagsXxxx). + /// \param[in] timeout \ref CMSIS_RTOS_TimeOutValue or 0 in case of no time-out. + /// \return event flags before clearing or error code if highest bit set. + uint32_t osEventFlagsWait (osEventFlagsId_t ef_id, uint32_t flags, uint32_t options, uint32_t timeout); + + /// Delete an Event Flags object. + /// \param[in] ef_id event flags ID obtained by \ref osEventFlagsNew. + /// \return status code that indicates the execution status of the function. + osStatus_t osEventFlagsDelete (osEventFlagsId_t ef_id); + + + // ==== Mutex Management Functions ==== + + /// Create and Initialize a Mutex object. + /// \param[in] attr mutex attributes; NULL: default values. + /// \return mutex ID for reference by other functions or NULL in case of error. + osMutexId_t osMutexNew (const osMutexAttr_t *attr); + + /// Get name of a Mutex object. + /// \param[in] mutex_id mutex ID obtained by \ref osMutexNew. + /// \return name as null-terminated string. + const char *osMutexGetName (osMutexId_t mutex_id); + + /// Acquire a Mutex or timeout if it is locked. + /// \param[in] mutex_id mutex ID obtained by \ref osMutexNew. + /// \param[in] timeout \ref CMSIS_RTOS_TimeOutValue or 0 in case of no time-out. + /// \return status code that indicates the execution status of the function. + osStatus_t osMutexAcquire (osMutexId_t mutex_id, uint32_t timeout); + + /// Release a Mutex that was acquired by \ref osMutexAcquire. + /// \param[in] mutex_id mutex ID obtained by \ref osMutexNew. + /// \return status code that indicates the execution status of the function. + osStatus_t osMutexRelease (osMutexId_t mutex_id); + + /// Get Thread which owns a Mutex object. + /// \param[in] mutex_id mutex ID obtained by \ref osMutexNew. + /// \return thread ID of owner thread or NULL when mutex was not acquired. + osThreadId_t osMutexGetOwner (osMutexId_t mutex_id); + + /// Delete a Mutex object. + /// \param[in] mutex_id mutex ID obtained by \ref osMutexNew. + /// \return status code that indicates the execution status of the function. + osStatus_t osMutexDelete (osMutexId_t mutex_id); + + + // ==== Semaphore Management Functions ==== + + /// Create and Initialize a Semaphore object. + /// \param[in] max_count maximum number of available tokens. + /// \param[in] initial_count initial number of available tokens. + /// \param[in] attr semaphore attributes; NULL: default values. + /// \return semaphore ID for reference by other functions or NULL in case of error. + osSemaphoreId_t osSemaphoreNew (uint32_t max_count, uint32_t initial_count, const osSemaphoreAttr_t *attr); + + /// Get name of a Semaphore object. + /// \param[in] semaphore_id semaphore ID obtained by \ref osSemaphoreNew. + /// \return name as null-terminated string. + const char *osSemaphoreGetName (osSemaphoreId_t semaphore_id); + + /// Acquire a Semaphore token or timeout if no tokens are available. + /// \param[in] semaphore_id semaphore ID obtained by \ref osSemaphoreNew. + /// \param[in] timeout \ref CMSIS_RTOS_TimeOutValue or 0 in case of no time-out. + /// \return status code that indicates the execution status of the function. 
+ osStatus_t osSemaphoreAcquire (osSemaphoreId_t semaphore_id, uint32_t timeout); + + /// Release a Semaphore token up to the initial maximum count. + /// \param[in] semaphore_id semaphore ID obtained by \ref osSemaphoreNew. + /// \return status code that indicates the execution status of the function. + osStatus_t osSemaphoreRelease (osSemaphoreId_t semaphore_id); + + /// Get current Semaphore token count. + /// \param[in] semaphore_id semaphore ID obtained by \ref osSemaphoreNew. + /// \return number of tokens available. + uint32_t osSemaphoreGetCount (osSemaphoreId_t semaphore_id); + + /// Delete a Semaphore object. + /// \param[in] semaphore_id semaphore ID obtained by \ref osSemaphoreNew. + /// \return status code that indicates the execution status of the function. + osStatus_t osSemaphoreDelete (osSemaphoreId_t semaphore_id); + + + // ==== Memory Pool Management Functions ==== + + /// Create and Initialize a Memory Pool object. + /// \param[in] block_count maximum number of memory blocks in memory pool. + /// \param[in] block_size memory block size in bytes. + /// \param[in] attr memory pool attributes; NULL: default values. + /// \return memory pool ID for reference by other functions or NULL in case of error. + osMemoryPoolId_t osMemoryPoolNew (uint32_t block_count, uint32_t block_size, const osMemoryPoolAttr_t *attr); + + /// Get name of a Memory Pool object. + /// \param[in] mp_id memory pool ID obtained by \ref osMemoryPoolNew. + /// \return name as null-terminated string. + const char *osMemoryPoolGetName (osMemoryPoolId_t mp_id); + + /// Allocate a memory block from a Memory Pool. + /// \param[in] mp_id memory pool ID obtained by \ref osMemoryPoolNew. + /// \param[in] timeout \ref CMSIS_RTOS_TimeOutValue or 0 in case of no time-out. + /// \return address of the allocated memory block or NULL in case of no memory is available. + void *osMemoryPoolAlloc (osMemoryPoolId_t mp_id, uint32_t timeout); + + /// Return an allocated memory block back to a Memory Pool. + /// \param[in] mp_id memory pool ID obtained by \ref osMemoryPoolNew. + /// \param[in] block address of the allocated memory block to be returned to the memory pool. + /// \return status code that indicates the execution status of the function. + osStatus_t osMemoryPoolFree (osMemoryPoolId_t mp_id, void *block); + + /// Get maximum number of memory blocks in a Memory Pool. + /// \param[in] mp_id memory pool ID obtained by \ref osMemoryPoolNew. + /// \return maximum number of memory blocks. + uint32_t osMemoryPoolGetCapacity (osMemoryPoolId_t mp_id); + + /// Get memory block size in a Memory Pool. + /// \param[in] mp_id memory pool ID obtained by \ref osMemoryPoolNew. + /// \return memory block size in bytes. + uint32_t osMemoryPoolGetBlockSize (osMemoryPoolId_t mp_id); + + /// Get number of memory blocks used in a Memory Pool. + /// \param[in] mp_id memory pool ID obtained by \ref osMemoryPoolNew. + /// \return number of memory blocks used. + uint32_t osMemoryPoolGetCount (osMemoryPoolId_t mp_id); + + /// Get number of memory blocks available in a Memory Pool. + /// \param[in] mp_id memory pool ID obtained by \ref osMemoryPoolNew. + /// \return number of memory blocks available. + uint32_t osMemoryPoolGetSpace (osMemoryPoolId_t mp_id); + + /// Delete a Memory Pool object. + /// \param[in] mp_id memory pool ID obtained by \ref osMemoryPoolNew. + /// \return status code that indicates the execution status of the function. 
+ osStatus_t osMemoryPoolDelete (osMemoryPoolId_t mp_id); + + + // ==== Message Queue Management Functions ==== + + /// Create and Initialize a Message Queue object. + /// \param[in] msg_count maximum number of messages in queue. + /// \param[in] msg_size maximum message size in bytes. + /// \param[in] attr message queue attributes; NULL: default values. + /// \return message queue ID for reference by other functions or NULL in case of error. + osMessageQueueId_t osMessageQueueNew (uint32_t msg_count, uint32_t msg_size, const osMessageQueueAttr_t *attr); + + /// Get name of a Message Queue object. + /// \param[in] mq_id message queue ID obtained by \ref osMessageQueueNew. + /// \return name as null-terminated string. + const char *osMessageQueueGetName (osMessageQueueId_t mq_id); + + /// Put a Message into a Queue or timeout if Queue is full. + /// \param[in] mq_id message queue ID obtained by \ref osMessageQueueNew. + /// \param[in] msg_ptr pointer to buffer with message to put into a queue. + /// \param[in] msg_prio message priority. + /// \param[in] timeout \ref CMSIS_RTOS_TimeOutValue or 0 in case of no time-out. + /// \return status code that indicates the execution status of the function. + osStatus_t osMessageQueuePut (osMessageQueueId_t mq_id, const void *msg_ptr, uint8_t msg_prio, uint32_t timeout); + + /// Get a Message from a Queue or timeout if Queue is empty. + /// \param[in] mq_id message queue ID obtained by \ref osMessageQueueNew. + /// \param[out] msg_ptr pointer to buffer for message to get from a queue. + /// \param[out] msg_prio pointer to buffer for message priority or NULL. + /// \param[in] timeout \ref CMSIS_RTOS_TimeOutValue or 0 in case of no time-out. + /// \return status code that indicates the execution status of the function. + osStatus_t osMessageQueueGet (osMessageQueueId_t mq_id, void *msg_ptr, uint8_t *msg_prio, uint32_t timeout); + + /// Get maximum number of messages in a Message Queue. + /// \param[in] mq_id message queue ID obtained by \ref osMessageQueueNew. + /// \return maximum number of messages. + uint32_t osMessageQueueGetCapacity (osMessageQueueId_t mq_id); + + /// Get maximum message size in a Message Queue. + /// \param[in] mq_id message queue ID obtained by \ref osMessageQueueNew. + /// \return maximum message size in bytes. + uint32_t osMessageQueueGetMsgSize (osMessageQueueId_t mq_id); + + /// Get number of queued messages in a Message Queue. + /// \param[in] mq_id message queue ID obtained by \ref osMessageQueueNew. + /// \return number of queued messages. + uint32_t osMessageQueueGetCount (osMessageQueueId_t mq_id); + + /// Get number of available slots for messages in a Message Queue. + /// \param[in] mq_id message queue ID obtained by \ref osMessageQueueNew. + /// \return number of available slots for messages. + uint32_t osMessageQueueGetSpace (osMessageQueueId_t mq_id); + + /// Reset a Message Queue to initial empty state. + /// \param[in] mq_id message queue ID obtained by \ref osMessageQueueNew. + /// \return status code that indicates the execution status of the function. + osStatus_t osMessageQueueReset (osMessageQueueId_t mq_id); + + /// Delete a Message Queue object. + /// \param[in] mq_id message queue ID obtained by \ref osMessageQueueNew. + /// \return status code that indicates the execution status of the function. 
+ osStatus_t osMessageQueueDelete (osMessageQueueId_t mq_id); + + + #ifdef __cplusplus + } + #endif + + #endif // CMSIS_OS2_H_ \ No newline at end of file diff --git a/adapter/cmsis/cmsis_header/RTOS2/os_tick.h b/adapter/cmsis/cmsis_header/RTOS2/os_tick.h new file mode 100644 index 00000000..551984c7 --- /dev/null +++ b/adapter/cmsis/cmsis_header/RTOS2/os_tick.h @@ -0,0 +1,80 @@ +/**************************************************************************//** + * @file os_tick.h + * @brief CMSIS OS Tick header file + * @version V1.0.2 + * @date 19. March 2021 + ******************************************************************************/ +/* + * Copyright (c) 2017-2021 ARM Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + #ifndef OS_TICK_H + #define OS_TICK_H + + #include + + #ifdef __cplusplus + extern "C" + { + #endif + + /// IRQ Handler. + #ifndef IRQHANDLER_T + #define IRQHANDLER_T + typedef void (*IRQHandler_t) (void); + #endif + + /// Setup OS Tick timer to generate periodic RTOS Kernel Ticks + /// \param[in] freq tick frequency in Hz + /// \param[in] handler tick IRQ handler + /// \return 0 on success, -1 on error. + int32_t OS_Tick_Setup (uint32_t freq, IRQHandler_t handler); + + /// Enable OS Tick timer interrupt + void OS_Tick_Enable (void); + + /// Disable OS Tick timer interrupt + void OS_Tick_Disable (void); + + /// Acknowledge execution of OS Tick timer interrupt + void OS_Tick_AcknowledgeIRQ (void); + + /// Get OS Tick timer IRQ number + /// \return OS Tick IRQ number + int32_t OS_Tick_GetIRQn (void); + + /// Get OS Tick timer clock frequency + /// \return OS Tick timer clock frequency in Hz + uint32_t OS_Tick_GetClock (void); + + /// Get OS Tick timer interval reload value + /// \return OS Tick timer interval reload value + uint32_t OS_Tick_GetInterval (void); + + /// Get OS Tick timer counter value + /// \return OS Tick timer counter value + uint32_t OS_Tick_GetCount (void); + + /// Get OS Tick timer overflow status + /// \return OS Tick overflow status (1 - overflow, 0 - no overflow). + uint32_t OS_Tick_GetOverflow (void); + + #ifdef __cplusplus + } + #endif + + #endif /* OS_TICK_H */ \ No newline at end of file diff --git a/adapter/cmsis/cmsis_header/RTOS2/wrapper.h b/adapter/cmsis/cmsis_header/RTOS2/wrapper.h new file mode 100644 index 00000000..036c8785 --- /dev/null +++ b/adapter/cmsis/cmsis_header/RTOS2/wrapper.h @@ -0,0 +1,16 @@ +// Copyright (c) 2025 vivo Mobile Communication Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#include "cmsis_os2.h" +#include "os_tick.h" \ No newline at end of file diff --git a/adapter/cmsis/cmsis_header/src/lib.rs b/adapter/cmsis/cmsis_header/src/lib.rs new file mode 100644 index 00000000..ca58df59 --- /dev/null +++ b/adapter/cmsis/cmsis_header/src/lib.rs @@ -0,0 +1,23 @@ +// Copyright (c) 2025 vivo Mobile Communication Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![allow(non_upper_case_globals)] +#![allow(non_camel_case_types)] +#![allow(non_snake_case)] +#![allow(dead_code)] +#![no_std] + +use core::{env, include}; + +include!(env!("BINDGEN_DIR")); diff --git a/adapter/cmsis/src/lib.rs b/adapter/cmsis/src/lib.rs new file mode 100644 index 00000000..ca92f40b --- /dev/null +++ b/adapter/cmsis/src/lib.rs @@ -0,0 +1,87 @@ +// Copyright (c) 2025 vivo Mobile Communication Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
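
A short C usage sketch (illustrative only, not part of this patch): it combines osKernelGetTickCount() and osKernelGetTickFreq(), declared in cmsis_os2.h above and implemented by the adapter in os2/tick.rs below, to report elapsed time in milliseconds. The names uptime_ms and app_report_uptime, and the use of printf, are hypothetical.

#include <stdint.h>
#include <stdio.h>
#include "cmsis_os2.h"

/* Hypothetical helper: convert the kernel tick count to milliseconds using
 * the tick frequency reported by the port (100 Hz with the defconfigs here). */
static uint32_t uptime_ms(void) {
    uint32_t ticks = osKernelGetTickCount();
    uint32_t freq  = osKernelGetTickFreq();
    if (freq == 0U) {
        return 0U;
    }
    return (uint32_t)(((uint64_t)ticks * 1000U) / freq);
}

void app_report_uptime(void) {
    printf("uptime: %lu ms\n", (unsigned long)uptime_ms());
}
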
+ +#![allow(non_snake_case)] +#![no_std] +#![cfg_attr(test, feature(custom_test_frameworks))] +#![cfg_attr(test, test_runner(tests::adapter_test_runner))] +#![cfg_attr(test, reexport_test_harness_main = "adapter_test_main")] +#![cfg_attr(test, no_main)] + +extern crate alloc; +#[cfg(cmsis_rtos1_adapter)] +pub mod os1; +#[cfg(cmsis_rtos2_adapter)] +pub mod os2; + +#[cfg(test)] +mod tests { + use super::*; + use blueos::{ + allocator::KernelAllocator, + arch, scheduler, + thread::{Builder, Entry, Thread, ThreadNode}, + }; + + #[global_allocator] + static ALLOCATOR: KernelAllocator = KernelAllocator; + + #[used] + #[link_section = ".bk_app_array"] + static INIT_TEST: extern "C" fn() = init_test; + + #[inline(never)] + pub fn adapter_test_runner(tests: &[&dyn Fn()]) { + let t = scheduler::current_thread(); + semihosting::println!("CMSIS adapter unittest started"); + semihosting::println!("Running {} tests", tests.len()); + semihosting::println!( + "Before test, thread 0x{:x}, rc: {}, heap status: {:?}, sp: 0x{:x}", + Thread::id(&t), + ThreadNode::strong_count(&t), + ALLOCATOR.memory_info(), + arch::current_sp(), + ); + for test in tests { + test(); + } + semihosting::println!( + "After test, thread 0x{:x}, heap status: {:?}, sp: 0x{:x}", + Thread::id(&t), + ALLOCATOR.memory_info(), + arch::current_sp(), + ); + semihosting::println!("Adapter unittest ended"); + + #[cfg(coverage)] + blueos::coverage::write_coverage_data(); + } + + extern "C" fn test_main() { + adapter_test_main(); + } + + extern "C" fn init_test() { + semihosting::println!("create test thread"); + let t = Builder::new(Entry::C(test_main)).start(); + } + + #[panic_handler] + fn oops(info: &core::panic::PanicInfo<'_>) -> ! { + let _dig = blueos::support::DisableInterruptGuard::new(); + semihosting::println!("{}", info); + semihosting::println!("Oops: {}", info.message()); + loop {} + } +} diff --git a/adapter/cmsis/src/os1/mod.rs b/adapter/cmsis/src/os1/mod.rs new file mode 100644 index 00000000..e6a70547 --- /dev/null +++ b/adapter/cmsis/src/os1/mod.rs @@ -0,0 +1,15 @@ +// Copyright (c) 2025 vivo Mobile Communication Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod tick; diff --git a/adapter/cmsis/src/os1/tick.rs b/adapter/cmsis/src/os1/tick.rs new file mode 100644 index 00000000..6bfdf3fe --- /dev/null +++ b/adapter/cmsis/src/os1/tick.rs @@ -0,0 +1,42 @@ +// Copyright (c) 2025 vivo Mobile Communication Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
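
Likewise, a brief C sketch (illustrative only) of how a CMSIS-RTOS1 caller typically consumes the osKernelSysTick() shim implemented in os1/tick.rs just below, together with the exported os_tickfreq value. spin_wait_ms is a hypothetical helper; it assumes os_tickfreq is visible to C as an extern constant via this port's cmsis_os.h, as the comment in tick.rs states.

#include <stdint.h>
#include "cmsis_os.h"

/* Hypothetical helper: busy-wait roughly `ms` milliseconds by polling the
 * kernel system tick; real code would normally block on a kernel delay
 * service instead of spinning. */
void spin_wait_ms(uint32_t ms) {
    uint32_t start = osKernelSysTick();
    uint32_t ticks = (uint32_t)(((uint64_t)ms * os_tickfreq) / 1000U);
    while ((uint32_t)(osKernelSysTick() - start) < ticks) {
        /* unsigned subtraction keeps the comparison valid across wrap-around */
    }
}
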
+ +use blueos::time; +use blueos_kconfig::TICKS_PER_SECOND; + +// Define constants that will be exported to C +// These match the extern const declarations in cmsis_os.h +#[no_mangle] +pub static os_tickfreq: u32 = TICKS_PER_SECOND as u32; // System timer frequency in Hz + +/// Get the RTOS kernel system timer counter. +/// \return RTOS kernel system timer as 32-bit value +/// uint32_t osKernelSysTick (void); +#[no_mangle] +pub extern "C" fn osKernelSysTick() -> u32 { + time::get_sys_ticks() as u32 +} + + +#[cfg(test)] +mod tests { + use super::*; + use blueos_test_macro::test; + + #[test] + fn test_osKernelSysTick() { + let tick = osKernelSysTick(); + assert!(tick > 0); + } +} \ No newline at end of file diff --git a/adapter/cmsis/src/os2/mod.rs b/adapter/cmsis/src/os2/mod.rs new file mode 100644 index 00000000..e6a70547 --- /dev/null +++ b/adapter/cmsis/src/os2/mod.rs @@ -0,0 +1,15 @@ +// Copyright (c) 2025 vivo Mobile Communication Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod tick; diff --git a/adapter/cmsis/src/os2/tick.rs b/adapter/cmsis/src/os2/tick.rs new file mode 100644 index 00000000..16c0ee4f --- /dev/null +++ b/adapter/cmsis/src/os2/tick.rs @@ -0,0 +1,69 @@ +// Copyright (c) 2025 vivo Mobile Communication Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use blueos::time; +use blueos_kconfig::TICKS_PER_SECOND; + +// Get the RTOS kernel tick count. +// \return RTOS kernel current tick count. +// uint32_t osKernelGetTickCount (void); +#[no_mangle] +pub extern "C" fn osKernelGetTickCount() -> u32 { + time::get_sys_ticks() as u32 +} + +// Get the RTOS kernel tick frequency. +// \return frequency of the kernel tick in hertz, i.e. kernel ticks per second. +// uint32_t osKernelGetTickFreq (void); +#[no_mangle] +pub extern "C" fn osKernelGetTickFreq() -> u32 { + TICKS_PER_SECOND as u32 +} + +// Get the RTOS kernel system timer count. +// \return RTOS kernel current system timer count as 32-bit value. +// uint32_t osKernelGetSysTimerCount (void); +#[no_mangle] +pub extern "C" fn osKernelGetSysTimerCount() -> u32 { + time::get_sys_cycles() as u32 +} + +// Get the RTOS kernel system timer frequency. +// \return frequency of the system timer in hertz, i.e. timer ticks per second. 
+// uint32_t osKernelGetSysTimerFreq (void); +#[no_mangle] +pub extern "C" fn osKernelGetSysTimerFreq() -> u32 { + TICKS_PER_SECOND as u32 +} + +#[cfg(test)] +mod tests { + use super::*; + use blueos_test_macro::test; + + #[test] + fn test_osKernelSysTick() { + let tick = osKernelGetTickCount(); + assert!(tick > 0); + + let count = osKernelGetSysTimerCount(); + assert!(count > 0); + + let freq = osKernelGetSysTimerFreq(); + assert!(freq > 0); + + let freq = osKernelGetTickFreq(); + assert!(freq > 0); + } +} \ No newline at end of file diff --git a/adapter/coverage.checker b/adapter/coverage.checker new file mode 100644 index 00000000..79cc3a7f --- /dev/null +++ b/adapter/coverage.checker @@ -0,0 +1,3 @@ +// TOTAL-TIMEOUT: 8 +// ASSERT-SUCC: coverage test end. +// ASSERT-FAIL: Oops: \ No newline at end of file diff --git a/adapter/unittests.checker b/adapter/unittests.checker new file mode 100644 index 00000000..19c61bf6 --- /dev/null +++ b/adapter/unittests.checker @@ -0,0 +1,3 @@ +// TOTAL-TIMEOUT: 8 +// ASSERT-SUCC: Adapter unittest ended +// ASSERT-FAIL: Oops: \ No newline at end of file diff --git a/kconfig/config/.config b/kconfig/config/.config new file mode 100644 index 00000000..6a53dc43 --- /dev/null +++ b/kconfig/config/.config @@ -0,0 +1,61 @@ +CONFIG_IRQ_PRIORITY_BITS_2=y +# CONFIG_IRQ_PRIORITY_BITS_3 is not set +# CONFIG_IRQ_PRIORITY_BITS_8 is not set +CONFIG_ALIGN_SIZE=8 +CONFIG_TICKS_PER_SECOND=100 +# CONFIG_SMP is not set +CONFIG_NUM_CORES=1 +CONFIG_THREAD_PRIORITY=y +# CONFIG_THREAD_PRIORITY_32 is not set +CONFIG_THREAD_PRIORITY_256=y +CONFIG_MAIN_THREAD_PRIORITY=100 +CONFIG_THREAD_PRIORITY_MAX=256 +CONFIG_SERIAL_RX_FIFO_SIZE=512 +CONFIG_SERIAL_TX_FIFO_SIZE=512 +# CONFIG_ALLOCATOR_TLSF is not set +CONFIG_ALLOCATOR_SLAB=y +# CONFIG_ALLOCATOR_LLFF is not set +# CONFIG_ALLOCATOR_BUDDY is not set +CONFIG_ALLOCATOR="slab" +CONFIG_SOFT_TIMER=y +CONFIG_EVENT_FLAGS=y +CONFIG_ROBIN_SCHEDULER=y +CONFIG_ROBIN_SLICE=10 +CONFIG_OVERFLOW_CHECK=y +CONFIG_STACK_HIGHWATER_CHECK=y +# CONFIG_DEBUGGING_SCHEDULER is not set +CONFIG_MAIN_THREAD_STACK_SIZE=12288 +CONFIG_IDLE_THREAD_STACK_SIZE=2048 +CONFIG_TIMER_THREAD_STACK_SIZE=2048 +# CONFIG_FDT is not set +# CONFIG_VIRTIO is not set +CONFIG_PROCFS=y +CONFIG_NETWORK_STACK_SIZE=24576 + +# +# smoltcp TCP/IP Stack Configuration +# +CONFIG_SMOLTCP=y +CONFIG_ASSEMBLER_MAX_SEGMENT_COUNT=4 +CONFIG_REASSEMBLY_BUFFER_COUNT=4 +CONFIG_REASSEMBLY_BUFFER_SIZE=1500 +CONFIG_FRAGMENTATION_BUFFER_SIZE=4096 +CONFIG_IFACE_MAX_ADDR_COUNT=8 +CONFIG_IFACE_MAX_MULTICAST_GROUP_COUNT=4 +CONFIG_IFACE_MAX_ROUTE_COUNT=2 +CONFIG_IFACE_MAX_SIXLOWPAN_ADDRESS_CONTEXT_COUNT=4 +CONFIG_IFACE_NEIGHBOR_CACHE_COUNT=8 +CONFIG_RPL_RELATIONS_BUFFER_COUNT=16 +CONFIG_RPL_PARENTS_BUFFER_COUNT=8 +CONFIG_IPV6_HBH_MAX_OPTIONS=4 +CONFIG_DNS_MAX_NAME_SIZE=255 +CONFIG_DNS_MAX_RESULT_COUNT=1 +CONFIG_DNS_MAX_SERVER_COUNT=1 +# end of smoltcp TCP/IP Stack Configuration + +# +# os adapter configuration +# +# CONFIG_CMSIS_RTOS1_ADAPTER is not set +CONFIG_CMSIS_RTOS2_ADAPTER=y +# end of os adapter configuration diff --git a/kconfig/config/Kconfig b/kconfig/config/Kconfig index a3b048d9..187df9b8 100644 --- a/kconfig/config/Kconfig +++ b/kconfig/config/Kconfig @@ -249,3 +249,24 @@ config DNS_MAX_SERVER_COUNT Maximum amount of DNS servers that can be configured in one DNS socket. endmenu # SMOLTCP + +# os adapter configuration +menu "os adapter configuration" + choice + prompt "RTOS Adapter Selection" + optional + help + Select which RTOS adapter to use. Only one can be selected. 
+ + config CMSIS_RTOS1_ADAPTER + bool "CMSIS RTOS1 Adapter" + help + os adapter for CMSIS RTOS1 + + config CMSIS_RTOS2_ADAPTER + bool "CMSIS RTOS2 Adapter" + help + os adapter for CMSIS RTOS2 + + endchoice +endmenu # os adapter configuration \ No newline at end of file diff --git a/kconfig/config/qemu_mps2_an385/coverage/defconfig b/kconfig/config/qemu_mps2_an385/coverage/defconfig index 2edee271..6a53dc43 100644 --- a/kconfig/config/qemu_mps2_an385/coverage/defconfig +++ b/kconfig/config/qemu_mps2_an385/coverage/defconfig @@ -52,3 +52,10 @@ CONFIG_DNS_MAX_NAME_SIZE=255 CONFIG_DNS_MAX_RESULT_COUNT=1 CONFIG_DNS_MAX_SERVER_COUNT=1 # end of smoltcp TCP/IP Stack Configuration + +# +# os adapter configuration +# +# CONFIG_CMSIS_RTOS1_ADAPTER is not set +CONFIG_CMSIS_RTOS2_ADAPTER=y +# end of os adapter configuration diff --git a/kconfig/config/qemu_mps2_an385/debug/defconfig b/kconfig/config/qemu_mps2_an385/debug/defconfig index 2edee271..6a53dc43 100644 --- a/kconfig/config/qemu_mps2_an385/debug/defconfig +++ b/kconfig/config/qemu_mps2_an385/debug/defconfig @@ -52,3 +52,10 @@ CONFIG_DNS_MAX_NAME_SIZE=255 CONFIG_DNS_MAX_RESULT_COUNT=1 CONFIG_DNS_MAX_SERVER_COUNT=1 # end of smoltcp TCP/IP Stack Configuration + +# +# os adapter configuration +# +# CONFIG_CMSIS_RTOS1_ADAPTER is not set +CONFIG_CMSIS_RTOS2_ADAPTER=y +# end of os adapter configuration diff --git a/kconfig/config/qemu_mps2_an385/release/defconfig b/kconfig/config/qemu_mps2_an385/release/defconfig index 2edee271..6a53dc43 100644 --- a/kconfig/config/qemu_mps2_an385/release/defconfig +++ b/kconfig/config/qemu_mps2_an385/release/defconfig @@ -52,3 +52,10 @@ CONFIG_DNS_MAX_NAME_SIZE=255 CONFIG_DNS_MAX_RESULT_COUNT=1 CONFIG_DNS_MAX_SERVER_COUNT=1 # end of smoltcp TCP/IP Stack Configuration + +# +# os adapter configuration +# +# CONFIG_CMSIS_RTOS1_ADAPTER is not set +CONFIG_CMSIS_RTOS2_ADAPTER=y +# end of os adapter configuration diff --git a/kconfig/config/qemu_mps3_an547/debug/defconfig b/kconfig/config/qemu_mps3_an547/debug/defconfig index 7b204fa6..7ac12d9b 100644 --- a/kconfig/config/qemu_mps3_an547/debug/defconfig +++ b/kconfig/config/qemu_mps3_an547/debug/defconfig @@ -18,6 +18,7 @@ CONFIG_ALLOCATOR_TLSF=y # CONFIG_ALLOCATOR_BUDDY is not set CONFIG_ALLOCATOR="tlsf" CONFIG_SOFT_TIMER=y +# CONFIG_EVENT_FLAGS is not set CONFIG_ROBIN_SCHEDULER=y CONFIG_ROBIN_SLICE=10 CONFIG_OVERFLOW_CHECK=y @@ -51,3 +52,10 @@ CONFIG_DNS_MAX_NAME_SIZE=255 CONFIG_DNS_MAX_RESULT_COUNT=1 CONFIG_DNS_MAX_SERVER_COUNT=1 # end of smoltcp TCP/IP Stack Configuration + +# +# os adapter configuration +# +CONFIG_CMSIS_RTOS1_ADAPTER=y +# CONFIG_CMSIS_RTOS2_ADAPTER is not set +# end of os adapter configuration diff --git a/kconfig/config/qemu_mps3_an547/release/defconfig b/kconfig/config/qemu_mps3_an547/release/defconfig index 7b204fa6..7ac12d9b 100644 --- a/kconfig/config/qemu_mps3_an547/release/defconfig +++ b/kconfig/config/qemu_mps3_an547/release/defconfig @@ -18,6 +18,7 @@ CONFIG_ALLOCATOR_TLSF=y # CONFIG_ALLOCATOR_BUDDY is not set CONFIG_ALLOCATOR="tlsf" CONFIG_SOFT_TIMER=y +# CONFIG_EVENT_FLAGS is not set CONFIG_ROBIN_SCHEDULER=y CONFIG_ROBIN_SLICE=10 CONFIG_OVERFLOW_CHECK=y @@ -51,3 +52,10 @@ CONFIG_DNS_MAX_NAME_SIZE=255 CONFIG_DNS_MAX_RESULT_COUNT=1 CONFIG_DNS_MAX_SERVER_COUNT=1 # end of smoltcp TCP/IP Stack Configuration + +# +# os adapter configuration +# +CONFIG_CMSIS_RTOS1_ADAPTER=y +# CONFIG_CMSIS_RTOS2_ADAPTER is not set +# end of os adapter configuration diff --git a/kernel/src/arch/aarch64/mod.rs b/kernel/src/arch/aarch64/mod.rs index 
9f0c0bea..ad699795 100644 --- a/kernel/src/arch/aarch64/mod.rs +++ b/kernel/src/arch/aarch64/mod.rs @@ -388,7 +388,7 @@ pub(crate) extern "C" fn idle() { } #[inline] -pub(crate) extern "C" fn current_sp() -> usize { +pub extern "C" fn current_sp() -> usize { let x: usize; unsafe { core::arch::asm!("mov {}, sp", out(reg) x, options(nostack, nomem)) }; x diff --git a/kernel/src/arch/arm/mod.rs b/kernel/src/arch/arm/mod.rs index 9bb3dbb0..1967c1e8 100644 --- a/kernel/src/arch/arm/mod.rs +++ b/kernel/src/arch/arm/mod.rs @@ -462,7 +462,7 @@ pub extern "C" fn current_psp() -> usize { } #[naked] -pub extern "C" fn switch_context_with_hook( +pub(crate) extern "C" fn switch_context_with_hook( saved_sp_mut: *mut u8, to_sp: usize, hook: *mut ContextSwitchHookHolder, @@ -513,7 +513,7 @@ pub extern "C" fn restore_context(to_sp: usize) -> ! { } #[inline(always)] -pub extern "C" fn restore_context_with_hook(to_sp: usize, hook: *mut ContextSwitchHookHolder) -> ! { +pub(crate) extern "C" fn restore_context_with_hook(to_sp: usize, hook: *mut ContextSwitchHookHolder) -> ! { switch_context_with_hook(core::ptr::null_mut(), to_sp, hook); unreachable!("Should have switched to another thread"); } diff --git a/kernel/src/arch/mod.rs b/kernel/src/arch/mod.rs index 07677223..b19aa450 100644 --- a/kernel/src/arch/mod.rs +++ b/kernel/src/arch/mod.rs @@ -15,14 +15,14 @@ #[cfg(target_arch = "arm")] pub(crate) mod arm; #[cfg(target_arch = "arm")] -pub(crate) use arm::*; +pub use arm::*; #[cfg(target_arch = "riscv64")] pub(crate) mod riscv64; #[cfg(target_arch = "riscv64")] -pub(crate) use riscv64::*; +pub use riscv64::*; #[cfg(target_arch = "aarch64")] pub(crate) mod aarch64; #[cfg(target_arch = "aarch64")] -pub(crate) use aarch64::*; +pub use aarch64::*; diff --git a/kernel/src/arch/riscv64/mod.rs b/kernel/src/arch/riscv64/mod.rs index fa20e5bf..5d68aca8 100644 --- a/kernel/src/arch/riscv64/mod.rs +++ b/kernel/src/arch/riscv64/mod.rs @@ -228,7 +228,7 @@ pub(crate) extern "C" fn enable_local_irq_restore(old: usize) { } #[inline] -pub(crate) extern "C" fn current_sp() -> usize { +pub extern "C" fn current_sp() -> usize { let x: usize; unsafe { core::arch::asm!("mv {}, sp", out(reg) x, options(nostack, nomem)) }; x diff --git a/kernel/src/lib.rs b/kernel/src/lib.rs index 7540ae78..ce50ad0d 100644 --- a/kernel/src/lib.rs +++ b/kernel/src/lib.rs @@ -74,7 +74,7 @@ pub mod ffi { } pub mod allocator; -pub(crate) mod arch; +pub mod arch; pub mod asynk; pub(crate) mod boards; pub(crate) mod boot; @@ -93,7 +93,7 @@ pub mod support; pub mod sync; pub mod syscall_handlers; pub mod thread; -pub(crate) mod time; +pub mod time; pub mod types; pub mod vfs; diff --git a/kernel/src/support.rs b/kernel/src/support.rs index cdfac56c..7aa06767 100644 --- a/kernel/src/support.rs +++ b/kernel/src/support.rs @@ -25,7 +25,7 @@ use core::{ }; #[derive(Debug)] -pub(crate) struct DisableInterruptGuard { +pub struct DisableInterruptGuard { old: usize, } diff --git a/kernel/src/sync/event_flags.rs b/kernel/src/sync/event_flags.rs index 17a3f030..541f290b 100644 --- a/kernel/src/sync/event_flags.rs +++ b/kernel/src/sync/event_flags.rs @@ -246,7 +246,7 @@ mod tests { assert!(result.is_ok()); // Wait for flag that doesn't exist (should timeout) - let result = event_flags.wait(0x02, EventFlagsMode::ANY, 100); + let result = event_flags.wait(0x02, EventFlagsMode::ALL, 100); assert_eq!(result, Err(code::ETIMEDOUT)); } diff --git a/kernel/src/time/mod.rs b/kernel/src/time/mod.rs index 7850fb32..6acfa7d7 100644 --- a/kernel/src/time/mod.rs +++ 
b/kernel/src/time/mod.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -pub(crate) mod systick; +pub mod systick; pub(crate) mod timer; use crate::{arch, boards, scheduler, support::DisableInterruptGuard, thread::Thread}; From d70dcc9550a82ce71a022358eb5c30d90da20f24 Mon Sep 17 00:00:00 2001 From: Han-Jiang277 Date: Tue, 14 Oct 2025 08:54:39 +0800 Subject: [PATCH 02/23] Fix format --- adapter/cmsis/BUILD.gn | 2 +- adapter/cmsis/src/os1/tick.rs | 3 +-- adapter/cmsis/src/os2/tick.rs | 2 +- kernel/src/arch/arm/mod.rs | 5 ++++- kernel/src/lib.rs | 5 ++--- kernel/src/sync/event_flags.rs | 7 ++----- kernel/src/sync/semaphore.rs | 8 +++++--- 7 files changed, 16 insertions(+), 16 deletions(-) diff --git a/adapter/cmsis/BUILD.gn b/adapter/cmsis/BUILD.gn index dc83ae8c..1b7173f7 100644 --- a/adapter/cmsis/BUILD.gn +++ b/adapter/cmsis/BUILD.gn @@ -72,4 +72,4 @@ run_qemu_check("run_cmsis_unittest") { } else { checker = "//kernel/adapter/unittests.checker" } -} \ No newline at end of file +} diff --git a/adapter/cmsis/src/os1/tick.rs b/adapter/cmsis/src/os1/tick.rs index 6bfdf3fe..7868259d 100644 --- a/adapter/cmsis/src/os1/tick.rs +++ b/adapter/cmsis/src/os1/tick.rs @@ -28,7 +28,6 @@ pub extern "C" fn osKernelSysTick() -> u32 { time::get_sys_ticks() as u32 } - #[cfg(test)] mod tests { use super::*; @@ -39,4 +38,4 @@ mod tests { let tick = osKernelSysTick(); assert!(tick > 0); } -} \ No newline at end of file +} diff --git a/adapter/cmsis/src/os2/tick.rs b/adapter/cmsis/src/os2/tick.rs index 16c0ee4f..eb32a8a1 100644 --- a/adapter/cmsis/src/os2/tick.rs +++ b/adapter/cmsis/src/os2/tick.rs @@ -66,4 +66,4 @@ mod tests { let freq = osKernelGetTickFreq(); assert!(freq > 0); } -} \ No newline at end of file +} diff --git a/kernel/src/arch/arm/mod.rs b/kernel/src/arch/arm/mod.rs index 1967c1e8..ad05821d 100644 --- a/kernel/src/arch/arm/mod.rs +++ b/kernel/src/arch/arm/mod.rs @@ -513,7 +513,10 @@ pub extern "C" fn restore_context(to_sp: usize) -> ! { } #[inline(always)] -pub(crate) extern "C" fn restore_context_with_hook(to_sp: usize, hook: *mut ContextSwitchHookHolder) -> ! { +pub(crate) extern "C" fn restore_context_with_hook( + to_sp: usize, + hook: *mut ContextSwitchHookHolder, +) -> ! 
{ switch_context_with_hook(core::ptr::null_mut(), to_sp, hook); unreachable!("Should have switched to another thread"); } diff --git a/kernel/src/lib.rs b/kernel/src/lib.rs index ce50ad0d..d8e655aa 100644 --- a/kernel/src/lib.rs +++ b/kernel/src/lib.rs @@ -130,9 +130,8 @@ mod tests { extern crate alloc; use super::*; use crate::{ - allocator, allocator::KernelAllocator, config, - support::DisableInterruptGuard, sync, time::WAITING_FOREVER, - types::Arc, + allocator, allocator::KernelAllocator, config, support::DisableInterruptGuard, sync, + time::WAITING_FOREVER, types::Arc, }; use blueos_header::syscalls::NR::Nop; use blueos_kconfig::NUM_CORES; diff --git a/kernel/src/sync/event_flags.rs b/kernel/src/sync/event_flags.rs index 541f290b..c78b717e 100644 --- a/kernel/src/sync/event_flags.rs +++ b/kernel/src/sync/event_flags.rs @@ -242,11 +242,11 @@ mod tests { assert!(event_flags.set(0x01).is_ok()); // Wait for any flag (should succeed immediately) - let result = event_flags.wait(0x01, EventFlagsMode::ANY, 100); + let result = event_flags.wait(0x01, EventFlagsMode::ANY | EventFlagsMode::NO_CLEAR, 100); assert!(result.is_ok()); // Wait for flag that doesn't exist (should timeout) - let result = event_flags.wait(0x02, EventFlagsMode::ALL, 100); + let result = event_flags.wait(0x02, EventFlagsMode::ANY, 100); assert_eq!(result, Err(code::ETIMEDOUT)); } @@ -279,9 +279,6 @@ mod tests { let result = event_flags.wait(0, EventFlagsMode::ANY, 0); assert_eq!(result, Err(code::ETIMEDOUT)); - - let result = event_flags.wait(0, EventFlagsMode::ANY, 100); - assert_eq!(result, Err(code::ETIMEDOUT)); } #[test] diff --git a/kernel/src/sync/semaphore.rs b/kernel/src/sync/semaphore.rs index 30c78cca..73a156a6 100644 --- a/kernel/src/sync/semaphore.rs +++ b/kernel/src/sync/semaphore.rs @@ -16,8 +16,9 @@ use super::SpinLock; use crate::{ irq, scheduler, scheduler::{InsertMode, WaitQueue}, - thread, thread::Thread, - time::WAITING_FOREVER, + thread, + thread::Thread, + time::WAITING_FOREVER, types::Int, }; use core::cell::Cell; @@ -73,7 +74,8 @@ impl Semaphore { ); } if old == 0 { - let _ = scheduler::suspend_me_with_timeout(w, WAITING_FOREVER, InsertMode::InsertToEnd); + let _ = + scheduler::suspend_me_with_timeout(w, WAITING_FOREVER, InsertMode::InsertToEnd); w = self.pending.irqsave_lock(); continue; } else { From caceb646540f17f75b0420d7b34c94f6522ea000 Mon Sep 17 00:00:00 2001 From: Han-Jiang277 Date: Wed, 15 Oct 2025 10:07:49 +0800 Subject: [PATCH 03/23] Add kernel Dockerfile --- .../workflows/build_and_push_docker_image.yml | 36 +++++++++++++ Dockerfile | 50 +++++++++++++++++++ 2 files changed, 86 insertions(+) create mode 100644 .github/workflows/build_and_push_docker_image.yml create mode 100644 Dockerfile diff --git a/.github/workflows/build_and_push_docker_image.yml b/.github/workflows/build_and_push_docker_image.yml new file mode 100644 index 00000000..bf1e8b51 --- /dev/null +++ b/.github/workflows/build_and_push_docker_image.yml @@ -0,0 +1,36 @@ +name: Create and push Docker image of the BlueOS kernel ci + +on: + workflow_dispatch: + +jobs: + build-and-push: + runs-on: ubuntu-latest + + steps: + - name: Checkout Repository + uses: actions/checkout@v4 + + - name: Login to GHCR + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.repository_owner }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Set Image Tags + id: tags + run: | + echo "IMAGE_NAME=ghcr.io/${{ github.repository_owner }}/$(echo ${{ github.repository }} | awk -F/ '{print $2}')" >> $GITHUB_ENV + echo 
"SHORT_SHA=${GITHUB_SHA::7}" >> $GITHUB_ENV + echo "tags=latest,$SHORT_SHA" >> $GITHUB_ENV + echo "Tags set: ${{ env.tags }}" + + - name: Build and Push + uses: docker/build-push-action@v5 + with: + context: . + push: true + tags: ${{ env.IMAGE_NAME }}:${{ env.tags }} + labels: | + org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }} \ No newline at end of file diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 00000000..d29f312a --- /dev/null +++ b/Dockerfile @@ -0,0 +1,50 @@ +FROM ubuntu:22.04 + +# Set environment variables +ENV DEBIAN_FRONTEND=noninteractive +ENV PATH="/opt/sysroot/usr/local/bin:/opt/sysroot/usr/local/lib/rustlib/x86_64-unknown-linux-gnu/bin:/opt/arm-gnu-toolchain-14.2.rel1-x86_64-arm-none-eabi/bin/:/opt/arm-gnu-toolchain-14.2.rel1-x86_64-aarch64-none-elf/bin/:${PATH}" + +# Install system packages +RUN apt-get update && \ + apt-get install -y \ + clang \ + python3-kconfiglib \ + ninja-build \ + generate-ninja \ + curl \ + libfdt-dev \ + libslirp-dev \ + libglib2.0-dev \ + build-essential \ + pkg-config \ + && rm -rf /var/lib/apt/lists/* + +# Install Arm GNU toolchain (ARM Cortex-M) +RUN curl -L -o arm-toolchain.tar.xz https://developer.arm.com/-/media/Files/downloads/gnu/14.2.rel1/binrel/arm-gnu-toolchain-14.2.rel1-x86_64-arm-none-eabi.tar.xz && \ + tar xf arm-toolchain.tar.xz -C /opt && \ + rm arm-toolchain.tar.xz + +# Install Arm64 GNU toolchain (AArch64) +RUN curl -L -o aarch64-toolchain.tar.xz https://developer.arm.com/-/media/Files/downloads/gnu/14.2.rel1/binrel/arm-gnu-toolchain-14.2.rel1-x86_64-aarch64-none-elf.tar.xz && \ + tar xf aarch64-toolchain.tar.xz -C /opt && \ + rm aarch64-toolchain.tar.xz + +# Download and unpack prebuilt QEMU +RUN mkdir -p /opt/sysroot && \ + curl -L -o qemu.tar.xz https://github.com/vivoblueos/toolchain/releases/download/v0.8.0/qemu-2025_08_05_12_17.tar.xz && \ + tar xf qemu.tar.xz -C /opt/sysroot && \ + rm qemu.tar.xz + +# Download and unpack prebuilt Rust toolchain +RUN curl -L -o blueos-toolchain.tar.xz https://github.com/vivoblueos/toolchain/releases/download/v0.8.0/blueos-toolchain-ubuntu-latest-2025_09_16_08_50.tar.xz && \ + tar xf blueos-toolchain.tar.xz -C /opt/sysroot && \ + rm blueos-toolchain.tar.xz + +# Install bindgen and cbindgen +RUN cargo install bindgen-cli@0.72.1 cbindgen@0.29.0 + +# Set working directory +WORKDIR /workspace + +# Default command +CMD ["/bin/bash"] \ No newline at end of file From 8b8d3115933b0f67170c9f9f275c883f3f6f4284 Mon Sep 17 00:00:00 2001 From: Han-Jiang277 Date: Wed, 15 Oct 2025 10:33:16 +0800 Subject: [PATCH 04/23] use vivoblueos for test --- .github/workflows/build_and_push_docker_image.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_and_push_docker_image.yml b/.github/workflows/build_and_push_docker_image.yml index bf1e8b51..49374037 100644 --- a/.github/workflows/build_and_push_docker_image.yml +++ b/.github/workflows/build_and_push_docker_image.yml @@ -21,7 +21,7 @@ jobs: - name: Set Image Tags id: tags run: | - echo "IMAGE_NAME=ghcr.io/${{ github.repository_owner }}/$(echo ${{ github.repository }} | awk -F/ '{print $2}')" >> $GITHUB_ENV + echo "IMAGE_NAME=ghcr.io/vivoblueos/$(echo ${{ github.repository }} | awk -F/ '{print $2}')" >> $GITHUB_ENV echo "SHORT_SHA=${GITHUB_SHA::7}" >> $GITHUB_ENV echo "tags=latest,$SHORT_SHA" >> $GITHUB_ENV echo "Tags set: ${{ env.tags }}" From 8e269dc1cadbe887e6464ff71b715ccbbb514943 Mon Sep 17 00:00:00 2001 From: Han-Jiang277 Date: Wed, 15 Oct 2025 10:53:38 +0800 
Subject: [PATCH 05/23] use ubuntu 2404 --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index d29f312a..7dfc24d1 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:22.04 +FROM ubuntu:24.04 # Set environment variables ENV DEBIAN_FRONTEND=noninteractive From 306d4769e11d718659335dfac6e873f81a893e88 Mon Sep 17 00:00:00 2001 From: Han-Jiang277 Date: Wed, 15 Oct 2025 11:02:34 +0800 Subject: [PATCH 06/23] delelte bash at end of dockerfile --- Dockerfile | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/Dockerfile b/Dockerfile index 7dfc24d1..6e96576f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -44,7 +44,4 @@ RUN curl -L -o blueos-toolchain.tar.xz https://github.com/vivoblueos/toolchain/r RUN cargo install bindgen-cli@0.72.1 cbindgen@0.29.0 # Set working directory -WORKDIR /workspace - -# Default command -CMD ["/bin/bash"] \ No newline at end of file +WORKDIR /blueos-dev \ No newline at end of file From 551052aa7b3b01c4bdf7bdad0c3dea4316d9823e Mon Sep 17 00:00:00 2001 From: Han-Jiang277 Date: Wed, 15 Oct 2025 11:40:38 +0800 Subject: [PATCH 07/23] Add repo binary --- .../workflows/build_and_push_docker_image.yml | 5 +-- .../workflows/reusable_cross_repos_build.yml | 39 +------------------ Dockerfile | 4 ++ 3 files changed, 6 insertions(+), 42 deletions(-) diff --git a/.github/workflows/build_and_push_docker_image.yml b/.github/workflows/build_and_push_docker_image.yml index 49374037..d1134d37 100644 --- a/.github/workflows/build_and_push_docker_image.yml +++ b/.github/workflows/build_and_push_docker_image.yml @@ -8,9 +8,6 @@ jobs: runs-on: ubuntu-latest steps: - - name: Checkout Repository - uses: actions/checkout@v4 - - name: Login to GHCR uses: docker/login-action@v3 with: @@ -21,7 +18,7 @@ jobs: - name: Set Image Tags id: tags run: | - echo "IMAGE_NAME=ghcr.io/vivoblueos/$(echo ${{ github.repository }} | awk -F/ '{print $2}')" >> $GITHUB_ENV + echo "IMAGE_NAME=ghcr.io/${{ github.repository_owner }}/$(echo ${{ github.repository }} | awk -F/ '{print $2}')" >> $GITHUB_ENV echo "SHORT_SHA=${GITHUB_SHA::7}" >> $GITHUB_ENV echo "tags=latest,$SHORT_SHA" >> $GITHUB_ENV echo "Tags set: ${{ env.tags }}" diff --git a/.github/workflows/reusable_cross_repos_build.yml b/.github/workflows/reusable_cross_repos_build.yml index 1fd853e9..30318643 100644 --- a/.github/workflows/reusable_cross_repos_build.yml +++ b/.github/workflows/reusable_cross_repos_build.yml @@ -5,7 +5,7 @@ on: jobs: build_and_test: - runs-on: ubuntu-latest + runs-on: ghcr.io/${{ github.repository_owner }}/$(echo ${{ github.repository }}:latest permissions: contents: read checks: write @@ -83,11 +83,6 @@ jobs: })); core.setOutput('fetched_prs', JSON.stringify(fetched_prs)); - - name: Download prebuilt Android repo - run: | - curl -L -o sysroot/usr/local/bin/repo https://storage.googleapis.com/git-repo-downloads/repo - chmod a+x sysroot/usr/local/bin/repo - - name: Init repo and sync run: | repo init --depth=1 -u https://github.com/vivoblueos/manifests.git -b main -m manifest.xml @@ -117,38 +112,6 @@ jobs: run: | repo sync -j$(nproc) - - name: Install packages - run: | - sudo apt-get update - sudo apt-get install -y clang python3-kconfiglib ninja-build generate-ninja curl libfdt-dev libslirp-dev libglib2.0-dev - - - name: Install Arm GNU toolchain - uses: carlosperate/arm-none-eabi-gcc-action@v1 - with: - release: 14.2.Rel1 - - - name: Install Arm64 GNU toolchain - uses: lawkai-vivo/aarch64-none-elf-gcc-action@v1 - with: - release: 14.2.Rel1 - - - 
name: Download and unpack prebuilt QEMU - run: | - curl -L -o qemu.tar.xz https://github.com/vivoblueos/toolchain/releases/download/v0.8.0/qemu-2025_08_05_12_17.tar.xz - tar xvf qemu.tar.xz -C sysroot - rm -rvf qemu.tar.xz - - - name: Download and unpack prebuilt Rust toolchain - run: | - curl -L -o blueos-toolchain.tar.xz https://github.com/vivoblueos/toolchain/releases/download/v0.8.0/blueos-toolchain-ubuntu-latest-2025_09_16_08_50.tar.xz - tar xvf blueos-toolchain.tar.xz -C sysroot - rm -rvf blueos-toolchain.tar.xz - echo "$PWD/sysroot/usr/local/lib/rustlib/x86_64-unknown-linux-gnu/bin" >> "$GITHUB_PATH" - - - name: Install bindgen and cbindgen - run: | - cargo install bindgen-cli@0.72.1 cbindgen@0.29.0 - # FIXME: We should use action's builtin matrix. - name: Build and test the kernel run: | diff --git a/Dockerfile b/Dockerfile index 6e96576f..139927eb 100644 --- a/Dockerfile +++ b/Dockerfile @@ -43,5 +43,9 @@ RUN curl -L -o blueos-toolchain.tar.xz https://github.com/vivoblueos/toolchain/r # Install bindgen and cbindgen RUN cargo install bindgen-cli@0.72.1 cbindgen@0.29.0 +# Install repo +RUN curl -L -o /opt/sysroot/usr/local/bin/repo https://storage.googleapis.com/git-repo-downloads/repo && \ + chmod a+x /opt/sysroot/usr/local/bin/repo + # Set working directory WORKDIR /blueos-dev \ No newline at end of file From 73d5d3777a74f166ca578a92776dfd11cec00054 Mon Sep 17 00:00:00 2001 From: Han-Jiang277 Date: Wed, 15 Oct 2025 15:28:47 +0800 Subject: [PATCH 08/23] fix build workflow path --- .github/workflows/build_and_push_docker_image.yml | 3 +++ .github/workflows/cross_repos_build.yml | 2 +- .github/workflows/reusable_cross_repos_build.yml | 2 +- 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build_and_push_docker_image.yml b/.github/workflows/build_and_push_docker_image.yml index d1134d37..bf1e8b51 100644 --- a/.github/workflows/build_and_push_docker_image.yml +++ b/.github/workflows/build_and_push_docker_image.yml @@ -8,6 +8,9 @@ jobs: runs-on: ubuntu-latest steps: + - name: Checkout Repository + uses: actions/checkout@v4 + - name: Login to GHCR uses: docker/login-action@v3 with: diff --git a/.github/workflows/cross_repos_build.yml b/.github/workflows/cross_repos_build.yml index d534e24d..0f20d194 100644 --- a/.github/workflows/cross_repos_build.yml +++ b/.github/workflows/cross_repos_build.yml @@ -6,4 +6,4 @@ on: jobs: call_workflow: - uses: vivoblueos/kernel/.github/workflows/reusable_cross_repos_build.yml@main + uses: ./.github/workflows/reusable_cross_repos_build.yml diff --git a/.github/workflows/reusable_cross_repos_build.yml b/.github/workflows/reusable_cross_repos_build.yml index 30318643..dfef4ddf 100644 --- a/.github/workflows/reusable_cross_repos_build.yml +++ b/.github/workflows/reusable_cross_repos_build.yml @@ -5,7 +5,7 @@ on: jobs: build_and_test: - runs-on: ghcr.io/${{ github.repository_owner }}/$(echo ${{ github.repository }}:latest + runs-on: ghcr.io/${{ github.repository_owner }}/kernel:latest permissions: contents: read checks: write From cb3ef139b94ad8c01677cab1f038470bfff56bce Mon Sep 17 00:00:00 2001 From: han-jiang277 Date: Wed, 15 Oct 2025 18:29:54 +0800 Subject: [PATCH 09/23] fix runs-on --- .github/workflows/reusable_cross_repos_build.yml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/workflows/reusable_cross_repos_build.yml b/.github/workflows/reusable_cross_repos_build.yml index dfef4ddf..06bc4c76 100644 --- a/.github/workflows/reusable_cross_repos_build.yml +++ 
b/.github/workflows/reusable_cross_repos_build.yml @@ -5,12 +5,16 @@ on: jobs: build_and_test: - runs-on: ghcr.io/${{ github.repository_owner }}/kernel:latest + runs-on: ubuntu-latest + permissions: contents: read checks: write pull-requests: write + container: + image: ghcr.io/${{ github.repository_owner }}/kernel:latest + if: ${{ github.event.issue.pull_request && contains(github.event.comment.body, 'build_prs') }} steps: - name: Notify job started From 7fc1874bf23986f44888aaf8a561d5b480f8c7c8 Mon Sep 17 00:00:00 2001 From: han-jiang277 Date: Wed, 15 Oct 2025 18:34:00 +0800 Subject: [PATCH 10/23] add git --- Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/Dockerfile b/Dockerfile index 139927eb..82c6df48 100644 --- a/Dockerfile +++ b/Dockerfile @@ -7,6 +7,7 @@ ENV PATH="/opt/sysroot/usr/local/bin:/opt/sysroot/usr/local/lib/rustlib/x86_64-u # Install system packages RUN apt-get update && \ apt-get install -y \ + git \ clang \ python3-kconfiglib \ ninja-build \ From 58bbdd7350af206e3ba5d5176ac33f2e874b1f76 Mon Sep 17 00:00:00 2001 From: han-jiang277 Date: Wed, 15 Oct 2025 21:33:42 +0800 Subject: [PATCH 11/23] use bash --- Dockerfile | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 82c6df48..27adb039 100644 --- a/Dockerfile +++ b/Dockerfile @@ -49,4 +49,6 @@ RUN curl -L -o /opt/sysroot/usr/local/bin/repo https://storage.googleapis.com/gi chmod a+x /opt/sysroot/usr/local/bin/repo # Set working directory -WORKDIR /blueos-dev \ No newline at end of file +WORKDIR /blueos-dev + +CMD ["/bin/bash"] \ No newline at end of file From 9eaaee244422bfbcdb80764413afd10d13948600 Mon Sep 17 00:00:00 2001 From: han-jiang277 Date: Wed, 15 Oct 2025 21:54:32 +0800 Subject: [PATCH 12/23] use sh in docker --- .github/workflows/reusable_cross_repos_build.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/reusable_cross_repos_build.yml b/.github/workflows/reusable_cross_repos_build.yml index 06bc4c76..a075fc2c 100644 --- a/.github/workflows/reusable_cross_repos_build.yml +++ b/.github/workflows/reusable_cross_repos_build.yml @@ -99,16 +99,16 @@ jobs: for patch in patches/*.diff; do patch_abspath=$(realpath ${patch}) repo=$(basename ${patch} ".diff") - if [[ ${repo} != "manifests" ]];then - pushd ${repo} + if [ "${repo}" != "manifests" ]; then + cd ${repo} else - pushd .repo/${repo} + cd .repo/${repo} fi git apply -3 --check --verbose ${patch_abspath} && git apply -3 --verbose ${patch_abspath} if [[ $? 
!= 0 ]]; then exit 1 fi - popd + cd - > /dev/null done - name: Re-sync repos From 12d75d2e58f5a747bd72d26c2c43adbdd3b6ce3f Mon Sep 17 00:00:00 2001 From: han-jiang277 Date: Wed, 15 Oct 2025 22:02:00 +0800 Subject: [PATCH 13/23] Add cargo bin PATH --- .github/workflows/cross_repos_build.yml | 2 +- Dockerfile | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/cross_repos_build.yml b/.github/workflows/cross_repos_build.yml index 0f20d194..a3f5d4af 100644 --- a/.github/workflows/cross_repos_build.yml +++ b/.github/workflows/cross_repos_build.yml @@ -6,4 +6,4 @@ on: jobs: call_workflow: - uses: ./.github/workflows/reusable_cross_repos_build.yml + uses: vivoblueos/kernel/.github/workflows/reusable_cross_repos_build.yml diff --git a/Dockerfile b/Dockerfile index 27adb039..30a97c31 100644 --- a/Dockerfile +++ b/Dockerfile @@ -41,8 +41,8 @@ RUN curl -L -o blueos-toolchain.tar.xz https://github.com/vivoblueos/toolchain/r tar xf blueos-toolchain.tar.xz -C /opt/sysroot && \ rm blueos-toolchain.tar.xz -# Install bindgen and cbindgen -RUN cargo install bindgen-cli@0.72.1 cbindgen@0.29.0 +# Install bindgen and cbindgen to /opt/sysroot/usr/local/bin +RUN CARGO_INSTALL_ROOT=/opt/sysroot/usr/local cargo install bindgen-cli@0.72.1 cbindgen@0.29.0 # Install repo RUN curl -L -o /opt/sysroot/usr/local/bin/repo https://storage.googleapis.com/git-repo-downloads/repo && \ From d471c35793e3052dfefc3f47edd0bc0210353313 Mon Sep 17 00:00:00 2001 From: han-jiang277 Date: Thu, 16 Oct 2025 09:17:19 +0800 Subject: [PATCH 14/23] cant free space in docker --- .github/workflows/reusable_cross_repos_build.yml | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/.github/workflows/reusable_cross_repos_build.yml b/.github/workflows/reusable_cross_repos_build.yml index a075fc2c..b19871e3 100644 --- a/.github/workflows/reusable_cross_repos_build.yml +++ b/.github/workflows/reusable_cross_repos_build.yml @@ -26,18 +26,7 @@ jobs: owner: context.repo.owner, repo: context.repo.repo, body: `Job is started, see ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}.` - }) - - - name: Free Disk Space (Ubuntu) - uses: jlumbroso/free-disk-space@main - with: - tool-cache: false - android: true - dotnet: true - haskell: true - large-packages: true - docker-images: true - swap-storage: true + }) - name: Prepare directories run: | From e5ca56bd09a2049ee1d67dc8cc5576174b19cdfa Mon Sep 17 00:00:00 2001 From: han-jiang277 Date: Thu, 16 Oct 2025 09:27:57 +0800 Subject: [PATCH 15/23] Add lld --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 30a97c31..59a5699c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -8,7 +8,7 @@ ENV PATH="/opt/sysroot/usr/local/bin:/opt/sysroot/usr/local/lib/rustlib/x86_64-u RUN apt-get update && \ apt-get install -y \ git \ - clang \ + clang lld \ python3-kconfiglib \ ninja-build \ generate-ninja \ From d1d9350762dd2f6de5344ff9b7791ed15c9af439 Mon Sep 17 00:00:00 2001 From: han-jiang277 Date: Thu, 16 Oct 2025 10:50:58 +0800 Subject: [PATCH 16/23] run as multi jobs --- .../workflows/reusable_cross_repos_build.yml | 568 +++++++++++++++++- Dockerfile | 14 +- 2 files changed, 560 insertions(+), 22 deletions(-) diff --git a/.github/workflows/reusable_cross_repos_build.yml b/.github/workflows/reusable_cross_repos_build.yml index b19871e3..01400233 100644 --- a/.github/workflows/reusable_cross_repos_build.yml +++ b/.github/workflows/reusable_cross_repos_build.yml @@ -4,17 +4,13 @@ 
on: workflow_call: jobs: - build_and_test: + prepare: runs-on: ubuntu-latest - permissions: contents: read checks: write pull-requests: write - container: - image: ghcr.io/${{ github.repository_owner }}/kernel:latest - if: ${{ github.event.issue.pull_request && contains(github.event.comment.body, 'build_prs') }} steps: - name: Notify job started @@ -26,14 +22,10 @@ jobs: owner: context.repo.owner, repo: context.repo.repo, body: `Job is started, see ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}.` - }) + }) - - name: Prepare directories + - name: Create patches directory run: | - # sysroot/ contains toolchain. - mkdir -p sysroot/usr/local/bin - echo "$PWD/sysroot/usr/local/bin" >> "$GITHUB_PATH" - # patches/ contains patches downloaded. mkdir -p patches - name: Download patches @@ -76,6 +68,25 @@ jobs: })); core.setOutput('fetched_prs', JSON.stringify(fetched_prs)); + - name: Upload patches + uses: actions/upload-artifact@v4 + with: + name: patches + path: patches/ + + setup: + runs-on: ubuntu-latest + needs: prepare + container: + image: ghcr.io/${{ github.repository_owner }}/kernel:latest + + steps: + - name: Download patches + uses: actions/download-artifact@v4 + with: + name: patches + path: patches/ + - name: Init repo and sync run: | repo init --depth=1 -u https://github.com/vivoblueos/manifests.git -b main -m manifest.xml @@ -105,20 +116,541 @@ jobs: run: | repo sync -j$(nproc) - # FIXME: We should use action's builtin matrix. - - name: Build and test the kernel - run: | - ./build/ci/run_ci.py + - name: Upload workspace + uses: actions/upload-artifact@v4 + with: + name: workspace + path: . + include-hidden-files: true + + check_format: + runs-on: ubuntu-latest + needs: setup + container: + image: ghcr.io/${{ github.repository_owner }}/kernel:latest + + steps: + - name: Download workspace + uses: actions/download-artifact@v4 + with: + name: workspace + path: . 
+ + - name: Check format + uses: actions/github-script@v8 + with: + script: | + const { execSync } = require('child_process'); + const fs = require('fs'); + const path = require('path'); + + // Get list of patch files + const patchesDir = 'patches'; + const patchFiles = fs.readdirSync(patchesDir).filter(f => f.endsWith('.diff')); + + if (patchFiles.length === 0) { + console.log('No patch files found for format check'); + return; + } + + let formatErrors = []; + let totalCheckedFiles = 0; + + for (const patchFile of patchFiles) { + const repo = patchFile.replace(/\.diff$/, ''); + let repoPath = repo; + + // Handle manifests repo special case + if (repo === 'manifests') { + continue; + } + + console.log(`\nChecking format in repo: ${repo} (${repoPath})`); + + // Check if repo directory exists + if (!fs.existsSync(repoPath)) { + console.log(`Repo directory ${repoPath} not found, skipping`); + continue; + } + + // Get modified files in this repo + let modifiedFiles = []; + try { + // Change to repo directory and run git status + const originalCwd = process.cwd(); + process.chdir(repoPath); + const gitStatus = execSync('git status --porcelain', { + encoding: 'utf-8' + }); + process.chdir(originalCwd); + + modifiedFiles = gitStatus + .split('\n') + .filter(line => line.trim()) + .map(line => line.substring(3).trim()) // Remove status prefix + .filter(file => file && file.length > 0); + } catch (error) { + console.log(`No git status available in ${repoPath}, skipping`); + continue; + } + + if (modifiedFiles.length === 0) { + console.log(`No modified files found in ${repo}`); + continue; + } + + console.log(`Found ${modifiedFiles.length} modified files in ${repo}:`); + modifiedFiles.forEach(file => console.log(` - ${file}`)); + + // Check format for each modified file + for (const file of modifiedFiles) { + const filePath = path.resolve(repoPath, file); + const ext = path.extname(file).toLowerCase(); + + // Skip if file doesn't exist + if (!fs.existsSync(filePath)) { + console.log(`File ${filePath} doesn't exist, skipping`); + continue; + } + + try { + if (ext === '.gn' || ext === '.gni') { + console.log(`Checking GN format for: ${repo}/${file}`); + const out = execSync(`gn format --dry-run "${filePath}"`, { + encoding: 'utf-8', + stdio: ['inherit', 'pipe', 'pipe'] + }); + if (out.trim()) { + formatErrors.push(`GN formatting issue in ${repo}/${file}:\n${out}`); + } + } else if (ext === '.py') { + console.log(`Checking Python format for: ${repo}/${file}`); + const out = execSync(`yapf3 -d "${filePath}"`, { + encoding: 'utf-8', + stdio: ['inherit', 'pipe', 'pipe'] + }); + if (out.trim()) { + formatErrors.push(`Python formatting issue in ${repo}/${file}:\n${out}`); + } + } else if (ext === '.rs') { + console.log(`Checking Rust format for: ${repo}/${file}`); + const out = execSync(`rustfmt --edition=2021 --check --unstable-features --skip-children "${filePath}"`, { + encoding: 'utf-8', + stdio: ['inherit', 'pipe', 'pipe'] + }); + if (out.trim()) { + formatErrors.push(`Rust formatting issue in ${repo}/${file}:\n${out}`); + } + } + totalCheckedFiles++; + } catch (error) { + // Check if it's a formatting error or other error + const output = (error.stdout || '') + (error.stderr || ''); + if (output.includes('format') || output.includes('Format')) { + formatErrors.push(`Formatting issue in ${repo}/${file}:\n${output}`); + } else { + console.log(`Skipping format check for ${repo}/${file}: ${error.message}`); + } + } + } + } + + console.log(`\nTotal files checked: ${totalCheckedFiles}`); + + if 
(formatErrors.length > 0) { + console.log('❌ Format check failed:'); + formatErrors.forEach(error => console.log(error)); + core.setFailed('Format check failed'); + process.exit(1); + } else { + console.log('✅ All modified files pass format check'); + } + + check_license: + runs-on: ubuntu-latest + needs: setup + container: + image: ghcr.io/${{ github.repository_owner }}/kernel:latest + + steps: + - name: Download workspace + uses: actions/download-artifact@v4 + with: + name: workspace + path: . + + - name: Check license + uses: actions/github-script@v8 + with: + script: | + const fs = require('fs'); + const { execSync } = require('child_process'); + const patchesDir = 'patches'; + let failedRepos = []; + let checkedRepos = []; + + // Find all .diff files in patches/ + const patchFiles = fs.readdirSync(patchesDir).filter(f => f.endsWith('.diff')); + + for (const patchFile of patchFiles) { + const repo = patchFile.replace(/\.diff$/, ''); + let repoPath = repo; + // Skip invalid repos for license check + if (repo.includes('manifests') || repo.includes('libc') || repo.includes('book') || repo.includes('external')) { + console.log(`Skipping ${repo} for license check.`); + continue; + } + + try { + // Run license-eye header check in each repo directory + const originalCwd = process.cwd(); + process.chdir(repoPath); + const result = execSync('license-eye header check', { + encoding: 'utf-8', + stdio: ['inherit', 'pipe', 'pipe'] + }); + process.chdir(originalCwd); + if (result.includes('ERROR one or more files does not have a valid license header')) { + console.log(`License issue in ${repoPath}:\n${result}`); + failedRepos.push(repoPath); + } + } catch (e) { + // Always log output for diagnosis + const out = (e.stdout ? e.stdout.toString() : '') + (e.stderr ? e.stderr.toString() : ''); + if (out.includes('ERROR one or more files does not have a valid license header')) { + console.log(`License issue in ${repoPath}:\n${out}`); + failedRepos.push(repoPath); + } else { + console.log(`Error running license-eye in ${repoPath}:\n${out}`); + failedRepos.push(repoPath); + } + } + } + + if (failedRepos.length > 0) { + core.setFailed(`❌ License header issues found in these repos: ${failedRepos.join(', ')}`); + process.exit(1); + } else if (checkedRepos.length === 0) { + console.log('✅ No repos to check license for.'); + } else { + console.log('✅ All checked repos have valid license headers!'); + } + + build_host: + runs-on: ubuntu-latest + needs: setup + container: + image: ghcr.io/${{ github.repository_owner }}/kernel:latest + + steps: + - name: Download workspace + uses: actions/download-artifact@v4 + with: + name: workspace + path: . 
+ + - name: Build and test kernel for host + uses: actions/github-script@v8 + with: + script: | + const { execSync } = require('child_process'); + + const board = 'host'; + const configs = [ + { name: 'debug', build_type: 'debug', direct_syscall_handler: 'false' }, + { name: 'release', build_type: 'release', direct_syscall_handler: 'false' } + ]; + + for (const config of configs) { + console.log(`\nBuilding ${board} ${config.name} version...`); + + const outDir = `out/${board}.${config.name}`; + const args = `bsp="${board}" build_type="${config.build_type}" direct_syscall_handler=${config.direct_syscall_handler}`; + + try { + // Generate build files + console.log(`Running: gn gen ${outDir} --args='${args}'`); + execSync(`gn gen ${outDir} --args='${args}'`, { stdio: 'inherit' }); + + // Build and test + console.log(`Running: ninja -C ${outDir} check_all`); + execSync(`ninja -C ${outDir} check_all`, { stdio: 'inherit' }); + + console.log(`✅ ${board} ${config.name} build completed successfully`); + } catch (error) { + console.error(`❌ ${board} ${config.name} build failed:`, error.message); + process.exit(1); + } + } + + build_qemu_mps2_an385: + runs-on: ubuntu-latest + needs: setup + container: + image: ghcr.io/${{ github.repository_owner }}/kernel:latest + + steps: + - name: Download workspace + uses: actions/download-artifact@v4 + with: + name: workspace + path: . + + - name: Build and test kernel for qemu_mps2_an385 + uses: actions/github-script@v8 + with: + script: | + const { execSync } = require('child_process'); + + const board = 'qemu_mps2_an385'; + const configs = [ + { name: 'debug.dsc', build_type: 'debug', direct_syscall_handler: 'true' }, + { name: 'debug', build_type: 'debug', direct_syscall_handler: 'false' }, + { name: 'release.dsc', build_type: 'release', direct_syscall_handler: 'true' }, + { name: 'release', build_type: 'release', direct_syscall_handler: 'false' } + ]; + + for (const config of configs) { + console.log(`\nBuilding ${board} ${config.name} version...`); + + const outDir = `out/${board}.${config.name}`; + const args = `bsp="${board}" build_type="${config.build_type}" direct_syscall_handler=${config.direct_syscall_handler}`; + + try { + // Generate build files + console.log(`Running: gn gen ${outDir} --args='${args}'`); + execSync(`gn gen ${outDir} --args='${args}'`, { stdio: 'inherit' }); + + // Build and test + console.log(`Running: ninja -C ${outDir} check_all`); + execSync(`ninja -C ${outDir} check_all`, { stdio: 'inherit' }); + + console.log(`✅ ${board} ${config.name} build completed successfully`); + } catch (error) { + console.error(`❌ ${board} ${config.name} build failed:`, error.message); + process.exit(1); + } + } + + build_qemu_mps3_an547: + runs-on: ubuntu-latest + needs: setup + container: + image: ghcr.io/${{ github.repository_owner }}/kernel:latest + + steps: + - name: Download workspace + uses: actions/download-artifact@v4 + with: + name: workspace + path: . 
+ + - name: Build and test kernel for qemu_mps3_an547 + uses: actions/github-script@v8 + with: + script: | + const { execSync } = require('child_process'); + + const board = 'qemu_mps3_an547'; + const configs = [ + { name: 'debug.dsc', build_type: 'debug', direct_syscall_handler: 'true' }, + { name: 'debug', build_type: 'debug', direct_syscall_handler: 'false' }, + { name: 'release.dsc', build_type: 'release', direct_syscall_handler: 'true' }, + { name: 'release', build_type: 'release', direct_syscall_handler: 'false' } + ]; + + for (const config of configs) { + console.log(`\nBuilding ${board} ${config.name} version...`); + + const outDir = `out/${board}.${config.name}`; + const args = `bsp="${board}" build_type="${config.build_type}" direct_syscall_handler=${config.direct_syscall_handler}`; + + try { + // Generate build files + console.log(`Running: gn gen ${outDir} --args='${args}'`); + execSync(`gn gen ${outDir} --args='${args}'`, { stdio: 'inherit' }); + + // Build and test + console.log(`Running: ninja -C ${outDir} check_all`); + execSync(`ninja -C ${outDir} check_all`, { stdio: 'inherit' }); + + console.log(`✅ ${board} ${config.name} build completed successfully`); + } catch (error) { + console.error(`❌ ${board} ${config.name} build failed:`, error.message); + process.exit(1); + } + } + + build_qemu_riscv64: + runs-on: ubuntu-latest + needs: setup + container: + image: ghcr.io/${{ github.repository_owner }}/kernel:latest + + steps: + - name: Download workspace + uses: actions/download-artifact@v4 + with: + name: workspace + path: . + - name: Build and test kernel for qemu_riscv64 + uses: actions/github-script@v8 + with: + script: | + const { execSync } = require('child_process'); + + const board = 'qemu_riscv64'; + const configs = [ + { name: 'debug.dsc', build_type: 'debug', direct_syscall_handler: 'true' }, + { name: 'debug', build_type: 'debug', direct_syscall_handler: 'false' }, + { name: 'release.dsc', build_type: 'release', direct_syscall_handler: 'true' }, + { name: 'release', build_type: 'release', direct_syscall_handler: 'false' } + ]; + + for (const config of configs) { + console.log(`\nBuilding ${board} ${config.name} version...`); + + const outDir = `out/${board}.${config.name}`; + const args = `bsp="${board}" build_type="${config.build_type}" direct_syscall_handler=${config.direct_syscall_handler}`; + + try { + // Generate build files + console.log(`Running: gn gen ${outDir} --args='${args}'`); + execSync(`gn gen ${outDir} --args='${args}'`, { stdio: 'inherit' }); + + // Build and test + console.log(`Running: ninja -C ${outDir} check_all`); + execSync(`ninja -C ${outDir} check_all`, { stdio: 'inherit' }); + + console.log(`✅ ${board} ${config.name} build completed successfully`); + } catch (error) { + console.error(`❌ ${board} ${config.name} build failed:`, error.message); + process.exit(1); + } + } + + build_qemu_aarch64: + runs-on: ubuntu-latest + needs: setup + container: + image: ghcr.io/${{ github.repository_owner }}/kernel:latest + + steps: + - name: Download workspace + uses: actions/download-artifact@v4 + with: + name: workspace + path: . 
+ + - name: Build and test kernel for qemu_virt64_aarch64 + uses: actions/github-script@v8 + with: + script: | + const { execSync } = require('child_process'); + + const board = 'qemu_virt64_aarch64'; + const configs = [ + { name: 'debug.dsc', build_type: 'debug', direct_syscall_handler: 'true' }, + { name: 'debug', build_type: 'debug', direct_syscall_handler: 'false' }, + { name: 'release.dsc', build_type: 'release', direct_syscall_handler: 'true' }, + { name: 'release', build_type: 'release', direct_syscall_handler: 'false' } + ]; + + for (const config of configs) { + console.log(`\nBuilding ${board} ${config.name} version...`); + + const outDir = `out/${board}.${config.name}`; + const args = `bsp="${board}" build_type="${config.build_type}" direct_syscall_handler=${config.direct_syscall_handler}`; + + try { + // Generate build files + console.log(`Running: gn gen ${outDir} --args='${args}'`); + execSync(`gn gen ${outDir} --args='${args}'`, { stdio: 'inherit' }); + + // Build and test + console.log(`Running: ninja -C ${outDir} check_all`); + execSync(`ninja -C ${outDir} check_all`, { stdio: 'inherit' }); + + console.log(`✅ ${board} ${config.name} build completed successfully`); + } catch (error) { + console.error(`❌ ${board} ${config.name} build failed:`, error.message); + process.exit(1); + } + } + + notify: + runs-on: ubuntu-latest + needs: [prepare, setup, check_format, check_license, build_host, build_qemu_mps2_an385, build_qemu_mps3_an547, build_qemu_riscv64, build_qemu_aarch64] + if: always() + permissions: + contents: read + checks: write + pull-requests: write + + steps: - name: Notify job ended - if: always() uses: actions/github-script@v8 with: script: | - const sign = "${{ job.status }}" === 'success' ? '✅' : '❌'; + // Check if any job failed + const jobStatuses = { + prepare: "${{ needs.prepare.result }}", + setup: "${{ needs.setup.result }}", + check_format: "${{ needs.check_format.result }}", + check_license: "${{ needs.check_license.result }}", + build_host: "${{ needs.build_host.result }}", + build_qemu_mps2_an385: "${{ needs.build_qemu_mps2_an385.result }}", + build_qemu_mps3_an547: "${{ needs.build_qemu_mps3_an547.result }}", + build_qemu_riscv64: "${{ needs.build_qemu_riscv64.result }}", + build_qemu_aarch64: "${{ needs.build_qemu_aarch64.result }}" + }; + + // Debug: Log all job statuses + console.log('Job statuses:', JSON.stringify(jobStatuses, null, 2)); + + const failedJobs = Object.entries(jobStatuses) + .filter(([job, status]) => { + const isFailed = status === 'failure' || status === 'cancelled'; + if (isFailed) { + console.log(`Job ${job} failed with status: ${status}`); + } + return isFailed; + }) + .map(([job, status]) => `${job} (${status})`); + + const skippedJobs = Object.entries(jobStatuses) + .filter(([job, status]) => { + const isSkipped = status === 'skipped'; + if (isSkipped) { + console.log(`Job ${job} was skipped with status: ${status}`); + } + return isSkipped; + }) + .map(([job, status]) => `${job} (${status})`); + + console.log(`Failed jobs: ${failedJobs.length}, Skipped jobs: ${skippedJobs.length}`); + + let statusMessage; + let sign; + + if (failedJobs.length > 0) { + sign = '❌'; + statusMessage = `Job failed. Failed jobs: ${failedJobs.join(', ')}`; + if (skippedJobs.length > 0) { + statusMessage += `. 
Skipped jobs: ${skippedJobs.join(', ')}`; + } + } else if (skippedJobs.length > 0) { + sign = '⚠️'; + statusMessage = `Job completed with skipped jobs: ${skippedJobs.join(', ')}`; + } else { + sign = '✅'; + statusMessage = 'All jobs completed successfully'; + } + github.rest.issues.createComment({ issue_number: context.issue.number, owner: context.repo.owner, repo: context.repo.repo, - body: `Job ends with ${sign} ${{ job.status }}, see ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}.` + body: `${sign} ${statusMessage}, see ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}.` }) diff --git a/Dockerfile b/Dockerfile index 59a5699c..e8105271 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,7 +2,7 @@ FROM ubuntu:24.04 # Set environment variables ENV DEBIAN_FRONTEND=noninteractive -ENV PATH="/opt/sysroot/usr/local/bin:/opt/sysroot/usr/local/lib/rustlib/x86_64-unknown-linux-gnu/bin:/opt/arm-gnu-toolchain-14.2.rel1-x86_64-arm-none-eabi/bin/:/opt/arm-gnu-toolchain-14.2.rel1-x86_64-aarch64-none-elf/bin/:${PATH}" +ENV PATH="/opt/sysroot/usr/local/bin:/opt/sysroot/usr/local/lib/rustlib/x86_64-unknown-linux-gnu/bin:/opt/arm-gnu-toolchain-14.2.rel1-x86_64-arm-none-eabi/bin/:/opt/arm-gnu-toolchain-14.2.rel1-x86_64-aarch64-none-elf/bin/:/opt/skywalking-license-eye-0.7.0-bin/bin/linux/:${PATH}" # Install system packages RUN apt-get update && \ @@ -18,6 +18,7 @@ RUN apt-get update && \ libglib2.0-dev \ build-essential \ pkg-config \ + clang-format yapf3 \ && rm -rf /var/lib/apt/lists/* # Install Arm GNU toolchain (ARM Cortex-M) @@ -41,13 +42,18 @@ RUN curl -L -o blueos-toolchain.tar.xz https://github.com/vivoblueos/toolchain/r tar xf blueos-toolchain.tar.xz -C /opt/sysroot && \ rm blueos-toolchain.tar.xz -# Install bindgen and cbindgen to /opt/sysroot/usr/local/bin -RUN CARGO_INSTALL_ROOT=/opt/sysroot/usr/local cargo install bindgen-cli@0.72.1 cbindgen@0.29.0 - # Install repo RUN curl -L -o /opt/sysroot/usr/local/bin/repo https://storage.googleapis.com/git-repo-downloads/repo && \ chmod a+x /opt/sysroot/usr/local/bin/repo +# Install license-eye +RUN curl -L -o skywalking-license-eye.tgz https://github.com/apache/skywalking-eyes/releases/download/v0.7.0/skywalking-license-eye-0.7.0-bin.tgz && \ + tar xf skywalking-license-eye.tgz -C /opt && \ + rm skywalking-license-eye.tgz + +# Install bindgen and cbindgen to /opt/sysroot/usr/local/bin +RUN CARGO_INSTALL_ROOT=/opt/sysroot/usr/local cargo install bindgen-cli@0.72.1 cbindgen@0.29.0 + # Set working directory WORKDIR /blueos-dev From 12dd0132cde1551065ad89f505001b95f4827dd8 Mon Sep 17 00:00:00 2001 From: han-jiang277 Date: Thu, 16 Oct 2025 18:58:01 +0800 Subject: [PATCH 17/23] test yamlfmt --- .../workflows/reusable_cross_repos_build.yml | 955 +++++++++--------- .licenserc.yaml | 3 + Dockerfile | 7 +- 3 files changed, 475 insertions(+), 490 deletions(-) diff --git a/.github/workflows/reusable_cross_repos_build.yml b/.github/workflows/reusable_cross_repos_build.yml index 01400233..5ea65b8c 100644 --- a/.github/workflows/reusable_cross_repos_build.yml +++ b/.github/workflows/reusable_cross_repos_build.yml @@ -13,66 +13,66 @@ jobs: if: ${{ github.event.issue.pull_request && contains(github.event.comment.body, 'build_prs') }} steps: - - name: Notify job started - uses: actions/github-script@v8 - with: - script: | - github.rest.issues.createComment({ - issue_number: context.issue.number, - owner: context.repo.owner, - repo: context.repo.repo, - body: `Job is started, see ${{ github.server_url 
}}/${{ github.repository }}/actions/runs/${{ github.run_id }}.` - }) - - - name: Create patches directory - run: | - mkdir -p patches - - - name: Download patches - id: fetch_prs - uses: actions/github-script@v8 - with: - script: | - const fs = require('fs').promises; - const body = context.payload.comment.body; - const prRegex = /https:\/\/github\.com\/([^\/]+)\/([^\/]+)\/pull\/(\d+)/g; - let match; - let prs = []; - let patchUrls = []; - let fetched_prs = []; - while ((match = prRegex.exec(body)) !== null) { - let owner = match[1]; - let repo = match[2]; - let pr_num = match[3]; - prs.push({owner: owner, repo: repo, pr_num: pr_num}); - patchUrls.push({repo: repo, url: match[0] + ".diff"}); - } - // Get metadata of the PR. - await Promise.all(prs.map(async({ owner, repo, pr_num }) => { - const { data: pullRequest } = await github.rest.pulls.get({ - owner, - repo, - pull_number: pr_num, - }); - fetched_prs.push({owner: owner, repo: repo, pr_num: pr_num, sha: pullRequest.head.sha}); - })); - - // Download diffs. - await Promise.all(patchUrls.map(async ({ repo, url }) => { - console.log(`Downloading ${url}`); - const patchFilename = `patches/${repo}.diff`; - const response = await github.request({ - url: url, - }); - await fs.writeFile(patchFilename, response.data); - })); - core.setOutput('fetched_prs', JSON.stringify(fetched_prs)); - - - name: Upload patches - uses: actions/upload-artifact@v4 - with: - name: patches - path: patches/ + - name: Notify job started + uses: actions/github-script@v8 + with: + script: | + github.rest.issues.createComment({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + body: `Job is started, see ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}.` + }) + + - name: Create patches directory + run: | + mkdir -p patches + + - name: Download patches + id: fetch_prs + uses: actions/github-script@v8 + with: + script: | + const fs = require('fs').promises; + const body = context.payload.comment.body; + const prRegex = /https:\/\/github\.com\/([^\/]+)\/([^\/]+)\/pull\/(\d+)/g; + let match; + let prs = []; + let patchUrls = []; + let fetched_prs = []; + while ((match = prRegex.exec(body)) !== null) { + let owner = match[1]; + let repo = match[2]; + let pr_num = match[3]; + prs.push({owner: owner, repo: repo, pr_num: pr_num}); + patchUrls.push({repo: repo, url: match[0] + ".diff"}); + } + // Get metadata of the PR. + await Promise.all(prs.map(async({ owner, repo, pr_num }) => { + const { data: pullRequest } = await github.rest.pulls.get({ + owner, + repo, + pull_number: pr_num, + }); + fetched_prs.push({owner: owner, repo: repo, pr_num: pr_num, sha: pullRequest.head.sha}); + })); + + // Download diffs. 
+ await Promise.all(patchUrls.map(async ({ repo, url }) => { + console.log(`Downloading ${url}`); + const patchFilename = `patches/${repo}.diff`; + const response = await github.request({ + url: url, + }); + await fs.writeFile(patchFilename, response.data); + })); + core.setOutput('fetched_prs', JSON.stringify(fetched_prs)); + + - name: Upload patches + uses: actions/upload-artifact@v4 + with: + name: patches + path: patches/ setup: runs-on: ubuntu-latest @@ -81,47 +81,42 @@ jobs: image: ghcr.io/${{ github.repository_owner }}/kernel:latest steps: - - name: Download patches - uses: actions/download-artifact@v4 - with: - name: patches - path: patches/ - - - name: Init repo and sync - run: | - repo init --depth=1 -u https://github.com/vivoblueos/manifests.git -b main -m manifest.xml - repo sync -j$(nproc) - - - name: Apply patches - run: | - set -e - set -x - for patch in patches/*.diff; do - patch_abspath=$(realpath ${patch}) - repo=$(basename ${patch} ".diff") - if [ "${repo}" != "manifests" ]; then - cd ${repo} - else - cd .repo/${repo} - fi - git apply -3 --check --verbose ${patch_abspath} && git apply -3 --verbose ${patch_abspath} - if [[ $? != 0 ]]; then - exit 1 - fi - cd - > /dev/null - done - - - name: Re-sync repos - # In case manifests repo is changed. - run: | - repo sync -j$(nproc) - - - name: Upload workspace - uses: actions/upload-artifact@v4 - with: - name: workspace - path: . - include-hidden-files: true + - name: Download patches + uses: actions/download-artifact@v4 + with: + name: patches + path: patches/ + + - name: Init repo and sync + run: | + repo init --depth=1 -u https://github.com/vivoblueos/manifests.git -b main -m manifest.xml + repo sync -j$(nproc) + + - name: Apply patches + run: | + set -e + set -x + for patch in patches/*.diff; do + patch_abspath=$(realpath ${patch}) + repo=$(basename ${patch} ".diff") + if [ "${repo}" != "manifests" ]; then + cd ${repo} + else + cd .repo/${repo} + fi + git apply -3 --check --verbose ${patch_abspath} && git apply -3 --verbose ${patch_abspath} + if [[ $? != 0 ]]; then + exit 1 + fi + cd - > /dev/null + done + + - name: Upload workspace + uses: actions/upload-artifact@v4 + with: + name: workspace + path: . + include-hidden-files: true check_format: runs-on: ubuntu-latest @@ -130,141 +125,129 @@ jobs: image: ghcr.io/${{ github.repository_owner }}/kernel:latest steps: - - name: Download workspace - uses: actions/download-artifact@v4 - with: - name: workspace - path: . + - name: Download workspace + uses: actions/download-artifact@v4 + with: + name: workspace + path: . 
+ + - name: Check format + uses: actions/github-script@v8 + with: + script: | + const { execSync } = require('child_process'); + const fs = require('fs'); + const path = require('path'); + // Get list of patch files + const patchesDir = 'patches'; + const patchFiles = fs.readdirSync(patchesDir).filter(f => f.endsWith('.diff')); + if (patchFiles.length === 0) { + console.log('No patch files found for format check'); + return; + } + let formatErrors = []; + let totalCheckedFiles = 0; + for (const patchFile of patchFiles) { + const repo = patchFile.replace(/\.diff$/, ''); + let repoPath = repo; + // Handle manifests repo special case + if (repo === 'manifests') { + continue; + } + console.log(`\nChecking format in repo: ${repo} (${repoPath})`); + // Check if repo directory exists + if (!fs.existsSync(repoPath)) { + console.log(`Repo directory ${repoPath} not found, skipping`); + continue; + } + // Get modified files from patch file content + let modifiedFiles = []; + try { + const patchContent = fs.readFileSync(path.join(patchesDir, patchFile), 'utf-8'); + const patchLines = patchContent.split('\n'); + // Extract file paths from patch headers (+++ b/filename) + modifiedFiles = patchLines + .filter(line => line.startsWith('+++ b/')) + .map(line => line.substring(6)) // Remove '+++ b/' prefix + .filter(file => file && file.length > 0) + .filter(file => { + const ext = path.extname(file).toLowerCase(); + return ext === '.yml' || ext === '.gn' || ext === '.gni' || ext === '.py' || ext === '.rs'; + }); + console.log(`Found ${modifiedFiles.length} modified files in patch for ${repo}:`); + modifiedFiles.forEach(file => console.log(` - ${file}`)); + } catch (error) { + console.log(`Error reading patch file ${patchFile}: ${error.message}`); + continue; + } - - name: Check format - uses: actions/github-script@v8 - with: - script: | - const { execSync } = require('child_process'); - const fs = require('fs'); - const path = require('path'); - - // Get list of patch files - const patchesDir = 'patches'; - const patchFiles = fs.readdirSync(patchesDir).filter(f => f.endsWith('.diff')); - - if (patchFiles.length === 0) { - console.log('No patch files found for format check'); - return; + if (modifiedFiles.length === 0) { + console.log(`No relevant files found in patch for ${repo}`); + continue; } - - let formatErrors = []; - let totalCheckedFiles = 0; - - for (const patchFile of patchFiles) { - const repo = patchFile.replace(/\.diff$/, ''); - let repoPath = repo; - - // Handle manifests repo special case - if (repo === 'manifests') { - continue; - } - - console.log(`\nChecking format in repo: ${repo} (${repoPath})`); - - // Check if repo directory exists - if (!fs.existsSync(repoPath)) { - console.log(`Repo directory ${repoPath} not found, skipping`); + // Check format for each modified file + for (const file of modifiedFiles) { + const filePath = path.resolve(repoPath, file); + const ext = path.extname(file).toLowerCase(); + // Skip if file doesn't exist + if (!fs.existsSync(filePath)) { + console.log(`File ${filePath} doesn't exist, skipping`); continue; } - - // Get modified files in this repo - let modifiedFiles = []; try { - // Change to repo directory and run git status - const originalCwd = process.cwd(); - process.chdir(repoPath); - const gitStatus = execSync('git status --porcelain', { - encoding: 'utf-8' - }); - process.chdir(originalCwd); - - modifiedFiles = gitStatus - .split('\n') - .filter(line => line.trim()) - .map(line => line.substring(3).trim()) // Remove status prefix - .filter(file => 
file && file.length > 0); - } catch (error) { - console.log(`No git status available in ${repoPath}, skipping`); - continue; - } - - if (modifiedFiles.length === 0) { - console.log(`No modified files found in ${repo}`); - continue; - } - - console.log(`Found ${modifiedFiles.length} modified files in ${repo}:`); - modifiedFiles.forEach(file => console.log(` - ${file}`)); - - // Check format for each modified file - for (const file of modifiedFiles) { - const filePath = path.resolve(repoPath, file); - const ext = path.extname(file).toLowerCase(); - - // Skip if file doesn't exist - if (!fs.existsSync(filePath)) { - console.log(`File ${filePath} doesn't exist, skipping`); - continue; - } - - try { - if (ext === '.gn' || ext === '.gni') { - console.log(`Checking GN format for: ${repo}/${file}`); - const out = execSync(`gn format --dry-run "${filePath}"`, { - encoding: 'utf-8', - stdio: ['inherit', 'pipe', 'pipe'] - }); - if (out.trim()) { - formatErrors.push(`GN formatting issue in ${repo}/${file}:\n${out}`); - } - } else if (ext === '.py') { - console.log(`Checking Python format for: ${repo}/${file}`); - const out = execSync(`yapf3 -d "${filePath}"`, { - encoding: 'utf-8', - stdio: ['inherit', 'pipe', 'pipe'] - }); - if (out.trim()) { - formatErrors.push(`Python formatting issue in ${repo}/${file}:\n${out}`); - } - } else if (ext === '.rs') { - console.log(`Checking Rust format for: ${repo}/${file}`); - const out = execSync(`rustfmt --edition=2021 --check --unstable-features --skip-children "${filePath}"`, { - encoding: 'utf-8', - stdio: ['inherit', 'pipe', 'pipe'] - }); - if (out.trim()) { - formatErrors.push(`Rust formatting issue in ${repo}/${file}:\n${out}`); - } + if (ext === '.gn' || ext === '.gni') { + console.log(`Checking GN format for: ${filePath}`); + const out = execSync(`gn format --dry-run \"${filePath}\"`, { + encoding: 'utf-8', + stdio: ['inherit', 'pipe', 'pipe'] + }); + if (out.trim()) { + formatErrors.push(`GN formatting issue in ${filePath}:\n${out}`); + } + } else if (ext === '.py') { + console.log(`Checking Python format for: ${filePath}`); + const out = execSync(`yapf3 -d \"${filePath}\"`, { + encoding: 'utf-8', + stdio: ['inherit', 'pipe', 'pipe'] + }); + if (out.trim()) { + formatErrors.push(`Python formatting issue in ${filePath}:\n${out}`); } - totalCheckedFiles++; - } catch (error) { - // Check if it's a formatting error or other error - const output = (error.stdout || '') + (error.stderr || ''); - if (output.includes('format') || output.includes('Format')) { - formatErrors.push(`Formatting issue in ${repo}/${file}:\n${output}`); - } else { - console.log(`Skipping format check for ${repo}/${file}: ${error.message}`); + } else if (ext === '.rs') { + console.log(`Checking Rust format for: ${filePath}`); + const out = execSync(`rustfmt --edition=2021 --check --unstable-features --skip-children \"${filePath}\"`, { + encoding: 'utf-8', + stdio: ['inherit', 'pipe', 'pipe'] + }); + if (out.trim()) { + formatErrors.push(`Rust formatting issue in ${filePath}:\n${out}`); } + } else if (ext === '.yml') { + // ymalfmt lint is not work for github actions + continue; + } + totalCheckedFiles++; + } catch (error) { + // Check if it's a formatting error or other error + const output = (error.stdout || '') + (error.stderr || ''); + if (output.includes('format') || output.includes('Format')) { + formatErrors.push(`Formatting issue in ${repo}/${file}:\n${output}`); + } else { + console.log(`Skipping format check for ${repo}/${file}: ${error.message}`); } } } - - console.log(`\nTotal 
files checked: ${totalCheckedFiles}`); - - if (formatErrors.length > 0) { - console.log('❌ Format check failed:'); - formatErrors.forEach(error => console.log(error)); - core.setFailed('Format check failed'); - process.exit(1); - } else { - console.log('✅ All modified files pass format check'); - } + } + + console.log(`\nTotal files checked: ${totalCheckedFiles}`); + if (formatErrors.length > 0) { + console.log('❌ Format check failed:'); + formatErrors.forEach(error => console.log(error)); + core.setFailed('Format check failed'); + process.exit(1); + } else { + console.log('✅ All modified files pass format check'); + } check_license: runs-on: ubuntu-latest @@ -273,68 +256,68 @@ jobs: image: ghcr.io/${{ github.repository_owner }}/kernel:latest steps: - - name: Download workspace - uses: actions/download-artifact@v4 - with: - name: workspace - path: . + - name: Download workspace + uses: actions/download-artifact@v4 + with: + name: workspace + path: . + + - name: Check license + uses: actions/github-script@v8 + with: + script: | + const fs = require('fs'); + const { execSync } = require('child_process'); + const patchesDir = 'patches'; + let failedRepos = []; + let checkedRepos = []; + + // Find all .diff files in patches/ + const patchFiles = fs.readdirSync(patchesDir).filter(f => f.endsWith('.diff')); + + for (const patchFile of patchFiles) { + const repo = patchFile.replace(/\.diff$/, ''); + let repoPath = repo; + // Skip invalid repos for license check + if (repo.includes('manifests') || repo.includes('libc') || repo.includes('book') || repo.includes('external')) { + console.log(`Skipping ${repo} for license check.`); + continue; + } - - name: Check license - uses: actions/github-script@v8 - with: - script: | - const fs = require('fs'); - const { execSync } = require('child_process'); - const patchesDir = 'patches'; - let failedRepos = []; - let checkedRepos = []; - - // Find all .diff files in patches/ - const patchFiles = fs.readdirSync(patchesDir).filter(f => f.endsWith('.diff')); - - for (const patchFile of patchFiles) { - const repo = patchFile.replace(/\.diff$/, ''); - let repoPath = repo; - // Skip invalid repos for license check - if (repo.includes('manifests') || repo.includes('libc') || repo.includes('book') || repo.includes('external')) { - console.log(`Skipping ${repo} for license check.`); - continue; + try { + // Run license-eye header check in each repo directory + const originalCwd = process.cwd(); + process.chdir(repoPath); + const result = execSync('license-eye header check', { + encoding: 'utf-8', + stdio: ['inherit', 'pipe', 'pipe'] + }); + process.chdir(originalCwd); + if (result.includes('ERROR one or more files does not have a valid license header')) { + console.log(`License issue in ${repoPath}:\n${result}`); + failedRepos.push(repoPath); } - - try { - // Run license-eye header check in each repo directory - const originalCwd = process.cwd(); - process.chdir(repoPath); - const result = execSync('license-eye header check', { - encoding: 'utf-8', - stdio: ['inherit', 'pipe', 'pipe'] - }); - process.chdir(originalCwd); - if (result.includes('ERROR one or more files does not have a valid license header')) { - console.log(`License issue in ${repoPath}:\n${result}`); - failedRepos.push(repoPath); - } - } catch (e) { - // Always log output for diagnosis - const out = (e.stdout ? e.stdout.toString() : '') + (e.stderr ? 
e.stderr.toString() : ''); - if (out.includes('ERROR one or more files does not have a valid license header')) { - console.log(`License issue in ${repoPath}:\n${out}`); - failedRepos.push(repoPath); - } else { - console.log(`Error running license-eye in ${repoPath}:\n${out}`); - failedRepos.push(repoPath); - } + } catch (e) { + // Always log output for diagnosis + const out = (e.stdout ? e.stdout.toString() : '') + (e.stderr ? e.stderr.toString() : ''); + if (out.includes('ERROR one or more files does not have a valid license header')) { + console.log(`License issue in ${repoPath}:\n${out}`); + failedRepos.push(repoPath); + } else { + console.log(`Error running license-eye in ${repoPath}:\n${out}`); + failedRepos.push(repoPath); } } + } - if (failedRepos.length > 0) { - core.setFailed(`❌ License header issues found in these repos: ${failedRepos.join(', ')}`); - process.exit(1); - } else if (checkedRepos.length === 0) { - console.log('✅ No repos to check license for.'); - } else { - console.log('✅ All checked repos have valid license headers!'); - } + if (failedRepos.length > 0) { + core.setFailed(`❌ License header issues found in these repos: ${failedRepos.join(', ')}`); + process.exit(1); + } else if (checkedRepos.length === 0) { + console.log('✅ No repos to check license for.'); + } else { + console.log('✅ All checked repos have valid license headers!'); + } build_host: runs-on: ubuntu-latest @@ -343,45 +326,45 @@ jobs: image: ghcr.io/${{ github.repository_owner }}/kernel:latest steps: - - name: Download workspace - uses: actions/download-artifact@v4 - with: - name: workspace - path: . - - - name: Build and test kernel for host - uses: actions/github-script@v8 - with: - script: | - const { execSync } = require('child_process'); - - const board = 'host'; - const configs = [ - { name: 'debug', build_type: 'debug', direct_syscall_handler: 'false' }, - { name: 'release', build_type: 'release', direct_syscall_handler: 'false' } - ]; - - for (const config of configs) { - console.log(`\nBuilding ${board} ${config.name} version...`); - - const outDir = `out/${board}.${config.name}`; - const args = `bsp="${board}" build_type="${config.build_type}" direct_syscall_handler=${config.direct_syscall_handler}`; - - try { - // Generate build files - console.log(`Running: gn gen ${outDir} --args='${args}'`); - execSync(`gn gen ${outDir} --args='${args}'`, { stdio: 'inherit' }); - - // Build and test - console.log(`Running: ninja -C ${outDir} check_all`); - execSync(`ninja -C ${outDir} check_all`, { stdio: 'inherit' }); - - console.log(`✅ ${board} ${config.name} build completed successfully`); - } catch (error) { - console.error(`❌ ${board} ${config.name} build failed:`, error.message); - process.exit(1); - } + - name: Download workspace + uses: actions/download-artifact@v4 + with: + name: workspace + path: . 
+ + - name: Build and test kernel for host + uses: actions/github-script@v8 + with: + script: | + const { execSync } = require('child_process'); + + const board = 'host'; + const configs = [ + { name: 'debug', build_type: 'debug', direct_syscall_handler: 'false' }, + { name: 'release', build_type: 'release', direct_syscall_handler: 'false' } + ]; + + for (const config of configs) { + console.log(`\nBuilding ${board} ${config.name} version...`); + + const outDir = `out/${board}.${config.name}`; + const args = `bsp="${board}" build_type="${config.build_type}" direct_syscall_handler=${config.direct_syscall_handler}`; + + try { + // Generate build files + console.log(`Running: gn gen ${outDir} --args='${args}'`); + execSync(`gn gen ${outDir} --args='${args}'`, { stdio: 'inherit' }); + + // Build and test + console.log(`Running: ninja -C ${outDir} check_all`); + execSync(`ninja -C ${outDir} check_all`, { stdio: 'inherit' }); + + console.log(`✅ ${board} ${config.name} build completed successfully`); + } catch (error) { + console.error(`❌ ${board} ${config.name} build failed:`, error.message); + process.exit(1); } + } build_qemu_mps2_an385: runs-on: ubuntu-latest @@ -390,47 +373,47 @@ jobs: image: ghcr.io/${{ github.repository_owner }}/kernel:latest steps: - - name: Download workspace - uses: actions/download-artifact@v4 - with: - name: workspace - path: . - - - name: Build and test kernel for qemu_mps2_an385 - uses: actions/github-script@v8 - with: - script: | - const { execSync } = require('child_process'); - - const board = 'qemu_mps2_an385'; - const configs = [ - { name: 'debug.dsc', build_type: 'debug', direct_syscall_handler: 'true' }, - { name: 'debug', build_type: 'debug', direct_syscall_handler: 'false' }, - { name: 'release.dsc', build_type: 'release', direct_syscall_handler: 'true' }, - { name: 'release', build_type: 'release', direct_syscall_handler: 'false' } - ]; - - for (const config of configs) { - console.log(`\nBuilding ${board} ${config.name} version...`); - - const outDir = `out/${board}.${config.name}`; - const args = `bsp="${board}" build_type="${config.build_type}" direct_syscall_handler=${config.direct_syscall_handler}`; - - try { - // Generate build files - console.log(`Running: gn gen ${outDir} --args='${args}'`); - execSync(`gn gen ${outDir} --args='${args}'`, { stdio: 'inherit' }); - - // Build and test - console.log(`Running: ninja -C ${outDir} check_all`); - execSync(`ninja -C ${outDir} check_all`, { stdio: 'inherit' }); - - console.log(`✅ ${board} ${config.name} build completed successfully`); - } catch (error) { - console.error(`❌ ${board} ${config.name} build failed:`, error.message); - process.exit(1); - } + - name: Download workspace + uses: actions/download-artifact@v4 + with: + name: workspace + path: . 
+ + - name: Build and test kernel for qemu_mps2_an385 + uses: actions/github-script@v8 + with: + script: | + const { execSync } = require('child_process'); + + const board = 'qemu_mps2_an385'; + const configs = [ + { name: 'debug.dsc', build_type: 'debug', direct_syscall_handler: 'true' }, + { name: 'debug', build_type: 'debug', direct_syscall_handler: 'false' }, + { name: 'release.dsc', build_type: 'release', direct_syscall_handler: 'true' }, + { name: 'release', build_type: 'release', direct_syscall_handler: 'false' } + ]; + + for (const config of configs) { + console.log(`\nBuilding ${board} ${config.name} version...`); + + const outDir = `out/${board}.${config.name}`; + const args = `bsp="${board}" build_type="${config.build_type}" direct_syscall_handler=${config.direct_syscall_handler}`; + + try { + // Generate build files + console.log(`Running: gn gen ${outDir} --args='${args}'`); + execSync(`gn gen ${outDir} --args='${args}'`, { stdio: 'inherit' }); + + // Build and test + console.log(`Running: ninja -C ${outDir} check_all`); + execSync(`ninja -C ${outDir} check_all`, { stdio: 'inherit' }); + + console.log(`✅ ${board} ${config.name} build completed successfully`); + } catch (error) { + console.error(`❌ ${board} ${config.name} build failed:`, error.message); + process.exit(1); } + } build_qemu_mps3_an547: runs-on: ubuntu-latest @@ -439,47 +422,47 @@ jobs: image: ghcr.io/${{ github.repository_owner }}/kernel:latest steps: - - name: Download workspace - uses: actions/download-artifact@v4 - with: - name: workspace - path: . - - - name: Build and test kernel for qemu_mps3_an547 - uses: actions/github-script@v8 - with: - script: | - const { execSync } = require('child_process'); - - const board = 'qemu_mps3_an547'; - const configs = [ - { name: 'debug.dsc', build_type: 'debug', direct_syscall_handler: 'true' }, - { name: 'debug', build_type: 'debug', direct_syscall_handler: 'false' }, - { name: 'release.dsc', build_type: 'release', direct_syscall_handler: 'true' }, - { name: 'release', build_type: 'release', direct_syscall_handler: 'false' } - ]; - - for (const config of configs) { - console.log(`\nBuilding ${board} ${config.name} version...`); - - const outDir = `out/${board}.${config.name}`; - const args = `bsp="${board}" build_type="${config.build_type}" direct_syscall_handler=${config.direct_syscall_handler}`; - - try { - // Generate build files - console.log(`Running: gn gen ${outDir} --args='${args}'`); - execSync(`gn gen ${outDir} --args='${args}'`, { stdio: 'inherit' }); - - // Build and test - console.log(`Running: ninja -C ${outDir} check_all`); - execSync(`ninja -C ${outDir} check_all`, { stdio: 'inherit' }); - - console.log(`✅ ${board} ${config.name} build completed successfully`); - } catch (error) { - console.error(`❌ ${board} ${config.name} build failed:`, error.message); - process.exit(1); - } + - name: Download workspace + uses: actions/download-artifact@v4 + with: + name: workspace + path: . 
+ + - name: Build and test kernel for qemu_mps3_an547 + uses: actions/github-script@v8 + with: + script: | + const { execSync } = require('child_process'); + + const board = 'qemu_mps3_an547'; + const configs = [ + { name: 'debug.dsc', build_type: 'debug', direct_syscall_handler: 'true' }, + { name: 'debug', build_type: 'debug', direct_syscall_handler: 'false' }, + { name: 'release.dsc', build_type: 'release', direct_syscall_handler: 'true' }, + { name: 'release', build_type: 'release', direct_syscall_handler: 'false' } + ]; + + for (const config of configs) { + console.log(`\nBuilding ${board} ${config.name} version...`); + + const outDir = `out/${board}.${config.name}`; + const args = `bsp="${board}" build_type="${config.build_type}" direct_syscall_handler=${config.direct_syscall_handler}`; + + try { + // Generate build files + console.log(`Running: gn gen ${outDir} --args='${args}'`); + execSync(`gn gen ${outDir} --args='${args}'`, { stdio: 'inherit' }); + + // Build and test + console.log(`Running: ninja -C ${outDir} check_all`); + execSync(`ninja -C ${outDir} check_all`, { stdio: 'inherit' }); + + console.log(`✅ ${board} ${config.name} build completed successfully`); + } catch (error) { + console.error(`❌ ${board} ${config.name} build failed:`, error.message); + process.exit(1); } + } build_qemu_riscv64: runs-on: ubuntu-latest @@ -488,47 +471,47 @@ jobs: image: ghcr.io/${{ github.repository_owner }}/kernel:latest steps: - - name: Download workspace - uses: actions/download-artifact@v4 - with: - name: workspace - path: . - - - name: Build and test kernel for qemu_riscv64 - uses: actions/github-script@v8 - with: - script: | - const { execSync } = require('child_process'); - - const board = 'qemu_riscv64'; - const configs = [ - { name: 'debug.dsc', build_type: 'debug', direct_syscall_handler: 'true' }, - { name: 'debug', build_type: 'debug', direct_syscall_handler: 'false' }, - { name: 'release.dsc', build_type: 'release', direct_syscall_handler: 'true' }, - { name: 'release', build_type: 'release', direct_syscall_handler: 'false' } - ]; - - for (const config of configs) { - console.log(`\nBuilding ${board} ${config.name} version...`); - - const outDir = `out/${board}.${config.name}`; - const args = `bsp="${board}" build_type="${config.build_type}" direct_syscall_handler=${config.direct_syscall_handler}`; - - try { - // Generate build files - console.log(`Running: gn gen ${outDir} --args='${args}'`); - execSync(`gn gen ${outDir} --args='${args}'`, { stdio: 'inherit' }); - - // Build and test - console.log(`Running: ninja -C ${outDir} check_all`); - execSync(`ninja -C ${outDir} check_all`, { stdio: 'inherit' }); - - console.log(`✅ ${board} ${config.name} build completed successfully`); - } catch (error) { - console.error(`❌ ${board} ${config.name} build failed:`, error.message); - process.exit(1); - } + - name: Download workspace + uses: actions/download-artifact@v4 + with: + name: workspace + path: . 
+ + - name: Build and test kernel for qemu_riscv64 + uses: actions/github-script@v8 + with: + script: | + const { execSync } = require('child_process'); + + const board = 'qemu_riscv64'; + const configs = [ + { name: 'debug.dsc', build_type: 'debug', direct_syscall_handler: 'true' }, + { name: 'debug', build_type: 'debug', direct_syscall_handler: 'false' }, + { name: 'release.dsc', build_type: 'release', direct_syscall_handler: 'true' }, + { name: 'release', build_type: 'release', direct_syscall_handler: 'false' } + ]; + + for (const config of configs) { + console.log(`\nBuilding ${board} ${config.name} version...`); + + const outDir = `out/${board}.${config.name}`; + const args = `bsp="${board}" build_type="${config.build_type}" direct_syscall_handler=${config.direct_syscall_handler}`; + + try { + // Generate build files + console.log(`Running: gn gen ${outDir} --args='${args}'`); + execSync(`gn gen ${outDir} --args='${args}'`, { stdio: 'inherit' }); + + // Build and test + console.log(`Running: ninja -C ${outDir} check_all`); + execSync(`ninja -C ${outDir} check_all`, { stdio: 'inherit' }); + + console.log(`✅ ${board} ${config.name} build completed successfully`); + } catch (error) { + console.error(`❌ ${board} ${config.name} build failed:`, error.message); + process.exit(1); } + } build_qemu_aarch64: runs-on: ubuntu-latest @@ -537,47 +520,47 @@ jobs: image: ghcr.io/${{ github.repository_owner }}/kernel:latest steps: - - name: Download workspace - uses: actions/download-artifact@v4 - with: - name: workspace - path: . - - - name: Build and test kernel for qemu_virt64_aarch64 - uses: actions/github-script@v8 - with: - script: | - const { execSync } = require('child_process'); - - const board = 'qemu_virt64_aarch64'; - const configs = [ - { name: 'debug.dsc', build_type: 'debug', direct_syscall_handler: 'true' }, - { name: 'debug', build_type: 'debug', direct_syscall_handler: 'false' }, - { name: 'release.dsc', build_type: 'release', direct_syscall_handler: 'true' }, - { name: 'release', build_type: 'release', direct_syscall_handler: 'false' } - ]; - - for (const config of configs) { - console.log(`\nBuilding ${board} ${config.name} version...`); - - const outDir = `out/${board}.${config.name}`; - const args = `bsp="${board}" build_type="${config.build_type}" direct_syscall_handler=${config.direct_syscall_handler}`; - - try { - // Generate build files - console.log(`Running: gn gen ${outDir} --args='${args}'`); - execSync(`gn gen ${outDir} --args='${args}'`, { stdio: 'inherit' }); - - // Build and test - console.log(`Running: ninja -C ${outDir} check_all`); - execSync(`ninja -C ${outDir} check_all`, { stdio: 'inherit' }); - - console.log(`✅ ${board} ${config.name} build completed successfully`); - } catch (error) { - console.error(`❌ ${board} ${config.name} build failed:`, error.message); - process.exit(1); - } + - name: Download workspace + uses: actions/download-artifact@v4 + with: + name: workspace + path: . 
+ + - name: Build and test kernel for qemu_virt64_aarch64 + uses: actions/github-script@v8 + with: + script: | + const { execSync } = require('child_process'); + + const board = 'qemu_virt64_aarch64'; + const configs = [ + { name: 'debug.dsc', build_type: 'debug', direct_syscall_handler: 'true' }, + { name: 'debug', build_type: 'debug', direct_syscall_handler: 'false' }, + { name: 'release.dsc', build_type: 'release', direct_syscall_handler: 'true' }, + { name: 'release', build_type: 'release', direct_syscall_handler: 'false' } + ]; + + for (const config of configs) { + console.log(`\nBuilding ${board} ${config.name} version...`); + + const outDir = `out/${board}.${config.name}`; + const args = `bsp="${board}" build_type="${config.build_type}" direct_syscall_handler=${config.direct_syscall_handler}`; + + try { + // Generate build files + console.log(`Running: gn gen ${outDir} --args='${args}'`); + execSync(`gn gen ${outDir} --args='${args}'`, { stdio: 'inherit' }); + + // Build and test + console.log(`Running: ninja -C ${outDir} check_all`); + execSync(`ninja -C ${outDir} check_all`, { stdio: 'inherit' }); + + console.log(`✅ ${board} ${config.name} build completed successfully`); + } catch (error) { + console.error(`❌ ${board} ${config.name} build failed:`, error.message); + process.exit(1); } + } notify: runs-on: ubuntu-latest @@ -605,10 +588,8 @@ jobs: build_qemu_riscv64: "${{ needs.build_qemu_riscv64.result }}", build_qemu_aarch64: "${{ needs.build_qemu_aarch64.result }}" }; - // Debug: Log all job statuses console.log('Job statuses:', JSON.stringify(jobStatuses, null, 2)); - const failedJobs = Object.entries(jobStatuses) .filter(([job, status]) => { const isFailed = status === 'failure' || status === 'cancelled'; @@ -618,7 +599,6 @@ jobs: return isFailed; }) .map(([job, status]) => `${job} (${status})`); - const skippedJobs = Object.entries(jobStatuses) .filter(([job, status]) => { const isSkipped = status === 'skipped'; @@ -628,12 +608,9 @@ jobs: return isSkipped; }) .map(([job, status]) => `${job} (${status})`); - console.log(`Failed jobs: ${failedJobs.length}, Skipped jobs: ${skippedJobs.length}`); - let statusMessage; let sign; - if (failedJobs.length > 0) { sign = '❌'; statusMessage = `Job failed. Failed jobs: ${failedJobs.join(', ')}`; @@ -647,10 +624,10 @@ jobs: sign = '✅'; statusMessage = 'All jobs completed successfully'; } - + github.rest.issues.createComment({ issue_number: context.issue.number, owner: context.repo.owner, repo: context.repo.repo, - body: `${sign} ${statusMessage}, see ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}.` + body: `${sign} ${statusMessage}, see ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}.` }) diff --git a/.licenserc.yaml b/.licenserc.yaml index ecffd732..1fea97cb 100644 --- a/.licenserc.yaml +++ b/.licenserc.yaml @@ -72,6 +72,9 @@ header: # `header` section is configurations for source codes license header. - ".licenserc.yaml" - "**/*.lds" - "**/lib64" + - ".github/**" + - "adapter/cmsis/cmsis_header/**" + - "Dockerfile" comment: on-failure # on what condition license-eye will comment on the pull request, `on-failure`, `always`, `never`. 
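The reworked check_format job shown in this patch derives its list of files to check from the downloaded *.diff files themselves, by reading their '+++ b/<path>' headers, rather than by running `git status` inside each repo checkout. The snippet below is a minimal standalone sketch of that extraction for plain Node.js; the patch path and the extension list in the usage comment are illustrative assumptions, not values taken from the workflow.

// Minimal sketch, assuming Node.js and a unified diff saved locally.
// 'patches/kernel.diff' and the extension list in the usage example are
// hypothetical, chosen only to mirror the checkers used in the workflow.
const fs = require('fs');
const path = require('path');

// Collect the files touched by a unified diff from its '+++ b/<file>' headers,
// keeping only the extensions the format checkers know how to handle.
function changedFilesFromDiff(diffPath, exts) {
  return fs.readFileSync(diffPath, 'utf-8')
    .split('\n')
    .filter(line => line.startsWith('+++ b/'))
    .map(line => line.slice('+++ b/'.length).trim())
    .filter(file => file.length > 0)
    .filter(file => exts.includes(path.extname(file).toLowerCase()));
}

// Example usage with a hypothetical patch file:
// console.log(changedFilesFromDiff('patches/kernel.diff', ['.rs', '.gn', '.gni', '.py', '.yml']));

Reading the patch headers keeps the check independent of whatever git state survives the artifact handoff between jobs, which is consistent with the "No git status available" fallback the previous git-status-based version had to log.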
diff --git a/Dockerfile b/Dockerfile index e8105271..fd3dfec9 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,7 +2,7 @@ FROM ubuntu:24.04 # Set environment variables ENV DEBIAN_FRONTEND=noninteractive -ENV PATH="/opt/sysroot/usr/local/bin:/opt/sysroot/usr/local/lib/rustlib/x86_64-unknown-linux-gnu/bin:/opt/arm-gnu-toolchain-14.2.rel1-x86_64-arm-none-eabi/bin/:/opt/arm-gnu-toolchain-14.2.rel1-x86_64-aarch64-none-elf/bin/:/opt/skywalking-license-eye-0.7.0-bin/bin/linux/:${PATH}" +ENV PATH="/opt/sysroot/usr/local/bin:/opt/sysroot/usr/local/lib/rustlib/x86_64-unknown-linux-gnu/bin:/opt/arm-gnu-toolchain-14.2.rel1-x86_64-arm-none-eabi/bin/:/opt/arm-gnu-toolchain-14.2.rel1-x86_64-aarch64-none-elf/bin/:/opt/skywalking-license-eye-0.7.0-bin/bin/linux/:/opt/yamlfmt_0.18.0_Linux_x86_64/:${PATH}" # Install system packages RUN apt-get update && \ @@ -51,6 +51,11 @@ RUN curl -L -o skywalking-license-eye.tgz https://github.com/apache/skywalking-e tar xf skywalking-license-eye.tgz -C /opt && \ rm skywalking-license-eye.tgz +# Install yamlfmt +RUN curl -L -o yamlfmt.tar.gz https://github.com/google/yamlfmt/releases/download/v0.18.0/yamlfmt_0.18.0_Linux_x86_64.tar.gz && \ + tar xf yamlfmt.tar.gz -C /opt && \ + rm yamlfmt.tar.gz + # Install bindgen and cbindgen to /opt/sysroot/usr/local/bin RUN CARGO_INSTALL_ROOT=/opt/sysroot/usr/local cargo install bindgen-cli@0.72.1 cbindgen@0.29.0 From 26511a0a4304006899c979b87004592046764368 Mon Sep 17 00:00:00 2001 From: han-jiang277 Date: Fri, 17 Oct 2025 10:02:42 +0800 Subject: [PATCH 18/23] add ymal format --- .../workflows/build_and_push_docker_image.yml | 2 +- .github/workflows/cross_repos_build.yml | 2 +- .../workflows/reusable_cross_repos_build.yml | 940 +++++++++--------- .yamlfmt | 5 + Dockerfile | 51 +- 5 files changed, 502 insertions(+), 498 deletions(-) create mode 100644 .yamlfmt diff --git a/.github/workflows/build_and_push_docker_image.yml b/.github/workflows/build_and_push_docker_image.yml index bf1e8b51..8900dc95 100644 --- a/.github/workflows/build_and_push_docker_image.yml +++ b/.github/workflows/build_and_push_docker_image.yml @@ -33,4 +33,4 @@ jobs: push: true tags: ${{ env.IMAGE_NAME }}:${{ env.tags }} labels: | - org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }} \ No newline at end of file + org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }} diff --git a/.github/workflows/cross_repos_build.yml b/.github/workflows/cross_repos_build.yml index 0f20d194..d534e24d 100644 --- a/.github/workflows/cross_repos_build.yml +++ b/.github/workflows/cross_repos_build.yml @@ -6,4 +6,4 @@ on: jobs: call_workflow: - uses: ./.github/workflows/reusable_cross_repos_build.yml + uses: vivoblueos/kernel/.github/workflows/reusable_cross_repos_build.yml@main diff --git a/.github/workflows/reusable_cross_repos_build.yml b/.github/workflows/reusable_cross_repos_build.yml index 5ea65b8c..7650cb3a 100644 --- a/.github/workflows/reusable_cross_repos_build.yml +++ b/.github/workflows/reusable_cross_repos_build.yml @@ -4,7 +4,7 @@ on: workflow_call: jobs: - prepare: + setup: runs-on: ubuntu-latest permissions: contents: read @@ -12,111 +12,95 @@ jobs: pull-requests: write if: ${{ github.event.issue.pull_request && contains(github.event.comment.body, 'build_prs') }} - steps: - - name: Notify job started - uses: actions/github-script@v8 - with: - script: | - github.rest.issues.createComment({ - issue_number: context.issue.number, - owner: context.repo.owner, - repo: context.repo.repo, - body: `Job is started, see ${{ 
github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}.` - }) - - - name: Create patches directory - run: | - mkdir -p patches - - - name: Download patches - id: fetch_prs - uses: actions/github-script@v8 - with: - script: | - const fs = require('fs').promises; - const body = context.payload.comment.body; - const prRegex = /https:\/\/github\.com\/([^\/]+)\/([^\/]+)\/pull\/(\d+)/g; - let match; - let prs = []; - let patchUrls = []; - let fetched_prs = []; - while ((match = prRegex.exec(body)) !== null) { - let owner = match[1]; - let repo = match[2]; - let pr_num = match[3]; - prs.push({owner: owner, repo: repo, pr_num: pr_num}); - patchUrls.push({repo: repo, url: match[0] + ".diff"}); - } - // Get metadata of the PR. - await Promise.all(prs.map(async({ owner, repo, pr_num }) => { - const { data: pullRequest } = await github.rest.pulls.get({ - owner, - repo, - pull_number: pr_num, - }); - fetched_prs.push({owner: owner, repo: repo, pr_num: pr_num, sha: pullRequest.head.sha}); - })); - - // Download diffs. - await Promise.all(patchUrls.map(async ({ repo, url }) => { - console.log(`Downloading ${url}`); - const patchFilename = `patches/${repo}.diff`; - const response = await github.request({ - url: url, - }); - await fs.writeFile(patchFilename, response.data); - })); - core.setOutput('fetched_prs', JSON.stringify(fetched_prs)); - - - name: Upload patches - uses: actions/upload-artifact@v4 - with: - name: patches - path: patches/ - - setup: - runs-on: ubuntu-latest - needs: prepare container: image: ghcr.io/${{ github.repository_owner }}/kernel:latest steps: - - name: Download patches - uses: actions/download-artifact@v4 - with: - name: patches - path: patches/ - - - name: Init repo and sync - run: | - repo init --depth=1 -u https://github.com/vivoblueos/manifests.git -b main -m manifest.xml - repo sync -j$(nproc) - - - name: Apply patches - run: | - set -e - set -x - for patch in patches/*.diff; do - patch_abspath=$(realpath ${patch}) - repo=$(basename ${patch} ".diff") - if [ "${repo}" != "manifests" ]; then - cd ${repo} - else - cd .repo/${repo} - fi - git apply -3 --check --verbose ${patch_abspath} && git apply -3 --verbose ${patch_abspath} - if [[ $? != 0 ]]; then - exit 1 - fi - cd - > /dev/null - done - - - name: Upload workspace - uses: actions/upload-artifact@v4 - with: - name: workspace - path: . - include-hidden-files: true + - name: Notify job started + uses: actions/github-script@v8 + with: + script: | + github.rest.issues.createComment({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + body: `Job is started, see ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}.` + }) + + - name: Create patches directory + run: | + mkdir -p patches + + - name: Download patches + id: fetch_prs + uses: actions/github-script@v8 + with: + script: | + const fs = require('fs').promises; + const body = context.payload.comment.body; + const prRegex = /https:\/\/github\.com\/([^\/]+)\/([^\/]+)\/pull\/(\d+)/g; + let match; + let prs = []; + let patchUrls = []; + let fetched_prs = []; + while ((match = prRegex.exec(body)) !== null) { + let owner = match[1]; + let repo = match[2]; + let pr_num = match[3]; + prs.push({owner: owner, repo: repo, pr_num: pr_num}); + patchUrls.push({repo: repo, url: match[0] + ".diff"}); + } + // Get metadata of the PR. 
+ await Promise.all(prs.map(async({ owner, repo, pr_num }) => { + const { data: pullRequest } = await github.rest.pulls.get({ + owner, + repo, + pull_number: pr_num, + }); + fetched_prs.push({owner: owner, repo: repo, pr_num: pr_num, sha: pullRequest.head.sha}); + })); + + // Download diffs. + await Promise.all(patchUrls.map(async ({ repo, url }) => { + console.log(`Downloading ${url}`); + const patchFilename = `patches/${repo}.diff`; + const response = await github.request({ + url: url, + }); + await fs.writeFile(patchFilename, response.data); + })); + core.setOutput('fetched_prs', JSON.stringify(fetched_prs)); + + - name: Init repo and sync + run: | + repo init --depth=1 -u https://github.com/vivoblueos/manifests.git -b main -m manifest.xml + repo sync -j$(nproc) + + - name: Apply patches + run: | + set -e + set -x + for patch in patches/*.diff; do + patch_abspath=$(realpath ${patch}) + repo=$(basename ${patch} ".diff") + if [ "${repo}" != "manifests" ]; then + cd ${repo} + else + cd .repo/${repo} + fi + git apply -3 --check --verbose ${patch_abspath} && git apply -3 --verbose ${patch_abspath} + if [[ $? != 0 ]]; then + exit 1 + fi + cd - > /dev/null + done + + - name: Upload workspace + uses: actions/upload-artifact@v4 + with: + name: workspace + path: . + include-hidden-files: true check_format: runs-on: ubuntu-latest @@ -125,129 +109,136 @@ jobs: image: ghcr.io/${{ github.repository_owner }}/kernel:latest steps: - - name: Download workspace - uses: actions/download-artifact@v4 - with: - name: workspace - path: . - - - name: Check format - uses: actions/github-script@v8 - with: - script: | - const { execSync } = require('child_process'); - const fs = require('fs'); - const path = require('path'); - // Get list of patch files - const patchesDir = 'patches'; - const patchFiles = fs.readdirSync(patchesDir).filter(f => f.endsWith('.diff')); - if (patchFiles.length === 0) { - console.log('No patch files found for format check'); - return; - } - let formatErrors = []; - let totalCheckedFiles = 0; - for (const patchFile of patchFiles) { - const repo = patchFile.replace(/\.diff$/, ''); - let repoPath = repo; - // Handle manifests repo special case - if (repo === 'manifests') { - continue; - } - console.log(`\nChecking format in repo: ${repo} (${repoPath})`); - // Check if repo directory exists - if (!fs.existsSync(repoPath)) { - console.log(`Repo directory ${repoPath} not found, skipping`); - continue; - } - // Get modified files from patch file content - let modifiedFiles = []; - try { - const patchContent = fs.readFileSync(path.join(patchesDir, patchFile), 'utf-8'); - const patchLines = patchContent.split('\n'); - // Extract file paths from patch headers (+++ b/filename) - modifiedFiles = patchLines - .filter(line => line.startsWith('+++ b/')) - .map(line => line.substring(6)) // Remove '+++ b/' prefix - .filter(file => file && file.length > 0) - .filter(file => { - const ext = path.extname(file).toLowerCase(); - return ext === '.yml' || ext === '.gn' || ext === '.gni' || ext === '.py' || ext === '.rs'; - }); - console.log(`Found ${modifiedFiles.length} modified files in patch for ${repo}:`); - modifiedFiles.forEach(file => console.log(` - ${file}`)); - } catch (error) { - console.log(`Error reading patch file ${patchFile}: ${error.message}`); - continue; - } + - name: Download workspace + uses: actions/download-artifact@v4 + with: + name: workspace + path: . 
- if (modifiedFiles.length === 0) { - console.log(`No relevant files found in patch for ${repo}`); - continue; + - name: Check format + uses: actions/github-script@v8 + with: + script: | + const { execSync } = require('child_process'); + const fs = require('fs'); + const path = require('path'); + // Get list of patch files + const patchesDir = 'patches'; + const patchFiles = fs.readdirSync(patchesDir).filter(f => f.endsWith('.diff')); + if (patchFiles.length === 0) { + console.log('No patch files found for format check'); + return; } - // Check format for each modified file - for (const file of modifiedFiles) { - const filePath = path.resolve(repoPath, file); - const ext = path.extname(file).toLowerCase(); - // Skip if file doesn't exist - if (!fs.existsSync(filePath)) { - console.log(`File ${filePath} doesn't exist, skipping`); + let formatErrors = []; + let totalCheckedFiles = 0; + for (const patchFile of patchFiles) { + const repo = patchFile.replace(/\.diff$/, ''); + // Handle manifests repo special case + if (repo === 'manifests') { continue; } + let repoPath = repo; + console.log(`\nChecking format in repo: (${repoPath})`); + // Check if repo directory exists + if (!fs.existsSync(repoPath)) { + console.log(`Repo directory ${repoPath} not found, skipping`); + continue; + } + // Get modified files from patch file content + let modifiedFiles = []; try { - if (ext === '.gn' || ext === '.gni') { - console.log(`Checking GN format for: ${filePath}`); - const out = execSync(`gn format --dry-run \"${filePath}\"`, { - encoding: 'utf-8', - stdio: ['inherit', 'pipe', 'pipe'] + const patchContent = fs.readFileSync(path.join(patchesDir, patchFile), 'utf-8'); + const patchLines = patchContent.split('\n'); + // Extract file paths from patch headers (+++ b/filename) + modifiedFiles = patchLines + .filter(line => line.startsWith('+++ b/')) + .map(line => line.substring(6)) // Remove '+++ b/' prefix + .filter(file => file && file.length > 0) + .filter(file => { + const ext = path.extname(file).toLowerCase(); + return ext === '.yml' || ext === '.yaml' || ext === '.gn' || ext === '.gni' || ext === '.py' || ext === '.rs'; }); - if (out.trim()) { - formatErrors.push(`GN formatting issue in ${filePath}:\n${out}`); - } - } else if (ext === '.py') { - console.log(`Checking Python format for: ${filePath}`); - const out = execSync(`yapf3 -d \"${filePath}\"`, { - encoding: 'utf-8', - stdio: ['inherit', 'pipe', 'pipe'] - }); - if (out.trim()) { - formatErrors.push(`Python formatting issue in ${filePath}:\n${out}`); - } - } else if (ext === '.rs') { - console.log(`Checking Rust format for: ${filePath}`); - const out = execSync(`rustfmt --edition=2021 --check --unstable-features --skip-children \"${filePath}\"`, { - encoding: 'utf-8', - stdio: ['inherit', 'pipe', 'pipe'] - }); - if (out.trim()) { - formatErrors.push(`Rust formatting issue in ${filePath}:\n${out}`); - } - } else if (ext === '.yml') { - // ymalfmt lint is not work for github actions + console.log(`Found ${modifiedFiles.length} modified files in patch for ${repo}:`); + modifiedFiles.forEach(file => console.log(` - ${file}`)); + } catch (error) { + console.log(`Error reading patch file ${patchFile}: ${error.message}`); + continue; + } + + if (modifiedFiles.length === 0) { + console.log(`No relevant files found in patch for ${repo}`); + continue; + } + // Check format for each modified file + for (const file of modifiedFiles) { + const filePath = path.resolve(repoPath, file); + const ext = path.extname(file).toLowerCase(); + // Skip if file doesn't 
exist + if (!fs.existsSync(filePath)) { + console.log(`File ${filePath} doesn't exist, skipping`); continue; } - totalCheckedFiles++; - } catch (error) { - // Check if it's a formatting error or other error - const output = (error.stdout || '') + (error.stderr || ''); - if (output.includes('format') || output.includes('Format')) { - formatErrors.push(`Formatting issue in ${repo}/${file}:\n${output}`); - } else { - console.log(`Skipping format check for ${repo}/${file}: ${error.message}`); + console.log(`Checking format for: ${filePath}`); + try { + if (ext === '.gn' || ext === '.gni') { + console.log(`Checking GN format for: ${filePath}, ext: ${ext}`); + const out = execSync(`gn format --dry-run \"${filePath}\"`, { + encoding: 'utf-8', + stdio: ['inherit', 'pipe', 'pipe'] + }); + if (out.trim()) { + formatErrors.push(`GN formatting issue in ${filePath}:\n${out}`); + } + } else if (ext === '.py') { + console.log(`Checking Python format for: ${filePath}`); + const out = execSync(`yapf3 -d \"${filePath}\"`, { + encoding: 'utf-8', + stdio: ['inherit', 'pipe', 'pipe'] + }); + if (out.trim()) { + formatErrors.push(`Python formatting issue in ${filePath}:\n${out}`); + } + } else if (ext === '.rs') { + console.log(`Checking Rust format for: ${filePath}`); + const out = execSync(`rustfmt --edition=2021 --check --unstable-features --skip-children \"${filePath}\"`, { + encoding: 'utf-8', + stdio: ['inherit', 'pipe', 'pipe'] + }); + if (out.trim()) { + formatErrors.push(`Rust formatting issue in ${filePath}:\n${out}`); + } + } else if (ext === '.yml' || ext === '.yaml') { + console.log(`Checking YAML format for: ${filePath}`); + const out = execSync(`yamlfmt -lint \"${filePath}\"`, { + encoding: 'utf-8', + stdio: ['inherit', 'pipe', 'pipe'] + }); + if (out.trim()) { + formatErrors.push(`YAML formatting issue in ${filePath}:\n${out}`); + } + } + totalCheckedFiles++; + } catch (error) { + // Check if it's a formatting error or other error + const output = (error.stdout || '') + (error.stderr || ''); + if (output.includes('format') || output.includes('Format')) { + formatErrors.push(`Formatting issue in ${repo}/${file}:\n${output}`); + } else { + console.log(`Skipping format check for ${repo}/${file}: ${error.message}`); + } } } } - } - - console.log(`\nTotal files checked: ${totalCheckedFiles}`); - if (formatErrors.length > 0) { - console.log('❌ Format check failed:'); - formatErrors.forEach(error => console.log(error)); - core.setFailed('Format check failed'); - process.exit(1); - } else { - console.log('✅ All modified files pass format check'); - } + + console.log(`\nTotal files checked: ${totalCheckedFiles}`); + if (formatErrors.length > 0) { + console.log('❌ Format check failed:'); + formatErrors.forEach(error => console.log(error)); + core.setFailed('Format check failed'); + process.exit(1); + } else { + console.log('✅ All modified files pass format check'); + } check_license: runs-on: ubuntu-latest @@ -256,68 +247,76 @@ jobs: image: ghcr.io/${{ github.repository_owner }}/kernel:latest steps: - - name: Download workspace - uses: actions/download-artifact@v4 - with: - name: workspace - path: . 
- - - name: Check license - uses: actions/github-script@v8 - with: - script: | - const fs = require('fs'); - const { execSync } = require('child_process'); - const patchesDir = 'patches'; - let failedRepos = []; - let checkedRepos = []; - - // Find all .diff files in patches/ - const patchFiles = fs.readdirSync(patchesDir).filter(f => f.endsWith('.diff')); - - for (const patchFile of patchFiles) { - const repo = patchFile.replace(/\.diff$/, ''); - let repoPath = repo; - // Skip invalid repos for license check - if (repo.includes('manifests') || repo.includes('libc') || repo.includes('book') || repo.includes('external')) { - console.log(`Skipping ${repo} for license check.`); - continue; - } + - name: Download workspace + uses: actions/download-artifact@v4 + with: + name: workspace + path: . - try { - // Run license-eye header check in each repo directory - const originalCwd = process.cwd(); - process.chdir(repoPath); - const result = execSync('license-eye header check', { - encoding: 'utf-8', - stdio: ['inherit', 'pipe', 'pipe'] - }); - process.chdir(originalCwd); - if (result.includes('ERROR one or more files does not have a valid license header')) { - console.log(`License issue in ${repoPath}:\n${result}`); - failedRepos.push(repoPath); + - name: Check license + uses: actions/github-script@v8 + with: + script: | + const fs = require('fs'); + const { execSync } = require('child_process'); + const patchesDir = 'patches'; + let failedRepos = []; + let checkedRepos = []; + + // Find all .diff files in patches/ + const patchFiles = fs.readdirSync(patchesDir).filter(f => f.endsWith('.diff')); + + for (const patchFile of patchFiles) { + const repo = patchFile.replace(/\.diff$/, ''); + let repoPath = repo; + // Skip invalid repos for license check + if (repo.includes('manifests') || repo.includes('libc') || repo.includes('book') || repo.includes('external')) { + console.log(`Skipping ${repo} for license check.`); + continue; } - } catch (e) { - // Always log output for diagnosis - const out = (e.stdout ? e.stdout.toString() : '') + (e.stderr ? e.stderr.toString() : ''); - if (out.includes('ERROR one or more files does not have a valid license header')) { - console.log(`License issue in ${repoPath}:\n${out}`); - failedRepos.push(repoPath); - } else { - console.log(`Error running license-eye in ${repoPath}:\n${out}`); - failedRepos.push(repoPath); + + try { + // Run license-eye header check in each repo directory + const originalCwd = process.cwd(); + process.chdir(repoPath); + // Print the contents of the .licenserc.yaml file in the repo directory (if present) + const licenseRcFile = '.licenserc.yaml'; + if (fs.existsSync(licenseRcFile)) { + console.log(`--- .licenserc.yaml in ${repoPath} ---`); + } else { + console.log(`No .licenserc.yaml found in ${repoPath}`); + continue; + } + const result = execSync('license-eye header check', { + encoding: 'utf-8', + stdio: ['inherit', 'pipe', 'pipe'] + }); + process.chdir(originalCwd); + if (result.includes('ERROR one or more files does not have a valid license header')) { + console.log(`License issue in ${repoPath}:\n${result}`); + failedRepos.push(repoPath); + } + } catch (e) { + // Always log output for diagnosis + const out = (e.stdout ? e.stdout.toString() : '') + (e.stderr ? 
e.stderr.toString() : ''); + if (out.includes('ERROR one or more files does not have a valid license header')) { + console.log(`License issue in ${repoPath}:\n${out}`); + failedRepos.push(repoPath); + } else { + console.log(`Error running license-eye in ${repoPath}:\n${out}`); + failedRepos.push(repoPath); + } } } - } - if (failedRepos.length > 0) { - core.setFailed(`❌ License header issues found in these repos: ${failedRepos.join(', ')}`); - process.exit(1); - } else if (checkedRepos.length === 0) { - console.log('✅ No repos to check license for.'); - } else { - console.log('✅ All checked repos have valid license headers!'); - } + if (failedRepos.length > 0) { + core.setFailed(`❌ License header issues found in these repos: ${failedRepos.join(', ')}`); + process.exit(1); + } else if (checkedRepos.length === 0) { + console.log('✅ No repos to check license for.'); + } else { + console.log('✅ All checked repos have valid license headers!'); + } build_host: runs-on: ubuntu-latest @@ -326,45 +325,45 @@ jobs: image: ghcr.io/${{ github.repository_owner }}/kernel:latest steps: - - name: Download workspace - uses: actions/download-artifact@v4 - with: - name: workspace - path: . - - - name: Build and test kernel for host - uses: actions/github-script@v8 - with: - script: | - const { execSync } = require('child_process'); - - const board = 'host'; - const configs = [ - { name: 'debug', build_type: 'debug', direct_syscall_handler: 'false' }, - { name: 'release', build_type: 'release', direct_syscall_handler: 'false' } - ]; - - for (const config of configs) { - console.log(`\nBuilding ${board} ${config.name} version...`); - - const outDir = `out/${board}.${config.name}`; - const args = `bsp="${board}" build_type="${config.build_type}" direct_syscall_handler=${config.direct_syscall_handler}`; - - try { - // Generate build files - console.log(`Running: gn gen ${outDir} --args='${args}'`); - execSync(`gn gen ${outDir} --args='${args}'`, { stdio: 'inherit' }); - - // Build and test - console.log(`Running: ninja -C ${outDir} check_all`); - execSync(`ninja -C ${outDir} check_all`, { stdio: 'inherit' }); - - console.log(`✅ ${board} ${config.name} build completed successfully`); - } catch (error) { - console.error(`❌ ${board} ${config.name} build failed:`, error.message); - process.exit(1); + - name: Download workspace + uses: actions/download-artifact@v4 + with: + name: workspace + path: . 
+ + - name: Build and test kernel for host + uses: actions/github-script@v8 + with: + script: | + const { execSync } = require('child_process'); + + const board = 'none'; + const configs = [ + { name: 'debug', build_type: 'debug', direct_syscall_handler: 'false' }, + { name: 'release', build_type: 'release', direct_syscall_handler: 'false' } + ]; + + for (const config of configs) { + console.log(`\nBuilding ${board} ${config.name} version...`); + + const outDir = `out/${board}.${config.name}`; + const args = `board="${board}" build_type="${config.build_type}" direct_syscall_handler=${config.direct_syscall_handler}`; + + try { + // Generate build files + console.log(`Running: gn gen ${outDir} --args='${args}'`); + execSync(`gn gen ${outDir} --args='${args}'`, { stdio: 'inherit' }); + + // Build and test + console.log(`Running: ninja -C ${outDir} check_all`); + execSync(`ninja -C ${outDir} check_all`, { stdio: 'inherit' }); + + console.log(`✅ ${board} ${config.name} build completed successfully`); + } catch (error) { + console.error(`❌ ${board} ${config.name} build failed:`, error.message); + process.exit(1); + } } - } build_qemu_mps2_an385: runs-on: ubuntu-latest @@ -373,47 +372,47 @@ jobs: image: ghcr.io/${{ github.repository_owner }}/kernel:latest steps: - - name: Download workspace - uses: actions/download-artifact@v4 - with: - name: workspace - path: . - - - name: Build and test kernel for qemu_mps2_an385 - uses: actions/github-script@v8 - with: - script: | - const { execSync } = require('child_process'); - - const board = 'qemu_mps2_an385'; - const configs = [ - { name: 'debug.dsc', build_type: 'debug', direct_syscall_handler: 'true' }, - { name: 'debug', build_type: 'debug', direct_syscall_handler: 'false' }, - { name: 'release.dsc', build_type: 'release', direct_syscall_handler: 'true' }, - { name: 'release', build_type: 'release', direct_syscall_handler: 'false' } - ]; - - for (const config of configs) { - console.log(`\nBuilding ${board} ${config.name} version...`); - - const outDir = `out/${board}.${config.name}`; - const args = `bsp="${board}" build_type="${config.build_type}" direct_syscall_handler=${config.direct_syscall_handler}`; - - try { - // Generate build files - console.log(`Running: gn gen ${outDir} --args='${args}'`); - execSync(`gn gen ${outDir} --args='${args}'`, { stdio: 'inherit' }); - - // Build and test - console.log(`Running: ninja -C ${outDir} check_all`); - execSync(`ninja -C ${outDir} check_all`, { stdio: 'inherit' }); - - console.log(`✅ ${board} ${config.name} build completed successfully`); - } catch (error) { - console.error(`❌ ${board} ${config.name} build failed:`, error.message); - process.exit(1); + - name: Download workspace + uses: actions/download-artifact@v4 + with: + name: workspace + path: . 
+ + - name: Build and test kernel for qemu_mps2_an385 + uses: actions/github-script@v8 + with: + script: | + const { execSync } = require('child_process'); + + const board = 'qemu_mps2_an385'; + const configs = [ + { name: 'debug.dsc', build_type: 'debug', direct_syscall_handler: 'true' }, + { name: 'debug', build_type: 'debug', direct_syscall_handler: 'false' }, + { name: 'release.dsc', build_type: 'release', direct_syscall_handler: 'true' }, + { name: 'release', build_type: 'release', direct_syscall_handler: 'false' } + ]; + + for (const config of configs) { + console.log(`\nBuilding ${board} ${config.name} version...`); + + const outDir = `out/${board}.${config.name}`; + const args = `board="${board}" build_type="${config.build_type}" direct_syscall_handler=${config.direct_syscall_handler}`; + + try { + // Generate build files + console.log(`Running: gn gen ${outDir} --args='${args}'`); + execSync(`gn gen ${outDir} --args='${args}'`, { stdio: 'inherit' }); + + // Build and test + console.log(`Running: ninja -C ${outDir} check_all`); + execSync(`ninja -C ${outDir} check_all`, { stdio: 'inherit' }); + + console.log(`✅ ${board} ${config.name} build completed successfully`); + } catch (error) { + console.error(`❌ ${board} ${config.name} build failed:`, error.message); + process.exit(1); + } } - } build_qemu_mps3_an547: runs-on: ubuntu-latest @@ -422,47 +421,47 @@ jobs: image: ghcr.io/${{ github.repository_owner }}/kernel:latest steps: - - name: Download workspace - uses: actions/download-artifact@v4 - with: - name: workspace - path: . - - - name: Build and test kernel for qemu_mps3_an547 - uses: actions/github-script@v8 - with: - script: | - const { execSync } = require('child_process'); - - const board = 'qemu_mps3_an547'; - const configs = [ - { name: 'debug.dsc', build_type: 'debug', direct_syscall_handler: 'true' }, - { name: 'debug', build_type: 'debug', direct_syscall_handler: 'false' }, - { name: 'release.dsc', build_type: 'release', direct_syscall_handler: 'true' }, - { name: 'release', build_type: 'release', direct_syscall_handler: 'false' } - ]; - - for (const config of configs) { - console.log(`\nBuilding ${board} ${config.name} version...`); - - const outDir = `out/${board}.${config.name}`; - const args = `bsp="${board}" build_type="${config.build_type}" direct_syscall_handler=${config.direct_syscall_handler}`; - - try { - // Generate build files - console.log(`Running: gn gen ${outDir} --args='${args}'`); - execSync(`gn gen ${outDir} --args='${args}'`, { stdio: 'inherit' }); - - // Build and test - console.log(`Running: ninja -C ${outDir} check_all`); - execSync(`ninja -C ${outDir} check_all`, { stdio: 'inherit' }); - - console.log(`✅ ${board} ${config.name} build completed successfully`); - } catch (error) { - console.error(`❌ ${board} ${config.name} build failed:`, error.message); - process.exit(1); + - name: Download workspace + uses: actions/download-artifact@v4 + with: + name: workspace + path: . 
+ + - name: Build and test kernel for qemu_mps3_an547 + uses: actions/github-script@v8 + with: + script: | + const { execSync } = require('child_process'); + + const board = 'qemu_mps3_an547'; + const configs = [ + { name: 'debug.dsc', build_type: 'debug', direct_syscall_handler: 'true' }, + { name: 'debug', build_type: 'debug', direct_syscall_handler: 'false' }, + { name: 'release.dsc', build_type: 'release', direct_syscall_handler: 'true' }, + { name: 'release', build_type: 'release', direct_syscall_handler: 'false' } + ]; + + for (const config of configs) { + console.log(`\nBuilding ${board} ${config.name} version...`); + + const outDir = `out/${board}.${config.name}`; + const args = `board="${board}" build_type="${config.build_type}" direct_syscall_handler=${config.direct_syscall_handler}`; + + try { + // Generate build files + console.log(`Running: gn gen ${outDir} --args='${args}'`); + execSync(`gn gen ${outDir} --args='${args}'`, { stdio: 'inherit' }); + + // Build and test + console.log(`Running: ninja -C ${outDir} check_all`); + execSync(`ninja -C ${outDir} check_all`, { stdio: 'inherit' }); + + console.log(`✅ ${board} ${config.name} build completed successfully`); + } catch (error) { + console.error(`❌ ${board} ${config.name} build failed:`, error.message); + process.exit(1); + } } - } build_qemu_riscv64: runs-on: ubuntu-latest @@ -471,47 +470,47 @@ jobs: image: ghcr.io/${{ github.repository_owner }}/kernel:latest steps: - - name: Download workspace - uses: actions/download-artifact@v4 - with: - name: workspace - path: . - - - name: Build and test kernel for qemu_riscv64 - uses: actions/github-script@v8 - with: - script: | - const { execSync } = require('child_process'); - - const board = 'qemu_riscv64'; - const configs = [ - { name: 'debug.dsc', build_type: 'debug', direct_syscall_handler: 'true' }, - { name: 'debug', build_type: 'debug', direct_syscall_handler: 'false' }, - { name: 'release.dsc', build_type: 'release', direct_syscall_handler: 'true' }, - { name: 'release', build_type: 'release', direct_syscall_handler: 'false' } - ]; - - for (const config of configs) { - console.log(`\nBuilding ${board} ${config.name} version...`); - - const outDir = `out/${board}.${config.name}`; - const args = `bsp="${board}" build_type="${config.build_type}" direct_syscall_handler=${config.direct_syscall_handler}`; - - try { - // Generate build files - console.log(`Running: gn gen ${outDir} --args='${args}'`); - execSync(`gn gen ${outDir} --args='${args}'`, { stdio: 'inherit' }); - - // Build and test - console.log(`Running: ninja -C ${outDir} check_all`); - execSync(`ninja -C ${outDir} check_all`, { stdio: 'inherit' }); - - console.log(`✅ ${board} ${config.name} build completed successfully`); - } catch (error) { - console.error(`❌ ${board} ${config.name} build failed:`, error.message); - process.exit(1); + - name: Download workspace + uses: actions/download-artifact@v4 + with: + name: workspace + path: . 
+ + - name: Build and test kernel for qemu_riscv64 + uses: actions/github-script@v8 + with: + script: | + const { execSync } = require('child_process'); + + const board = 'qemu_riscv64'; + const configs = [ + { name: 'debug.dsc', build_type: 'debug', direct_syscall_handler: 'true' }, + { name: 'debug', build_type: 'debug', direct_syscall_handler: 'false' }, + { name: 'release.dsc', build_type: 'release', direct_syscall_handler: 'true' }, + { name: 'release', build_type: 'release', direct_syscall_handler: 'false' } + ]; + + for (const config of configs) { + console.log(`\nBuilding ${board} ${config.name} version...`); + + const outDir = `out/${board}.${config.name}`; + const args = `board="${board}" build_type="${config.build_type}" direct_syscall_handler=${config.direct_syscall_handler}`; + + try { + // Generate build files + console.log(`Running: gn gen ${outDir} --args='${args}'`); + execSync(`gn gen ${outDir} --args='${args}'`, { stdio: 'inherit' }); + + // Build and test + console.log(`Running: ninja -C ${outDir} check_all`); + execSync(`ninja -C ${outDir} check_all`, { stdio: 'inherit' }); + + console.log(`✅ ${board} ${config.name} build completed successfully`); + } catch (error) { + console.error(`❌ ${board} ${config.name} build failed:`, error.message); + process.exit(1); + } } - } build_qemu_aarch64: runs-on: ubuntu-latest @@ -520,51 +519,51 @@ jobs: image: ghcr.io/${{ github.repository_owner }}/kernel:latest steps: - - name: Download workspace - uses: actions/download-artifact@v4 - with: - name: workspace - path: . - - - name: Build and test kernel for qemu_virt64_aarch64 - uses: actions/github-script@v8 - with: - script: | - const { execSync } = require('child_process'); - - const board = 'qemu_virt64_aarch64'; - const configs = [ - { name: 'debug.dsc', build_type: 'debug', direct_syscall_handler: 'true' }, - { name: 'debug', build_type: 'debug', direct_syscall_handler: 'false' }, - { name: 'release.dsc', build_type: 'release', direct_syscall_handler: 'true' }, - { name: 'release', build_type: 'release', direct_syscall_handler: 'false' } - ]; - - for (const config of configs) { - console.log(`\nBuilding ${board} ${config.name} version...`); - - const outDir = `out/${board}.${config.name}`; - const args = `bsp="${board}" build_type="${config.build_type}" direct_syscall_handler=${config.direct_syscall_handler}`; - - try { - // Generate build files - console.log(`Running: gn gen ${outDir} --args='${args}'`); - execSync(`gn gen ${outDir} --args='${args}'`, { stdio: 'inherit' }); - - // Build and test - console.log(`Running: ninja -C ${outDir} check_all`); - execSync(`ninja -C ${outDir} check_all`, { stdio: 'inherit' }); - - console.log(`✅ ${board} ${config.name} build completed successfully`); - } catch (error) { - console.error(`❌ ${board} ${config.name} build failed:`, error.message); - process.exit(1); + - name: Download workspace + uses: actions/download-artifact@v4 + with: + name: workspace + path: . 
+ + - name: Build and test kernel for qemu_virt64_aarch64 + uses: actions/github-script@v8 + with: + script: | + const { execSync } = require('child_process'); + + const board = 'qemu_virt64_aarch64'; + const configs = [ + { name: 'debug.dsc', build_type: 'debug', direct_syscall_handler: 'true' }, + { name: 'debug', build_type: 'debug', direct_syscall_handler: 'false' }, + { name: 'release.dsc', build_type: 'release', direct_syscall_handler: 'true' }, + { name: 'release', build_type: 'release', direct_syscall_handler: 'false' } + ]; + + for (const config of configs) { + console.log(`\nBuilding ${board} ${config.name} version...`); + + const outDir = `out/${board}.${config.name}`; + const args = `board="${board}" build_type="${config.build_type}" direct_syscall_handler=${config.direct_syscall_handler}`; + + try { + // Generate build files + console.log(`Running: gn gen ${outDir} --args='${args}'`); + execSync(`gn gen ${outDir} --args='${args}'`, { stdio: 'inherit' }); + + // Build and test + console.log(`Running: ninja -C ${outDir} check_all`); + execSync(`ninja -C ${outDir} check_all`, { stdio: 'inherit' }); + + console.log(`✅ ${board} ${config.name} build completed successfully`); + } catch (error) { + console.error(`❌ ${board} ${config.name} build failed:`, error.message); + process.exit(1); + } } - } notify: runs-on: ubuntu-latest - needs: [prepare, setup, check_format, check_license, build_host, build_qemu_mps2_an385, build_qemu_mps3_an547, build_qemu_riscv64, build_qemu_aarch64] + needs: [setup, check_format, check_license, build_host, build_qemu_mps2_an385, build_qemu_mps3_an547, build_qemu_riscv64, build_qemu_aarch64] if: always() permissions: contents: read @@ -578,7 +577,6 @@ jobs: script: | // Check if any job failed const jobStatuses = { - prepare: "${{ needs.prepare.result }}", setup: "${{ needs.setup.result }}", check_format: "${{ needs.check_format.result }}", check_license: "${{ needs.check_license.result }}", diff --git a/.yamlfmt b/.yamlfmt new file mode 100644 index 00000000..81e59e16 --- /dev/null +++ b/.yamlfmt @@ -0,0 +1,5 @@ +formatter: + type: basic + retain_line_breaks: true + retain_line_breaks_single: true + pad_line_comments: 2 diff --git a/Dockerfile b/Dockerfile index fd3dfec9..3c672799 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,11 +2,11 @@ FROM ubuntu:24.04 # Set environment variables ENV DEBIAN_FRONTEND=noninteractive -ENV PATH="/opt/sysroot/usr/local/bin:/opt/sysroot/usr/local/lib/rustlib/x86_64-unknown-linux-gnu/bin:/opt/arm-gnu-toolchain-14.2.rel1-x86_64-arm-none-eabi/bin/:/opt/arm-gnu-toolchain-14.2.rel1-x86_64-aarch64-none-elf/bin/:/opt/skywalking-license-eye-0.7.0-bin/bin/linux/:/opt/yamlfmt_0.18.0_Linux_x86_64/:${PATH}" +ENV PATH="/opt/sysroot/usr/local/bin:/opt/sysroot/usr/local/lib/rustlib/x86_64-unknown-linux-gnu/bin:/opt/arm-gnu-toolchain-14.2.rel1-x86_64-arm-none-eabi/bin/:/opt/arm-gnu-toolchain-14.2.rel1-x86_64-aarch64-none-elf/bin/:/opt/skywalking-license-eye-0.7.0-bin/bin/linux/:${PATH}" # Install system packages -RUN apt-get update && \ - apt-get install -y \ +RUN apt-get update \ + && apt-get install -y \ git \ clang lld \ python3-kconfiglib \ @@ -19,42 +19,43 @@ RUN apt-get update && \ build-essential \ pkg-config \ clang-format yapf3 \ - && rm -rf /var/lib/apt/lists/* + && rm -rf /var/lib/apt/lists/* # Install Arm GNU toolchain (ARM Cortex-M) -RUN curl -L -o arm-toolchain.tar.xz https://developer.arm.com/-/media/Files/downloads/gnu/14.2.rel1/binrel/arm-gnu-toolchain-14.2.rel1-x86_64-arm-none-eabi.tar.xz && \ - tar xf 
arm-toolchain.tar.xz -C /opt && \ - rm arm-toolchain.tar.xz +RUN curl -L -o arm-toolchain.tar.xz https://developer.arm.com/-/media/Files/downloads/gnu/14.2.rel1/binrel/arm-gnu-toolchain-14.2.rel1-x86_64-arm-none-eabi.tar.xz \ + && tar xf arm-toolchain.tar.xz -C /opt \ + && rm arm-toolchain.tar.xz # Install Arm64 GNU toolchain (AArch64) -RUN curl -L -o aarch64-toolchain.tar.xz https://developer.arm.com/-/media/Files/downloads/gnu/14.2.rel1/binrel/arm-gnu-toolchain-14.2.rel1-x86_64-aarch64-none-elf.tar.xz && \ - tar xf aarch64-toolchain.tar.xz -C /opt && \ - rm aarch64-toolchain.tar.xz +RUN curl -L -o aarch64-toolchain.tar.xz https://developer.arm.com/-/media/Files/downloads/gnu/14.2.rel1/binrel/arm-gnu-toolchain-14.2.rel1-x86_64-aarch64-none-elf.tar.xz \ + && tar xf aarch64-toolchain.tar.xz -C /opt \ + && rm aarch64-toolchain.tar.xz # Download and unpack prebuilt QEMU -RUN mkdir -p /opt/sysroot && \ - curl -L -o qemu.tar.xz https://github.com/vivoblueos/toolchain/releases/download/v0.8.0/qemu-2025_08_05_12_17.tar.xz && \ - tar xf qemu.tar.xz -C /opt/sysroot && \ - rm qemu.tar.xz +RUN mkdir -p /opt/sysroot \ + && curl -L -o qemu.tar.xz https://github.com/vivoblueos/toolchain/releases/download/v0.8.0/qemu-2025_08_05_12_17.tar.xz \ + && tar xf qemu.tar.xz -C /opt/sysroot \ + && rm qemu.tar.xz # Download and unpack prebuilt Rust toolchain -RUN curl -L -o blueos-toolchain.tar.xz https://github.com/vivoblueos/toolchain/releases/download/v0.8.0/blueos-toolchain-ubuntu-latest-2025_09_16_08_50.tar.xz && \ - tar xf blueos-toolchain.tar.xz -C /opt/sysroot && \ - rm blueos-toolchain.tar.xz +RUN curl -L -o blueos-toolchain.tar.xz https://github.com/vivoblueos/toolchain/releases/download/v0.8.0/blueos-toolchain-ubuntu-latest-2025_09_16_08_50.tar.xz \ + && tar xf blueos-toolchain.tar.xz -C /opt/sysroot \ + && rm blueos-toolchain.tar.xz # Install repo -RUN curl -L -o /opt/sysroot/usr/local/bin/repo https://storage.googleapis.com/git-repo-downloads/repo && \ - chmod a+x /opt/sysroot/usr/local/bin/repo +RUN curl -L -o /opt/sysroot/usr/local/bin/repo https://storage.googleapis.com/git-repo-downloads/repo \ + && chmod a+x /opt/sysroot/usr/local/bin/repo # Install license-eye -RUN curl -L -o skywalking-license-eye.tgz https://github.com/apache/skywalking-eyes/releases/download/v0.7.0/skywalking-license-eye-0.7.0-bin.tgz && \ - tar xf skywalking-license-eye.tgz -C /opt && \ - rm skywalking-license-eye.tgz +RUN curl -L -o skywalking-license-eye.tgz https://github.com/apache/skywalking-eyes/releases/download/v0.7.0/skywalking-license-eye-0.7.0-bin.tgz \ + && tar xf skywalking-license-eye.tgz -C /opt \ + && rm skywalking-license-eye.tgz # Install yamlfmt -RUN curl -L -o yamlfmt.tar.gz https://github.com/google/yamlfmt/releases/download/v0.18.0/yamlfmt_0.18.0_Linux_x86_64.tar.gz && \ - tar xf yamlfmt.tar.gz -C /opt && \ - rm yamlfmt.tar.gz +RUN curl -L -o yamlfmt.tar.gz https://github.com/google/yamlfmt/releases/download/v0.18.0/yamlfmt_0.18.0_Linux_x86_64.tar.gz \ + && tar xf yamlfmt.tar.gz -C /opt/sysroot/usr/local/bin/ \ + && chmod a+x /opt/sysroot/usr/local/bin/yamlfmt \ + && rm yamlfmt.tar.gz # Install bindgen and cbindgen to /opt/sysroot/usr/local/bin RUN CARGO_INSTALL_ROOT=/opt/sysroot/usr/local cargo install bindgen-cli@0.72.1 cbindgen@0.29.0 From e630a28565fd5ed8f90f5da15b0fbfbaa8baa5c4 Mon Sep 17 00:00:00 2001 From: han-jiang277 Date: Fri, 17 Oct 2025 14:18:20 +0800 Subject: [PATCH 19/23] fix format --- .github/workflows/reusable_cross_repos_build.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 
deletions(-) diff --git a/.github/workflows/reusable_cross_repos_build.yml b/.github/workflows/reusable_cross_repos_build.yml index 7650cb3a..56c22d57 100644 --- a/.github/workflows/reusable_cross_repos_build.yml +++ b/.github/workflows/reusable_cross_repos_build.yml @@ -83,16 +83,16 @@ jobs: for patch in patches/*.diff; do patch_abspath=$(realpath ${patch}) repo=$(basename ${patch} ".diff") - if [ "${repo}" != "manifests" ]; then - cd ${repo} + if [[ ${repo} != "manifests" ]];then + cd ${repo} else - cd .repo/${repo} + cd .repo/${repo} fi git apply -3 --check --verbose ${patch_abspath} && git apply -3 --verbose ${patch_abspath} if [[ $? != 0 ]]; then exit 1 fi - cd - > /dev/null + cd - > /dev/null done - name: Upload workspace @@ -627,5 +627,5 @@ jobs: issue_number: context.issue.number, owner: context.repo.owner, repo: context.repo.repo, - body: `${sign} ${statusMessage}, see ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}.` + body: `${sign} ${statusMessage}, see ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}.` }) From 17b6ea1f850787b0a4808007a311237f408b979c Mon Sep 17 00:00:00 2001 From: han-jiang277 Date: Fri, 17 Oct 2025 16:10:49 +0800 Subject: [PATCH 20/23] Fix: Add ghcr credentials --- .../workflows/reusable_cross_repos_build.yml | 24 +++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/.github/workflows/reusable_cross_repos_build.yml b/.github/workflows/reusable_cross_repos_build.yml index 56c22d57..367e85da 100644 --- a/.github/workflows/reusable_cross_repos_build.yml +++ b/.github/workflows/reusable_cross_repos_build.yml @@ -14,6 +14,9 @@ jobs: if: ${{ github.event.issue.pull_request && contains(github.event.comment.body, 'build_prs') }} container: image: ghcr.io/${{ github.repository_owner }}/kernel:latest + credentials: + username: ${{ github.repository_owner }} + password: ${{ secrets.GITHUB_TOKEN }} steps: - name: Notify job started @@ -107,6 +110,9 @@ jobs: needs: setup container: image: ghcr.io/${{ github.repository_owner }}/kernel:latest + credentials: + username: ${{ github.repository_owner }} + password: ${{ secrets.GITHUB_TOKEN }} steps: - name: Download workspace @@ -245,6 +251,9 @@ jobs: needs: setup container: image: ghcr.io/${{ github.repository_owner }}/kernel:latest + credentials: + username: ${{ github.repository_owner }} + password: ${{ secrets.GITHUB_TOKEN }} steps: - name: Download workspace @@ -323,6 +332,9 @@ jobs: needs: setup container: image: ghcr.io/${{ github.repository_owner }}/kernel:latest + credentials: + username: ${{ github.repository_owner }} + password: ${{ secrets.GITHUB_TOKEN }} steps: - name: Download workspace @@ -370,6 +382,9 @@ jobs: needs: setup container: image: ghcr.io/${{ github.repository_owner }}/kernel:latest + credentials: + username: ${{ github.repository_owner }} + password: ${{ secrets.GITHUB_TOKEN }} steps: - name: Download workspace @@ -419,6 +434,9 @@ jobs: needs: setup container: image: ghcr.io/${{ github.repository_owner }}/kernel:latest + credentials: + username: ${{ github.repository_owner }} + password: ${{ secrets.GITHUB_TOKEN }} steps: - name: Download workspace @@ -468,6 +486,9 @@ jobs: needs: setup container: image: ghcr.io/${{ github.repository_owner }}/kernel:latest + credentials: + username: ${{ github.repository_owner }} + password: ${{ secrets.GITHUB_TOKEN }} steps: - name: Download workspace @@ -517,6 +538,9 @@ jobs: needs: setup container: image: ghcr.io/${{ github.repository_owner }}/kernel:latest + credentials: + username: 
${{ github.repository_owner }} + password: ${{ secrets.GITHUB_TOKEN }} steps: - name: Download workspace From 8dd50bba1636222feb275e16096b3e0798f8d34d Mon Sep 17 00:00:00 2001 From: han-jiang277 Date: Fri, 17 Oct 2025 16:30:49 +0800 Subject: [PATCH 21/23] Fix: add packages read permission --- .../workflows/reusable_cross_repos_build.yml | 29 +++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/.github/workflows/reusable_cross_repos_build.yml b/.github/workflows/reusable_cross_repos_build.yml index 367e85da..46ba30a3 100644 --- a/.github/workflows/reusable_cross_repos_build.yml +++ b/.github/workflows/reusable_cross_repos_build.yml @@ -10,6 +10,7 @@ jobs: contents: read checks: write pull-requests: write + packages: read if: ${{ github.event.issue.pull_request && contains(github.event.comment.body, 'build_prs') }} container: @@ -108,6 +109,10 @@ jobs: check_format: runs-on: ubuntu-latest needs: setup + permissions: + contents: read + checks: write + packages: read container: image: ghcr.io/${{ github.repository_owner }}/kernel:latest credentials: @@ -249,6 +254,10 @@ jobs: check_license: runs-on: ubuntu-latest needs: setup + permissions: + contents: read + checks: write + packages: read container: image: ghcr.io/${{ github.repository_owner }}/kernel:latest credentials: @@ -330,6 +339,10 @@ jobs: build_host: runs-on: ubuntu-latest needs: setup + permissions: + contents: read + checks: write + packages: read container: image: ghcr.io/${{ github.repository_owner }}/kernel:latest credentials: @@ -380,6 +393,10 @@ jobs: build_qemu_mps2_an385: runs-on: ubuntu-latest needs: setup + permissions: + contents: read + checks: write + packages: read container: image: ghcr.io/${{ github.repository_owner }}/kernel:latest credentials: @@ -432,6 +449,10 @@ jobs: build_qemu_mps3_an547: runs-on: ubuntu-latest needs: setup + permissions: + contents: read + checks: write + packages: read container: image: ghcr.io/${{ github.repository_owner }}/kernel:latest credentials: @@ -484,6 +505,10 @@ jobs: build_qemu_riscv64: runs-on: ubuntu-latest needs: setup + permissions: + contents: read + checks: write + packages: read container: image: ghcr.io/${{ github.repository_owner }}/kernel:latest credentials: @@ -536,6 +561,10 @@ jobs: build_qemu_aarch64: runs-on: ubuntu-latest needs: setup + permissions: + contents: read + checks: write + packages: read container: image: ghcr.io/${{ github.repository_owner }}/kernel:latest credentials: From 0c030077817b6ac744ad46daa9b5115bd7256e83 Mon Sep 17 00:00:00 2001 From: han-jiang277 Date: Fri, 17 Oct 2025 16:48:57 +0800 Subject: [PATCH 22/23] Fix: use posix sh in workflow --- .github/workflows/cross_repos_build.yml | 2 +- .github/workflows/reusable_cross_repos_build.yml | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/.github/workflows/cross_repos_build.yml b/.github/workflows/cross_repos_build.yml index d534e24d..0f20d194 100644 --- a/.github/workflows/cross_repos_build.yml +++ b/.github/workflows/cross_repos_build.yml @@ -6,4 +6,4 @@ on: jobs: call_workflow: - uses: vivoblueos/kernel/.github/workflows/reusable_cross_repos_build.yml@main + uses: ./.github/workflows/reusable_cross_repos_build.yml diff --git a/.github/workflows/reusable_cross_repos_build.yml b/.github/workflows/reusable_cross_repos_build.yml index 46ba30a3..8c0adabb 100644 --- a/.github/workflows/reusable_cross_repos_build.yml +++ b/.github/workflows/reusable_cross_repos_build.yml @@ -63,7 +63,6 @@ jobs: }); fetched_prs.push({owner: owner, repo: repo, pr_num: pr_num, sha: 
pullRequest.head.sha}); })); - // Download diffs. await Promise.all(patchUrls.map(async ({ repo, url }) => { console.log(`Downloading ${url}`); @@ -87,13 +86,13 @@ jobs: for patch in patches/*.diff; do patch_abspath=$(realpath ${patch}) repo=$(basename ${patch} ".diff") - if [[ ${repo} != "manifests" ]];then + if [ "${repo}" != "manifests" ]; then cd ${repo} else cd .repo/${repo} fi git apply -3 --check --verbose ${patch_abspath} && git apply -3 --verbose ${patch_abspath} - if [[ $? != 0 ]]; then + if [ $? != 0 ]; then exit 1 fi cd - > /dev/null From c09f8aa8a68a4cf467025eeb9242606afe598f2c Mon Sep 17 00:00:00 2001 From: han-jiang277 Date: Fri, 17 Oct 2025 16:56:23 +0800 Subject: [PATCH 23/23] test --- .github/workflows/reusable_cross_repos_build.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/reusable_cross_repos_build.yml b/.github/workflows/reusable_cross_repos_build.yml index 8c0adabb..f031ec52 100644 --- a/.github/workflows/reusable_cross_repos_build.yml +++ b/.github/workflows/reusable_cross_repos_build.yml @@ -1,5 +1,6 @@ name: Reusable trigger of building cross multiple repositories + on: workflow_call:
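After PATCH 22/23 switches the step back to POSIX test syntax, the Apply patches step is equivalent to the sh sketch below (a sketch only; it assumes the container step runs under plain sh, which is why [ ... ] is used instead of the bash-only [[ ... ]]):

#!/bin/sh
# Apply each downloaded PR diff inside its matching repo checkout.
set -e
set -x
for patch in patches/*.diff; do
  patch_abspath=$(realpath "${patch}")
  repo=$(basename "${patch}" ".diff")
  # The manifests checkout lives under .repo/ after repo init/sync.
  if [ "${repo}" != "manifests" ]; then
    cd "${repo}"
  else
    cd ".repo/${repo}"
  fi
  # With set -e, a failed check or apply aborts the whole step.
  git apply -3 --check --verbose "${patch_abspath}"
  git apply -3 --verbose "${patch_abspath}"
  cd - > /dev/null
done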