diff --git a/CMSIS/Core_AArch64/Include/CMSIS_Include_core_aarch64.cmake b/CMSIS/Core_AArch64/Include/CMSIS_Include_core_aarch64.cmake
new file mode 100644
index 000000000..5b5997984
--- /dev/null
+++ b/CMSIS/Core_AArch64/Include/CMSIS_Include_core_aarch64.cmake
@@ -0,0 +1,12 @@
+#Description: CMSIS Include For Cortex-A; user_visible: True
+include_guard(GLOBAL)
+message("CMSIS_Include_core_ca component is included.")
+
+target_sources(${MCUX_SDK_PROJECT_NAME} PRIVATE
+    ${CMAKE_CURRENT_LIST_DIR}/../Source/cache_armv8a.c
+    ${CMAKE_CURRENT_LIST_DIR}/../Source/mmu_armv8a.c
+)
+
+target_include_directories(${MCUX_SDK_PROJECT_NAME} PUBLIC
+    ${CMAKE_CURRENT_LIST_DIR}/.
+)
diff --git a/CMSIS/Core_AArch64/Include/cache_armv8a.h b/CMSIS/Core_AArch64/Include/cache_armv8a.h
new file mode 100644
index 000000000..be3a3c409
--- /dev/null
+++ b/CMSIS/Core_AArch64/Include/cache_armv8a.h
@@ -0,0 +1,131 @@
+/**************************************************************************//**
+ * @file cache_armv8a.h
+ * @brief CMSIS AARCH64 Cache API header file
+ * @version V1.0.0
+ * @date 21. January 2022
+ ******************************************************************************/
+
+/*
+ * Copyright 2022-2023 NXP
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#if defined ( __ICCARM__ )
+  #pragma system_include        /* treat file as system include file for MISRA check */
+#elif defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
+  #pragma clang system_header   /* treat file as system include file */
+#endif
+
+#ifndef __CACHE_ARMV8A_H
+#define __CACHE_ARMV8A_H
+
+#include <stdint.h>
+#include <stddef.h>
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+#define dcache_ops(op, operand) \
+({ \
+    __asm__ volatile ("dc " __STRINGIFY(op) ", %0" \
+                      ::"r" (operand): "memory"); \
+})
+
+
+/* Invalidate instruction cache by virtual address to PoU */
+static inline void icache_invalidate_range(uintptr_t addr, size_t size)
+{
+    uintptr_t cur = addr;
+    uintptr_t end_addr = cur + size;
+
+    /* Align address to line size */
+    cur &= ~(ICACHE_LINE_SIZE - 1);
+
+    do {
+        __asm__ volatile ("ic ivau, %0" ::"r" (cur): "memory");
+        cur += ICACHE_LINE_SIZE;
+    } while (cur < end_addr);
+
+    __DSB();
+    __ISB();
+}
+
+/* Invalidate all instruction cache to PoU */
+static inline void icache_invalidate_all(void)
+{
+    __asm__ volatile ("ic iallu" ::: "memory");
+    __DSB();
+    __ISB();
+}
+
+/* Clean data cache by virtual address to PoC */
+static inline void dcache_clean_range(uintptr_t addr, size_t size)
+{
+    uintptr_t cur = addr;
+    uintptr_t end = addr + size;
+
+    /* Align address to line size */
+    cur &= ~(DCACHE_LINE_SIZE - 1);
+
+    while (cur < end) {
+        dcache_ops(cvac, cur);
+        cur += DCACHE_LINE_SIZE;
+    }
+
+    __DSB();
+}
+
+/* Invalidate data cache by virtual address to PoC. Partial cache lines at
+ * either end of the range are cleaned and invalidated (civac) instead of
+ * just invalidated, so unrelated data sharing those lines is not lost. */
+static inline void dcache_invalidate_range(uintptr_t addr, size_t size)
+{
+    uintptr_t cur = addr;
+    uintptr_t end = addr + size;
+
+    if (end & (DCACHE_LINE_SIZE - 1)) {
+        end &= ~(DCACHE_LINE_SIZE - 1);
+        dcache_ops(civac, end);
+    }
+
+    if (cur & (DCACHE_LINE_SIZE - 1)) {
+        cur &= ~(DCACHE_LINE_SIZE - 1);
+        if (cur != end)
+            dcache_ops(civac, cur);
+        cur += DCACHE_LINE_SIZE;
+    }
+
+    while (cur < end) {
+        dcache_ops(ivac, cur);
+        cur += DCACHE_LINE_SIZE;
+    }
+
+    __DSB();
+}
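+
+/* Illustrative usage sketch (not part of the API): typical maintenance of a
+ * buffer shared with a DMA-capable device, using the range helpers above.
+ * `dma_buf` is a hypothetical buffer name.
+ *
+ * \code
+ * static uint8_t dma_buf[256];
+ *
+ * // CPU -> device: push CPU writes out to the point of coherency
+ * dcache_clean_range((uintptr_t)dma_buf, sizeof(dma_buf));
+ * // ... start the device read / DMA transfer ...
+ *
+ * // device -> CPU: discard stale cached copies before reading device data
+ * dcache_invalidate_range((uintptr_t)dma_buf, sizeof(dma_buf));
+ * \endcode
+ */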
+
+/* Clean and invalidate data cache by virtual address to PoC */
+static inline void dcache_clean_invalidate_range(uintptr_t addr, size_t size)
+{
+    uintptr_t cur = addr;
+    uintptr_t end = addr + size;
+
+    /* Align address to line size */
+    cur &= ~(DCACHE_LINE_SIZE - 1);
+
+    while (cur < end) {
+        dcache_ops(civac, cur);
+        cur += DCACHE_LINE_SIZE;
+    }
+
+    __DSB();
+}
+
+void dcache_clean_all(void);
+void dcache_invalidate_all(void);
+void dcache_clean_invalidate_all(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CACHE_ARMV8A_H */
diff --git a/CMSIS/Core_AArch64/Include/cmsis_compiler.h b/CMSIS/Core_AArch64/Include/cmsis_compiler.h
new file mode 100644
index 000000000..b8e3669c7
--- /dev/null
+++ b/CMSIS/Core_AArch64/Include/cmsis_compiler.h
@@ -0,0 +1,92 @@
+/**************************************************************************//**
+ * @file cmsis_compiler.h
+ * @brief CMSIS compiler generic header file
+ * @version V1.0.0
+ * @date 05. October 2021
+ ******************************************************************************/
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * Copyright 2021-2023 NXP
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __CMSIS_COMPILER_H
+#define __CMSIS_COMPILER_H
+
+#if defined ( __ICCARM__ )
+  #pragma system_include        /* treat file as system include file for MISRA check */
+#elif defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
+  #pragma clang system_header   /* treat file as system include file */
+#endif
+
+#include <stdint.h>
+#include <stddef.h>
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+/* Define compiler macros for CPU architecture, if not yet defined by the
+ * compiler default macros
+ */
+#if __ARM_ARCH_8A__
+/* Macro already defined */
+#else
+  #if defined(__ARM_ARCH_8A) && __ARM_ARCH_8A == 1
+    #define __ARM_ARCH_8A__ 1
+  #endif /* __ARM_ARCH_8A == 1 */
+#endif
+
+#if defined ( __CC_ARM )
+  #define __ASM __asm           /*!< asm keyword for ARM Compiler */
+  #define __INLINE __inline     /*!< inline keyword for ARM Compiler */
+  #define __STATIC_INLINE static __inline
+
+#elif defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
+  #define __ASM __asm           /*!< asm keyword for ARM Compiler */
+  #define __INLINE __inline     /*!< inline keyword for ARM Compiler */
+  #define __STATIC_INLINE static __inline
+
+#elif defined ( __GNUC__ )
+  #include "cmsis_gcc.h"
+
+#elif defined ( __ICCARM__ )
+  #include "cmsis_iar.h"
+
+#else
+  #error Unknown compiler
+#endif
+
+/* IO definitions (access restrictions to peripheral registers) */
+#ifdef __cplusplus
+  #define __I volatile          /*!< Defines 'read only' permissions */
+#else
+  #define __I volatile const    /*!< Defines 'read only' permissions */
+#endif
+#define __O volatile            /*!< Defines 'write only' permissions */
+#define __IO volatile           /*!< Defines 'read / write' permissions */
+
+/* following defines should be used for structure members */
+#define __IM volatile const     /*! Defines 'read only' structure member permissions */
+#define __OM volatile           /*! Defines 'write only' structure member permissions */
+#define __IOM volatile          /*! Defines 'read / write' structure member permissions */
+#define RESERVED(N, T) T RESERVED##N; // placeholder struct members used for "reserved" areas
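+
+/* Illustrative note: RESERVED(N, T) expands to a uniquely named padding
+ * member, so register structs can mark unused areas without name clashes.
+ * For example:
+ *
+ * \code
+ * RESERVED(1[11], uint32_t)   // expands to: uint32_t RESERVED1[11];
+ * RESERVED(2, uint32_t)       // expands to: uint32_t RESERVED2;
+ * \endcode
+ */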
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CMSIS_COMPILER_H */
diff --git a/CMSIS/Core_AArch64/Include/cmsis_gcc.h b/CMSIS/Core_AArch64/Include/cmsis_gcc.h
new file mode 100644
index 000000000..812291bfb
--- /dev/null
+++ b/CMSIS/Core_AArch64/Include/cmsis_gcc.h
@@ -0,0 +1,269 @@
+/**************************************************************************//**
+ * @file cmsis_gcc.h
+ * @brief CMSIS compiler specific macros, functions, instructions
+ * @version V1.0.0
+ * @date 05. October 2021
+ ******************************************************************************/
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * Copyright 2021-2023 NXP
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __CMSIS_GCC_H
+#define __CMSIS_GCC_H
+
+/* CMSIS compiler specific defines */
+#ifndef __ASM
+  #define __ASM __asm
+#endif
+#ifndef __FORCEINLINE
+  #define __FORCEINLINE __attribute__((always_inline))
+#endif
+#ifndef __INLINE
+  #define __INLINE inline
+#endif
+#ifndef __STATIC_INLINE
+  #define __STATIC_INLINE static inline
+#endif
+#ifndef __STATIC_FORCEINLINE
+  #define __STATIC_FORCEINLINE __attribute__((always_inline)) static inline
+#endif
+#ifndef __WEAK
+  #define __WEAK __attribute__((weak))
+#endif
+
+#ifndef __STRINGIFY
+  #define __STRINGIFY(x) #x
+#endif
+
+#ifndef __MSR
+  #define __MSR(sysreg, val) \
+    __asm volatile ("msr "__STRINGIFY(sysreg)", %0\n" : : "r"((uint64_t)(val)))
+#endif
+
+#ifndef __MRS
+#define __MRS(sysreg, pVal) \
+    __asm volatile ("mrs %0, "__STRINGIFY(sysreg)"\n" : "=r"((*pVal)))
+#endif
+
+#ifndef __WFI
+#define __WFI() \
+    __asm volatile ("wfi")
+#endif
+
+
+/* ###########################  Core Function Access  ########################### */
+/** \ingroup  CMSIS_Core_FunctionInterface
+    \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions
+  @{
+ */
+
+
+/**
+  \brief   Get Interrupt Mask Bits
+  \details Returns the current state of the interrupt mask bits from the DAIF register.
+  \return  Interrupt Mask value
+ */
+__STATIC_FORCEINLINE uint64_t __get_DAIF(void)
+{
+    uint64_t result;
+    __MRS(DAIF, &result);
+    return result;
+}
+
+
+/**
+  \brief   Enable IRQ Interrupts
+  \details Enables IRQ interrupts by clearing the I-bit in the DAIF.
+ */
+__STATIC_FORCEINLINE void __enable_irq(void)
+{
+    __ASM volatile ("msr daifclr, #2" : : : "memory");
+}
+
+
+/**
+  \brief   Disable IRQ Interrupts
+  \details Disables IRQ interrupts by setting the I-bit in the DAIF.
+ */
+__STATIC_FORCEINLINE void __disable_irq(void)
+{
+    __ASM volatile ("msr daifset, #2" : : : "memory");
+}
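+
+/* Illustrative usage sketch: save and restore the interrupt mask state around
+ * a critical section using the accessors above. DAIF_I_BIT comes from
+ * core_common.h, which is available once a device core header is included.
+ *
+ * \code
+ * uint64_t daif = __get_DAIF();    // save the current mask state
+ * __disable_irq();                 // mask IRQs (DAIF.I = 1)
+ * // ... critical section ...
+ * if (!(daif & DAIF_I_BIT)) {
+ *     __enable_irq();              // re-enable only if IRQs were enabled
+ * }
+ * \endcode
+ */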
+
+
+/*@} end of CMSIS_Core_RegAccFunctions */
+
+
+/* ##########################  Core Instruction Access  ######################### */
+/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface
+  Access to dedicated instructions
+  @{
+*/
+
+/**
+  \brief   Hypervisor call with 2 arguments
+  \details Makes a hypervisor call with two arguments stored in x0 and x1.
+ */
+#define HVC_2(imm, x0, x1) __asm volatile ( \
+    "mov x0, %0 \n\t" \
+    "mov x1, %1 \n\t" \
+    "hvc #" __STRINGIFY(imm) "\n\t" \
+    : : "r" (x0), "r" (x1) : "x0", "x1", "memory")
+
+/**
+  \brief   Multiprocessor Affinity
+  \details Indicates the core number in the Cortex-Axx processor.
+ */
+__STATIC_FORCEINLINE uint64_t __get_MPIDR_EL1(void)
+{
+    uint64_t result;
+    __MRS(MPIDR_EL1, &result);
+    return result;
+}
+
+#define MPIDR_GetCoreID() \
+    ({ uint64_t mpidr = __get_MPIDR_EL1(); \
+       (mpidr >> (8 * MPIDR_SUPPORT_MT(mpidr))) & MPIDR_AFFLVL_MASK; })
+
+/**
+  \brief   Instruction Synchronization Barrier
+  \details Instruction Synchronization Barrier flushes the pipeline in the processor,
+           so that all instructions following the ISB are fetched from cache or memory,
+           after the instruction has been completed.
+ */
+#ifndef __ISB
+__STATIC_FORCEINLINE void __ISB(void)
+{
+    __ASM volatile ("isb":::"memory");
+}
+#endif
+
+/**
+  \brief   Data Synchronization Barrier
+  \details Acts as a special kind of Data Memory Barrier.
+           It completes when all explicit memory accesses before this instruction complete.
+ */
+#ifndef __DSB
+__STATIC_FORCEINLINE void __DSB(void)
+{
+    __ASM volatile ("dsb sy":::"memory");
+}
+#endif
+
+/**
+  \brief   Data Memory Barrier
+  \details Ensures the apparent order of the explicit memory operations before
+           and after the instruction, without ensuring their completion.
+ */
+#ifndef __DMB
+__STATIC_FORCEINLINE void __DMB(void)
+{
+    __ASM volatile ("dmb sy":::"memory");
+}
+#endif
+
+/**
+  \brief   Reverse byte order (32 bit)
+  \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412.
+  \param [in]    value  Value to reverse
+  \return               Reversed value
+ */
+__STATIC_FORCEINLINE uint32_t __REV(uint32_t value)
+{
+    return __builtin_bswap32(value);
+}
+
+
+/**
+  \brief   Reverse byte order (16 bit)
+  \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856.
+  \param [in]    value  Value to reverse
+  \return               Reversed value
+ */
+__STATIC_FORCEINLINE uint32_t __REV16(uint32_t value)
+{
+    /* Swap the bytes of each halfword. __builtin_bswap16() would operate on
+       (and truncate to) a single 16-bit value, contradicting the behaviour
+       documented above. */
+    return ((value & 0x00FF00FFU) << 8) | ((value & 0xFF00FF00U) >> 8);
+}
+
+/**
+  \brief   Breakpoint
+  \details Causes the processor to enter Debug state.
+           Debug tools can use this to investigate system state when the instruction at a particular address is reached.
+  \param [in]    value  is ignored by the processor.
+                 If required, a debugger can use it to store additional information about the breakpoint.
+ */
+#define __BKPT(value) __ASM volatile ("brk "#value)
+
+/**
+  \brief   No Operation
+  \details No Operation does nothing. This instruction can be used for code alignment purposes.
+ */
+
+__STATIC_FORCEINLINE void __NOP(void)
+{
+    __ASM volatile ("nop");
+}
+
+/**
+  \brief   Count leading zeros
+  \details Counts the number of leading zeros of a data value.
+  \param [in]  value  Value to count the leading zeros
+  \return             number of leading zeros in value
+ */
+__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value)
+{
+    /* Even though __builtin_clz produces a CLZ instruction on ARM, formally
+       __builtin_clz(0) is undefined behaviour, so handle this case specially.
+       This guarantees ARM-compatible results if happening to compile on a non-ARM
+       target, and ensures the compiler doesn't decide to activate any
+       optimisations using the logic "value was passed to __builtin_clz, so it
+       is non-zero".
+       ARM GCC 7.3 and possibly earlier will optimise this test away, leaving a
+       single CLZ instruction.
+     */
+    if (value == 0U)
+    {
+        return 32U;
+    }
+    return __builtin_clz(value);
+}
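+
+/* Illustrative examples of the bit-manipulation helpers above (values follow
+ * directly from the definitions; this block is documentation only):
+ *
+ * \code
+ * __REV(0x12345678U);    // -> 0x78563412
+ * __REV16(0x12345678U);  // -> 0x34127856 (bytes swapped within each halfword)
+ * __CLZ(0x00010000U);    // -> 15 (bits 31..17 are zero)
+ * __CLZ(0U);             // -> 32 (special-cased, see above)
+ * \endcode
+ */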
+/**
+  \brief   likely/unlikely() branch prediction
+  \details Gives hints to the compiler to favor either side of a jump instruction
+  \param [in]  expr  Boolean expression under evaluation
+  \return            The same boolean value
+ */
+#ifndef unlikely
+__STATIC_FORCEINLINE long unlikely(long expr)
+{
+    return __builtin_expect(expr, 0L);
+}
+#endif
+
+#ifndef likely
+__STATIC_FORCEINLINE long likely(long expr)
+{
+    return __builtin_expect(expr, 1L);
+}
+#endif
+
+/*@}*/ /* end of group CMSIS_Core_InstructionInterface */
+
+
+#endif /* __CMSIS_GCC_H */
diff --git a/CMSIS/Core_AArch64/Include/cmsis_iar.h b/CMSIS/Core_AArch64/Include/cmsis_iar.h
new file mode 100644
index 000000000..b4ef06882
--- /dev/null
+++ b/CMSIS/Core_AArch64/Include/cmsis_iar.h
@@ -0,0 +1,30 @@
+/**************************************************************************//**
+ * @file cmsis_iar.h
+ * @brief CMSIS compiler specific macros, functions, instructions
+ * @version V1.0.0
+ * @date 05. October 2021
+ ******************************************************************************/
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * Copyright 2021-2023 NXP
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __CMSIS_IAR_H
+#define __CMSIS_IAR_H
+
+#error Unsupported compiler
+
+#endif /* __CMSIS_IAR_H */
diff --git a/CMSIS/Core_AArch64/Include/core_ca53.h b/CMSIS/Core_AArch64/Include/core_ca53.h
new file mode 100644
index 000000000..3b2d618f8
--- /dev/null
+++ b/CMSIS/Core_AArch64/Include/core_ca53.h
@@ -0,0 +1,108 @@
+/**************************************************************************//**
+ * @file core_ca53.h
+ * @brief CMSIS Cortex-A53 Core Peripheral Access Layer Header File
+ * @version V1.0.0
+ * @date 05. October 2021
+ ******************************************************************************/
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * Copyright 2021,2023 NXP
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if defined ( __ICCARM__ )
+  #pragma system_include        /* treat file as system include file for MISRA check */
+#elif defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
+  #pragma clang system_header   /* treat file as system include file */
+#endif
+
+#ifndef __CORE_CA53_H_GENERIC
+#define __CORE_CA53_H_GENERIC
+
+#include <stdint.h>
+#include <stdbool.h>
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+/*******************************************************************************
+ *                 CMSIS definitions
+ ******************************************************************************/
+
+#define __CORTEX_Axx (53U) /*!< Cortex-Axx Core */
+
+/* check device defines and use defaults */
+#if defined __CHECK_DEVICE_DEFINES
+  #ifndef __CA53_REV
+    #define __CA53_REV 0x0000U
+    #warning "__CA53_REV not defined in device header file; using default!"
+  #endif
+
+  #ifndef __CACHE_PRESENT
+    #define __CACHE_PRESENT 1U
+    #warning "__CACHE_PRESENT not defined in device header file; using default!"
+  #endif
+
+  #ifndef __FPU_PRESENT
+    #define __FPU_PRESENT 1U
+    #define __FPU_USED 1U
+    #warning "__FPU_PRESENT not defined in device header file; using default!"
+  #endif
+
+  #ifndef __GIC_PRESENT
+    #define __GIC_PRESENT 1U
+    #warning "__GIC_PRESENT not defined in device header file; using default!"
+  #endif
+
+  #ifndef __MMU_PRESENT
+    #define __MMU_PRESENT 1U
+    #warning "__MMU_PRESENT not defined in device header file; using default!"
+  #endif
+#endif
+
+#include "cmsis_compiler.h" /* Core Instruction and Function Access */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CORE_CA53_H_GENERIC */
+
+#ifndef __CMSIS_GENERIC
+
+#ifndef __CORE_CA53_H_DEPENDANT
+#define __CORE_CA53_H_DEPENDANT
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+/*******************************************************************************
+ *                 Cache Functions
+ ******************************************************************************/
+#define ICACHE_LINE_SIZE (64)
+#define DCACHE_LINE_SIZE (64)
+
+#include "core_common.h"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CORE_CA53_H_DEPENDANT */
+
+#endif /* __CMSIS_GENERIC */
diff --git a/CMSIS/Core_AArch64/Include/core_ca55.h b/CMSIS/Core_AArch64/Include/core_ca55.h
new file mode 100644
index 000000000..f0b10bd59
--- /dev/null
+++ b/CMSIS/Core_AArch64/Include/core_ca55.h
@@ -0,0 +1,109 @@
+/**************************************************************************//**
+ * @file core_ca55.h
+ * @brief CMSIS Cortex-A55 Core Peripheral Access Layer Header File
+ * @version V1.0.0
+ * @date Nov. 2022
+ ******************************************************************************/
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * Copyright 2021,2023 NXP
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if defined ( __ICCARM__ )
+  #pragma system_include        /* treat file as system include file for MISRA check */
+#elif defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
+  #pragma clang system_header   /* treat file as system include file */
+#endif
+
+#ifndef __CORE_CA55_H_GENERIC
+#define __CORE_CA55_H_GENERIC
+
+#include <stdint.h>
+#include <stdbool.h>
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+/*******************************************************************************
+ *                 CMSIS definitions
+ ******************************************************************************/
+
+#define __CORTEX_Axx (55U) /*!< Cortex-Axx Core */
+
+/* check device defines and use defaults */
+#if defined __CHECK_DEVICE_DEFINES
+  #ifndef __CA55_REV
+    #define __CA55_REV 0x0000U
+    #warning "__CA55_REV not defined in device header file; using default!"
+  #endif
+
+  #ifndef __CACHE_PRESENT
+    #define __CACHE_PRESENT 1U
+    #warning "__CACHE_PRESENT not defined in device header file; using default!"
+  #endif
+
+  #ifndef __FPU_PRESENT
+    #define __FPU_PRESENT 1U
+    #define __FPU_USED 1U
+    #warning "__FPU_PRESENT not defined in device header file; using default!"
+  #endif
+
+  #ifndef __GIC_PRESENT
+    #define __GIC_PRESENT 1U
+    #warning "__GIC_PRESENT not defined in device header file; using default!"
+  #endif
+
+  #ifndef __MMU_PRESENT
+    #define __MMU_PRESENT 1U
+    #warning "__MMU_PRESENT not defined in device header file; using default!"
+  #endif
+#endif
+
+#include "cmsis_compiler.h" /* Core Instruction and Function Access */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CORE_CA55_H_GENERIC */
+
+#ifndef __CMSIS_GENERIC
+
+#ifndef __CORE_CA55_H_DEPENDANT
+#define __CORE_CA55_H_DEPENDANT
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+/*******************************************************************************
+ *                 Cache Functions
+ ******************************************************************************/
+
+#define ICACHE_LINE_SIZE (64)
+#define DCACHE_LINE_SIZE (64)
+
+#include "core_common.h"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CORE_CA55_H_DEPENDANT */
+
+#endif /* __CMSIS_GENERIC */
diff --git a/CMSIS/Core_AArch64/Include/core_common.h b/CMSIS/Core_AArch64/Include/core_common.h
new file mode 100644
index 000000000..013ca26f2
--- /dev/null
+++ b/CMSIS/Core_AArch64/Include/core_common.h
@@ -0,0 +1,138 @@
+/**************************************************************************//**
+ * @file core_common.h
+ * @brief CMSIS Cortex-A AArch64 Core Common Header File
+ * @version V1.0.0
+ * @date 06. Feb 2023
+ ******************************************************************************/
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * Copyright 2023 NXP
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if defined ( __ICCARM__ )
+  #pragma system_include        /* treat file as system include file for MISRA check */
+#elif defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
+  #pragma clang system_header   /* treat file as system include file */
+#endif
+
+#ifndef __CORE_COMMON_H
+#define __CORE_COMMON_H
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+/*******************************************************************************
+ *                 Register Definitions
+ ******************************************************************************/
+
+#ifndef BIT
+#define BIT(n) (1UL << (n))   /* unsigned constant, so BIT(31) does not overflow a signed int */
+#endif
+
+/* DAIF Register */
+#define DAIF_F_BIT BIT(6)
+#define DAIF_I_BIT BIT(7)
+#define DAIF_A_BIT BIT(8)
+#define DAIF_D_BIT BIT(9)
+
+/* System Control Register */
+#define SCTLR_M_BIT BIT(0)
+#define SCTLR_A_BIT BIT(1)
+#define SCTLR_C_BIT BIT(2)
+#define SCTLR_SA_BIT BIT(3)
+#define SCTLR_I_BIT BIT(12)
+
+/* Exception levels EL0-EL3 */
+#define MODE_EL_SHIFT (0x2)
+#define MODE_EL_MASK (0x3)
+
+#define MODE_EL3 (0x3)
+#define MODE_EL2 (0x2)
+#define MODE_EL1 (0x1)
+#define MODE_EL0 (0x0)
+
+#define GET_EL(_mode) (((_mode) >> MODE_EL_SHIFT) & MODE_EL_MASK)
+
+/* MPIDR */
+#define MPIDR_AFFLVL_MASK (0xfful)
+#define MPIDR_AFF0_SHIFT (0)
+#define MPIDR_AFF1_SHIFT (8)
+#define MPIDR_AFF2_SHIFT (16)
+#define MPIDR_AFF3_SHIFT (32)
+#define MPIDR_MT_MASK (0x1)
+#define MPIDR_MT_SHIFT (24)
+
+#define MPIDR_SUPPORT_MT(mpidr) ((mpidr >> MPIDR_MT_SHIFT) & MPIDR_MT_MASK)
+
+
+#define MPIDR_TO_AFF_LEVEL(mpidr, aff_level) \
+    (((mpidr) >> MPIDR_AFF##aff_level##_SHIFT) & MPIDR_AFFLVL_MASK)
+
+#define MPIDR_AFFINITY_MASK \
+    ((MPIDR_AFFLVL_MASK << MPIDR_AFF3_SHIFT) | \
+     (MPIDR_AFFLVL_MASK << MPIDR_AFF2_SHIFT) | \
+     (MPIDR_AFFLVL_MASK << MPIDR_AFF1_SHIFT) | \
+     (MPIDR_AFFLVL_MASK << MPIDR_AFF0_SHIFT))
+
+/*******************************************************************************
+ *                 Cache Functions
+ ******************************************************************************/
+
+#if defined (__CACHE_PRESENT) && (__CACHE_PRESENT == 1U)
+
+  #include "cache_armv8a.h"
+
+#endif
+
+
+/*******************************************************************************
+ *                 GIC Functions
+ ******************************************************************************/
+
+#if defined (__GIC_PRESENT) && (__GIC_PRESENT == 1U)
+
+  #include "gic_v3.h"
+
+#endif
+
+
+/*******************************************************************************
+ *                 MMU Functions
+ ******************************************************************************/
+
+#if defined (__MMU_PRESENT) && (__MMU_PRESENT == 1U)
+
+  #include "mmu_armv8a.h"
+
+#endif
+
+
+/*******************************************************************************
+ *                 Timer Functions
+ ******************************************************************************/
+
+#if defined (__TIM_PRESENT) && (__TIM_PRESENT == 1U)
+  #include "timer_armv8a.h"
+#endif
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CORE_COMMON_H */
diff --git a/CMSIS/Core_AArch64/Include/gic_v3.h b/CMSIS/Core_AArch64/Include/gic_v3.h
new file mode 100644
index 000000000..1eb4e13cf
--- /dev/null
+++ b/CMSIS/Core_AArch64/Include/gic_v3.h
@@ -0,0 +1,949 @@
+/**************************************************************************//**
+ * @file gic_v3.h
+ * @brief CMSIS Cortex-A53 Generic Interrupt Controller API header file
+ * @version V1.0.1
+ * @date 05. October 2021
+ ******************************************************************************/
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * Copyright 2021-2023 NXP
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if defined ( __ICCARM__ )
+  #pragma system_include        /* treat file as system include file for MISRA check */
+#elif defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
+  #pragma clang system_header   /* treat file as system include file */
+#endif
+
+#ifndef __GIC_V3_H
+#define __GIC_V3_H
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+/*******************************************************************************
+ *                 GIC Data Types
+ ******************************************************************************/
+
+/** \brief AArch64 System registers to access the Generic Interrupt Controller CPU interface
+*/
+#if defined(__GNUC__)
+  #define ICC_BPR0_EL1 S3_0_C12_C8_3
+  #define ICC_BPR1_EL1 S3_0_C12_C12_3
+  #define ICC_CTLR_EL1 S3_0_C12_C12_4
+  #define ICC_CTLR_EL3 S3_6_C12_C12_4
+  #define ICC_EOIR0_EL1 S3_0_C12_C8_1
+  #define ICC_EOIR1_EL1 S3_0_C12_C12_1
+  #define ICC_HPPIR0_EL1 S3_0_C12_C8_2
+  #define ICC_HPPIR1_EL1 S3_0_C12_C12_2
+  #define ICC_IAR0_EL1 S3_0_C12_C8_0
+  #define ICC_IAR1_EL1 S3_0_C12_C12_0
+  #define ICC_IGRPEN0_EL1 S3_0_C12_C12_6
+  #define ICC_IGRPEN1_EL1 S3_0_C12_C12_7
+  #define ICC_IGRPEN1_EL3 S3_6_C12_C12_7
+  #define ICC_PMR_EL1 S3_0_C4_C6_0
+  #define ICC_RPR_EL1 S3_0_C12_C11_3
+  #define ICC_SGI0R_EL1 S3_0_C12_C11_7
+  #define ICC_SGI1R_EL1 S3_0_C12_C11_5
+  #define ICC_SRE_EL1 S3_0_C12_C12_5
+  #define ICC_SRE_EL2 S3_4_C12_C9_5
+  #define ICC_SRE_EL3 S3_6_C12_C12_5
+#endif /* __GNUC__ */
+
+/* ICC_SGIR */
+#define ICC_SGIR_TARGETLIST_SHIFT (0)
+#define ICC_SGIR_TARGETLIST_MASK (0xffff)
+#define ICC_SGIR_AFF_MASK (0xff)
+#define ICC_SGIR_AFF1_SHIFT (16)
+#define ICC_SGIR_INTID_SHIFT (24)
+#define ICC_SGIR_INTID_MASK (0xf)
+#define ICC_SGIR_AFF2_SHIFT (32)
+#define ICC_SGIR_IRM_SHIFT (40)
+#define ICC_SGIR_IRM_MASK (0x1)
+#define ICC_SGIR_RS_SHIFT (44)
+#define ICC_SGIR_RS_MASK (0xf)
+#define ICC_SGIR_AFF3_SHIFT (48)
+
+#define MPIDR_TO_RS(mpidr) (MPIDR_TO_AFF_LEVEL(mpidr, 0) >> 4)
+
+#define COMPOSE_ICC_SGIR_VALUE(aff3, aff2, aff1, intid, irm, rs, tlist) \
+    ((((uint64_t)(aff3) & ICC_SGIR_AFF_MASK) << ICC_SGIR_AFF3_SHIFT) | \
+     (((uint64_t)(rs) & ICC_SGIR_RS_MASK) << ICC_SGIR_RS_SHIFT) | \
+     (((uint64_t)(irm) & ICC_SGIR_IRM_MASK) << ICC_SGIR_IRM_SHIFT) | \
+     (((uint64_t)(aff2) & ICC_SGIR_AFF_MASK) << ICC_SGIR_AFF2_SHIFT) | \
+     (((intid) & ICC_SGIR_INTID_MASK) << ICC_SGIR_INTID_SHIFT) | \
+     (((aff1) & ICC_SGIR_AFF_MASK) << ICC_SGIR_AFF1_SHIFT) | \
+     (((tlist) & ICC_SGIR_TARGETLIST_MASK) << ICC_SGIR_TARGETLIST_SHIFT))
+
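+/* Illustrative sketch (documentation only): composing an ICC_SGI1R_EL1 value
+ * for SGI 3 aimed at one core identified by an MPIDR-style affinity value
+ * `aff` (a made-up example; see GIC_SendSGI_ARE() below for the real helper).
+ *
+ * \code
+ * uint64_t aff = 0x0000000000000100ULL;          // Aff1 = 1, Aff0 = 0
+ * uint64_t val = COMPOSE_ICC_SGIR_VALUE(
+ *     MPIDR_TO_AFF_LEVEL(aff, 3),                // Aff3
+ *     MPIDR_TO_AFF_LEVEL(aff, 2),                // Aff2
+ *     MPIDR_TO_AFF_LEVEL(aff, 1),                // Aff1
+ *     3,                                         // INTID of the SGI
+ *     0,                                         // IRM = 0: route by affinity
+ *     MPIDR_TO_RS(aff),                          // range selector from Aff0
+ *     1U << (MPIDR_TO_AFF_LEVEL(aff, 0) % 16U)); // target list bit for Aff0
+ * \endcode
+ */
+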
+#define GIC_REDISTRIBUTOR_STRIDE (0x20000) +#define GICR_SGI_BASE_OFF (0x10000) + +#define GICR_TYPER_LAST_SHIFT (4) +#define GICR_TYPER_LAST_MASK (1 << GICR_TYPER_LAST_SHIFT) +#define GICR_TYPER_AFF_SHIFT (32) + +#define GICR_WAKER_PS_SHIFT (1) +#define GICR_WAKER_CA_SHIFT (2) + + +/** \brief Structure type to access the Generic Interrupt Controller Distributor (GICD) +*/ +typedef struct +{ + __IOM uint32_t CTLR; /*!< \brief Offset: 0x000 (R/W) Distributor Control Register */ + __IM uint32_t TYPER; /*!< \brief Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + __IM uint32_t IIDR; /*!< \brief Offset: 0x008 (R/ ) Distributor Implementer Identification Register */ + RESERVED(0, uint32_t) + __IOM uint32_t STATUSR; /*!< \brief Offset: 0x010 (R/W) Error Reporting Status Register, optional */ + RESERVED(1[11], uint32_t) + __OM uint32_t SETSPI_NSR; /*!< \brief Offset: 0x040 ( /W) Set SPI Register */ + RESERVED(2, uint32_t) + __OM uint32_t CLRSPI_NSR; /*!< \brief Offset: 0x048 ( /W) Clear SPI Register */ + RESERVED(3, uint32_t) + __OM uint32_t SETSPI_SR; /*!< \brief Offset: 0x050 ( /W) Set SPI, Secure Register */ + RESERVED(4, uint32_t) + __OM uint32_t CLRSPI_SR; /*!< \brief Offset: 0x058 ( /W) Clear SPI, Secure Register */ + RESERVED(5[9], uint32_t) + __IOM uint32_t IGROUPR[32]; /*!< \brief Offset: 0x080 (R/W) Interrupt Group Registers */ + __IOM uint32_t ISENABLER[32]; /*!< \brief Offset: 0x100 (R/W) Interrupt Set-Enable Registers */ + __IOM uint32_t ICENABLER[32]; /*!< \brief Offset: 0x180 (R/W) Interrupt Clear-Enable Registers */ + __IOM uint32_t ISPENDR[32]; /*!< \brief Offset: 0x200 (R/W) Interrupt Set-Pending Registers */ + __IOM uint32_t ICPENDR[32]; /*!< \brief Offset: 0x280 (R/W) Interrupt Clear-Pending Registers */ + __IOM uint32_t ISACTIVER[32]; /*!< \brief Offset: 0x300 (R/W) Interrupt Set-Active Registers */ + __IOM uint32_t ICACTIVER[32]; /*!< \brief Offset: 0x380 (R/W) Interrupt Clear-Active Registers */ + __IOM uint32_t IPRIORITYR[255]; /*!< \brief Offset: 0x400 (R/W) Interrupt Priority Registers */ + RESERVED(6, uint32_t) + __IOM uint32_t ITARGETSR[255]; /*!< \brief Offset: 0x800 (R/W) Interrupt Targets Registers */ + RESERVED(7, uint32_t) + __IOM uint32_t ICFGR[64]; /*!< \brief Offset: 0xC00 (R/W) Interrupt Configuration Registers */ + __IOM uint32_t IGRPMODR[32]; /*!< \brief Offset: 0xD00 (R/W) Interrupt Group Modifier Registers */ + RESERVED(8[32], uint32_t) + __IOM uint32_t NSACR[64]; /*!< \brief Offset: 0xE00 (R/W) Non-secure Access Control Registers */ + __OM uint32_t SGIR; /*!< \brief Offset: 0xF00 ( /W) Software Generated Interrupt Register */ + RESERVED(9[3], uint32_t) + __IOM uint32_t CPENDSGIR[4]; /*!< \brief Offset: 0xF10 (R/W) SGI Clear-Pending Registers */ + __IOM uint32_t SPENDSGIR[4]; /*!< \brief Offset: 0xF20 (R/W) SGI Set-Pending Registers */ + RESERVED(10[5172], uint32_t) + __IOM uint64_t IROUTER[988]; /*!< \brief Offset: 0x6000(R/W) Interrupt Routing Registers */ +} GICDistributor_Type; + +#define GICDistributor ((GICDistributor_Type *) GIC_DISTRIBUTOR_BASE ) /*!< \brief GIC Distributor register set access pointer */ + +/** \brief Structure type to access the Generic Interrupt Controller Redistributor (GICR) +*/ +typedef struct +{ + __IOM uint32_t CTLR; /*!< \brief Offset: 0x000 (R/W) Redistributor Control Register */ + __IM uint32_t IIDR; /*!< \brief Offset: 0x004 (R/ ) Implementer Identification Register */ + __IM uint64_t TYPER; /*!< \brief Offset: 0x008 (R/ ) Redistributor Type Register */ + __IOM uint32_t STATUSR; /*!< \brief Offset: 0x010 (R/W) 
Error Reporting Status Register, optional */
+  __IOM uint32_t WAKER;     /*!< \brief Offset: 0x014 (R/W) Redistributor Wake Register */
+  __IM uint32_t MPAMIDR;    /*!< \brief Offset: 0x018 (R/ ) Report maximum PARTID and PMG Register */
+  __IOM uint32_t PARTIDR;   /*!< \brief Offset: 0x01C (R/W) Set PARTID and PMG Register */
+  RESERVED(1[8], uint32_t)
+  __OM uint32_t SETLPIR;    /*!< \brief Offset: 0x040 ( /W) Set LPI Pending Register */
+  RESERVED(2, uint32_t)
+  __OM uint32_t CLRLPIR;    /*!< \brief Offset: 0x048 ( /W) Clear LPI Pending Register */
+  RESERVED(3[9], uint32_t)
+  __IOM uint32_t PROPBASER; /*!< \brief Offset: 0x070 (R/W) Redistributor Properties Base Address Register */
+  RESERVED(4, uint32_t)
+  __IOM uint32_t PENDBASER; /*!< \brief Offset: 0x078 (R/W) Redistributor LPI Pending Table Base Address Register */
+  RESERVED(5[9], uint32_t)
+  __OM uint32_t INVLPIR;    /*!< \brief Offset: 0x0A0 ( /W) Redistributor Invalidate LPI Register */
+  RESERVED(6[3], uint32_t)
+  __OM uint32_t INVALLR;    /*!< \brief Offset: 0x0B0 ( /W) Redistributor Invalidate All Register */
+  RESERVED(7[3], uint32_t)
+  __IM uint32_t SYNCR;      /*!< \brief Offset: 0x0C0 (R/ ) Redistributor Synchronize Register */
+} GICRedistributor_Type;
+
+/* The memory mapped GIC CPU interface may be unavailable when ICC_SRE_ELx.SRE
+   is set to 1 by the hypervisor. In that case the ICC_* system registers are
+   used via MSR/MRS instead. */
+#ifdef GIC_INTERFACE_BASE
+
+/** \brief Structure type to access the Generic Interrupt Controller Interface (GICC)
+*/
+typedef struct
+{
+  __IOM uint32_t CTLR;    /*!< \brief Offset: 0x000 (R/W) CPU Interface Control Register */
+  __IOM uint32_t PMR;     /*!< \brief Offset: 0x004 (R/W) Interrupt Priority Mask Register */
+  __IOM uint32_t BPR;     /*!< \brief Offset: 0x008 (R/W) Binary Point Register */
+  __IM uint32_t IAR;      /*!< \brief Offset: 0x00C (R/ ) Interrupt Acknowledge Register */
+  __OM uint32_t EOIR;     /*!< \brief Offset: 0x010 ( /W) End Of Interrupt Register */
+  __IM uint32_t RPR;      /*!< \brief Offset: 0x014 (R/ ) Running Priority Register */
+  __IM uint32_t HPPIR;    /*!< \brief Offset: 0x018 (R/ ) Highest Priority Pending Interrupt Register */
+  __IOM uint32_t ABPR;    /*!< \brief Offset: 0x01C (R/W) Aliased Binary Point Register */
+  __IM uint32_t AIAR;     /*!< \brief Offset: 0x020 (R/ ) Aliased Interrupt Acknowledge Register */
+  __OM uint32_t AEOIR;    /*!< \brief Offset: 0x024 ( /W) Aliased End Of Interrupt Register */
+  __IM uint32_t AHPPIR;   /*!< \brief Offset: 0x028 (R/ ) Aliased Highest Priority Pending Interrupt Register */
+  __IOM uint32_t STATUSR; /*!< \brief Offset: 0x02C (R/W) Error Reporting Status Register, optional */
+  RESERVED(1[40], uint32_t)
+  __IOM uint32_t APR[4];  /*!< \brief Offset: 0x0D0 (R/W) Active Priority Register */
+  __IOM uint32_t NSAPR[4];/*!< \brief Offset: 0x0E0 (R/W) Non-secure Active Priority Register */
+  RESERVED(2[3], uint32_t)
+  __IM uint32_t IIDR;     /*!< \brief Offset: 0x0FC (R/ ) CPU Interface Identification Register */
+  RESERVED(3[960], uint32_t)
+  __OM uint32_t DIR;      /*!< \brief Offset: 0x1000( /W) Deactivate Interrupt Register */
+} GICInterface_Type;
+
+#define GICInterface ((GICInterface_Type *) GIC_INTERFACE_BASE ) /*!< \brief GIC Interface register set access pointer */
+#endif /* GIC_INTERFACE_BASE */
+
+/* CTLR register bit positions for non-secure access */
+#define GICD_CTLR_RWP 31
+#define GICD_CTLR_ARE_NS 4
+#define GICD_CTLR_ENGRP1A 1
+#define GICD_CTLR_ENGRP1 0
+
+#define GICR_CTLR_RWP 3
+
+enum gic_rwp {
+    GICD_RWP,
+    GICR_RWP,
+};
+
+/*******************************************************************************
+ *                 GIC Functions
+ ******************************************************************************/
+
+/* ########################## GIC functions ###################################### */
+
+/** \brief Get the recomposed MPIDR_EL1 Affinity fields.
+* The recomposed Affinity value format is (aff3:aff2:aff1:aff0)
+*/
+__STATIC_INLINE uint32_t GIC_MPIDRtoAffinity(void)
+{
+    uint32_t aff3, aff2, aff1, aff0, aff;
+    uint64_t mpidr = __get_MPIDR_EL1();
+
+    aff0 = MPIDR_TO_AFF_LEVEL(mpidr, 0);
+    aff1 = MPIDR_TO_AFF_LEVEL(mpidr, 1);
+    aff2 = MPIDR_TO_AFF_LEVEL(mpidr, 2);
+    aff3 = MPIDR_TO_AFF_LEVEL(mpidr, 3);
+
+    aff = (aff0 & MPIDR_AFFLVL_MASK) << 0 |
+          (aff1 & MPIDR_AFFLVL_MASK) << 8 |
+          (aff2 & MPIDR_AFFLVL_MASK) << 16 |
+          (aff3 & MPIDR_AFFLVL_MASK) << 24;
+
+    return aff;
+}
+
+/** \brief Get the Redistributor base.
+*/
+__STATIC_INLINE GICRedistributor_Type *GIC_GetRdist(void)
+{
+    uintptr_t rd_addr = GIC_REDISTRIBUTOR_BASE;
+    uint32_t rd_aff, aff = GIC_MPIDRtoAffinity();
+    uint64_t rd_typer;
+
+    /* Walk the redistributor frames until the one matching this core's
+       affinity is found, or the frame marked Last is passed. */
+    do {
+        rd_typer = ((GICRedistributor_Type *)rd_addr)->TYPER;
+        rd_aff = rd_typer >> GICR_TYPER_AFF_SHIFT;
+
+        if (rd_aff == aff)
+            return (GICRedistributor_Type *)rd_addr;
+
+        rd_addr += GIC_REDISTRIBUTOR_STRIDE;
+    } while (!(rd_typer & GICR_TYPER_LAST_MASK));
+
+    return NULL;
+}
+
+/** \brief Get the Redistributor SGI_base.
+*/
+__STATIC_INLINE void *GIC_GetRdistSGIBase(void *rd_base)
+{
+    return (void *)((uintptr_t)rd_base + GICR_SGI_BASE_OFF);
+}
+
+/** \brief Wait for a pending register write to complete (RWP bit clears).
+*/
+__STATIC_INLINE void GIC_WaitRWP(enum gic_rwp rwp)
+{
+    uint32_t rwp_mask;
+    uint32_t __IM *base;
+
+    if (rwp == GICR_RWP) {
+        GICRedistributor_Type *rdist = GIC_GetRdist();
+
+        /* check the redistributor pointer before taking a member address */
+        if (!rdist)
+            return;
+        base = &rdist->CTLR;
+        rwp_mask = BIT(GICR_CTLR_RWP);
+    } else if (rwp == GICD_RWP) {
+        base = &GICDistributor->CTLR;
+        rwp_mask = BIT(GICD_CTLR_RWP);
+    } else {
+        return;
+    }
+
+    while (*base & rwp_mask)
+        ;
+}
+
+/** \brief Get the Affinity Routing status.
+*/
+__STATIC_INLINE bool GIC_GetARE(void)
+{
+    /* ARE bits of GICD_CTLR (bits 4 and 5, view dependent) */
+    return !!(GICDistributor->CTLR & 0x30);
+}
+
+/** \brief Disable the interrupt distributor using the GIC's CTLR register.
+*/
+__STATIC_INLINE void GIC_DisableDistributor(void)
+{
+    GICDistributor->CTLR &=~1U;
+    GIC_WaitRWP(GICD_RWP);
+}
+
+/** \brief Read the GIC's TYPER register.
+* \return GICDistributor_Type::TYPER
+*/
+__STATIC_INLINE uint32_t GIC_DistributorInfo(void)
+{
+    return (GICDistributor->TYPER);
+}
+
+/** \brief Reads the GIC's IIDR register.
+* \return GICDistributor_Type::IIDR
+*/
+__STATIC_INLINE uint32_t GIC_DistributorImplementer(void)
+{
+    return (GICDistributor->IIDR);
+}
+
+/** \brief Sets the GIC's ITARGETSR register for the given interrupt.
+* \param [in] IRQn Interrupt to be configured.
+* \param [in] cpu_target CPU interfaces to assign this interrupt to.
+*/
+__STATIC_INLINE void GIC_SetTarget(IRQn_Type IRQn, uint64_t cpu_target)
+{
+    if (IRQn >= 32)
+    {
+        if (GIC_GetARE())
+        {
+            /* affinity routing */
+            GICDistributor->IROUTER[IRQn] = cpu_target;
+        }
+        else
+        {
+            /* legacy */
+            uint32_t mask = GICDistributor->ITARGETSR[IRQn / 4U] & ~(0xFFUL << ((IRQn % 4U) * 8U));
+            GICDistributor->ITARGETSR[IRQn / 4U] = mask | ((cpu_target & 0xFFUL) << ((IRQn % 4U) * 8U));
+        }
+    }
+}
+
+/** \brief Get the target core of the interrupt.
+* \param [in] IRQn Interrupt to acquire the configuration for.
+*
+* \return:
+* For SPI: GICDistributor_Type::ITARGETSR when Affinity Routing isn't enabled,
+* or GICDistributor_Type::IROUTER when Affinity Routing is enabled
+* For SGI/PPI: The Affinity fields of the MPIDR_EL1.
+*/
+__STATIC_INLINE uint64_t GIC_GetTarget(IRQn_Type IRQn)
+{
+    uint64_t cpu_target = 0;
+
+    if (IRQn >= 32)
+    {
+        if (GIC_GetARE())
+        {
+            /* affinity routing */
+            cpu_target = GICDistributor->IROUTER[IRQn];
+        }
+        else
+        {
+            /* legacy */
+            cpu_target = (GICDistributor->ITARGETSR[IRQn / 4U] >> ((IRQn % 4U) * 8U)) & 0xFFUL;
+        }
+    }
+    else
+    {
+        /* local */
+        cpu_target = __get_MPIDR_EL1() & MPIDR_AFFINITY_MASK;
+    }
+
+    return cpu_target;
+}
+
+/** \brief Enables the given interrupt using GIC's ISENABLER register.
+* \param [in] IRQn The interrupt to be enabled.
+*/
+__STATIC_INLINE void GIC_EnableIRQ(IRQn_Type IRQn)
+{
+    uint64_t mpidr = __get_MPIDR_EL1();
+    /* The redistributor SGI/PPI frame mirrors the distributor layout for
+       these registers, so the distributor struct type is reused here. */
+    GICDistributor_Type *s_RedistPPIBaseAddrs;
+
+    GIC_SetTarget(IRQn, mpidr & MPIDR_AFFINITY_MASK);
+
+    if (IRQn < 32) {
+        s_RedistPPIBaseAddrs = GIC_GetRdistSGIBase(GIC_GetRdist());
+        s_RedistPPIBaseAddrs->ISENABLER[0] = 1U << IRQn;
+    } else {
+        GICDistributor->ISENABLER[IRQn / 32U] = 1U << (IRQn % 32U);
+    }
+}
+
+/** \brief Get interrupt enable status using GIC's ISENABLER register.
+* \param [in] IRQn The interrupt to be queried.
+* \return 0 - interrupt is not enabled, 1 - interrupt is enabled.
+*/
+__STATIC_INLINE uint32_t GIC_GetEnableIRQ(IRQn_Type IRQn)
+{
+    return (GICDistributor->ISENABLER[IRQn / 32U] >> (IRQn % 32U)) & 1UL;
+}
+
+/** \brief Disables the given interrupt using GIC's ICENABLER register.
+* \param [in] IRQn The interrupt to be disabled.
+*/
+__STATIC_INLINE void GIC_DisableIRQ(IRQn_Type IRQn)
+{
+    GICDistributor_Type *s_RedistPPIBaseAddrs;
+
+    if (IRQn < 32) {
+        s_RedistPPIBaseAddrs = GIC_GetRdistSGIBase(GIC_GetRdist());
+        s_RedistPPIBaseAddrs->ICENABLER[0] = 1U << IRQn;
+        GIC_WaitRWP(GICR_RWP);
+    } else {
+        GICDistributor->ICENABLER[IRQn / 32U] = 1U << (IRQn % 32U);
+        GIC_WaitRWP(GICD_RWP);
+    }
+}
+
+/** \brief Get interrupt pending status from GIC's ISPENDR register.
+* \param [in] IRQn The interrupt to be queried.
+* \return 0 - interrupt is not pending, 1 - interrupt is pending.
+*/
+__STATIC_INLINE uint32_t GIC_GetPendingIRQ(IRQn_Type IRQn)
+{
+    uint32_t pend;
+
+    if (IRQn >= 16) {
+        pend = (GICDistributor->ISPENDR[IRQn / 32U] >> (IRQn % 32U)) & 1UL;
+    } else {
+        // INTID 0-15 Software Generated Interrupt
+        pend = (GICDistributor->SPENDSGIR[IRQn / 4U] >> ((IRQn % 4U) * 8U)) & 0xFFUL;
+        // No CPU identification offered
+        if (pend != 0U) {
+            pend = 1U;
+        } else {
+            pend = 0U;
+        }
+    }
+
+    return (pend);
+}
+
+/** \brief Sets the given interrupt as pending using GIC's ISPENDR register.
+* \param [in] IRQn The interrupt to be enabled.
+*/
+__STATIC_INLINE void GIC_SetPendingIRQ(IRQn_Type IRQn)
+{
+    if (IRQn >= 16) {
+        GICDistributor->ISPENDR[IRQn / 32U] = 1U << (IRQn % 32U);
+    } else {
+        // INTID 0-15 Software Generated Interrupt
+        GICDistributor->SPENDSGIR[IRQn / 4U] = 1U << ((IRQn % 4U) * 8U);
+        // Forward the interrupt to the CPU interface that requested it
+        GICDistributor->SGIR = (IRQn | 0x02000000U);
+    }
+}
+
+/** \brief Clears the given interrupt from being pending using GIC's ICPENDR register.
+* \param [in] IRQn The interrupt to be enabled.
+*/
+__STATIC_INLINE void GIC_ClearPendingIRQ(IRQn_Type IRQn)
+{
+    if (IRQn >= 16) {
+        GICDistributor->ICPENDR[IRQn / 32U] = 1U << (IRQn % 32U);
+    } else {
+        // INTID 0-15 Software Generated Interrupt
+        GICDistributor->CPENDSGIR[IRQn / 4U] = 1U << ((IRQn % 4U) * 8U);
+    }
+}
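+
+/* Illustrative usage sketch: bringing up one peripheral interrupt with the
+ * helpers in this file (IRQ number 45 is a made-up example SPI; see the
+ * configuration and priority helpers just below).
+ *
+ * \code
+ * IRQn_Type irq = (IRQn_Type)45;
+ * GIC_SetConfiguration(irq, 2U);   // Bit 1 set: edge triggered
+ * GIC_SetPriority(irq, 0x80U);     // mid-range priority
+ * GIC_EnableIRQ(irq);              // also routes the SPI to this core
+ * \endcode
+ */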
+
+/** \brief Sets the interrupt configuration using GIC's ICFGR register.
+* \param [in] IRQn The interrupt to be configured.
+* \param [in] int_config Int_config field value. Bit 0: Reserved (0 - N-N model, 1 - 1-N model for some GIC before v1)
+* Bit 1: 0 - level sensitive, 1 - edge triggered
+*/
+__STATIC_INLINE void GIC_SetConfiguration(IRQn_Type IRQn, uint32_t int_config)
+{
+    uint32_t icfgr = GICDistributor->ICFGR[IRQn / 16U];
+    uint32_t shift = (IRQn % 16U) << 1U;
+
+    icfgr &= (~(3U << shift));
+    icfgr |= ( int_config << shift);
+
+    GICDistributor->ICFGR[IRQn / 16U] = icfgr;
+}
+
+/** \brief Get the interrupt configuration from the GIC's ICFGR register.
+* \param [in] IRQn Interrupt to acquire the configuration for.
+* \return Int_config field value. Bit 0: Reserved (0 - N-N model, 1 - 1-N model for some GIC before v1)
+* Bit 1: 0 - level sensitive, 1 - edge triggered
+*/
+__STATIC_INLINE uint32_t GIC_GetConfiguration(IRQn_Type IRQn)
+{
+    /* each interrupt occupies a 2-bit field, so shift by (IRQn % 16) * 2 */
+    return (GICDistributor->ICFGR[IRQn / 16U] >> ((IRQn % 16U) << 1U)) & 3U;
+}
+
+__STATIC_INLINE void GIC_SetRedistPriority(IRQn_Type IRQn, uint32_t priority)
+{
+    GICDistributor_Type *s_RedistPPIBaseAddrs = GIC_GetRdistSGIBase(GIC_GetRdist());
+    uint32_t mask = s_RedistPPIBaseAddrs->IPRIORITYR[IRQn / 4U] & ~(0xFFUL << ((IRQn % 4U) * 8U));
+
+    s_RedistPPIBaseAddrs->IPRIORITYR[IRQn / 4U] = mask | ((priority & 0xFFUL) << ((IRQn % 4U) * 8U));
+}
+
+/** \brief Set the priority for the given interrupt.
+* \param [in] IRQn The interrupt to be configured.
+* \param [in] priority The priority for the interrupt, lower values denote higher priorities.
+*/
+__STATIC_INLINE void GIC_SetPriority(IRQn_Type IRQn, uint32_t priority)
+{
+    uint32_t mask;
+
+    if ((IRQn < 32) && (GIC_GetARE())) {
+        GIC_SetRedistPriority(IRQn, priority);
+    } else {
+        mask = GICDistributor->IPRIORITYR[IRQn / 4U] & ~(0xFFUL << ((IRQn % 4U) * 8U));
+        GICDistributor->IPRIORITYR[IRQn / 4U] = mask | ((priority & 0xFFUL) << ((IRQn % 4U) * 8U));
+    }
+}
+
+__STATIC_INLINE void GIC_RedistWakeUp(void)
+{
+    GICRedistributor_Type *const s_RedistBaseAddrs = GIC_GetRdist();
+
+    if (!s_RedistBaseAddrs)
+        return;
+
+    if (!(s_RedistBaseAddrs->WAKER & (1 << GICR_WAKER_CA_SHIFT)))
+        return;
+
+    s_RedistBaseAddrs->WAKER &= ~ (1 << GICR_WAKER_PS_SHIFT);
+    while (s_RedistBaseAddrs->WAKER & (1 << GICR_WAKER_CA_SHIFT))
+        ;
+}
+
+__STATIC_INLINE uint32_t GIC_GetRedistPriority(IRQn_Type IRQn)
+{
+    GICDistributor_Type *s_RedistPPIBaseAddrs;
+
+    s_RedistPPIBaseAddrs = GIC_GetRdistSGIBase(GIC_GetRdist());
+    return (s_RedistPPIBaseAddrs->IPRIORITYR[IRQn / 4U] >> ((IRQn % 4U) * 8U)) & 0xFFUL;
+}
+
+/** \brief Read the current interrupt priority from GIC's IPRIORITYR register.
+* \param [in] IRQn The interrupt to be queried.
+*/
+__STATIC_INLINE uint32_t GIC_GetPriority(IRQn_Type IRQn)
+{
+    if ((IRQn < 32) && (GIC_GetARE())) {
+        return GIC_GetRedistPriority(IRQn);
+    } else {
+        return (GICDistributor->IPRIORITYR[IRQn / 4U] >> ((IRQn % 4U) * 8U)) & 0xFFUL;
+    }
+}
+
+/** \brief Get the status for a given interrupt.
+* \param [in] IRQn The interrupt to get status for.
+* \return 0 - not pending/active, 1 - pending, 2 - active, 3 - pending and active
+*/
+__STATIC_INLINE uint32_t GIC_GetIRQStatus(IRQn_Type IRQn)
+{
+    uint32_t pending, active;
+
+    active = ((GICDistributor->ISACTIVER[IRQn / 32U]) >> (IRQn % 32U)) & 1UL;
+    pending = ((GICDistributor->ISPENDR[IRQn / 32U]) >> (IRQn % 32U)) & 1UL;
+
+    return ((active<<1U) | pending);
+}
+
+/** \brief Generate a software interrupt (Affinity Routing version).
+* \param [in] IRQn Software interrupt to be generated.
+* \param [in] target_aff Target affinity in MPIDR form.
+* \param [in] tlist List of CPUs the software interrupt should be forwarded to.
+*/
+__STATIC_INLINE void GIC_SendSGI_ARE(IRQn_Type IRQn, uint64_t target_aff, uint16_t tlist)
+{
+    uint32_t aff3, aff2, aff1, rs;
+    uint64_t val;
+
+    if (IRQn >= 16)
+        return;
+
+    aff1 = MPIDR_TO_AFF_LEVEL(target_aff, 1);
+    aff2 = MPIDR_TO_AFF_LEVEL(target_aff, 2);
+    aff3 = MPIDR_TO_AFF_LEVEL(target_aff, 3);
+    rs = MPIDR_TO_RS(target_aff);
+    val = COMPOSE_ICC_SGIR_VALUE(aff3, aff2, aff1, IRQn, 0, rs, tlist);
+
+    __DSB();
+    __MSR(ICC_SGI1R_EL1, val);
+    __ISB();
+}
+
+/** \brief Generate a software interrupt.
+* \param [in] IRQn Software interrupt to be generated.
+* \param [in] target_aff Target affinity in MPIDR form.
+* \param [in] target_list List of CPUs the software interrupt should be forwarded to.
+*/
+__STATIC_INLINE void GIC_SendSGI(IRQn_Type IRQn, uint64_t target_aff, uint16_t target_list)
+{
+    if (IRQn >= 16)
+        return;
+
+    if (GIC_GetARE()) {
+        /* affinity routing */
+        GIC_SendSGI_ARE(IRQn, target_aff, target_list);
+    } else {
+        GICDistributor->SGIR = ((target_list & 0xFFUL) << 16U) | (IRQn & 0x0FUL);
+    }
+}
+
+/** \brief Set the interrupt group from the GIC's IGROUPR register.
+* \param [in] IRQn The interrupt to be queried.
+* \param [in] group Interrupt group number: 0 - Group 0, 1 - Group 1
+*/
+__STATIC_INLINE void GIC_SetGroup(IRQn_Type IRQn, uint32_t group)
+{
+    uint32_t igroupr = GICDistributor->IGROUPR[IRQn / 32U];
+    uint32_t shift = (IRQn % 32U);
+
+    igroupr &= (~(1U << shift));
+    igroupr |= ( (group & 1U) << shift);
+
+    GICDistributor->IGROUPR[IRQn / 32U] = igroupr;
+}
+#define GIC_SetSecurity GIC_SetGroup
+
+__STATIC_INLINE void GIC_SetRedistGroup(IRQn_Type IRQn, uint32_t group)
+{
+    GICDistributor_Type *s_RedistPPIBaseAddrs;
+    uint32_t shift = (IRQn % 32U);
+    uint32_t igroupr;
+
+    s_RedistPPIBaseAddrs = GIC_GetRdistSGIBase(GIC_GetRdist());
+    igroupr = s_RedistPPIBaseAddrs->IGROUPR[IRQn / 32U];
+
+    igroupr &= (~(1U << shift));
+    igroupr |= ( (group & 1U) << shift);
+
+    s_RedistPPIBaseAddrs->IGROUPR[IRQn / 32U] = igroupr;
+}
+
+/** \brief Get the interrupt group from the GIC's IGROUPR register.
+* \param [in] IRQn The interrupt to be queried.
+* \return 0 - Group 0, 1 - Group 1
+*/
+__STATIC_INLINE uint32_t GIC_GetGroup(IRQn_Type IRQn)
+{
+    return (GICDistributor->IGROUPR[IRQn / 32U] >> (IRQn % 32U)) & 1UL;
+}
+#define GIC_GetSecurity GIC_GetGroup
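+
+/* Illustrative usage sketch: send SGI 1 to this core's own affinity using
+ * GIC_SendSGI() above. The target-list bit derivation shown here applies to
+ * the affinity-routing case; legacy mode interprets target_list as CPU
+ * interface bits instead.
+ *
+ * \code
+ * uint64_t self = __get_MPIDR_EL1() & MPIDR_AFFINITY_MASK;
+ * GIC_SendSGI((IRQn_Type)1, self, 1U << (MPIDR_TO_AFF_LEVEL(self, 0) % 16U));
+ * \endcode
+ */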
+
+/** \brief Initialize the interrupt distributor.
+*/
+__STATIC_INLINE void GIC_DistInit(void)
+{
+    uint32_t i;
+    uint32_t num_irq = 0U;
+    uint32_t priority_field;
+    uint32_t ppi_priority;
+
+    //A reset sets all bits in the IGROUPRs corresponding to the SPIs to 0,
+    //configuring all of the interrupts as Secure.
+
+    //Disable interrupt forwarding
+    GIC_DisableDistributor();
+    //Get the maximum number of interrupts that the GIC supports
+    num_irq = 32U * ((GIC_DistributorInfo() & 0x1FU) + 1U);
+
+    /* Priority level is implementation defined.
+       To determine the number of priority bits implemented write 0xFF to an IPRIORITYR
+       priority field and read back the value stored.
+       Use PPI, as it is always accessible, even for a Guest OS using a hypervisor.
+       Then restore the initial state.*/
+    ppi_priority = GIC_GetPriority((IRQn_Type)31U);
+    GIC_SetPriority((IRQn_Type)31U, 0xFFU);
+    priority_field = GIC_GetPriority((IRQn_Type)31U);
+    GIC_SetPriority((IRQn_Type)31U, ppi_priority);
+
+    for (i = 32U; i < num_irq; i++)
+    {
+        /* Use non secure group1 for all SPI */
+        GIC_SetGroup(i, 1);
+        //Disable the SPI interrupt
+        GIC_DisableIRQ((IRQn_Type)i);
+        //Set level-sensitive (and N-N model)
+        GIC_SetConfiguration((IRQn_Type)i, 0U);
+        //Set priority
+        GIC_SetPriority((IRQn_Type)i, priority_field*2U/3U);
+    }
+
+    /* Enable distributor with ARE_NS and NS_Group1 */
+    GICDistributor->CTLR = ((1U << GICD_CTLR_ARE_NS) | (1U << GICD_CTLR_ENGRP1A));
+    GIC_WaitRWP(GICD_RWP);
+}
+
+/** \brief Initialize the interrupt redistributor.
+*/
+__STATIC_INLINE void GIC_RedistInit(void)
+{
+    uint32_t i;
+    uint32_t priority_field;
+
+    /* Priority level is implementation defined.
+       To determine the number of priority bits implemented write 0xFF to an IPRIORITYR
+       priority field and read back the value stored.*/
+    GIC_SetRedistPriority((IRQn_Type)31U, 0xFFU);
+    priority_field = GIC_GetRedistPriority((IRQn_Type)31U);
+
+    /* Wakeup the GIC */
+    GIC_RedistWakeUp();
+
+    for (i = 0; i < 32; i++)
+    {
+        //Disable the SGI/PPI interrupt
+        GIC_DisableIRQ((IRQn_Type)i);
+        //Set priority
+        GIC_SetRedistPriority((IRQn_Type)i, priority_field*2U/3U);
+    }
+}
+
+#ifdef GICInterface
+
+/** \brief Enable the CPU's interrupt interface.
+*/
+__STATIC_INLINE void GIC_EnableInterface(void)
+{
+    GICInterface->CTLR |= 1U; //enable interface
+}
+
+/** \brief Disable the CPU's interrupt interface.
+*/
+__STATIC_INLINE void GIC_DisableInterface(void)
+{
+    GICInterface->CTLR &=~1U; //disable interface
+}
+
+/** \brief Read the CPU's IAR register.
+* \return GICInterface_Type::IAR
+*/
+__STATIC_INLINE IRQn_Type GIC_AcknowledgePending(void)
+{
+    return (IRQn_Type)(GICInterface->IAR);
+}
+
+/** \brief Writes the given interrupt number to the CPU's EOIR register.
+* \param [in] IRQn The interrupt to be signaled as finished.
+*/
+__STATIC_INLINE void GIC_EndInterrupt(IRQn_Type IRQn)
+{
+    GICInterface->EOIR = IRQn;
+}
+
+
+/** \brief Set the interrupt priority mask using CPU's PMR register.
+* \param [in] priority Priority mask to be set.
+*/
+__STATIC_INLINE void GIC_SetInterfacePriorityMask(uint32_t priority)
+{
+    GICInterface->PMR = priority & 0xFFUL; //set priority mask
+}
+
+/** \brief Read the current interrupt priority mask from CPU's PMR register.
+* \result GICInterface_Type::PMR
+*/
+__STATIC_INLINE uint32_t GIC_GetInterfacePriorityMask(void)
+{
+    return GICInterface->PMR;
+}
+
+/** \brief Configures the group priority and subpriority split point using CPU's BPR register.
+* \param [in] binary_point Amount of bits used as subpriority.
+*/
+__STATIC_INLINE void GIC_SetBinaryPoint(uint32_t binary_point)
+{
+    GICInterface->BPR = binary_point & 7U; //set binary point
+}
+
+/** \brief Read the current group priority and subpriority split point from CPU's BPR register.
+* \return GICInterface_Type::BPR
+*/
+__STATIC_INLINE uint32_t GIC_GetBinaryPoint(void)
+{
+    return GICInterface->BPR;
+}
+
+/** \brief Get the interrupt number of the highest interrupt pending from CPU's HPPIR register.
+* \return GICInterface_Type::HPPIR
+*/
+__STATIC_INLINE uint32_t GIC_GetHighPendingIRQ(void)
+{
+    return GICInterface->HPPIR;
+}
+
+/** \brief Provides information about the implementer and revision of the CPU interface.
+* \return GICInterface_Type::IIDR
+*/
+__STATIC_INLINE uint32_t GIC_GetInterfaceId(void)
+{
+    return GICInterface->IIDR;
+}
+
+#else /* GICInterface */
+
+/** \brief Enable the CPU's interrupt interface.
+*/
+__STATIC_INLINE void GIC_EnableInterface(void)
+{
+    __MSR(ICC_IGRPEN1_EL1, 1);
+}
+
+/** \brief Disable the CPU's interrupt interface.
+*/
+__STATIC_INLINE void GIC_DisableInterface(void)
+{
+    __MSR(ICC_IGRPEN1_EL1, 0);
+}
+
+/** \brief Read the CPU's IAR register.
+* \return GICInterface_Type::IAR
+*/
+__STATIC_INLINE IRQn_Type GIC_AcknowledgePending(void)
+{
+    uint64_t result; /* MRS targets a 64-bit (X) register */
+    __MRS(ICC_IAR1_EL1, &result);
+    return (IRQn_Type)(result);
+}
+
+/** \brief Writes the given interrupt number to the CPU's EOIR register.
+* \param [in] IRQn The interrupt to be signaled as finished.
+*/
+__STATIC_INLINE void GIC_EndInterrupt(IRQn_Type IRQn)
+{
+    __MSR(ICC_EOIR1_EL1, (uint32_t)IRQn);
+}
+
+/** \brief Set the interrupt priority mask using CPU's PMR register.
+* \param [in] priority Priority mask to be set.
+*/
+__STATIC_INLINE void GIC_SetInterfacePriorityMask(uint32_t priority)
+{
+    __MSR(ICC_PMR_EL1, priority & 0xFFUL);
+}
+
+/** \brief Read the current interrupt priority mask from CPU's PMR register.
+* \result GICInterface_Type::PMR
+*/
+__STATIC_INLINE uint32_t GIC_GetInterfacePriorityMask(void)
+{
+    uint64_t result; /* MRS targets a 64-bit (X) register */
+    __MRS(ICC_PMR_EL1, &result);
+    return (uint32_t)result;
+}
+
+/** \brief Configures the group priority and subpriority split point using CPU's BPR register.
+* \param [in] binary_point Amount of bits used as subpriority.
+*/
+__STATIC_INLINE void GIC_SetBinaryPoint(uint32_t binary_point)
+{
+    __MSR(ICC_BPR1_EL1, binary_point & 7U);
+}
+
+/** \brief Read the current group priority and subpriority split point from CPU's BPR register.
+* \return GICInterface_Type::BPR
+*/
+__STATIC_INLINE uint32_t GIC_GetBinaryPoint(void)
+{
+    uint64_t result; /* MRS targets a 64-bit (X) register */
+    __MRS(ICC_BPR1_EL1, &result);
+    return (uint32_t)result;
+}
+
+/** \brief Get the interrupt number of the highest interrupt pending from CPU's HPPIR register.
+* \return GICInterface_Type::HPPIR
+*/
+__STATIC_INLINE uint32_t GIC_GetHighPendingIRQ(void)
+{
+    uint64_t result; /* MRS targets a 64-bit (X) register */
+    __MRS(ICC_HPPIR1_EL1, &result);
+    return (uint32_t)result;
+}
+
+#endif
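+
+/* Illustrative usage sketch: the canonical acknowledge / handle / end-of-
+ * interrupt sequence for a Group 1 interrupt, using the interface accessors
+ * above (irq_dispatch and handle_irq are hypothetical names).
+ *
+ * \code
+ * void irq_dispatch(void)
+ * {
+ *     IRQn_Type irq = GIC_AcknowledgePending();  // read IAR
+ *     if ((uint32_t)irq < 1020U) {               // 1020-1023 are special INTIDs
+ *         // handle_irq(irq);
+ *         GIC_EndInterrupt(irq);                 // write EOIR
+ *     }
+ * }
+ * \endcode
+ */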
+
+__STATIC_INLINE void GIC_CPUInterfaceInit(void)
+{
+    uint32_t i;
+    uint32_t priority_field;
+
+    //A reset sets all bits in the IGROUPRs corresponding to the SPIs to 0,
+    //configuring all of the interrupts as Secure.
+
+    //Disable interrupt forwarding
+    GIC_DisableInterface();
+
+    /* Priority level is implementation defined.
+     To determine the number of priority bits implemented write 0xFF to an IPRIORITYR
+     priority field and read back the value stored. */
+    GIC_SetPriority((IRQn_Type)0U, 0xFFU);
+    priority_field = GIC_GetPriority((IRQn_Type)0U);
+
+    //SGI and PPI
+    for (i = 0U; i < 32U; i++)
+    {
+        if (i > 15U) {
+            //Set level-sensitive (and N-N model) for PPI
+            GIC_SetConfiguration((IRQn_Type)i, 0U);
+        }
+        //Disable SGI and PPI interrupts
+        GIC_DisableIRQ((IRQn_Type)i);
+        //Set priority
+        GIC_SetPriority((IRQn_Type)i, priority_field*2U/3U);
+    }
+
+    //Set binary point to 0
+    GIC_SetBinaryPoint(0U);
+    //Set priority mask
+    GIC_SetInterfacePriorityMask(0xFFU);
+    //Enable interface
+    GIC_EnableInterface();
+}
+
+/** \brief Initialize and enable the GIC.
+* \param [in] init_dist When non-zero, also initialize the (shared) distributor;
+*             only one core must do this.
+*/
+__STATIC_INLINE void GIC_Enable(int init_dist)
+{
+    /* Only one core should be responsible for the GIC distributor setup */
+    if (init_dist)
+        GIC_DistInit();
+
+    GIC_RedistInit();
+    GIC_CPUInterfaceInit(); //per CPU
+}
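+
+/* Bring-up on the boot core usually amounts to GIC_Enable(1) followed by
+ * per-interrupt configuration; secondary cores call GIC_Enable(0) so the
+ * shared distributor is programmed only once. A hedged sketch (the SPI number
+ * and priority are hypothetical platform values):
+ *
+ *     GIC_Enable(1);                            // distributor + redistributor + CPU interface
+ *     GIC_SetPriority((IRQn_Type)112U, 0x80U);  // hypothetical SPI
+ *     GIC_EnableIRQ((IRQn_Type)112U);           // assumes the GIC_EnableIRQ helper of this header
+ */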
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __GIC_V3_H */
diff --git a/CMSIS/Core_AArch64/Include/mmu_armv8a.h b/CMSIS/Core_AArch64/Include/mmu_armv8a.h
new file mode 100644
index 000000000..6baab1429
--- /dev/null
+++ b/CMSIS/Core_AArch64/Include/mmu_armv8a.h
@@ -0,0 +1,260 @@
+/**************************************************************************//**
+ * @file mmu_armv8a.h
+ * @brief CMSIS Cortex-Axx MMU API header file
+ * @version V1.0.0
+ * @date 20. October 2021
+ ******************************************************************************/
+/*
+ * Copyright 2019 Broadcom
+ * The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * Copyright 2021-2023 NXP
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if defined ( __ICCARM__ )
+  #pragma system_include         /* treat file as system include file for MISRA check */
+#elif defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
+  #pragma clang system_header    /* treat file as system include file */
+#endif
+
+#ifndef __MMU_ARMV8A_H
+#define __MMU_ARMV8A_H
+
+#include <stdint.h>
+#include <stddef.h>
+#include <stdbool.h>
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+#ifndef KB
+#define KB(x) ((x) << 10)
+#endif
+
+#ifndef MB
+#define MB(x) ((KB(x)) << 10)
+#endif
+
+#ifndef GB
+#define GB(x) ((MB(x)) << 10)
+#endif
+
+/******************************************************************************/
+
+/* The following memory types, supported through MAIR encodings, can be passed
+ * by the user through the "attrs" (attributes) field of a specified memory
+ * region. As MAIR supports 8 such encodings, attrs[2:0] is reserved so that
+ * encodings up to index 7 can be provided if needed in the future.
+ */
+#define MT_TYPE_MASK 0x7U
+#define MT_TYPE(attr) ((attr) & MT_TYPE_MASK)
+#define MT_DEVICE_nGnRnE 0U
+#define MT_DEVICE_nGnRE 1U
+#define MT_DEVICE_GRE 2U
+#define MT_NORMAL_NC 3U
+#define MT_NORMAL 4U
+#define MT_NORMAL_WT 5U
+
+/* Each MAIR attribute is one byte; index i occupies MAIR bits [8*i+7 : 8*i].
+ * E.g. MT_NORMAL (index 4) gets 0xFF: write-back non-transient,
+ * read/write-allocate, inner and outer. */
+#define MEMORY_ATTRIBUTES ((0x00 << (MT_DEVICE_nGnRnE * 8)) | \
+                           (0x04 << (MT_DEVICE_nGnRE * 8)) | \
+                           (0x0c << (MT_DEVICE_GRE * 8)) | \
+                           (0x44 << (MT_NORMAL_NC * 8)) | \
+                           (0xffUL << (MT_NORMAL * 8)) | \
+                           (0xbbUL << (MT_NORMAL_WT * 8)))
+
+/* More flags, from the user's perspective, are supported using the remaining
+ * bits of the "attrs" field, i.e. attrs[31:3]; the underlying code takes care
+ * of setting the PTE fields correctly.
+ *
+ * current usage of attrs[31:3] is:
+ * attrs[3] : Access Permissions
+ * attrs[4] : Memory access from secure/ns state
+ * attrs[5] : Execute Permissions privileged mode (PXN)
+ * attrs[6] : Execute Permissions unprivileged mode (UXN)
+ * attrs[7] : Mirror RO/RW permissions to EL0
+ * attrs[8] : Do not overwrite an existing mapping
+ *
+ */
+#define MT_PERM_SHIFT 3U
+#define MT_SEC_SHIFT 4U
+#define MT_P_EXECUTE_SHIFT 5U
+#define MT_U_EXECUTE_SHIFT 6U
+#define MT_RW_AP_SHIFT 7U
+#define MT_NO_OVERWRITE_SHIFT 8U
+
+#define MT_RO (0U << MT_PERM_SHIFT)
+#define MT_RW (1U << MT_PERM_SHIFT)
+
+#define MT_RW_AP_ELx (1U << MT_RW_AP_SHIFT)
+#define MT_RW_AP_EL_HIGHER (0U << MT_RW_AP_SHIFT)
+
+#define MT_SECURE (0U << MT_SEC_SHIFT)
+#define MT_NS (1U << MT_SEC_SHIFT)
+
+#define MT_P_EXECUTE (0U << MT_P_EXECUTE_SHIFT)
+#define MT_P_EXECUTE_NEVER (1U << MT_P_EXECUTE_SHIFT)
+
+#define MT_U_EXECUTE (0U << MT_U_EXECUTE_SHIFT)
+#define MT_U_EXECUTE_NEVER (1U << MT_U_EXECUTE_SHIFT)
+
+#define MT_NO_OVERWRITE (1U << MT_NO_OVERWRITE_SHIFT)
+
+#define MT_P_RW_U_RW (MT_RW | MT_RW_AP_ELx | MT_P_EXECUTE_NEVER | MT_U_EXECUTE_NEVER)
+#define MT_P_RW_U_NA (MT_RW | MT_RW_AP_EL_HIGHER | MT_P_EXECUTE_NEVER | MT_U_EXECUTE_NEVER)
+#define MT_P_RO_U_RO (MT_RO | MT_RW_AP_ELx | MT_P_EXECUTE_NEVER | MT_U_EXECUTE_NEVER)
+#define MT_P_RO_U_NA (MT_RO | MT_RW_AP_EL_HIGHER | MT_P_EXECUTE_NEVER | MT_U_EXECUTE_NEVER)
+#define MT_P_RO_U_RX (MT_RO | MT_RW_AP_ELx | MT_P_EXECUTE_NEVER | MT_U_EXECUTE)
+#define MT_P_RX_U_RX (MT_RO | MT_RW_AP_ELx | MT_P_EXECUTE | MT_U_EXECUTE)
+#define MT_P_RX_U_NA (MT_RO | MT_RW_AP_EL_HIGHER | MT_P_EXECUTE | MT_U_EXECUTE_NEVER)
+
+#ifdef CONFIG_ARMV8_A_NS
+#define MT_DEFAULT_SECURE_STATE MT_NS
+#else
+#define MT_DEFAULT_SECURE_STATE MT_SECURE
+#endif
+#ifndef CONFIG_ARM64_PA_BITS
+#define CONFIG_ARM64_PA_BITS 48
+#endif
+#ifndef CONFIG_ARM64_VA_BITS
+#define CONFIG_ARM64_VA_BITS 48
+#endif
+
+/*
+ * A PTE descriptor can be a Block descriptor, a Table descriptor,
+ * or a Page descriptor.
+ */
+#define PTE_DESC_TYPE_MASK 3U
+#define PTE_BLOCK_DESC 1U
+#define PTE_TABLE_DESC 3U
+#define PTE_PAGE_DESC 3U
+#define PTE_INVALID_DESC 0U
+
+/*
+ * Block and Page descriptor attributes fields
+ */
+#define PTE_BLOCK_DESC_MEMTYPE(x) ((x) << 2)
+#define PTE_BLOCK_DESC_NS (1ULL << 5)
+#define PTE_BLOCK_DESC_AP_ELx (1ULL << 6)
+#define PTE_BLOCK_DESC_AP_EL_HIGHER (0ULL << 6)
+#define PTE_BLOCK_DESC_AP_RO (1ULL << 7)
+#define PTE_BLOCK_DESC_AP_RW (0ULL << 7)
+#define PTE_BLOCK_DESC_NON_SHARE (0ULL << 8)
+#define PTE_BLOCK_DESC_OUTER_SHARE (2ULL << 8)
+#define PTE_BLOCK_DESC_INNER_SHARE (3ULL << 8)
+#define PTE_BLOCK_DESC_AF (1ULL << 10)
+#define PTE_BLOCK_DESC_NG (1ULL << 11)
+#define PTE_BLOCK_DESC_PXN (1ULL << 53)
+#define PTE_BLOCK_DESC_UXN (1ULL << 54)
+
+/*
+ * TCR definitions.
+ */
+#define TCR_EL1_IPS_SHIFT 32U
+#define TCR_EL2_PS_SHIFT 16U
+#define TCR_EL3_PS_SHIFT 16U
+
+#define TCR_T0SZ_SHIFT 0U
+#define TCR_T0SZ(x) ((64 - (x)) << TCR_T0SZ_SHIFT)
+
+#define TCR_IRGN_NC (0ULL << 8)
+#define TCR_IRGN_WBWA (1ULL << 8)
+#define TCR_IRGN_WT (2ULL << 8)
+#define TCR_IRGN_WBNWA (3ULL << 8)
+#define TCR_IRGN_MASK (3ULL << 8)
+#define TCR_ORGN_NC (0ULL << 10)
+#define TCR_ORGN_WBWA (1ULL << 10)
+#define TCR_ORGN_WT (2ULL << 10)
+#define TCR_ORGN_WBNWA (3ULL << 10)
+#define TCR_ORGN_MASK (3ULL << 10)
+#define TCR_SHARED_NON (0ULL << 12)
+#define TCR_SHARED_OUTER (2ULL << 12)
+#define TCR_SHARED_INNER (3ULL << 12)
+#define TCR_TG0_4K (0ULL << 14)
+#define TCR_TG0_64K (1ULL << 14)
+#define TCR_TG0_16K (2ULL << 14)
+#define TCR_EPD1_DISABLE (1ULL << 23)
+
+#define TCR_PS_BITS_4GB 0x0ULL
+#define TCR_PS_BITS_64GB 0x1ULL
+#define TCR_PS_BITS_1TB 0x2ULL
+#define TCR_PS_BITS_4TB 0x3ULL
+#define TCR_PS_BITS_16TB 0x4ULL
+#define TCR_PS_BITS_256TB 0x5ULL
+
+/* Region definition data structure */
+struct ARM_MMU_region {
+    /* Region Base Physical Address */
+    uintptr_t base_pa;
+    /* Region Base Virtual Address */
+    uintptr_t base_va;
+    /* Region size */
+    size_t size;
+    /* Region Name */
+    const char *name;
+    /* Region Attributes */
+    uint32_t attrs;
+};
+
+/* Flat (identity-mapped) region data structure */
+struct ARM_MMU_flat_range {
+    char *name;
+    void *start;
+    void *end;
+    uint32_t attrs;
+};
+
+/* MMU configuration data structure */
+struct ARM_MMU_config {
+    /* Number of regions */
+    unsigned int num_regions;
+    /* Regions */
+    const struct ARM_MMU_region *mmu_regions;
+    /* Number of OS memory regions */
+    unsigned int num_os_ranges;
+    /* OS memory regions */
+    const struct ARM_MMU_flat_range *mmu_os_ranges;
+};
+
+struct ARM_MMU_ptables {
+    uint64_t *base_xlat_table;
+};
+
+/* Convenience macros to represent the ARMv8-A-specific
+ * configuration for memory access permission and
+ * cache-ability attribution.
+ */
+
+#define MMU_REGION_ENTRY(_name, _base_pa, _base_va, _size, _attrs) \
+    {\
+        .name = _name, \
+        .base_pa = _base_pa, \
+        .base_va = _base_va, \
+        .size = _size, \
+        .attrs = _attrs, \
+    }
+
+#define MMU_REGION_FLAT_ENTRY(name, adr, sz, attrs) \
+    MMU_REGION_ENTRY(name, adr, adr, sz, attrs)
+
+void ARM_MMU_Initialize(const struct ARM_MMU_config *MMU_config, bool is_primary_core);
+int ARM_MMU_AddMap(const char *name, uintptr_t phys, uintptr_t virt, size_t size, uint32_t attrs);
+void ARM_MMU_InvalidateTLB(void);
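+
+/* A typical configuration declares a small region table and hands it to
+ * ARM_MMU_Initialize() on the boot core. A hedged sketch (addresses, sizes
+ * and names are hypothetical platform values, not part of this API):
+ *
+ *     static const struct ARM_MMU_region regions[] = {
+ *         // hypothetical peripheral window, mapped as device memory
+ *         MMU_REGION_FLAT_ENTRY("UART", 0x30860000U, KB(4),
+ *                               MT_DEVICE_nGnRnE | MT_P_RW_U_NA | MT_DEFAULT_SECURE_STATE),
+ *         // hypothetical DMA buffer, mapped non-cacheable
+ *         MMU_REGION_FLAT_ENTRY("DMA", 0x80000000U, MB(1),
+ *                               MT_NORMAL_NC | MT_P_RW_U_NA | MT_DEFAULT_SECURE_STATE),
+ *     };
+ *
+ *     static const struct ARM_MMU_config mmu_config = {
+ *         .num_regions = sizeof(regions) / sizeof(regions[0]),
+ *         .mmu_regions = regions,
+ *         .num_os_ranges = 0U,      // OS text/data ranges would go here
+ *         .mmu_os_ranges = NULL,
+ *     };
+ *
+ *     ARM_MMU_Initialize(&mmu_config, true);   // boot core builds the tables
+ */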
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MMU_ARMV8A_H */
diff --git a/CMSIS/Core_AArch64/Include/timer_armv8a.h b/CMSIS/Core_AArch64/Include/timer_armv8a.h
new file mode 100644
index 000000000..56542483e
--- /dev/null
+++ b/CMSIS/Core_AArch64/Include/timer_armv8a.h
@@ -0,0 +1,145 @@
+/**************************************************************************//**
+ * @file timer_armv8a.h
+ * @brief CMSIS Cortex-Axx Generic Timer API header file
+ * @version V1.0.0
+ * @date 05. October 2021
+ ******************************************************************************/
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * Copyright 2021-2022 NXP
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TIMER_ARMV8A_H
+#define __TIMER_ARMV8A_H
+
+#include <stdint.h>
+#include <stdbool.h>
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+/*******************************************************************************
+ * Timer Data Types
+ ******************************************************************************/
+
+/** \brief ARMv8-A Generic Timer types */
+typedef enum _ARM_TIMER_TYPE {
+    ARM_TIMER_PHYSICAL,            /**< Physical Timer */
+    ARM_TIMER_VIRTUAL,             /**< Virtual Timer */
+    ARM_TIMER_HYPERVISOR_PHYSICAL, /**< Hypervisor Physical Timer */
+    ARM_TIMER_PHYSICAL_SECURE,     /**< Physical Secure Timer */
+} ARM_TIMER_type_t;
+
+
+/*******************************************************************************
+ * Timer Functions
+ ******************************************************************************/
+
+__STATIC_INLINE void ARM_TIMER_Initialize(ARM_TIMER_type_t timer)
+{
+    (void)timer; /* nothing to do yet; kept for API symmetry */
+}
+
+__STATIC_INLINE void ARM_TIMER_GetFreq(uint32_t *pVal)
+{
+    __MRS(CNTFRQ_EL0, pVal);
+}
+
+__STATIC_INLINE void ARM_TIMER_SetInterval(ARM_TIMER_type_t timer, uint32_t val)
+{
+    switch (timer) {
+    case ARM_TIMER_PHYSICAL:
+        __MSR(CNTP_TVAL_EL0, val);
+        break;
+    case ARM_TIMER_VIRTUAL:
+        __MSR(CNTV_TVAL_EL0, val);
+        break;
+    case ARM_TIMER_HYPERVISOR_PHYSICAL:
+        __MSR(CNTHP_TVAL_EL2, val);
+        break;
+    case ARM_TIMER_PHYSICAL_SECURE:
+        __MSR(CNTPS_TVAL_EL1, val);
+        break;
+    default:
+        break;
+    }
+
+    __DSB();
+    __ISB();
+}
+
+/* Reads back the down-counting timer value (TVAL), not the system counter */
+__STATIC_INLINE void ARM_TIMER_GetCount(ARM_TIMER_type_t timer, uint32_t *val)
+{
+    switch (timer) {
+    case ARM_TIMER_PHYSICAL:
+        __MRS(CNTP_TVAL_EL0, val);
+        break;
+    case ARM_TIMER_VIRTUAL:
+        __MRS(CNTV_TVAL_EL0, val);
+        break;
+    case ARM_TIMER_HYPERVISOR_PHYSICAL:
+        __MRS(CNTHP_TVAL_EL2, val);
+        break;
+    case ARM_TIMER_PHYSICAL_SECURE:
+        __MRS(CNTPS_TVAL_EL1, val);
+        break;
+    default:
+        break;
+    }
+}
+
+__STATIC_INLINE void ARM_TIMER_Start(ARM_TIMER_type_t timer, bool irq_enable)
+{
+    uint64_t ctl = 1UL << 0;     /* ENABLE */
+
+    if (!irq_enable)
+        ctl |= 1UL << 1;         /* IMASK: mask the timer interrupt */
+
+    switch (timer) {
+    case ARM_TIMER_PHYSICAL:
+        __MSR(CNTP_CTL_EL0, ctl);
+        break;
+    case ARM_TIMER_VIRTUAL:
+        __MSR(CNTV_CTL_EL0, ctl);
+        break;
+    case ARM_TIMER_HYPERVISOR_PHYSICAL:
+        __MSR(CNTHP_CTL_EL2, ctl);
+        break;
+    case ARM_TIMER_PHYSICAL_SECURE:
+        __MSR(CNTPS_CTL_EL1, ctl);
+        break;
+    default:
+        break;
+    }
+}
+
+/* Reads the free-running system counter (CNTPCT/CNTVCT) */
+__STATIC_FORCEINLINE void ARM_TIMER_GetCounterCount(ARM_TIMER_type_t timer, uint64_t *val)
+{
+    switch (timer) {
+    case ARM_TIMER_PHYSICAL:
+        __MRS(CNTPCT_EL0, val);
+        break;
+    case ARM_TIMER_VIRTUAL:
+        __MRS(CNTVCT_EL0, val);
+        break;
+    default:
+        break;
+    }
+}
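+
+/* Programming a periodic tick usually combines these helpers: read the
+ * counter frequency, load an interval, then start with the interrupt
+ * unmasked. A hedged sketch (the 100 Hz tick is an arbitrary example value):
+ *
+ *     uint32_t freq;
+ *
+ *     ARM_TIMER_GetFreq(&freq);                               // CNTFRQ_EL0
+ *     ARM_TIMER_SetInterval(ARM_TIMER_VIRTUAL, freq / 100U);  // ~10 ms
+ *     ARM_TIMER_Start(ARM_TIMER_VIRTUAL, true);               // enable, IRQ unmasked
+ *     // ...on each timer interrupt, reload with ARM_TIMER_SetInterval()...
+ */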
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __TIMER_ARMV8A_H */
diff --git a/CMSIS/Core_AArch64/Source/cache_armv8a.c b/CMSIS/Core_AArch64/Source/cache_armv8a.c
new file mode 100644
index 000000000..35c9c47ce
--- /dev/null
+++ b/CMSIS/Core_AArch64/Source/cache_armv8a.c
@@ -0,0 +1,137 @@
+/**************************************************************************//**
+ * @file cache_armv8a.c
+ * @brief CMSIS AARCH64 Cache Source file
+ * @version V1.0.0
+ * @date 21. January 2022
+ ******************************************************************************/
+
+/*
+ * Copyright 2022-2023 NXP
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#include "core_ca53.h"
+#include "cache_armv8a.h"
+
+#ifndef BIT_MASK
+#define BIT_MASK(n) ((1UL << (n)) - 1UL)
+#endif
+
+/* CLIDR_EL1 */
+#define CLIDR_EL1_LOC_SHIFT 24
+#define CLIDR_EL1_LOC_MASK BIT_MASK(3)
+#define CLIDR_EL1_CTYPE_SHIFT(l) ((l) * 3)
+#define CLIDR_EL1_CTYPE_MASK BIT_MASK(3)
+
+/* CCSIDR_EL1 */
+#define CCSIDR_EL1_LN_SZ_SHIFT 0
+#define CCSIDR_EL1_LN_SZ_MASK BIT_MASK(3)
+#define CCSIDR_EL1_WAYS_SHIFT 3
+#define CCSIDR_EL1_WAYS_MASK BIT_MASK(10)
+#define CCSIDR_EL1_SETS_SHIFT 13
+#define CCSIDR_EL1_SETS_MASK BIT_MASK(15)
+
+/* CSSELR_EL1 */
+#define CSSELR_EL1_LEVEL_SHIFT 1
+#define CSSELR_EL1_LEVEL_MASK BIT_MASK(3)
+
+enum cache_ops {
+    CACHE_OP_C,  /* Clean */
+    CACHE_OP_I,  /* Invalidate */
+    CACHE_OP_CI  /* Clean and Invalidate */
+};
+
+/*
+ * Operation for all data cache to PoC
+ * op: CACHE_OP_C:  clean
+ *     CACHE_OP_I:  invalidate
+ *     CACHE_OP_CI: clean and invalidate
+ */
+int dcache_all(enum cache_ops op)
+{
+    uint32_t clidr_el1, csselr_el1, ccsidr_el1;
+    uint32_t num_ways, num_sets, set, way, operand;
+    uint8_t loc, cache_type, cache_level, set_shift, way_shift;
+
+    __DSB();
+
+    __MRS(CLIDR_EL1, &clidr_el1);
+
+    loc = (clidr_el1 >> CLIDR_EL1_LOC_SHIFT) & CLIDR_EL1_LOC_MASK;
+    if (!loc)
+        return 0;
+
+    for (cache_level = 0; cache_level < loc; cache_level++) {
+        cache_type = (clidr_el1 >> CLIDR_EL1_CTYPE_SHIFT(cache_level)) &
+                     CLIDR_EL1_CTYPE_MASK;
+        /* No Data or Unified cache at this level */
+        if (cache_type < 2)
+            continue;
+
+        /* Select cache level and Data/Unified cache */
+        csselr_el1 = (cache_level & CSSELR_EL1_LEVEL_MASK) <<
+                     CSSELR_EL1_LEVEL_SHIFT;
+        __MSR(CSSELR_EL1, csselr_el1);
+        __ISB();
+
+        __MRS(CCSIDR_EL1, &ccsidr_el1);
+        /* line size = 2^(LineSize+4) bytes, so the set index starts at bit (LineSize+4) */
+        set_shift = ((ccsidr_el1 >> CCSIDR_EL1_LN_SZ_SHIFT) &
+                     CCSIDR_EL1_LN_SZ_MASK) + 4;
+        num_ways = ((ccsidr_el1 >> CCSIDR_EL1_WAYS_SHIFT) &
+                    CCSIDR_EL1_WAYS_MASK) + 1;
+        num_sets = ((ccsidr_el1 >> CCSIDR_EL1_SETS_SHIFT) &
+                    CCSIDR_EL1_SETS_MASK) + 1;
+        /* 32-log2(ways), bit position of way in DC operand */
+        way_shift = __CLZ(num_ways - 1);
+
+        for (set = 0; set < num_sets; set++) {
+            for (way = 0; way < num_ways; way++) {
+                /* cache level, aligned to pos in DC operand */
+                operand = (cache_level << 1);
+                /* set number, aligned to pos in DC operand */
+                operand |= set << set_shift;
+                /* way number, aligned to pos in DC operand */
+                /* No way number field for direct-mapped cache */
+                if (way_shift < 32)
+                    operand |= way << way_shift;
+
+                switch (op) {
+                case CACHE_OP_C:
+                    dcache_ops(csw, operand);
+                    break;
+                case CACHE_OP_I:
+                    dcache_ops(isw, operand);
+                    break;
+                case CACHE_OP_CI:
+                    dcache_ops(cisw, operand);
+                    break;
+                default:
+                    return -1;
+                }
+            }
+        }
+    }
+
+    __DSB();
+
+    /* Restore csselr_el1 to level 0 */
+    __MSR(CSSELR_EL1, 0);
+    __ISB();
+
+    return 0;
+}
+
+void dcache_clean_all(void)
+{
+    dcache_all(CACHE_OP_C);
+}
+
+void dcache_invalidate_all(void)
+{
+    dcache_all(CACHE_OP_I);
+}
+
+void dcache_clean_invalidate_all(void)
+{
+    dcache_all(CACHE_OP_CI);
+}
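+
+/* The by-range helpers from cache_armv8a.h are what drivers typically use
+ * around DMA. A hedged sketch (buffers and driver calls are hypothetical):
+ *
+ *     // CPU -> device: make CPU writes visible to a non-coherent DMA master
+ *     dcache_clean_range((uintptr_t)tx_buf, tx_len);
+ *     start_dma_tx(tx_buf, tx_len);            // hypothetical driver call
+ *
+ *     // device -> CPU: drop stale lines before reading DMA-written data
+ *     dcache_invalidate_range((uintptr_t)rx_buf, rx_len);
+ *     process(rx_buf, rx_len);                 // hypothetical consumer
+ *
+ * The *_all() variants above operate by set/way and only affect the local
+ * core's caches, e.g. during early init or power-down sequences.
+ */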
diff --git a/CMSIS/Core_AArch64/Source/mmu_armv8a.c b/CMSIS/Core_AArch64/Source/mmu_armv8a.c
new file mode 100644
index 000000000..6188aa64b
--- /dev/null
+++ b/CMSIS/Core_AArch64/Source/mmu_armv8a.c
@@ -0,0 +1,716 @@
+/**************************************************************************//**
+ * @file mmu_armv8a.c
+ * @brief CMSIS Cortex-Axx MMU Source file
+ * @version V1.0.0
+ * @date 20. October 2021
+ ******************************************************************************/
+/*
+ * Copyright 2019 Broadcom
+ * The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * Copyright 2021-2023 NXP
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+#include <stdbool.h>
+
+#include "core_ca53.h"
+#include "mmu_armv8a.h"
+
+
+/*******************************************************************************
+ * Definitions
+ ******************************************************************************/
+
+#define __ASSERT(op, fmt, ...) \
+    do { \
+        if (!(op)) { \
+            while (1) \
+                /* wait here */; \
+        } \
+    } while (0)
+
+#ifndef MAX
+#define MAX(a, b) (((a) > (b)) ? (a) : (b))
+#endif
+
+#ifndef CONFIG_MMU_PAGE_SIZE
+#define CONFIG_MMU_PAGE_SIZE 4096
+#endif
+#ifndef CONFIG_MAX_XLAT_TABLES
+#define CONFIG_MAX_XLAT_TABLES 32
+#endif
+#ifndef CONFIG_ARM64_PA_BITS
+#define CONFIG_ARM64_PA_BITS 48
+#endif
+#ifndef CONFIG_ARM64_VA_BITS
+#define CONFIG_ARM64_VA_BITS 48
+#endif
+
+#define LOG_ERR(fmt, ...) (void)(fmt)
+#define ARG_UNUSED(x) (void)(x)
+
+#define BITS_PER_LONG (__CHAR_BIT__ * __SIZEOF_LONG__)
+/* Contiguous bit mask from bit l to bit h, inclusive */
+#define GENMASK(h, l) \
+    (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
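+
+/* For example, GENMASK(11, 2) == 0xFFC (bits 2..11 set); the descriptor
+ * attribute masks below are built this way. */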
+
+/*******************************************************************************
+ * from zephyr:/arch/arm/core/aarch64/mmu/arm_mmu.h:
+ ******************************************************************************/
+
+/* Set the flag below to 1 to get debug prints */
+//#define MMU_DEBUG_PRINTS 1
+
+#if defined (MMU_DEBUG_PRINTS) && (MMU_DEBUG_PRINTS == 1)
+/* To dump page table entries while filling them, set DUMP_PTE to 1 */
+#define DUMP_PTE 0
+ #define MMU_DEBUG(fmt, ...) PRINTF(fmt, ##__VA_ARGS__)
+#else
+ #define MMU_DEBUG(...)
+#endif
+
+/*
+ * 48-bit address with 4KB granule size:
+ *
+ * +------------+------------+------------+------------+-----------+
+ * | VA [47:39] | VA [38:30] | VA [29:21] | VA [20:12] | VA [11:0] |
+ * +---------------------------------------------------------------+
+ * |     L0     |     L1     |     L2     |     L3     | block off |
+ * +------------+------------+------------+------------+-----------+
+ */
+
+/* Only 4K granule is supported */
+#define PAGE_SIZE_SHIFT 12U
+
+/* 48-bit VA address */
+#define VA_SIZE_SHIFT_MAX 48U
+
+/* Maximum 4 XLAT table levels (L0 - L3) */
+#define XLAT_LAST_LEVEL 3U
+
+/* The VA shift of L3 depends on the granule size */
+#define L3_XLAT_VA_SIZE_SHIFT PAGE_SIZE_SHIFT
+
+/* Number of VA bits to assign to each table (9 bits) */
+#define Ln_XLAT_VA_SIZE_SHIFT (PAGE_SIZE_SHIFT - 3)
+
+/* Starting bit in the VA address for each level */
+#define L2_XLAT_VA_SIZE_SHIFT (L3_XLAT_VA_SIZE_SHIFT + Ln_XLAT_VA_SIZE_SHIFT)
+#define L1_XLAT_VA_SIZE_SHIFT (L2_XLAT_VA_SIZE_SHIFT + Ln_XLAT_VA_SIZE_SHIFT)
+#define L0_XLAT_VA_SIZE_SHIFT (L1_XLAT_VA_SIZE_SHIFT + Ln_XLAT_VA_SIZE_SHIFT)
+
+#define LEVEL_TO_VA_SIZE_SHIFT(level) \
+    (PAGE_SIZE_SHIFT + (Ln_XLAT_VA_SIZE_SHIFT * \
+    (XLAT_LAST_LEVEL - (level))))
+
+/* Number of entries for each table (512) */
+#define Ln_XLAT_NUM_ENTRIES ((1U << PAGE_SIZE_SHIFT) / 8U)
+
+/* Virtual Address Index within a given translation table level */
+#define XLAT_TABLE_VA_IDX(va_addr, level) \
+    (((va_addr) >> LEVEL_TO_VA_SIZE_SHIFT(level)) & (Ln_XLAT_NUM_ENTRIES - 1))
+
+/*
+ * Calculate the initial translation table level from CONFIG_ARM64_VA_BITS
+ * For a 4 KB page size:
+ *
+ * (va_bits <= 21)       - base level 3
+ * (22 <= va_bits <= 30) - base level 2
+ * (31 <= va_bits <= 39) - base level 1
+ * (40 <= va_bits <= 48) - base level 0
+ */
+#define GET_BASE_XLAT_LEVEL(va_bits) \
+    (((va_bits) > L0_XLAT_VA_SIZE_SHIFT) ? 0U \
+    : ((va_bits) > L1_XLAT_VA_SIZE_SHIFT) ? 1U \
+    : ((va_bits) > L2_XLAT_VA_SIZE_SHIFT) ? 2U : 3U)
+
+/* Level for the base XLAT */
+#define BASE_XLAT_LEVEL GET_BASE_XLAT_LEVEL(CONFIG_ARM64_VA_BITS)
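+
+/* Worked example, assuming the default 48-bit VA: BASE_XLAT_LEVEL is 0, and
+ * for va_addr = 0x0000008040201000:
+ *   XLAT_TABLE_VA_IDX(va, 0) = (va >> 39) & 511 = 1
+ *   XLAT_TABLE_VA_IDX(va, 1) = (va >> 30) & 511 = 1
+ *   XLAT_TABLE_VA_IDX(va, 2) = (va >> 21) & 511 = 1
+ *   XLAT_TABLE_VA_IDX(va, 3) = (va >> 12) & 511 = 1
+ * i.e. each level consumes 9 VA bits on the walk down to the 4 KB page. */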
+
+#if (CONFIG_ARM64_PA_BITS == 48)
+#define TCR_PS_BITS TCR_PS_BITS_256TB
+#elif (CONFIG_ARM64_PA_BITS == 44)
+#define TCR_PS_BITS TCR_PS_BITS_16TB
+#elif (CONFIG_ARM64_PA_BITS == 42)
+#define TCR_PS_BITS TCR_PS_BITS_4TB
+#elif (CONFIG_ARM64_PA_BITS == 40)
+#define TCR_PS_BITS TCR_PS_BITS_1TB
+#elif (CONFIG_ARM64_PA_BITS == 36)
+#define TCR_PS_BITS TCR_PS_BITS_64GB
+#else
+#define TCR_PS_BITS TCR_PS_BITS_4GB
+#endif
+
+/* Upper and lower attributes mask for page/block descriptor */
+#define DESC_ATTRS_UPPER_MASK GENMASK(63, 51)
+#define DESC_ATTRS_LOWER_MASK GENMASK(11, 2)
+
+#define DESC_ATTRS_MASK (DESC_ATTRS_UPPER_MASK | DESC_ATTRS_LOWER_MASK)
+
+/******************************************************************************/
+
+static uint64_t xlat_tables[CONFIG_MAX_XLAT_TABLES * Ln_XLAT_NUM_ENTRIES]
+    __aligned(Ln_XLAT_NUM_ENTRIES * sizeof(uint64_t));
+static uint16_t xlat_use_count[CONFIG_MAX_XLAT_TABLES];
+
+/* Returns a reference to a free table */
+static uint64_t *new_table(void)
+{
+    unsigned int i;
+
+    /* Look for a free table. */
+    for (i = 0; i < CONFIG_MAX_XLAT_TABLES; i++) {
+        if (xlat_use_count[i] == 0) {
+            xlat_use_count[i] = 1;
+            return &xlat_tables[i * Ln_XLAT_NUM_ENTRIES];
+        }
+    }
+
+    LOG_ERR("CONFIG_MAX_XLAT_TABLES is too small");
+    return NULL;
+}
+
+static inline unsigned int table_index(uint64_t *pte)
+{
+    unsigned int i = (pte - xlat_tables) / Ln_XLAT_NUM_ENTRIES;
+
+    __ASSERT(i < CONFIG_MAX_XLAT_TABLES, "table %p out of range", pte);
+    return i;
+}
+
+/* Makes a table free for reuse. */
+static void free_table(uint64_t *table)
+{
+    unsigned int i = table_index(table);
+
+    MMU_DEBUG("freeing table [%d]%p\r\n", i, table);
+    __ASSERT(xlat_use_count[i] == 1, "table still in use");
+    xlat_use_count[i] = 0;
+}
+
+/* Adjusts usage count and returns current count. */
+static int table_usage(uint64_t *table, int adjustment)
+{
+    unsigned int i = table_index(table);
+
+    xlat_use_count[i] += adjustment;
+    __ASSERT(xlat_use_count[i] > 0, "usage count underflow");
+    return xlat_use_count[i];
+}
+
+static inline bool is_table_unused(uint64_t *table)
+{
+    return table_usage(table, 0) == 1;
+}
+
+static inline bool is_free_desc(uint64_t desc)
+{
+    return (desc & PTE_DESC_TYPE_MASK) == PTE_INVALID_DESC;
+}
+
+static inline bool is_table_desc(uint64_t desc, unsigned int level)
+{
+    return level != XLAT_LAST_LEVEL &&
+           (desc & PTE_DESC_TYPE_MASK) == PTE_TABLE_DESC;
+}
+
+static inline bool is_block_desc(uint64_t desc)
+{
+    return (desc & PTE_DESC_TYPE_MASK) == PTE_BLOCK_DESC;
+}
+
+static inline uint64_t *pte_desc_table(uint64_t desc)
+{
+    uint64_t address = desc & GENMASK(47, PAGE_SIZE_SHIFT);
+
+    return (uint64_t *)address;
+}
+
+static inline bool is_desc_superset(uint64_t desc1, uint64_t desc2,
+                                    unsigned int level)
+{
+    uint64_t mask = DESC_ATTRS_MASK | GENMASK(47, LEVEL_TO_VA_SIZE_SHIFT(level));
+
+    return (desc1 & mask) == (desc2 & mask);
+}
+
+#if DUMP_PTE
+static void debug_show_pte(uint64_t *pte, unsigned int level)
+{
+    MMU_DEBUG("%.*s", level * 2, ". . . ");
+    MMU_DEBUG("[%d]%p: ", table_index(pte), pte);
+
+    if (is_free_desc(*pte)) {
+        MMU_DEBUG("---\r\n");
+        return;
+    }
+
+    if (is_table_desc(*pte, level)) {
+        uint64_t *table = pte_desc_table(*pte);
+
+        MMU_DEBUG("[Table] [%d]%p\r\n", table_index(table), table);
+        return;
+    }
+
+    if (is_block_desc(*pte)) {
+        MMU_DEBUG("[Block] ");
+    } else {
+        MMU_DEBUG("[Page] ");
+    }
+
+    uint8_t mem_type = (*pte >> 2) & MT_TYPE_MASK;
+
+    MMU_DEBUG((mem_type == MT_NORMAL) ? "MEM" :
+              ((mem_type == MT_NORMAL_NC) ? "NC" : "DEV"));
+    MMU_DEBUG((*pte & PTE_BLOCK_DESC_AP_RO) ? "-RO" : "-RW");
+    MMU_DEBUG((*pte & PTE_BLOCK_DESC_NS) ? "-NS" : "-S");
+    MMU_DEBUG((*pte & PTE_BLOCK_DESC_AP_ELx) ? "-ELx" : "-ELh");
+    MMU_DEBUG((*pte & PTE_BLOCK_DESC_PXN) ? "-PXN" : "-PX");
+    MMU_DEBUG((*pte & PTE_BLOCK_DESC_UXN) ? "-UXN" : "-UX");
+    MMU_DEBUG("\r\n");
+}
+#else
+static inline void debug_show_pte(uint64_t *pte, unsigned int level) { }
+#endif
+
+static void set_pte_table_desc(uint64_t *pte, uint64_t *table, unsigned int level)
+{
+    /* Point pte to new table */
+    *pte = PTE_TABLE_DESC | (uint64_t)table;
+    debug_show_pte(pte, level);
+}
+
+static void set_pte_block_desc(uint64_t *pte, uint64_t desc, unsigned int level)
+{
+    if (desc) {
+        desc |= (level == XLAT_LAST_LEVEL) ?
PTE_PAGE_DESC : PTE_BLOCK_DESC; + } + *pte = desc; + debug_show_pte(pte, level); +} + +static uint64_t *expand_to_table(uint64_t *pte, unsigned int level) +{ + uint64_t *table; + + __ASSERT(level < XLAT_LAST_LEVEL, "can't expand last level"); + + table = new_table(); + if (!table) { + return NULL; + } + + if (!is_free_desc(*pte)) { + /* + * If entry at current level was already populated + * then we need to reflect that in the new table. + */ + uint64_t desc = *pte; + unsigned int i, stride_shift; + + MMU_DEBUG("expanding PTE 0x%016llx into table [%d]%p\r\n", + desc, table_index(table), table); + __ASSERT(is_block_desc(desc), ""); + + if (level + 1 == XLAT_LAST_LEVEL) { + desc |= PTE_PAGE_DESC; + } + + stride_shift = LEVEL_TO_VA_SIZE_SHIFT(level + 1); + for (i = 0; i < Ln_XLAT_NUM_ENTRIES; i++) { + table[i] = desc | (i << stride_shift); + } + table_usage(table, Ln_XLAT_NUM_ENTRIES); + } else { + /* + * Adjust usage count for parent table's entry + * that will no longer be free. + */ + table_usage(pte, 1); + } + + /* Link the new table in place of the pte it replaces */ + set_pte_table_desc(pte, table, level); + table_usage(table, 1); + + return table; +} + +static int set_mapping(struct ARM_MMU_ptables *ptables, + uintptr_t virt, size_t size, + uint64_t desc, bool may_overwrite) +{ + uint64_t *pte, *ptes[XLAT_LAST_LEVEL + 1]; + uint64_t level_size; + uint64_t *table = ptables->base_xlat_table; + unsigned int level = BASE_XLAT_LEVEL; + int ret = 0; + + while (size) { + __ASSERT(level <= XLAT_LAST_LEVEL, + "max translation table level exceeded\r\n"); + + /* Locate PTE for given virtual address and page table level */ + pte = &table[XLAT_TABLE_VA_IDX(virt, level)]; + ptes[level] = pte; + + if (is_table_desc(*pte, level)) { + /* Move to the next translation table level */ + level++; + table = pte_desc_table(*pte); + continue; + } + + if (!may_overwrite && !is_free_desc(*pte)) { + /* the entry is already allocated */ + LOG_ERR("entry already in use: " + "level %d pte %p *pte 0x%016llx", + level, pte, *pte); + ret = -1; + break; + } + + level_size = 1ULL << LEVEL_TO_VA_SIZE_SHIFT(level); + + if (is_desc_superset(*pte, desc, level)) { + /* This block already covers our range */ + level_size -= (virt & (level_size - 1)); + if (level_size > size) { + level_size = size; + } + goto move_on; + } + + if ((size < level_size) || (virt & (level_size - 1))) { + /* Range doesn't fit, create subtable */ + table = expand_to_table(pte, level); + if (!table) { + ret = -1; + break; + } + level++; + continue; + } + + /* Adjust usage count for corresponding table */ + if (is_free_desc(*pte)) { + table_usage(pte, 1); + } + if (!desc) { + table_usage(pte, -1); + } + /* Create (or erase) block/page descriptor */ + set_pte_block_desc(pte, desc, level); + + /* recursively free unused tables if any */ + while (level != BASE_XLAT_LEVEL && + is_table_unused(pte)) { + free_table(pte); + pte = ptes[--level]; + set_pte_block_desc(pte, 0, level); + table_usage(pte, -1); + } + +move_on: + virt += level_size; + desc += desc ? level_size : 0; + size -= level_size; + + /* Range is mapped, start again for next range */ + table = ptables->base_xlat_table; + level = BASE_XLAT_LEVEL; + } + + return ret; +} + +static uint64_t get_region_desc(uint32_t attrs) +{ + unsigned int mem_type; + uint64_t desc = 0; + + /* NS bit for security memory access from secure state */ + desc |= (attrs & MT_NS) ? 
PTE_BLOCK_DESC_NS : 0;
+
+    /*
+     * AP bits for EL0 / ELh Data access permission
+     *
+     * AP[2:1]  ELh  EL0
+     * +--------------------+
+     *   00     RW   NA
+     *   01     RW   RW
+     *   10     RO   NA
+     *   11     RO   RO
+     */
+
+    /* AP bits for Data access permission */
+    desc |= (attrs & MT_RW) ? PTE_BLOCK_DESC_AP_RW : PTE_BLOCK_DESC_AP_RO;
+
+    /* Mirror permissions to EL0 */
+    desc |= (attrs & MT_RW_AP_ELx) ?
+            PTE_BLOCK_DESC_AP_ELx : PTE_BLOCK_DESC_AP_EL_HIGHER;
+
+    /* the access flag */
+    desc |= PTE_BLOCK_DESC_AF;
+
+    /* memory attribute index field */
+    mem_type = MT_TYPE(attrs);
+    desc |= PTE_BLOCK_DESC_MEMTYPE(mem_type);
+
+    switch (mem_type) {
+    case MT_DEVICE_nGnRnE:
+    case MT_DEVICE_nGnRE:
+    case MT_DEVICE_GRE:
+        /* Accesses to device memory and non-cacheable memory are coherent
+         * for all observers in the system and are treated as
+         * outer-shareable, so for these types of memory it is not
+         * strictly necessary to set the shareability field.
+         */
+        desc |= PTE_BLOCK_DESC_OUTER_SHARE;
+        /* Map device memory as execute-never */
+        desc |= PTE_BLOCK_DESC_PXN;
+        desc |= PTE_BLOCK_DESC_UXN;
+        break;
+    case MT_NORMAL_NC:
+    case MT_NORMAL:
+        /* Mark normal RW memory as execute-never */
+        if ((attrs & MT_RW) || (attrs & MT_P_EXECUTE_NEVER))
+            desc |= PTE_BLOCK_DESC_PXN;
+
+        if (((attrs & MT_RW) && (attrs & MT_RW_AP_ELx)) ||
+            (attrs & MT_U_EXECUTE_NEVER))
+            desc |= PTE_BLOCK_DESC_UXN;
+
+        if (mem_type == MT_NORMAL)
+            desc |= PTE_BLOCK_DESC_INNER_SHARE;
+        else
+            desc |= PTE_BLOCK_DESC_OUTER_SHARE;
+        break;
+    default:
+        break;
+    }
+
+    return desc;
+}
+
+static int add_map(struct ARM_MMU_ptables *ptables, const char *name,
+                   uintptr_t phys, uintptr_t virt, size_t size, uint32_t attrs)
+{
+    uint64_t desc = get_region_desc(attrs);
+    bool may_overwrite = !(attrs & MT_NO_OVERWRITE);
+
+    MMU_DEBUG("mmap [%s]: virt %lx phys %lx size %lx attr %llx\r\n",
+              name, virt, phys, size, desc);
+    __ASSERT(((virt | phys | size) & (CONFIG_MMU_PAGE_SIZE - 1)) == 0,
+             "address/size are not page aligned\r\n");
+    desc |= phys;
+    return set_mapping(ptables, virt, size, desc, may_overwrite);
+}
+
+/* OS execution regions with appropriate attributes */
+
+static inline void add_ARM_MMU_flat_range(struct ARM_MMU_ptables *ptables,
+                                          const struct ARM_MMU_flat_range *range,
+                                          uint32_t extra_flags)
+{
+    uintptr_t address = (uintptr_t)range->start;
+    size_t size = (uintptr_t)range->end - address;
+
+    if (size) {
+        add_map(ptables, range->name, address, address,
+                size, range->attrs | extra_flags);
+    }
+}
+
+static inline void add_ARM_MMU_region(struct ARM_MMU_ptables *ptables,
+                                      const struct ARM_MMU_region *region,
+                                      uint32_t extra_flags)
+{
+    if (region->size || region->attrs) {
+        add_map(ptables, region->name, region->base_pa, region->base_va,
+                region->size, region->attrs | extra_flags);
+    }
+}
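+
+/* To illustrate get_region_desc(): attrs = MT_NORMAL | MT_P_RW_U_NA
+ * (privileged RW, non-executable normal memory) yields a descriptor with
+ * AP = RW for higher ELs only, AF set, MAIR index MT_NORMAL, inner-shareable,
+ * and both PXN and UXN set; add_map() then ORs in the physical address, and
+ * set_mapping() adds the block/page type bits. */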
+
+static void setup_page_tables(const struct ARM_MMU_config *MMU_config,
+                              struct ARM_MMU_ptables *ptables)
+{
+    unsigned int index;
+    const struct ARM_MMU_flat_range *range;
+    const struct ARM_MMU_region *region;
+    uintptr_t max_va = 0, max_pa = 0;
+
+    MMU_DEBUG("xlat tables:\r\n");
+    for (index = 0; index < CONFIG_MAX_XLAT_TABLES; index++)
+        MMU_DEBUG("%d: %p\r\n", index, xlat_tables + index * Ln_XLAT_NUM_ENTRIES);
+
+    for (index = 0; index < MMU_config->num_regions; index++) {
+        region = &MMU_config->mmu_regions[index];
+        max_va = MAX(max_va, region->base_va + region->size);
+        max_pa = MAX(max_pa, region->base_pa + region->size);
+    }
+
+    __ASSERT(max_va <= (1ULL << CONFIG_ARM64_VA_BITS),
+             "Maximum VA not supported\r\n");
+    __ASSERT(max_pa <= (1ULL << CONFIG_ARM64_PA_BITS),
+             "Maximum PA not supported\r\n");
+
+    /* setup translation table for OS execution regions */
+    for (index = 0; index < MMU_config->num_os_ranges; index++) {
+        range = &MMU_config->mmu_os_ranges[index];
+        add_ARM_MMU_flat_range(ptables, range, 0);
+    }
+
+    /*
+     * Create translation tables for user provided platform regions.
+     * Those must not conflict with our default mapping.
+     */
+    for (index = 0; index < MMU_config->num_regions; index++) {
+        region = &MMU_config->mmu_regions[index];
+        add_ARM_MMU_region(ptables, region, MT_NO_OVERWRITE);
+    }
+
+    ARM_MMU_InvalidateTLB();
+}
+
+/* Translation table control register settings */
+static uint64_t get_tcr(int el)
+{
+    uint64_t tcr;
+    uint64_t va_bits = CONFIG_ARM64_VA_BITS;
+    uint64_t tcr_ps_bits;
+
+    tcr_ps_bits = TCR_PS_BITS;
+
+    if (el == 1) {
+        tcr = (tcr_ps_bits << TCR_EL1_IPS_SHIFT);
+        /*
+         * TCR_EL1.EPD1: Disable translation table walk for addresses
+         * that are translated using TTBR1_EL1.
+         */
+        tcr |= TCR_EPD1_DISABLE;
+    } else {
+        tcr = (tcr_ps_bits << TCR_EL3_PS_SHIFT);
+    }
+
+    tcr |= TCR_T0SZ(va_bits);
+    /*
+     * Translation table walk is cacheable, inner/outer WBWA
+     */
+    tcr |= TCR_TG0_4K | TCR_ORGN_WBWA | TCR_IRGN_WBWA;
+
+    return tcr;
+}
+
+static void enable_mmu_el1(struct ARM_MMU_ptables *ptables, unsigned int flags)
+{
+    ARG_UNUSED(flags);
+    uint64_t val;
+
+    /* Set MAIR, TCR and TTBR registers */
+    __MSR(MAIR_EL1, MEMORY_ATTRIBUTES);
+    __MSR(TCR_EL1, get_tcr(1));
+    __MSR(TTBR0_EL1, (uint64_t)ptables->base_xlat_table);
+
+    /* Ensure these changes are seen before the MMU is enabled */
+    __ISB();
+
+    /* Enable the MMU and caches */
+    __MRS(SCTLR_EL1, &val);
+    __MSR(SCTLR_EL1, val | SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT);
+
+    /* Ensure the MMU enable takes effect immediately */
+    __ISB();
+
+    MMU_DEBUG("MMU enabled with caches\r\n");
+}
+
+/* ARM MMU Driver Initial Setup */
+
+static struct ARM_MMU_ptables kernel_ptables;
+
+/*
+ * @brief MMU default configuration
+ *
+ * This function provides the default configuration mechanism for the Memory
+ * Management Unit (MMU).
+ */
+void ARM_MMU_Initialize(const struct ARM_MMU_config *MMU_config,
+                        bool is_primary_core)
+{
+    unsigned int flags = 0;
+    uint64_t val;
+
+    __ASSERT(CONFIG_MMU_PAGE_SIZE == KB(4),
+             "Only 4K page size is supported\r\n");
+
+    __MRS(CURRENTEL, &val);
+    __ASSERT(GET_EL(val) == MODE_EL1,
+             "Exception level not EL1, MMU not enabled!\r\n");
+
+    /* Ensure that the MMU is not already enabled */
+    __MRS(SCTLR_EL1, &val);
+    __ASSERT((val & SCTLR_M_BIT) == 0, "MMU is already enabled\r\n");
+
+    /*
+     * Only the booting core sets up the page tables.
+     */
+    if (is_primary_core) {
+        kernel_ptables.base_xlat_table = new_table();
+        setup_page_tables(MMU_config, &kernel_ptables);
+    }
+
+    /* currently only EL1 is supported */
+    enable_mmu_el1(&kernel_ptables, flags);
+}
+
+/*
+ * @brief MMU mapping setup
+ *
+ * This function sets a new MMU region mapping
+ */
+int ARM_MMU_AddMap(const char *name, uintptr_t phys, uintptr_t virt, size_t size, uint32_t attrs)
+{
+    int ret = -1;
+
+    if ((virt + size) > (1ULL << CONFIG_ARM64_VA_BITS))
+        goto exit;
+
+    if ((phys + size) > (1ULL << CONFIG_ARM64_PA_BITS))
+        goto exit;
+
+    if (size) {
+        ret = add_map(&kernel_ptables, name, phys, virt, size, attrs);
+
+        ARM_MMU_InvalidateTLB();
+    }
+
+exit:
+    return ret;
+}
+
+/*
+ * @brief MMU TLB invalidation
+ *
+ * This function invalidates the entire unified TLB
+ */
+void ARM_MMU_InvalidateTLB(void)
+{
+    __DSB();
+    __ASM volatile("tlbi vmalle1");
+    __DSB();
+    __ISB();
+}
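+
+/* Runtime remapping example: after boot, a driver can map a device window it
+ * discovered late. A hedged sketch (base address and size are hypothetical;
+ * both must be page aligned):
+ *
+ *     int ret = ARM_MMU_AddMap("gpu-mmio",
+ *                              0x38000000U,   // physical base (hypothetical)
+ *                              0x38000000U,   // identity-mapped virtual base
+ *                              MB(16),
+ *                              MT_DEVICE_nGnRE | MT_P_RW_U_NA |
+ *                              MT_DEFAULT_SECURE_STATE | MT_NO_OVERWRITE);
+ *     // ret == 0 on success; MT_NO_OVERWRITE makes the call fail instead of
+ *     // silently replacing an existing mapping.
+ */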