From 95ed80c83998ce72a88720577feb645f7ad42e2a Mon Sep 17 00:00:00 2001 From: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> Date: Wed, 13 Oct 2021 14:33:15 -0500 Subject: [PATCH 01/96] Add detection for EDMM support in kernel Signed-off-by: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> --- psw/urts/linux/edmm_utility.cpp | 15 ++++++++++++--- psw/urts/linux/enclave_creator_hw.cpp | 1 - 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/psw/urts/linux/edmm_utility.cpp b/psw/urts/linux/edmm_utility.cpp index 69e701a6b..08494f6c1 100644 --- a/psw/urts/linux/edmm_utility.cpp +++ b/psw/urts/linux/edmm_utility.cpp @@ -41,7 +41,7 @@ #include #include #include - +#include #define SGX_URTS_CMD "for f in $(find /usr/$(basename $(gcc -print-multi-os-directory)) -name 'libsgx_urts.so' 2> /dev/null); do strings $f|grep 'SGX_URTS_VERSION_2'; done" #define SGX_CPUID 0x12 @@ -236,8 +236,17 @@ extern "C" bool is_cpu_support_edmm() */ extern "C" bool is_driver_support_edmm(int hdevice) { - if (-1 == hdevice) - return false; + if (-1 == hdevice){ + if(!open_se_device(SGX_DRIVER_IN_KERNEL, &hdevice)) + return false; + struct sgx_page_modp ioc; + memset(&ioc, 0, sizeof(ioc)); + + int ret = ioctl(hdevice, SGX_IOC_PAGE_MODP, &ioc); + bool supported = ret != -1 || (errno != ENOTTY); + close_se_device(&hdevice); + return supported; + } sgx_modification_param param; param.flags = 0; diff --git a/psw/urts/linux/enclave_creator_hw.cpp b/psw/urts/linux/enclave_creator_hw.cpp index d956e8181..9b2f70f56 100644 --- a/psw/urts/linux/enclave_creator_hw.cpp +++ b/psw/urts/linux/enclave_creator_hw.cpp @@ -397,7 +397,6 @@ int EnclaveCreatorHW::remove_range(uint64_t fromaddr, uint64_t numpages) return SGX_SUCCESS; } - //EDMM is supported if and only if all of the following requirements are met: //1. We operate in HW mode //2. CPU has EDMM support From 9da656c4a108fb97b6a8dd1aa831f6a473be533c Mon Sep 17 00:00:00 2001 From: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> Date: Tue, 24 Aug 2021 00:24:36 -0500 Subject: [PATCH 02/96] EDMM support using kernel interfaces Add a runtime agnostic enclave memory manager implementation in sdk/emm. The EMM APIs are based on design proposed in this PR: https://github.com/openenclave/openenclave/pull/3991 Enclave common loader changes are in psw/enclave_common/sgx_mm_ocalls.cpp, which are basically OCall support needed for EMM. 
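For illustration only (not part of this patch), an enclave linked against sdk/emm
would drive dynamic memory roughly as sketched below. This is a usage sketch; the
authoritative signatures and flags are the ones declared in
sdk/emm/include/sgx_mm.h and may still change together with the design PR.

    #include "sgx_mm.h"

    /* Reserve 16 pages, committed on demand via #PF, with no custom
     * page-fault handler registered for the region. */
    void *base = NULL;
    int rc = sgx_mm_alloc(NULL, 16 * 0x1000, SGX_EMA_COMMIT_ON_DEMAND,
                          NULL, NULL, &base);
    if (rc == 0) {
        /* ... use the memory; pages are EAUGed and EACCEPTed lazily ... */
        sgx_mm_dealloc(base, 16 * 0x1000); /* trim and return the range */
    }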
Intel SDK runtime specific support are implemented as runtime abstraction layer: sdk/trts/ema_rt.c Current limitations, steps to build and test are documented in sdk/emm/README.md Signed-off-by: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> Signed-off-by: Xiaofeng Xu <20158212+xxu36@users.noreply.github.com> --- .../SampleEnclave/Enclave/Enclave.config.xml | 20 +- .../SampleEnclave/Enclave/config.01.xml | 2 +- .../SampleEnclave/Enclave/config.02.xml | 2 +- .../SampleEnclave/Enclave/config.03.xml | 2 +- .../SampleEnclave/Enclave/config.04.xml | 2 +- common/inc/internal/arch.h | 11 + common/inc/internal/bit_array.h | 1 + common/inc/internal/ema.h | 1 + common/inc/internal/emm_private.h | 1 + common/inc/internal/enclave_creator.h | 2 +- common/inc/internal/inst.h | 4 +- common/inc/internal/linux/linux-regs.h | 1 + common/inc/internal/rts.h | 6 +- common/inc/internal/sgx_mm_rt_abstraction.h | 1 + common/inc/internal/trts_inst.h | 6 +- common/inc/sgx_mm.h | 1 + common/inc/sgx_mm_primitives.h | 1 + common/inc/sgx_mm_rt_abstraction.h | 1 + common/inc/sgx_trts_exception.h | 9 + psw/enclave_common/Makefile | 3 +- psw/enclave_common/sgx_enclave_common.cpp | 5 +- psw/enclave_common/sgx_enclave_common.h | 58 + psw/enclave_common/sgx_enclave_common.lds | 2 + psw/enclave_common/sgx_mm_ocalls.cpp | 392 ++++++ psw/urts/enclave.cpp | 10 +- psw/urts/enclave_creator_hw.h | 2 +- psw/urts/linux/Makefile | 2 + psw/urts/linux/enclave_creator_hw.cpp | 78 +- psw/urts/linux/isgx_user.h | 68 +- .../urts/linux/urts_emm.cpp | 71 +- .../trts_trim.h => psw/urts/linux/urts_emm.h | 17 +- psw/urts/loader.cpp | 21 +- psw/urts/urts_com.h | 10 +- sdk/Makefile.source | 7 +- sdk/emm/Makefile | 67 + sdk/emm/README.md | 108 ++ sdk/emm/api_tests/App/App.cpp | 419 ++++++ sdk/emm/api_tests/App/App.h | 74 ++ sdk/emm/api_tests/App/sgx.h | 228 ++++ sdk/emm/api_tests/Enclave/Enclave.cpp | 708 ++++++++++ sdk/emm/api_tests/Enclave/Enclave.edl | 45 + sdk/emm/api_tests/Enclave/Enclave.h | 48 + sdk/emm/api_tests/Enclave/Enclave.lds | 11 + .../Enclave/Enclave_private_test.pem | 39 + sdk/emm/api_tests/Enclave/config.xml | 23 + sdk/emm/api_tests/Makefile | 265 ++++ sdk/emm/api_tests/tcs.h | 61 + sdk/emm/api_tests/test_loop.sh | 17 + sdk/emm/bit_array.c | 491 +++++++ sdk/emm/ema.c | 1177 +++++++++++++++++ sdk/emm/emm_private.c | 128 ++ sdk/emm/include/bit_array.h | 117 ++ sdk/emm/include/ema.h | 130 ++ sdk/emm/include/emm_private.h | 86 ++ sdk/emm/include/sgx_mm.h | 285 ++++ sdk/emm/include/sgx_mm_primitives.h | 62 + sdk/emm/include/sgx_mm_rt_abstraction.h | 160 +++ sdk/emm/sgx_mm.c | 442 +++++++ sdk/emm/sgx_primitives.S | 86 ++ sdk/emm/ut/Makefile | 55 + sdk/{trts/trts_trim.cpp => emm/ut/stub.c} | 98 +- sdk/emm/ut/test_bit_array.c | 93 ++ sdk/emm/ut/test_ema.c | 192 +++ sdk/emm/ut/test_emm.c | 44 + sdk/emm/ut/test_public.c | 43 + .../SignTool/enclave_creator_sign.cpp | 17 +- sdk/sign_tool/SignTool/enclave_creator_sign.h | 3 +- sdk/simulation/trtssim/linux/Makefile | 3 +- .../urtssim/enclave_creator_sim.cpp | 9 +- sdk/simulation/urtssim/enclave_creator_sim.h | 1 + sdk/simulation/urtssim/linux/Makefile | 1 + sdk/tlibc/gen/sbrk.c | 11 +- sdk/tmm_rsrv/sgx_rsrv_mem.cpp | 34 +- sdk/tmm_rsrv/sgx_rsrv_mem_init.cpp | 1 - sdk/trts/Makefile | 12 +- sdk/trts/ema_init.cpp | 173 +++ sdk/trts/ema_rt.c | 194 +++ sdk/trts/init_enclave.cpp | 35 +- sdk/trts/linux/Makefile | 16 +- sdk/trts/linux/elf_parser.c | 46 +- sdk/trts/linux/trts_pic.S | 8 + sdk/trts/trts_add_trim.cpp | 213 +-- sdk/trts/trts_ecall.cpp | 44 +- sdk/trts/{trts_emodpr.h => 
trts_emm.h} | 8 +- sdk/trts/trts_shared_constants.h | 2 +- sdk/trts/trts_veh.cpp | 29 +- 86 files changed, 6979 insertions(+), 503 deletions(-) create mode 120000 common/inc/internal/bit_array.h create mode 120000 common/inc/internal/ema.h create mode 120000 common/inc/internal/emm_private.h create mode 120000 common/inc/internal/sgx_mm_rt_abstraction.h create mode 120000 common/inc/sgx_mm.h create mode 120000 common/inc/sgx_mm_primitives.h create mode 120000 common/inc/sgx_mm_rt_abstraction.h create mode 100644 psw/enclave_common/sgx_mm_ocalls.cpp rename sdk/trts/trts_emodpr.cpp => psw/urts/linux/urts_emm.cpp (59%) rename sdk/trts/trts_trim.h => psw/urts/linux/urts_emm.h (87%) create mode 100644 sdk/emm/Makefile create mode 100644 sdk/emm/README.md create mode 100644 sdk/emm/api_tests/App/App.cpp create mode 100644 sdk/emm/api_tests/App/App.h create mode 100644 sdk/emm/api_tests/App/sgx.h create mode 100644 sdk/emm/api_tests/Enclave/Enclave.cpp create mode 100644 sdk/emm/api_tests/Enclave/Enclave.edl create mode 100644 sdk/emm/api_tests/Enclave/Enclave.h create mode 100644 sdk/emm/api_tests/Enclave/Enclave.lds create mode 100644 sdk/emm/api_tests/Enclave/Enclave_private_test.pem create mode 100644 sdk/emm/api_tests/Enclave/config.xml create mode 100644 sdk/emm/api_tests/Makefile create mode 100644 sdk/emm/api_tests/tcs.h create mode 100755 sdk/emm/api_tests/test_loop.sh create mode 100644 sdk/emm/bit_array.c create mode 100644 sdk/emm/ema.c create mode 100644 sdk/emm/emm_private.c create mode 100644 sdk/emm/include/bit_array.h create mode 100644 sdk/emm/include/ema.h create mode 100644 sdk/emm/include/emm_private.h create mode 100644 sdk/emm/include/sgx_mm.h create mode 100644 sdk/emm/include/sgx_mm_primitives.h create mode 100644 sdk/emm/include/sgx_mm_rt_abstraction.h create mode 100644 sdk/emm/sgx_mm.c create mode 100644 sdk/emm/sgx_primitives.S create mode 100644 sdk/emm/ut/Makefile rename sdk/{trts/trts_trim.cpp => emm/ut/stub.c} (56%) create mode 100644 sdk/emm/ut/test_bit_array.c create mode 100644 sdk/emm/ut/test_ema.c create mode 100644 sdk/emm/ut/test_emm.c create mode 100644 sdk/emm/ut/test_public.c create mode 100644 sdk/trts/ema_init.cpp create mode 100644 sdk/trts/ema_rt.c rename sdk/trts/{trts_emodpr.h => trts_emm.h} (90%) diff --git a/SampleCode/SampleEnclave/Enclave/Enclave.config.xml b/SampleCode/SampleEnclave/Enclave/Enclave.config.xml index e94c9bc50..4a7edb0c7 100644 --- a/SampleCode/SampleEnclave/Enclave/Enclave.config.xml +++ b/SampleCode/SampleEnclave/Enclave/Enclave.config.xml @@ -1,12 +1,24 @@ 0 0 - 0x40000 - 0x100000 - 10 + 1 + 5 1 + 13 + + + + 0x4000 + 0x2000 + 0x900000 + 0x90000 + 0x0022000 + 0 - 0 + 1 0xFFFFFFFF diff --git a/SampleCode/SampleEnclave/Enclave/config.01.xml b/SampleCode/SampleEnclave/Enclave/config.01.xml index ca652963f..2e1354499 100644 --- a/SampleCode/SampleEnclave/Enclave/config.01.xml +++ b/SampleCode/SampleEnclave/Enclave/config.01.xml @@ -23,6 +23,6 @@ 0 - 0 + 1 0xFFFFFFFF diff --git a/SampleCode/SampleEnclave/Enclave/config.02.xml b/SampleCode/SampleEnclave/Enclave/config.02.xml index 126eac07b..59b267388 100644 --- a/SampleCode/SampleEnclave/Enclave/config.02.xml +++ b/SampleCode/SampleEnclave/Enclave/config.02.xml @@ -20,6 +20,6 @@ 1 0 - 0 + 1 0xFFFFFFFF diff --git a/SampleCode/SampleEnclave/Enclave/config.03.xml b/SampleCode/SampleEnclave/Enclave/config.03.xml index ef642799b..3ccf61abc 100644 --- a/SampleCode/SampleEnclave/Enclave/config.03.xml +++ b/SampleCode/SampleEnclave/Enclave/config.03.xml @@ -22,6 +22,6 @@ 0 - 0 + 1 
0xFFFFFFFF diff --git a/SampleCode/SampleEnclave/Enclave/config.04.xml b/SampleCode/SampleEnclave/Enclave/config.04.xml index a72d611b5..028445402 100644 --- a/SampleCode/SampleEnclave/Enclave/config.04.xml +++ b/SampleCode/SampleEnclave/Enclave/config.04.xml @@ -18,6 +18,6 @@ 0 - 0 + 1 0xFFFFFFFF diff --git a/common/inc/internal/arch.h b/common/inc/internal/arch.h index 54f31ded6..8cfa683e6 100644 --- a/common/inc/internal/arch.h +++ b/common/inc/internal/arch.h @@ -124,6 +124,8 @@ typedef struct _exit_info_t #define SE_VECTOR_BP 3 #define SE_VECTOR_BR 5 #define SE_VECTOR_UD 6 +#define SE_VECTOR_GP 13 +#define SE_VECTOR_PF 14 #define SE_VECTOR_MF 16 #define SE_VECTOR_AC 17 #define SE_VECTOR_XM 19 @@ -156,6 +158,15 @@ typedef struct _ssa_gpr_t uint64_t gs; /* (176) GS register */ } ssa_gpr_t; +typedef struct _misc_exinfo +{ + uint64_t maddr; // address for #PF, #GP. + uint32_t errcd; + uint32_t reserved; +} misc_exinfo_t; + +#define MISC_BYTE_SIZE sizeof(misc_exinfo_t) + typedef uint64_t si_flags_t; #define SI_FLAG_NONE 0x0 diff --git a/common/inc/internal/bit_array.h b/common/inc/internal/bit_array.h new file mode 120000 index 000000000..018139ddf --- /dev/null +++ b/common/inc/internal/bit_array.h @@ -0,0 +1 @@ +../../../sdk/emm/include/bit_array.h \ No newline at end of file diff --git a/common/inc/internal/ema.h b/common/inc/internal/ema.h new file mode 120000 index 000000000..7163f2344 --- /dev/null +++ b/common/inc/internal/ema.h @@ -0,0 +1 @@ +../../../sdk/emm/include/ema.h \ No newline at end of file diff --git a/common/inc/internal/emm_private.h b/common/inc/internal/emm_private.h new file mode 120000 index 000000000..25af68953 --- /dev/null +++ b/common/inc/internal/emm_private.h @@ -0,0 +1 @@ +../../../sdk/emm/include/emm_private.h \ No newline at end of file diff --git a/common/inc/internal/enclave_creator.h b/common/inc/internal/enclave_creator.h index 4b5328055..8fde80547 100644 --- a/common/inc/internal/enclave_creator.h +++ b/common/inc/internal/enclave_creator.h @@ -81,10 +81,10 @@ class EnclaveCreator : private Uncopyable virtual uint32_t handle_page_fault(uint64_t pf_address) { UNUSED(pf_address); return (uint32_t)SGX_ERROR_UNEXPECTED; } #endif virtual int emodpr(uint64_t addr, uint64_t size, uint64_t flag) = 0; + virtual int alloc(uint64_t addr, uint64_t size, int flag) = 0; virtual int mktcs(uint64_t tcs_addr) = 0; virtual int trim_range(uint64_t fromaddr, uint64_t toaddr) = 0; virtual int trim_accept(uint64_t addr) = 0; - virtual int remove_range(uint64_t fromaddr, uint64_t numpages) = 0; // destructor virtual ~EnclaveCreator() {}; diff --git a/common/inc/internal/inst.h b/common/inc/internal/inst.h index 4f2b35756..aa22de72f 100644 --- a/common/inc/internal/inst.h +++ b/common/inc/internal/inst.h @@ -46,7 +46,9 @@ typedef enum { SE_ERESUME, SE_EEXIT, SE_EACCEPT, - SE_EVERIFYREPORT2 = 0x8, + SE_EMODPE, + SE_EACCEPTCOPY, + SE_EVERIFYREPORT2, SE_LAST_RING3, SE_ECREATE = 0x0, diff --git a/common/inc/internal/linux/linux-regs.h b/common/inc/internal/linux/linux-regs.h index 48a206d3b..8741edc2e 100644 --- a/common/inc/internal/linux/linux-regs.h +++ b/common/inc/internal/linux/linux-regs.h @@ -88,6 +88,7 @@ #define SE_EEXIT 4 #define SE_EACCEPT 5 #define SE_EMODPE 6 +#define SE_EACCEPTCOPY 7 #define SE_EVERIFYREPORT2 8 diff --git a/common/inc/internal/rts.h b/common/inc/internal/rts.h index e94ad56be..308baaab2 100644 --- a/common/inc/internal/rts.h +++ b/common/inc/internal/rts.h @@ -95,6 +95,8 @@ typedef struct _system_features #define BUILTIN_OCALL_2 -3 #define 
BUILTIN_OCALL_3 -4 #define BUILTIN_OCALL_4 -5 +#define BUILTIN_OCALL_5 -6 +#define BUILTIN_OCALL_6 -7 typedef enum { @@ -102,10 +104,12 @@ typedef enum EDMM_TRIM_COMMIT = BUILTIN_OCALL_2, EDMM_MODPR = BUILTIN_OCALL_3, EDMM_MPROTECT = BUILTIN_OCALL_4, + EDMM_ALLOC = BUILTIN_OCALL_5, + EDMM_MODIFY = BUILTIN_OCALL_6, }edmm_ocall_t; -#define is_builtin_ocall(ocall_val) (((int)ocall_val >= BUILTIN_OCALL_4) && ((int)ocall_val <= BUILTIN_OCALL_1)) +#define is_builtin_ocall(ocall_val) (((int)ocall_val >= BUILTIN_OCALL_6) && ((int)ocall_val <= BUILTIN_OCALL_1)) #pragma pack(pop) diff --git a/common/inc/internal/sgx_mm_rt_abstraction.h b/common/inc/internal/sgx_mm_rt_abstraction.h new file mode 120000 index 000000000..9300fa4d3 --- /dev/null +++ b/common/inc/internal/sgx_mm_rt_abstraction.h @@ -0,0 +1 @@ +../../../sdk/emm/include/sgx_mm_rt_abstraction.h \ No newline at end of file diff --git a/common/inc/internal/trts_inst.h b/common/inc/internal/trts_inst.h index 354148dcc..ffb7f80a2 100644 --- a/common/inc/internal/trts_inst.h +++ b/common/inc/internal/trts_inst.h @@ -74,17 +74,13 @@ struct ms_tcs extern "C" { #endif -int sgx_accept_forward(si_flags_t sfl, size_t lo, size_t hi); int do_ereport(const sgx_target_info_t *target_info, const sgx_report_data_t *report_data, sgx_report_t *report); int do_everifyreport2(const sgx_report2_mac_struct_t *report2_mac_struct); int do_egetkey(const sgx_key_request_t *key_request, sgx_key_128bit_t *key); uint32_t do_rdrand(uint32_t *rand); int do_eaccept(const sec_info_t *, size_t); +int do_eacceptcopy(const sec_info_t *, size_t, size_t); int do_emodpe(const sec_info_t*, size_t); -int apply_EPC_pages(void *start_address, size_t page_number); -int apply_pages_within_exception(void *start_address, size_t page_count); -int trim_EPC_pages(void *start_address, size_t page_number); -sgx_status_t SGXAPI trts_mprotect(size_t start, size_t size, uint64_t perms); sgx_status_t do_add_thread(void *ms); int is_dynamic_thread(void *tcs); int is_dynamic_thread_exist(void); diff --git a/common/inc/sgx_mm.h b/common/inc/sgx_mm.h new file mode 120000 index 000000000..cc284d3bf --- /dev/null +++ b/common/inc/sgx_mm.h @@ -0,0 +1 @@ +../../sdk/emm/include/sgx_mm.h \ No newline at end of file diff --git a/common/inc/sgx_mm_primitives.h b/common/inc/sgx_mm_primitives.h new file mode 120000 index 000000000..c0817f167 --- /dev/null +++ b/common/inc/sgx_mm_primitives.h @@ -0,0 +1 @@ +../../sdk/emm/include/sgx_mm_primitives.h \ No newline at end of file diff --git a/common/inc/sgx_mm_rt_abstraction.h b/common/inc/sgx_mm_rt_abstraction.h new file mode 120000 index 000000000..4b440b598 --- /dev/null +++ b/common/inc/sgx_mm_rt_abstraction.h @@ -0,0 +1 @@ +../../sdk/emm/include/sgx_mm_rt_abstraction.h \ No newline at end of file diff --git a/common/inc/sgx_trts_exception.h b/common/inc/sgx_trts_exception.h index 3e04c3255..83cdd5cb4 100644 --- a/common/inc/sgx_trts_exception.h +++ b/common/inc/sgx_trts_exception.h @@ -54,6 +54,7 @@ typedef enum _sgx_exception_vector_t SGX_EXCEPTION_VECTOR_BP = 3, /* INT 3 instruction */ SGX_EXCEPTION_VECTOR_BR = 5, /* BOUND instruction */ SGX_EXCEPTION_VECTOR_UD = 6, /* UD2 instruction or reserved opcode */ + SGX_EXCEPTION_VECTOR_PF = 14, /* page fault */ SGX_EXCEPTION_VECTOR_MF = 16, /* x87 FPU floating-point or WAIT/FWAIT instruction */ SGX_EXCEPTION_VECTOR_AC = 17, /* Any data reference in memory */ SGX_EXCEPTION_VECTOR_XM = 19, /* SSE/SSE2/SSE3 floating-point instruction */ @@ -103,11 +104,19 @@ typedef struct _cpu_context_t } sgx_cpu_context_t; #endif 
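+/* Extended exception information reported for #PF and #GP faults:
+ * faulting_address and error_code mirror the EXINFO record the CPU writes
+ * to the MISC region of the SSA (available when MISCSELECT.EXINFO is
+ * enabled for the enclave); the tRTS-internal counterpart is misc_exinfo_t
+ * in arch.h.
+ */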
+typedef struct _exinfo_t +{ + uint64_t faulting_address; + uint32_t error_code; + uint32_t reserved; +}sgx_misc_exinfo_t; + typedef struct _exception_info_t { sgx_cpu_context_t cpu_context; sgx_exception_vector_t exception_vector; sgx_exception_type_t exception_type; + sgx_misc_exinfo_t exinfo; } sgx_exception_info_t; typedef int (*sgx_exception_handler_t)(sgx_exception_info_t *info); diff --git a/psw/enclave_common/Makefile b/psw/enclave_common/Makefile index 6627e99c5..7f8786460 100644 --- a/psw/enclave_common/Makefile +++ b/psw/enclave_common/Makefile @@ -47,6 +47,7 @@ CFLAGS += $(ADDED_INC) INC += -I$(SGX_HEADER_DIR) \ -I$(COMMON_DIR)/inc/internal \ -I$(COMMON_DIR)/inc/internal/linux \ + -I$(LINUX_SDK_DIR)/emm/include \ -I$(LINUX_PSW_DIR)/urts/ \ -I$(LINUX_PSW_DIR)/urts/linux \ -I$(LINUX_PSW_DIR)/enclave_common @@ -92,7 +93,7 @@ ifndef DEBUG $(OBJCOPY) --add-gnu-debuglink=$(LIBSGX_ENCLAVE_COMMON_DEBUG) $(LIBSGX_ENCLAVE_COMMON) endif -$(OBJ): %.o: %.cpp +$(OBJ): %.o: %.cpp sgx_mm_ocalls.cpp $(CXX) -c $(CXXFLAGS) $(INC) $< -o $@ $(LIBWRAPPER): diff --git a/psw/enclave_common/sgx_enclave_common.cpp b/psw/enclave_common/sgx_enclave_common.cpp index 95e7eb4f6..daa557e88 100644 --- a/psw/enclave_common/sgx_enclave_common.cpp +++ b/psw/enclave_common/sgx_enclave_common.cpp @@ -47,12 +47,12 @@ #include #include "se_memcpy.h" #include "se_lock.hpp" +#include "sgx_mm.h" //ubuntu 18.04 use glibc 2.27, doesn't support MAP_FIXED_NOREPLACE #ifndef MAP_FIXED_NOREPLACE #define MAP_FIXED_NOREPLACE 0x100000 #endif - #define POINTER_TO_U64(A) ((__u64)((uintptr_t)(A))) #define SGX_LAUNCH_SO "libsgx_launch.so.1" @@ -1066,7 +1066,6 @@ extern "C" size_t COMM_API enclave_load_data( } - /* enclave_initialize() * Parameters: * base_address [in] - The enclave base address as returned from the enclave_create API. @@ -1312,4 +1311,4 @@ extern "C" bool COMM_API enclave_set_information( return false; } - +#include "sgx_mm_ocalls.cpp" diff --git a/psw/enclave_common/sgx_enclave_common.h b/psw/enclave_common/sgx_enclave_common.h index cdedbed98..480864c6c 100644 --- a/psw/enclave_common/sgx_enclave_common.h +++ b/psw/enclave_common/sgx_enclave_common.h @@ -247,6 +247,64 @@ bool COMM_API enclave_set_information( COMM_IN void* input_info, COMM_IN size_t input_info_size, COMM_OUT_OPT uint32_t* enclave_error); + +/* + * Call OS to reserve region for EAUG, immediately or on-demand. + * + * @param[in] addr Desired page aligned start address, NULL if no desired address. + * @param[in] length Size of the region in bytes of multiples of page size. + * @param[in] flags A bitwise OR of flags describing committing mode, committing + * order, address preference, page type. The untrusted side. + * implementation should always invoke mmap syscall with MAP_SHARED|MAP_FIXED_NOREPLACE, and + * translate following additional bits to proper parameters invoking mmap or other SGX specific + * syscall(s) provided by the kernel. + * The flags param of this interface should include exactly one of following for committing mode: + * - SGX_EMA_RESERVE: kernel map an address range with PROT_NONE, no EPC EAUGed. + * - SGX_EMA_COMMIT_NOW: reserves memory range with SGX_EMA_PROT_READ|SGX_EMA_PROT_WRITE, if supported, + * kernel is given a hint to EAUG EPC pages for the area as soon as possible. + * - SGX_EMA_COMMIT_ON_DEMAND: reserves memory range, EPC pages can be EAUGed upon #PF. 
+ * ORed with zero or one of the committing order flags: + * - SGX_EMA_GROWSDOWN: if supported, a hint given for the kernel to EAUG pages from higher + * to lower addresses, no gaps in addresses above the last committed. + * - SGX_EMA_GROWSUP: if supported, a hint given for the kernel to EAUG pages from lower + * to higher addresses, no gaps in addresses below the last committed. + * Optionally ORed with one of following page types: + * - SGX_EMA_PAGE_TYPE_REG: regular page type. This is the default if not specified. + * - SGX_EMA_PAGE_TYPE_SS_FIRST: the first page in shadow stack. + * - SGX_EMA_PAGE_TYPE_SS_REST: the rest page in shadow stack. + * @retval 0 The operation was successful. + * @retval EINVAL Any parameter passed in is not valid. + * @retval errno Error as reported by dependent syscalls, e.g., mmap(). + */ +int COMM_API enclave_alloc(uint64_t addr, size_t length, int flags); + +/* + * Call OS to change permissions, type, or notify EACCEPT done after TRIM. + * + * @param[in] addr Start address of the memory to change protections. + * @param[in] length Length of the area. This must be a multiple of the page size. + * @param[in] flags_from The original EPCM flags of the EPC pages to be modified. + * Must be bitwise OR of following: + * SGX_EMA_PROT_READ + * SGX_EMA_PROT_WRITE + * SGX_EMA_PROT_EXEC + * SGX_EMA_PAGE_TYPE_REG: regular page, changeable to TRIM or TCS + * SGX_EMA_PAGE_TYPE_TRIM: signal to the kernel EACCEPT is done for TRIM pages. + * @param[in] flags_to The target EPCM flags. This must be bitwise OR of following: + * SGX_EMA_PROT_READ + * SGX_EMA_PROT_WRITE + * SGX_EMA_PROT_EXEC + * SGX_EMA_PAGE_TYPE_TRIM: change the page type to PT_TRIM. Note the address + * range for trimmed pages may still be reserved by enclave with + * proper permissions. + * SGX_EMA_PAGE_TYPE_TCS: change the page type to PT_TCS + * @retval 0 The operation was successful. + * @retval EINVAL A parameter passed in is not valid. + * @retval errno Error as reported by dependent syscalls, e.g., mprotect(). + */ + +int COMM_API enclave_modify(uint64_t addr, size_t length, int flags_from, int flags_to); + #ifdef __cplusplus } #endif diff --git a/psw/enclave_common/sgx_enclave_common.lds b/psw/enclave_common/sgx_enclave_common.lds index d76001f15..8621fdf50 100644 --- a/psw/enclave_common/sgx_enclave_common.lds +++ b/psw/enclave_common/sgx_enclave_common.lds @@ -7,6 +7,8 @@ global: enclave_get_information; enclave_set_information; enclave_create_ex; + enclave_alloc; + enclave_modify; local: *; }; diff --git a/psw/enclave_common/sgx_mm_ocalls.cpp b/psw/enclave_common/sgx_mm_ocalls.cpp new file mode 100644 index 000000000..64a2b2ced --- /dev/null +++ b/psw/enclave_common/sgx_mm_ocalls.cpp @@ -0,0 +1,392 @@ + +//////////////////////////////////////////////////////////// +// OCall impl. These will be part of sgx_enclave_common.cpp +//////////////////////////////////////////////////////////// +#include +using namespace std; +#define PROT_MASK (PROT_READ|PROT_WRITE|PROT_EXEC) +/* + * Call OS to reserve region for EAUG, immediately or on-demand. + * + * @param[in] addr Desired page aligned start address, NULL if no desired address. + * @param[in] length Size of the region in bytes of multiples of page size. + * @param[in] flags A bitwise OR of flags describing committing mode, committing + * order, address preference, page type. The untrusted side. 
+ * implementation should always invoke mmap syscall with MAP_SHARED|MAP_FIXED, and + * translate following additional bits to proper parameters invoking mmap or other SGX specific + * syscall(s) provided by the kernel. + * The flags param of this interface should include exactly one of following for committing mode: + * - SGX_EMA_COMMIT_NOW: reserves memory range with SGX_EMA_PROT_READ|SGX_EMA_PROT_WRITE, if supported, + * kernel is given a hint to EAUG EPC pages for the area as soon as possible. + * - SGX_EMA_COMMIT_ON_DEMAND: reserves memory range, EPC pages can be EAUGed upon #PF. + * ORed with zero or one of the committing order flags: + * - SGX_EMA_GROWSDOWN: if supported, a hint given for the kernel to EAUG pages from higher + * to lower addresses, no gaps in addresses above the last committed. + * - SGX_EMA_GROWSUP: if supported, a hint given for the kernel to EAUG pages from lower + * to higher addresses, no gaps in addresses below the last committed. + * Optionally ORed with one of following page types: + * - SGX_EMA_PAGE_TYPE_REG: regular page type. This is the default if not specified. + * - SGX_EMA_PAGE_TYPE_SS_FIRST: the first page in shadow stack. + * - SGX_EMA_PAGE_TYPE_SS_REST: the rest page in shadow stack. + * @retval 0 The operation was successful. + * @retval EINVAL Any parameter passed in is not valid. + * @retval errno Error as reported by dependent syscalls, e.g., mmap(). + */ +extern "C" int COMM_API enclave_alloc(uint64_t addr, size_t length, int flags) +{ + int ret = EINVAL; + SE_TRACE(SE_TRACE_DEBUG, + "enclave_alloc for 0x%llX ( %llX ) with 0x%lX\n", + addr, length, flags); + + if (s_driver_type == SGX_DRIVER_DCAP) + { + return ret; + } + if (s_driver_type == SGX_DRIVER_OUT_OF_TREE) + { + return mprotect((void *)addr, length, PROT_WRITE | PROT_READ); + } + int enclave_fd = get_file_handle_from_address((void *)addr); + if (enclave_fd == -1) + return ret; + int map_flags = MAP_SHARED | MAP_FIXED; + //!TODO: support COMMIT_NOW when kernel supports + if (flags & SGX_EMA_COMMIT_NOW) + { + } + //!TODO support CET + int type = flags & SGX_EMA_PAGE_TYPE_MASK; + if((type == SGX_EMA_PAGE_TYPE_SS_FIRST) | (type == SGX_EMA_PAGE_TYPE_SS_REST)) + return EFAULT; + if((type == SGX_EMA_PAGE_TYPE_SS_FIRST) && length > SE_PAGE_SIZE) + return ret; + void *out = mmap((void *)addr, length, PROT_WRITE | PROT_READ, map_flags, enclave_fd, 0); + if (out == MAP_FAILED) + { + SE_TRACE(SE_TRACE_WARNING, "mmap failed, error = %d\n", errno); + ret = errno; + }else + ret = 0; + return ret; +} + +uint64_t get_offset_for_address(uint64_t target_address) +{ + uint64_t enclave_base_addr = (uint64_t)get_enclave_base_address_from_address((void *)target_address); + assert(enclave_base_addr != 0); + assert(target_address >= enclave_base_addr); + return (uint64_t)target_address - (uint64_t)enclave_base_addr; +} + +static int emodt(int fd, uint64_t addr, size_t length, uint64_t type) +{ + struct sgx_page_modt ioc; + if (length == 0) + return EINVAL; + memset(&ioc, 0, sizeof(ioc)); + + SE_TRACE(SE_TRACE_DEBUG, + "MODT for 0x%llX ( %llX ), type: 0x%llX\n", + addr, length, type); + memset(&ioc, 0, sizeof(ioc)); + ioc.type = type; + ioc.offset = get_offset_for_address(addr); + ioc.length = SE_PAGE_SIZE;//TODO: change back to length + do + { + int ret = ioctl(fd, SGX_IOC_PAGE_MODT, &ioc); + //TODO: use error code + if (ret && ioc.count == 0 && errno != EBUSY) + { //total failure + SE_TRACE(SE_TRACE_WARNING, + "MODT failed, error = %d for 0x%llX ( %llX ), type: 0x%llX\n", + errno, addr, length, type); + 
return errno; + } + ioc.offset += SE_PAGE_SIZE; + ioc.result = 0; + ioc.count = 0; + length -= SE_PAGE_SIZE; + } while (length != 0); + + return 0; +} + +static int trim(int fd, uint64_t addr, size_t length) +{ + return emodt(fd, addr, length, (SGX_EMA_PAGE_TYPE_TRIM >> SGX_EMA_PAGE_TYPE_SHIFT)); +} +static int mktcs(int fd, uint64_t addr, size_t length) +{ + + return emodt(fd, addr, length, (SGX_EMA_PAGE_TYPE_TCS >> SGX_EMA_PAGE_TYPE_SHIFT)); +} +static int trim_accept(int fd, uint64_t addr, size_t length) +{ + struct sgx_page_remove remove_ioc; + memset(&remove_ioc, 0, sizeof(remove_ioc)); + + SE_TRACE(SE_TRACE_DEBUG, + "REMOVE for 0x%llX ( %llX )\n", + addr, length); + remove_ioc.offset = get_offset_for_address(addr); + remove_ioc.length = length; + + int ret = ioctl(fd, SGX_IOC_PAGE_REMOVE, &remove_ioc); + if(ret) + { + SE_TRACE(SE_TRACE_WARNING, + "REMOVE failed, error = %d for 0x%llX ( %llX )\n", + errno, addr, length); + return errno; + }else + return 0; +} +static int emodpr(int fd, uint64_t addr, size_t length, uint64_t prot) +{ + struct sgx_page_modp ioc; + if (length == 0) + return EINVAL; + memset(&ioc, 0, sizeof(ioc)); + + SE_TRACE(SE_TRACE_DEBUG, + "MODP for 0x%llX ( %llX ), prot: 0x%llX\n", + addr, length, prot); + ioc.prot = prot; + ioc.offset = get_offset_for_address(addr); + ioc.length = length; + + do + { + int ret = ioctl(fd, SGX_IOC_PAGE_MODP, &ioc); + //TODO: use error code + if (ret && ioc.count == 0 && errno != EBUSY ) + { //total failure + SE_TRACE(SE_TRACE_WARNING, + "MODP failed, error = %d for 0x%llX ( %llX ), prot: 0x%llX\n", + errno, addr, length, prot); + return errno; + } + ioc.length -= ioc.count; + ioc.offset += ioc.count; + ioc.result = 0; + ioc.count = 0; + } while (ioc.length != 0); + + return 0; +} + +// legacy support for EDMM + +static int trim_accept_legacy(int fd, uint64_t addr, size_t len) +{ + sgx_range params; + memset(¶ms, 0, sizeof(sgx_range)); + params.start_addr = (unsigned long)addr; + params.nr_pages = (unsigned int)(len / SE_PAGE_SIZE); + + int ret = ioctl(fd, SGX_IOC_ENCLAVE_NOTIFY_ACCEPT, ¶ms); + + if (ret) + { + return errno; + } + + return SGX_SUCCESS; +} + +static int trim_legacy(int fd, uint64_t fromaddr, uint64_t len) +{ + sgx_range params; + memset(¶ms, 0, sizeof(sgx_range)); + params.start_addr = (unsigned long)fromaddr; + params.nr_pages = (unsigned int)((len) / SE_PAGE_SIZE); + + int ret = ioctl(fd, SGX_IOC_ENCLAVE_TRIM, ¶ms); + if (ret) + { + return errno; + } + + return SGX_SUCCESS; +} + +static int mktcs_legacy(int fd, uint64_t tcs_addr, size_t len) +{ + if (len != SE_PAGE_SIZE) + return EINVAL; + sgx_range params; + memset(¶ms, 0, sizeof(sgx_range)); + params.start_addr = (unsigned long)tcs_addr; + params.nr_pages = 1; + + int ret = ioctl(fd, SGX_IOC_ENCLAVE_MKTCS, ¶ms); + if (ret) + { + return errno; + } + return SGX_SUCCESS; +} + +static int emodpr_legacy(int fd, uint64_t addr, uint64_t size, uint64_t flag) +{ + sgx_modification_param params; + memset(¶ms, 0, sizeof(sgx_modification_param)); + params.range.start_addr = (unsigned long)addr; + params.range.nr_pages = (unsigned int)(size / SE_PAGE_SIZE); + params.flags = (unsigned long)flag; + + int ret = ioctl(fd, SGX_IOC_ENCLAVE_EMODPR, ¶ms); + if (ret) + { + return errno; + } + + return SGX_SUCCESS; +} + +/* + * Call OS to change permissions, type, or notify EACCEPT done after TRIM. + * + * @param[in] addr Start address of the memory to change protections. + * @param[in] length Length of the area. This must be a multiple of the page size. 
+ * @param[in] flags_from The original EPCM flags of the EPC pages to be modified. + * Must be bitwise OR of following: + * SGX_EMA_PROT_READ + * SGX_EMA_PROT_WRITE + * SGX_EMA_PROT_EXEC + * SGX_EMA_PAGE_TYPE_REG: regular page, changeable to TRIM or TCS + * SGX_EMA_PAGE_TYPE_TRIM: signal to the kernel EACCEPT is done for TRIM pages. + * @param[in] flags_to The target EPCM flags. This must be bitwise OR of following: + * SGX_EMA_PROT_READ + * SGX_EMA_PROT_WRITE + * SGX_EMA_PROT_EXEC + * SGX_EMA_PAGE_TYPE_TRIM: change the page type to PT_TRIM. Note the address + * range for trimmed pages may still be reserved by enclave with + * proper permissions. + * SGX_EMA_PAGE_TYPE_TCS: change the page type to PT_TCS + * @retval 0 The operation was successful. + * @retval EINVAL A parameter passed in is not valid. + * @retval errno Error as reported by dependent syscalls, e.g., mprotect(). + */ + +extern "C" int COMM_API enclave_modify(uint64_t addr, size_t length, int flags_from, int flags_to) +{ + int ret = EFAULT; + SE_TRACE(SE_TRACE_DEBUG, + "enclave_modify for 0x%llX ( %llX ) from 0x%lX to %lX\n", + addr, length, flags_from, flags_to); + if (s_driver_type == SGX_DRIVER_DCAP) + { + return ret; + } + uint64_t enclave_base = (uint64_t)get_enclave_base_address_from_address((void *)addr); + if (enclave_base == 0) + { + return EINVAL; + } + if (length % SE_PAGE_SIZE != 0) + return EINVAL; + function _trim = trim; + function _trim_accept = trim_accept; + function _mktcs = mktcs; + function _emodpr = emodpr; + int fd = get_file_handle_from_address((void *)addr); + if (s_driver_type == SGX_DRIVER_OUT_OF_TREE) + { + _trim = trim_legacy; + _trim_accept = trim_accept_legacy; + _mktcs = mktcs_legacy; + _emodpr = emodpr_legacy; + fd = s_hdevice; + } + if(fd == -1) return EINVAL; + + int type_to = (flags_to & SGX_EMA_PAGE_TYPE_MASK); + int type_from = (flags_from & SGX_EMA_PAGE_TYPE_MASK); + if (type_from == SGX_EMA_PAGE_TYPE_TRIM && type_to != SGX_EMA_PAGE_TYPE_TRIM) + { + return EINVAL; + } + int prot_to = (flags_to & PROT_MASK); + int prot_from = (flags_from & PROT_MASK); + if ((prot_to != prot_from) && (type_to != type_from)) + { + return EINVAL; + } + + if ((type_from & type_to & SGX_EMA_PAGE_TYPE_TRIM)) + { + //user space can only do EACCEPT for PT_TRIM type + ret = _trim_accept(fd, addr, length); + if (ret) + return ret; + if (prot_to == PROT_NONE) + { + //EACCEPT done and notified. + //if user wants to remove permissions, + //only mprotect is needed + ret = mprotect((void *)addr, length, prot_to); + if (ret == -1) + return ret; + } + return ret; + } + + if (type_to == SGX_EMA_PAGE_TYPE_TRIM) + { + assert(type_from != SGX_EMA_PAGE_TYPE_TRIM); + if (prot_to != prot_from) + return EINVAL; + //user must be able to do EACCEPT + if (prot_to == PROT_NONE) + return EINVAL; + return _trim(fd, addr, length); + } + + if (type_to == SGX_EMA_PAGE_TYPE_TCS) + { + if (type_from != SGX_EMA_PAGE_TYPE_REG) + return EINVAL; + if ((prot_from != (SGX_EMA_PROT_READ_WRITE)) && prot_to != prot_from) + return EINVAL; + return _mktcs(fd, addr, length); + } + + if (type_to != type_from) + return EINVAL; + // type_to == type_from + // this is for emodpr to epcm.NONE, enclave EACCEPT with pte.R + // separate mprotecte is needed to change ptt.R to pte.NONE + if (prot_to == prot_from && prot_to == PROT_NONE) + { + ret = mprotect((void *)addr, length, prot_to); + if (ret == -1) + return errno; + } + + if (prot_to == prot_from) + { + return 0; //nothing to be done. + } + // Permissions changes. 
Only do emodpr for PT_REG pages + if ((type_from & type_to & SGX_EMA_PAGE_TYPE_MASK) == SGX_EMA_PAGE_TYPE_REG) + { + ret = _emodpr(fd, addr, length, prot_to); + if (ret) + return ret; + } + else + { + return EINVAL; + } + //EACCEPT needs at least pte.R, PROT_NONE case done above. + if (prot_to != PROT_NONE) + { + ret = mprotect((void *)addr, length, prot_to); + if (ret == -1) + return errno; + } + return ret; +} diff --git a/psw/urts/enclave.cpp b/psw/urts/enclave.cpp index b7fdc2afb..8ef2e53d5 100644 --- a/psw/urts/enclave.cpp +++ b/psw/urts/enclave.cpp @@ -40,6 +40,7 @@ #include "se_memory.h" #include "urts_trim.h" #include "urts_emodpr.h" +#include "urts_emm.h" #include "rts_cmd.h" #include #include "rts.h" @@ -352,7 +353,7 @@ sgx_status_t CEnclave::ecall(const int proc, const void *ocall_table, void *ms, se_event_wake(m_new_thread_event); pthread_join(m_pthread_tid, NULL); } - ocall_table = m_ocall_table; + /*ocall_table = m_ocall_table; std::vector threads = m_thread_pool->get_thread_list(); for (unsigned idx = 0; idx < threads.size(); ++idx) @@ -370,6 +371,7 @@ sgx_status_t CEnclave::ecall(const int proc, const void *ocall_table, void *ms, // Change TCS permission to Read only to let driver not handle the // #PF caused by TCS trim. It gives urts a chance to catch the exception // and exit the ecall with an error code. + //!TODO do we really need this? if(0 != mprotect((void *)start, TCS_SIZE, SI_FLAG_R)) { se_rdunlock(&m_rwlock); @@ -381,7 +383,7 @@ sgx_status_t CEnclave::ecall(const int proc, const void *ocall_table, void *ms, return (sgx_status_t)ret; } } - } + }*/ } ret = do_ecall(proc, m_ocall_table, ms, trust_thread); @@ -420,6 +422,10 @@ int CEnclave::ocall(const unsigned int proc, const sgx_ocall_table_t *ocall_tabl error = ocall_emodpr(ms); else if ((int)proc == EDMM_MPROTECT) error = ocall_mprotect(ms); + else if ((int)proc == EDMM_ALLOC) + error = ocall_emm_alloc(ms); + else if ((int)proc == EDMM_MODIFY) + error = ocall_emm_modify(ms); } else { diff --git a/psw/urts/enclave_creator_hw.h b/psw/urts/enclave_creator_hw.h index 15d02e200..6cfe8b4a7 100644 --- a/psw/urts/enclave_creator_hw.h +++ b/psw/urts/enclave_creator_hw.h @@ -61,10 +61,10 @@ class EnclaveCreatorHW : public EnclaveCreator int get_misc_attr(sgx_misc_attribute_t *sgx_misc_attr, metadata_t *metadata, SGXLaunchToken * const lc, uint32_t flag); bool get_plat_cap(sgx_misc_attribute_t *se_attr); int emodpr(uint64_t addr, uint64_t size, uint64_t flag); + int alloc(uint64_t addr, uint64_t size, int flag); int mktcs(uint64_t tcs_addr); int trim_range(uint64_t fromaddr, uint64_t toaddr); int trim_accept(uint64_t addr); - int remove_range(uint64_t fromaddr, uint64_t numpages); private: virtual bool open_device(); virtual void close_device(); diff --git a/psw/urts/linux/Makefile b/psw/urts/linux/Makefile index 529d7c2bc..e67b62814 100644 --- a/psw/urts/linux/Makefile +++ b/psw/urts/linux/Makefile @@ -51,6 +51,7 @@ INC += -I$(SGX_HEADER_DIR) \ -I$(LINUX_PSW_DIR)/urts/ \ -I$(LINUX_PSW_DIR)/urts/linux \ -I$(LINUX_PSW_DIR)/urts/parser \ + -I$(LINUX_SDK_DIR)/emm/include \ -I$(VTUNE_DIR)/include \ -I$(VTUNE_DIR)/sdk/src/ittnotify @@ -96,6 +97,7 @@ OBJ2 := urts.o \ get_thread_id.o \ prd_css_util.o \ urts_emodpr.o \ + urts_emm.o \ urts_trim.o \ edmm_utility.o diff --git a/psw/urts/linux/enclave_creator_hw.cpp b/psw/urts/linux/enclave_creator_hw.cpp index 9b2f70f56..f954ea716 100644 --- a/psw/urts/linux/enclave_creator_hw.cpp +++ b/psw/urts/linux/enclave_creator_hw.cpp @@ -49,7 +49,7 @@ #include #include #include - +#include 
"sgx_mm.h" #define POINTER_TO_U64(A) ((__u64)((uintptr_t)(A))) static EnclaveCreatorHW g_enclave_creator_hw; @@ -306,93 +306,67 @@ void EnclaveCreatorHW::close_device() m_hdevice = -1; } -int EnclaveCreatorHW::emodpr(uint64_t addr, uint64_t size, uint64_t flag) +int EnclaveCreatorHW::alloc(uint64_t addr, uint64_t size, int flag) { - sgx_modification_param params; - memset(¶ms, 0 ,sizeof(sgx_modification_param)); - params.range.start_addr = (unsigned long)addr; - params.range.nr_pages = (unsigned int)(size/SE_PAGE_SIZE); - params.flags = (unsigned long)flag; - - int ret = ioctl(m_hdevice, SGX_IOC_ENCLAVE_EMODPR, ¶ms); + int ret = enclave_alloc(addr, size, flag); if (ret) { - SE_TRACE(SE_TRACE_ERROR, "SGX_IOC_ENCLAVE_EMODPR failed %d\n", errno); - return error_driver2urts(ret, errno); + SE_TRACE(SE_TRACE_ERROR, "SGX_IOC_ENCLAVE_alloc failed %d\n", ret); + return error_api2urts(ret); } return SGX_SUCCESS; } - -int EnclaveCreatorHW::mktcs(uint64_t tcs_addr) + +int EnclaveCreatorHW::emodpr(uint64_t addr, uint64_t size, uint64_t flag) { - sgx_range params; - memset(¶ms, 0 ,sizeof(sgx_range)); - params.start_addr = (unsigned long)tcs_addr; - params.nr_pages = 1; - int ret = ioctl(m_hdevice, SGX_IOC_ENCLAVE_MKTCS, ¶ms); + int ret = enclave_modify(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC|SGX_EMA_PAGE_TYPE_REG, + (int) (flag|SGX_EMA_PAGE_TYPE_REG)); if (ret) { - SE_TRACE(SE_TRACE_ERROR, "MODIFY_TYPE failed %d\n", errno); - return error_driver2urts(ret, errno); + SE_TRACE(SE_TRACE_ERROR, "SGX_IOC_ENCLAVE_EMODPR failed %d\n", ret); + return error_api2urts(ret); } return SGX_SUCCESS; } -int EnclaveCreatorHW::trim_range(uint64_t fromaddr, uint64_t toaddr) +int EnclaveCreatorHW::mktcs(uint64_t tcs_addr) { - sgx_range params; - memset(¶ms, 0 ,sizeof(sgx_range)); - params.start_addr = (unsigned long)fromaddr; - params.nr_pages = (unsigned int)((toaddr - fromaddr)/SE_PAGE_SIZE); - - int ret= ioctl(m_hdevice, SGX_IOC_ENCLAVE_TRIM, ¶ms); + int ret = enclave_modify(tcs_addr, SE_PAGE_SIZE, PROT_READ|PROT_WRITE|SGX_EMA_PAGE_TYPE_REG, PROT_READ|PROT_WRITE|SGX_EMA_PAGE_TYPE_TCS); if (ret) { - SE_TRACE(SE_TRACE_ERROR, "SGX_IOC_ENCLAVE_TRIM failed %d\n", errno); - return error_driver2urts(ret, errno); + SE_TRACE(SE_TRACE_ERROR, "MODIFY_TYPE failed %d\n", ret); + return error_api2urts(ret); } return SGX_SUCCESS; - } -int EnclaveCreatorHW::trim_accept(uint64_t addr) +int EnclaveCreatorHW::trim_range(uint64_t fromaddr, uint64_t toaddr) { - sgx_range params; - memset(¶ms, 0 ,sizeof(sgx_range)); - params.start_addr = (unsigned long)addr; - params.nr_pages = 1; - - int ret = ioctl(m_hdevice, SGX_IOC_ENCLAVE_NOTIFY_ACCEPT, ¶ms); - + int ret= enclave_modify( fromaddr, toaddr - fromaddr, PROT_READ|PROT_WRITE, PROT_READ|PROT_WRITE|SGX_EMA_PAGE_TYPE_TRIM); if (ret) { - SE_TRACE(SE_TRACE_ERROR, "TRIM_RANGE_COMMIT failed %d\n", errno); - return error_driver2urts(ret, errno); + SE_TRACE(SE_TRACE_ERROR, "SGX_IOC_ENCLAVE_TRIM failed %d\n", ret); + return error_api2urts(ret); } return SGX_SUCCESS; + } -int EnclaveCreatorHW::remove_range(uint64_t fromaddr, uint64_t numpages) +int EnclaveCreatorHW::trim_accept(uint64_t addr) { - int ret = -1; - uint64_t i; - unsigned long start; + int ret = enclave_modify(addr, SE_PAGE_SIZE, SGX_EMA_PAGE_TYPE_TRIM|PROT_READ|PROT_WRITE + , SGX_EMA_PAGE_TYPE_TRIM|PROT_READ|PROT_WRITE); - for (i = 0; i < numpages; i++) + if (ret) { - start = (unsigned long)fromaddr + (unsigned long)(i << SE_PAGE_SHIFT); - ret = ioctl(m_hdevice, SGX_IOC_ENCLAVE_PAGE_REMOVE, &start); - if (ret) - { - 
SE_TRACE(SE_TRACE_ERROR, "PAGE_REMOVE failed %d\n", errno); - return error_driver2urts(ret, errno); - } + SE_TRACE(SE_TRACE_ERROR, "TRIM_RANGE_COMMIT failed %d\n", ret); + return error_api2urts(ret); } return SGX_SUCCESS; diff --git a/psw/urts/linux/isgx_user.h b/psw/urts/linux/isgx_user.h index 3c4dfdd00..e937aab80 100644 --- a/psw/urts/linux/isgx_user.h +++ b/psw/urts/linux/isgx_user.h @@ -108,16 +108,25 @@ enum sgx_page_flags { _IOW(SGX_MAGIC, 0x02, struct sgx_enclave_init) #define SGX_IOC_ENCLAVE_SET_ATTRIBUTE \ _IOW(SGX_MAGIC, 0x03, struct sgx_enclave_set_attribute) +#define SGX_IOC_PAGE_MODP \ + _IOWR(SGX_MAGIC, 0x04, struct sgx_page_modp) +#define SGX_IOC_PAGE_MODT \ + _IOWR(SGX_MAGIC, 0x05, struct sgx_page_modt) +#define SGX_IOC_PAGE_REMOVE \ + _IOWR(SGX_MAGIC, 0x06, struct sgx_page_remove) + +/* Legacy OOT driver support for EDMM */ #define SGX_IOC_ENCLAVE_EMODPR \ _IOW(SGX_MAGIC, 0x09, struct sgx_modification_param) #define SGX_IOC_ENCLAVE_MKTCS \ _IOW(SGX_MAGIC, 0x0a, struct sgx_range) #define SGX_IOC_ENCLAVE_TRIM \ _IOW(SGX_MAGIC, 0x0b, struct sgx_range) + +//Legacy implementation to ensure EPC pages removed no later by this ioctl + #define SGX_IOC_ENCLAVE_NOTIFY_ACCEPT \ _IOW(SGX_MAGIC, 0x0c, struct sgx_range) -#define SGX_IOC_ENCLAVE_PAGE_REMOVE \ - _IOW(SGX_MAGIC, 0x0d, unsigned long) //Note: SGX_IOC_ENCLAVE_CREATE is the same for in-kernel except that it returns a file handle for in-kernel #define SGX_IOC_ENCLAVE_ADD_PAGES_IN_KERNEL \ @@ -267,7 +276,7 @@ struct sgx_enclave_destroy { /* - * SGX2.0 definitions + * SGX2.0 definitions for Legacy OOT driver */ #define SGX_GROW_UP_FLAG 1 @@ -283,6 +292,57 @@ struct sgx_modification_param { unsigned long flags; }; +/** + * struct sgx_page_modp - parameter structure for the %SGX_IOC_PAGE_MODP ioctl + * @offset: starting page offset + * @length: length of memory (multiple of the page size) + * @prot: new protection bits of pages in range described by @offset + * and @length. + * @result: SGX result code + * @count: bytes successfully changed (multiple of page size) + */ +struct sgx_page_modp { + __u64 offset; + __u64 length; + __u64 prot; + __u64 result; + __u64 count; +}; + +/** + * struct sgx_page_modt - parameter structure for the %SGX_IOC_PAGE_MODT ioctl + * @offset: starting page offset + * @length: length of memory (multiple of the page size) + * @prot: new type of pages in range described by @offset and @length. + * @result: SGX result code + * @count: bytes successfully changed (multiple of page size) + */ +struct sgx_page_modt { + __u64 offset; + __u64 length; + __u64 type; + __u64 result; + __u64 count; +}; + +/** + * struct sgx_page_remove - parameters for the %SGX_IOC_PAGE_REMOVE ioctl + * @offset: starting page offset (page aligned relative to enclave base + * address defined in SECS) + * @length: length of memory (multiple of the page size) + * @count: bytes successfully changed (multiple of page size) + * + * Regular (PT_REG) or TCS (PT_TCS) can be removed from an initialized + * enclave if the system supports SGX2. First, the %SGX_IOC_PAGE_MODT ioctl + * should be used to change the page type to PT_TRIM. After that succeeds + * ENCLU[EACCEPT] should be run from within the enclave and then can this + * ioctl be used to complete the page removal. 
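+ *
+ * Illustrative user-space sequence (a sketch, not normative; "type_trim"
+ * stands for the numeric PT_TRIM page-type value expected by the driver,
+ * and fd/offset/length describe page-aligned enclave pages):
+ *
+ *	struct sgx_page_modt modt = { .offset = offset, .length = length,
+ *				      .type = type_trim };
+ *	ioctl(fd, SGX_IOC_PAGE_MODT, &modt);   // 1. change type to PT_TRIM
+ *	// 2. ENCLU[EACCEPT] each page from inside the enclave
+ *	struct sgx_page_remove rm = { .offset = offset, .length = length };
+ *	ioctl(fd, SGX_IOC_PAGE_REMOVE, &rm);   // 3. complete the removal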
+ */ +struct sgx_page_remove { + __u64 offset; + __u64 length; + __u64 count; +}; struct sgx_enclave_run; @@ -363,7 +423,7 @@ struct sgx_enclave_run { * Most exceptions reported on ENCLU, including those that occur within the * enclave, are fixed up and reported synchronously instead of being delivered * via a standard signal. Debug Exceptions (#DB) and Breakpoints (#BP) are - * never fixed up and are always delivered via standard signals. On synchrously + * never fixed up and are always delivered via standard signals. On synchronously * reported exceptions, -EFAULT is returned and details about the exception are * recorded in @run.exception, the optional sgx_enclave_exception struct. * diff --git a/sdk/trts/trts_emodpr.cpp b/psw/urts/linux/urts_emm.cpp similarity index 59% rename from sdk/trts/trts_emodpr.cpp rename to psw/urts/linux/urts_emm.cpp index 79d85ebd8..65dea9a6b 100644 --- a/sdk/trts/trts_emodpr.cpp +++ b/psw/urts/linux/urts_emm.cpp @@ -29,51 +29,50 @@ * */ +#include "urts_emm.h" +#include "sgx_enclave_common.h" -#include "trts_emodpr.h" - -#include "sgx_trts.h" // for sgx_ocalloc, sgx_is_outside_enclave -#include "arch.h" -#include "sgx_edger8r.h" // for sgx_ocall etc. -#include "internal/rts.h" - -/* sgx_ocfree() just restores the original outside stack pointer. */ -#define OCALLOC(val, type, len) do { \ - void* __tmp = sgx_ocalloc(len); \ - if (__tmp == NULL) { \ - sgx_ocfree(); \ - return SGX_ERROR_UNEXPECTED;\ - } \ - (val) = (type)__tmp; \ -} while (0) +#ifdef SE_SIM +#include +#define PROT_MASK (PROT_READ | PROT_WRITE | PROT_EXEC) +#endif -typedef struct ms_change_permissions_ocall_t { - size_t ms_addr; - size_t ms_size; - uint64_t ms_epcm_perms; -} ms_change_permissions_ocall_t; +typedef struct ms_alloc_ocall_t { + int32_t retval; + size_t addr; + size_t size; + uint32_t flags; +} ms_emm_alloc_ocall_t; -sgx_status_t SGXAPI change_permissions_ocall(size_t addr, size_t size, uint64_t epcm_perms, const int proc) +extern "C" sgx_status_t SGX_CDECL ocall_emm_alloc(void* pms) { + + ms_emm_alloc_ocall_t* ms = SGX_CAST(ms_emm_alloc_ocall_t*, pms); #ifdef SE_SIM - (void)addr; - (void)size; - (void)epcm_perms; - (void)proc; - return SGX_SUCCESS; + ms->retval = mprotect((void*)ms->addr, ms->size, ms->flags|PROT_MASK); #else - sgx_status_t status = SGX_SUCCESS; + ms->retval = enclave_alloc(ms->addr, ms->size, ms->flags); +#endif + return SGX_SUCCESS; +} - ms_change_permissions_ocall_t* ms; - OCALLOC(ms, ms_change_permissions_ocall_t*, sizeof(*ms)); +typedef struct ms_modify_ocall_t { + int32_t retval; + size_t addr; + size_t size; + uint32_t flags_from; + uint32_t flags_to; +} ms_emm_modify_ocall_t; - ms->ms_addr = addr; - ms->ms_size = size; - ms->ms_epcm_perms = epcm_perms; - status = sgx_ocall(proc, ms); +extern "C" sgx_status_t SGX_CDECL ocall_emm_modify(void* pms) +{ + ms_emm_modify_ocall_t* ms = SGX_CAST(ms_emm_modify_ocall_t*, pms); - sgx_ocfree(); - return status; +#ifdef SE_SIM + ms->retval = mprotect((void*)ms->addr, ms->size, ms->flags_to|PROT_MASK); +#else + ms->retval = enclave_modify(ms->addr, ms->size, ms->flags_from, ms->flags_to); #endif + return SGX_SUCCESS; } diff --git a/sdk/trts/trts_trim.h b/psw/urts/linux/urts_emm.h similarity index 87% rename from sdk/trts/trts_trim.h rename to psw/urts/linux/urts_emm.h index 8adc959f0..747094456 100644 --- a/sdk/trts/trts_trim.h +++ b/psw/urts/linux/urts_emm.h @@ -29,17 +29,14 @@ * */ - -#ifndef TRIM_RANGE_T_H__ -#define TRIM_RANGE_T_H__ +#ifndef _URTS_EMM_H_ +#define _URTS_EMM_H_ #include #include #include -#include 
"sgx_edger8r.h" // for sgx_ocall etc. - +#include "sgx_urts.h" -#include // for size_t #define SGX_CAST(type, item) ((type)(item)) @@ -47,11 +44,15 @@ extern "C" { #endif -sgx_status_t SGXAPI trim_range_ocall(size_t fromaddr, size_t toaddr); -sgx_status_t SGXAPI trim_range_commit_ocall(size_t addr); + + +sgx_status_t SGX_CDECL ocall_emm_alloc(void* pms); +sgx_status_t SGX_CDECL ocall_emm_modify(void *pms); + #ifdef __cplusplus } #endif /* __cplusplus */ #endif + diff --git a/psw/urts/loader.cpp b/psw/urts/loader.cpp index 546dd78e2..d92a93302 100644 --- a/psw/urts/loader.cpp +++ b/psw/urts/loader.cpp @@ -989,7 +989,6 @@ int CLoader::set_memory_protection() { return ret; } - return SGX_SUCCESS; } @@ -1028,13 +1027,25 @@ int CLoader::set_context_protection(layout_t *layout_start, layout_t *layout_end } #endif } - - ret = mprotect(GET_PTR(void, m_start_addr, layout->entry.rva + delta), +#ifdef SE_SIM + ret = mprotect(GET_PTR(void, m_start_addr, layout->entry.rva + delta), (size_t)layout->entry.page_count << SE_PAGE_SHIFT, prot); - if(ret != 0) +#else + if((layout->entry.attributes&PAGE_ATTR_EADD)) + ret = mprotect(GET_PTR(void, m_start_addr, layout->entry.rva + delta), + (size_t)layout->entry.page_count << SE_PAGE_SHIFT, + prot); + else//dynamic allocated regions + if(prot!=PROT_NONE) + ret = get_enclave_creator()->alloc((uint64_t) GET_PTR(void, m_start_addr, layout->entry.rva + delta), + (size_t)layout->entry.page_count << SE_PAGE_SHIFT, + prot); + else ret = 0; +#endif + if(ret != 0) { - SE_TRACE(SE_TRACE_WARNING, "mprotect(rva=%" PRIu64 ", len=%" PRIu64 ", flags=%d) failed\n", + SE_TRACE(SE_TRACE_WARNING, "mprotect/alloc(rva=%" PRIu64 ", len=%" PRIu64 ", flags=%d) failed\n", (uint64_t)m_start_addr + layout->entry.rva + delta, (uint64_t)layout->entry.page_count << SE_PAGE_SHIFT, prot); diff --git a/psw/urts/urts_com.h b/psw/urts/urts_com.h index 40a1e0e4b..293e46e72 100644 --- a/psw/urts/urts_com.h +++ b/psw/urts/urts_com.h @@ -408,7 +408,8 @@ static int __create_enclave(BinParser &parser, if (get_enclave_creator()->is_EDMM_supported(loader.get_enclave_id())) { - layout_t *layout_start = GET_PTR(layout_t, metadata, metadata->dirs[DIR_LAYOUT].offset); + //!FIXME use version of enclave to determine action here + /*layout_t *layout_start = GET_PTR(layout_t, metadata, metadata->dirs[DIR_LAYOUT].offset); layout_t *layout_end = GET_PTR(layout_t, metadata, metadata->dirs[DIR_LAYOUT].offset + metadata->dirs[DIR_LAYOUT].size); if (SGX_SUCCESS != (ret = loader.post_init_action(layout_start, layout_end, 0))) { @@ -417,7 +418,7 @@ static int __create_enclave(BinParser &parser, generate_enclave_debug_event(URTS_EXCEPTION_PREREMOVEENCLAVE, debug_info); CEnclavePool::instance()->remove_enclave(loader.get_enclave_id(), status); goto fail; - } + }*/ } //call trts to do some initialization @@ -431,7 +432,8 @@ static int __create_enclave(BinParser &parser, if (get_enclave_creator()->is_EDMM_supported(loader.get_enclave_id())) { - + //!FIXME use version of enclave to determine action here + /* layout_t *layout_start = GET_PTR(layout_t, metadata, metadata->dirs[DIR_LAYOUT].offset); layout_t *layout_end = GET_PTR(layout_t, metadata, metadata->dirs[DIR_LAYOUT].offset + metadata->dirs[DIR_LAYOUT].size); if (SGX_SUCCESS != (ret = loader.post_init_action_commit(layout_start, layout_end, 0))) @@ -441,7 +443,7 @@ static int __create_enclave(BinParser &parser, generate_enclave_debug_event(URTS_EXCEPTION_PREREMOVEENCLAVE, debug_info); CEnclavePool::instance()->remove_enclave(loader.get_enclave_id(), status); goto fail; 
- } + }*/ } if(SGX_SUCCESS != (ret = loader.set_memory_protection())) diff --git a/sdk/Makefile.source b/sdk/Makefile.source index 6ebb8fb33..69283aed7 100644 --- a/sdk/Makefile.source +++ b/sdk/Makefile.source @@ -66,7 +66,7 @@ LIBTCXX := $(BUILD_DIR)/libsgx_tcxx.a LIBTSE := $(BUILD_DIR)/libsgx_tservice.a .PHONY: components -components: tstdc tcxx tservice trts tcrypto tkey_exchange ukey_exchange tprotected_fs uprotected_fs ptrace sample_crypto libcapable simulation signtool edger8r tcmalloc sgx_pcl sgx_encrypt sgx_tswitchless sgx_uswitchless pthread openmp protobuf ttls utls +components: tstdc tcxx tservice trts tcrypto tkey_exchange ukey_exchange tprotected_fs uprotected_fs ptrace sample_crypto libcapable simulation signtool edger8r tcmalloc sgx_pcl sgx_encrypt sgx_tswitchless sgx_uswitchless pthread openmp protobuf ttls utls sgx_mm # --------------------------------------------------- # tstdc @@ -176,6 +176,10 @@ ec_dh_lib: # --------------------------------------------------- # Other trusted libraries # --------------------------------------------------- +.PHONY: sgx_mm +sgx_mm: + $(MAKE) -C emm/ + .PHONY: trts trts: $(MAKE) -C trts/ @@ -291,6 +295,7 @@ clean: $(MAKE) -C tlibcxx/ clean $(MAKE) -C tseal/linux/ clean $(MAKE) -C selib/linux/ clean + $(MAKE) -C emm/ clean $(MAKE) -C trts/ clean $(MAKE) -C tsetjmp/ clean $(MAKE) -C tsafecrt/ clean diff --git a/sdk/emm/Makefile b/sdk/emm/Makefile new file mode 100644 index 000000000..9312a1f64 --- /dev/null +++ b/sdk/emm/Makefile @@ -0,0 +1,67 @@ +# Copyright (C) 2011-2021 Intel Corporation. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
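+# Builds the static EMM library libsgx_mm.a from the runtime-agnostic sources.
+# As we read the HAVE_PRIMITIVES flag below, sgx_primitives.o is compiled in
+# only when the hosting runtime does not already provide the primitives
+# declared in include/sgx_mm_primitives.h (HAVE_PRIMITIVES != 1).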
+
+include ../../buildenv.mk
+
+HAVE_PRIMITIVES ?= 1
+
+CPPFLAGS += -Iinclude \
+	-I$(COMMON_DIR)/inc/tlibc \
+	-Wno-missing-braces -Wno-unused-parameter
+
+OBJS := bit_array.o \
+	ema.o \
+	emm_private.o \
+	sgx_mm.o
+
+ifneq ($(HAVE_PRIMITIVES), 1)
+ASM_OBJ := sgx_primitives.o
+endif
+
+LIB_NAME := libsgx_mm.a
+
+.PHONY: all
+all: $(LIB_NAME) | $(BUILD_DIR)
+	$(CP) $(LIB_NAME) $|
+
+$(LIB_NAME): $(OBJS) $(ASM_OBJ)
+	$(AR) rcs $@ $^
+
+$(ASM_OBJ): %.o: %.S
+	$(CC) $(COMMON_FLAGS) $(ENCLAVE_CFLAGS) $(CPPFLAGS) -c $< -o $@
+
+$(OBJS): %.o: %.c
+	$(CC) -c $(COMMON_FLAGS) $(ENCLAVE_CFLAGS) $(CPPFLAGS) $< -o $@
+
+$(BUILD_DIR):
+	@$(MKDIR) $@
+
+.PHONY: clean
+clean:
+	@$(RM) $(LIB_NAME) $(ASM_OBJ) $(OBJS) $(BUILD_DIR)/$(LIB_NAME) *.bak *~
+
diff --git a/sdk/emm/README.md b/sdk/emm/README.md
new file mode 100644
index 000000000..3bca4ce64
--- /dev/null
+++ b/sdk/emm/README.md
@@ -0,0 +1,108 @@
+Introduction
+---------------------------------
+This directory contains an implementation of the Enclave Memory Manager proposed in [this PR](https://github.com/openenclave/openenclave/pull/3991).
+
+The instructions here are for developing and testing the EMM functionality only. Consult the main README for general usage.
+
+**Note:** This implementation is based on the current Linux kernel implementation posted [here](https://github.com/rchatre/linux/tree/sgx/sgx2_not_submitted_v1), which has not yet been finalized and upstreamed. As the kernel interfaces evolve, the EMM implementation and/or interface may change.
+
+Prerequisites
+-------------------------------
+
+#### Build and install kernel with EDMM support
+On Ubuntu 18.04/20.04, follow the general instructions from [here](https://wiki.ubuntu.com/KernelTeam/GitKernelBuild) with these changes.
+
+- For step 1, clone this kernel repo and check out the branch with SGX EDMM support
+```
+$ git clone https://github.com/rchatre/linux.git
+$ cd linux
+$ git checkout sgx/sgx2_not_submitted_v1
+```
+- For step 6, modify .config to set "CONFIG_X86_SGX=y".
+
+#### Verify kernel build and EDMM support
+At the root of the kernel source repo,
+```
+$ cd tools/testing/selftests/sgx/ && make
+# ./test_sgx
+```
+#### Add udev rules to map SGX device nodes and set the right permissions
+Download [10-sgx.rules](https://github.com/intel/SGXDataCenterAttestationPrimitives/blob/master/driver/linux/10-sgx.rules) and activate it as follows.
+```
+$ sudo cp 10-sgx.rules /etc/udev/rules.d
+$ sudo groupadd sgx_prv
+$ sudo udevadm trigger
+```
+Build and Install SDK and PSW
+------------------------------
+
+#### Clone the linux-sgx repo and check out the edmm branch
+```
+$ git clone https://github.com/intel/linux-sgx.git $repo_root
+$ cd $repo_root
+$ git checkout edmm_v1
+```
+The following steps assume $repo_root is the top directory of the linux-sgx repo you cloned.
+
+#### To build and install SDK with EDMM support
+```
+$ cd $repo_root
+$ make preparation
+$ make sdk_install_pkg_no_mitigation
+$ cd linux/installer/bin
+$ ./sgx_linux_x64_sdk_2.15.100.3.bin
+# follow its prompt to set the SDK installation destination directory, $SGX_SDK
+$ source $SGX_SDK/environment
+```
+
+#### To build and set up libsgx_enclave_common and libsgx_urts
+To test the EMM functionality without involving remote attestation, we only need libsgx_enclave_common and libsgx_urts built, with LD_LIBRARY_PATH pointing at them.
+
+```
+$ cd $repo_root/psw/urts/linux
+$ make
+$ cd $repo_root/build/linux
+$ ln -s libsgx_enclave_common.so libsgx_enclave_common.so.1
+$ export LD_LIBRARY_PATH=$repo_root/build/linux/
+```
+
+#### To build and run the API tests
+```
+$ cd $repo_root/sdk/emm/api_tests/
+$ make
+$ ./test_mm_api
+# or run the tests in a loop in the background
+$ nohup bash ./test_loop.sh 1000 &
+# check the results in the nohup log:
+$ tail -f nohup.out
+```
+
+Limitations of the current implementation
+---------------------------------------
+1. EMM holds a global recursive mutex for the whole duration of each API invocation.
+   - No support for concurrent operations (modify type/permissions, commit and commit_data) on different regions.
+2. EMM internally uses the default heap and stack during its operations.
+   - The initial heap and stack should be sufficient to bootstrap EMM initialization.
+   - Bookkeeping for the heap should be created when the RTS is initialized.
+   - The RTS calls mm_init_ema to create a region for the static heap (EADDed), and mm_alloc to reserve a COMMIT_ON_DEMAND region for the dynamic heap.
+   - Stack expansion should be done in the first-phase exception handler and use a reserved static stack, so that the stack is not overrun by sgx_mm API calls during stack expansion.
+3. EMM requires that all RTS allocations (with the SGX_EMA_SYSTEM flag) be reserved up front during RTS/enclave initialization.
+   - EMM won't allocate any user-requested region below the highest address in the RTS regions.
+   - EMM won't serve any user request unless at least one RTS region is reserved.
+4. EMM relies on the vDSO interface to guarantee that the fault handler is called on the same OS thread where the fault happened.
+   - This is due to the use of the global recursive mutex: if the fault handler comes in from a different thread while the mutex is held, it will deadlock.
+   - Note that a #PF can happen when more stack is needed inside EMM functions while the mutex is locked.
+   - The vDSO user handler should ensure it re-enters the enclave with the original TCS and on the same OS thread.
+   - To avoid potential deadlocks, no other mutex/lock should be used on this path from the user handler to the first-phase exception handler inside the enclave.
+5. Not optimized for performance.
+6. No extensive validation; failures or incorrect error codes are possible for corner cases.
+
+Notes on the Intel SDK-specific implementation
+-----------------------------------------
+1. The Intel SDK RTS abstraction layer implements its mutex as a spinlock because there are no built-in OCalls for wait/wake on an OS event.
+2. The Intel SDK signing tool reserves all unused address space as guard pages, leaving no space for user allocation. In this implementation, we simply changed tRTS to leave the majority of that space free. In the future, we may need to change the signing tool to encode this info in the metadata.
+3. The API tests are built with the Intel SDK. Though most of the tests are RTS-independent, the TCS-related tests use hardcoded Intel thread context layout info.
+4. All makefiles assume the linux-sgx repo layout and environment.
+
+
diff --git a/sdk/emm/api_tests/App/App.cpp b/sdk/emm/api_tests/App/App.cpp
new file mode 100644
index 000000000..adc931ec2
--- /dev/null
+++ b/sdk/emm/api_tests/App/App.cpp
@@ -0,0 +1,419 @@
+/*
+ * Copyright (C) 2011-2021 Intel Corporation. All rights reserved.
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + + +#include +#include +#include + +#include +#include +#include + +# include +# include +# define MAX_PATH FILENAME_MAX + +#include "sgx_urts.h" +#include "App.h" +#include "Enclave_u.h" +#include "../tcs.h" +using namespace std; + +/* Global EID shared by multiple threads */ +sgx_enclave_id_t global_eid = 0; + +typedef struct _sgx_errlist_t { + sgx_status_t err; + const char *msg; + const char *sug; /* Suggestion */ +} sgx_errlist_t; + +/* Error code returned by sgx_create_enclave */ +static sgx_errlist_t sgx_errlist[] = { + { + SGX_ERROR_UNEXPECTED, + "Unexpected error occurred.", + NULL + }, + { + SGX_ERROR_INVALID_PARAMETER, + "Invalid parameter.", + NULL + }, + { + SGX_ERROR_OUT_OF_MEMORY, + "Out of memory.", + NULL + }, + { + SGX_ERROR_ENCLAVE_LOST, + "Power transition occurred.", + "Please refer to the sample \"PowerTransition\" for details." + }, + { + SGX_ERROR_INVALID_ENCLAVE, + "Invalid enclave image.", + NULL + }, + { + SGX_ERROR_INVALID_ENCLAVE_ID, + "Invalid enclave identification.", + NULL + }, + { + SGX_ERROR_INVALID_SIGNATURE, + "Invalid enclave signature.", + NULL + }, + { + SGX_ERROR_OUT_OF_EPC, + "Out of EPC memory.", + NULL + }, + { + SGX_ERROR_NO_DEVICE, + "Invalid SGX device.", + "Please make sure SGX module is enabled in the BIOS, and install SGX driver afterwards." 
+ }, + { + SGX_ERROR_MEMORY_MAP_CONFLICT, + "Memory map conflicted.", + NULL + }, + { + SGX_ERROR_INVALID_METADATA, + "Invalid enclave metadata.", + NULL + }, + { + SGX_ERROR_DEVICE_BUSY, + "SGX device was busy.", + NULL + }, + { + SGX_ERROR_INVALID_VERSION, + "Enclave version was invalid.", + NULL + }, + { + SGX_ERROR_INVALID_ATTRIBUTE, + "Enclave was not authorized.", + NULL + }, + { + SGX_ERROR_ENCLAVE_FILE_ACCESS, + "Can't open enclave file.", + NULL + }, +}; + +/* Check error conditions for loading enclave */ +void print_error_message(sgx_status_t ret) +{ + size_t idx = 0; + size_t ttl = sizeof sgx_errlist/sizeof sgx_errlist[0]; + + for (idx = 0; idx < ttl; idx++) { + if(ret == sgx_errlist[idx].err) { + if(NULL != sgx_errlist[idx].sug) + printf("Info: %s\n", sgx_errlist[idx].sug); + printf("Error: %s\n", sgx_errlist[idx].msg); + break; + } + } + + if (idx == ttl) + printf("Error code is 0x%X. Please refer to the \"Intel SGX SDK Developer Reference\" for more details.\n", ret); +} + +/* Initialize the enclave: + * Call sgx_create_enclave to initialize an enclave instance + */ +int initialize_enclave(void) +{ + sgx_status_t ret = SGX_ERROR_UNEXPECTED; + + /* Call sgx_create_enclave to initialize an enclave instance */ + /* Debug Support: set 2nd parameter to 1 */ + ret = sgx_create_enclave(ENCLAVE_FILENAME, SGX_DEBUG_FLAG, NULL, NULL, &global_eid, NULL); + if (ret != SGX_SUCCESS) { + print_error_message(ret); + return -1; + } + + return 0; +} + +/* OCall functions */ +void ocall_print_string(const char *str) +{ + /* Proxy/Bridge will check the length and null-terminate + * the input string to prevent buffer overflow. + */ + printf("%s", str); +} + +int test_tcs(); + +static atomic counter_failures; + +#include //rand +void driver(int sid) +{ + sgx_status_t ret = SGX_ERROR_UNEXPECTED; + int retval = 0; + do{ + usleep(rand()%11); + ret = ecall_test_sgx_mm(global_eid, &retval, sid); + if (ret == SGX_SUCCESS){ + printf("test_sgx_mm returned %d\n", retval); + counter_failures += retval; + }else if (ret == SGX_ERROR_OUT_OF_TCS){ + printf("!!! enclave out of TCS, retrying...\n"); + continue; + }else + { + abort(); + } + //test_tcs does its own retry + counter_failures += test_tcs(); + return; + }while(true); +} + + +int test_unsafe() +{ + sgx_status_t ret = SGX_ERROR_UNEXPECTED; + int retval = 0; + ret = ecall_test_sgx_mm_unsafe(global_eid, &retval); + if (ret == SGX_SUCCESS){ + if(retval) + printf("!!! test_sgx_mm_unsafe returned %d\n", retval); + else + printf("*** unsafe tests passed\n"); + return retval; + }else + abort(); +} + + +typedef struct ms_ecall_check_context_t { + int ms_retval; + size_t ms_tcs; +} ms_ecall_check_context_t; + +extern "C" { +#include "sgx.h" +} +vdso_sgx_enter_enclave_t vdso_sgx_enter_enclave; + +#define EENTER 2 + +int ecall_check_context_manual(int* retval, size_t tcs) +{ + ms_ecall_check_context_t ms; + ms.ms_tcs = tcs; + ms.ms_retval= -1; + struct sgx_enclave_run run; + memset(&run, 0, sizeof(run)); + run.tcs = (__u64)tcs; +/******** + !NOTE: hardcoded ecall number. 
Needs update if edl changes +*******/ + int ret = vdso_sgx_enter_enclave((unsigned long)3, (unsigned long)(&ms), 0, EENTER, + 0, 0, &run); + if (ret == 0) *retval = ms.ms_retval; + return ret; +} +#include +#include +void* get_vdso_sym(const char* vdso_func_name) +{ + char* dynstr = 0; + void *ret = NULL; + + uint8_t* vdso_addr = (uint8_t*)getauxval(AT_SYSINFO_EHDR); + Elf64_Ehdr* elf_header = (Elf64_Ehdr*)vdso_addr; + Elf64_Shdr* section_header = (Elf64_Shdr*)(vdso_addr + elf_header->e_shoff); + + for (int i = 0; i < elf_header->e_shnum; i++) { + auto& s = section_header[i]; + auto& ss_ = section_header[elf_header->e_shstrndx]; + auto name = (char*)(vdso_addr + ss_.sh_offset + s.sh_name); + if (strcmp(name, ".dynstr") == 0) { + dynstr = (char*)(vdso_addr + s.sh_offset); + break; + } + } + + for (int i = 0; i < elf_header->e_shnum; i++) { + auto& s = section_header[i]; + auto& ss_ = section_header[elf_header->e_shstrndx]; + auto name = (char*)(vdso_addr + ss_.sh_offset + s.sh_name); + if (strcmp(name, ".dynsym") == 0) { + for (unsigned int si = 0; si < (s.sh_size/s.sh_entsize); si++) { + auto &sym = ((Elf64_Sym*)(vdso_addr + s.sh_offset))[si]; + auto vdname = dynstr + sym.st_name; + if (strcmp(vdname, vdso_func_name) == 0) { + ret = (vdso_addr + sym.st_value); + break; + } + } + if (ret) break; + } + } + return ret; +} + +#define fastcall __attribute__((regparm(3),noinline,visibility("default"))) +//this function is used to notify GDB scripts +//GDB is supposed to have a breakpoint on urts_add_tcs to receive debug interupt +//once the breakpoint has been hit, GDB extracts the address of tcs and sets DBGOPTIN for the tcs +extern "C" void fastcall urts_add_tcs(tcs_t * const tcs) +{ + (void)(tcs); +} + +int test_tcs() +{ + if(vdso_sgx_enter_enclave == NULL) + return 0; //skip this test + sgx_status_t ret = SGX_ERROR_UNEXPECTED; + size_t tcs = 0; + + do { + usleep(rand()%11); + ret = ecall_alloc_context(global_eid, &tcs); + if (ret == SGX_SUCCESS){ + if (tcs > 1) { + printf("*** test_alloc_context returned 0X%LX\n", tcs); + break; + } else { + printf("!!! alloc context failed\n"); + return 1; + } + }else if (ret == SGX_ERROR_OUT_OF_TCS){ + continue; + }else + abort(); + } while (true); + + urts_add_tcs((tcs_t*)tcs);//turn on sgx-gdb + int retval = 0; + int r = ecall_check_context_manual(&retval, tcs); + + if (r == 0){ + if (retval) { + printf("!!! check tcs returned %d\n", retval); + return 1; + } + else + printf("*** check tcs passed\n"); + }else + abort(); + + do { + usleep(rand()%11); + ret = ecall_dealloc_context(global_eid, &retval, tcs); + if (ret == SGX_SUCCESS) { + if(retval) { + printf("!!! test_deaalloc_context returned %d\n", retval); + return 1; + } + else{ + printf("*** dealloc context pass\n"); + return 0; + } + } else if (ret == SGX_ERROR_OUT_OF_TCS){ + continue; + }else + abort(); + } while (true); + return 0; +} + +/* ecall_thread_functions: + * Invokes thread functions including mutex, condition variable, etc. + */ +int test_sgx_mm_functions(int num_threads) +{ + vector threads; + for (int i=0; i< num_threads; i++) + threads.push_back(new thread(driver, i)); + + for (int i=0; i< num_threads; i++) + { + threads[i]->join(); + delete threads[i]; + } + + if(counter_failures) + { + printf("!!! 
Fail in %d threads\n", static_cast(counter_failures)); + }else + printf("*** All threads ran successfully.\n"); + return counter_failures; +} + + + +/* Application entry */ +int SGX_CDECL main(int argc, char *argv[]) +{ + (void)(argc); + (void)(argv); + + + vdso_sgx_enter_enclave = (vdso_sgx_enter_enclave_t)get_vdso_sym("__vdso_sgx_enter_enclave"); + /* Initialize the enclave */ + if(initialize_enclave() < 0){ + printf("Failed initialize enclave.\n"); + return -1; + } + //srand (time(NULL)); + srand ((3141596/1618)*271828); + int ret = 0; + + //17 threads for 100 iterations passed when this is checked in + ret += test_sgx_mm_functions(17); + ret += test_unsafe(); + + sgx_destroy_enclave(global_eid); + if (!ret) + printf("*** All tests pass.\n"); + else + printf("!!! %d test(s) failed.\n", ret); + + return ret; +} + diff --git a/sdk/emm/api_tests/App/App.h b/sdk/emm/api_tests/App/App.h new file mode 100644 index 000000000..ec2de43c8 --- /dev/null +++ b/sdk/emm/api_tests/App/App.h @@ -0,0 +1,74 @@ +/* + * Copyright (C) 2011-2021 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + + +#ifndef _APP_H_ +#define _APP_H_ + +#include +#include +#include +#include + +#include "sgx_error.h" /* sgx_status_t */ +#include "sgx_eid.h" /* sgx_enclave_id_t */ + +#ifndef TRUE +# define TRUE 1 +#endif + +#ifndef FALSE +# define FALSE 0 +#endif + +# define TOKEN_FILENAME "enclave.token" +# define ENCLAVE_FILENAME "enclave.signed.so" + +extern sgx_enclave_id_t global_eid; /* global enclave id */ + +#if defined(__cplusplus) +extern "C" { +#endif + +void edger8r_array_attributes(void); +void edger8r_type_attributes(void); +void edger8r_pointer_attributes(void); +void edger8r_function_attributes(void); + +void ecall_libc_functions(void); +void ecall_libcxx_functions(void); +void ecall_thread_functions(void); + +#if defined(__cplusplus) +} +#endif + +#endif /* !_APP_H_ */ diff --git a/sdk/emm/api_tests/App/sgx.h b/sdk/emm/api_tests/App/sgx.h new file mode 100644 index 000000000..b2fbcee9d --- /dev/null +++ b/sdk/emm/api_tests/App/sgx.h @@ -0,0 +1,228 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright(c) 2016-20 Intel Corporation. + */ +#ifndef _UAPI_ASM_X86_SGX_H +#define _UAPI_ASM_X86_SGX_H + +#include +#include + +/** + * enum sgx_page_flags - page control flags + * %SGX_PAGE_MEASURE: Measure the page contents with a sequence of + * ENCLS[EEXTEND] operations. + */ +enum sgx_page_flags { + SGX_PAGE_MEASURE = 0x01, +}; + +#define SGX_MAGIC 0xA4 + +#define SGX_IOC_ENCLAVE_CREATE \ + _IOW(SGX_MAGIC, 0x00, struct sgx_enclave_create) +#define SGX_IOC_ENCLAVE_ADD_PAGES \ + _IOWR(SGX_MAGIC, 0x01, struct sgx_enclave_add_pages) +#define SGX_IOC_ENCLAVE_INIT \ + _IOW(SGX_MAGIC, 0x02, struct sgx_enclave_init) +#define SGX_IOC_ENCLAVE_PROVISION \ + _IOW(SGX_MAGIC, 0x03, struct sgx_enclave_provision) +#define SGX_IOC_PAGE_MODP \ + _IOWR(SGX_MAGIC, 0x04, struct sgx_page_modp) +#define SGX_IOC_PAGE_MODT \ + _IOWR(SGX_MAGIC, 0x05, struct sgx_page_modt) +#define SGX_IOC_PAGE_REMOVE \ + _IOWR(SGX_MAGIC, 0x06, struct sgx_page_remove) + +/** + * struct sgx_enclave_create - parameter structure for the + * %SGX_IOC_ENCLAVE_CREATE ioctl + * @src: address for the SECS page data + */ +struct sgx_enclave_create { + __u64 src; +}; + +/** + * struct sgx_enclave_add_pages - parameter structure for the + * %SGX_IOC_ENCLAVE_ADD_PAGE ioctl + * @src: start address for the page data + * @offset: starting page offset + * @length: length of the data (multiple of the page size) + * @secinfo: address for the SECINFO data + * @flags: page control flags + * @count: number of bytes added (multiple of the page size) + */ +struct sgx_enclave_add_pages { + __u64 src; + __u64 offset; + __u64 length; + __u64 secinfo; + __u64 flags; + __u64 count; +}; + +/** + * struct sgx_enclave_init - parameter structure for the + * %SGX_IOC_ENCLAVE_INIT ioctl + * @sigstruct: address for the SIGSTRUCT data + */ +struct sgx_enclave_init { + __u64 sigstruct; +}; + +/** + * struct sgx_enclave_provision - parameter structure for the + * %SGX_IOC_ENCLAVE_PROVISION ioctl + * @fd: file handle of /dev/sgx_provision + */ +struct sgx_enclave_provision { + __u64 fd; +}; + +/** + * struct sgx_page_modp - parameter structure for the %SGX_IOC_PAGE_MODP ioctl + * @offset: starting page offset (page aligned relative to enclave base + * address defined in SECS) + * @length: length of memory (multiple of the page size) + * @prot: new protection bits of pages in range described by @offset + * and @length + * @result: SGX result code of ENCLS[EMODPR] function + * @count: bytes successfully changed (multiple of page size) 
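+ *
+ * Note: when permissions are restricted, the kernel runs ENCLS[EMODPR] over
+ * the range (see @result); the enclave is then expected to re-accept the
+ * affected pages with ENCLU[EACCEPT] before relying on the new permissions.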
+ */ +struct sgx_page_modp { + __u64 offset; + __u64 length; + __u64 prot; + __u64 result; + __u64 count; +}; + +/** + * struct sgx_page_modt - parameter structure for the %SGX_IOC_PAGE_MODT ioctl + * @offset: starting page offset (page aligned relative to enclave base + * address defined in SECS) + * @length: length of memory (multiple of the page size) + * @type: new type of pages in range described by @offset and @length + * @result: SGX result code of ENCLS[EMODT] function + * @count: bytes successfully changed (multiple of page size) + */ +struct sgx_page_modt { + __u64 offset; + __u64 length; + __u64 type; + __u64 result; + __u64 count; +}; + +/** + * struct sgx_page_remove - parameters for the %SGX_IOC_PAGE_REMOVE ioctl + * @offset: starting page offset (page aligned relative to enclave base + * address defined in SECS) + * @length: length of memory (multiple of the page size) + * @count: bytes successfully changed (multiple of page size) + * + * Regular (PT_REG) or TCS (PT_TCS) can be removed from an initialized + * enclave if the system supports SGX2. First, the %SGX_IOC_PAGE_MODT ioctl + * should be used to change the page type to PT_TRIM. After that succeeds + * ENCLU[EACCEPT] should be run from within the enclave and then can this + * ioctl be used to complete the page removal. + */ +struct sgx_page_remove { + __u64 offset; + __u64 length; + __u64 count; +}; + +struct sgx_enclave_run; + +/** + * typedef sgx_enclave_user_handler_t - Exit handler function accepted by + * __vdso_sgx_enter_enclave() + * @run: The run instance given by the caller + * + * The register parameters contain the snapshot of their values at enclave + * exit. An invalid ENCLU function number will cause -EINVAL to be returned + * to the caller. + * + * Return: + * - <= 0: The given value is returned back to the caller. + * - > 0: ENCLU function to invoke, either EENTER or ERESUME. + */ +typedef int (*sgx_enclave_user_handler_t)(long rdi, long rsi, long rdx, + long rsp, long r8, long r9, + struct sgx_enclave_run *run); + +/** + * struct sgx_enclave_run - the execution context of __vdso_sgx_enter_enclave() + * @tcs: TCS used to enter the enclave + * @function: The last seen ENCLU function (EENTER, ERESUME or EEXIT) + * @exception_vector: The interrupt vector of the exception + * @exception_error_code: The exception error code pulled out of the stack + * @exception_addr: The address that triggered the exception + * @user_handler: User provided callback run on exception + * @user_data: Data passed to the user handler + * @reserved Reserved for future extensions + * + * If @user_handler is provided, the handler will be invoked on all return paths + * of the normal flow. The user handler may transfer control, e.g. via a + * longjmp() call or a C++ exception, without returning to + * __vdso_sgx_enter_enclave(). + */ +struct sgx_enclave_run { + __u64 tcs; + __u32 function; + __u16 exception_vector; + __u16 exception_error_code; + __u64 exception_addr; + __u64 user_handler; + __u64 user_data; + __u8 reserved[216]; +}; + +/** + * typedef vdso_sgx_enter_enclave_t - Prototype for __vdso_sgx_enter_enclave(), + * a vDSO function to enter an SGX enclave. 
+ * @rdi: Pass-through value for RDI + * @rsi: Pass-through value for RSI + * @rdx: Pass-through value for RDX + * @function: ENCLU function, must be EENTER or ERESUME + * @r8: Pass-through value for R8 + * @r9: Pass-through value for R9 + * @run: struct sgx_enclave_run, must be non-NULL + * + * NOTE: __vdso_sgx_enter_enclave() does not ensure full compliance with the + * x86-64 ABI, e.g. doesn't handle XSAVE state. Except for non-volatile + * general purpose registers, EFLAGS.DF, and RSP alignment, preserving/setting + * state in accordance with the x86-64 ABI is the responsibility of the enclave + * and its runtime, i.e. __vdso_sgx_enter_enclave() cannot be called from C + * code without careful consideration by both the enclave and its runtime. + * + * All general purpose registers except RAX, RBX and RCX are passed as-is to the + * enclave. RAX, RBX and RCX are consumed by EENTER and ERESUME and are loaded + * with @function, asynchronous exit pointer, and @run.tcs respectively. + * + * RBP and the stack are used to anchor __vdso_sgx_enter_enclave() to the + * pre-enclave state, e.g. to retrieve @run.exception and @run.user_handler + * after an enclave exit. All other registers are available for use by the + * enclave and its runtime, e.g. an enclave can push additional data onto the + * stack (and modify RSP) to pass information to the optional user handler (see + * below). + * + * Most exceptions reported on ENCLU, including those that occur within the + * enclave, are fixed up and reported synchronously instead of being delivered + * via a standard signal. Debug Exceptions (#DB) and Breakpoints (#BP) are + * never fixed up and are always delivered via standard signals. On synchronously + * reported exceptions, -EFAULT is returned and details about the exception are + * recorded in @run.exception, the optional sgx_enclave_exception struct. + * + * Return: + * - 0: ENCLU function was successfully executed. + * - -EINVAL: Invalid ENCL number (neither EENTER nor ERESUME). + */ +typedef int (*vdso_sgx_enter_enclave_t)(unsigned long rdi, unsigned long rsi, + unsigned long rdx, unsigned int function, + unsigned long r8, unsigned long r9, + struct sgx_enclave_run *run); + +#endif /* _UAPI_ASM_X86_SGX_H */ diff --git a/sdk/emm/api_tests/Enclave/Enclave.cpp b/sdk/emm/api_tests/Enclave/Enclave.cpp new file mode 100644 index 000000000..3a56172d3 --- /dev/null +++ b/sdk/emm/api_tests/Enclave/Enclave.cpp @@ -0,0 +1,708 @@ +/* + * Copyright (C) 2011-2021 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include "Enclave.h" +#include "Enclave_t.h" /* print_string */ +#include +#include /* vsnprintf */ +#include +#include +#include "../../include/sgx_mm.h" +#define SGX_PAGE_SIZE 4096 +#include "sgx_thread.h" +#include +#include "../tcs.h" +using namespace std; +/* + * printf: + * Invokes OCALL to display the enclave buffer to the terminal. + */ +int printf(const char* fmt, ...) +{ + char buf[4096*2] = { '\0' }; + va_list ap; + va_start(ap, fmt); + vsnprintf(buf, BUFSIZ, fmt, ap); + va_end(ap); + ocall_print_string(buf); + return (int)strnlen(buf, BUFSIZ - 1) + 1; +} + +#define LOG(fmt, ...) do { \ + printf("[%s %s:%d] " fmt, __FUNCTION__, __FILE__, __LINE__, ##__VA_ARGS__); \ +}while(0) + + +#define EXPECT_EQ(a, b) \ + do { \ + if ((a) != (b)){ \ + LOG( #a " expected:" #b " got: %lu\n", (uint64_t)(a)); \ + return 1; \ + }\ + } while(0); + +#define EXPECT_NEQ(a, b) \ + do { \ + if ((a) == (b)) {\ + LOG( #a " not expected: " #b "\n" ); \ + return 1; \ + }\ + } while(0); + +const size_t ALLOC_SIZE = 0x2000; +vector allocated_blocks; +sgx_thread_mutex_t mutex = SGX_THREAD_MUTEX_INITIALIZER; + +int test_sgx_mm_alloc_dealloc() +{ + int ret = sgx_mm_dealloc(0, ALLOC_SIZE); + EXPECT_EQ(ret, EINVAL); + // we should be able to alloc, commit, uncommit + // in multiple threads without interference + void* addr = 0; + ret = sgx_mm_alloc(NULL, ALLOC_SIZE, + SGX_EMA_COMMIT_NOW, NULL, NULL, &addr); + + EXPECT_EQ(ret, 0); + EXPECT_NEQ(addr, NULL); + ret = sgx_mm_dealloc(addr, ALLOC_SIZE); + EXPECT_EQ(ret, 0); + + return 0; +} + +int test_sgx_mm_alloc_commit_uncommit() +{ + int ret = sgx_mm_dealloc(0, ALLOC_SIZE); + EXPECT_EQ(ret, EINVAL); + // we should be able to alloc, commit, uncommit + // in multiple threads without interference + void* addr = 0; + ret = sgx_mm_alloc(NULL, ALLOC_SIZE, + SGX_EMA_COMMIT_NOW, NULL, NULL, &addr); + + EXPECT_EQ(ret, 0); + EXPECT_NEQ(addr, NULL); + + ret = sgx_mm_commit(addr, ALLOC_SIZE); + EXPECT_EQ(ret, 0); + + void* addr1 = NULL; + ret = sgx_mm_alloc(addr, ALLOC_SIZE, + SGX_EMA_COMMIT_NOW | SGX_EMA_FIXED, NULL, NULL, &addr1); + + EXPECT_EQ(ret, EEXIST); + EXPECT_EQ(addr1, NULL); + ret = sgx_mm_uncommit(addr, ALLOC_SIZE); + EXPECT_EQ(ret, 0); + ret = sgx_mm_uncommit(addr, ALLOC_SIZE); + EXPECT_EQ(ret, 0); //we do nothing if it's already uncommitted + + ret = sgx_mm_commit(addr, ALLOC_SIZE); + EXPECT_EQ(ret, 0); + + //no longer use these areas, ready to be + // released by any thread + sgx_thread_mutex_lock(&mutex); + allocated_blocks.push_back(addr); + sgx_thread_mutex_unlock(&mutex); + return 0; +} +/* + * Only release areas previously stored + * by other threads as ready to be released + */ +int test_sgx_mm_dealloc() +{ + int res = 0; + sgx_thread_mutex_lock(&mutex); + auto it = allocated_blocks.begin(); + while ( it!=allocated_blocks.end()){ + int ret = sgx_mm_dealloc(*it, ALLOC_SIZE); + if(ret){ + res ++; + LOG("!!! 
failed dealloc, errno = %d\n", ret); + it++; + }else + it = allocated_blocks.erase(it); + } + sgx_thread_mutex_unlock(&mutex); + return res; +} + +int test_sgx_mm_alloc_dealloc_unsafe1() +{ +// allocation, deallocation + int ret = sgx_mm_dealloc(0, ALLOC_SIZE); + EXPECT_EQ(ret, EINVAL); + + void* addr = 0; + ret = sgx_mm_alloc(NULL, ALLOC_SIZE, + SGX_EMA_COMMIT_NOW, NULL, NULL, &addr); + + EXPECT_EQ(ret, 0); + EXPECT_NEQ(addr, NULL); + + ret = sgx_mm_commit(addr, ALLOC_SIZE); + EXPECT_EQ(ret, 0); + + void* addr1 = NULL; + ret = sgx_mm_alloc(addr, ALLOC_SIZE, + SGX_EMA_COMMIT_NOW|SGX_EMA_FIXED, NULL, NULL, &addr1); + + EXPECT_EQ(ret, EEXIST); + EXPECT_EQ(addr1, NULL); + + ret = sgx_mm_uncommit(addr, ALLOC_SIZE); + EXPECT_EQ(ret, 0); + + ret = sgx_mm_uncommit(addr, ALLOC_SIZE); + EXPECT_EQ(ret, 0); //we do nothing if it's already uncommitted + + ret = sgx_mm_commit(addr, ALLOC_SIZE); + EXPECT_EQ(ret, 0); + + ret = sgx_mm_dealloc(addr, ALLOC_SIZE); + EXPECT_EQ(ret, 0); + + ret = sgx_mm_dealloc(addr, ALLOC_SIZE); + EXPECT_EQ(ret, EINVAL); + + ret = sgx_mm_uncommit(addr, ALLOC_SIZE); + EXPECT_EQ(ret, EINVAL); // error if it's already deallocated + + void* addr2 = NULL; + ret = sgx_mm_alloc(addr, ALLOC_SIZE, + SGX_EMA_COMMIT_ON_DEMAND|SGX_EMA_FIXED, NULL, NULL, &addr2); + + EXPECT_EQ(ret, 0); + EXPECT_EQ(addr2, addr);//mm should realloc to the given addr + + ret = sgx_mm_dealloc(addr, ALLOC_SIZE); + EXPECT_EQ(ret, 0); + + ret = sgx_mm_alloc(addr, ALLOC_SIZE, + SGX_EMA_COMMIT_ON_DEMAND|SGX_EMA_FIXED, NULL, NULL, &addr2); + + EXPECT_EQ(ret, 0); + EXPECT_EQ(addr, addr2); + + ret = sgx_mm_alloc(addr, ALLOC_SIZE, + SGX_EMA_COMMIT_ON_DEMAND, NULL, NULL, &addr2); + + EXPECT_EQ(ret, 0); + EXPECT_NEQ(addr, addr2); + + uint8_t *data= (uint8_t*)addr2; + data[0]=0xFF; + data[ALLOC_SIZE-1]=0xFF; + + ret = sgx_mm_dealloc(addr, ALLOC_SIZE); + EXPECT_EQ(ret, 0); + + ret = sgx_mm_dealloc(addr2, ALLOC_SIZE); + EXPECT_EQ(ret, 0); + + return 0; +} + +typedef struct _pfdata +{ + sgx_pfinfo pf; + union { + int access; // access that triggers PF, R/W/X + int magic; + }; + void* addr_expected; +} pf_data_t; + +int permissions_handler(const sgx_pfinfo *pfinfo, void *private_data) +{ + pf_data_t* pd = (pf_data_t *) private_data; + memcpy(private_data, pfinfo, sizeof(*pfinfo)); + void* addr = (void*) pd->pf.maddr; + if(pd->pf.pfec.rw == 1 && pd->access == SGX_EMA_PROT_WRITE){ + sgx_mm_modify_permissions(addr, SGX_PAGE_SIZE, SGX_EMA_PROT_WRITE | SGX_EMA_PROT_READ); + }else if (pd->pf.pfec.rw == 0 && (pd->access & SGX_EMA_PROT_READ )){//R or RX + sgx_mm_modify_permissions(addr, SGX_PAGE_SIZE, pd->access); + }else + abort(); + return SGX_MM_EXCEPTION_CONTINUE_EXECUTION; +} + +int commit_data_handler(const sgx_pfinfo *pfinfo, void *private_data) +{ + pf_data_t* pd = (pf_data_t *) private_data; + memcpy(private_data, pfinfo, sizeof(*pfinfo)); + void* addr = (void*) pd->pf.maddr; + + if (pd->access == SGX_EMA_PROT_WRITE + && pd->pf.pfec.rw == 1 + && addr == pd->addr_expected) + { + int ret = sgx_mm_modify_permissions(addr, SGX_PAGE_SIZE, SGX_EMA_PROT_WRITE | SGX_EMA_PROT_READ); + if(ret) abort(); + return SGX_MM_EXCEPTION_CONTINUE_EXECUTION; + } + + if (addr == pd->addr_expected) + { + void* data = 0; + int ret = sgx_mm_alloc(NULL, SGX_PAGE_SIZE, SGX_EMA_COMMIT_NOW, + NULL, NULL, &data); + if(ret) abort(); + assert(data!=0); + memset(data, pd->magic, SGX_PAGE_SIZE); + ret = sgx_mm_commit_data(addr, SGX_PAGE_SIZE, (uint8_t*)data, + SGX_EMA_PROT_READ); + if(ret) abort(); + ret = sgx_mm_dealloc((void*)data, SGX_PAGE_SIZE); + 
if(ret) abort(); + return SGX_MM_EXCEPTION_CONTINUE_EXECUTION; + }else + return SGX_MM_EXCEPTION_CONTINUE_SEARCH; +} + +int test_sgx_mm_permissions() +{ + + void* addr = 0; + pf_data_t pd; + memset((void*) &pd, 0, sizeof(pd)); + int ret = sgx_mm_alloc(NULL, ALLOC_SIZE, + SGX_EMA_COMMIT_NOW, &permissions_handler, &pd, &addr); + + EXPECT_EQ(ret, 0); + EXPECT_NEQ(addr, NULL); + + uint8_t* data = (uint8_t*)addr; + uint8_t d0 = data[0]; + EXPECT_EQ(d0, 0); + EXPECT_EQ (pd.pf.pfec.errcd, 0); //Read suceess without PF + data[0] = 0xFF; + EXPECT_EQ (pd.pf.pfec.errcd, 0); //WRITE suceess without PF + + // permissions reduction + ret = sgx_mm_modify_permissions(addr, ALLOC_SIZE/2, SGX_EMA_PROT_READ); + EXPECT_EQ(ret, 0); + + pd.access = SGX_EMA_PROT_READ; + d0 = data[0]; + EXPECT_EQ(d0, 0xFF); + EXPECT_EQ (pd.pf.pfec.errcd, 0); //Read suceess without PF + + pd.access = SGX_EMA_PROT_WRITE; + data[ALLOC_SIZE-1] = 0xFF; + EXPECT_EQ (pd.pf.pfec.errcd, 0); //WRITE suceess without PF + + pd.access = SGX_EMA_PROT_WRITE; + data[0] = 0xFF; + EXPECT_NEQ (pd.pf.pfec.errcd, 0); //WRITE suceess with PF + EXPECT_EQ (pd.pf.pfec.rw, 1); //WRITE indicated in PFEC + + memset((void*) &pd, 0, sizeof(pd)); + pd.access = SGX_EMA_PROT_READ|SGX_EMA_PROT_EXEC; + + //no longer used, ready to be released by any thread + //we could dealloc here but to make it more interesting... + sgx_thread_mutex_lock(&mutex); + allocated_blocks.push_back(addr); + sgx_thread_mutex_unlock(&mutex); + + return 0; +} + + +int test_sgx_mm_permissions_dealloc() +{ + void* addr = 0; + pf_data_t pd; + memset((void*) &pd, 0, sizeof(pd)); + int ret = sgx_mm_alloc(NULL, ALLOC_SIZE, + SGX_EMA_COMMIT_NOW, &permissions_handler, &pd, &addr); + + EXPECT_EQ(ret, 0); + EXPECT_NEQ(addr, NULL); + + uint8_t* data = (uint8_t*)addr; + uint8_t d0 = data[0]; + EXPECT_EQ(d0, 0); + EXPECT_EQ (pd.pf.pfec.errcd, 0); //Read suceess without PF + data[0] = 0xFF; + EXPECT_EQ (pd.pf.pfec.errcd, 0); //WRITE suceess without PF + + // permissions reduction + ret = sgx_mm_modify_permissions(addr, ALLOC_SIZE/2, SGX_EMA_PROT_READ); + EXPECT_EQ(ret, 0); + + pd.access = SGX_EMA_PROT_READ; + d0 = data[0]; + EXPECT_EQ(d0, 0xFF); + EXPECT_EQ (pd.pf.pfec.errcd, 0); //Read suceess without PF + + pd.access = SGX_EMA_PROT_WRITE; + data[ALLOC_SIZE-1] = 0xFF; + EXPECT_EQ (pd.pf.pfec.errcd, 0); //WRITE suceess without PF + + pd.access = SGX_EMA_PROT_WRITE; + data[0] = 0xFF; + EXPECT_NEQ (pd.pf.pfec.errcd, 0); //WRITE suceess with PF + EXPECT_EQ (pd.pf.pfec.rw, 1); //WRITE indicated in PFEC + + memset((void*) &pd, 0, sizeof(pd)); + pd.access = SGX_EMA_PROT_READ|SGX_EMA_PROT_EXEC; + + + ret = sgx_mm_dealloc(addr, ALLOC_SIZE); + EXPECT_EQ(ret, 0); + EXPECT_EQ (pd.pf.pfec.errcd, 0); // no PF + + return 0; +} + +int test_sgx_mm_commit_data() +{ + void* addr = 0; + const int MAGIC = 0x55UL; + pf_data_t pd; + memset((void*) &pd, 0, sizeof(pd)); + int ret = sgx_mm_alloc(NULL, ALLOC_SIZE, + SGX_EMA_COMMIT_ON_DEMAND, + &commit_data_handler, + &pd, &addr); + + EXPECT_EQ(ret, 0); + EXPECT_NEQ(addr, NULL); + + pd.addr_expected = addr; + pd.magic = MAGIC; + + uint8_t* data = (uint8_t*)addr; + for (int i =0; ioentry = (size_t)(&enclave_entry) - enclave_base; + tcs->cssa = 0; + tcs->nssa = 2; + tcs->ofs_limit = tcs->ogs_limit = (uint32_t)-1; + tcs->ossa = (size_t) ssa - enclave_base; + tcs->ofs_base = (size_t)tls - enclave_base; + tcs->ogs_base = (size_t)tls - enclave_base; + + ret =sgx_mm_modify_type(ptcs, SGX_PAGE_SIZE, SGX_EMA_PAGE_TYPE_TCS); + + EXPECT_EQ(ret, 0); + return (size_t) ptcs; +} + +int 
ecall_check_context(size_t tcs) +{ + return 0; +} + +int ecall_dealloc_context(size_t tcs) +{ + size_t base = tcs - 37*SGX_PAGE_SIZE; + size_t size = (16 * 3 + 5 + 1 + 2 + 1) * SGX_PAGE_SIZE; + + int ret = sgx_mm_dealloc ((void*)base, size); + + EXPECT_EQ(ret, 0); + + return 0; +} diff --git a/sdk/emm/api_tests/Enclave/Enclave.edl b/sdk/emm/api_tests/Enclave/Enclave.edl new file mode 100644 index 000000000..be3fc5775 --- /dev/null +++ b/sdk/emm/api_tests/Enclave/Enclave.edl @@ -0,0 +1,45 @@ +/* + * Copyright (C) 2011-2021 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +enclave { + from "sgx_tstdc.edl" import sgx_thread_wait_untrusted_event_ocall, sgx_thread_set_untrusted_event_ocall; + trusted { + public int ecall_test_sgx_mm(int seq_id); + public int ecall_test_sgx_mm_unsafe(void); + public size_t ecall_alloc_context(void); + public int ecall_check_context(size_t tcs); + public int ecall_dealloc_context(size_t tcs); + }; + untrusted { + void ocall_print_string([in, string] const char *str); + }; + +}; diff --git a/sdk/emm/api_tests/Enclave/Enclave.h b/sdk/emm/api_tests/Enclave/Enclave.h new file mode 100644 index 000000000..f6bb82b59 --- /dev/null +++ b/sdk/emm/api_tests/Enclave/Enclave.h @@ -0,0 +1,48 @@ +/* + * Copyright (C) 2011-2021 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef _ENCLAVE_H_ +#define _ENCLAVE_H_ + +#include +#include + +#if defined(__cplusplus) +extern "C" { +#endif + +int printf(const char* fmt, ...); + +#if defined(__cplusplus) +} +#endif + +#endif /* !_ENCLAVE_H_ */ diff --git a/sdk/emm/api_tests/Enclave/Enclave.lds b/sdk/emm/api_tests/Enclave/Enclave.lds new file mode 100644 index 000000000..0d5614f55 --- /dev/null +++ b/sdk/emm/api_tests/Enclave/Enclave.lds @@ -0,0 +1,11 @@ +enclave.so +{ + global: + g_global_data_sim; + g_global_data; + enclave_entry; + g_peak_heap_used; + g_peak_rsrv_mem_committed; + local: + *; +}; diff --git a/sdk/emm/api_tests/Enclave/Enclave_private_test.pem b/sdk/emm/api_tests/Enclave/Enclave_private_test.pem new file mode 100644 index 000000000..529d07be3 --- /dev/null +++ b/sdk/emm/api_tests/Enclave/Enclave_private_test.pem @@ -0,0 +1,39 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIG4gIBAAKCAYEAroOogvsj/fZDZY8XFdkl6dJmky0lRvnWMmpeH41Bla6U1qLZ +AmZuyIF+mQC/cgojIsrBMzBxb1kKqzATF4+XwPwgKz7fmiddmHyYz2WDJfAjIveJ +ZjdMjM4+EytGlkkJ52T8V8ds0/L2qKexJ+NBLxkeQLfV8n1mIk7zX7jguwbCG1Pr +nEMdJ3Sew20vnje+RsngAzdPChoJpVsWi/K7cettX/tbnre1DL02GXc5qJoQYk7b +3zkmhz31TgFrd9VVtmUGyFXAysuSAb3EN+5VnHGr0xKkeg8utErea2FNtNIgua8H +ONfm9Eiyaav1SVKzPHlyqLtcdxH3I8Wg7yqMsaprZ1n5A1v/levxnL8+It02KseD +5HqV4rf/cImSlCt3lpRg8U5E1pyFQ2IVEC/XTDMiI3c+AR+w2jSRB3Bwn9zJtFlW +KHG3m1xGI4ck+Lci1JvWWLXQagQSPtZTsubxTQNx1gsgZhgv1JHVZMdbVlAbbRMC +1nSuJNl7KPAS/VfzAgEDAoIBgHRXxaynbVP5gkO0ug6Qw/E27wzIw4SmjsxG6Wpe +K7kfDeRskKxESdsA/xCrKkwGwhcx1iIgS5+Qscd1Yg+1D9X9asd/P7waPmWoZd+Z +AhlKwhdPsO7PiF3e1AzHhGQwsUTt/Y/aSI1MpHBvy2/s1h9mFCslOUxTmWw0oj/Q +ldIEgWeNR72CE2+jFIJIyml6ftnb6qzPiga8Bm48ubKh0kvySOqnkmnPzgh+JBD6 +JnBmtZbfPT97bwTT+N6rnPqOOApvfHPf15kWI8yDbprG1l4OCUaIUH1AszxLd826 +5IPM+8gINLRDP1MA6azECPjTyHXhtnSIBZCyWSVkc05vYmNXYUNiXWMajcxW9M02 +wKzFELO8NCEAkaTPxwo4SCyIjUxiK1LbQ9h8PSy4c1+gGP4LAMR8xqP4QKg6zdu9 +osUGG/xRe/uufgTBFkcjqBHtK5L5VI0jeNIUAgW/6iNbYXjBMJ0GfauLs+g1VsOm +WfdgXzsb9DYdMa0OXXHypmV4GwKBwQDUwQj8RKJ6c8cT4vcWCoJvJF00+RFL+P3i +Gx2DLERxRrDa8AVGfqaCjsR+3vLgG8V/py+z+dxZYSqeB80Qeo6PDITcRKoeAYh9 +xlT3LJOS+k1cJcEmlbbO2IjLkTmzSwa80fWexKu8/Xv6vv15gpqYl1ngYoqJM3pd +vzmTIOi7MKSZ0WmEQavrZj8zK4endE3v0eAEeQ55j1GImbypSf7Idh7wOXtjZ7WD +Dg6yWDrri+AP/L3gClMj8wsAxMV4ZR8CgcEA0fzDHkFa6raVOxWnObmRoDhAtE0a +cjUj976NM5yyfdf2MrKy4/RhdTiPZ6b08/lBC/+xRfV3xKVGzacm6QjqjZrUpgHC +0LKiZaMtccCJjLtPwQd0jGQEnKfMFaPsnhOc5y8qVkCzVOSthY5qhz0XNotHHFmJ +gffVgB0iqrMTvSL7IA2yqqpOqNRlhaYhNl8TiFP3gIeMtVa9rZy31JPgT2uJ+kfo +gV7sdTPEjPWZd7OshGxWpT6QfVDj/T9T7L6tAoHBAI3WBf2DFvxNL2KXT2QHAZ9t +k3imC4f7U+wSE6zILaDZyzygA4RUbwG0gv8/TJVn2P/Eynf76DuWHGlaiLWnCbSz +Az2DHBQBBaku409zDQym3j1ugMRjzzSQWzJg0SIyBH3hTmnYcn3+Uqcp/lEBvGW6 +O+rsXFt3pukqJmIV8HzLGGaLm62BHUeZf3dyWm+i3p/hQAL7Xvu04QW70xuGqdr5 
+afV7p5eaeQIJXyGQJ0eylV/90+qxjMKiB1XYg6WYvwKBwQCL/ddpgOdHJGN8uRom +e7Zq0Csi3hGheMKlKbN3vcxT5U7MdyHtTZZOJbTvxKNNUNYH/8uD+PqDGNneb29G +BfGzvI3EASyLIcGZF3OhKwZd0jUrWk2y7Vhob91jwp2+t73vdMbkKyI4mHOuXvGv +fg95si9oO7EBT+Oqvhccd2J+F1IVXncccYnF4u5ZGWt5lLewN/pVr7MjjykeaHqN +t+rfnQam2psA6fL4zS2zTmZPzR2tnY8Y1GBTi0Ko1OKd1HMCgcAb5cB/7/AQlhP9 +yQa04PLH9ygQkKKptZp7dy5WcWRx0K/hAHRoi2aw1wZqfm7VBNu2SLcs90kCCCxp +6C5sfJi6b8NpNbIPC+sc9wsFr7pGo9SFzQ78UlcWYK2Gu2FxlMjonhka5hvo4zvg +WxlpXKEkaFt3gLd92m/dMqBrHfafH7VwOJY2zT3WIpjwuk0ZzmRg5p0pG/svVQEH +NZmwRwlopysbR69B/n1nefJ84UO50fLh5s5Zr3gBRwbWNZyzhXk= +-----END RSA PRIVATE KEY----- diff --git a/sdk/emm/api_tests/Enclave/config.xml b/sdk/emm/api_tests/Enclave/config.xml new file mode 100644 index 000000000..aea997c8a --- /dev/null +++ b/sdk/emm/api_tests/Enclave/config.xml @@ -0,0 +1,23 @@ + + 0 + 0 + 8 + 3 + 1 + 18 + + 0x10000 + 0x2000 + 0x900000 + 0x90000 + 0x080000 + 0x00001000 + 0x00100000 + 0x90000000 + + 0 + 1 + 0xFFFFFFFF + diff --git a/sdk/emm/api_tests/Makefile b/sdk/emm/api_tests/Makefile new file mode 100644 index 000000000..4f49bc834 --- /dev/null +++ b/sdk/emm/api_tests/Makefile @@ -0,0 +1,265 @@ +# +# Copyright (C) 2011-2021 Intel Corporation. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# + +######## SGX SDK Settings ######## + +SGX_SDK ?= /opt/intel/sgxsdk +SGX_MODE ?= HW +SGX_ARCH ?= x64 +SGX_DEBUG ?= 1 + +include $(SGX_SDK)/buildenv.mk + +ifeq ($(shell getconf LONG_BIT), 32) + SGX_ARCH := x86 +else ifeq ($(findstring -m32, $(CXXFLAGS)), -m32) + SGX_ARCH := x86 +endif + +ifeq ($(SGX_ARCH), x86) + SGX_COMMON_FLAGS := -m32 + SGX_LIBRARY_PATH := $(SGX_SDK)/lib + SGX_ENCLAVE_SIGNER := $(SGX_SDK)/bin/x86/sgx_sign + SGX_EDGER8R := $(SGX_SDK)/bin/x86/sgx_edger8r +else + SGX_COMMON_FLAGS := -m64 + SGX_LIBRARY_PATH := $(SGX_SDK)/lib64 + SGX_ENCLAVE_SIGNER := $(SGX_SDK)/bin/x64/sgx_sign + SGX_EDGER8R := $(SGX_SDK)/bin/x64/sgx_edger8r +endif + +ifeq ($(SGX_DEBUG), 1) +ifeq ($(SGX_PRERELEASE), 1) +$(error Cannot set SGX_DEBUG and SGX_PRERELEASE at the same time!!) 
+endif +endif + +ifeq ($(SGX_DEBUG), 1) + SGX_COMMON_FLAGS += -O0 -g +else + SGX_COMMON_FLAGS += -O2 +endif + +SGX_COMMON_FLAGS += -Wall -Wextra -Winit-self -Wpointer-arith -Wreturn-type \ + -Waddress -Wsequence-point -Wformat-security \ + -Wmissing-include-dirs -Wfloat-equal -Wundef -Wshadow \ + -Wcast-align -Wcast-qual -Wconversion -Wredundant-decls +SGX_COMMON_CFLAGS := $(SGX_COMMON_FLAGS) -Wjump-misses-init -Wstrict-prototypes -Wunsuffixed-float-constants +SGX_COMMON_CXXFLAGS := $(SGX_COMMON_FLAGS) -Wnon-virtual-dtor -std=c++11 + +######## App Settings ######## + +ifneq ($(SGX_MODE), HW) + Urts_Library_Name := sgx_urts_sim +else + Urts_Library_Name := sgx_urts +endif + +App_Cpp_Files := App/App.cpp +App_Include_Paths := -IApp -I$(SGX_SDK)/include + +App_C_Flags := -fPIC -Wno-attributes $(App_Include_Paths) + +# Three configuration modes - Debug, prerelease, release +# Debug - Macro DEBUG enabled. +# Prerelease - Macro NDEBUG and EDEBUG enabled. +# Release - Macro NDEBUG enabled. +ifeq ($(SGX_DEBUG), 1) + App_C_Flags += -DDEBUG -UNDEBUG -UEDEBUG +else ifeq ($(SGX_PRERELEASE), 1) + App_C_Flags += -DNDEBUG -DEDEBUG -UDEBUG +else + App_C_Flags += -DNDEBUG -UEDEBUG -UDEBUG +endif + +App_Cpp_Flags := $(App_C_Flags) +App_Link_Flags := -L$(SGX_LIBRARY_PATH) -l$(Urts_Library_Name) -lpthread + +App_Cpp_Objects := $(App_Cpp_Files:.cpp=.o) + +App_Name := test_mm_api + +######## Enclave Settings ######## + +ifneq ($(SGX_MODE), HW) + Trts_Library_Name := sgx_trts_sim + Service_Library_Name := sgx_tservice_sim +else + Trts_Library_Name := sgx_trts + Service_Library_Name := sgx_tservice +endif +Crypto_Library_Name := sgx_tcrypto + +Enclave_Cpp_Files := Enclave/Enclave.cpp +Enclave_Include_Paths := -IEnclave -I$(SGX_SDK)/include -I$(SGX_SDK)/include/tlibc -I$(SGX_SDK)/include/libcxx + +Enclave_C_Flags := $(Enclave_Include_Paths) -nostdinc -fvisibility=hidden -fpie -ffunction-sections -fdata-sections $(MITIGATION_CFLAGS) +CC_BELOW_4_9 := $(shell expr "`$(CC) -dumpversion`" \< "4.9") +ifeq ($(CC_BELOW_4_9), 1) + Enclave_C_Flags += -fstack-protector +else + Enclave_C_Flags += -fstack-protector-strong +endif + +Enclave_Cpp_Flags := $(Enclave_C_Flags) -nostdinc++ + +# Enable the security flags +Enclave_Security_Link_Flags := -Wl,-z,relro,-z,now,-z,noexecstack + +# To generate a proper enclave, it is recommended to follow below guideline to link the trusted libraries: +# 1. Link sgx_trts with the `--whole-archive' and `--no-whole-archive' options, +# so that the whole content of trts is included in the enclave. +# 2. For other libraries, you just need to pull the required symbols. +# Use `--start-group' and `--end-group' to link these libraries. +# Do NOT move the libraries linked with `--start-group' and `--end-group' within `--whole-archive' and `--no-whole-archive' options. +# Otherwise, you may get some undesirable errors. 
+Enclave_Link_Flags := $(MITIGATION_LDFLAGS) $(Enclave_Security_Link_Flags) \ + -Wl,--no-undefined -nostdlib -nodefaultlibs -nostartfiles -L$(SGX_TRUSTED_LIBRARY_PATH) \ + -Wl,--whole-archive -l$(Trts_Library_Name) -Wl,--no-whole-archive \ + -Wl,--start-group -lsgx_tstdc -lsgx_tcxx -l$(Crypto_Library_Name) -l$(Service_Library_Name) -Wl,--end-group \ + -Wl,-Bstatic -Wl,-Bsymbolic -Wl,--no-undefined \ + -Wl,-pie,-eenclave_entry -Wl,--export-dynamic \ + -Wl,--defsym,__ImageBase=0 -Wl,--gc-sections \ + -Wl,--version-script=Enclave/Enclave.lds + +Enclave_Cpp_Objects := $(sort $(Enclave_Cpp_Files:.cpp=.o)) + +Enclave_Name := enclave.so +Signed_Enclave_Name := enclave.signed.so +Enclave_Config_File := Enclave/config.xml + +ifeq ($(SGX_MODE), HW) +ifeq ($(SGX_DEBUG), 1) + Build_Mode = HW_DEBUG +else ifeq ($(SGX_PRERELEASE), 1) + Build_Mode = HW_PRERELEASE +else + Build_Mode = HW_RELEASE +endif +else +ifeq ($(SGX_DEBUG), 1) + Build_Mode = SIM_DEBUG +else ifeq ($(SGX_PRERELEASE), 1) + Build_Mode = SIM_PRERELEASE +else + Build_Mode = SIM_RELEASE +endif +endif + + +.PHONY: all target run +all: .config_$(Build_Mode)_$(SGX_ARCH) + @$(MAKE) target + +ifeq ($(Build_Mode), HW_RELEASE) +target: $(App_Name) $(Enclave_Name) + @echo "The project has been built in release hardware mode." + @echo "Please sign the $(Enclave_Name) first with your signing key before you run the $(App_Name) to launch and access the enclave." + @echo "To sign the enclave use the command:" + @echo " $(SGX_ENCLAVE_SIGNER) sign -key -enclave $(Enclave_Name) -out <$(Signed_Enclave_Name)> -config $(Enclave_Config_File)" + @echo "You can also sign the enclave using an external signing tool." + @echo "To build the project in simulation mode set SGX_MODE=SIM. To build the project in prerelease mode set SGX_PRERELEASE=1 and SGX_MODE=HW." + + +else +target: $(App_Name) $(Signed_Enclave_Name) +ifeq ($(Build_Mode), HW_DEBUG) + @echo "The project has been built in debug hardware mode." +else ifeq ($(Build_Mode), SIM_DEBUG) + @echo "The project has been built in debug simulation mode." +else ifeq ($(Build_Mode), HW_PRERELEASE) + @echo "The project has been built in pre-release hardware mode." +else ifeq ($(Build_Mode), SIM_PRERELEASE) + @echo "The project has been built in pre-release simulation mode." +else + @echo "The project has been built in release simulation mode." 
+endif + +endif + +run: all +ifneq ($(Build_Mode), HW_RELEASE) + @$(CURDIR)/$(App_Name) + @echo "RUN => $(App_Name) [$(SGX_MODE)|$(SGX_ARCH), OK]" +endif + +.config_$(Build_Mode)_$(SGX_ARCH): + @rm -f .config_* $(App_Name) $(Enclave_Name) $(Signed_Enclave_Name) $(App_Cpp_Objects) App/Enclave_u.* $(Enclave_Cpp_Objects) Enclave/Enclave_t.* + @touch .config_$(Build_Mode)_$(SGX_ARCH) + +######## App Objects ######## + +App/Enclave_u.h: $(SGX_EDGER8R) Enclave/Enclave.edl + @cd App && $(SGX_EDGER8R) --untrusted ../Enclave/Enclave.edl --search-path ../Enclave --search-path $(SGX_SDK)/include + @echo "GEN => $@" + +App/Enclave_u.c: App/Enclave_u.h + +App/Enclave_u.o: App/Enclave_u.c + @$(CC) $(SGX_COMMON_CFLAGS) $(App_C_Flags) -c $< -o $@ + @echo "CC <= $<" + +App/%.o: App/%.cpp App/Enclave_u.h + @$(CXX) $(SGX_COMMON_CXXFLAGS) $(App_Cpp_Flags) -c $< -o $@ + @echo "CXX <= $<" + +$(App_Name): App/Enclave_u.o $(App_Cpp_Objects) + @$(CXX) $^ -o $@ $(App_Link_Flags) + @echo "LINK => $@" + +######## Enclave Objects ######## + +Enclave/Enclave_t.h: $(SGX_EDGER8R) Enclave/Enclave.edl + @cd Enclave && $(SGX_EDGER8R) --trusted ../Enclave/Enclave.edl --search-path ../Enclave --search-path $(SGX_SDK)/include + @echo "GEN => $@" + +Enclave/Enclave_t.c: Enclave/Enclave_t.h + +Enclave/Enclave_t.o: Enclave/Enclave_t.c + @$(CC) $(SGX_COMMON_CFLAGS) $(Enclave_C_Flags) -c $< -o $@ + @echo "CC <= $<" + +Enclave/%.o: Enclave/%.cpp Enclave/Enclave_t.h + @$(CXX) $(SGX_COMMON_CXXFLAGS) $(Enclave_Cpp_Flags) -c $< -o $@ + @echo "CXX <= $<" + +$(Enclave_Name): Enclave/Enclave_t.o $(Enclave_Cpp_Objects) + @$(CXX) $^ -o $@ $(Enclave_Link_Flags) + @echo "LINK => $@" + +$(Signed_Enclave_Name): $(Enclave_Name) + @$(SGX_ENCLAVE_SIGNER) sign -key Enclave/Enclave_private_test.pem -enclave $(Enclave_Name) -out $@ -config $(Enclave_Config_File) + @echo "SIGN => $@" + +.PHONY: clean + +clean: + @rm -f .config_* $(App_Name) $(Enclave_Name) $(Signed_Enclave_Name) $(App_Cpp_Objects) App/Enclave_u.* $(Enclave_Cpp_Objects) Enclave/Enclave_t.* diff --git a/sdk/emm/api_tests/tcs.h b/sdk/emm/api_tests/tcs.h new file mode 100644 index 000000000..a6012e3a2 --- /dev/null +++ b/sdk/emm/api_tests/tcs.h @@ -0,0 +1,61 @@ +/* + * Copyright (C) 2011-2021 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef TCS_H_ +#define TCS_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct _tcs_t +{ + uint64_t reserved0; /* (0) */ + uint64_t flags; /* (8)bit 0: DBGOPTION */ + uint64_t ossa; /* (16)State Save Area */ + uint32_t cssa; /* (24)Current SSA slot */ + uint32_t nssa; /* (28)Number of SSA slots */ + uint64_t oentry; /* (32)Offset in enclave to which control is transferred on EENTER if enclave INACTIVE state */ + uint64_t reserved1; /* (40) */ + uint64_t ofs_base; /* (48)When added to the base address of the enclave, produces the base address FS segment inside the enclave */ + uint64_t ogs_base; /* (56)When added to the base address of the enclave, produces the base address GS segment inside the enclave */ + uint32_t ofs_limit; /* (64)Size to become the new FS limit in 32-bit mode */ + uint32_t ogs_limit; /* (68)Size to become the new GS limit in 32-bit mode */ +#define TCS_RESERVED_LENGTH 4024 + uint8_t reserved[TCS_RESERVED_LENGTH]; /* (72) */ +}tcs_t; + +#ifdef __cplusplus +} +#endif + + +#endif diff --git a/sdk/emm/api_tests/test_loop.sh b/sdk/emm/api_tests/test_loop.sh new file mode 100755 index 000000000..ea5d95d45 --- /dev/null +++ b/sdk/emm/api_tests/test_loop.sh @@ -0,0 +1,17 @@ +let fail=0 +for ((i=1;i<=$1;i++));do + ./test_mm_api + if [ $? -eq 0 ] + then + echo "pass for iteration $i" + else + echo "fail for iteration $i" + let fail=fail+1 + fi +done +if [ $fail -eq 0 ] +then + echo "passed $1 iterations" + else + echo "$fail out of $1 iterations failed" +fi diff --git a/sdk/emm/bit_array.c b/sdk/emm/bit_array.c new file mode 100644 index 000000000..47a381f6e --- /dev/null +++ b/sdk/emm/bit_array.c @@ -0,0 +1,491 @@ +/* + * Copyright (C) 2011-2021 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include +#include +#include +#include +#include +#include "bit_array.h" + +#define ROUND_TO(x, align) ((size_t)((x) + ((align)-1)) & (size_t)(~((align)-1))) +#define NUM_OF_BYTES(nbits) (ROUND_TO((nbits), 8) >> 3) +#define TEST_BIT(A, p) ((A)[((p)/8)] & ((uint8_t)(1 << ((p)%8)))) +#define SET_BIT(A, p) ((A)[((p)/8)] |= ((uint8_t)(1 << ((p)%8)))) +#define CLEAR_BIT(A, p) ((A)[((p)/8)] &= (uint8_t)(~(1 << ((p)%8)))) +#define FLIP_BIT(A, p) ((A)[((p)/8)] ^= (uint8_t)(1 << ((p)%8))) + +struct bit_array_ { + size_t n_bytes; + size_t n_bits; + uint8_t *data; +}; + +// Create a new bit array to track the status of 'num' of bits. +// The contents of the data is uninitialized. +bit_array *bit_array_new(size_t num_of_bits) +{ + // FIXME: check against MAX + + size_t n_bytes = NUM_OF_BYTES(num_of_bits); + + if (n_bytes == 0) + return NULL; + + bit_array *ba = (bit_array *)malloc(sizeof(bit_array)); + if(!ba) return NULL; + ba->n_bytes = n_bytes; + ba->n_bits = num_of_bits; + ba->data = (uint8_t*)malloc(n_bytes); + return ba; +} + +// Create a new bit array to track the status of 'num' of bits. +// All the tracked bits are set (value 1). +bit_array *bit_array_new_set(size_t num_of_bits) +{ + bit_array *ba = bit_array_new(num_of_bits); + if (!ba) + return NULL; + + memset(ba->data, 0xFF, ba->n_bytes); + return ba; +} + +// Create a new bit array to track the status of 'num' of bits. +// All the tracked bits are reset (value 0). +bit_array *bit_array_new_reset(size_t num_of_bits) +{ + bit_array *ba = bit_array_new(num_of_bits); + if (!ba) + return NULL; + + memset(ba->data, 0, ba->n_bytes); + return ba; +} + +// Reset the bit_array 'ba' to track the new 'data', which has 'num' of bits. 
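+// Note: the buffer previously owned by 'ba' is freed and 'ba' takes ownership
+// of 'data'; a later bit_array_delete(ba) will free 'data' as well.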
+void bit_array_reattach(bit_array *ba, size_t num_of_bits, uint8_t *data) +{ + if (ba->data) { + free(ba->data); + } + + size_t n_bytes = NUM_OF_BYTES(num_of_bits); + ba->n_bytes = n_bytes; + ba->n_bits = num_of_bits; + ba->data = data; +} + +// Delete the bit_array 'ba' and the data it owns +void bit_array_delete(bit_array *ba) +{ + free(ba->data); + free(ba); +} + +#if 0 +// Returns the number of bits that are set +size_t bit_array_count(bit_array *ba) +{ + +} +#endif + +// Returns the number of tracked bits in the bit_array +size_t bit_array_size(bit_array *ba) +{ + return ba->n_bits; +} + +// Returns whether the bit at position 'pos' is set +bool bit_array_test(bit_array *ba, size_t pos) +{ + return TEST_BIT(ba->data, pos); +} +uint8_t set_mask(size_t start, size_t bits_to_set) +{ + assert(start<8); + assert(bits_to_set<=8); + assert(start + bits_to_set <= 8); + return (uint8_t) (((1 << bits_to_set) - 1) << start); +} +bool bit_array_test_range(bit_array *ba, size_t pos, size_t len) +{ + size_t byte_index = pos / 8; + size_t bit_index = pos % 8; + size_t bits_in_first_byte = 8 - bit_index; + + if (len <= bits_in_first_byte) { + uint8_t mask = set_mask(bit_index, len); + if ((ba->data[byte_index] & mask) != mask) { + return false; + } + return true; + } + + uint8_t mask = set_mask(bit_index, bits_in_first_byte); + if ((ba->data[byte_index] & mask) != mask) { + return false; + } + + size_t bits_remain = len - bits_in_first_byte; + while (bits_remain >=8) { + if (ba->data[++byte_index] != 0xFF) { + return false; + } + bits_remain -= 8; + } + + // handle last several bits + if (bits_remain > 0) { + mask = set_mask(0, bits_remain); + if ((ba->data[++byte_index] & mask) != mask) { + return false; + } + } + + return true; +} + +bool bit_array_test_range_any(bit_array *ba, size_t pos, size_t len) +{ + size_t byte_index = pos / 8; + size_t bit_index = pos % 8; + size_t bits_in_first_byte = 8 - bit_index; + + if (len <= bits_in_first_byte) { + uint8_t mask = set_mask(bit_index, len); + if ((ba->data[byte_index] & mask)) { + return true; + } + return false; + } + + uint8_t mask = set_mask(bit_index, bits_in_first_byte); + if ((ba->data[byte_index] & mask)) { + return true; + } + + size_t bits_remain = len - bits_in_first_byte; + while (bits_remain >=8) { + if (ba->data[++byte_index]) { + return true; + } + bits_remain -= 8; + } + + // handle last several bits + if (bits_remain > 0) { + mask = set_mask(0, bits_remain); + if ((ba->data[++byte_index] & mask)) { + return true; + } + } + return false; +} + +// Returns whether any of the bits is set +bool bit_array_any(bit_array *ba) +{ + uint8_t v = 0xFF; + size_t i; + + for (i = 0; i < ba->n_bytes - 1; ++i) { + if ((v & ba->data[i])) + return true; + } + + // check the last several bits + size_t bits_in_last_byte = ba->n_bits - ((ba->n_bytes - 1) << 3); + uint8_t mask = set_mask(0, bits_in_last_byte); + + if (mask & ba->data[i]) + return true; + + return false; +} + +// Returns whether none of the bits is set +bool bit_array_none(bit_array *ba) +{ + return !bit_array_any(ba); +} + +// Returns whether all of the bits are set +bool bit_array_all(bit_array *ba) +{ + uint8_t v = 0xFF; + size_t i; + for (i = 0; i < ba->n_bytes - 1; ++i) { + if ((v ^ ba->data[i])) + return false; + } + + // check the last several bits + size_t bits_in_last_byte = ba->n_bits - ((ba->n_bytes - 1) << 3); + uint8_t mask = set_mask(0, bits_in_last_byte); + + if ((mask & ba->data[i]) != mask) + return false; + + return true; +} + +// Set the bit at 'pos' +void 
bit_array_set(bit_array *ba, size_t pos) +{ + SET_BIT(ba->data, pos); +} + +void bit_array_set_range(bit_array *ba, size_t pos, size_t len) +{ + size_t byte_index = pos / 8; + size_t bit_index = pos % 8; + size_t bits_in_first_byte = 8 - bit_index; + + if (len <= bits_in_first_byte) { + uint8_t mask = set_mask(bit_index, len); + ba->data[byte_index] |= mask; + return; + } + + uint8_t mask = set_mask(bit_index, bits_in_first_byte); + ba->data[byte_index] |= mask; + size_t bits_remain = len - bits_in_first_byte; + while (bits_remain >=8) { + ba->data[++byte_index] = 0xFF; + bits_remain -= 8; + } + + // handle last several bits + if (bits_remain > 0) { + mask = set_mask(0, bits_remain); + ba->data[++byte_index] |= mask; + } + + return; +} + +// Set all the bits +void bit_array_set_all(bit_array *ba) +{ + memset(ba->data, 0xFF, ba->n_bytes); +} + +// Clear the bit at 'pos' +void bit_array_reset(bit_array *ba, size_t pos) +{ + CLEAR_BIT(ba->data, pos); +} + +uint8_t clear_mask(size_t start, size_t bits_to_clear) +{ + return (uint8_t)(~set_mask(start, bits_to_clear)); +} + +void bit_array_reset_range(bit_array *ba, size_t pos, size_t len) +{ + size_t byte_index = pos / 8; + size_t bit_index = pos % 8; + size_t bits_in_first_byte = 8 - bit_index; + + if (len <= bits_in_first_byte) { + uint8_t mask = clear_mask(bit_index, len); + ba->data[byte_index] &= mask; + return; + } + + uint8_t mask = clear_mask(bit_index, bits_in_first_byte); + ba->data[byte_index] &= mask; + + size_t bits_remain = len - bits_in_first_byte; + while (bits_remain >=8) { + ba->data[++byte_index] = 0; + bits_remain -= 8; + } + + // handle last several bits + if (bits_remain > 0) { + mask = clear_mask(0, bits_remain); + ba->data[++byte_index] &= mask; + } + + return; +} + +// Clear all the bits +void bit_array_reset_all(bit_array *ba) +{ + memset(ba->data, 0, ba->n_bytes); +} + +// Flip the bit at 'pos' +void bit_array_flip(bit_array *ba, size_t pos) +{ + FLIP_BIT(ba->data, pos); +} + +#if 0 +// Flip all the bits +void bit_array_flip_all(bit_array *ba) +{ + +} +#endif + +// Split the bit array at 'pos' +int bit_array_split(bit_array *ba, size_t pos, bit_array **new_lower, bit_array **new_higher) +{ + // not actually a split + if (pos == 0) { + *new_lower = NULL; + *new_higher = ba; + return 0; + } + + // not actually a split + if (pos >= ba->n_bits) { + *new_lower = ba; + *new_higher = NULL; + return 0; + } + + size_t byte_index = pos / 8; + uint8_t bit_index = pos % 8; + + size_t l_bits = (byte_index << 3) + bit_index; + size_t l_bytes = NUM_OF_BYTES(l_bits); + size_t r_bits = ba->n_bits - l_bits; + + // new data for bit_array of lower pages + uint8_t *data = (uint8_t *)malloc(l_bytes); + if (!data) return ENOMEM; + size_t i; + for (i = 0; i < byte_index; ++i) { + data[i] = ba->data[i]; + } + + if (bit_index > 0) { + uint8_t tmp = ba->data[i] & (uint8_t)((1 << bit_index) - 1); + data[i] = tmp; + } + + // new bit_array for higher pages + bit_array *ba2 = bit_array_new(r_bits); + + size_t bits_remain = r_bits; + size_t curr_byte = byte_index; + size_t dst_byte = 0; + uint8_t u1 = 0, u2 = 0; + + while (bits_remain >= 8) { + u1 = (uint8_t)(ba->data[curr_byte++] >> bit_index); + u2 = (uint8_t)(ba->data[curr_byte] << (8 - bit_index)); + ba2->data[dst_byte++] = u1 | u2; + bits_remain -= 8; + } + + if (bits_remain > (uint8_t)(8 - bit_index)) { + u1 = (uint8_t)(ba->data[curr_byte++] >> bit_index); + u2 = (uint8_t)(ba->data[curr_byte] << (8 - bit_index)); + ba2->data[dst_byte] = u1 | u2;; + } + else if (bits_remain > 0) { + u1 = 
(uint8_t)(ba->data[curr_byte] >> bit_index); + ba2->data[dst_byte] = u1; + } + + bit_array_reattach(ba, l_bits, data); + + *new_lower = ba; + *new_higher = ba2; + return 0; +} + +// Merge two bit arrays +// Returns a new bit array, merging two input bit arrays +bit_array* bit_array_merge(bit_array *ba1, bit_array *ba2) +{ + size_t total_bits = ba1->n_bits + ba2->n_bits; + bit_array *ba = bit_array_new(total_bits); + + + // copy ba1 data into new bit_array + memcpy(ba->data, ba1->data, ba1->n_bytes); + + size_t idle_bits = (ba1->n_bytes << 3) - ba1->n_bits; + + // last byte of ba1 is fully occupied, copy ba2 data as a whole + if (idle_bits == 0) { + memcpy(&ba->data[ba1->n_bytes], ba2->data, ba2->n_bytes); + bit_array_delete(ba1); + bit_array_delete(ba2); + return ba; + } + + // fix the byte copied from ba1's last byte + size_t i = ba1->n_bytes - 1; + + size_t bits_remain = ba2->n_bits; + ba->data[i++] |= (uint8_t)(ba2->data[0] << (8 - idle_bits)); + + if (bits_remain <= idle_bits) { + bit_array_delete(ba1); + bit_array_delete(ba2); + return ba; + } + + bits_remain -= idle_bits; + size_t curr_byte = 0; + size_t dst_byte = i; + uint8_t u1 = 0, u2 = 0; + + while (bits_remain >= 8) { + u1 = (uint8_t)(ba2->data[curr_byte++] >> idle_bits); + u2 = (uint8_t)(ba2->data[curr_byte] << (8 - idle_bits)); + ba->data[dst_byte++] = u1 | u2; + bits_remain -= 8; + } + + if (bits_remain > (8 - idle_bits)) { + u1 = (uint8_t)(ba2->data[curr_byte++] >> idle_bits); + u2 = (uint8_t)(ba2->data[curr_byte] << (8 - idle_bits)); + ba->data[dst_byte] = u1 | u2;; + } else if (bits_remain > 0) { + u1 = (uint8_t)(ba2->data[curr_byte] >> idle_bits); + ba->data[dst_byte] = u1; + } + + bit_array_delete(ba1); + bit_array_delete(ba2); + return ba; +} diff --git a/sdk/emm/ema.c b/sdk/emm/ema.c new file mode 100644 index 000000000..948104f82 --- /dev/null +++ b/sdk/emm/ema.c @@ -0,0 +1,1177 @@ +/* + * Copyright (C) 2011-2021 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include "ema.h" +#include "bit_array.h" +#include "sgx_mm.h" +#include "sgx_mm_primitives.h" +#include "sgx_mm_rt_abstraction.h" +/* State flags */ +#define SGX_EMA_STATE_PENDING 0x8UL +#define SGX_EMA_STATE_MODIFIED 0x10UL +#define SGX_EMA_STATE_PR 0x20UL +#define ROUND_TO(x, align) (((x) + (align-1)) & ~(align-1)) +#define TRIM_TO(x, align) ((x) & ~(align-1)) +#define MIN(x, y) (((x)>(y))?(y):(x)) +#define MAX(x, y) (((x)>(y))?(x):(y)) +#define UNUSED(x) ((void)(x)) +struct ema_t_ { + size_t start_addr; // starting address, should be on a page boundary + size_t size; // bytes + uint32_t alloc_flags; // EMA_RESERVED, EMA_COMMIT_NOW, EMA_COMMIT_ON_DEMAND, + // OR'ed with EMA_SYSTEM, EMA_GROWSDOWN, ENA_GROWSUP + uint64_t si_flags; // one of EMA_PROT_NONE, READ, READ_WRITE, READ_EXEC, READ_WRITE_EXEC + // Or'd with one of EMA_PAGE_TYPE_REG, EMA_PAGE_TYPE_TCS, EMA_PAGE_TYPE_TRIM + bit_array * eaccept_map; // bitmap for EACCEPT status, bit 0 in eaccept_map[0] for the page at start address + // bit i in eaccept_map[j] for page at start_address+(i+j<<3)<<12 + sgx_mm_mutex * lock; // lock to prevent concurrent modification, could be sgx_thread_mutex_t/rwlock_t + int transition; // state to indicate whether a transition in progress, e.g page type/permission changes. + sgx_enclave_fault_handler_t + handler; // custom PF handler (for EACCEPTCOPY use) + void* private; // private data for handler + ema_t* next; // next in doubly linked list + ema_t* prev; // prev in doubly linked list +}; + +struct ema_root_ { + ema_t *guard; +}; + +ema_t dummy_user_ema = {.next = &dummy_user_ema, + .prev = &dummy_user_ema}; +ema_root_t g_user_ema_root = {.guard = &dummy_user_ema}; + +ema_t dummy_rts_ema = {.next = &dummy_rts_ema, + .prev = &dummy_rts_ema}; +ema_root_t g_rts_ema_root = {.guard = &dummy_rts_ema}; + +#ifdef TEST +static void dump_ema_node(ema_t *node, size_t index) +{ + printf("------ node #%lu ------\n", index); + printf("start:\t0x%lX\n", node->start_addr); + printf("size:\t0x%lX\n", node->size); +} + +void dump_ema_root(ema_root_t *root) +{ + ema_t *node = root->guard->next; + size_t index = 0; + + while (node != root->guard) { + dump_ema_node(node, index++); + node = node->next; + } +} + +#endif +void destroy_ema_root(ema_root_t *root) +{ + ema_t *node = root->guard->next; + size_t index = 0; + + while (node != root->guard) { + index++; + ema_t* next = node->next; + ema_destroy(node); + node = next; + } +#if 0 + printf("Destroy %lu nodes on the root\n", index); +#endif +} +bool ema_root_empty(ema_root_t* r) +{ + return r->guard == r->guard->next; +} + +size_t ema_root_end(ema_root_t* r) +{ + return r->guard->prev->start_addr + r->guard->prev->size; +} + +#ifdef TEST +size_t ema_base(ema_t *node) +{ + return node->start_addr; +} + +size_t ema_size(ema_t *node) +{ + return node->size; +} +#endif +#ifndef NDEBUG +ema_t *ema_next(ema_t *node) +{ + return node->next; +} +#endif + +uint32_t get_ema_alloc_flags(ema_t *node) +{ + return node->alloc_flags; +} + +uint64_t get_ema_si_flags(ema_t *node) +{ + return node->si_flags; +} + +sgx_enclave_fault_handler_t ema_fault_handler(ema_t* node, void** private_data) +{ + if(private_data) + *private_data = node->private; + return node->handler; +} + + +bool is_ema_transition(ema_t *node) +{ + return node->transition; +} + +static void ema_clone(ema_t *dst, ema_t *src) +{ + memcpy((void *)dst, (void *)src, sizeof(ema_t)); +} + +static bool ema_lower_than_addr(ema_t *ema, size_t addr) +{ + 
return ((ema->start_addr + ema->size) <= addr); +} + +static bool ema_higher_than_addr(ema_t *ema, size_t addr) +{ + return (ema->start_addr >= addr); +} + +static bool ema_overlap_addr(const ema_t *ema, size_t addr) +{ + if ((addr >= ema->start_addr) && (addr < ema->start_addr + ema->size)) + return true; + return false; +} + +static bool ema_overlap_range(const ema_t *ema, size_t start, size_t end) +{ + if ((end <= ema->start_addr) || (start >= ema->start_addr + ema->size)) + return false; + return true; +} + +int ema_set_eaccept_full(ema_t *node) +{ + if (!node->eaccept_map) { + node->eaccept_map = bit_array_new_set((node->size) >> SGX_PAGE_SHIFT); + if(!node->eaccept_map) + return ENOMEM; + else + return 0; + }else + bit_array_set_all(node->eaccept_map); + return 0; +} + +int ema_clear_eaccept_full(ema_t *node) +{ + if (!node->eaccept_map) { + node->eaccept_map = bit_array_new_reset((node->size) >> SGX_PAGE_SHIFT); + if(!node->eaccept_map) + return ENOMEM; + else + return 0; + }else + bit_array_reset_all(node->eaccept_map); + return 0; +} + + +int ema_set_eaccept(ema_t *node, size_t start, size_t end) +{ + if (!node) { + return EINVAL; + } + + assert (start >= node->start_addr); + assert (end <= node->start_addr + node->size); + size_t pos_begin = (start - node->start_addr) >> SGX_PAGE_SHIFT; + size_t pos_end = (end - node->start_addr) >> SGX_PAGE_SHIFT; + + // update eaccept bit map + if (!node->eaccept_map) { + node->eaccept_map = bit_array_new_reset((node->size) >> SGX_PAGE_SHIFT); + if(!node->eaccept_map) + return ENOMEM; + } + bit_array_set_range (node->eaccept_map, + pos_begin, + pos_end - pos_begin); + return 0; +} + +bool ema_page_committed(ema_t *ema, size_t addr) +{ + assert(!(addr%SGX_PAGE_SIZE)); + if (!ema->eaccept_map) { + return false; + } + + return bit_array_test(ema->eaccept_map, + (addr - ema->start_addr) >> SGX_PAGE_SHIFT); +} + +bool ema_exist_in(ema_root_t* root, size_t addr, size_t size) +{ + size_t end = addr + size; + for (ema_t *node = root->guard->next; node != root->guard; node = node->next) { + if (ema_overlap_range(node, addr, end)) { + return true; + } + } + return false; +} + +bool ema_exist(size_t addr, size_t size) +{ + return ema_exist_in(&g_rts_ema_root, addr, size) || + ema_exist_in(&g_user_ema_root, addr, size); +} + +// search for a node whose address range contains 'addr' +ema_t *search_ema(ema_root_t *root, size_t addr) +{ + for (ema_t *node = root->guard->next; node != root->guard; node = node->next) { + if (ema_overlap_addr(node, addr)) { + return node; + } + } + return NULL; +} + +// insert 'new_node' before 'node' +ema_t *insert_ema(ema_t *new_node, ema_t *node) +{ + new_node->prev = node->prev; + new_node->next = node; + node->prev->next = new_node; + node->prev = new_node; + return new_node; +} + +// Remove the 'node' from the list +static ema_t *remove_ema(ema_t *node) +{ + if (!node) + return node; + + // Sanity check pointers for corruption + if ((node->prev->next != node) || + (node->next->prev != node)) { + abort(); + } + + node->prev->next = node->next; + node->next->prev = node->prev; + return node; +} + +void push_back_ema(ema_root_t *root, ema_t *node) +{ + insert_ema(node, root->guard); +} + +// search for a range of nodes containing addresses within [start, end) +// 'ema_begin' will hold the fist ema that has address higher than /euqal to 'start' +// 'ema_end' will hold the node immediately follow the last ema that has address lower than / equal to 'end' +int search_ema_range(ema_root_t *root, size_t start, size_t end, + ema_t 
**ema_begin, ema_t **ema_end) +{ + ema_t *node = root->guard->next; + + // find the first node that has addr >= 'start' + while ((node != root->guard) && ema_lower_than_addr(node, start)) { + node = node->next; + } + + // empty list or all nodes are beyond [start, end) + if ((node == root->guard) || ema_higher_than_addr(node, end)) { + *ema_begin = NULL; + *ema_end = NULL; + return -1; + } + + *ema_begin = node; + + // find the last node that has addr <= 'end' + while ((node != root->guard) && + (!ema_higher_than_addr(node, end))) { + node = node->next; + } + *ema_end = node; + + return 0; +} +//TODO?do not split bit_arrays, reuse it by keeping ref-count +//and start and end offsets for multiple EMAs +int ema_split(ema_t *ema, size_t addr, bool new_lower, ema_t** ret_node) +{ + //!FIXME: this is only needed for UT + // in real usage in the file, addr always overlap + if (!ema_overlap_addr(ema, addr) || !ret_node) { + return EINVAL; + } + + ema_t *new_node = (ema_t *)malloc(sizeof(ema_t)); + if (!new_node) { + return ENOMEM; + } + + bit_array *low = NULL, *high = NULL; + if (ema->eaccept_map) { + size_t pos = (addr - ema->start_addr) >> SGX_PAGE_SHIFT; + int ret = bit_array_split(ema->eaccept_map, pos, &low, &high); + if(ret) { + free(new_node); + return ret; + } + } + + //caller does not need free new_node as it is inserted + // and managed in root when this returns + ema_clone(new_node, ema); + + ema_t *lo_ema = NULL, *hi_ema = NULL; + if (new_lower) { + // new node for lower address + lo_ema = new_node; + hi_ema = ema; + insert_ema(new_node, ema); + } else { + lo_ema = ema; + hi_ema = new_node; + insert_ema(new_node, ema->next); + } + + size_t start = ema->start_addr; + size_t size = ema->size; + + lo_ema->start_addr = start; + lo_ema->size = addr - start; + hi_ema->start_addr = addr; + hi_ema->size = size - lo_ema->size; + + if (ema->eaccept_map) { + lo_ema->eaccept_map = low; + hi_ema->eaccept_map = high; + } + *ret_node = new_node; + return 0; +} + +int ema_split_ex(ema_t *ema, size_t start, size_t end, ema_t** new_node) +{ + ema_t *node = ema; + ema_t *tmp_node; + if (start > node->start_addr) { + int ret = ema_split(node, start, false, &tmp_node); + if(ret) return ret; + if(tmp_node) node = tmp_node; + } + tmp_node = NULL; + if (end < (node->start_addr + node->size)) { + int ret = ema_split(node, end, true, &tmp_node); + if(ret) return ret; + if(tmp_node) node = tmp_node; + } + *new_node = node; + return 0; +} + +ema_t *ema_merge(ema_t *lo_ema, ema_t *hi_ema) +{ + return NULL; +} + +static size_t ema_aligned_end(ema_t* ema, size_t align) +{ + size_t curr_end = ema->start_addr + ema->size; + curr_end = ROUND_TO(curr_end, align); + return curr_end; +} + +// Find a free space of size at least 'size' bytes, does not matter where the start is +bool find_free_region(ema_root_t *root, size_t size, + uint64_t align, size_t *addr, ema_t **next_ema) +{ + ema_t *ema_begin = root->guard->next; + ema_t *ema_end = root->guard; + + // we need at least one node before calling this. 
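+    // If this root is empty, fall back to the first aligned address past the
+    // end of the RTS region; the RTS root must already be initialized here.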
+ if(ema_begin == ema_end){ + if(ema_root_empty(&g_rts_ema_root)) + return false;//rts has to be inited at this time + size_t tmp = ema_root_end(&g_rts_ema_root); + tmp = ROUND_TO(tmp, align); + if(!sgx_mm_is_within_enclave((void*)tmp, size)) + return false; + *addr = tmp; + *next_ema = ema_end; + return true; + } + + // iterate over the ema node within specified range + ema_t *curr = ema_begin; + ema_t *next = curr->next; + + while (next != ema_end) { + size_t curr_end = ema_aligned_end(curr, align); + size_t free_size = next->start_addr - curr_end; + if (free_size >= size) { + *next_ema = next; + *addr = curr_end; + return true; + } + curr = next; + next = curr->next; + } + + // check the last ema node + if( sgx_mm_is_within_enclave((void*)(curr->start_addr + curr->size), size)) + { + *next_ema = next; + *addr = ema_aligned_end(curr, align); + return true; + } + // we look for space in front, but do not mix user with rts + size_t tmp = ema_begin->start_addr - size; + tmp = TRIM_TO(tmp, align); + if (root == &g_user_ema_root) + { + if (ema_root_end(&g_rts_ema_root) < tmp){ + //we found gap bigger enough + *addr = tmp; + *next_ema = ema_begin; + return true; + } + }else + {//rts + if (sgx_mm_is_within_enclave((void*)tmp, size)) + { + *addr = tmp; + *next_ema = ema_begin; + return true; + } + } + *next_ema = NULL; + *addr = 0; + return false; +} + +bool find_free_region_at(ema_root_t *root, size_t addr, size_t size, ema_t **next_ema) +{ + if( !sgx_mm_is_within_enclave((void*)(addr), size)) return false; + ema_t *node = root->guard->next; + while (node != root->guard) { + if (node->start_addr >= (addr + size)) { + *next_ema = node; + return true; + } + if (addr >= (node->start_addr + node->size)) { + node = node->next; + } else { + break; + } + } + if (node == root->guard) { + *next_ema = node; + return true; + } + + *next_ema = NULL; + return false; +} + +ema_t *ema_new(size_t addr, size_t size, uint32_t alloc_flags, + uint64_t si_flags, + sgx_enclave_fault_handler_t handler, + void *private_data, + ema_t* next_ema) +{ + ema_t *node = (ema_t *)malloc(sizeof(ema_t)); + if (!node) + return NULL; + *node = (ema_t){ + addr, + size, + alloc_flags, + si_flags, + NULL, + NULL, //TODO, use lock? 
+ 0, + handler, + private_data, + NULL,//next + NULL,//pev + }; + node = insert_ema(node, next_ema); + return node; +} + +void ema_destroy(ema_t *ema) +{ + remove_ema(ema); + if (ema->eaccept_map) { + free(ema->eaccept_map); + } + free(ema); +} + +static int eaccept_range_forward(const sec_info_t *si, size_t start, size_t end) +{ + while (start < end) + { + if (do_eaccept(si, start)) + abort(); + start += SGX_PAGE_SIZE; + } + return 0; +} + +static int eaccept_range_backward(const sec_info_t *si, size_t start, size_t end) +{ + assert(start < end); + do + { + end -= SGX_PAGE_SIZE; + if (do_eaccept(si, end)) + abort(); + } while (end > start); + return 0; +} + +int do_commit(size_t start, size_t size, uint64_t si_flags, bool grow_up) +{ + sec_info_t si SGX_SECINFO_ALIGN = {si_flags | SGX_EMA_STATE_PENDING, 0}; + int ret = -1; + + if (grow_up) { + ret = eaccept_range_backward(&si, start, start + size); + } else { + ret = eaccept_range_forward(&si, start, start + size); + } + + return ret; +} + +int ema_do_commit(ema_t *node, size_t start, size_t end) +{ + assert(node->eaccept_map); //TODO: refactor bit_array_test/set + size_t real_start = MAX(start, node->start_addr); + size_t real_end = MIN(end, node->start_addr + node->size); + + sec_info_t si SGX_SECINFO_ALIGN = { SGX_EMA_PAGE_TYPE_REG | + SGX_EMA_PROT_READ_WRITE | + SGX_EMA_STATE_PENDING, + 0}; + + for(size_t addr = real_start; addr < real_end; addr += SGX_PAGE_SIZE) + { + size_t pos = (addr - node->start_addr) >> SGX_PAGE_SHIFT; + // only commit for uncommitted page + if (!bit_array_test(node->eaccept_map, pos)) { + int ret = do_eaccept(&si, addr); + if (ret != 0) { + return ret; + } + bit_array_set(node->eaccept_map, pos); + } + } + + return 0; +} + +static int ema_can_commit(ema_t* first, ema_t* last, + size_t start, size_t end) +{ + ema_t* curr = first; + size_t prev_end = first->start_addr; + while (curr != last) { + if (prev_end != curr->start_addr)//there is a gap + return EINVAL; + + if (!(curr->si_flags & (SGX_EMA_PROT_WRITE) )) + return EACCES; + + if (!(curr->si_flags & (SGX_EMA_PAGE_TYPE_REG) )) + return EACCES; + + if ((curr->alloc_flags & (SGX_EMA_RESERVE) )) + return EACCES; + + prev_end = curr->start_addr + curr->size; + curr = curr->next; + } + if (prev_end < end) return EINVAL; + return 0; +} + +int ema_do_commit_loop(ema_t *first, ema_t *last, size_t start, size_t end) +{ + int ret = ema_can_commit(first, last, start, end); + if(ret) return ret; + + ema_t *curr = first, *next = NULL; + + while (curr != last) { + next = curr->next; + ret = ema_do_commit(curr, start, end); + if (ret != 0) { + return ret; + } + curr = next; + } + return ret; +} + +int ema_do_uncommit(ema_t *node, size_t start, size_t end) +{ + int prot = node->si_flags & SGX_EMA_PROT_MASK; + int type = node->si_flags & SGX_EMA_PAGE_TYPE_MASK; + uint32_t alloc_flags = node->alloc_flags & SGX_EMA_ALLOC_FLAGS_MASK; + + // ignore if ema is in reserved state + if (alloc_flags & SGX_EMA_RESERVE) { + return 0; + } + + assert(node->eaccept_map); //TODO: refactor bit_array_test/set + size_t real_start = MAX(start, node->start_addr); + size_t real_end = MIN(end, node->start_addr + node->size); + + sec_info_t si SGX_SECINFO_ALIGN = { SGX_EMA_PAGE_TYPE_TRIM | + SGX_EMA_STATE_MODIFIED, + 0}; + + for(size_t addr = real_start; addr < real_end; addr += SGX_PAGE_SIZE) + { + size_t pos = (addr - node->start_addr) >> SGX_PAGE_SHIFT; + // only for committed page + if (bit_array_test(node->eaccept_map, pos)) { + int ret = sgx_mm_modify_ocall(addr, SGX_PAGE_SIZE, + prot | type, 
prot | SGX_EMA_PAGE_TYPE_TRIM); + if (ret != 0) { + return ret; + } + + ret = do_eaccept(&si, addr); + if (ret != 0) { + return ret; + } + bit_array_reset(node->eaccept_map, pos); + //eaccept trim notify + ret =sgx_mm_modify_ocall(addr, SGX_PAGE_SIZE, + prot | SGX_EMA_PAGE_TYPE_TRIM, + prot | SGX_EMA_PAGE_TYPE_TRIM); + if(ret) return ret; + } + } + return 0; +} + +static int ema_can_uncommit(ema_t* first, ema_t* last, + size_t start, size_t end) +{ + ema_t* curr = first; + size_t prev_end = first->start_addr; + while (curr != last) { + if (prev_end != curr->start_addr)//there is a gap + return EINVAL; + + if ((curr->alloc_flags & (SGX_EMA_RESERVE) )) + return EACCES; + + //! TODO check transition, TRIM type + // Those are not needed due to global lock + prev_end = curr->start_addr + curr->size; + curr = curr->next; + } + if (prev_end < end) return EINVAL; + return 0; +} + + +int ema_do_uncommit_loop(ema_t *first, ema_t *last, size_t start, size_t end) +{ + int ret = ema_can_uncommit(first, last, start, end); + if(ret) return ret; + + ema_t *curr = first, *next = NULL; + while (curr != last) { + next = curr->next; + ret = ema_do_uncommit(curr, start, end); + if (ret != 0) { + return ret; + } + curr = next; + } + return ret; +} + +int ema_do_dealloc(ema_t *node, size_t start, size_t end) +{ + int prot = node->si_flags & SGX_EMA_PROT_MASK; + int type = node->si_flags & SGX_EMA_PAGE_TYPE_MASK; + int alloc_flag = node->alloc_flags & SGX_EMA_ALLOC_FLAGS_MASK; + + if (alloc_flag & SGX_EMA_RESERVE) + {//!TODO need check range, only dealloc [start,end) + ema_destroy(node); + return 0; + } + assert(node->eaccept_map);//TODO: refactor test/set bit_array + size_t real_start = MAX(start, node->start_addr); + size_t real_end = MIN(end, node->start_addr + node->size); + + sec_info_t si SGX_SECINFO_ALIGN = { SGX_EMA_PAGE_TYPE_TRIM | + SGX_EMA_STATE_MODIFIED, + 0}; + + for(size_t page = real_start; page < real_end; page += SGX_PAGE_SIZE) + { + size_t pos = (page - node->start_addr) >> SGX_PAGE_SHIFT; + + // only for committed page + //!TODO combine ocalls for multiple pages + if (bit_array_test(node->eaccept_map, pos)) { + //Make ocall to trim, make sure to keep READ for eaccept + int ret = sgx_mm_modify_ocall(page, SGX_PAGE_SIZE, + prot | SGX_EMA_PROT_READ | type, + prot | SGX_EMA_PROT_READ | SGX_EMA_PAGE_TYPE_TRIM ); + if (ret != 0) { + return ret; + } + + ret = do_eaccept(&si, page); + if (ret != 0) { + return ret; + } + bit_array_reset(node->eaccept_map, pos); + + //notify kernel to remove, clear all protection bits + ret = sgx_mm_modify_ocall(page, SGX_PAGE_SIZE, + prot | SGX_EMA_PAGE_TYPE_TRIM, + SGX_EMA_PROT_NONE | SGX_EMA_PAGE_TYPE_TRIM); + if (ret != 0) { + return ret; + } + } + } + + // potential ema split + ema_t *tmp_node = NULL; + if (real_start > node->start_addr) { + int ret = ema_split(node, real_start, false, &tmp_node); + if(ret) return ret; + assert(tmp_node); + node = tmp_node; + } + + tmp_node = NULL; + if (real_end < (node->start_addr + node->size)) { + int ret = ema_split(node, real_end, true, &tmp_node); + if(ret) return ret; + assert(tmp_node); + node = tmp_node; + } + + ema_destroy(node); + return 0; +} + +int ema_do_dealloc_loop(ema_t *first, ema_t *last, size_t start, size_t end) +{ + int ret = 0; + ema_t *curr = first, *next = NULL; + + while (curr != last) { + next = curr->next; + ret = ema_do_dealloc(curr, start, end); + if (ret != 0) { + return ret; + } + curr = next; + } + return ret; +} + +// change the type of the page to TCS +int ema_change_to_tcs(ema_t *node, size_t 
addr) +{ + int prot = node->si_flags & SGX_EMA_PROT_MASK; + int type = node->si_flags & SGX_EMA_PAGE_TYPE_MASK; + + if (type == SGX_EMA_PAGE_TYPE_TCS) { + return 0; + } + if (type != SGX_EMA_PAGE_TYPE_REG) + return EACCES; + + if (!(prot & SGX_EMA_PROT_READ_WRITE)) + return EPERM; + + if (node->transition) return EBUSY; + + // page need to be already committed + size_t pos = (addr - node->start_addr) >> SGX_PAGE_SHIFT; + if (!node->eaccept_map || !bit_array_test(node->eaccept_map, pos)) { + return EACCES; + } + node->transition = 1; + int ret = sgx_mm_modify_ocall(addr, SGX_PAGE_SIZE, prot | type, + prot | SGX_EMA_PAGE_TYPE_TCS); + if (ret != 0) { + goto fail; + } + + sec_info_t si SGX_SECINFO_ALIGN = {SGX_EMA_PAGE_TYPE_TCS | SGX_EMA_STATE_MODIFIED, 0}; + if (do_eaccept(&si, addr) != 0) { + abort(); + } + + // operation succeeded, update ema node: state update, split + ema_t *tcs = NULL; + ret = ema_split_ex(node, addr, addr + SGX_PAGE_SIZE, &tcs); + if(ret) goto fail; + assert(tcs); //ema_split_ex should not return NULL if node!=NULL + + tcs->si_flags = (tcs->si_flags + & (uint64_t)(~SGX_EMA_PAGE_TYPE_MASK) + & (uint64_t)(~SGX_EMA_PROT_MASK)) + | SGX_EMA_PAGE_TYPE_TCS + | SGX_EMA_PROT_NONE; + tcs->transition = 0; +fail: + node->transition = 0; + return ret; +} + +int ema_modify_permissions(ema_t *node, size_t start, size_t end, int new_prot) +{ + int prot = node->si_flags & SGX_EMA_PROT_MASK; + int type = node->si_flags & SGX_EMA_PAGE_TYPE_MASK; + if (prot == new_prot) return 0; + + size_t real_start = MAX(start, node->start_addr); + size_t real_end = MIN(end, node->start_addr + node->size); + + + node->transition = 1; + int ret = sgx_mm_modify_ocall(real_start, real_end - real_start, + prot | type, new_prot | type); + if (ret != 0) { + goto fail; + } + + sec_info_t si SGX_SECINFO_ALIGN = {(uint64_t)new_prot | SGX_EMA_PAGE_TYPE_REG | SGX_EMA_STATE_PR, 0}; + + for(size_t page = real_start; page < real_end; page += SGX_PAGE_SIZE) + { + do_emodpe(&si, page); + + // new permission is RWX, no EMODPR needed in untrusted part, hence no EACCEPT + if ((new_prot & (SGX_EMA_PROT_WRITE | SGX_EMA_PROT_EXEC)) != + (SGX_EMA_PROT_WRITE | SGX_EMA_PROT_EXEC)) { + ret = do_eaccept(&si, page); + if (ret) goto fail; + } + } + + // all involved pages complete permission change, deal with potential + // ema node split and update permission state + node->transition = 0; + if (real_start > node->start_addr) { + ema_t *tmp_node = NULL; + ret = ema_split(node, real_start, false, &tmp_node); + if (ret) goto fail; + assert(tmp_node); + node = tmp_node; + } + + if (real_end < (node->start_addr + node->size)) { + ema_t *tmp_node = NULL; + ret = ema_split(node, real_end, true, &tmp_node); + if (ret) goto fail; + assert(tmp_node); + node = tmp_node; + } + + // 'node' is the ema node to update permission for + node->transition = 1; + node->si_flags = (node->si_flags + & (uint64_t) (~SGX_EMA_PROT_MASK)) + | (uint64_t) new_prot; + if (new_prot == SGX_EMA_PROT_NONE) + {//do mprotect if target is PROT_NONE + ret = sgx_mm_modify_ocall(real_start, real_end - real_start, + type | SGX_EMA_PROT_NONE, type | SGX_EMA_PROT_NONE); + } +fail: + node->transition = 0; + return ret; +} + +static int ema_can_modify_permissions(ema_t* first, ema_t* last, + size_t start, size_t end) +{ + ema_t* curr = first; + size_t prev_end = first->start_addr; + while (curr != last) { + if (prev_end != curr->start_addr)//there is a gap + return EINVAL; + + if (!(curr->si_flags & (SGX_EMA_PAGE_TYPE_REG) )) + return EPERM; + + if ((curr->alloc_flags & 
(SGX_EMA_RESERVE) )) + return EPERM; + + if (curr->transition) return EBUSY; + + size_t real_start = MAX(start, curr->start_addr); + size_t real_end = MIN(end, curr->start_addr + curr->size); + + size_t pos_begin = (real_start - curr->start_addr) >> SGX_PAGE_SHIFT; + size_t pos_end = (real_end - curr->start_addr) >> SGX_PAGE_SHIFT; + if (!curr->eaccept_map || + !bit_array_test_range(curr->eaccept_map, pos_begin, pos_end - pos_begin)) { + return EINVAL; + } + + prev_end = curr->start_addr + curr->size; + curr = curr->next; + } + if (prev_end < end) return EINVAL; + return 0; +} + +static int ema_modify_permissions_loop_nocheck(ema_t *first, ema_t *last, size_t start, + size_t end, int prot) +{ + int ret = 0; + ema_t *curr = first, *next = NULL; + while (curr != last) { + next = curr->next; + ret = ema_modify_permissions(curr, start, end, prot); + if (ret != 0) { + return ret; + } + curr = next; + } + return ret; +} + +int ema_modify_permissions_loop(ema_t *first, ema_t *last, size_t start, + size_t end, int prot) +{ + int ret = ema_can_modify_permissions(first, last, start, end); + if (ret) return ret; + + return ema_modify_permissions_loop_nocheck(first, last, start, end, prot); +} + +static int ema_can_commit_data(ema_t* first, ema_t* last, + size_t start, size_t end) +{ + ema_t* curr = first; + size_t prev_end = first->start_addr; + while (curr != last) { + if (prev_end != curr->start_addr)//there is a gap + return EINVAL; + + if (!(curr->si_flags & (SGX_EMA_PROT_WRITE) )) + return EACCES; + + if (!(curr->si_flags & (SGX_EMA_PAGE_TYPE_REG) )) + return EACCES; + + if ((curr->alloc_flags & (SGX_EMA_RESERVE) )) + return EACCES; + + if (!(curr->alloc_flags & (SGX_EMA_COMMIT_ON_DEMAND ))) + return EINVAL; + + if (curr->eaccept_map) + { + size_t real_start = MAX(start, curr->start_addr); + size_t real_end = MIN(end, curr->start_addr + curr->size); + size_t pos_begin = (real_start - curr->start_addr) >> SGX_PAGE_SHIFT; + size_t pos_end = (real_end - curr->start_addr) >> SGX_PAGE_SHIFT; + + if(bit_array_test_range_any(curr->eaccept_map, pos_begin, pos_end - pos_begin)) + return EINVAL; + } + prev_end = curr->start_addr + curr->size; + curr = curr->next; + } + if (prev_end < end) return EINVAL; + return 0; +} + +int ema_do_commit_data(ema_t *node, size_t start, size_t end, uint8_t *data, int prot) +{ + size_t addr = start; + size_t src = (size_t)data; + sec_info_t si SGX_SECINFO_ALIGN = {(uint64_t)prot | SGX_EMA_PAGE_TYPE_REG, 0}; + + while (addr < end) + { + int ret = do_eacceptcopy(&si, addr, src); + if (ret != 0) { + return EFAULT; + } + addr += SGX_PAGE_SIZE; + src += SGX_PAGE_SIZE; + } + return ema_set_eaccept(node, start, end); +} + +int ema_do_commit_data_loop(ema_t *first, ema_t * last, size_t start, size_t end, + uint8_t *data, int prot) +{ + int ret = 0; + ret = ema_can_commit_data(first, last, start, end); + if (ret) return ret; + + ema_t *curr = first; + while (curr != last) {//there is no split in this loop + size_t real_start = MAX(start, curr->start_addr); + size_t real_end = MIN(end, curr->start_addr + curr->size); + uint8_t* real_data = data + real_start - start; + ret = ema_do_commit_data(curr, real_start, real_end, real_data, prot); + if (ret != 0) { + return ret; + } + curr = curr->next; + } + + ret = ema_modify_permissions_loop_nocheck(first, last, start, end, prot); + return ret; +} + +ema_t* ema_realloc_from_reserve_range(ema_t* first, ema_t* last, + size_t start, size_t end, + uint32_t alloc_flags, uint64_t si_flags, + sgx_enclave_fault_handler_t handler, + void 
*private_data) +{ + assert(first != NULL); + assert(last != NULL); + ema_t* curr = first; + assert(first->start_addr < end); + assert(last->prev->start_addr + last->prev->size > start); + //fail on any nodes not reserve or any gaps + size_t prev_end = first->start_addr; + while (curr != last) + { + if (prev_end != curr->start_addr)//there is a gap + return NULL; + if (curr->alloc_flags & SGX_EMA_RESERVE) { + prev_end = curr->start_addr + curr->size; + curr = curr->next; + } + else + return NULL; + } + + int ret = 0; + if (start > first->start_addr){ + ret = ema_split(first, start, false, &first); + if (ret) return NULL; + } + if (end < last->prev->start_addr + last->prev->size){ + ret = ema_split(last->prev, end, false, &last); + if (ret) return NULL; + } + + assert(first->alloc_flags & SGX_EMA_RESERVE); + assert(!first->eaccept_map); + + curr = first; + while(curr != last) + { + ema_t* next = curr->next; + ema_destroy(curr); + curr = next; + } + + ema_t* new_node = ema_new(start, end - start, + alloc_flags, si_flags, + handler, private_data, last); + return new_node; +} + +int ema_do_alloc(ema_t* node) +{ + uint32_t alloc_flags = node->alloc_flags; + if (alloc_flags & SGX_EMA_RESERVE) { + return 0; + } + + size_t tmp_addr = node->start_addr; + size_t size = node->size; + int ret = sgx_mm_alloc_ocall(tmp_addr, size, + (int)(alloc_flags | (node->si_flags & SGX_EMA_PAGE_TYPE_MASK))); + if (ret) { + return ret; + } + + if (alloc_flags & SGX_EMA_COMMIT_NOW) { + int grow_up = (alloc_flags & SGX_EMA_GROWSDOWN) ? 0 : 1; + ret = do_commit(tmp_addr, size, node->si_flags, grow_up); + if (ret) { + return ret; + } + } + + if(alloc_flags & SGX_EMA_COMMIT_NOW) + ret = ema_set_eaccept_full(node); + else + ret = ema_clear_eaccept_full(node); + + return ret; +} diff --git a/sdk/emm/emm_private.c b/sdk/emm/emm_private.c new file mode 100644 index 000000000..9131655aa --- /dev/null +++ b/sdk/emm/emm_private.c @@ -0,0 +1,128 @@ +/* + * Copyright (C) 2011-2021 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#include +#include +#include +#include "ema.h" +#include "emm_private.h" +#include "sgx_mm_rt_abstraction.h" + +extern ema_root_t g_rts_ema_root; +#define LEGAL_INIT_FLAGS (\ + SGX_EMA_PAGE_TYPE_REG \ + | SGX_EMA_PAGE_TYPE_TCS \ + | SGX_EMA_PAGE_TYPE_SS_FIRST \ + | SGX_EMA_PAGE_TYPE_SS_REST \ + | SGX_EMA_SYSTEM \ + | SGX_EMA_RESERVE \ + ) + +int mm_init_ema(void *addr, size_t size, int flags, int prot, + sgx_enclave_fault_handler_t handler, + void *handler_private) +{ + if (!sgx_mm_is_within_enclave(addr, size)) + return EACCES; + if( ((unsigned int)flags) & (~LEGAL_INIT_FLAGS)) + return EINVAL; + if(prot &(~SGX_EMA_PROT_MASK)) + return EINVAL; + ema_t* next_ema = NULL; + + if(!find_free_region_at(&g_rts_ema_root, (size_t)addr, size, &next_ema)) + return EINVAL; + + ema_t* ema = ema_new((size_t)addr, size, flags & SGX_EMA_ALLOC_FLAGS_MASK, + (uint64_t)prot | (SGX_EMA_PAGE_TYPE_MASK & flags), + handler, handler_private, next_ema); + if(!ema) return ENOMEM; + if (flags & SGX_EMA_RESERVE) + return 0; + return ema_set_eaccept_full(ema); +} + +extern int mm_alloc_internal(void *addr, size_t size, uint32_t flags, + sgx_enclave_fault_handler_t handler, + void *private, void** out_addr, ema_root_t* root); + +int mm_alloc(void *addr, size_t size, uint32_t flags, + sgx_enclave_fault_handler_t handler, + void *private, void** out_addr) +{ + return mm_alloc_internal(addr, size, flags, handler, private, + out_addr, &g_rts_ema_root); +} + +extern int mm_commit_internal(void *addr, size_t size, ema_root_t* root); + +int mm_commit(void *addr, size_t size) +{ + return mm_commit_internal(addr, size, &g_rts_ema_root); +} + +extern int mm_uncommit_internal(void *addr, size_t size, ema_root_t* root); + +int mm_uncommit(void *addr, size_t size) +{ + return mm_uncommit_internal(addr, size, &g_rts_ema_root); +} + +extern int mm_dealloc_internal(void *addr, size_t size, ema_root_t* root); + +int mm_dealloc(void *addr, size_t size) +{ + return mm_dealloc_internal(addr, size, &g_rts_ema_root); +} + +extern int mm_commit_data_internal(void *addr, size_t size, + uint8_t *data, int prot, ema_root_t* root); + +int mm_commit_data(void *addr, size_t size, uint8_t *data, int prot) +{ + return mm_commit_data_internal(addr, size, data, prot, &g_rts_ema_root); +} + +extern int mm_modify_type_internal(void *addr, size_t size, int type, ema_root_t* root); + +int mm_modify_type(void *addr, size_t size, int type) +{ + return mm_modify_type_internal(addr, size, type, &g_rts_ema_root); +} + +extern int mm_modify_permissions_internal(void *addr, size_t size, + int prot, ema_root_t* root); + +int mm_modify_permissions(void *addr, size_t size, int prot) +{ + return mm_modify_permissions_internal(addr, size, prot, &g_rts_ema_root); +} + diff --git a/sdk/emm/include/bit_array.h b/sdk/emm/include/bit_array.h new file mode 100644 index 000000000..3fcad77a4 --- /dev/null +++ b/sdk/emm/include/bit_array.h @@ -0,0 +1,117 @@ +/* + * Copyright (C) 2011-2021 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef BIT_ARRAY_H_ +#define BIT_ARRAY_H_ + +#include +#include +#include + +typedef struct bit_array_ bit_array; + +#ifdef __cplusplus +extern "C" { +#endif + +// Create a new bit array to track the status of 'num' of bits. +// The contents of the data is not initialized. +bit_array *bit_array_new(size_t num_of_bits); + +// Create a new bit array to track the status of 'num' of bits. +// All the tracked bits are set (value 1). +bit_array *bit_array_new_set(size_t num_of_bits); + +// Create a new bit array to track the status of 'num' of bits. +// All the tracked bits are reset (value 0). +bit_array *bit_array_new_reset(size_t num_of_bits); + +// Reset the bit_array 'ba' to track the new 'data', which has 'num' of bits. +void bit_array_reattach(bit_array *ba, size_t num_of_bits, uint8_t *data); + +// Delete the bit_array 'ba' and the data it owns +void bit_array_delete(bit_array *ba); + +// Returns the number of tracked bits in the bit_array +size_t bit_array_size(bit_array *ba); + +// Returns whether the bit at position 'pos' is set +bool bit_array_test(bit_array *ba, size_t pos); + +// Return whether the bits in range [pos, pos+len) are all set +bool bit_array_test_range(bit_array *ba, size_t pos, size_t len); + +// Retuen whether any bit in range [pos, pos+len) is set +bool bit_array_test_range_any(bit_array *ba, size_t pos, size_t len); + +// Returns whether any of the bits is set +bool bit_array_any(bit_array *ba); + +// Returns whether none of the bits is set +bool bit_array_none(bit_array *ba); + +// Returns whether all of the bits are set +bool bit_array_all(bit_array *ba); + +// Set the bit at 'pos' +void bit_array_set(bit_array *ba, size_t pos); + +// Set the bits in range [pos, pos+len) +void bit_array_set_range(bit_array *ba, size_t pos, size_t len); + +// Set all the bits +void bit_array_set_all(bit_array *ba); + +// Clear the bit at 'pos' +void bit_array_reset(bit_array *ba, size_t pos); + +// Clear the bits in range [pos, pos+len) +void bit_array_reset_range(bit_array *ba, size_t pos, size_t len); + +// Clear all the bits +void bit_array_reset_all(bit_array *ba); + +// Flip the bit at 'pos' +void bit_array_flip(bit_array *ba, size_t pos); + +// Split the bit array at 'pos' +// Returns pointers to two new bit arrays +int bit_array_split(bit_array *ba, size_t pos, bit_array **, bit_array **); + +// Merge two bit arrays +// Returns a new bit array, merging two input bit arrays +bit_array* bit_array_merge(bit_array *ba1, bit_array *ba2); + +#ifdef __cplusplus +} +#endif + +#endif 
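The declarations above are the whole surface the EMM uses to track per-page EACCEPT state. As a minimal, illustrative sketch (the function name, page count, and bit positions below are arbitrary, not taken from the patch), the API can be exercised like this for a hypothetical 16-page region:

/* Illustrative only: track commit status for a hypothetical 16-page region. */
#include <assert.h>
#include "bit_array.h"

static void track_region(void)
{
    bit_array *ba = bit_array_new_reset(16);   /* 16 pages, all marked uncommitted */
    if (!ba)
        return;
    bit_array_set_range(ba, 4, 3);             /* pages 4..6 now committed */
    assert(bit_array_test(ba, 5));
    assert(!bit_array_test_range(ba, 0, 16));  /* not all pages committed */
    assert(bit_array_test_range_any(ba, 0, 8));/* but some in the first 8 are */
    bit_array_reset_range(ba, 4, 3);           /* pages uncommitted again */
    assert(bit_array_none(ba));
    bit_array_delete(ba);                      /* frees the array and its data */
}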
diff --git a/sdk/emm/include/ema.h b/sdk/emm/include/ema.h new file mode 100644 index 000000000..dc348e881 --- /dev/null +++ b/sdk/emm/include/ema.h @@ -0,0 +1,130 @@ +/* + * Copyright (C) 2011-2021 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef __SGX_EMA_H__ +#define __SGX_EMA_H__ + +#include +#include "sgx_mm.h" + +#ifndef SGX_SECINFO_ALIGN +#define SGX_SECINFO_ALIGN __attribute__((aligned(sizeof(sec_info_t)))) +#endif + +#define SGX_PAGE_SIZE 0x1000ULL +#define SGX_PAGE_SHIFT 12 + +typedef struct ema_root_ ema_root_t; +typedef struct ema_t_ ema_t; + +#ifdef __cplusplus +extern "C" { +#endif + +bool ema_root_empty(ema_root_t* r); +bool ema_exist_in(ema_root_t* r, size_t addr, size_t size); +bool ema_exist(size_t addr, size_t size); + +#ifndef NDEBUG +ema_t * ema_next(ema_t *node); +#endif +#ifdef TEST +void destroy_ema_root(ema_root_t *); +void dump_ema_root(ema_root_t *); +size_t ema_base(ema_t *node); +size_t ema_size(ema_t *node); +int ema_split(ema_t *ema, size_t addr, bool new_lower, ema_t** new_node); +int ema_split_ex(ema_t *ema, size_t start, size_t end, ema_t** new_node); +ema_t * ema_merge(ema_t *lo_ema, ema_t *hi_ema); +#endif + +uint32_t get_ema_alloc_flags(ema_t *node); +uint64_t get_ema_si_flags(ema_t *node); + +sgx_enclave_fault_handler_t ema_fault_handler(ema_t* node, void** private_data); +bool is_ema_transition(ema_t *node); + +ema_t *ema_new(size_t addr, size_t size, uint32_t alloc_flags, + uint64_t si_flags, + sgx_enclave_fault_handler_t handler, + void *private_data, + ema_t* next_ema); +void ema_destroy(ema_t *ema); + +int ema_set_eaccept_full(ema_t *node); +int ema_clear_eaccept_full(ema_t *node); +int ema_set_eaccept(ema_t *node, size_t start, size_t end); +bool ema_page_committed(ema_t *ema, size_t addr); + +ema_t * search_ema(ema_root_t *root, size_t addr); +int search_ema_range(ema_root_t *root, + size_t start, size_t end, + ema_t **ema_begin, ema_t **ema_end); + +bool find_free_region(ema_root_t *root, + size_t size, size_t align, size_t *addr, + ema_t 
**next_ema); + +bool find_free_region_at(ema_root_t *root, + size_t addr, size_t size, + ema_t **next_ema); + + +int do_commit(size_t start, size_t size, uint64_t si_flags, bool grow_up); +int ema_do_commit(ema_t *node, size_t start, size_t end); +int ema_do_commit_loop(ema_t *first, ema_t *last, size_t start, size_t end); + +int ema_do_uncommit(ema_t *node, size_t start, size_t end); +int ema_do_uncommit_loop(ema_t *first, ema_t *last, size_t start, size_t end); + +int ema_do_dealloc(ema_t *node, size_t start, size_t end); +int ema_do_dealloc_loop(ema_t *first, ema_t *last, size_t start, size_t end); + +int ema_modify_permissions(ema_t *node, size_t start, size_t end, int new_prot); +int ema_modify_permissions_loop(ema_t *first, ema_t *last, size_t start, size_t end, int prot); +int ema_change_to_tcs(ema_t *node, size_t addr); + +int ema_do_commit_data(ema_t *node, size_t start, size_t end, uint8_t *data, int prot); +int ema_do_commit_data_loop(ema_t *firsr, ema_t *last, size_t start, + size_t end, uint8_t *data, int prot); + +int ema_do_alloc(ema_t* node); +ema_t* ema_realloc_from_reserve_range(ema_t* first, ema_t* last, + size_t start, size_t end, + uint32_t alloc_flags, uint64_t si_flags, + sgx_enclave_fault_handler_t handler, + void *private_data); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/sdk/emm/include/emm_private.h b/sdk/emm/include/emm_private.h new file mode 100644 index 000000000..9c9792727 --- /dev/null +++ b/sdk/emm/include/emm_private.h @@ -0,0 +1,86 @@ +/* + * Copyright (C) 2011-2021 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef EMM_PRIVATE_H_ +#define EMM_PRIVATE_H_ + +#include +#include +#include "sgx_mm.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define SGX_EMA_SYSTEM SGX_EMA_ALLOC_FLAGS(0x80UL) /* EMA reserved by system */ +/* + * Initialize an EMA. This can be used to setup EMAs to account regions that + * are loaded and initialized with EADD before EINIT. + * @param[in] addr Starting address of the region, page aligned. 
If NULL is provided, + * then the function will select the starting address. + * @param[in] size Size of the region in multiples of page size in bytes. + * @param[in] flags SGX_EMA_SYSTEM, or SGX_EMA_SYSTEM | SGX_EMA_RESERVE + * bitwise ORed with one of following page types: + * - SGX_EMA_PAGE_TYPE_REG: regular page type. This is the default if not specified. + * - SGX_EMA_PAGE_TYPE_TCS: TCS page. + * - SGX_EMA_PAGE_TYPE_SS_FIRST: the first page in shadow stack. + * - SGX_EMA_PAGE_TYPE_SS_REST: the rest page in shadow stack. + * @param[in] prot permissions, either SGX_EMA_PROT_NONE or a bitwise OR of following with: + * - SGX_EMA_PROT_READ: Pages may be read. + * - SGX_EMA_PROT_WRITE: Pages may be written. + * - SGX_EMA_PROT_EXECUTE: Pages may be executed. + * @param[in] handler A custom handler for page faults in this region, NULL if + * no custom handling needed. + * @param[in] handler_private Private data for the @handler, which will be passed + * back when the handler is called. + * @retval 0 The operation was successful. + * @retval EACCES Region is outside enclave address space. + * @retval EEXIST Any page in range requested is in use. + * @retval EINVAL Invalid page type, flags, or addr and length are not page aligned. + */ +int mm_init_ema(void *addr, size_t size, int flags, int prot, + sgx_enclave_fault_handler_t handler, + void *handler_private); +// See documentation in sgx_mm.h +int mm_alloc(void *addr, size_t size, uint32_t flags, + sgx_enclave_fault_handler_t handler, void *private_data, void** out_addr); +int mm_dealloc(void *addr, size_t size); +int mm_uncommit(void *addr, size_t size); +int mm_commit(void *addr, size_t size); +int mm_commit_data(void *addr, size_t size, uint8_t *data, int prot); +int mm_modify_type(void *addr, size_t size, int type); +int mm_modify_permissions(void *addr, size_t size, int prot); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/sdk/emm/include/sgx_mm.h b/sdk/emm/include/sgx_mm.h new file mode 100644 index 000000000..5f6a3d866 --- /dev/null +++ b/sdk/emm/include/sgx_mm.h @@ -0,0 +1,285 @@ +/* + * Copyright (C) 2011-2021 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef SGX_MM_H_ +#define SGX_MM_H_ + +#include +#include +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Page fault (#PF) info reported in the SGX SSA MISC region. + */ +typedef struct _sgx_pfinfo +{ + uint64_t maddr; // address for #PF. + union _pfec + { + uint32_t errcd; + struct + { // PFEC bits. + uint32_t p : 1; // P flag. + uint32_t rw : 1; // RW access flag, 0 for read, 1 for write. + uint32_t : 13; // U/S, I/O, PK and reserved bits not relevant for SGX PF. + uint32_t sgx : 1; // SGX bit. + uint32_t : 16; // reserved bits. + }; + } pfec; + uint32_t reserved; +} sgx_pfinfo; + +/** + * Custom page fault (#PF) handler, do usage specific processing upon #PF, + * e.g., loading data and verify its trustworthiness, then call sgx_mm_commit_data + * to explicitly EACCEPTCOPY data. + * This custom handler is passed into sgx_mm_alloc, and associated with the + * newly allocated region. The memory manager calls the handler when a #PF + * happens in the associated region. The handler may invoke abort() if it + * determines the exception is invalid based on certain internal states + * it maintains. + * + * @param[in] pfinfo info reported in the SSA MISC region for page fault. + * @param[in] private_data private data provided by handler in sgx_mm_alloc call. + * @retval SGX_MM_EXCEPTION_CONTINUE_EXECUTION Success on handling the exception. + * @retval SGX_MM_EXCEPTION_CONTINUE_SEARCH Exception not handled and should be passed to + * some other handler. + * + */ +typedef int (*sgx_enclave_fault_handler_t)(const sgx_pfinfo *pfinfo, void *private_data); + +/* bit 0 - 7 are allocation flags. */ +#define SGX_EMA_ALLOC_FLAGS_SHIFT 0 +#define SGX_EMA_ALLOC_FLAGS(n) ((n) << SGX_EMA_ALLOC_FLAGS_SHIFT) +#define SGX_EMA_ALLOC_FLAGS_MASK SGX_EMA_ALLOC_FLAGS(0xFF) + +/* Only reserve an address range, no physical memory committed.*/ +#define SGX_EMA_RESERVE SGX_EMA_ALLOC_FLAGS(0x1) + +/* Reserve an address range and commit physical memory. */ +#define SGX_EMA_COMMIT_NOW SGX_EMA_ALLOC_FLAGS(0x2) + +/* Reserve an address range and commit physical memory on demand.*/ +#define SGX_EMA_COMMIT_ON_DEMAND SGX_EMA_ALLOC_FLAGS(0x4) + +/* Always commit pages from higher to lower addresses, + * no gaps in addresses above the last committed. + */ +#define SGX_EMA_GROWSDOWN SGX_EMA_ALLOC_FLAGS(0x10) + +/* Always commit pages from lower to higher addresses, + * no gaps in addresses below the last committed. +*/ +#define SGX_EMA_GROWSUP SGX_EMA_ALLOC_FLAGS(0x20) + +/* Map addr must be exactly as requested. */ +#define SGX_EMA_FIXED SGX_EMA_ALLOC_FLAGS(0x40) + +/* bit 8 - 15 are page types. */ +#define SGX_EMA_PAGE_TYPE_SHIFT 8 +#define SGX_EMA_PAGE_TYPE(n) ((n) << SGX_EMA_PAGE_TYPE_SHIFT) +#define SGX_EMA_PAGE_TYPE_MASK SGX_EMA_PAGE_TYPE(0xFF) +#define SGX_EMA_PAGE_TYPE_TCS SGX_EMA_PAGE_TYPE(0x1) /* TCS page type. */ +#define SGX_EMA_PAGE_TYPE_REG SGX_EMA_PAGE_TYPE(0x2) /* regular page type, default if not specified. 
*/ +#define SGX_EMA_PAGE_TYPE_TRIM SGX_EMA_PAGE_TYPE(0x4) /* TRIM page type. */ +#define SGX_EMA_PAGE_TYPE_SS_FIRST SGX_EMA_PAGE_TYPE(0x5) /* the first page in shadow stack. */ +#define SGX_EMA_PAGE_TYPE_SS_REST SGX_EMA_PAGE_TYPE(0x6) /* the rest pages in shadow stack. */ + +/* Use bit 24-31 for alignment masks. */ +#define SGX_EMA_ALIGNMENT_SHIFT 24 +/* + * Alignment (expressed in log2). Must be >= log2(PAGE_SIZE) and + * < # bits in a pointer (32 or 64). + */ +#define SGX_EMA_ALIGNED(n) (((unsigned int)(n) << SGX_EMA_ALIGNMENT_SHIFT)) +#define SGX_EMA_ALIGNMENT_MASK SGX_EMA_ALIGNED(0xFFUL) +#define SGX_EMA_ALIGNMENT_64KB SGX_EMA_ALIGNED(16UL) +#define SGX_EMA_ALIGNMENT_16MB SGX_EMA_ALIGNED(24UL) +#define SGX_EMA_ALIGNMENT_4GB SGX_EMA_ALIGNED(32UL) + +/* Permissions flags */ +#define SGX_EMA_PROT_NONE 0x0 +#define SGX_EMA_PROT_READ 0x1 +#define SGX_EMA_PROT_WRITE 0x2 +#define SGX_EMA_PROT_EXEC 0x4 +#define SGX_EMA_PROT_READ_WRITE (SGX_EMA_PROT_READ|SGX_EMA_PROT_WRITE) +#define SGX_EMA_PROT_READ_EXEC (SGX_EMA_PROT_READ|SGX_EMA_PROT_EXEC) +#define SGX_EMA_PROT_MASK (SGX_EMA_PROT_READ_WRITE|SGX_EMA_PROT_EXEC) +/* + * Allocate a new memory region in enclave address space (ELRANGE). + * @param[in] addr Starting address of the region, page aligned. If NULL is provided, + * then the function will select the starting address. + * @param[in] length Size of the region in bytes of multiples of page size. + * @param[in] flags A bitwise OR of flags describing committing mode, committing + * order, address preference, and page type. + * Flags should include exactly one of following for committing mode: + * - SGX_EMA_RESERVE: just reserve an address range, no EPC committed. + * To allocate memory on a reserved range, call this + * function again with SGX_EMA_COMMIT_ON_DEMAND or SGX_EMA_COMMIT_NOW. + * - SGX_EMA_COMMIT_NOW: reserves memory range and commit EPC pages. EAUG and + * EACCEPT are done on SGX2 platforms. + * - SGX_EMA_COMMIT_ON_DEMAND: reserves memory range, EPC pages + * are committed (EACCEPT) on demand upon #PF on SGX2 platforms. + * ORed with zero or one of the committing order flags for SGX2 platforms: + * - SGX_EMA_GROWSDOWN: always commit pages from higher to lower addresses, + * no gaps in addresses above the last committed. + * - SGX_EMA_GROWSUP: always commit pages from lower to higher addresses, + * no gaps in addresses below the last committed. + * Optionally ORed with + * - SGX_EMA_FIXED: allocate at fixed address, will return error if the + * requested address is in use. + * - SGX_EMA_ALIGNED(n): Align the region on a requested boundary. + * Fail if a suitable region cannot be found, + * The argument n specifies the binary logarithm of + * the desired alignment and must be at least 12. + * Optionally ORed with one of following page types: + * - SGX_EMA_PAGE_TYPE_REG: regular page type. This is the default if not specified. + * - SGX_EMA_PAGE_TYPE_SS_FIRST: the first page in shadow stack. + * - SGX_EMA_PAGE_TYPE_SS_REST: the rest page in shadow stack. + * + * @param[in] handler A custom handler for page faults in this region, NULL if + * no custom handling needed. + * @param[in] handler_private Private data for the @handler, which will be passed + * back when the handler is called. + * @param[out] out_addr Pointer to store the start address of allocated range. + * Set to valid address by the function on success, NULL otherwise. + * @retval 0 The operation was successful. + * @retval EACCES Region is outside enclave address space. 
+ * @retval EEXIST Any page in range requested is in use and SGX_EMA_FIXED is set. + * @retval EINVAL Invalid alignment boundary, i.e., n < 12 in SGX_EMA_ALIGNED(n). + * @retval ENOMEM Out of memory, or no free space to satisfy alignment boundary. + */ +int sgx_mm_alloc(void *addr, size_t length, int flags, + sgx_enclave_fault_handler_t handler, void *handler_private, + void **out_addr); + +/* + * Uncommit (trim) physical EPC pages in a previously committed range. + * The pages in the allocation are freed, but the address range is still reserved. + * @param[in] addr Page aligned start address of the region to be trimmed. + * @param[in] length Size in bytes of multiples of page size. + * @retval 0 The operation was successful. + * @retval EINVAL The address range is not allocated or outside enclave. + */ +int sgx_mm_uncommit(void *addr, size_t length); + +/* + * Deallocate the address range. + * The pages in the allocation are freed and the address range is released for future allocation. + * @param[in] addr Page aligned start address of the region to be freed and released. + * @param[in] length Size in bytes of multiples of page size. + * @retval 0 The operation was successful. + * @retval EINVAL The address range is not allocated or outside enclave. + */ +int sgx_mm_dealloc(void *addr, size_t length); + +/* + * Change permissions of an allocated region. + * @param[in] addr Start address of the region, must be page aligned. + * @param[in] length Size in bytes of multiples of page size. + * @param[in] prot permissions bitwise OR of following with: + * - SGX_EMA_PROT_READ: Pages may be read. + * - SGX_EMA_PROT_WRITE: Pages may be written. + * - SGX_EMA_PROT_EXEC: Pages may be executed. + * @retval 0 The operation was successful. + * @retval EACCES Original page type can not be changed to target type. + * @retval EINVAL The memory region was not allocated or outside enclave + * or other invalid parameters that are not supported. + * @retval EPERM The requested permissions are not allowed, e.g., by target page type or + * SELinux policy. + */ +int sgx_mm_modify_permissions(void *addr, size_t length, int prot); + +/* + * Change the page type of an allocated region. + * @param[in] addr Start address of the region, must be page aligned. + * @param[in] length Size in bytes of multiples of page size. + * @param[in] type page type, only SGX_EMA_PAGE_TYPE_TCS is supported. + * + * @retval 0 The operation was successful. + * @retval EACCES Original page type can not be changed to target type. + * @retval EINVAL The memory region was not allocated or outside enclave + * or other invalid parameters that are not supported. + * @retval EPERM Target page type is not allowed by this API, e.g., PT_TRIM, + * PT_SS_FIRST, PT_SS_REST. + */ +int sgx_mm_modify_type(void *addr, size_t length, int type); + +/* + * Commit a partial or full range of memory allocated previously with SGX_EMA_COMMIT_ON_DEMAND. + * The API will return 0 if all pages in the requested range are successfully committed. + * Calling this API on pages already committed has no effect. + * @param[in] addr Page aligned starting address. + * @param[in] length Length of the region in bytes of multiples of page size. + * @retval 0 The operation was successful. + * @retval EINVAL Any requested page is not in any previously allocated regions, or + * outside the enclave address range. + * @retval EFAULT All other errors. 
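+ *
+ * A minimal usage sketch (illustrative only, assuming an SGX2/EDMM-capable
+ * platform): reserve a commit-on-demand region with sgx_mm_alloc(), then
+ * commit its first page eagerly before touching it:
+ *
+ *   void *p = NULL;
+ *   int rc = sgx_mm_alloc(NULL, 0x10000, SGX_EMA_COMMIT_ON_DEMAND,
+ *                         NULL, NULL, &p);   // 16 pages, no custom handler
+ *   if (rc == 0)
+ *       rc = sgx_mm_commit(p, 0x1000);       // EACCEPT only the first page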
+ */ +int sgx_mm_commit(void *addr, size_t length); + +/* + * Load data into target pages within a region previously allocated by sgx_mm_alloc. + * This can be called to load data and set target permissions at the same time, + * e.g., dynamic code loading. The caller has verified data to be trusted and expected + * to be loaded to the target address range. Calling this API on pages already committed + * will fail. + * + * @param[in] addr Page aligned target starting addr. + * @param[in] length Length of data, in bytes of multiples of page size. + * @param[in] data Data of @length. + * @param[in] prot Target permissions. + * @retval 0 The operation was successful. + * @retval EINVAL Any page in requested address range is not previously allocated, or + * outside the enclave address range. + * @retval EPERM Any page in requested range is previously committed. + * @retval EPERM The target permissions are not allowed by OS security policy, + * e.g., SELinux rules. + */ +int sgx_mm_commit_data(void *addr, size_t length, uint8_t *data, int prot); + +/* Return value used by the EMM #PF handler to indicate + * to the dispatcher that it should continue searching for the next handler. + */ +#define SGX_MM_EXCEPTION_CONTINUE_SEARCH 0 + +/* Return value used by the EMM #PF handler to indicate + * to the dispatcher that it should stop searching and continue execution. + */ +#define SGX_MM_EXCEPTION_CONTINUE_EXECUTION -1 + + +#ifdef __cplusplus +} +#endif +#endif diff --git a/sdk/emm/include/sgx_mm_primitives.h b/sdk/emm/include/sgx_mm_primitives.h new file mode 100644 index 000000000..0724ea41e --- /dev/null +++ b/sdk/emm/include/sgx_mm_primitives.h @@ -0,0 +1,62 @@ +/* + * Copyright (C) 2011-2021 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#ifndef SGX_MM_PRIMITIVES_H_ +#define SGX_MM_PRIMITIVES_H_ + +#include +#include +#ifdef __cplusplus +extern "C" { +#endif + +//SGX primitives +typedef struct _sec_info_t +{ + uint64_t flags; + uint64_t reserved[7]; +} sec_info_t; + +// EACCEPT +int do_eaccept(const sec_info_t* si, size_t addr); + +// EMODPE +int do_emodpe(const sec_info_t* si, size_t addr); + +// EACCEPTCOPY +int do_eacceptcopy(const sec_info_t* si, size_t dest, size_t src); + +#ifdef __cplusplus +} +#endif + + +#endif diff --git a/sdk/emm/include/sgx_mm_rt_abstraction.h b/sdk/emm/include/sgx_mm_rt_abstraction.h new file mode 100644 index 000000000..e6b519f69 --- /dev/null +++ b/sdk/emm/include/sgx_mm_rt_abstraction.h @@ -0,0 +1,160 @@ +/* + * Copyright (C) 2011-2021 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef SGX_MM_RT_ABSTRACTION_H_ +#define SGX_MM_RT_ABSTRACTION_H_ + +#include "sgx_mm.h" +#ifdef __cplusplus +extern "C" { +#endif + +#include + +/* + * The EMM page fault (#PF) handler. + * + * @param[in] pfinfo Info reported in the SSA MISC region for page fault. + * @retval SGX_EXCEPTION_CONTINUE_EXECUTION Success handling the exception. + * @retval SGX_EXCEPTION_CONTINUE_SEARCH The EMM does not handle the exception. + */ + typedef int (*sgx_mm_pfhandler_t)(const sgx_pfinfo *pfinfo); + +/* + * Register the EMM handler with the global exception handler registry + * The Runtime should ensure this handler is called first in case of + * a #PF before all other handlers. + * + * @param[in] pfhandler The EMM page fault handler. + * @retval true Success. + * @retval false Failure. + */ + bool sgx_mm_register_pfhandler(sgx_mm_pfhandler_t pfhandler); + +/* + * Unregister the EMM handler with the global exception handler registry. + * @param[in] pfhandler The EMM page fault handler. + * @retval true Success. + * @retval false Failure. + */ + bool sgx_mm_unregister_pfhandler(sgx_mm_pfhandler_t pfhandler); + +/* + * Call OS to reserve region for EAUG, immediately or on-demand. 
+ * + * @param[in] addr Desired page aligned start address, NULL if no desired address. + * @param[in] length Size of the region in bytes of multiples of page size. + * @param[in] flags A bitwise OR of flags describing committing mode, committing + * order, address preference, page type. The untrusted side implementation should always invoke the mmap syscall with MAP_SHARED|MAP_FIXED, and + * translate the following additional bits to proper parameters invoking mmap or other SGX specific + * syscall(s) provided by the kernel. + * The flags param of this interface should include exactly one of following for committing mode: + * - SGX_EMA_COMMIT_NOW: reserves memory range with SGX_EMA_PROT_READ|SGX_EMA_PROT_WRITE, if supported, + * kernel is given a hint to EAUG EPC pages for the area as soon as possible. + * - SGX_EMA_COMMIT_ON_DEMAND: reserves memory range, EPC pages can be EAUGed upon #PF. + * ORed with zero or one of the committing order flags: + * - SGX_EMA_GROWSDOWN: if supported, a hint given for the kernel to EAUG pages from higher + * to lower addresses, no gaps in addresses above the last committed. + * - SGX_EMA_GROWSUP: if supported, a hint given for the kernel to EAUG pages from lower + * to higher addresses, no gaps in addresses below the last committed. + * Optionally ORed with one of following page types: + * - SGX_EMA_PAGE_TYPE_REG: regular page type. This is the default if not specified. + * - SGX_EMA_PAGE_TYPE_SS_FIRST: the first page in shadow stack. + * - SGX_EMA_PAGE_TYPE_SS_REST: the rest page in shadow stack. + * @retval 0 The operation was successful. + * @retval EINVAL Any parameter passed in is not valid. + * @retval errno Error as reported by dependent syscalls, e.g., mmap(). + */ + int sgx_mm_alloc_ocall(uint64_t addr, size_t length, int flags); + + /* + * Call OS to change permissions, type, or notify EACCEPT done after TRIM. + * + * @param[in] addr Start address of the memory to change protections. + * @param[in] length Length of the area. This must be a multiple of the page size. + * @param[in] flags_from The original EPCM flags of the EPC pages to be modified. + * Must be bitwise OR of following: + * SGX_EMA_PROT_READ + * SGX_EMA_PROT_WRITE + * SGX_EMA_PROT_EXEC + * SGX_EMA_PAGE_TYPE_REG: regular page, changeable to TRIM and TCS + * SGX_EMA_PAGE_TYPE_TRIM: signal to the kernel EACCEPT is done for TRIM pages. + * @param[in] flags_to The target EPCM flags. This must be bitwise OR of following: + * SGX_EMA_PROT_READ + * SGX_EMA_PROT_WRITE + * SGX_EMA_PROT_EXEC + * SGX_EMA_PAGE_TYPE_TRIM: change the page type to PT_TRIM. Note the address + * range for trimmed pages may still be reserved by enclave with + * proper permissions. + * SGX_EMA_PAGE_TYPE_TCS: change the page type to PT_TCS + * @retval 0 The operation was successful. + * @retval EINVAL A parameter passed in is not valid. + * @retval errno Error as reported by dependent syscalls, e.g., mprotect(). + */ + + int sgx_mm_modify_ocall(uint64_t addr, size_t length, int flags_from, int flags_to); + + /* + * Define a mutex and init/lock/unlock/destroy functions. + */ + typedef struct _sgx_mm_mutex sgx_mm_mutex; + sgx_mm_mutex* sgx_mm_mutex_create(void); + int sgx_mm_mutex_lock(sgx_mm_mutex *mutex); + int sgx_mm_mutex_unlock(sgx_mm_mutex *mutex); + int sgx_mm_mutex_destroy(sgx_mm_mutex *mutex); + + /* + * Check whether the given buffer is strictly within the enclave. + * + * Check whether the buffer given by the **ptr** and **size** parameters is + * strictly within the enclave's memory. If so, return true. 
If any + * portion of the buffer lies outside the enclave's memory, return false. + * + * @param[in] ptr The pointer to the buffer. + * @param[in] size The size of the buffer. + * + * @retval true The buffer is strictly within the enclave. + * @retval false At least some part of the buffer is outside the enclave, or + * the arguments are invalid. For example, if **ptr** is null or **size** + * causes arithmetic operations to wrap. + * + */ + bool sgx_mm_is_within_enclave(const void *ptr, size_t size); + + +#define SGX_EMA_SYSTEM SGX_EMA_ALLOC_FLAGS(0x80UL) /* EMA reserved by system */ + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/sdk/emm/sgx_mm.c b/sdk/emm/sgx_mm.c new file mode 100644 index 000000000..2a5ad5c64 --- /dev/null +++ b/sdk/emm/sgx_mm.c @@ -0,0 +1,442 @@ +/* + * Copyright (C) 2011-2021 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#include +#include +#include +#include "sgx_mm.h" +#include "ema.h" +#include "sgx_mm_rt_abstraction.h" + +extern ema_root_t g_user_ema_root; +extern ema_root_t g_rts_ema_root; +#define LEGAL_ALLOC_PAGE_TYPE (SGX_EMA_PAGE_TYPE_REG | SGX_EMA_PAGE_TYPE_SS_FIRST | SGX_EMA_PAGE_TYPE_SS_REST) +#define TRIM_TO(x, align) ((x) & ~(align-1)) +sgx_mm_mutex *mm_lock = NULL; + +//!FIXME: assume user and system EMAs are not interleaved +// user EMAs are above the last system EMA +int mm_alloc_internal(void *addr, size_t size, int flags, + sgx_enclave_fault_handler_t handler, + void *private, void **out_addr, ema_root_t* root) +{ + int status = -1; + size_t tmp_addr = 0; + ema_t *node = NULL, *next_ema = NULL; + bool ret = false; + + uint32_t alloc_flags = (uint32_t)flags & SGX_EMA_ALLOC_FLAGS_MASK; + //Must have one of these: + if (!(alloc_flags & (SGX_EMA_RESERVE | SGX_EMA_COMMIT_NOW | SGX_EMA_COMMIT_ON_DEMAND))) + return EINVAL; + + uint64_t page_type = (uint64_t)flags & SGX_EMA_PAGE_TYPE_MASK; + if ((uint64_t)(~LEGAL_ALLOC_PAGE_TYPE) & page_type) return EINVAL; + if (page_type == 0) page_type = SGX_EMA_PAGE_TYPE_REG; + + if (size % SGX_PAGE_SIZE) return EINVAL; + + uint8_t align_flag = (uint8_t) (((uint32_t)flags & SGX_EMA_ALIGNMENT_MASK) >> SGX_EMA_ALIGNMENT_SHIFT); + if (align_flag == 0) align_flag = 12; + if (align_flag < 12) + return EINVAL; + + uint64_t align_mask = (uint64_t)(1ULL << align_flag) - 1ULL; + + tmp_addr = (size_t) addr; + //If an address is given, user must align it + if ((tmp_addr & align_mask)) + return EINVAL; + if (addr && (!sgx_mm_is_within_enclave(addr, size))) + return EACCES; + + if(sgx_mm_mutex_lock(mm_lock)) + return EFAULT; + + if (ema_root_empty(&g_rts_ema_root)){ + //the rts is not initialized + status = EFAULT; + goto unlock; + } + + uint64_t si_flags = (uint64_t)SGX_EMA_PROT_READ_WRITE | page_type ; + if (alloc_flags & SGX_EMA_RESERVE) + { + si_flags = SGX_EMA_PROT_NONE; + } + + if (tmp_addr) { + bool fixed_alloc = (alloc_flags & SGX_EMA_FIXED); + bool in_system_but_not_allowed = false; + size_t end = tmp_addr + size; + size_t start = tmp_addr; + if(root != &g_rts_ema_root && + ema_exist_in(&g_rts_ema_root, start, size)) + { + in_system_but_not_allowed = true; + if(fixed_alloc){ + status = EPERM; + goto unlock; + } + } + ema_t* first = NULL; + ema_t* last = NULL; + bool exist_in_root = !search_ema_range(root, start, end, &first, &last); + + if(exist_in_root){ + // Use the reserved space earlier + node = ema_realloc_from_reserve_range(first, last, start, end, + alloc_flags, si_flags, + handler, private); + if (node){ + goto alloc_action; + } + //can't fit with the address but fixed alloc is asked + if (fixed_alloc) { + status = EEXIST; + goto unlock; + } + // Not a fixed alloc, + // fall through to find a free space anywhere + assert(!ret); + } else { + // No existing ema overlapping with requested range + // Use the address unless it is not allowed by rts + if(!in_system_but_not_allowed){ + // make sure not in rts if this is user + ret = find_free_region_at(root, + tmp_addr, size, &next_ema); + } + //We can't use the address, fall through + } + } + // At this point, ret == false means: + // Either no address given or the given address can't be used + if (!ret) + ret = find_free_region(root, + size, (1ULL << align_flag), &tmp_addr, &next_ema); + if (!ret) { + status = ENOMEM; + goto unlock; + } +/************************************************** +* create and operate on a new node +***************************************************/ + 
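// No reserved range could be reused above, so at this point tmp_addr holds
+ // a free, suitably aligned address: either the caller-requested address
+ // validated by find_free_region_at(), or one chosen by find_free_region().
+ // next_ema marks where the new node is inserted; ema_do_alloc() below then
+ // performs the actual reservation/commit according to alloc_flags.
+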
assert(tmp_addr);//found address + assert(next_ema);//found where to insert + // create and insert the node + node = ema_new(tmp_addr, size, alloc_flags, si_flags, + handler, private, next_ema); + if (!node) { + status = ENOMEM; + goto unlock; + } +alloc_action: + assert(node); + status = ema_do_alloc(node); + if (status != 0) { + goto alloc_failed; + } + if (out_addr) { + *out_addr = (void *)tmp_addr; + } + status = 0; + goto unlock; +alloc_failed: + ema_destroy(node); + +unlock: + sgx_mm_mutex_unlock(mm_lock); + return status; +} + +int sgx_mm_alloc(void *addr, size_t size, int flags, + sgx_enclave_fault_handler_t handler, + void *private, void **out_addr) +{ + if (flags & SGX_EMA_SYSTEM) return EINVAL; + return mm_alloc_internal(addr, size, flags, + handler, private, out_addr, &g_user_ema_root); +} + +int mm_commit_internal(void *addr, size_t size, ema_root_t* root) +{ + int ret = EFAULT; + size_t start = (size_t)addr; + size_t end = start + size; + ema_t *first = NULL, *last = NULL; + + if(sgx_mm_mutex_lock(mm_lock)) return ret; + ret = search_ema_range(root, start, end, &first, &last); + if (ret < 0) { + ret = EINVAL; + goto unlock; + } + + ret = ema_do_commit_loop(first, last, start, end); +unlock: + sgx_mm_mutex_unlock(mm_lock); + return ret; +} + +int sgx_mm_commit(void *addr, size_t size) +{ + return mm_commit_internal(addr, size, &g_user_ema_root); +} + +int mm_uncommit_internal(void *addr, size_t size, ema_root_t* root) +{ + int ret = EFAULT; + size_t start = (size_t)addr; + size_t end = start + size; + ema_t *first = NULL, *last = NULL; + + if(sgx_mm_mutex_lock(mm_lock)) return ret; + ret = search_ema_range(root, start, end, &first, &last); + if (ret < 0) { + ret = EINVAL; + goto unlock; + } + + ret = ema_do_uncommit_loop(first, last, start, end); +unlock: + sgx_mm_mutex_unlock(mm_lock); + return ret; +} + +int sgx_mm_uncommit(void *addr, size_t size) +{ + return mm_uncommit_internal(addr, size, &g_user_ema_root); +} + +int mm_dealloc_internal(void *addr, size_t size, ema_root_t* root) +{ + int ret = EFAULT; + size_t start = (size_t)addr; + size_t end = start + size; + ema_t *first = NULL, *last = NULL; + + if(sgx_mm_mutex_lock(mm_lock)) return ret; + ret = search_ema_range(root, start, end, &first, &last); + if (ret < 0) { + ret = EINVAL; + goto unlock; + } + + ret = ema_do_dealloc_loop(first, last, start, end); +unlock: + sgx_mm_mutex_unlock(mm_lock); + return ret; +} + +int sgx_mm_dealloc(void *addr, size_t size) +{ + return mm_dealloc_internal(addr, size, &g_user_ema_root); +} + +int mm_commit_data_internal(void *addr, size_t size, uint8_t *data, int prot, ema_root_t* root) +{ + int ret = EFAULT; + size_t start = (size_t)addr; + size_t end = start + size; + ema_t *first = NULL, *last = NULL; + + if (size == 0) + return EINVAL; + if (size % SGX_PAGE_SIZE != 0) + return EINVAL; + if (start % SGX_PAGE_SIZE != 0) + return EINVAL; + if (((size_t)data) % SGX_PAGE_SIZE != 0) + return EINVAL; + if (((uint32_t)prot) & (uint32_t)(~SGX_EMA_PROT_MASK)) + return EINVAL; + if (!sgx_mm_is_within_enclave(data, size)) + return EINVAL; + + if(sgx_mm_mutex_lock(mm_lock)) return ret; + ret = search_ema_range(root, start, end, &first, &last); + + if (ret < 0) { + ret = EINVAL; + goto unlock; + } + + ret = ema_do_commit_data_loop(first, last, start, end, data, prot); +unlock: + sgx_mm_mutex_unlock(mm_lock); + return ret; +} + +int sgx_mm_commit_data(void *addr, size_t size, uint8_t *data, int prot) +{ + return mm_commit_data_internal (addr, size, data, prot, &g_user_ema_root); +} + +int 
mm_modify_type_internal(void *addr, size_t size, int type, ema_root_t* root) +{ + // for this API, TCS is the only valid page type + if (type != SGX_EMA_PAGE_TYPE_TCS) { + return EPERM; + } + + // TCS occupies only one page + if (size != SGX_PAGE_SIZE) { + return EINVAL; + } + int ret = EFAULT; + size_t start = (size_t)addr; + size_t end = start + size; + ema_t *first = NULL, *last = NULL; + + if (start % SGX_PAGE_SIZE != 0) + return EINVAL; + + if(sgx_mm_mutex_lock(mm_lock)) return ret; + ret = search_ema_range(root, start, end, &first, &last); + + if (ret < 0) { + ret = EINVAL; + goto unlock; + } + + // one page only, covered by a single ema node + assert(ema_next(first) == last); + ret = ema_change_to_tcs(first, (size_t)addr); +unlock: + sgx_mm_mutex_unlock(mm_lock); + return ret; +} + +int sgx_mm_modify_type(void *addr, size_t size, int type) +{ + return mm_modify_type_internal(addr, size, type, &g_user_ema_root); +} + +int mm_modify_permissions_internal(void *addr, size_t size, int prot, ema_root_t* root) +{ + int ret = EFAULT; + size_t start = (size_t)addr; + size_t end = start + size; + + if (size == 0) return EINVAL; + if (size % SGX_PAGE_SIZE) return EINVAL; + if (start % SGX_PAGE_SIZE) return EINVAL; + + ema_t *first = NULL, *last = NULL; + + if(sgx_mm_mutex_lock(mm_lock)) return ret; + ret = search_ema_range(root, start, end, &first, &last); + if (ret < 0) { + ret = EINVAL; + goto unlock; + } + ret = ema_modify_permissions_loop(first, last, start, end, prot); +unlock: + sgx_mm_mutex_unlock(mm_lock); + return ret; +} + +int sgx_mm_modify_permissions(void *addr, size_t size, int prot) +{ + return mm_modify_permissions_internal(addr, size, prot, &g_user_ema_root); +} + +int sgx_mm_enclave_pfhandler(const sgx_pfinfo *pfinfo) +{ + int ret = SGX_MM_EXCEPTION_CONTINUE_SEARCH; + size_t addr = TRIM_TO((pfinfo->maddr), SGX_PAGE_SIZE); + if(sgx_mm_mutex_lock(mm_lock)) return ret; + ema_t *ema = search_ema(&g_user_ema_root, addr); + if (!ema) { + ema = search_ema(&g_rts_ema_root, addr); + if(!ema) + goto unlock; + } + void* data = NULL; + sgx_enclave_fault_handler_t eh = ema_fault_handler(ema, &data); + if(eh){ + //don't hold the lock as handlers can longjmp + sgx_mm_mutex_unlock(mm_lock); + return eh(pfinfo, data); + } + if (ema_page_committed(ema, addr)) + { + if (is_ema_transition(ema)) + {//as long as permissions expected, transition will be done + // TODO: check EXEC? + //This is never reached because of global lock + if ((pfinfo->pfec.rw == 0 && 0 == (get_ema_si_flags(ema) & SGX_EMA_PROT_READ)) || + (pfinfo->pfec.rw == 1 && 0 == (get_ema_si_flags(ema) & SGX_EMA_PROT_WRITE))) + { + ret = SGX_MM_EXCEPTION_CONTINUE_SEARCH; + } + else + ret = SGX_MM_EXCEPTION_CONTINUE_EXECUTION; + } + goto unlock; + } + if (get_ema_alloc_flags(ema) & SGX_EMA_COMMIT_ON_DEMAND) + { + if ((pfinfo->pfec.rw == 0 && 0 == (get_ema_si_flags(ema) & SGX_EMA_PROT_READ)) || + (pfinfo->pfec.rw == 1 && 0 == (get_ema_si_flags(ema) & SGX_EMA_PROT_WRITE))) { + ret = SGX_MM_EXCEPTION_CONTINUE_SEARCH; + goto unlock; + } + + //!TODO: Check GROWSUP/GROWSDOWN flags and optimize accordingly. 
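+ // Commit-on-demand fault: EACCEPT exactly the faulting page via
+ // ema_do_commit(); the kernel is expected to EAUG the backing EPC page
+ // either on the original #PF or when the EACCEPT itself faults. If the
+ // commit fails the region state is unrecoverable, so abort() rather than
+ // resume execution.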
+ if (ema_do_commit(ema, addr, addr + SGX_PAGE_SIZE)){ + sgx_mm_mutex_unlock(mm_lock); + abort(); + } + + ret = SGX_MM_EXCEPTION_CONTINUE_EXECUTION; + goto unlock; + } + else + { + sgx_mm_mutex_unlock(mm_lock); + //we found the EMA and nothing should cause the PF + //Can't continue as we know something is wrong + abort(); + } + + ret = SGX_MM_EXCEPTION_CONTINUE_SEARCH; +unlock: + sgx_mm_mutex_unlock(mm_lock); + return ret; +} + +void sgx_mm_init(void) +{ + mm_lock = sgx_mm_mutex_create(); + sgx_mm_register_pfhandler(sgx_mm_enclave_pfhandler); +} diff --git a/sdk/emm/sgx_primitives.S b/sdk/emm/sgx_primitives.S new file mode 100644 index 000000000..397e7d08b --- /dev/null +++ b/sdk/emm/sgx_primitives.S @@ -0,0 +1,86 @@ +/* + * Copyright (C) 2011-2021 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + +#define SE_EACCEPT 5 +#define SE_EMODPE 6 +#define SE_EACCEPTCOPY 7 + +.macro ENCLU +.byte 0x0f, 0x01, 0xd7 +.endm + +.macro SE_PROLOG + .cfi_startproc + + push %rbx + push %rcx + push %rdx + movq %rdi, %rbx + movq %rsi, %rcx + +.endm + + +.macro SE_EPILOG + pop %rdx + pop %rcx + pop %rbx + + ret + .cfi_endproc +.endm + + +.macro DECLARE_GLOBAL_FUNC name + .globl \name + .type \name, @function +\name: +.endm + + +DECLARE_GLOBAL_FUNC do_eaccept + SE_PROLOG + mov $SE_EACCEPT, %eax + ENCLU + SE_EPILOG + +DECLARE_GLOBAL_FUNC do_eacceptcopy + SE_PROLOG + mov $SE_EACCEPTCOPY, %eax + ENCLU + SE_EPILOG + +DECLARE_GLOBAL_FUNC do_emodpe + SE_PROLOG + mov $SE_EMODPE, %eax + ENCLU + SE_EPILOG + diff --git a/sdk/emm/ut/Makefile b/sdk/emm/ut/Makefile new file mode 100644 index 000000000..bf67ad4e6 --- /dev/null +++ b/sdk/emm/ut/Makefile @@ -0,0 +1,55 @@ +# +# Copyright (C) 2011-2021 Intel Corporation. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# + +include ../../../buildenv.mk + +CPPFLAGS += -I$(COMMON_DIR)/inc \ + -I$(COMMON_DIR)/inc/internal \ + -DTEST=1 -g + +.PHONY: all clean + +all: test_bit_array test_ema test_public test_emm + +test_bit_array: test_bit_array.c ../bit_array.c + @$(CC) $(CPPFLAGS) $^ -o $@ + +test_ema: test_ema.c stub.c ../ema.c ../bit_array.c + @$(CC) $(CPPFLAGS) $^ -o $@ + +test_public: test_public.c stub.c ../sgx_mm.c ../ema.c ../bit_array.c + @$(CC) $(CPPFLAGS) $^ -o $@ + +test_emm: test_emm.c stub.c ../emm_private.c ../sgx_mm.c ../ema.c ../bit_array.c + @$(CC) $(CPPFLAGS) $^ -o $@ + +clean: + @$(RM) test_bit_array test_ema test_public test_emm diff --git a/sdk/trts/trts_trim.cpp b/sdk/emm/ut/stub.c similarity index 56% rename from sdk/trts/trts_trim.cpp rename to sdk/emm/ut/stub.c index de447033a..2c3489557 100644 --- a/sdk/trts/trts_trim.cpp +++ b/sdk/emm/ut/stub.c @@ -29,58 +29,80 @@ * */ +#include "../sgx_mm_primitives.h" +#include "../sgx_mm_rt_abstraction.h" -#include "trts_trim.h" -#include "sgx_trts.h" // for sgx_ocalloc, sgx_is_outside_enclave -#include "internal/rts.h" +struct _sgx_mm_mutex { + void *impl; +} g_mm_lock; -/* sgx_ocfree() just restores the original outside stack pointer. 
*/ -#define OCALLOC(val, type, len) do { \ - void* __tmp = sgx_ocalloc(len); \ - if (__tmp == NULL) { \ - sgx_ocfree(); \ - return SGX_ERROR_UNEXPECTED;\ - } \ - (val) = (type)__tmp; \ -} while (0) - -typedef struct ms_trim_range_ocall_t { - size_t ms_fromaddr; - size_t ms_toaddr; -} ms_trim_range_ocall_t; - -typedef struct ms_trim_range_commit_ocall_t { - size_t ms_addr; -} ms_trim_range_commit_ocall_t; +int do_eaccept(const sec_info_t* si, size_t addr) +{ + return 0; +} -sgx_status_t SGXAPI trim_range_ocall(size_t fromaddr, size_t toaddr) +int do_eacceptcopy(const sec_info_t* si, size_t addr, size_t src) { - sgx_status_t status = SGX_SUCCESS; + return 0; +} - ms_trim_range_ocall_t* ms; - OCALLOC(ms, ms_trim_range_ocall_t*, sizeof(*ms)); +int do_emodpe(const sec_info_t* si, size_t addr) +{ + return 0; +} - ms->ms_fromaddr = fromaddr; - ms->ms_toaddr = toaddr; - status = sgx_ocall(EDMM_TRIM, ms); +int sgx_mm_alloc_ocall(size_t addr, size_t length, int flags) +{ + return 0; +} +int sgx_mm_modify_ocall(size_t addr, size_t length, int flags_from, int flags_to) +{ + return 0; +} - sgx_ocfree(); - return status; +size_t get_rts_base() +{ + return 0; } -sgx_status_t SGXAPI trim_range_commit_ocall(size_t addr) +size_t get_rts_end() { - sgx_status_t status = SGX_SUCCESS; + return 0x7FFFFF000000; +} - ms_trim_range_commit_ocall_t* ms; - OCALLOC(ms, ms_trim_range_commit_ocall_t*, sizeof(*ms)); +size_t get_user_base() +{ + return 0x7FFFFF000000; +} - ms->ms_addr = addr; - status = sgx_ocall(EDMM_TRIM_COMMIT, ms); +size_t get_user_end() +{ + return 0x7FFFFFFFFFFF; +} +bool sgx_mm_is_within_enclave(const void *ptr, size_t size){ + return true; +} - sgx_ocfree(); - return status; +sgx_mm_mutex* sgx_mm_mutex_create(void) +{ + return &g_mm_lock; +} +int sgx_mm_mutex_lock(sgx_mm_mutex *mutex) +{ + return 0; +} +int sgx_mm_mutex_unlock(sgx_mm_mutex *mutex) +{ + return 0; } +int sgx_mm_mutex_destroy(sgx_mm_mutex *mutex) +{ + return 0; +} +bool sgx_mm_register_pfhandler(sgx_mm_pfhandler_t pfhandler) +{ + return true; +} diff --git a/sdk/emm/ut/test_bit_array.c b/sdk/emm/ut/test_bit_array.c new file mode 100644 index 000000000..c329fcb88 --- /dev/null +++ b/sdk/emm/ut/test_bit_array.c @@ -0,0 +1,93 @@ +/* + * Copyright (C) 2011-2021 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include +#include "bit_array.h" + +int main() +{ + bit_array *ba = bit_array_new_set(31); + + assert(bit_array_size(ba) == 31); + + assert(bit_array_any(ba) == true); + assert(bit_array_none(ba) == false); + + assert(bit_array_test(ba, 0) == true); + assert(bit_array_test(ba, 15) == true); + assert(bit_array_test(ba, 18) == true); + assert(bit_array_test(ba, 24) == true); + assert(bit_array_test(ba, 30) == true); + + assert(bit_array_test_range(ba, 17, 10) == true); + assert(bit_array_all(ba) == true); + + bit_array_reset_range(ba, 17, 10); + assert(bit_array_test_range(ba, 17, 10) == false); + assert(bit_array_test(ba, 17) == false); + assert(bit_array_test(ba, 18) == false); + assert(bit_array_test(ba, 24) == false); + assert(bit_array_test(ba, 26) == false); + assert(bit_array_test(ba, 27) == true); + + bit_array_flip(ba, 30); + assert(bit_array_test(ba, 30) == false); + + bit_array *lo = NULL, *hi = NULL; + int ret = bit_array_split(ba, 0, &lo, &hi); + assert(ret == 0); + assert(lo == NULL); + assert(hi == ba); + + ret = bit_array_split(ba, 31, &lo, &hi); + assert(ret == 0); + assert(lo == ba); + assert(hi == NULL); + + ret = bit_array_split(ba, 17, &lo, &hi); + assert(lo == ba); + assert(ret == 0); + bit_array_delete(lo); + + bit_array *new_ba = NULL, *new_hi = NULL; + ret = bit_array_split(hi, 10, &new_ba, &new_hi); + assert(ret == 0); + assert(new_ba == hi); + assert(bit_array_size(new_ba) == 10); + assert(bit_array_none(new_ba) == true); + assert(bit_array_size(new_hi) == 4); + assert(bit_array_all(new_hi) == false); + assert(bit_array_test(new_hi, 3) == false); + + bit_array_delete(new_ba); + bit_array_delete(new_hi); + return 0; +} diff --git a/sdk/emm/ut/test_ema.c b/sdk/emm/ut/test_ema.c new file mode 100644 index 000000000..7ceb61408 --- /dev/null +++ b/sdk/emm/ut/test_ema.c @@ -0,0 +1,192 @@ +/* + * Copyright (C) 2011-2021 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include +#include +#include "ema.h" + +extern ema_root_t g_user_ema_root; +extern ema_root_t g_rts_ema_root; +struct ema_root_ { + ema_t *guard; +}; + +int main() +{ + //make sure rts is inited. + ema_t *node0 = ema_new(0, 0xFFF00000, + SGX_EMA_COMMIT_NOW, + SGX_EMA_PROT_READ_WRITE | SGX_EMA_PAGE_TYPE_REG, + NULL, NULL, g_rts_ema_root.guard); + + assert(node0); + // find_build/insert node + size_t addr = 0; + ema_t *next_ema = NULL; + bool ret = find_free_region(&g_user_ema_root, + 0x2000, SGX_PAGE_SIZE, &addr, &next_ema); + + assert(ret == true); + assert(addr == 0xFFF00000); + assert(next_ema == g_user_ema_root.guard); + + node0 = ema_new(0xFFF00000, 0x2000, + SGX_EMA_COMMIT_ON_DEMAND, + SGX_EMA_PROT_READ_WRITE | SGX_EMA_PAGE_TYPE_REG, + NULL, NULL, next_ema); + + + // find at/build/push_back node + + ret = find_free_region_at(&g_user_ema_root, 0xFFF03000, + 0x1000, + &next_ema); + + assert(ret == true); + assert(next_ema == g_user_ema_root.guard); + + ema_t *node1 = ema_new(0xFFF03000, 0x1000, + SGX_EMA_COMMIT_NOW, + SGX_EMA_PROT_READ_WRITE | SGX_EMA_PAGE_TYPE_REG, + NULL, NULL, g_user_ema_root.guard); + + + // negative case for find_at + ret = find_free_region_at(&g_user_ema_root, + 0xFFF02000, 0x3000, &next_ema); + + assert(ret == false); + assert(next_ema == NULL); + + + // find_at/build/insert node + ret = find_free_region_at(&g_user_ema_root, + 0xFFF06000, 0x3000, &next_ema); + + assert(ret == true); + assert(next_ema == g_user_ema_root.guard); + + ema_t *node2 = ema_new(0xFFF06000, 0x3000, + SGX_EMA_COMMIT_ON_DEMAND, + SGX_EMA_PROT_READ_WRITE | SGX_EMA_PAGE_TYPE_REG, + NULL, NULL, next_ema); + + // dump current nodes on the root + dump_ema_root(&g_user_ema_root); + + ema_t *first = NULL, *last = NULL; + + // search_ema_range #1 + int r = search_ema_range(&g_user_ema_root, + 0xFFF00000, 0xFFF06000, + &first, &last); + assert(r == 0); + assert(first == node0); + assert(last == node2); + + + // search_ema_range #2 + r = search_ema_range(&g_user_ema_root, + 0xFFF02000, 0xFFF06000, + &first, &last); + assert(r == 0); + assert(first == node1); + assert(last == node2); + + + // search_ema_range #3 + r = search_ema_range(&g_user_ema_root, + 0xFFF02000, 0xFFF09000, + &first, &last); + assert(r == 0); + assert(first == node1); + assert(last == g_user_ema_root.guard); + + // search_ema_range #4 + r = search_ema_range(&g_user_ema_root, + 0xFFF01000, 0xFFF05000, + &first, &last); + assert(r == 0); + assert(first == node0); + assert(last == node2); + + // negative case, middle address region + r = search_ema_range(&g_user_ema_root, + 0xFFF04000, 0xFFF05000, + &first, &last); + assert(r == -1); + assert(first == NULL); + assert(last == NULL); + + // negative case, front address region + r = search_ema_range(&g_user_ema_root, + 0xFFE00000, 0xFFF00000, + &first, &last); + assert(r == -1); + assert(first == NULL); + assert(last == NULL); + + // negative case, rear address region + r = search_ema_range(&g_user_ema_root, + 0xFFF0A000, 0xFFF0B000, + &first, 
&last); + assert(r == -1); + assert(first == NULL); + assert(last == NULL); + + + // ema split: split point is out of range + ema_t *tmp_node = NULL; + r = ema_split(node0, 0xFFE00000, true, &tmp_node); + assert(tmp_node == NULL); + + // ema split: split point is out of range + tmp_node = NULL; + r = ema_split(node0, 0xFFF02000, false, &tmp_node); + assert(tmp_node == NULL); + + // ema split: split point is within range + size_t node0_base = ema_base(node0); + tmp_node = NULL; + ema_split(node0, 0xFFF01000, true, &tmp_node); + assert(ema_next(tmp_node) == node0); + assert(ema_base(tmp_node) == node0_base); + assert(ema_size(tmp_node) == 0x1000); + + tmp_node = NULL; + r = ema_split_ex(node2, 0xFFF07000, 0xFFF08000, &tmp_node); + assert(ema_base(tmp_node) == 0xFFF07000); + assert(ema_size(tmp_node) == 0x1000); + dump_ema_root(&g_user_ema_root); + destroy_ema_root(&g_user_ema_root); + + return 0; +} diff --git a/sdk/emm/ut/test_emm.c b/sdk/emm/ut/test_emm.c new file mode 100644 index 000000000..a82330d81 --- /dev/null +++ b/sdk/emm/ut/test_emm.c @@ -0,0 +1,44 @@ +/* + * Copyright (C) 2011-2021 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include "emm_private.h" +#include "sgx_mm.h" + +int main() +{ + int ret = -1; + void* addr; + ret = mm_alloc((void *)0xFFFF0000, 0x10000, + SGX_EMA_COMMIT_NOW | SGX_EMA_GROWSUP, + NULL, NULL, &addr); + + return ret; +} diff --git a/sdk/emm/ut/test_public.c b/sdk/emm/ut/test_public.c new file mode 100644 index 000000000..64d15c243 --- /dev/null +++ b/sdk/emm/ut/test_public.c @@ -0,0 +1,43 @@ +/* + * Copyright (C) 2011-2021 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include "sgx_mm.h" + +int main() +{ + int ret = -1; + + ret = sgx_mm_alloc((void *)0xFFFF0000, 0x10000, + SGX_EMA_COMMIT_NOW | SGX_EMA_GROWSUP, + NULL, NULL, NULL); + + return ret; +} diff --git a/sdk/sign_tool/SignTool/enclave_creator_sign.cpp b/sdk/sign_tool/SignTool/enclave_creator_sign.cpp index b1585a4b6..9d5896e78 100644 --- a/sdk/sign_tool/SignTool/enclave_creator_sign.cpp +++ b/sdk/sign_tool/SignTool/enclave_creator_sign.cpp @@ -305,6 +305,15 @@ int EnclaveCreatorST::get_enclave_info(uint8_t *hash, int size, uint64_t *quota) return SGX_SUCCESS; } +int EnclaveCreatorST::alloc(uint64_t addr, uint64_t size, int flag) +{ + UNUSED(addr); + UNUSED(size); + UNUSED(flag); + + return SGX_SUCCESS; +} + int EnclaveCreatorST::emodpr(uint64_t addr, uint64_t size, uint64_t flag) { UNUSED(addr); @@ -337,14 +346,6 @@ int EnclaveCreatorST::trim_accept(uint64_t addr) return SGX_SUCCESS; } -int EnclaveCreatorST::remove_range(uint64_t fromaddr, uint64_t numpages) -{ - UNUSED(fromaddr); - UNUSED(numpages); - - return SGX_SUCCESS; -} - static EnclaveCreatorST g_enclave_creator_st; EnclaveCreator* g_enclave_creator = &g_enclave_creator_st; diff --git a/sdk/sign_tool/SignTool/enclave_creator_sign.h b/sdk/sign_tool/SignTool/enclave_creator_sign.h index dd23c83ef..4703d8fc8 100644 --- a/sdk/sign_tool/SignTool/enclave_creator_sign.h +++ b/sdk/sign_tool/SignTool/enclave_creator_sign.h @@ -56,12 +56,11 @@ class EnclaveCreatorST : public EnclaveCreator bool is_EDMM_supported(sgx_enclave_id_t enclave_id); bool is_driver_compatible(); int get_enclave_info(uint8_t *hash, int size, uint64_t *quota); + int alloc(uint64_t addr, uint64_t size, int flag); int emodpr(uint64_t addr, uint64_t size, uint64_t flag); int mktcs(uint64_t tcs_addr); int trim_range(uint64_t fromaddr, uint64_t toaddr); int trim_accept(uint64_t addr); - int remove_range(uint64_t fromaddr, uint64_t numpages); - private: uint8_t m_enclave_hash[SGX_HASH_SIZE]; EVP_MD_CTX *m_ctx; diff --git a/sdk/simulation/trtssim/linux/Makefile b/sdk/simulation/trtssim/linux/Makefile index 7a0a9bf52..b079adda3 100644 --- a/sdk/simulation/trtssim/linux/Makefile +++ b/sdk/simulation/trtssim/linux/Makefile @@ -66,8 +66,7 @@ TRTS1_OBJS := init_enclave.o \ trts_veh.o \ trts_xsave.o \ init_optimized_lib.o \ - trts_emodpr.o \ - trts_add_trim.o + TRTS2_OBJS := 
trts_nsp.o TRTS_OBJS := $(TRTS1_OBJS) $(TRTS2_OBJS) diff --git a/sdk/simulation/urtssim/enclave_creator_sim.cpp b/sdk/simulation/urtssim/enclave_creator_sim.cpp index f1da35951..233f63a9f 100644 --- a/sdk/simulation/urtssim/enclave_creator_sim.cpp +++ b/sdk/simulation/urtssim/enclave_creator_sim.cpp @@ -46,7 +46,7 @@ #include #include "sgx_enclave_common.h" #include - +#include #include #include #include @@ -309,6 +309,13 @@ bool EnclaveCreatorSim::get_plat_cap(sgx_misc_attribute_t *se_attr) return false; } +int EnclaveCreatorSim::alloc(uint64_t addr, uint64_t size, int flag) +{ + int ret = mprotect((void*)addr, size, flag); + if (ret) return errno; + return ret; +} + int EnclaveCreatorSim::emodpr(uint64_t addr, uint64_t size, uint64_t flag) { UNUSED(addr); diff --git a/sdk/simulation/urtssim/enclave_creator_sim.h b/sdk/simulation/urtssim/enclave_creator_sim.h index 8896d452c..47a75fc43 100644 --- a/sdk/simulation/urtssim/enclave_creator_sim.h +++ b/sdk/simulation/urtssim/enclave_creator_sim.h @@ -56,6 +56,7 @@ class EnclaveCreatorSim : public EnclaveCreator int trim_range(uint64_t fromaddr, uint64_t toaddr); int trim_accept(uint64_t addr); int remove_range(uint64_t fromaddr, uint64_t numpages); + int alloc(uint64_t addr, uint64_t size, int flag); private: bool m_sig_registered; }; diff --git a/sdk/simulation/urtssim/linux/Makefile b/sdk/simulation/urtssim/linux/Makefile index f2be8b340..af6456f93 100644 --- a/sdk/simulation/urtssim/linux/Makefile +++ b/sdk/simulation/urtssim/linux/Makefile @@ -87,6 +87,7 @@ OBJ2 := urts.o \ sig_handler.o \ debugger_support.o \ get_thread_id.o \ + urts_emm.o \ urts_trim.o \ urts_emodpr.o diff --git a/sdk/tlibc/gen/sbrk.c b/sdk/tlibc/gen/sbrk.c index e05a93fed..9dff9a637 100644 --- a/sdk/tlibc/gen/sbrk.c +++ b/sdk/tlibc/gen/sbrk.c @@ -78,7 +78,8 @@ void* sbrk(intptr_t n) size_t prev_heap_used = heap_used; void * start_addr; size_t size = 0; - + assert((heap_used & (SE_PAGE_SIZE - 1)) == 0); + if (!heap_base) return (void *)(~(size_t)0); @@ -113,7 +114,8 @@ void* sbrk(intptr_t n) start_addr = (void *)((size_t)(heap_base) + heap_min_size); size = prev_heap_used - heap_min_size; } - int ret = trim_EPC_pages(start_addr, size >> SE_PAGE_SHIFT); + assert((size & (SE_PAGE_SIZE - 1)) == 0); + int ret = mm_uncommit(start_addr, size); if (ret != 0) { heap_used = prev_heap_used; @@ -131,6 +133,8 @@ void* sbrk(intptr_t n) there's no integer overflow here. 
*/ heap_ptr = (void *)((size_t)heap_base + (size_t)heap_used); + if(n==0) return heap_ptr; + heap_used += n; /* update g_peak_heap_used */ @@ -154,7 +158,8 @@ void* sbrk(intptr_t n) start_addr = (void *)((size_t)(heap_base) + heap_min_size); size = heap_used - heap_min_size; } - int ret = apply_EPC_pages(start_addr, size >> SE_PAGE_SHIFT); + assert((size & (SE_PAGE_SIZE - 1)) == 0); + int ret = mm_commit(start_addr, size); if (ret != 0) { heap_used = prev_heap_used; diff --git a/sdk/tmm_rsrv/sgx_rsrv_mem.cpp b/sdk/tmm_rsrv/sgx_rsrv_mem.cpp index c69323b8f..5f8776f47 100644 --- a/sdk/tmm_rsrv/sgx_rsrv_mem.cpp +++ b/sdk/tmm_rsrv/sgx_rsrv_mem.cpp @@ -42,7 +42,7 @@ #include #include #include - +#include "emm_private.h" #define SGX_PAGE_NOACCESS 0x01 // - #define SGX_PAGE_READONLY 0x02 // R #define SGX_PAGE_READWRITE 0x04 // RW @@ -184,7 +184,7 @@ void * sgx_alloc_rsrv_mem_ex(void *desired_addr, size_t length) size = rsrv_mem_committed - rsrv_mem_min_size; } // EACCEPT the new pages - int ret = apply_EPC_pages(start_addr, size >> SE_PAGE_SHIFT); + int ret = mm_commit(start_addr, size); if(ret != 0) { rsrv_mem_committed = prev_rsrv_mem_committed; @@ -282,7 +282,6 @@ int sgx_free_rsrv_mem(void * addr, size_t length) #include "global_data.h" -#include "trts_emodpr.h" static sgx_status_t tprotect_internal(size_t start, size_t size, si_flags_t perms) { @@ -300,37 +299,12 @@ static sgx_status_t tprotect_internal(size_t start, size_t size, si_flags_t perm { return SGX_ERROR_UNEXPECTED; } - - // EMODPE/EACCEPT requires OS level R permission for the page. Therefore, - // If target permission is NONE, we should change the OS level permission to NONE after EACCEPT - // If original permission is NONE, we should change the OS level permission before EMODPE if(pr_needed || pe_needed) { - // Ocall to EMODPR if target perm is not RWX and mprotect() if target perm is not NONE - ret = change_permissions_ocall(start, size, perms, EDMM_MODPR); - if (ret != SGX_SUCCESS) - abort(); - } - si_flags_t sf = perms|SI_FLAG_PR|SI_FLAG_REG; - - if(pe_needed) - { - if(emodpe_pages((void *)start, size / SE_PAGE_SIZE, sf)) + int rc = mm_modify_permissions((void*)start, size, (int)perms); + if (rc != 0) abort(); } - if(pr_needed && ((perms & (SI_FLAG_W|SI_FLAG_X)) != (SI_FLAG_W|SI_FLAG_X))) - { - // If the target permission to set is RWX, no EMODPR, hence no EACCEPT. 
- if(accept_modified_pages((void *)start, size / SE_PAGE_SIZE, sf)) - abort(); - } - if( pr_needed && perms == SI_FLAG_NONE ) - { - // If the target permission is NONE, ocall to mprotect() to change the OS permission - ret = change_permissions_ocall(start, size, perms, EDMM_MPROTECT); - if (ret != SGX_SUCCESS) - abort(); - } return ret; } diff --git a/sdk/tmm_rsrv/sgx_rsrv_mem_init.cpp b/sdk/tmm_rsrv/sgx_rsrv_mem_init.cpp index cc030c352..ae4f32730 100644 --- a/sdk/tmm_rsrv/sgx_rsrv_mem_init.cpp +++ b/sdk/tmm_rsrv/sgx_rsrv_mem_init.cpp @@ -33,7 +33,6 @@ #include "sgx_error.h" #include "stdint.h" - void *rsrv_mem_base __attribute__((section(RELRO_SECTION_NAME))) = NULL; size_t rsrv_mem_size __attribute__((section(RELRO_SECTION_NAME))) = 0; size_t rsrv_mem_min_size __attribute__((section(RELRO_SECTION_NAME))) = 0; diff --git a/sdk/trts/Makefile b/sdk/trts/Makefile index b5b8453f9..bf681fc05 100644 --- a/sdk/trts/Makefile +++ b/sdk/trts/Makefile @@ -48,18 +48,19 @@ OBJS1 := init_enclave.o \ trts.o \ trts_ecall.o \ trts_ocall.o \ + ema_init.o \ trts_util.o \ trts_veh.o \ trts_xsave.o \ init_optimized_lib.o \ trts_version.o \ - trts_trim.o \ - trts_emodpr.o \ trts_add_trim.o OBJS2 := trts_nsp.o -OBJS := $(OBJS1) $(OBJS2) +OBJS3 := ema_rt.o + +OBJS := $(OBJS1) $(OBJS2) $(OBJ3) all: $(OBJS) elf_parser @@ -70,6 +71,11 @@ $(OBJS1): %.o: %.cpp $(OBJS2): %.o: %.cpp $(CXX) -c $(TCXXFLAGS) $(CPPFLAGS) -fPIC $< -o $@ +$(OBJS3): %.o: %.c + $(CC) -c $(TCFLAGS) $(CFLAGS) -fPIC $< -o $@ + +.PHONY: elf_parser + .PHONY: elf_parser elf_parser: $(OBJS) $(MAKE) -C linux diff --git a/sdk/trts/ema_init.cpp b/sdk/trts/ema_init.cpp new file mode 100644 index 000000000..8bc297645 --- /dev/null +++ b/sdk/trts/ema_init.cpp @@ -0,0 +1,173 @@ +/* + * Copyright (C) 2011-2021 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ +#include "emm_private.h" +#include "metadata.h" +//#include "se_trace.h" +#include "util.h" +#include "se_page_attr.h" +#include "trts_internal.h" +#include "trts_util.h" + +#if 0 +void dump_layout_entry(layout_entry_t *entry) +{ + se_trace(SE_TRACE_DEBUG, "\t%s\n", __FUNCTION__); + se_trace(SE_TRACE_DEBUG, "\tEntry Id = %4u, %-16s, ", entry->id, + entry[entry->id & ~(GROUP_FLAG)]); + se_trace(SE_TRACE_DEBUG, "Page Count = %5u, ", entry->page_count); + se_trace(SE_TRACE_DEBUG, "Attributes = 0x%02X, ", entry->attributes); + se_trace(SE_TRACE_DEBUG, "Flags = 0x%016llX, ", entry->si_flags); + se_trace(SE_TRACE_DEBUG, "RVA = 0x%016llX -> ", entry->rva); +} + +void dump_layout_group(layout_t *layout) +{ + se_trace(SE_TRACE_DEBUG, "\tEntry Id(%2u) = %4u, %-16s, ", 0, + layout->entry.id, layout_id_str[layout->entry.id & ~(GROUP_FLAG)]); + se_trace(SE_TRACE_DEBUG, "Entry Count = %4u, ", layout->group.entry_count); + se_trace(SE_TRACE_DEBUG, "Load Times = %u, ", layout->group.load_times); + se_trace(SE_TRACE_DEBUG, "LStep = 0x%016llX\n", layout->group.load_step); +} +#endif + +static int build_rts_context_nodes(layout_entry_t *entry, uint64_t offset) +{ + uint64_t rva = offset + entry->rva; + assert(IS_PAGE_ALIGNED(rva)); + + size_t addr = (size_t)get_enclave_base() + rva; + size_t size = entry->page_count << SE_PAGE_SHIFT; + size_t enclave_end = (size_t)get_enclave_base() + get_enclave_size(); + + // entry is guard page or has EREMOVE, build a reserved ema + if ((entry->si_flags == 0) || + (entry->attributes & PAGE_ATTR_EREMOVE)) {//TODO:is EREMOVE EVER used for sgx2? + /*********************** + Intel SDK specific. Last guard page area fills up remaining enclave space + we cut off to leave space for user. + ************************/ + if((addr + size) == enclave_end) + size = 0x10000; + int ret = mm_init_ema((void*)addr, + size, + SGX_EMA_RESERVE | SGX_EMA_SYSTEM, + SGX_EMA_PROT_NONE, + NULL, NULL); + if (ret) { + return SGX_ERROR_UNEXPECTED; + } + return SGX_SUCCESS; + } + bool post_remove = (entry->attributes & PAGE_ATTR_POST_REMOVE); + bool post_add = (entry->attributes & PAGE_ATTR_POST_ADD); + bool static_min = (entry->attributes & PAGE_ATTR_EADD) && (!post_remove); + + if(post_remove) + { + if( mm_init_ema((void*)addr, size, SGX_EMA_SYSTEM, + SGX_EMA_PROT_READ_WRITE, + NULL, NULL)) + return SGX_ERROR_UNEXPECTED; + if( 0 != mm_dealloc((void*)addr, size)) + return SGX_ERROR_UNEXPECTED; + //fall through for POST_ADD to realloc as COMMIT_ON_DEMAND + } + + if (post_add) { + // build commit-on-demand ema node + uint32_t commit_direction = SGX_EMA_GROWSUP; + uint32_t type = SGX_EMA_PAGE_TYPE_REG; + + if (entry->id == LAYOUT_ID_STACK_MAX || + entry->id == LAYOUT_ID_STACK_DYN_MAX || + entry->id == LAYOUT_ID_STACK_DYN_MIN) { + commit_direction = SGX_EMA_GROWSDOWN; + } + + int ret = mm_alloc((void*)addr, + entry->page_count << SE_PAGE_SHIFT, + SGX_EMA_COMMIT_ON_DEMAND | commit_direction + | SGX_EMA_SYSTEM | SGX_EMA_FIXED | type, + NULL, NULL, NULL); + if (ret) { + return SGX_ERROR_UNEXPECTED; + } + + } else if (static_min) { + // build static ema node + int type = SGX_EMA_PAGE_TYPE_REG; + int prot = entry->si_flags & (SGX_EMA_PROT_MASK); + + if (entry->id == LAYOUT_ID_TCS) { + type = SGX_EMA_PAGE_TYPE_TCS; + prot = SGX_EMA_PROT_NONE; + } + int ret = mm_init_ema((void*)addr, + size, + SGX_EMA_SYSTEM | type, + prot, + NULL, + NULL); + if (ret) { + return SGX_ERROR_UNEXPECTED; + } + + } + return SGX_SUCCESS; +} + +extern "C" int init_rts_contexts_emas(layout_t *start, layout_t *end, uint64_t 
delta) +{ + int ret = SGX_ERROR_UNEXPECTED; + + for(layout_t *layout = start; layout < end; layout++) { + //se_trace(SE_TRACE_DEBUG, "%s, step = 0x%016llX\n", __FUNCTION__, delta); + + if (!IS_GROUP_ID(layout->group.id)) { + ret = build_rts_context_nodes(&layout->entry, delta); + if (ret != SGX_SUCCESS) { + return ret; + } + } else { + uint64_t step = 0; + for(uint32_t i = 0; i < layout->group.load_times; i++) { + step += layout->group.load_step; + ret = init_rts_contexts_emas(&layout[-layout->group.entry_count], + layout, step); + if (ret != SGX_SUCCESS) { + return ret; + } + } + } + } + return SGX_SUCCESS; +} + diff --git a/sdk/trts/ema_rt.c b/sdk/trts/ema_rt.c new file mode 100644 index 000000000..a3ce79eba --- /dev/null +++ b/sdk/trts/ema_rt.c @@ -0,0 +1,194 @@ +/* + * Copyright (C) 2011-2021 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ +#include "sgx_thread.h" +#include +#include +#include +#include "sgx_trts.h" // for sgx_ocalloc, sgx_is_outside_enclave +#include "arch.h" +#include "sgx_edger8r.h" // for sgx_ocall etc. 
+#include "internal/rts.h" +#include "sgx_mm_rt_abstraction.h" +#define OCALLOC(val, type, len) do { \ + void* __tmp = sgx_ocalloc(len); \ + if (__tmp == NULL) { \ + sgx_ocfree(); \ + return SGX_ERROR_UNEXPECTED;\ + } \ + (val) = (type)__tmp; \ +} while (0) + +typedef struct ms_emm_alloc_ocall_t { + int retval; + size_t addr; + size_t size; + int flags; +} ms_emm_alloc_ocall_t; + +int SGXAPI sgx_mm_alloc_ocall(size_t addr, size_t size, int flags) +{ +#ifdef SE_SIM + (void)addr; + (void)size; + (void)flags; + return 0; +#else + int status = SGX_SUCCESS; + int ret = EFAULT; + ms_emm_alloc_ocall_t* ms; + OCALLOC(ms, ms_emm_alloc_ocall_t*, sizeof(*ms)); + + ms->addr = (size_t)addr; + ms->size = size; + ms->flags = flags; + + status = sgx_ocall((unsigned int)EDMM_ALLOC, ms); + if(status == SGX_SUCCESS) + ret = ms->retval; + + sgx_ocfree(); + return ret; +#endif +} + +typedef struct ms_emm_modify_ocall_t { + int retval; + size_t addr; + size_t size; + int flags_from; + int flags_to; +} ms_emm_modify_ocall_t; + +int SGXAPI sgx_mm_modify_ocall(size_t addr, size_t size, int flags_from, int flags_to) +{ +#ifdef SE_SIM + (void)addr; + (void)size; + (void)flags_from; + (void)flags_to; + return 0; +#else + int status = SGX_SUCCESS; + int ret = EFAULT; + ms_emm_modify_ocall_t* ms; + OCALLOC(ms, ms_emm_modify_ocall_t*, sizeof(*ms)); + + ms->addr = (size_t)addr; + ms->size = size; + ms->flags_from = flags_from; + ms->flags_to = flags_to; + status = sgx_ocall((unsigned int)EDMM_MODIFY, ms); + if(status == SGX_SUCCESS) + ret = ms->retval; + + sgx_ocfree(); + return ret; +#endif +} + +extern sgx_mm_pfhandler_t g_mm_pfhandler; + +bool sgx_mm_register_pfhandler(sgx_mm_pfhandler_t pfhandler) +{ + if (g_mm_pfhandler != NULL) + return false; + else + { + g_mm_pfhandler = pfhandler; + return true; + } +} + +bool sgx_mm_unregister_pfhandler(sgx_mm_pfhandler_t pfhandler) +{ + if (g_mm_pfhandler != pfhandler) + return false; + g_mm_pfhandler = NULL; + return true; +} + +typedef struct _sgx_mm_mutex { + sgx_thread_mutex_t m; +}sgx_mm_mutex; + +static int sgx_mm_mutex_init(sgx_mm_mutex* mutex) +{ + //Recursive locks needed for cases when exception happens in + // mm_x_internal functions while lock being held. For example, + // stack expansion/heap expansion during those calls as we use + // regular enclave stack and heap for internal processing and + // book keeping. + mutex->m = (sgx_thread_mutex_t)SGX_THREAD_RECURSIVE_MUTEX_INITIALIZER; + return 0; +} + +sgx_mm_mutex *sgx_mm_mutex_create() +{ + sgx_mm_mutex *mutex = (sgx_mm_mutex *)malloc(sizeof(sgx_mm_mutex)); + if (!mutex) { + return NULL; + } + sgx_mm_mutex_init(mutex); + return mutex; +} + +int sgx_mm_mutex_lock(sgx_mm_mutex* mutex) +{ + assert(mutex != NULL); + //!FIXME + //Intel SDK does not have + // WAKE/WAIT event ocalls as builtins. And TCS + // pages are addred in a "utility" thread which + // does not have those in ocall table for the ecall. + // Therefore we must not make ocalls for synchronization. 
+ // OE has builtin ocalls for wait/wake so no trylock needed + while ( sgx_thread_mutex_trylock(&mutex->m)); + return 0; +} + +int sgx_mm_mutex_unlock(sgx_mm_mutex* mutex) +{ + assert(mutex != NULL); + return sgx_thread_mutex_unlock(&mutex->m); +} + +int sgx_mm_mutex_destroy(sgx_mm_mutex* mutex) +{ + assert(mutex != NULL); + int ret = sgx_thread_mutex_destroy(&mutex->m); + free(mutex); + return ret; +} + +bool sgx_mm_is_within_enclave(const void* addr, size_t size) +{ + return sgx_is_within_enclave(addr, size); +} diff --git a/sdk/trts/init_enclave.cpp b/sdk/trts/init_enclave.cpp index 8d303d483..1680c754c 100644 --- a/sdk/trts/init_enclave.cpp +++ b/sdk/trts/init_enclave.cpp @@ -33,7 +33,7 @@ /** * File: init_enclave.cpp * Description: - * Initialize enclave by rebasing the image to the enclave base + * Initialize enclave by rebasing the image to the enclave base */ #include @@ -51,7 +51,7 @@ #include "se_memcpy.h" #include "se_cpu_feature.h" #include "se_version.h" - +#include "sgx_mm_rt_abstraction.h" // The global cpu feature bits from uRTS uint64_t g_cpu_feature_indicator __attribute__((section(RELRO_SECTION_NAME))) = 0; int EDMM_supported __attribute__((section(RELRO_SECTION_NAME))) = 0; @@ -77,7 +77,9 @@ extern sgx_status_t pcl_entry(void* enclave_base,void* ms) __attribute__((weak)) extern "C" int init_enclave(void *enclave_base, void *ms) __attribute__((section(".nipx"))); extern "C" int rsrv_mem_init(void *_rsrv_mem_base, size_t _rsrv_mem_size, size_t _rsrv_mem_min_size); - +extern "C" int init_segment_emas(void* enclave_base); +extern "C" int init_rts_contexts_emas(layout_t *start, layout_t *end, uint64_t delta); +extern "C" void sgx_mm_init(); // init_enclave() // Initialize enclave. // Parameters: @@ -169,7 +171,7 @@ extern "C" int init_enclave(void *enclave_base, void *ms) { return -1; } - + if (heap_init(get_heap_base(), get_heap_size(), get_heap_min_size(), EDMM_supported) != SGX_SUCCESS) return -1; @@ -200,7 +202,7 @@ extern "C" int init_enclave(void *enclave_base, void *ms) return -1; } } - else + else { if (0 != init_optimized_libs(cpu_features, NULL, xfrm)) { @@ -214,20 +216,18 @@ extern "C" int init_enclave(void *enclave_base, void *ms) return -1; } - + if(SGX_SUCCESS != sgx_read_rand((unsigned char*)&__stack_chk_guard, sizeof(__stack_chk_guard))) { return -1; } + + return 0; } -#ifndef SE_SIM -int accept_post_remove(const volatile layout_t *layout_start, const volatile layout_t *layout_end, size_t offset); -#endif - extern size_t rsrv_mem_min_size; sgx_status_t do_init_enclave(void *ms, void *tcs) @@ -254,9 +254,6 @@ sgx_status_t do_init_enclave(void *ms, void *tcs) /* for EDMM, we need to accept the trimming of the POST_REMOVE pages. 
*/ if (EDMM_supported) { - if (0 != accept_post_remove(&g_global_data.layout_table[0], &g_global_data.layout_table[0] + g_global_data.layout_entry_num, 0)) - return SGX_ERROR_UNEXPECTED; - size_t heap_min_size = get_heap_min_size(); memset_s(GET_PTR(void, enclave_base, g_global_data.heap_offset), heap_min_size, 0, heap_min_size); @@ -270,6 +267,18 @@ sgx_status_t do_init_enclave(void *ms, void *tcs) #endif g_enclave_state = ENCLAVE_INIT_DONE; + if (EDMM_supported) + { + sgx_mm_init(); + void* enclave_start = (void*)&__ImageBase; + if (init_segment_emas(enclave_start)) + return SGX_ERROR_UNEXPECTED; + int ret = init_rts_contexts_emas((layout_t*)g_global_data.layout_table, + (layout_t*)(g_global_data.layout_table + g_global_data.layout_entry_num), 0); + if (ret != SGX_SUCCESS) { + return SGX_ERROR_UNEXPECTED; + } + } return SGX_SUCCESS; } diff --git a/sdk/trts/linux/Makefile b/sdk/trts/linux/Makefile index 3ce7b24ab..3bc83ebd4 100644 --- a/sdk/trts/linux/Makefile +++ b/sdk/trts/linux/Makefile @@ -46,10 +46,11 @@ CXXFLAGS += -Werror $(ENCLAVE_CXXFLAGS) \ TCFLAGS += -nostdinc \ -I$(COMMON_DIR)/inc/tlibc/ + LDCFLAGS := -shared -nostdlib -nodefaultlibs -nostartfiles CPP_SRCS := $(wildcard ../*.cpp) -C_SRCS := $(wildcard *.c) +C_SRCS := $(wildcard *.c) $(wildcard ../*.c) ASM_SRCS := $(wildcard *.S) \ $(COMMON_DIR)/src/linux/xsave_gnu.S OBJS := $(CPP_SRCS:.cpp=.o) @@ -58,13 +59,22 @@ OBJS += $(ASM_SRCS:.S=.o) OBJS := $(sort $(OBJS)) LIBTRTS = libsgx_trts.a +LIBSGX_MM_PATH = $(LINUX_SDK_DIR)/emm +LIBSGX_MM = libsgx_mm.a .PHONY: all all: $(LIBTRTS) | $(BUILD_DIR) $(CP) $(LIBTRTS) $| -$(LIBTRTS): $(OBJS) +$(LIBSGX_MM): + $(MAKE) -C $(LIBSGX_MM_PATH) + +$(LIBTRTS): $(OBJS) $(LIBSGX_MM) $(AR) rsD $@ $(OBJS) + $(MKDIR) $(BUILD_DIR)/.libsgx_mm + $(RM) $(BUILD_DIR)/.libsgx_mm/* && cd $(BUILD_DIR)/.libsgx_mm && $(AR) x $(LIBSGX_MM_PATH)/libsgx_mm.a + $(AR) rsD $@ $(BUILD_DIR)/.libsgx_mm/*.o + @$(RM) -rf $(BUILD_DIR)/.libsgx_mm %.o: %.S echo $(ASM_SRCS) @@ -82,7 +92,7 @@ $(BUILD_DIR): .PHONY: clean clean: @$(RM) $(OBJS) $(LIBTRTS) $(BUILD_DIR)/$(LIBTRTS) - + @$(MAKE) -C $(LIBSGX_MM_PATH) clean .PHONY: rebuild rebuild: $(MAKE) clean diff --git a/sdk/trts/linux/elf_parser.c b/sdk/trts/linux/elf_parser.c index 52769932d..37d71ae88 100644 --- a/sdk/trts/linux/elf_parser.c +++ b/sdk/trts/linux/elf_parser.c @@ -39,9 +39,8 @@ #include "util.h" #include "elf_util.h" #include "global_data.h" -#include "../trts_emodpr.h" #include "trts_inst.h" - +#include "emm_private.h" static int elf_tls_aligned_virtual_size(const void *enclave_base, size_t *aligned_virtual_size); @@ -514,11 +513,11 @@ sgx_status_t change_protection(void *enclave_base) size_t end = (size_t)enclave_base + ((phdr->p_vaddr + phdr->p_memsz + SE_PAGE_SIZE - 1) & (size_t)(~(SE_PAGE_SIZE-1))); if (phdr->p_flags & PF_R) - perms |= SI_FLAG_R; + perms |= SGX_EMA_PROT_READ; if (phdr->p_flags & PF_X) - perms |= SI_FLAG_X; + perms |= SGX_EMA_PROT_EXEC; - if((status = trts_mprotect(start, end - start, perms)) != SGX_SUCCESS) + if(mm_modify_permissions((void*)start, end - start, (int)perms) != 0) return status; } @@ -527,7 +526,7 @@ sgx_status_t change_protection(void *enclave_base) size_t start = (size_t)enclave_base + (phdr->p_vaddr & (size_t)(~(SE_PAGE_SIZE-1))); size_t end = (size_t)enclave_base + ((phdr->p_vaddr + phdr->p_memsz + SE_PAGE_SIZE - 1) & (size_t)(~(SE_PAGE_SIZE-1))); if ((start != end) && - (status = trts_mprotect(start, end - start, SI_FLAG_R)) != SGX_SUCCESS) + mm_modify_permissions((void*)start, end - start, SGX_EMA_PROT_READ) != 0) return 
status; } } @@ -539,15 +538,42 @@ sgx_status_t change_protection(void *enclave_base) { if (g_global_data.layout_table[i].entry.id == LAYOUT_ID_RSRV_MIN && g_global_data.layout_table[i].entry.si_flags == SI_FLAGS_RWX && g_global_data.layout_table[i].entry.page_count > 0) { - if((status = trts_mprotect((size_t)((size_t)enclave_base + g_global_data.layout_table[i].entry.rva), - (size_t)g_global_data.layout_table[i].entry.page_count << SE_PAGE_SHIFT, - SI_FLAG_R|SI_FLAG_W)) != SGX_SUCCESS) + if(mm_modify_permissions((void*)((size_t)enclave_base + g_global_data.layout_table[i].entry.rva), + (size_t)g_global_data.layout_table[i].entry.page_count << SE_PAGE_SHIFT, + SGX_EMA_PROT_READ|SGX_EMA_PROT_WRITE) != 0) return status; break; } } - return SGX_SUCCESS; } +int init_segment_emas(void* enclave_base) +{ + + ElfW(Half) phnum = 0; + const ElfW(Ehdr) *ehdr = (const ElfW(Ehdr)*)enclave_base; + const ElfW(Phdr) *phdr = get_phdr(ehdr); + uint64_t perms; + if (phdr == NULL) return -1; + int text_relocation = has_text_relo(ehdr, phdr, ehdr->e_phnum); + for (; phnum < ehdr->e_phnum; phnum++, phdr++) + { + if (phdr->p_type == PT_LOAD) + { + perms = SGX_EMA_PROT_READ; + size_t start = (size_t)enclave_base + (phdr->p_vaddr & (size_t)(~(SE_PAGE_SIZE-1))); + size_t end = (size_t)enclave_base + ((phdr->p_vaddr + phdr->p_memsz + SE_PAGE_SIZE - 1) & (size_t)(~(SE_PAGE_SIZE-1))); + + if (phdr->p_flags & PF_W || text_relocation) + perms |= SGX_EMA_PROT_WRITE; + if (phdr->p_flags & PF_X) + perms |= SGX_EMA_PROT_EXEC; + + if (mm_init_ema((void*)start, end - start, SGX_EMA_SYSTEM | SGX_EMA_PAGE_TYPE_REG, (int)perms, NULL, NULL) != 0) + return -1; + } + } + return 0; +} /* vim: set ts=4 sw=4 et cin: */ diff --git a/sdk/trts/linux/trts_pic.S b/sdk/trts/linux/trts_pic.S index 92470a492..3236ca2cd 100644 --- a/sdk/trts/linux/trts_pic.S +++ b/sdk/trts/linux/trts_pic.S @@ -547,6 +547,14 @@ DECLARE_GLOBAL_FUNC do_eaccept jnz abort SE_EPILOG +DECLARE_GLOBAL_FUNC do_eacceptcopy + SE_PROLOG + mov $SE_EACCEPTCOPY, %eax + ENCLU + cmp $SGX_SUCCESS, %eax + jnz abort + SE_EPILOG + DECLARE_GLOBAL_FUNC do_emodpe SE_PROLOG mov $SE_EMODPE, %eax diff --git a/sdk/trts/trts_add_trim.cpp b/sdk/trts/trts_add_trim.cpp index 653c6e884..59e1e11a2 100644 --- a/sdk/trts/trts_add_trim.cpp +++ b/sdk/trts/trts_add_trim.cpp @@ -34,12 +34,13 @@ #include "sgx_utils.h" #include "trts_inst.h" #include "util.h" -#include "trts_trim.h" +#include "trts_emm.h" #include "trts_util.h" #include "global_data.h" #include "se_memcpy.h" #include "se_page_attr.h" #include "trts_internal.h" +#include "emm_private.h" #ifndef SE_SIM @@ -49,52 +50,6 @@ struct dynamic_flags_attributes uint16_t attributes; }; -// Low level API to EACCEPT pages on grow-up region. -static int sgx_accept_backward(si_flags_t sfl, size_t lo, size_t hi) -{ - size_t addr = hi; - SE_DECLSPEC_ALIGN(sizeof(sec_info_t)) sec_info_t si; - si.flags = sfl; - for (uint16_t i = 0; i < (sizeof(si.reserved)/sizeof(si.reserved[0])); i++) - si.reserved[i] = 0; - - while (lo < addr) - { - int rc = do_eaccept(&si, addr -= SE_PAGE_SIZE); - if (rc != 0) - abort(); - } - return 0; -} - -// Low level API to EACCEPT pages on grow-up region during exception handling. 
-static int sgx_accept_forward_within_exception(size_t lo, size_t hi) -{ - size_t addr = lo; - SE_DECLSPEC_ALIGN(sizeof(sec_info_t)) sec_info_t si; - -#ifdef DEBUG - unsigned int sp_value = 0; - asm("mov %%esp, %0;" : "=r" (sp_value) :); - if ((sp_value & (SE_PAGE_SIZE -1)) <= (SE_PAGE_SIZE - (STATIC_STACK_SIZE % SE_PAGE_SIZE))) - return SGX_ERROR_UNEXPECTED; -#endif - - si.flags = SI_FLAGS_RW | SI_FLAG_PENDING; - for (uint16_t i = 0; i < (sizeof(si.reserved)/sizeof(si.reserved[0])); i++) - si.reserved[i] = 0; - - while (addr < hi) - { - int rc = do_eaccept(&si, addr); - if (rc != 0) - abort(); - addr += SE_PAGE_SIZE; - } - - return 0; -} - const volatile layout_t *get_dynamic_layout_by_id(uint16_t id) { for(uint32_t i = 0; i < g_global_data.layout_entry_num; i++) @@ -107,34 +62,6 @@ const volatile layout_t *get_dynamic_layout_by_id(uint16_t id) return NULL; } -// EACCEPT trim requests when the enclave completes initialization. -int accept_post_remove(const volatile layout_t *layout_start, const volatile layout_t *layout_end, size_t offset) -{ - int ret = -1; - for (const volatile layout_t *layout = layout_start; layout < layout_end; layout++) - { - if (!IS_GROUP_ID(layout->group.id) && (layout->entry.attributes & PAGE_ATTR_POST_REMOVE)) - { - size_t start_addr = (size_t)layout->entry.rva + offset + (size_t)get_enclave_base(); - uint32_t page_count = layout->entry.page_count; - - if (0 != (ret = sgx_accept_forward(SI_FLAG_TRIM | SI_FLAG_MODIFIED, start_addr, start_addr + ((size_t)page_count << SE_PAGE_SHIFT)))) - return ret; - } - else if (IS_GROUP_ID(layout->group.id)) - { - size_t step = 0; - for(uint32_t j = 0; j < layout->group.load_times; j++) - { - step += (size_t)layout->group.load_step; - if(0 != (ret = accept_post_remove(&layout[-layout->group.entry_count], layout, step))) - return ret; - } - } - } - return 0; -} - static int check_heap_dyn_range(void *addr, size_t page_count, struct dynamic_flags_attributes *fa) { size_t heap_dyn_start, heap_dyn_size; @@ -303,132 +230,6 @@ uint32_t get_dynamic_stack_max_page() } #endif -int sgx_accept_forward(si_flags_t sfl, size_t lo, size_t hi) -{ -#ifdef SE_SIM - (void)sfl; - (void)lo; - (void)hi; - return 0; -#else - size_t addr = lo; - SE_DECLSPEC_ALIGN(sizeof(sec_info_t)) sec_info_t si; - si.flags = sfl; - for (uint16_t i = 0; i < (sizeof(si.reserved)/sizeof(si.reserved[0])); i++) - si.reserved[i] = 0; - - while (addr < hi) - { - int rc = do_eaccept(&si, addr); - if (rc != 0) - abort(); - addr += SE_PAGE_SIZE; - } - - return 0; -#endif -} - -// High level API to EACCEPT pages, mainly used in exception handling -// to deal with stack expansion. 
-int apply_pages_within_exception(void *start_address, size_t page_count) -{ -#ifdef SE_SIM - (void)start_address; - (void)page_count; - return 0; -#else - int rc; - - if (start_address == NULL) - return -1; - - if (check_dynamic_range(start_address, page_count, NULL, NULL) != 0) - return -1; - - size_t start = (size_t)start_address; - size_t end = start + (page_count << SE_PAGE_SHIFT); - - rc = sgx_accept_forward_within_exception(start, end); - - return rc; -#endif - -} - -// High level API to EACCEPT pages -int apply_EPC_pages(void *start_address, size_t page_count) -{ -#ifdef SE_SIM - (void)start_address; - (void)page_count; - return 0; -#else - int rc; - struct dynamic_flags_attributes fa; - - if (start_address == NULL) - return -1; - - if (check_dynamic_range(start_address, page_count, NULL, &fa) != 0) - return -1; - - size_t start = (size_t)start_address; - size_t end = start + (page_count << SE_PAGE_SHIFT); - - if (fa.attributes & PAGE_DIR_GROW_DOWN) - { - rc = sgx_accept_forward(SI_FLAGS_RW | SI_FLAG_PENDING, start, end); - } - else - { - rc = sgx_accept_backward(SI_FLAGS_RW | SI_FLAG_PENDING, start, end); - } - - return rc; -#endif -} - -// High level API to trim previously EAUG-ed pages. -int trim_EPC_pages(void *start_address, size_t page_count) -{ -#ifdef SE_SIM - (void)start_address; - (void)page_count; - return 0; -#else - int rc; - - if (start_address == NULL) - return -1; - - // check range - if (check_dynamic_range(start_address, page_count, NULL, NULL) != 0) - return -1; - - size_t start = (size_t)start_address; - size_t end = start + (page_count << SE_PAGE_SHIFT); - - // trim ocall - rc = trim_range_ocall(start, end); - assert(rc == 0); - - rc = sgx_accept_forward(SI_FLAG_TRIM | SI_FLAG_MODIFIED, start, end); - assert(rc == 0); - - // trim commit ocall - size_t i = start; - while (i < end) - { - rc = trim_range_commit_ocall(i); - assert(rc == 0); - i += SE_PAGE_SIZE; - } - - return rc; -#endif -} - // Create a thread dynamically. // It will add necessary pages and transform one of them into type TCS. 
sgx_status_t do_add_thread(void *ptcs) @@ -460,7 +261,7 @@ sgx_status_t do_add_thread(void *ptcs) const volatile layout_t *layout = get_dynamic_layout_by_id(id); if (layout && (layout->entry.attributes & PAGE_ATTR_DYN_THREAD)) { - ret = apply_EPC_pages((void *)(enclave_base + layout->entry.rva + offset), layout->entry.page_count); + ret = mm_commit((void *)(enclave_base + layout->entry.rva + offset), layout->entry.page_count << SE_PAGE_SHIFT); if (ret != 0) return SGX_ERROR_UNEXPECTED; } @@ -475,13 +276,7 @@ sgx_status_t do_add_thread(void *ptcs) tcs->ofs_base = (size_t)GET_PTR(size_t, (void *)tcs, tcs->ofs_base) - enclave_base; tcs->ogs_base = (size_t)GET_PTR(size_t, (void *)tcs, tcs->ogs_base) - enclave_base; - //OCALL for MKTCS - ret = sgx_ocall(0, tcs); - if (ret != 0) - return SGX_ERROR_UNEXPECTED; - - //EACCEPT for MKTCS - ret = sgx_accept_backward(SI_FLAG_TCS | SI_FLAG_MODIFIED, (size_t)tcs, (size_t)tcs + SE_PAGE_SIZE); + ret = mm_modify_type((void *)tcs, SE_PAGE_SIZE, SGX_EMA_PAGE_TYPE_TCS); if (ret != 0) return SGX_ERROR_UNEXPECTED; diff --git a/sdk/trts/trts_ecall.cpp b/sdk/trts/trts_ecall.cpp index 68f08c618..ff5a4c3dd 100644 --- a/sdk/trts/trts_ecall.cpp +++ b/sdk/trts/trts_ecall.cpp @@ -42,7 +42,8 @@ #include "global_init.h" #include "trts_internal.h" #include "trts_inst.h" -#include "trts_emodpr.h" +#include "trts_emm.h" +#include "sgx_mm.h" #include "trts_util.h" #include "metadata.h" # include "linux/elf_parser.h" @@ -51,6 +52,9 @@ #include "pthread_imp.h" #include "sgx_random_buffers.h" #include "se_page_attr.h" +#include "emm_private.h" + +extern "C" sgx_status_t change_protection(void *enclave_base); __attribute__((weak)) sgx_status_t _pthread_thread_run(void* ms) {UNUSED(ms); return SGX_SUCCESS;} __attribute__((weak)) bool _pthread_enabled() {return false;} @@ -550,7 +554,7 @@ sgx_status_t do_uninit_enclave(void *tcs) size_t start = (size_t)DEC_TCS_POINTER(tcs_node->tcs); size_t end = start + (1 << SE_PAGE_SHIFT); - int rc = sgx_accept_forward(SI_FLAG_TRIM | SI_FLAG_MODIFIED, start, end); + int rc = mm_dealloc((void*)start, end); if(rc != 0) { set_enclave_state(ENCLAVE_CRASHED); @@ -576,39 +580,3 @@ sgx_status_t do_uninit_enclave(void *tcs) return SGX_SUCCESS; } -extern sdk_version_t g_sdk_version; - -extern "C" sgx_status_t trts_mprotect(size_t start, size_t size, uint64_t perms) -{ - int rc = -1; - size_t page; - sgx_status_t ret = SGX_SUCCESS; - SE_DECLSPEC_ALIGN(sizeof(sec_info_t)) sec_info_t si; - - //Error return if start or size is not page-aligned or size is zero. - if (!IS_PAGE_ALIGNED(start) || (size == 0) || !IS_PAGE_ALIGNED(size)) - return SGX_ERROR_INVALID_PARAMETER; - if (g_sdk_version == SDK_VERSION_2_0) - { - ret = change_permissions_ocall(start, size, perms, EDMM_MODPR); - if (ret != SGX_SUCCESS) - return ret; - } - - si.flags = perms|SI_FLAG_REG|SI_FLAG_PR; - memset(&si.reserved, 0, sizeof(si.reserved)); - - for(page = start; page < start + size; page += SE_PAGE_SIZE) - { - do_emodpe(&si, page); - // If the target permission to set is RWX, no EMODPR, hence no EACCEPT. 
- if ((perms & (SI_FLAG_W|SI_FLAG_X)) != (SI_FLAG_W|SI_FLAG_X)) - { - rc = do_eaccept(&si, page); - if(rc != 0) - return (sgx_status_t)rc; - } - } - - return SGX_SUCCESS; -} diff --git a/sdk/trts/trts_emodpr.h b/sdk/trts/trts_emm.h similarity index 90% rename from sdk/trts/trts_emodpr.h rename to sdk/trts/trts_emm.h index 5cc69fef8..ec32d4e13 100644 --- a/sdk/trts/trts_emodpr.h +++ b/sdk/trts/trts_emm.h @@ -30,8 +30,8 @@ */ -#ifndef MPROTECT_T_H__ -#define MPROTECT_T_H__ +#ifndef TRTS_EMM_H__ +#define TRTS_EMM_H__ #include #include @@ -47,9 +47,9 @@ extern "C" { #endif -sgx_status_t SGXAPI change_permissions_ocall(size_t addr, size_t size, uint64_t epcm_perms, const int proc); +int SGXAPI sgx_mm_alloc_ocall(size_t addr, size_t size, int flags); -sgx_status_t change_protection(void *enclave_base); +int SGXAPI sgx_mm_modify_ocall(size_t addr, size_t size, int flags_from, int flags_to); #ifdef __cplusplus } diff --git a/sdk/trts/trts_shared_constants.h b/sdk/trts/trts_shared_constants.h index accbb7b54..ca5cc4ef1 100644 --- a/sdk/trts/trts_shared_constants.h +++ b/sdk/trts/trts_shared_constants.h @@ -47,7 +47,7 @@ #endif -#define STATIC_STACK_SIZE 688 +#define STATIC_STACK_SIZE 1024 #endif diff --git a/sdk/trts/trts_veh.cpp b/sdk/trts/trts_veh.cpp index c36c56ed4..6d9f6b19c 100644 --- a/sdk/trts/trts_veh.cpp +++ b/sdk/trts/trts_veh.cpp @@ -50,8 +50,8 @@ #include "trts_util.h" #include "trts_shared_constants.h" #include "se_cdefs.h" - - +#include "emm_private.h" +#include "sgx_mm_rt_abstraction.h" typedef struct _handler_node_t { uintptr_t callback; @@ -62,6 +62,7 @@ static handler_node_t *g_first_node = NULL; static sgx_spinlock_t g_handler_lock = SGX_SPINLOCK_INITIALIZER; static uintptr_t g_veh_cookie = 0; +sgx_mm_pfhandler_t g_mm_pfhandler = NULL; #define ENC_VEH_POINTER(x) (uintptr_t)(x) ^ g_veh_cookie #define DEC_VEH_POINTER(x) (sgx_exception_handler_t)((x) ^ g_veh_cookie) @@ -202,6 +203,19 @@ extern "C" __attribute__((regparm(1))) void internal_handle_exception(sgx_except goto failed_end; thread_data->exception_flag++; + if(info->exception_vector == SGX_EXCEPTION_VECTOR_PF && + (g_mm_pfhandler != NULL)) + { + thread_data->exception_flag--; + sgx_pfinfo* pfinfo = (sgx_pfinfo*)(&info->exinfo); + if(SGX_MM_EXCEPTION_CONTINUE_EXECUTION == g_mm_pfhandler(pfinfo)) + { + //instruction triggering the exception will be executed again. 
+ continue_execution(info); + } + //restore old flag, and fall thru + thread_data->exception_flag++; + } // read lock sgx_spin_lock(&g_handler_lock); @@ -289,7 +303,7 @@ static int expand_stack_by_pages(void *start_addr, size_t page_count) if ((start_addr == NULL) || (page_count == 0)) return -1; - ret = apply_pages_within_exception(start_addr, page_count); + ret = mm_commit(start_addr, page_count << SE_PAGE_SHIFT); return ret; } @@ -451,7 +465,14 @@ extern "C" sgx_status_t trts_handle_exception(void *tcs) info->cpu_context.r14 = ssa_gpr->r14; info->cpu_context.r15 = ssa_gpr->r15; #endif - + if (info->exception_vector == SGX_EXCEPTION_VECTOR_PF) + // FUTURE: info->exception_vector == SGX_EXCEPTION_VECTOR_GP) + { + misc_exinfo_t* exinfo = + (misc_exinfo_t*)((uint64_t)ssa_gpr - (uint64_t)MISC_BYTE_SIZE); + info->exinfo.faulting_address = exinfo->maddr; + info->exinfo.error_code = exinfo->errcd; + } new_sp = (uintptr_t *)sp; ssa_gpr->REG(ip) = (size_t)internal_handle_exception; // prepare the ip for 2nd phrase handling ssa_gpr->REG(sp) = (size_t)new_sp; // new stack for internal_handle_exception From 607421444fbd40cc46d969ab6ae590a377aab774 Mon Sep 17 00:00:00 2001 From: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> Date: Thu, 2 Dec 2021 01:11:10 -0800 Subject: [PATCH 03/96] emm: fix typos Signed-off-by: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> --- sdk/emm/README.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/sdk/emm/README.md b/sdk/emm/README.md index 3bca4ce64..ff39082a1 100644 --- a/sdk/emm/README.md +++ b/sdk/emm/README.md @@ -1,6 +1,6 @@ Introduction --------------------------------- -This directory contains a implementation of the Enclave Memory Manager proposed in [this PR](https://github.com/openenclave/openenclave/pull/3991) +This directory contains an implementation of the Enclave Memory Manager proposed in [this PR](https://github.com/openenclave/openenclave/pull/3991) The instructions here are for developing and testing the EMM functionality only. Consult the main README for general usages. @@ -21,7 +21,7 @@ $ git checkout sgx/sgx2_not_submitted_v1 - For step 6, modify .config to set "CONFIG_X86_SGX=y". #### Verify kernel build and EDMM support -At root of the kernel source repo, +At the root of the kernel source repo, ``` $ cd tools/testing/selftests/sgx/ && make #./test_sgx @@ -63,7 +63,7 @@ $ cd $repo_root/psw/urts/linux $ make $ cd /build/linux $ ln -s libsgx_enclave_common.so libsgx_enclave_common.so.1 -$ export LD_LIBRARY_PATH=/home/sdp/linux-sgx2-poc/build/linux/ +$ export LD_LIBRARY_PATH=/build/linux/ ``` #### To build and run API tests @@ -81,7 +81,7 @@ Limitations of current implementation --------------------------------------- 1. EMM holds a global recursive mutex for the whole duration of each API invocation. - No support for concurrent operations (modify type/permissions, commit and commit_data) on different regions. -2. EMM internally uses default heap and stack during its internal operations +2. EMM internally uses the default heap and stack during its internal operations - The initial heap and stack should be sufficient to bootstrap EMM initializations - Book-keeping for heap should be created when RTS is initialized. - RTS calls mm_init_ema to create region for the static heap (EADDed), and mm_alloc to reserve COMMIT_ON_DEMAND for dynamic heap. @@ -101,8 +101,8 @@ Limitations of current implementation Notes on Intel SDK specific implementation ----------------------------------------- 1. 
Intel SDK RTS abstraction layer mutex implementation is a spinlock because there is no built-in OCalls for wait/wake on OS event. -2. Intel SDK signing tool reserves all unused address space as guard pages, leaving no space for user allocation. In this implementation, we simply changed tRTS to leave majority of that space as free. In future, we may need change the signing tool to encode this info in metadata. +2. Intel SDK signing tool reserves all unused address space as guard pages, leaving no space for user allocation. In this implementation, we simply changed tRTS to leave majority of that space as free. In future, we may need change the signing tool to encode this info in the metadata. 3. API tests are built with Intel SDK. Though most of tests are RTS independent, the TCS related tests use hardcoded Intel thread context layout info. -4. All make files assumes linux-sgx repo layout and environment. +4. All make files assume linux-sgx repo layout and environment. From e6b2ff7d6625bf7f8b875ec3003b7749f9404155 Mon Sep 17 00:00:00 2001 From: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> Date: Thu, 2 Dec 2021 01:12:45 -0800 Subject: [PATCH 04/96] update IOCTL definitions for EDMM The ioctl numbers for EDMM functions are shifted in upstream patches: https://patchwork.kernel.org/project/intel-sgx/cover/cover.1638381245.git.reinette.chatre@intel.com/ Change those definitions accordingly in isgx_user.h Signed-off-by: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> --- psw/urts/linux/isgx_user.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/psw/urts/linux/isgx_user.h b/psw/urts/linux/isgx_user.h index e937aab80..0732dd099 100644 --- a/psw/urts/linux/isgx_user.h +++ b/psw/urts/linux/isgx_user.h @@ -109,11 +109,11 @@ enum sgx_page_flags { #define SGX_IOC_ENCLAVE_SET_ATTRIBUTE \ _IOW(SGX_MAGIC, 0x03, struct sgx_enclave_set_attribute) #define SGX_IOC_PAGE_MODP \ - _IOWR(SGX_MAGIC, 0x04, struct sgx_page_modp) + _IOWR(SGX_MAGIC, 0x05, struct sgx_page_modp) #define SGX_IOC_PAGE_MODT \ - _IOWR(SGX_MAGIC, 0x05, struct sgx_page_modt) + _IOWR(SGX_MAGIC, 0x06, struct sgx_page_modt) #define SGX_IOC_PAGE_REMOVE \ - _IOWR(SGX_MAGIC, 0x06, struct sgx_page_remove) + _IOWR(SGX_MAGIC, 0x07, struct sgx_page_remove) /* Legacy OOT driver support for EDMM */ #define SGX_IOC_ENCLAVE_EMODPR \ From 5afb5c5a8f507e1aa2b126beb6fb9768e926944a Mon Sep 17 00:00:00 2001 From: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> Date: Sat, 4 Dec 2021 18:59:45 -0800 Subject: [PATCH 05/96] emm: update kernel branch in README Signed-off-by: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> --- sdk/emm/README.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/sdk/emm/README.md b/sdk/emm/README.md index ff39082a1..2f45bc61f 100644 --- a/sdk/emm/README.md +++ b/sdk/emm/README.md @@ -4,7 +4,7 @@ This directory contains an implementation of the Enclave Memory Manager proposed The instructions here are for developing and testing the EMM functionality only. Consult the main README for general usages. -**Note:** This implementation is based on the current Linux kernel implementation posted [here](https://github.com/rchatre/linux/tree/sgx/sgx2_not_submitted_v1), which has not been finalized and upstreamed. As the kernel interfaces evolve, the EMM implementation and/or interface may change. 
+**Note:** This implementation is based on the current Linux kernel implementation posted [here](https://github.com/rchatre/linux/tree/sgx/sgx2_submitted_v1_plus_rwx), which has not been finalized and upstreamed. As the kernel interfaces evolve, the EMM implementation and/or interface may change. Prerequisites ------------------------------- @@ -16,8 +16,9 @@ On Ubuntu 18.04/20.04, follow the general instructions from [here](https://wiki. ``` $ git clone https://github.com/rchatre/linux.git $ cd linux -$ git checkout sgx/sgx2_not_submitted_v1 +$ git checkout sgx/sgx2_submitted_v1_plus_rwx ``` + - For step 6, modify .config to set "CONFIG_X86_SGX=y". #### Verify kernel build and EDMM support From 5cb4cf721da07da842ba39a15b9c05ad1053d4b2 Mon Sep 17 00:00:00 2001 From: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> Date: Tue, 8 Feb 2022 13:09:31 -0800 Subject: [PATCH 06/96] urts and enclave_common: update for edmm kernel patches v2 Signed-off-by: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> --- psw/enclave_common/sgx_mm_ocalls.cpp | 79 +++++++++++++++--- psw/urts/linux/edmm_utility.cpp | 4 +- psw/urts/linux/isgx_user.h | 115 +++++++++++++++++---------- 3 files changed, 140 insertions(+), 58 deletions(-) diff --git a/psw/enclave_common/sgx_mm_ocalls.cpp b/psw/enclave_common/sgx_mm_ocalls.cpp index 64a2b2ced..df6ac3891 100644 --- a/psw/enclave_common/sgx_mm_ocalls.cpp +++ b/psw/enclave_common/sgx_mm_ocalls.cpp @@ -81,21 +81,25 @@ uint64_t get_offset_for_address(uint64_t target_address) static int emodt(int fd, uint64_t addr, size_t length, uint64_t type) { - struct sgx_page_modt ioc; + struct sgx_enclave_modt ioc; if (length == 0) return EINVAL; - memset(&ioc, 0, sizeof(ioc)); SE_TRACE(SE_TRACE_DEBUG, "MODT for 0x%llX ( %llX ), type: 0x%llX\n", addr, length, type); memset(&ioc, 0, sizeof(ioc)); - ioc.type = type; + + sec_info_t sec_info; + memset(&sec_info, 0, sizeof(sec_info_t)); + sec_info.flags = SGX_EMA_PAGE_TYPE(type); + + ioc.secinfo = POINTER_TO_U64(&sec_info);; ioc.offset = get_offset_for_address(addr); ioc.length = SE_PAGE_SIZE;//TODO: change back to length do { - int ret = ioctl(fd, SGX_IOC_PAGE_MODT, &ioc); + int ret = ioctl(fd, SGX_IOC_ENCLAVE_MODIFY_TYPE, &ioc); //TODO: use error code if (ret && ioc.count == 0 && errno != EBUSY) { //total failure @@ -124,7 +128,7 @@ static int mktcs(int fd, uint64_t addr, size_t length) } static int trim_accept(int fd, uint64_t addr, size_t length) { - struct sgx_page_remove remove_ioc; + struct sgx_enclave_remove_pages remove_ioc; memset(&remove_ioc, 0, sizeof(remove_ioc)); SE_TRACE(SE_TRACE_DEBUG, @@ -133,7 +137,7 @@ static int trim_accept(int fd, uint64_t addr, size_t length) remove_ioc.offset = get_offset_for_address(addr); remove_ioc.length = length; - int ret = ioctl(fd, SGX_IOC_PAGE_REMOVE, &remove_ioc); + int ret = ioctl(fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &remove_ioc); if(ret) { SE_TRACE(SE_TRACE_WARNING, @@ -145,21 +149,26 @@ static int trim_accept(int fd, uint64_t addr, size_t length) } static int emodpr(int fd, uint64_t addr, size_t length, uint64_t prot) { - struct sgx_page_modp ioc; + struct sgx_enclave_restrict_perm ioc; if (length == 0) return EINVAL; - memset(&ioc, 0, sizeof(ioc)); SE_TRACE(SE_TRACE_DEBUG, "MODP for 0x%llX ( %llX ), prot: 0x%llX\n", addr, length, prot); - ioc.prot = prot; + memset(&ioc, 0, sizeof(ioc)); + + sec_info_t sec_info; + memset(&sec_info, 0, sizeof(sec_info_t)); + sec_info.flags = prot;//no shift + + ioc.secinfo = POINTER_TO_U64(&sec_info); ioc.offset = 
get_offset_for_address(addr); ioc.length = length; do { - int ret = ioctl(fd, SGX_IOC_PAGE_MODP, &ioc); + int ret = ioctl(fd, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS, &ioc); //TODO: use error code if (ret && ioc.count == 0 && errno != EBUSY ) { //total failure @@ -176,6 +185,44 @@ static int emodpr(int fd, uint64_t addr, size_t length, uint64_t prot) return 0; } +static int relaxp(int fd, uint64_t addr, uint64_t length, uint64_t prot) +{ + struct sgx_enclave_relax_perm ioc; + if (length == 0) + return EINVAL; + + SE_TRACE(SE_TRACE_DEBUG, + "RELAX_PERM for 0x%llX ( %llX ), prot: 0x%llX\n", + addr, length, prot); + memset(&ioc, 0, sizeof(ioc)); + + sec_info_t sec_info; + memset(&sec_info, 0, sizeof(sec_info_t)); + sec_info.flags = prot; + + ioc.secinfo = POINTER_TO_U64(&sec_info); + ioc.offset = get_offset_for_address(addr); + ioc.length = length; + + do + { + int ret = ioctl(fd, SGX_IOC_ENCLAVE_RELAX_PERMISSIONS, &ioc); + //TODO: use error code + if (ret && ioc.count == 0 && errno != EBUSY ) + { //total failure + SE_TRACE(SE_TRACE_WARNING, + "RELAX failed, error = %d for 0x%llX ( %llX ), prot: 0x%llX\n", + errno, addr, length, prot); + return errno; + } + ioc.length -= ioc.count; + ioc.offset += ioc.count; + ioc.count = 0; + } while (ioc.length != 0); + + return 0; + +} // legacy support for EDMM @@ -245,7 +292,10 @@ static int emodpr_legacy(int fd, uint64_t addr, uint64_t size, uint64_t flag) return SGX_SUCCESS; } - +static int relaxp_legacy(int, uint64_t, uint64_t, uint64_t) +{ + return 0; +} /* * Call OS to change permissions, type, or notify EACCEPT done after TRIM. * @@ -292,6 +342,7 @@ extern "C" int COMM_API enclave_modify(uint64_t addr, size_t length, int flags_f function _trim_accept = trim_accept; function _mktcs = mktcs; function _emodpr = emodpr; + function _relaxp = relaxp; int fd = get_file_handle_from_address((void *)addr); if (s_driver_type == SGX_DRIVER_OUT_OF_TREE) { @@ -299,6 +350,7 @@ extern "C" int COMM_API enclave_modify(uint64_t addr, size_t length, int flags_f _trim_accept = trim_accept_legacy; _mktcs = mktcs_legacy; _emodpr = emodpr_legacy; + _relaxp = relaxp_legacy; fd = s_hdevice; } if(fd == -1) return EINVAL; @@ -358,7 +410,7 @@ extern "C" int COMM_API enclave_modify(uint64_t addr, size_t length, int flags_f return EINVAL; // type_to == type_from // this is for emodpr to epcm.NONE, enclave EACCEPT with pte.R - // separate mprotecte is needed to change ptt.R to pte.NONE + // separate mprotect is needed to change pte.R to pte.NONE if (prot_to == prot_from && prot_to == PROT_NONE) { ret = mprotect((void *)addr, length, prot_to); @@ -384,6 +436,9 @@ extern "C" int COMM_API enclave_modify(uint64_t addr, size_t length, int flags_f //EACCEPT needs at least pte.R, PROT_NONE case done above. 
if (prot_to != PROT_NONE) { + ret = _relaxp(fd, addr, length, prot_to); + if(ret) + return ret; ret = mprotect((void *)addr, length, prot_to); if (ret == -1) return errno; diff --git a/psw/urts/linux/edmm_utility.cpp b/psw/urts/linux/edmm_utility.cpp index 08494f6c1..8ce644a4c 100644 --- a/psw/urts/linux/edmm_utility.cpp +++ b/psw/urts/linux/edmm_utility.cpp @@ -239,10 +239,10 @@ extern "C" bool is_driver_support_edmm(int hdevice) if (-1 == hdevice){ if(!open_se_device(SGX_DRIVER_IN_KERNEL, &hdevice)) return false; - struct sgx_page_modp ioc; + struct sgx_enclave_restrict_perm ioc; memset(&ioc, 0, sizeof(ioc)); - int ret = ioctl(hdevice, SGX_IOC_PAGE_MODP, &ioc); + int ret = ioctl(hdevice, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS, &ioc); bool supported = ret != -1 || (errno != ENOTTY); close_se_device(&hdevice); return supported; diff --git a/psw/urts/linux/isgx_user.h b/psw/urts/linux/isgx_user.h index 0732dd099..cccff44e4 100644 --- a/psw/urts/linux/isgx_user.h +++ b/psw/urts/linux/isgx_user.h @@ -108,12 +108,16 @@ enum sgx_page_flags { _IOW(SGX_MAGIC, 0x02, struct sgx_enclave_init) #define SGX_IOC_ENCLAVE_SET_ATTRIBUTE \ _IOW(SGX_MAGIC, 0x03, struct sgx_enclave_set_attribute) -#define SGX_IOC_PAGE_MODP \ - _IOWR(SGX_MAGIC, 0x05, struct sgx_page_modp) -#define SGX_IOC_PAGE_MODT \ - _IOWR(SGX_MAGIC, 0x06, struct sgx_page_modt) -#define SGX_IOC_PAGE_REMOVE \ - _IOWR(SGX_MAGIC, 0x07, struct sgx_page_remove) +#define SGX_IOC_VEPC_REMOVE_ALL \ + _IO(SGX_MAGIC, 0x04) +#define SGX_IOC_ENCLAVE_RELAX_PERMISSIONS \ + _IOWR(SGX_MAGIC, 0x05, struct sgx_enclave_relax_perm) +#define SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS \ + _IOWR(SGX_MAGIC, 0x06, struct sgx_enclave_restrict_perm) +#define SGX_IOC_ENCLAVE_MODIFY_TYPE \ + _IOWR(SGX_MAGIC, 0x07, struct sgx_enclave_modt) +#define SGX_IOC_ENCLAVE_REMOVE_PAGES \ + _IOWR(SGX_MAGIC, 0x08, struct sgx_enclave_remove_pages) /* Legacy OOT driver support for EDMM */ #define SGX_IOC_ENCLAVE_EMODPR \ @@ -292,56 +296,79 @@ struct sgx_modification_param { unsigned long flags; }; + + /** - * struct sgx_page_modp - parameter structure for the %SGX_IOC_PAGE_MODP ioctl - * @offset: starting page offset - * @length: length of memory (multiple of the page size) - * @prot: new protection bits of pages in range described by @offset - * and @length. - * @result: SGX result code - * @count: bytes successfully changed (multiple of page size) + * struct sgx_enclave_relax_perm - parameters for ioctl + * %SGX_IOC_ENCLAVE_RELAX_PERMISSIONS + * @offset: starting page offset (page aligned relative to enclave base + * address defined in SECS) + * @length: length of memory (multiple of the page size) + * @secinfo: address for the SECINFO data containing the new permission bits + * for pages in range described by @offset and @length + * @count: (output) bytes successfully changed (multiple of page size) */ -struct sgx_page_modp { - __u64 offset; - __u64 length; - __u64 prot; - __u64 result; - __u64 count; +struct sgx_enclave_relax_perm { + __u64 offset; + __u64 length; + __u64 secinfo; + __u64 count; }; /** - * struct sgx_page_modt - parameter structure for the %SGX_IOC_PAGE_MODT ioctl - * @offset: starting page offset - * @length: length of memory (multiple of the page size) - * @prot: new type of pages in range described by @offset and @length. 
- * @result: SGX result code - * @count: bytes successfully changed (multiple of page size) + * struct sgx_enclave_restrict_perm - parameters for ioctl + * %SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS + * @offset: starting page offset (page aligned relative to enclave base + * address defined in SECS) + * @length: length of memory (multiple of the page size) + * @secinfo: address for the SECINFO data containing the new permission bits + * for pages in range described by @offset and @length + * @result: (output) SGX result code of ENCLS[EMODPR] function + * @count: (output) bytes successfully changed (multiple of page size) + */ +struct sgx_enclave_restrict_perm { + __u64 offset; + __u64 length; + __u64 secinfo; + __u64 result; + __u64 count; +}; + +/** + * struct sgx_enclave_modt - parameters for %SGX_IOC_ENCLAVE_MODIFY_TYPE + * @offset: starting page offset (page aligned relative to enclave base + * address defined in SECS) + * @length: length of memory (multiple of the page size) + * @secinfo: address for the SECINFO data containing the new type + * for pages in range described by @offset and @length + * @result: (output) SGX result code of ENCLS[EMODT] function + * @count: (output) bytes successfully changed (multiple of page size) */ -struct sgx_page_modt { - __u64 offset; - __u64 length; - __u64 type; - __u64 result; - __u64 count; +struct sgx_enclave_modt { + __u64 offset; + __u64 length; + __u64 secinfo; + __u64 result; + __u64 count; }; /** - * struct sgx_page_remove - parameters for the %SGX_IOC_PAGE_REMOVE ioctl - * @offset: starting page offset (page aligned relative to enclave base - * address defined in SECS) - * @length: length of memory (multiple of the page size) - * @count: bytes successfully changed (multiple of page size) + * struct sgx_enclave_remove_pages - %SGX_IOC_ENCLAVE_REMOVE_PAGES parameters + * @offset: starting page offset (page aligned relative to enclave base + * address defined in SECS) + * @length: length of memory (multiple of the page size) + * @count: (output) bytes successfully changed (multiple of page size) * * Regular (PT_REG) or TCS (PT_TCS) can be removed from an initialized - * enclave if the system supports SGX2. First, the %SGX_IOC_PAGE_MODT ioctl - * should be used to change the page type to PT_TRIM. After that succeeds - * ENCLU[EACCEPT] should be run from within the enclave and then can this - * ioctl be used to complete the page removal. + * enclave if the system supports SGX2. First, the %SGX_IOC_ENCLAVE_MODIFY_TYPE + * ioctl() should be used to change the page type to PT_TRIM. After that + * succeeds ENCLU[EACCEPT] should be run from within the enclave and then + * %SGX_IOC_ENCLAVE_REMOVE_PAGES can be used to complete the page removal. 
*/ -struct sgx_page_remove { - __u64 offset; - __u64 length; - __u64 count; +struct sgx_enclave_remove_pages { + __u64 offset; + __u64 length; + __u64 count; }; From 60fda392e0f1376315e8c67d906946e6b7ae2368 Mon Sep 17 00:00:00 2001 From: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> Date: Thu, 10 Feb 2022 10:04:35 -0800 Subject: [PATCH 07/96] sgx_mm_ocalls: add more robust error handling for ioctl() calls Add error checking for remove ioctl() Do emodt for multiple pages in on call Signed-off-by: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> --- psw/enclave_common/sgx_mm_ocalls.cpp | 37 +++++++++++++++++----------- 1 file changed, 22 insertions(+), 15 deletions(-) diff --git a/psw/enclave_common/sgx_mm_ocalls.cpp b/psw/enclave_common/sgx_mm_ocalls.cpp index df6ac3891..286f68848 100644 --- a/psw/enclave_common/sgx_mm_ocalls.cpp +++ b/psw/enclave_common/sgx_mm_ocalls.cpp @@ -96,7 +96,7 @@ static int emodt(int fd, uint64_t addr, size_t length, uint64_t type) ioc.secinfo = POINTER_TO_U64(&sec_info);; ioc.offset = get_offset_for_address(addr); - ioc.length = SE_PAGE_SIZE;//TODO: change back to length + ioc.length = length; do { int ret = ioctl(fd, SGX_IOC_ENCLAVE_MODIFY_TYPE, &ioc); @@ -108,10 +108,11 @@ static int emodt(int fd, uint64_t addr, size_t length, uint64_t type) errno, addr, length, type); return errno; } - ioc.offset += SE_PAGE_SIZE; + //for recoverable partial errors + length -= ioc.count; + ioc.offset += ioc.count; ioc.result = 0; ioc.count = 0; - length -= SE_PAGE_SIZE; } while (length != 0); return 0; @@ -128,23 +129,29 @@ static int mktcs(int fd, uint64_t addr, size_t length) } static int trim_accept(int fd, uint64_t addr, size_t length) { - struct sgx_enclave_remove_pages remove_ioc; - memset(&remove_ioc, 0, sizeof(remove_ioc)); + struct sgx_enclave_remove_pages ioc; + memset(&ioc, 0, sizeof(ioc)); SE_TRACE(SE_TRACE_DEBUG, "REMOVE for 0x%llX ( %llX )\n", addr, length); - remove_ioc.offset = get_offset_for_address(addr); - remove_ioc.length = length; + ioc.offset = get_offset_for_address(addr); + ioc.length = length; + int ret = 0; + do { + ret = ioctl(fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &ioc); + if(ret && ioc.count == 0 && errno != EBUSY && errno != EAGAIN ) + { //total failure + SE_TRACE(SE_TRACE_WARNING, + "REMOVE failed, error = %d for 0x%llX ( %llX )\n", + errno, addr, length); + return errno; + } + ioc.length -= ioc.count; + ioc.offset += ioc.count; + ioc.count = 0; + } while (ioc.length != 0); - int ret = ioctl(fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &remove_ioc); - if(ret) - { - SE_TRACE(SE_TRACE_WARNING, - "REMOVE failed, error = %d for 0x%llX ( %llX )\n", - errno, addr, length); - return errno; - }else return 0; } static int emodpr(int fd, uint64_t addr, size_t length, uint64_t prot) From 022a6630cd6918771807d51ce23ec32254f1cf95 Mon Sep 17 00:00:00 2001 From: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> Date: Thu, 10 Feb 2022 19:25:48 -0800 Subject: [PATCH 08/96] EMM: Update README for the kernel patches V2 Signed-off-by: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> --- sdk/emm/README.md | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/sdk/emm/README.md b/sdk/emm/README.md index 2f45bc61f..64f4329df 100644 --- a/sdk/emm/README.md +++ b/sdk/emm/README.md @@ -4,7 +4,9 @@ This directory contains an implementation of the Enclave Memory Manager proposed The instructions here are for developing and testing the EMM functionality only. Consult the main README for general usages. 
-**Note:** This implementation is based on the current Linux kernel implementation posted [here](https://github.com/rchatre/linux/tree/sgx/sgx2_submitted_v1_plus_rwx), which has not been finalized and upstreamed. As the kernel interfaces evolve, the EMM implementation and/or interface may change. +**Note:** The kernel patch series for upstream are under review on LKML in [this thread](https://lore.kernel.org/linux-sgx/cover.1644274683.git.reinette.chatre@intel.com/). Please refer to the cover letter of the series for changes between versions. + +This EMM implementation is based on the testing branch for the kernel hosted [here](https://github.com/rchatre/linux/tree/sgx/sgx2_submitted_v2_plus_rwx), which includes a temporary patch to allow RWX pages. As the kernel interfaces evolve, the EMM implementation and/or interface may change. Prerequisites ------------------------------- @@ -16,7 +18,7 @@ On Ubuntu 18.04/20.04, follow the general instructions from [here](https://wiki. ``` $ git clone https://github.com/rchatre/linux.git $ cd linux -$ git checkout sgx/sgx2_submitted_v1_plus_rwx +$ git checkout sgx/sgx2_submitted_v2_plus_rwx ``` - For step 6, modify .config to set "CONFIG_X86_SGX=y". @@ -41,7 +43,7 @@ Build and Install SDK and PSW ``` $ git clone https://github.com/intel/linux-sgx.git $repo_root $ cd $repo_root -$ git checkout edmm_v1 +$ git checkout edmm_v2 ``` Following steps assume $repo_root is the top directory of the linux-sgx repo you cloned. From 1398ae3ca8b9f4cf28ea0d382775420a708c3120 Mon Sep 17 00:00:00 2001 From: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> Date: Mon, 7 Mar 2022 14:03:39 -0800 Subject: [PATCH 09/96] urts: check if /dev is mounted with noexec Signed-off-by: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> --- psw/urts/linux/edmm_utility.cpp | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/psw/urts/linux/edmm_utility.cpp b/psw/urts/linux/edmm_utility.cpp index 8ce644a4c..c8569f78a 100644 --- a/psw/urts/linux/edmm_utility.cpp +++ b/psw/urts/linux/edmm_utility.cpp @@ -34,6 +34,7 @@ #include "se_trace.h" #include "isgx_user.h" #include "cpuid.h" +#include "arch.h" #include #include #include @@ -44,7 +45,7 @@ #include #define SGX_URTS_CMD "for f in $(find /usr/$(basename $(gcc -print-multi-os-directory)) -name 'libsgx_urts.so' 2> /dev/null); do strings $f|grep 'SGX_URTS_VERSION_2'; done" #define SGX_CPUID 0x12 - +#include /* is_urts_support_edmm() * Parameters: * None. 
@@ -126,6 +127,20 @@ bool get_driver_type(int *driver_type) else { sgx_driver_type = SGX_DRIVER_IN_KERNEL; + +#define ERR_LOG \ +"mmap() failed for PROT_EXEC|PROT_READ.\n" \ +" Was /dev mounted with noexec set?\n" \ +" If so, remount it with exec: sudo mount -o remount,exec /dev\n" + + void* ptr = mmap(NULL, SE_PAGE_SIZE, PROT_READ|PROT_EXEC, MAP_SHARED, hdev, 0); + if (ptr == (void *)-1) { + SE_PROD_LOG(ERR_LOG); + close(hdev); + return false; + } + munmap(ptr, SE_PAGE_SIZE); + } close(hdev); From 3dda98ba815915480b3b18e085fce875d113dc60 Mon Sep 17 00:00:00 2001 From: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> Date: Mon, 7 Mar 2022 17:58:55 -0800 Subject: [PATCH 10/96] emm: add design doc and update README Signed-off-by: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> --- sdk/emm/README.md | 23 +- .../design_docs/SGX_EDMM_driver_interface.md | 296 ++++++++ sdk/emm/design_docs/SGX_EMM.md | 697 ++++++++++++++++++ .../design_docs/images/SGX2_alloc_direct.svg | 213 ++++++ sdk/emm/design_docs/images/SGX2_alloc_pf.svg | 220 ++++++ sdk/emm/design_docs/images/SGX2_eaccept.svg | 301 ++++++++ sdk/emm/design_docs/images/SGX2_eaccept2.svg | 278 +++++++ sdk/emm/design_docs/images/SGX2_perms.svg | 227 ++++++ sdk/emm/design_docs/images/SGX2_tcs.svg | 216 ++++++ sdk/emm/design_docs/images/SGX2_trim.svg | 250 +++++++ 10 files changed, 2717 insertions(+), 4 deletions(-) create mode 100644 sdk/emm/design_docs/SGX_EDMM_driver_interface.md create mode 100644 sdk/emm/design_docs/SGX_EMM.md create mode 100644 sdk/emm/design_docs/images/SGX2_alloc_direct.svg create mode 100644 sdk/emm/design_docs/images/SGX2_alloc_pf.svg create mode 100644 sdk/emm/design_docs/images/SGX2_eaccept.svg create mode 100644 sdk/emm/design_docs/images/SGX2_eaccept2.svg create mode 100644 sdk/emm/design_docs/images/SGX2_perms.svg create mode 100644 sdk/emm/design_docs/images/SGX2_tcs.svg create mode 100644 sdk/emm/design_docs/images/SGX2_trim.svg diff --git a/sdk/emm/README.md b/sdk/emm/README.md index 64f4329df..7894d2ab2 100644 --- a/sdk/emm/README.md +++ b/sdk/emm/README.md @@ -1,12 +1,27 @@ Introduction --------------------------------- -This directory contains an implementation of the Enclave Memory Manager proposed in [this PR](https://github.com/openenclave/openenclave/pull/3991) +This directory contains an implementation of the Enclave Memory Manager proposed in [this design doc](design_docs/SGX_EMM.md). -The instructions here are for developing and testing the EMM functionality only. Consult the main README for general usages. +Its public APIs as defined in [sgx_mm.h](include/sgx_mm.h) are intended to encapsulate low level details +of managing the basic EDMM flows for dynamically allocating/deallocating EPC pages, changing EPC page +permissions and page types. -**Note:** The kernel patch series for upstream are under review on LKML in [this thread](https://lore.kernel.org/linux-sgx/cover.1644274683.git.reinette.chatre@intel.com/). Please refer to the cover letter of the series for changes between versions. +The typical target users of these APIs are intermediate level components in SGX runtimes: heap, stack managers +with dynamic expansion capabilities, mmap/mprotect/pthread API implementations for enclaves, dynamic code +loader and JIT compilers,etc. + +This implementation aims to be reusable in any SGX runtime that provides a minimal C runtime (malloc required) and +implements the abstraction layer APIs as defined in [sgx_mm_rt_abstraction.h](include/sgx_mm_rt_abstraction.h). 
-This EMM implementation is based on the testing branch for the kernel hosted [here](https://github.com/rchatre/linux/tree/sgx/sgx2_submitted_v2_plus_rwx), which includes a temporary patch to allow RWX pages. As the kernel interfaces evolve, the EMM implementation and/or interface may change. +The instructions here are for developing and testing the EMM functionality only. +Consult the main README of this repo for general usages. + +**Note:** The kernel patch series for upstream are under review on LKML in [this thread](https://lore.kernel.org/linux-sgx/cover.1644274683.git.reinette.chatre@intel.com/). +Please refer to the cover letter of the series for changes between versions. + +This EMM implementation is based on the testing branch for the kernel hosted [here](https://github.com/rchatre/linux/tree/sgx/sgx2_submitted_v2_plus_rwx), which includes a temporary patch to allow pages EAUG'ed with RWX permissions. + +As the kernel interfaces evolve, this EMM implementation and/or interface may change. However, the goal is to minimize the EMM public API changes so that impact to upper layer implementations are minimized. Prerequisites ------------------------------- diff --git a/sdk/emm/design_docs/SGX_EDMM_driver_interface.md b/sdk/emm/design_docs/SGX_EDMM_driver_interface.md new file mode 100644 index 000000000..5c67d59d7 --- /dev/null +++ b/sdk/emm/design_docs/SGX_EDMM_driver_interface.md @@ -0,0 +1,296 @@ +SGX EDMM Linux Driver Interface Design +===================================== + +## Motivation + +This document describes possible Linux driver interfaces to facilitate discussions among SGX runtime implementors (e.g., https://github.com/openenclave/openenclave/pull/3639) on supporting different SGX EDMM flows. + +Although interfaces described here are inspired to be as likely as possible a candidate for future Linux kernel adoption, they are not intended to be a proposal for kernel implementation and are assumed to be implemented as an OOT driver. We hope from discussions enabled by this document, requirements and usage models can be identified to help shape future kernel interfaces. + +Without losing generality, this document may describe how upper layer user space components would use the interfaces. However, details of design and implementation of those components are intentionally left out. The PR mentioned above would provide more contexts on other user space components and their relationships. Further, for those who may want to learn basic principles behind Intel(R) SGX EDMM instructions and how they are typically used, please refer to following references: +- [HASP@ISCA 2016: 11:1-11:9](https://caslab.csl.yale.edu/workshops/hasp2016/HASP16-17.pdf) +- [Intel SDM Vol.4, Ch.36-42](https://software.intel.com/content/www/us/en/develop/articles/intel-sdm.html) + +For design and implementation of current SGX1 support in upstream Linux kernel (merged in 5.11RC), please refer to [this patch series](https://lwn.net/Articles/837121/) + +## Basic EDMM flows + +SGX EDMM instructions support dynamic EPC page allocation/deallocation for enclaves and page property modification post-EINIT. Following are the basic EDMM flows on which other more advanced usages of EDMM can be built. + +**Note:** This document is Linux specific. The term "kernel" and "kernel space" are used in this document when general Linux kernel space actions are described whether implemented in an OOT driver or in kernel tree. 
Kernel specific implementation details will be explicitly stated as "future kernel" or "kernel patches". And implementation details such as OCalls issued by enclaves, ETRACK and inter-processor interrupts (IPIs) issued in kernel are generally omitted for brevity. + +- Allocate a new page at an address in ELRANGE of an enclave. + - This can be an explicit syscall or triggered by a page fault (#PF) when an unavailable page is accessed. + - Kernel issues EAUG for the page. All new pages should have RW permissions initially. + - The enclave then issues EACCEPT. +- Deallocate an existing page + - Enclave signals via a syscall to kernel that a page is no longer in use. + - Kernel issues EMODT to change page type to PT_TRIM + - The enclave issues EACCEPT + - Kernel issues EREMOVE on the page at appropriate time +- Change page type, for example, from PT_REG to PT_TCS or PT_TRIM. + - Enclave requests via a syscall to kernel to change type of a page from PT_REG to PT_TCS/PT_TRIM + - Kernel issues EMODT to change page type to PT_TCS/PT_TRIM + - The enclave issues EACCEPT +- Extend EPCM permissions of a page, e.g., R->RW/RX + - Enclave issues EMODPE for the page + - Enclave requests via a syscall that the kernel update the page table permissions to match. + - Kernel modifies permissions in PTE +- Reduce EPCM permissions of a page, e.g. RW/RX->R + - Enclave requests that the kernel restrict the permissions of an EPC page + - Kernel performs EMODPR, updates page tables to match the new EPCM permissions, + - Enclave issues EACCEPT + +**Note:** Flows related to CET support inside enclave will be considered as a future enhancement. + +Future kernel may extend mmap and mprotect syscalls to support SGX EDMM usages. But we can't add/change syscall interfaces from an out-of-tree driver. So, in this proposal for possible driver implementation, we reuse mmap for dynamic enclave memory mapping and expose a new IOCTL, sgx_enclave_mprotect, for enclave page modification. + +## mmap + +After enclave is initialized (EINIT IOCTL done), the standard Linux mmap syscall can be used to create a new mapping configured for dynamically allocating enclave memory using EAUG. Following comments are specific to SGX EDMM usages, please refer to [mmap man page](https://man7.org/linux/man-pages/man2/mmap.2.html) for generic definitions. + +### Remarks + +- To create a mapping for dynamic enclave memory allocation, mmap must be called with an open enclave file descriptor and with PROT_READ | PROT_WRITE for protection flags. + - Enclave must issue EACCEPT for the pages after mmap before it can modify the content of the pages and extend/reduce permissions in secure way. +- The offset in mmap parameter must be zero for enclaves. +- MAP_* flags must be MAP_SHARED | MAP_FIXED masked with optional flags: + - MAP_POPULATE: hint for kernel to EAUG pages as soon as possible. + - MAP_GROWSDOWN: used for stacks. The mapping will grow down to the next mapping. +- If and only if the address range are within the ELRANGE of the enclave associated with the file descriptor, the mapping will be created. However, user space should not expect EAUG be done by the mmap call. + - The kernel can choose EAUG pages immediately (likely for MAP_POPULATE), or EAUG pages upon page faults within the VMA, similar to how kernel would allocate regular memory. +- The kernel will assume the newly requested mapping is for dynamic allocation and initial permissions must be RW until user space request changes later. 
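+
+For illustration, here is a minimal sketch (not part of the proposal itself) of how an untrusted runtime might create such a dynamic allocation mapping with the mmap parameters described above; enclave_fd, desired_addr and length are assumed to be supplied by the runtime, with desired_addr inside ELRANGE:
+
+```
+#include <stddef.h>
+#include <sys/mman.h>
+
+/* Sketch: reserve a dynamically committed (EAUG-on-demand or eagerly
+ * EAUG'ed) region inside ELRANGE. The offset must be zero and the
+ * initial permissions must be RW, as required above. */
+static void *reserve_dynamic_region(int enclave_fd, void *desired_addr, size_t length)
+{
+    void *addr = mmap(desired_addr, length,
+                      PROT_READ | PROT_WRITE,                /* initial RW only */
+                      MAP_SHARED | MAP_FIXED | MAP_POPULATE, /* MAP_POPULATE: hint to EAUG eagerly */
+                      enclave_fd, 0);
+    return (addr == MAP_FAILED) ? NULL : addr;
+}
+```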
+ +**Implementation Notes:** Current [SGX kernel patches](https://patchwork.kernel.org/project/intel-sgx/patch/20201112220135.165028-11-jarkko@kernel.org/) limit PTE permissions to the EPCM permissions given in SEC_INFO during EADD IOCTL calls. The dynamic allocation mappings should not be subject to those limits. A possible implementation may have these changes: + - sgx_encl_may_map + - enforces RW permissions for pages other than those loaded due to EADD or ECREATE. + - set up flags to track dynamic pages: type, permissions flag + - sgx_vma_mprotect + - Allow permissions changes to dynamic pages within limitations of OS policies, e.g., + - never allow WX + - SELinux policy specific to SGX enclaves + - update flags for the dynamic pages + +## munmap +Calling munmap on an enclave page (dynamic allocated or not) has exactly the same effect of calling munmap on a regular RAM page. No sgx specific interface is needed. No behavior changes to current kernel space implementation. + +### Remarks + +- Enclave memory mapings are shared (MAP_SHARED). The mappings in shared processes are kept alive and independently until the process exits + - munmap and closing file descriptors are not required for user space. A dead process automatically releases all mappings and file descriptors. +- Upon all enclave mappings are removed and file handles to the enclave are closed, either by explicit munmap/fclose syscalls or when all hosting apps exited: + - The kernel may mark its remaining pages are reclaimable and issue EREMOVE on them any time the kernel deems appropriate. + +## mprotect IOCTL +This IOCTL emulates the mprotect syscall with SGX specific extensions. In future kernel implementation, it could be mprotect or pkey_mprotect syscall with sgx extensions for the "prot" parameter. + +``` +#define SGX_IOC_ENCLAVE_MPROTECT _IOW(SGX_MAGIC, 0x06, struct sgx_enclave_mprotect) +/** + * struct sgx_enclave_mprotect - parameter structure for the + * %SGX_IOC_ENCLAVE_MPROTECT ioctl + * @addr: address of the memory to change protections + * @length: length of the area. This must be a multiple of the page size. + * @prot: this must be or'ed of following: + PROT_READ + PROT_WRITE + PROT_EXEC + PROT_TRIM (new): change the page type to PT_TRIM, implies RW. User space should immediately EACCEPT, and then call mprotect with PROT_NONE. + PROT_TCS (new): change the page type to PT_TCS + PROT_NONE: Signal the kernel EACCEPT is done for PT_TRIM pages. Kernel can EREMOVE the pages at a time it deems appropriate. + */ +struct sgx_enclave_mprotect { + __u64 addr; + __u64 length; + __u64 prot; +}; +``` + +### Remarks + +Kernel should ensure that SGX instructions can succeed or catch and handle any fault. + - The kernel may maintain EPCM information on each page which includes access permission RWX, page types of PT_REG, PT_TRIM, PT_TCS. + - The kernel should EREMOVE pages of PT_TRIM only after user space signals kernel EACCEPT is done with mprotect(...,PROT_NONE,...). This is because EACCEPT may cause undesired #PF if the target page is already EREMOVED. + - The kernel catches fault on EMODPR, EMODT and converts to error code returned to user space. + +The enclave run-time (or trusted run-time) may implement a parallel memory management structure which would provide information to the enclave on the enclave memory mappings. 
The run-time can have a trusted API analogous to mmap, which makes a call out of the enclave to issue the mmap and then either performs EACCEPT on the pages and updates the internal memory structures, or configures the enclave to perform the EACCEPT when a #PF is delivered to the enclave. With the outlined kernel interface, either implementation is possible.
+## Sequence Diagrams for Basic Flows
+
+### Direct Allocation with MAP_POPULATE
+
+![SGX2 direct allocation flow](images/SGX2_alloc_direct.svg)
+
+### \#PF Based Allocation
+
+![SGX2 #PF based allocation flow](images/SGX2_alloc_pf.svg)
+
+### EPC Deallocation
+
+![SGX2 deallocation flow](images/SGX2_trim.svg)
+
+### Permission Changes
+
+![SGX2 permissions change flow](images/SGX2_perms.svg)
+
+**Notes:**
+- EACCEPT is needed for the enclave to ensure that the untrusted runtime and OS indeed invoked EMODPR and that the EPCM permissions are set as expected.
+- It is assumed that both the OS and the enclave keep track of page permissions. However, it is possible for the enclave to avoid that with an implementation like this:
+
+```
+//Change page permissions to perms_target in EPCM without remembering previous permissions.
+//The tradeoff here is possibly more ocalls, emodpr, emodpe, and eaccept operations than necessary.
+trusted_mprotect(..., perms_target, ...){
+    ocall_mprotect(..., perms_target, ...); //expect EPCM.perms<=perms_target
+    emodpe(..., perms_target, ...); //expect EPCM.perms>=perms_target
+    eaccept(..., perms_target); //verify EPCM.perms==perms_target
+    assert( ZF == 0);
+}
+```
+
+### TCS Allocation
+
+![SGX2 TCS allocation flow](images/SGX2_tcs.svg)
+
+## Example advanced flows
+
+More advanced flows can be implemented as combinations of the basic flows. Here we present a few examples.
+
+### Dynamic code loading
+
+To load dynamic code after EINIT, the enclave has to verify the code to be trustworthy. The mechanism
+for an enclave to establish trustworthiness of the new code is out of scope for this document.
+
+Assuming a new code page is verified to be trusted and stored at an existing enclave page, there could
+be many ways for an enclave to load the trusted code to a new executable page. For example, in an SGX1 environment
+without EDMM support, the enclave can reserve RWX regions and load trusted code directly into those regions.
+That is straightforward but not a flexible or efficient use of EPC. Additionally, the requirement of an RWX region
+goes against security policies of not running code in writable pages. With EDMM, the EACCEPTCOPY instruction
+allows an enclave to copy code to a pending page and set the EPCM permissions to RX at the same time, thus providing
+a more robust and flexible way to load trusted code without those pitfalls.
+
+Following are two example sequences in which a dynamic code page is loaded using EACCEPTCOPY on demand when
+the code page is executed for the first time.
+
+**Dynamic loading with direct allocation**
+
+![SGX2 EACCEPTCOPY flow-direct EAUG](images/SGX2_eaccept2.svg)
+
+**Dynamic loading with #PF based allocation**
+
+![SGX2 EACCEPTCOPY flow](images/SGX2_eaccept.svg)
+
+In the sequences above it is assumed that the enclave would load a code page only when it is executed for the
+first time. This is to minimize the EPC usage. An enclave could also choose to EACCEPTCOPY to preload the
+code ahead of time. In that case, the sequence would be as follows in the case of direct allocation.
+1. Enclave calls mmap to configure a region in enclave ELRANGE for EAUG.
+2. Kernel EAUGs all pages requested.
+3. Enclave EACCEPTCOPYs the trusted code from an existing EPC page to the target page, which sets the RX permissions in EPCM as specified in the PageInfo operand.
+4. Enclave makes an ocall which invokes the mprotect syscall to change the PTE permissions from RW to RX.
+
+### Lazy dynamic stack expansion
+An enclave can lazily expand its stacks as follows.
+1. Enclave calls mmap with MAP_GROWSDOWN for a stack region in enclave ELRANGE.
+2. At some time later, the enclave pushes to the top of the stack where no EPC page is populated yet; this results in a #PF, causing an enclave AEX.
+3. Kernel determines the faulting address is in a stack region of the enclave, EAUGs a page, and invokes the user space handler via vDSO.
+4. The user space handler delivers it to the enclave exception handler.
+5. The enclave exception handler checks the faulting address against its records and determines the fault happened in a stack area not yet EACCEPT'ed.
+6. Enclave issues EACCEPT, returns to the untrusted user handler of the hosting process, which returns to the kernel fault handler.
+7. Kernel fault handler returns to the enclave AEX address, at which an ERESUME instruction is stored.
+8. Enclave is resumed and the original push instruction is retried and succeeds.
+
+## Exception Handling
+
+This section focuses on changes around \#PF handling, which is affected by the new page states (i.e., states in EPCM) introduced by SGX EDMM, along with the mechanisms for handling exceptions in enclaves.
+
+An exception or interrupt during enclave execution will trigger an enclave exit, i.e., an Asynchronous Enclave Exit (AEX). To protect the secrecy of the enclave, the SGX CPU at AEX saves the state of certain registers within enclave memory, specifically in the thread's current State Save Area (SSA). It then loads those registers with fixed values called the synthetic state, of which the RIP (Instruction Pointer Register) is always set to the AEP (Asynchronous Exit Pointer) address. The AEP is passed in as an operand of the EENTER instruction and points to a trampoline code sequence which ultimately invokes the ERESUME instruction to reenter the enclave.
+
+As with all non-enclave exception scenarios, the kernel fault handler registered in the Interrupt Descriptor Table (IDT) is the first in line to handle exceptions for AEX; it needs to either handle the exception in kernel space or, if it cannot, invoke the user space exception handler. In both cases, after the handlers return, control is transferred to the AEP trampoline, which eventually invokes ERESUME to reenter the enclave.
+
+The current kernel implementation (in release 5.11) can invoke the user space exception handler in two ways, depending on how EENTER and the AEP trampoline are managed:
+
+ 1. Direct EENTER in runtime: the user space runtime manages EENTER and the AEP trampoline directly and uses Linux signal APIs to register and handle exceptions.
+ 2. vDSO interface: the user space invokes [__vdso_sgx_enter_enclave](https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git/tree/arch/x86/include/uapi/asm/sgx.h?h=x86/sgx#n124), passing in a callback for exception handling, and the vDSO implementation manages EENTER and the AEP trampoline.
+
+ The direct EENTER method requires signal handling in the runtime library, which is known to be challenging in the Linux environment. Therefore, the vDSO interface is preferred and assumed in the following discussion. (A runtime implementing the direct EENTER method would have a similar flow, with the callbacks from the vDSO replaced by Linux signals.)
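+
+As a reference for the dispatch rules in the sequence below, here is a minimal, illustrative sketch of the decision logic a user handler might apply to the reported run.function value. The ENCLU leaf numbers are assumptions based on the ENCLU leaf encoding in the SDM (EENTER=2, ERESUME=3, EEXIT=4), and dispatch_enclave_exit is a hypothetical helper, not the exact handler prototype from the kernel header:
+
+```
+/* Assumed ENCLU leaf numbers, as reported in run.function. */
+#define LEAF_EENTER  2
+#define LEAF_ERESUME 3
+#define LEAF_EEXIT   4
+
+/* Sketch of the handler decision logic described in step 8 below.
+ * Returns the leaf to (re)enter the enclave with, 0 for a normal
+ * return to the caller, or a negative value on unrecoverable error. */
+static int dispatch_enclave_exit(unsigned int run_function)
+{
+    switch (run_function) {
+    case LEAF_EEXIT:
+        /* Normal EEXIT: an ecall return, or an ocall request to be
+         * serviced and followed by another EENTER (runtime convention). */
+        return 0;
+    case LEAF_ERESUME:
+        /* AEX the kernel could not handle: re-enter the enclave with
+         * EENTER to run its exception handler, then ERESUME. */
+        return LEAF_ERESUME;
+    case LEAF_EENTER:
+        /* Fault on EENTER itself: EPC context lost, reload the enclave. */
+        return -1;
+    default:
+        return -1;
+    }
+}
+```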
+
+For more details about the new SGX vDSO interface, please refer to the documentation in the [kernel header file](https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git/tree/arch/x86/include/uapi/asm/sgx.h?h=x86/sgx#n124). The general sequence is as follows:
+
+ 1. User space initializes an sgx_enclave_run struct, run = {..., TCS, sgx_enclave_user_handler, ...}
+ 2. User space calls __vdso_sgx_enter_enclave (..., EENTER, run, ...)
+ 3. vDSO invokes EENTER (TCS, vDSO AEP) to enter the enclave; the vDSO AEP points to an ERESUME instruction in vDSO code.
+ 4. If the enclave finishes successfully and EEXITs, the vDSO sets run.function = EEXIT, go to step 7.
+ 5. In the event of an AEX, the kernel handles the fault if possible, e.g., EAUG on #PF, and returns to the vDSO AEP.
+ 6. Otherwise, the kernel dispatches the fault to the vDSO (via an entry in the exception fix-up table), which copies the exception info to run.exception_vector, run.exception_error_code, run.exception_addr, and the last seen ENCLU leaf in RAX (ERESUME) to run.function.
+ 7. vDSO invokes sgx_enclave_user_handler(..., run)
+ 8. The sgx_enclave_user_handler processes the enclave exit event:
+ * If run.function == EENTER, this is an error case; return a negative value to fail the last __vdso_sgx_enter_enclave call. User space should treat it as if the enclave lost its EPC context due to power events or other causes (assuming no bugs in the code), and try to reload the enclave.
+ * If run.function == EEXIT, return 0 for a normal enclave ecall return, or return EENTER after invoking the proper ocall per the runtime specific convention.
+ * If run.function == ERESUME, call __vdso_sgx_enter_enclave (..., EENTER, run2, ...) to handle the exception inside the enclave, then return ERESUME.
+ 9. The vDSO returns to the caller if the user handler's return value is not EENTER or ERESUME; otherwise it uses ERESUME or EENTER accordingly to reenter the enclave.
+
+### Fault Handling in Kernel
+
+SGX enclave execution may cause an “EPCM Induced #PF”. For those #PFs, SGX enabled CPUs set the SGX bit (bit 15) in the Page Fault Error Code (PFEC). It is always generated in the PFEC register if the fault is due to an EPCM attribute mismatch. The kernel #PF handler will only see the faulting address (via CR2) and the PFEC codes on a page fault. It must rely on this information and its own stored information about the address of the fault (VMA and PTE) to make a decision on how to handle the fault. In many cases, the kernel can only issue a signal or call the user handler callback registered in the SGX vDSO function with run.function=ERESUME and pass on all relevant exception info.
+
+In addition, a running enclave can lose its EPC context due to power events (S3/S4 transitions) or the VM being suspended. A page fault on the EENTER instruction (either at an initial ecall or at re-entering the enclave for exception handling) results in those cases, and the user handler would receive a callback from the vDSO with run.function = EENTER.
+
+This table summarizes kernel, vDSO, and user handler actions in different fault scenarios related to enclave operations. All exceptions considered here happen inside the enclave causing an AEX, or at EENTER/ERESUME, so the kernel will convert them to the synchronous callbacks through the vDSO interface as needed.
+
+| Fault Condition | Key #PF PFEC Contents | Kernel/vDSO Action | Untrusted User Handler |
+|---|---|---|---|
+| Access a page which has been swapped out | #PF where PFEC.P=0 | ELD the page from backing store, ERESUME | N/A |
+| Access Page Mapped PROT_NONE (page that the enclave has not mmap'ed) | #PF where PFEC.P=0 | invoke user handler (run.fun=ERESUME) | invoke enclave handler |
+| Access Page Mapped PROT_W (Page had been mmap'ed by enclave, but not EAUG'ed) | #PF where PFEC.P=0 | EAUG and map the page then ERESUME | N/A |
+| Page Protection mismatch in PTE | #PF where PFEC.W/R or PFEC.I/D will not match PTE | invoke user handler (run.fun=ERESUME) | invoke enclave handler |
+| Page Protection mismatch in EPCM | #PF where PFEC.SGX=1 | invoke user handler (run.fun=ERESUME) | invoke enclave handler |
+| Access Page with EPCM.Pending | #PF where PFEC.SGX=1 | invoke user handler (run.fun=ERESUME) | invoke enclave handler |
+| Access Page with EPCM.Modified | #PF where PFEC.SGX=1 | invoke user handler (run.fun=ERESUME) | invoke enclave handler |
+| Access Page with type PT_TRIM | #PF where PFEC.SGX=1 | invoke user handler (run.fun=ERESUME) | invoke enclave handler |
+| EENTER with invalid TCS (EPC loss due to power events or VM suspend/resume) | #PF where PFEC.SGX=1 or PFEC.P=0 | invoke user handler (run.fun=EENTER) | return error to app signaling enclave lost; app should reload enclave |
+| ERESUME with invalid TCS (EPC loss due to power events or VM suspend/resume) | #PF where PFEC.SGX=1 or PFEC.P=0 | invoke user handler (run.fun=ERESUME) | invoke enclave handler, which will trigger #PF on EENTER |
+
+**Note:** When an error/exception happens while the kernel handles a fault on behalf of an enclave, the kernel sees the original fault as having happened at the AEP and would fix it up as a callback to the user handler with run.function = ERESUME. For example, in the first case of the table above, a fault on ELD (EPC loss caused by power events) would be fixed up in this way.
+
+# Enclave Handling of Faults
+
+Once an exception is passed into an enclave, the enclave has to rely on trusted info stored in the active SSA by the CPU during AEX to make the right decisions in handling the exception. It should not rely on any info passed in from the untrusted side. To gain access to fault related info in the SSA, an enclave configured to use SGX2 EDMM features should also configure SECS.MISCSELECT to report EXINFO in the State Save Area frame on a #PF or a General Protection (#GP) fault. This will ensure that the enclave has the following information in the SSA frame on a #PF:
+* ExitInfo.Vector = #PF identifies that a #PF caused an asynchronous exit
+* MISC.EXINFO.MADDR = the linear address that page faulted (analogous to CR2)
+* MISC.EXINFO.ERRCD = Page-Fault Error Code - information about the page fault
+
+To securely handle all faulting scenarios and EDMM flows, in addition to the information stored in the SSA, the enclave should store information about its own memory configuration and relevant states. This can be an array or table of structures storing information about each mapped region of enclave memory. The information that the enclave should store includes:
+* Address Range: Range of Enclave Linear Addresses that are covered by the region
+* Permissions: Combination of Read, Write, Execute
+* Page Type: SGX page type of pages in the region - PT_TCS, PT_REG, or PT_TRIM
+* State: the state of the region. The state may indicate that the region is in transition, for example it is changing page type or permissions.
+* Table of information about the EACCEPT state of each page in the region. This may be a temporary structure which keeps track of pages which are EACCEPTed for operations requiring EACCEPT. This can ensure that the enclave does not EACCEPT a page twice. For example, when a page is EAUG'ed to an enclave linear address, the enclave should only EACCEPT that page once. If the enclave could be convinced to EACCEPT the page twice, then the OS can potentially EAUG two pages at the same enclave linear address and freely swap them by modifying PTEs.
+
+Enclaves should prevent two threads from simultaneously operating on the same region, e.g., one thread trying to EMODPE on a page while a permission change is in progress in another thread. One way to ensure this is to use a lock/synchronization mechanism to protect the state of each region and have the second thread wait if the page is in a transition state.
+
+When an enclave is called after faulting, the enclave can consult its stored memory region states and the ExitInfo.Vector and MISC.EXINFO in the SSA to determine what to do with the fault. The following table lists actions on specific page faults.
+
+| EXITINFO/MISC.EXINFO
Information | State of Region | Cause of Fault | Enclave Action | +|---|---|---|---| +| ERRCD(PFEC).P=0 | n/a | Enclave has accessed an unallocated memory region|Call exception handlers, abort if not handled | +| ERRCD(PFEC).W/R or I/D does not match protections | Not In-Transition | Enclave has incorrectly accessed memory region|Call exception handlers, abort if not handled | +| ERRCD(PFEC).W/R or I/D does not match protections | In-Transition | Enclave has incorrectly accessed memory region which may be changing protections|If future protections will allow access then pend on Lock/Mutex for region, else call exception handlers, abort if not handled | +| ERRCD(PFEC).SGX=1 | Not In-Transition | Error in run-time or kernel | Depending on run-time design, the enclave should not encounter this. | +| ERRCD(PFEC).SGX=1 | In-Transition | Page is being accessed during transition | If future protections/page-type will allow access then pend on Lock/Mutex for region, else call exception handlers, abort if not handled | + +## Debugger Support + +The EDMM flows do not affect how debugger read/write enclave memory by EDBGRD/EDBGWR. However, the DBGOPTIN (i.e. bit 0) of TCS.FLAGS must be set to enable hardware breakpoints and allow single-stepping inside the enclave thread entered with that TCS. Therefore, for TCS pages added dynamically using EDMM instructions, debuggers are required to trap TCS creation flow in order to set DBGOPTIN. For GDB on Linux, the debugger scripts can collaborate with runtime to set DBGOPTIN. One possible implementation could be described as below. +- Whenever a new TCS page is ready to use, the runtime invokes a special empty function and passes the TCS address as its argument. +- GDB startup script sets a breakpoint on that empty function to receive the debug interrupt. +- Once the breakpoint has been hit, GDB script extracts the address of the TCS page, and sets DBGOPTIN for that TCS. +- GDB resumes the interrupted application. +- From now on, hardware breakpoints and single stepping are allowed inside enclave threads entered with the newly created TCS + +For Windows, similar collaboration between debugger and runtime can be implemented using exceptions instead of breakpoints on empty functions. + +For runtimes using EDDM to load dynamic modules into enclave after EINIT, the runtime needs to signal module loading events to the debugger so that the debugger can load additional symbols for those modules. That can also be implemented using exceptions or pre-defined breakpoints. + +**Note:** Kernel does not fixup Debug Exceptions (#DB) and Breakpoints (#BP). diff --git a/sdk/emm/design_docs/SGX_EMM.md b/sdk/emm/design_docs/SGX_EMM.md new file mode 100644 index 000000000..8c7c44a3c --- /dev/null +++ b/sdk/emm/design_docs/SGX_EMM.md @@ -0,0 +1,697 @@ +SGX Enclave Memory Manager +================================= + +## Motivation ## + +An enclave's memory is backed by a special reserved region in RAM, called +Enclave Page Cache (EPC). Enclave memory management tasks include +allocating/reserving virtual address ranges, committing physical EPC pages, +changing EPC page permissions or page types, and removing EPC pages. +Those tasks require collaboration between the trusted runtime, the untrusted +runtime, and the OS. 
The SGX enclave memory manager (EMM) serves as a central +component in the enclave trusted runtime that abstracts the interaction with +the untrusted runtime for all memory management flows and provides APIs for +its clients to reserve virtual address ranges, commit EPC memory to the reserved +address ranges, and modify attributes of the reserved/committed pages. + +For details of specific memory management related flows, please refer to +[the SGX EDMM driver API spec](SGX_EDMM_driver_interface.md). +The public EMM APIs defined here are most likely invoked by some intermediate +runtime level components for specific usages, such as dynamic heap/stack, mmap, +mprotect, higher level language JIT compiler, etc. + +**Note:** As the EMM is a component inside enclave, it should not have direct OS dependencies. +However, the design proposed in this document only considers call flows and semantics for Linux. + +## User Experience ## + +**Runtime Abstraction** + +To make the EMM implementation portable across different SGX enclave runtimes, e.g., the Open Enclave and Intel SGX SDKs, +this document also proposes a set of abstraction layer APIs for the runtimes to implement. The runtime abstraction +layer APIs encapsulate runtime specific support such as making OCalls, registering callbacks on page faults, on which +the EMM implementation relies to collaborate with the OS. + +The EMM source code will be hosted and maintained in the [Intel SGX PSW and SDK repository](https://github.com/intel/linux-sgx). +The EMM can be built as a separate library then linked into any runtime that implements the abstraction layer APIs. + +**Allocate, Deallocate Enclave Memory** + +The EMM provides an API, sgx_mm_alloc, for its clients to request enclave memory +allocations. An enclave memory allocation represents both a reserved virtual +address range and a commitment of EPC pages. EPC pages are committed for +enclaves via special SGX instructions: loaded by EADD/EEXTEND before EINIT +or dynamically added using EAUG followed by EACCEPT. + +The sgx_mm_alloc API allows clients to specify one of three committing modes +for an allocation: +- SGX_EMA_RESERVE, only the virtual address range is reserved. No EPC pages will +be committed in this mode. +- SGX_EMA_COMMIT_NOW: reserves and commits physical EPC upon allocation. +EACCEPT will be done immediately on SGX2 platforms. +- SGX_EMA_COMMIT_ON_DEMAND: EACCEPT is done on demand, see below on committing +and uncommitting. + +An allocation, once created, will own its address range until the deallocation +API, sgx_mm_dealloc, is called upon. No two active allocations can have +overlapping address ranges. + +**Commit, Uncommit Enclave Memory** + +When a page in COMMIT_ON_DEMAND allocations is accessed, a page fault occurs if +the page was not yet committed. The EMM will perform EACCEPT to commit the EPC +page on page fault after OS doing EAUG. + +The clients can also call the EMM commit API, sgx_mm_commit, to proactively +commit specific sub-regions in a COMMIT_ON_DEMAND allocation to avoid +future page fault. + +Some EMM clients, e.g., a dynamic code loader wishing to load code on +page faults, can register a custom handler for page faults at the time of +allocation request. In the custom page fault handler, it can invoke an API, +sgx_mm_commit_data, to commit and load data to newly committed EPC page at +the same time as supported by EACCEPTCOPY. 
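+
+For illustration, a minimal sketch of this pattern is shown below, using the sgx_mm_alloc and sgx_mm_commit_data APIs defined later in this document; loader_ctx and fetch_verified_code_page() are hypothetical pieces of the client (a dynamic code loader), not part of the EMM:
+
+```
+#include <stddef.h>
+#include <stdint.h>
+#include "sgx_mm.h"
+
+#define CODE_PAGE_SIZE 4096
+
+typedef struct { void *elf_image; } loader_ctx; /* hypothetical loader state */
+
+/* Hypothetical helper: copy the verified code destined for page_addr into buf,
+ * returning 0 on success. */
+extern int fetch_verified_code_page(loader_ctx *ctx, uint64_t page_addr, uint8_t *buf);
+
+/* Custom #PF handler: commit the faulting page with EACCEPTCOPY via
+ * sgx_mm_commit_data and make it RX in one step. */
+static int code_loader_pf_handler(const sgx_pfinfo *pfinfo, void *private_data)
+{
+    loader_ctx *ctx = (loader_ctx *)private_data;
+    uint64_t page_addr = pfinfo->maddr & ~(uint64_t)(CODE_PAGE_SIZE - 1);
+    uint8_t page[CODE_PAGE_SIZE];
+
+    if (fetch_verified_code_page(ctx, page_addr, page) != 0)
+        return SGX_MM_EXCEPTION_CONTINUE_SEARCH;
+    if (sgx_mm_commit_data((void *)page_addr, CODE_PAGE_SIZE, page, SGX_EMA_PROT_READ_EXEC) != 0)
+        return SGX_MM_EXCEPTION_CONTINUE_SEARCH;
+    return SGX_MM_EXCEPTION_CONTINUE_EXECUTION;
+}
+
+/* Reserve a commit-on-demand code region whose pages are populated lazily
+ * by the handler above. */
+static void *reserve_code_region(loader_ctx *ctx, size_t size)
+{
+    void *region = NULL;
+    int rc = sgx_mm_alloc(NULL, size,
+                          SGX_EMA_COMMIT_ON_DEMAND | SGX_EMA_PAGE_TYPE_REG,
+                          code_loader_pf_handler, ctx, &region);
+    return (rc == 0) ? region : NULL;
+}
+```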
+ +Committed pages will stay committed (regardless how they were committed) until +the clients calls the uncommit API, sgx_mm_uncommit, on them or the allocation +they belong to is deallocated by sgx_mm_dealloc. + +**Modify Page Attributes** + +The EMM clients may call sgx_mm_modify_permissions/sgx_mm_modify_type to request permissions +or page type changes for pages in existing allocations. + +## Notes on Internal Design ## + +The enclave memory manager keeps track of memory allocation and layout info inside +enclave address range (ELRANGE) using an internal structure called the Enclave Memory +Area (EMA) List. The EMA and the EMA list are considered private data structures of the memory +manager, and their internals are not exposed in client-facing APIs. +- The EMA list tracks all memory regions in use (reserved, committed, +commit-on-demand) in ELRANGE. +- Ranges in ELRANGE not tracked by an EMA are considered free and ready for new allocations. +- The EMM labels certain EMAs reserved for runtime or its internal usage and make them +not accessible from public APIs. +- A thread calling an EMM API on an EMA with an operation pending in another thread will wait +until the pending operation is finished. + +**Assumptions:** + +- When an enclave is loaded, the OS reserves the whole address range covered by ELRANGE. +It is assumed the host app will not remap any part of this reserved range. +- When an enclave is loaded with base address at zero, only a partial ELRANGE may be + reserved by the OS. In that case, the EMM will assume the partial ELRANGE as a valid reserved + range for use inside the enclave. + - The runtime can setup the partial valid range in ELRANGE by marking the unusable range up front + as SGX_EMA_RESERVE using the EMM private EMA_allocate API. +- The memory manager does not check EPC pressure, or proactively trim pages when EPC runs low. +The OS can reclaim EPC pages when EPC running low or cgroups threshold reached +- The memory manager does not maintain and recycle committed then freed pages + - Whenever a page is freed (via dealloc or uncommit API), it is trimmed from the enclave + and needs to be re-allocated and committed before re-use. + - The owner of a region can re-purpose a sub-region of it by calling sgx_mm_modify_type/permissions + to split out the sub-region to be reused. +- The memory manager does not call back into the client for #GP handling. Memory manager code will ensure that +itself would not cause #GP, and only register a #PF handler with the enclave global exception +handler registry through the runtime abstraction layer. A client wishing to handle #GP can register +its own exception handler with the global handler registry. +- The memory manager is implemented on SGX2 platforms only. + +Public APIs +----------------- + +### sgx_mm_alloc + +Allocate a new memory region inside enclave and optionally register a custom page fault handler +for the region + +``` +/** + * Page fault (#PF) info reported in the SGX SSA MISC region. + */ +typedef struct _sgx_pfinfo +{ + uint64_t maddr; // address for #PF. + union _pfec + { + uint32_t errcd; + struct + { // PFEC bits. + uint32_t p : 1; // P flag. + uint32_t rw : 1; // RW access flag, 0 for read, 1 for write. + uint32_t : 13; // U/S, I/O, PK and reserved bits not relevant for SGX PF. + uint32_t sgx : 1; // SGX bit. + uint32_t : 16; // reserved bits. 
+ }; + } pfec; + uint32_t reserved; +} sgx_pfinfo; + +/* Return value used by the EMM #PF handler to indicate + * to the dispatcher that it should continue searching for the next handler. + */ +#define SGX_MM_EXCEPTION_CONTINUE_SEARCH 0 + +/* Return value used by the EMM #PF handler to indicate + * to the dispatcher that it should stop searching and continue execution. + */ +#define SGX_MM_EXCEPTION_CONTINUE_EXECUTION -1 + + +/* + * Custom page fault (#PF) handler, do usage specific processing upon #PF, + * e.g., loading data and verify its trustworthiness, then call sgx_mm_commit_data + * to explicitly EACCEPTCOPY data. + * This custom handler is passed into sgx_mm_alloc, and associated with the + * newly allocated region. The memory manager calls the handler when a #PF + * happens in the associated region. The handler may invoke abort() if it + * determines the exception is invalid based on certain internal states + * it maintains. + * + * @param[in] pfinfo info reported in the SSA MISC region for page fault. + * @param[in] private_data private data provided by handler in sgx_mm_alloc call. + * @retval SGX_MM_EXCEPTION_CONTINUE_EXECUTION Success on handling the exception. + * @retval SGX_MM_EXCEPTION_CONTINUE_SEARCH Exception not handled and should be passed to + * some other handler. + * + */ +typedef int (*sgx_enclave_fault_handler_t)(const sgx_pfinfo *pfinfo, void *private_data); + +/* bit 0 - 7 are allocation flags. */ +#define SGX_EMA_ALLOC_FLAGS_SHIFT 0 +#define SGX_EMA_ALLOC_FLAGS(n) (((unsigned int)(n) << SGX_EMA_ALLOC_FLAGS_SHIFT)) +#define SGX_EMA_ALLOC_FLAGS_MASK SGX_EMA_ALLOC_FLAGS(0xFF) + +/* Only reserve an address range, no physical memory committed.*/ +#define SGX_EMA_RESERVE SGX_EMA_ALLOC_FLAGS(1) + +/* Reserve an address range and commit physical memory. */ +#define SGX_EMA_COMMIT_NOW SGX_EMA_ALLOC_FLAGS(2) + +/* Reserve an address range and commit physical memory on demand.*/ +#define SGX_EMA_COMMIT_ON_DEMAND SGX_EMA_ALLOC_FLAGS(4) + +/* Always commit pages from higher to lower addresses, + * no gaps in addresses above the last committed. + */ +#define SGX_EMA_GROWSDOWN SGX_EMA_ALLOC_FLAGS(0x10) + +/* Always commit pages from lower to higher addresses, + * no gaps in addresses below the last committed. +*/ +#define SGX_EMA_GROWSUP SGX_EMA_ALLOC_FLAGS(0x20) + +/* Map addr must be exactly as requested. */ +#define SGX_EMA_FIXED SGX_EMA_ALLOC_FLAGS(0x40) + +/* bit 8 - 15 are page types. */ +#define SGX_EMA_PAGE_TYPE_SHIFT 8 +#define SGX_EMA_PAGE_TYPE(n) ((n) << SGX_EMA_PAGE_TYPE_SHIFT) +#define SGX_EMA_PAGE_TYPE_MASK SGX_EMA_PAGE_TYPE(0xFF) +#define SGX_EMA_PAGE_TYPE_TCS SGX_EMA_PAGE_TYPE(0x1) /* TCS page type. */ +#define SGX_EMA_PAGE_TYPE_REG SGX_EMA_PAGE_TYPE(0x2) /* regular page type, default if not specified. */ +#define SGX_EMA_PAGE_TYPE_TRIM SGX_EMA_PAGE_TYPE(0x4) /* TRIM page type. */ +#define SGX_EMA_PAGE_TYPE_SS_FIRST SGX_EMA_PAGE_TYPE(0x5) /* the first page in shadow stack. */ +#define SGX_EMA_PAGE_TYPE_SS_REST SGX_EMA_PAGE_TYPE(0x6) /* the rest pages in shadow stack. */ + +/* Use bit 24-31 for alignment masks. */ +#define SGX_EMA_ALIGNMENT_SHIFT 24 +/* + * Alignment (expressed in log2). Must be >= log2(PAGE_SIZE) and + * < # bits in a pointer (32 or 64). 
+ */ +#define SGX_EMA_ALIGNED(n) (((unsigned int)(n) << SGX_EMA_ALIGNMENT_SHIFT)) +#define SGX_EMA_ALIGNMENT_MASK SGX_EMA_ALIGNED(0xFFUL) +#define SGX_EMA_ALIGNMENT_64KB SGX_EMA_ALIGNED(16UL) +#define SGX_EMA_ALIGNMENT_16MB SGX_EMA_ALIGNED(24UL) +#define SGX_EMA_ALIGNMENT_4GB SGX_EMA_ALIGNED(32UL) + +/* Permissions flags */ +#define SGX_EMA_PROT_NONE 0x0 +#define SGX_EMA_PROT_READ 0x1 +#define SGX_EMA_PROT_WRITE 0x2 +#define SGX_EMA_PROT_EXEC 0x4 +#define SGX_EMA_PROT_READ_WRITE (SGX_EMA_PROT_READ|SGX_EMA_PROT_WRITE) +#define SGX_EMA_PROT_READ_EXEC (SGX_EMA_PROT_READ|SGX_EMA_PROT_EXEC) +#define SGX_EMA_PROT_READ_WRITE_EXEC (SGX_EMA_PROT_READ_WRITE|SGX_EMA_PROT_EXEC) +/* + * Allocate a new memory region in enclave address space (ELRANGE). + * @param[in] addr Starting address of the region, page aligned. If NULL is provided, + * then the function will select the starting address. + * @param[in] length Size of the region in bytes of multiples of page size. + * @param[in] flags A bitwise OR of flags describing committing mode, committing + * order, address preference, and page type. + * Flags should include exactly one of following for committing mode: + * - SGX_EMA_RESERVE: just reserve an address range, no EPC committed. + * To allocate memory on a reserved range, call this + * function again with SGX_EMA_COMMIT_ON_DEMAND or SGX_EMA_COMMIT_NOW. + * - SGX_EMA_COMMIT_NOW: reserves memory range and commit EPC pages. EAUG and + * EACCEPT are done on SGX2 platforms. + * - SGX_EMA_COMMIT_ON_DEMAND: reserves memory range, EPC pages + * are committed (EACCEPT) on demand upon #PF on SGX2 platforms. + * ORed with zero or one of the committing order flags for SGX2 platforms: + * - SGX_EMA_GROWSDOWN: always commit pages from higher to lower addresses, + * no gaps in addresses above the last committed. + * - SGX_EMA_GROWSUP: always commit pages from lower to higher addresses, + * no gaps in addresses below the last committed. + * Optionally ORed with + * - SGX_EMA_FIXED: allocate at fixed address, will return error if the + * requested address is in use. + * - SGX_EMA_ALIGNED(n): Align the region on a requested boundary. + * Fail if a suitable region cannot be found, + * The argument n specifies the binary logarithm of + * the desired alignment and must be at least 12. + * Optionally ORed with one of following page types: + * - SGX_EMA_PAGE_TYPE_REG: regular page type. This is the default if not specified. + * - SGX_EMA_PAGE_TYPE_SS_FIRST: the first page in shadow stack. + * - SGX_EMA_PAGE_TYPE_SS_REST: the rest page in shadow stack. + * + * @param[in] handler A custom handler for page faults in this region, NULL if + * no custom handling needed. + * @param[in] handler_private Private data for the @handler, which will be passed + * back when the handler is called. + * @param[out] out_addr Pointer to store the start address of allocated range. + * Set to valid address by the function on success, NULL otherwise. + * @retval 0 The operation was successful. + * @retval EACCES Region is outside enclave address space. + * @retval EEXIST Any page in range requested is in use and SGX_EMA_FIXED is set. + * @retval EINVAL Invalid alignment bouandary, i.e., n < 12 in SGX_EMA_ALIGNED(n). + * @retval ENOMEM Out of memory, or no free space to satisfy alignment boundary. 
+ */ +int sgx_mm_alloc(void *addr, size_t length, int flags, + sgx_enclave_fault_handler_t handler, void *handler_private, + void **out_addr); + +``` + +**Remarks:** +- Permissions of newly allocated regions are always SGX_EMA_PROT_READ|SGX_EMA_PROT_WRITE and of page + type SGX_EMA_PAGE_TYPE_REG, except for SGX_EMA_RESERVE mode regions which will have SGX_EMA_PROT_NONE. +- Once allocated by sgx_mm_alloc, a region will stay in the allocated state and become + deallocated once sgx_mm_dealloc is called. +- If sgx_mm_dealloc on a partial range of a previously allocated region, then the + region is split, and the freed range is deallocated. The remainder of the + region stays allocated. +- If all pages in the region are freed by sgx_mm_dealloc, then the whole region + is released, and the memory manager no longer tracks the region. + + +### sgx_mm_uncommit and sgx_mm_dealloc + +``` +/* + * Uncommit (trim) physical EPC pages in a previously committed range. + * The pages in the allocation are freed, but the address range is still reserved. + * @param[in] addr Page aligned start address of the region to be trimmed. + * @param[in] length Size in bytes of multiples of page size. + * @retval 0 The operation was successful. + * @retval EINVAL The address range is not allocated or outside enclave. + */ +int sgx_mm_uncommit(void *addr, size_t length); + +/* + * Deallocate the address range. + * The pages in the allocation are freed and the address range is released for future allocation. + * @param[in] addr Page aligned start address of the region to be freed and released. + * @param[in] length Size in bytes of multiples of page size. + * @retval 0 The operation was successful. + * @retval EINVAL The address range is not allocated or outside enclave. + */ +int sgx_mm_dealloc(void *addr, size_t length); + +``` + +### sgx_mm_modify_type, sgx_mm_modify_permissions + +``` +/* + * Change permissions of an allocated region. + * @param[in] addr Start address of the region, must be page aligned. + * @param[in] length Size in bytes of multiples of page size. + * @param[in] prot permissions bitwise OR of following with: + * - SGX_EMA_PROT_READ: Pages may be read. + * - SGX_EMA_PROT_WRITE: Pages may be written. + * - SGX_EMA_PROT_EXEC: Pages may be executed. + * @retval 0 The operation was successful. + * @retval EACCES Original page type can not be changed to target type. + * @retval EINVAL The memory region was not allocated or outside enclave + * or other invalid parameters that are not supported. + * @retval EPERM The request permissions are not allowed, e.g., by target page type or + * SELinux policy. + */ +int sgx_mm_modify_permissions(void *addr, size_t length, int prot); + +/* + * Change the page type of an allocated region. + * @param[in] addr Start address of the region, must be page aligned. + * @param[in] length Size in bytes of multiples of page size. + * @param[in] type page type, only SGX_EMA_PAGE_TYPE_TCS is supported. + * + * @retval 0 The operation was successful. + * @retval EACCES Original page type can not be changed to target type. + * @retval EINVAL The memory region was not allocated or outside enclave + * or other invalid parameters that are not supported. + * @retval EPERM Target page type is no allowed by this API, e.g., PT_TRIM, + * PT_SS_FIRST, PT_SS_REST. 
+ */ +int sgx_mm_modify_type(void *addr, size_t length, int type); + +``` +**Remarks:** +- The memory manager will track current permissions for each region, and can + determine whether new permissions require an OCall for EMODPR, e.g., RW<->RX, RW->R. +- These APIs should not be used to change EPC page type to PT_TRIM. Trimming pages + are done by sgx_mm_uncommit and sgx_mm_dealloc only. + + +### sgx_mm_commit + +``` + +/* + * Commit a partial or full range of memory allocated previously with SGX_EMA_COMMIT_ON_DEMAND. + * The API will return 0 if all pages in the requested range are successfully committed. + * Calling this API on pages already committed has no effect. + * @param[in] addr Page aligned starting address. + * @param[in] length Length of the region in bytes of multiples of page size. + * @retval 0 The operation was successful. + * @retval EINVAL Any requested page is not in any previously allocated regions, or + * outside the enclave address range. + * @retval EFAULT All other errors. + */ +int sgx_mm_commit(void *addr, size_t length); + +``` + +### sgx_mm_commit_data + +``` + +/* + * Load data into target pages within a region previously allocated by sgx_mm_alloc. + * This can be called to load data and set target permissions at the same time, + * e.g., dynamic code loading. The caller has verified data to be trusted and expected + * to be loaded to the target address range. Calling this API on pages already committed + * will fail. + * + * @param[in] addr Page aligned target starting addr. + * @param[in] length Length of data, in bytes of multiples of page size. + * @param[in] data Data of @length. + * @param[in] prot Target permissions. + * @retval 0 The operation was successful. + * @retval EINVAL Any page in requested address range is not previously allocated, or + * outside the enclave address range. + * @retval EPERM Any page in requested range is previously committed. + * @retval EPERM The target permissions are not allowed by OS security policy, + * e.g., SELinux rules. + */ +int sgx_mm_commit_data(void *addr, size_t length, uint8_t *data, int prot); + +``` +**Remarks:** +- The memory manager decides whether OCalls are needed to ask the OS to make Page Table Entry (PTE) +permissions changes. No separate sgx_mm_modify_permissions call is needed. + +Runtime Abstraction Layer +---------------------------------- + +To support and use the EMM, an SGX trusted runtime shall implement following +abstraction layer APIs. + +### Exception Handler Registration + +``` +/* + * The EMM page fault (#PF) handler. + * + * @param[in] pfinfo Info reported in the SSA MISC region for page fault. + * @retval SGX_EXCEPTION_CONTINUE_EXECUTION Success handling the exception. + * @retval SGX_EXCEPTION_CONTINUE_SEARCH The EMM does not handle the exception. + */ +typedef int (*sgx_mm_pfhandler_t)(const sgx_pfinfo *pfinfo); + +/* + * Register the EMM handler with the global exception handler registry + * The Runtime should ensure this handler is called first in case of + * a #PF before all other handlers. + * + * @param[in] pfhandler The EMM page fault handler. + * @retval true Success. + * @retval false Failure. + */ +bool sgx_mm_register_pfhandler(sgx_mm_pfhandler_t pfhandler); + +/* + * Unregister the EMM handler with the global exception handler registry. + * @param[in] pfhandler The EMM page fault handler. + * @retval true Success. + * @retval false Failure. 
+ */ +bool sgx_mm_unregister_pfhandler(sgx_mm_pfhandler_t pfhandler); + +``` + +### OCalls + +``` +/* + * Call OS to reserve region for EAUG, immediately or on-demand. + * + * @param[in] addr Desired page aligned start address, NULL if no desired address. + * @param[in] length Size of the region in bytes of multiples of page size. + * @param[in] flags A bitwise OR of flags describing committing mode, committing + * order, address preference, page type. The untrusted side. + * implementation should always invoke mmap syscall with MAP_SHARED|MAP_FIXED, and + * translate following additional bits to proper parameters invoking mmap or other SGX specific + * syscall(s) provided by the kernel. + * The flags param of this interface should include exactly one of following for committing mode: + * - SGX_EMA_COMMIT_NOW: reserves memory range with SGX_EMA_PROT_READ|SGX_EMA_PROT_WRITE, if supported, + * kernel is given a hint to EAUG EPC pages for the area as soon as possible. + * - SGX_EMA_COMMIT_ON_DEMAND: reserves memory range, EPC pages can be EAUGed upon #PF. + * ORed with zero or one of the committing order flags: + * - SGX_EMA_GROWSDOWN: if supported, a hint given for the kernel to EAUG pages from higher + * to lower addresses, no gaps in addresses above the last committed. + * - SGX_EMA_GROWSUP: if supported, a hint given for the kernel to EAUG pages from lower + * to higher addresses, no gaps in addresses below the last committed. + * Optionally ORed with one of following page types: + * - SGX_EMA_PAGE_TYPE_REG: regular page type. This is the default if not specified. + * - SGX_EMA_PAGE_TYPE_SS_FIRST: the first page in shadow stack. + * - SGX_EMA_PAGE_TYPE_SS_REST: the rest page in shadow stack. + * @retval 0 The operation was successful. + * @retval EINVAL Any parameter passed in is not valid. + * @retval errno Error as reported by dependent syscalls, e.g., mmap(). + */ +int sgx_mm_alloc_ocall(uint64_t addr, size_t length, int flags); + +/* + * Call OS to change permissions, type, or notify EACCEPT done after TRIM. + * + * @param[in] addr Start address of the memory to change protections. + * @param[in] length Length of the area. This must be a multiple of the page size. + * @param[in] flags_from The original EPCM flags of the EPC pages to be modified. + * Must be bitwise OR of following: + * SGX_EMA_PROT_READ + * SGX_EMA_PROT_WRITE + * SGX_EMA_PROT_EXEC + * SGX_EMA_PAGE_TYPE_REG: regular page, changeable to TRIM and TCS + * SGX_EMA_PAGE_TYPE_TRIM: signal to the kernel EACCEPT is done for TRIM pages. + * @param[in] flags_to The target EPCM flags. This must be bitwise OR of following: + * SGX_EMA_PROT_READ + * SGX_EMA_PROT_WRITE + * SGX_EMA_PROT_EXEC + * SGX_EMA_PAGE_TYPE_TRIM: change the page type to PT_TRIM. Note the address + * range for trimmed pages may still be reserved by enclave with + * proper permissions. + * SGX_EMA_PAGE_TYPE_TCS: change the page type to PT_TCS + * @retval 0 The operation was successful. + * @retval EINVAL A parameter passed in is not valid. + * @retval errno Error as reported by dependent syscalls, e.g., mprotect(). + */ + +int sgx_mm_modify_ocall(uint64_t addr, size_t length, int flags_from, int flags_to); + +``` + +### Other Utilities + +``` +/* + * Define a mutex and create/lock/unlock/destroy functions. 
+ */
+typedef struct _sgx_mm_mutex sgx_mm_mutex;
+sgx_mm_mutex *sgx_mm_mutex_create(void);
+int sgx_mm_mutex_lock(sgx_mm_mutex *mutex);
+int sgx_mm_mutex_unlock(sgx_mm_mutex *mutex);
+int sgx_mm_mutex_destroy(sgx_mm_mutex *mutex);
+
+/*
+ * Check whether the given buffer is strictly within the enclave.
+ *
+ * Check whether the buffer given by the **ptr** and **size** parameters is
+ * strictly within the enclave's memory. If so, return true. If any
+ * portion of the buffer lies outside the enclave's memory, return false.
+ *
+ * @param[in] ptr The pointer to the buffer.
+ * @param[in] size The size of the buffer.
+ *
+ * @retval true The buffer is strictly within the enclave.
+ * @retval false At least some part of the buffer is outside the enclave, or
+ *         the arguments are invalid. For example, if **ptr** is null or **size**
+ *         causes arithmetic operations to wrap.
+ *
+ */
+bool sgx_mm_is_within_enclave(const void *ptr, size_t size);
+
+```
+
+### Support for EMM Initialization
+
+In addition to implementing the abstraction layer APIs, a runtime shall provide
+initial enclave memory layout information to the EMM during the early
+initialization phase of the enclave.
+The memory manager must be initialized in the first ECALL (ECMD_INIT_ENCLAVE in
+the Intel SGX SDK) before any other clients can use it. Therefore, the code and
+data of the memory manager will be part of the initial enclave image loaded
+with EADD before EINIT, and a part of the trusted runtime.
+
+The trusted runtime should enumerate all initial committed regions (code,
+data, heap, stack, TCS, and SSA), and call the EMM internal APIs to set up
+initial entries in the EMA list to track existing regions and mark some
+of them as not modifiable by EMM public APIs. The runtime also ensures there is
+enough reserved space on the heap for the EMM to create the initial EMA list and
+its entries. Once initialized, the memory manager can reserve its own
+space for future expansion of the EMA list, and special EMAs to hold
+EMA objects. To keep it simple, the expansion can be done eagerly: commit
+more pages for the EMA list once the unused committed space in the EMA List Region
+falls below a certain threshold.
+
+Alternative option: At build time, the enclave signing tool can precalculate
+and fill in EMA entries that hold info on initial regions to be committed by
+EADD during enclave load. The calculated start addresses in these EMAs can be
+relative to enclave secs->base. The runtime can patch those entries at
+initialization by adding secs->base. The EMM can directly use those EMAs as
+the initial entries of the EMA list. It only needs to reserve and commit
+a number of additional pages for future EMA list expansion.
+
+
+### EMM Private APIs for Trusted Runtimes
+These private APIs can be used by the trusted runtime to reserve and allocate
+regions not accessible from the public APIs. They have identical signatures
+to their public API counterparts, with the "sgx_mm_" prefix replaced by the
+"mm_" prefix. The main difference is that the private mm_alloc allows an extra
+flag, SGX_EMA_SYSTEM, to be passed in.
+
+```
+
+#define SGX_EMA_SYSTEM SGX_EMA_ALLOC_FLAGS(0x80) /* EMA reserved by system */
+
+/*
+ * Initialize an EMA. This can be used to set up EMAs to account for regions that
+ * are loaded and initialized with EADD before EINIT.
+ * @param[in] addr Starting address of the region, page aligned. If NULL is provided,
+ *            then the function will select the starting address.
+ * @param[in] size Size of the region in multiples of page size in bytes.
+ * @param[in] flags SGX_EMA_SYSTEM, or SGX_EMA_SYSTEM | SGX_EMA_RESERVE + * bitwise ORed with one of following page types: + * - SGX_EMA_PAGE_TYPE_REG: regular page type. This is the default if not specified. + * - SGX_EMA_PAGE_TYPE_TCS: TCS page. + * - SGX_EMA_PAGE_TYPE_SS_FIRST: the first page in shadow stack. + * - SGX_EMA_PAGE_TYPE_SS_REST: the rest page in shadow stack. + * @param[in] prot permissions, either SGX_EMA_PROT_NONE or a bitwise OR of following with: + * - SGX_EMA_PROT_READ: Pages may be read. + * - SGX_EMA_PROT_WRITE: Pages may be written. + * - SGX_EMA_PROT_EXEC: Pages may be executed. + * @param[in] handler A custom handler for page faults in this region, NULL if + * no custom handling needed. + * @param[in] handler_private Private data for the @handler, which will be passed + * back when the handler is called. + * @retval 0 The operation was successful. + * @retval EACCES Region is outside enclave address space. + * @retval EEXIST Any page in range requested is in use. + * @retval EINVAL Invalid page type, flags, or addr and length are not page aligned. + */ +int mm_init_ema(void *addr, size_t size, int flags, int prot, + sgx_enclave_fault_handler_t handler, + void *handler_private); +/** + * Same as sgx_mm_alloc, SGX_EMA_SYSTEM can be OR'ed with flags to indicate + * that the EMA can not be modified thru public APIs. + */ +int mm_alloc(void *addr, size_t size, uint32_t flags, + sgx_enclave_fault_handler_t handler, void *private_data, void** out_addr); +int mm_dealloc(void *addr, size_t size); +int mm_uncommit(void *addr, size_t size); +int mm_commit(void *addr, size_t size); +int mm_commit_data(void *addr, size_t size, uint8_t *data, int prot); +int mm_modify_type(void *addr, size_t size, int type); +int mm_modify_permissions(void *addr, size_t size, int prot); + +``` + +Internal APIs and Structures +------------------------------------- + +The following are internal functions and structures to be used by the EMM implementation. +They can evolve over time, and are shown here for reference only. + +### Enclave Memory Area (EMA) struct + +Each enclave has a global doubly linked EMA list to keep track of all dynamically +allocated regions in enclave address space (ELRANGE). + +``` +typedef struct _ema_t { + size_t start_addr; // starting address, should be on a page boundary. + size_t size; // in bytes of multiples of page size. + uint32_t alloc_flags; // SGX_EMA_RESERVE, SGX_EMA_COMMIT_NOW, SGX_EMA_COMMIT_ON_DEMAND, + // OR'ed with SGX_EMA_SYSTEM, SGX_EMA_GROWSDOWN, ENA_GROWSUP. + uint64_t si_flags; // SGX_EMA_PROT_NONE, SGX_EMA_PROT_READ |{SGX_EMA_PROT_WRITE, SGX_EMA_PROT_EXEC}. + // Or'd with one of SGX_EMA_PAGE_TYPE_REG, SGX_EMA_PAGE_TYPE_TCS, SGX_EMA_PAGE_TYPE_TRIM. + ema_bit_array* eaccept_map; // bitmap for EACCEPT status, bit 0 in eaccept_map[0] for the page at start address. + // bit i in eaccept_map[j] for page at start_address+(i+j<<3)<<12. + sgx_mutex_t* lock; // lock to prevent concurrent modification. + int transition; // state to indicate whether a transition in progress, e.g page type/permission changes. + sgx_enclave_fault_handler_t + h; // custom PF handler (for EACCEPTCOPY use). + void* hprivate; // private data for handler. + _ema_t* next; // next in doubly linked list. + _ema_t* prev; // prev in doubly linked list. +} ema_t; + +``` + **Remarks:** + - Accesses to the list (find, insert, remove EMAs) are synchronized for thread-safety. + - Initial implementation will also have one lock per EMA to synchronize access and + modifications to the same EMA. 
We may optimize this as needed.
+
+### SGX primitives
+
+```
+typedef struct _sec_info_t
+{
+    uint64_t flags;
+    uint64_t reserved[7];
+} sec_info_t;
+
+// EACCEPT
+int do_eaccept(const sec_info_t* si, size_t addr);
+// EMODPE
+int do_emodpe(const sec_info_t* si, size_t addr);
+// EACCEPTCOPY
+int do_eacceptcopy(const sec_info_t* si, size_t dest, size_t src);
+
+```
+
+
+Metadata, File format
+---------------------------------------
+
+The enclave metadata and file format are runtime specific. A detailed design is
+out of scope of this document.
+
+The enclave file is required to include metadata describing the memory layout
+of the initial code and data (e.g., program headers and PT_LOAD segments in an
+ELF file), and any regions reserved for special purposes, e.g., a minimal heap,
+stacks, TCS areas, and SSAs for the expected minimal number of threads.
+The runtime reads this information to populate the initial EMAs described in
+the section above on [Support for EMM Initialization](#support-for-emm-initialization).
diff --git a/sdk/emm/design_docs/images/SGX2_alloc_direct.svg b/sdk/emm/design_docs/images/SGX2_alloc_direct.svg
new file mode 100644
index 000000000..2c5849022
[SVG sequence diagram: direct allocation. The enclave issues OCALL_mmap; the untrusted runtime calls mmap(addr, len, PROT_READ | PROT_WRITE, MAP_SHARED|MAP_POPULATE|MAP_FIXED, fd); the kernel decides to EAUG the EPC pages now; the enclave then EACCEPTs each page and uses the new pages. Otherwise this reduces to the #PF based allocation flow.]
diff --git a/sdk/emm/design_docs/images/SGX2_alloc_pf.svg b/sdk/emm/design_docs/images/SGX2_alloc_pf.svg
new file mode 100644
index 000000000..a753a970f
[SVG sequence diagram: #PF based allocation. The enclave issues OCALL_mmap; the untrusted runtime calls mmap(addr, len, PROT_READ | PROT_WRITE, MAP_SHARED|MAP_FIXED, fd); for each page, EACCEPT in the enclave triggers a #PF, the kernel EAUGs the page and ERESUMEs back to the AEP trampoline, and the enclave then uses the new pages. If the kernel EAUGs eagerly, this reduces to the direct allocation flow with MAP_POPULATE.]
diff --git a/sdk/emm/design_docs/images/SGX2_eaccept.svg b/sdk/emm/design_docs/images/SGX2_eaccept.svg
new file mode 100644
index 000000000..70692d996
[SVG sequence diagram: dynamic code loading with EACCEPTCOPY, kernel EAUGs on #PF. After OCALL_mmap and mmap(addr, len, PROT_READ | PROT_WRITE, MAP_SHARED|MAP_FIXED, fd), executing code not yet loaded raises a #PF (EPCM.pending) delivered through the vDSO callback to the enclave ExceptionHandler; the handler loads the code page with EACCEPTCOPY(code, RX), setting EPCM.RX, and requests OCall_mprotect(RX) so the kernel updates PTE.RW->RX; ERESUME retries and the code executes.]
diff --git a/sdk/emm/design_docs/images/SGX2_eaccept2.svg b/sdk/emm/design_docs/images/SGX2_eaccept2.svg
new file mode 100644
index 000000000..03cdaed3a
[SVG sequence diagram: dynamic code loading with EACCEPTCOPY, kernel EAUGs each page directly at mmap time (EPCM.pending=1). Same flow as above: the ExceptionHandler loads the code page with EACCEPTCOPY(code, RX), then OCall_mprotect(RX) / mprotect(RX) updates PTE.RW->RX before the code executes.]
diff --git a/sdk/emm/design_docs/images/SGX2_perms.svg b/sdk/emm/design_docs/images/SGX2_perms.svg
new file mode 100644
index 000000000..842b07140
[SVG sequence diagram: permission change. The enclave issues ocall_mprotect(addr, len, perms); the untrusted runtime calls mprotect(addr, len, perms); for each page where perms < previous perms the kernel performs EMODPR(perms), updates the PTE, ETRACKs and sends IPIs; the enclave then performs EMODPE(perms) for each page and EACCEPT(perms) if EMODPR was done.]
diff --git a/sdk/emm/design_docs/images/SGX2_tcs.svg b/sdk/emm/design_docs/images/SGX2_tcs.svg
new file mode 100644
index 000000000..f07613d25
[SVG sequence diagram: TCS conversion, runtime specific, after allocating a regular page. The enclave initializes the TCS content and requests mprotect(addr, len, PT_TCS); the kernel performs EMODT(PT_TCS), ETRACKs and sends IPIs; the enclave EACCEPT(PT_TCS); the untrusted runtime then associates an OS thread with the new TCS and enters the enclave via start_thread(addr, ecall#, ...).]
diff --git a/sdk/emm/design_docs/images/SGX2_trim.svg b/sdk/emm/design_docs/images/SGX2_trim.svg
new file mode 100644
index 000000000..6f047ae00
[SVG sequence diagram: trim and removal. The enclave issues ocall_mprotect(addr, len, TRIM); the untrusted runtime calls mprotect(addr, len, PROT_TRIM); the kernel performs EMODT(PT_TRIM), ETRACKs and sends IPIs for each page; the enclave performs EACCEPT(PT_TRIM) for each page and records the range as deallocated; a second ocall_mprotect(addr, len) leads to mprotect(addr, len, PROT_NONE) and the kernel EREMOVEs and unmaps the PTE for each PT_TRIM page.]
From
151a3bb927ded59dd68cac0888a4e9c46284ef84 Mon Sep 17 00:00:00 2001 From: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> Date: Tue, 8 Mar 2022 16:31:36 -0800 Subject: [PATCH 11/96] TRTS: use a separate spinlock implementation for EMM This is to avoid unneeded ocall references in regular mutex implementation, which requires every enclave to import those ocalls in its EDL even if it does not need the mutex for synchronization. Signed-off-by: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> --- common/inc/internal/sethread_spinlock.h | 59 ++++++++++++ sdk/tlibthread/Makefile | 1 + sdk/tlibthread/sethread_spinlock.cpp | 118 ++++++++++++++++++++++++ sdk/trts/ema_rt.c | 18 ++-- 4 files changed, 187 insertions(+), 9 deletions(-) create mode 100644 common/inc/internal/sethread_spinlock.h create mode 100644 sdk/tlibthread/sethread_spinlock.cpp diff --git a/common/inc/internal/sethread_spinlock.h b/common/inc/internal/sethread_spinlock.h new file mode 100644 index 000000000..2c7a1863a --- /dev/null +++ b/common/inc/internal/sethread_spinlock.h @@ -0,0 +1,59 @@ + +/* + * Copyright (C) 2022 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#ifndef _SE_THREAD_SPINLOCK_H_ +#define _SE_THREAD_SPINLOCK_H_ +#include "sgx_thread.h" +/** a recursive spin lock */ +typedef struct _sgx_thread_spinlock_t +{ + size_t m_refcount; /* number of recursive calls */ + volatile uint32_t m_lock; /* use sgx_spinlock_t */ + sgx_thread_t m_owner; +} sgx_thread_spinlock_t; + +#define SGX_THREAD_RECURSIVE_SPINLOCK_INITIALIZER \ + {0, 0, SGX_THREAD_T_NULL} +#ifdef __cplusplus +extern "C" { +#endif + +int sgx_thread_spin_init(sgx_thread_spinlock_t *mutex); +int sgx_thread_spin_destroy(sgx_thread_spinlock_t *mutex); + +int sgx_thread_spin_trylock(sgx_thread_spinlock_t *mutex); +int sgx_thread_spin_unlock(sgx_thread_spinlock_t *mutex); +#ifdef __cplusplus +} +#endif + +#endif diff --git a/sdk/tlibthread/Makefile b/sdk/tlibthread/Makefile index a80af671a..fdca14bdf 100755 --- a/sdk/tlibthread/Makefile +++ b/sdk/tlibthread/Makefile @@ -40,6 +40,7 @@ CPPFLAGS := -I$(COMMON_DIR)/inc/internal \ -I$(LINUX_PSW_DIR) OBJ := sethread_mutex.o \ + sethread_spinlock.o \ sethread_rwlock.o \ sethread_cond.o \ sethread_utils.o diff --git a/sdk/tlibthread/sethread_spinlock.cpp b/sdk/tlibthread/sethread_spinlock.cpp new file mode 100644 index 000000000..cc2744ae3 --- /dev/null +++ b/sdk/tlibthread/sethread_spinlock.cpp @@ -0,0 +1,118 @@ +/* + * Copyright (C) 2022 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + + +#include +#include +#include + +#include "util.h" +#include "sethread_internal.h" +#include "sethread_spinlock.h" +int sgx_thread_spin_init(sgx_thread_spinlock_t *mutex) +{ + CHECK_PARAMETER(mutex); + + mutex->m_refcount = 0; + mutex->m_owner = SGX_THREAD_T_NULL; + mutex->m_lock = SGX_SPINLOCK_INITIALIZER; + + return 0; +} + +int sgx_thread_spin_destroy(sgx_thread_spinlock_t *mutex) +{ + CHECK_PARAMETER(mutex); + + SPIN_LOCK(&mutex->m_lock); + if (mutex->m_owner != SGX_THREAD_T_NULL) { + SPIN_UNLOCK(&mutex->m_lock); + return EBUSY; + } + + mutex->m_refcount = 0; + SPIN_UNLOCK(&mutex->m_lock); + + return 0; +} + +int sgx_thread_spin_trylock(sgx_thread_spinlock_t *mutex) +{ + CHECK_PARAMETER(mutex); + + sgx_thread_t self = (sgx_thread_t)get_thread_data(); + + SPIN_LOCK(&mutex->m_lock); + + if (mutex->m_owner == self) { + mutex->m_refcount++; + SPIN_UNLOCK(&mutex->m_lock); + return 0; + } + + if (mutex->m_owner == SGX_THREAD_T_NULL) { + mutex->m_owner = self; + mutex->m_refcount++; + SPIN_UNLOCK(&mutex->m_lock); + return 0; + } + + SPIN_UNLOCK(&mutex->m_lock); + return EBUSY; +} + +int sgx_thread_spin_unlock(sgx_thread_spinlock_t *mutex) +{ + CHECK_PARAMETER(mutex); + + sgx_thread_t self = (sgx_thread_t)get_thread_data(); + + SPIN_LOCK(&mutex->m_lock); + /* if the mutux is not locked by anyone */ + if(mutex->m_owner == SGX_THREAD_T_NULL) { + SPIN_UNLOCK(&mutex->m_lock); + return EPERM; + } + + /* if the mutex is locked by another thread */ + if (mutex->m_owner != self) { + SPIN_UNLOCK(&mutex->m_lock); + return EPERM; + } + + /* the mutex is locked by current thread */ + if (--mutex->m_refcount == 0) { + mutex->m_owner = SGX_THREAD_T_NULL; + } + + SPIN_UNLOCK(&mutex->m_lock); + return 0; +} diff --git a/sdk/trts/ema_rt.c b/sdk/trts/ema_rt.c index a3ce79eba..db36e0075 100644 --- a/sdk/trts/ema_rt.c +++ b/sdk/trts/ema_rt.c @@ -28,7 +28,7 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ -#include "sgx_thread.h" +#include "sethread_spinlock.h" #include #include #include @@ -136,7 +136,7 @@ bool sgx_mm_unregister_pfhandler(sgx_mm_pfhandler_t pfhandler) } typedef struct _sgx_mm_mutex { - sgx_thread_mutex_t m; + sgx_thread_spinlock_t m; }sgx_mm_mutex; static int sgx_mm_mutex_init(sgx_mm_mutex* mutex) @@ -146,7 +146,7 @@ static int sgx_mm_mutex_init(sgx_mm_mutex* mutex) // stack expansion/heap expansion during those calls as we use // regular enclave stack and heap for internal processing and // book keeping. - mutex->m = (sgx_thread_mutex_t)SGX_THREAD_RECURSIVE_MUTEX_INITIALIZER; + mutex->m = (sgx_thread_spinlock_t)SGX_THREAD_RECURSIVE_SPINLOCK_INITIALIZER; return 0; } @@ -163,27 +163,27 @@ sgx_mm_mutex *sgx_mm_mutex_create() int sgx_mm_mutex_lock(sgx_mm_mutex* mutex) { assert(mutex != NULL); - //!FIXME //Intel SDK does not have // WAKE/WAIT event ocalls as builtins. And TCS // pages are addred in a "utility" thread which // does not have those in ocall table for the ecall. - // Therefore we must not make ocalls for synchronization. - // OE has builtin ocalls for wait/wake so no trylock needed - while ( sgx_thread_mutex_trylock(&mutex->m)); + // Additionally, stack expansion is done in 1st phase + // exception handler which does not support ocalls currently. + // Therefore we do not make ocalls for synchronization. 
+ while ( sgx_thread_spin_trylock(&mutex->m)); return 0; } int sgx_mm_mutex_unlock(sgx_mm_mutex* mutex) { assert(mutex != NULL); - return sgx_thread_mutex_unlock(&mutex->m); + return sgx_thread_spin_unlock(&mutex->m); } int sgx_mm_mutex_destroy(sgx_mm_mutex* mutex) { assert(mutex != NULL); - int ret = sgx_thread_mutex_destroy(&mutex->m); + int ret = sgx_thread_spin_destroy(&mutex->m); free(mutex); return ret; } From fbe7e62480a03ba79a74486e0a404529588e5319 Mon Sep 17 00:00:00 2001 From: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> Date: Tue, 22 Mar 2022 04:20:30 +0000 Subject: [PATCH 12/96] Revert "urts: check if /dev is mounted with noexec" This reverts commit 435c34b3afeca1e4590e1ba98291159d76a403a3. It does not work for DCAP driver --- psw/urts/linux/edmm_utility.cpp | 17 +---------------- 1 file changed, 1 insertion(+), 16 deletions(-) diff --git a/psw/urts/linux/edmm_utility.cpp b/psw/urts/linux/edmm_utility.cpp index c8569f78a..8ce644a4c 100644 --- a/psw/urts/linux/edmm_utility.cpp +++ b/psw/urts/linux/edmm_utility.cpp @@ -34,7 +34,6 @@ #include "se_trace.h" #include "isgx_user.h" #include "cpuid.h" -#include "arch.h" #include #include #include @@ -45,7 +44,7 @@ #include #define SGX_URTS_CMD "for f in $(find /usr/$(basename $(gcc -print-multi-os-directory)) -name 'libsgx_urts.so' 2> /dev/null); do strings $f|grep 'SGX_URTS_VERSION_2'; done" #define SGX_CPUID 0x12 -#include + /* is_urts_support_edmm() * Parameters: * None. @@ -127,20 +126,6 @@ bool get_driver_type(int *driver_type) else { sgx_driver_type = SGX_DRIVER_IN_KERNEL; - -#define ERR_LOG \ -"mmap() failed for PROT_EXEC|PROT_READ.\n" \ -" Was /dev mounted with noexec set?\n" \ -" If so, remount it with exec: sudo mount -o remount,exec /dev\n" - - void* ptr = mmap(NULL, SE_PAGE_SIZE, PROT_READ|PROT_EXEC, MAP_SHARED, hdev, 0); - if (ptr == (void *)-1) { - SE_PROD_LOG(ERR_LOG); - close(hdev); - return false; - } - munmap(ptr, SE_PAGE_SIZE); - } close(hdev); From 0e57da80df87847da9e9c685d94b83ea40b18329 Mon Sep 17 00:00:00 2001 From: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> Date: Mon, 4 Apr 2022 20:16:56 -0700 Subject: [PATCH 13/96] enclave_common and urts: update for EDMM kernel patch V3 Signed-off-by: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> --- psw/enclave_common/sgx_mm_ocalls.cpp | 51 +---------------- psw/urts/linux/edmm_utility.cpp | 2 +- psw/urts/linux/isgx_user.h | 82 +++++++++++----------------- 3 files changed, 34 insertions(+), 101 deletions(-) diff --git a/psw/enclave_common/sgx_mm_ocalls.cpp b/psw/enclave_common/sgx_mm_ocalls.cpp index 286f68848..480825080 100644 --- a/psw/enclave_common/sgx_mm_ocalls.cpp +++ b/psw/enclave_common/sgx_mm_ocalls.cpp @@ -81,7 +81,7 @@ uint64_t get_offset_for_address(uint64_t target_address) static int emodt(int fd, uint64_t addr, size_t length, uint64_t type) { - struct sgx_enclave_modt ioc; + struct sgx_enclave_modify_type ioc; if (length == 0) return EINVAL; @@ -156,7 +156,7 @@ static int trim_accept(int fd, uint64_t addr, size_t length) } static int emodpr(int fd, uint64_t addr, size_t length, uint64_t prot) { - struct sgx_enclave_restrict_perm ioc; + struct sgx_enclave_restrict_permissions ioc; if (length == 0) return EINVAL; @@ -192,44 +192,6 @@ static int emodpr(int fd, uint64_t addr, size_t length, uint64_t prot) return 0; } -static int relaxp(int fd, uint64_t addr, uint64_t length, uint64_t prot) -{ - struct sgx_enclave_relax_perm ioc; - if (length == 0) - return EINVAL; - - SE_TRACE(SE_TRACE_DEBUG, - "RELAX_PERM for 0x%llX ( 
%llX ), prot: 0x%llX\n", - addr, length, prot); - memset(&ioc, 0, sizeof(ioc)); - - sec_info_t sec_info; - memset(&sec_info, 0, sizeof(sec_info_t)); - sec_info.flags = prot; - - ioc.secinfo = POINTER_TO_U64(&sec_info); - ioc.offset = get_offset_for_address(addr); - ioc.length = length; - - do - { - int ret = ioctl(fd, SGX_IOC_ENCLAVE_RELAX_PERMISSIONS, &ioc); - //TODO: use error code - if (ret && ioc.count == 0 && errno != EBUSY ) - { //total failure - SE_TRACE(SE_TRACE_WARNING, - "RELAX failed, error = %d for 0x%llX ( %llX ), prot: 0x%llX\n", - errno, addr, length, prot); - return errno; - } - ioc.length -= ioc.count; - ioc.offset += ioc.count; - ioc.count = 0; - } while (ioc.length != 0); - - return 0; - -} // legacy support for EDMM @@ -299,10 +261,6 @@ static int emodpr_legacy(int fd, uint64_t addr, uint64_t size, uint64_t flag) return SGX_SUCCESS; } -static int relaxp_legacy(int, uint64_t, uint64_t, uint64_t) -{ - return 0; -} /* * Call OS to change permissions, type, or notify EACCEPT done after TRIM. * @@ -349,7 +307,6 @@ extern "C" int COMM_API enclave_modify(uint64_t addr, size_t length, int flags_f function _trim_accept = trim_accept; function _mktcs = mktcs; function _emodpr = emodpr; - function _relaxp = relaxp; int fd = get_file_handle_from_address((void *)addr); if (s_driver_type == SGX_DRIVER_OUT_OF_TREE) { @@ -357,7 +314,6 @@ extern "C" int COMM_API enclave_modify(uint64_t addr, size_t length, int flags_f _trim_accept = trim_accept_legacy; _mktcs = mktcs_legacy; _emodpr = emodpr_legacy; - _relaxp = relaxp_legacy; fd = s_hdevice; } if(fd == -1) return EINVAL; @@ -443,9 +399,6 @@ extern "C" int COMM_API enclave_modify(uint64_t addr, size_t length, int flags_f //EACCEPT needs at least pte.R, PROT_NONE case done above. if (prot_to != PROT_NONE) { - ret = _relaxp(fd, addr, length, prot_to); - if(ret) - return ret; ret = mprotect((void *)addr, length, prot_to); if (ret == -1) return errno; diff --git a/psw/urts/linux/edmm_utility.cpp b/psw/urts/linux/edmm_utility.cpp index 8ce644a4c..a7e9f73ce 100644 --- a/psw/urts/linux/edmm_utility.cpp +++ b/psw/urts/linux/edmm_utility.cpp @@ -239,7 +239,7 @@ extern "C" bool is_driver_support_edmm(int hdevice) if (-1 == hdevice){ if(!open_se_device(SGX_DRIVER_IN_KERNEL, &hdevice)) return false; - struct sgx_enclave_restrict_perm ioc; + struct sgx_enclave_restrict_permissions ioc; memset(&ioc, 0, sizeof(ioc)); int ret = ioctl(hdevice, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS, &ioc); diff --git a/psw/urts/linux/isgx_user.h b/psw/urts/linux/isgx_user.h index cccff44e4..0e29dae8e 100644 --- a/psw/urts/linux/isgx_user.h +++ b/psw/urts/linux/isgx_user.h @@ -110,14 +110,13 @@ enum sgx_page_flags { _IOW(SGX_MAGIC, 0x03, struct sgx_enclave_set_attribute) #define SGX_IOC_VEPC_REMOVE_ALL \ _IO(SGX_MAGIC, 0x04) -#define SGX_IOC_ENCLAVE_RELAX_PERMISSIONS \ - _IOWR(SGX_MAGIC, 0x05, struct sgx_enclave_relax_perm) #define SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS \ - _IOWR(SGX_MAGIC, 0x06, struct sgx_enclave_restrict_perm) + _IOWR(SGX_MAGIC, 0x05, struct sgx_enclave_restrict_permissions) #define SGX_IOC_ENCLAVE_MODIFY_TYPE \ - _IOWR(SGX_MAGIC, 0x07, struct sgx_enclave_modt) + _IOWR(SGX_MAGIC, 0x06, struct sgx_enclave_modify_type) #define SGX_IOC_ENCLAVE_REMOVE_PAGES \ - _IOWR(SGX_MAGIC, 0x08, struct sgx_enclave_remove_pages) + _IOWR(SGX_MAGIC, 0x07, struct sgx_enclave_remove_pages) + /* Legacy OOT driver support for EDMM */ #define SGX_IOC_ENCLAVE_EMODPR \ @@ -296,60 +295,41 @@ struct sgx_modification_param { unsigned long flags; }; - - /** - * struct 
sgx_enclave_relax_perm - parameters for ioctl - * %SGX_IOC_ENCLAVE_RELAX_PERMISSIONS - * @offset: starting page offset (page aligned relative to enclave base - * address defined in SECS) - * @length: length of memory (multiple of the page size) + * struct sgx_enclave_restrict_permissions - parameters for ioctl + * %SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS + * @offset: starting page offset (page aligned relative to enclave base + * address defined in SECS) + * @length: length of memory (multiple of the page size) * @secinfo: address for the SECINFO data containing the new permission bits - * for pages in range described by @offset and @length - * @count: (output) bytes successfully changed (multiple of page size) + * for pages in range described by @offset and @length + * @result: (output) SGX result code of ENCLS[EMODPR] function + * @count: (output) bytes successfully changed (multiple of page size) */ -struct sgx_enclave_relax_perm { - __u64 offset; - __u64 length; - __u64 secinfo; - __u64 count; +struct sgx_enclave_restrict_permissions { + __u64 offset; + __u64 length; + __u64 secinfo; + __u64 result; + __u64 count; }; /** - * struct sgx_enclave_restrict_perm - parameters for ioctl - * %SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS - * @offset: starting page offset (page aligned relative to enclave base - * address defined in SECS) - * @length: length of memory (multiple of the page size) - * @secinfo: address for the SECINFO data containing the new permission bits - * for pages in range described by @offset and @length - * @result: (output) SGX result code of ENCLS[EMODPR] function - * @count: (output) bytes successfully changed (multiple of page size) - */ -struct sgx_enclave_restrict_perm { - __u64 offset; - __u64 length; - __u64 secinfo; - __u64 result; - __u64 count; -}; - -/** - * struct sgx_enclave_modt - parameters for %SGX_IOC_ENCLAVE_MODIFY_TYPE - * @offset: starting page offset (page aligned relative to enclave base - * address defined in SECS) - * @length: length of memory (multiple of the page size) + * struct sgx_enclave_modify_type - parameters for %SGX_IOC_ENCLAVE_MODIFY_TYPE + * @offset: starting page offset (page aligned relative to enclave base + * address defined in SECS) + * @length: length of memory (multiple of the page size) * @secinfo: address for the SECINFO data containing the new type - * for pages in range described by @offset and @length - * @result: (output) SGX result code of ENCLS[EMODT] function - * @count: (output) bytes successfully changed (multiple of page size) + * for pages in range described by @offset and @length + * @result: (output) SGX result code of ENCLS[EMODT] function + * @count: (output) bytes successfully changed (multiple of page size) */ -struct sgx_enclave_modt { - __u64 offset; - __u64 length; - __u64 secinfo; - __u64 result; - __u64 count; +struct sgx_enclave_modify_type { + __u64 offset; + __u64 length; + __u64 secinfo; + __u64 result; + __u64 count; }; /** From ac72298987d60558697b6bf352cd15120acd29de Mon Sep 17 00:00:00 2001 From: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> Date: Mon, 4 Apr 2022 20:46:21 -0700 Subject: [PATCH 14/96] emm: update README for edmm V3 Signed-off-by: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> --- sdk/emm/README.md | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/sdk/emm/README.md b/sdk/emm/README.md index 7894d2ab2..12d83c782 100644 --- a/sdk/emm/README.md +++ b/sdk/emm/README.md @@ -16,10 +16,10 @@ implements the abstraction layer APIs as 
defined in [sgx_mm_rt_abstraction.h](in The instructions here are for developing and testing the EMM functionality only. Consult the main README of this repo for general usages. -**Note:** The kernel patch series for upstream are under review on LKML in [this thread](https://lore.kernel.org/linux-sgx/cover.1644274683.git.reinette.chatre@intel.com/). +**Note:** The kernel patch series for upstream are under review on LKML in [this thread](https://lore.kernel.org/linux-sgx/cover.1648847675.git.reinette.chatre@intel.com/). Please refer to the cover letter of the series for changes between versions. -This EMM implementation is based on the testing branch for the kernel hosted [here](https://github.com/rchatre/linux/tree/sgx/sgx2_submitted_v2_plus_rwx), which includes a temporary patch to allow pages EAUG'ed with RWX permissions. +This EMM implementation is based on the testing branch for the kernel hosted [here](https://github.com/rchatre/linux/tree/sgx/sgx2_submitted_v3_plus_rwx), which includes a temporary patch to allow pages EAUG'ed with RWX permissions. As the kernel interfaces evolve, this EMM implementation and/or interface may change. However, the goal is to minimize the EMM public API changes so that impact to upper layer implementations are minimized. @@ -33,11 +33,20 @@ On Ubuntu 18.04/20.04, follow the general instructions from [here](https://wiki. ``` $ git clone https://github.com/rchatre/linux.git $ cd linux -$ git checkout sgx/sgx2_submitted_v2_plus_rwx +$ git checkout sgx/sgx2_submitted_v3_plus_rwx ``` - For step 6, modify .config to set "CONFIG_X86_SGX=y". +**Note:** on Ubuntu 20.04, ensure that /dev does not have noexec set: +``` +mount | grep "/dev .*noexec" +``` +If so, remount it executable: +``` +sudo mount -o remount,exec /dev +``` + #### Verify kernel build and EDMM support At the root of the kernel source repo, ``` @@ -58,7 +67,7 @@ Build and Install SDK and PSW ``` $ git clone https://github.com/intel/linux-sgx.git $repo_root $ cd $repo_root -$ git checkout edmm_v2 +$ git checkout edmm_v3 ``` Following steps assume $repo_root is the top directory of the linux-sgx repo you cloned. 
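For orientation before the kernel-interface updates that follow, here is a minimal sketch of how enclave code could consume the EMM public APIs documented in the design document above, using dynamic code loading as the example. It is illustrative only and not part of the patch series: the public sgx_mm_alloc and sgx_mm_dealloc signatures are assumed to mirror the private mm_* counterparts listed in the design document, load_dynamic_code, code_buf and code_size are hypothetical names, and sizes are assumed to be multiples of the 4KB page size.

```
/* Sketch: dynamically load verified code with the EMM public APIs.
 * Assumes sgx_mm_alloc/sgx_mm_dealloc mirror the private mm_* signatures
 * from the design doc; code_buf/code_size are hypothetical, page-aligned.
 */
#include <stddef.h>
#include <stdint.h>
#include "sgx_mm.h"

int load_dynamic_code(uint8_t *code_buf, size_t code_size, void **out_entry)
{
    void *addr = NULL;

    /* Reserve an on-demand region; no pages are EAUGed or EACCEPTed yet. */
    int rc = sgx_mm_alloc(NULL, code_size, SGX_EMA_COMMIT_ON_DEMAND,
                          NULL, NULL, &addr);
    if (rc != 0)
        return rc;

    /* Load the verified bytes and set the final RX permissions in one step,
     * as described for sgx_mm_commit_data (EACCEPTCOPY under the hood). */
    rc = sgx_mm_commit_data(addr, code_size, code_buf,
                            SGX_EMA_PROT_READ | SGX_EMA_PROT_EXEC);
    if (rc != 0) {
        (void)sgx_mm_dealloc(addr, code_size);
        return rc;
    }

    *out_entry = addr;
    return 0;
}
```

Pages in a SGX_EMA_COMMIT_ON_DEMAND region that are not committed explicitly like this are expected to be EAUGed on #PF and EACCEPTed by the EMM page fault handler registered through sgx_mm_register_pfhandler.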
From 1c664eeddabb53bb4a1a3a92d320ba36b829fadc Mon Sep 17 00:00:00 2001 From: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> Date: Fri, 15 Apr 2022 06:34:17 -0700 Subject: [PATCH 15/96] PSW: update for kernel EDMM patch V4 Signed-off-by: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> --- psw/enclave_common/sgx_mm_ocalls.cpp | 16 ++++------------ psw/urts/linux/isgx_user.h | 25 +++++++++++++------------ sdk/emm/README.md | 8 ++++---- 3 files changed, 21 insertions(+), 28 deletions(-) diff --git a/psw/enclave_common/sgx_mm_ocalls.cpp b/psw/enclave_common/sgx_mm_ocalls.cpp index 480825080..ee378196c 100644 --- a/psw/enclave_common/sgx_mm_ocalls.cpp +++ b/psw/enclave_common/sgx_mm_ocalls.cpp @@ -81,7 +81,7 @@ uint64_t get_offset_for_address(uint64_t target_address) static int emodt(int fd, uint64_t addr, size_t length, uint64_t type) { - struct sgx_enclave_modify_type ioc; + struct sgx_enclave_modify_types ioc; if (length == 0) return EINVAL; @@ -90,16 +90,12 @@ static int emodt(int fd, uint64_t addr, size_t length, uint64_t type) addr, length, type); memset(&ioc, 0, sizeof(ioc)); - sec_info_t sec_info; - memset(&sec_info, 0, sizeof(sec_info_t)); - sec_info.flags = SGX_EMA_PAGE_TYPE(type); - - ioc.secinfo = POINTER_TO_U64(&sec_info);; + ioc.page_type = type; ioc.offset = get_offset_for_address(addr); ioc.length = length; do { - int ret = ioctl(fd, SGX_IOC_ENCLAVE_MODIFY_TYPE, &ioc); + int ret = ioctl(fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &ioc); //TODO: use error code if (ret && ioc.count == 0 && errno != EBUSY) { //total failure @@ -165,11 +161,7 @@ static int emodpr(int fd, uint64_t addr, size_t length, uint64_t prot) addr, length, prot); memset(&ioc, 0, sizeof(ioc)); - sec_info_t sec_info; - memset(&sec_info, 0, sizeof(sec_info_t)); - sec_info.flags = prot;//no shift - - ioc.secinfo = POINTER_TO_U64(&sec_info); + ioc.permissions = prot; ioc.offset = get_offset_for_address(addr); ioc.length = length; diff --git a/psw/urts/linux/isgx_user.h b/psw/urts/linux/isgx_user.h index 0e29dae8e..177b50276 100644 --- a/psw/urts/linux/isgx_user.h +++ b/psw/urts/linux/isgx_user.h @@ -4,7 +4,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2016 Intel Corporation. + * Copyright(c) 2016-2022 Intel Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -21,7 +21,7 @@ * * BSD LICENSE * - * Copyright(c) 2016 Intel Corporation. + * Copyright(c) 2016-2022 Intel Corporation. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -55,6 +55,7 @@ * Suresh Siddha * Serge Ayoun * Shay Katz-zamir + * Haitao Huang */ #ifndef _UAPI_ASM_X86_SGX_H @@ -112,8 +113,8 @@ enum sgx_page_flags { _IO(SGX_MAGIC, 0x04) #define SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS \ _IOWR(SGX_MAGIC, 0x05, struct sgx_enclave_restrict_permissions) -#define SGX_IOC_ENCLAVE_MODIFY_TYPE \ - _IOWR(SGX_MAGIC, 0x06, struct sgx_enclave_modify_type) +#define SGX_IOC_ENCLAVE_MODIFY_TYPES \ + _IOWR(SGX_MAGIC, 0x06, struct sgx_enclave_modify_types) #define SGX_IOC_ENCLAVE_REMOVE_PAGES \ _IOWR(SGX_MAGIC, 0x07, struct sgx_enclave_remove_pages) @@ -295,39 +296,39 @@ struct sgx_modification_param { unsigned long flags; }; + /** * struct sgx_enclave_restrict_permissions - parameters for ioctl * %SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS * @offset: starting page offset (page aligned relative to enclave base * address defined in SECS) * @length: length of memory (multiple of the page size) - * @secinfo: address for the SECINFO data containing the new permission bits - * for pages in range described by @offset and @length + * @permissions:new permission bits for pages in range described by @offset + * and @length * @result: (output) SGX result code of ENCLS[EMODPR] function * @count: (output) bytes successfully changed (multiple of page size) */ struct sgx_enclave_restrict_permissions { __u64 offset; __u64 length; - __u64 secinfo; + __u64 permissions; __u64 result; __u64 count; }; /** - * struct sgx_enclave_modify_type - parameters for %SGX_IOC_ENCLAVE_MODIFY_TYPE + * struct sgx_enclave_modify_type - parameters for %SGX_IOC_ENCLAVE_MODIFY_TYPES * @offset: starting page offset (page aligned relative to enclave base * address defined in SECS) * @length: length of memory (multiple of the page size) - * @secinfo: address for the SECINFO data containing the new type - * for pages in range described by @offset and @length + * @page_type: new type for pages in range described by @offset and @length * @result: (output) SGX result code of ENCLS[EMODT] function * @count: (output) bytes successfully changed (multiple of page size) */ -struct sgx_enclave_modify_type { +struct sgx_enclave_modify_types { __u64 offset; __u64 length; - __u64 secinfo; + __u64 page_type; __u64 result; __u64 count; }; diff --git a/sdk/emm/README.md b/sdk/emm/README.md index 12d83c782..262eb987b 100644 --- a/sdk/emm/README.md +++ b/sdk/emm/README.md @@ -16,10 +16,10 @@ implements the abstraction layer APIs as defined in [sgx_mm_rt_abstraction.h](in The instructions here are for developing and testing the EMM functionality only. Consult the main README of this repo for general usages. -**Note:** The kernel patch series for upstream are under review on LKML in [this thread](https://lore.kernel.org/linux-sgx/cover.1648847675.git.reinette.chatre@intel.com/). +**Note:** The kernel patch series for upstream are under review on LKML in [this thread](https://lore.kernel.org/lkml/64e8cf67aa564b317f4028aee9b3e77f3c1ce326.1649878359.git.reinette.chatre@intel.com/T/). Please refer to the cover letter of the series for changes between versions. -This EMM implementation is based on the testing branch for the kernel hosted [here](https://github.com/rchatre/linux/tree/sgx/sgx2_submitted_v3_plus_rwx), which includes a temporary patch to allow pages EAUG'ed with RWX permissions. 
+This EMM implementation is based on the testing branch for the kernel hosted [here](https://github.com/rchatre/linux/tree/sgx/sgx2_submitted_v4_plus_rwx), which includes a temporary patch to allow pages EAUG'ed with RWX permissions. As the kernel interfaces evolve, this EMM implementation and/or interface may change. However, the goal is to minimize the EMM public API changes so that impact to upper layer implementations are minimized. @@ -33,7 +33,7 @@ On Ubuntu 18.04/20.04, follow the general instructions from [here](https://wiki. ``` $ git clone https://github.com/rchatre/linux.git $ cd linux -$ git checkout sgx/sgx2_submitted_v3_plus_rwx +$ git checkout sgx/sgx2_submitted_v4_plus_rwx ``` - For step 6, modify .config to set "CONFIG_X86_SGX=y". @@ -67,7 +67,7 @@ Build and Install SDK and PSW ``` $ git clone https://github.com/intel/linux-sgx.git $repo_root $ cd $repo_root -$ git checkout edmm_v3 +$ git checkout edmm_v4 ``` Following steps assume $repo_root is the top directory of the linux-sgx repo you cloned. From 6f184700ce91abc2fd343bc78a5f861adb4238a3 Mon Sep 17 00:00:00 2001 From: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> Date: Sun, 8 May 2022 13:59:25 -0700 Subject: [PATCH 16/96] enclave_common: update for kernel patch V5 Signed-off-by: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> --- psw/enclave_common/sgx_mm_ocalls.cpp | 8 ++------ sdk/emm/README.md | 8 ++++---- 2 files changed, 6 insertions(+), 10 deletions(-) diff --git a/psw/enclave_common/sgx_mm_ocalls.cpp b/psw/enclave_common/sgx_mm_ocalls.cpp index ee378196c..758a75bc2 100644 --- a/psw/enclave_common/sgx_mm_ocalls.cpp +++ b/psw/enclave_common/sgx_mm_ocalls.cpp @@ -96,8 +96,7 @@ static int emodt(int fd, uint64_t addr, size_t length, uint64_t type) do { int ret = ioctl(fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &ioc); - //TODO: use error code - if (ret && ioc.count == 0 && errno != EBUSY) + if (ret && ioc.count == 0 && errno != EBUSY && errno != EAGAIN) { //total failure SE_TRACE(SE_TRACE_WARNING, "MODT failed, error = %d for 0x%llX ( %llX ), type: 0x%llX\n", @@ -169,7 +168,7 @@ static int emodpr(int fd, uint64_t addr, size_t length, uint64_t prot) { int ret = ioctl(fd, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS, &ioc); //TODO: use error code - if (ret && ioc.count == 0 && errno != EBUSY ) + if (ret && ioc.count == 0 && errno != EBUSY && errno!=EAGAIN ) { //total failure SE_TRACE(SE_TRACE_WARNING, "MODP failed, error = %d for 0x%llX ( %llX ), prot: 0x%llX\n", @@ -346,9 +345,6 @@ extern "C" int COMM_API enclave_modify(uint64_t addr, size_t length, int flags_f assert(type_from != SGX_EMA_PAGE_TYPE_TRIM); if (prot_to != prot_from) return EINVAL; - //user must be able to do EACCEPT - if (prot_to == PROT_NONE) - return EINVAL; return _trim(fd, addr, length); } diff --git a/sdk/emm/README.md b/sdk/emm/README.md index 262eb987b..dcf2e2b9c 100644 --- a/sdk/emm/README.md +++ b/sdk/emm/README.md @@ -16,10 +16,10 @@ implements the abstraction layer APIs as defined in [sgx_mm_rt_abstraction.h](in The instructions here are for developing and testing the EMM functionality only. Consult the main README of this repo for general usages. -**Note:** The kernel patch series for upstream are under review on LKML in [this thread](https://lore.kernel.org/lkml/64e8cf67aa564b317f4028aee9b3e77f3c1ce326.1649878359.git.reinette.chatre@intel.com/T/). +**Note:** The kernel patch series for upstream are under review on LKML in [this thread](https://lore.kernel.org/lkml/YnrllJ2OqmcqLUuv@kernel.org/T/). 
Please refer to the cover letter of the series for changes between versions. -This EMM implementation is based on the testing branch for the kernel hosted [here](https://github.com/rchatre/linux/tree/sgx/sgx2_submitted_v4_plus_rwx), which includes a temporary patch to allow pages EAUG'ed with RWX permissions. +This EMM implementation is based on the testing branch for the kernel hosted [here](https://github.com/rchatre/linux/tree/sgx/sgx2_submitted_v5_plus_rwx), which includes a temporary patch to allow pages EAUG'ed with RWX permissions. As the kernel interfaces evolve, this EMM implementation and/or interface may change. However, the goal is to minimize the EMM public API changes so that impact to upper layer implementations are minimized. @@ -33,7 +33,7 @@ On Ubuntu 18.04/20.04, follow the general instructions from [here](https://wiki. ``` $ git clone https://github.com/rchatre/linux.git $ cd linux -$ git checkout sgx/sgx2_submitted_v4_plus_rwx +$ git checkout sgx/sgx2_submitted_v5_plus_rwx ``` - For step 6, modify .config to set "CONFIG_X86_SGX=y". @@ -67,7 +67,7 @@ Build and Install SDK and PSW ``` $ git clone https://github.com/intel/linux-sgx.git $repo_root $ cd $repo_root -$ git checkout edmm_v4 +$ git checkout edmm_v5 ``` Following steps assume $repo_root is the top directory of the linux-sgx repo you cloned. From 07ba234caf17cd9324d5bfd2fa90407a16409ffe Mon Sep 17 00:00:00 2001 From: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> Date: Sun, 8 May 2022 14:00:22 -0700 Subject: [PATCH 17/96] sdk/emm: merge ocalls for continuous pages To minimize number of VMAs created by kernel Signed-off-by: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> --- sdk/emm/ema.c | 117 ++++++++++++++++++++++++-------------------------- 1 file changed, 56 insertions(+), 61 deletions(-) diff --git a/sdk/emm/ema.c b/sdk/emm/ema.c index 948104f82..e9b85b018 100644 --- a/sdk/emm/ema.c +++ b/sdk/emm/ema.c @@ -668,9 +668,9 @@ int ema_do_commit_loop(ema_t *first, ema_t *last, size_t start, size_t end) return ret; } -int ema_do_uncommit(ema_t *node, size_t start, size_t end) +static int ema_do_uncommit_real(ema_t *node, size_t real_start, size_t real_end, + int prot) { - int prot = node->si_flags & SGX_EMA_PROT_MASK; int type = node->si_flags & SGX_EMA_PAGE_TYPE_MASK; uint32_t alloc_flags = node->alloc_flags & SGX_EMA_ALLOC_FLAGS_MASK; @@ -680,39 +680,66 @@ int ema_do_uncommit(ema_t *node, size_t start, size_t end) } assert(node->eaccept_map); //TODO: refactor bit_array_test/set - size_t real_start = MAX(start, node->start_addr); - size_t real_end = MIN(end, node->start_addr + node->size); sec_info_t si SGX_SECINFO_ALIGN = { SGX_EMA_PAGE_TYPE_TRIM | SGX_EMA_STATE_MODIFIED, 0}; - - for(size_t addr = real_start; addr < real_end; addr += SGX_PAGE_SIZE) + + while (real_start < real_end) { - size_t pos = (addr - node->start_addr) >> SGX_PAGE_SHIFT; - // only for committed page - if (bit_array_test(node->eaccept_map, pos)) { - int ret = sgx_mm_modify_ocall(addr, SGX_PAGE_SIZE, - prot | type, prot | SGX_EMA_PAGE_TYPE_TRIM); - if (ret != 0) { - return ret; + size_t block_start = real_start; + while (block_start < real_end ){ + size_t pos = (block_start - node->start_addr) >> SGX_PAGE_SHIFT; + if (bit_array_test(node->eaccept_map, pos)) { + break; + } else { + block_start += SGX_PAGE_SIZE; } - - ret = do_eaccept(&si, addr); - if (ret != 0) { - return ret; + } + if (block_start == real_end) + break; + + size_t block_end = block_start + SGX_PAGE_SIZE; + while (block_end < real_end) { + size_t pos = 
(block_end - node->start_addr) >> SGX_PAGE_SHIFT; + if (bit_array_test(node->eaccept_map, pos)) { + block_end += SGX_PAGE_SIZE; } - bit_array_reset(node->eaccept_map, pos); - //eaccept trim notify - ret =sgx_mm_modify_ocall(addr, SGX_PAGE_SIZE, - prot | SGX_EMA_PAGE_TYPE_TRIM, - prot | SGX_EMA_PAGE_TYPE_TRIM); - if(ret) return ret; + else + break; } + assert(block_end > block_start); + // only for committed page + size_t block_length = block_end - block_start; + int ret = sgx_mm_modify_ocall(block_start, block_length, + prot | type, prot | SGX_EMA_PAGE_TYPE_TRIM); + if (ret != 0) { + return ret; + } + + ret = eaccept_range_forward(&si, block_start, block_end); + if (ret != 0) { + return ret; + } + bit_array_reset_range(node->eaccept_map, (block_start - node->start_addr) >> SGX_PAGE_SHIFT, block_length >> SGX_PAGE_SHIFT); + //eaccept trim notify + ret =sgx_mm_modify_ocall(block_start, block_length, + prot | SGX_EMA_PAGE_TYPE_TRIM, + prot | SGX_EMA_PAGE_TYPE_TRIM); + if(ret) return ret; + + real_start = block_end; } return 0; } +int ema_do_uncommit(ema_t *node, size_t start, size_t end) +{ + size_t real_start = MAX(start, node->start_addr); + size_t real_end = MIN(end, node->start_addr + node->size); + int prot = node->si_flags & SGX_EMA_PROT_MASK; + return ema_do_uncommit_real(node, real_start, real_end, prot); +} static int ema_can_uncommit(ema_t* first, ema_t* last, size_t start, size_t end) { @@ -754,8 +781,6 @@ int ema_do_uncommit_loop(ema_t *first, ema_t *last, size_t start, size_t end) int ema_do_dealloc(ema_t *node, size_t start, size_t end) { - int prot = node->si_flags & SGX_EMA_PROT_MASK; - int type = node->si_flags & SGX_EMA_PAGE_TYPE_MASK; int alloc_flag = node->alloc_flags & SGX_EMA_ALLOC_FLAGS_MASK; if (alloc_flag & SGX_EMA_RESERVE) @@ -767,45 +792,15 @@ int ema_do_dealloc(ema_t *node, size_t start, size_t end) size_t real_start = MAX(start, node->start_addr); size_t real_end = MIN(end, node->start_addr + node->size); - sec_info_t si SGX_SECINFO_ALIGN = { SGX_EMA_PAGE_TYPE_TRIM | - SGX_EMA_STATE_MODIFIED, - 0}; - - for(size_t page = real_start; page < real_end; page += SGX_PAGE_SIZE) - { - size_t pos = (page - node->start_addr) >> SGX_PAGE_SHIFT; - - // only for committed page - //!TODO combine ocalls for multiple pages - if (bit_array_test(node->eaccept_map, pos)) { - //Make ocall to trim, make sure to keep READ for eaccept - int ret = sgx_mm_modify_ocall(page, SGX_PAGE_SIZE, - prot | SGX_EMA_PROT_READ | type, - prot | SGX_EMA_PROT_READ | SGX_EMA_PAGE_TYPE_TRIM ); - if (ret != 0) { - return ret; - } - - ret = do_eaccept(&si, page); - if (ret != 0) { - return ret; - } - bit_array_reset(node->eaccept_map, pos); - - //notify kernel to remove, clear all protection bits - ret = sgx_mm_modify_ocall(page, SGX_PAGE_SIZE, - prot | SGX_EMA_PAGE_TYPE_TRIM, - SGX_EMA_PROT_NONE | SGX_EMA_PAGE_TYPE_TRIM); - if (ret != 0) { - return ret; - } - } - } + //clear protections flag + int ret = ema_do_uncommit_real (node, real_start, real_end, SGX_EMA_PROT_NONE); + if (ret != 0) + return ret; // potential ema split ema_t *tmp_node = NULL; if (real_start > node->start_addr) { - int ret = ema_split(node, real_start, false, &tmp_node); + ret = ema_split(node, real_start, false, &tmp_node); if(ret) return ret; assert(tmp_node); node = tmp_node; @@ -813,7 +808,7 @@ int ema_do_dealloc(ema_t *node, size_t start, size_t end) tmp_node = NULL; if (real_end < (node->start_addr + node->size)) { - int ret = ema_split(node, real_end, true, &tmp_node); + ret = ema_split(node, real_end, true, &tmp_node); if(ret) 
return ret; assert(tmp_node); node = tmp_node; From 6ca2ce74d7103dc042b98b60e756d5b0fc625316 Mon Sep 17 00:00:00 2001 From: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> Date: Sun, 8 May 2022 14:01:30 -0700 Subject: [PATCH 18/96] trts: fix an overflow Signed-off-by: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> --- sdk/trts/ema_init.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sdk/trts/ema_init.cpp b/sdk/trts/ema_init.cpp index 8bc297645..76e5fd925 100644 --- a/sdk/trts/ema_init.cpp +++ b/sdk/trts/ema_init.cpp @@ -64,7 +64,7 @@ static int build_rts_context_nodes(layout_entry_t *entry, uint64_t offset) assert(IS_PAGE_ALIGNED(rva)); size_t addr = (size_t)get_enclave_base() + rva; - size_t size = entry->page_count << SE_PAGE_SHIFT; + size_t size = ((size_t)entry->page_count) << SE_PAGE_SHIFT; size_t enclave_end = (size_t)get_enclave_base() + get_enclave_size(); // entry is guard page or has EREMOVE, build a reserved ema @@ -113,7 +113,7 @@ static int build_rts_context_nodes(layout_entry_t *entry, uint64_t offset) } int ret = mm_alloc((void*)addr, - entry->page_count << SE_PAGE_SHIFT, + ((size_t)entry->page_count) << SE_PAGE_SHIFT, SGX_EMA_COMMIT_ON_DEMAND | commit_direction | SGX_EMA_SYSTEM | SGX_EMA_FIXED | type, NULL, NULL, NULL); From 0e2de0ae7e0e61c47490db3da72680fca0227171 Mon Sep 17 00:00:00 2001 From: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> Date: Thu, 12 May 2022 12:05:32 -0700 Subject: [PATCH 19/96] ema: remove unused lock Signed-off-by: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> --- sdk/emm/ema.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/sdk/emm/ema.c b/sdk/emm/ema.c index e9b85b018..c68406e5d 100644 --- a/sdk/emm/ema.c +++ b/sdk/emm/ema.c @@ -58,7 +58,6 @@ struct ema_t_ { // Or'd with one of EMA_PAGE_TYPE_REG, EMA_PAGE_TYPE_TCS, EMA_PAGE_TYPE_TRIM bit_array * eaccept_map; // bitmap for EACCEPT status, bit 0 in eaccept_map[0] for the page at start address // bit i in eaccept_map[j] for page at start_address+(i+j<<3)<<12 - sgx_mm_mutex * lock; // lock to prevent concurrent modification, could be sgx_thread_mutex_t/rwlock_t int transition; // state to indicate whether a transition in progress, e.g page type/permission changes. sgx_enclave_fault_handler_t handler; // custom PF handler (for EACCEPTCOPY use) @@ -541,7 +540,6 @@ ema_t *ema_new(size_t addr, size_t size, uint32_t alloc_flags, alloc_flags, si_flags, NULL, - NULL, //TODO, use lock? 
0, handler, private_data, From bda6b29276c950d603942a1c9286a4f2c8853a88 Mon Sep 17 00:00:00 2001 From: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> Date: Sun, 15 May 2022 05:47:14 -0700 Subject: [PATCH 20/96] sdk/ema: misc stability fixes Signed-off-by: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> --- sdk/emm/bit_array.c | 14 ++++++++++++-- sdk/emm/ema.c | 5 +++-- sdk/trts/ema_init.cpp | 2 +- 3 files changed, 16 insertions(+), 5 deletions(-) diff --git a/sdk/emm/bit_array.c b/sdk/emm/bit_array.c index 47a381f6e..b9f1a18b6 100644 --- a/sdk/emm/bit_array.c +++ b/sdk/emm/bit_array.c @@ -61,10 +61,15 @@ bit_array *bit_array_new(size_t num_of_bits) return NULL; bit_array *ba = (bit_array *)malloc(sizeof(bit_array)); - if(!ba) return NULL; + if (!ba) return NULL; ba->n_bytes = n_bytes; ba->n_bits = num_of_bits; ba->data = (uint8_t*)malloc(n_bytes); + if (!ba->data) + { + free(ba); + return NULL; + } return ba; } @@ -401,6 +406,11 @@ int bit_array_split(bit_array *ba, size_t pos, bit_array **new_lower, bit_array // new bit_array for higher pages bit_array *ba2 = bit_array_new(r_bits); + if(!ba2) + { + free(data); + return ENOMEM; + } size_t bits_remain = r_bits; size_t curr_byte = byte_index; @@ -437,7 +447,7 @@ bit_array* bit_array_merge(bit_array *ba1, bit_array *ba2) { size_t total_bits = ba1->n_bits + ba2->n_bits; bit_array *ba = bit_array_new(total_bits); - + if (!ba) return NULL; // copy ba1 data into new bit_array memcpy(ba->data, ba1->data, ba1->n_bytes); diff --git a/sdk/emm/ema.c b/sdk/emm/ema.c index c68406e5d..fcbe358d3 100644 --- a/sdk/emm/ema.c +++ b/sdk/emm/ema.c @@ -553,8 +553,9 @@ ema_t *ema_new(size_t addr, size_t size, uint32_t alloc_flags, void ema_destroy(ema_t *ema) { remove_ema(ema); - if (ema->eaccept_map) { - free(ema->eaccept_map); + if (ema->eaccept_map) + { + bit_array_delete(ema->eaccept_map); } free(ema); } diff --git a/sdk/trts/ema_init.cpp b/sdk/trts/ema_init.cpp index 76e5fd925..78033d68f 100644 --- a/sdk/trts/ema_init.cpp +++ b/sdk/trts/ema_init.cpp @@ -74,7 +74,7 @@ static int build_rts_context_nodes(layout_entry_t *entry, uint64_t offset) Intel SDK specific. Last guard page area fills up remaining enclave space we cut off to leave space for user. 
************************/ - if((addr + size) == enclave_end) + if((addr + size) == enclave_end && size > 0x10000ULL) size = 0x10000; int ret = mm_init_ema((void*)addr, size, From c4c1b3dca4d8ec6494c05ba093ed322759b97cb3 Mon Sep 17 00:00:00 2001 From: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> Date: Thu, 12 May 2022 21:56:13 -0700 Subject: [PATCH 21/96] emm: add custom allocator for internal use Signed-off-by: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> --- sdk/emm/Makefile | 1 + sdk/emm/README.md | 22 +- sdk/emm/api_tests/App/App.cpp | 2 +- sdk/emm/api_tests/Enclave/config.xml | 10 +- sdk/emm/bit_array.c | 20 +- sdk/emm/ema.c | 42 ++- sdk/emm/emalloc.c | 522 +++++++++++++++++++++++++++ sdk/emm/include/emalloc.h | 44 +++ sdk/emm/sgx_mm.c | 18 +- sdk/trts/init_enclave.cpp | 17 +- 10 files changed, 646 insertions(+), 52 deletions(-) create mode 100644 sdk/emm/emalloc.c create mode 100644 sdk/emm/include/emalloc.h diff --git a/sdk/emm/Makefile b/sdk/emm/Makefile index 9312a1f64..ca8338c57 100644 --- a/sdk/emm/Makefile +++ b/sdk/emm/Makefile @@ -36,6 +36,7 @@ CPPFLAGS += -Iinclude \ OBJS := bit_array.o \ ema.o \ + emalloc.o \ emm_private.o \ sgx_mm.o diff --git a/sdk/emm/README.md b/sdk/emm/README.md index dcf2e2b9c..72c3754b6 100644 --- a/sdk/emm/README.md +++ b/sdk/emm/README.md @@ -106,18 +106,18 @@ $ tail -f nohup.out Limitations of current implementation --------------------------------------- -1. EMM holds a global recursive mutex for the whole duration of each API invocation. +1. The EMM holds a global recursive mutex for the whole duration of each API invocation. - No support for concurrent operations (modify type/permissions, commit and commit_data) on different regions. -2. EMM internally uses the default heap and stack during its internal operations - - The initial heap and stack should be sufficient to bootstrap EMM initializations - - Book-keeping for heap should be created when RTS is initialized. - - RTS calls mm_init_ema to create region for the static heap (EADDed), and mm_alloc to reserve COMMIT_ON_DEMAND for dynamic heap. - - Stack expansion should be done in 1st phase exception handler and use a reserved static stack - - Such that stack is not overrun in sgx_mm API calls during stack expansion. -3. EMM requires all RTS allocations(with SGX_EMA_SYSTEM flag) are reserved up front during RTS/enclave initializations. - - EMM won't allocate any user requested region below the highest address in RTS regions. - - EMM won't serve any user request unless at least one RTS region is reserved. -4. EMM relies on vDSO interface to guarantee that fault handler is called on the same OS thread where fault happened. +2. The EMM internally uses a separate dynamic allocator (emalloc) to manage its internal memory usage: allocations for EMA objects and bitmaps of the regions. + - During initialization, the EMM emalloc will create an initial reserve region from the user range (given by RTS, see below). And it may add more reserves later also from the user range if needed. + - RTS and SDK signing tools can estimate this overhead with (total size of all RTS regions and user regions)/2^14. And account for it when calculating the enclave size. + - Before calling any EMM APIs, the RTS needs initialize EMM by calling sgx_mm_init pass in an address range [user_start, user_end) for user allocation. + - The EMM allocates all user requested region(via sgx_mm_alloc API) in this range only. +3. 
Allocations created by the RTS enclave loader at fixed address ranges can be reserved with SGX_EMA_SYSTEM flag after EMM initializations. + - For example, for a heap region to be dynamically expanded: + - The RTS calls mm_init_ema to create region for the static heap (EADDed), and mm_alloc to reserve COMMIT_ON_DEMAND for dynamic heap. + - Stack expansion should be done in 1st phase exception handler and use a reserved static stack so that stack is not overrun in sgx_mm API calls during stack expansion. +4. The EMM relies on vDSO interface to guarantee that fault handler is called on the same OS thread where fault happened. - This is due to the use of the global recursive mutex. If fault handler comes in from different thread while the mutex is held, it will deadlock. - Note a #PF could happen when more stack is needed inside EMM functions while the mutex is locked. - vDSO user handler should ensure it re-enters enclave with the original TCS and on the same OS thread. diff --git a/sdk/emm/api_tests/App/App.cpp b/sdk/emm/api_tests/App/App.cpp index adc931ec2..f5a88396f 100644 --- a/sdk/emm/api_tests/App/App.cpp +++ b/sdk/emm/api_tests/App/App.cpp @@ -405,7 +405,7 @@ int SGX_CDECL main(int argc, char *argv[]) int ret = 0; //17 threads for 100 iterations passed when this is checked in - ret += test_sgx_mm_functions(17); + ret += test_sgx_mm_functions(247); ret += test_unsafe(); sgx_destroy_enclave(global_eid); diff --git a/sdk/emm/api_tests/Enclave/config.xml b/sdk/emm/api_tests/Enclave/config.xml index aea997c8a..2dafb0872 100644 --- a/sdk/emm/api_tests/Enclave/config.xml +++ b/sdk/emm/api_tests/Enclave/config.xml @@ -1,16 +1,16 @@ 0 0 - 8 + 248 3 1 - 18 + 248 0x10000 0x2000 - 0x900000 - 0x90000 - 0x080000 + 0xF0000000 + 0x9000 + 0x08000 0x00001000 0x00100000 0x90000000 diff --git a/sdk/emm/bit_array.c b/sdk/emm/bit_array.c index b9f1a18b6..7a6ca0e4d 100644 --- a/sdk/emm/bit_array.c +++ b/sdk/emm/bit_array.c @@ -35,8 +35,8 @@ #include #include #include "bit_array.h" +#include "emalloc.h" -#define ROUND_TO(x, align) ((size_t)((x) + ((align)-1)) & (size_t)(~((align)-1))) #define NUM_OF_BYTES(nbits) (ROUND_TO((nbits), 8) >> 3) #define TEST_BIT(A, p) ((A)[((p)/8)] & ((uint8_t)(1 << ((p)%8)))) #define SET_BIT(A, p) ((A)[((p)/8)] |= ((uint8_t)(1 << ((p)%8)))) @@ -60,14 +60,14 @@ bit_array *bit_array_new(size_t num_of_bits) if (n_bytes == 0) return NULL; - bit_array *ba = (bit_array *)malloc(sizeof(bit_array)); - if (!ba) return NULL; + bit_array *ba = (bit_array *)emalloc(sizeof(bit_array)); + if(!ba) return NULL; ba->n_bytes = n_bytes; ba->n_bits = num_of_bits; - ba->data = (uint8_t*)malloc(n_bytes); + ba->data = (uint8_t*)emalloc(n_bytes); if (!ba->data) { - free(ba); + efree(ba); return NULL; } return ba; @@ -101,7 +101,7 @@ bit_array *bit_array_new_reset(size_t num_of_bits) void bit_array_reattach(bit_array *ba, size_t num_of_bits, uint8_t *data) { if (ba->data) { - free(ba->data); + efree(ba->data); } size_t n_bytes = NUM_OF_BYTES(num_of_bits); @@ -113,8 +113,8 @@ void bit_array_reattach(bit_array *ba, size_t num_of_bits, uint8_t *data) // Delete the bit_array 'ba' and the data it owns void bit_array_delete(bit_array *ba) { - free(ba->data); - free(ba); + efree(ba->data); + efree(ba); } #if 0 @@ -392,7 +392,7 @@ int bit_array_split(bit_array *ba, size_t pos, bit_array **new_lower, bit_array size_t r_bits = ba->n_bits - l_bits; // new data for bit_array of lower pages - uint8_t *data = (uint8_t *)malloc(l_bytes); + uint8_t *data = (uint8_t *)emalloc(l_bytes); if (!data) return ENOMEM; size_t i; 
for (i = 0; i < byte_index; ++i) { @@ -408,7 +408,7 @@ int bit_array_split(bit_array *ba, size_t pos, bit_array **new_lower, bit_array bit_array *ba2 = bit_array_new(r_bits); if(!ba2) { - free(data); + efree(data); return ENOMEM; } diff --git a/sdk/emm/ema.c b/sdk/emm/ema.c index fcbe358d3..69b45c437 100644 --- a/sdk/emm/ema.c +++ b/sdk/emm/ema.c @@ -36,6 +36,7 @@ #include #include #include "ema.h" +#include "emalloc.h" #include "bit_array.h" #include "sgx_mm.h" #include "sgx_mm_primitives.h" @@ -44,10 +45,6 @@ #define SGX_EMA_STATE_PENDING 0x8UL #define SGX_EMA_STATE_MODIFIED 0x10UL #define SGX_EMA_STATE_PR 0x20UL -#define ROUND_TO(x, align) (((x) + (align-1)) & ~(align-1)) -#define TRIM_TO(x, align) ((x) & ~(align-1)) -#define MIN(x, y) (((x)>(y))?(y):(x)) -#define MAX(x, y) (((x)>(y))?(x):(y)) #define UNUSED(x) ((void)(x)) struct ema_t_ { size_t start_addr; // starting address, should be on a page boundary @@ -69,7 +66,8 @@ struct ema_t_ { struct ema_root_ { ema_t *guard; }; - +extern size_t mm_user_base; +extern size_t mm_user_end; ema_t dummy_user_ema = {.next = &dummy_user_ema, .prev = &dummy_user_ema}; ema_root_t g_user_ema_root = {.guard = &dummy_user_ema}; @@ -355,7 +353,7 @@ int ema_split(ema_t *ema, size_t addr, bool new_lower, ema_t** ret_node) return EINVAL; } - ema_t *new_node = (ema_t *)malloc(sizeof(ema_t)); + ema_t *new_node = (ema_t *)emalloc(sizeof(ema_t)); if (!new_node) { return ENOMEM; } @@ -365,7 +363,7 @@ int ema_split(ema_t *ema, size_t addr, bool new_lower, ema_t** ret_node) size_t pos = (addr - ema->start_addr) >> SGX_PAGE_SHIFT; int ret = bit_array_split(ema->eaccept_map, pos, &low, &high); if(ret) { - free(new_node); + efree(new_node); return ret; } } @@ -439,12 +437,19 @@ bool find_free_region(ema_root_t *root, size_t size, { ema_t *ema_begin = root->guard->next; ema_t *ema_end = root->guard; - - // we need at least one node before calling this. + bool is_system = (root == &g_rts_ema_root); if(ema_begin == ema_end){ - if(ema_root_empty(&g_rts_ema_root)) - return false;//rts has to be inited at this time - size_t tmp = ema_root_end(&g_rts_ema_root); + size_t tmp = 0; + if (is_system) + { + // we need at least one node before calling this. 
+ if(ema_root_empty(&g_rts_ema_root)) + return false;//rts has to be inited at this time + tmp = ema_root_end(&g_rts_ema_root); + }else + { + tmp = mm_user_base; + } tmp = ROUND_TO(tmp, align); if(!sgx_mm_is_within_enclave((void*)tmp, size)) return false; @@ -474,14 +479,17 @@ bool find_free_region(ema_root_t *root, size_t size, { *next_ema = next; *addr = ema_aligned_end(curr, align); - return true; + size_t end = *addr + size; + if( (is_system && (end <=mm_user_base || *addr > mm_user_base)) + || (!is_system && end < mm_user_end)) + return true; } // we look for space in front, but do not mix user with rts size_t tmp = ema_begin->start_addr - size; tmp = TRIM_TO(tmp, align); - if (root == &g_user_ema_root) + if (!is_system) { - if (ema_root_end(&g_rts_ema_root) < tmp){ + if (mm_user_base < tmp){ //we found gap bigger enough *addr = tmp; *next_ema = ema_begin; @@ -531,7 +539,7 @@ ema_t *ema_new(size_t addr, size_t size, uint32_t alloc_flags, void *private_data, ema_t* next_ema) { - ema_t *node = (ema_t *)malloc(sizeof(ema_t)); + ema_t *node = (ema_t *)emalloc(sizeof(ema_t)); if (!node) return NULL; *node = (ema_t){ @@ -557,7 +565,7 @@ void ema_destroy(ema_t *ema) { bit_array_delete(ema->eaccept_map); } - free(ema); + efree(ema); } static int eaccept_range_forward(const sec_info_t *si, size_t start, size_t end) diff --git a/sdk/emm/emalloc.c b/sdk/emm/emalloc.c new file mode 100644 index 000000000..a0134e5d8 --- /dev/null +++ b/sdk/emm/emalloc.c @@ -0,0 +1,522 @@ +/* + * Copyright (C) 2011-2022 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ +#include "emalloc.h" +#include "ema.h" // SGX_PAGE_SIZE +#include "sgx_mm.h" //sgx_mm_alloc +#include +#include +#include +/* + * This file implements a Simple allocator for EMM internal memory + * It maintains a list of reserves, dynamically added on + * demand using sgx_mm_alloc recursively when reserve runs below + * a threshold. + */ + + +/** + * Meta reserve is only used to allocate EMAs for + * "reserve" areas used by emalloc. 
+ * 16 pages would be enough to create enough reserves + * to be used to allocate bit maps for roughly 64T EPC + */ +#define META_RESERVE_SIZE 0x10000ULL +static uint8_t meta_reserve[META_RESERVE_SIZE]; +static size_t meta_used; +/** + * initial reserve size + * TODO: make it configurable by RTS + */ +static const size_t initial_reserve_size = 0x10000ULL; + +// this is enough for bit map of an 8T EMA +static const size_t max_emalloc_size = 0x10000000ULL; + +/* Blocks of memory managed. + * The allocator put these fields at the front + * of the block when a memory block is freed + * minimal allocation size is 8 bytes + * 8 bytes of header is overhead + */ +typedef struct _block +{ + uint64_t header; // size | alloc_mask + union { + char* payload[0]; + struct _block *next_prev[2]; /* used only when this block is free + * next_prev[0] points to next free + * block, next_prev[1] points to prev + * free block if this one is 16 bytes+ + */ + }; +} block_t; + +#define num_exact_list 0x100 +static const size_t header_size = sizeof(uint64_t); +static const size_t exact_match_increment = 0x8; +static const size_t min_block_size = 0x10; //include 8-byte header +static const size_t max_exact_size = min_block_size + exact_match_increment * (num_exact_list -1); +static block_t* exact_block_list[num_exact_list]; + +// the least significant bit in block header +// 1 == allocated/in-use, 0 == free +static const uint64_t alloc_mask = 1ULL; +//block size align to 8 bytes +static const uint64_t size_mask = ~((uint64_t)(exact_match_increment-1)); +// We don't expect many latge blocks +// !TODO: optimize if needed +static block_t* large_block_list = NULL; + +block_t* payload_to_block (void* p) +{ + return (block_t*) (((size_t)p) - header_size); +} + +void* block_to_payload(block_t* b) +{ + return (void *) (b->payload); +} + +bool is_alloced(block_t* b) +{ + return alloc_mask & b->header; +} + +uint64_t block_size(block_t* b) +{ + return b->header & size_mask; +} + +size_t block_end(block_t* b) +{ + return (size_t)(b) + block_size(b); +} +#ifndef NDEBUG +size_t num_free_blocks = 0; +#endif +/* + * A reserve is a continuous block of + * memory committed for emalloc purpose. 
+ */ +typedef struct _mm_reserve +{ + size_t base; + size_t size; + size_t used; + struct _mm_reserve* next; +} mm_reserve_t; + +static mm_reserve_t* reserve_list = NULL; + +static mm_reserve_t* find_used_in_reserve(size_t addr, size_t size) +{ + if (size == 0) return NULL; + mm_reserve_t* r = reserve_list; + while (r) + { + if (addr >= r->base && + addr + size <= r->base + r->used) + return r; + r = r->next; + } + return NULL; +} + +static size_t get_list_idx(size_t size) +{ + assert(size%exact_match_increment ==0); + if(size < min_block_size) return 0; + size_t list = (size - min_block_size)/exact_match_increment; + assert(list < num_exact_list); + return list; +} + +static void remove_from_list(block_t* b, block_t** list_head) +{ + size_t bsize = block_size(b); + if(b == *list_head) + { + *list_head = b->next_prev[0]; + if((*list_head) && bsize > min_block_size ) + (*list_head)->next_prev[1] = NULL; + } + else + { + block_t* prev = NULL; + if (bsize > min_block_size) + prev = b->next_prev[1]; + block_t* next = b->next_prev[0]; + if(prev) + prev->next_prev[0] = next; + if(next) + next->next_prev[1] = prev; + } +} +static void remove_from_lists(block_t* b) +{ + size_t bsize = block_size(b); + if(bsize > max_exact_size) + remove_from_list(b, &large_block_list); + else + { + size_t l = get_list_idx(bsize); + remove_from_list(b, &exact_block_list[l]); + } +} + +static void prepend_to_list(block_t* b, block_t** head) +{ + b->next_prev[0] = *head; + if ((*head) && block_size(*head) > min_block_size) + { + (*head)->next_prev[1] = b; + } + *head = b; +} + +static void put_exact_block(block_t* b) +{ + size_t list = get_list_idx(block_size(b)); + prepend_to_list(b, &exact_block_list[list]); +#ifndef NDEBUG + num_free_blocks++; +#endif +} + +static block_t* neighbor_right(block_t* me) +{ + size_t end = block_end(me); + mm_reserve_t* r1 = find_used_in_reserve((size_t)me, end); + if (!r1) return NULL; + if(end == r1->base + r1->used) return NULL; + mm_reserve_t* r2 = find_used_in_reserve(end, block_size((block_t*)end)); + if (r1 != r2) return NULL; + return (block_t*) end; +} + +//!TODO merge with left neighbor +// which requires scanning or footer +static block_t* possibly_merge(block_t* b) +{ + block_t* nr = neighbor_right(b); + if (!nr) return b; + if (is_alloced(nr)) return b; + remove_from_lists(nr); + b->header += block_size(nr); +#ifndef NDEBUG + num_free_blocks--; +#endif + return possibly_merge(b); +} + +static void put_free_block(block_t* e) +{ + if (block_size(e) <= (size_t)max_exact_size) + { + put_exact_block(e); + return; + } + prepend_to_list(e, &large_block_list); +#ifndef NDEBUG + num_free_blocks++; +#endif +} + +static block_t* split_free_block(block_t* b, size_t s) +{ + size_t remain = b->header - s; + assert(remain >= (size_t)min_block_size); + b->header = s; + block_t* new_b = (block_t*)((uint8_t*)b+s); + new_b->header = remain; + return new_b; +} + +static block_t* get_exact_match(size_t bsize) +{ + size_t list = get_list_idx(bsize); + if (exact_block_list[list] == NULL) return NULL; + block_t* ret = exact_block_list[list]; + exact_block_list[list] = ret->next_prev[0]; + if (list > 0 && exact_block_list[list]) + exact_block_list[list]->next_prev[1] = NULL; +#ifndef NDEBUG + num_free_blocks--; +#endif + return ret; +} + +static block_t* get_free_block(size_t bsize) +{ + if(bsize <= max_exact_size) + return get_exact_match(bsize); + + if (large_block_list == NULL) + return NULL; + + block_t *tmp = large_block_list; + block_t *best = NULL; + + //find best match + while(tmp != NULL) 
+ { + if(tmp->header >= bsize) + { + if (!best) + { + best = tmp; + } + else + if(best->header > tmp->header) + { + best = tmp; + } + } + tmp = (block_t *)tmp->next_prev[0]; + } + + if(!best) return NULL; + remove_from_list(best, &large_block_list); + + if(best->header >= (bsize + min_block_size)) + { + block_t* tail = split_free_block(best, bsize); + put_free_block (tail); + } + // !TODO optimize for large allocations + // Note: EMA objects are 80 bytes + // bit_arrays are mostly small except for really large EMAs +#ifndef NDEBUG + num_free_blocks--; +#endif + return best; +} + + +static block_t* get_large_block_end_at(size_t addr) +{ + if (large_block_list == NULL) + return NULL; + block_t *tmp = large_block_list; + + while (tmp != NULL) + { + if((((size_t)tmp) + tmp->header) == addr) + { + remove_from_list(tmp, &large_block_list); + return tmp; + } + tmp = tmp->next_prev[0]; + } + return NULL; +} + +static void merge_large_blocks_to_reserve(mm_reserve_t* r) +{ + size_t used_end = r->base + r->used; + block_t *merge = get_large_block_end_at (used_end); + while (merge != NULL) + { +#ifndef NDEBUG + num_free_blocks--; +#endif + used_end -= merge->header; + merge = get_large_block_end_at (used_end); + } + r->used = used_end - r->base; + return; +} + + +static void new_reserve (void* base, size_t rsize) +{ + mm_reserve_t *reserve = (mm_reserve_t*) base; + size_t head_size = sizeof(mm_reserve_t); + reserve->base = (size_t)(base) + head_size; + reserve->used = 0; + reserve->size = rsize - head_size; + reserve->next = reserve_list; + reserve_list = reserve; +} + +static block_t* alloc_from_reserve(size_t bsize) +{ + mm_reserve_t* r = reserve_list; + size_t ret = 0; + while (r) + { + if (r->size - r->used >= bsize) + { + ret = r->base + r->used; + r->used += bsize; + break; + } + r = r->next; + } + return (block_t*)ret; +} + +static bool adding_reserve = false; +static size_t chunk_size = initial_reserve_size; +static const size_t guard_size = 0x8000ULL; + +static int add_reserve (size_t rsize) +{ + void* base = NULL; + if(adding_reserve) + return 0; + chunk_size = chunk_size > rsize? chunk_size : rsize; + // this will call back to emalloc and efree. + // set the flag to avoid infinite loop + adding_reserve = true; + //!TODO + //create a separate internal API to remove circular calls + int ret = sgx_mm_alloc(NULL, chunk_size + 2*guard_size, SGX_EMA_RESERVE, + NULL, NULL,&base); + if (ret) + return ret; + ret = sgx_mm_alloc((void*)((size_t)base + guard_size), chunk_size, + SGX_EMA_COMMIT_ON_DEMAND, NULL, NULL,&base); + if(ret) + return ret; + + new_reserve(base, chunk_size); + sgx_mm_commit(base, rsize); + chunk_size = chunk_size * 2; //double next time + if (chunk_size > max_emalloc_size) + chunk_size = max_emalloc_size; + + adding_reserve = false; + + return 0; +} + +void* alloc_from_meta(size_t bsize) +{ + if (meta_used + bsize> META_RESERVE_SIZE) return NULL; + block_t* b = (block_t*) (&meta_reserve[meta_used]); + meta_used += bsize; + b->header = bsize | alloc_mask; + return block_to_payload(b); +} + +void emalloc_init() +{ + for (int i = 0; i < num_exact_list; i++) + { + exact_block_list[i] = NULL; + } + if (add_reserve(initial_reserve_size)) abort(); +} + +// Single thread only. 
+// Caller holds mm_lock +void* emalloc(size_t size) +{ + size_t bsize = ROUND_TO(size + header_size, exact_match_increment); + if (bsize < min_block_size) + bsize = min_block_size; + if(adding_reserve) // called back from add_reserve + return alloc_from_meta(bsize); + + block_t* b = get_free_block(bsize); + + if (b!= NULL) + { + b->header = bsize | alloc_mask; + return block_to_payload(b); + } + + b = alloc_from_reserve (bsize); + if (!b) + { + size_t new_reserve_size = + ROUND_TO(bsize + sizeof(mm_reserve_t), + initial_reserve_size); + if (add_reserve(new_reserve_size)) + return NULL; + b = alloc_from_reserve(bsize); + if(!b)//should never happen + return NULL; + } + + b->header = bsize | alloc_mask; + return block_to_payload(b); +} + + +static block_t* reconfigure_block(block_t* b){ + b->header = b->header & size_mask; + b->next_prev[0] = NULL; + if (b->header > min_block_size) + b->next_prev[1] = NULL; + + b = possibly_merge(b); + return b; +} +/* + * This is an internal interface only used + * by emm, intentionally crash for any error or + * inconsistency + */ +void efree(void* payload) +{ + block_t *b = payload_to_block(payload); + size_t bstart = (size_t)b; + size_t bsize = block_size(b); + if (bstart < (size_t)(&meta_reserve[META_RESERVE_SIZE]) + && bstart + bsize >(size_t)(&meta_reserve[0])) + { + if (adding_reserve) + { //we don't expect a lot of free blocks allocated + // in meta reserve. Do nothing now + return; + } + else + abort(); + } + // normal blocks + mm_reserve_t* r = find_used_in_reserve((size_t)b, block_size(b)); + if (!r) + abort(); + b = reconfigure_block(b); + size_t end = block_end(b); + if ((end - r->base) == r->used) + { + r->used -= b->header; + merge_large_blocks_to_reserve(r); + return; + } + + put_free_block(b); + return; +} + diff --git a/sdk/emm/include/emalloc.h b/sdk/emm/include/emalloc.h new file mode 100644 index 000000000..bfd712fa9 --- /dev/null +++ b/sdk/emm/include/emalloc.h @@ -0,0 +1,44 @@ +/* + * Copyright (C) 2011-2022 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef __SGX_EMALLOC_H__ +#define __SGX_EMALLOC_H__ +#include +#define ROUND_TO(x, align) ((size_t)((x) + ((align)-1)) & (size_t)(~((align)-1))) +#define TRIM_TO(x, align) ((size_t)(x) & (size_t)(~(align-1))) +#define MIN(x, y) (((x)>(y))?(y):(x)) +#define MAX(x, y) (((x)>(y))?(x):(y)) + +void emalloc_init(); +void* emalloc(size_t); +void efree(void* ptr); +#endif + diff --git a/sdk/emm/sgx_mm.c b/sdk/emm/sgx_mm.c index 2a5ad5c64..a7cf0e86e 100644 --- a/sdk/emm/sgx_mm.c +++ b/sdk/emm/sgx_mm.c @@ -34,14 +34,15 @@ #include #include "sgx_mm.h" #include "ema.h" +#include "emalloc.h" #include "sgx_mm_rt_abstraction.h" extern ema_root_t g_user_ema_root; extern ema_root_t g_rts_ema_root; #define LEGAL_ALLOC_PAGE_TYPE (SGX_EMA_PAGE_TYPE_REG | SGX_EMA_PAGE_TYPE_SS_FIRST | SGX_EMA_PAGE_TYPE_SS_REST) -#define TRIM_TO(x, align) ((x) & ~(align-1)) sgx_mm_mutex *mm_lock = NULL; - +size_t mm_user_base = 0; +size_t mm_user_end = 0; //!FIXME: assume user and system EMAs are not interleaved // user EMAs are above the last system EMA int mm_alloc_internal(void *addr, size_t size, int flags, @@ -81,7 +82,7 @@ int mm_alloc_internal(void *addr, size_t size, int flags, if(sgx_mm_mutex_lock(mm_lock)) return EFAULT; - if (ema_root_empty(&g_rts_ema_root)){ + if (mm_user_base == 0){ //the rts is not initialized status = EFAULT; goto unlock; @@ -183,6 +184,12 @@ int sgx_mm_alloc(void *addr, size_t size, int flags, void *private, void **out_addr) { if (flags & SGX_EMA_SYSTEM) return EINVAL; + if(addr) + { + size_t tmp = (size_t)addr; + if (tmp >= mm_user_end || tmp < mm_user_base) + return EPERM; + } return mm_alloc_internal(addr, size, flags, handler, private, out_addr, &g_user_ema_root); } @@ -435,8 +442,11 @@ int sgx_mm_enclave_pfhandler(const sgx_pfinfo *pfinfo) return ret; } -void sgx_mm_init(void) +void sgx_mm_init(size_t user_base, size_t user_end) { mm_lock = sgx_mm_mutex_create(); + mm_user_base = user_base; + mm_user_end = user_end; sgx_mm_register_pfhandler(sgx_mm_enclave_pfhandler); + emalloc_init(); } diff --git a/sdk/trts/init_enclave.cpp b/sdk/trts/init_enclave.cpp index 1680c754c..45636628e 100644 --- a/sdk/trts/init_enclave.cpp +++ b/sdk/trts/init_enclave.cpp @@ -79,7 +79,7 @@ extern "C" int init_enclave(void *enclave_base, void *ms) __attribute__((section extern "C" int rsrv_mem_init(void *_rsrv_mem_base, size_t _rsrv_mem_size, size_t _rsrv_mem_min_size); extern "C" int init_segment_emas(void* enclave_base); extern "C" int init_rts_contexts_emas(layout_t *start, layout_t *end, uint64_t delta); -extern "C" void sgx_mm_init(); +extern "C" void sgx_mm_init(size_t, size_t); // init_enclave() // Initialize enclave. 
// Parameters: @@ -269,12 +269,21 @@ sgx_status_t do_init_enclave(void *ms, void *tcs) g_enclave_state = ENCLAVE_INIT_DONE; if (EDMM_supported) { - sgx_mm_init(); + //!TODO take user base and size from config + layout_t* last_layout = (layout_t*)(g_global_data.layout_table + g_global_data.layout_entry_num - 1); + if(IS_GROUP_ID(last_layout->group.id)) return SGX_ERROR_UNEXPECTED; + layout_entry_t *last_entry = &last_layout->entry; + size_t user_base = last_entry->rva + g_enclave_base; + size_t user_end = user_base + (((size_t)last_entry->page_count) << SE_PAGE_SHIFT); + assert(last_entry->si_flags == 0 && user_end == g_enclave_size + g_enclave_base); //last guard pages + user_base += 0x10000ULL; //reserve guard page, same number used in ema_init.c + if(user_base>=user_end) + return SGX_ERROR_UNEXPECTED; + sgx_mm_init(user_base, user_end); void* enclave_start = (void*)&__ImageBase; if (init_segment_emas(enclave_start)) return SGX_ERROR_UNEXPECTED; - int ret = init_rts_contexts_emas((layout_t*)g_global_data.layout_table, - (layout_t*)(g_global_data.layout_table + g_global_data.layout_entry_num), 0); + int ret = init_rts_contexts_emas((layout_t*)g_global_data.layout_table, last_layout, 0); if (ret != SGX_SUCCESS) { return SGX_ERROR_UNEXPECTED; } From 139aabbb0b1d764b8cbb233037db7c501cc3208d Mon Sep 17 00:00:00 2001 From: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> Date: Thu, 26 May 2022 22:46:12 -0500 Subject: [PATCH 22/96] Update SGX_EMM.md to include sgx_mm_init API Signed-off-by: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> --- sdk/emm/design_docs/SGX_EMM.md | 55 +++++++++++++++++++++------------- sdk/emm/include/emm_private.h | 11 +++++++ 2 files changed, 46 insertions(+), 20 deletions(-) diff --git a/sdk/emm/design_docs/SGX_EMM.md b/sdk/emm/design_docs/SGX_EMM.md index 8c7c44a3c..63e2c88a7 100644 --- a/sdk/emm/design_docs/SGX_EMM.md +++ b/sdk/emm/design_docs/SGX_EMM.md @@ -554,31 +554,43 @@ Intel SGX SDK) before any other clients can use it. Therefore, code and data of the memory manager will be part of initial enclave image that are loaded with EADD before EINIT, and as a part of the trusted runtime. -The trusted runtime should enumerate all initial committed regions (code, -data, heap, stack, TCS, and SSA), and call the EMM internal APIs to set up -initial entries in the EMA list to track existing regions and mark some -of them as not modifiable by EMM public APIs. The runtime also ensures there is -enough reserved space on the heap for EMM to create the initial EMA list and -the entries. Once initialized, the memory manager can reserve its own -space for future expansion of the EMA list, and special EMAs to hold -EMA objects. To keep it simple, the expansion can be done eagerly: commit -more pages for EMA list once unused committed space in the EMA List Region -below certain threshold. - -Alternative option: At build time, the enclave signing tool can precalculate -and fill in EMA entries that hold info on initial regions to be committed by -EADD during enclave load. The calculated start addresses in these EMAs can be -relative to enclave secs->base. The runtime can patch those entries at -initialization by adding secs->base. The EMM can directly use those EMAs as -the initial entries of the EMA list. It only needs to reserve and commit -a number of additional pages for future EMA list expansion. 
+To initialize EMM internals, the trusted runtime should first invoke sgx_mm_init, +passing in an address range available for non-system or so-called user allocations. +``` +/* + * Initialize the EMM internals and reserve the whole range available for user + * allocations via the public sgx_mm_alloc API. This should be called before + * any other APIs invoked. The runtime should not intend to allocate any subregion + * in [user_start, user_end) for system usage, i.e., the EMM will fail any allocation + * request with SGX_EMA_SYSTEM flag in this range and return an EINVAL error. + * @param[in] user_start The start of the user address range, page aligned. + * @param[in] user_end The end (exclusive) of the user address range, page aligned. + */ +void sgx_mm_init(size_t user_start, size_t user_end); +``` + +The EMM consumes some minimal amount of memory to store the EMA objects for +book keeping of all allocations. During initialization, the EMM reserves an initial area +in the user range for those internal use. And it would allocate more of such reserves on +demand as EMAs created for allocation requests and the active reserves run out. The size +of the user range accomodate this internal consumption overhead, which can be estimated as +the total size of all regions to be tracked (both system and expected user allocations) +divided by 2^14. At runtime, in case the EMM could not find space to allocate EMA objects +then its API would return ENOMEM. + +After initialization, the trusted runtime should enumerate all initial committed regions (code, +data, heap, stack, TCS, and SSA), and call the EMM private APIs to set up +initial entries in the EMA list to track existing regions. These regions +are typically created by the enclave loader at predetermined locations and +some are loaded with content from the enclave image. Thus it's necessary to +reserve their ranges this way so that they won't be modifiable by EMM public APIs. ### EMM Private APIs for Trusted Runtimes These private APIs can be used by the trusted runtime to reserve and allocate regions not accessible from public APIs. They have the identical signature -as the public API counterparts and replace "sgx_mm_" prefix with "ema_" prefix. -The main difference is that the private ema_alloc allows an extra flag +as the public API counterparts and replace "sgx_mm_" prefix with "mm_" prefix. +The main difference is that the private mm_alloc allows an extra flag SGX_EMA_SYSTEM passed in. ``` @@ -695,3 +707,6 @@ file), any reserved region for special purposes, e.g., minimal heap, stack, TCS areas, SSAs for expected minimal number of threads, etc. The runtime would read those info to populate the initial EMAs described in the section above on [Support for EMM Initialization](#support-for-emm-initialization) +The memory layout can also contain an entry for the user range mentioned +above if the enclave intends to dynamically allocate and manage some regions +using the EMM public APIs. diff --git a/sdk/emm/include/emm_private.h b/sdk/emm/include/emm_private.h index 9c9792727..9fcf43406 100644 --- a/sdk/emm/include/emm_private.h +++ b/sdk/emm/include/emm_private.h @@ -40,6 +40,17 @@ extern "C" { #endif +/* + * Initialize the EMM internals and reserve the whole range available for user + * allocations via the public sgx_mm_alloc API. This should be called before + * any other APIs invoked. 
The runtime should not intend to allocate any subregion + * in [user_start, user_end) for system usage, i.e., the EMM will fail any allocation + * request with SGX_EMA_SYSTEM flag in this range and return an EINVAL error. + * @param[in] user_start The start of the user address range, page aligned. + * @param[in] user_end The end (exclusive) of the user address range, page aligned. + */ +void sgx_mm_init(size_t user_start, size_t user_end); + #define SGX_EMA_SYSTEM SGX_EMA_ALLOC_FLAGS(0x80UL) /* EMA reserved by system */ /* * Initialize an EMA. This can be used to setup EMAs to account regions that From ec544fea30538e51024aa588ce33babfc85ae3ab Mon Sep 17 00:00:00 2001 From: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> Date: Sun, 29 May 2022 12:46:57 -0700 Subject: [PATCH 23/96] enclave_common: simplfy permissions change Kernel(V5) allows emodpr to PROT_NONE directly Signed-off-by: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> --- psw/enclave_common/sgx_mm_ocalls.cpp | 19 +++---------------- 1 file changed, 3 insertions(+), 16 deletions(-) diff --git a/psw/enclave_common/sgx_mm_ocalls.cpp b/psw/enclave_common/sgx_mm_ocalls.cpp index 758a75bc2..4deda461f 100644 --- a/psw/enclave_common/sgx_mm_ocalls.cpp +++ b/psw/enclave_common/sgx_mm_ocalls.cpp @@ -359,15 +359,6 @@ extern "C" int COMM_API enclave_modify(uint64_t addr, size_t length, int flags_f if (type_to != type_from) return EINVAL; - // type_to == type_from - // this is for emodpr to epcm.NONE, enclave EACCEPT with pte.R - // separate mprotect is needed to change pte.R to pte.NONE - if (prot_to == prot_from && prot_to == PROT_NONE) - { - ret = mprotect((void *)addr, length, prot_to); - if (ret == -1) - return errno; - } if (prot_to == prot_from) { @@ -384,12 +375,8 @@ extern "C" int COMM_API enclave_modify(uint64_t addr, size_t length, int flags_f { return EINVAL; } - //EACCEPT needs at least pte.R, PROT_NONE case done above. 
- if (prot_to != PROT_NONE) - { - ret = mprotect((void *)addr, length, prot_to); - if (ret == -1) - return errno; - } + ret = mprotect((void *)addr, length, prot_to); + if (ret == -1) + return errno; return ret; } From a6af9b5207ef5ded726bf30ad1c4af776143fde7 Mon Sep 17 00:00:00 2001 From: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> Date: Sun, 29 May 2022 12:21:42 -0700 Subject: [PATCH 24/96] emalloc: misc fixes commit EPC of reserves before first use fix compile error for gcc 7.5 style and typo fixes Signed-off-by: Xiaofeng Xu <20158212+xxu36@users.noreply.github.com> Signed-off-by: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> --- sdk/emm/emalloc.c | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/sdk/emm/emalloc.c b/sdk/emm/emalloc.c index a0134e5d8..10f594191 100644 --- a/sdk/emm/emalloc.c +++ b/sdk/emm/emalloc.c @@ -55,7 +55,7 @@ static size_t meta_used; * initial reserve size * TODO: make it configurable by RTS */ -static const size_t initial_reserve_size = 0x10000ULL; +#define initial_reserve_size 0x10000ULL // this is enough for bit map of an 8T EMA static const size_t max_emalloc_size = 0x10000000ULL; @@ -80,9 +80,9 @@ typedef struct _block } block_t; #define num_exact_list 0x100 -static const size_t header_size = sizeof(uint64_t); -static const size_t exact_match_increment = 0x8; -static const size_t min_block_size = 0x10; //include 8-byte header +size_t header_size = sizeof(uint64_t); +#define exact_match_increment 0x8 +#define min_block_size 0x10 //include 8-byte header static const size_t max_exact_size = min_block_size + exact_match_increment * (num_exact_list -1); static block_t* exact_block_list[num_exact_list]; @@ -90,8 +90,8 @@ static block_t* exact_block_list[num_exact_list]; // 1 == allocated/in-use, 0 == free static const uint64_t alloc_mask = 1ULL; //block size align to 8 bytes -static const uint64_t size_mask = ~((uint64_t)(exact_match_increment-1)); -// We don't expect many latge blocks +uint64_t size_mask = ~((uint64_t)(exact_match_increment-1)); +// We don't expect many large blocks // !TODO: optimize if needed static block_t* large_block_list = NULL; @@ -152,7 +152,7 @@ static mm_reserve_t* find_used_in_reserve(size_t addr, size_t size) static size_t get_list_idx(size_t size) { - assert(size%exact_match_increment ==0); + assert(size % exact_match_increment == 0); if(size < min_block_size) return 0; size_t list = (size - min_block_size)/exact_match_increment; assert(list < num_exact_list); @@ -400,16 +400,16 @@ static int add_reserve (size_t rsize) //!TODO //create a separate internal API to remove circular calls int ret = sgx_mm_alloc(NULL, chunk_size + 2*guard_size, SGX_EMA_RESERVE, - NULL, NULL,&base); + NULL, NULL, &base); if (ret) return ret; ret = sgx_mm_alloc((void*)((size_t)base + guard_size), chunk_size, - SGX_EMA_COMMIT_ON_DEMAND, NULL, NULL,&base); + SGX_EMA_COMMIT_ON_DEMAND, NULL, NULL, &base); if(ret) return ret; - new_reserve(base, chunk_size); sgx_mm_commit(base, rsize); + new_reserve(base, chunk_size); chunk_size = chunk_size * 2; //double next time if (chunk_size > max_emalloc_size) chunk_size = max_emalloc_size; @@ -419,7 +419,7 @@ static int add_reserve (size_t rsize) return 0; } -void* alloc_from_meta(size_t bsize) +static void* alloc_from_meta(size_t bsize) { if (meta_used + bsize> META_RESERVE_SIZE) return NULL; block_t* b = (block_t*) (&meta_reserve[meta_used]); @@ -498,6 +498,8 @@ void efree(void* payload) if (adding_reserve) { //we don't expect a lot of free blocks 
allocated // in meta reserve. Do nothing now + assert (bstart >= (size_t)(&meta_reserve[0])); + assert (bstart + bsize <= (size_t)(&meta_reserve[META_RESERVE_SIZE])); return; } else From ca27dd4f032d02f15993177ac519978d9301c206 Mon Sep 17 00:00:00 2001 From: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> Date: Mon, 6 Jun 2022 14:35:26 -0700 Subject: [PATCH 25/96] urts/user_handler: allow OCalls/Exception callback without OCall tables This is necessary to enable builtin OCALL and commit-on-demand for emalloc reserves Signed-off-by: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> --- psw/urts/linux/sig_handler.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/psw/urts/linux/sig_handler.cpp b/psw/urts/linux/sig_handler.cpp index b84d0a227..56fcc7fac 100644 --- a/psw/urts/linux/sig_handler.cpp +++ b/psw/urts/linux/sig_handler.cpp @@ -304,14 +304,14 @@ static int sgx_urts_vdso_handler(long rdi, long rsi, long rdx, long ursp, long r { //need to handle exception here __u64 *user_data = (__u64*)run->user_data; - void *ocall_table = reinterpret_cast(user_data[0]); CTrustThread* trust_thread = reinterpret_cast(user_data[1]); - if(ocall_table == NULL || trust_thread == NULL) + if (trust_thread == NULL) { run->user_data = SGX_ERROR_UNEXPECTED; return 0; } + void *ocall_table = reinterpret_cast(user_data[0]); unsigned int ret = do_ecall(ECMD_EXCEPT, ocall_table, NULL, trust_thread); if(SGX_SUCCESS == ret) { @@ -337,14 +337,14 @@ static int sgx_urts_vdso_handler(long rdi, long rsi, long rdx, long ursp, long r else { __u64 *user_data = (__u64*)run->user_data; - sgx_ocall_table_t *ocall_table = reinterpret_cast(user_data[0]); CTrustThread* trust_thread = reinterpret_cast(user_data[1]); - if(ocall_table == NULL || trust_thread == NULL) + if (trust_thread == NULL) { run->user_data = SGX_ERROR_UNEXPECTED; return 0; } + sgx_ocall_table_t *ocall_table = reinterpret_cast(user_data[0]); auto status = stack_sticker((unsigned int )rdi, ocall_table, (void *)rsi, trust_thread, trust_thread->get_tcs()); if(status == (int)SE_ERROR_READ_LOCK_FAIL) From 5d778d80c1ee1184d630178ac1ab07cc3683e500 Mon Sep 17 00:00:00 2001 From: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> Date: Wed, 8 Jun 2022 13:21:52 -0700 Subject: [PATCH 26/96] sdk/emm: update design doc Added a figure for overall arch Restructure the introduction section Signed-off-by: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> --- sdk/emm/design_docs/SGX_EMM.md | 34 ++- sdk/emm/design_docs/images/SGX2_emm_arch.svg | 286 +++++++++++++++++++ 2 files changed, 310 insertions(+), 10 deletions(-) create mode 100644 sdk/emm/design_docs/images/SGX2_emm_arch.svg diff --git a/sdk/emm/design_docs/SGX_EMM.md b/sdk/emm/design_docs/SGX_EMM.md index 63e2c88a7..9870b1786 100644 --- a/sdk/emm/design_docs/SGX_EMM.md +++ b/sdk/emm/design_docs/SGX_EMM.md @@ -1,7 +1,7 @@ SGX Enclave Memory Manager ================================= -## Motivation ## +## Introduction ## An enclave's memory is backed by a special reserved region in RAM, called Enclave Page Cache (EPC). Enclave memory management tasks include @@ -16,21 +16,35 @@ address ranges, and modify attributes of the reserved/committed pages. For details of specific memory management related flows, please refer to [the SGX EDMM driver API spec](SGX_EDMM_driver_interface.md). 
-The public EMM APIs defined here are most likely invoked by some intermediate -runtime level components for specific usages, such as dynamic heap/stack, mmap, -mprotect, higher level language JIT compiler, etc. + +As shown in the figure below, the EMM provides a set of public APIs to be invoked +by upper layer components for specific usages, such as dynamic heap/stack, mmap, +mprotect, higher level language JIT compiler, etc. Another goal of this design is +to make the EMM implementation portable across different runtimes such as +Intel SGX SDK and OpenEnclave. To achieve that, it requires the runtimes to implement +a runtime abstraction layer with APIs defined in this document. The main purpose of +the abstraction layer is to provide an OCall bridge to the enclave common loader outside +the enclave, which interacts with the OS to support the EDMM flows. + +![SGX2 EMM architecture](images/SGX2_emm_arch.svg) + **Note:** As the EMM is a component inside enclave, it should not have direct OS dependencies. -However, the design proposed in this document only considers call flows and semantics for Linux. +However, the design proposed in this document only considers call flows and semantics for Linux. +And the OCall implementation in enclave common loader is currently specified for Linux only though +similar implementation is possible on other OSes. + ## User Experience ## -**Runtime Abstraction** +**Porting EMM to Different Runtimes** + +To port EMM implementation portable across different SGX enclave runtimes, e.g., the Open Enclave and Intel SGX SDKs, +the runtimes needs to implement the runtime abstraction layer APIs. These APIs encapsulate runtime specific support +such as making OCalls, registering callbacks on page faults, on which the EMM implementation relies to collaborate with the OS. -To make the EMM implementation portable across different SGX enclave runtimes, e.g., the Open Enclave and Intel SGX SDKs, -this document also proposes a set of abstraction layer APIs for the runtimes to implement. The runtime abstraction -layer APIs encapsulate runtime specific support such as making OCalls, registering callbacks on page faults, on which -the EMM implementation relies to collaborate with the OS. +Additionally, the runtime needs to properly initialize the EMM and reserve its own regions using the private APIs +as described in the section on [Support for EMM Initialization](#support-for-emm-initialization). The EMM source code will be hosted and maintained in the [Intel SGX PSW and SDK repository](https://github.com/intel/linux-sgx). The EMM can be built as a separate library then linked into any runtime that implements the abstraction layer APIs. diff --git a/sdk/emm/design_docs/images/SGX2_emm_arch.svg b/sdk/emm/design_docs/images/SGX2_emm_arch.svg new file mode 100644 index 000000000..05f6904cf --- /dev/null +++ b/sdk/emm/design_docs/images/SGX2_emm_arch.svg @@ -0,0 +1,286 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Page-2 + + Rectangle.30 + Dynamic Code Loader / Protected Code Loader / SGX-LKL / other... + + + + + Dynamic Code Loader / Protected Code Loader / SGX-LKL / other high level user + + Rectangle.1 + SGX MM public APIs sgx_mm_{alloc | dealloc | commit | uncommi... 
+ + SGX MM public APIs sgx_mm_{alloc | dealloc | commit | uncommit | commit_data| modify_type |modify_permissions} + + Rectangle.2 + mmap + + + + + mmap + + Rectangle.3 + Malloc (heap manager) + + + + + Malloc (heap manager) + + Dynamic connector + + + + Dynamic connector.5 + + + + Dynamic connector.6 + + + + Rectangle.7 + Reserved_memory (Intel) + + + + + Reserved_memory (Intel) + + Dynamic connector.8 + + + + Rectangle.9 + SGX MM implementation + + + + + SGX MM implementation + + Rectangle.10 + SGX runtime abstraction layer APIs sgx_mm_{un|}register_pfhan... + + SGX runtime abstraction layer APIs sgx_mm_{un|}register_pfhandler, sgx_mm_is_within_enclave, sgx_mm_{alloc|modify}_ocall sgx_mutex-* + + Rectangle.11 + mprotect + + + + + mprotect + + Dynamic connector.12 + + + + Rectangle.22 + SGX MM Private APIs mm_{init | alloc | dealloc | commit | unc... + + SGX MM Private APIs mm_{init | alloc | dealloc | commit | uncommit | commit_data| modify_type |modify_permissions} + + Rectangle.24 + pthread + + + + + pthread + + Dynamic connector.29 + + + + Rectangle.16 + Intel/OE/other runtime Abstraction Impl + + + + + Intel/OE/other runtime Abstraction Impl + + Dynamic connector.22 + + + + Sheet.25 + + + + Sheet.26 + Inside Enclave + + Inside Enclave + + Sheet.27 + Outside Enclave + + Outside Enclave + + Rectangle.28 + SGX Enclave Common Loader + + + + + SGX Enclave Common Loader + + Rectangle.45 + OCall Impl + + + + + OCall Impl + + Dynamic connector.49 + + + + Rectangle.52 + Kernel + + Kernel + + Rectangle.36 + SGX driver + + + + + SGX driver + + Rectangle.37 + vDSO SGX interface wrapper + + + + + vDSO SGX interface wrapper + + Dynamic connector.35 + + + + Rectangle.36 + Intel/OE/other runtime Initialization routines + + + + + Intel/OE/other runtime Initialization routines + + Dynamic connector.37 + + + + Rectangle.53 + common + + + + + common + + Rectangle + Runtime specific + + + + + Runtime specific + + Rectangle.40 + Maybe common + + + + + Maybe common + + From 68048bd2f94453d16a83a341260f768ea2f1b5ba Mon Sep 17 00:00:00 2001 From: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> Date: Tue, 14 Jun 2022 19:50:29 -0700 Subject: [PATCH 27/96] psw/enclave_common: remove some unneeded wording from enclave_alloc api docs Signed-off-by: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> --- psw/enclave_common/sgx_enclave_common.h | 3 +-- psw/enclave_common/sgx_mm_ocalls.cpp | 2 +- sdk/emm/design_docs/SGX_EMM.md | 2 +- sdk/emm/include/sgx_mm_rt_abstraction.h | 2 +- 4 files changed, 4 insertions(+), 5 deletions(-) diff --git a/psw/enclave_common/sgx_enclave_common.h b/psw/enclave_common/sgx_enclave_common.h index 480864c6c..e25b18aab 100644 --- a/psw/enclave_common/sgx_enclave_common.h +++ b/psw/enclave_common/sgx_enclave_common.h @@ -251,7 +251,7 @@ bool COMM_API enclave_set_information( /* * Call OS to reserve region for EAUG, immediately or on-demand. * - * @param[in] addr Desired page aligned start address, NULL if no desired address. + * @param[in] addr Desired page aligned start address. * @param[in] length Size of the region in bytes of multiples of page size. * @param[in] flags A bitwise OR of flags describing committing mode, committing * order, address preference, page type. The untrusted side. @@ -259,7 +259,6 @@ bool COMM_API enclave_set_information( * translate following additional bits to proper parameters invoking mmap or other SGX specific * syscall(s) provided by the kernel. 
* The flags param of this interface should include exactly one of following for committing mode: - * - SGX_EMA_RESERVE: kernel map an address range with PROT_NONE, no EPC EAUGed. * - SGX_EMA_COMMIT_NOW: reserves memory range with SGX_EMA_PROT_READ|SGX_EMA_PROT_WRITE, if supported, * kernel is given a hint to EAUG EPC pages for the area as soon as possible. * - SGX_EMA_COMMIT_ON_DEMAND: reserves memory range, EPC pages can be EAUGed upon #PF. diff --git a/psw/enclave_common/sgx_mm_ocalls.cpp b/psw/enclave_common/sgx_mm_ocalls.cpp index 4deda461f..707b6b354 100644 --- a/psw/enclave_common/sgx_mm_ocalls.cpp +++ b/psw/enclave_common/sgx_mm_ocalls.cpp @@ -8,7 +8,7 @@ using namespace std; /* * Call OS to reserve region for EAUG, immediately or on-demand. * - * @param[in] addr Desired page aligned start address, NULL if no desired address. + * @param[in] addr Desired page aligned start address. * @param[in] length Size of the region in bytes of multiples of page size. * @param[in] flags A bitwise OR of flags describing committing mode, committing * order, address preference, page type. The untrusted side. diff --git a/sdk/emm/design_docs/SGX_EMM.md b/sdk/emm/design_docs/SGX_EMM.md index 9870b1786..b2e1e5aeb 100644 --- a/sdk/emm/design_docs/SGX_EMM.md +++ b/sdk/emm/design_docs/SGX_EMM.md @@ -471,7 +471,7 @@ bool sgx_mm_unregister_pfhandler(sgx_mm_pfhandler_t pfhandler); /* * Call OS to reserve region for EAUG, immediately or on-demand. * - * @param[in] addr Desired page aligned start address, NULL if no desired address. + * @param[in] addr Desired page aligned start address. * @param[in] length Size of the region in bytes of multiples of page size. * @param[in] flags A bitwise OR of flags describing committing mode, committing * order, address preference, page type. The untrusted side. diff --git a/sdk/emm/include/sgx_mm_rt_abstraction.h b/sdk/emm/include/sgx_mm_rt_abstraction.h index e6b519f69..520618972 100644 --- a/sdk/emm/include/sgx_mm_rt_abstraction.h +++ b/sdk/emm/include/sgx_mm_rt_abstraction.h @@ -70,7 +70,7 @@ extern "C" { /* * Call OS to reserve region for EAUG, immediately or on-demand. * - * @param[in] addr Desired page aligned start address, NULL if no desired address. + * @param[in] addr Desired page aligned start address. * @param[in] length Size of the region in bytes of multiples of page size. * @param[in] flags A bitwise OR of flags describing committing mode, committing * order, address preference, page type. The untrusted side. From e3865c67c36f5157c593b5b5cdb5124b6f1ee7d0 Mon Sep 17 00:00:00 2001 From: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> Date: Mon, 27 Jun 2022 17:53:46 -0700 Subject: [PATCH 28/96] EMM: update driver interface for upstream kernel Signed-off-by: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> --- .../design_docs/SGX_EDMM_driver_interface.md | 80 ++++++++++--------- 1 file changed, 43 insertions(+), 37 deletions(-) diff --git a/sdk/emm/design_docs/SGX_EDMM_driver_interface.md b/sdk/emm/design_docs/SGX_EDMM_driver_interface.md index 5c67d59d7..64e307906 100644 --- a/sdk/emm/design_docs/SGX_EDMM_driver_interface.md +++ b/sdk/emm/design_docs/SGX_EDMM_driver_interface.md @@ -5,13 +5,14 @@ SGX EDMM Linux Driver Interface Design This document describes possible Linux driver interfaces to facilitate discussions among SGX runtime implementors (e.g., https://github.com/openenclave/openenclave/pull/3639) on supporting different SGX EDMM flows. 
-Although interfaces described here are inspired to be as likely as possible a candidate for future Linux kernel adoption, they are not intended to be a proposal for kernel implementation and are assumed to be implemented as an OOT driver. We hope from discussions enabled by this document, requirements and usage models can be identified to help shape future kernel interfaces.
+Although the interfaces described here are intended to be as close as possible to future Linux kernel APIs, they are not a description of or a proposal for the kernel implementation. We hope that, from the discussions enabled by this document, requirements and usage models can be identified to help shape future kernel interfaces.

-Without losing generality, this document may describe how upper layer user space components would use the interfaces. However, details of design and implementation of those components are intentionally left out. The PR mentioned above would provide more contexts on other user space components and their relationships. Further, for those who may want to learn basic principles behind Intel(R) SGX EDMM instructions and how they are typically used, please refer to following references:
+Without losing generality, this document may describe how upper layer user space components would use the interfaces. However, details of the design and implementation of those components are intentionally left out. The PR mentioned above provides more context on the other user space components and their relationships. Further, for those who may want to learn the basic principles behind Intel(R) SGX EDMM instructions and how they are typically used, please consult the following references:
 - [HASP@ISCA 2016: 11:1-11:9](https://caslab.csl.yale.edu/workshops/hasp2016/HASP16-17.pdf)
 - [Intel SDM Vol.4, Ch.36-42](https://software.intel.com/content/www/us/en/develop/articles/intel-sdm.html)

 For design and implementation of current SGX1 support in upstream Linux kernel (merged in 5.11RC), please refer to [this patch series](https://lwn.net/Articles/837121/)

+**Update on 6/27/2022:** At the time this document was created, Linux kernel support for EDMM was not under active development. The APIs described here are not the same as the actual implementation to be accepted in the mainline kernel (as was expected and stated above). For the current candidate patches for EDMM support in the Linux kernel, please review [this LKML thread](https://lore.kernel.org/lkml/YnrllJ2OqmcqLUuv@kernel.org/T/). However, all the usages and flows described here are supported by the upstream candidate, except for the MAP_POPULATE flag for the "direct allocation flow".

 ## Basic EDMM flows

@@ -19,50 +20,50 @@ SGX EDMM instructions support dynamic EPC page allocation/deallocation for encla

 **Note:** This document is Linux specific. The term "kernel" and "kernel space" are used in this document when general Linux kernel space actions are described whether implemented in an OOT driver or in kernel tree. Kernel specific implementation details will be explicitly stated as "future kernel" or "kernel patches". And implementation details such as OCalls issued by enclaves, ETRACK and inter-processor interrupts (IPIs) issued in kernel are generally omitted for brevity.

-- Allocate a new page at an address in ELRANGE of an enclave. 
+- Allocate a new page at an address in ELRANGE of an enclave.
   - This can be an explicit syscall or triggered by a page fault (#PF) when an unavailable page is accessed.
   - Kernel issues EAUG for the page. All new pages should have RW permissions initially.
- The enclave then issues EACCEPT. - Deallocate an existing page - - Enclave signals via a syscall to kernel that a page is no longer in use. - - Kernel issues EMODT to change page type to PT_TRIM + - Enclave signals via a syscall to kernel that a page is no longer in use. + - Kernel issues EMODT to change page type to PT_TRIM - The enclave issues EACCEPT - Kernel issues EREMOVE on the page at appropriate time -- Change page type, for example, from PT_REG to PT_TCS or PT_TRIM. +- Change page type, for example, from PT_REG to PT_TCS or PT_TRIM. - Enclave requests via a syscall to kernel to change type of a page from PT_REG to PT_TCS/PT_TRIM - Kernel issues EMODT to change page type to PT_TCS/PT_TRIM - The enclave issues EACCEPT - Extend EPCM permissions of a page, e.g., R->RW/RX - Enclave issues EMODPE for the page - - Enclave requests via a syscall that the kernel update the page table permissions to match. - - Kernel modifies permissions in PTE + - Enclave requests via a syscall that the kernel update the page table permissions to match. + - Kernel modifies permissions in PTE - Reduce EPCM permissions of a page, e.g. RW/RX->R - Enclave requests that the kernel restrict the permissions of an EPC page - - Kernel performs EMODPR, updates page tables to match the new EPCM permissions, + - Kernel performs EMODPR, updates page tables to match the new EPCM permissions, - Enclave issues EACCEPT - + **Note:** Flows related to CET support inside enclave will be considered as a future enhancement. -Future kernel may extend mmap and mprotect syscalls to support SGX EDMM usages. But we can't add/change syscall interfaces from an out-of-tree driver. So, in this proposal for possible driver implementation, we reuse mmap for dynamic enclave memory mapping and expose a new IOCTL, sgx_enclave_mprotect, for enclave page modification. - +Future kernel may extend mmap and mprotect syscalls to support SGX EDMM usages. But we can't add/change syscall interfaces from an out-of-tree driver. So, in this proposal for possible driver implementation, we reuse mmap for dynamic enclave memory mapping and expose a new IOCTL, sgx_enclave_mprotect, for enclave page modification. + ## mmap -After enclave is initialized (EINIT IOCTL done), the standard Linux mmap syscall can be used to create a new mapping configured for dynamically allocating enclave memory using EAUG. Following comments are specific to SGX EDMM usages, please refer to [mmap man page](https://man7.org/linux/man-pages/man2/mmap.2.html) for generic definitions. +After enclave is initialized (EINIT IOCTL done), the standard Linux mmap syscall can be used to create a new mapping configured for dynamically allocating enclave memory using EAUG. Following comments are specific to SGX EDMM usages, please refer to [mmap man page](https://man7.org/linux/man-pages/man2/mmap.2.html) for generic definitions. ### Remarks -- To create a mapping for dynamic enclave memory allocation, mmap must be called with an open enclave file descriptor and with PROT_READ | PROT_WRITE for protection flags. +- To create a mapping for dynamic enclave memory allocation, mmap must be called with an open enclave file descriptor and with PROT_READ | PROT_WRITE for protection flags. - Enclave must issue EACCEPT for the pages after mmap before it can modify the content of the pages and extend/reduce permissions in secure way. - The offset in mmap parameter must be zero for enclaves. 
- MAP_* flags must be MAP_SHARED | MAP_FIXED masked with optional flags: - - MAP_POPULATE: hint for kernel to EAUG pages as soon as possible. + - MAP_POPULATE: hint for kernel to EAUG pages as soon as possible. - MAP_GROWSDOWN: used for stacks. The mapping will grow down to the next mapping. - If and only if the address range are within the ELRANGE of the enclave associated with the file descriptor, the mapping will be created. However, user space should not expect EAUG be done by the mmap call. - The kernel can choose EAUG pages immediately (likely for MAP_POPULATE), or EAUG pages upon page faults within the VMA, similar to how kernel would allocate regular memory. -- The kernel will assume the newly requested mapping is for dynamic allocation and initial permissions must be RW until user space request changes later. +- The kernel will assume the newly requested mapping is for dynamic allocation and initial permissions must be RW until user space request changes later. **Implementation Notes:** Current [SGX kernel patches](https://patchwork.kernel.org/project/intel-sgx/patch/20201112220135.165028-11-jarkko@kernel.org/) limit PTE permissions to the EPCM permissions given in SEC_INFO during EADD IOCTL calls. The dynamic allocation mappings should not be subject to those limits. A possible implementation may have these changes: - - sgx_encl_may_map + - sgx_encl_may_map - enforces RW permissions for pages other than those loaded due to EADD or ECREATE. - set up flags to track dynamic pages: type, permissions flag - sgx_vma_mprotect @@ -71,13 +72,15 @@ After enclave is initialized (EINIT IOCTL done), the standard Linux mmap syscall - SELinux policy specific to SGX enclaves - update flags for the dynamic pages -## munmap +**update on 6/27/2022:** The upstream candidate does not yet take hints like MAP_POPULATE, MAP_GROWSDOWN to optimize allocation. However, MAP_POPULATE has been considered and may be added for future. + +## munmap Calling munmap on an enclave page (dynamic allocated or not) has exactly the same effect of calling munmap on a regular RAM page. No sgx specific interface is needed. No behavior changes to current kernel space implementation. ### Remarks - Enclave memory mapings are shared (MAP_SHARED). The mappings in shared processes are kept alive and independently until the process exits - - munmap and closing file descriptors are not required for user space. A dead process automatically releases all mappings and file descriptors. + - munmap and closing file descriptors are not required for user space. A dead process automatically releases all mappings and file descriptors. - Upon all enclave mappings are removed and file handles to the enclave are closed, either by explicit munmap/fclose syscalls or when all hosting apps exited: - The kernel may mark its remaining pages are reclaimable and issue EREMOVE on them any time the kernel deems appropriate. @@ -105,6 +108,9 @@ struct sgx_enclave_mprotect { __u64 prot; }; ``` +**update on 6/27/2022:** The upstream candidate provides separate ioctls interfaces: SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS for EMODPR, +SGX_IOC_ENCLAVE_MODIFY_TYPES for change types to PT_TRIM or PT_TCS, SGX_IOC_ENCLAVE_REMOVE_PAGES for notifying kernel to +perform EREMOVE after eaccept PT_TRIM pages. ### Remarks @@ -157,7 +163,7 @@ trusted_mprotect(..., perms_target, ...){ ## Example advanced flows -More advanced flows can be implemented as combinations of the basic flows. Here we present a few examples. 
+More advanced flows can be implemented as combinations of the basic flows. Here we present a few examples. ### Dynamic code loading @@ -174,7 +180,7 @@ a more robust and flexible way to load trusted code without those pitfalls. Following are two example sequences in which a dynamic code page is loaded using EACCEPTCOPY on demand when the code page is read for execution at the first time. - + **Dynamic loading with direct allocation** ![SGX2 EACCEPTPY flow-direct EAUG](images/SGX2_eaccept2.svg) @@ -189,7 +195,7 @@ code ahead of time. In that case, the sequence would be as follows in case of di 1. Enclave calls mmap to configure a region in enclave ELRANGE for EAUG 2. Kernel EAUG all pages requested. 3. Enclave EACCEPTCOPYs trusted code from an existing EPC page to the target page, which sets RX permissions in EPCM specified in PageInfo operand. -4. Enclave makes ocall which invokes mprotect syscall to change PTE permissions from RW to RX +4. Enclave makes ocall which invokes mprotect syscall to change PTE permissions from RW to RX ### Lazy dynamic stack expansion An enclave can lazily expand its stacks as follows. @@ -204,16 +210,16 @@ An enclave can lazily expand its stacks as follows. ## Exception Handling -This section focuses on changes around \#PF handling which is affected by the new page states (i.e. states in EPCM) introduced by SGX/EDMM, along with the mechanisms for handling exceptions in enclaves. +This section focuses on changes around \#PF handling which is affected by the new page states (i.e. states in EPCM) introduced by SGX/EDMM, along with the mechanisms for handling exceptions in enclaves. -An exception or interrupt during enclave execution will trigger an enclave exit, i.e., Asynchronous Enclave Exits (AEX). To protect the secrecy of the enclave, SGX CPU at AEX would save the state of certain registers within enclave memory, specifically, the thread's current State Save Area (SSA). Then it loads those registers with fixed values called synthetic state, of which the RIP (Instruction Pointer Register) is always set to the AEP (Asynchronous Exit Pointer) address. The AEP is passed in as an operand for the EENTER instruction and points to a trampoline code sequence which ualtimately invokes the ERESUME instruction to reenter the enclave. +An exception or interrupt during enclave execution will trigger an enclave exit, i.e., Asynchronous Enclave Exits (AEX). To protect the secrecy of the enclave, SGX CPU at AEX would save the state of certain registers within enclave memory, specifically, the thread's current State Save Area (SSA). Then it loads those registers with fixed values called synthetic state, of which the RIP (Instruction Pointer Register) is always set to the AEP (Asynchronous Exit Pointer) address. The AEP is passed in as an operand for the EENTER instruction and points to a trampoline code sequence which ualtimately invokes the ERESUME instruction to reenter the enclave. -As with all non-enclave exception scenarios, the kernel fault handler registered in the Interrupt Descriptor Table (IDT) would be the first in line to handle exceptions for AEX, and it needs to either handle it in kernel space, or if it can't handle, invoke user space exception handler. In both cases, after handlers return, control is tranferred to AEP trampoline, which enventually invokes ERESUME to reenter enclave. 
+As with all non-enclave exception scenarios, the kernel fault handler registered in the Interrupt Descriptor Table (IDT) would be the first in line to handle exceptions for AEX, and it needs to either handle it in kernel space, or if it can't handle, invoke user space exception handler. In both cases, after handlers return, control is tranferred to AEP trampoline, which enventually invokes ERESUME to reenter enclave. Current kernel implementation (in release 5.11) can invoke user space exception handler in two ways depending on how EENTER and AEP trampoline are managed: 1. Direct EENTER in runtime: the user space runtime manages EENTER, AEP trampoline directly and use Linux signal APIs to register and handle exceptions. - 2. vDSO interface: the user space invokes [__vdso_sgx_enter_enclave](https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git/tree/arch/x86/include/uapi/asm/sgx.h?h=x86/sgx#n124), passing in a callback for exception handling, and the vDSO implementation manages EENTER and AEP trampoline. + 2. vDSO interface: the user space invokes [__vdso_sgx_enter_enclave](https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git/tree/arch/x86/include/uapi/asm/sgx.h?h=x86/sgx#n124), passing in a callback for exception handling, and the vDSO implementation manages EENTER and AEP trampoline. The direct EENTER method requires signal handling in runtime library which is known to be challenging in Linux environment. Therefore, the vDSO interface is preferred and assumed in following discussion. (Runtime implementing direct EENTER method would have similar flow but the callbacks from vDSO are replaced with Linux signals.) For more details about the new SGX vDSO interface please refer to documentation in the [kernel header file](https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git/tree/arch/x86/include/uapi/asm/sgx.h?h=x86/sgx#n124). General sequence is as follows: @@ -233,24 +239,24 @@ Current kernel implementation (in release 5.11) can invoke user space exception ### Fault Handling in Kernel -SGX enclave execution may cause “EPCM Induced #PF”. For those #PFs, SGX enabled CPUs set the SGX bit (bit 15) in Page Fault Error Code (PFEC). It is always generated in the PFEC register if the fault is due to an EPCM attribute mismatch. The kernel #PF handler will only see the faulting address (via CR3) and the PFEC codes on a page fault. It must rely on this information and its own stored information about the address of the fault (VMA and PTE) to make a decision on how to handle the fault. In many cases, the kernel can only issue a signal or call user handler callback registered in the SGX vDSO function with run.function=ERESUME and pass on all relevant exception info. +SGX enclave execution may cause “EPCM Induced #PF”. For those #PFs, SGX enabled CPUs set the SGX bit (bit 15) in Page Fault Error Code (PFEC). It is always generated in the PFEC register if the fault is due to an EPCM attribute mismatch. The kernel #PF handler will only see the faulting address (via CR3) and the PFEC codes on a page fault. It must rely on this information and its own stored information about the address of the fault (VMA and PTE) to make a decision on how to handle the fault. In many cases, the kernel can only issue a signal or call user handler callback registered in the SGX vDSO function with run.function=ERESUME and pass on all relevant exception info. -In addition, a running enclave can lose EPC context due to power events (S3/S4 transitions) or VM being suspended. 
A page fault on EENTER instruction (either at an initial ecall or at re-entering enclave for exception handling) results in those cases, and the user handler would receive a callback from vDSO with run.function = EENTER. +In addition, a running enclave can lose EPC context due to power events (S3/S4 transitions) or VM being suspended. A page fault on EENTER instruction (either at an initial ecall or at re-entering enclave for exception handling) results in those cases, and the user handler would receive a callback from vDSO with run.function = EENTER. -This table summarizes kernel, vDSO, user handler actions in different fault scenarios related to enclave operations. All exceptions considered here happen inside enclave causing AEX, or at EENTER/ERESUME, so the kernel will convert them to the synchronous callbacks thru vDSO interface as needed. +This table summarizes kernel, vDSO, user handler actions in different fault scenarios related to enclave operations. All exceptions considered here happen inside enclave causing AEX, or at EENTER/ERESUME, so the kernel will convert them to the synchronous callbacks thru vDSO interface as needed. -| Fault Condition | Key #PF PFEC Contents | Kernel/vDSO Action | Untrusted User Handler | +| Fault Condition | Key #PF PFEC Contents | Kernel/vDSO Action | Untrusted User Handler | |---|---|---|---| | Access a page which has been swapped out | #PF where PFEC.P=0 | ELD the page from backing store, ERSUME | N/A | | Access Page Mapped PROT_NONE
(page that the enclave has not mmap'ed) | #PF where PFEC.P=0 | invoke user handler<br>(run.fun=ERESUME) | invoke enclave handler |
| Access Page Mapped PROT_W (Page had been mmap'ed by enclave, but not EAUG'ed) | #PF where PFEC.P=0 | EAUG and map the page then ERESUME | N/A |
-| Page Protection mismatch in PTE| #PF where PFEC.W/R or PFEC.I/D will not match PTE | invoke user handler<br>(run.fun=ERESUME) | invoke enclave handler |
-| Page Protection mismatch in EPCM| #PF where PFEC.SGX=1 | invoke user handler<br>(run.fun=ERESUME) | invoke enclave handler |
-| Access Page with EPCM.Pending | #PF where PFEC.SGX=1 | invoke user handler<br>(run.fun=ERESUME) | invoke enclave handler |
+| Page Protection mismatch in PTE| #PF where PFEC.W/R or PFEC.I/D will not match PTE | invoke user handler<br>(run.fun=ERESUME) | invoke enclave handler |
+| Page Protection mismatch in EPCM| #PF where PFEC.SGX=1 | invoke user handler<br>(run.fun=ERESUME) | invoke enclave handler |
+| Access Page with EPCM.Pending | #PF where PFEC.SGX=1 | invoke user handler<br>(run.fun=ERESUME) | invoke enclave handler |
| Access Page with EPCM.Modified | #PF where PFEC.SGX=1 | invoke user handler<br>(run.fun=ERESUME) | invoke enclave handler |
| Access Page with type PT_TRIM | #PF where PFEC.SGX=1 | invoke user handler<br>(run.fun=ERESUME) | invoke enclave handler |
| EENTER with invalid TCS<br>(EPC loss due to power events or VM suspend/resume) | #PF where PFEC.SGX=1 or PFEC.P = 0 | invoke user handler<br>(run.fun=EENTER) | return error to app signaling enclave lost<br>App should reload enclave |
-| ERESUME with invalid TCS<br>(EPC loss due to power events or VM suspend/resume) | #PF where PFEC.SGX=1 or PFEC.P = 0 | invoke user handler<br>(run.fun=ERESUME) | invoke enclave handler<br>and will trigger #PF on EENTER |
+| ERESUME with invalid TCS<br>(EPC loss due to power events or VM suspend/resume) | #PF where PFEC.SGX=1 or PFEC.P = 0 | invoke user handler<br>(run.fun=ERESUME) | invoke enclave handler<br>
and will trigger #PF on EENTER | **Note:** When an error/exception happens when kernel handles a fault on behalf of an enclave, the kernel sees the original fault happened at AEP and would fix it up as a callback to user handler with run.function = ERESUME. For example, in the first case of the table above, a fault on ELD (EPC loss caused by power events) would be fixed up in this way. @@ -265,10 +271,10 @@ To securely handle all faulting scenarios and EDMM flows, in addition to informa * Address Range: Range of Enclave Linear Addresses that are covered by the region * Permissions: Combination of Read, Write, Execute * Page Type: SGX page type of pages in the region - PT_TCS, PT_REG, or PT_TRIM -* State: the state of the region. The state may indicate that the region is in transition. For example is is changing page type or permissions. +* State: the state of the region. The state may indicate that the region is in transition. For example is is changing page type or permissions. * Table of information about the EACCEPT state of each page in the region. This may be a temporary structure which keeps track of pages which are EACCEPTed for operations requiring EACCEPT. This can ensure that the enclave does not EACCEPT a page twice. For example, when a page is EAUG'ed to an enclave linear address, the enclave should only EACCEPT that page once. If the enclave could be convinced to EACCEPT the page twice, then the OS can potentially EAUG two pages at the same enclave linear address and freely swap them by modifying PTEs. -Enclaves should prevent two threads from simultaneously operating on the same region, e.g, trying to EMODPE on a page while permission change is in progress in another thread. One way to ensure this is to use some lock/synchronization mechanism to protect the state of each region, have the second thread wait if page is in transition state. +Enclaves should prevent two threads from simultaneously operating on the same region, e.g, trying to EMODPE on a page while permission change is in progress in another thread. One way to ensure this is to use some lock/synchronization mechanism to protect the state of each region, have the second thread wait if page is in transition state. When an enclave is called after faulting, the enclave can consult its stored memory region states and the ExitInfo.Vector and MISC.EXINFO in SSA to determine what to do with the fault. The following table lists actions on specific page faults. @@ -293,4 +299,4 @@ For Windows, similar collaboration between debugger and runtime can be implement For runtimes using EDDM to load dynamic modules into enclave after EINIT, the runtime needs to signal module loading events to the debugger so that the debugger can load additional symbols for those modules. That can also be implemented using exceptions or pre-defined breakpoints. -**Note:** Kernel does not fixup Debug Exceptions (#DB) and Breakpoints (#BP). +**Note:** Kernel does not fixup Debug Exceptions (#DB) and Breakpoints (#BP). 
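As an editor's illustration of the allocation flow this design document describes (not part of the patch series), the sketch below shows how untrusted runtime code might reserve a commit-on-demand range after EINIT, following the mmap remarks above; the helper name, `enclave_fd`, and `addr` are assumptions for illustration only.

```
/* Minimal sketch, assuming an initialized enclave and an address range
 * inside its ELRANGE; not taken from the patches in this series. */
#include <errno.h>
#include <stddef.h>
#include <sys/mman.h>

static int edmm_reserve_on_demand(int enclave_fd, void *addr, size_t length)
{
    /* Per the mmap remarks: PROT_READ|PROT_WRITE, MAP_SHARED|MAP_FIXED,
     * offset 0; the kernel may EAUG pages lazily on #PF inside the VMA. */
    void *out = mmap(addr, length, PROT_READ | PROT_WRITE,
                     MAP_SHARED | MAP_FIXED, enclave_fd, 0);
    if (out == MAP_FAILED)
        return errno;
    /* The enclave must still EACCEPT each page before it can use it. */
    return 0;
}
```

Whether the pages are EAUGed eagerly or only on fault is left to the kernel, which is why MAP_POPULATE is treated above purely as a hint.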
From a2a4b4856da941a183a496982f8424e9611c0e87 Mon Sep 17 00:00:00 2001 From: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> Date: Mon, 27 Jun 2022 19:17:54 -0700 Subject: [PATCH 29/96] Enclave Common API for EDMM Adjust Enclave Common API compatible with existing style and definitions: 1) split flags in enclave_alloc to page type and alloc flags 2) return non-OS specific error code 3) add new API enclave_get_features Update EMM ocall definition: 1) Return EFAULT for all ocall failures 2) align sgx_mm_alloc with enclave_alloc to use separate parameters for page type and alloc_flags Signed-off-by: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> --- psw/enclave_common/sgx_enclave_common.cpp | 5 + psw/enclave_common/sgx_enclave_common.h | 137 ++++++++++++----- psw/enclave_common/sgx_mm_ocalls.cpp | 179 +++++++++++++--------- psw/urts/linux/enclave_creator_hw.cpp | 2 +- psw/urts/linux/urts_emm.cpp | 7 +- sdk/emm/README.md | 2 +- sdk/emm/design_docs/SGX_EMM.md | 27 ++-- sdk/emm/ema.c | 11 +- sdk/emm/include/sgx_mm.h | 5 + sdk/emm/include/sgx_mm_rt_abstraction.h | 33 ++-- sdk/emm/ut/stub.c | 2 +- sdk/trts/ema_rt.c | 16 +- 12 files changed, 271 insertions(+), 155 deletions(-) diff --git a/psw/enclave_common/sgx_enclave_common.cpp b/psw/enclave_common/sgx_enclave_common.cpp index daa557e88..f3dace1fd 100644 --- a/psw/enclave_common/sgx_enclave_common.cpp +++ b/psw/enclave_common/sgx_enclave_common.cpp @@ -1311,4 +1311,9 @@ extern "C" bool COMM_API enclave_set_information( return false; } +uint32_t COMM_API enclave_get_features() +{ + //!TODO + return 0; +} #include "sgx_mm_ocalls.cpp" diff --git a/psw/enclave_common/sgx_enclave_common.h b/psw/enclave_common/sgx_enclave_common.h index e25b18aab..d799b7be3 100644 --- a/psw/enclave_common/sgx_enclave_common.h +++ b/psw/enclave_common/sgx_enclave_common.h @@ -86,9 +86,28 @@ typedef enum { ENCLAVE_PAGE_WRITE = 1 << 1, /* Enables write access to the committed region of pages. */ ENCLAVE_PAGE_EXECUTE = 1 << 2, /* Enables execute access to the committed region of pages. */ ENCLAVE_PAGE_THREAD_CONTROL = 1 << 8, /* The page contains a thread control structure. */ + ENCLAVE_PAGE_REG = 2 << 8, /* The page contains a PT_REG page. */ + ENCLAVE_PAGE_SS_FIRST = 5 << 8, /* The page contains the first page of a Shadow Stack (future). */ + ENCLAVE_PAGE_SS_REST = 6 << 8, /* The page contains a non-first page of a Shadow Stack (future). */ ENCLAVE_PAGE_UNVALIDATED = 1 << 12, /* The page contents that you supply are excluded from measurement and content validation. */ } enclave_page_properties_t; +/* + * Hints to OS on how application may use the pages allocated with enclave_alloc. + */ +typedef enum { + ENCLAVE_EMA_NONE = 0, /* No suggestions provided. */ + ENCLAVE_EMA_RESERVE = 1, /* Suggest that the kernel should reserve the memory range and not immediately EAUG pages. */ + ENCLAVE_EMA_COMMIT_NOW = 2, /* Gives a hint that the kernel should EAUG pages immediately. */ + ENCLAVE_EMA_COMMIT_ON_DEMAND = 4, /* Gives a hint that the kernel can EAUG pages later. */ + ENCLAVE_EMA_GROWSDOWN = 16, /* Gives a hint to the kernel that the application will access pages above the + last accessed page. The kernel may want to EAUG pages from higher to lower addresses + with no gaps in addresses above the last committed page. */ + ENCLAVE_EMA_GROWSUP = 32, /* Gives a hint to the kernel that the application will access pages below the + last accessed page. 
The kernel may want to EAUG pages from lower to higher addresses + with no gaps in addresses below the last committed page. */ +} enclave_alloc_flags; + typedef enum { ENCLAVE_LAUNCH_TOKEN = 0x1 } enclave_info_type_t; @@ -100,7 +119,7 @@ typedef enum { #define ENCLAVE_CREATE_EX_EL_RANGE (1 << ENCLAVE_CREATE_EX_EL_RANGE_BIT_IDX) // Reserve Bit 0 for the el_range config //update the following when adding new extended feature -#define _ENCLAVE_CREATE_LAST_EX_FEATURE_IDX_ ENCLAVE_CREATE_EX_EL_RANGE_BIT_IDX +#define _ENCLAVE_CREATE_LAST_EX_FEATURE_IDX_ ENCLAVE_CREATE_EX_EL_RANGE_BIT_IDX #define _ENCLAVE_CREATE_EX_FEATURES_MASK_ (((uint32_t)-1) >> (ENCLAVE_CREATE_MAX_EX_FEATURES_COUNT - 1 - _ENCLAVE_CREATE_LAST_EX_FEATURE_IDX_)) @@ -148,7 +167,7 @@ void* COMM_API enclave_create_ex( COMM_IN const uint32_t ex_features, COMM_IN const void* ex_features_p[32], COMM_OUT_OPT uint32_t* enclave_error); - + /* enclave_create() * Parameters: @@ -175,7 +194,7 @@ void* COMM_API enclave_create( /* enclave_load_data() * Parameters: * target_address [in] - The address in the enclave where you want to load the data. - * target_size [in] - The size of the range that you want to load in the enclave, in bytes. + * target_size [in] - The size of the range that you want to load in the enclave, in bytes. * source_buffer [in, optional] - An optional pointer to the data you want to load into the enclave. * data_properties [in] - The properties of the pages you want to add to the enclave. * enclave_error [out, optional] - An optional pointer to a variable that receives an enclave error code. @@ -193,7 +212,7 @@ size_t COMM_API enclave_load_data( /* enclave_initialize() * Parameters: * base_address [in] - The enclave base address as returned from the enclave_create API. - * info [in] - A pointer to the architecture-specific information to use to initialize the enclave. + * info [in] - A pointer to the architecture-specific information to use to initialize the enclave. * info_size [in] - The length of the structure that the info parameter points to, in bytes. * enclave_error [out, optional] - An optional pointer to a variable that receives an enclave error code. * Return Value: @@ -224,7 +243,7 @@ bool COMM_API enclave_delete( * info_type[in] - Identifies the type of information requested. initialized. * output_info[out] - Pointer to information returned by the API * output_info_size[in, out] - Size of the output_info buffer, in bytes. If the API succeeds, then this will return the number of bytes returned in output_info. If the API fails with, ENCLAVE_INVALID_SIZE, then this will return the required size - * enclave_error [out, optional] - An optional pointer to a variable that receives an enclave error code. + * enclave_error [out, optional] - An optional pointer to a variable that receives an enclave error code. */ bool COMM_API enclave_get_information( COMM_IN void* base_address, @@ -239,7 +258,7 @@ bool COMM_API enclave_get_information( * info_type[in] - Identifies the type of information requested. not been initialized. * input_info[in] - Pointer to information provided to the API * input_info_size[in] - Size of the information, in bytes, provided in input_info from the API. - * enclave_error [out, optional] - An optional pointer to a variable that receives an enclave error code. + * enclave_error [out, optional] - An optional pointer to a variable that receives an enclave error code. 
*/ bool COMM_API enclave_set_information( COMM_IN void* base_address, @@ -253,56 +272,102 @@ bool COMM_API enclave_set_information( * * @param[in] addr Desired page aligned start address. * @param[in] length Size of the region in bytes of multiples of page size. - * @param[in] flags A bitwise OR of flags describing committing mode, committing + * @param[in] page_properties Page types to be allocated, must be one of these: + * - ENCLAVE_PAGE_REG: regular page type. This is the default if not specified. + * - ENCLAVE_PAGE_SS_FIRST: the first page in shadow stack. + * - ENCLAVE_PAGE_SS_REST: the rest page in shadow stack. + * @param[in] alloc_flags A bitwise OR of flags describing committing mode, committing * order, address preference, page type. The untrusted side. * implementation should always invoke mmap syscall with MAP_SHARED|MAP_FIXED_NOREPLACE, and * translate following additional bits to proper parameters invoking mmap or other SGX specific * syscall(s) provided by the kernel. - * The flags param of this interface should include exactly one of following for committing mode: - * - SGX_EMA_COMMIT_NOW: reserves memory range with SGX_EMA_PROT_READ|SGX_EMA_PROT_WRITE, if supported, + * The alloc_flags param of this interface should include exactly one of following for committing mode: + * - ENCLAVE_EMA_COMMIT_NOW: reserves memory range with ENCLAVE_PAGE_READ|SGX_EMA_PROT_WRITE, if supported, * kernel is given a hint to EAUG EPC pages for the area as soon as possible. - * - SGX_EMA_COMMIT_ON_DEMAND: reserves memory range, EPC pages can be EAUGed upon #PF. + * - ENCLAVE_EMA_COMMIT_ON_DEMAND: reserves memory range, EPC pages can be EAUGed upon #PF. * ORed with zero or one of the committing order flags: - * - SGX_EMA_GROWSDOWN: if supported, a hint given for the kernel to EAUG pages from higher + * - ENCLAVE_EMA_GROWSDOWN: if supported, a hint given for the kernel to EAUG pages from higher * to lower addresses, no gaps in addresses above the last committed. - * - SGX_EMA_GROWSUP: if supported, a hint given for the kernel to EAUG pages from lower + * - ENCLAVE_EMA_GROWSUP: if supported, a hint given for the kernel to EAUG pages from lower * to higher addresses, no gaps in addresses below the last committed. - * Optionally ORed with one of following page types: - * - SGX_EMA_PAGE_TYPE_REG: regular page type. This is the default if not specified. - * - SGX_EMA_PAGE_TYPE_SS_FIRST: the first page in shadow stack. - * - SGX_EMA_PAGE_TYPE_SS_REST: the rest page in shadow stack. - * @retval 0 The operation was successful. - * @retval EINVAL Any parameter passed in is not valid. - * @retval errno Error as reported by dependent syscalls, e.g., mmap(). + * @retval ENCLAVE_ERROR_SUCCESS(0) The operation was successful. + * @retval ENCLAVE_NOT_SUPPORTED: Enavle feature is not supported by the system + * @retval ENCLAVE_LOST: may be returned if the enclave has been removed or if it has not been initialized (via EINIT) + * @retval ENCLAVE_INVALID_ADDRESS: the start address does not point to an enclave. + * @retval ENCLAVE_INVALID_PARAMETER: an invalid combination of flags was provided. + * @retval ENCLAVE_OUT_OF_MEMORY: No EPC left (some OSes like Linux), or system is out of memory for internal allocation by OS or this function. 
+ * @retval ENCLAVE_DEVICE_NO_MEMORY: NO EPC left (some OSes like Windows) + * @retval ENCLAVE_INVALID_ADDRESS: address does not point to an enclave or valid memory within the enclave + * @retval ENCLAVE_NOT_INITIALIZED: may be returned if the enclave has not been initialized (via EINIT). + * Some configurations may give ENCLAVE_LOST if the enclave has not been initialized. + * @retval ENCLAVE_UNEXPECTED, unexpected error. */ -int COMM_API enclave_alloc(uint64_t addr, size_t length, int flags); + +uint32_t COMM_API enclave_alloc( + COMM_IN uint64_t addr, + COMM_IN size_t length, + COMM_IN uint32_t page_properties, + COMM_IN uint32_t alloc_flags); /* * Call OS to change permissions, type, or notify EACCEPT done after TRIM. * * @param[in] addr Start address of the memory to change protections. * @param[in] length Length of the area. This must be a multiple of the page size. - * @param[in] flags_from The original EPCM flags of the EPC pages to be modified. + * @param[in] page_properties_from The original EPCM flags of the EPC pages to be modified. * Must be bitwise OR of following: - * SGX_EMA_PROT_READ - * SGX_EMA_PROT_WRITE - * SGX_EMA_PROT_EXEC - * SGX_EMA_PAGE_TYPE_REG: regular page, changeable to TRIM or TCS - * SGX_EMA_PAGE_TYPE_TRIM: signal to the kernel EACCEPT is done for TRIM pages. - * @param[in] flags_to The target EPCM flags. This must be bitwise OR of following: - * SGX_EMA_PROT_READ - * SGX_EMA_PROT_WRITE - * SGX_EMA_PROT_EXEC - * SGX_EMA_PAGE_TYPE_TRIM: change the page type to PT_TRIM. Note the address + * ENCLAVE_PAGE_READ + * ENCLAVE_PAGE_WRITE + * ENCLAVE_PAGE_EXEC + * ENCLAVE_PAGE_REG: regular page, changeable to TRIM or TCS + * ENCLAVE_PAGE_TRIM: signal to the kernel EACCEPT is done for TRIM pages. + * @param[in] page_properties_to The target EPCM flags. This must be bitwise OR of following: + * ENCLAVE_PAGE_READ + * ENCLAVE_PAGE_WRITE + * ENCLAVE_PAGE_EXEC + * ENCLAVE_PAGE_TRIM: change the page type to PT_TRIM. Note the address * range for trimmed pages may still be reserved by enclave with * proper permissions. - * SGX_EMA_PAGE_TYPE_TCS: change the page type to PT_TCS - * @retval 0 The operation was successful. - * @retval EINVAL A parameter passed in is not valid. - * @retval errno Error as reported by dependent syscalls, e.g., mprotect(). + * ENCLAVE_PAGE_TCS: change the page type to PT_TCS + * @retval ENCLAVE_ERROR_SUCCESS(0) The operation was successful. + * @retval ENCLAVE_NOT_SUPPORTED: Enclave feature is not supported by the system + * @retval ENCLAVE_LOST: may be returned if the enclave has been removed or if it has not been initialized (via EINIT) + * @retval ENCLAVE_INVALID_PARAMETER: an invalid combination of flags was provided. + * @retval ENCLAVE_OUT_OF_MEMORY: No EPC left (some OSes like Linux), or system is out of memory for internal allocation by OS or this function. + * @retval ENCLAVE_DEVICE_NO_MEMORY: NO EPC left (some OSes like Windows) + * @retval ENCLAVE_INVALID_ADDRESS: address does not point to an enclave or valid memory within the enclave + * @retval ENCLAVE_NOT_INITIALIZED: may be returned if the enclave has not been initialized (via EINIT). + * Some configurations may give ENCLAVE_LOST if the enclave has not been initialized. + * @retval ENCLAVE_UNEXPECTED, unexpected error. 
+ */ + +uint32_t COMM_API enclave_modify( + COMM_IN uint64_t addr, + COMM_IN size_t length, + COMM_IN uint32_t page_properties_from, + COMM_IN uint32_t page_properties_to); + + + + +/** + * The enclave features flags describe additional enclave features + * which are supported by the platform. A value of 0 indicates not features are supported. + */ +typedef enum +{ + ENCLAVE_FEATURE_NONE = 0, + ENCLAVE_FEATURE_SGX1 = 0x00000001, /* The platform (HW and OS) supports SGX1 */ + ENCLAVE_FEATURE_SGX2 = 0x00000002, /* The platform (HW and OS) supports SGX2 */ +}enclave_features; + +/* + * Get enclave features which are supported by the platform. + * @return an enclave_features enum indicating enclave features which are supported on the platform + * */ +uint32_t COMM_API enclave_get_features(); -int COMM_API enclave_modify(uint64_t addr, size_t length, int flags_from, int flags_to); #ifdef __cplusplus } diff --git a/psw/enclave_common/sgx_mm_ocalls.cpp b/psw/enclave_common/sgx_mm_ocalls.cpp index 707b6b354..2deeb304f 100644 --- a/psw/enclave_common/sgx_mm_ocalls.cpp +++ b/psw/enclave_common/sgx_mm_ocalls.cpp @@ -5,39 +5,53 @@ #include using namespace std; #define PROT_MASK (PROT_READ|PROT_WRITE|PROT_EXEC) + /* * Call OS to reserve region for EAUG, immediately or on-demand. * * @param[in] addr Desired page aligned start address. * @param[in] length Size of the region in bytes of multiples of page size. - * @param[in] flags A bitwise OR of flags describing committing mode, committing + * @param[in] page_properties Page types to be allocated, must be one of these: + * - ENCLAVE_PAGE_REG: regular page type. This is the default if not specified. + * - ENCLAVE_PAGE_SS_FIRST: the first page in shadow stack. + * - ENCLAVE_PAGE_SS_REST: the rest page in shadow stack. + * @param[in] alloc_flags A bitwise OR of flags describing committing mode, committing * order, address preference, page type. The untrusted side. - * implementation should always invoke mmap syscall with MAP_SHARED|MAP_FIXED, and + * implementation should always invoke mmap syscall with MAP_SHARED|MAP_FIXED_NOREPLACE, and * translate following additional bits to proper parameters invoking mmap or other SGX specific * syscall(s) provided by the kernel. - * The flags param of this interface should include exactly one of following for committing mode: - * - SGX_EMA_COMMIT_NOW: reserves memory range with SGX_EMA_PROT_READ|SGX_EMA_PROT_WRITE, if supported, + * The alloc_flags param of this interface should include exactly one of following for committing mode: + * - ENCLAVE_EMA_COMMIT_NOW: reserves memory range with ENCLAVE_PAGE_READ|SGX_EMA_PROT_WRITE, if supported, * kernel is given a hint to EAUG EPC pages for the area as soon as possible. - * - SGX_EMA_COMMIT_ON_DEMAND: reserves memory range, EPC pages can be EAUGed upon #PF. + * - ENCLAVE_EMA_COMMIT_ON_DEMAND: reserves memory range, EPC pages can be EAUGed upon #PF. * ORed with zero or one of the committing order flags: - * - SGX_EMA_GROWSDOWN: if supported, a hint given for the kernel to EAUG pages from higher + * - ENCLAVE_EMA_GROWSDOWN: if supported, a hint given for the kernel to EAUG pages from higher * to lower addresses, no gaps in addresses above the last committed. - * - SGX_EMA_GROWSUP: if supported, a hint given for the kernel to EAUG pages from lower + * - ENCLAVE_EMA_GROWSUP: if supported, a hint given for the kernel to EAUG pages from lower * to higher addresses, no gaps in addresses below the last committed. 
- * Optionally ORed with one of following page types: - * - SGX_EMA_PAGE_TYPE_REG: regular page type. This is the default if not specified. - * - SGX_EMA_PAGE_TYPE_SS_FIRST: the first page in shadow stack. - * - SGX_EMA_PAGE_TYPE_SS_REST: the rest page in shadow stack. - * @retval 0 The operation was successful. - * @retval EINVAL Any parameter passed in is not valid. - * @retval errno Error as reported by dependent syscalls, e.g., mmap(). + * @retval ENCLAVE_ERROR_SUCCESS(0) The operation was successful. + * @retval ENCLAVE_NOT_SUPPORTED: feature is not supported by the system + * @retval ENCLAVE_LOST: may be returned if the enclave has been removed or if it has not been initialized (via EINIT) + * @retval ENCLAVE_INVALID_ADDRESS: the start address does not point to an enclave. + * @retval ENCLAVE_INVALID_PARAMETER: an invalid combinations of parameters. + * @retval ENCLAVE_OUT_OF_MEMORY: No EPC left (some OSes like Linux), or system is out of memory for internal allocation by OS or this function. + * @retval ENCLAVE_DEVICE_NO_MEMORY: NO EPC left (some OSes like Windows) + * @retval ENCLAVE_INVALID_ADDRESS: address does not point to an enclave or valid memory within the enclave + * @retval ENCLAVE_NOT_INITIALIZED: may be returned if the enclave has not been initialized (via EINIT). + * Some configurations may give ENCLAVE_LOST if the enclave has not been initialized. + * @retval ENCLAVE_UNEXPECTED, unexpected error. */ -extern "C" int COMM_API enclave_alloc(uint64_t addr, size_t length, int flags) + +uint32_t COMM_API enclave_alloc( + COMM_IN uint64_t addr, + COMM_IN size_t length, + COMM_IN uint32_t page_properties, + COMM_IN uint32_t alloc_flags) { - int ret = EINVAL; + int ret = ENCLAVE_UNEXPECTED; SE_TRACE(SE_TRACE_DEBUG, - "enclave_alloc for 0x%llX ( %llX ) with 0x%lX\n", - addr, length, flags); + "enclave_alloc for 0x%llX ( %llX ) with alloc flags = 0x%lX\n", + addr, length, alloc_flags); if (s_driver_type == SGX_DRIVER_DCAP) { @@ -49,23 +63,24 @@ extern "C" int COMM_API enclave_alloc(uint64_t addr, size_t length, int flags) } int enclave_fd = get_file_handle_from_address((void *)addr); if (enclave_fd == -1) - return ret; + return ENCLAVE_INVALID_ADDRESS; int map_flags = MAP_SHARED | MAP_FIXED; //!TODO: support COMMIT_NOW when kernel supports - if (flags & SGX_EMA_COMMIT_NOW) + if (alloc_flags & ENCLAVE_EMA_COMMIT_NOW) { } //!TODO support CET - int type = flags & SGX_EMA_PAGE_TYPE_MASK; - if((type == SGX_EMA_PAGE_TYPE_SS_FIRST) | (type == SGX_EMA_PAGE_TYPE_SS_REST)) - return EFAULT; - if((type == SGX_EMA_PAGE_TYPE_SS_FIRST) && length > SE_PAGE_SIZE) - return ret; + int type = page_properties; + if((type == ENCLAVE_PAGE_SS_FIRST) | (type == ENCLAVE_PAGE_SS_REST)) + return ENCLAVE_NOT_SUPPORTED; + if((type == ENCLAVE_PAGE_SS_FIRST) && length > SE_PAGE_SIZE) + return ENCLAVE_INVALID_PARAMETER; void *out = mmap((void *)addr, length, PROT_WRITE | PROT_READ, map_flags, enclave_fd, 0); if (out == MAP_FAILED) { - SE_TRACE(SE_TRACE_WARNING, "mmap failed, error = %d\n", errno); ret = errno; + SE_TRACE(SE_TRACE_WARNING, "mmap failed, error = %d\n", ret); + ret = error_driver2api(-1, ret); }else ret = 0; return ret; @@ -96,12 +111,14 @@ static int emodt(int fd, uint64_t addr, size_t length, uint64_t type) do { int ret = ioctl(fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &ioc); + if (ret && ioc.count == 0 && errno != EBUSY && errno != EAGAIN) { //total failure + int err = errno; SE_TRACE(SE_TRACE_WARNING, "MODT failed, error = %d for 0x%llX ( %llX ), type: 0x%llX\n", - errno, addr, length, type); - return 
errno; + err, addr, length, type); + return err; } //for recoverable partial errors length -= ioc.count; @@ -137,10 +154,11 @@ static int trim_accept(int fd, uint64_t addr, size_t length) ret = ioctl(fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &ioc); if(ret && ioc.count == 0 && errno != EBUSY && errno != EAGAIN ) { //total failure + int err = errno; SE_TRACE(SE_TRACE_WARNING, "REMOVE failed, error = %d for 0x%llX ( %llX )\n", - errno, addr, length); - return errno; + err, addr, length); + return err; } ioc.length -= ioc.count; ioc.offset += ioc.count; @@ -170,10 +188,11 @@ static int emodpr(int fd, uint64_t addr, size_t length, uint64_t prot) //TODO: use error code if (ret && ioc.count == 0 && errno != EBUSY && errno!=EAGAIN ) { //total failure + int err = errno; SE_TRACE(SE_TRACE_WARNING, "MODP failed, error = %d for 0x%llX ( %llX ), prot: 0x%llX\n", - errno, addr, length, prot); - return errno; + err, addr, length, prot); + return err; } ioc.length -= ioc.count; ioc.offset += ioc.count; @@ -252,48 +271,60 @@ static int emodpr_legacy(int fd, uint64_t addr, uint64_t size, uint64_t flag) return SGX_SUCCESS; } + /* * Call OS to change permissions, type, or notify EACCEPT done after TRIM. * * @param[in] addr Start address of the memory to change protections. * @param[in] length Length of the area. This must be a multiple of the page size. - * @param[in] flags_from The original EPCM flags of the EPC pages to be modified. + * @param[in] page_properties_from The original EPCM flags of the EPC pages to be modified. * Must be bitwise OR of following: - * SGX_EMA_PROT_READ - * SGX_EMA_PROT_WRITE - * SGX_EMA_PROT_EXEC - * SGX_EMA_PAGE_TYPE_REG: regular page, changeable to TRIM or TCS - * SGX_EMA_PAGE_TYPE_TRIM: signal to the kernel EACCEPT is done for TRIM pages. - * @param[in] flags_to The target EPCM flags. This must be bitwise OR of following: - * SGX_EMA_PROT_READ - * SGX_EMA_PROT_WRITE - * SGX_EMA_PROT_EXEC - * SGX_EMA_PAGE_TYPE_TRIM: change the page type to PT_TRIM. Note the address + * ENCLAVE_PAGE_READ + * ENCLAVE_PAGE_WRITE + * ENCLAVE_PAGE_EXEC + * ENCLAVE_PAGE_REG: regular page, changeable to TRIM or TCS + * ENCLAVE_PAGE_TRIM: signal to the kernel EACCEPT is done for TRIM pages. + * @param[in] page_properties_to The target EPCM flags. This must be bitwise OR of following: + * ENCLAVE_PAGE_READ + * ENCLAVE_PAGE_WRITE + * ENCLAVE_PAGE_EXEC + * ENCLAVE_PAGE_TRIM: change the page type to PT_TRIM. Note the address * range for trimmed pages may still be reserved by enclave with * proper permissions. - * SGX_EMA_PAGE_TYPE_TCS: change the page type to PT_TCS - * @retval 0 The operation was successful. - * @retval EINVAL A parameter passed in is not valid. - * @retval errno Error as reported by dependent syscalls, e.g., mprotect(). + * ENCLAVE_PAGE_TCS: change the page type to PT_TCS + * @retval ENCLAVE_ERROR_SUCCESS(0) The operation was successful. + * @retval ENCLAVE_NOT_SUPPORTED: SGX EDMM is not supported by the system + * @retval ENCLAVE_LOST: may be returned if the enclave has been removed or if it has not been initialized (via EINIT) + * @retval ENCLAVE_INVALID_PARAMETER: an invalid combination of flags was provided. + * @retval ENCLAVE_OUT_OF_MEMORY: No EPC left (some OSes like Linux), or system is out of memory for internal allocation by OS or this function. 
+ * @retval ENCLAVE_DEVICE_NO_MEMORY: NO EPC left (some OSes like Windows) + * @retval ENCLAVE_INVALID_ADDRESS: address does not point to an enclave or valid memory within the enclave + * @retval ENCLAVE_NOT_INITIALIZED: may be returned if the enclave has not been initialized (via EINIT). + * Some configurations may give ENCLAVE_LOST if the enclave has not been initialized. + * @retval ENCLAVE_UNEXPECTED, unexpected error. */ -extern "C" int COMM_API enclave_modify(uint64_t addr, size_t length, int flags_from, int flags_to) +uint32_t COMM_API enclave_modify( + COMM_IN uint64_t addr, + COMM_IN size_t length, + COMM_IN uint32_t page_properties_from, + COMM_IN uint32_t page_properties_to) { - int ret = EFAULT; + int ret = ENCLAVE_UNEXPECTED; SE_TRACE(SE_TRACE_DEBUG, "enclave_modify for 0x%llX ( %llX ) from 0x%lX to %lX\n", - addr, length, flags_from, flags_to); + addr, length, page_properties_from, page_properties_to); if (s_driver_type == SGX_DRIVER_DCAP) { - return ret; + return ENCLAVE_NOT_SUPPORTED; } uint64_t enclave_base = (uint64_t)get_enclave_base_address_from_address((void *)addr); if (enclave_base == 0) { - return EINVAL; + return ENCLAVE_INVALID_ADDRESS; } if (length % SE_PAGE_SIZE != 0) - return EINVAL; + return ENCLAVE_INVALID_PARAMETER; function _trim = trim; function _trim_accept = trim_accept; function _mktcs = mktcs; @@ -307,19 +338,19 @@ extern "C" int COMM_API enclave_modify(uint64_t addr, size_t length, int flags_f _emodpr = emodpr_legacy; fd = s_hdevice; } - if(fd == -1) return EINVAL; + if(fd == -1) return ENCLAVE_INVALID_ADDRESS; - int type_to = (flags_to & SGX_EMA_PAGE_TYPE_MASK); - int type_from = (flags_from & SGX_EMA_PAGE_TYPE_MASK); + int type_to = (page_properties_to & SGX_EMA_PAGE_TYPE_MASK); + int type_from = (page_properties_from & SGX_EMA_PAGE_TYPE_MASK); if (type_from == SGX_EMA_PAGE_TYPE_TRIM && type_to != SGX_EMA_PAGE_TYPE_TRIM) { - return EINVAL; + return ENCLAVE_INVALID_PARAMETER; } - int prot_to = (flags_to & PROT_MASK); - int prot_from = (flags_from & PROT_MASK); + int prot_to = (page_properties_to & PROT_MASK); + int prot_from = (page_properties_from & PROT_MASK); if ((prot_to != prot_from) && (type_to != type_from)) { - return EINVAL; + return ENCLAVE_INVALID_PARAMETER; } if ((type_from & type_to & SGX_EMA_PAGE_TYPE_TRIM)) @@ -327,7 +358,7 @@ extern "C" int COMM_API enclave_modify(uint64_t addr, size_t length, int flags_f //user space can only do EACCEPT for PT_TRIM type ret = _trim_accept(fd, addr, length); if (ret) - return ret; + return error_driver2api(-1, ret); if (prot_to == PROT_NONE) { //EACCEPT done and notified. 
@@ -335,7 +366,7 @@ extern "C" int COMM_API enclave_modify(uint64_t addr, size_t length, int flags_f //only mprotect is needed ret = mprotect((void *)addr, length, prot_to); if (ret == -1) - return ret; + return error_driver2api(ret, errno); } return ret; } @@ -344,21 +375,27 @@ extern "C" int COMM_API enclave_modify(uint64_t addr, size_t length, int flags_f { assert(type_from != SGX_EMA_PAGE_TYPE_TRIM); if (prot_to != prot_from) - return EINVAL; - return _trim(fd, addr, length); + return ENCLAVE_INVALID_PARAMETER; + ret = _trim(fd, addr, length); + if (ret) + return error_driver2api(-1, ret); + return 0; } if (type_to == SGX_EMA_PAGE_TYPE_TCS) { if (type_from != SGX_EMA_PAGE_TYPE_REG) - return EINVAL; + return ENCLAVE_INVALID_PARAMETER; if ((prot_from != (SGX_EMA_PROT_READ_WRITE)) && prot_to != prot_from) - return EINVAL; - return _mktcs(fd, addr, length); + return ENCLAVE_INVALID_PARAMETER; + ret = _mktcs(fd, addr, length); + if (ret) + return error_driver2api(-1, ret); + return 0; } if (type_to != type_from) - return EINVAL; + return ENCLAVE_INVALID_PARAMETER; if (prot_to == prot_from) { @@ -369,14 +406,14 @@ extern "C" int COMM_API enclave_modify(uint64_t addr, size_t length, int flags_f { ret = _emodpr(fd, addr, length, prot_to); if (ret) - return ret; + return error_driver2api(-1, ret); } else { - return EINVAL; + return ENCLAVE_INVALID_PARAMETER; } ret = mprotect((void *)addr, length, prot_to); if (ret == -1) - return errno; + return error_driver2api(ret, errno); return ret; } diff --git a/psw/urts/linux/enclave_creator_hw.cpp b/psw/urts/linux/enclave_creator_hw.cpp index f954ea716..c3976009c 100644 --- a/psw/urts/linux/enclave_creator_hw.cpp +++ b/psw/urts/linux/enclave_creator_hw.cpp @@ -308,7 +308,7 @@ void EnclaveCreatorHW::close_device() int EnclaveCreatorHW::alloc(uint64_t addr, uint64_t size, int flag) { - int ret = enclave_alloc(addr, size, flag); + int ret = enclave_alloc(addr, size, flag, SGX_EMA_COMMIT_ON_DEMAND); if (ret) { SE_TRACE(SE_TRACE_ERROR, "SGX_IOC_ENCLAVE_alloc failed %d\n", ret); diff --git a/psw/urts/linux/urts_emm.cpp b/psw/urts/linux/urts_emm.cpp index 65dea9a6b..a2dab3b16 100644 --- a/psw/urts/linux/urts_emm.cpp +++ b/psw/urts/linux/urts_emm.cpp @@ -41,7 +41,8 @@ typedef struct ms_alloc_ocall_t { int32_t retval; size_t addr; size_t size; - uint32_t flags; + uint32_t page_properties; + uint32_t alloc_flags; } ms_emm_alloc_ocall_t; extern "C" sgx_status_t SGX_CDECL ocall_emm_alloc(void* pms) @@ -49,9 +50,9 @@ extern "C" sgx_status_t SGX_CDECL ocall_emm_alloc(void* pms) ms_emm_alloc_ocall_t* ms = SGX_CAST(ms_emm_alloc_ocall_t*, pms); #ifdef SE_SIM - ms->retval = mprotect((void*)ms->addr, ms->size, ms->flags|PROT_MASK); + ms->retval = mprotect((void*)ms->addr, ms->size, ms->page_properties|PROT_MASK); #else - ms->retval = enclave_alloc(ms->addr, ms->size, ms->flags); + ms->retval = enclave_alloc(ms->addr, ms->size,ms->page_properties, ms->alloc_flags); #endif return SGX_SUCCESS; } diff --git a/sdk/emm/README.md b/sdk/emm/README.md index 72c3754b6..6ea5f04c9 100644 --- a/sdk/emm/README.md +++ b/sdk/emm/README.md @@ -10,7 +10,7 @@ The typical target users of these APIs are intermediate level components in SGX with dynamic expansion capabilities, mmap/mprotect/pthread API implementations for enclaves, dynamic code loader and JIT compilers,etc. 
-This implementation aims to be reusable in any SGX runtime that provides a minimal C runtime (malloc required) and +This implementation aims to be reusable in any SGX runtime that provides a minimal C runtime and implements the abstraction layer APIs as defined in [sgx_mm_rt_abstraction.h](include/sgx_mm_rt_abstraction.h). The instructions here are for developing and testing the EMM functionality only. diff --git a/sdk/emm/design_docs/SGX_EMM.md b/sdk/emm/design_docs/SGX_EMM.md index b2e1e5aeb..7d3a619e1 100644 --- a/sdk/emm/design_docs/SGX_EMM.md +++ b/sdk/emm/design_docs/SGX_EMM.md @@ -365,7 +365,7 @@ int sgx_mm_modify_permissions(void *addr, size_t length, int prot); * @retval EACCES Original page type can not be changed to target type. * @retval EINVAL The memory region was not allocated or outside enclave * or other invalid parameters that are not supported. - * @retval EPERM Target page type is no allowed by this API, e.g., PT_TRIM, + * @retval EPERM Target page type is not allowed by this API, e.g., PT_TRIM, * PT_SS_FIRST, PT_SS_REST. */ int sgx_mm_modify_type(void *addr, size_t length, int type); @@ -468,16 +468,20 @@ bool sgx_mm_unregister_pfhandler(sgx_mm_pfhandler_t pfhandler); ### OCalls ``` + /* * Call OS to reserve region for EAUG, immediately or on-demand. * * @param[in] addr Desired page aligned start address. * @param[in] length Size of the region in bytes of multiples of page size. - * @param[in] flags A bitwise OR of flags describing committing mode, committing + * @param[in] page_type One of following page types: + * - SGX_EMA_PAGE_TYPE_REG: regular page type. This is the default if not specified. + * - SGX_EMA_PAGE_TYPE_SS_FIRST: the first page in shadow stack. + * - SGX_EMA_PAGE_TYPE_SS_REST: the rest page in shadow stack. + * @param[in] alloc_flags A bitwise OR of flags describing committing mode, committing * order, address preference, page type. The untrusted side. - * implementation should always invoke mmap syscall with MAP_SHARED|MAP_FIXED, and - * translate following additional bits to proper parameters invoking mmap or other SGX specific - * syscall(s) provided by the kernel. + * implementation should translate following additional bits to proper + * parameters invoking syscall(mmap on Linux) provided by the kernel. * The flags param of this interface should include exactly one of following for committing mode: * - SGX_EMA_COMMIT_NOW: reserves memory range with SGX_EMA_PROT_READ|SGX_EMA_PROT_WRITE, if supported, * kernel is given a hint to EAUG EPC pages for the area as soon as possible. @@ -487,15 +491,11 @@ bool sgx_mm_unregister_pfhandler(sgx_mm_pfhandler_t pfhandler); * to lower addresses, no gaps in addresses above the last committed. * - SGX_EMA_GROWSUP: if supported, a hint given for the kernel to EAUG pages from lower * to higher addresses, no gaps in addresses below the last committed. - * Optionally ORed with one of following page types: - * - SGX_EMA_PAGE_TYPE_REG: regular page type. This is the default if not specified. - * - SGX_EMA_PAGE_TYPE_SS_FIRST: the first page in shadow stack. - * - SGX_EMA_PAGE_TYPE_SS_REST: the rest page in shadow stack. * @retval 0 The operation was successful. - * @retval EINVAL Any parameter passed in is not valid. - * @retval errno Error as reported by dependent syscalls, e.g., mmap(). + * @retval EFAULT for all failures. 
*/ -int sgx_mm_alloc_ocall(uint64_t addr, size_t length, int flags); + +int sgx_mm_alloc_ocall(uint64_t addr, size_t length, int page_type, int alloc_flags); /* * Call OS to change permissions, type, or notify EACCEPT done after TRIM. @@ -518,8 +518,7 @@ int sgx_mm_alloc_ocall(uint64_t addr, size_t length, int flags); * proper permissions. * SGX_EMA_PAGE_TYPE_TCS: change the page type to PT_TCS * @retval 0 The operation was successful. - * @retval EINVAL A parameter passed in is not valid. - * @retval errno Error as reported by dependent syscalls, e.g., mprotect(). + * @retval EFAULT for all failures. */ int sgx_mm_modify_ocall(uint64_t addr, size_t length, int flags_from, int flags_to); diff --git a/sdk/emm/ema.c b/sdk/emm/ema.c index 69b45c437..454095a3c 100644 --- a/sdk/emm/ema.c +++ b/sdk/emm/ema.c @@ -721,7 +721,7 @@ static int ema_do_uncommit_real(ema_t *node, size_t real_start, size_t real_end, int ret = sgx_mm_modify_ocall(block_start, block_length, prot | type, prot | SGX_EMA_PAGE_TYPE_TRIM); if (ret != 0) { - return ret; + return EFAULT; } ret = eaccept_range_forward(&si, block_start, block_end); @@ -733,7 +733,7 @@ static int ema_do_uncommit_real(ema_t *node, size_t real_start, size_t real_end, ret =sgx_mm_modify_ocall(block_start, block_length, prot | SGX_EMA_PAGE_TYPE_TRIM, prot | SGX_EMA_PAGE_TYPE_TRIM); - if(ret) return ret; + if(ret) return EFAULT; real_start = block_end; } @@ -867,6 +867,7 @@ int ema_change_to_tcs(ema_t *node, size_t addr) int ret = sgx_mm_modify_ocall(addr, SGX_PAGE_SIZE, prot | type, prot | SGX_EMA_PAGE_TYPE_TCS); if (ret != 0) { + ret = EFAULT; goto fail; } @@ -906,6 +907,7 @@ int ema_modify_permissions(ema_t *node, size_t start, size_t end, int new_prot) int ret = sgx_mm_modify_ocall(real_start, real_end - real_start, prot | type, new_prot | type); if (ret != 0) { + ret = EFAULT; goto fail; } @@ -951,6 +953,8 @@ int ema_modify_permissions(ema_t *node, size_t start, size_t end, int new_prot) {//do mprotect if target is PROT_NONE ret = sgx_mm_modify_ocall(real_start, real_end - real_start, type | SGX_EMA_PROT_NONE, type | SGX_EMA_PROT_NONE); + if (ret) + ret = EFAULT; } fail: node->transition = 0; @@ -1157,8 +1161,9 @@ int ema_do_alloc(ema_t* node) size_t tmp_addr = node->start_addr; size_t size = node->size; int ret = sgx_mm_alloc_ocall(tmp_addr, size, - (int)(alloc_flags | (node->si_flags & SGX_EMA_PAGE_TYPE_MASK))); + (int)(node->si_flags & SGX_EMA_PAGE_TYPE_MASK), (int)alloc_flags); if (ret) { + ret = EFAULT; return ret; } diff --git a/sdk/emm/include/sgx_mm.h b/sdk/emm/include/sgx_mm.h index 5f6a3d866..e6bd4c8f8 100644 --- a/sdk/emm/include/sgx_mm.h +++ b/sdk/emm/include/sgx_mm.h @@ -178,6 +178,7 @@ typedef int (*sgx_enclave_fault_handler_t)(const sgx_pfinfo *pfinfo, void *priva * @retval EEXIST Any page in range requested is in use and SGX_EMA_FIXED is set. * @retval EINVAL Invalid alignment bouandary, i.e., n < 12 in SGX_EMA_ALIGNED(n). * @retval ENOMEM Out of memory, or no free space to satisfy alignment boundary. + * @retval EFAULT All other errors. */ int sgx_mm_alloc(void *addr, size_t length, int flags, sgx_enclave_fault_handler_t handler, void *handler_private, @@ -190,6 +191,7 @@ int sgx_mm_alloc(void *addr, size_t length, int flags, * @param[in] length Size in bytes of multiples of page size. * @retval 0 The operation was successful. * @retval EINVAL The address range is not allocated or outside enclave. + * @retval EFAULT All other errors. 
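 *
 * For illustration, a heap manager shrinking a dynamically committed heap
 * might call, with page-aligned, hypothetical sizes,
 *
 *   sgx_mm_uncommit((uint8_t *)heap_base + new_size, old_size - new_size);
 *
 * to give the tail pages' EPC back while keeping the address range allocated.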
*/ int sgx_mm_uncommit(void *addr, size_t length); @@ -217,6 +219,7 @@ int sgx_mm_dealloc(void *addr, size_t length); * or other invalid parameters that are not supported. * @retval EPERM The request permissions are not allowed, e.g., by target page type or * SELinux policy. + * @retval EFAULT All other errors. */ int sgx_mm_modify_permissions(void *addr, size_t length, int prot); @@ -232,6 +235,7 @@ int sgx_mm_modify_permissions(void *addr, size_t length, int prot); * or other invalid parameters that are not supported. * @retval EPERM Target page type is no allowed by this API, e.g., PT_TRIM, * PT_SS_FIRST, PT_SS_REST. + * @retval EFAULT All other errors. */ int sgx_mm_modify_type(void *addr, size_t length, int type); @@ -265,6 +269,7 @@ int sgx_mm_commit(void *addr, size_t length); * @retval EPERM Any page in requested range is previously committed. * @retval EPERM The target permissions are not allowed by OS security policy, * e.g., SELinux rules. + * @retval EFAULT All other errors. */ int sgx_mm_commit_data(void *addr, size_t length, uint8_t *data, int prot); diff --git a/sdk/emm/include/sgx_mm_rt_abstraction.h b/sdk/emm/include/sgx_mm_rt_abstraction.h index 520618972..5500b58be 100644 --- a/sdk/emm/include/sgx_mm_rt_abstraction.h +++ b/sdk/emm/include/sgx_mm_rt_abstraction.h @@ -72,11 +72,14 @@ extern "C" { * * @param[in] addr Desired page aligned start address. * @param[in] length Size of the region in bytes of multiples of page size. - * @param[in] flags A bitwise OR of flags describing committing mode, committing + * @param[in] page_type One of following page types: + * - SGX_EMA_PAGE_TYPE_REG: regular page type. This is the default if not specified. + * - SGX_EMA_PAGE_TYPE_SS_FIRST: the first page in shadow stack. + * - SGX_EMA_PAGE_TYPE_SS_REST: the rest page in shadow stack. + * @param[in] alloc_flags A bitwise OR of flags describing committing mode, committing * order, address preference, page type. The untrusted side. - * implementation should always invoke mmap syscall with MAP_SHARED|MAP_FIXED, and - * translate following additional bits to proper parameters invoking mmap or other SGX specific - * syscall(s) provided by the kernel. + * implementation should translate following additional bits to proper + * parameters invoking syscall(mmap on Linux) provided by the kernel. * The flags param of this interface should include exactly one of following for committing mode: * - SGX_EMA_COMMIT_NOW: reserves memory range with SGX_EMA_PROT_READ|SGX_EMA_PROT_WRITE, if supported, * kernel is given a hint to EAUG EPC pages for the area as soon as possible. @@ -86,29 +89,24 @@ extern "C" { * to lower addresses, no gaps in addresses above the last committed. * - SGX_EMA_GROWSUP: if supported, a hint given for the kernel to EAUG pages from lower * to higher addresses, no gaps in addresses below the last committed. - * Optionally ORed with one of following page types: - * - SGX_EMA_PAGE_TYPE_REG: regular page type. This is the default if not specified. - * - SGX_EMA_PAGE_TYPE_SS_FIRST: the first page in shadow stack. - * - SGX_EMA_PAGE_TYPE_SS_REST: the rest page in shadow stack. * @retval 0 The operation was successful. - * @retval EINVAL Any parameter passed in is not valid. - * @retval errno Error as reported by dependent syscalls, e.g., mmap(). + * @retval EFAULT for all failures. 
*/ - int sgx_mm_alloc_ocall(uint64_t addr, size_t length, int flags); + int sgx_mm_alloc_ocall(uint64_t addr, size_t length, int page_type, int alloc_flags); - /* +/* * Call OS to change permissions, type, or notify EACCEPT done after TRIM. * * @param[in] addr Start address of the memory to change protections. * @param[in] length Length of the area. This must be a multiple of the page size. - * @param[in] flags_from The original EPCM flags of the EPC pages to be modified. + * @param[in] page_properties_from The original EPCM flags of the EPC pages to be modified. * Must be bitwise OR of following: * SGX_EMA_PROT_READ * SGX_EMA_PROT_WRITE * SGX_EMA_PROT_EXEC * SGX_EMA_PAGE_TYPE_REG: regular page, changeable to TRIM and TCS * SGX_EMA_PAGE_TYPE_TRIM: signal to the kernel EACCEPT is done for TRIM pages. - * @param[in] flags_to The target EPCM flags. This must be bitwise OR of following: + * @param[in] page_properties_to The target EPCM flags. This must be bitwise OR of following: * SGX_EMA_PROT_READ * SGX_EMA_PROT_WRITE * SGX_EMA_PROT_EXEC @@ -117,13 +115,12 @@ extern "C" { * proper permissions. * SGX_EMA_PAGE_TYPE_TCS: change the page type to PT_TCS * @retval 0 The operation was successful. - * @retval EINVAL A parameter passed in is not valid. - * @retval errno Error as reported by dependent syscalls, e.g., mprotect(). + * @retval EFAULT for all failures. */ - int sgx_mm_modify_ocall(uint64_t addr, size_t length, int flags_from, int flags_to); + int sgx_mm_modify_ocall(uint64_t addr, size_t length, int page_properties_from, int page_properties_to); - /* +/* * Define a mutex and init/lock/unlock/destroy functions. */ typedef struct _sgx_mm_mutex sgx_mm_mutex; diff --git a/sdk/emm/ut/stub.c b/sdk/emm/ut/stub.c index 2c3489557..8fe080d52 100644 --- a/sdk/emm/ut/stub.c +++ b/sdk/emm/ut/stub.c @@ -51,7 +51,7 @@ int do_emodpe(const sec_info_t* si, size_t addr) return 0; } -int sgx_mm_alloc_ocall(size_t addr, size_t length, int flags) +int sgx_mm_alloc_ocall(size_t addr, size_t length, int props, int flags) { return 0; } diff --git a/sdk/trts/ema_rt.c b/sdk/trts/ema_rt.c index db36e0075..997b1b23e 100644 --- a/sdk/trts/ema_rt.c +++ b/sdk/trts/ema_rt.c @@ -50,10 +50,11 @@ typedef struct ms_emm_alloc_ocall_t { int retval; size_t addr; size_t size; - int flags; + int page_type; + int alloc_flags; } ms_emm_alloc_ocall_t; -int SGXAPI sgx_mm_alloc_ocall(size_t addr, size_t size, int flags) +int SGXAPI sgx_mm_alloc_ocall(size_t addr, size_t size, int page_type, int alloc_flags) { #ifdef SE_SIM (void)addr; @@ -68,11 +69,12 @@ int SGXAPI sgx_mm_alloc_ocall(size_t addr, size_t size, int flags) ms->addr = (size_t)addr; ms->size = size; - ms->flags = flags; + ms->page_type = page_type; + ms->alloc_flags = alloc_flags; status = sgx_ocall((unsigned int)EDMM_ALLOC, ms); - if(status == SGX_SUCCESS) - ret = ms->retval; + if(status == SGX_SUCCESS && ms->retval == SGX_SUCCESS) + ret = 0; sgx_ocfree(); return ret; @@ -106,8 +108,8 @@ int SGXAPI sgx_mm_modify_ocall(size_t addr, size_t size, int flags_from, int fla ms->flags_from = flags_from; ms->flags_to = flags_to; status = sgx_ocall((unsigned int)EDMM_MODIFY, ms); - if(status == SGX_SUCCESS) - ret = ms->retval; + if(status == SGX_SUCCESS && ms->retval == SGX_SUCCESS) + ret = 0; sgx_ocfree(); return ret; From 8da2132bd81b4dbabac4ffad59f99edfecc6dd1b Mon Sep 17 00:00:00 2001 From: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> Date: Fri, 8 Jul 2022 12:09:46 -0700 Subject: [PATCH 30/96] emm: handle potential spurious #PF Also EXEC always require READ 
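For illustration, a runtime's first-phase #PF hook consuming this handler could
look roughly like the sketch below (hypothetical hook name and return
convention; sgx_pfinfo, sgx_mm_enclave_pfhandler and the SGX_MM_EXCEPTION_*
codes are those declared by the EMM headers in this series):

    int runtime_pf_hook(const sgx_pfinfo *info)
    {
        /* CONTINUE_EXECUTION: the page was committed on demand, or the #PF was
         * spurious (page already committed with sufficient permissions). */
        if (sgx_mm_enclave_pfhandler(info) == SGX_MM_EXCEPTION_CONTINUE_EXECUTION)
            return 1;   /* retry the faulting access */
        /* CONTINUE_SEARCH: e.g. a permission mismatch on a committed page, or
         * an address not managed by the EMM; defer to the next handler. */
        return 0;
    }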
Signed-off-by: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> --- sdk/emm/sgx_mm.c | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/sdk/emm/sgx_mm.c b/sdk/emm/sgx_mm.c index a7cf0e86e..2344cd0cd 100644 --- a/sdk/emm/sgx_mm.c +++ b/sdk/emm/sgx_mm.c @@ -357,6 +357,8 @@ int mm_modify_permissions_internal(void *addr, size_t size, int prot, ema_root_t if (size == 0) return EINVAL; if (size % SGX_PAGE_SIZE) return EINVAL; if (start % SGX_PAGE_SIZE) return EINVAL; + if ((prot & SGX_EMA_PROT_EXEC) && !(prot & SGX_EMA_PROT_READ)) + return EINVAL; ema_t *first = NULL, *last = NULL; @@ -397,18 +399,14 @@ int sgx_mm_enclave_pfhandler(const sgx_pfinfo *pfinfo) } if (ema_page_committed(ema, addr)) { - if (is_ema_transition(ema)) - {//as long as permissions expected, transition will be done - // TODO: check EXEC? - //This is never reached because of global lock - if ((pfinfo->pfec.rw == 0 && 0 == (get_ema_si_flags(ema) & SGX_EMA_PROT_READ)) || - (pfinfo->pfec.rw == 1 && 0 == (get_ema_si_flags(ema) & SGX_EMA_PROT_WRITE))) - { - ret = SGX_MM_EXCEPTION_CONTINUE_SEARCH; - } - else - ret = SGX_MM_EXCEPTION_CONTINUE_EXECUTION; + // Check for spurious #PF + if ((pfinfo->pfec.rw == 0 && 0 == (get_ema_si_flags(ema) & SGX_EMA_PROT_READ)) || + (pfinfo->pfec.rw == 1 && 0 == (get_ema_si_flags(ema) & SGX_EMA_PROT_WRITE))) + { + ret = SGX_MM_EXCEPTION_CONTINUE_SEARCH; } + else + ret = SGX_MM_EXCEPTION_CONTINUE_EXECUTION; goto unlock; } if (get_ema_alloc_flags(ema) & SGX_EMA_COMMIT_ON_DEMAND) From be1417a0da5a619ae1ef8393f8ff4df1e00504bd Mon Sep 17 00:00:00 2001 From: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> Date: Tue, 12 Jul 2022 08:17:15 -0700 Subject: [PATCH 31/96] EMM: update design doc specify the runtime provided mutex to be recursive Signed-off-by: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> --- sdk/emm/design_docs/SGX_EMM.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/emm/design_docs/SGX_EMM.md b/sdk/emm/design_docs/SGX_EMM.md index 7d3a619e1..8f94d4bb3 100644 --- a/sdk/emm/design_docs/SGX_EMM.md +++ b/sdk/emm/design_docs/SGX_EMM.md @@ -529,7 +529,7 @@ int sgx_mm_modify_ocall(uint64_t addr, size_t length, int flags_from, int flags_ ``` /* - * Define a mutex and create/lock/unlock/destroy functions. + * Define a recursive mutex and create/lock/unlock/destroy functions. 
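 *
 * The mutex must be recursive because a #PF (e.g. for on-demand stack growth)
 * can occur while the EMM already holds it; the first-phase handler then
 * re-enters the EMM on the same OS thread and must be able to re-acquire the
 * lock. One possible shape, with an illustrative struct layout where
 * get_thread_id() and the spinlock primitives stand in for whatever the
 * runtime provides (atomicity of the owner field glossed over):
 *
 *   struct _sgx_mm_mutex { volatile uintptr_t owner; size_t count; int lock; };
 *
 *   int sgx_mm_mutex_lock(sgx_mm_mutex *m)
 *   {
 *       uintptr_t self = get_thread_id();
 *       if (m->owner == self) { m->count++; return 0; }
 *       acquire_spinlock(&m->lock);
 *       m->owner = self;
 *       m->count = 1;
 *       return 0;
 *   }
 *
 *   int sgx_mm_mutex_unlock(sgx_mm_mutex *m)
 *   {
 *       if (--m->count == 0) { m->owner = 0; release_spinlock(&m->lock); }
 *       return 0;
 *   }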
*/ typedef struct _sgx_mm_mutex sgx_mm_mutex; sgx_mm_mutex *sgx_mm_mutex_create(void); From 33718ab2765497540fb02f117e8b6800fd329ff3 Mon Sep 17 00:00:00 2001 From: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> Date: Thu, 14 Jul 2022 09:00:50 -0700 Subject: [PATCH 32/96] Add sgx-emm submodule Signed-off-by: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> --- .gitmodules | 3 +++ external/sgx-emm/emm_src | 1 + 2 files changed, 4 insertions(+) create mode 160000 external/sgx-emm/emm_src diff --git a/.gitmodules b/.gitmodules index 3703d34a4..f00c461f3 100644 --- a/.gitmodules +++ b/.gitmodules @@ -17,3 +17,6 @@ path = external/protobuf/protobuf_code url = https://github.com/protocolbuffers/protobuf.git branch = 3.14.x +[submodule "external/sgx-emm/emm_src"] + path = external/sgx-emm/emm_src + url = https://github.com/intel/sgx-emm diff --git a/external/sgx-emm/emm_src b/external/sgx-emm/emm_src new file mode 160000 index 000000000..8d4cb8c69 --- /dev/null +++ b/external/sgx-emm/emm_src @@ -0,0 +1 @@ +Subproject commit 8d4cb8c6942b63618eedac44e25e2f319e08ac38 From ee88ac292501071cad5ed9043586ae2b3ffcede7 Mon Sep 17 00:00:00 2001 From: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> Date: Thu, 14 Jul 2022 11:33:51 -0700 Subject: [PATCH 33/96] EMM: restructure to use sgx-emm submodule Signed-off-by: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> --- common/inc/internal/bit_array.h | 2 +- common/inc/internal/ema.h | 2 +- common/inc/internal/emm_private.h | 2 +- common/inc/internal/sgx_mm_rt_abstraction.h | 2 +- common/inc/sgx_mm.h | 2 +- common/inc/sgx_mm_primitives.h | 2 +- common/inc/sgx_mm_rt_abstraction.h | 2 +- {sdk/emm => external/sgx-emm}/Makefile | 12 +- .../sgx-emm}/api_tests/App/App.cpp | 0 .../sgx-emm}/api_tests/App/App.h | 0 .../sgx-emm}/api_tests/App/sgx.h | 0 .../sgx-emm}/api_tests/Enclave/Enclave.cpp | 2 +- .../sgx-emm}/api_tests/Enclave/Enclave.edl | 0 .../sgx-emm}/api_tests/Enclave/Enclave.h | 0 .../sgx-emm}/api_tests/Enclave/Enclave.lds | 0 .../Enclave/Enclave_private_test.pem | 0 .../sgx-emm}/api_tests/Enclave/config.xml | 0 .../sgx-emm}/api_tests/Makefile | 0 {sdk/emm => external/sgx-emm}/api_tests/tcs.h | 0 .../sgx-emm}/api_tests/test_loop.sh | 0 {sdk/emm => external/sgx-emm}/ut/Makefile | 0 {sdk/emm => external/sgx-emm}/ut/stub.c | 0 .../sgx-emm}/ut/test_bit_array.c | 0 {sdk/emm => external/sgx-emm}/ut/test_ema.c | 0 {sdk/emm => external/sgx-emm}/ut/test_emm.c | 0 .../emm => external/sgx-emm}/ut/test_public.c | 0 psw/enclave_common/Makefile | 2 +- psw/urts/linux/Makefile | 2 +- sdk/Makefile.source | 4 +- sdk/emm/README.md | 135 -- sdk/emm/bit_array.c | 501 ------- .../design_docs/SGX_EDMM_driver_interface.md | 302 ----- sdk/emm/design_docs/SGX_EMM.md | 725 ---------- .../design_docs/images/SGX2_alloc_direct.svg | 213 --- sdk/emm/design_docs/images/SGX2_alloc_pf.svg | 220 --- sdk/emm/design_docs/images/SGX2_eaccept.svg | 301 ----- sdk/emm/design_docs/images/SGX2_eaccept2.svg | 278 ---- sdk/emm/design_docs/images/SGX2_emm_arch.svg | 286 ---- sdk/emm/design_docs/images/SGX2_perms.svg | 227 ---- sdk/emm/design_docs/images/SGX2_tcs.svg | 216 --- sdk/emm/design_docs/images/SGX2_trim.svg | 250 ---- sdk/emm/ema.c | 1184 ----------------- sdk/emm/emalloc.c | 524 -------- sdk/emm/emm_private.c | 128 -- sdk/emm/include/bit_array.h | 117 -- sdk/emm/include/ema.h | 130 -- sdk/emm/include/emalloc.h | 44 - sdk/emm/include/emm_private.h | 97 -- sdk/emm/include/sgx_mm.h | 290 ---- sdk/emm/include/sgx_mm_primitives.h | 62 - 
sdk/emm/include/sgx_mm_rt_abstraction.h | 157 --- sdk/emm/sgx_mm.c | 450 ------- sdk/emm/sgx_primitives.S | 86 -- sdk/trts/linux/Makefile | 2 +- 54 files changed, 18 insertions(+), 6943 deletions(-) rename {sdk/emm => external/sgx-emm}/Makefile (90%) rename {sdk/emm => external/sgx-emm}/api_tests/App/App.cpp (100%) rename {sdk/emm => external/sgx-emm}/api_tests/App/App.h (100%) rename {sdk/emm => external/sgx-emm}/api_tests/App/sgx.h (100%) rename {sdk/emm => external/sgx-emm}/api_tests/Enclave/Enclave.cpp (99%) rename {sdk/emm => external/sgx-emm}/api_tests/Enclave/Enclave.edl (100%) rename {sdk/emm => external/sgx-emm}/api_tests/Enclave/Enclave.h (100%) rename {sdk/emm => external/sgx-emm}/api_tests/Enclave/Enclave.lds (100%) rename {sdk/emm => external/sgx-emm}/api_tests/Enclave/Enclave_private_test.pem (100%) rename {sdk/emm => external/sgx-emm}/api_tests/Enclave/config.xml (100%) rename {sdk/emm => external/sgx-emm}/api_tests/Makefile (100%) rename {sdk/emm => external/sgx-emm}/api_tests/tcs.h (100%) rename {sdk/emm => external/sgx-emm}/api_tests/test_loop.sh (100%) rename {sdk/emm => external/sgx-emm}/ut/Makefile (100%) rename {sdk/emm => external/sgx-emm}/ut/stub.c (100%) rename {sdk/emm => external/sgx-emm}/ut/test_bit_array.c (100%) rename {sdk/emm => external/sgx-emm}/ut/test_ema.c (100%) rename {sdk/emm => external/sgx-emm}/ut/test_emm.c (100%) rename {sdk/emm => external/sgx-emm}/ut/test_public.c (100%) delete mode 100644 sdk/emm/README.md delete mode 100644 sdk/emm/bit_array.c delete mode 100644 sdk/emm/design_docs/SGX_EDMM_driver_interface.md delete mode 100644 sdk/emm/design_docs/SGX_EMM.md delete mode 100644 sdk/emm/design_docs/images/SGX2_alloc_direct.svg delete mode 100644 sdk/emm/design_docs/images/SGX2_alloc_pf.svg delete mode 100644 sdk/emm/design_docs/images/SGX2_eaccept.svg delete mode 100644 sdk/emm/design_docs/images/SGX2_eaccept2.svg delete mode 100644 sdk/emm/design_docs/images/SGX2_emm_arch.svg delete mode 100644 sdk/emm/design_docs/images/SGX2_perms.svg delete mode 100644 sdk/emm/design_docs/images/SGX2_tcs.svg delete mode 100644 sdk/emm/design_docs/images/SGX2_trim.svg delete mode 100644 sdk/emm/ema.c delete mode 100644 sdk/emm/emalloc.c delete mode 100644 sdk/emm/emm_private.c delete mode 100644 sdk/emm/include/bit_array.h delete mode 100644 sdk/emm/include/ema.h delete mode 100644 sdk/emm/include/emalloc.h delete mode 100644 sdk/emm/include/emm_private.h delete mode 100644 sdk/emm/include/sgx_mm.h delete mode 100644 sdk/emm/include/sgx_mm_primitives.h delete mode 100644 sdk/emm/include/sgx_mm_rt_abstraction.h delete mode 100644 sdk/emm/sgx_mm.c delete mode 100644 sdk/emm/sgx_primitives.S diff --git a/common/inc/internal/bit_array.h b/common/inc/internal/bit_array.h index 018139ddf..8b14d3a15 120000 --- a/common/inc/internal/bit_array.h +++ b/common/inc/internal/bit_array.h @@ -1 +1 @@ -../../../sdk/emm/include/bit_array.h \ No newline at end of file +../../../external/sgx-emm/emm_src/include/bit_array.h \ No newline at end of file diff --git a/common/inc/internal/ema.h b/common/inc/internal/ema.h index 7163f2344..1cd936219 120000 --- a/common/inc/internal/ema.h +++ b/common/inc/internal/ema.h @@ -1 +1 @@ -../../../sdk/emm/include/ema.h \ No newline at end of file +../../../external/sgx-emm/emm_src/include/ema.h \ No newline at end of file diff --git a/common/inc/internal/emm_private.h b/common/inc/internal/emm_private.h index 25af68953..cf6d63de1 120000 --- a/common/inc/internal/emm_private.h +++ b/common/inc/internal/emm_private.h @@ -1 +1 @@ 
-../../../sdk/emm/include/emm_private.h \ No newline at end of file +../../../external/sgx-emm/emm_src/include/emm_private.h \ No newline at end of file diff --git a/common/inc/internal/sgx_mm_rt_abstraction.h b/common/inc/internal/sgx_mm_rt_abstraction.h index 9300fa4d3..c396ee7b4 120000 --- a/common/inc/internal/sgx_mm_rt_abstraction.h +++ b/common/inc/internal/sgx_mm_rt_abstraction.h @@ -1 +1 @@ -../../../sdk/emm/include/sgx_mm_rt_abstraction.h \ No newline at end of file +../../../external/sgx-emm/emm_src/include/sgx_mm_rt_abstraction.h \ No newline at end of file diff --git a/common/inc/sgx_mm.h b/common/inc/sgx_mm.h index cc284d3bf..ec4db6615 120000 --- a/common/inc/sgx_mm.h +++ b/common/inc/sgx_mm.h @@ -1 +1 @@ -../../sdk/emm/include/sgx_mm.h \ No newline at end of file +../../external/sgx-emm/emm_src/include/sgx_mm.h \ No newline at end of file diff --git a/common/inc/sgx_mm_primitives.h b/common/inc/sgx_mm_primitives.h index c0817f167..88d2c9b61 120000 --- a/common/inc/sgx_mm_primitives.h +++ b/common/inc/sgx_mm_primitives.h @@ -1 +1 @@ -../../sdk/emm/include/sgx_mm_primitives.h \ No newline at end of file +../../external/sgx-emm/emm_src/include/sgx_mm_primitives.h \ No newline at end of file diff --git a/common/inc/sgx_mm_rt_abstraction.h b/common/inc/sgx_mm_rt_abstraction.h index 4b440b598..8ffd119d4 120000 --- a/common/inc/sgx_mm_rt_abstraction.h +++ b/common/inc/sgx_mm_rt_abstraction.h @@ -1 +1 @@ -../../sdk/emm/include/sgx_mm_rt_abstraction.h \ No newline at end of file +../../external/sgx-emm/emm_src/include/sgx_mm_rt_abstraction.h \ No newline at end of file diff --git a/sdk/emm/Makefile b/external/sgx-emm/Makefile similarity index 90% rename from sdk/emm/Makefile rename to external/sgx-emm/Makefile index ca8338c57..43c82abec 100644 --- a/sdk/emm/Makefile +++ b/external/sgx-emm/Makefile @@ -30,18 +30,16 @@ include ../../buildenv.mk HAVE_PRIMITIVES ?= 1 -CPPFLAGS += -Iinclude \ +CPPFLAGS += -Iemm_src/include \ -I$(COMMON_DIR)/inc/tlibc \ -Wno-missing-braces -Wno-unused-parameter -OBJS := bit_array.o \ - ema.o \ - emalloc.o \ - emm_private.o \ - sgx_mm.o +C_Files := $(wildcard emm_src/*.c) +OBJS := $(C_Files:.c=.o) +OBJS := $(sort $(OBJS)) ifneq ($(HAVE_PRIMITIVES), 1) -ASM_OBJ := sgx_primitives.o +ASM_OBJ := emm_src/sgx_primitives.o endif LIB_NAME := libsgx_mm.a diff --git a/sdk/emm/api_tests/App/App.cpp b/external/sgx-emm/api_tests/App/App.cpp similarity index 100% rename from sdk/emm/api_tests/App/App.cpp rename to external/sgx-emm/api_tests/App/App.cpp diff --git a/sdk/emm/api_tests/App/App.h b/external/sgx-emm/api_tests/App/App.h similarity index 100% rename from sdk/emm/api_tests/App/App.h rename to external/sgx-emm/api_tests/App/App.h diff --git a/sdk/emm/api_tests/App/sgx.h b/external/sgx-emm/api_tests/App/sgx.h similarity index 100% rename from sdk/emm/api_tests/App/sgx.h rename to external/sgx-emm/api_tests/App/sgx.h diff --git a/sdk/emm/api_tests/Enclave/Enclave.cpp b/external/sgx-emm/api_tests/Enclave/Enclave.cpp similarity index 99% rename from sdk/emm/api_tests/Enclave/Enclave.cpp rename to external/sgx-emm/api_tests/Enclave/Enclave.cpp index 3a56172d3..28a52b10f 100644 --- a/sdk/emm/api_tests/Enclave/Enclave.cpp +++ b/external/sgx-emm/api_tests/Enclave/Enclave.cpp @@ -35,7 +35,7 @@ #include /* vsnprintf */ #include #include -#include "../../include/sgx_mm.h" +#include "../../emm_src/include/sgx_mm.h" #define SGX_PAGE_SIZE 4096 #include "sgx_thread.h" #include diff --git a/sdk/emm/api_tests/Enclave/Enclave.edl b/external/sgx-emm/api_tests/Enclave/Enclave.edl 
similarity index 100% rename from sdk/emm/api_tests/Enclave/Enclave.edl rename to external/sgx-emm/api_tests/Enclave/Enclave.edl diff --git a/sdk/emm/api_tests/Enclave/Enclave.h b/external/sgx-emm/api_tests/Enclave/Enclave.h similarity index 100% rename from sdk/emm/api_tests/Enclave/Enclave.h rename to external/sgx-emm/api_tests/Enclave/Enclave.h diff --git a/sdk/emm/api_tests/Enclave/Enclave.lds b/external/sgx-emm/api_tests/Enclave/Enclave.lds similarity index 100% rename from sdk/emm/api_tests/Enclave/Enclave.lds rename to external/sgx-emm/api_tests/Enclave/Enclave.lds diff --git a/sdk/emm/api_tests/Enclave/Enclave_private_test.pem b/external/sgx-emm/api_tests/Enclave/Enclave_private_test.pem similarity index 100% rename from sdk/emm/api_tests/Enclave/Enclave_private_test.pem rename to external/sgx-emm/api_tests/Enclave/Enclave_private_test.pem diff --git a/sdk/emm/api_tests/Enclave/config.xml b/external/sgx-emm/api_tests/Enclave/config.xml similarity index 100% rename from sdk/emm/api_tests/Enclave/config.xml rename to external/sgx-emm/api_tests/Enclave/config.xml diff --git a/sdk/emm/api_tests/Makefile b/external/sgx-emm/api_tests/Makefile similarity index 100% rename from sdk/emm/api_tests/Makefile rename to external/sgx-emm/api_tests/Makefile diff --git a/sdk/emm/api_tests/tcs.h b/external/sgx-emm/api_tests/tcs.h similarity index 100% rename from sdk/emm/api_tests/tcs.h rename to external/sgx-emm/api_tests/tcs.h diff --git a/sdk/emm/api_tests/test_loop.sh b/external/sgx-emm/api_tests/test_loop.sh similarity index 100% rename from sdk/emm/api_tests/test_loop.sh rename to external/sgx-emm/api_tests/test_loop.sh diff --git a/sdk/emm/ut/Makefile b/external/sgx-emm/ut/Makefile similarity index 100% rename from sdk/emm/ut/Makefile rename to external/sgx-emm/ut/Makefile diff --git a/sdk/emm/ut/stub.c b/external/sgx-emm/ut/stub.c similarity index 100% rename from sdk/emm/ut/stub.c rename to external/sgx-emm/ut/stub.c diff --git a/sdk/emm/ut/test_bit_array.c b/external/sgx-emm/ut/test_bit_array.c similarity index 100% rename from sdk/emm/ut/test_bit_array.c rename to external/sgx-emm/ut/test_bit_array.c diff --git a/sdk/emm/ut/test_ema.c b/external/sgx-emm/ut/test_ema.c similarity index 100% rename from sdk/emm/ut/test_ema.c rename to external/sgx-emm/ut/test_ema.c diff --git a/sdk/emm/ut/test_emm.c b/external/sgx-emm/ut/test_emm.c similarity index 100% rename from sdk/emm/ut/test_emm.c rename to external/sgx-emm/ut/test_emm.c diff --git a/sdk/emm/ut/test_public.c b/external/sgx-emm/ut/test_public.c similarity index 100% rename from sdk/emm/ut/test_public.c rename to external/sgx-emm/ut/test_public.c diff --git a/psw/enclave_common/Makefile b/psw/enclave_common/Makefile index 7f8786460..eed4c8a7b 100644 --- a/psw/enclave_common/Makefile +++ b/psw/enclave_common/Makefile @@ -47,7 +47,7 @@ CFLAGS += $(ADDED_INC) INC += -I$(SGX_HEADER_DIR) \ -I$(COMMON_DIR)/inc/internal \ -I$(COMMON_DIR)/inc/internal/linux \ - -I$(LINUX_SDK_DIR)/emm/include \ + -I$(LINUX_EXTERNAL_DIR)/sgx-emm/emm_src/include \ -I$(LINUX_PSW_DIR)/urts/ \ -I$(LINUX_PSW_DIR)/urts/linux \ -I$(LINUX_PSW_DIR)/enclave_common diff --git a/psw/urts/linux/Makefile b/psw/urts/linux/Makefile index e67b62814..f815e7faf 100644 --- a/psw/urts/linux/Makefile +++ b/psw/urts/linux/Makefile @@ -51,7 +51,7 @@ INC += -I$(SGX_HEADER_DIR) \ -I$(LINUX_PSW_DIR)/urts/ \ -I$(LINUX_PSW_DIR)/urts/linux \ -I$(LINUX_PSW_DIR)/urts/parser \ - -I$(LINUX_SDK_DIR)/emm/include \ + -I$(LINUX_EXTERNAL_DIR)/sgx-emm/emm_src/include \ -I$(VTUNE_DIR)/include \ 
-I$(VTUNE_DIR)/sdk/src/ittnotify diff --git a/sdk/Makefile.source b/sdk/Makefile.source index 69283aed7..6aeac2972 100644 --- a/sdk/Makefile.source +++ b/sdk/Makefile.source @@ -178,7 +178,7 @@ ec_dh_lib: # --------------------------------------------------- .PHONY: sgx_mm sgx_mm: - $(MAKE) -C emm/ + $(MAKE) -C $(LINUX_EXTERNAL_DIR)/sgx-emm/ .PHONY: trts trts: @@ -295,7 +295,7 @@ clean: $(MAKE) -C tlibcxx/ clean $(MAKE) -C tseal/linux/ clean $(MAKE) -C selib/linux/ clean - $(MAKE) -C emm/ clean + $(MAKE) -C $(LINUX_EXTERNAL_DIR)/sgx-emm/ clean $(MAKE) -C trts/ clean $(MAKE) -C tsetjmp/ clean $(MAKE) -C tsafecrt/ clean diff --git a/sdk/emm/README.md b/sdk/emm/README.md deleted file mode 100644 index 6ea5f04c9..000000000 --- a/sdk/emm/README.md +++ /dev/null @@ -1,135 +0,0 @@ -Introduction ---------------------------------- -This directory contains an implementation of the Enclave Memory Manager proposed in [this design doc](design_docs/SGX_EMM.md). - -Its public APIs as defined in [sgx_mm.h](include/sgx_mm.h) are intended to encapsulate low level details -of managing the basic EDMM flows for dynamically allocating/deallocating EPC pages, changing EPC page -permissions and page types. - -The typical target users of these APIs are intermediate level components in SGX runtimes: heap, stack managers -with dynamic expansion capabilities, mmap/mprotect/pthread API implementations for enclaves, dynamic code -loader and JIT compilers,etc. - -This implementation aims to be reusable in any SGX runtime that provides a minimal C runtime and -implements the abstraction layer APIs as defined in [sgx_mm_rt_abstraction.h](include/sgx_mm_rt_abstraction.h). - -The instructions here are for developing and testing the EMM functionality only. -Consult the main README of this repo for general usages. - -**Note:** The kernel patch series for upstream are under review on LKML in [this thread](https://lore.kernel.org/lkml/YnrllJ2OqmcqLUuv@kernel.org/T/). -Please refer to the cover letter of the series for changes between versions. - -This EMM implementation is based on the testing branch for the kernel hosted [here](https://github.com/rchatre/linux/tree/sgx/sgx2_submitted_v5_plus_rwx), which includes a temporary patch to allow pages EAUG'ed with RWX permissions. - -As the kernel interfaces evolve, this EMM implementation and/or interface may change. However, the goal is to minimize the EMM public API changes so that impact to upper layer implementations are minimized. - -Prerequisites -------------------------------- - -#### Build and install kernel with EDMM support -On Ubuntu 18.04/20.04, follow the general instructions from [here](https://wiki.ubuntu.com/KernelTeam/GitKernelBuild) with these changes. - -- For step 1, clone this kernel repo and checkout the branch with sgx EDMM support -``` -$ git clone https://github.com/rchatre/linux.git -$ cd linux -$ git checkout sgx/sgx2_submitted_v5_plus_rwx -``` - -- For step 6, modify .config to set "CONFIG_X86_SGX=y". - -**Note:** on Ubuntu 20.04, ensure that /dev does not have noexec set: -``` -mount | grep "/dev .*noexec" -``` -If so, remount it executable: -``` -sudo mount -o remount,exec /dev -``` - -#### Verify kernel build and EDMM support -At the root of the kernel source repo, -``` -$ cd tools/testing/selftests/sgx/ && make -#./test_sgx -``` -#### Add udev rules to map sgx device nodes and set right permissions -Download [10-sgx.rules](https://github.com/intel/SGXDataCenterAttestationPrimitives/blob/master/driver/linux/10-sgx.rules) and activate it as follows. 
-``` -$ sudo cp 10-sgx.rules /etc/udev/rules.d -$ sudo groupadd sgx_prv -$ sudo udevadm trigger -``` -Build and Install SDK and PSW ------------------------------- - -#### Clone linux-sgx repo and checkout edmm branch -``` -$ git clone https://github.com/intel/linux-sgx.git $repo_root -$ cd $repo_root -$ git checkout edmm_v5 -``` -Following steps assume $repo_root is the top directory of the linux-sgx repo you cloned. - -#### To build and install SDK with EDMM support -``` -$ cd $repo_root -$ make preparation -$ make sdk_install_pkg_no_mitigation -$ cd linux/installer/bin -$ ./sgx_linux_x64_sdk_2.15.100.3.bin -# follow its prompt to set SDK installation destination directory, $SGX_SDK -$ source $SGX_SDK/environment -``` - -#### To build and setup libsgx_enclave_common and libsgx_urts -To test EMM functionalities without involving remote attestation, we only need libsgx_enclave_common and libsgx_urts built and point LD_LIBRARY_PATH to them. - -``` -$ cd $repo_root/psw/urts/linux -$ make -$ cd /build/linux -$ ln -s libsgx_enclave_common.so libsgx_enclave_common.so.1 -$ export LD_LIBRARY_PATH=/build/linux/ -``` - -#### To build and run API tests -``` -$ cd $repo_root/sdk/emm/api_tests/ -$ make -$ ./test_mm_api -# or run tests in loop in background -$ nohup bash ./test_loop.sh 1000 & -#check results in nohup log: -$ tail -f nohup.out -``` - -Limitations of current implementation ---------------------------------------- -1. The EMM holds a global recursive mutex for the whole duration of each API invocation. - - No support for concurrent operations (modify type/permissions, commit and commit_data) on different regions. -2. The EMM internally uses a separate dynamic allocator (emalloc) to manage its internal memory usage: allocations for EMA objects and bitmaps of the regions. - - During initialization, the EMM emalloc will create an initial reserve region from the user range (given by RTS, see below). And it may add more reserves later also from the user range if needed. - - RTS and SDK signing tools can estimate this overhead with (total size of all RTS regions and user regions)/2^14. And account for it when calculating the enclave size. - - Before calling any EMM APIs, the RTS needs initialize EMM by calling sgx_mm_init pass in an address range [user_start, user_end) for user allocation. - - The EMM allocates all user requested region(via sgx_mm_alloc API) in this range only. -3. Allocations created by the RTS enclave loader at fixed address ranges can be reserved with SGX_EMA_SYSTEM flag after EMM initializations. - - For example, for a heap region to be dynamically expanded: - - The RTS calls mm_init_ema to create region for the static heap (EADDed), and mm_alloc to reserve COMMIT_ON_DEMAND for dynamic heap. - - Stack expansion should be done in 1st phase exception handler and use a reserved static stack so that stack is not overrun in sgx_mm API calls during stack expansion. -4. The EMM relies on vDSO interface to guarantee that fault handler is called on the same OS thread where fault happened. - - This is due to the use of the global recursive mutex. If fault handler comes in from different thread while the mutex is held, it will deadlock. - - Note a #PF could happen when more stack is needed inside EMM functions while the mutex is locked. - - vDSO user handler should ensure it re-enters enclave with the original TCS and on the same OS thread. 
- - To avoid potential deadlocks, no other mutex/lock should be used in this path from user handler to first phase exception handler inside enclave. -5. Not optimized for performance -6. No extensive validation, failure or incorrect error codes possible for corner cases. - -Notes on Intel SDK specific implementation ------------------------------------------ -1. Intel SDK RTS abstraction layer mutex implementation is a spinlock because there is no built-in OCalls for wait/wake on OS event. -2. Intel SDK signing tool reserves all unused address space as guard pages, leaving no space for user allocation. In this implementation, we simply changed tRTS to leave majority of that space as free. In future, we may need change the signing tool to encode this info in the metadata. -3. API tests are built with Intel SDK. Though most of tests are RTS independent, the TCS related tests use hardcoded Intel thread context layout info. -4. All make files assume linux-sgx repo layout and environment. - - diff --git a/sdk/emm/bit_array.c b/sdk/emm/bit_array.c deleted file mode 100644 index 7a6ca0e4d..000000000 --- a/sdk/emm/bit_array.c +++ /dev/null @@ -1,501 +0,0 @@ -/* - * Copyright (C) 2011-2021 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -#include -#include -#include -#include -#include -#include "bit_array.h" -#include "emalloc.h" - -#define NUM_OF_BYTES(nbits) (ROUND_TO((nbits), 8) >> 3) -#define TEST_BIT(A, p) ((A)[((p)/8)] & ((uint8_t)(1 << ((p)%8)))) -#define SET_BIT(A, p) ((A)[((p)/8)] |= ((uint8_t)(1 << ((p)%8)))) -#define CLEAR_BIT(A, p) ((A)[((p)/8)] &= (uint8_t)(~(1 << ((p)%8)))) -#define FLIP_BIT(A, p) ((A)[((p)/8)] ^= (uint8_t)(1 << ((p)%8))) - -struct bit_array_ { - size_t n_bytes; - size_t n_bits; - uint8_t *data; -}; - -// Create a new bit array to track the status of 'num' of bits. -// The contents of the data is uninitialized. 
-bit_array *bit_array_new(size_t num_of_bits) -{ - // FIXME: check against MAX - - size_t n_bytes = NUM_OF_BYTES(num_of_bits); - - if (n_bytes == 0) - return NULL; - - bit_array *ba = (bit_array *)emalloc(sizeof(bit_array)); - if(!ba) return NULL; - ba->n_bytes = n_bytes; - ba->n_bits = num_of_bits; - ba->data = (uint8_t*)emalloc(n_bytes); - if (!ba->data) - { - efree(ba); - return NULL; - } - return ba; -} - -// Create a new bit array to track the status of 'num' of bits. -// All the tracked bits are set (value 1). -bit_array *bit_array_new_set(size_t num_of_bits) -{ - bit_array *ba = bit_array_new(num_of_bits); - if (!ba) - return NULL; - - memset(ba->data, 0xFF, ba->n_bytes); - return ba; -} - -// Create a new bit array to track the status of 'num' of bits. -// All the tracked bits are reset (value 0). -bit_array *bit_array_new_reset(size_t num_of_bits) -{ - bit_array *ba = bit_array_new(num_of_bits); - if (!ba) - return NULL; - - memset(ba->data, 0, ba->n_bytes); - return ba; -} - -// Reset the bit_array 'ba' to track the new 'data', which has 'num' of bits. -void bit_array_reattach(bit_array *ba, size_t num_of_bits, uint8_t *data) -{ - if (ba->data) { - efree(ba->data); - } - - size_t n_bytes = NUM_OF_BYTES(num_of_bits); - ba->n_bytes = n_bytes; - ba->n_bits = num_of_bits; - ba->data = data; -} - -// Delete the bit_array 'ba' and the data it owns -void bit_array_delete(bit_array *ba) -{ - efree(ba->data); - efree(ba); -} - -#if 0 -// Returns the number of bits that are set -size_t bit_array_count(bit_array *ba) -{ - -} -#endif - -// Returns the number of tracked bits in the bit_array -size_t bit_array_size(bit_array *ba) -{ - return ba->n_bits; -} - -// Returns whether the bit at position 'pos' is set -bool bit_array_test(bit_array *ba, size_t pos) -{ - return TEST_BIT(ba->data, pos); -} -uint8_t set_mask(size_t start, size_t bits_to_set) -{ - assert(start<8); - assert(bits_to_set<=8); - assert(start + bits_to_set <= 8); - return (uint8_t) (((1 << bits_to_set) - 1) << start); -} -bool bit_array_test_range(bit_array *ba, size_t pos, size_t len) -{ - size_t byte_index = pos / 8; - size_t bit_index = pos % 8; - size_t bits_in_first_byte = 8 - bit_index; - - if (len <= bits_in_first_byte) { - uint8_t mask = set_mask(bit_index, len); - if ((ba->data[byte_index] & mask) != mask) { - return false; - } - return true; - } - - uint8_t mask = set_mask(bit_index, bits_in_first_byte); - if ((ba->data[byte_index] & mask) != mask) { - return false; - } - - size_t bits_remain = len - bits_in_first_byte; - while (bits_remain >=8) { - if (ba->data[++byte_index] != 0xFF) { - return false; - } - bits_remain -= 8; - } - - // handle last several bits - if (bits_remain > 0) { - mask = set_mask(0, bits_remain); - if ((ba->data[++byte_index] & mask) != mask) { - return false; - } - } - - return true; -} - -bool bit_array_test_range_any(bit_array *ba, size_t pos, size_t len) -{ - size_t byte_index = pos / 8; - size_t bit_index = pos % 8; - size_t bits_in_first_byte = 8 - bit_index; - - if (len <= bits_in_first_byte) { - uint8_t mask = set_mask(bit_index, len); - if ((ba->data[byte_index] & mask)) { - return true; - } - return false; - } - - uint8_t mask = set_mask(bit_index, bits_in_first_byte); - if ((ba->data[byte_index] & mask)) { - return true; - } - - size_t bits_remain = len - bits_in_first_byte; - while (bits_remain >=8) { - if (ba->data[++byte_index]) { - return true; - } - bits_remain -= 8; - } - - // handle last several bits - if (bits_remain > 0) { - mask = set_mask(0, bits_remain); - if 
((ba->data[++byte_index] & mask)) { - return true; - } - } - return false; -} - -// Returns whether any of the bits is set -bool bit_array_any(bit_array *ba) -{ - uint8_t v = 0xFF; - size_t i; - - for (i = 0; i < ba->n_bytes - 1; ++i) { - if ((v & ba->data[i])) - return true; - } - - // check the last several bits - size_t bits_in_last_byte = ba->n_bits - ((ba->n_bytes - 1) << 3); - uint8_t mask = set_mask(0, bits_in_last_byte); - - if (mask & ba->data[i]) - return true; - - return false; -} - -// Returns whether none of the bits is set -bool bit_array_none(bit_array *ba) -{ - return !bit_array_any(ba); -} - -// Returns whether all of the bits are set -bool bit_array_all(bit_array *ba) -{ - uint8_t v = 0xFF; - size_t i; - for (i = 0; i < ba->n_bytes - 1; ++i) { - if ((v ^ ba->data[i])) - return false; - } - - // check the last several bits - size_t bits_in_last_byte = ba->n_bits - ((ba->n_bytes - 1) << 3); - uint8_t mask = set_mask(0, bits_in_last_byte); - - if ((mask & ba->data[i]) != mask) - return false; - - return true; -} - -// Set the bit at 'pos' -void bit_array_set(bit_array *ba, size_t pos) -{ - SET_BIT(ba->data, pos); -} - -void bit_array_set_range(bit_array *ba, size_t pos, size_t len) -{ - size_t byte_index = pos / 8; - size_t bit_index = pos % 8; - size_t bits_in_first_byte = 8 - bit_index; - - if (len <= bits_in_first_byte) { - uint8_t mask = set_mask(bit_index, len); - ba->data[byte_index] |= mask; - return; - } - - uint8_t mask = set_mask(bit_index, bits_in_first_byte); - ba->data[byte_index] |= mask; - size_t bits_remain = len - bits_in_first_byte; - while (bits_remain >=8) { - ba->data[++byte_index] = 0xFF; - bits_remain -= 8; - } - - // handle last several bits - if (bits_remain > 0) { - mask = set_mask(0, bits_remain); - ba->data[++byte_index] |= mask; - } - - return; -} - -// Set all the bits -void bit_array_set_all(bit_array *ba) -{ - memset(ba->data, 0xFF, ba->n_bytes); -} - -// Clear the bit at 'pos' -void bit_array_reset(bit_array *ba, size_t pos) -{ - CLEAR_BIT(ba->data, pos); -} - -uint8_t clear_mask(size_t start, size_t bits_to_clear) -{ - return (uint8_t)(~set_mask(start, bits_to_clear)); -} - -void bit_array_reset_range(bit_array *ba, size_t pos, size_t len) -{ - size_t byte_index = pos / 8; - size_t bit_index = pos % 8; - size_t bits_in_first_byte = 8 - bit_index; - - if (len <= bits_in_first_byte) { - uint8_t mask = clear_mask(bit_index, len); - ba->data[byte_index] &= mask; - return; - } - - uint8_t mask = clear_mask(bit_index, bits_in_first_byte); - ba->data[byte_index] &= mask; - - size_t bits_remain = len - bits_in_first_byte; - while (bits_remain >=8) { - ba->data[++byte_index] = 0; - bits_remain -= 8; - } - - // handle last several bits - if (bits_remain > 0) { - mask = clear_mask(0, bits_remain); - ba->data[++byte_index] &= mask; - } - - return; -} - -// Clear all the bits -void bit_array_reset_all(bit_array *ba) -{ - memset(ba->data, 0, ba->n_bytes); -} - -// Flip the bit at 'pos' -void bit_array_flip(bit_array *ba, size_t pos) -{ - FLIP_BIT(ba->data, pos); -} - -#if 0 -// Flip all the bits -void bit_array_flip_all(bit_array *ba) -{ - -} -#endif - -// Split the bit array at 'pos' -int bit_array_split(bit_array *ba, size_t pos, bit_array **new_lower, bit_array **new_higher) -{ - // not actually a split - if (pos == 0) { - *new_lower = NULL; - *new_higher = ba; - return 0; - } - - // not actually a split - if (pos >= ba->n_bits) { - *new_lower = ba; - *new_higher = NULL; - return 0; - } - - size_t byte_index = pos / 8; - uint8_t bit_index = pos % 8; - 
- size_t l_bits = (byte_index << 3) + bit_index; - size_t l_bytes = NUM_OF_BYTES(l_bits); - size_t r_bits = ba->n_bits - l_bits; - - // new data for bit_array of lower pages - uint8_t *data = (uint8_t *)emalloc(l_bytes); - if (!data) return ENOMEM; - size_t i; - for (i = 0; i < byte_index; ++i) { - data[i] = ba->data[i]; - } - - if (bit_index > 0) { - uint8_t tmp = ba->data[i] & (uint8_t)((1 << bit_index) - 1); - data[i] = tmp; - } - - // new bit_array for higher pages - bit_array *ba2 = bit_array_new(r_bits); - if(!ba2) - { - efree(data); - return ENOMEM; - } - - size_t bits_remain = r_bits; - size_t curr_byte = byte_index; - size_t dst_byte = 0; - uint8_t u1 = 0, u2 = 0; - - while (bits_remain >= 8) { - u1 = (uint8_t)(ba->data[curr_byte++] >> bit_index); - u2 = (uint8_t)(ba->data[curr_byte] << (8 - bit_index)); - ba2->data[dst_byte++] = u1 | u2; - bits_remain -= 8; - } - - if (bits_remain > (uint8_t)(8 - bit_index)) { - u1 = (uint8_t)(ba->data[curr_byte++] >> bit_index); - u2 = (uint8_t)(ba->data[curr_byte] << (8 - bit_index)); - ba2->data[dst_byte] = u1 | u2;; - } - else if (bits_remain > 0) { - u1 = (uint8_t)(ba->data[curr_byte] >> bit_index); - ba2->data[dst_byte] = u1; - } - - bit_array_reattach(ba, l_bits, data); - - *new_lower = ba; - *new_higher = ba2; - return 0; -} - -// Merge two bit arrays -// Returns a new bit array, merging two input bit arrays -bit_array* bit_array_merge(bit_array *ba1, bit_array *ba2) -{ - size_t total_bits = ba1->n_bits + ba2->n_bits; - bit_array *ba = bit_array_new(total_bits); - if (!ba) return NULL; - - // copy ba1 data into new bit_array - memcpy(ba->data, ba1->data, ba1->n_bytes); - - size_t idle_bits = (ba1->n_bytes << 3) - ba1->n_bits; - - // last byte of ba1 is fully occupied, copy ba2 data as a whole - if (idle_bits == 0) { - memcpy(&ba->data[ba1->n_bytes], ba2->data, ba2->n_bytes); - bit_array_delete(ba1); - bit_array_delete(ba2); - return ba; - } - - // fix the byte copied from ba1's last byte - size_t i = ba1->n_bytes - 1; - - size_t bits_remain = ba2->n_bits; - ba->data[i++] |= (uint8_t)(ba2->data[0] << (8 - idle_bits)); - - if (bits_remain <= idle_bits) { - bit_array_delete(ba1); - bit_array_delete(ba2); - return ba; - } - - bits_remain -= idle_bits; - size_t curr_byte = 0; - size_t dst_byte = i; - uint8_t u1 = 0, u2 = 0; - - while (bits_remain >= 8) { - u1 = (uint8_t)(ba2->data[curr_byte++] >> idle_bits); - u2 = (uint8_t)(ba2->data[curr_byte] << (8 - idle_bits)); - ba->data[dst_byte++] = u1 | u2; - bits_remain -= 8; - } - - if (bits_remain > (8 - idle_bits)) { - u1 = (uint8_t)(ba2->data[curr_byte++] >> idle_bits); - u2 = (uint8_t)(ba2->data[curr_byte] << (8 - idle_bits)); - ba->data[dst_byte] = u1 | u2;; - } else if (bits_remain > 0) { - u1 = (uint8_t)(ba2->data[curr_byte] >> idle_bits); - ba->data[dst_byte] = u1; - } - - bit_array_delete(ba1); - bit_array_delete(ba2); - return ba; -} diff --git a/sdk/emm/design_docs/SGX_EDMM_driver_interface.md b/sdk/emm/design_docs/SGX_EDMM_driver_interface.md deleted file mode 100644 index 64e307906..000000000 --- a/sdk/emm/design_docs/SGX_EDMM_driver_interface.md +++ /dev/null @@ -1,302 +0,0 @@ -SGX EDMM Linux Driver Interface Design -===================================== - -## Motivation - -This document describes possible Linux driver interfaces to facilitate discussions among SGX runtime implementors (e.g., https://github.com/openenclave/openenclave/pull/3639) on supporting different SGX EDMM flows. 
- -Although interfaces described here are inspired to be as close as possible to a future Linux kernel APIs, they are not intended to be a description or proposal for kernel implementation. We hope from discussions enabled by this document, requirements and usage models can be identified to help shape future kernel interfaces. - -Without losing generality, this document may describe how upper layer user space components would use the interfaces. However, details of design and implementation of those components are intentionally left out. The PR mentioned above would provide more contexts on other user space components and their relationships. Further, for those who may want to learn basic principles behind Intel(R) SGX EDMM instructions and how they are typically used, please consult following references: -- [HASP@ISCA 2016: 11:1-11:9](https://caslab.csl.yale.edu/workshops/hasp2016/HASP16-17.pdf) -- [Intel SDM Vol.4, Ch.36-42](https://software.intel.com/content/www/us/en/develop/articles/intel-sdm.html) - -For design and implementation of current SGX1 support in upstream Linux kernel (merged in 5.11RC), please refer to [this patch series](https://lwn.net/Articles/837121/) -**Update on 6/27/2022:** At the time of this document creation, Linux kernel support for EDMM is not actively developed. The APIs described here are not the same as those actual implementation to be accepted in mainline kernel (as was expected and stated above). For current candidate patches for EDMM support in Linux kernel, please review at [this LKML thread](https://lore.kernel.org/lkml/YnrllJ2OqmcqLUuv@kernel.org/T/). However, all the usages and flows described here are supported by the upstream candidate except for the MAP_POPULATE flage for "direct allocation flow". - -## Basic EDMM flows - -SGX EDMM instructions support dynamic EPC page allocation/deallocation for enclaves and page property modification post-EINIT. Following are the basic EDMM flows on which other more advanced usages of EDMM can be built. - -**Note:** This document is Linux specific. The term "kernel" and "kernel space" are used in this document when general Linux kernel space actions are described whether implemented in an OOT driver or in kernel tree. Kernel specific implementation details will be explicitly stated as "future kernel" or "kernel patches". And implementation details such as OCalls issued by enclaves, ETRACK and inter-processor interrupts (IPIs) issued in kernel are generally omitted for brevity. - -- Allocate a new page at an address in ELRANGE of an enclave. - - This can be an explicit syscall or triggered by a page fault (#PF) when an unavailable page is accessed. - - Kernel issues EAUG for the page. All new pages should have RW permissions initially. - - The enclave then issues EACCEPT. -- Deallocate an existing page - - Enclave signals via a syscall to kernel that a page is no longer in use. - - Kernel issues EMODT to change page type to PT_TRIM - - The enclave issues EACCEPT - - Kernel issues EREMOVE on the page at appropriate time -- Change page type, for example, from PT_REG to PT_TCS or PT_TRIM. - - Enclave requests via a syscall to kernel to change type of a page from PT_REG to PT_TCS/PT_TRIM - - Kernel issues EMODT to change page type to PT_TCS/PT_TRIM - - The enclave issues EACCEPT -- Extend EPCM permissions of a page, e.g., R->RW/RX - - Enclave issues EMODPE for the page - - Enclave requests via a syscall that the kernel update the page table permissions to match. 
- - Kernel modifies permissions in PTE -- Reduce EPCM permissions of a page, e.g. RW/RX->R - - Enclave requests that the kernel restrict the permissions of an EPC page - - Kernel performs EMODPR, updates page tables to match the new EPCM permissions, - - Enclave issues EACCEPT - -**Note:** Flows related to CET support inside enclave will be considered as a future enhancement. - -Future kernel may extend mmap and mprotect syscalls to support SGX EDMM usages. But we can't add/change syscall interfaces from an out-of-tree driver. So, in this proposal for possible driver implementation, we reuse mmap for dynamic enclave memory mapping and expose a new IOCTL, sgx_enclave_mprotect, for enclave page modification. - -## mmap - -After enclave is initialized (EINIT IOCTL done), the standard Linux mmap syscall can be used to create a new mapping configured for dynamically allocating enclave memory using EAUG. Following comments are specific to SGX EDMM usages, please refer to [mmap man page](https://man7.org/linux/man-pages/man2/mmap.2.html) for generic definitions. - -### Remarks - -- To create a mapping for dynamic enclave memory allocation, mmap must be called with an open enclave file descriptor and with PROT_READ | PROT_WRITE for protection flags. - - Enclave must issue EACCEPT for the pages after mmap before it can modify the content of the pages and extend/reduce permissions in secure way. -- The offset in mmap parameter must be zero for enclaves. -- MAP_* flags must be MAP_SHARED | MAP_FIXED masked with optional flags: - - MAP_POPULATE: hint for kernel to EAUG pages as soon as possible. - - MAP_GROWSDOWN: used for stacks. The mapping will grow down to the next mapping. -- If and only if the address range are within the ELRANGE of the enclave associated with the file descriptor, the mapping will be created. However, user space should not expect EAUG be done by the mmap call. - - The kernel can choose EAUG pages immediately (likely for MAP_POPULATE), or EAUG pages upon page faults within the VMA, similar to how kernel would allocate regular memory. -- The kernel will assume the newly requested mapping is for dynamic allocation and initial permissions must be RW until user space request changes later. - -**Implementation Notes:** Current [SGX kernel patches](https://patchwork.kernel.org/project/intel-sgx/patch/20201112220135.165028-11-jarkko@kernel.org/) limit PTE permissions to the EPCM permissions given in SEC_INFO during EADD IOCTL calls. The dynamic allocation mappings should not be subject to those limits. A possible implementation may have these changes: - - sgx_encl_may_map - - enforces RW permissions for pages other than those loaded due to EADD or ECREATE. - - set up flags to track dynamic pages: type, permissions flag - - sgx_vma_mprotect - - Allow permissions changes to dynamic pages within limitations of OS policies, e.g., - - never allow WX - - SELinux policy specific to SGX enclaves - - update flags for the dynamic pages - -**update on 6/27/2022:** The upstream candidate does not yet take hints like MAP_POPULATE, MAP_GROWSDOWN to optimize allocation. However, MAP_POPULATE has been considered and may be added for future. - -## munmap -Calling munmap on an enclave page (dynamic allocated or not) has exactly the same effect of calling munmap on a regular RAM page. No sgx specific interface is needed. No behavior changes to current kernel space implementation. - -### Remarks - -- Enclave memory mapings are shared (MAP_SHARED). 
The mappings in shared processes are kept alive and independently until the process exits - - munmap and closing file descriptors are not required for user space. A dead process automatically releases all mappings and file descriptors. -- Upon all enclave mappings are removed and file handles to the enclave are closed, either by explicit munmap/fclose syscalls or when all hosting apps exited: - - The kernel may mark its remaining pages are reclaimable and issue EREMOVE on them any time the kernel deems appropriate. - -## mprotect IOCTL -This IOCTL emulates the mprotect syscall with SGX specific extensions. In future kernel implementation, it could be mprotect or pkey_mprotect syscall with sgx extensions for the "prot" parameter. - -``` -#define SGX_IOC_ENCLAVE_MPROTECT _IOW(SGX_MAGIC, 0x06, struct sgx_enclave_mprotect) -/** - * struct sgx_enclave_mprotect - parameter structure for the - * %SGX_IOC_ENCLAVE_MPROTECT ioctl - * @addr: address of the memory to change protections - * @length: length of the area. This must be a multiple of the page size. - * @prot: this must be or'ed of following: - PROT_READ - PROT_WRITE - PROT_EXEC - PROT_TRIM (new): change the page type to PT_TRIM, implies RW. User space should immediately EACCEPT, and then call mprotect with PROT_NONE. - PROT_TCS (new): change the page type to PT_TCS - PROT_NONE: Signal the kernel EACCEPT is done for PT_TRIM pages. Kernel can EREMOVE the pages at a time it deems appropriate. - */ -struct sgx_enclave_mprotect { - __u64 addr; - __u64 length; - __u64 prot; -}; -``` -**update on 6/27/2022:** The upstream candidate provides separate ioctls interfaces: SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS for EMODPR, -SGX_IOC_ENCLAVE_MODIFY_TYPES for change types to PT_TRIM or PT_TCS, SGX_IOC_ENCLAVE_REMOVE_PAGES for notifying kernel to -perform EREMOVE after eaccept PT_TRIM pages. - -### Remarks - -Kernel should ensure that SGX instructions can succeed or catch and handle any fault. - - The kernel may maintain EPCM information on each page which includes access permission RWX, page types of PT_REG, PT_TRIM, PT_TCS. - - The kernel should EREMOVE pages of PT_TRIM only after user space signals kernel EACCEPT is done with mprotect(...,PROT_NONE,...). This is because EACCEPT may cause undesired #PF if the target page is already EREMOVED. - - The kernel catches fault on EMODPR, EMODT and converts to error code returned to user space. - -The enclave run-time (or trusted run-time) may implement a parallel memory management structure which would provide information to the enclave on the enclave memory mappings. The run-time can have a trusted API analogous to mmap which makes a call out of the enclave to issue the mmap and then either perform EACCEPT on the pages and update the internal memory structures or configure the enclave to perform the EACCEPT when a #PF is delivered to the enclave. With the outlined kernel interface, either implementation is possible. -## Sequence Diagrams for Basic Flows - -### Direct Allocation with MAP_POPULATE - -![SGX2 direct allocation flow](images/SGX2_alloc_direct.svg) - -### \#PF Based Allocation - -![SGX2 #PF based allocation flow](images/SGX2_alloc_pf.svg) - -### EPC Deallocation - -![SGX2 deallocation flow](images/SGX2_trim.svg) - -### Permission Changes - -![SGX2 permissions change flow](images/SGX2_perms.svg) - -**Notes:** -- EACCEPT is needed for enclave to ensure untrusted runtime and OS indeed invoke EMODPR and EPCM permissions are set as expected. 
-- It is assumed that both OS and enclave keep track of page permissions. However, it is possible for the enclave to avoid that with an implementation like this:
-
-```
-//Change page permissions to perms_target in EPCM without remembering previous permissions.
-//The tradeoff here is possibly more ocall, emodpr, emodpe, eaccept operations than necessary.
-trusted_mprotect(..., perms_target, ...){
-    ocall_mprotect(..., perms_target, ...); //expect EPCM.perms<=perms_target
-    emodpe(..., perms_target, ...);         //expect EPCM.perms>=perms_target
-    eaccept(..., perms_target);             //verify EPCM.perms==perms_target
-    assert( ZF == 0);
-}
-```
-
-
-### TCS Allocation
-
-![SGX2 TCS allocation flow](images/SGX2_tcs.svg)
-
-
-## Example advanced flows
-
-More advanced flows can be implemented as combinations of the basic flows. Here we present a few examples.
-
-### Dynamic code loading
-
-To load dynamic code after EINIT, the enclave has to verify that the code is trustworthy. The mechanism
-for an enclave to establish trustworthiness of the new code is out of scope for this document.
-
-Assuming a new code page is verified to be trusted and stored at an existing enclave page, there could
-be many ways for an enclave to load the trusted code into a new executable page. For example, in an SGX1 environment
-without EDMM support, the enclave can reserve RWX regions and load trusted code directly into those regions.
-This is straightforward, but neither a flexible nor an efficient use of EPC. Additionally, the requirement of an RWX region
-goes against security policies that forbid running code from writable pages. With EDMM, the EACCEPTCOPY instruction
-allows an enclave to copy code to a pending page and set EPCM permissions to RX at the same time, thus providing
-a more robust and flexible way to load trusted code without those pitfalls.
-
-The following are two example sequences in which a dynamic code page is loaded using EACCEPTCOPY on demand, when
-the code page is executed for the first time.
-
-**Dynamic loading with direct allocation**
-
-![SGX2 EACCEPTCOPY flow-direct EAUG](images/SGX2_eaccept2.svg)
-
-**Dynamic loading with #PF based allocation**
-
-![SGX2 EACCEPTCOPY flow](images/SGX2_eaccept.svg)
-
-In the sequences above, it is assumed that the enclave loads a code page only when it is executed for the
-first time. This minimizes EPC usage. An enclave could also choose to EACCEPTCOPY to preload the
-code ahead of time. In that case, the sequence would be as follows for direct allocation:
-1. Enclave calls mmap to configure a region in enclave ELRANGE for EAUG.
-2. Kernel EAUGs all pages requested.
-3. Enclave EACCEPTCOPYs trusted code from an existing EPC page to the target page, which sets the RX permissions in EPCM specified in the PageInfo operand.
-4. Enclave makes an ocall which invokes the mprotect syscall to change PTE permissions from RW to RX.
-
-### Lazy dynamic stack expansion
-An enclave can lazily expand its stacks as follows.
-1. Enclave calls mmap with MAP_GROWSDOWN for a stack region in enclave ELRANGE.
-2. At some time later, the enclave pushes to the top of the stack where no EPC page is populated yet; this results in a #PF, causing an enclave AEX.
-3. Kernel determines the faulting address is in an enclave stack region, EAUGs a page, and invokes the user space handler via vDSO.
-4. The user space handler delivers it to the enclave exception handler.
-5. The enclave exception handler checks the faulting address against its records and determines the fault happened in a stack area not yet EACCEPT'ed.
-6.
Enclave issues EACCEPT, returns to the untrusted user handler of the hosting process, which returns to the kernel fault handler.
-7. Kernel fault handler returns to the enclave AEX address, at which an ERESUME instruction is stored.
-8. The enclave is resumed and the original push instruction is retried and succeeds.
-
-## Exception Handling
-
-This section focuses on changes around \#PF handling, which is affected by the new page states (i.e., states in EPCM) introduced by SGX/EDMM, along with the mechanisms for handling exceptions in enclaves.
-
-An exception or interrupt during enclave execution will trigger an enclave exit, i.e., an Asynchronous Enclave Exit (AEX). To protect the secrecy of the enclave, the SGX CPU at AEX saves the state of certain registers within enclave memory, specifically in the thread's current State Save Area (SSA). It then loads those registers with fixed values called the synthetic state, of which the RIP (Instruction Pointer Register) is always set to the AEP (Asynchronous Exit Pointer) address. The AEP is passed in as an operand of the EENTER instruction and points to a trampoline code sequence which ultimately invokes the ERESUME instruction to reenter the enclave.
-
-As with all non-enclave exception scenarios, the kernel fault handler registered in the Interrupt Descriptor Table (IDT) is the first in line to handle exceptions for an AEX, and it needs to either handle the exception in kernel space or, if it cannot, invoke the user space exception handler. In both cases, after the handlers return, control is transferred to the AEP trampoline, which eventually invokes ERESUME to reenter the enclave.
-
-The current kernel implementation (in release 5.11) can invoke the user space exception handler in two ways, depending on how EENTER and the AEP trampoline are managed:
-
- 1. Direct EENTER in runtime: the user space runtime manages EENTER and the AEP trampoline directly and uses the Linux signal APIs to register and handle exceptions.
- 2. vDSO interface: the user space invokes [__vdso_sgx_enter_enclave](https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git/tree/arch/x86/include/uapi/asm/sgx.h?h=x86/sgx#n124), passing in a callback for exception handling, and the vDSO implementation manages EENTER and the AEP trampoline.
-
- The direct EENTER method requires signal handling in the runtime library, which is known to be challenging in the Linux environment. Therefore, the vDSO interface is preferred and assumed in the following discussion. (A runtime implementing the direct EENTER method would have a similar flow, with the callbacks from the vDSO replaced by Linux signals.) For more details about the new SGX vDSO interface please refer to the documentation in the [kernel header file](https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git/tree/arch/x86/include/uapi/asm/sgx.h?h=x86/sgx#n124). The general sequence is as follows:
-
- 1. User space initializes an sgx_enclave_run struct, run = {..., TCS, sgx_enclave_user_handler, ...}
- 2. User space calls __vdso_sgx_enter_enclave (..., EENTER, run, ...)
- 3. vDSO invokes EENTER (TCS, vDSO AEP) to enter the enclave; the vDSO AEP points to an ERESUME instruction in vDSO code.
- 4. If the enclave finishes successfully and EEXITs, the vDSO sets run.function = EEXIT; go to step 7.
- 5. In the event of an AEX, the kernel handles the fault if possible, e.g., EAUG on #PF, and returns to the vDSO AEP.
- 6.
Otherwise, the kernel dispatches the fault to the vDSO (via an entry in the exception fix-up table), which copies the exception info to run.exception_vector, run.exception_error_code and run.exception_addr, and the last seen ENCLU leaf in RAX (ERESUME) to run.function.
- 7. vDSO invokes sgx_enclave_user_handler(..., run)
- 8. The sgx_enclave_user_handler processes the enclave exit event:
-    * If run.function == EENTER: error case; return a negative value to fail the last __vdso_sgx_enter_enclave call. User space should treat this as if the enclave lost its EPC context due to power events or other causes (assuming no bugs in the code), and try to reload the enclave.
-    * If run.function == EEXIT: return 0 for a normal enclave ecall return, or return EENTER after invoking the proper ocall per the runtime-specific convention.
-    * If run.function == ERESUME: invoke __vdso_sgx_enter_enclave (..., EENTER, run2, ...) to handle the exception inside the enclave, then return ERESUME.
- 9. The vDSO returns to the caller if the user handler's return value is not EENTER or ERESUME; otherwise it uses ERESUME or EENTER accordingly to reenter the enclave.
-
-
-### Fault Handling in Kernel
-
-SGX enclave execution may cause an “EPCM Induced #PF”. For those #PFs, SGX enabled CPUs set the SGX bit (bit 15) in the Page Fault Error Code (PFEC); this bit is always set if the fault is due to an EPCM attribute mismatch. The kernel #PF handler will only see the faulting address (via CR2) and the PFEC on a page fault. It must rely on this information and its own stored information about the address of the fault (VMA and PTE) to decide how to handle the fault. In many cases, the kernel can only issue a signal or call the user handler callback registered via the SGX vDSO function with run.function=ERESUME and pass on all relevant exception info.
-
-In addition, a running enclave can lose its EPC context due to power events (S3/S4 transitions) or the VM being suspended. In those cases, a page fault results on the EENTER instruction (either at an initial ecall or when re-entering the enclave for exception handling), and the user handler receives a callback from the vDSO with run.function = EENTER.
-
-This table summarizes the kernel, vDSO, and user handler actions in different fault scenarios related to enclave operations. All exceptions considered here happen inside the enclave causing an AEX, or at EENTER/ERESUME, so the kernel converts them to synchronous callbacks through the vDSO interface as needed.
-
-| Fault Condition | Key #PF PFEC Contents | Kernel/vDSO Action | Untrusted User Handler |
-|---|---|---|---|
-| Access a page which has been swapped out | #PF where PFEC.P=0 | ELD the page from backing store, ERESUME | N/A |
-| Access Page Mapped PROT_NONE
(page that the enclave has not mmap'ed) | #PF where PFEC.P=0 | invoke user handler
(run.fun=ERESUME) | invoke enclave handler | -| Access Page Mapped PROT_W (Page had been mmap'ed by enclave, but not EAUG'ed) | #PF where PFEC.P=0 | EAUG and map the page then ERESUME | N/A | -| Page Protection mismatch in PTE| #PF where PFEC.W/R or PFEC.I/D will not match PTE | invoke user handler
(run.fun=ERESUME) | invoke enclave handler | -| Page Protection mismatch in EPCM| #PF where PFEC.SGX=1 | invoke user handler
(run.fun=ERESUME) | invoke enclave handler | -| Access Page with EPCM.Pending | #PF where PFEC.SGX=1 | invoke user handler
(run.fun=ERESUME) | invoke enclave handler | -| Access Page with EPCM.Modified | #PF where PFEC.SGX=1 | invoke user handler
(run.fun=ERESUME) | invoke enclave handler | -| Access Page with type PT_TRIM | #PF where PFEC.SGX=1 | invoke user handler
(run.fun=ERESUME) | invoke enclave handler | -| EENTER with invalid TCS
(EPC loss due to power events or VM suspend/resume) | #PF where PFEC.SGX=1 or PFEC.P = 0 | invoke user handler
(run.fun=EENTER) | return error to app signaling enclave lost
App should reload enclave | -| ERESUME with invalid TCS
(EPC loss due to power events or VM suspend/resume) | #PF where PFEC.SGX=1 or PFEC.P = 0 | invoke user handler
(run.fun=ERESUME) | invoke enclave handler
and will trigger #PF on EENTER | - -**Note:** When an error/exception happens when kernel handles a fault on behalf of an enclave, the kernel sees the original fault happened at AEP and would fix it up as a callback to user handler with run.function = ERESUME. For example, in the first case of the table above, a fault on ELD (EPC loss caused by power events) would be fixed up in this way. - -# Enclave Handling of Faults - -Once an exception is passed into an enclave, the enclave has to rely on trusted info stored in the active SSA by CPU during AEX to make right decisions in handling the exception. It should not rely on any info passed in from the untrusted side. To gain access to fault related info in SSA, an enclave configured to use SGX2 EDMM features should also configure the SECS.MISCSELECT to report EXIINFO in the State Save Area frame on a #PF or a General Protection (#GP) Fault. This will ensure that the enclave has the following information in the SSA frame on a #PF: -* ExitInfo.Vector = #PF identifies that a #PF caused an asynchronous exit -* MISC.EXINFO.MADDR = the linear address that page faulted (analogous to CR2) -* MISC.EXINFO.ERRCD = Page-Fault Error Code - information about the page fault - -To securely handle all faulting scenarios and EDMM flows, in addition to information stored in SSA, the enclave should store information about its own memory configuration and relevant states. This can be an array or table of structures storing information about each mapped region of enclave memory. The information that the enclave should store includes: -* Address Range: Range of Enclave Linear Addresses that are covered by the region -* Permissions: Combination of Read, Write, Execute -* Page Type: SGX page type of pages in the region - PT_TCS, PT_REG, or PT_TRIM -* State: the state of the region. The state may indicate that the region is in transition. For example is is changing page type or permissions. -* Table of information about the EACCEPT state of each page in the region. This may be a temporary structure which keeps track of pages which are EACCEPTed for operations requiring EACCEPT. This can ensure that the enclave does not EACCEPT a page twice. For example, when a page is EAUG'ed to an enclave linear address, the enclave should only EACCEPT that page once. If the enclave could be convinced to EACCEPT the page twice, then the OS can potentially EAUG two pages at the same enclave linear address and freely swap them by modifying PTEs. - -Enclaves should prevent two threads from simultaneously operating on the same region, e.g, trying to EMODPE on a page while permission change is in progress in another thread. One way to ensure this is to use some lock/synchronization mechanism to protect the state of each region, have the second thread wait if page is in transition state. - -When an enclave is called after faulting, the enclave can consult its stored memory region states and the ExitInfo.Vector and MISC.EXINFO in SSA to determine what to do with the fault. The following table lists actions on specific page faults. - -| EXITINFO/MISC.EXINFO
Information | State of Region | Cause of Fault | Enclave Action | -|---|---|---|---| -| ERRCD(PFEC).P=0 | n/a | Enclave has accessed an unallocated memory region|Call exception handlers, abort if not handled | -| ERRCD(PFEC).W/R or I/D does not match protections | Not In-Transition | Enclave has incorrectly accessed memory region|Call exception handlers, abort if not handled | -| ERRCD(PFEC).W/R or I/D does not match protections | In-Transition | Enclave has incorrectly accessed memory region which may be changing protections|If future protections will allow access then pend on Lock/Mutex for region, else call exception handlers, abort if not handled | -| ERRCD(PFEC).SGX=1 | Not In-Transition | Error in run-time or kernel | Depending on run-time design, the enclave should not encounter this. | -| ERRCD(PFEC).SGX=1 | In-Transition | Page is being accessed during transition | If future protections/page-type will allow access then pend on Lock/Mutex for region, else call exception handlers, abort if not handled | - -## Debugger Support - -The EDMM flows do not affect how debugger read/write enclave memory by EDBGRD/EDBGWR. However, the DBGOPTIN (i.e. bit 0) of TCS.FLAGS must be set to enable hardware breakpoints and allow single-stepping inside the enclave thread entered with that TCS. Therefore, for TCS pages added dynamically using EDMM instructions, debuggers are required to trap TCS creation flow in order to set DBGOPTIN. For GDB on Linux, the debugger scripts can collaborate with runtime to set DBGOPTIN. One possible implementation could be described as below. -- Whenever a new TCS page is ready to use, the runtime invokes a special empty function and passes the TCS address as its argument. -- GDB startup script sets a breakpoint on that empty function to receive the debug interrupt. -- Once the breakpoint has been hit, GDB script extracts the address of the TCS page, and sets DBGOPTIN for that TCS. -- GDB resumes the interrupted application. -- From now on, hardware breakpoints and single stepping are allowed inside enclave threads entered with the newly created TCS - -For Windows, similar collaboration between debugger and runtime can be implemented using exceptions instead of breakpoints on empty functions. - -For runtimes using EDDM to load dynamic modules into enclave after EINIT, the runtime needs to signal module loading events to the debugger so that the debugger can load additional symbols for those modules. That can also be implemented using exceptions or pre-defined breakpoints. - -**Note:** Kernel does not fixup Debug Exceptions (#DB) and Breakpoints (#BP). diff --git a/sdk/emm/design_docs/SGX_EMM.md b/sdk/emm/design_docs/SGX_EMM.md deleted file mode 100644 index 8f94d4bb3..000000000 --- a/sdk/emm/design_docs/SGX_EMM.md +++ /dev/null @@ -1,725 +0,0 @@ -SGX Enclave Memory Manager -================================= - -## Introduction ## - -An enclave's memory is backed by a special reserved region in RAM, called -Enclave Page Cache (EPC). Enclave memory management tasks include -allocating/reserving virtual address ranges, committing physical EPC pages, -changing EPC page permissions or page types, and removing EPC pages. -Those tasks require collaboration between the trusted runtime, the untrusted -runtime, and the OS. 
The SGX enclave memory manager (EMM) serves as a central
-component in the enclave trusted runtime that abstracts the interaction with
-the untrusted runtime for all memory management flows and provides APIs for
-its clients to reserve virtual address ranges, commit EPC memory to the reserved
-address ranges, and modify attributes of the reserved/committed pages.
-
-For details of specific memory management related flows, please refer to
-[the SGX EDMM driver API spec](SGX_EDMM_driver_interface.md).
-
-As shown in the figure below, the EMM provides a set of public APIs to be invoked
-by upper layer components for specific usages, such as dynamic heap/stack, mmap,
-mprotect, higher level language JIT compilers, etc. Another goal of this design is
-to make the EMM implementation portable across different runtimes such as the
-Intel SGX SDK and OpenEnclave. To achieve that, the runtimes are required to implement
-a runtime abstraction layer with APIs defined in this document. The main purpose of
-the abstraction layer is to provide an OCall bridge to the enclave common loader outside
-the enclave, which interacts with the OS to support the EDMM flows.
-
-![SGX2 EMM architecture](images/SGX2_emm_arch.svg)
-
-
-**Note:** As the EMM is a component inside the enclave, it should not have direct OS dependencies.
-However, the design proposed in this document only considers call flows and semantics for Linux,
-and the OCall implementation in the enclave common loader is currently specified for Linux only,
-though a similar implementation is possible on other OSes.
-
-
-## User Experience ##
-
-**Porting EMM to Different Runtimes**
-
-To port the EMM implementation to different SGX enclave runtimes, e.g., the Open Enclave and Intel SGX SDKs,
-the runtimes need to implement the runtime abstraction layer APIs. These APIs encapsulate runtime specific support,
-such as making OCalls and registering callbacks for page faults, on which the EMM implementation relies to collaborate with the OS.
-
-Additionally, the runtime needs to properly initialize the EMM and reserve its own regions using the private APIs
-as described in the section on [Support for EMM Initialization](#support-for-emm-initialization).
-
-The EMM source code will be hosted and maintained in the [Intel SGX PSW and SDK repository](https://github.com/intel/linux-sgx).
-The EMM can be built as a separate library and then linked into any runtime that implements the abstraction layer APIs.
-
-**Allocate, Deallocate Enclave Memory**
-
-The EMM provides an API, sgx_mm_alloc, for its clients to request enclave memory
-allocations. An enclave memory allocation represents both a reserved virtual
-address range and a commitment of EPC pages. EPC pages are committed for
-enclaves via special SGX instructions: loaded by EADD/EEXTEND before EINIT
-or dynamically added using EAUG followed by EACCEPT.
-
-The sgx_mm_alloc API allows clients to specify one of three committing modes
-for an allocation:
-- SGX_EMA_RESERVE: only the virtual address range is reserved. No EPC pages will
-be committed in this mode.
-- SGX_EMA_COMMIT_NOW: reserves and commits physical EPC upon allocation.
-EACCEPT will be done immediately on SGX2 platforms.
-- SGX_EMA_COMMIT_ON_DEMAND: EACCEPT is done on demand; see below on committing
-and uncommitting.
-
-An allocation, once created, will own its address range until the deallocation
-API, sgx_mm_dealloc, is called on it. No two active allocations can have
-overlapping address ranges.
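For illustration, here is a minimal usage sketch of the allocation flow described above, written against the sgx_mm_alloc/sgx_mm_dealloc declarations given later in this document. The `<sgx_mm.h>` include, the PAGE_SIZE constant, and the error handling are assumptions for the example, not part of the specified API.

```
#include <stddef.h>
#include <sgx_mm.h>            /* assumed header exposing the EMM public APIs */

#define PAGE_SIZE 4096         /* assumed EPC page size */

/* Reserve a 16-page commit-on-demand region, touch it, then release it. */
int example_alloc_dealloc(void)
{
    void *base = NULL;
    size_t len = 16 * PAGE_SIZE;

    /* SGX_EMA_COMMIT_ON_DEMAND: pages are EAUGed/EACCEPTed lazily on first access. */
    int ret = sgx_mm_alloc(NULL, len, SGX_EMA_COMMIT_ON_DEMAND,
                           NULL /* no custom #PF handler */, NULL, &base);
    if (ret != 0)
        return ret;            /* EACCES, EINVAL or ENOMEM per the API spec */

    ((volatile char *)base)[0] = 1;   /* first touch triggers #PF -> EAUG + EACCEPT */

    return sgx_mm_dealloc(base, len); /* trims the pages and releases the range */
}
```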
-
-**Commit, Uncommit Enclave Memory**
-
-When a page in a COMMIT_ON_DEMAND allocation is accessed, a page fault occurs if
-the page was not yet committed. The EMM will perform EACCEPT to commit the EPC
-page on the page fault, after the OS does the EAUG.
-
-The clients can also call the EMM commit API, sgx_mm_commit, to proactively
-commit specific sub-regions in a COMMIT_ON_DEMAND allocation to avoid
-future page faults.
-
-Some EMM clients, e.g., a dynamic code loader wishing to load code on
-page faults, can register a custom handler for page faults at the time of
-the allocation request. In the custom page fault handler, it can invoke an API,
-sgx_mm_commit_data, to commit and load data to the newly committed EPC page at
-the same time, as supported by EACCEPTCOPY (see the sketch following the
-assumptions below).
-
-Committed pages will stay committed (regardless of how they were committed) until
-the client calls the uncommit API, sgx_mm_uncommit, on them or the allocation
-they belong to is deallocated by sgx_mm_dealloc.
-
-**Modify Page Attributes**
-
-The EMM clients may call sgx_mm_modify_permissions/sgx_mm_modify_type to request permission
-or page type changes for pages in existing allocations.
-
-## Notes on Internal Design ##
-
-The enclave memory manager keeps track of memory allocation and layout info inside
-the enclave address range (ELRANGE) using an internal structure called the Enclave Memory
-Area (EMA) List. The EMA and the EMA list are considered private data structures of the memory
-manager, and their internals are not exposed in client-facing APIs.
-- The EMA list tracks all memory regions in use (reserved, committed,
-commit-on-demand) in ELRANGE.
-- Ranges in ELRANGE not tracked by an EMA are considered free and ready for new allocations.
-- The EMM labels certain EMAs as reserved for the runtime or its internal usage and makes them
-inaccessible from the public APIs.
-- A thread calling an EMM API on an EMA with an operation pending in another thread will wait
-until the pending operation is finished.
-
-**Assumptions:**
-
-- When an enclave is loaded, the OS reserves the whole address range covered by ELRANGE.
-It is assumed the host app will not remap any part of this reserved range.
-- When an enclave is loaded with its base address at zero, only a partial ELRANGE may be
-  reserved by the OS. In that case, the EMM will treat the partial ELRANGE as the valid reserved
-  range for use inside the enclave.
-  - The runtime can set up the partial valid range in ELRANGE by marking the unusable range up front
-    as SGX_EMA_RESERVE using the EMM private EMA_allocate API.
-- The memory manager does not check EPC pressure, or proactively trim pages when EPC runs low.
-The OS can reclaim EPC pages when EPC is running low or a cgroup threshold is reached.
-- The memory manager does not maintain and recycle committed-then-freed pages.
-  - Whenever a page is freed (via the dealloc or uncommit API), it is trimmed from the enclave
-    and needs to be re-allocated and committed before re-use.
-  - The owner of a region can re-purpose a sub-region of it by calling sgx_mm_modify_type/permissions
-    to split out the sub-region to be reused.
-- The memory manager does not call back into the client for #GP handling. The memory manager code will ensure that
-it does not itself cause a #GP, and it only registers a #PF handler with the enclave global exception
-handler registry through the runtime abstraction layer. A client wishing to handle #GP can register
-its own exception handler with the global handler registry.
-- The memory manager is implemented on SGX2 platforms only.
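To make the commit-on-demand and custom page fault handler flow above concrete before the API reference that follows, here is a hypothetical sketch of a dynamic code loader built on the public APIs defined below. The `<sgx_mm.h>` include, the PAGE_SIZE constant, and the fetch_and_verify_page helper are assumptions for illustration only; real loader logic and its trust model are runtime specific.

```
#include <stddef.h>
#include <stdint.h>
#include <sgx_mm.h>   /* assumed header exposing the EMM public APIs */

#define PAGE_SIZE 4096

/* Hypothetical helper: produce a verified, trusted copy of the code page that
 * belongs at enclave linear address 'addr'; returns 0 on success. */
extern int fetch_and_verify_page(uint64_t addr, uint8_t page[PAGE_SIZE]);

/* Custom #PF handler: commit and load a verified code page with EACCEPTCOPY
 * the first time it is executed. */
static int code_pf_handler(const sgx_pfinfo *pfinfo, void *private_data)
{
    (void)private_data;
    uint8_t page[PAGE_SIZE];
    uint64_t page_addr = pfinfo->maddr & ~(uint64_t)(PAGE_SIZE - 1);

    if (fetch_and_verify_page(page_addr, page) != 0)
        return SGX_MM_EXCEPTION_CONTINUE_SEARCH;   /* not ours; try next handler */

    /* Commit the page and load its content with RX permissions in one step. */
    if (sgx_mm_commit_data((void *)(size_t)page_addr, PAGE_SIZE, page,
                           SGX_EMA_PROT_READ_EXEC) != 0)
        return SGX_MM_EXCEPTION_CONTINUE_SEARCH;

    return SGX_MM_EXCEPTION_CONTINUE_EXECUTION;    /* retry the faulting access */
}

/* Reserve a commit-on-demand code region whose pages are populated on #PF. */
int reserve_code_region(size_t npages, void **out_addr)
{
    return sgx_mm_alloc(NULL, npages * PAGE_SIZE, SGX_EMA_COMMIT_ON_DEMAND,
                        code_pf_handler, NULL, out_addr);
}
```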
- -Public APIs ------------------ - -### sgx_mm_alloc - -Allocate a new memory region inside enclave and optionally register a custom page fault handler -for the region - -``` -/** - * Page fault (#PF) info reported in the SGX SSA MISC region. - */ -typedef struct _sgx_pfinfo -{ - uint64_t maddr; // address for #PF. - union _pfec - { - uint32_t errcd; - struct - { // PFEC bits. - uint32_t p : 1; // P flag. - uint32_t rw : 1; // RW access flag, 0 for read, 1 for write. - uint32_t : 13; // U/S, I/O, PK and reserved bits not relevant for SGX PF. - uint32_t sgx : 1; // SGX bit. - uint32_t : 16; // reserved bits. - }; - } pfec; - uint32_t reserved; -} sgx_pfinfo; - -/* Return value used by the EMM #PF handler to indicate - * to the dispatcher that it should continue searching for the next handler. - */ -#define SGX_MM_EXCEPTION_CONTINUE_SEARCH 0 - -/* Return value used by the EMM #PF handler to indicate - * to the dispatcher that it should stop searching and continue execution. - */ -#define SGX_MM_EXCEPTION_CONTINUE_EXECUTION -1 - - -/* - * Custom page fault (#PF) handler, do usage specific processing upon #PF, - * e.g., loading data and verify its trustworthiness, then call sgx_mm_commit_data - * to explicitly EACCEPTCOPY data. - * This custom handler is passed into sgx_mm_alloc, and associated with the - * newly allocated region. The memory manager calls the handler when a #PF - * happens in the associated region. The handler may invoke abort() if it - * determines the exception is invalid based on certain internal states - * it maintains. - * - * @param[in] pfinfo info reported in the SSA MISC region for page fault. - * @param[in] private_data private data provided by handler in sgx_mm_alloc call. - * @retval SGX_MM_EXCEPTION_CONTINUE_EXECUTION Success on handling the exception. - * @retval SGX_MM_EXCEPTION_CONTINUE_SEARCH Exception not handled and should be passed to - * some other handler. - * - */ -typedef int (*sgx_enclave_fault_handler_t)(const sgx_pfinfo *pfinfo, void *private_data); - -/* bit 0 - 7 are allocation flags. */ -#define SGX_EMA_ALLOC_FLAGS_SHIFT 0 -#define SGX_EMA_ALLOC_FLAGS(n) (((unsigned int)(n) << SGX_EMA_ALLOC_FLAGS_SHIFT)) -#define SGX_EMA_ALLOC_FLAGS_MASK SGX_EMA_ALLOC_FLAGS(0xFF) - -/* Only reserve an address range, no physical memory committed.*/ -#define SGX_EMA_RESERVE SGX_EMA_ALLOC_FLAGS(1) - -/* Reserve an address range and commit physical memory. */ -#define SGX_EMA_COMMIT_NOW SGX_EMA_ALLOC_FLAGS(2) - -/* Reserve an address range and commit physical memory on demand.*/ -#define SGX_EMA_COMMIT_ON_DEMAND SGX_EMA_ALLOC_FLAGS(4) - -/* Always commit pages from higher to lower addresses, - * no gaps in addresses above the last committed. - */ -#define SGX_EMA_GROWSDOWN SGX_EMA_ALLOC_FLAGS(0x10) - -/* Always commit pages from lower to higher addresses, - * no gaps in addresses below the last committed. -*/ -#define SGX_EMA_GROWSUP SGX_EMA_ALLOC_FLAGS(0x20) - -/* Map addr must be exactly as requested. */ -#define SGX_EMA_FIXED SGX_EMA_ALLOC_FLAGS(0x40) - -/* bit 8 - 15 are page types. */ -#define SGX_EMA_PAGE_TYPE_SHIFT 8 -#define SGX_EMA_PAGE_TYPE(n) ((n) << SGX_EMA_PAGE_TYPE_SHIFT) -#define SGX_EMA_PAGE_TYPE_MASK SGX_EMA_PAGE_TYPE(0xFF) -#define SGX_EMA_PAGE_TYPE_TCS SGX_EMA_PAGE_TYPE(0x1) /* TCS page type. */ -#define SGX_EMA_PAGE_TYPE_REG SGX_EMA_PAGE_TYPE(0x2) /* regular page type, default if not specified. */ -#define SGX_EMA_PAGE_TYPE_TRIM SGX_EMA_PAGE_TYPE(0x4) /* TRIM page type. 
*/ -#define SGX_EMA_PAGE_TYPE_SS_FIRST SGX_EMA_PAGE_TYPE(0x5) /* the first page in shadow stack. */ -#define SGX_EMA_PAGE_TYPE_SS_REST SGX_EMA_PAGE_TYPE(0x6) /* the rest pages in shadow stack. */ - -/* Use bit 24-31 for alignment masks. */ -#define SGX_EMA_ALIGNMENT_SHIFT 24 -/* - * Alignment (expressed in log2). Must be >= log2(PAGE_SIZE) and - * < # bits in a pointer (32 or 64). - */ -#define SGX_EMA_ALIGNED(n) (((unsigned int)(n) << SGX_EMA_ALIGNMENT_SHIFT)) -#define SGX_EMA_ALIGNMENT_MASK SGX_EMA_ALIGNED(0xFFUL) -#define SGX_EMA_ALIGNMENT_64KB SGX_EMA_ALIGNED(16UL) -#define SGX_EMA_ALIGNMENT_16MB SGX_EMA_ALIGNED(24UL) -#define SGX_EMA_ALIGNMENT_4GB SGX_EMA_ALIGNED(32UL) - -/* Permissions flags */ -#define SGX_EMA_PROT_NONE 0x0 -#define SGX_EMA_PROT_READ 0x1 -#define SGX_EMA_PROT_WRITE 0x2 -#define SGX_EMA_PROT_EXEC 0x4 -#define SGX_EMA_PROT_READ_WRITE (SGX_EMA_PROT_READ|SGX_EMA_PROT_WRITE) -#define SGX_EMA_PROT_READ_EXEC (SGX_EMA_PROT_READ|SGX_EMA_PROT_EXEC) -#define SGX_EMA_PROT_READ_WRITE_EXEC (SGX_EMA_PROT_READ_WRITE|SGX_EMA_PROT_EXEC) -/* - * Allocate a new memory region in enclave address space (ELRANGE). - * @param[in] addr Starting address of the region, page aligned. If NULL is provided, - * then the function will select the starting address. - * @param[in] length Size of the region in bytes of multiples of page size. - * @param[in] flags A bitwise OR of flags describing committing mode, committing - * order, address preference, and page type. - * Flags should include exactly one of following for committing mode: - * - SGX_EMA_RESERVE: just reserve an address range, no EPC committed. - * To allocate memory on a reserved range, call this - * function again with SGX_EMA_COMMIT_ON_DEMAND or SGX_EMA_COMMIT_NOW. - * - SGX_EMA_COMMIT_NOW: reserves memory range and commit EPC pages. EAUG and - * EACCEPT are done on SGX2 platforms. - * - SGX_EMA_COMMIT_ON_DEMAND: reserves memory range, EPC pages - * are committed (EACCEPT) on demand upon #PF on SGX2 platforms. - * ORed with zero or one of the committing order flags for SGX2 platforms: - * - SGX_EMA_GROWSDOWN: always commit pages from higher to lower addresses, - * no gaps in addresses above the last committed. - * - SGX_EMA_GROWSUP: always commit pages from lower to higher addresses, - * no gaps in addresses below the last committed. - * Optionally ORed with - * - SGX_EMA_FIXED: allocate at fixed address, will return error if the - * requested address is in use. - * - SGX_EMA_ALIGNED(n): Align the region on a requested boundary. - * Fail if a suitable region cannot be found, - * The argument n specifies the binary logarithm of - * the desired alignment and must be at least 12. - * Optionally ORed with one of following page types: - * - SGX_EMA_PAGE_TYPE_REG: regular page type. This is the default if not specified. - * - SGX_EMA_PAGE_TYPE_SS_FIRST: the first page in shadow stack. - * - SGX_EMA_PAGE_TYPE_SS_REST: the rest page in shadow stack. - * - * @param[in] handler A custom handler for page faults in this region, NULL if - * no custom handling needed. - * @param[in] handler_private Private data for the @handler, which will be passed - * back when the handler is called. - * @param[out] out_addr Pointer to store the start address of allocated range. - * Set to valid address by the function on success, NULL otherwise. - * @retval 0 The operation was successful. - * @retval EACCES Region is outside enclave address space. - * @retval EEXIST Any page in range requested is in use and SGX_EMA_FIXED is set. 
- * @retval EINVAL Invalid alignment bouandary, i.e., n < 12 in SGX_EMA_ALIGNED(n). - * @retval ENOMEM Out of memory, or no free space to satisfy alignment boundary. - */ -int sgx_mm_alloc(void *addr, size_t length, int flags, - sgx_enclave_fault_handler_t handler, void *handler_private, - void **out_addr); - -``` - -**Remarks:** -- Permissions of newly allocated regions are always SGX_EMA_PROT_READ|SGX_EMA_PROT_WRITE and of page - type SGX_EMA_PAGE_TYPE_REG, except for SGX_EMA_RESERVE mode regions which will have SGX_EMA_PROT_NONE. -- Once allocated by sgx_mm_alloc, a region will stay in the allocated state and become - deallocated once sgx_mm_dealloc is called. -- If sgx_mm_dealloc on a partial range of a previously allocated region, then the - region is split, and the freed range is deallocated. The remainder of the - region stays allocated. -- If all pages in the region are freed by sgx_mm_dealloc, then the whole region - is released, and the memory manager no longer tracks the region. - - -### sgx_mm_uncommit and sgx_mm_dealloc - -``` -/* - * Uncommit (trim) physical EPC pages in a previously committed range. - * The pages in the allocation are freed, but the address range is still reserved. - * @param[in] addr Page aligned start address of the region to be trimmed. - * @param[in] length Size in bytes of multiples of page size. - * @retval 0 The operation was successful. - * @retval EINVAL The address range is not allocated or outside enclave. - */ -int sgx_mm_uncommit(void *addr, size_t length); - -/* - * Deallocate the address range. - * The pages in the allocation are freed and the address range is released for future allocation. - * @param[in] addr Page aligned start address of the region to be freed and released. - * @param[in] length Size in bytes of multiples of page size. - * @retval 0 The operation was successful. - * @retval EINVAL The address range is not allocated or outside enclave. - */ -int sgx_mm_dealloc(void *addr, size_t length); - -``` - -### sgx_mm_modify_type, sgx_mm_modify_permissions - -``` -/* - * Change permissions of an allocated region. - * @param[in] addr Start address of the region, must be page aligned. - * @param[in] length Size in bytes of multiples of page size. - * @param[in] prot permissions bitwise OR of following with: - * - SGX_EMA_PROT_READ: Pages may be read. - * - SGX_EMA_PROT_WRITE: Pages may be written. - * - SGX_EMA_PROT_EXEC: Pages may be executed. - * @retval 0 The operation was successful. - * @retval EACCES Original page type can not be changed to target type. - * @retval EINVAL The memory region was not allocated or outside enclave - * or other invalid parameters that are not supported. - * @retval EPERM The request permissions are not allowed, e.g., by target page type or - * SELinux policy. - */ -int sgx_mm_modify_permissions(void *addr, size_t length, int prot); - -/* - * Change the page type of an allocated region. - * @param[in] addr Start address of the region, must be page aligned. - * @param[in] length Size in bytes of multiples of page size. - * @param[in] type page type, only SGX_EMA_PAGE_TYPE_TCS is supported. - * - * @retval 0 The operation was successful. - * @retval EACCES Original page type can not be changed to target type. - * @retval EINVAL The memory region was not allocated or outside enclave - * or other invalid parameters that are not supported. - * @retval EPERM Target page type is not allowed by this API, e.g., PT_TRIM, - * PT_SS_FIRST, PT_SS_REST. 
- */ -int sgx_mm_modify_type(void *addr, size_t length, int type); - -``` -**Remarks:** -- The memory manager will track current permissions for each region, and can - determine whether new permissions require an OCall for EMODPR, e.g., RW<->RX, RW->R. -- These APIs should not be used to change EPC page type to PT_TRIM. Trimming pages - are done by sgx_mm_uncommit and sgx_mm_dealloc only. - - -### sgx_mm_commit - -``` - -/* - * Commit a partial or full range of memory allocated previously with SGX_EMA_COMMIT_ON_DEMAND. - * The API will return 0 if all pages in the requested range are successfully committed. - * Calling this API on pages already committed has no effect. - * @param[in] addr Page aligned starting address. - * @param[in] length Length of the region in bytes of multiples of page size. - * @retval 0 The operation was successful. - * @retval EINVAL Any requested page is not in any previously allocated regions, or - * outside the enclave address range. - * @retval EFAULT All other errors. - */ -int sgx_mm_commit(void *addr, size_t length); - -``` - -### sgx_mm_commit_data - -``` - -/* - * Load data into target pages within a region previously allocated by sgx_mm_alloc. - * This can be called to load data and set target permissions at the same time, - * e.g., dynamic code loading. The caller has verified data to be trusted and expected - * to be loaded to the target address range. Calling this API on pages already committed - * will fail. - * - * @param[in] addr Page aligned target starting addr. - * @param[in] length Length of data, in bytes of multiples of page size. - * @param[in] data Data of @length. - * @param[in] prot Target permissions. - * @retval 0 The operation was successful. - * @retval EINVAL Any page in requested address range is not previously allocated, or - * outside the enclave address range. - * @retval EPERM Any page in requested range is previously committed. - * @retval EPERM The target permissions are not allowed by OS security policy, - * e.g., SELinux rules. - */ -int sgx_mm_commit_data(void *addr, size_t length, uint8_t *data, int prot); - -``` -**Remarks:** -- The memory manager decides whether OCalls are needed to ask the OS to make Page Table Entry (PTE) -permissions changes. No separate sgx_mm_modify_permissions call is needed. - -Runtime Abstraction Layer ----------------------------------- - -To support and use the EMM, an SGX trusted runtime shall implement following -abstraction layer APIs. - -### Exception Handler Registration - -``` -/* - * The EMM page fault (#PF) handler. - * - * @param[in] pfinfo Info reported in the SSA MISC region for page fault. - * @retval SGX_EXCEPTION_CONTINUE_EXECUTION Success handling the exception. - * @retval SGX_EXCEPTION_CONTINUE_SEARCH The EMM does not handle the exception. - */ -typedef int (*sgx_mm_pfhandler_t)(const sgx_pfinfo *pfinfo); - -/* - * Register the EMM handler with the global exception handler registry - * The Runtime should ensure this handler is called first in case of - * a #PF before all other handlers. - * - * @param[in] pfhandler The EMM page fault handler. - * @retval true Success. - * @retval false Failure. - */ -bool sgx_mm_register_pfhandler(sgx_mm_pfhandler_t pfhandler); - -/* - * Unregister the EMM handler with the global exception handler registry. - * @param[in] pfhandler The EMM page fault handler. - * @retval true Success. - * @retval false Failure. 
- */ -bool sgx_mm_unregister_pfhandler(sgx_mm_pfhandler_t pfhandler); - -``` - -### OCalls - -``` - -/* - * Call OS to reserve region for EAUG, immediately or on-demand. - * - * @param[in] addr Desired page aligned start address. - * @param[in] length Size of the region in bytes of multiples of page size. - * @param[in] page_type One of following page types: - * - SGX_EMA_PAGE_TYPE_REG: regular page type. This is the default if not specified. - * - SGX_EMA_PAGE_TYPE_SS_FIRST: the first page in shadow stack. - * - SGX_EMA_PAGE_TYPE_SS_REST: the rest page in shadow stack. - * @param[in] alloc_flags A bitwise OR of flags describing committing mode, committing - * order, address preference, page type. The untrusted side. - * implementation should translate following additional bits to proper - * parameters invoking syscall(mmap on Linux) provided by the kernel. - * The flags param of this interface should include exactly one of following for committing mode: - * - SGX_EMA_COMMIT_NOW: reserves memory range with SGX_EMA_PROT_READ|SGX_EMA_PROT_WRITE, if supported, - * kernel is given a hint to EAUG EPC pages for the area as soon as possible. - * - SGX_EMA_COMMIT_ON_DEMAND: reserves memory range, EPC pages can be EAUGed upon #PF. - * ORed with zero or one of the committing order flags: - * - SGX_EMA_GROWSDOWN: if supported, a hint given for the kernel to EAUG pages from higher - * to lower addresses, no gaps in addresses above the last committed. - * - SGX_EMA_GROWSUP: if supported, a hint given for the kernel to EAUG pages from lower - * to higher addresses, no gaps in addresses below the last committed. - * @retval 0 The operation was successful. - * @retval EFAULT for all failures. - */ - -int sgx_mm_alloc_ocall(uint64_t addr, size_t length, int page_type, int alloc_flags); - -/* - * Call OS to change permissions, type, or notify EACCEPT done after TRIM. - * - * @param[in] addr Start address of the memory to change protections. - * @param[in] length Length of the area. This must be a multiple of the page size. - * @param[in] flags_from The original EPCM flags of the EPC pages to be modified. - * Must be bitwise OR of following: - * SGX_EMA_PROT_READ - * SGX_EMA_PROT_WRITE - * SGX_EMA_PROT_EXEC - * SGX_EMA_PAGE_TYPE_REG: regular page, changeable to TRIM and TCS - * SGX_EMA_PAGE_TYPE_TRIM: signal to the kernel EACCEPT is done for TRIM pages. - * @param[in] flags_to The target EPCM flags. This must be bitwise OR of following: - * SGX_EMA_PROT_READ - * SGX_EMA_PROT_WRITE - * SGX_EMA_PROT_EXEC - * SGX_EMA_PAGE_TYPE_TRIM: change the page type to PT_TRIM. Note the address - * range for trimmed pages may still be reserved by enclave with - * proper permissions. - * SGX_EMA_PAGE_TYPE_TCS: change the page type to PT_TCS - * @retval 0 The operation was successful. - * @retval EFAULT for all failures. - */ - -int sgx_mm_modify_ocall(uint64_t addr, size_t length, int flags_from, int flags_to); - -``` - -### Other Utilities - -``` -/* - * Define a recursive mutex and create/lock/unlock/destroy functions. - */ -typedef struct _sgx_mm_mutex sgx_mm_mutex; -sgx_mm_mutex *sgx_mm_mutex_create(void); -int sgx_mm_mutex_lock(sgx_mm_mutex *mutex); -int sgx_mm_mutex_unlock(sgx_mm_mutex *mutex); -int sgx_mm_mutex_destroy(sgx_mm_mutex *mutex); - -/* - * Check whether the given buffer is strictly within the enclave. - * - * Check whether the buffer given by the **ptr** and **size** parameters is - * strictly within the enclave's memory. If so, return true. 
If any - * portion of the buffer lies outside the enclave's memory, return false. - * - * @param[in] ptr The pointer to the buffer. - * @param[in] size The size of the buffer. - * - * @retval true The buffer is strictly within the enclave. - * @retval false At least some part of the buffer is outside the enclave, or - * the arguments are invalid. For example, if **ptr** is null or **size** - * causes arithmetic operations to wrap. - * - */ -bool sgx_mm_is_within_enclave(const void *ptr, size_t size); - -``` - -### Support for EMM Initialization - -In addition to implement the abstraction layer APIs, a runtime shall provide -iniitial enclave memory layout information to the EMM during early -initialization phase of the enclave. -The memory manager must be initialized in the first ECALL (ECMD_INIT_ENCLAVE in -Intel SGX SDK) before any other clients can use it. Therefore, code and data -of the memory manager will be part of initial enclave image that are loaded -with EADD before EINIT, and as a part of the trusted runtime. - -To initialize EMM internals, the trusted runtime should first invoke sgx_mm_init, -passing in an address range available for non-system or so-called user allocations. - -``` -/* - * Initialize the EMM internals and reserve the whole range available for user - * allocations via the public sgx_mm_alloc API. This should be called before - * any other APIs invoked. The runtime should not intend to allocate any subregion - * in [user_start, user_end) for system usage, i.e., the EMM will fail any allocation - * request with SGX_EMA_SYSTEM flag in this range and return an EINVAL error. - * @param[in] user_start The start of the user address range, page aligned. - * @param[in] user_end The end (exclusive) of the user address range, page aligned. - */ -void sgx_mm_init(size_t user_start, size_t user_end); -``` - -The EMM consumes some minimal amount of memory to store the EMA objects for -book keeping of all allocations. During initialization, the EMM reserves an initial area -in the user range for those internal use. And it would allocate more of such reserves on -demand as EMAs created for allocation requests and the active reserves run out. The size -of the user range accomodate this internal consumption overhead, which can be estimated as -the total size of all regions to be tracked (both system and expected user allocations) -divided by 2^14. At runtime, in case the EMM could not find space to allocate EMA objects -then its API would return ENOMEM. - -After initialization, the trusted runtime should enumerate all initial committed regions (code, -data, heap, stack, TCS, and SSA), and call the EMM private APIs to set up -initial entries in the EMA list to track existing regions. These regions -are typically created by the enclave loader at predetermined locations and -some are loaded with content from the enclave image. Thus it's necessary to -reserve their ranges this way so that they won't be modifiable by EMM public APIs. - -### EMM Private APIs for Trusted Runtimes -These private APIs can be used by the trusted runtime to reserve and allocate -regions not accessible from public APIs. They have the identical signature -as the public API counterparts and replace "sgx_mm_" prefix with "mm_" prefix. -The main difference is that the private mm_alloc allows an extra flag -SGX_EMA_SYSTEM passed in. - -``` - -#define SGX_EMA_SYSTEM SGX_EMA_ALLOC_FLAGS(0x80) /* EMA reserved by system */ - -/* - * Initialize an EMA. 
This can be used to setup EMAs to account regions that - * are loaded and initialized with EADD before EINIT. - * @param[in] addr Starting address of the region, page aligned. If NULL is provided, - * then the function will select the starting address. - * @param[in] size Size of the region in multiples of page size in bytes. - * @param[in] flags SGX_EMA_SYSTEM, or SGX_EMA_SYSTEM | SGX_EMA_RESERVE - * bitwise ORed with one of following page types: - * - SGX_EMA_PAGE_TYPE_REG: regular page type. This is the default if not specified. - * - SGX_EMA_PAGE_TYPE_TCS: TCS page. - * - SGX_EMA_PAGE_TYPE_SS_FIRST: the first page in shadow stack. - * - SGX_EMA_PAGE_TYPE_SS_REST: the rest page in shadow stack. - * @param[in] prot permissions, either SGX_EMA_PROT_NONE or a bitwise OR of following with: - * - SGX_EMA_PROT_READ: Pages may be read. - * - SGX_EMA_PROT_WRITE: Pages may be written. - * - SGX_EMA_PROT_EXEC: Pages may be executed. - * @param[in] handler A custom handler for page faults in this region, NULL if - * no custom handling needed. - * @param[in] handler_private Private data for the @handler, which will be passed - * back when the handler is called. - * @retval 0 The operation was successful. - * @retval EACCES Region is outside enclave address space. - * @retval EEXIST Any page in range requested is in use. - * @retval EINVAL Invalid page type, flags, or addr and length are not page aligned. - */ -int mm_init_ema(void *addr, size_t size, int flags, int prot, - sgx_enclave_fault_handler_t handler, - void *handler_private); -/** - * Same as sgx_mm_alloc, SGX_EMA_SYSTEM can be OR'ed with flags to indicate - * that the EMA can not be modified thru public APIs. - */ -int mm_alloc(void *addr, size_t size, uint32_t flags, - sgx_enclave_fault_handler_t handler, void *private_data, void** out_addr); -int mm_dealloc(void *addr, size_t size); -int mm_uncommit(void *addr, size_t size); -int mm_commit(void *addr, size_t size); -int mm_commit_data(void *addr, size_t size, uint8_t *data, int prot); -int mm_modify_type(void *addr, size_t size, int type); -int mm_modify_permissions(void *addr, size_t size, int prot); - -``` - -Internal APIs and Structures -------------------------------------- - -The following are internal functions and structures to be used by the EMM implementation. -They can evolve over time, and are shown here for reference only. - -### Enclave Memory Area (EMA) struct - -Each enclave has a global doubly linked EMA list to keep track of all dynamically -allocated regions in enclave address space (ELRANGE). - -``` -typedef struct _ema_t { - size_t start_addr; // starting address, should be on a page boundary. - size_t size; // in bytes of multiples of page size. - uint32_t alloc_flags; // SGX_EMA_RESERVE, SGX_EMA_COMMIT_NOW, SGX_EMA_COMMIT_ON_DEMAND, - // OR'ed with SGX_EMA_SYSTEM, SGX_EMA_GROWSDOWN, ENA_GROWSUP. - uint64_t si_flags; // SGX_EMA_PROT_NONE, SGX_EMA_PROT_READ |{SGX_EMA_PROT_WRITE, SGX_EMA_PROT_EXEC}. - // Or'd with one of SGX_EMA_PAGE_TYPE_REG, SGX_EMA_PAGE_TYPE_TCS, SGX_EMA_PAGE_TYPE_TRIM. - ema_bit_array* eaccept_map; // bitmap for EACCEPT status, bit 0 in eaccept_map[0] for the page at start address. - // bit i in eaccept_map[j] for page at start_address+(i+j<<3)<<12. - sgx_mutex_t* lock; // lock to prevent concurrent modification. - int transition; // state to indicate whether a transition in progress, e.g page type/permission changes. - sgx_enclave_fault_handler_t - h; // custom PF handler (for EACCEPTCOPY use). - void* hprivate; // private data for handler. 
- _ema_t* next; // next in doubly linked list. - _ema_t* prev; // prev in doubly linked list. -} ema_t; - -``` - **Remarks:** - - Accesses to the list (find, insert, remove EMAs) are synchronized for thread-safety. - - Initial implementation will also have one lock per EMA to synchronize access and - modifications to the same EMA. We may optimize this as needed. - -### SGX primitives - -``` -typedef struct _sec_info_t -{ - uint64_t flags; - uint64_t reserved[7]; -} sec_info_t; - -// EACCEPT -int do_eaccept(const sec_info_t* si, size_t addr); -// EMODPE -int do_emodpe(const sec_info_t* si, size_t addr); -// EACCEPTCOPY -int do_eacceptcopy(const sec_info_t* si, size_t dest, size_t src); - -``` - - -Metadata, File format ---------------------------------------- - -The enclave metadata and file format are runtime specific. A detailed design is -out of scope of this document. - -It is required that the enclave file should include metadata of memory layout -of initial code and data (e.g., program headers and PT_LOAD segments in ELF -file), any reserved region for special purposes, e.g., minimal heap, stack, -TCS areas, SSAs for expected minimal number of threads, etc. The runtime -would read those info to populate the initial EMAs described in the section -above on [Support for EMM Initialization](#support-for-emm-initialization) -The memory layout can also contain an entry for the user range mentioned -above if the enclave intends to dynamically allocate and manage some regions -using the EMM public APIs. diff --git a/sdk/emm/design_docs/images/SGX2_alloc_direct.svg b/sdk/emm/design_docs/images/SGX2_alloc_direct.svg deleted file mode 100644 index 2c5849022..000000000 --- a/sdk/emm/design_docs/images/SGX2_alloc_direct.svg +++ /dev/null @@ -1,213 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - Page-1 - - Sheet.77 - - Object lifeline.11 - CPU-sgx - - Sheet.12 - - - - Sheet.13 - - - - Sheet.14 - - - Sheet.15 - - - - - CPU-sgx - - - Message.50 - mmap(addr, len, PROT_READ | PROT_WRITE, MAP_SHARED|MAP_POPULA... - - - mmap(addr, len, PROT_READ | PROT_WRITE, MAP_SHARED|MAP_POPULATE |MAP_FIXED, fd) - - Object lifeline.24 - Enclave - - Sheet.25 - - - - Sheet.26 - - - - Sheet.27 - - - Sheet.28 - - - - - Enclave - - - Message.29 - OCALL_mmap - - - OCALL_mmap - - Message.30 - EAUG - - - EAUG - - Return Message.53 - address - - - address - - Message.33 - EACCEPT - - - EACCEPT - - Loop fragment - - - - - Sheet.59 - loop - - loop - - Sheet.60 - [for each page] - [for each page] - - - Loop fragment.61 - - - - - Sheet.62 - loop - - loop - - Sheet.63 - [for each page] - [for each page] - - - Self Message.52 - use the new pages - - - use the new pages - - Return Message.68 - - - - Interaction operand - [Kernel decides to EAUG pages now Otherwise this reduces to #... 
- - - [Kernel decides to EAUG pages now Otherwise this reduces to #PF based allocation flow] - - Object lifeline - Untrusted runtime - - Sheet.2 - - - - Sheet.3 - - - - Sheet.4 - - - Sheet.5 - - - - - Untrusted runtime - - - Object lifeline.6 - kernel - - Sheet.7 - - - - Sheet.8 - - - - Sheet.9 - - - Sheet.10 - - - - - kernel - - - - diff --git a/sdk/emm/design_docs/images/SGX2_alloc_pf.svg b/sdk/emm/design_docs/images/SGX2_alloc_pf.svg deleted file mode 100644 index a753a970f..000000000 --- a/sdk/emm/design_docs/images/SGX2_alloc_pf.svg +++ /dev/null @@ -1,220 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - Page-1 - - Sheet.94 - - Loop fragment.61 - - - - - Sheet.62 - loop - - loop - - Sheet.63 - [for each page] - [for each page] - - - Object lifeline - Untrusted runtime - - Sheet.2 - - - - Sheet.3 - - - - Sheet.4 - - - Sheet.5 - - - - - Untrusted runtime - - - Object lifeline.6 - kernel - - Sheet.7 - - - - Sheet.8 - - - - Sheet.9 - - - Sheet.10 - - - - - kernel - - - Object lifeline.11 - CPU-sgx - - Sheet.12 - - - - Sheet.13 - - - - Sheet.14 - - - Sheet.15 - - - - - CPU-sgx - - - Message.50 - mmap(addr, len, PROT_READ | PROT_WRITE, MAP_SHARED|MAP_FIXED ... - - - mmap(addr, len, PROT_READ | PROT_WRITE, MAP_SHARED|MAP_FIXED , fd) - - Object lifeline.24 - Enclave - - Sheet.25 - - - - Sheet.26 - - - - Sheet.27 - - - Sheet.28 - - - - - Enclave - - - Message.29 - OCALL_mmap - - - OCALL_mmap - - Message.30 - EAUG - - - EAUG - - Return Message.32 - address - - - address - - Message.33 - EACCEPT - - - EACCEPT - - Self Message.52 - use the new pages - - - use the new pages - - Asynchronous Message.55 - #PF - - - #PF - - Return Message.53 - - - - Message.79 - ERESUME - - - ERESUME - - Return Message.80 - return to AEP trampoline - - - return to AEP trampoline - - Return Message.83 - - - - Interaction operand - [Kernel decides to EAUG on #PF, otherwise this reduces to dir... - - - [Kernel decides to EAUG on #PF, otherwise this reduces to direct allocation flow with MAP_POPULATE] - - - diff --git a/sdk/emm/design_docs/images/SGX2_eaccept.svg b/sdk/emm/design_docs/images/SGX2_eaccept.svg deleted file mode 100644 index 70692d996..000000000 --- a/sdk/emm/design_docs/images/SGX2_eaccept.svg +++ /dev/null @@ -1,301 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - eacceptcopy - - Object lifeline - Untrusted runtime - - Sheet.2 - - - - Sheet.3 - - - - Sheet.4 - - - Sheet.5 - - - - - Untrusted runtime - - - Object lifeline.6 - kernel - - Sheet.7 - - - - Sheet.8 - - - - Sheet.9 - - - Sheet.10 - - - - - kernel - - - Object lifeline.11 - CPU-sgx - - Sheet.12 - - - - Sheet.13 - - - - Sheet.14 - - - Sheet.15 - - - - - CPU-sgx - - - Message.50 - mmap(addr, len, PROT_READ | PROT_WRITE, MAP_SHARED|MAP_FIXED ... 
[deleted SVG body, remainder: sequence diagram of EAUG-on-#PF dynamic loading with lifelines Untrusted runtime, kernel, CPU-sgx and Enclave; key messages: OCALL_mmap, mmap(addr, len, PROT_READ | PROT_WRITE, MAP_SHARED|MAP_FIXED, fd), EAUG, address, #PF (EPCM.pending), ERESUME / return to AEP trampoline under the operand "[Kernel to EAUG on #PF]", "Execute code not loaded", #PF on read, vDSO callback, ExceptionHandler, ERESUME, retry, OCall_mprotect(RX), mprotect(RX), update PTE.RW->RX, load code page, Set PTE.RW, EACCEPTCOPY(code, RX) load code and set EPCM.RX]
diff --git a/sdk/emm/design_docs/images/SGX2_eaccept2.svg b/sdk/emm/design_docs/images/SGX2_eaccept2.svg
deleted file mode 100644
index 03cdaed3a..000000000
--- a/sdk/emm/design_docs/images/SGX2_eaccept2.svg
+++ /dev/null
@@ -1,278 +0,0 @@
[deleted SVG body: "eacceptcopy2" sequence diagram under the operand "[Kernel directly EAUG]", lifelines Untrusted runtime, kernel, CPU-sgx and Enclave; key messages: OCALL_mmap, mmap(addr, len, PROT_READ | PROT_WRITE, MAP_SHARED|MAP_FIXED, fd), "EAUG for each page EPCM.pending=1", address, #PF (EPCM.pending), EACCEPTCOPY(code, RX) load code and set EPCM.RX, "Execute code not loaded", vDSO callback, ExceptionHandler, ERESUME, retry, OCall_mprotect(RX), mprotect(RX), update PTE.RW->RX, load code page, Set PTE.RW]
diff --git a/sdk/emm/design_docs/images/SGX2_emm_arch.svg b/sdk/emm/design_docs/images/SGX2_emm_arch.svg
deleted file mode 100644
index 05f6904cf..000000000
--- a/sdk/emm/design_docs/images/SGX2_emm_arch.svg
+++ /dev/null
@@ -1,286 +0,0 @@
[deleted SVG body: EMM architecture block diagram; inside the enclave, high-level users (Dynamic Code Loader / Protected Code Loader / SGX-LKL / other), mmap, Malloc (heap manager), Reserved_memory (Intel) and pthread sit on the SGX MM public APIs sgx_mm_{alloc | dealloc | commit | uncommit | commit_data | modify_type | modify_permissions} and the SGX MM private APIs mm_{init | alloc | dealloc | commit | uncommit | commit_data | modify_type | modify_permissions}; the common SGX MM implementation calls the SGX runtime abstraction layer APIs sgx_mm_{un|}register_pfhandler, sgx_mm_is_within_enclave, sgx_mm_{alloc|modify}_ocall and sgx_mutex_*, with Intel/OE/other runtime abstraction and initialization implementations; outside the enclave, the OCall implementation and the SGX Enclave Common Loader reach the kernel SGX driver through the vDSO SGX interface wrapper; blocks are tagged common, runtime specific or maybe common]
diff --git a/sdk/emm/design_docs/images/SGX2_perms.svg b/sdk/emm/design_docs/images/SGX2_perms.svg
deleted file mode 100644
index 842b07140..000000000
--- a/sdk/emm/design_docs/images/SGX2_perms.svg
+++ /dev/null
@@ -1,227 +0,0 @@
[deleted SVG body: permission-change sequence diagram with lifelines Kernel, Untrusted Runtime, Enclave and SGX; Enclave: ocall_mprotect(addr, len, perms) -> Untrusted Runtime: mprotect(addr, len, perms) -> Kernel, loop "for each page if perms<prev perms": EMODPR(perms), update PTE, ETRACK, send IPI; then Enclave, loop "for each page": EMODPE(perms) and, under the operand "[if EMODPR done]", EACCEPT(perms)]
diff --git a/sdk/emm/design_docs/images/SGX2_tcs.svg b/sdk/emm/design_docs/images/SGX2_tcs.svg
deleted file mode 100644
index f07613d25..000000000
--- a/sdk/emm/design_docs/images/SGX2_tcs.svg
+++ /dev/null
@@ -1,216 +0,0 @@
[deleted SVG body: "TCS seq" sequence diagram with lifelines Kernel, Untrusted Runtime, Enclave and CPU-sgx, under the operand "After allocating a regular page"; key messages: mprotect(addr, len, PT_TCS), mprotect(addr, len, PROT_TCS), EMODT(PT_TCS), ETRACK, send IPI, EACCEPT(PT_TCS), init TCS content, then (marked "Runtime Specific") associate an OS thread with the new TCS and ecall from that thread via start_thread(addr, ecall#, ...); a minimal enclave-side sketch of this OCALL-then-EACCEPT handshake follows below]
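A minimal sketch (not a definitive implementation) of the enclave-side half of the handshake shown in the permission-change, TCS-conversion and trim diagrams summarized above and below: the enclave asks the untrusted runtime to mprotect() so the kernel can issue EMODPR/EMODT plus ETRACK and IPIs, then acknowledges with EACCEPT, extending permissions first with EMODPE where needed. It mirrors ema_modify_permissions() and ema_change_to_tcs() in sdk/emm/ema.c further down in this patch; the helper name sketch_reduce_perms is hypothetical, the do_emodpe/do_eaccept/sgx_mm_modify_ocall primitives and the SGX_EMA_* flags are assumed from ema.c and its headers, and the real code additionally handles EMA bookkeeping, node splitting and the case where no EACCEPT is needed (new permissions RWX).

    static int sketch_reduce_perms(size_t page, int old_prot, int new_prot)
    {
        /* Ask the untrusted runtime to mprotect(); for a permission
         * reduction the kernel issues EMODPR, ETRACK and IPIs. */
        int ret = sgx_mm_modify_ocall(page, SGX_PAGE_SIZE,
                                      old_prot | SGX_EMA_PAGE_TYPE_REG,
                                      new_prot | SGX_EMA_PAGE_TYPE_REG);
        if (ret != 0)
            return EFAULT;

        sec_info_t si SGX_SECINFO_ALIGN = {
            (uint64_t)new_prot | SGX_EMA_PAGE_TYPE_REG | SGX_EMA_STATE_PR, 0};

        /* EMODPE extends EPCM permissions from inside the enclave ... */
        do_emodpe(&si, page);
        /* ... and EACCEPT acknowledges the kernel-side EMODPR restriction. */
        return do_eaccept(&si, page);
    }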
diff --git a/sdk/emm/design_docs/images/SGX2_trim.svg b/sdk/emm/design_docs/images/SGX2_trim.svg
deleted file mode 100644
index 6f047ae00..000000000
--- a/sdk/emm/design_docs/images/SGX2_trim.svg
+++ /dev/null
@@ -1,250 +0,0 @@
[deleted SVG body: trim/deallocation sequence diagram with lifelines Kernel, Untrusted Runtime, Enclave and CPU-sgx; Enclave: ocall_mprotect(addr, len, TRIM) -> Untrusted Runtime: mprotect(addr, len, PROT_TRIM) -> Kernel, loop "for each page": EMODT(PT_TRIM), ETRACK, send IPI; Enclave, loop "for each page": EACCEPT(PT_TRIM); then ocall_mprotect(addr, len) -> mprotect(addr, len, PROT_NONE) -> Kernel, loop "for each page of PT_TRIM": EREMOVE, unmap PTE; Enclave records the range as deallocated]
diff --git a/sdk/emm/ema.c b/sdk/emm/ema.c
deleted file mode 100644
index 454095a3c..000000000
--- a/sdk/emm/ema.c
+++ /dev/null
@@ -1,1184 +0,0 @@
-/*
- * Copyright (C) 2011-2021 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -#include -#include -#include -#include -#include -#include -#include "ema.h" -#include "emalloc.h" -#include "bit_array.h" -#include "sgx_mm.h" -#include "sgx_mm_primitives.h" -#include "sgx_mm_rt_abstraction.h" -/* State flags */ -#define SGX_EMA_STATE_PENDING 0x8UL -#define SGX_EMA_STATE_MODIFIED 0x10UL -#define SGX_EMA_STATE_PR 0x20UL -#define UNUSED(x) ((void)(x)) -struct ema_t_ { - size_t start_addr; // starting address, should be on a page boundary - size_t size; // bytes - uint32_t alloc_flags; // EMA_RESERVED, EMA_COMMIT_NOW, EMA_COMMIT_ON_DEMAND, - // OR'ed with EMA_SYSTEM, EMA_GROWSDOWN, ENA_GROWSUP - uint64_t si_flags; // one of EMA_PROT_NONE, READ, READ_WRITE, READ_EXEC, READ_WRITE_EXEC - // Or'd with one of EMA_PAGE_TYPE_REG, EMA_PAGE_TYPE_TCS, EMA_PAGE_TYPE_TRIM - bit_array * eaccept_map; // bitmap for EACCEPT status, bit 0 in eaccept_map[0] for the page at start address - // bit i in eaccept_map[j] for page at start_address+(i+j<<3)<<12 - int transition; // state to indicate whether a transition in progress, e.g page type/permission changes. 
- sgx_enclave_fault_handler_t - handler; // custom PF handler (for EACCEPTCOPY use) - void* private; // private data for handler - ema_t* next; // next in doubly linked list - ema_t* prev; // prev in doubly linked list -}; - -struct ema_root_ { - ema_t *guard; -}; -extern size_t mm_user_base; -extern size_t mm_user_end; -ema_t dummy_user_ema = {.next = &dummy_user_ema, - .prev = &dummy_user_ema}; -ema_root_t g_user_ema_root = {.guard = &dummy_user_ema}; - -ema_t dummy_rts_ema = {.next = &dummy_rts_ema, - .prev = &dummy_rts_ema}; -ema_root_t g_rts_ema_root = {.guard = &dummy_rts_ema}; - -#ifdef TEST -static void dump_ema_node(ema_t *node, size_t index) -{ - printf("------ node #%lu ------\n", index); - printf("start:\t0x%lX\n", node->start_addr); - printf("size:\t0x%lX\n", node->size); -} - -void dump_ema_root(ema_root_t *root) -{ - ema_t *node = root->guard->next; - size_t index = 0; - - while (node != root->guard) { - dump_ema_node(node, index++); - node = node->next; - } -} - -#endif -void destroy_ema_root(ema_root_t *root) -{ - ema_t *node = root->guard->next; - size_t index = 0; - - while (node != root->guard) { - index++; - ema_t* next = node->next; - ema_destroy(node); - node = next; - } -#if 0 - printf("Destroy %lu nodes on the root\n", index); -#endif -} -bool ema_root_empty(ema_root_t* r) -{ - return r->guard == r->guard->next; -} - -size_t ema_root_end(ema_root_t* r) -{ - return r->guard->prev->start_addr + r->guard->prev->size; -} - -#ifdef TEST -size_t ema_base(ema_t *node) -{ - return node->start_addr; -} - -size_t ema_size(ema_t *node) -{ - return node->size; -} -#endif -#ifndef NDEBUG -ema_t *ema_next(ema_t *node) -{ - return node->next; -} -#endif - -uint32_t get_ema_alloc_flags(ema_t *node) -{ - return node->alloc_flags; -} - -uint64_t get_ema_si_flags(ema_t *node) -{ - return node->si_flags; -} - -sgx_enclave_fault_handler_t ema_fault_handler(ema_t* node, void** private_data) -{ - if(private_data) - *private_data = node->private; - return node->handler; -} - - -bool is_ema_transition(ema_t *node) -{ - return node->transition; -} - -static void ema_clone(ema_t *dst, ema_t *src) -{ - memcpy((void *)dst, (void *)src, sizeof(ema_t)); -} - -static bool ema_lower_than_addr(ema_t *ema, size_t addr) -{ - return ((ema->start_addr + ema->size) <= addr); -} - -static bool ema_higher_than_addr(ema_t *ema, size_t addr) -{ - return (ema->start_addr >= addr); -} - -static bool ema_overlap_addr(const ema_t *ema, size_t addr) -{ - if ((addr >= ema->start_addr) && (addr < ema->start_addr + ema->size)) - return true; - return false; -} - -static bool ema_overlap_range(const ema_t *ema, size_t start, size_t end) -{ - if ((end <= ema->start_addr) || (start >= ema->start_addr + ema->size)) - return false; - return true; -} - -int ema_set_eaccept_full(ema_t *node) -{ - if (!node->eaccept_map) { - node->eaccept_map = bit_array_new_set((node->size) >> SGX_PAGE_SHIFT); - if(!node->eaccept_map) - return ENOMEM; - else - return 0; - }else - bit_array_set_all(node->eaccept_map); - return 0; -} - -int ema_clear_eaccept_full(ema_t *node) -{ - if (!node->eaccept_map) { - node->eaccept_map = bit_array_new_reset((node->size) >> SGX_PAGE_SHIFT); - if(!node->eaccept_map) - return ENOMEM; - else - return 0; - }else - bit_array_reset_all(node->eaccept_map); - return 0; -} - - -int ema_set_eaccept(ema_t *node, size_t start, size_t end) -{ - if (!node) { - return EINVAL; - } - - assert (start >= node->start_addr); - assert (end <= node->start_addr + node->size); - size_t pos_begin = (start - node->start_addr) 
>> SGX_PAGE_SHIFT; - size_t pos_end = (end - node->start_addr) >> SGX_PAGE_SHIFT; - - // update eaccept bit map - if (!node->eaccept_map) { - node->eaccept_map = bit_array_new_reset((node->size) >> SGX_PAGE_SHIFT); - if(!node->eaccept_map) - return ENOMEM; - } - bit_array_set_range (node->eaccept_map, - pos_begin, - pos_end - pos_begin); - return 0; -} - -bool ema_page_committed(ema_t *ema, size_t addr) -{ - assert(!(addr%SGX_PAGE_SIZE)); - if (!ema->eaccept_map) { - return false; - } - - return bit_array_test(ema->eaccept_map, - (addr - ema->start_addr) >> SGX_PAGE_SHIFT); -} - -bool ema_exist_in(ema_root_t* root, size_t addr, size_t size) -{ - size_t end = addr + size; - for (ema_t *node = root->guard->next; node != root->guard; node = node->next) { - if (ema_overlap_range(node, addr, end)) { - return true; - } - } - return false; -} - -bool ema_exist(size_t addr, size_t size) -{ - return ema_exist_in(&g_rts_ema_root, addr, size) || - ema_exist_in(&g_user_ema_root, addr, size); -} - -// search for a node whose address range contains 'addr' -ema_t *search_ema(ema_root_t *root, size_t addr) -{ - for (ema_t *node = root->guard->next; node != root->guard; node = node->next) { - if (ema_overlap_addr(node, addr)) { - return node; - } - } - return NULL; -} - -// insert 'new_node' before 'node' -ema_t *insert_ema(ema_t *new_node, ema_t *node) -{ - new_node->prev = node->prev; - new_node->next = node; - node->prev->next = new_node; - node->prev = new_node; - return new_node; -} - -// Remove the 'node' from the list -static ema_t *remove_ema(ema_t *node) -{ - if (!node) - return node; - - // Sanity check pointers for corruption - if ((node->prev->next != node) || - (node->next->prev != node)) { - abort(); - } - - node->prev->next = node->next; - node->next->prev = node->prev; - return node; -} - -void push_back_ema(ema_root_t *root, ema_t *node) -{ - insert_ema(node, root->guard); -} - -// search for a range of nodes containing addresses within [start, end) -// 'ema_begin' will hold the fist ema that has address higher than /euqal to 'start' -// 'ema_end' will hold the node immediately follow the last ema that has address lower than / equal to 'end' -int search_ema_range(ema_root_t *root, size_t start, size_t end, - ema_t **ema_begin, ema_t **ema_end) -{ - ema_t *node = root->guard->next; - - // find the first node that has addr >= 'start' - while ((node != root->guard) && ema_lower_than_addr(node, start)) { - node = node->next; - } - - // empty list or all nodes are beyond [start, end) - if ((node == root->guard) || ema_higher_than_addr(node, end)) { - *ema_begin = NULL; - *ema_end = NULL; - return -1; - } - - *ema_begin = node; - - // find the last node that has addr <= 'end' - while ((node != root->guard) && - (!ema_higher_than_addr(node, end))) { - node = node->next; - } - *ema_end = node; - - return 0; -} -//TODO?do not split bit_arrays, reuse it by keeping ref-count -//and start and end offsets for multiple EMAs -int ema_split(ema_t *ema, size_t addr, bool new_lower, ema_t** ret_node) -{ - //!FIXME: this is only needed for UT - // in real usage in the file, addr always overlap - if (!ema_overlap_addr(ema, addr) || !ret_node) { - return EINVAL; - } - - ema_t *new_node = (ema_t *)emalloc(sizeof(ema_t)); - if (!new_node) { - return ENOMEM; - } - - bit_array *low = NULL, *high = NULL; - if (ema->eaccept_map) { - size_t pos = (addr - ema->start_addr) >> SGX_PAGE_SHIFT; - int ret = bit_array_split(ema->eaccept_map, pos, &low, &high); - if(ret) { - efree(new_node); - return ret; - } - } - - 
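    // bit_array_split() divides the EACCEPT bitmap at the page offset of 'addr' within this EMA; 'low' and 'high' become the maps of the lower and upper EMA set up below.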
//caller does not need free new_node as it is inserted - // and managed in root when this returns - ema_clone(new_node, ema); - - ema_t *lo_ema = NULL, *hi_ema = NULL; - if (new_lower) { - // new node for lower address - lo_ema = new_node; - hi_ema = ema; - insert_ema(new_node, ema); - } else { - lo_ema = ema; - hi_ema = new_node; - insert_ema(new_node, ema->next); - } - - size_t start = ema->start_addr; - size_t size = ema->size; - - lo_ema->start_addr = start; - lo_ema->size = addr - start; - hi_ema->start_addr = addr; - hi_ema->size = size - lo_ema->size; - - if (ema->eaccept_map) { - lo_ema->eaccept_map = low; - hi_ema->eaccept_map = high; - } - *ret_node = new_node; - return 0; -} - -int ema_split_ex(ema_t *ema, size_t start, size_t end, ema_t** new_node) -{ - ema_t *node = ema; - ema_t *tmp_node; - if (start > node->start_addr) { - int ret = ema_split(node, start, false, &tmp_node); - if(ret) return ret; - if(tmp_node) node = tmp_node; - } - tmp_node = NULL; - if (end < (node->start_addr + node->size)) { - int ret = ema_split(node, end, true, &tmp_node); - if(ret) return ret; - if(tmp_node) node = tmp_node; - } - *new_node = node; - return 0; -} - -ema_t *ema_merge(ema_t *lo_ema, ema_t *hi_ema) -{ - return NULL; -} - -static size_t ema_aligned_end(ema_t* ema, size_t align) -{ - size_t curr_end = ema->start_addr + ema->size; - curr_end = ROUND_TO(curr_end, align); - return curr_end; -} - -// Find a free space of size at least 'size' bytes, does not matter where the start is -bool find_free_region(ema_root_t *root, size_t size, - uint64_t align, size_t *addr, ema_t **next_ema) -{ - ema_t *ema_begin = root->guard->next; - ema_t *ema_end = root->guard; - bool is_system = (root == &g_rts_ema_root); - if(ema_begin == ema_end){ - size_t tmp = 0; - if (is_system) - { - // we need at least one node before calling this. 
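    // (i.e. the RTS root must already hold at least one EMA, e.g. one created via mm_init_ema())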
- if(ema_root_empty(&g_rts_ema_root)) - return false;//rts has to be inited at this time - tmp = ema_root_end(&g_rts_ema_root); - }else - { - tmp = mm_user_base; - } - tmp = ROUND_TO(tmp, align); - if(!sgx_mm_is_within_enclave((void*)tmp, size)) - return false; - *addr = tmp; - *next_ema = ema_end; - return true; - } - - // iterate over the ema node within specified range - ema_t *curr = ema_begin; - ema_t *next = curr->next; - - while (next != ema_end) { - size_t curr_end = ema_aligned_end(curr, align); - size_t free_size = next->start_addr - curr_end; - if (free_size >= size) { - *next_ema = next; - *addr = curr_end; - return true; - } - curr = next; - next = curr->next; - } - - // check the last ema node - if( sgx_mm_is_within_enclave((void*)(curr->start_addr + curr->size), size)) - { - *next_ema = next; - *addr = ema_aligned_end(curr, align); - size_t end = *addr + size; - if( (is_system && (end <=mm_user_base || *addr > mm_user_base)) - || (!is_system && end < mm_user_end)) - return true; - } - // we look for space in front, but do not mix user with rts - size_t tmp = ema_begin->start_addr - size; - tmp = TRIM_TO(tmp, align); - if (!is_system) - { - if (mm_user_base < tmp){ - //we found gap bigger enough - *addr = tmp; - *next_ema = ema_begin; - return true; - } - }else - {//rts - if (sgx_mm_is_within_enclave((void*)tmp, size)) - { - *addr = tmp; - *next_ema = ema_begin; - return true; - } - } - *next_ema = NULL; - *addr = 0; - return false; -} - -bool find_free_region_at(ema_root_t *root, size_t addr, size_t size, ema_t **next_ema) -{ - if( !sgx_mm_is_within_enclave((void*)(addr), size)) return false; - ema_t *node = root->guard->next; - while (node != root->guard) { - if (node->start_addr >= (addr + size)) { - *next_ema = node; - return true; - } - if (addr >= (node->start_addr + node->size)) { - node = node->next; - } else { - break; - } - } - if (node == root->guard) { - *next_ema = node; - return true; - } - - *next_ema = NULL; - return false; -} - -ema_t *ema_new(size_t addr, size_t size, uint32_t alloc_flags, - uint64_t si_flags, - sgx_enclave_fault_handler_t handler, - void *private_data, - ema_t* next_ema) -{ - ema_t *node = (ema_t *)emalloc(sizeof(ema_t)); - if (!node) - return NULL; - *node = (ema_t){ - addr, - size, - alloc_flags, - si_flags, - NULL, - 0, - handler, - private_data, - NULL,//next - NULL,//pev - }; - node = insert_ema(node, next_ema); - return node; -} - -void ema_destroy(ema_t *ema) -{ - remove_ema(ema); - if (ema->eaccept_map) - { - bit_array_delete(ema->eaccept_map); - } - efree(ema); -} - -static int eaccept_range_forward(const sec_info_t *si, size_t start, size_t end) -{ - while (start < end) - { - if (do_eaccept(si, start)) - abort(); - start += SGX_PAGE_SIZE; - } - return 0; -} - -static int eaccept_range_backward(const sec_info_t *si, size_t start, size_t end) -{ - assert(start < end); - do - { - end -= SGX_PAGE_SIZE; - if (do_eaccept(si, end)) - abort(); - } while (end > start); - return 0; -} - -int do_commit(size_t start, size_t size, uint64_t si_flags, bool grow_up) -{ - sec_info_t si SGX_SECINFO_ALIGN = {si_flags | SGX_EMA_STATE_PENDING, 0}; - int ret = -1; - - if (grow_up) { - ret = eaccept_range_backward(&si, start, start + size); - } else { - ret = eaccept_range_forward(&si, start, start + size); - } - - return ret; -} - -int ema_do_commit(ema_t *node, size_t start, size_t end) -{ - assert(node->eaccept_map); //TODO: refactor bit_array_test/set - size_t real_start = MAX(start, node->start_addr); - size_t real_end = MIN(end, node->start_addr + 
node->size); - - sec_info_t si SGX_SECINFO_ALIGN = { SGX_EMA_PAGE_TYPE_REG | - SGX_EMA_PROT_READ_WRITE | - SGX_EMA_STATE_PENDING, - 0}; - - for(size_t addr = real_start; addr < real_end; addr += SGX_PAGE_SIZE) - { - size_t pos = (addr - node->start_addr) >> SGX_PAGE_SHIFT; - // only commit for uncommitted page - if (!bit_array_test(node->eaccept_map, pos)) { - int ret = do_eaccept(&si, addr); - if (ret != 0) { - return ret; - } - bit_array_set(node->eaccept_map, pos); - } - } - - return 0; -} - -static int ema_can_commit(ema_t* first, ema_t* last, - size_t start, size_t end) -{ - ema_t* curr = first; - size_t prev_end = first->start_addr; - while (curr != last) { - if (prev_end != curr->start_addr)//there is a gap - return EINVAL; - - if (!(curr->si_flags & (SGX_EMA_PROT_WRITE) )) - return EACCES; - - if (!(curr->si_flags & (SGX_EMA_PAGE_TYPE_REG) )) - return EACCES; - - if ((curr->alloc_flags & (SGX_EMA_RESERVE) )) - return EACCES; - - prev_end = curr->start_addr + curr->size; - curr = curr->next; - } - if (prev_end < end) return EINVAL; - return 0; -} - -int ema_do_commit_loop(ema_t *first, ema_t *last, size_t start, size_t end) -{ - int ret = ema_can_commit(first, last, start, end); - if(ret) return ret; - - ema_t *curr = first, *next = NULL; - - while (curr != last) { - next = curr->next; - ret = ema_do_commit(curr, start, end); - if (ret != 0) { - return ret; - } - curr = next; - } - return ret; -} - -static int ema_do_uncommit_real(ema_t *node, size_t real_start, size_t real_end, - int prot) -{ - int type = node->si_flags & SGX_EMA_PAGE_TYPE_MASK; - uint32_t alloc_flags = node->alloc_flags & SGX_EMA_ALLOC_FLAGS_MASK; - - // ignore if ema is in reserved state - if (alloc_flags & SGX_EMA_RESERVE) { - return 0; - } - - assert(node->eaccept_map); //TODO: refactor bit_array_test/set - - sec_info_t si SGX_SECINFO_ALIGN = { SGX_EMA_PAGE_TYPE_TRIM | - SGX_EMA_STATE_MODIFIED, - 0}; - - while (real_start < real_end) - { - size_t block_start = real_start; - while (block_start < real_end ){ - size_t pos = (block_start - node->start_addr) >> SGX_PAGE_SHIFT; - if (bit_array_test(node->eaccept_map, pos)) { - break; - } else { - block_start += SGX_PAGE_SIZE; - } - } - if (block_start == real_end) - break; - - size_t block_end = block_start + SGX_PAGE_SIZE; - while (block_end < real_end) { - size_t pos = (block_end - node->start_addr) >> SGX_PAGE_SHIFT; - if (bit_array_test(node->eaccept_map, pos)) { - block_end += SGX_PAGE_SIZE; - } - else - break; - } - assert(block_end > block_start); - // only for committed page - size_t block_length = block_end - block_start; - int ret = sgx_mm_modify_ocall(block_start, block_length, - prot | type, prot | SGX_EMA_PAGE_TYPE_TRIM); - if (ret != 0) { - return EFAULT; - } - - ret = eaccept_range_forward(&si, block_start, block_end); - if (ret != 0) { - return ret; - } - bit_array_reset_range(node->eaccept_map, (block_start - node->start_addr) >> SGX_PAGE_SHIFT, block_length >> SGX_PAGE_SHIFT); - //eaccept trim notify - ret =sgx_mm_modify_ocall(block_start, block_length, - prot | SGX_EMA_PAGE_TYPE_TRIM, - prot | SGX_EMA_PAGE_TYPE_TRIM); - if(ret) return EFAULT; - - real_start = block_end; - } - return 0; -} - -int ema_do_uncommit(ema_t *node, size_t start, size_t end) -{ - size_t real_start = MAX(start, node->start_addr); - size_t real_end = MIN(end, node->start_addr + node->size); - int prot = node->si_flags & SGX_EMA_PROT_MASK; - return ema_do_uncommit_real(node, real_start, real_end, prot); -} -static int ema_can_uncommit(ema_t* first, ema_t* last, - size_t start, 
size_t end) -{ - ema_t* curr = first; - size_t prev_end = first->start_addr; - while (curr != last) { - if (prev_end != curr->start_addr)//there is a gap - return EINVAL; - - if ((curr->alloc_flags & (SGX_EMA_RESERVE) )) - return EACCES; - - //! TODO check transition, TRIM type - // Those are not needed due to global lock - prev_end = curr->start_addr + curr->size; - curr = curr->next; - } - if (prev_end < end) return EINVAL; - return 0; -} - - -int ema_do_uncommit_loop(ema_t *first, ema_t *last, size_t start, size_t end) -{ - int ret = ema_can_uncommit(first, last, start, end); - if(ret) return ret; - - ema_t *curr = first, *next = NULL; - while (curr != last) { - next = curr->next; - ret = ema_do_uncommit(curr, start, end); - if (ret != 0) { - return ret; - } - curr = next; - } - return ret; -} - -int ema_do_dealloc(ema_t *node, size_t start, size_t end) -{ - int alloc_flag = node->alloc_flags & SGX_EMA_ALLOC_FLAGS_MASK; - - if (alloc_flag & SGX_EMA_RESERVE) - {//!TODO need check range, only dealloc [start,end) - ema_destroy(node); - return 0; - } - assert(node->eaccept_map);//TODO: refactor test/set bit_array - size_t real_start = MAX(start, node->start_addr); - size_t real_end = MIN(end, node->start_addr + node->size); - - //clear protections flag - int ret = ema_do_uncommit_real (node, real_start, real_end, SGX_EMA_PROT_NONE); - if (ret != 0) - return ret; - - // potential ema split - ema_t *tmp_node = NULL; - if (real_start > node->start_addr) { - ret = ema_split(node, real_start, false, &tmp_node); - if(ret) return ret; - assert(tmp_node); - node = tmp_node; - } - - tmp_node = NULL; - if (real_end < (node->start_addr + node->size)) { - ret = ema_split(node, real_end, true, &tmp_node); - if(ret) return ret; - assert(tmp_node); - node = tmp_node; - } - - ema_destroy(node); - return 0; -} - -int ema_do_dealloc_loop(ema_t *first, ema_t *last, size_t start, size_t end) -{ - int ret = 0; - ema_t *curr = first, *next = NULL; - - while (curr != last) { - next = curr->next; - ret = ema_do_dealloc(curr, start, end); - if (ret != 0) { - return ret; - } - curr = next; - } - return ret; -} - -// change the type of the page to TCS -int ema_change_to_tcs(ema_t *node, size_t addr) -{ - int prot = node->si_flags & SGX_EMA_PROT_MASK; - int type = node->si_flags & SGX_EMA_PAGE_TYPE_MASK; - - if (type == SGX_EMA_PAGE_TYPE_TCS) { - return 0; - } - if (type != SGX_EMA_PAGE_TYPE_REG) - return EACCES; - - if (!(prot & SGX_EMA_PROT_READ_WRITE)) - return EPERM; - - if (node->transition) return EBUSY; - - // page need to be already committed - size_t pos = (addr - node->start_addr) >> SGX_PAGE_SHIFT; - if (!node->eaccept_map || !bit_array_test(node->eaccept_map, pos)) { - return EACCES; - } - node->transition = 1; - int ret = sgx_mm_modify_ocall(addr, SGX_PAGE_SIZE, prot | type, - prot | SGX_EMA_PAGE_TYPE_TCS); - if (ret != 0) { - ret = EFAULT; - goto fail; - } - - sec_info_t si SGX_SECINFO_ALIGN = {SGX_EMA_PAGE_TYPE_TCS | SGX_EMA_STATE_MODIFIED, 0}; - if (do_eaccept(&si, addr) != 0) { - abort(); - } - - // operation succeeded, update ema node: state update, split - ema_t *tcs = NULL; - ret = ema_split_ex(node, addr, addr + SGX_PAGE_SIZE, &tcs); - if(ret) goto fail; - assert(tcs); //ema_split_ex should not return NULL if node!=NULL - - tcs->si_flags = (tcs->si_flags - & (uint64_t)(~SGX_EMA_PAGE_TYPE_MASK) - & (uint64_t)(~SGX_EMA_PROT_MASK)) - | SGX_EMA_PAGE_TYPE_TCS - | SGX_EMA_PROT_NONE; - tcs->transition = 0; -fail: - node->transition = 0; - return ret; -} - -int ema_modify_permissions(ema_t *node, size_t 
start, size_t end, int new_prot) -{ - int prot = node->si_flags & SGX_EMA_PROT_MASK; - int type = node->si_flags & SGX_EMA_PAGE_TYPE_MASK; - if (prot == new_prot) return 0; - - size_t real_start = MAX(start, node->start_addr); - size_t real_end = MIN(end, node->start_addr + node->size); - - - node->transition = 1; - int ret = sgx_mm_modify_ocall(real_start, real_end - real_start, - prot | type, new_prot | type); - if (ret != 0) { - ret = EFAULT; - goto fail; - } - - sec_info_t si SGX_SECINFO_ALIGN = {(uint64_t)new_prot | SGX_EMA_PAGE_TYPE_REG | SGX_EMA_STATE_PR, 0}; - - for(size_t page = real_start; page < real_end; page += SGX_PAGE_SIZE) - { - do_emodpe(&si, page); - - // new permission is RWX, no EMODPR needed in untrusted part, hence no EACCEPT - if ((new_prot & (SGX_EMA_PROT_WRITE | SGX_EMA_PROT_EXEC)) != - (SGX_EMA_PROT_WRITE | SGX_EMA_PROT_EXEC)) { - ret = do_eaccept(&si, page); - if (ret) goto fail; - } - } - - // all involved pages complete permission change, deal with potential - // ema node split and update permission state - node->transition = 0; - if (real_start > node->start_addr) { - ema_t *tmp_node = NULL; - ret = ema_split(node, real_start, false, &tmp_node); - if (ret) goto fail; - assert(tmp_node); - node = tmp_node; - } - - if (real_end < (node->start_addr + node->size)) { - ema_t *tmp_node = NULL; - ret = ema_split(node, real_end, true, &tmp_node); - if (ret) goto fail; - assert(tmp_node); - node = tmp_node; - } - - // 'node' is the ema node to update permission for - node->transition = 1; - node->si_flags = (node->si_flags - & (uint64_t) (~SGX_EMA_PROT_MASK)) - | (uint64_t) new_prot; - if (new_prot == SGX_EMA_PROT_NONE) - {//do mprotect if target is PROT_NONE - ret = sgx_mm_modify_ocall(real_start, real_end - real_start, - type | SGX_EMA_PROT_NONE, type | SGX_EMA_PROT_NONE); - if (ret) - ret = EFAULT; - } -fail: - node->transition = 0; - return ret; -} - -static int ema_can_modify_permissions(ema_t* first, ema_t* last, - size_t start, size_t end) -{ - ema_t* curr = first; - size_t prev_end = first->start_addr; - while (curr != last) { - if (prev_end != curr->start_addr)//there is a gap - return EINVAL; - - if (!(curr->si_flags & (SGX_EMA_PAGE_TYPE_REG) )) - return EPERM; - - if ((curr->alloc_flags & (SGX_EMA_RESERVE) )) - return EPERM; - - if (curr->transition) return EBUSY; - - size_t real_start = MAX(start, curr->start_addr); - size_t real_end = MIN(end, curr->start_addr + curr->size); - - size_t pos_begin = (real_start - curr->start_addr) >> SGX_PAGE_SHIFT; - size_t pos_end = (real_end - curr->start_addr) >> SGX_PAGE_SHIFT; - if (!curr->eaccept_map || - !bit_array_test_range(curr->eaccept_map, pos_begin, pos_end - pos_begin)) { - return EINVAL; - } - - prev_end = curr->start_addr + curr->size; - curr = curr->next; - } - if (prev_end < end) return EINVAL; - return 0; -} - -static int ema_modify_permissions_loop_nocheck(ema_t *first, ema_t *last, size_t start, - size_t end, int prot) -{ - int ret = 0; - ema_t *curr = first, *next = NULL; - while (curr != last) { - next = curr->next; - ret = ema_modify_permissions(curr, start, end, prot); - if (ret != 0) { - return ret; - } - curr = next; - } - return ret; -} - -int ema_modify_permissions_loop(ema_t *first, ema_t *last, size_t start, - size_t end, int prot) -{ - int ret = ema_can_modify_permissions(first, last, start, end); - if (ret) return ret; - - return ema_modify_permissions_loop_nocheck(first, last, start, end, prot); -} - -static int ema_can_commit_data(ema_t* first, ema_t* last, - size_t start, size_t end) -{ - 
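    /* Committing data requires a gap-free range of writable, REG-type,
     * COMMIT_ON_DEMAND (and non-RESERVE) EMAs in which no page of
     * [start, end) has been EACCEPTed yet; otherwise EACCES/EINVAL. */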
ema_t* curr = first; - size_t prev_end = first->start_addr; - while (curr != last) { - if (prev_end != curr->start_addr)//there is a gap - return EINVAL; - - if (!(curr->si_flags & (SGX_EMA_PROT_WRITE) )) - return EACCES; - - if (!(curr->si_flags & (SGX_EMA_PAGE_TYPE_REG) )) - return EACCES; - - if ((curr->alloc_flags & (SGX_EMA_RESERVE) )) - return EACCES; - - if (!(curr->alloc_flags & (SGX_EMA_COMMIT_ON_DEMAND ))) - return EINVAL; - - if (curr->eaccept_map) - { - size_t real_start = MAX(start, curr->start_addr); - size_t real_end = MIN(end, curr->start_addr + curr->size); - size_t pos_begin = (real_start - curr->start_addr) >> SGX_PAGE_SHIFT; - size_t pos_end = (real_end - curr->start_addr) >> SGX_PAGE_SHIFT; - - if(bit_array_test_range_any(curr->eaccept_map, pos_begin, pos_end - pos_begin)) - return EINVAL; - } - prev_end = curr->start_addr + curr->size; - curr = curr->next; - } - if (prev_end < end) return EINVAL; - return 0; -} - -int ema_do_commit_data(ema_t *node, size_t start, size_t end, uint8_t *data, int prot) -{ - size_t addr = start; - size_t src = (size_t)data; - sec_info_t si SGX_SECINFO_ALIGN = {(uint64_t)prot | SGX_EMA_PAGE_TYPE_REG, 0}; - - while (addr < end) - { - int ret = do_eacceptcopy(&si, addr, src); - if (ret != 0) { - return EFAULT; - } - addr += SGX_PAGE_SIZE; - src += SGX_PAGE_SIZE; - } - return ema_set_eaccept(node, start, end); -} - -int ema_do_commit_data_loop(ema_t *first, ema_t * last, size_t start, size_t end, - uint8_t *data, int prot) -{ - int ret = 0; - ret = ema_can_commit_data(first, last, start, end); - if (ret) return ret; - - ema_t *curr = first; - while (curr != last) {//there is no split in this loop - size_t real_start = MAX(start, curr->start_addr); - size_t real_end = MIN(end, curr->start_addr + curr->size); - uint8_t* real_data = data + real_start - start; - ret = ema_do_commit_data(curr, real_start, real_end, real_data, prot); - if (ret != 0) { - return ret; - } - curr = curr->next; - } - - ret = ema_modify_permissions_loop_nocheck(first, last, start, end, prot); - return ret; -} - -ema_t* ema_realloc_from_reserve_range(ema_t* first, ema_t* last, - size_t start, size_t end, - uint32_t alloc_flags, uint64_t si_flags, - sgx_enclave_fault_handler_t handler, - void *private_data) -{ - assert(first != NULL); - assert(last != NULL); - ema_t* curr = first; - assert(first->start_addr < end); - assert(last->prev->start_addr + last->prev->size > start); - //fail on any nodes not reserve or any gaps - size_t prev_end = first->start_addr; - while (curr != last) - { - if (prev_end != curr->start_addr)//there is a gap - return NULL; - if (curr->alloc_flags & SGX_EMA_RESERVE) { - prev_end = curr->start_addr + curr->size; - curr = curr->next; - } - else - return NULL; - } - - int ret = 0; - if (start > first->start_addr){ - ret = ema_split(first, start, false, &first); - if (ret) return NULL; - } - if (end < last->prev->start_addr + last->prev->size){ - ret = ema_split(last->prev, end, false, &last); - if (ret) return NULL; - } - - assert(first->alloc_flags & SGX_EMA_RESERVE); - assert(!first->eaccept_map); - - curr = first; - while(curr != last) - { - ema_t* next = curr->next; - ema_destroy(curr); - curr = next; - } - - ema_t* new_node = ema_new(start, end - start, - alloc_flags, si_flags, - handler, private_data, last); - return new_node; -} - -int ema_do_alloc(ema_t* node) -{ - uint32_t alloc_flags = node->alloc_flags; - if (alloc_flags & SGX_EMA_RESERVE) { - return 0; - } - - size_t tmp_addr = node->start_addr; - size_t size = node->size; - int ret = 
sgx_mm_alloc_ocall(tmp_addr, size, - (int)(node->si_flags & SGX_EMA_PAGE_TYPE_MASK), (int)alloc_flags); - if (ret) { - ret = EFAULT; - return ret; - } - - if (alloc_flags & SGX_EMA_COMMIT_NOW) { - int grow_up = (alloc_flags & SGX_EMA_GROWSDOWN) ? 0 : 1; - ret = do_commit(tmp_addr, size, node->si_flags, grow_up); - if (ret) { - return ret; - } - } - - if(alloc_flags & SGX_EMA_COMMIT_NOW) - ret = ema_set_eaccept_full(node); - else - ret = ema_clear_eaccept_full(node); - - return ret; -} diff --git a/sdk/emm/emalloc.c b/sdk/emm/emalloc.c deleted file mode 100644 index 10f594191..000000000 --- a/sdk/emm/emalloc.c +++ /dev/null @@ -1,524 +0,0 @@ -/* - * Copyright (C) 2011-2022 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ -#include "emalloc.h" -#include "ema.h" // SGX_PAGE_SIZE -#include "sgx_mm.h" //sgx_mm_alloc -#include -#include -#include -/* - * This file implements a Simple allocator for EMM internal memory - * It maintains a list of reserves, dynamically added on - * demand using sgx_mm_alloc recursively when reserve runs below - * a threshold. - */ - - -/** - * Meta reserve is only used to allocate EMAs for - * "reserve" areas used by emalloc. - * 16 pages would be enough to create enough reserves - * to be used to allocate bit maps for roughly 64T EPC - */ -#define META_RESERVE_SIZE 0x10000ULL -static uint8_t meta_reserve[META_RESERVE_SIZE]; -static size_t meta_used; -/** - * initial reserve size - * TODO: make it configurable by RTS - */ -#define initial_reserve_size 0x10000ULL - -// this is enough for bit map of an 8T EMA -static const size_t max_emalloc_size = 0x10000000ULL; - -/* Blocks of memory managed. 
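 * Each block carries an 8-byte header holding (block size | alloc bit):
 * the low bit is 1 while the block is in use and the remaining bits give
 * the total block size, a multiple of 8; e.g. emalloc(24) occupies a
 * 32-byte block whose header reads 0x21 until it is freed.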
- * The allocator put these fields at the front - * of the block when a memory block is freed - * minimal allocation size is 8 bytes - * 8 bytes of header is overhead - */ -typedef struct _block -{ - uint64_t header; // size | alloc_mask - union { - char* payload[0]; - struct _block *next_prev[2]; /* used only when this block is free - * next_prev[0] points to next free - * block, next_prev[1] points to prev - * free block if this one is 16 bytes+ - */ - }; -} block_t; - -#define num_exact_list 0x100 -size_t header_size = sizeof(uint64_t); -#define exact_match_increment 0x8 -#define min_block_size 0x10 //include 8-byte header -static const size_t max_exact_size = min_block_size + exact_match_increment * (num_exact_list -1); -static block_t* exact_block_list[num_exact_list]; - -// the least significant bit in block header -// 1 == allocated/in-use, 0 == free -static const uint64_t alloc_mask = 1ULL; -//block size align to 8 bytes -uint64_t size_mask = ~((uint64_t)(exact_match_increment-1)); -// We don't expect many large blocks -// !TODO: optimize if needed -static block_t* large_block_list = NULL; - -block_t* payload_to_block (void* p) -{ - return (block_t*) (((size_t)p) - header_size); -} - -void* block_to_payload(block_t* b) -{ - return (void *) (b->payload); -} - -bool is_alloced(block_t* b) -{ - return alloc_mask & b->header; -} - -uint64_t block_size(block_t* b) -{ - return b->header & size_mask; -} - -size_t block_end(block_t* b) -{ - return (size_t)(b) + block_size(b); -} -#ifndef NDEBUG -size_t num_free_blocks = 0; -#endif -/* - * A reserve is a continuous block of - * memory committed for emalloc purpose. - */ -typedef struct _mm_reserve -{ - size_t base; - size_t size; - size_t used; - struct _mm_reserve* next; -} mm_reserve_t; - -static mm_reserve_t* reserve_list = NULL; - -static mm_reserve_t* find_used_in_reserve(size_t addr, size_t size) -{ - if (size == 0) return NULL; - mm_reserve_t* r = reserve_list; - while (r) - { - if (addr >= r->base && - addr + size <= r->base + r->used) - return r; - r = r->next; - } - return NULL; -} - -static size_t get_list_idx(size_t size) -{ - assert(size % exact_match_increment == 0); - if(size < min_block_size) return 0; - size_t list = (size - min_block_size)/exact_match_increment; - assert(list < num_exact_list); - return list; -} - -static void remove_from_list(block_t* b, block_t** list_head) -{ - size_t bsize = block_size(b); - if(b == *list_head) - { - *list_head = b->next_prev[0]; - if((*list_head) && bsize > min_block_size ) - (*list_head)->next_prev[1] = NULL; - } - else - { - block_t* prev = NULL; - if (bsize > min_block_size) - prev = b->next_prev[1]; - block_t* next = b->next_prev[0]; - if(prev) - prev->next_prev[0] = next; - if(next) - next->next_prev[1] = prev; - } -} -static void remove_from_lists(block_t* b) -{ - size_t bsize = block_size(b); - if(bsize > max_exact_size) - remove_from_list(b, &large_block_list); - else - { - size_t l = get_list_idx(bsize); - remove_from_list(b, &exact_block_list[l]); - } -} - -static void prepend_to_list(block_t* b, block_t** head) -{ - b->next_prev[0] = *head; - if ((*head) && block_size(*head) > min_block_size) - { - (*head)->next_prev[1] = b; - } - *head = b; -} - -static void put_exact_block(block_t* b) -{ - size_t list = get_list_idx(block_size(b)); - prepend_to_list(b, &exact_block_list[list]); -#ifndef NDEBUG - num_free_blocks++; -#endif -} - -static block_t* neighbor_right(block_t* me) -{ - size_t end = block_end(me); - mm_reserve_t* r1 = find_used_in_reserve((size_t)me, end); - if 
(!r1) return NULL; - if(end == r1->base + r1->used) return NULL; - mm_reserve_t* r2 = find_used_in_reserve(end, block_size((block_t*)end)); - if (r1 != r2) return NULL; - return (block_t*) end; -} - -//!TODO merge with left neighbor -// which requires scanning or footer -static block_t* possibly_merge(block_t* b) -{ - block_t* nr = neighbor_right(b); - if (!nr) return b; - if (is_alloced(nr)) return b; - remove_from_lists(nr); - b->header += block_size(nr); -#ifndef NDEBUG - num_free_blocks--; -#endif - return possibly_merge(b); -} - -static void put_free_block(block_t* e) -{ - if (block_size(e) <= (size_t)max_exact_size) - { - put_exact_block(e); - return; - } - prepend_to_list(e, &large_block_list); -#ifndef NDEBUG - num_free_blocks++; -#endif -} - -static block_t* split_free_block(block_t* b, size_t s) -{ - size_t remain = b->header - s; - assert(remain >= (size_t)min_block_size); - b->header = s; - block_t* new_b = (block_t*)((uint8_t*)b+s); - new_b->header = remain; - return new_b; -} - -static block_t* get_exact_match(size_t bsize) -{ - size_t list = get_list_idx(bsize); - if (exact_block_list[list] == NULL) return NULL; - block_t* ret = exact_block_list[list]; - exact_block_list[list] = ret->next_prev[0]; - if (list > 0 && exact_block_list[list]) - exact_block_list[list]->next_prev[1] = NULL; -#ifndef NDEBUG - num_free_blocks--; -#endif - return ret; -} - -static block_t* get_free_block(size_t bsize) -{ - if(bsize <= max_exact_size) - return get_exact_match(bsize); - - if (large_block_list == NULL) - return NULL; - - block_t *tmp = large_block_list; - block_t *best = NULL; - - //find best match - while(tmp != NULL) - { - if(tmp->header >= bsize) - { - if (!best) - { - best = tmp; - } - else - if(best->header > tmp->header) - { - best = tmp; - } - } - tmp = (block_t *)tmp->next_prev[0]; - } - - if(!best) return NULL; - remove_from_list(best, &large_block_list); - - if(best->header >= (bsize + min_block_size)) - { - block_t* tail = split_free_block(best, bsize); - put_free_block (tail); - } - // !TODO optimize for large allocations - // Note: EMA objects are 80 bytes - // bit_arrays are mostly small except for really large EMAs -#ifndef NDEBUG - num_free_blocks--; -#endif - return best; -} - - -static block_t* get_large_block_end_at(size_t addr) -{ - if (large_block_list == NULL) - return NULL; - block_t *tmp = large_block_list; - - while (tmp != NULL) - { - if((((size_t)tmp) + tmp->header) == addr) - { - remove_from_list(tmp, &large_block_list); - return tmp; - } - tmp = tmp->next_prev[0]; - } - return NULL; -} - -static void merge_large_blocks_to_reserve(mm_reserve_t* r) -{ - size_t used_end = r->base + r->used; - block_t *merge = get_large_block_end_at (used_end); - while (merge != NULL) - { -#ifndef NDEBUG - num_free_blocks--; -#endif - used_end -= merge->header; - merge = get_large_block_end_at (used_end); - } - r->used = used_end - r->base; - return; -} - - -static void new_reserve (void* base, size_t rsize) -{ - mm_reserve_t *reserve = (mm_reserve_t*) base; - size_t head_size = sizeof(mm_reserve_t); - reserve->base = (size_t)(base) + head_size; - reserve->used = 0; - reserve->size = rsize - head_size; - reserve->next = reserve_list; - reserve_list = reserve; -} - -static block_t* alloc_from_reserve(size_t bsize) -{ - mm_reserve_t* r = reserve_list; - size_t ret = 0; - while (r) - { - if (r->size - r->used >= bsize) - { - ret = r->base + r->used; - r->used += bsize; - break; - } - r = r->next; - } - return (block_t*)ret; -} - -static bool adding_reserve = false; -static size_t 
chunk_size = initial_reserve_size; -static const size_t guard_size = 0x8000ULL; - -static int add_reserve (size_t rsize) -{ - void* base = NULL; - if(adding_reserve) - return 0; - chunk_size = chunk_size > rsize? chunk_size : rsize; - // this will call back to emalloc and efree. - // set the flag to avoid infinite loop - adding_reserve = true; - //!TODO - //create a separate internal API to remove circular calls - int ret = sgx_mm_alloc(NULL, chunk_size + 2*guard_size, SGX_EMA_RESERVE, - NULL, NULL, &base); - if (ret) - return ret; - ret = sgx_mm_alloc((void*)((size_t)base + guard_size), chunk_size, - SGX_EMA_COMMIT_ON_DEMAND, NULL, NULL, &base); - if(ret) - return ret; - - sgx_mm_commit(base, rsize); - new_reserve(base, chunk_size); - chunk_size = chunk_size * 2; //double next time - if (chunk_size > max_emalloc_size) - chunk_size = max_emalloc_size; - - adding_reserve = false; - - return 0; -} - -static void* alloc_from_meta(size_t bsize) -{ - if (meta_used + bsize> META_RESERVE_SIZE) return NULL; - block_t* b = (block_t*) (&meta_reserve[meta_used]); - meta_used += bsize; - b->header = bsize | alloc_mask; - return block_to_payload(b); -} - -void emalloc_init() -{ - for (int i = 0; i < num_exact_list; i++) - { - exact_block_list[i] = NULL; - } - if (add_reserve(initial_reserve_size)) abort(); -} - -// Single thread only. -// Caller holds mm_lock -void* emalloc(size_t size) -{ - size_t bsize = ROUND_TO(size + header_size, exact_match_increment); - if (bsize < min_block_size) - bsize = min_block_size; - if(adding_reserve) // called back from add_reserve - return alloc_from_meta(bsize); - - block_t* b = get_free_block(bsize); - - if (b!= NULL) - { - b->header = bsize | alloc_mask; - return block_to_payload(b); - } - - b = alloc_from_reserve (bsize); - if (!b) - { - size_t new_reserve_size = - ROUND_TO(bsize + sizeof(mm_reserve_t), - initial_reserve_size); - if (add_reserve(new_reserve_size)) - return NULL; - b = alloc_from_reserve(bsize); - if(!b)//should never happen - return NULL; - } - - b->header = bsize | alloc_mask; - return block_to_payload(b); -} - - -static block_t* reconfigure_block(block_t* b){ - b->header = b->header & size_mask; - b->next_prev[0] = NULL; - if (b->header > min_block_size) - b->next_prev[1] = NULL; - - b = possibly_merge(b); - return b; -} -/* - * This is an internal interface only used - * by emm, intentionally crash for any error or - * inconsistency - */ -void efree(void* payload) -{ - block_t *b = payload_to_block(payload); - size_t bstart = (size_t)b; - size_t bsize = block_size(b); - if (bstart < (size_t)(&meta_reserve[META_RESERVE_SIZE]) - && bstart + bsize >(size_t)(&meta_reserve[0])) - { - if (adding_reserve) - { //we don't expect a lot of free blocks allocated - // in meta reserve. Do nothing now - assert (bstart >= (size_t)(&meta_reserve[0])); - assert (bstart + bsize <= (size_t)(&meta_reserve[META_RESERVE_SIZE])); - return; - } - else - abort(); - } - // normal blocks - mm_reserve_t* r = find_used_in_reserve((size_t)b, block_size(b)); - if (!r) - abort(); - b = reconfigure_block(b); - size_t end = block_end(b); - if ((end - r->base) == r->used) - { - r->used -= b->header; - merge_large_blocks_to_reserve(r); - return; - } - - put_free_block(b); - return; -} - diff --git a/sdk/emm/emm_private.c b/sdk/emm/emm_private.c deleted file mode 100644 index 9131655aa..000000000 --- a/sdk/emm/emm_private.c +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Copyright (C) 2011-2021 Intel Corporation. All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -#include -#include -#include -#include "ema.h" -#include "emm_private.h" -#include "sgx_mm_rt_abstraction.h" - -extern ema_root_t g_rts_ema_root; -#define LEGAL_INIT_FLAGS (\ - SGX_EMA_PAGE_TYPE_REG \ - | SGX_EMA_PAGE_TYPE_TCS \ - | SGX_EMA_PAGE_TYPE_SS_FIRST \ - | SGX_EMA_PAGE_TYPE_SS_REST \ - | SGX_EMA_SYSTEM \ - | SGX_EMA_RESERVE \ - ) - -int mm_init_ema(void *addr, size_t size, int flags, int prot, - sgx_enclave_fault_handler_t handler, - void *handler_private) -{ - if (!sgx_mm_is_within_enclave(addr, size)) - return EACCES; - if( ((unsigned int)flags) & (~LEGAL_INIT_FLAGS)) - return EINVAL; - if(prot &(~SGX_EMA_PROT_MASK)) - return EINVAL; - ema_t* next_ema = NULL; - - if(!find_free_region_at(&g_rts_ema_root, (size_t)addr, size, &next_ema)) - return EINVAL; - - ema_t* ema = ema_new((size_t)addr, size, flags & SGX_EMA_ALLOC_FLAGS_MASK, - (uint64_t)prot | (SGX_EMA_PAGE_TYPE_MASK & flags), - handler, handler_private, next_ema); - if(!ema) return ENOMEM; - if (flags & SGX_EMA_RESERVE) - return 0; - return ema_set_eaccept_full(ema); -} - -extern int mm_alloc_internal(void *addr, size_t size, uint32_t flags, - sgx_enclave_fault_handler_t handler, - void *private, void** out_addr, ema_root_t* root); - -int mm_alloc(void *addr, size_t size, uint32_t flags, - sgx_enclave_fault_handler_t handler, - void *private, void** out_addr) -{ - return mm_alloc_internal(addr, size, flags, handler, private, - out_addr, &g_rts_ema_root); -} - -extern int mm_commit_internal(void *addr, size_t size, ema_root_t* root); - -int mm_commit(void *addr, size_t size) -{ - return mm_commit_internal(addr, size, &g_rts_ema_root); -} - -extern int mm_uncommit_internal(void *addr, size_t size, ema_root_t* root); - -int mm_uncommit(void *addr, size_t size) -{ - return mm_uncommit_internal(addr, size, &g_rts_ema_root); -} - -extern int mm_dealloc_internal(void *addr, size_t size, ema_root_t* root); - -int mm_dealloc(void *addr, size_t size) -{ - return mm_dealloc_internal(addr, size, &g_rts_ema_root); -} - -extern int 
mm_commit_data_internal(void *addr, size_t size, - uint8_t *data, int prot, ema_root_t* root); - -int mm_commit_data(void *addr, size_t size, uint8_t *data, int prot) -{ - return mm_commit_data_internal(addr, size, data, prot, &g_rts_ema_root); -} - -extern int mm_modify_type_internal(void *addr, size_t size, int type, ema_root_t* root); - -int mm_modify_type(void *addr, size_t size, int type) -{ - return mm_modify_type_internal(addr, size, type, &g_rts_ema_root); -} - -extern int mm_modify_permissions_internal(void *addr, size_t size, - int prot, ema_root_t* root); - -int mm_modify_permissions(void *addr, size_t size, int prot) -{ - return mm_modify_permissions_internal(addr, size, prot, &g_rts_ema_root); -} - diff --git a/sdk/emm/include/bit_array.h b/sdk/emm/include/bit_array.h deleted file mode 100644 index 3fcad77a4..000000000 --- a/sdk/emm/include/bit_array.h +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Copyright (C) 2011-2021 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -#ifndef BIT_ARRAY_H_ -#define BIT_ARRAY_H_ - -#include -#include -#include - -typedef struct bit_array_ bit_array; - -#ifdef __cplusplus -extern "C" { -#endif - -// Create a new bit array to track the status of 'num' of bits. -// The contents of the data is not initialized. -bit_array *bit_array_new(size_t num_of_bits); - -// Create a new bit array to track the status of 'num' of bits. -// All the tracked bits are set (value 1). -bit_array *bit_array_new_set(size_t num_of_bits); - -// Create a new bit array to track the status of 'num' of bits. -// All the tracked bits are reset (value 0). -bit_array *bit_array_new_reset(size_t num_of_bits); - -// Reset the bit_array 'ba' to track the new 'data', which has 'num' of bits. 
-void bit_array_reattach(bit_array *ba, size_t num_of_bits, uint8_t *data); - -// Delete the bit_array 'ba' and the data it owns -void bit_array_delete(bit_array *ba); - -// Returns the number of tracked bits in the bit_array -size_t bit_array_size(bit_array *ba); - -// Returns whether the bit at position 'pos' is set -bool bit_array_test(bit_array *ba, size_t pos); - -// Return whether the bits in range [pos, pos+len) are all set -bool bit_array_test_range(bit_array *ba, size_t pos, size_t len); - -// Retuen whether any bit in range [pos, pos+len) is set -bool bit_array_test_range_any(bit_array *ba, size_t pos, size_t len); - -// Returns whether any of the bits is set -bool bit_array_any(bit_array *ba); - -// Returns whether none of the bits is set -bool bit_array_none(bit_array *ba); - -// Returns whether all of the bits are set -bool bit_array_all(bit_array *ba); - -// Set the bit at 'pos' -void bit_array_set(bit_array *ba, size_t pos); - -// Set the bits in range [pos, pos+len) -void bit_array_set_range(bit_array *ba, size_t pos, size_t len); - -// Set all the bits -void bit_array_set_all(bit_array *ba); - -// Clear the bit at 'pos' -void bit_array_reset(bit_array *ba, size_t pos); - -// Clear the bits in range [pos, pos+len) -void bit_array_reset_range(bit_array *ba, size_t pos, size_t len); - -// Clear all the bits -void bit_array_reset_all(bit_array *ba); - -// Flip the bit at 'pos' -void bit_array_flip(bit_array *ba, size_t pos); - -// Split the bit array at 'pos' -// Returns pointers to two new bit arrays -int bit_array_split(bit_array *ba, size_t pos, bit_array **, bit_array **); - -// Merge two bit arrays -// Returns a new bit array, merging two input bit arrays -bit_array* bit_array_merge(bit_array *ba1, bit_array *ba2); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/sdk/emm/include/ema.h b/sdk/emm/include/ema.h deleted file mode 100644 index dc348e881..000000000 --- a/sdk/emm/include/ema.h +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Copyright (C) 2011-2021 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -#ifndef __SGX_EMA_H__ -#define __SGX_EMA_H__ - -#include -#include "sgx_mm.h" - -#ifndef SGX_SECINFO_ALIGN -#define SGX_SECINFO_ALIGN __attribute__((aligned(sizeof(sec_info_t)))) -#endif - -#define SGX_PAGE_SIZE 0x1000ULL -#define SGX_PAGE_SHIFT 12 - -typedef struct ema_root_ ema_root_t; -typedef struct ema_t_ ema_t; - -#ifdef __cplusplus -extern "C" { -#endif - -bool ema_root_empty(ema_root_t* r); -bool ema_exist_in(ema_root_t* r, size_t addr, size_t size); -bool ema_exist(size_t addr, size_t size); - -#ifndef NDEBUG -ema_t * ema_next(ema_t *node); -#endif -#ifdef TEST -void destroy_ema_root(ema_root_t *); -void dump_ema_root(ema_root_t *); -size_t ema_base(ema_t *node); -size_t ema_size(ema_t *node); -int ema_split(ema_t *ema, size_t addr, bool new_lower, ema_t** new_node); -int ema_split_ex(ema_t *ema, size_t start, size_t end, ema_t** new_node); -ema_t * ema_merge(ema_t *lo_ema, ema_t *hi_ema); -#endif - -uint32_t get_ema_alloc_flags(ema_t *node); -uint64_t get_ema_si_flags(ema_t *node); - -sgx_enclave_fault_handler_t ema_fault_handler(ema_t* node, void** private_data); -bool is_ema_transition(ema_t *node); - -ema_t *ema_new(size_t addr, size_t size, uint32_t alloc_flags, - uint64_t si_flags, - sgx_enclave_fault_handler_t handler, - void *private_data, - ema_t* next_ema); -void ema_destroy(ema_t *ema); - -int ema_set_eaccept_full(ema_t *node); -int ema_clear_eaccept_full(ema_t *node); -int ema_set_eaccept(ema_t *node, size_t start, size_t end); -bool ema_page_committed(ema_t *ema, size_t addr); - -ema_t * search_ema(ema_root_t *root, size_t addr); -int search_ema_range(ema_root_t *root, - size_t start, size_t end, - ema_t **ema_begin, ema_t **ema_end); - -bool find_free_region(ema_root_t *root, - size_t size, size_t align, size_t *addr, - ema_t **next_ema); - -bool find_free_region_at(ema_root_t *root, - size_t addr, size_t size, - ema_t **next_ema); - - -int do_commit(size_t start, size_t size, uint64_t si_flags, bool grow_up); -int ema_do_commit(ema_t *node, size_t start, size_t end); -int ema_do_commit_loop(ema_t *first, ema_t *last, size_t start, size_t end); - -int ema_do_uncommit(ema_t *node, size_t start, size_t end); -int ema_do_uncommit_loop(ema_t *first, ema_t *last, size_t start, size_t end); - -int ema_do_dealloc(ema_t *node, size_t start, size_t end); -int ema_do_dealloc_loop(ema_t *first, ema_t *last, size_t start, size_t end); - -int ema_modify_permissions(ema_t *node, size_t start, size_t end, int new_prot); -int ema_modify_permissions_loop(ema_t *first, ema_t *last, size_t start, size_t end, int prot); -int ema_change_to_tcs(ema_t *node, size_t addr); - -int ema_do_commit_data(ema_t *node, size_t start, size_t end, uint8_t *data, int prot); -int ema_do_commit_data_loop(ema_t *firsr, ema_t *last, size_t start, - size_t end, uint8_t *data, int prot); - -int ema_do_alloc(ema_t* node); -ema_t* ema_realloc_from_reserve_range(ema_t* first, ema_t* last, - size_t start, size_t end, - uint32_t alloc_flags, uint64_t si_flags, - 
sgx_enclave_fault_handler_t handler, - void *private_data); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/sdk/emm/include/emalloc.h b/sdk/emm/include/emalloc.h deleted file mode 100644 index bfd712fa9..000000000 --- a/sdk/emm/include/emalloc.h +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright (C) 2011-2022 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -#ifndef __SGX_EMALLOC_H__ -#define __SGX_EMALLOC_H__ -#include -#define ROUND_TO(x, align) ((size_t)((x) + ((align)-1)) & (size_t)(~((align)-1))) -#define TRIM_TO(x, align) ((size_t)(x) & (size_t)(~(align-1))) -#define MIN(x, y) (((x)>(y))?(y):(x)) -#define MAX(x, y) (((x)>(y))?(x):(y)) - -void emalloc_init(); -void* emalloc(size_t); -void efree(void* ptr); -#endif - diff --git a/sdk/emm/include/emm_private.h b/sdk/emm/include/emm_private.h deleted file mode 100644 index 9fcf43406..000000000 --- a/sdk/emm/include/emm_private.h +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright (C) 2011-2021 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -#ifndef EMM_PRIVATE_H_ -#define EMM_PRIVATE_H_ - -#include -#include -#include "sgx_mm.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/* - * Initialize the EMM internals and reserve the whole range available for user - * allocations via the public sgx_mm_alloc API. This should be called before - * any other APIs invoked. The runtime should not intend to allocate any subregion - * in [user_start, user_end) for system usage, i.e., the EMM will fail any allocation - * request with SGX_EMA_SYSTEM flag in this range and return an EINVAL error. - * @param[in] user_start The start of the user address range, page aligned. - * @param[in] user_end The end (exclusive) of the user address range, page aligned. - */ -void sgx_mm_init(size_t user_start, size_t user_end); - -#define SGX_EMA_SYSTEM SGX_EMA_ALLOC_FLAGS(0x80UL) /* EMA reserved by system */ -/* - * Initialize an EMA. This can be used to setup EMAs to account regions that - * are loaded and initialized with EADD before EINIT. - * @param[in] addr Starting address of the region, page aligned. If NULL is provided, - * then the function will select the starting address. - * @param[in] size Size of the region in multiples of page size in bytes. - * @param[in] flags SGX_EMA_SYSTEM, or SGX_EMA_SYSTEM | SGX_EMA_RESERVE - * bitwise ORed with one of following page types: - * - SGX_EMA_PAGE_TYPE_REG: regular page type. This is the default if not specified. - * - SGX_EMA_PAGE_TYPE_TCS: TCS page. - * - SGX_EMA_PAGE_TYPE_SS_FIRST: the first page in shadow stack. - * - SGX_EMA_PAGE_TYPE_SS_REST: the rest page in shadow stack. - * @param[in] prot permissions, either SGX_EMA_PROT_NONE or a bitwise OR of following with: - * - SGX_EMA_PROT_READ: Pages may be read. - * - SGX_EMA_PROT_WRITE: Pages may be written. - * - SGX_EMA_PROT_EXECUTE: Pages may be executed. - * @param[in] handler A custom handler for page faults in this region, NULL if - * no custom handling needed. - * @param[in] handler_private Private data for the @handler, which will be passed - * back when the handler is called. - * @retval 0 The operation was successful. - * @retval EACCES Region is outside enclave address space. - * @retval EEXIST Any page in range requested is in use. - * @retval EINVAL Invalid page type, flags, or addr and length are not page aligned. 
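A minimal sketch of how a runtime could use mm_init_ema (declared just below) to account regions already EADDed at load time; the guard_*/data_* parameters are placeholders, and the call pattern loosely mirrors what sdk/trts/ema_init.cpp does, reduced here for illustration:

/* Illustrative sketch: account a guard area and a pre-EADDed RW data
 * region as system EMAs. Addresses and sizes are placeholders; the real
 * values come from the enclave layout table. */
static int account_preadded_sketch(size_t guard_addr, size_t guard_size,
                                   size_t data_addr, size_t data_size)
{
    int ret = mm_init_ema((void *)guard_addr, guard_size,
                          SGX_EMA_SYSTEM | SGX_EMA_RESERVE,
                          SGX_EMA_PROT_NONE, NULL, NULL);
    if (ret != 0)
        return ret;

    /* RW data region added with EADD before EINIT, tracked for the runtime. */
    return mm_init_ema((void *)data_addr, data_size,
                       SGX_EMA_SYSTEM | SGX_EMA_PAGE_TYPE_REG,
                       SGX_EMA_PROT_READ | SGX_EMA_PROT_WRITE,
                       NULL, NULL);
}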
- */ -int mm_init_ema(void *addr, size_t size, int flags, int prot, - sgx_enclave_fault_handler_t handler, - void *handler_private); -// See documentation in sgx_mm.h -int mm_alloc(void *addr, size_t size, uint32_t flags, - sgx_enclave_fault_handler_t handler, void *private_data, void** out_addr); -int mm_dealloc(void *addr, size_t size); -int mm_uncommit(void *addr, size_t size); -int mm_commit(void *addr, size_t size); -int mm_commit_data(void *addr, size_t size, uint8_t *data, int prot); -int mm_modify_type(void *addr, size_t size, int type); -int mm_modify_permissions(void *addr, size_t size, int prot); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/sdk/emm/include/sgx_mm.h b/sdk/emm/include/sgx_mm.h deleted file mode 100644 index e6bd4c8f8..000000000 --- a/sdk/emm/include/sgx_mm.h +++ /dev/null @@ -1,290 +0,0 @@ -/* - * Copyright (C) 2011-2021 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -#ifndef SGX_MM_H_ -#define SGX_MM_H_ - -#include -#include -#ifdef __cplusplus -extern "C" { -#endif - -/** - * Page fault (#PF) info reported in the SGX SSA MISC region. - */ -typedef struct _sgx_pfinfo -{ - uint64_t maddr; // address for #PF. - union _pfec - { - uint32_t errcd; - struct - { // PFEC bits. - uint32_t p : 1; // P flag. - uint32_t rw : 1; // RW access flag, 0 for read, 1 for write. - uint32_t : 13; // U/S, I/O, PK and reserved bits not relevant for SGX PF. - uint32_t sgx : 1; // SGX bit. - uint32_t : 16; // reserved bits. - }; - } pfec; - uint32_t reserved; -} sgx_pfinfo; - -/** - * Custom page fault (#PF) handler, do usage specific processing upon #PF, - * e.g., loading data and verify its trustworthiness, then call sgx_mm_commit_data - * to explicitly EACCEPTCOPY data. - * This custom handler is passed into sgx_mm_alloc, and associated with the - * newly allocated region. The memory manager calls the handler when a #PF - * happens in the associated region. 
The handler may invoke abort() if it - * determines the exception is invalid based on certain internal states - * it maintains. - * - * @param[in] pfinfo info reported in the SSA MISC region for page fault. - * @param[in] private_data private data provided by handler in sgx_mm_alloc call. - * @retval SGX_MM_EXCEPTION_CONTINUE_EXECUTION Success on handling the exception. - * @retval SGX_MM_EXCEPTION_CONTINUE_SEARCH Exception not handled and should be passed to - * some other handler. - * - */ -typedef int (*sgx_enclave_fault_handler_t)(const sgx_pfinfo *pfinfo, void *private_data); - -/* bit 0 - 7 are allocation flags. */ -#define SGX_EMA_ALLOC_FLAGS_SHIFT 0 -#define SGX_EMA_ALLOC_FLAGS(n) ((n) << SGX_EMA_ALLOC_FLAGS_SHIFT) -#define SGX_EMA_ALLOC_FLAGS_MASK SGX_EMA_ALLOC_FLAGS(0xFF) - -/* Only reserve an address range, no physical memory committed.*/ -#define SGX_EMA_RESERVE SGX_EMA_ALLOC_FLAGS(0x1) - -/* Reserve an address range and commit physical memory. */ -#define SGX_EMA_COMMIT_NOW SGX_EMA_ALLOC_FLAGS(0x2) - -/* Reserve an address range and commit physical memory on demand.*/ -#define SGX_EMA_COMMIT_ON_DEMAND SGX_EMA_ALLOC_FLAGS(0x4) - -/* Always commit pages from higher to lower addresses, - * no gaps in addresses above the last committed. - */ -#define SGX_EMA_GROWSDOWN SGX_EMA_ALLOC_FLAGS(0x10) - -/* Always commit pages from lower to higher addresses, - * no gaps in addresses below the last committed. -*/ -#define SGX_EMA_GROWSUP SGX_EMA_ALLOC_FLAGS(0x20) - -/* Map addr must be exactly as requested. */ -#define SGX_EMA_FIXED SGX_EMA_ALLOC_FLAGS(0x40) - -/* bit 8 - 15 are page types. */ -#define SGX_EMA_PAGE_TYPE_SHIFT 8 -#define SGX_EMA_PAGE_TYPE(n) ((n) << SGX_EMA_PAGE_TYPE_SHIFT) -#define SGX_EMA_PAGE_TYPE_MASK SGX_EMA_PAGE_TYPE(0xFF) -#define SGX_EMA_PAGE_TYPE_TCS SGX_EMA_PAGE_TYPE(0x1) /* TCS page type. */ -#define SGX_EMA_PAGE_TYPE_REG SGX_EMA_PAGE_TYPE(0x2) /* regular page type, default if not specified. */ -#define SGX_EMA_PAGE_TYPE_TRIM SGX_EMA_PAGE_TYPE(0x4) /* TRIM page type. */ -#define SGX_EMA_PAGE_TYPE_SS_FIRST SGX_EMA_PAGE_TYPE(0x5) /* the first page in shadow stack. */ -#define SGX_EMA_PAGE_TYPE_SS_REST SGX_EMA_PAGE_TYPE(0x6) /* the rest pages in shadow stack. */ - -/* Use bit 24-31 for alignment masks. */ -#define SGX_EMA_ALIGNMENT_SHIFT 24 -/* - * Alignment (expressed in log2). Must be >= log2(PAGE_SIZE) and - * < # bits in a pointer (32 or 64). - */ -#define SGX_EMA_ALIGNED(n) (((unsigned int)(n) << SGX_EMA_ALIGNMENT_SHIFT)) -#define SGX_EMA_ALIGNMENT_MASK SGX_EMA_ALIGNED(0xFFUL) -#define SGX_EMA_ALIGNMENT_64KB SGX_EMA_ALIGNED(16UL) -#define SGX_EMA_ALIGNMENT_16MB SGX_EMA_ALIGNED(24UL) -#define SGX_EMA_ALIGNMENT_4GB SGX_EMA_ALIGNED(32UL) - -/* Permissions flags */ -#define SGX_EMA_PROT_NONE 0x0 -#define SGX_EMA_PROT_READ 0x1 -#define SGX_EMA_PROT_WRITE 0x2 -#define SGX_EMA_PROT_EXEC 0x4 -#define SGX_EMA_PROT_READ_WRITE (SGX_EMA_PROT_READ|SGX_EMA_PROT_WRITE) -#define SGX_EMA_PROT_READ_EXEC (SGX_EMA_PROT_READ|SGX_EMA_PROT_EXEC) -#define SGX_EMA_PROT_MASK (SGX_EMA_PROT_READ_WRITE|SGX_EMA_PROT_EXEC) -/* - * Allocate a new memory region in enclave address space (ELRANGE). - * @param[in] addr Starting address of the region, page aligned. If NULL is provided, - * then the function will select the starting address. - * @param[in] length Size of the region in bytes of multiples of page size. - * @param[in] flags A bitwise OR of flags describing committing mode, committing - * order, address preference, and page type. 
- * Flags should include exactly one of following for committing mode: - * - SGX_EMA_RESERVE: just reserve an address range, no EPC committed. - * To allocate memory on a reserved range, call this - * function again with SGX_EMA_COMMIT_ON_DEMAND or SGX_EMA_COMMIT_NOW. - * - SGX_EMA_COMMIT_NOW: reserves memory range and commit EPC pages. EAUG and - * EACCEPT are done on SGX2 platforms. - * - SGX_EMA_COMMIT_ON_DEMAND: reserves memory range, EPC pages - * are committed (EACCEPT) on demand upon #PF on SGX2 platforms. - * ORed with zero or one of the committing order flags for SGX2 platforms: - * - SGX_EMA_GROWSDOWN: always commit pages from higher to lower addresses, - * no gaps in addresses above the last committed. - * - SGX_EMA_GROWSUP: always commit pages from lower to higher addresses, - * no gaps in addresses below the last committed. - * Optionally ORed with - * - SGX_EMA_FIXED: allocate at fixed address, will return error if the - * requested address is in use. - * - SGX_EMA_ALIGNED(n): Align the region on a requested boundary. - * Fail if a suitable region cannot be found, - * The argument n specifies the binary logarithm of - * the desired alignment and must be at least 12. - * Optionally ORed with one of following page types: - * - SGX_EMA_PAGE_TYPE_REG: regular page type. This is the default if not specified. - * - SGX_EMA_PAGE_TYPE_SS_FIRST: the first page in shadow stack. - * - SGX_EMA_PAGE_TYPE_SS_REST: the rest page in shadow stack. - * - * @param[in] handler A custom handler for page faults in this region, NULL if - * no custom handling needed. - * @param[in] handler_private Private data for the @handler, which will be passed - * back when the handler is called. - * @param[out] out_addr Pointer to store the start address of allocated range. - * Set to valid address by the function on success, NULL otherwise. - * @retval 0 The operation was successful. - * @retval EACCES Region is outside enclave address space. - * @retval EEXIST Any page in range requested is in use and SGX_EMA_FIXED is set. - * @retval EINVAL Invalid alignment bouandary, i.e., n < 12 in SGX_EMA_ALIGNED(n). - * @retval ENOMEM Out of memory, or no free space to satisfy alignment boundary. - * @retval EFAULT All other errors. - */ -int sgx_mm_alloc(void *addr, size_t length, int flags, - sgx_enclave_fault_handler_t handler, void *handler_private, - void **out_addr); - -/* - * Uncommit (trim) physical EPC pages in a previously committed range. - * The pages in the allocation are freed, but the address range is still reserved. - * @param[in] addr Page aligned start address of the region to be trimmed. - * @param[in] length Size in bytes of multiples of page size. - * @retval 0 The operation was successful. - * @retval EINVAL The address range is not allocated or outside enclave. - * @retval EFAULT All other errors. - */ -int sgx_mm_uncommit(void *addr, size_t length); - -/* - * Deallocate the address range. - * The pages in the allocation are freed and the address range is released for future allocation. - * @param[in] addr Page aligned start address of the region to be freed and released. - * @param[in] length Size in bytes of multiples of page size. - * @retval 0 The operation was successful. - * @retval EINVAL The address range is not allocated or outside enclave. - */ -int sgx_mm_dealloc(void *addr, size_t length); - -/* - * Change permissions of an allocated region. - * @param[in] addr Start address of the region, must be page aligned. - * @param[in] length Size in bytes of multiples of page size. 
- * @param[in] prot permissions bitwise OR of following with: - * - SGX_EMA_PROT_READ: Pages may be read. - * - SGX_EMA_PROT_WRITE: Pages may be written. - * - SGX_EMA_PROT_EXEC: Pages may be executed. - * @retval 0 The operation was successful. - * @retval EACCES Original page type can not be changed to target type. - * @retval EINVAL The memory region was not allocated or outside enclave - * or other invalid parameters that are not supported. - * @retval EPERM The request permissions are not allowed, e.g., by target page type or - * SELinux policy. - * @retval EFAULT All other errors. - */ -int sgx_mm_modify_permissions(void *addr, size_t length, int prot); - -/* - * Change the page type of an allocated region. - * @param[in] addr Start address of the region, must be page aligned. - * @param[in] length Size in bytes of multiples of page size. - * @param[in] type page type, only SGX_EMA_PAGE_TYPE_TCS is supported. - * - * @retval 0 The operation was successful. - * @retval EACCES Original page type can not be changed to target type. - * @retval EINVAL The memory region was not allocated or outside enclave - * or other invalid parameters that are not supported. - * @retval EPERM Target page type is no allowed by this API, e.g., PT_TRIM, - * PT_SS_FIRST, PT_SS_REST. - * @retval EFAULT All other errors. - */ -int sgx_mm_modify_type(void *addr, size_t length, int type); - -/* - * Commit a partial or full range of memory allocated previously with SGX_EMA_COMMIT_ON_DEMAND. - * The API will return 0 if all pages in the requested range are successfully committed. - * Calling this API on pages already committed has no effect. - * @param[in] addr Page aligned starting address. - * @param[in] length Length of the region in bytes of multiples of page size. - * @retval 0 The operation was successful. - * @retval EINVAL Any requested page is not in any previously allocated regions, or - * outside the enclave address range. - * @retval EFAULT All other errors. - */ -int sgx_mm_commit(void *addr, size_t length); - -/* - * Load data into target pages within a region previously allocated by sgx_mm_alloc. - * This can be called to load data and set target permissions at the same time, - * e.g., dynamic code loading. The caller has verified data to be trusted and expected - * to be loaded to the target address range. Calling this API on pages already committed - * will fail. - * - * @param[in] addr Page aligned target starting addr. - * @param[in] length Length of data, in bytes of multiples of page size. - * @param[in] data Data of @length. - * @param[in] prot Target permissions. - * @retval 0 The operation was successful. - * @retval EINVAL Any page in requested address range is not previously allocated, or - * outside the enclave address range. - * @retval EPERM Any page in requested range is previously committed. - * @retval EPERM The target permissions are not allowed by OS security policy, - * e.g., SELinux rules. - * @retval EFAULT All other errors. - */ -int sgx_mm_commit_data(void *addr, size_t length, uint8_t *data, int prot); - -/* Return value used by the EMM #PF handler to indicate - * to the dispatcher that it should continue searching for the next handler. - */ -#define SGX_MM_EXCEPTION_CONTINUE_SEARCH 0 - -/* Return value used by the EMM #PF handler to indicate - * to the dispatcher that it should stop searching and continue execution. 
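A minimal usage sketch of the public API documented in this header (illustrative only, not part of this patch; the 16-page length is arbitrary and error handling is reduced to early returns):

/* Illustrative sketch: allocate 16 pages committed on demand, touch them
 * (each first write faults and the EMM EACCEPTs the page), restrict the
 * range to read-only, then release it. */
#include <string.h>
#include "sgx_mm.h"

static int demand_alloc_sketch(void)
{
    void *addr = NULL;
    const size_t len = 16 * 0x1000;   /* 16 pages of SGX_PAGE_SIZE */

    int ret = sgx_mm_alloc(NULL, len, SGX_EMA_COMMIT_ON_DEMAND,
                           NULL, NULL, &addr);
    if (ret != 0)
        return ret;

    memset(addr, 0xa5, len);          /* commits pages lazily via #PF */

    ret = sgx_mm_modify_permissions(addr, len, SGX_EMA_PROT_READ);
    if (ret != 0)
        return ret;

    return sgx_mm_dealloc(addr, len); /* frees EPC and releases the range */
}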
- */ -#define SGX_MM_EXCEPTION_CONTINUE_EXECUTION -1 - - -#ifdef __cplusplus -} -#endif -#endif diff --git a/sdk/emm/include/sgx_mm_primitives.h b/sdk/emm/include/sgx_mm_primitives.h deleted file mode 100644 index 0724ea41e..000000000 --- a/sdk/emm/include/sgx_mm_primitives.h +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright (C) 2011-2021 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -#ifndef SGX_MM_PRIMITIVES_H_ -#define SGX_MM_PRIMITIVES_H_ - -#include -#include -#ifdef __cplusplus -extern "C" { -#endif - -//SGX primitives -typedef struct _sec_info_t -{ - uint64_t flags; - uint64_t reserved[7]; -} sec_info_t; - -// EACCEPT -int do_eaccept(const sec_info_t* si, size_t addr); - -// EMODPE -int do_emodpe(const sec_info_t* si, size_t addr); - -// EACCEPTCOPY -int do_eacceptcopy(const sec_info_t* si, size_t dest, size_t src); - -#ifdef __cplusplus -} -#endif - - -#endif diff --git a/sdk/emm/include/sgx_mm_rt_abstraction.h b/sdk/emm/include/sgx_mm_rt_abstraction.h deleted file mode 100644 index 5500b58be..000000000 --- a/sdk/emm/include/sgx_mm_rt_abstraction.h +++ /dev/null @@ -1,157 +0,0 @@ -/* - * Copyright (C) 2011-2021 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -#ifndef SGX_MM_RT_ABSTRACTION_H_ -#define SGX_MM_RT_ABSTRACTION_H_ - -#include "sgx_mm.h" -#ifdef __cplusplus -extern "C" { -#endif - -#include - -/* - * The EMM page fault (#PF) handler. - * - * @param[in] pfinfo Info reported in the SSA MISC region for page fault. - * @retval SGX_EXCEPTION_CONTINUE_EXECUTION Success handling the exception. - * @retval SGX_EXCEPTION_CONTINUE_SEARCH The EMM does not handle the exception. - */ - typedef int (*sgx_mm_pfhandler_t)(const sgx_pfinfo *pfinfo); - -/* - * Register the EMM handler with the global exception handler registry - * The Runtime should ensure this handler is called first in case of - * a #PF before all other handlers. - * - * @param[in] pfhandler The EMM page fault handler. - * @retval true Success. - * @retval false Failure. - */ - bool sgx_mm_register_pfhandler(sgx_mm_pfhandler_t pfhandler); - -/* - * Unregister the EMM handler with the global exception handler registry. - * @param[in] pfhandler The EMM page fault handler. - * @retval true Success. - * @retval false Failure. - */ - bool sgx_mm_unregister_pfhandler(sgx_mm_pfhandler_t pfhandler); - -/* - * Call OS to reserve region for EAUG, immediately or on-demand. - * - * @param[in] addr Desired page aligned start address. - * @param[in] length Size of the region in bytes of multiples of page size. - * @param[in] page_type One of following page types: - * - SGX_EMA_PAGE_TYPE_REG: regular page type. This is the default if not specified. - * - SGX_EMA_PAGE_TYPE_SS_FIRST: the first page in shadow stack. - * - SGX_EMA_PAGE_TYPE_SS_REST: the rest page in shadow stack. - * @param[in] alloc_flags A bitwise OR of flags describing committing mode, committing - * order, address preference, page type. The untrusted side. - * implementation should translate following additional bits to proper - * parameters invoking syscall(mmap on Linux) provided by the kernel. - * The flags param of this interface should include exactly one of following for committing mode: - * - SGX_EMA_COMMIT_NOW: reserves memory range with SGX_EMA_PROT_READ|SGX_EMA_PROT_WRITE, if supported, - * kernel is given a hint to EAUG EPC pages for the area as soon as possible. - * - SGX_EMA_COMMIT_ON_DEMAND: reserves memory range, EPC pages can be EAUGed upon #PF. - * ORed with zero or one of the committing order flags: - * - SGX_EMA_GROWSDOWN: if supported, a hint given for the kernel to EAUG pages from higher - * to lower addresses, no gaps in addresses above the last committed. - * - SGX_EMA_GROWSUP: if supported, a hint given for the kernel to EAUG pages from lower - * to higher addresses, no gaps in addresses below the last committed. - * @retval 0 The operation was successful. - * @retval EFAULT for all failures. 
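A simplified untrusted-side sketch of this OCALL for Linux with the in-kernel SGX driver, illustrative only; it assumes the enclave range is backed by a mapping of the enclave device fd (enclave_fd is a placeholder) so the kernel can EAUG pages on first access, and it omits the page_type/alloc_flags translation and bookkeeping done by the real implementation in psw/enclave_common/sgx_mm_ocalls.cpp:

/* Illustrative sketch only: map the requested range over the enclave
 * device so EPC pages can be EAUGed on demand. enclave_fd is a
 * hypothetical handle; flag translation is omitted. */
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

static int alloc_ocall_sketch(int enclave_fd, uint64_t addr, size_t length)
{
    void *p = mmap((void *)addr, length, PROT_READ | PROT_WRITE,
                   MAP_SHARED | MAP_FIXED, enclave_fd, 0);
    return (p == MAP_FAILED) ? EFAULT : 0;
}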
- */ - int sgx_mm_alloc_ocall(uint64_t addr, size_t length, int page_type, int alloc_flags); - -/* - * Call OS to change permissions, type, or notify EACCEPT done after TRIM. - * - * @param[in] addr Start address of the memory to change protections. - * @param[in] length Length of the area. This must be a multiple of the page size. - * @param[in] page_properties_from The original EPCM flags of the EPC pages to be modified. - * Must be bitwise OR of following: - * SGX_EMA_PROT_READ - * SGX_EMA_PROT_WRITE - * SGX_EMA_PROT_EXEC - * SGX_EMA_PAGE_TYPE_REG: regular page, changeable to TRIM and TCS - * SGX_EMA_PAGE_TYPE_TRIM: signal to the kernel EACCEPT is done for TRIM pages. - * @param[in] page_properties_to The target EPCM flags. This must be bitwise OR of following: - * SGX_EMA_PROT_READ - * SGX_EMA_PROT_WRITE - * SGX_EMA_PROT_EXEC - * SGX_EMA_PAGE_TYPE_TRIM: change the page type to PT_TRIM. Note the address - * range for trimmed pages may still be reserved by enclave with - * proper permissions. - * SGX_EMA_PAGE_TYPE_TCS: change the page type to PT_TCS - * @retval 0 The operation was successful. - * @retval EFAULT for all failures. - */ - - int sgx_mm_modify_ocall(uint64_t addr, size_t length, int page_properties_from, int page_properties_to); - -/* - * Define a mutex and init/lock/unlock/destroy functions. - */ - typedef struct _sgx_mm_mutex sgx_mm_mutex; - sgx_mm_mutex* sgx_mm_mutex_create(void); - int sgx_mm_mutex_lock(sgx_mm_mutex *mutex); - int sgx_mm_mutex_unlock(sgx_mm_mutex *mutex); - int sgx_mm_mutex_destroy(sgx_mm_mutex *mutex); - - /* - * Check whether the given buffer is strictly within the enclave. - * - * Check whether the buffer given by the **ptr** and **size** parameters is - * strictly within the enclave's memory. If so, return true. If any - * portion of the buffer lies outside the enclave's memory, return false. - * - * @param[in] ptr The pointer to the buffer. - * @param[in] size The size of the buffer. - * - * @retval true The buffer is strictly within the enclave. - * @retval false At least some part of the buffer is outside the enclave, or - * the arguments are invalid. For example, if **ptr** is null or **size** - * causes arithmetic operations to wrap. - * - */ - bool sgx_mm_is_within_enclave(const void *ptr, size_t size); - - -#define SGX_EMA_SYSTEM SGX_EMA_ALLOC_FLAGS(0x80UL) /* EMA reserved by system */ - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/sdk/emm/sgx_mm.c b/sdk/emm/sgx_mm.c deleted file mode 100644 index 2344cd0cd..000000000 --- a/sdk/emm/sgx_mm.c +++ /dev/null @@ -1,450 +0,0 @@ -/* - * Copyright (C) 2011-2021 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
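One possible runtime implementation of the mutex abstraction declared above, layered on the SDK's sgx_thread mutex API; this is an illustrative sketch, not the abstraction layer shipped by this patch:

/* Illustrative sketch: back sgx_mm_mutex with a heap-allocated
 * sgx_thread_mutex_t from the trusted runtime. */
#include <stdlib.h>
#include <sgx_thread.h>
#include "sgx_mm_rt_abstraction.h"

struct _sgx_mm_mutex { sgx_thread_mutex_t m; };

sgx_mm_mutex *sgx_mm_mutex_create(void)
{
    sgx_mm_mutex *mx = (sgx_mm_mutex *)malloc(sizeof(*mx));
    if (mx && sgx_thread_mutex_init(&mx->m, NULL) != 0) {
        free(mx);
        return NULL;
    }
    return mx;
}

int sgx_mm_mutex_lock(sgx_mm_mutex *mx)   { return sgx_thread_mutex_lock(&mx->m); }
int sgx_mm_mutex_unlock(sgx_mm_mutex *mx) { return sgx_thread_mutex_unlock(&mx->m); }

int sgx_mm_mutex_destroy(sgx_mm_mutex *mx)
{
    int ret = sgx_thread_mutex_destroy(&mx->m);
    free(mx);
    return ret;
}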
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -#include -#include -#include -#include "sgx_mm.h" -#include "ema.h" -#include "emalloc.h" -#include "sgx_mm_rt_abstraction.h" - -extern ema_root_t g_user_ema_root; -extern ema_root_t g_rts_ema_root; -#define LEGAL_ALLOC_PAGE_TYPE (SGX_EMA_PAGE_TYPE_REG | SGX_EMA_PAGE_TYPE_SS_FIRST | SGX_EMA_PAGE_TYPE_SS_REST) -sgx_mm_mutex *mm_lock = NULL; -size_t mm_user_base = 0; -size_t mm_user_end = 0; -//!FIXME: assume user and system EMAs are not interleaved -// user EMAs are above the last system EMA -int mm_alloc_internal(void *addr, size_t size, int flags, - sgx_enclave_fault_handler_t handler, - void *private, void **out_addr, ema_root_t* root) -{ - int status = -1; - size_t tmp_addr = 0; - ema_t *node = NULL, *next_ema = NULL; - bool ret = false; - - uint32_t alloc_flags = (uint32_t)flags & SGX_EMA_ALLOC_FLAGS_MASK; - //Must have one of these: - if (!(alloc_flags & (SGX_EMA_RESERVE | SGX_EMA_COMMIT_NOW | SGX_EMA_COMMIT_ON_DEMAND))) - return EINVAL; - - uint64_t page_type = (uint64_t)flags & SGX_EMA_PAGE_TYPE_MASK; - if ((uint64_t)(~LEGAL_ALLOC_PAGE_TYPE) & page_type) return EINVAL; - if (page_type == 0) page_type = SGX_EMA_PAGE_TYPE_REG; - - if (size % SGX_PAGE_SIZE) return EINVAL; - - uint8_t align_flag = (uint8_t) (((uint32_t)flags & SGX_EMA_ALIGNMENT_MASK) >> SGX_EMA_ALIGNMENT_SHIFT); - if (align_flag == 0) align_flag = 12; - if (align_flag < 12) - return EINVAL; - - uint64_t align_mask = (uint64_t)(1ULL << align_flag) - 1ULL; - - tmp_addr = (size_t) addr; - //If an address is given, user must align it - if ((tmp_addr & align_mask)) - return EINVAL; - if (addr && (!sgx_mm_is_within_enclave(addr, size))) - return EACCES; - - if(sgx_mm_mutex_lock(mm_lock)) - return EFAULT; - - if (mm_user_base == 0){ - //the rts is not initialized - status = EFAULT; - goto unlock; - } - - uint64_t si_flags = (uint64_t)SGX_EMA_PROT_READ_WRITE | page_type ; - if (alloc_flags & SGX_EMA_RESERVE) - { - si_flags = SGX_EMA_PROT_NONE; - } - - if (tmp_addr) { - bool fixed_alloc = (alloc_flags & SGX_EMA_FIXED); - bool in_system_but_not_allowed = false; - size_t end = tmp_addr + size; - size_t start = tmp_addr; - if(root != &g_rts_ema_root && - ema_exist_in(&g_rts_ema_root, start, size)) - { - in_system_but_not_allowed = true; - if(fixed_alloc){ - status = EPERM; - goto unlock; - } - } - ema_t* first = NULL; - ema_t* last = NULL; - bool exist_in_root = !search_ema_range(root, start, end, &first, &last); - - if(exist_in_root){ - // Use the reserved space earlier - node = ema_realloc_from_reserve_range(first, last, start, end, - alloc_flags, si_flags, - handler, private); - if (node){ - goto alloc_action; - } - //can't fit with the address but fixed alloc is asked - if (fixed_alloc) { - status = EEXIST; - goto unlock; - } - 
// Not a fixed alloc, - // fall through to find a free space anywhere - assert(!ret); - } else { - // No existing ema overlapping with requested range - // Use the address unless it is not allowed by rts - if(!in_system_but_not_allowed){ - // make sure not in rts if this is user - ret = find_free_region_at(root, - tmp_addr, size, &next_ema); - } - //We can't use the address, fall through - } - } - // At this point, ret == false means: - // Either no address given or the given address can't be used - if (!ret) - ret = find_free_region(root, - size, (1ULL << align_flag), &tmp_addr, &next_ema); - if (!ret) { - status = ENOMEM; - goto unlock; - } -/************************************************** -* create and operate on a new node -***************************************************/ - assert(tmp_addr);//found address - assert(next_ema);//found where to insert - // create and insert the node - node = ema_new(tmp_addr, size, alloc_flags, si_flags, - handler, private, next_ema); - if (!node) { - status = ENOMEM; - goto unlock; - } -alloc_action: - assert(node); - status = ema_do_alloc(node); - if (status != 0) { - goto alloc_failed; - } - if (out_addr) { - *out_addr = (void *)tmp_addr; - } - status = 0; - goto unlock; -alloc_failed: - ema_destroy(node); - -unlock: - sgx_mm_mutex_unlock(mm_lock); - return status; -} - -int sgx_mm_alloc(void *addr, size_t size, int flags, - sgx_enclave_fault_handler_t handler, - void *private, void **out_addr) -{ - if (flags & SGX_EMA_SYSTEM) return EINVAL; - if(addr) - { - size_t tmp = (size_t)addr; - if (tmp >= mm_user_end || tmp < mm_user_base) - return EPERM; - } - return mm_alloc_internal(addr, size, flags, - handler, private, out_addr, &g_user_ema_root); -} - -int mm_commit_internal(void *addr, size_t size, ema_root_t* root) -{ - int ret = EFAULT; - size_t start = (size_t)addr; - size_t end = start + size; - ema_t *first = NULL, *last = NULL; - - if(sgx_mm_mutex_lock(mm_lock)) return ret; - ret = search_ema_range(root, start, end, &first, &last); - if (ret < 0) { - ret = EINVAL; - goto unlock; - } - - ret = ema_do_commit_loop(first, last, start, end); -unlock: - sgx_mm_mutex_unlock(mm_lock); - return ret; -} - -int sgx_mm_commit(void *addr, size_t size) -{ - return mm_commit_internal(addr, size, &g_user_ema_root); -} - -int mm_uncommit_internal(void *addr, size_t size, ema_root_t* root) -{ - int ret = EFAULT; - size_t start = (size_t)addr; - size_t end = start + size; - ema_t *first = NULL, *last = NULL; - - if(sgx_mm_mutex_lock(mm_lock)) return ret; - ret = search_ema_range(root, start, end, &first, &last); - if (ret < 0) { - ret = EINVAL; - goto unlock; - } - - ret = ema_do_uncommit_loop(first, last, start, end); -unlock: - sgx_mm_mutex_unlock(mm_lock); - return ret; -} - -int sgx_mm_uncommit(void *addr, size_t size) -{ - return mm_uncommit_internal(addr, size, &g_user_ema_root); -} - -int mm_dealloc_internal(void *addr, size_t size, ema_root_t* root) -{ - int ret = EFAULT; - size_t start = (size_t)addr; - size_t end = start + size; - ema_t *first = NULL, *last = NULL; - - if(sgx_mm_mutex_lock(mm_lock)) return ret; - ret = search_ema_range(root, start, end, &first, &last); - if (ret < 0) { - ret = EINVAL; - goto unlock; - } - - ret = ema_do_dealloc_loop(first, last, start, end); -unlock: - sgx_mm_mutex_unlock(mm_lock); - return ret; -} - -int sgx_mm_dealloc(void *addr, size_t size) -{ - return mm_dealloc_internal(addr, size, &g_user_ema_root); -} - -int mm_commit_data_internal(void *addr, size_t size, uint8_t *data, int prot, ema_root_t* root) -{ - int 
ret = EFAULT; - size_t start = (size_t)addr; - size_t end = start + size; - ema_t *first = NULL, *last = NULL; - - if (size == 0) - return EINVAL; - if (size % SGX_PAGE_SIZE != 0) - return EINVAL; - if (start % SGX_PAGE_SIZE != 0) - return EINVAL; - if (((size_t)data) % SGX_PAGE_SIZE != 0) - return EINVAL; - if (((uint32_t)prot) & (uint32_t)(~SGX_EMA_PROT_MASK)) - return EINVAL; - if (!sgx_mm_is_within_enclave(data, size)) - return EINVAL; - - if(sgx_mm_mutex_lock(mm_lock)) return ret; - ret = search_ema_range(root, start, end, &first, &last); - - if (ret < 0) { - ret = EINVAL; - goto unlock; - } - - ret = ema_do_commit_data_loop(first, last, start, end, data, prot); -unlock: - sgx_mm_mutex_unlock(mm_lock); - return ret; -} - -int sgx_mm_commit_data(void *addr, size_t size, uint8_t *data, int prot) -{ - return mm_commit_data_internal (addr, size, data, prot, &g_user_ema_root); -} - -int mm_modify_type_internal(void *addr, size_t size, int type, ema_root_t* root) -{ - // for this API, TCS is the only valid page type - if (type != SGX_EMA_PAGE_TYPE_TCS) { - return EPERM; - } - - // TCS occupies only one page - if (size != SGX_PAGE_SIZE) { - return EINVAL; - } - int ret = EFAULT; - size_t start = (size_t)addr; - size_t end = start + size; - ema_t *first = NULL, *last = NULL; - - if (start % SGX_PAGE_SIZE != 0) - return EINVAL; - - if(sgx_mm_mutex_lock(mm_lock)) return ret; - ret = search_ema_range(root, start, end, &first, &last); - - if (ret < 0) { - ret = EINVAL; - goto unlock; - } - - // one page only, covered by a single ema node - assert(ema_next(first) == last); - ret = ema_change_to_tcs(first, (size_t)addr); -unlock: - sgx_mm_mutex_unlock(mm_lock); - return ret; -} - -int sgx_mm_modify_type(void *addr, size_t size, int type) -{ - return mm_modify_type_internal(addr, size, type, &g_user_ema_root); -} - -int mm_modify_permissions_internal(void *addr, size_t size, int prot, ema_root_t* root) -{ - int ret = EFAULT; - size_t start = (size_t)addr; - size_t end = start + size; - - if (size == 0) return EINVAL; - if (size % SGX_PAGE_SIZE) return EINVAL; - if (start % SGX_PAGE_SIZE) return EINVAL; - if ((prot & SGX_EMA_PROT_EXEC) && !(prot & SGX_EMA_PROT_READ)) - return EINVAL; - - ema_t *first = NULL, *last = NULL; - - if(sgx_mm_mutex_lock(mm_lock)) return ret; - ret = search_ema_range(root, start, end, &first, &last); - if (ret < 0) { - ret = EINVAL; - goto unlock; - } - ret = ema_modify_permissions_loop(first, last, start, end, prot); -unlock: - sgx_mm_mutex_unlock(mm_lock); - return ret; -} - -int sgx_mm_modify_permissions(void *addr, size_t size, int prot) -{ - return mm_modify_permissions_internal(addr, size, prot, &g_user_ema_root); -} - -int sgx_mm_enclave_pfhandler(const sgx_pfinfo *pfinfo) -{ - int ret = SGX_MM_EXCEPTION_CONTINUE_SEARCH; - size_t addr = TRIM_TO((pfinfo->maddr), SGX_PAGE_SIZE); - if(sgx_mm_mutex_lock(mm_lock)) return ret; - ema_t *ema = search_ema(&g_user_ema_root, addr); - if (!ema) { - ema = search_ema(&g_rts_ema_root, addr); - if(!ema) - goto unlock; - } - void* data = NULL; - sgx_enclave_fault_handler_t eh = ema_fault_handler(ema, &data); - if(eh){ - //don't hold the lock as handlers can longjmp - sgx_mm_mutex_unlock(mm_lock); - return eh(pfinfo, data); - } - if (ema_page_committed(ema, addr)) - { - // Check for spurious #PF - if ((pfinfo->pfec.rw == 0 && 0 == (get_ema_si_flags(ema) & SGX_EMA_PROT_READ)) || - (pfinfo->pfec.rw == 1 && 0 == (get_ema_si_flags(ema) & SGX_EMA_PROT_WRITE))) - { - ret = SGX_MM_EXCEPTION_CONTINUE_SEARCH; - } - else - ret = 
SGX_MM_EXCEPTION_CONTINUE_EXECUTION; - goto unlock; - } - if (get_ema_alloc_flags(ema) & SGX_EMA_COMMIT_ON_DEMAND) - { - if ((pfinfo->pfec.rw == 0 && 0 == (get_ema_si_flags(ema) & SGX_EMA_PROT_READ)) || - (pfinfo->pfec.rw == 1 && 0 == (get_ema_si_flags(ema) & SGX_EMA_PROT_WRITE))) { - ret = SGX_MM_EXCEPTION_CONTINUE_SEARCH; - goto unlock; - } - - //!TODO: Check GROWSUP/GROWSDOWN flags and optimize accordingly. - if (ema_do_commit(ema, addr, addr + SGX_PAGE_SIZE)){ - sgx_mm_mutex_unlock(mm_lock); - abort(); - } - - ret = SGX_MM_EXCEPTION_CONTINUE_EXECUTION; - goto unlock; - } - else - { - sgx_mm_mutex_unlock(mm_lock); - //we found the EMA and nothing should cause the PF - //Can't continue as we know something is wrong - abort(); - } - - ret = SGX_MM_EXCEPTION_CONTINUE_SEARCH; -unlock: - sgx_mm_mutex_unlock(mm_lock); - return ret; -} - -void sgx_mm_init(size_t user_base, size_t user_end) -{ - mm_lock = sgx_mm_mutex_create(); - mm_user_base = user_base; - mm_user_end = user_end; - sgx_mm_register_pfhandler(sgx_mm_enclave_pfhandler); - emalloc_init(); -} diff --git a/sdk/emm/sgx_primitives.S b/sdk/emm/sgx_primitives.S deleted file mode 100644 index 397e7d08b..000000000 --- a/sdk/emm/sgx_primitives.S +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Copyright (C) 2011-2021 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
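A small C-side sketch of how the ENCLU wrappers defined below are typically driven, illustrative only; the secinfo_flags argument is a placeholder for the SECINFO encoding that ema.c computes internally, and sec_info_t plus SGX_SECINFO_ALIGN come from sgx_mm_primitives.h and ema.h:

/* Illustrative sketch: EACCEPT one page with a caller-supplied SECINFO
 * flags value (placeholder argument, computed by ema.c in practice). */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static int accept_one_page(size_t page_addr, uint64_t secinfo_flags)
{
    SGX_SECINFO_ALIGN sec_info_t si;   /* alignment macro from ema.h */
    memset(&si, 0, sizeof(si));        /* reserved fields must be zero */
    si.flags = secinfo_flags;
    return do_eaccept(&si, page_addr); /* ENCLU leaf 5: RBX=&si, RCX=page */
}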
- */ - - -#define SE_EACCEPT 5 -#define SE_EMODPE 6 -#define SE_EACCEPTCOPY 7 - -.macro ENCLU -.byte 0x0f, 0x01, 0xd7 -.endm - -.macro SE_PROLOG - .cfi_startproc - - push %rbx - push %rcx - push %rdx - movq %rdi, %rbx - movq %rsi, %rcx - -.endm - - -.macro SE_EPILOG - pop %rdx - pop %rcx - pop %rbx - - ret - .cfi_endproc -.endm - - -.macro DECLARE_GLOBAL_FUNC name - .globl \name - .type \name, @function -\name: -.endm - - -DECLARE_GLOBAL_FUNC do_eaccept - SE_PROLOG - mov $SE_EACCEPT, %eax - ENCLU - SE_EPILOG - -DECLARE_GLOBAL_FUNC do_eacceptcopy - SE_PROLOG - mov $SE_EACCEPTCOPY, %eax - ENCLU - SE_EPILOG - -DECLARE_GLOBAL_FUNC do_emodpe - SE_PROLOG - mov $SE_EMODPE, %eax - ENCLU - SE_EPILOG - diff --git a/sdk/trts/linux/Makefile b/sdk/trts/linux/Makefile index 3bc83ebd4..fcb56662d 100644 --- a/sdk/trts/linux/Makefile +++ b/sdk/trts/linux/Makefile @@ -59,7 +59,7 @@ OBJS += $(ASM_SRCS:.S=.o) OBJS := $(sort $(OBJS)) LIBTRTS = libsgx_trts.a -LIBSGX_MM_PATH = $(LINUX_SDK_DIR)/emm +LIBSGX_MM_PATH = $(LINUX_EXTERNAL_DIR)/sgx-emm LIBSGX_MM = libsgx_mm.a .PHONY: all From d9f70c23774bb92b1b522390ed50a88e76dc917b Mon Sep 17 00:00:00 2001 From: Haitao Huang Date: Tue, 16 Aug 2022 17:10:09 -0700 Subject: [PATCH 34/96] Revert "enclave_common: simplfy permissions change" This reverts commit ec544fea30538e51024aa588ce33babfc85ae3ab. Signed-off-by: Haitao Huang --- psw/enclave_common/sgx_mm_ocalls.cpp | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/psw/enclave_common/sgx_mm_ocalls.cpp b/psw/enclave_common/sgx_mm_ocalls.cpp index 2deeb304f..80aaf0ead 100644 --- a/psw/enclave_common/sgx_mm_ocalls.cpp +++ b/psw/enclave_common/sgx_mm_ocalls.cpp @@ -396,6 +396,15 @@ uint32_t COMM_API enclave_modify( if (type_to != type_from) return ENCLAVE_INVALID_PARAMETER; + // type_to == type_from + // this is for emodpr to epcm.NONE, enclave EACCEPT with pte.R + // separate mprotect is needed to change pte.R to pte.NONE + if (prot_to == prot_from && prot_to == PROT_NONE) + { + ret = mprotect((void *)addr, length, prot_to); + if (ret == -1) + return error_driver2api(ret, errno); + } if (prot_to == prot_from) { @@ -412,8 +421,12 @@ uint32_t COMM_API enclave_modify( { return ENCLAVE_INVALID_PARAMETER; } - ret = mprotect((void *)addr, length, prot_to); - if (ret == -1) - return error_driver2api(ret, errno); + //EACCEPT needs at least pte.R, PROT_NONE case done above. 
+ if (prot_to != PROT_NONE) + { + ret = mprotect((void *)addr, length, prot_to); + if (ret == -1) + return error_driver2api(ret, errno); + } return ret; } From fc6f8f67d98b1ee910bce213f3eb2aeb0cc6eca7 Mon Sep 17 00:00:00 2001 From: Haitao Huang Date: Wed, 17 Aug 2022 09:10:43 -0700 Subject: [PATCH 35/96] sgx-emm/api_tests: add a case to change permissions to PT_NONE link: https://github.com/intel/sgx-emm/issues/1 Signed-off-by: Haitao Huang --- external/sgx-emm/api_tests/Enclave/Enclave.cpp | 5 +++-- external/sgx-emm/emm_src | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/external/sgx-emm/api_tests/Enclave/Enclave.cpp b/external/sgx-emm/api_tests/Enclave/Enclave.cpp index 28a52b10f..4b3b7e0b9 100644 --- a/external/sgx-emm/api_tests/Enclave/Enclave.cpp +++ b/external/sgx-emm/api_tests/Enclave/Enclave.cpp @@ -328,8 +328,9 @@ int test_sgx_mm_permissions() EXPECT_NEQ (pd.pf.pfec.errcd, 0); //WRITE suceess with PF EXPECT_EQ (pd.pf.pfec.rw, 1); //WRITE indicated in PFEC - memset((void*) &pd, 0, sizeof(pd)); - pd.access = SGX_EMA_PROT_READ|SGX_EMA_PROT_EXEC; + // permissions reduction + ret = sgx_mm_modify_permissions(addr + ALLOC_SIZE/2, ALLOC_SIZE/2, SGX_EMA_PROT_NONE); + EXPECT_EQ(ret, 0); //no longer used, ready to be released by any thread //we could dealloc here but to make it more interesting... diff --git a/external/sgx-emm/emm_src b/external/sgx-emm/emm_src index 8d4cb8c69..c39e89265 160000 --- a/external/sgx-emm/emm_src +++ b/external/sgx-emm/emm_src @@ -1 +1 @@ -Subproject commit 8d4cb8c6942b63618eedac44e25e2f319e08ac38 +Subproject commit c39e89265a3e2e608f7fb2bb10f720a5e573ca54 From 3cdca125bde7c663b7704c0275cfcc88e209692d Mon Sep 17 00:00:00 2001 From: xxu36 Date: Mon, 6 Jun 2022 21:59:50 +0800 Subject: [PATCH 36/96] EDMM support with configurable User Region Size Signed-off-by: xxu36 --- .../SampleEnclave/Enclave/Enclave.config.xml | 5 +-- common/inc/internal/metadata.h | 2 + psw/urts/create_param.h | 2 + sdk/sign_tool/SignTool/manage_metadata.cpp | 29 ++++++++++++ sdk/sign_tool/SignTool/manage_metadata.h | 3 +- sdk/sign_tool/SignTool/sign_tool.cpp | 3 +- sdk/sign_tool/SignTool/util_st.h | 1 + sdk/trts/ema_init.cpp | 32 +++++++++----- sdk/trts/init_enclave.cpp | 44 ++++++++++++------- 9 files changed, 90 insertions(+), 31 deletions(-) diff --git a/SampleCode/SampleEnclave/Enclave/Enclave.config.xml b/SampleCode/SampleEnclave/Enclave/Enclave.config.xml index 4a7edb0c7..83ec0679f 100644 --- a/SampleCode/SampleEnclave/Enclave/Enclave.config.xml +++ b/SampleCode/SampleEnclave/Enclave/Enclave.config.xml @@ -13,9 +13,8 @@ --> 0x4000 0x2000 - 0x900000 - 0x90000 - 0x0022000 + 0x5000 + 0x50000 0 diff --git a/common/inc/internal/metadata.h b/common/inc/internal/metadata.h index ec121a730..70d3c5d10 100644 --- a/common/inc/internal/metadata.h +++ b/common/inc/internal/metadata.h @@ -84,6 +84,7 @@ #define HEAP_SIZE_MAX 0x1000000 /* 16 MB */ #define RSRV_SIZE_MIN 0x0000000 /* 0 KB */ #define RSRV_SIZE_MAX 0x0000000 /* 0 KB */ +#define USER_REGION_SIZE 0x0000000 /* 0 KB */ #define DEFAULT_MISC_SELECT 0 #define DEFAULT_MISC_MASK 0xFFFFFFFF #define ISVFAMILYID_MAX 0xFFFFFFFFFFFFFFFFULL @@ -128,6 +129,7 @@ typedef enum #define LAYOUT_ID_RSRV_MIN (20) #define LAYOUT_ID_RSRV_INIT (21) #define LAYOUT_ID_RSRV_MAX (22) +#define LAYOUT_ID_USER_REGION (23) extern const char * layout_id_str[]; diff --git a/psw/urts/create_param.h b/psw/urts/create_param.h index 6300d7dad..235427e66 100644 --- a/psw/urts/create_param.h +++ b/psw/urts/create_param.h @@ -51,6 +51,8 @@ typedef struct 
_create_param_t uint64_t rsrv_init_size; uint64_t rsrv_offset; uint64_t rsrv_executable; + uint64_t user_region_offset; + uint64_t user_region_size; uint64_t first_ssa_gpr; uint64_t td_addr; uint64_t tls_addr; diff --git a/sdk/sign_tool/SignTool/manage_metadata.cpp b/sdk/sign_tool/SignTool/manage_metadata.cpp index e75a18fe2..e9464134c 100644 --- a/sdk/sign_tool/SignTool/manage_metadata.cpp +++ b/sdk/sign_tool/SignTool/manage_metadata.cpp @@ -489,6 +489,12 @@ bool CMetadata::check_xml_parameter(const xml_parameter_t *parameter) } } + if ((parameter[USERREGIONSIZE].value % ALIGN_SIZE)) + { + se_trace(SE_TRACE_ERROR, SET_USER_REGION_SIZE_ALIGN_ERROR); + return false; + } + // LE setting: HW != 0, Licensekey = 1 // Other enclave setting: HW = 0, Licensekey = 0 if((parameter[HW].value == 0 && parameter[LAUNCHKEY].value != 0) || @@ -547,6 +553,7 @@ bool CMetadata::check_xml_parameter(const xml_parameter_t *parameter) m_create_param.rsrv_min_size = parameter[RSRVMINSIZE].value; m_create_param.rsrv_max_size = parameter[RSRVMAXSIZE].value; m_create_param.rsrv_executable = parameter[RSRVEXECUTABLE].flag ? parameter[RSRVEXECUTABLE].value : 0; + m_create_param.user_region_size = parameter[USERREGIONSIZE].value; m_create_param.stack_max_size = parameter[STACKMAXSIZE].value; m_create_param.stack_min_size = parameter[STACKMINSIZE].value; m_create_param.tcs_num = (uint32_t)parameter[TCSNUM].value; @@ -558,6 +565,7 @@ bool CMetadata::check_xml_parameter(const xml_parameter_t *parameter) SE_TRACE_DEBUG("RSRV_MIN_SIZE = 0x%016llX\n", m_create_param.rsrv_min_size); SE_TRACE_DEBUG("RSRV_INIT_SIZE = 0x%016llX\n", m_create_param.rsrv_init_size); SE_TRACE_DEBUG("RSRV_MAX_SIZE = 0x%016llX\n", m_create_param.rsrv_max_size); + SE_TRACE_DEBUG("USER_REGION_SIZE = 0x%016llX\n", m_create_param.user_region_size); return true; } @@ -998,6 +1006,17 @@ bool CMetadata::build_layout_table() } } + // USER_REGION + if (m_create_param.user_region_size > 0) + { + memset(&layout, 0, sizeof(layout)); + layout.entry.id = LAYOUT_ID_USER_REGION; + layout.entry.page_count = (uint32_t)(m_create_param.user_region_size >> SE_PAGE_SHIFT); + layout.entry.attributes = PAGE_ATTR_POST_ADD; + layout.entry.si_flags = SI_FLAGS_RW; + m_layouts.push_back(layout); + } + // update layout entries if(false == update_layout_entries()) { @@ -1219,6 +1238,16 @@ bool CMetadata::build_gd_template(uint8_t *data, uint32_t *data_size) m_create_param.rsrv_offset = (size_t)layout_rsrv->rva; } + layout_entry_t * layout_user = get_entry_by_id(LAYOUT_ID_USER_REGION, false); + if (NULL == layout_user) + { + m_create_param.user_region_offset = (size_t)0; + } + else + { + m_create_param.user_region_offset = (size_t)layout_user->rva; + } + size_t tmp_tls_addr = (size_t)(get_entry_by_id(LAYOUT_ID_TD)->rva - get_entry_by_id(LAYOUT_ID_TCS)->rva); m_create_param.td_addr = tmp_tls_addr + (size_t)((get_entry_by_id(LAYOUT_ID_TD)->page_count - 1) << SE_PAGE_SHIFT); diff --git a/sdk/sign_tool/SignTool/manage_metadata.h b/sdk/sign_tool/SignTool/manage_metadata.h index 856e6abc8..9c60e5a4b 100644 --- a/sdk/sign_tool/SignTool/manage_metadata.h +++ b/sdk/sign_tool/SignTool/manage_metadata.h @@ -83,7 +83,8 @@ typedef enum _para_type_t ENCLAVEIMAGEADDRESS, ELRANGESTARTADDRESS, ELRANGESIZE, - PKRU + PKRU, + USERREGIONSIZE } para_type_t; typedef struct _xml_parameter_t diff --git a/sdk/sign_tool/SignTool/sign_tool.cpp b/sdk/sign_tool/SignTool/sign_tool.cpp index ea9f5317f..7f71de031 100644 --- a/sdk/sign_tool/SignTool/sign_tool.cpp +++ b/sdk/sign_tool/SignTool/sign_tool.cpp @@ -1306,7 
+1306,8 @@ int main(int argc, char* argv[]) {"EnclaveImageAddress", 0xFFFFFFFFFFFFFFFF, 0x1000, 0, 0}, {"ELRangeStartAddress", 0xFFFFFFFFFFFFFFFF, 0, 0, 0}, {"ELRangeSize", 0xFFFFFFFFFFFFFFFF, 0x1000, 0, 0}, - {"PKRU", FEATURE_LOADER_SELECTS, FEATURE_MUST_BE_DISABLED, FEATURE_MUST_BE_DISABLED, 0}}; + {"PKRU", FEATURE_LOADER_SELECTS, FEATURE_MUST_BE_DISABLED, FEATURE_MUST_BE_DISABLED, 0}, + {"UserRegionSize", ENCLAVE_MAX_SIZE_64/2, 0, USER_REGION_SIZE, 0}}; const char *path[8] = {NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}; uint8_t enclave_hash[SGX_HASH_SIZE] = {0}; uint8_t metadata_raw[METADATA_SIZE]; diff --git a/sdk/sign_tool/SignTool/util_st.h b/sdk/sign_tool/SignTool/util_st.h index 8dbe1db28..98b5c1f41 100644 --- a/sdk/sign_tool/SignTool/util_st.h +++ b/sdk/sign_tool/SignTool/util_st.h @@ -140,6 +140,7 @@ #define SET_RSRV_SIZE_INIT_MIN_ERROR "Reserved memory size setting is not correct: min value should not be larger than init value.\n" #define SET_RSRV_SIZE_MAX_MIN_ERROR "Reserved memory size setting is not correct: max value should not be smaller than min value.\n" #define SET_RSRV_EXECUTABLE_ERROR "Reserved memory executable setting is not correct: the executable value should be set to 1 or 0.\n" +#define SET_USER_REGION_SIZE_ALIGN_ERROR "User region size setting is not correct: size is not page aligned.\n" #define SET_HW_LE_ERROR "Conflicting setting between the 'HW' and 'LaunchKey'.\n" #define SET_TCS_MAX_NUM_ERROR "Maximum number of TCS is not correct.\n" #define SET_TCS_MIN_POOL_ERROR "Minimum number of TCS Pool is not correct.\n" diff --git a/sdk/trts/ema_init.cpp b/sdk/trts/ema_init.cpp index 78033d68f..d4bdfc1b6 100644 --- a/sdk/trts/ema_init.cpp +++ b/sdk/trts/ema_init.cpp @@ -64,18 +64,11 @@ static int build_rts_context_nodes(layout_entry_t *entry, uint64_t offset) assert(IS_PAGE_ALIGNED(rva)); size_t addr = (size_t)get_enclave_base() + rva; - size_t size = ((size_t)entry->page_count) << SE_PAGE_SHIFT; - size_t enclave_end = (size_t)get_enclave_base() + get_enclave_size(); + size_t size = entry->page_count << SE_PAGE_SHIFT; // entry is guard page or has EREMOVE, build a reserved ema if ((entry->si_flags == 0) || - (entry->attributes & PAGE_ATTR_EREMOVE)) {//TODO:is EREMOVE EVER used for sgx2? - /*********************** - Intel SDK specific. Last guard page area fills up remaining enclave space - we cut off to leave space for user. 
- ************************/ - if((addr + size) == enclave_end && size > 0x10000ULL) - size = 0x10000; + (entry->attributes & PAGE_ATTR_EREMOVE)) { int ret = mm_init_ema((void*)addr, size, SGX_EMA_RESERVE | SGX_EMA_SYSTEM, @@ -144,7 +137,7 @@ static int build_rts_context_nodes(layout_entry_t *entry, uint64_t offset) return SGX_SUCCESS; } -extern "C" int init_rts_contexts_emas(layout_t *start, layout_t *end, uint64_t delta) +static int init_rts_contexts_emas(layout_t *start, layout_t *end, uint64_t delta) { int ret = SGX_ERROR_UNEXPECTED; @@ -171,3 +164,22 @@ extern "C" int init_rts_contexts_emas(layout_t *start, layout_t *end, uint64_t d return SGX_SUCCESS; } +extern "C" void init_rts_ema_root(size_t, size_t); +extern "C" int init_segment_emas(void* enclave_base); + +extern "C" int init_rts_emas(size_t rts_base, size_t rts_end, + layout_t *layout_start, layout_t *layout_end) +{ + int ret = SGX_ERROR_UNEXPECTED; + + init_rts_ema_root(rts_base, rts_end); + + ret = init_segment_emas((void *)rts_base); + if (SGX_SUCCESS != ret) { + return ret; + } + + ret = init_rts_contexts_emas(layout_start, layout_end, 0); + return ret; +} + diff --git a/sdk/trts/init_enclave.cpp b/sdk/trts/init_enclave.cpp index 45636628e..7276edcbf 100644 --- a/sdk/trts/init_enclave.cpp +++ b/sdk/trts/init_enclave.cpp @@ -77,8 +77,7 @@ extern sgx_status_t pcl_entry(void* enclave_base,void* ms) __attribute__((weak)) extern "C" int init_enclave(void *enclave_base, void *ms) __attribute__((section(".nipx"))); extern "C" int rsrv_mem_init(void *_rsrv_mem_base, size_t _rsrv_mem_size, size_t _rsrv_mem_min_size); -extern "C" int init_segment_emas(void* enclave_base); -extern "C" int init_rts_contexts_emas(layout_t *start, layout_t *end, uint64_t delta); +extern "C" int init_rts_emas(size_t rts_base, size_t rts_end, layout_t *start, layout_t *end); extern "C" void sgx_mm_init(size_t, size_t); // init_enclave() // Initialize enclave. 
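Taken together, the intent is that do_init_enclave() derives the RTS/user split from the layout table and then drives the two entry points declared above: sgx_mm_init() takes ownership of the user region (if one is configured) and init_rts_emas() records EMAs for the segments and layout entries below it. A condensed sketch of that call order follows; it is only an illustration that uses names already present in the surrounding hunks, and the exact wiring is in the do_init_enclave() hunk below:

    size_t rts_base = g_enclave_base;
    size_t rts_end = g_enclave_base + g_enclave_size;
    size_t user_base = 0, user_end = 0;

    layout_t *layout_start = (layout_t *)g_global_data.layout_table;
    layout_t *layout_end = (layout_t *)(g_global_data.layout_table + g_global_data.layout_entry_num);
    layout_t *layout = layout_start;
    while (layout < layout_end && layout->entry.id != LAYOUT_ID_USER_REGION)
        layout++;                          /* no user region configured => EMM gets an empty range */
    if (layout != layout_end)
    {
        user_base = g_enclave_base + layout->entry.rva;
        user_end = user_base + (((size_t)layout->entry.page_count) << SE_PAGE_SHIFT);
        rts_end = user_base;               /* RTS bookkeeping stops where the user region begins */
    }

    sgx_mm_init(user_base, user_end);      /* hand the user range to the EMM */
    if (init_rts_emas(rts_base, rts_end, layout_start, layout) != SGX_SUCCESS)
        return SGX_ERROR_UNEXPECTED;

With this split, everything at or above user_base is managed through the EMM, while the RTS keeps ownership of the ranges described by its own layout entries.
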
@@ -269,21 +268,34 @@ sgx_status_t do_init_enclave(void *ms, void *tcs) g_enclave_state = ENCLAVE_INIT_DONE; if (EDMM_supported) { - //!TODO take user base and size from config - layout_t* last_layout = (layout_t*)(g_global_data.layout_table + g_global_data.layout_entry_num - 1); - if(IS_GROUP_ID(last_layout->group.id)) return SGX_ERROR_UNEXPECTED; - layout_entry_t *last_entry = &last_layout->entry; - size_t user_base = last_entry->rva + g_enclave_base; - size_t user_end = user_base + (((size_t)last_entry->page_count) << SE_PAGE_SHIFT); - assert(last_entry->si_flags == 0 && user_end == g_enclave_size + g_enclave_base); //last guard pages - user_base += 0x10000ULL; //reserve guard page, same number used in ema_init.c - if(user_base>=user_end) - return SGX_ERROR_UNEXPECTED; + size_t rts_base = g_enclave_base; + size_t rts_end = g_enclave_base + g_enclave_size; + size_t user_base = 0; + size_t user_end = 0; + + layout_t *layout_start = (layout_t*)g_global_data.layout_table; + layout_t *layout_end = (layout_t*)(g_global_data.layout_table + g_global_data.layout_entry_num); + + // find potential user_region layout + layout_t *layout = layout_start; + for (;layout < layout_end; layout++) + if (layout->entry.id == LAYOUT_ID_USER_REGION) + break; + + // there exists user_region layout + if (layout != layout_end) + { + user_base = g_enclave_base + layout->entry.rva; + user_end = user_base + (((size_t)layout->entry.page_count) << SE_PAGE_SHIFT); + if(user_base > user_end) + return SGX_ERROR_UNEXPECTED; + + rts_end = user_base; + } + sgx_mm_init(user_base, user_end); - void* enclave_start = (void*)&__ImageBase; - if (init_segment_emas(enclave_start)) - return SGX_ERROR_UNEXPECTED; - int ret = init_rts_contexts_emas((layout_t*)g_global_data.layout_table, last_layout, 0); + + int ret = init_rts_emas(rts_base, rts_end, layout_start, layout); if (ret != SGX_SUCCESS) { return SGX_ERROR_UNEXPECTED; } From 1cbcb2d99d5ca77539f52b2ad90be84c6d4956fb Mon Sep 17 00:00:00 2001 From: xxu36 Date: Sun, 3 Jul 2022 12:31:38 +0800 Subject: [PATCH 37/96] Added signtool checking for EDMM related configurations --- sdk/sign_tool/SignTool/manage_metadata.cpp | 103 ++++++++++++++++++++- sdk/sign_tool/SignTool/manage_metadata.h | 6 ++ 2 files changed, 108 insertions(+), 1 deletion(-) diff --git a/sdk/sign_tool/SignTool/manage_metadata.cpp b/sdk/sign_tool/SignTool/manage_metadata.cpp index e9464134c..9a28daffb 100644 --- a/sdk/sign_tool/SignTool/manage_metadata.cpp +++ b/sdk/sign_tool/SignTool/manage_metadata.cpp @@ -401,7 +401,8 @@ bool CMetadata::modify_metadata(const xml_parameter_t *parameter) //set bits that have been set '1' and need to be checked m_metadata->attributes.xfrm |= (m_metadata->enclave_css.body.attributes.xfrm & m_metadata->enclave_css.body.attribute_mask.xfrm); - return true; + bool ret = warn_config(); + return ret; } bool CMetadata::check_xml_parameter(const xml_parameter_t *parameter) @@ -1356,6 +1357,106 @@ uint64_t CMetadata::calculate_enclave_size(uint64_t size) return round_size; } +bool CMetadata::rts_dynamic() +{ + bool no_dynamic_heap = + ((m_create_param.heap_init_size == m_create_param.heap_min_size) && + (m_create_param.heap_init_size == m_create_param.heap_max_size)); + + bool no_dynamic_stack = + (m_create_param.stack_max_size == m_create_param.stack_min_size); + + bool no_dynamic_rsrv = + ((m_create_param.rsrv_init_size == m_create_param.rsrv_min_size) && + (m_create_param.rsrv_init_size == m_create_param.rsrv_max_size)); + + + uint32_t tcs_min_pool = 0; + if(m_create_param.tcs_min_pool > 
m_create_param.tcs_num - 1) + { + tcs_min_pool = m_create_param.tcs_num - 1; + } + else + { + tcs_min_pool = m_create_param.tcs_min_pool; + } + + bool no_dynamic_thread = (m_create_param.tcs_max_num == tcs_min_pool + 1); + + bool no_rts_dynamic = (no_dynamic_heap && no_dynamic_stack && + no_dynamic_rsrv && no_dynamic_thread); + + return !no_rts_dynamic; +} + +bool CMetadata::user_dynamic() +{ + return (m_create_param.user_region_size > 0); +} + +sgx_misc_select_t CMetadata::get_config_misc_select() +{ + return m_metadata->enclave_css.body.misc_select; +} + +sgx_misc_select_t CMetadata::get_config_misc_mask() +{ + return m_metadata->enclave_css.body.misc_mask; +} + +bool CMetadata::warn_config() +{ + uint32_t misc_select_0 = (uint32_t)get_config_misc_select() & 1u; + uint32_t misc_mask_0 = (uint32_t)get_config_misc_mask() & 1u; + + bool has_rts_dynamic = rts_dynamic(); + bool has_user_dynamic = user_dynamic(); + + // user region configured, either mask or select, or both are zero + if (has_user_dynamic) + { + if ((misc_mask_0 && misc_select_0) == 0) + { + se_trace(SE_TRACE_ERROR, "ERROR: Enclave configuration 'UserRegionSize' requires MiscSelect[0] and MiscMask[0] set to 1.\n"); + return false; + } + else + { + se_trace(SE_TRACE_ERROR, "INFO: Enclave configuration 'UserRegionSize' requires the enclave to be run on SGX2 platform.\n"); + return true; + } + } + + if (has_rts_dynamic) + { + if (misc_select_0 == 0) + { + se_trace(SE_TRACE_ERROR, "INFO: Enclave configuration 'MiscSelect' and 'MiscSelectMask' will prevent enclave from using dynamic features. To use the dynamic features on SGX2 platform, suggest to set MiscSelectMask[0]=0 and MiscSelect[0]=1.\n"); + return true; + } + + if (misc_mask_0 == 1) + { + se_trace(SE_TRACE_ERROR, "INFO: Enclave configuration 'MiscSelect' and 'MiscSelectMask' will prevent enclave from running on SGX1 platform. 
To make it run on SGX1 platform, suggest to set MiscSelectMask[0]=0 and MiscSelect[0]=1.\n"); + return true; + } + + se_trace(SE_TRACE_ERROR, "INFO: Enclave configuration 'MiscSelect' and 'MiscSelectMask' will work on SGX1 and SGX2 platforms with respective metadata.\n"); + return true; + } + + if (misc_select_0 == 1) + { + se_trace(SE_TRACE_ERROR, "INFO: Enclave configuration 'MiscSelect' and 'MiscSelectMask' will prevent enclave from running on SGX1 platform.\n"); + return true; + } + else + { + se_trace(SE_TRACE_ERROR, "INFO: SGX1 only enclave, which will run on all platforms.\n"); + return true; + } +} + bool update_metadata(const char *path, const metadata_t *metadata, uint64_t meta_offset) { assert(path != NULL && metadata != NULL); diff --git a/sdk/sign_tool/SignTool/manage_metadata.h b/sdk/sign_tool/SignTool/manage_metadata.h index 9c60e5a4b..a376edf40 100644 --- a/sdk/sign_tool/SignTool/manage_metadata.h +++ b/sdk/sign_tool/SignTool/manage_metadata.h @@ -107,6 +107,10 @@ class CMetadata: private Uncopyable CMetadata(metadata_t *metadata, BinParser *parser); ~CMetadata(); bool build_metadata(const xml_parameter_t *parameter); + bool rts_dynamic(); + bool user_dynamic(); + sgx_misc_select_t get_config_misc_select(); + sgx_misc_select_t get_config_misc_mask(); private: bool get_time(uint32_t *date); bool modify_metadata(const xml_parameter_t *parameter); @@ -129,6 +133,8 @@ class CMetadata: private Uncopyable void* get_rawdata_by_rva(uint64_t rva); bool vaildate_elrange_config(); bool build_elrange_config_entry(); + uint64_t calculate_heap_overhead(); + bool warn_config(); metadata_t *m_metadata; BinParser *m_parser; From 6b68f96845639b6b354ef9d20b5f6fc89a50295d Mon Sep 17 00:00:00 2001 From: xxu36 Date: Sun, 10 Jul 2022 14:41:25 +0800 Subject: [PATCH 38/96] Support for compatibility 1. Added signing tool check for potential compatibility issues. 2. Determine which metadatas to insert in the enclave. 3. 
Calculate the potential memory overhead for bookkeeping the RTS (heap/rsrv/threadcontexts) Signed-off-by: xxu36 --- common/inc/internal/bit_array_imp.h | 1 + common/inc/internal/ema_imp.h | 1 + sdk/sign_tool/SignTool/manage_metadata.cpp | 186 +++++++++++++++++++-- sdk/sign_tool/SignTool/manage_metadata.h | 6 +- sdk/sign_tool/SignTool/sign_tool.cpp | 79 ++++++--- 5 files changed, 240 insertions(+), 33 deletions(-) create mode 120000 common/inc/internal/bit_array_imp.h create mode 120000 common/inc/internal/ema_imp.h diff --git a/common/inc/internal/bit_array_imp.h b/common/inc/internal/bit_array_imp.h new file mode 120000 index 000000000..c30a510f2 --- /dev/null +++ b/common/inc/internal/bit_array_imp.h @@ -0,0 +1 @@ +../../../external/sgx-emm/emm_src/include/bit_array_imp.h \ No newline at end of file diff --git a/common/inc/internal/ema_imp.h b/common/inc/internal/ema_imp.h new file mode 120000 index 000000000..41b8b3d22 --- /dev/null +++ b/common/inc/internal/ema_imp.h @@ -0,0 +1 @@ +../../../external/sgx-emm/emm_src/include/ema_imp.h \ No newline at end of file diff --git a/sdk/sign_tool/SignTool/manage_metadata.cpp b/sdk/sign_tool/SignTool/manage_metadata.cpp index 9a28daffb..10d850bb3 100644 --- a/sdk/sign_tool/SignTool/manage_metadata.cpp +++ b/sdk/sign_tool/SignTool/manage_metadata.cpp @@ -48,6 +48,8 @@ #include "crypto_wrapper.h" #include "global_data.h" #include "se_version.h" +#include "ema_imp.h" +#include "bit_array_imp.h" #include #include @@ -401,7 +403,7 @@ bool CMetadata::modify_metadata(const xml_parameter_t *parameter) //set bits that have been set '1' and need to be checked m_metadata->attributes.xfrm |= (m_metadata->enclave_css.body.attributes.xfrm & m_metadata->enclave_css.body.attribute_mask.xfrm); - bool ret = warn_config(); + bool ret = check_config(); return ret; } @@ -571,6 +573,147 @@ bool CMetadata::check_xml_parameter(const xml_parameter_t *parameter) return true; } +uint64_t CMetadata::calculate_rts_bk_overhead() +{ + uint64_t ema_overhead = sizeof(struct ema_t_); + uint64_t bit_array_overhead = sizeof(struct bit_array_); + + // MIN heap + uint32_t page_count = (uint32_t)(m_create_param.heap_min_size >> SE_PAGE_SHIFT); + uint64_t heap_node_overhead = ema_overhead + bit_array_overhead + (ROUND_TO(page_count, 8) >> 3); + + if(m_create_param.heap_init_size > m_create_param.heap_min_size) + { + // INIT heap + page_count = (uint32_t)((m_create_param.heap_init_size - m_create_param.heap_min_size) >> SE_PAGE_SHIFT); + heap_node_overhead += ema_overhead + bit_array_overhead + (ROUND_TO(page_count, 8) >> 3); + } + + if(m_create_param.heap_max_size > m_create_param.heap_init_size) + { + page_count = (uint32_t)((m_create_param.heap_max_size - m_create_param.heap_init_size) >> SE_PAGE_SHIFT); + heap_node_overhead += ema_overhead + bit_array_overhead + (ROUND_TO(page_count, 8) >> 3); + } + + page_count = (uint32_t)(m_create_param.rsrv_min_size >> SE_PAGE_SHIFT); + uint64_t rsrv_node_overhead = ema_overhead + bit_array_overhead + (ROUND_TO(page_count, 8) >> 3); + + if(m_create_param.rsrv_init_size > m_create_param.rsrv_min_size) + { + // INIT RSRV + page_count = (uint32_t)((m_create_param.rsrv_init_size - m_create_param.rsrv_min_size) >> SE_PAGE_SHIFT); + rsrv_node_overhead += ema_overhead + bit_array_overhead + (ROUND_TO(page_count, 8) >> 3); + } + + if(m_create_param.rsrv_max_size > m_create_param.rsrv_init_size) + { + page_count = (uint32_t)((m_create_param.rsrv_max_size - m_create_param.rsrv_init_size) >> SE_PAGE_SHIFT); + rsrv_node_overhead += ema_overhead + 
bit_array_overhead + (ROUND_TO(page_count, 8) >> 3); + } + // guard page | stack | guard page | TCS | SSA | guard page | TLS + + // guard page + uint64_t non_removed_ctx_overhead = ema_overhead; + uint64_t removed_ctx_overhead = ema_overhead; + + // stack + page_count = (uint32_t)(m_create_param.stack_min_size >> SE_PAGE_SHIFT); + non_removed_ctx_overhead += ema_overhead + bit_array_overhead + (ROUND_TO(page_count, 8) >> 3); + removed_ctx_overhead += ema_overhead; + + if(m_create_param.stack_max_size > m_create_param.stack_min_size) + { + page_count = (uint32_t)((m_create_param.stack_max_size - m_create_param.stack_min_size) >> SE_PAGE_SHIFT); + non_removed_ctx_overhead += ema_overhead + bit_array_overhead + (ROUND_TO(page_count, 8) >> 3); + removed_ctx_overhead += ema_overhead; + } + + // guard page + non_removed_ctx_overhead += ema_overhead; + removed_ctx_overhead += ema_overhead; + + // tcs + page_count = TCS_SIZE >> SE_PAGE_SHIFT; + non_removed_ctx_overhead += ema_overhead + bit_array_overhead + (ROUND_TO(page_count, 8) >> 3); + removed_ctx_overhead += ema_overhead; + + // ssa + page_count = SSA_FRAME_SIZE * SSA_NUM; + non_removed_ctx_overhead += ema_overhead + bit_array_overhead + (ROUND_TO(page_count, 8) >> 3); + removed_ctx_overhead += ema_overhead; + + // guard page + non_removed_ctx_overhead += ema_overhead; + removed_ctx_overhead += ema_overhead; + + // td + page_count = 1; + const Section *section = m_parser->get_tls_section(); + if(section) + { + page_count += (uint32_t)(ROUND_TO_PAGE(section->virtual_size()) >> SE_PAGE_SHIFT); + } + non_removed_ctx_overhead += ema_overhead + bit_array_overhead + (ROUND_TO(page_count, 8) >> 3); + removed_ctx_overhead += ema_overhead; + + uint32_t tcs_min_pool = 0; /* Number of static threads (EADD) */ + uint32_t tcs_eremove = 0; + if(m_create_param.tcs_min_pool > m_create_param.tcs_num - 1) + { + tcs_min_pool = m_create_param.tcs_num - 1; + tcs_eremove = 0; + } + else + { + tcs_min_pool = m_create_param.tcs_min_pool; + tcs_eremove = m_create_param.tcs_num -1 - m_create_param.tcs_min_pool; + } + + // static thread contexts + uint64_t total_non_removed_ctx_overhead = non_removed_ctx_overhead; + if (tcs_min_pool > 0) + { + total_non_removed_ctx_overhead += non_removed_ctx_overhead; + + if (tcs_min_pool > 1) + { + total_non_removed_ctx_overhead += non_removed_ctx_overhead * (tcs_min_pool - 1); + } + } + + // eremoved thread contexts + uint64_t total_removed_ctx_overhead = removed_ctx_overhead; + if (tcs_eremove > 0) + { + total_removed_ctx_overhead += removed_ctx_overhead; + + if (tcs_eremove > 1) + { + total_removed_ctx_overhead += removed_ctx_overhead * (tcs_eremove - 1); + } + } + + // dynamic thread contexts + if (m_create_param.tcs_max_num > tcs_min_pool + 1) + { + total_non_removed_ctx_overhead += non_removed_ctx_overhead * (m_create_param.tcs_max_num - tcs_min_pool); + } + + // PT_LOAD segments + uint64_t total_sections_overhead = 0; + std::vector sections = m_parser->get_sections(); + for (auto s : sections) { + uint32_t p_count = (uint32_t)(ROUND_TO_PAGE(s->virtual_size()) >> SE_PAGE_SHIFT); + total_sections_overhead += ema_overhead + bit_array_overhead + (ROUND_TO(p_count, 8) >> 3); + } + + return heap_node_overhead + + rsrv_node_overhead + + total_non_removed_ctx_overhead + + total_removed_ctx_overhead + + total_sections_overhead; +} + void *CMetadata::alloc_buffer_from_metadata(uint32_t size) { void *addr = GET_PTR(void, m_metadata, m_metadata->size); @@ -1008,11 +1151,21 @@ bool CMetadata::build_layout_table() } // USER_REGION - if 
(m_create_param.user_region_size > 0) + uint8_t meta_versions = get_meta_versions(); + // SGX2 metadata required + if ((meta_versions & 2u) == 2u) { + uint64_t rts_bk_overhead = calculate_rts_bk_overhead(); + uint64_t user_region_size = ROUND_TO_PAGE(rts_bk_overhead); + se_trace(SE_TRACE_ERROR, "RTS bookkeeping overhead: 0x%016llX\n", user_region_size); + + if (m_create_param.user_region_size > 0) + { + user_region_size += m_create_param.user_region_size; + } memset(&layout, 0, sizeof(layout)); layout.entry.id = LAYOUT_ID_USER_REGION; - layout.entry.page_count = (uint32_t)(m_create_param.user_region_size >> SE_PAGE_SHIFT); + layout.entry.page_count = (uint32_t)(user_region_size >> SE_PAGE_SHIFT); layout.entry.attributes = PAGE_ATTR_POST_ADD; layout.entry.si_flags = SI_FLAGS_RW; m_layouts.push_back(layout); @@ -1404,7 +1557,7 @@ sgx_misc_select_t CMetadata::get_config_misc_mask() return m_metadata->enclave_css.body.misc_mask; } -bool CMetadata::warn_config() +bool CMetadata::check_config() { uint32_t misc_select_0 = (uint32_t)get_config_misc_select() & 1u; uint32_t misc_mask_0 = (uint32_t)get_config_misc_mask() & 1u; @@ -1417,12 +1570,15 @@ bool CMetadata::warn_config() { if ((misc_mask_0 && misc_select_0) == 0) { - se_trace(SE_TRACE_ERROR, "ERROR: Enclave configuration 'UserRegionSize' requires MiscSelect[0] and MiscMask[0] set to 1.\n"); + m_meta_verions = 0; + se_trace(SE_TRACE_ERROR, "\033[0;31mERROR: Enclave configuration 'UserRegionSize' requires MiscSelect[0] and MiscMask[0] set to 1.\n\033[0m"); return false; } else { - se_trace(SE_TRACE_ERROR, "INFO: Enclave configuration 'UserRegionSize' requires the enclave to be run on SGX2 platform.\n"); + // SGX2 metadata only + m_meta_verions = 1u << 1; + se_trace(SE_TRACE_ERROR, "\033[0;32mINFO: Enclave configuration 'UserRegionSize' requires the enclave to be run on SGX2 platform.\n\033[0m"); return true; } } @@ -1431,28 +1587,36 @@ bool CMetadata::warn_config() { if (misc_select_0 == 0) { - se_trace(SE_TRACE_ERROR, "INFO: Enclave configuration 'MiscSelect' and 'MiscSelectMask' will prevent enclave from using dynamic features. To use the dynamic features on SGX2 platform, suggest to set MiscSelectMask[0]=0 and MiscSelect[0]=1.\n"); + // SGX1 metadata only + m_meta_verions = 1u; + se_trace(SE_TRACE_ERROR, "\033[0;32mINFO: Enclave configuration 'MiscSelect' and 'MiscSelectMask' will prevent enclave from using dynamic features. To use the dynamic features on SGX2 platform, suggest to set MiscSelectMask[0]=0 and MiscSelect[0]=1.\n\033[0m"); return true; } if (misc_mask_0 == 1) { - se_trace(SE_TRACE_ERROR, "INFO: Enclave configuration 'MiscSelect' and 'MiscSelectMask' will prevent enclave from running on SGX1 platform. To make it run on SGX1 platform, suggest to set MiscSelectMask[0]=0 and MiscSelect[0]=1.\n"); + // SGX2 metadata only + m_meta_verions = 1u << 1; + se_trace(SE_TRACE_ERROR, "\033[0;32mINFO: Enclave configuration 'MiscSelect' and 'MiscSelectMask' will prevent enclave from running on SGX1 platform. 
To make it run on SGX1 platform, suggest to set MiscSelectMask[0]=0 and MiscSelect[0]=1.\n\033[0m"); return true; } - se_trace(SE_TRACE_ERROR, "INFO: Enclave configuration 'MiscSelect' and 'MiscSelectMask' will work on SGX1 and SGX2 platforms with respective metadata.\n"); + // SGX1 and SGX2 metadata + m_meta_verions = (1u << 1) | 1u; + se_trace(SE_TRACE_ERROR, "\033[0;32mINFO: Enclave configuration 'MiscSelect' and 'MiscSelectMask' will work on SGX1 and SGX2 platforms with respective metadata.\n\033[0m"); return true; } + // SGX1 metadata only + m_meta_verions = 1u; if (misc_select_0 == 1) { - se_trace(SE_TRACE_ERROR, "INFO: Enclave configuration 'MiscSelect' and 'MiscSelectMask' will prevent enclave from running on SGX1 platform.\n"); + se_trace(SE_TRACE_ERROR, "\033[0;32mINFO: Enclave configuration 'MiscSelect' and 'MiscSelectMask' will prevent enclave from running on SGX1 platform.\n\033[0m"); return true; } else { - se_trace(SE_TRACE_ERROR, "INFO: SGX1 only enclave, which will run on all platforms.\n"); + se_trace(SE_TRACE_ERROR, "\033[0;32mINFO: SGX1 only enclave, which will run on all platforms.\n\033[0m"); return true; } } diff --git a/sdk/sign_tool/SignTool/manage_metadata.h b/sdk/sign_tool/SignTool/manage_metadata.h index a376edf40..df5bb4a02 100644 --- a/sdk/sign_tool/SignTool/manage_metadata.h +++ b/sdk/sign_tool/SignTool/manage_metadata.h @@ -111,6 +111,7 @@ class CMetadata: private Uncopyable bool user_dynamic(); sgx_misc_select_t get_config_misc_select(); sgx_misc_select_t get_config_misc_mask(); + uint8_t get_meta_versions() { return m_meta_verions; } private: bool get_time(uint32_t *date); bool modify_metadata(const xml_parameter_t *parameter); @@ -133,8 +134,9 @@ class CMetadata: private Uncopyable void* get_rawdata_by_rva(uint64_t rva); bool vaildate_elrange_config(); bool build_elrange_config_entry(); - uint64_t calculate_heap_overhead(); - bool warn_config(); + uint64_t calculate_rts_bk_overhead(); + bool check_config(); + uint8_t m_meta_verions; metadata_t *m_metadata; BinParser *m_parser; diff --git a/sdk/sign_tool/SignTool/sign_tool.cpp b/sdk/sign_tool/SignTool/sign_tool.cpp index 7f71de031..d3a5d0d75 100644 --- a/sdk/sign_tool/SignTool/sign_tool.cpp +++ b/sdk/sign_tool/SignTool/sign_tool.cpp @@ -141,9 +141,9 @@ static bool get_enclave_info(BinParser *parser, bin_fmt_t *bf, uint64_t * meta_o // measure_enclave(): // 1. Get the enclave hash by loading enclave // 2. 
Get the enclave info - metadata offset and enclave file format -static bool measure_enclave(uint8_t *hash, const char *dllpath, const xml_parameter_t *parameter, uint32_t option_flag_bits, metadata_t *metadata, uint64_t *meta_offset) +static bool measure_enclave(uint8_t *hash, const char *dllpath, const xml_parameter_t *parameter, uint32_t option_flag_bits, metadata_t *metadata, uint64_t *meta_offset, uint8_t *meta_versions) { - assert(hash && dllpath && metadata && meta_offset); + assert(hash && dllpath && metadata && meta_offset && meta_versions); bool res = false; off_t file_size = 0; uint64_t quota = 0; @@ -189,6 +189,9 @@ static bool measure_enclave(uint8_t *hash, const char *dllpath, const xml_parame return false; } + // get the versions of metadata we need to output + *meta_versions = meta.get_meta_versions(); + // Collect enclave info if(get_enclave_info(parser.get(), &bin_fmt, meta_offset, false, ENABLE_RESIGN(option_flag_bits)) == false) { @@ -1066,30 +1069,66 @@ static bool append_compatible_metadata(metadata_t *compat_metadata, metadata_t * return true; } -static bool generate_compatible_metadata(metadata_t *metadata, const xml_parameter_t *parameter) +static bool handle_compatible_metadata(metadata_t *compat_metadata, metadata_t *metadata, bool append) +{ + if (append) { + se_trace(SE_TRACE_ERROR, "%s: Append metadata version 0x%lx\n", __FUNCTION__, compat_metadata->version); + return append_compatible_metadata(compat_metadata, metadata); + } else { + // overwrite + memset(metadata, 0, METADATA_SIZE); + if(memcpy_s(metadata, METADATA_SIZE, compat_metadata, compat_metadata->size)) + return false; + se_trace(SE_TRACE_ERROR, "%s: Overwrite with metadata version 0x%lx\n", __FUNCTION__, metadata->version); + return true; + } +} + +static bool generate_compatible_metadata(metadata_t *metadata, const xml_parameter_t *parameter, uint8_t meta_versions) { + if(meta_versions == 0) + { + se_trace(SE_TRACE_ERROR, "metadata version is invalid"); + return false; + } + + bool meta_sgx1_only = ((meta_versions & 3u) == 1u); + bool meta_sgx2_only = ((meta_versions & 3u) == 2u); + bool append = (meta_sgx1_only ? 
false : true); + metadata_t *metadata2 = (metadata_t *)malloc(metadata->size); if(!metadata2) { se_trace(SE_TRACE_ERROR, NO_MEMORY_ERROR); return false; } - SE_TRACE_DEBUG("\n"); - - // append 2_0 metadata - memcpy_s(metadata2, metadata->size, metadata, metadata->size); - //if elrange is set, we can remove this metadata - if(parameter[ELRANGESIZE].value == 0) - { - metadata2->version = META_DATA_MAKE_VERSION(SGX_2_0_MAJOR_VERSION,SGX_2_0_MINOR_VERSION); - if (!append_compatible_metadata(metadata2, metadata)) + + if (memcpy_s(metadata2, metadata->size, metadata, metadata->size)) { + se_trace(SE_TRACE_ERROR, "%s: Error memcpy_s failed\n", __FUNCTION__); + free(metadata2); + return false; + } + + if (!meta_sgx1_only) { + // append 2_0 metadata + // if elrange is set, we can remove this metadata + if(parameter[ELRANGESIZE].value == 0) { - free(metadata2); - return false; + metadata2->version = META_DATA_MAKE_VERSION(SGX_2_0_MAJOR_VERSION,SGX_2_0_MINOR_VERSION); + if (!append_compatible_metadata(metadata2, metadata)) + { + free(metadata2); + return false; + } } } + if (meta_sgx2_only) { + se_trace(SE_TRACE_ERROR, "%s: Only requires SGX2 metadata\n", __FUNCTION__); + return true; + } + // append 1_9 metadata if(parameter[ELRANGESIZE].value != 0) { @@ -1157,7 +1196,7 @@ static bool generate_compatible_metadata(metadata_t *metadata, const xml_paramet { se_trace(SE_TRACE_DEBUG, "%s: Utility thread TD is the last layout\n", __FUNCTION__); metadata_cleanup(metadata2, 0); - ret = append_compatible_metadata(metadata2, metadata); + ret = handle_compatible_metadata(metadata2, metadata, append); free(metadata2); return ret; } @@ -1176,7 +1215,7 @@ static bool generate_compatible_metadata(metadata_t *metadata, const xml_paramet max_rsrv_entry->entry.si_flags = SI_FLAG_NONE; max_rsrv_entry->entry.attributes &= (uint16_t)(~PAGE_ATTR_POST_ADD); } - ret = append_compatible_metadata(metadata2, metadata); + ret = handle_compatible_metadata(metadata2, metadata, append); free(metadata2); return ret; } @@ -1204,7 +1243,7 @@ static bool generate_compatible_metadata(metadata_t *metadata, const xml_paramet if (false == ret) goto end; } - ret = append_compatible_metadata(metadata2, metadata); + ret = handle_compatible_metadata(metadata2, metadata, append); if (false == ret) goto end; ret = dump_metadata_layout(metadata); @@ -1319,7 +1358,7 @@ int main(int argc, char* argv[]) uint32_t option_flag_bits = 0; RSA *rsa = NULL; memset(&metadata_raw, 0, sizeof(metadata_raw)); - + uint8_t meta_versions = 0; #if OPENSSL_VERSION_NUMBER < 0x10100000L OpenSSL_add_all_algorithms(); @@ -1370,7 +1409,7 @@ int main(int argc, char* argv[]) goto clear_return; } - if(measure_enclave(enclave_hash, path[OUTPUT], parameter, option_flag_bits, metadata, &meta_offset) == false) + if(measure_enclave(enclave_hash, path[OUTPUT], parameter, option_flag_bits, metadata, &meta_offset, &meta_versions) == false) { se_trace(SE_TRACE_ERROR, OVERALL_ERROR); goto clear_return; @@ -1389,7 +1428,7 @@ int main(int argc, char* argv[]) se_trace(SE_TRACE_ERROR, OVERALL_ERROR); goto clear_return; } - if(false == generate_compatible_metadata(metadata, parameter)) + if(false == generate_compatible_metadata(metadata, parameter, meta_versions)) { se_trace(SE_TRACE_ERROR, OVERALL_ERROR); goto clear_return; From a0b0ff3669a5117c4cf59ca0cf2f1f4aeb93fc46 Mon Sep 17 00:00:00 2001 From: xxu36 Date: Mon, 18 Jul 2022 12:53:22 +0800 Subject: [PATCH 39/96] Metadata and sdk version selection for compatibility 1. Updated the metadata version to 3.0 2. 
Selected metadata and sdk version so that an enclave with highest metadata version 2.x will downgrade to using metadata version 1.9 Signed-off-by: xxu36 --- common/inc/internal/metadata.h | 6 +++--- common/inc/internal/rts.h | 3 ++- psw/urts/enclave_creator_hw_com.cpp | 2 +- psw/urts/linux/enclave_creator_hw.cpp | 4 ++-- psw/urts/loader.cpp | 2 ++ psw/urts/urts_com.h | 12 ++++++------ sdk/sign_tool/SignTool/sign_tool.cpp | 12 +++++++----- sdk/trts/init_enclave.cpp | 2 +- 8 files changed, 24 insertions(+), 19 deletions(-) diff --git a/common/inc/internal/metadata.h b/common/inc/internal/metadata.h index 70d3c5d10..dac267a81 100644 --- a/common/inc/internal/metadata.h +++ b/common/inc/internal/metadata.h @@ -37,10 +37,10 @@ #pragma pack(1) /* version of metadata */ -#define MAJOR_VERSION 2 //MAJOR_VERSION should not larger than 0ffffffff -#define MINOR_VERSION 4 //MINOR_VERSION should not larger than 0ffffffff +#define MAJOR_VERSION 3 //MAJOR_VERSION should not larger than 0ffffffff +#define MINOR_VERSION 0 //MINOR_VERSION should not larger than 0ffffffff -#define SGX_2_ELRANGE_MAJOR_VERSION 12 +#define SGX_2_ELRANGE_MAJOR_VERSION 13 #define SGX_1_ELRANGE_MAJOR_VERSION 11 #define SGX_MAJOR_VERSION_GAP 10 diff --git a/common/inc/internal/rts.h b/common/inc/internal/rts.h index 308baaab2..612cd8454 100644 --- a/common/inc/internal/rts.h +++ b/common/inc/internal/rts.h @@ -65,7 +65,8 @@ typedef enum SDK_VERSION_2_0, SDK_VERSION_2_1, SDK_VERSION_2_2, - SDK_VERSION_2_3 + SDK_VERSION_2_3, + SDK_VERSION_3_0, } sdk_version_t; typedef struct _system_features diff --git a/psw/urts/enclave_creator_hw_com.cpp b/psw/urts/enclave_creator_hw_com.cpp index 534f7233b..9bfe23d0c 100644 --- a/psw/urts/enclave_creator_hw_com.cpp +++ b/psw/urts/enclave_creator_hw_com.cpp @@ -65,7 +65,7 @@ int EnclaveCreatorHW::initialize(sgx_enclave_id_t enclave_id) init_cpuinfo((uint32_t *)info.cpuinfo_table); info.system_feature_set[0] |= (1ULL << SYS_FEATURE_EXTEND); info.size = sizeof(system_features_t); - info.version = (sdk_version_t)MIN((uint32_t)SDK_VERSION_2_3, enclave->get_enclave_version()); + info.version = (sdk_version_t)MIN((uint32_t)SDK_VERSION_3_0, enclave->get_enclave_version()); info.sealed_key = enclave->get_sealed_key(); info.cpu_core_num = (uint32_t)sysconf(_SC_NPROCESSORS_ONLN); if (is_EDMM_supported(enclave_id)) diff --git a/psw/urts/linux/enclave_creator_hw.cpp b/psw/urts/linux/enclave_creator_hw.cpp index c3976009c..0cb049913 100644 --- a/psw/urts/linux/enclave_creator_hw.cpp +++ b/psw/urts/linux/enclave_creator_hw.cpp @@ -375,7 +375,7 @@ int EnclaveCreatorHW::trim_accept(uint64_t addr) //1. We operate in HW mode //2. CPU has EDMM support //3. Driver has EDMM support -//4. Both the uRTS version and enclave (metadata) version are higher than 1.5 +//4. 
SDK version >= 3.0 bool EnclaveCreatorHW::is_EDMM_supported(sgx_enclave_id_t enclave_id) { bool supported = false, driver_supported = false, cpu_edmm = false; @@ -388,7 +388,7 @@ bool EnclaveCreatorHW::is_EDMM_supported(sgx_enclave_id_t enclave_id) driver_supported = is_driver_compatible(); //return value of get_enclave_version() considers the version of uRTS and enclave metadata - supported = use_se_hw() && cpu_edmm && driver_supported && (enclave->get_enclave_version() >= SDK_VERSION_2_0); + supported = use_se_hw() && cpu_edmm && driver_supported && (enclave->get_enclave_version() >= SDK_VERSION_3_0); return supported; } diff --git a/psw/urts/loader.cpp b/psw/urts/loader.cpp index d92a93302..b80c7fa60 100644 --- a/psw/urts/loader.cpp +++ b/psw/urts/loader.cpp @@ -952,6 +952,7 @@ int CLoader::set_memory_protection() return SGX_ERROR_UNEXPECTED; } +#if 0 if ((META_DATA_MAKE_VERSION(MAJOR_VERSION,MINOR_VERSION) <= m_metadata->version) && get_enclave_creator()->is_EDMM_supported(get_enclave_id())) { @@ -980,6 +981,7 @@ int CLoader::set_memory_protection() } } } +#endif //set memory protection for context ret = set_context_protection(GET_PTR(layout_t, m_metadata, m_metadata->dirs[DIR_LAYOUT].offset), diff --git a/psw/urts/urts_com.h b/psw/urts/urts_com.h index 293e46e72..67ad10603 100644 --- a/psw/urts/urts_com.h +++ b/psw/urts/urts_com.h @@ -155,6 +155,11 @@ static bool check_metadata_version(uint64_t urts_version, uint64_t metadata_vers return false; } + if (MAJOR_VERSION_OF_METADATA(metadata_version)%SGX_MAJOR_VERSION_GAP == 2) + { + return false; + } + return true; } @@ -284,12 +289,7 @@ static int __create_enclave(BinParser &parser, if (MAJOR_VERSION_OF_METADATA(metadata->version) % SGX_MAJOR_VERSION_GAP == MAJOR_VERSION_OF_METADATA(urts_version)% SGX_MAJOR_VERSION_GAP && MINOR_VERSION_OF_METADATA(metadata->version) >= MINOR_VERSION_OF_METADATA(urts_version)) { - enclave_version = SDK_VERSION_2_3; - } - else if (MAJOR_VERSION_OF_METADATA(metadata->version) % SGX_MAJOR_VERSION_GAP == MAJOR_VERSION_OF_METADATA(urts_version)% SGX_MAJOR_VERSION_GAP && - MINOR_VERSION_OF_METADATA(metadata->version) < MINOR_VERSION_OF_METADATA(urts_version)) - { - enclave_version = SDK_VERSION_2_0; + enclave_version = SDK_VERSION_3_0; } // initialize the enclave object diff --git a/sdk/sign_tool/SignTool/sign_tool.cpp b/sdk/sign_tool/SignTool/sign_tool.cpp index d3a5d0d75..a6710ec1e 100644 --- a/sdk/sign_tool/SignTool/sign_tool.cpp +++ b/sdk/sign_tool/SignTool/sign_tool.cpp @@ -1096,6 +1096,11 @@ static bool generate_compatible_metadata(metadata_t *metadata, const xml_paramet bool meta_sgx2_only = ((meta_versions & 3u) == 2u); bool append = (meta_sgx1_only ? 
false : true); + if (meta_sgx2_only) { + se_trace(SE_TRACE_ERROR, "%s: Only requires SGX2 metadata\n", __FUNCTION__); + return true; + } + metadata_t *metadata2 = (metadata_t *)malloc(metadata->size); if(!metadata2) { @@ -1110,6 +1115,7 @@ static bool generate_compatible_metadata(metadata_t *metadata, const xml_paramet return false; } +#if 0 if (!meta_sgx1_only) { // append 2_0 metadata // if elrange is set, we can remove this metadata @@ -1123,11 +1129,7 @@ static bool generate_compatible_metadata(metadata_t *metadata, const xml_paramet } } } - - if (meta_sgx2_only) { - se_trace(SE_TRACE_ERROR, "%s: Only requires SGX2 metadata\n", __FUNCTION__); - return true; - } +#endif // append 1_9 metadata if(parameter[ELRANGESIZE].value != 0) diff --git a/sdk/trts/init_enclave.cpp b/sdk/trts/init_enclave.cpp index 7276edcbf..917316844 100644 --- a/sdk/trts/init_enclave.cpp +++ b/sdk/trts/init_enclave.cpp @@ -162,7 +162,7 @@ extern "C" int init_enclave(void *enclave_base, void *ms) { EDMM_supported = 0; } - else if (g_sdk_version >= SDK_VERSION_2_0) + else if (g_sdk_version >= SDK_VERSION_3_0) { EDMM_supported = feature_supported((const uint64_t *)sys_features.system_feature_set, 0); } From 92a08a3fef0c6e8f26cddb478fcf3f9276699f56 Mon Sep 17 00:00:00 2001 From: xxu36 Date: Tue, 23 Aug 2022 13:37:04 +0800 Subject: [PATCH 40/96] Fix bookkeeping overhead Signed-off-by: xxu36 --- sdk/sign_tool/SignTool/manage_metadata.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/sign_tool/SignTool/manage_metadata.cpp b/sdk/sign_tool/SignTool/manage_metadata.cpp index 10d850bb3..e97d16c08 100644 --- a/sdk/sign_tool/SignTool/manage_metadata.cpp +++ b/sdk/sign_tool/SignTool/manage_metadata.cpp @@ -1155,7 +1155,7 @@ bool CMetadata::build_layout_table() // SGX2 metadata required if ((meta_versions & 2u) == 2u) { - uint64_t rts_bk_overhead = calculate_rts_bk_overhead(); + uint64_t rts_bk_overhead = calculate_rts_bk_overhead() + 0x20000; uint64_t user_region_size = ROUND_TO_PAGE(rts_bk_overhead); se_trace(SE_TRACE_ERROR, "RTS bookkeeping overhead: 0x%016llX\n", user_region_size); From 5a8e3f12b7e36d7354d9e05cdb38c24c28c0999f Mon Sep 17 00:00:00 2001 From: Haitao Huang <4699115+haitaohuang@users.noreply.github.com> Date: Mon, 18 Jul 2022 20:35:29 -0700 Subject: [PATCH 41/96] update API test to use UserRegionSize in config Signed-off-by: Haitao Huang --- external/sgx-emm/api_tests/Enclave/config.xml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/external/sgx-emm/api_tests/Enclave/config.xml b/external/sgx-emm/api_tests/Enclave/config.xml index 2dafb0872..f96ddcd47 100644 --- a/external/sgx-emm/api_tests/Enclave/config.xml +++ b/external/sgx-emm/api_tests/Enclave/config.xml @@ -11,9 +11,7 @@ 0xF0000000 0x9000 0x08000 - 0x00001000 - 0x00100000 - 0x90000000 + 0x90000000 From 4f4d1f18651596d62a43f1ae924229ff111803ea Mon Sep 17 00:00:00 2001 From: xxu36 Date: Fri, 5 Aug 2022 16:32:22 +0800 Subject: [PATCH 42/96] Update enclave common loader interface Signed-off-by: xxu36 --- .gitmodules | 1 + psw/enclave_common/sgx_enclave_common.h | 169 +++++++++-------- psw/enclave_common/sgx_mm_ocalls.cpp | 239 +++++++++++++----------- psw/urts/linux/enclave_creator_hw.cpp | 10 +- psw/urts/linux/urts_emm.cpp | 4 +- 5 files changed, 237 insertions(+), 186 deletions(-) diff --git a/.gitmodules b/.gitmodules index f00c461f3..a4e439761 100644 --- a/.gitmodules +++ b/.gitmodules @@ -20,3 +20,4 @@ [submodule "external/sgx-emm/emm_src"] path = external/sgx-emm/emm_src url = 
https://github.com/intel/sgx-emm + branch = dev diff --git a/psw/enclave_common/sgx_enclave_common.h b/psw/enclave_common/sgx_enclave_common.h index d799b7be3..32f4ef21d 100644 --- a/psw/enclave_common/sgx_enclave_common.h +++ b/psw/enclave_common/sgx_enclave_common.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011-2021 Intel Corporation. All rights reserved. + * Copyright (C) 2011-2022 Intel Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -35,11 +35,23 @@ #include #include #include +#ifdef _MSC_VER +#include +#endif #ifdef __cplusplus extern "C" { #endif +#ifdef _MSC_VER +/* The following macros are MSVC only */ +#define COMM_API cdecl +#define COMM_IN _In_ +#define COMM_IN_OPT _In_opt_ +#define COMM_OUT _Out_ +#define COMM_OUT_OPT _Out_opt_ +#define COMM_IN_OUT _Inout_ +#else /* The following macros are for GCC only */ #define COMM_API #define COMM_IN @@ -47,9 +59,12 @@ extern "C" { #define COMM_OUT #define COMM_OUT_OPT #define COMM_IN_OUT +#endif #ifndef ENCLAVE_TYPE_SGX #define ENCLAVE_TYPE_SGX 0x00000001 /* An enclave for the Intel Software Guard Extensions (SGX) architecture version 1. */ +#endif +#ifndef ENCLAVE_TYPE_SGX2 #define ENCLAVE_TYPE_SGX2 0x00000002 /* An enclave for the Intel Software Guard Extensions (SGX) architecture version 2. */ #endif #define ENCLAVE_TYPE_SGX1 ENCLAVE_TYPE_SGX @@ -87,6 +102,7 @@ typedef enum { ENCLAVE_PAGE_EXECUTE = 1 << 2, /* Enables execute access to the committed region of pages. */ ENCLAVE_PAGE_THREAD_CONTROL = 1 << 8, /* The page contains a thread control structure. */ ENCLAVE_PAGE_REG = 2 << 8, /* The page contains a PT_REG page. */ + ENCLAVE_PAGE_TRIM = 4 << 8, /* The page is trimmed(PT_TRIM). This is for pages which will be trimmed (removed) from the enclave. */ ENCLAVE_PAGE_SS_FIRST = 5 << 8, /* The page contains the first page of a Shadow Stack (future). */ ENCLAVE_PAGE_SS_REST = 6 << 8, /* The page contains a non-first page of a Shadow Stack (future). */ ENCLAVE_PAGE_UNVALIDATED = 1 << 12, /* The page contents that you supply are excluded from measurement and content validation. */ @@ -106,7 +122,7 @@ typedef enum { ENCLAVE_EMA_GROWSUP = 32, /* Gives a hint to the kernel that the application will access pages below the last accessed page. The kernel may want to EAUG pages from lower to higher addresses with no gaps in addresses below the last committed page. */ -} enclave_alloc_flags; +} enclave_alloc_flags_t; typedef enum { ENCLAVE_LAUNCH_TOKEN = 0x1 @@ -124,7 +140,7 @@ typedef enum { #define _ENCLAVE_CREATE_EX_FEATURES_MASK_ (((uint32_t)-1) >> (ENCLAVE_CREATE_MAX_EX_FEATURES_COUNT - 1 - _ENCLAVE_CREATE_LAST_EX_FEATURE_IDX_)) -typedef struct enclave_elrange{ +typedef struct enclave_elrange { uint64_t enclave_image_address; uint64_t elrange_start_address; uint64_t elrange_size; @@ -267,61 +283,76 @@ bool COMM_API enclave_set_information( COMM_IN size_t input_info_size, COMM_OUT_OPT uint32_t* enclave_error); -/* +/* enclave_get_features() + * Parameters: + * None + * Return Value: + * Returns flags indicating enclave features which are supported on the platform. + */ +uint32_t COMM_API enclave_get_features(); + +/* enclave_alloc() * Call OS to reserve region for EAUG, immediately or on-demand. * - * @param[in] addr Desired page aligned start address. - * @param[in] length Size of the region in bytes of multiples of page size. 
- * @param[in] page_properties Page types to be allocated, must be one of these: - * - ENCLAVE_PAGE_REG: regular page type. This is the default if not specified. - * - ENCLAVE_PAGE_SS_FIRST: the first page in shadow stack. - * - ENCLAVE_PAGE_SS_REST: the rest page in shadow stack. - * @param[in] alloc_flags A bitwise OR of flags describing committing mode, committing - * order, address preference, page type. The untrusted side. - * implementation should always invoke mmap syscall with MAP_SHARED|MAP_FIXED_NOREPLACE, and - * translate following additional bits to proper parameters invoking mmap or other SGX specific - * syscall(s) provided by the kernel. - * The alloc_flags param of this interface should include exactly one of following for committing mode: - * - ENCLAVE_EMA_COMMIT_NOW: reserves memory range with ENCLAVE_PAGE_READ|SGX_EMA_PROT_WRITE, if supported, - * kernel is given a hint to EAUG EPC pages for the area as soon as possible. - * - ENCLAVE_EMA_COMMIT_ON_DEMAND: reserves memory range, EPC pages can be EAUGed upon #PF. + * Parameters + * targt_addr [in] - Desired page aligned start address. + * target_size [in] - Size of the region in bytes of multiples of page size. + * data_properties [in] - Page types to be allocated, must be one of these: + * - ENCLAVE_PAGE_REG: regular page type. This is the default if not specified. + * - ENCLAVE_PAGE_SS_FIRST: the first page in shadow stack. + * - ENCLAVE_PAGE_SS_REST: the rest page in shadow stack. + * alloc_flags [in] - A bitwise OR of flags describing committing mode, committing + * order, address preference, page type. The untrusted side. Implementation + * should always invoke mmap syscall with MAP_SHARED|MAP_FIXED_NOREPLACE, and + * translate following additional bits to proper parameters invoking mmap or + * other SGX specific syscall(s) provided by the kernel. The alloc_flags param + * of this interface should include exactly one of following for committing mode: + * - ENCLAVE_EMA_COMMIT_NOW: reserves memory range with ENCLAVE_PAGE_READ|SGX_EMA_PROT_WRITE, if supported, + * kernel is given a hint to EAUG EPC pages for the area as soon as possible. + * - ENCLAVE_EMA_COMMIT_ON_DEMAND: reserves memory range, EPC pages can be EAUGed upon #PF. * ORed with zero or one of the committing order flags: - * - ENCLAVE_EMA_GROWSDOWN: if supported, a hint given for the kernel to EAUG pages from higher - * to lower addresses, no gaps in addresses above the last committed. - * - ENCLAVE_EMA_GROWSUP: if supported, a hint given for the kernel to EAUG pages from lower - * to higher addresses, no gaps in addresses below the last committed. - * @retval ENCLAVE_ERROR_SUCCESS(0) The operation was successful. - * @retval ENCLAVE_NOT_SUPPORTED: Enavle feature is not supported by the system - * @retval ENCLAVE_LOST: may be returned if the enclave has been removed or if it has not been initialized (via EINIT) - * @retval ENCLAVE_INVALID_ADDRESS: the start address does not point to an enclave. - * @retval ENCLAVE_INVALID_PARAMETER: an invalid combination of flags was provided. - * @retval ENCLAVE_OUT_OF_MEMORY: No EPC left (some OSes like Linux), or system is out of memory for internal allocation by OS or this function. - * @retval ENCLAVE_DEVICE_NO_MEMORY: NO EPC left (some OSes like Windows) - * @retval ENCLAVE_INVALID_ADDRESS: address does not point to an enclave or valid memory within the enclave - * @retval ENCLAVE_NOT_INITIALIZED: may be returned if the enclave has not been initialized (via EINIT). 
- * Some configurations may give ENCLAVE_LOST if the enclave has not been initialized. - * @retval ENCLAVE_UNEXPECTED, unexpected error. + * - ENCLAVE_EMA_GROWSDOWN: if supported, a hint given for the kernel to EAUG pages from higher + * to lower addresses, no gaps in addresses above the last committed. + * - ENCLAVE_EMA_GROWSUP: if supported, a hint given for the kernel to EAUG pages from lower + * to higher addresses, no gaps in addresses below the last committed. + * enclave_error [out, optional] - An optional pointer to a variable that receives an enclave error code. + * + * Return Values: + * ENCLAVE_ERROR_SUCCESS(0): The operation was successful. + * ENCLAVE_NOT_SUPPORTED: Enavle feature is not supported by the system + * ENCLAVE_LOST: May be returned if the enclave has been removed or if it has not been initialized (via EINIT) + * ENCLAVE_INVALID_ADDRESS: The start address does not point to an enclave. + * ENCLAVE_INVALID_PARAMETER: An invalid combination of flags was provided. + * ENCLAVE_OUT_OF_MEMORY: No EPC left (some OSes like Linux), or system is out of memory for internal allocation by OS or this function. + * ENCLAVE_DEVICE_NO_MEMORY: NO EPC left (some OSes like Windows) + * ENCLAVE_INVALID_ADDRESS: Address does not point to an enclave or valid memory within the enclave + * ENCLAVE_NOT_INITIALIZED: May be returned if the enclave has not been initialized (via EINIT). + * Some configurations may give ENCLAVE_LOST if the enclave has not been initialized. + * ENCLAVE_UNEXPECTED: Unexpected error. */ -uint32_t COMM_API enclave_alloc( - COMM_IN uint64_t addr, - COMM_IN size_t length, - COMM_IN uint32_t page_properties, - COMM_IN uint32_t alloc_flags); +uint32_t COMM_API enclave_alloc ( + COMM_IN void* target_addr, + COMM_IN size_t target_size, + COMM_IN uint32_t data_properties, + COMM_IN uint32_t alloc_flags, + COMM_OUT_OPT uint32_t* enclave_error +); -/* +/* enclave_modify() * Call OS to change permissions, type, or notify EACCEPT done after TRIM. * - * @param[in] addr Start address of the memory to change protections. - * @param[in] length Length of the area. This must be a multiple of the page size. - * @param[in] page_properties_from The original EPCM flags of the EPC pages to be modified. + * Parameters: + * target_addr [in] - Start address of the memory to change protections. + * target_size [in] - Length of the area. This must be a multiple of the page size. + * from_data_properties [in] - The original EPCM flags of the EPC pages to be modified. * Must be bitwise OR of following: * ENCLAVE_PAGE_READ * ENCLAVE_PAGE_WRITE * ENCLAVE_PAGE_EXEC * ENCLAVE_PAGE_REG: regular page, changeable to TRIM or TCS * ENCLAVE_PAGE_TRIM: signal to the kernel EACCEPT is done for TRIM pages. - * @param[in] page_properties_to The target EPCM flags. This must be bitwise OR of following: + * to_data_properties [in] - The target EPCM flags. This must be bitwise OR of following: * ENCLAVE_PAGE_READ * ENCLAVE_PAGE_WRITE * ENCLAVE_PAGE_EXEC @@ -329,45 +360,37 @@ uint32_t COMM_API enclave_alloc( * range for trimmed pages may still be reserved by enclave with * proper permissions. * ENCLAVE_PAGE_TCS: change the page type to PT_TCS - * @retval ENCLAVE_ERROR_SUCCESS(0) The operation was successful. - * @retval ENCLAVE_NOT_SUPPORTED: Enclave feature is not supported by the system - * @retval ENCLAVE_LOST: may be returned if the enclave has been removed or if it has not been initialized (via EINIT) - * @retval ENCLAVE_INVALID_PARAMETER: an invalid combination of flags was provided. 
- * @retval ENCLAVE_OUT_OF_MEMORY: No EPC left (some OSes like Linux), or system is out of memory for internal allocation by OS or this function. - * @retval ENCLAVE_DEVICE_NO_MEMORY: NO EPC left (some OSes like Windows) - * @retval ENCLAVE_INVALID_ADDRESS: address does not point to an enclave or valid memory within the enclave - * @retval ENCLAVE_NOT_INITIALIZED: may be returned if the enclave has not been initialized (via EINIT). - * Some configurations may give ENCLAVE_LOST if the enclave has not been initialized. - * @retval ENCLAVE_UNEXPECTED, unexpected error. + * + * Return Values: + * ENCLAVE_ERROR_SUCCESS(0): The operation was successful. + * ENCLAVE_NOT_SUPPORTED: Enclave feature is not supported by the system + * ENCLAVE_LOST: May be returned if the enclave has been removed or if it has not been initialized (via EINIT) + * ENCLAVE_INVALID_PARAMETER: An invalid combination of flags was provided. + * ENCLAVE_OUT_OF_MEMORY: No EPC left (some OSes like Linux), or system is out of memory for internal allocation by OS or this function. + * ENCLAVE_DEVICE_NO_MEMORY: NO EPC left (some OSes like Windows) + * ENCLAVE_INVALID_ADDRESS: Address does not point to an enclave or valid memory within the enclave + * ENCLAVE_NOT_INITIALIZED: May be returned if the enclave has not been initialized (via EINIT). + * Some configurations may give ENCLAVE_LOST if the enclave has not been initialized. + * ENCLAVE_UNEXPECTED: Unexpected error. */ -uint32_t COMM_API enclave_modify( - COMM_IN uint64_t addr, - COMM_IN size_t length, - COMM_IN uint32_t page_properties_from, - COMM_IN uint32_t page_properties_to); - - - +uint32_t COMM_API enclave_modify ( + COMM_IN void* target_addr, + COMM_IN size_t target_size, + COMM_IN uint32_t from_data_properties, + COMM_IN uint32_t to_data_properties, + COMM_OUT_OPT uint32_t* enclave_error +); /** * The enclave features flags describe additional enclave features * which are supported by the platform. A value of 0 indicates not features are supported. */ -typedef enum -{ +typedef enum { ENCLAVE_FEATURE_NONE = 0, ENCLAVE_FEATURE_SGX1 = 0x00000001, /* The platform (HW and OS) supports SGX1 */ ENCLAVE_FEATURE_SGX2 = 0x00000002, /* The platform (HW and OS) supports SGX2 */ -}enclave_features; - -/* - * Get enclave features which are supported by the platform. - * @return an enclave_features enum indicating enclave features which are supported on the platform - * - */ -uint32_t COMM_API enclave_get_features(); - +}enclave_features_t; #ifdef __cplusplus } diff --git a/psw/enclave_common/sgx_mm_ocalls.cpp b/psw/enclave_common/sgx_mm_ocalls.cpp index 80aaf0ead..835e2e467 100644 --- a/psw/enclave_common/sgx_mm_ocalls.cpp +++ b/psw/enclave_common/sgx_mm_ocalls.cpp @@ -6,83 +6,72 @@ using namespace std; #define PROT_MASK (PROT_READ|PROT_WRITE|PROT_EXEC) -/* - * Call OS to reserve region for EAUG, immediately or on-demand. - * - * @param[in] addr Desired page aligned start address. - * @param[in] length Size of the region in bytes of multiples of page size. - * @param[in] page_properties Page types to be allocated, must be one of these: - * - ENCLAVE_PAGE_REG: regular page type. This is the default if not specified. - * - ENCLAVE_PAGE_SS_FIRST: the first page in shadow stack. - * - ENCLAVE_PAGE_SS_REST: the rest page in shadow stack. - * @param[in] alloc_flags A bitwise OR of flags describing committing mode, committing - * order, address preference, page type. The untrusted side. 
- * implementation should always invoke mmap syscall with MAP_SHARED|MAP_FIXED_NOREPLACE, and - * translate following additional bits to proper parameters invoking mmap or other SGX specific - * syscall(s) provided by the kernel. - * The alloc_flags param of this interface should include exactly one of following for committing mode: - * - ENCLAVE_EMA_COMMIT_NOW: reserves memory range with ENCLAVE_PAGE_READ|SGX_EMA_PROT_WRITE, if supported, - * kernel is given a hint to EAUG EPC pages for the area as soon as possible. - * - ENCLAVE_EMA_COMMIT_ON_DEMAND: reserves memory range, EPC pages can be EAUGed upon #PF. - * ORed with zero or one of the committing order flags: - * - ENCLAVE_EMA_GROWSDOWN: if supported, a hint given for the kernel to EAUG pages from higher - * to lower addresses, no gaps in addresses above the last committed. - * - ENCLAVE_EMA_GROWSUP: if supported, a hint given for the kernel to EAUG pages from lower - * to higher addresses, no gaps in addresses below the last committed. - * @retval ENCLAVE_ERROR_SUCCESS(0) The operation was successful. - * @retval ENCLAVE_NOT_SUPPORTED: feature is not supported by the system - * @retval ENCLAVE_LOST: may be returned if the enclave has been removed or if it has not been initialized (via EINIT) - * @retval ENCLAVE_INVALID_ADDRESS: the start address does not point to an enclave. - * @retval ENCLAVE_INVALID_PARAMETER: an invalid combinations of parameters. - * @retval ENCLAVE_OUT_OF_MEMORY: No EPC left (some OSes like Linux), or system is out of memory for internal allocation by OS or this function. - * @retval ENCLAVE_DEVICE_NO_MEMORY: NO EPC left (some OSes like Windows) - * @retval ENCLAVE_INVALID_ADDRESS: address does not point to an enclave or valid memory within the enclave - * @retval ENCLAVE_NOT_INITIALIZED: may be returned if the enclave has not been initialized (via EINIT). - * Some configurations may give ENCLAVE_LOST if the enclave has not been initialized. - * @retval ENCLAVE_UNEXPECTED, unexpected error. 
- */ uint32_t COMM_API enclave_alloc( - COMM_IN uint64_t addr, - COMM_IN size_t length, - COMM_IN uint32_t page_properties, - COMM_IN uint32_t alloc_flags) + COMM_IN void* target_addr, + COMM_IN size_t target_size, + COMM_IN uint32_t data_properties, + COMM_IN uint32_t alloc_flags, + COMM_OUT_OPT uint32_t* enclave_error) { int ret = ENCLAVE_UNEXPECTED; SE_TRACE(SE_TRACE_DEBUG, - "enclave_alloc for 0x%llX ( %llX ) with alloc flags = 0x%lX\n", - addr, length, alloc_flags); + "enclave_alloc for %p ( %llX ) with alloc flags = 0x%lX\n", + target_addr, target_size, alloc_flags); if (s_driver_type == SGX_DRIVER_DCAP) { + if (enclave_error != NULL) + *enclave_error = ret; return ret; } if (s_driver_type == SGX_DRIVER_OUT_OF_TREE) { - return mprotect((void *)addr, length, PROT_WRITE | PROT_READ); + ret = mprotect(target_addr, target_size, PROT_WRITE | PROT_READ); + if ((ret != 0) && (enclave_error != NULL)) + { + *enclave_error = ENCLAVE_UNEXPECTED; + } + return ret; } - int enclave_fd = get_file_handle_from_address((void *)addr); + int enclave_fd = get_file_handle_from_address(target_addr); if (enclave_fd == -1) + { + if (enclave_error != NULL) + *enclave_error = ENCLAVE_INVALID_ADDRESS; return ENCLAVE_INVALID_ADDRESS; + } int map_flags = MAP_SHARED | MAP_FIXED; //!TODO: support COMMIT_NOW when kernel supports if (alloc_flags & ENCLAVE_EMA_COMMIT_NOW) { } //!TODO support CET - int type = page_properties; + int type = data_properties; if((type == ENCLAVE_PAGE_SS_FIRST) | (type == ENCLAVE_PAGE_SS_REST)) + { + if (enclave_error != NULL) + *enclave_error = ENCLAVE_NOT_SUPPORTED; return ENCLAVE_NOT_SUPPORTED; - if((type == ENCLAVE_PAGE_SS_FIRST) && length > SE_PAGE_SIZE) + } + if((type == ENCLAVE_PAGE_SS_FIRST) && target_size > SE_PAGE_SIZE) + { + if (enclave_error != NULL) + *enclave_error = ENCLAVE_INVALID_PARAMETER; return ENCLAVE_INVALID_PARAMETER; - void *out = mmap((void *)addr, length, PROT_WRITE | PROT_READ, map_flags, enclave_fd, 0); + } + void *out = mmap(target_addr, target_size, PROT_WRITE | PROT_READ, map_flags, enclave_fd, 0); if (out == MAP_FAILED) { ret = errno; SE_TRACE(SE_TRACE_WARNING, "mmap failed, error = %d\n", ret); ret = error_driver2api(-1, ret); - }else + if (enclave_error != NULL) + *enclave_error = ret; + } + else + { ret = 0; + } return ret; } @@ -272,64 +261,41 @@ static int emodpr_legacy(int fd, uint64_t addr, uint64_t size, uint64_t flag) return SGX_SUCCESS; } -/* - * Call OS to change permissions, type, or notify EACCEPT done after TRIM. - * - * @param[in] addr Start address of the memory to change protections. - * @param[in] length Length of the area. This must be a multiple of the page size. - * @param[in] page_properties_from The original EPCM flags of the EPC pages to be modified. - * Must be bitwise OR of following: - * ENCLAVE_PAGE_READ - * ENCLAVE_PAGE_WRITE - * ENCLAVE_PAGE_EXEC - * ENCLAVE_PAGE_REG: regular page, changeable to TRIM or TCS - * ENCLAVE_PAGE_TRIM: signal to the kernel EACCEPT is done for TRIM pages. - * @param[in] page_properties_to The target EPCM flags. This must be bitwise OR of following: - * ENCLAVE_PAGE_READ - * ENCLAVE_PAGE_WRITE - * ENCLAVE_PAGE_EXEC - * ENCLAVE_PAGE_TRIM: change the page type to PT_TRIM. Note the address - * range for trimmed pages may still be reserved by enclave with - * proper permissions. - * ENCLAVE_PAGE_TCS: change the page type to PT_TCS - * @retval ENCLAVE_ERROR_SUCCESS(0) The operation was successful. 
- * @retval ENCLAVE_NOT_SUPPORTED: SGX EDMM is not supported by the system - * @retval ENCLAVE_LOST: may be returned if the enclave has been removed or if it has not been initialized (via EINIT) - * @retval ENCLAVE_INVALID_PARAMETER: an invalid combination of flags was provided. - * @retval ENCLAVE_OUT_OF_MEMORY: No EPC left (some OSes like Linux), or system is out of memory for internal allocation by OS or this function. - * @retval ENCLAVE_DEVICE_NO_MEMORY: NO EPC left (some OSes like Windows) - * @retval ENCLAVE_INVALID_ADDRESS: address does not point to an enclave or valid memory within the enclave - * @retval ENCLAVE_NOT_INITIALIZED: may be returned if the enclave has not been initialized (via EINIT). - * Some configurations may give ENCLAVE_LOST if the enclave has not been initialized. - * @retval ENCLAVE_UNEXPECTED, unexpected error. - */ - uint32_t COMM_API enclave_modify( - COMM_IN uint64_t addr, - COMM_IN size_t length, - COMM_IN uint32_t page_properties_from, - COMM_IN uint32_t page_properties_to) + COMM_IN void* target_addr, + COMM_IN size_t target_size, + COMM_IN uint32_t from_data_properties, + COMM_IN uint32_t to_data_properties, + COMM_OUT_OPT uint32_t* enclave_error) { int ret = ENCLAVE_UNEXPECTED; SE_TRACE(SE_TRACE_DEBUG, - "enclave_modify for 0x%llX ( %llX ) from 0x%lX to %lX\n", - addr, length, page_properties_from, page_properties_to); + "enclave_modify for %p ( %llX ) from 0x%lX to %lX\n", + target_addr, target_size, from_data_properties, to_data_properties); if (s_driver_type == SGX_DRIVER_DCAP) { + if (enclave_error != NULL) + *enclave_error = ENCLAVE_NOT_SUPPORTED; return ENCLAVE_NOT_SUPPORTED; } - uint64_t enclave_base = (uint64_t)get_enclave_base_address_from_address((void *)addr); + uint64_t enclave_base = (uint64_t)get_enclave_base_address_from_address(target_addr); if (enclave_base == 0) { + if (enclave_error != NULL) + *enclave_error = ENCLAVE_INVALID_ADDRESS; return ENCLAVE_INVALID_ADDRESS; } - if (length % SE_PAGE_SIZE != 0) + if (target_size % SE_PAGE_SIZE != 0) + { + if (enclave_error != NULL) + *enclave_error = ENCLAVE_INVALID_PARAMETER; return ENCLAVE_INVALID_PARAMETER; + } function _trim = trim; function _trim_accept = trim_accept; function _mktcs = mktcs; function _emodpr = emodpr; - int fd = get_file_handle_from_address((void *)addr); + int fd = get_file_handle_from_address(target_addr); if (s_driver_type == SGX_DRIVER_OUT_OF_TREE) { _trim = trim_legacy; @@ -338,35 +304,54 @@ uint32_t COMM_API enclave_modify( _emodpr = emodpr_legacy; fd = s_hdevice; } - if(fd == -1) return ENCLAVE_INVALID_ADDRESS; + if(fd == -1) + { + if (enclave_error != NULL) + *enclave_error = ENCLAVE_INVALID_ADDRESS; + return ENCLAVE_INVALID_ADDRESS; + } - int type_to = (page_properties_to & SGX_EMA_PAGE_TYPE_MASK); - int type_from = (page_properties_from & SGX_EMA_PAGE_TYPE_MASK); + int type_to = (to_data_properties & SGX_EMA_PAGE_TYPE_MASK); + int type_from = (from_data_properties & SGX_EMA_PAGE_TYPE_MASK); if (type_from == SGX_EMA_PAGE_TYPE_TRIM && type_to != SGX_EMA_PAGE_TYPE_TRIM) { + if (enclave_error != NULL) + *enclave_error = ENCLAVE_INVALID_PARAMETER; return ENCLAVE_INVALID_PARAMETER; } - int prot_to = (page_properties_to & PROT_MASK); - int prot_from = (page_properties_from & PROT_MASK); + int prot_to = (to_data_properties & PROT_MASK); + int prot_from = (from_data_properties & PROT_MASK); if ((prot_to != prot_from) && (type_to != type_from)) { + if (enclave_error != NULL) + *enclave_error = ENCLAVE_INVALID_PARAMETER; return ENCLAVE_INVALID_PARAMETER; } if ((type_from 
& type_to & SGX_EMA_PAGE_TYPE_TRIM)) { //user space can only do EACCEPT for PT_TRIM type - ret = _trim_accept(fd, addr, length); + ret = _trim_accept(fd, target_addr, target_size); if (ret) - return error_driver2api(-1, ret); + { + ret = error_driver2api(-1, ret); + if (enclave_error != NULL) + *enclave_error = ret; + return ret; + } if (prot_to == PROT_NONE) { //EACCEPT done and notified. //if user wants to remove permissions, //only mprotect is needed - ret = mprotect((void *)addr, length, prot_to); + ret = mprotect(target_addr, target_size, prot_to); if (ret == -1) - return error_driver2api(ret, errno); + { + ret = error_driver2api(ret, errno); + if (enclave_error != NULL) + *enclave_error = ret; + return ret; + } } return ret; } @@ -375,26 +360,51 @@ uint32_t COMM_API enclave_modify( { assert(type_from != SGX_EMA_PAGE_TYPE_TRIM); if (prot_to != prot_from) + { + if (enclave_error != NULL) + *enclave_error = ENCLAVE_INVALID_PARAMETER; return ENCLAVE_INVALID_PARAMETER; - ret = _trim(fd, addr, length); + } + ret = _trim(fd, target_addr, target_size); if (ret) - return error_driver2api(-1, ret); + { + ret = error_driver2api(-1, ret); + if (enclave_error != NULL) + *enclave_error = ret; + return ret; + } return 0; } if (type_to == SGX_EMA_PAGE_TYPE_TCS) { if (type_from != SGX_EMA_PAGE_TYPE_REG) + { + if (enclave_error != NULL) + *enclave_error = ENCLAVE_INVALID_PARAMETER; return ENCLAVE_INVALID_PARAMETER; + } if ((prot_from != (SGX_EMA_PROT_READ_WRITE)) && prot_to != prot_from) + { + if (enclave_error != NULL) + *enclave_error = ENCLAVE_INVALID_PARAMETER; return ENCLAVE_INVALID_PARAMETER; - ret = _mktcs(fd, addr, length); + } + ret = _mktcs(fd, target_addr, target_size); if (ret) - return error_driver2api(-1, ret); + { + ret = error_driver2api(-1, ret); + if (enclave_error != NULL) + *enclave_error = ret; + return ret; + } return 0; } if (type_to != type_from) + { + if (enclave_error != NULL) + *enclave_error = ENCLAVE_INVALID_PARAMETER; return ENCLAVE_INVALID_PARAMETER; // type_to == type_from // this is for emodpr to epcm.NONE, enclave EACCEPT with pte.R @@ -403,7 +413,12 @@ uint32_t COMM_API enclave_modify( { ret = mprotect((void *)addr, length, prot_to); if (ret == -1) - return error_driver2api(ret, errno); + { + ret = error_driver2api(ret, errno); + if (enclave_error != NULL) + *enclave_error = ret; + return ret; + } } if (prot_to == prot_from) @@ -413,12 +428,19 @@ uint32_t COMM_API enclave_modify( // Permissions changes. Only do emodpr for PT_REG pages if ((type_from & type_to & SGX_EMA_PAGE_TYPE_MASK) == SGX_EMA_PAGE_TYPE_REG) { - ret = _emodpr(fd, addr, length, prot_to); + ret = _emodpr(fd, target_addr, target_size, prot_to); if (ret) - return error_driver2api(-1, ret); + { + ret = error_driver2api(-1, ret); + if (enclave_error != NULL) + *enclave_error = ret; + return ret; + } } else { + if (enclave_error != NULL) + *enclave_error = ENCLAVE_INVALID_PARAMETER; return ENCLAVE_INVALID_PARAMETER; } //EACCEPT needs at least pte.R, PROT_NONE case done above. 
@@ -426,7 +448,12 @@ uint32_t COMM_API enclave_modify( { ret = mprotect((void *)addr, length, prot_to); if (ret == -1) - return error_driver2api(ret, errno); + { + ret = error_driver2api(ret, errno); + if (enclave_error != NULL) + *enclave_error = ret; + return ret; + } } return ret; } diff --git a/psw/urts/linux/enclave_creator_hw.cpp b/psw/urts/linux/enclave_creator_hw.cpp index 0cb049913..1c8a79bad 100644 --- a/psw/urts/linux/enclave_creator_hw.cpp +++ b/psw/urts/linux/enclave_creator_hw.cpp @@ -308,7 +308,7 @@ void EnclaveCreatorHW::close_device() int EnclaveCreatorHW::alloc(uint64_t addr, uint64_t size, int flag) { - int ret = enclave_alloc(addr, size, flag, SGX_EMA_COMMIT_ON_DEMAND); + int ret = enclave_alloc(addr, size, flag, SGX_EMA_COMMIT_ON_DEMAND, NULL); if (ret) { SE_TRACE(SE_TRACE_ERROR, "SGX_IOC_ENCLAVE_alloc failed %d\n", ret); @@ -322,7 +322,7 @@ int EnclaveCreatorHW::emodpr(uint64_t addr, uint64_t size, uint64_t flag) { int ret = enclave_modify(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC|SGX_EMA_PAGE_TYPE_REG, - (int) (flag|SGX_EMA_PAGE_TYPE_REG)); + (int) (flag|SGX_EMA_PAGE_TYPE_REG), NULL); if (ret) { SE_TRACE(SE_TRACE_ERROR, "SGX_IOC_ENCLAVE_EMODPR failed %d\n", ret); @@ -334,7 +334,7 @@ int EnclaveCreatorHW::emodpr(uint64_t addr, uint64_t size, uint64_t flag) int EnclaveCreatorHW::mktcs(uint64_t tcs_addr) { - int ret = enclave_modify(tcs_addr, SE_PAGE_SIZE, PROT_READ|PROT_WRITE|SGX_EMA_PAGE_TYPE_REG, PROT_READ|PROT_WRITE|SGX_EMA_PAGE_TYPE_TCS); + int ret = enclave_modify(tcs_addr, SE_PAGE_SIZE, PROT_READ|PROT_WRITE|SGX_EMA_PAGE_TYPE_REG, PROT_READ|PROT_WRITE|SGX_EMA_PAGE_TYPE_TCS, NULL); if (ret) { SE_TRACE(SE_TRACE_ERROR, "MODIFY_TYPE failed %d\n", ret); @@ -347,7 +347,7 @@ int EnclaveCreatorHW::mktcs(uint64_t tcs_addr) int EnclaveCreatorHW::trim_range(uint64_t fromaddr, uint64_t toaddr) { - int ret= enclave_modify( fromaddr, toaddr - fromaddr, PROT_READ|PROT_WRITE, PROT_READ|PROT_WRITE|SGX_EMA_PAGE_TYPE_TRIM); + int ret= enclave_modify( fromaddr, toaddr - fromaddr, PROT_READ|PROT_WRITE, PROT_READ|PROT_WRITE|SGX_EMA_PAGE_TYPE_TRIM, NULL); if (ret) { SE_TRACE(SE_TRACE_ERROR, "SGX_IOC_ENCLAVE_TRIM failed %d\n", ret); @@ -361,7 +361,7 @@ int EnclaveCreatorHW::trim_range(uint64_t fromaddr, uint64_t toaddr) int EnclaveCreatorHW::trim_accept(uint64_t addr) { int ret = enclave_modify(addr, SE_PAGE_SIZE, SGX_EMA_PAGE_TYPE_TRIM|PROT_READ|PROT_WRITE - , SGX_EMA_PAGE_TYPE_TRIM|PROT_READ|PROT_WRITE); + , SGX_EMA_PAGE_TYPE_TRIM|PROT_READ|PROT_WRITE, NULL); if (ret) { diff --git a/psw/urts/linux/urts_emm.cpp b/psw/urts/linux/urts_emm.cpp index a2dab3b16..191ae2d07 100644 --- a/psw/urts/linux/urts_emm.cpp +++ b/psw/urts/linux/urts_emm.cpp @@ -52,7 +52,7 @@ extern "C" sgx_status_t SGX_CDECL ocall_emm_alloc(void* pms) #ifdef SE_SIM ms->retval = mprotect((void*)ms->addr, ms->size, ms->page_properties|PROT_MASK); #else - ms->retval = enclave_alloc(ms->addr, ms->size,ms->page_properties, ms->alloc_flags); + ms->retval = enclave_alloc(ms->addr, ms->size,ms->page_properties, ms->alloc_flags, NULL); #endif return SGX_SUCCESS; } @@ -73,7 +73,7 @@ extern "C" sgx_status_t SGX_CDECL ocall_emm_modify(void* pms) #ifdef SE_SIM ms->retval = mprotect((void*)ms->addr, ms->size, ms->flags_to|PROT_MASK); #else - ms->retval = enclave_modify(ms->addr, ms->size, ms->flags_from, ms->flags_to); + ms->retval = enclave_modify(ms->addr, ms->size, ms->flags_from, ms->flags_to, NULL); #endif return SGX_SUCCESS; } From 1604f391bcaa65d05ad11dfeaa81991178afed55 Mon Sep 17 00:00:00 2001 From: xxu36 Date: Thu, 11 Aug 
2022 16:30:52 +0800 Subject: [PATCH 43/96] Fix build error after adapting enclave common loader changes Signed-off-by: xxu36 --- psw/enclave_common/sgx_mm_ocalls.cpp | 38 +++++++++++++-------------- psw/urts/linux/enclave_creator_hw.cpp | 10 +++---- psw/urts/linux/urts_emm.cpp | 4 +-- 3 files changed, 26 insertions(+), 26 deletions(-) diff --git a/psw/enclave_common/sgx_mm_ocalls.cpp b/psw/enclave_common/sgx_mm_ocalls.cpp index 835e2e467..3393d40b4 100644 --- a/psw/enclave_common/sgx_mm_ocalls.cpp +++ b/psw/enclave_common/sgx_mm_ocalls.cpp @@ -27,7 +27,7 @@ uint32_t COMM_API enclave_alloc( if (s_driver_type == SGX_DRIVER_OUT_OF_TREE) { ret = mprotect(target_addr, target_size, PROT_WRITE | PROT_READ); - if ((ret != 0) && (enclave_error != NILL)) + if ((ret != 0) && (enclave_error != NULL)) { *enclave_error = ENCLAVE_UNEXPECTED; } @@ -83,19 +83,19 @@ uint64_t get_offset_for_address(uint64_t target_address) return (uint64_t)target_address - (uint64_t)enclave_base_addr; } -static int emodt(int fd, uint64_t addr, size_t length, uint64_t type) +static int emodt(int fd, void *addr, size_t length, uint64_t type) { struct sgx_enclave_modify_types ioc; if (length == 0) return EINVAL; SE_TRACE(SE_TRACE_DEBUG, - "MODT for 0x%llX ( %llX ), type: 0x%llX\n", + "MODT for %p ( %llX ), type: 0x%llX\n", addr, length, type); memset(&ioc, 0, sizeof(ioc)); ioc.page_type = type; - ioc.offset = get_offset_for_address(addr); + ioc.offset = get_offset_for_address((uint64_t)addr); ioc.length = length; do { @@ -105,7 +105,7 @@ static int emodt(int fd, uint64_t addr, size_t length, uint64_t type) { //total failure int err = errno; SE_TRACE(SE_TRACE_WARNING, - "MODT failed, error = %d for 0x%llX ( %llX ), type: 0x%llX\n", + "MODT failed, error = %d for %p ( %llX ), type: 0x%llX\n", err, addr, length, type); return err; } @@ -119,16 +119,16 @@ static int emodt(int fd, uint64_t addr, size_t length, uint64_t type) return 0; } -static int trim(int fd, uint64_t addr, size_t length) +static int trim(int fd, void *addr, size_t length) { return emodt(fd, addr, length, (SGX_EMA_PAGE_TYPE_TRIM >> SGX_EMA_PAGE_TYPE_SHIFT)); } -static int mktcs(int fd, uint64_t addr, size_t length) +static int mktcs(int fd, void *addr, size_t length) { return emodt(fd, addr, length, (SGX_EMA_PAGE_TYPE_TCS >> SGX_EMA_PAGE_TYPE_SHIFT)); } -static int trim_accept(int fd, uint64_t addr, size_t length) +static int trim_accept(int fd, void *addr, size_t length) { struct sgx_enclave_remove_pages ioc; memset(&ioc, 0, sizeof(ioc)); @@ -136,7 +136,7 @@ static int trim_accept(int fd, uint64_t addr, size_t length) SE_TRACE(SE_TRACE_DEBUG, "REMOVE for 0x%llX ( %llX )\n", addr, length); - ioc.offset = get_offset_for_address(addr); + ioc.offset = get_offset_for_address((uint64_t)addr); ioc.length = length; int ret = 0; do { @@ -156,7 +156,7 @@ static int trim_accept(int fd, uint64_t addr, size_t length) return 0; } -static int emodpr(int fd, uint64_t addr, size_t length, uint64_t prot) +static int emodpr(int fd, void *addr, size_t length, uint64_t prot) { struct sgx_enclave_restrict_permissions ioc; if (length == 0) @@ -168,7 +168,7 @@ static int emodpr(int fd, uint64_t addr, size_t length, uint64_t prot) memset(&ioc, 0, sizeof(ioc)); ioc.permissions = prot; - ioc.offset = get_offset_for_address(addr); + ioc.offset = get_offset_for_address((uint64_t)addr); ioc.length = length; do @@ -194,7 +194,7 @@ static int emodpr(int fd, uint64_t addr, size_t length, uint64_t prot) // legacy support for EDMM -static int trim_accept_legacy(int fd, uint64_t addr, size_t len) 
+static int trim_accept_legacy(int fd, void *addr, size_t len) { sgx_range params; memset(&params, 0, sizeof(sgx_range)); @@ -211,7 +211,7 @@ static int trim_accept_legacy(int fd, uint64_t addr, size_t len) return SGX_SUCCESS; } -static int trim_legacy(int fd, uint64_t fromaddr, uint64_t len) +static int trim_legacy(int fd, void *fromaddr, uint64_t len) { sgx_range params; memset(&params, 0, sizeof(sgx_range)); @@ -227,7 +227,7 @@ -static int mktcs_legacy(int fd, uint64_t tcs_addr, size_t len) +static int mktcs_legacy(int fd, void *tcs_addr, size_t len) { if (len != SE_PAGE_SIZE) return EINVAL; @@ -244,7 +244,7 @@ -static int emodpr_legacy(int fd, uint64_t addr, uint64_t size, uint64_t flag) +static int emodpr_legacy(int fd, void *addr, uint64_t size, uint64_t flag) { sgx_modification_param params; memset(&params, 0, sizeof(sgx_modification_param)); @@ -291,10 +291,10 @@ uint32_t COMM_API enclave_modify( *enclave_error = ENCLAVE_INVALID_PARAMETER; return ENCLAVE_INVALID_PARAMETER; } - function _trim = trim; - function _trim_accept = trim_accept; - function _mktcs = mktcs; - function _emodpr = emodpr; + function _trim = trim; + function _trim_accept = trim_accept; + function _mktcs = mktcs; + function _emodpr = emodpr; int fd = get_file_handle_from_address(target_addr); if (s_driver_type == SGX_DRIVER_OUT_OF_TREE) { diff --git a/psw/urts/linux/enclave_creator_hw.cpp b/psw/urts/linux/enclave_creator_hw.cpp index 1c8a79bad..425a7d890 100644 --- a/psw/urts/linux/enclave_creator_hw.cpp +++ b/psw/urts/linux/enclave_creator_hw.cpp @@ -308,7 +308,7 @@ void EnclaveCreatorHW::close_device() int EnclaveCreatorHW::alloc(uint64_t addr, uint64_t size, int flag) { - int ret = enclave_alloc(addr, size, flag, SGX_EMA_COMMIT_ON_DEMAND, NULL); + int ret = enclave_alloc((void *)addr, size, flag, SGX_EMA_COMMIT_ON_DEMAND, NULL); if (ret) { SE_TRACE(SE_TRACE_ERROR, "SGX_IOC_ENCLAVE_alloc failed %d\n", ret); @@ -321,7 +321,7 @@ int EnclaveCreatorHW::alloc(uint64_t addr, uint64_t size, int flag) int EnclaveCreatorHW::emodpr(uint64_t addr, uint64_t size, uint64_t flag) { - int ret = enclave_modify(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC|SGX_EMA_PAGE_TYPE_REG, + int ret = enclave_modify((void *)addr, size, PROT_READ|PROT_WRITE|PROT_EXEC|SGX_EMA_PAGE_TYPE_REG, (int) (flag|SGX_EMA_PAGE_TYPE_REG), NULL); if (ret) { @@ -334,7 +334,7 @@ int EnclaveCreatorHW::emodpr(uint64_t addr, uint64_t size, uint64_t flag) int EnclaveCreatorHW::mktcs(uint64_t tcs_addr) { - int ret = enclave_modify(tcs_addr, SE_PAGE_SIZE, PROT_READ|PROT_WRITE|SGX_EMA_PAGE_TYPE_REG, PROT_READ|PROT_WRITE|SGX_EMA_PAGE_TYPE_TCS, NULL); + int ret = enclave_modify((void *)tcs_addr, SE_PAGE_SIZE, PROT_READ|PROT_WRITE|SGX_EMA_PAGE_TYPE_REG, PROT_READ|PROT_WRITE|SGX_EMA_PAGE_TYPE_TCS, NULL); if (ret) { SE_TRACE(SE_TRACE_ERROR, "MODIFY_TYPE failed %d\n", ret); @@ -347,7 +347,7 @@ int EnclaveCreatorHW::mktcs(uint64_t tcs_addr) int EnclaveCreatorHW::trim_range(uint64_t fromaddr, uint64_t toaddr) { - int ret= enclave_modify( fromaddr, toaddr - fromaddr, PROT_READ|PROT_WRITE, PROT_READ|PROT_WRITE|SGX_EMA_PAGE_TYPE_TRIM, NULL); + int ret= enclave_modify( (void *)fromaddr, toaddr - fromaddr, PROT_READ|PROT_WRITE, PROT_READ|PROT_WRITE|SGX_EMA_PAGE_TYPE_TRIM, NULL); if (ret) { SE_TRACE(SE_TRACE_ERROR, "SGX_IOC_ENCLAVE_TRIM failed %d\n", ret); @@ -360,7 +360,7 @@ int EnclaveCreatorHW::trim_range(uint64_t 
fromaddr, uint64_t toaddr) int EnclaveCreatorHW::trim_accept(uint64_t addr) { - int ret = enclave_modify(addr, SE_PAGE_SIZE, SGX_EMA_PAGE_TYPE_TRIM|PROT_READ|PROT_WRITE + int ret = enclave_modify((void *)addr, SE_PAGE_SIZE, SGX_EMA_PAGE_TYPE_TRIM|PROT_READ|PROT_WRITE , SGX_EMA_PAGE_TYPE_TRIM|PROT_READ|PROT_WRITE, NULL); if (ret) diff --git a/psw/urts/linux/urts_emm.cpp b/psw/urts/linux/urts_emm.cpp index 191ae2d07..94f124abb 100644 --- a/psw/urts/linux/urts_emm.cpp +++ b/psw/urts/linux/urts_emm.cpp @@ -52,7 +52,7 @@ extern "C" sgx_status_t SGX_CDECL ocall_emm_alloc(void* pms) #ifdef SE_SIM ms->retval = mprotect((void*)ms->addr, ms->size, ms->page_properties|PROT_MASK); #else - ms->retval = enclave_alloc(ms->addr, ms->size,ms->page_properties, ms->alloc_flags, NULL); + ms->retval = enclave_alloc((void *)ms->addr, ms->size,ms->page_properties, ms->alloc_flags, NULL); #endif return SGX_SUCCESS; } @@ -73,7 +73,7 @@ extern "C" sgx_status_t SGX_CDECL ocall_emm_modify(void* pms) #ifdef SE_SIM ms->retval = mprotect((void*)ms->addr, ms->size, ms->flags_to|PROT_MASK); #else - ms->retval = enclave_modify(ms->addr, ms->size, ms->flags_from, ms->flags_to, NULL); + ms->retval = enclave_modify((void *)ms->addr, ms->size, ms->flags_from, ms->flags_to, NULL); #endif return SGX_SUCCESS; } From 6ad7ab16b0c09e00efa9ad43f2f3c83b77264081 Mon Sep 17 00:00:00 2001 From: Haitao Huang Date: Wed, 17 Aug 2022 18:30:42 -0700 Subject: [PATCH 44/96] update for sgx_mm_init returning error. Signed-off-by: Haitao Huang --- sdk/trts/init_enclave.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/sdk/trts/init_enclave.cpp b/sdk/trts/init_enclave.cpp index 917316844..7609b6752 100644 --- a/sdk/trts/init_enclave.cpp +++ b/sdk/trts/init_enclave.cpp @@ -78,7 +78,7 @@ extern "C" int init_enclave(void *enclave_base, void *ms) __attribute__((section extern "C" int rsrv_mem_init(void *_rsrv_mem_base, size_t _rsrv_mem_size, size_t _rsrv_mem_min_size); extern "C" int init_rts_emas(size_t rts_base, size_t rts_end, layout_t *start, layout_t *end); -extern "C" void sgx_mm_init(size_t, size_t); +extern "C" int sgx_mm_init(size_t, size_t); // init_enclave() // Initialize enclave. 
// Parameters: @@ -293,7 +293,8 @@ sgx_status_t do_init_enclave(void *ms, void *tcs) rts_end = user_base; } - sgx_mm_init(user_base, user_end); + if (sgx_mm_init(user_base, user_end)) + return SGX_ERROR_UNEXPECTED; int ret = init_rts_emas(rts_base, rts_end, layout_start, layout); if (ret != SGX_SUCCESS) { From a3254d9a03ac6ae362da1766248820998338d26e Mon Sep 17 00:00:00 2001 From: Haitao Huang Date: Mon, 29 Aug 2022 08:32:21 -0700 Subject: [PATCH 45/96] sgx-emm: update commit to point to dev Signed-off-by: Haitao Huang --- external/sgx-emm/emm_src | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/external/sgx-emm/emm_src b/external/sgx-emm/emm_src index c39e89265..b1c4226de 160000 --- a/external/sgx-emm/emm_src +++ b/external/sgx-emm/emm_src @@ -1 +1 @@ -Subproject commit c39e89265a3e2e608f7fb2bb10f720a5e573ca54 +Subproject commit b1c4226deb3241d48ca6587e829524d514dcdc7a From e2fce9716814194991a335ebead3f547efa689c7 Mon Sep 17 00:00:00 2001 From: Haitao Huang Date: Mon, 29 Aug 2022 08:57:05 -0700 Subject: [PATCH 46/96] enclave_common: fix build error in sgx_mm_ocalls.cpp Signed-off-by: Haitao Huang --- psw/enclave_common/sgx_mm_ocalls.cpp | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/psw/enclave_common/sgx_mm_ocalls.cpp b/psw/enclave_common/sgx_mm_ocalls.cpp index 3393d40b4..7f29d51e3 100644 --- a/psw/enclave_common/sgx_mm_ocalls.cpp +++ b/psw/enclave_common/sgx_mm_ocalls.cpp @@ -41,11 +41,11 @@ uint32_t COMM_API enclave_alloc( return ENCLAVE_INVALID_ADDRESS; } int map_flags = MAP_SHARED | MAP_FIXED; - //!TODO: support COMMIT_NOW when kernel supports + //COMMIT_NOW not supported by kernel yet if (alloc_flags & ENCLAVE_EMA_COMMIT_NOW) { } - //!TODO support CET + //CET pages not supported by kernel yet int type = data_properties; if((type == ENCLAVE_PAGE_SS_FIRST) | (type == ENCLAVE_PAGE_SS_REST)) { @@ -406,12 +406,13 @@ uint32_t COMM_API enclave_modify( if (enclave_error != NULL) *enclave_error = ENCLAVE_INVALID_PARAMETER; return ENCLAVE_INVALID_PARAMETER; + } // type_to == type_from // this is for emodpr to epcm.NONE, enclave EACCEPT with pte.R // separate mprotect is needed to change pte.R to pte.NONE if (prot_to == prot_from && prot_to == PROT_NONE) { - ret = mprotect((void *)addr, length, prot_to); + ret = mprotect(target_addr, target_size, prot_to); if (ret == -1) { ret = error_driver2api(ret, errno); @@ -446,7 +447,7 @@ uint32_t COMM_API enclave_modify( //EACCEPT needs at least pte.R, PROT_NONE case done above. 
if (prot_to != PROT_NONE) { - ret = mprotect((void *)addr, length, prot_to); + ret = mprotect((void *)target_addr, target_size, prot_to); if (ret == -1) { ret = error_driver2api(ret, errno); From b817ce3cf2ede5f73337da0e59f7d2cc48dcb7cd Mon Sep 17 00:00:00 2001 From: Haitao Huang Date: Mon, 29 Aug 2022 19:26:49 -0700 Subject: [PATCH 47/96] update commit for sgx-emm Signed-off-by: Haitao Huang --- external/sgx-emm/emm_src | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/external/sgx-emm/emm_src b/external/sgx-emm/emm_src index b1c4226de..ffb1d04b1 160000 --- a/external/sgx-emm/emm_src +++ b/external/sgx-emm/emm_src @@ -1 +1 @@ -Subproject commit b1c4226deb3241d48ca6587e829524d514dcdc7a +Subproject commit ffb1d04b143f143c04c60fb9e7a8b943c4286cd0 From 3c082a7b7829febbfdb0681bff5071b95f3a167f Mon Sep 17 00:00:00 2001 From: Haitao Huang Date: Thu, 8 Sep 2022 14:08:16 -0700 Subject: [PATCH 48/96] sgx-emm/api_tests: add case for longjmp from exception handler Signed-off-by: Haitao Huang --- .../sgx-emm/api_tests/Enclave/Enclave.cpp | 54 ++++++++++++++++++- 1 file changed, 53 insertions(+), 1 deletion(-) diff --git a/external/sgx-emm/api_tests/Enclave/Enclave.cpp b/external/sgx-emm/api_tests/Enclave/Enclave.cpp index 4b3b7e0b9..76bdad002 100644 --- a/external/sgx-emm/api_tests/Enclave/Enclave.cpp +++ b/external/sgx-emm/api_tests/Enclave/Enclave.cpp @@ -234,6 +234,7 @@ int test_sgx_mm_alloc_dealloc_unsafe1() return 0; } +#include typedef struct _pfdata { sgx_pfinfo pf; @@ -242,8 +243,59 @@ typedef struct _pfdata int magic; }; void* addr_expected; + jmp_buf jbuf; // used for jmp_handler only } pf_data_t; +int jmp_handler(const sgx_pfinfo *pfinfo, void *private_data) +{ + pf_data_t* pd = (pf_data_t *) private_data; + memcpy(private_data, pfinfo, sizeof(*pfinfo)); + void* addr = (void*) pd->pf.maddr; + if (pd->pf.pfec.rw == 0 + && addr == pd->addr_expected) + { + int ret = sgx_mm_commit(addr, SGX_PAGE_SIZE); + if (ret) abort(); + memset(addr, pd->magic, SGX_PAGE_SIZE); + longjmp(pd->jbuf, 1); + abort(); //won't reach here + } + return SGX_MM_EXCEPTION_CONTINUE_SEARCH; +} + +int test_sgx_mm_alloc_jmp() +{ + + void* addr = 0; + pf_data_t pd; + memset((void*) &pd, 0, sizeof(pd)); + int ret = sgx_mm_alloc(NULL, ALLOC_SIZE, + SGX_EMA_COMMIT_ON_DEMAND, &jmp_handler, &pd, &addr); + + EXPECT_EQ(ret, 0); + EXPECT_NEQ(addr, NULL); + const int MAGIC = 0x55UL; + + uint8_t* data = (uint8_t*)addr; + pd.magic = MAGIC; + pd.addr_expected = addr; + if (0 == setjmp(pd.jbuf)) { + uint8_t d0 = 0; + d0 = data[0]; + EXPECT_NEQ (d0, 0); //should not come here + }else { + uint8_t d0 = data[0]; + + EXPECT_EQ (d0, MAGIC); + EXPECT_EQ (pd.pf.pfec.rw, 0); //Read caused PF + EXPECT_EQ (pd.pf.pfec.sgx, 1); // sgx bit set + } + + ret = sgx_mm_dealloc(addr, ALLOC_SIZE); + EXPECT_EQ(ret, 0); + return 0; +} + int permissions_handler(const sgx_pfinfo *pfinfo, void *private_data) { pf_data_t* pd = (pf_data_t *) private_data; @@ -568,7 +620,6 @@ int test_sgx_mm_commit_data() // TODO: // - alloc big buf on stack to trigger expansion // - alloc ondemand in handler with a nested hanndler -// - do setjmp at allocation and long jmp in handler? // - random addrss allocation and deallocation // int ecall_test_sgx_mm(int sid) @@ -580,6 +631,7 @@ int ecall_test_sgx_mm(int sid) failures += test_sgx_mm_permissions_dealloc(); failures += test_sgx_mm_commit_data(); failures += test_sgx_mm_dealloc(); + failures += test_sgx_mm_alloc_jmp(); if(failures) LOG("!!! 
%d fail(s) in thread %d\n", failures, sid); return failures; From 2b4dcf82ef9a65f568d18747415b4307c5599c25 Mon Sep 17 00:00:00 2001 From: Haitao Huang Date: Thu, 8 Sep 2022 18:57:59 -0700 Subject: [PATCH 49/96] sgx-emm/api_tests: add a case with nested handler Signed-off-by: Haitao Huang --- .../sgx-emm/api_tests/Enclave/Enclave.cpp | 77 ++++++++++++++++++- 1 file changed, 75 insertions(+), 2 deletions(-) diff --git a/external/sgx-emm/api_tests/Enclave/Enclave.cpp b/external/sgx-emm/api_tests/Enclave/Enclave.cpp index 76bdad002..a580f4c2c 100644 --- a/external/sgx-emm/api_tests/Enclave/Enclave.cpp +++ b/external/sgx-emm/api_tests/Enclave/Enclave.cpp @@ -343,6 +343,52 @@ int commit_data_handler(const sgx_pfinfo *pfinfo, void *private_data) return SGX_MM_EXCEPTION_CONTINUE_SEARCH; } +static const int MAX_NESTED_HANDLER = 10; +int nested_handler(const sgx_pfinfo *pfinfo, void *private_data) +{ + pf_data_t* orig_pd = (pf_data_t *) private_data; + memcpy(private_data, pfinfo, sizeof(*pfinfo)); + void* addr = (void*) orig_pd->pf.maddr; + + if (addr == orig_pd->addr_expected) + { + void* data = 0; + if (orig_pd->magic == MAX_NESTED_HANDLER) + { + int ret = sgx_mm_alloc(NULL, SGX_PAGE_SIZE, SGX_EMA_COMMIT_NOW, + NULL, NULL, &data); + if (ret) abort(); + assert(data!=0); + memset(data, orig_pd->magic, SGX_PAGE_SIZE); + ret = sgx_mm_commit_data(addr, SGX_PAGE_SIZE, (uint8_t*)data, + SGX_EMA_PROT_READ); + if (ret) abort(); + } else + { + pf_data_t nested_pd; + memset((void*) &nested_pd, 0, sizeof(nested_pd)); + int ret = sgx_mm_alloc(NULL, SGX_PAGE_SIZE, + SGX_EMA_COMMIT_ON_DEMAND, + &nested_handler, + &nested_pd, &data); + + if (ret) abort(); + assert(data != 0); + nested_pd.addr_expected = data; + nested_pd.magic = orig_pd->magic + 1; + ret = sgx_mm_commit_data(addr, SGX_PAGE_SIZE, (uint8_t*)data, + SGX_EMA_PROT_READ); + if (ret) abort(); + if (nested_pd.pf.pfec.errcd == 0) abort(); //READ suceess with PF + if (nested_pd.pf.pfec.rw != 0) abort(); //READ indicated in PFEC + } + int ret = sgx_mm_dealloc((void*)data, SGX_PAGE_SIZE); + if (ret) abort(); + return SGX_MM_EXCEPTION_CONTINUE_EXECUTION; + }else + return SGX_MM_EXCEPTION_CONTINUE_SEARCH; +} + int test_sgx_mm_permissions() { @@ -393,7 +439,6 @@ int test_sgx_mm_permissions() return 0; } - int test_sgx_mm_permissions_dealloc() { void* addr = 0; @@ -615,11 +660,38 @@ int test_sgx_mm_commit_data() return 0; } +int test_sgx_mm_alloc_nested() +{ + void* addr = 0; + pf_data_t pd; + memset((void*) &pd, 0, sizeof(pd)); + int ret = sgx_mm_alloc(NULL, ALLOC_SIZE, + SGX_EMA_COMMIT_ON_DEMAND, + &nested_handler, + &pd, &addr); + + EXPECT_EQ(ret, 0); + EXPECT_NEQ(addr, NULL); + + pd.addr_expected = addr; + pd.magic = 1; + + uint8_t* data = (uint8_t*)addr; + for (int i =0; i Date: Thu, 8 Sep 2022 21:33:35 -0700 Subject: [PATCH 50/96] sgx-emm/api_tests: add random allocation and stack expansion cases Signed-off-by: Haitao Huang --- .../sgx-emm/api_tests/Enclave/Enclave.cpp | 55 +++++++++++++++++-- 1 file changed, 51 insertions(+), 4 deletions(-) diff --git a/external/sgx-emm/api_tests/Enclave/Enclave.cpp b/external/sgx-emm/api_tests/Enclave/Enclave.cpp index a580f4c2c..4fd8ef876 100644 --- a/external/sgx-emm/api_tests/Enclave/Enclave.cpp +++ b/external/sgx-emm/api_tests/Enclave/Enclave.cpp @@ -39,6 +39,7 @@ #define SGX_PAGE_SIZE 4096 #include "sgx_thread.h" #include +#include "sgx_trts.h" #include "../tcs.h" using namespace std; /* @@ -688,11 +689,56 @@ int test_sgx_mm_alloc_nested() EXPECT_EQ(ret, 0); return 0; } +int test_sgx_mm_stack_expansion() +{ + 
static const int BUF_SIZE = 0x8000; // slightly smaller than max set in config + uint8_t buf[BUF_SIZE]; + for (int i = 0; i < BUF_SIZE; i++) + { + buf[i] = (uint8_t)(i % 256); + } + for ( int i = 0; i < BUF_SIZE; i++) + { + uint8_t expected = (uint8_t)(i % 256); + EXPECT_EQ (buf[i], expected); + } + return 0; +} +#define ROUND_TO(x, align) ((size_t)((x) + ((align)-1)) & (size_t)(~((align)-1))) + +extern uint8_t __ImageBase; +extern size_t g_enclave_size; +int test_sgx_mm_alloc_random() +{ + //randomly choose 5000 address to alloc and dealloc + //make sure no crashes + static const int MAX_ITERATIONS = 5000; + size_t enclave_base = (size_t)(&__ImageBase);//from Makefile + for (int i = 0; i < MAX_ITERATIONS; i++) + { + size_t r = 0; + do + { + if(SGX_SUCCESS != sgx_read_rand((unsigned char *)&r, sizeof(r))) + { + return 1; + } + } while(r == 0); + r = enclave_base + (r % g_enclave_size) - 2 * ALLOC_SIZE; + void* addr = (void*) ROUND_TO (r, SGX_PAGE_SIZE); + void* addr_alloc = 0; + int ret = sgx_mm_alloc(addr, ALLOC_SIZE, + SGX_EMA_COMMIT_NOW|SGX_EMA_FIXED, NULL, NULL, &addr_alloc); + if (!ret) { + EXPECT_EQ (addr, addr_alloc); + ret = sgx_mm_dealloc(addr, ALLOC_SIZE); + EXPECT_EQ (ret, 0); + } + } + return 0; +} // Thread-safe tests in separate threads -// TODO: -// - alloc big buf on stack to trigger expansion -// - random addrss allocation and deallocation // int ecall_test_sgx_mm(int sid) { @@ -705,6 +751,8 @@ int ecall_test_sgx_mm(int sid) failures += test_sgx_mm_dealloc(); failures += test_sgx_mm_alloc_jmp(); failures += test_sgx_mm_alloc_nested(); + failures += test_sgx_mm_stack_expansion(); + failures += test_sgx_mm_alloc_random(); if(failures) LOG("!!! %d fail(s) in thread %d\n", failures, sid); return failures; @@ -763,7 +811,6 @@ int ecall_test_sgx_mm_unsafe() typedef void (*entry_t)(void); extern entry_t enclave_entry; -extern uint8_t __ImageBase; size_t ecall_alloc_context() { // Intel SDK thread context memory layout From ec216f76197ee7a307cfaa94f5e12c2018ee3fa2 Mon Sep 17 00:00:00 2001 From: Haitao Huang Date: Fri, 9 Sep 2022 18:07:50 -0700 Subject: [PATCH 51/96] sgx-emm/api_tests: move random tests to unsafe Random address request with FIXED flag may override RESERVE allocations in other threads Signed-off-by: Haitao Huang --- external/sgx-emm/api_tests/Enclave/Enclave.cpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/external/sgx-emm/api_tests/Enclave/Enclave.cpp b/external/sgx-emm/api_tests/Enclave/Enclave.cpp index 4fd8ef876..c7d809bc0 100644 --- a/external/sgx-emm/api_tests/Enclave/Enclave.cpp +++ b/external/sgx-emm/api_tests/Enclave/Enclave.cpp @@ -710,8 +710,11 @@ extern uint8_t __ImageBase; extern size_t g_enclave_size; int test_sgx_mm_alloc_random() { - //randomly choose 5000 address to alloc and dealloc + //randomly choose some address to alloc and dealloc //make sure no crashes + // Reduce the value if kernel ever returns ENOMEM from mmap in enclave_alloc. + // this is not safe to run with other threads that allocates with SGX_EMA_RESERVE + // flag as it may take those reserves away static const int MAX_ITERATIONS = 5000; size_t enclave_base = (size_t)(&__ImageBase);//from Makefile for (int i = 0; i < MAX_ITERATIONS; i++) @@ -752,7 +755,6 @@ int ecall_test_sgx_mm(int sid) failures += test_sgx_mm_alloc_jmp(); failures += test_sgx_mm_alloc_nested(); failures += test_sgx_mm_stack_expansion(); - failures += test_sgx_mm_alloc_random(); if(failures) LOG("!!! 
%d fail(s) in thread %d\n", failures, sid); return failures; @@ -805,6 +807,7 @@ int ecall_test_sgx_mm_unsafe() int failures = 0; failures += test_sgx_mm_alloc_dealloc_unsafe1(); failures += test_sgx_mm_alloc_dealloc_unsafe2(); + failures += test_sgx_mm_alloc_random(); return failures; } From 0cb086da71544f724870c7d742cab153bdbf17b0 Mon Sep 17 00:00:00 2001 From: Haitao Huang Date: Sun, 11 Sep 2022 13:43:16 -0700 Subject: [PATCH 52/96] sdk/trts: remove init_rts_ema_root Signed-off-by: xxu36 Signed-off-by: Haitao Huang --- sdk/trts/ema_init.cpp | 6 +----- sdk/trts/init_enclave.cpp | 6 ++---- 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/sdk/trts/ema_init.cpp b/sdk/trts/ema_init.cpp index d4bdfc1b6..aca05f530 100644 --- a/sdk/trts/ema_init.cpp +++ b/sdk/trts/ema_init.cpp @@ -164,16 +164,12 @@ static int init_rts_contexts_emas(layout_t *start, layout_t *end, uint64_t delta return SGX_SUCCESS; } -extern "C" void init_rts_ema_root(size_t, size_t); extern "C" int init_segment_emas(void* enclave_base); -extern "C" int init_rts_emas(size_t rts_base, size_t rts_end, - layout_t *layout_start, layout_t *layout_end) +extern "C" int init_rts_emas(size_t rts_base, layout_t *layout_start, layout_t *layout_end) { int ret = SGX_ERROR_UNEXPECTED; - init_rts_ema_root(rts_base, rts_end); - ret = init_segment_emas((void *)rts_base); if (SGX_SUCCESS != ret) { return ret; diff --git a/sdk/trts/init_enclave.cpp b/sdk/trts/init_enclave.cpp index 7609b6752..141cb6cbc 100644 --- a/sdk/trts/init_enclave.cpp +++ b/sdk/trts/init_enclave.cpp @@ -77,7 +77,7 @@ extern sgx_status_t pcl_entry(void* enclave_base,void* ms) __attribute__((weak)) extern "C" int init_enclave(void *enclave_base, void *ms) __attribute__((section(".nipx"))); extern "C" int rsrv_mem_init(void *_rsrv_mem_base, size_t _rsrv_mem_size, size_t _rsrv_mem_min_size); -extern "C" int init_rts_emas(size_t rts_base, size_t rts_end, layout_t *start, layout_t *end); +extern "C" int init_rts_emas(size_t rts_base, layout_t *start, layout_t *end); extern "C" int sgx_mm_init(size_t, size_t); // init_enclave() // Initialize enclave. 
@@ -269,7 +269,6 @@ sgx_status_t do_init_enclave(void *ms, void *tcs) if (EDMM_supported) { size_t rts_base = g_enclave_base; - size_t rts_end = g_enclave_base + g_enclave_size; size_t user_base = 0; size_t user_end = 0; @@ -290,13 +289,12 @@ sgx_status_t do_init_enclave(void *ms, void *tcs) if(user_base > user_end) return SGX_ERROR_UNEXPECTED; - rts_end = user_base; } if (sgx_mm_init(user_base, user_end)) return SGX_ERROR_UNEXPECTED; - int ret = init_rts_emas(rts_base, rts_end, layout_start, layout); + int ret = init_rts_emas(rts_base, layout_start, layout); if (ret != SGX_SUCCESS) { return SGX_ERROR_UNEXPECTED; } From 720b1c14bbcbff3561b2659207f011446d304343 Mon Sep 17 00:00:00 2001 From: Haitao Huang Date: Mon, 19 Sep 2022 07:35:18 -0700 Subject: [PATCH 53/96] urts: add a null pointer check for acquire_thread This can happen when UNINIT called before all threads exit Signed-off-by: Haitao Huang --- psw/urts/tcs.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/psw/urts/tcs.cpp b/psw/urts/tcs.cpp index fb4827622..46079ee19 100644 --- a/psw/urts/tcs.cpp +++ b/psw/urts/tcs.cpp @@ -433,7 +433,7 @@ CTrustThread * CTrustThreadPool::acquire_thread(int ecall_cmd) } if(is_special_ecall != true && - need_to_new_thread() == true) + need_to_new_thread() == true && NULL != m_utility_thread) { m_utility_thread->get_enclave()->fill_tcs_mini_pool_fn(); } From bfeded4b0cb708710ed5e82f130bf1c437fca84d Mon Sep 17 00:00:00 2001 From: Haitao Huang Date: Mon, 19 Sep 2022 09:01:36 -0700 Subject: [PATCH 54/96] update sgx-emm commit Signed-off-by: Haitao Huang --- external/sgx-emm/emm_src | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/external/sgx-emm/emm_src b/external/sgx-emm/emm_src index ffb1d04b1..5497098a7 160000 --- a/external/sgx-emm/emm_src +++ b/external/sgx-emm/emm_src @@ -1 +1 @@ -Subproject commit ffb1d04b143f143c04c60fb9e7a8b943c4286cd0 +Subproject commit 5497098a71cd6e791fcbde6f524b334f7b45b80f From 0e68258febc7055182df39ffcc2ca25b588228ec Mon Sep 17 00:00:00 2001 From: "Tate, Hongliang Tian" Date: Mon, 24 Dec 2018 17:22:55 +0800 Subject: [PATCH 55/96] Add two helper scripts to compile and install --- compile.sh | 4 ++++ install.sh | 11 +++++++++++ 2 files changed, 15 insertions(+) create mode 100755 compile.sh create mode 100755 install.sh diff --git a/compile.sh b/compile.sh new file mode 100755 index 000000000..bae5dbaea --- /dev/null +++ b/compile.sh @@ -0,0 +1,4 @@ +#!/bin/bash +make clean +make +make sdk_install_pkg psw_install_pkg diff --git a/install.sh b/install.sh new file mode 100755 index 000000000..aec4cf1ef --- /dev/null +++ b/install.sh @@ -0,0 +1,11 @@ +#!/bin/bash +pushd `dirname $0` > /dev/null +SCRIPT_PATH=`pwd` +popd > /dev/null + +sudo /opt/intel/sgxpsw/uninstall.sh +sudo /opt/intel/sgxsdk/uninstall.sh +sudo mkdir -p /opt/intel +cd /opt/intel +sudo ${SCRIPT_PATH}/linux/installer/bin/sgx_linux_x64_psw_*.bin +yes yes | sudo ${SCRIPT_PATH}/linux/installer/bin/sgx_linux_x64_sdk_*.bin From 020c837670980a84cfefd5c40afcb434d1e92df3 Mon Sep 17 00:00:00 2001 From: "Tate, Hongliang Tian" Date: Fri, 28 Dec 2018 16:20:46 +0800 Subject: [PATCH 56/96] Enable Intel MPX for enclaves by default --- common/inc/sgx_attributes.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/inc/sgx_attributes.h b/common/inc/sgx_attributes.h index 4f5e18008..955e69595 100644 --- a/common/inc/sgx_attributes.h +++ b/common/inc/sgx_attributes.h @@ -49,7 +49,7 @@ #define SGX_XFRM_MPX 0x0000000000000018ULL /* MPX XFRM - not supported */ #define 
SGX_XFRM_PKRU 0x0000000000000200ULL /* PKRU state */ -#define SGX_XFRM_RESERVED (~(SGX_XFRM_LEGACY | SGX_XFRM_AVX | SGX_XFRM_AVX512 | SGX_XFRM_PKRU)) +#define SGX_XFRM_RESERVED (~(SGX_XFRM_LEGACY | SGX_XFRM_AVX | SGX_XFRM_AVX512 | SGX_XFRM_MPX | SGX_XFRM_PKRU)) typedef struct _attributes_t { From dfea91f8c1a9f3bb9be6911e3dab7e4d57b9cd1d Mon Sep 17 00:00:00 2001 From: "Tate, Hongliang Tian" Date: Fri, 4 Jan 2019 20:04:14 +0800 Subject: [PATCH 57/96] Add sgx_thread_get_self API --- sdk/tlibthread/Makefile | 3 ++- sdk/tlibthread/sethread_self.cpp | 39 ++++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+), 1 deletion(-) create mode 100644 sdk/tlibthread/sethread_self.cpp diff --git a/sdk/tlibthread/Makefile b/sdk/tlibthread/Makefile index fdca14bdf..5be4e9c1f 100755 --- a/sdk/tlibthread/Makefile +++ b/sdk/tlibthread/Makefile @@ -43,7 +43,8 @@ OBJ := sethread_mutex.o \ sethread_spinlock.o \ sethread_rwlock.o \ sethread_cond.o \ - sethread_utils.o + sethread_utils.o \ + sethread_self.o LIBTLIBTHREAD := libtlibthread.a diff --git a/sdk/tlibthread/sethread_self.cpp b/sdk/tlibthread/sethread_self.cpp new file mode 100644 index 000000000..165143a84 --- /dev/null +++ b/sdk/tlibthread/sethread_self.cpp @@ -0,0 +1,39 @@ +/* + * Copyright (C) 2011-2018 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include "sethread_internal.h" +#include "util.h" + +/* Occlum's notes: make TCS avaiable to Occlum */ +extern "C" const void* sgx_thread_get_self(void) { + sgx_thread_t self = (sgx_thread_t)get_thread_data(); + return TD2TCS(self); +} From dfdabfe1a3729c06a187a587b4defc951538c92c Mon Sep 17 00:00:00 2001 From: Liu Shuang Date: Tue, 6 Aug 2019 07:04:09 +0000 Subject: [PATCH 58/96] Add integrity-only mode for SGX protected files 1. Add sgx_fopen_integrity_only API 2. 
Add sgx_fget_mac API --- common/inc/sgx_tprotected_fs.edl | 1 - common/inc/sgx_tprotected_fs.h | 48 ++++++++-- .../sgx_tprotected_fs/file_crypto.cpp | 66 +++++++++---- .../sgx_tprotected_fs/file_flush.cpp | 72 ++++++++++---- .../sgx_tprotected_fs/file_init.cpp | 94 ++++++++++++------- .../sgx_tprotected_fs/file_other.cpp | 32 ++++--- .../sgx_tprotected_fs/file_read_write.cpp | 66 ++++++++----- .../sgx_tprotected_fs/protected_fs_file.h | 19 ++-- .../sgx_tprotected_fs/protected_fs_nodes.h | 5 +- .../sgx_tprotected_fs/sgx_tprotected_fs.cpp | 30 ++++-- .../sgx_uprotected_fs/sgx_uprotected_fs.cpp | 36 +++---- 11 files changed, 316 insertions(+), 153 deletions(-) diff --git a/common/inc/sgx_tprotected_fs.edl b/common/inc/sgx_tprotected_fs.edl index b6f0b8c3f..5cd025da7 100644 --- a/common/inc/sgx_tprotected_fs.edl +++ b/common/inc/sgx_tprotected_fs.edl @@ -14,7 +14,6 @@ * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. - * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR diff --git a/common/inc/sgx_tprotected_fs.h b/common/inc/sgx_tprotected_fs.h index dbd5abf91..a461f18cc 100644 --- a/common/inc/sgx_tprotected_fs.h +++ b/common/inc/sgx_tprotected_fs.h @@ -45,6 +45,7 @@ #include "sgx_defs.h" #include "sgx_key.h" +#include "sgx_tcrypto.h" #define SGX_FILE void @@ -94,6 +95,20 @@ SGX_FILE* SGXAPI sgx_fopen(const char* filename, const char* mode, const sgx_key SGX_FILE* SGXAPI sgx_fopen_auto_key(const char* filename, const char* mode); +/* sgx_fopen_integrity_only +* Purpose: open existing protected file (created with previous call to sgx_fopen_integrity_only) or create a new one (see c++ fopen documentation for more details). +* This API skips encryption and only performs MAC calculation/validation, thus protecting the file's integrity, not confidentiality. +* +* Parameters: +* filename - [IN] the name of the file to open/create. +* mode - [IN] open mode. only supports 'r' or 'w' or 'a' (one and only one of them must be present), and optionally 'b' and/or '+'. +* +* Return value: +* SGX_FILE* - pointer to the newly created file handle, NULL if an error occurred - check errno for the error code. +*/ +SGX_FILE* SGXAPI sgx_fopen_integrity_only(const char* filename, const char* mode); + + /* sgx_fwrite * Purpose: write data to a file (see c++ fwrite documentation for more details). 
* @@ -101,7 +116,7 @@ SGX_FILE* SGXAPI sgx_fopen_auto_key(const char* filename, const char* mode); * ptr - [IN] pointer to the input data buffer * size - [IN] size of data block * count - [IN] count of data blocks to write - * stream - [IN] the file handle (opened with sgx_fopen or sgx_fopen_auto_key) + * stream - [IN] the file handle (opened with sgx_fopen*) * * Return value: * size_t - number of 'size' blocks written to the file, 0 in case of an error - check sgx_ferror for error code @@ -116,7 +131,7 @@ size_t SGXAPI sgx_fwrite(const void* ptr, size_t size, size_t count, SGX_FILE* s * ptr - [OUT] pointer to the output data buffer * size - [IN] size of data block * count - [IN] count of data blocks to write - * stream - [IN] the file handle (opened with sgx_fopen or sgx_fopen_auto_key) + * stream - [IN] the file handle (opened with sgx_fopen*) * * Return value: * size_t - number of 'size' blocks read from the file, 0 in case of an error - check sgx_ferror for error code @@ -128,7 +143,7 @@ size_t SGXAPI sgx_fread(void* ptr, size_t size, size_t count, SGX_FILE* stream); * Purpose: get the current value of the position indicator of the file (see c++ ftell documentation for more details). * * Parameters: - * stream - [IN] the file handle (opened with sgx_fopen or sgx_fopen_auto_key) + * stream - [IN] the file handle (opened with sgx_fopen*) * * Return value: * int64_t - the current value of the position indicator, -1 on error - check errno for the error code @@ -140,7 +155,7 @@ int64_t SGXAPI sgx_ftell(SGX_FILE* stream); * Purpose: set the current value of the position indicator of the file (see c++ fseek documentation for more details). * * Parameters: - * stream - [IN] the file handle (opened with sgx_fopen or sgx_fopen_auto_key) + * stream - [IN] the file handle (opened with sgx_fopen*) * offset - [IN] the new required value, relative to the origin parameter * origin - [IN] the origin from which to calculate the offset (SEEK_SET, SEEK_CUR or SEEK_END) * @@ -154,7 +169,7 @@ int32_t SGXAPI sgx_fseek(SGX_FILE* stream, int64_t offset, int origin); * Purpose: force actual write of all the cached data to the disk (see c++ fflush documentation for more details). * * Parameters: - * stream - [IN] the file handle (opened with sgx_fopen or sgx_fopen_auto_key) + * stream - [IN] the file handle (opened with sgx_fopen*) * * Return value: * int32_t - result, 0 on success, 1 in case of an error - check sgx_ferror for error code @@ -166,7 +181,7 @@ int32_t SGXAPI sgx_fflush(SGX_FILE* stream); * Purpose: get the latest operation error code (see c++ ferror documentation for more details). * * Parameters: - * stream - [IN] the file handle (opened with sgx_fopen or sgx_fopen_auto_key) + * stream - [IN] the file handle (opened with sgx_fopen*) * * Return value: * int32_t - the error code, 0 means no error, anything else is the latest operation error code @@ -178,7 +193,7 @@ int32_t SGXAPI sgx_ferror(SGX_FILE* stream); * Purpose: did the file's position indicator hit the end of the file in a previous read operation (see c++ feof documentation for more details). 
* * Parameters: - * stream - [IN] the file handle (opened with sgx_fopen or sgx_fopen_auto_key) + * stream - [IN] the file handle (opened with sgx_fopen*) * * Return value: * int32_t - 1 - end of file was reached, 0 - end of file wasn't reached @@ -191,7 +206,7 @@ int32_t SGXAPI sgx_feof(SGX_FILE* stream); * call sgx_ferror or sgx_feof after a call to this function to learn if it was successful or not * * Parameters: - * stream - [IN] the file handle (opened with sgx_fopen or sgx_fopen_auto_key) + * stream - [IN] the file handle (opened with sgx_fopen*) * * Return value: * none @@ -204,7 +219,7 @@ void SGXAPI sgx_clearerr(SGX_FILE* stream); * after a call to this function, the handle is invalid even if an error is returned * * Parameters: - * stream - [IN] the file handle (opened with sgx_fopen or sgx_fopen_auto_key) + * stream - [IN] the file handle (opened with sgx_fopen*) * * Return value: * int32_t - result, 0 - file was closed successfully, 1 - there were errors during the operation @@ -261,7 +276,7 @@ int32_t SGXAPI sgx_fimport_auto_key(const char* filename, const sgx_key_128bit_t * if a user wishes to remove all secrets from memory, he should close the file handle with sgx_fclose * * Parameters: -* stream - [IN] the file handle (opened with sgx_fopen or sgx_fopen_auto_key +* stream - [IN] the file handle (opened with sgx_fopen*) * * Return value: * int32_t - result, 0 - success, 1 - there was an error, check errno for the error code @@ -269,6 +284,19 @@ int32_t SGXAPI sgx_fimport_auto_key(const char* filename, const sgx_key_128bit_t int32_t SGXAPI sgx_fclear_cache(SGX_FILE* stream); +/* sgx_fget_mac +* Purpose: get the MAC of the file. To ensure the MAC reflects all the content in the file, +* sgx_fflush will be called automatically before getting the MAC. It is the caller's responsibility of not doing any writes before +* this function returns the MAC. +* +* Parameters: +* stream - [IN] the file handle (opened with sgx_fopen*) +* +* Return value: +* int32_t - result, 0 - success, 1 - there was an error, check errno for the error code +*/ +int32_t SGXAPI sgx_fget_mac(SGX_FILE* stream, sgx_aes_gcm_128bit_tag_t* mac); + #ifdef __cplusplus } #endif diff --git a/sdk/protected_fs/sgx_tprotected_fs/file_crypto.cpp b/sdk/protected_fs/sgx_tprotected_fs/file_crypto.cpp index ffef99314..3cc040916 100644 --- a/sdk/protected_fs/sgx_tprotected_fs/file_crypto.cpp +++ b/sdk/protected_fs/sgx_tprotected_fs/file_crypto.cpp @@ -56,6 +56,11 @@ typedef struct { bool protected_fs_file::generate_secure_blob(sgx_aes_gcm_128bit_key_t* key, const char* label, uint64_t physical_node_number, sgx_aes_gcm_128bit_tag_t* output) { + if (integrity_only) + { + return true; + } + kdf_input_t buf = {0, "", 0, "", 0}; uint32_t len = (uint32_t)strnlen(label, MAX_LABEL_LEN + 1); @@ -67,19 +72,19 @@ bool protected_fs_file::generate_secure_blob(sgx_aes_gcm_128bit_key_t* key, cons // index // SP800-108: - // i - A counter, a binary string of length r that is an input to each iteration of a PRF in counter mode [...]. + // i ? A counter, a binary string of length r that is an input to each iteration of a PRF in counter mode [...]. buf.index = 0x01; // label // SP800-108: - // Label - A string that identifies the purpose for the derived keying material, which is encoded as a binary string. + // Label ? A string that identifies the purpose for the derived keying material, which is encoded as a binary string. // The encoding method for the Label is defined in a larger context, for example, in the protocol that uses a KDF. 
strncpy(buf.label, label, len); // context and nonce - // SP800-108: - // Context - A binary string containing the information related to the derived keying material. - // It may include identities of parties who are deriving and / or using the derived keying material and, + // SP800-108: + // Context ? A binary string containing the information related to the derived keying material. + // It may include identities of parties who are deriving and / or using the derived keying material and, // optionally, a nonce known by the parties who derive the keys. buf.node_number = physical_node_number; @@ -108,24 +113,29 @@ bool protected_fs_file::generate_secure_blob(sgx_aes_gcm_128bit_key_t* key, cons bool protected_fs_file::generate_secure_blob_from_user_kdk(bool restore) { + if (integrity_only) + { + return true; + } + kdf_input_t buf = {0, "", 0, "", 0}; sgx_status_t status = SGX_SUCCESS; // index // SP800-108: - // i - A counter, a binary string of length r that is an input to each iteration of a PRF in counter mode [...]. + // i ? A counter, a binary string of length r that is an input to each iteration of a PRF in counter mode [...]. buf.index = 0x01; // label // SP800-108: - // Label - A string that identifies the purpose for the derived keying material, which is encoded as a binary string. + // Label ? A string that identifies the purpose for the derived keying material, which is encoded as a binary string. // The encoding method for the Label is defined in a larger context, for example, in the protocol that uses a KDF. strncpy(buf.label, METADATA_KEY_NAME, strlen(METADATA_KEY_NAME)); // context and nonce - // SP800-108: - // Context - A binary string containing the information related to the derived keying material. - // It may include identities of parties who are deriving and / or using the derived keying material and, + // SP800-108: + // Context ? A binary string containing the information related to the derived keying material. + // It may include identities of parties who are deriving and / or using the derived keying material and, // optionally, a nonce known by the parties who derive the keys. 
buf.node_number = 0; @@ -143,7 +153,7 @@ bool protected_fs_file::generate_secure_blob_from_user_kdk(bool restore) { memcpy(&buf.nonce32, &file_meta_data.plain_part.meta_data_key_id, sizeof(sgx_key_id_t)); } - + // length of output (128 bits) buf.output_len = 0x80; @@ -168,8 +178,13 @@ bool protected_fs_file::generate_secure_blob_from_user_kdk(bool restore) bool protected_fs_file::init_session_master_key() { + if (integrity_only) + { + return true; + } + sgx_aes_gcm_128bit_key_t empty_key = {0}; - + if (generate_secure_blob(&empty_key, MASTER_KEY_NAME, 0, (sgx_aes_gcm_128bit_tag_t*)&session_master_key) == false) return false; @@ -181,6 +196,11 @@ bool protected_fs_file::init_session_master_key() bool protected_fs_file::derive_random_node_key(uint64_t physical_node_number) { + if (integrity_only) + { + return true; + } + if (master_key_count++ > MAX_MASTER_KEY_USAGES) { if (init_session_master_key() == false) @@ -196,15 +216,20 @@ bool protected_fs_file::derive_random_node_key(uint64_t physical_node_number) bool protected_fs_file::generate_random_meta_data_key() { + if (integrity_only) + { + return true; + } + if (use_user_kdk_key == 1) { return generate_secure_blob_from_user_kdk(false); } - // derive a random key from the enclave sealing key + // derive a random key from the enclave sealing key sgx_key_request_t key_request; - memset(&key_request, 0, sizeof(sgx_key_request_t)); - + memset(&key_request, 0, sizeof(sgx_key_request_t)); + key_request.key_name = SGX_KEYSELECT_SEAL; key_request.key_policy = SGX_KEYPOLICY_MRSIGNER; @@ -215,14 +240,14 @@ bool protected_fs_file::generate_random_meta_data_key() key_request.attribute_mask.xfrm = 0x0; key_request.misc_mask = TSEAL_DEFAULT_MISCMASK; - + sgx_status_t status = sgx_read_rand((unsigned char*)&key_request.key_id, sizeof(sgx_key_id_t)); if (status != SGX_SUCCESS) { last_error = status; return false; } - + status = sgx_get_key(&key_request, &cur_key); if (status != SGX_SUCCESS) { @@ -241,8 +266,13 @@ bool protected_fs_file::generate_random_meta_data_key() bool protected_fs_file::restore_current_meta_data_key(const sgx_aes_gcm_128bit_key_t* import_key) { + if (integrity_only) + { + return true; + } + if (import_key != NULL) - { + { memcpy(&cur_key, import_key, sizeof(sgx_aes_gcm_128bit_key_t)); return true; } diff --git a/sdk/protected_fs/sgx_tprotected_fs/file_flush.cpp b/sdk/protected_fs/sgx_tprotected_fs/file_flush.cpp index cbfbbc8d7..5c4832a67 100644 --- a/sdk/protected_fs/sgx_tprotected_fs/file_flush.cpp +++ b/sdk/protected_fs/sgx_tprotected_fs/file_flush.cpp @@ -55,7 +55,7 @@ bool protected_fs_file::flush(/*bool mc*/) sgx_thread_mutex_unlock(&mutex); return false; } - + result = internal_flush(/*mc,*/ true); if (result == false) { @@ -259,7 +259,7 @@ bool protected_fs_file::set_update_flag(bool flush_to_disk) file_meta_data.plain_part.update_flag = 0; // turn it off in memory. at the end of the flush, when we'll write the meta-data to disk, this flag will also be cleared there. if (status != SGX_SUCCESS || result32 != 0) { - last_error = (status != SGX_SUCCESS) ? status : + last_error = (status != SGX_SUCCESS) ? status : (result32 != -1) ? 
result32 : EIO; return false; } @@ -327,8 +327,16 @@ bool protected_fs_file::update_all_data_and_mht_nodes() gcm_crypto_data_t* gcm_crypto_data = &data_node->parent->plain.data_nodes_crypto[data_node->data_node_number % ATTACHED_DATA_NODES_COUNT]; // encrypt the data, this also saves the gmac of the operation in the mht crypto node - status = sgx_rijndael128GCM_encrypt(&cur_key, data_node->plain.data, NODE_SIZE, data_node->encrypted.cipher, - empty_iv, SGX_AESGCM_IV_SIZE, NULL, 0, &gcm_crypto_data->gmac); + if(!integrity_only) { + status = sgx_rijndael128GCM_encrypt(&cur_key, data_node->plain.data, NODE_SIZE, data_node->encrypted.cipher, + empty_iv, SGX_AESGCM_IV_SIZE, NULL, 0, &gcm_crypto_data->gmac); + } + // calculate the MAC only + else { + status = sgx_rijndael128GCM_encrypt(&cur_key, NULL, 0, NULL, + empty_iv, SGX_AESGCM_IV_SIZE, data_node->plain.data, NODE_SIZE, &gcm_crypto_data->gmac); + memcpy(data_node->encrypted.cipher, data_node->plain.data, NODE_SIZE); + } if (status != SGX_SUCCESS) { last_error = status; @@ -380,9 +388,15 @@ bool protected_fs_file::update_all_data_and_mht_nodes() mht_list.clear(); return false; } - - status = sgx_rijndael128GCM_encrypt(&cur_key, (const uint8_t*)&file_mht_node->plain, NODE_SIZE, file_mht_node->encrypted.cipher, - empty_iv, SGX_AESGCM_IV_SIZE, NULL, 0, &gcm_crypto_data->gmac); + if(!integrity_only) { + status = sgx_rijndael128GCM_encrypt(&cur_key, (const uint8_t*)&file_mht_node->plain, NODE_SIZE, file_mht_node->encrypted.cipher, + empty_iv, SGX_AESGCM_IV_SIZE, NULL, 0, &gcm_crypto_data->gmac); + } + else { + status = sgx_rijndael128GCM_encrypt(&cur_key, NULL ,0, NULL, + empty_iv, SGX_AESGCM_IV_SIZE, (const uint8_t*)&file_mht_node->plain, NODE_SIZE, &gcm_crypto_data->gmac); + memcpy(file_mht_node->encrypted.cipher, (const uint8_t*)&file_mht_node->plain, NODE_SIZE); + } if (status != SGX_SUCCESS) { mht_list.clear(); @@ -399,8 +413,15 @@ bool protected_fs_file::update_all_data_and_mht_nodes() if (derive_random_node_key(root_mht.physical_node_number) == false) return false; - status = sgx_rijndael128GCM_encrypt(&cur_key, (const uint8_t*)&root_mht.plain, NODE_SIZE, root_mht.encrypted.cipher, - empty_iv, SGX_AESGCM_IV_SIZE, NULL, 0, &encrypted_part_plain.mht_gmac); + if(!integrity_only) { + status = sgx_rijndael128GCM_encrypt(&cur_key, (const uint8_t*)&root_mht.plain, NODE_SIZE, root_mht.encrypted.cipher, + empty_iv, SGX_AESGCM_IV_SIZE, NULL, 0, &encrypted_part_plain.mht_gmac); + } + else { + status = sgx_rijndael128GCM_encrypt(&cur_key, NULL, 0, NULL, + empty_iv, SGX_AESGCM_IV_SIZE, (const uint8_t*)&root_mht.plain, NODE_SIZE, &encrypted_part_plain.mht_gmac); + memcpy(root_mht.encrypted.cipher, (const uint8_t*)&root_mht.plain, NODE_SIZE); + } if (status != SGX_SUCCESS) { last_error = status; @@ -416,20 +437,31 @@ bool protected_fs_file::update_all_data_and_mht_nodes() bool protected_fs_file::update_meta_data_node() { sgx_status_t status; - + // randomize a new key, saves the key _id_ in the meta data plain part if (generate_random_meta_data_key() != true) { // last error already set return false; } - - // encrypt meta data encrypted part, also updates the gmac in the meta data plain part - status = sgx_rijndael128GCM_encrypt(&cur_key, - (const uint8_t*)&encrypted_part_plain, sizeof(meta_data_encrypted_t), (uint8_t*)&file_meta_data.encrypted_part, - empty_iv, SGX_AESGCM_IV_SIZE, - NULL, 0, - &file_meta_data.plain_part.meta_data_gmac); + + if (!integrity_only) { + // encrypt meta data encrypted part, also updates the gmac in the meta data plain part + 
status = sgx_rijndael128GCM_encrypt(&cur_key, + (const uint8_t*)&encrypted_part_plain, sizeof(meta_data_encrypted_t), (uint8_t*)&file_meta_data.encrypted_part, + empty_iv, SGX_AESGCM_IV_SIZE, + NULL, 0, + &file_meta_data.plain_part.meta_data_gmac); + } + else { + status = sgx_rijndael128GCM_encrypt(&cur_key, + NULL, 0, NULL, + empty_iv, SGX_AESGCM_IV_SIZE, + (const uint8_t*)&encrypted_part_plain, sizeof(meta_data_encrypted_t), + &file_meta_data.plain_part.meta_data_gmac); + memcpy((uint8_t*)&file_meta_data.encrypted_part, (const uint8_t*)&encrypted_part_plain, sizeof(meta_data_encrypted_t)); + } + if (status != SGX_SUCCESS) { last_error = status; @@ -482,7 +514,7 @@ bool protected_fs_file::write_all_changes_to_disk(bool flush_to_disk) status = u_sgxprotectedfs_fwrite_node(&result32, file, node_number, data_to_write, NODE_SIZE); if (status != SGX_SUCCESS || result32 != 0) { - last_error = (status != SGX_SUCCESS) ? status : + last_error = (status != SGX_SUCCESS) ? status : (result32 != -1) ? result32 : EIO; return false; } @@ -504,7 +536,7 @@ bool protected_fs_file::write_all_changes_to_disk(bool flush_to_disk) status = u_sgxprotectedfs_fwrite_node(&result32, file, 1, (uint8_t*)&root_mht.encrypted, NODE_SIZE); if (status != SGX_SUCCESS || result32 != 0) { - last_error = (status != SGX_SUCCESS) ? status : + last_error = (status != SGX_SUCCESS) ? status : (result32 != -1) ? result32 : EIO; return false; } @@ -515,7 +547,7 @@ bool protected_fs_file::write_all_changes_to_disk(bool flush_to_disk) status = u_sgxprotectedfs_fwrite_node(&result32, file, 0, (uint8_t*)&file_meta_data, NODE_SIZE); if (status != SGX_SUCCESS || result32 != 0) { - last_error = (status != SGX_SUCCESS) ? status : + last_error = (status != SGX_SUCCESS) ? status : (result32 != -1) ? result32 : EIO; return false; } diff --git a/sdk/protected_fs/sgx_tprotected_fs/file_init.cpp b/sdk/protected_fs/sgx_tprotected_fs/file_init.cpp index a68e3639d..53d23449c 100644 --- a/sdk/protected_fs/sgx_tprotected_fs/file_init.cpp +++ b/sdk/protected_fs/sgx_tprotected_fs/file_init.cpp @@ -67,15 +67,15 @@ bool protected_fs_file::cleanup_filename(const char* src, char* dest) } -protected_fs_file::protected_fs_file(const char* filename, const char* mode, const sgx_aes_gcm_128bit_key_t* import_key, const sgx_aes_gcm_128bit_key_t* kdk_key) +protected_fs_file::protected_fs_file(const char* filename, const char* mode, const sgx_aes_gcm_128bit_key_t* import_key, const sgx_aes_gcm_128bit_key_t* kdk_key, bool _integrity_only) { sgx_status_t status = SGX_SUCCESS; uint8_t result = 0; int32_t result32 = 0; - + init_fields(); - if (filename == NULL || mode == NULL || + if (filename == NULL || mode == NULL || strnlen(filename, 1) == 0 || strnlen(mode, 1) == 0) { last_error = EINVAL; @@ -108,7 +108,7 @@ protected_fs_file::protected_fs_file(const char* filename, const char* mode, con return; } - if (init_session_master_key() == false) + if (init_session_master_key() == false) // last_error already set return; @@ -116,16 +116,16 @@ protected_fs_file::protected_fs_file(const char* filename, const char* mode, con { // for new file, this value will later be saved in the meta data plain part (init_new_file) // for existing file, we will later compare this value with the value from the file (init_existing_file) - use_user_kdk_key = 1; + use_user_kdk_key = 1; memcpy(user_kdk_key, kdk_key, sizeof(sgx_aes_gcm_128bit_key_t)); } - + // get the clean file name (original name might be clean or with relative path or with absolute path...) 
char clean_filename[FILENAME_MAX_LEN]; if (cleanup_filename(filename, clean_filename) == false) // last_error already set return; - + if (import_key != NULL) {// verify the key is not empty - note from SAFE review sgx_aes_gcm_128bit_key_t empty_aes_key = {0}; @@ -183,6 +183,8 @@ protected_fs_file::protected_fs_file(const char* filename, const char* mode, con return; } + integrity_only = _integrity_only; + // now open the file read_only = (open_mode.read == 1 && open_mode.update == 0); // read only files can be opened simultaneously by many enclaves @@ -206,7 +208,7 @@ protected_fs_file::protected_fs_file(const char* filename, const char* mode, con last_error = SGX_ERROR_FILE_NOT_SGX_FILE; break; } - + strncpy(recovery_filename, filename, FULLNAME_MAX_LEN - 1); // copy full file name recovery_filename[FULLNAME_MAX_LEN - 1] = '\0'; // just to be safe size_t full_name_len = strnlen(recovery_filename, RECOVERY_FILE_MAX_LEN); @@ -222,7 +224,7 @@ protected_fs_file::protected_fs_file(const char* filename, const char* mode, con if (init_existing_file(filename, clean_filename, import_key) == false) break; - + if (open_mode.append == 1 && open_mode.update == 0) offset = encrypted_part_plain.size; } @@ -252,6 +254,7 @@ void protected_fs_file::init_fields() meta_data_node_number = 0; memset(&file_meta_data, 0, sizeof(meta_data_node_t)); memset(&encrypted_part_plain, 0, sizeof(meta_data_encrypted_t)); + memset(&cur_key, 0, sizeof(sgx_aes_gcm_128bit_key_t)); memset(&empty_iv, 0, sizeof(sgx_iv_t)); @@ -261,21 +264,22 @@ void protected_fs_file::init_fields() root_mht.mht_node_number = 0; root_mht.new_node = true; root_mht.need_writing = false; - + offset = 0; file = NULL; end_of_file = false; + integrity_only = false; need_writing = false; read_only = 0; file_status = SGX_FILE_STATUS_NOT_INITIALIZED; last_error = SGX_SUCCESS; - real_file_size = 0; + real_file_size = 0; open_mode.raw = 0; use_user_kdk_key = 0; master_key_count = 0; recovery_filename[0] = '\0'; - + memset(&mutex, 0, sizeof(sgx_thread_mutex_t)); // set hash size to fit MAX_PAGES_IN_CACHE @@ -343,7 +347,7 @@ bool protected_fs_file::file_recovery(const char* filename) status = u_sgxprotectedfs_fclose(&result32, file); if (status != SGX_SUCCESS || result32 != 0) { - last_error = (status != SGX_SUCCESS) ? status : + last_error = (status != SGX_SUCCESS) ? status : (result32 != -1) ? result32 : EINVAL; return false; } @@ -361,7 +365,7 @@ bool protected_fs_file::file_recovery(const char* filename) status = u_sgxprotectedfs_exclusive_file_open(&file, filename, read_only, &new_file_size, &result32); if (status != SGX_SUCCESS || file == NULL) { - last_error = (status != SGX_SUCCESS) ? status : + last_error = (status != SGX_SUCCESS) ? status : (result32 != 0) ? result32 : EACCES; return false; } @@ -376,7 +380,7 @@ bool protected_fs_file::file_recovery(const char* filename) status = u_sgxprotectedfs_fread_node(&result32, file, 0, (uint8_t*)&file_meta_data, NODE_SIZE); if (status != SGX_SUCCESS || result32 != 0) { - last_error = (status != SGX_SUCCESS) ? status : + last_error = (status != SGX_SUCCESS) ? status : (result32 != -1) ? result32 : EIO; return false; } @@ -394,7 +398,7 @@ bool protected_fs_file::init_existing_file(const char* filename, const char* cle status = u_sgxprotectedfs_fread_node(&result32, file, 0, (uint8_t*)&file_meta_data, NODE_SIZE); if (status != SGX_SUCCESS || result32 != 0) { - last_error = (status != SGX_SUCCESS) ? status : + last_error = (status != SGX_SUCCESS) ? status : (result32 != -1) ? 
result32 : EIO; return false; } @@ -439,15 +443,31 @@ bool protected_fs_file::init_existing_file(const char* filename, const char* cle return false; } + if (file_meta_data.plain_part.integrity_only != integrity_only) + { + last_error = EINVAL; + return false; + } + if (restore_current_meta_data_key(import_key) == false) return false; - // decrypt the encrypted part of the meta-data - status = sgx_rijndael128GCM_decrypt(&cur_key, - (const uint8_t*)file_meta_data.encrypted_part, sizeof(meta_data_encrypted_blob_t), (uint8_t*)&encrypted_part_plain, - empty_iv, SGX_AESGCM_IV_SIZE, - NULL, 0, - &file_meta_data.plain_part.meta_data_gmac); + if(!integrity_only) { + // decrypt the encrypted part of the meta-data + status = sgx_rijndael128GCM_decrypt(&cur_key, + (const uint8_t*)file_meta_data.encrypted_part, sizeof(meta_data_encrypted_blob_t), (uint8_t*)&encrypted_part_plain, + empty_iv, SGX_AESGCM_IV_SIZE, + NULL, 0, + &file_meta_data.plain_part.meta_data_gmac); + } + else { + status = sgx_rijndael128GCM_decrypt(&cur_key, + NULL, 0, NULL, + empty_iv, SGX_AESGCM_IV_SIZE, + (const uint8_t*)file_meta_data.encrypted_part, sizeof(meta_data_encrypted_blob_t), + &file_meta_data.plain_part.meta_data_gmac); + memcpy((uint8_t*)&encrypted_part_plain, (const uint8_t*)file_meta_data.encrypted_part, sizeof(meta_data_encrypted_blob_t)); + } if (status != SGX_SUCCESS) { last_error = status; @@ -511,15 +531,24 @@ bool protected_fs_file::init_existing_file(const char* filename, const char* cle status = u_sgxprotectedfs_fread_node(&result32, file, 1, root_mht.encrypted.cipher, NODE_SIZE); if (status != SGX_SUCCESS || result32 != 0) { - last_error = (status != SGX_SUCCESS) ? status : + last_error = (status != SGX_SUCCESS) ? status : (result32 != -1) ? result32 : EIO; return false; } - // this also verifies the root mht gmac against the gmac in the meta-data encrypted part - status = sgx_rijndael128GCM_decrypt(&encrypted_part_plain.mht_key, - root_mht.encrypted.cipher, NODE_SIZE, (uint8_t*)&root_mht.plain, - empty_iv, SGX_AESGCM_IV_SIZE, NULL, 0, &encrypted_part_plain.mht_gmac); + if(!integrity_only){ + // this also verifies the root mht gmac against the gmac in the meta-data encrypted part + status = sgx_rijndael128GCM_decrypt(&encrypted_part_plain.mht_key, + root_mht.encrypted.cipher, NODE_SIZE, (uint8_t*)&root_mht.plain, + empty_iv, SGX_AESGCM_IV_SIZE, NULL, 0, &encrypted_part_plain.mht_gmac); + } + else { + status = sgx_rijndael128GCM_decrypt(&encrypted_part_plain.mht_key, + NULL, 0, NULL, + empty_iv, SGX_AESGCM_IV_SIZE, root_mht.encrypted.cipher, NODE_SIZE, &encrypted_part_plain.mht_gmac); + memcpy((uint8_t*)&root_mht.plain, root_mht.encrypted.cipher, NODE_SIZE); + } + if (status != SGX_SUCCESS) { last_error = status; @@ -540,9 +569,10 @@ bool protected_fs_file::init_new_file(const char* clean_filename) file_meta_data.plain_part.minor_version = SGX_FILE_MINOR_VERSION; file_meta_data.plain_part.use_user_kdk_key = use_user_kdk_key; + file_meta_data.plain_part.integrity_only = integrity_only; strncpy(encrypted_part_plain.clean_filename, clean_filename, FILENAME_MAX_LEN); - + need_writing = true; return true; @@ -552,7 +582,7 @@ bool protected_fs_file::init_new_file(const char* clean_filename) protected_fs_file::~protected_fs_file() { void* data; - + while ((data = cache.get_last()) != NULL) { if (((file_data_node_t*)data)->type == FILE_DATA_NODE_TYPE) // type is in the same offset in both node types, need to scrub the plaintext @@ -573,7 +603,7 @@ protected_fs_file::~protected_fs_file() // scrub the last encryption 
key and the session key memset_s(&cur_key, sizeof(sgx_aes_gcm_128bit_key_t), 0, sizeof(sgx_aes_gcm_128bit_key_t)); memset_s(&session_master_key, sizeof(sgx_aes_gcm_128bit_key_t), 0, sizeof(sgx_aes_gcm_128bit_key_t)); - + // scrub first 3KB of user data and the gmac_key memset_s(&encrypted_part_plain, sizeof(meta_data_encrypted_t), 0, sizeof(meta_data_encrypted_t)); @@ -616,7 +646,7 @@ bool protected_fs_file::pre_close(sgx_key_128bit_t* key, bool import) status = u_sgxprotectedfs_fclose(&result32, file); if (status != SGX_SUCCESS || result32 != 0) { - last_error = (status != SGX_SUCCESS) ? status : + last_error = (status != SGX_SUCCESS) ? status : (result32 != -1) ? result32 : SGX_ERROR_FILE_CLOSE_FAILED; retval = false; } @@ -624,7 +654,7 @@ bool protected_fs_file::pre_close(sgx_key_128bit_t* key, bool import) file = NULL; } - if (file_status == SGX_FILE_STATUS_OK && + if (file_status == SGX_FILE_STATUS_OK && last_error == SGX_SUCCESS) // else...maybe something bad happened and the recovery file will be needed erase_recovery_file(); diff --git a/sdk/protected_fs/sgx_tprotected_fs/file_other.cpp b/sdk/protected_fs/sgx_tprotected_fs/file_other.cpp index ea708687d..0cc6da7a2 100644 --- a/sdk/protected_fs/sgx_tprotected_fs/file_other.cpp +++ b/sdk/protected_fs/sgx_tprotected_fs/file_other.cpp @@ -81,7 +81,7 @@ int32_t protected_fs_file::remove(const char* filename) if (real_file_size == 0 || real_file_size % NODE_SIZE != 0) break; // empty file or not an SGX protected FS file - + // might be an SGX protected FS file status = u_sgxprotectedfs_fread_node(&result32, file, 0, (uint8_t*)file_meta_data, NODE_SIZE); if (status != SGX_SUCCESS || result32 != 0) @@ -89,22 +89,22 @@ int32_t protected_fs_file::remove(const char* filename) if (file_meta_data->plain_part.major_version != SGX_FILE_MAJOR_VERSION) break; - + sgx_aes_gcm_128bit_key_t zero_key_id = {0}; sgx_aes_gcm_128bit_key_t key = {0}; if (consttime_memequal(&file_meta_data->plain_part.key_id, &zero_key_id, sizeof(sgx_aes_gcm_128bit_key_t)) == 1) break; // shared file - no monotonic counter - + sgx_key_request_t key_request = {0}; key_request.key_name = SGX_KEYSELECT_SEAL; key_request.key_policy = SGX_KEYPOLICY_MRENCLAVE; memcpy(&key_request.key_id, &file_meta_data->plain_part.key_id, sizeof(sgx_key_id_t)); - + status = sgx_get_key(&key_request, &key); if (status != SGX_SUCCESS) - break; + break; - status = sgx_rijndael128GCM_decrypt(&key, + status = sgx_rijndael128GCM_decrypt(&key, file_meta_data->encrypted_part, sizeof(meta_data_encrypted_blob_t), (uint8_t*)encrypted_part_plain, file_meta_data->plain_part.meta_data_iv, SGX_AESGCM_IV_SIZE, @@ -136,14 +136,14 @@ int32_t protected_fs_file::remove(const char* filename) delete encrypted_part_plain; } - if (file != NULL) + if (file != NULL) u_sgxprotectedfs_fclose(&result32, file); */ - + // do the actual file removal status = u_sgxprotectedfs_remove(&result32, filename); - if (status != SGX_SUCCESS) + if (status != SGX_SUCCESS) { errno = status; return 1; @@ -232,7 +232,7 @@ int protected_fs_file::seek(int64_t new_offset, int origin) } break; - default: + default: break; } @@ -309,7 +309,7 @@ void protected_fs_file::clear_error() } } - if ((file_status == SGX_FILE_STATUS_MC_NOT_INCREMENTED) && + if ((file_status == SGX_FILE_STATUS_MC_NOT_INCREMENTED) && (encrypted_part_plain.mc_value <= (UINT_MAX-2))) { uint32_t mc_value; @@ -325,7 +325,7 @@ void protected_fs_file::clear_error() } } */ - + if (file_status == SGX_FILE_STATUS_OK) { last_error = SGX_SUCCESS; @@ -364,13 +364,13 @@ int32_t 
protected_fs_file::clear_cache() assert(data != NULL); assert(((file_data_node_t*)data)->need_writing == false); // need_writing is in the same offset in both node types - // for production - + // for production - if (data == NULL || ((file_data_node_t*)data)->need_writing == true) { sgx_thread_mutex_unlock(&mutex); return 1; } - + cache.remove_last(); // before deleting the memory, need to scrub the plain secrets @@ -393,3 +393,7 @@ int32_t protected_fs_file::clear_cache() return 0; } +int32_t protected_fs_file::get_root_mac(sgx_aes_gcm_128bit_tag_t* mac) { + memcpy(mac, file_meta_data.plain_part.meta_data_gmac, sizeof(*mac)); + return 0; +} diff --git a/sdk/protected_fs/sgx_tprotected_fs/file_read_write.cpp b/sdk/protected_fs/sgx_tprotected_fs/file_read_write.cpp index 8d0c5f18c..5c2d6cfc5 100644 --- a/sdk/protected_fs/sgx_tprotected_fs/file_read_write.cpp +++ b/sdk/protected_fs/sgx_tprotected_fs/file_read_write.cpp @@ -110,7 +110,7 @@ size_t protected_fs_file::write(const void* ptr, size_t size, size_t count) data_to_write += empty_place_left_in_md; data_left_to_write -= empty_place_left_in_md; } - + if (offset > encrypted_part_plain.size) encrypted_part_plain.size = offset; // file grew, update the new file size @@ -126,7 +126,7 @@ size_t protected_fs_file::write(const void* ptr, size_t size, size_t count) size_t offset_in_node = (size_t)((offset - MD_USER_DATA_SIZE) % NODE_SIZE); size_t empty_place_left_in_node = NODE_SIZE - offset_in_node; - + if (data_left_to_write <= empty_place_left_in_node) { // this will be the last write memcpy(&file_data_node->plain.data[offset_in_node], data_to_write, data_left_to_write); @@ -272,7 +272,7 @@ size_t protected_fs_file::read(void* ptr, size_t size, size_t count) size_t offset_in_node = (offset - MD_USER_DATA_SIZE) % NODE_SIZE; size_t data_left_in_node = NODE_SIZE - offset_in_node; - + if (data_left_to_read <= data_left_in_node) { memcpy(out_buffer, &file_data_node->plain.data[offset_in_node], data_left_to_read); @@ -304,8 +304,8 @@ size_t protected_fs_file::read(void* ptr, size_t size, size_t count) } -// this is a very 'specific' function, tied to the architecture of the file layout, returning the node numbers according to the offset in the file -void get_node_numbers(uint64_t offset, uint64_t* mht_node_number, uint64_t* data_node_number, +// this is a very 'specific' function, tied to the architecture of the file layout, returning the node numbers according to the offset in the file +void get_node_numbers(uint64_t offset, uint64_t* mht_node_number, uint64_t* data_node_number, uint64_t* physical_mht_node_number, uint64_t* physical_data_node_number) { // node 0 - meta data node @@ -348,7 +348,7 @@ file_data_node_t* protected_fs_file::get_data_node() return NULL; } - if ((offset - MD_USER_DATA_SIZE) % NODE_SIZE == 0 && + if ((offset - MD_USER_DATA_SIZE) % NODE_SIZE == 0 && offset == encrypted_part_plain.size) {// new node file_data_node = append_data_node(); @@ -374,7 +374,7 @@ file_data_node_t* protected_fs_file::get_data_node() { void* data = cache.get_last(); assert(data != NULL); - // for production - + // for production - if (data == NULL) { last_error = SGX_ERROR_UNEXPECTED; @@ -409,7 +409,7 @@ file_data_node_t* protected_fs_file::get_data_node() } } } - + return file_data_node; } @@ -461,7 +461,7 @@ file_data_node_t* protected_fs_file::read_data_node() file_data_node_t* file_data_node = (file_data_node_t*)cache.get(physical_node_number); if (file_data_node != NULL) return file_data_node; - + // need to read the data node from the disk 
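The integrity-only read path below mirrors the write path earlier in this patch: decryption is skipped, sgx_rijndael128GCM_decrypt is called with a zero-length ciphertext and the stored node passed as AAD so the GMAC is still verified, and the node contents are then copied over unchanged. A minimal stand-alone sketch of the two directions (the helper names and the <sgx_tcrypto.h>/<string.h> includes are illustrative, not part of the patch):

    #include <sgx_tcrypto.h>
    #include <string.h>

    /* Seal direction: authenticate `node` as AAD (no plaintext/ciphertext),
     * emit only the GMAC, and keep the node contents in the clear. */
    static sgx_status_t mac_only_seal(const sgx_aes_gcm_128bit_key_t *key,
                                      const uint8_t *node, uint32_t node_size,
                                      const uint8_t *iv, uint32_t iv_len,
                                      uint8_t *stored, sgx_aes_gcm_128bit_tag_t *gmac)
    {
        sgx_status_t status = sgx_rijndael128GCM_encrypt(key, NULL, 0, NULL,
                                                         iv, iv_len,
                                                         node, node_size, gmac);
        if (status == SGX_SUCCESS)
            memcpy(stored, node, node_size);   /* data stays readable on disk */
        return status;
    }

    /* Verify direction: re-run GCM over the stored bytes as AAD; a GMAC
     * mismatch surfaces as SGX_ERROR_MAC_MISMATCH, just as in encrypted mode. */
    static sgx_status_t mac_only_verify(const sgx_aes_gcm_128bit_key_t *key,
                                        const uint8_t *stored, uint32_t node_size,
                                        const uint8_t *iv, uint32_t iv_len,
                                        uint8_t *plain, const sgx_aes_gcm_128bit_tag_t *gmac)
    {
        sgx_status_t status = sgx_rijndael128GCM_decrypt(key, NULL, 0, NULL,
                                                         iv, iv_len,
                                                         stored, node_size, gmac);
        if (status == SGX_SUCCESS)
            memcpy(plain, stored, node_size);  /* accept only authenticated bytes */
        return status;
    }
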
file_mht_node = get_mht_node(); @@ -481,20 +481,32 @@ file_data_node_t* protected_fs_file::read_data_node() file_data_node->data_node_number = data_node_number; file_data_node->physical_node_number = physical_node_number; file_data_node->parent = file_mht_node; - + status = u_sgxprotectedfs_fread_node(&result32, file, file_data_node->physical_node_number, file_data_node->encrypted.cipher, NODE_SIZE); if (status != SGX_SUCCESS || result32 != 0) { delete file_data_node; - last_error = (status != SGX_SUCCESS) ? status : + last_error = (status != SGX_SUCCESS) ? status : (result32 != -1) ? result32 : EIO; return NULL; } gcm_crypto_data_t* gcm_crypto_data = &file_data_node->parent->plain.data_nodes_crypto[file_data_node->data_node_number % ATTACHED_DATA_NODES_COUNT]; - // this function decrypt the data _and_ checks the integrity of the data against the gmac - status = sgx_rijndael128GCM_decrypt(&gcm_crypto_data->key, file_data_node->encrypted.cipher, NODE_SIZE, file_data_node->plain.data, empty_iv, SGX_AESGCM_IV_SIZE, NULL, 0, &gcm_crypto_data->gmac); + if(!integrity_only) { + // this function decrypt the data _and_ checks the integrity of the data against the gmac + status = sgx_rijndael128GCM_decrypt(&gcm_crypto_data->key, + file_data_node->encrypted.cipher, + NODE_SIZE, file_data_node->plain.data, + empty_iv, SGX_AESGCM_IV_SIZE, NULL, 0, &gcm_crypto_data->gmac); + } + else { + status = sgx_rijndael128GCM_decrypt(&gcm_crypto_data->key, + NULL, 0, NULL, + empty_iv, SGX_AESGCM_IV_SIZE, + file_data_node->encrypted.cipher, NODE_SIZE, &gcm_crypto_data->gmac); + memcpy(file_data_node->plain.data, file_data_node->encrypted.cipher, NODE_SIZE); + } if (status != SGX_SUCCESS) { delete file_data_node; @@ -505,7 +517,7 @@ file_data_node_t* protected_fs_file::read_data_node() } return NULL; } - + if (cache.add(file_data_node->physical_node_number, file_data_node) == false) { memset_s(&file_data_node->plain, sizeof(data_node_t), 0, sizeof(data_node_t)); // scrub the plaintext data @@ -536,7 +548,7 @@ file_mht_node_t* protected_fs_file::get_mht_node() return &root_mht; // file is constructed from 128*4KB = 512KB per MHT node. - if ((offset - MD_USER_DATA_SIZE) % (ATTACHED_DATA_NODES_COUNT * NODE_SIZE) == 0 && + if ((offset - MD_USER_DATA_SIZE) % (ATTACHED_DATA_NODES_COUNT * NODE_SIZE) == 0 && offset == encrypted_part_plain.size) { file_mht_node = append_mht_node(mht_node_number); @@ -582,7 +594,7 @@ file_mht_node_t* protected_fs_file::append_mht_node(uint64_t mht_node_number) last_error = ENOMEM; return NULL; } - + return new_file_mht_node; } @@ -619,20 +631,32 @@ file_mht_node_t* protected_fs_file::read_mht_node(uint64_t mht_node_number) file_mht_node->mht_node_number = mht_node_number; file_mht_node->physical_node_number = physical_node_number; file_mht_node->parent = parent_file_mht_node; - + status = u_sgxprotectedfs_fread_node(&result32, file, file_mht_node->physical_node_number, file_mht_node->encrypted.cipher, NODE_SIZE); if (status != SGX_SUCCESS || result32 != 0) { delete file_mht_node; - last_error = (status != SGX_SUCCESS) ? status : + last_error = (status != SGX_SUCCESS) ? status : (result32 != -1) ? 
result32 : EIO; return NULL; } - + gcm_crypto_data_t* gcm_crypto_data = &file_mht_node->parent->plain.mht_nodes_crypto[(file_mht_node->mht_node_number - 1) % CHILD_MHT_NODES_COUNT]; - // this function decrypt the data _and_ checks the integrity of the data against the gmac - status = sgx_rijndael128GCM_decrypt(&gcm_crypto_data->key, file_mht_node->encrypted.cipher, NODE_SIZE, (uint8_t*)&file_mht_node->plain, empty_iv, SGX_AESGCM_IV_SIZE, NULL, 0, &gcm_crypto_data->gmac); + if(!integrity_only){ + // this function decrypt the data _and_ checks the integrity of the data against the gmac + status = sgx_rijndael128GCM_decrypt(&gcm_crypto_data->key, + file_mht_node->encrypted.cipher, + NODE_SIZE, (uint8_t*)&file_mht_node->plain, + empty_iv, SGX_AESGCM_IV_SIZE, NULL, 0, &gcm_crypto_data->gmac); + } + else { + status = sgx_rijndael128GCM_decrypt(&gcm_crypto_data->key, + NULL, 0, NULL, + empty_iv, SGX_AESGCM_IV_SIZE, + file_mht_node->encrypted.cipher, NODE_SIZE, &gcm_crypto_data->gmac); + memcpy((uint8_t*)&file_mht_node->plain, file_mht_node->encrypted.cipher, NODE_SIZE); + } if (status != SGX_SUCCESS) { delete file_mht_node; diff --git a/sdk/protected_fs/sgx_tprotected_fs/protected_fs_file.h b/sdk/protected_fs/sgx_tprotected_fs/protected_fs_file.h index f02b208f1..8c44089e1 100644 --- a/sdk/protected_fs/sgx_tprotected_fs/protected_fs_file.h +++ b/sdk/protected_fs/sgx_tprotected_fs/protected_fs_file.h @@ -143,22 +143,22 @@ class protected_fs_file }; meta_data_encrypted_t encrypted_part_plain; // encrypted part of meta data node, decrypted - + file_mht_node_t root_mht; // the root of the mht is always needed (for files bigger than 3KB) FILE* file; // OS's FILE pointer - + open_mode_t open_mode; uint8_t read_only; int64_t offset; // current file position (user's view) bool end_of_file; // flag int64_t real_file_size; - + bool integrity_only; // If true, no encryption, only MAC. Default: false. 
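This flag is surfaced to applications through the sgx_fopen_integrity_only() entry point and the sgx_fget_mac()/get_root_mac() pair added later in this patch. A sketch of how enclave code might use that API; write_public_record() and the "w" mode choice are illustrative assumptions, not part of the patch:

    #include "sgx_tprotected_fs.h"
    #include <sgx_tcrypto.h>

    /* Store a record without confidentiality but with integrity: the data is
     * written in the clear, and the file's root GMAC is returned so it can be
     * anchored elsewhere (for example in a sealed blob or a remote log). */
    int32_t write_public_record(const char *path, const void *rec, size_t len,
                                sgx_aes_gcm_128bit_tag_t *root_mac)
    {
        SGX_FILE *f = sgx_fopen_integrity_only(path, "w");
        if (f == NULL)
            return 1;

        int32_t ret = 1;
        if (sgx_fwrite(rec, 1, len, f) == len)
            ret = sgx_fget_mac(f, root_mac);   /* flushes, then copies meta_data_gmac */

        sgx_fclose(f);
        return ret;
    }
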
bool need_writing; // flag uint32_t last_error; // last operation error protected_fs_status_e file_status; - + sgx_thread_mutex_t mutex; uint8_t use_user_kdk_key; @@ -167,7 +167,7 @@ class protected_fs_file sgx_aes_gcm_128bit_key_t cur_key; sgx_aes_gcm_128bit_key_t session_master_key; uint32_t master_key_count; - + char recovery_filename[RECOVERY_FILE_MAX_LEN]; // might include full path to the file lru_cache cache; @@ -182,15 +182,15 @@ class protected_fs_file bool file_recovery(const char* filename); bool init_existing_file(const char* filename, const char* clean_filename, const sgx_aes_gcm_128bit_key_t* import_key); bool init_new_file(const char* clean_filename); - + bool generate_secure_blob(sgx_aes_gcm_128bit_key_t* key, const char* label, uint64_t physical_node_number, sgx_aes_gcm_128bit_tag_t* output); bool generate_secure_blob_from_user_kdk(bool restore); bool init_session_master_key(); bool derive_random_node_key(uint64_t physical_node_number); bool generate_random_meta_data_key(); bool restore_current_meta_data_key(const sgx_aes_gcm_128bit_key_t* import_key); - - + + file_data_node_t* get_data_node(); file_data_node_t* read_data_node(); file_data_node_t* append_data_node(); @@ -207,7 +207,7 @@ class protected_fs_file bool internal_flush(/*bool mc,*/ bool flush_to_disk); public: - protected_fs_file(const char* filename, const char* mode, const sgx_aes_gcm_128bit_key_t* import_key, const sgx_aes_gcm_128bit_key_t* kdk_key); + protected_fs_file(const char* filename, const char* mode, const sgx_aes_gcm_128bit_key_t* import_key, const sgx_aes_gcm_128bit_key_t* kdk_key, bool integrity_only); ~protected_fs_file(); size_t write(const void* ptr, size_t size, size_t count); @@ -220,6 +220,7 @@ class protected_fs_file int32_t clear_cache(); bool flush(/*bool mc*/); bool pre_close(sgx_key_128bit_t* key, bool import); + int32_t get_root_mac(sgx_aes_gcm_128bit_tag_t* root_mac); static int32_t remove(const char* filename); }; diff --git a/sdk/protected_fs/sgx_tprotected_fs/protected_fs_nodes.h b/sdk/protected_fs/sgx_tprotected_fs/protected_fs_nodes.h index e360cca7d..8c9abdec7 100644 --- a/sdk/protected_fs/sgx_tprotected_fs/protected_fs_nodes.h +++ b/sdk/protected_fs/sgx_tprotected_fs/protected_fs_nodes.h @@ -66,8 +66,9 @@ typedef struct _meta_data_plain sgx_attributes_t attribute_mask; sgx_aes_gcm_128bit_tag_t meta_data_gmac; - + uint8_t update_flag; + uint8_t integrity_only; } meta_data_plain_t; // these are all defined as relative to node size, so we can decrease node size in tests and have deeper tree @@ -83,7 +84,7 @@ typedef struct _meta_data_encrypted { char clean_filename[FILENAME_MAX_LEN]; int64_t size; - + sgx_mc_uuid_t mc_uuid; // not used uint32_t mc_value; // not used diff --git a/sdk/protected_fs/sgx_tprotected_fs/sgx_tprotected_fs.cpp b/sdk/protected_fs/sgx_tprotected_fs/sgx_tprotected_fs.cpp index fe8634acc..2784a0d5c 100644 --- a/sdk/protected_fs/sgx_tprotected_fs/sgx_tprotected_fs.cpp +++ b/sdk/protected_fs/sgx_tprotected_fs/sgx_tprotected_fs.cpp @@ -35,8 +35,7 @@ #include - -static SGX_FILE* sgx_fopen_internal(const char* filename, const char* mode, const sgx_key_128bit_t *auto_key, const sgx_key_128bit_t *kdk_key) +static SGX_FILE* sgx_fopen_internal(const char* filename, const char* mode, const sgx_key_128bit_t *auto_key, const sgx_key_128bit_t *kdk_key, bool integrity_only) { protected_fs_file* file = NULL; @@ -47,7 +46,7 @@ static SGX_FILE* sgx_fopen_internal(const char* filename, const char* mode, cons } try { - file = new protected_fs_file(filename, mode, auto_key, 
kdk_key); + file = new protected_fs_file(filename, mode, auto_key, kdk_key, integrity_only); } catch (std::bad_alloc& e) { (void)e; // remove warning @@ -68,13 +67,18 @@ static SGX_FILE* sgx_fopen_internal(const char* filename, const char* mode, cons SGX_FILE* sgx_fopen_auto_key(const char* filename, const char* mode) { - return sgx_fopen_internal(filename, mode, NULL, NULL); + return sgx_fopen_internal(filename, mode, NULL, NULL, false); } +SGX_FILE* sgx_fopen_integrity_only(const char* filename, const char* mode) +{ + sgx_key_128bit_t empty_key = {0}; + return sgx_fopen_internal(filename, mode, NULL, &empty_key, true); +} SGX_FILE* sgx_fopen(const char* filename, const char* mode, const sgx_key_128bit_t *key) { - return sgx_fopen_internal(filename, mode, NULL, key); + return sgx_fopen_internal(filename, mode, NULL, key, false); } @@ -172,7 +176,7 @@ int32_t sgx_feof(SGX_FILE* stream) { if (stream == NULL) return -1; - + protected_fs_file* file = (protected_fs_file*)stream; return ((file->get_eof() == true) ? 1 : 0); @@ -222,7 +226,7 @@ int32_t sgx_remove(const char* filename) int32_t sgx_fexport_auto_key(const char* filename, sgx_key_128bit_t *key) { - SGX_FILE* stream = sgx_fopen_internal(filename, "r", NULL, NULL); + SGX_FILE* stream = sgx_fopen_internal(filename, "r", NULL, NULL, false); if (stream == NULL) return 1; @@ -232,7 +236,7 @@ int32_t sgx_fexport_auto_key(const char* filename, sgx_key_128bit_t *key) int32_t sgx_fimport_auto_key(const char* filename, const sgx_key_128bit_t *key) { - SGX_FILE* stream = sgx_fopen_internal(filename, "r+", key, NULL); + SGX_FILE* stream = sgx_fopen_internal(filename, "r+", key, NULL, false); if (stream == NULL) return 1; @@ -251,4 +255,14 @@ int32_t sgx_fclear_cache(SGX_FILE* stream) } +int32_t SGXAPI sgx_fget_mac(SGX_FILE* stream, sgx_aes_gcm_128bit_tag_t* mac) +{ + if (stream == NULL) + return 1; + protected_fs_file* file = (protected_fs_file*)stream; + if (file->flush() == false) + return 1; + + return file->get_root_mac(mac); +} diff --git a/sdk/protected_fs/sgx_uprotected_fs/sgx_uprotected_fs.cpp b/sdk/protected_fs/sgx_uprotected_fs/sgx_uprotected_fs.cpp index d38cb46a7..7e451cc27 100644 --- a/sdk/protected_fs/sgx_uprotected_fs/sgx_uprotected_fs.cpp +++ b/sdk/protected_fs/sgx_uprotected_fs/sgx_uprotected_fs.cpp @@ -58,7 +58,7 @@ void* u_sgxprotectedfs_exclusive_file_open(const char* filename, uint8_t read_on int fd = -1; mode_t mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH; struct stat stat_st; - + memset(&stat_st, 0, sizeof(struct stat)); if (filename == NULL || strnlen(filename, 1) == 0) @@ -99,7 +99,7 @@ void* u_sgxprotectedfs_exclusive_file_open(const char* filename, uint8_t read_on assert(result == 0); return NULL; } - + // convert the file handle to standard 'C' API file pointer f = fdopen(fd, read_only ? 
"rb" : "r+b"); if (f == NULL) @@ -122,7 +122,7 @@ void* u_sgxprotectedfs_exclusive_file_open(const char* filename, uint8_t read_on uint8_t u_sgxprotectedfs_check_if_file_exists(const char* filename) { struct stat stat_st; - + memset(&stat_st, 0, sizeof(struct stat)); if (filename == NULL || strnlen(filename, 1) == 0) @@ -130,8 +130,8 @@ uint8_t u_sgxprotectedfs_check_if_file_exists(const char* filename) DEBUG_PRINT("filename is NULL or empty\n"); return 1; } - - return (stat(filename, &stat_st) == 0); + + return (stat(filename, &stat_st) == 0); } @@ -247,7 +247,7 @@ int32_t u_sgxprotectedfs_fclose(void* f) DEBUG_PRINT("fileno returned -1\n"); else flock(fd, LOCK_UN); - + if ((result = fclose(file)) != 0) { if (errno != 0) @@ -274,13 +274,13 @@ uint8_t u_sgxprotectedfs_fflush(void* f) DEBUG_PRINT("file is NULL\n"); return 1; } - + if ((result = fflush(file)) != 0) { DEBUG_PRINT("fflush returned %d\n", result); return 1; } - + return 0; } @@ -302,7 +302,7 @@ int32_t u_sgxprotectedfs_remove(const char* filename) return errno; return -1; } - + return 0; } @@ -317,7 +317,7 @@ void* u_sgxprotectedfs_recovery_file_open(const char* filename) DEBUG_PRINT("recovery filename is NULL or empty\n"); return NULL; } - + for (int i = 0; i < MAX_FOPEN_RETRIES; i++) { f = fopen(filename, "wb"); @@ -330,7 +330,7 @@ void* u_sgxprotectedfs_recovery_file_open(const char* filename) DEBUG_PRINT("fopen (%s) returned NULL\n", filename); return NULL; } - + return f; } @@ -344,7 +344,7 @@ uint8_t u_sgxprotectedfs_fwrite_recovery_node(void* f, uint8_t* data, uint32_t d DEBUG_PRINT("file is NULL\n"); return 1; } - + // recovery nodes are written sequentially size_t count = fwrite(data, 1, data_length, file); if (count != data_length) @@ -371,7 +371,7 @@ int32_t u_sgxprotectedfs_do_file_recovery(const char* filename, const char* reco uint8_t* recovery_node = NULL; uint32_t i = 0; - do + do { if (filename == NULL || strnlen(filename, 1) == 0) { @@ -384,7 +384,7 @@ int32_t u_sgxprotectedfs_do_file_recovery(const char* filename, const char* reco DEBUG_PRINT("recovery filename is NULL or empty\n"); return (int32_t)NULL; } - + recovery_file = fopen(recovery_filename, "rb"); if (recovery_file == NULL) { @@ -402,7 +402,7 @@ int32_t u_sgxprotectedfs_do_file_recovery(const char* filename, const char* reco } file_size = ftello(recovery_file); - + if ((result = fseeko(recovery_file, 0, SEEK_SET)) != 0) { DEBUG_PRINT("fseeko returned %d\n", result); @@ -445,7 +445,7 @@ int32_t u_sgxprotectedfs_do_file_recovery(const char* filename, const char* reco err = ferror(recovery_file); if (err != 0) ret = err; - else if (errno != 0) + else if (errno != 0) ret = errno; break; } @@ -466,7 +466,7 @@ int32_t u_sgxprotectedfs_do_file_recovery(const char* filename, const char* reco err = ferror(source_file); if (err != 0) ret = err; - else if (errno != 0) + else if (errno != 0) ret = errno; break; } @@ -503,6 +503,6 @@ int32_t u_sgxprotectedfs_do_file_recovery(const char* filename, const char* reco if (ret == 0) remove(recovery_filename); - + return ret; } From 10b7fb3d7f993b82be894e14358372a326511f5a Mon Sep 17 00:00:00 2001 From: LI Qing Date: Mon, 13 Jan 2020 05:54:09 +0000 Subject: [PATCH 59/96] Refactor scripts that compiles and installs SGX SDK --- compile.sh | 4 ---- install.sh => compile_and_install.sh | 17 +++++++++++++++-- 2 files changed, 15 insertions(+), 6 deletions(-) delete mode 100755 compile.sh rename install.sh => compile_and_install.sh (56%) diff --git a/compile.sh b/compile.sh deleted file mode 100755 index bae5dbaea..000000000 
--- a/compile.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash -make clean -make -make sdk_install_pkg psw_install_pkg diff --git a/install.sh b/compile_and_install.sh similarity index 56% rename from install.sh rename to compile_and_install.sh index aec4cf1ef..30eaa764e 100755 --- a/install.sh +++ b/compile_and_install.sh @@ -3,9 +3,22 @@ pushd `dirname $0` > /dev/null SCRIPT_PATH=`pwd` popd > /dev/null -sudo /opt/intel/sgxpsw/uninstall.sh +# Uninstall and clean up sudo /opt/intel/sgxsdk/uninstall.sh +sudo /opt/intel/sgxpsw/uninstall.sh +make clean + +# Compile SDK and install +make sdk +make sdk_install_pkg sudo mkdir -p /opt/intel cd /opt/intel -sudo ${SCRIPT_PATH}/linux/installer/bin/sgx_linux_x64_psw_*.bin yes yes | sudo ${SCRIPT_PATH}/linux/installer/bin/sgx_linux_x64_sdk_*.bin + +# Compile PSW and install +# Note that the compilation of PSW requires the installation of SDK. +cd ${SCRIPT_PATH} +make psw +make psw_install_pkg +cd /opt/intel +sudo ${SCRIPT_PATH}/linux/installer/bin/sgx_linux_x64_psw_*.bin From 0a88815b39e0734e3cf796de51357d3870de50f0 Mon Sep 17 00:00:00 2001 From: He Sun Date: Wed, 12 Feb 2020 11:49:28 +0800 Subject: [PATCH 60/96] Add --no-start-aesm parameter to psw installation We start AESM by invoking /opt/intel/sgxpsw/aesm/aesm_service rather than using systemctl/initctl. --- compile_and_install.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/compile_and_install.sh b/compile_and_install.sh index 30eaa764e..f6952591c 100755 --- a/compile_and_install.sh +++ b/compile_and_install.sh @@ -21,4 +21,4 @@ cd ${SCRIPT_PATH} make psw make psw_install_pkg cd /opt/intel -sudo ${SCRIPT_PATH}/linux/installer/bin/sgx_linux_x64_psw_*.bin +sudo ${SCRIPT_PATH}/linux/installer/bin/sgx_linux_x64_psw_*.bin --no-start-aesm From a18698f70279f58e18fc4c29fc8abf4af57a51cb Mon Sep 17 00:00:00 2001 From: LI Qing Date: Tue, 4 Feb 2020 11:01:50 +0000 Subject: [PATCH 61/96] Add sgx-gdb support for apps running on Occlum --- .../linux/gdb-sgx-plugin/gdb_sgx_plugin.py | 62 +++++++++++++++++++ 1 file changed, 62 insertions(+) diff --git a/sdk/debugger_interface/linux/gdb-sgx-plugin/gdb_sgx_plugin.py b/sdk/debugger_interface/linux/gdb-sgx-plugin/gdb_sgx_plugin.py index be120d16c..d71adbc8a 100755 --- a/sdk/debugger_interface/linux/gdb-sgx-plugin/gdb_sgx_plugin.py +++ b/sdk/debugger_interface/linux/gdb-sgx-plugin/gdb_sgx_plugin.py @@ -56,6 +56,7 @@ ENCLAVE_INFO_SIZE = 8 * 7 + 2 * 4 INFO_FMT = 'QQQIIQQQQ' ENCLAVES_ADDR = {} +OCCLUM_GDB = 0 # The following definitions should strictly align with the struct of # tcs_t @@ -173,6 +174,8 @@ def init_enclave_debug(self): gdb.execute(gdb_cmd, False, True) global ENCLAVES_ADDR ENCLAVES_ADDR[self.start_addr] = gdb_cmd.split()[2] + if OCCLUM_GDB == 1: + GetOcclumElfBreakpoint() return 0 def get_peak_heap_used(self): @@ -686,6 +689,62 @@ def stop(self): gdb.execute(gdb_cmd, False, True) return False +class GetMuslLoadLibraryReturnBreakpoint(gdb.FinishBreakpoint): + def __init__(self): + gdb.FinishBreakpoint.__init__ (self, gdb.newest_frame(), internal=1) + self.silent = True + + def stop(self): + dso_addr_ = gdb.parse_and_eval("$rax") + dso_addr = ctypes.c_uint64(dso_addr_).value + string = read_from_memory(dso_addr, 16) + elf_start_addr, elf_name_addr = struct.unpack('QQ', string) + # Assume the file name length is less than 512 + string = read_from_memory(elf_name_addr, 512) + elf_name = "image" + for i in range(512): + if string[i] != struct.pack("B", 0): + elf_name += string[i].decode('ascii') + else: + break + gdb_cmd = 
load_symbol_cmd.GetLoadSymbolCommand(elf_name, str(elf_start_addr)) + if gdb_cmd == -1: + return 0 + gdb.execute(gdb_cmd, False, True) + return False + +class GetMuslLoadLibraryBreakpoint(gdb.Breakpoint): + def __init__(self): + gdb.Breakpoint.__init__ (self, spec="load_library", internal=1) + + def stop(self): + GetMuslLoadLibraryReturnBreakpoint() + return False + +class GetOcclumElfBreakpoint(gdb.Breakpoint): + def __init__(self): + gdb.Breakpoint.__init__ (self, spec="occlum_gdb_hook_load_elf", internal=1) + + def stop(self): + addr_ = gdb.parse_and_eval("$rdi") + addr = ctypes.c_uint64(addr_).value + file_name_ = gdb.parse_and_eval("$rsi") + file_name = ctypes.c_uint64(file_name_).value + file_name_len_ = gdb.parse_and_eval("$rdx") + file_name_len = ctypes.c_uint64(file_name_len_).value + file_string = read_from_memory(file_name, file_name_len) + file_path = "image" + \ + struct.unpack('{length}s'.format(length=file_name_len), \ + file_string[0:file_name_len])[0].decode("ascii") + gdb_cmd = load_symbol_cmd.GetLoadSymbolCommand(file_path, str(addr)) + if gdb_cmd == -1: + return 0 + print (gdb_cmd) + gdb.execute(gdb_cmd, False, True) + GetMuslLoadLibraryBreakpoint() + return False + + def sgx_debugger_init(): print ("detect urts is loaded, initializing") global SIZE @@ -697,6 +756,9 @@ def sgx_debugger_init(): if bp.location == "sgx_debug_load_state_add_element": inited = 1 break + if os.getenv("OCCLUM_GDB") == "1": + global OCCLUM_GDB + OCCLUM_GDB = 1 if inited == 0: detach_enclaves() gdb.execute("source gdb_sgx_cmd", False, True) From 2c68a3e097b0dd06be3fe8757f25680d5b02d4c8 Mon Sep 17 00:00:00 2001 From: "Zongmin.Gu" Date: Fri, 6 Mar 2020 15:12:25 +0800 Subject: [PATCH 62/96] Support handling exceptions in Occlum's user space --- sdk/trts/trts_veh.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/sdk/trts/trts_veh.cpp b/sdk/trts/trts_veh.cpp index 6d9f6b19c..d53a6772b 100644 --- a/sdk/trts/trts_veh.cpp +++ b/sdk/trts/trts_veh.cpp @@ -339,8 +339,10 @@ extern "C" sgx_status_t trts_handle_exception(void *tcs) goto default_handler; } - if ((TD2TCS(thread_data) != tcs) - || (((thread_data->first_ssa_gpr)&(~0xfff)) - SE_PAGE_SIZE) != (uintptr_t)tcs) { + // This check conflict with occlum design. 
Since it is defence in depth, just remove it + //if ((TD2TCS(thread_data) != tcs) + // || (((thread_data->first_ssa_gpr)&(~0xfff)) - SE_PAGE_SIZE) != (uintptr_t)tcs) { + if ((((thread_data->first_ssa_gpr)&(~0xfff)) - SE_PAGE_SIZE) != (uintptr_t)tcs) { goto default_handler; } From f359f3c727e732312f6f6d1186f5871036c780d2 Mon Sep 17 00:00:00 2001 From: LI Qing Date: Sun, 15 Mar 2020 11:47:14 +0000 Subject: [PATCH 63/96] Add sgx_thread_wait_untrusted_event_timeout_ocall API --- common/inc/internal/se_event.h | 3 +++ common/inc/sgx_tstdc.edl | 3 +++ common/src/se_event.c | 17 +++++++++++++++++ psw/urts/enclave_mutex.cpp | 16 ++++++++++++++++ psw/urts/linux/urts.lds | 1 + psw/urts/linux/urts_internal.lds | 1 + sdk/simulation/urtssim/urts_deploy.c | 1 + 7 files changed, 42 insertions(+) diff --git a/common/inc/internal/se_event.h b/common/inc/internal/se_event.h index 99fb4de8a..80a6149cd 100644 --- a/common/inc/internal/se_event.h +++ b/common/inc/internal/se_event.h @@ -34,6 +34,8 @@ # include # include +# include +# include # include typedef void * se_handle_t; @@ -53,6 +55,7 @@ se_handle_t SGXAPI se_event_init(void); void SGXAPI se_event_destroy(se_handle_t); int SGXAPI se_event_wait(se_handle_t); +int SGXAPI se_event_timeout_wait(se_handle_t, const struct timespec *, int *); int SGXAPI se_event_wait_timeout(se_handle_t se_event, uint64_t timeout); int SGXAPI se_event_wake(se_handle_t); diff --git a/common/inc/sgx_tstdc.edl b/common/inc/sgx_tstdc.edl index 7e5769132..fc503f2e9 100644 --- a/common/inc/sgx_tstdc.edl +++ b/common/inc/sgx_tstdc.edl @@ -36,6 +36,9 @@ enclave { /* Go outside and wait on my untrusted event */ [cdecl] int sgx_thread_wait_untrusted_event_ocall([user_check] const void *self); + /* Go outside and wait on my untrusted event with timeout */ + [cdecl] int sgx_thread_wait_untrusted_event_timeout_ocall([user_check] const void *self, int64_t sec, int64_t nsec, [out] int* err); + /* Wake a thread waiting on its untrusted event */ [cdecl] int sgx_thread_set_untrusted_event_ocall([user_check] const void *waiter); diff --git a/common/src/se_event.c b/common/src/se_event.c index 71b51a721..9b71810c3 100644 --- a/common/src/se_event.c +++ b/common/src/se_event.c @@ -57,6 +57,23 @@ int se_event_wait(se_handle_t se_event) return SE_MUTEX_SUCCESS; } +int se_event_timeout_wait(se_handle_t se_event, const struct timespec *ts, int *err) +{ + int ret = 0; + + if (se_event == NULL || err == NULL) + return SE_MUTEX_INVALID; + + if (__sync_fetch_and_add((int*)se_event, -1) == 0) { + ret = (int)syscall(__NR_futex, se_event, FUTEX_WAIT, -1, ts, NULL, 0); + __sync_val_compare_and_swap((int*)se_event, -1, 0); + } + *err = ret < 0 ? 
errno : 0; + + return SE_MUTEX_SUCCESS; +} + + /* * timeout: Second */ diff --git a/psw/urts/enclave_mutex.cpp b/psw/urts/enclave_mutex.cpp index 49abe95b9..da24e5a94 100644 --- a/psw/urts/enclave_mutex.cpp +++ b/psw/urts/enclave_mutex.cpp @@ -52,6 +52,22 @@ extern "C" int sgx_thread_wait_untrusted_event_ocall(const void *self) return SGX_SUCCESS; } +extern "C" int sgx_thread_wait_untrusted_event_timeout_ocall(const void *self, int64_t sec, int64_t nsec, int *err) +{ + if (self == NULL) + return SGX_ERROR_INVALID_PARAMETER; + + se_handle_t hevent = CEnclavePool::instance()->get_event(self); + if (hevent == NULL) + return SE_ERROR_MUTEX_GET_EVENT; + + struct timespec ts = {.tv_sec = sec, .tv_nsec = nsec}; + if (SE_MUTEX_SUCCESS != se_event_timeout_wait(hevent, &ts, err)) + return SE_ERROR_MUTEX_WAIT_EVENT; + + return SGX_SUCCESS; +} + /* set untrusted event */ extern "C" int sgx_thread_set_untrusted_event_ocall(const void *waiter) { diff --git a/psw/urts/linux/urts.lds b/psw/urts/linux/urts.lds index 44897f21e..fcee9c930 100644 --- a/psw/urts/linux/urts.lds +++ b/psw/urts/linux/urts.lds @@ -6,6 +6,7 @@ sgx_ecall; sgx_ecall_switchless; sgx_thread_wait_untrusted_event_ocall; + sgx_thread_wait_untrusted_event_timeout_ocall; sgx_thread_set_untrusted_event_ocall; sgx_thread_setwait_untrusted_events_ocall; sgx_thread_set_multiple_untrusted_events_ocall; diff --git a/psw/urts/linux/urts_internal.lds b/psw/urts/linux/urts_internal.lds index b2a0f716b..f91338a8b 100644 --- a/psw/urts/linux/urts_internal.lds +++ b/psw/urts/linux/urts_internal.lds @@ -6,6 +6,7 @@ sgx_ecall; sgx_ecall_switchless; sgx_thread_wait_untrusted_event_ocall; + sgx_thread_wait_untrusted_event_timeout_ocall; sgx_thread_set_untrusted_event_ocall; sgx_thread_setwait_untrusted_events_ocall; sgx_thread_set_multiple_untrusted_events_ocall; diff --git a/sdk/simulation/urtssim/urts_deploy.c b/sdk/simulation/urtssim/urts_deploy.c index ac5f6223b..b9aa79fd4 100644 --- a/sdk/simulation/urtssim/urts_deploy.c +++ b/sdk/simulation/urtssim/urts_deploy.c @@ -70,6 +70,7 @@ void sgx_thread_set_multiple_untrusted_events_ocall(){}; void sgx_thread_set_untrusted_event_ocall(){}; void sgx_thread_setwait_untrusted_events_ocall(){}; void sgx_thread_wait_untrusted_event_ocall(){}; +void sgx_thread_wait_untrusted_event_timeout_ocall(){}; sgx_status_t pthread_create_ocall() { From 2c7dcb060e50249f121c124a93840325ea18ab05 Mon Sep 17 00:00:00 2001 From: Zongmin Date: Wed, 29 Apr 2020 17:28:51 +0800 Subject: [PATCH 64/96] Support user code manage stack Go, Java, JIT code may switch the stack. SDK should support it in exception handler. --- sdk/trts/init_enclave.cpp | 6 +-- sdk/trts/linux/elf_parser.c | 32 ++++++++++++++++ sdk/trts/linux/elf_parser.h | 5 +++ sdk/trts/trts_veh.cpp | 75 ++++++++++++++++++++++++++++--------- 4 files changed, 97 insertions(+), 21 deletions(-) diff --git a/sdk/trts/init_enclave.cpp b/sdk/trts/init_enclave.cpp index 141cb6cbc..7c7f4bd21 100644 --- a/sdk/trts/init_enclave.cpp +++ b/sdk/trts/init_enclave.cpp @@ -79,6 +79,8 @@ extern "C" int init_enclave(void *enclave_base, void *ms) __attribute__((section extern "C" int rsrv_mem_init(void *_rsrv_mem_base, size_t _rsrv_mem_size, size_t _rsrv_mem_min_size); extern "C" int init_rts_emas(size_t rts_base, layout_t *start, layout_t *end); extern "C" int sgx_mm_init(size_t, size_t); +extern uintptr_t enclave_code_start_address; +extern size_t enclave_code_size; // init_enclave() // Initialize enclave. 
// Parameters: @@ -222,9 +224,7 @@ extern "C" int init_enclave(void *enclave_base, void *ms) return -1; } - - - return 0; + return get_first_executable_segment_info(enclave_base, &enclave_code_start_address, &enclave_code_size); } extern size_t rsrv_mem_min_size; diff --git a/sdk/trts/linux/elf_parser.c b/sdk/trts/linux/elf_parser.c index 37d71ae88..e7c521381 100644 --- a/sdk/trts/linux/elf_parser.c +++ b/sdk/trts/linux/elf_parser.c @@ -576,4 +576,36 @@ int init_segment_emas(void* enclave_base) } return 0; } + +int get_first_executable_segment_info(const void *enclave_base, + uintptr_t *segment_start_addr, + size_t *segment_size) +{ + ElfW(Half) phnum = 0; + const ElfW(Ehdr) *ehdr = (const ElfW(Ehdr) *)enclave_base; + ElfW(Phdr) *phdr = get_phdr(ehdr); + int ret = -1; + + if (!segment_start_addr || !segment_size) + return ret; + + *segment_start_addr = 0; + *segment_size = 0; + + if (phdr == NULL) + return ret; /* Invalid image. */ + + for (; phnum < ehdr->e_phnum; phnum++, phdr++) + { + if (phdr->p_type == PT_LOAD && phdr->p_flags | PF_X) + { + *segment_start_addr = (size_t)enclave_base + phdr->p_vaddr; + *segment_size = phdr->p_memsz; + break; + } + } + + return 0; +} + /* vim: set ts=4 sw=4 et cin: */ diff --git a/sdk/trts/linux/elf_parser.h b/sdk/trts/linux/elf_parser.h index ae66935e7..9199f6cb3 100644 --- a/sdk/trts/linux/elf_parser.h +++ b/sdk/trts/linux/elf_parser.h @@ -55,6 +55,11 @@ int elf_get_init_array(const void* enclave_base, int elf_get_uninit_array(const void* enclave_base, uintptr_t *uninit_array_addr, size_t *uninit_array_size); + +int get_first_executable_segment_info(const void *enclave_base, + uintptr_t *segment_start_addr, + size_t *segment_size); + #ifdef __cplusplus } #endif diff --git a/sdk/trts/trts_veh.cpp b/sdk/trts/trts_veh.cpp index d53a6772b..87783696f 100644 --- a/sdk/trts/trts_veh.cpp +++ b/sdk/trts/trts_veh.cpp @@ -181,6 +181,8 @@ int sgx_unregister_exception_handler(void *handler) return status; } +static bool is_standard_exception(uintptr_t); + // continue_execution(sgx_exception_info_t *info): // try to restore the thread context saved in info to current execution context. extern "C" __attribute__((regparm(1))) void continue_execution(sgx_exception_info_t *info); @@ -198,6 +200,7 @@ extern "C" __attribute__((regparm(1))) void internal_handle_exception(sgx_except uintptr_t *nhead = NULL; uintptr_t *ntmp = NULL; uintptr_t xsp = 0; + bool standard_exception = true; if (thread_data->exception_flag < 0) goto failed_end; @@ -273,11 +276,13 @@ extern "C" __attribute__((regparm(1))) void internal_handle_exception(sgx_except size -= sizeof(sgx_exception_handler_t); } + standard_exception = is_standard_exception(info->cpu_context.REG(ip)); + // call default handler // ignore invalid return value, treat to EXCEPTION_CONTINUE_SEARCH // check SP to be written on SSA is pointing to the trusted stack xsp = info->cpu_context.REG(sp); - if (!is_valid_sp(xsp)) + if (standard_exception && !is_valid_sp(xsp)) { goto failed_end; } @@ -324,6 +329,7 @@ extern "C" sgx_status_t trts_handle_exception(void *tcs) sgx_exception_info_t *info = NULL; uintptr_t sp_u, sp, *new_sp = NULL; size_t size = 0; + bool standard_exception = true; if ((thread_data == NULL) || (tcs == NULL)) goto default_handler; if (check_static_stack_canary(tcs) != 0) @@ -338,32 +344,45 @@ extern "C" sgx_status_t trts_handle_exception(void *tcs) if(thread_data->exception_flag == -1) { goto default_handler; } - - // This check conflict with occlum design. 
Since it is defence in depth, just remove it - //if ((TD2TCS(thread_data) != tcs) - // || (((thread_data->first_ssa_gpr)&(~0xfff)) - SE_PAGE_SIZE) != (uintptr_t)tcs) { - if ((((thread_data->first_ssa_gpr)&(~0xfff)) - SE_PAGE_SIZE) != (uintptr_t)tcs) { + + if (TD2TCS(thread_data) != tcs || (((thread_data->first_ssa_gpr) & (~0xfff)) - SE_PAGE_SIZE) != (uintptr_t)tcs) + { goto default_handler; } // no need to check the result of ssa_gpr because thread_data is always trusted ssa_gpr = reinterpret_cast(thread_data->first_ssa_gpr); - // The unstrusted RSP should never point inside the enclave - sp_u = ssa_gpr->REG(sp_u); - if (!sgx_is_outside_enclave((void *)sp_u, sizeof(sp_u))) + // The point of differentiating the two types of exceptions is that when handling an exception, we must choose a stack that is suitable for the type of the exception. + // For standard exceptions, we can just use the stacks managed by SGX SDK; + // but, for non-standard exceptions, we cannot make any assumption about how the dynamically-loaded code uses stack--- it may choose an arbitrary memory region as its stack. + // Thus, when handling non-standard exceptions, we use a special, SDK-reserved memory region as the stack. + standard_exception = is_standard_exception(ssa_gpr->REG(ip)); + + if (!standard_exception) { - g_enclave_state = ENCLAVE_CRASHED; - return SGX_ERROR_STACK_OVERRUN; + // The bottom 2 pages are used as stack to handle the non-standard exceptions. + // User should take responsibility to confirm the stack is not corrupted. + sp = thread_data->stack_limit_addr + SE_PAGE_SIZE*2; } - - // The untrusted and trusted RSPs cannot be the same, unless - // an exception happened before the enclave setup the trusted stack - sp = ssa_gpr->REG(sp); - if (sp_u == sp) + else { - g_enclave_state = ENCLAVE_CRASHED; - return SGX_ERROR_STACK_OVERRUN; + // The unstrusted RSP should never point inside the enclave + sp_u = ssa_gpr->REG(sp_u); + if (!sgx_is_outside_enclave((void *)sp_u, sizeof(sp_u))) + { + g_enclave_state = ENCLAVE_CRASHED; + return SGX_ERROR_STACK_OVERRUN; + } + + // The untrusted and trusted RSPs cannot be the same, unless + // an exception happened before the enclave setup the trusted stack + sp = ssa_gpr->REG(sp); + if (sp_u == sp) + { + g_enclave_state = ENCLAVE_CRASHED; + return SGX_ERROR_STACK_OVERRUN; + } } if(!is_stack_addr((void*)sp, 0)) // check stack overrun only, alignment will be checked after exception handled @@ -491,3 +510,23 @@ extern "C" sgx_status_t trts_handle_exception(void *tcs) g_enclave_state = ENCLAVE_CRASHED; return SGX_ERROR_ENCLAVE_CRASHED; } + +uintptr_t enclave_code_start_address = 0; +size_t enclave_code_size = 0; + +// Exceptions, according to their sources, can be categorized into two types: standard exceptions and non-standard exceptions. +// Standard exceptions are those triggered by the code of SGX SDK itself or the app code that statically linked to SGX SDK. +// Non-standard exceptions are those triggered by dynamically-loaded code. 
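Either way, the exception is ultimately delivered to handlers registered through the public sgx_register_exception_handler() API; what changes is the stack the runtime prepares before calling them. A purely illustrative 64-bit handler of the kind a JIT or dynamically-loaded runtime might install (the UD2-skipping logic and the function names are assumptions, not part of the patch):

    #include <sgx_trts_exception.h>

    /* Skip an intentional #UD trap (the 2-byte UD2 instruction) emitted by
     * generated code; anything else is passed on to the next handler. */
    static int jit_trap_handler(sgx_exception_info_t *info)
    {
        if (info->exception_vector == SGX_EXCEPTION_VECTOR_UD) {
            info->cpu_context.rip += 2;           /* length of UD2 */
            return EXCEPTION_CONTINUE_EXECUTION;  /* resume at patched RIP */
        }
        return EXCEPTION_CONTINUE_SEARCH;
    }

    /* Registered once at start-up; 1 means run this handler before others. */
    void install_jit_trap_handler(void)
    {
        sgx_register_exception_handler(1, jit_trap_handler);
    }
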
+static bool is_standard_exception(uintptr_t xip) +{ + assert(enclave_code_start_address != 0); + assert(enclave_code_size != 0); + + if (xip >= enclave_code_start_address && + xip < (enclave_code_start_address + enclave_code_size)) + { + return true; + } + + return false; +} From 572ac006a4401cd1ab227727d1db6851f3234051 Mon Sep 17 00:00:00 2001 From: LI Qing Date: Tue, 19 May 2020 07:10:14 +0000 Subject: [PATCH 65/96] Add no_mitigation option to compile sdk --- compile_and_install.sh | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/compile_and_install.sh b/compile_and_install.sh index f6952591c..2ac5f5e9e 100755 --- a/compile_and_install.sh +++ b/compile_and_install.sh @@ -9,8 +9,13 @@ sudo /opt/intel/sgxpsw/uninstall.sh make clean # Compile SDK and install -make sdk -make sdk_install_pkg +if [ "no_mitigation" = "$1" ]; then + make sdk_no_mitigation + make sdk_install_pkg_no_mitigation +else + make sdk + make sdk_install_pkg +fi sudo mkdir -p /opt/intel cd /opt/intel yes yes | sudo ${SCRIPT_PATH}/linux/installer/bin/sgx_linux_x64_sdk_*.bin From c4c69fbb1a1eb6a969407a93acc65ed9d98b98f8 Mon Sep 17 00:00:00 2001 From: "Hui, Chunyang" Date: Wed, 29 Apr 2020 05:15:32 +0000 Subject: [PATCH 66/96] Add macro to get rid of rdrand for non-supported platforms --- common/inc/sgx_random_buffers.h | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/common/inc/sgx_random_buffers.h b/common/inc/sgx_random_buffers.h index 721de3d7f..0a78bae11 100644 --- a/common/inc/sgx_random_buffers.h +++ b/common/inc/sgx_random_buffers.h @@ -164,7 +164,12 @@ struct alignas(A)randomly_placed_buffer // used for objects without a constructor. T *randomize_object(std::size_t count = 1) { - return (T*)(reset(count).__bigger_ + ((rdrand() % M) & ~(A - 1))); +#if defined(MAXIMAL_CALLSTACK) + unsigned rand_size = M - 1; +#else + unsigned rand_size = rdrand() % M; +#endif + return (T*)(reset(count).__bigger_ + ((rand_size) & ~(A - 1))); } // instantiate_object() invokes T's constructor on the object returned by From a7520f13726d6732037e2cfab76cc2a6bb38e9fe Mon Sep 17 00:00:00 2001 From: Zongmin Date: Mon, 8 Jun 2020 18:56:08 +0800 Subject: [PATCH 67/96] Create uRTS static library --- common/se_wrapper/Makefile | 3 +- common/se_wrapper_psw/Makefile | 5 +- common/src/se_event.c | 17 ------- common/src/se_event_timeout_wait.c | 52 +++++++++++++++++++++ linux/installer/common/sdk/BOMs/sdk_x64.txt | 2 + psw/enclave_common/Makefile | 2 +- psw/uae_service/linux/Makefile | 3 +- psw/urts/linux/Makefile | 17 +++++-- sdk/Makefile | 1 + sdk/simulation/urtssim/linux/Makefile | 13 +++++- 10 files changed, 88 insertions(+), 27 deletions(-) create mode 100644 common/src/se_event_timeout_wait.c diff --git a/common/se_wrapper/Makefile b/common/se_wrapper/Makefile index d741a7b9c..92ff48abc 100644 --- a/common/se_wrapper/Makefile +++ b/common/se_wrapper/Makefile @@ -43,7 +43,8 @@ OBJS := se_memory.o \ se_event.o \ se_rwlock.o \ se_time.o \ - se_map.o + se_map.o \ + se_event_timeout_wait.o LIBWRAPPER := libwrapper.a diff --git a/common/se_wrapper_psw/Makefile b/common/se_wrapper_psw/Makefile index e776c2b25..92ff48abc 100644 --- a/common/se_wrapper_psw/Makefile +++ b/common/se_wrapper_psw/Makefile @@ -34,7 +34,7 @@ include ../../buildenv.mk CFLAGS += -Werror -D_GNU_SOURCE -fPIC CFLAGS += $(ADDED_INC) -CPPFLAGS := -I$(SGX_HEADER_DIR) \ +CPPFLAGS := -I$(COMMON_DIR)/inc \ -I$(COMMON_DIR)/inc/internal OBJS := se_memory.o \ @@ -43,7 +43,8 @@ OBJS := se_memory.o \ se_event.o \ se_rwlock.o \ se_time.o \ - 
se_map.o + se_map.o \ + se_event_timeout_wait.o LIBWRAPPER := libwrapper.a diff --git a/common/src/se_event.c b/common/src/se_event.c index 9b71810c3..71b51a721 100644 --- a/common/src/se_event.c +++ b/common/src/se_event.c @@ -57,23 +57,6 @@ int se_event_wait(se_handle_t se_event) return SE_MUTEX_SUCCESS; } -int se_event_timeout_wait(se_handle_t se_event, const struct timespec *ts, int *err) -{ - int ret = 0; - - if (se_event == NULL || err == NULL) - return SE_MUTEX_INVALID; - - if (__sync_fetch_and_add((int*)se_event, -1) == 0) { - ret = (int)syscall(__NR_futex, se_event, FUTEX_WAIT, -1, ts, NULL, 0); - __sync_val_compare_and_swap((int*)se_event, -1, 0); - } - *err = ret < 0 ? errno : 0; - - return SE_MUTEX_SUCCESS; -} - - /* * timeout: Second */ diff --git a/common/src/se_event_timeout_wait.c b/common/src/se_event_timeout_wait.c new file mode 100644 index 000000000..ff92f0613 --- /dev/null +++ b/common/src/se_event_timeout_wait.c @@ -0,0 +1,52 @@ +/* + * Copyright (C) 2011-2020 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + + +#include "se_event.h" + +#include +#include + +int se_event_timeout_wait(se_handle_t se_event, const struct timespec *ts, int *err) +{ + int ret = 0; + + if (se_event == NULL || err == NULL) + return SE_MUTEX_INVALID; + + if (__sync_fetch_and_add((int*)se_event, -1) == 0) { + ret = (int)syscall(__NR_futex, se_event, FUTEX_WAIT, -1, ts, NULL, 0); + __sync_val_compare_and_swap((int*)se_event, -1, 0); + } + *err = ret < 0 ? 
errno : 0; + + return SE_MUTEX_SUCCESS; +} diff --git a/linux/installer/common/sdk/BOMs/sdk_x64.txt b/linux/installer/common/sdk/BOMs/sdk_x64.txt index d96b24c0b..fd77476cb 100644 --- a/linux/installer/common/sdk/BOMs/sdk_x64.txt +++ b/linux/installer/common/sdk/BOMs/sdk_x64.txt @@ -34,6 +34,8 @@ DeliveryName InstallName FileCheckSum FileFeature FileOwner /build/linux/libsgx_pclsim.a /package/lib64/libsgx_pclsim.a 0 main STP /build/linux/libsgx_urts_deploy.so /package/lib64/libsgx_urts.so 0 main STP /build/linux/libsgx_urts_sim.so /package/lib64/libsgx_urts_sim.so 0 main STP +/build/linux/libsgx_urts.a /package/lib64/libsgx_urts.a 0 main STP +/build/linux/libsgx_urts_sim.a /package/lib64/libsgx_urts_sim.a 0 main STP /build/linux/libc++_Changes_SGX.txt /package/lib64/libc++_Changes_SGX.txt 0 main STP /build/linux/sgx_config_cpusvn /package/bin/x64/sgx_config_cpusvn 0 main STP /build/linux/sgx_edger8r /package/bin/x64/sgx_edger8r 0 main STP diff --git a/psw/enclave_common/Makefile b/psw/enclave_common/Makefile index eed4c8a7b..583536796 100644 --- a/psw/enclave_common/Makefile +++ b/psw/enclave_common/Makefile @@ -44,7 +44,7 @@ CXXFLAGS += $(ADDED_INC) CFLAGS += -fPIC -Werror -g CFLAGS += $(ADDED_INC) -INC += -I$(SGX_HEADER_DIR) \ +INC += -I$(COMMON_DIR)/inc \ -I$(COMMON_DIR)/inc/internal \ -I$(COMMON_DIR)/inc/internal/linux \ -I$(LINUX_EXTERNAL_DIR)/sgx-emm/emm_src/include \ diff --git a/psw/uae_service/linux/Makefile b/psw/uae_service/linux/Makefile index c797b00a9..523c2b03f 100644 --- a/psw/uae_service/linux/Makefile +++ b/psw/uae_service/linux/Makefile @@ -52,8 +52,7 @@ INCLUDE += -I$(COMMON_DIR) \ INCLUDE += -I$(LINUX_PSW_DIR)/ae/common \ -I$(LINUX_PSW_DIR)/ae/inc \ - -I$(LINUX_PSW_DIR)/ae/inc/internal \ - -I$(SGX_HEADER_DIR) + -I$(LINUX_PSW_DIR)/ae/inc/internal INCLUDE += -I$(LINUX_EXTERNAL_DIR)/epid-sdk \ -I$(IPC_COMMON_INC_DIR) \ diff --git a/psw/urts/linux/Makefile b/psw/urts/linux/Makefile index f815e7faf..689fd4585 100644 --- a/psw/urts/linux/Makefile +++ b/psw/urts/linux/Makefile @@ -44,7 +44,7 @@ CFLAGS += $(ADDED_INC) VTUNE_DIR = $(LINUX_EXTERNAL_DIR)/vtune/linux -INC += -I$(SGX_HEADER_DIR) \ +INC += -I$(COMMON_DIR)/inc \ -I$(COMMON_DIR)/inc/internal \ -I$(COMMON_DIR)/inc/internal/linux \ -I$(LINUX_PSW_DIR)/enclave_common \ @@ -122,11 +122,13 @@ LIBSGX_ENCLAVE_COMMON := libsgx_enclave_common.a LIBURTS := libsgx_urts.so LIBURTS_INTERNAL := liburts_internal.so LIBURTS_DEBUG := libsgx_urts.so.debug +LIBURTS_STATIC := libsgx_urts.a .PHONY: all -all: $(LIBURTS) $(LIBURTS_INTERNAL) $(LIBURTS_DEBUG) | $(BUILD_DIR) +all: $(LIBURTS) $(LIBURTS_STATIC) $(LIBURTS_INTERNAL) $(LIBURTS_DEBUG) | $(BUILD_DIR) @$(CP) $(LIBURTS) $| + @$(CP) $(LIBURTS_STATIC) $| @$(CP) $(LIBURTS_INTERNAL) $| ifndef DEBUG @$(CP) $(LIBURTS_DEBUG) $| @@ -144,6 +146,15 @@ $(LIBURTS_INTERNAL): $(INTERNAL_OBJ) $(LIBWRAPPER) $(LIBSGX_ENCLAVE_COMMON) ittn $(LIBURTS): $(URTS_OBJ) $(LIBWRAPPER) $(LIBSGX_ENCLAVE_COMMON) ittnotify $(CXX) $(CXXFLAGS) -shared -Wl,-soname=$@ $(LIB) -o $@ $(URTS_OBJ) $(LDFLAGS) +$(LIBURTS_STATIC): $(LIBURTS) + @$(MKDIR) $(BUILD_DIR)/.sgx_static_urts + @$(RM) -f $(BUILD_DIR)/.sgx_static_urts/* + cd $(BUILD_DIR)/.sgx_static_urts && \ + $(AR) x $(COMMON_DIR)/se_wrapper/libwrapper.a && \ + $(AR) x $(VTUNE_DIR)/sdk/src/ittnotify/libittnotify.a && \ + $(RM) -f se_event.o + $(AR) rsD $@ $(URTS_OBJ) $(BUILD_DIR)/.sgx_static_urts/*.o + $(LIBURTS_DEBUG): $(LIBURTS) ifndef DEBUG $(CP) $(LIBURTS) $(LIBURTS).orig @@ -173,7 +184,7 @@ $(BUILD_DIR): .PHONY: clean clean:: - @$(RM) *.o $(LIBURTS) 
$(LIBURTS_INTERNAL) $(LIBURTS_DEBUG) + @$(RM) *.o $(LIBURTS) $(LIBURTS_INTERNAL) $(LIBURTS_DEBUG) $(LIBURTS_STATIC) @$(RM) $(BUILD_DIR)/$(LIBURTS) $(BUILD_DIR)/$(LIBURTS_INTERNAL) @$(RM) $(LIBURTS).orig $(BUILD_DIR)/$(LIBURTS_DEBUG) $(MAKE) -C $(COMMON_DIR)/se_wrapper_psw/ clean diff --git a/sdk/Makefile b/sdk/Makefile index 4c95dd3d7..fd3e90667 100644 --- a/sdk/Makefile +++ b/sdk/Makefile @@ -48,6 +48,7 @@ opt_check_failed: .PHONY: all all: $(CHECK_OPT) $(MAKE) components + $(MAKE) -C ../psw/urts/linux ifneq ($(MITIGATION-CVE-2020-0551),) $(RM) -r $(BUILD_DIR)$(MITIGATION-CVE-2020-0551) mv $(BUILD_DIR) $(BUILD_DIR)$(MITIGATION-CVE-2020-0551) diff --git a/sdk/simulation/urtssim/linux/Makefile b/sdk/simulation/urtssim/linux/Makefile index af6456f93..626351f58 100644 --- a/sdk/simulation/urtssim/linux/Makefile +++ b/sdk/simulation/urtssim/linux/Makefile @@ -121,6 +121,7 @@ vpath %.c .:$(DIR6) LDFLAGS += $(COMMON_LDFLAGS) -Wl,--version-script=$(LINUX_PSW_DIR)/urts/linux/urts.lds LIBURTSSIM_SHARED := libsgx_urts_sim.so +LIBURTSSIM_STATIC := libsgx_urts_sim.a LIBURTSSIM_DEBUG := libsgx_urts_sim.so.debug LIBURTS_DEPLOY := libsgx_urts_deploy.so @@ -128,8 +129,9 @@ LDLIBS += -lwrapper -lcrypto -Wl,-Bdynamic -Wl,-Bsymbolic -lsgx_uae_service_sim SONAME = $(LIBURTSSIM_SHARED) .PHONY: all -all: $(LIBURTSSIM_SHARED) $(LIBURTSSIM_DEBUG) $(LIBURTS_DEPLOY)| $(BUILD_DIR) +all: $(LIBURTSSIM_SHARED) $(LIBURTSSIM_STATIC) $(LIBURTSSIM_DEBUG) $(LIBURTS_DEPLOY)| $(BUILD_DIR) $(CP) $(LIBURTSSIM_SHARED) $| + $(CP) $(LIBURTSSIM_STATIC) $| $(CP) $(LIBURTS_DEPLOY) $| ifndef DEBUG $(CP) $(LIBURTSSIM_DEBUG) $| @@ -138,6 +140,15 @@ endif $(LIBURTSSIM_SHARED): simasm uinst driver_api wrapper uae_service_sim $(OBJ) $(OBJ6) ittnotify $(CXX) $(CXXFLAGS) -shared -Wl,-soname=$(SONAME) $(OBJ) $(OBJ6) $(LDFLAGS) $(LDLIBS) -o $@ +$(LIBURTSSIM_STATIC): $(LIBURTSSIM_SHARED) + @$(MKDIR) $(BUILD_DIR)/.sgx_static_urts + @$(RM) -f $(BUILD_DIR)/.sgx_static_urts/* + cd $(BUILD_DIR)/.sgx_static_urts && \ + $(AR) x $(COMMON_DIR)/se_wrapper/libwrapper.a && \ + $(AR) x $(VTUNE_DIR)/sdk/src/ittnotify/libittnotify.a && \ + $(RM) -f se_event.o + $(AR) rsD $@ $(URTS_OBJ) $(BUILD_DIR)/.sgx_static_urts/*.o + $(LIBURTSSIM_DEBUG): $(LIBURTSSIM_SHARED) ifndef DEBUG $(CP) $(LIBURTSSIM_SHARED) $(LIBURTSSIM_SHARED).orig From 9c7875d530219b8f71c92831d95b2626bf6ef3c6 Mon Sep 17 00:00:00 2001 From: He Sun Date: Sun, 28 Jun 2020 10:25:47 +0800 Subject: [PATCH 68/96] Fix urts not found by sgx debugger when statically linked by libocclum-pal.so --- sdk/debugger_interface/linux/gdb-sgx-plugin/gdb_sgx_plugin.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sdk/debugger_interface/linux/gdb-sgx-plugin/gdb_sgx_plugin.py b/sdk/debugger_interface/linux/gdb-sgx-plugin/gdb_sgx_plugin.py index d71adbc8a..dd5a4b828 100755 --- a/sdk/debugger_interface/linux/gdb-sgx-plugin/gdb_sgx_plugin.py +++ b/sdk/debugger_interface/linux/gdb-sgx-plugin/gdb_sgx_plugin.py @@ -526,7 +526,7 @@ def is_bp_in_urts(): try: ip = gdb.parse_and_eval("$pc") solib_name = gdb.solib_name(int(str(ip).split()[0], 16)) - if(solib_name.find("libsgx_urts.so") == -1 and solib_name.find("libsgx_urts_sim.so") == -1 and solib_name.find("libsgx_aesm_service.so") == -1): + if(solib_name.find("libocclum-pal.so") == -1 and solib_name.find("libsgx_urts.so") == -1 and solib_name.find("libsgx_urts_sim.so") == -1 and solib_name.find("libsgx_aesm_service.so") == -1): return False else: return True @@ -778,7 +778,7 @@ def exit_handler(event): def newobj_handler(event): solib_name = 
os.path.basename(event.new_objfile.filename) - if solib_name == 'libsgx_urts.so' or solib_name == 'libsgx_urts_sim.so' or solib_name == 'libsgx_aesm_service.so': + if solib_name.find("libocclum-pal.so") != -1 or solib_name == 'libsgx_urts.so' or solib_name == 'libsgx_urts_sim.so' or solib_name == 'libsgx_aesm_service.so': sgx_debugger_init() return From d03fcc3ead03be80deefa484d60d936c80992f57 Mon Sep 17 00:00:00 2001 From: "Tate, Hongliang Tian" Date: Fri, 3 Jul 2020 13:32:36 +0000 Subject: [PATCH 69/96] Add optional timeout to the OCall for waiting events --- common/inc/sgx_tstdc.edl | 10 ++++++++-- psw/urts/enclave_mutex.cpp | 5 ++--- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/common/inc/sgx_tstdc.edl b/common/inc/sgx_tstdc.edl index fc503f2e9..a277b0f17 100644 --- a/common/inc/sgx_tstdc.edl +++ b/common/inc/sgx_tstdc.edl @@ -30,14 +30,20 @@ */ enclave { + /* Define libc's timespec_t for EDL */ + struct sgx_edl_timespec { + int64_t sec; + int64_t nsec; + }; + untrusted { [cdecl] void sgx_oc_cpuidex([out] int cpuinfo[4], int leaf, int subleaf); - + /* Go outside and wait on my untrusted event */ [cdecl] int sgx_thread_wait_untrusted_event_ocall([user_check] const void *self); /* Go outside and wait on my untrusted event with timeout */ - [cdecl] int sgx_thread_wait_untrusted_event_timeout_ocall([user_check] const void *self, int64_t sec, int64_t nsec, [out] int* err); + [cdecl] int sgx_thread_wait_untrusted_event_timeout_ocall([user_check] const void *self, [in] const struct sgx_edl_timespec* ts, [out] int* err); /* Wake a thread waiting on its untrusted event */ [cdecl] int sgx_thread_set_untrusted_event_ocall([user_check] const void *waiter); diff --git a/psw/urts/enclave_mutex.cpp b/psw/urts/enclave_mutex.cpp index da24e5a94..2cfa6e042 100644 --- a/psw/urts/enclave_mutex.cpp +++ b/psw/urts/enclave_mutex.cpp @@ -52,7 +52,7 @@ extern "C" int sgx_thread_wait_untrusted_event_ocall(const void *self) return SGX_SUCCESS; } -extern "C" int sgx_thread_wait_untrusted_event_timeout_ocall(const void *self, int64_t sec, int64_t nsec, int *err) +extern "C" int sgx_thread_wait_untrusted_event_timeout_ocall(const void *self, const struct timespec *ts, int *err) { if (self == NULL) return SGX_ERROR_INVALID_PARAMETER; @@ -61,8 +61,7 @@ extern "C" int sgx_thread_wait_untrusted_event_timeout_ocall(const void *self, i if (hevent == NULL) return SE_ERROR_MUTEX_GET_EVENT; - struct timespec ts = {.tv_sec = sec, .tv_nsec = nsec}; - if (SE_MUTEX_SUCCESS != se_event_timeout_wait(hevent, &ts, err)) + if (SE_MUTEX_SUCCESS != se_event_timeout_wait(hevent, ts, err)) return SE_ERROR_MUTEX_WAIT_EVENT; return SGX_SUCCESS; From ad735207c0863959b2dfc6954877bfcacbf4002c Mon Sep 17 00:00:00 2001 From: "Hui, Chunyang" Date: Thu, 16 Jul 2020 06:24:49 +0000 Subject: [PATCH 70/96] Add new EDL file for occlum specific usage --- common/inc/sgx_occlum_utils.edl | 6 ++++++ common/inc/sgx_tstdc.edl | 3 --- linux/installer/common/sdk/BOMs/sdk_base.txt | 1 + 3 files changed, 7 insertions(+), 3 deletions(-) create mode 100644 common/inc/sgx_occlum_utils.edl diff --git a/common/inc/sgx_occlum_utils.edl b/common/inc/sgx_occlum_utils.edl new file mode 100644 index 000000000..7677e9c3f --- /dev/null +++ b/common/inc/sgx_occlum_utils.edl @@ -0,0 +1,6 @@ +enclave { + untrusted { + /* Go outside and wait on my untrusted event with timeout */ + [cdecl] int sgx_thread_wait_untrusted_event_timeout_ocall([user_check] const void *self, [in] const struct sgx_edl_timespec* ts, [out] int* err); + }; +}; diff --git 
a/common/inc/sgx_tstdc.edl b/common/inc/sgx_tstdc.edl index a277b0f17..0ec65f723 100644 --- a/common/inc/sgx_tstdc.edl +++ b/common/inc/sgx_tstdc.edl @@ -42,9 +42,6 @@ enclave { /* Go outside and wait on my untrusted event */ [cdecl] int sgx_thread_wait_untrusted_event_ocall([user_check] const void *self); - /* Go outside and wait on my untrusted event with timeout */ - [cdecl] int sgx_thread_wait_untrusted_event_timeout_ocall([user_check] const void *self, [in] const struct sgx_edl_timespec* ts, [out] int* err); - /* Wake a thread waiting on its untrusted event */ [cdecl] int sgx_thread_set_untrusted_event_ocall([user_check] const void *waiter); diff --git a/linux/installer/common/sdk/BOMs/sdk_base.txt b/linux/installer/common/sdk/BOMs/sdk_base.txt index c9e5f348e..2b4309cd6 100644 --- a/linux/installer/common/sdk/BOMs/sdk_base.txt +++ b/linux/installer/common/sdk/BOMs/sdk_base.txt @@ -26,6 +26,7 @@ DeliveryName InstallName FileCheckSum FileFeature FileOwner /common/inc/sgx_trts.h /package/include/./sgx_trts.h 0 main STP /common/inc/sgx_tseal.h /package/include/./sgx_tseal.h 0 main STP /common/inc/sgx_tstdc.edl /package/include/./sgx_tstdc.edl 0 main STP +/common/inc/sgx_occlum_utils.edl /package/include/./sgx_occlum_utils.edl 0 main Occlum /common/inc/sgx_uae_service.h /package/include/./sgx_uae_service.h 0 main STP /common/inc/sgx_uae_epid.h /package/include/./sgx_uae_epid.h 0 main STP /common/inc/sgx_uae_launch.h /package/include/./sgx_uae_launch.h 0 main STP From 64c04218ea489c8aace0317bbebbb876fb35c74a Mon Sep 17 00:00:00 2001 From: "Hui, Chunyang" Date: Tue, 4 Aug 2020 09:20:48 +0000 Subject: [PATCH 71/96] Improve support for urts_sim static library building 1. Fix urts_sim static library lack of symbols 2. Add new urts_sim static library needed by Occlum tools --- linux/installer/common/sdk/BOMs/sdk_x64.txt | 1 + sdk/simulation/urtssim/linux/Makefile | 16 +++++++++++++--- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/linux/installer/common/sdk/BOMs/sdk_x64.txt b/linux/installer/common/sdk/BOMs/sdk_x64.txt index fd77476cb..ca62ff7df 100644 --- a/linux/installer/common/sdk/BOMs/sdk_x64.txt +++ b/linux/installer/common/sdk/BOMs/sdk_x64.txt @@ -36,6 +36,7 @@ DeliveryName InstallName FileCheckSum FileFeature FileOwner /build/linux/libsgx_urts_sim.so /package/lib64/libsgx_urts_sim.so 0 main STP /build/linux/libsgx_urts.a /package/lib64/libsgx_urts.a 0 main STP /build/linux/libsgx_urts_sim.a /package/lib64/libsgx_urts_sim.a 0 main STP +/build/linux/libsgx_urts_sim_with_se_event.a /package/lib64/libsgx_urts_sim_with_se_event.a 0 main STP /build/linux/libc++_Changes_SGX.txt /package/lib64/libc++_Changes_SGX.txt 0 main STP /build/linux/sgx_config_cpusvn /package/bin/x64/sgx_config_cpusvn 0 main STP /build/linux/sgx_edger8r /package/bin/x64/sgx_edger8r 0 main STP diff --git a/sdk/simulation/urtssim/linux/Makefile b/sdk/simulation/urtssim/linux/Makefile index 626351f58..77ea7e1b3 100644 --- a/sdk/simulation/urtssim/linux/Makefile +++ b/sdk/simulation/urtssim/linux/Makefile @@ -122,6 +122,7 @@ LDFLAGS += $(COMMON_LDFLAGS) -Wl,--version-script=$(LINUX_PSW_DIR)/urts/linux/ur LIBURTSSIM_SHARED := libsgx_urts_sim.so LIBURTSSIM_STATIC := libsgx_urts_sim.a +LIBURTSSIM_STATIC_WITH_EVENT := libsgx_urts_sim_with_se_event.a LIBURTSSIM_DEBUG := libsgx_urts_sim.so.debug LIBURTS_DEPLOY := libsgx_urts_deploy.so @@ -129,9 +130,10 @@ LDLIBS += -lwrapper -lcrypto -Wl,-Bdynamic -Wl,-Bsymbolic -lsgx_uae_service_sim SONAME = $(LIBURTSSIM_SHARED) .PHONY: all -all: $(LIBURTSSIM_SHARED) 
$(LIBURTSSIM_STATIC) $(LIBURTSSIM_DEBUG) $(LIBURTS_DEPLOY)| $(BUILD_DIR) +all: $(LIBURTSSIM_SHARED) $(LIBURTSSIM_STATIC) $(LIBURTSSIM_STATIC_WITH_EVENT) $(LIBURTSSIM_DEBUG) $(LIBURTS_DEPLOY)| $(BUILD_DIR) $(CP) $(LIBURTSSIM_SHARED) $| $(CP) $(LIBURTSSIM_STATIC) $| + $(CP) $(LIBURTSSIM_STATIC_WITH_EVENT) $| $(CP) $(LIBURTS_DEPLOY) $| ifndef DEBUG $(CP) $(LIBURTSSIM_DEBUG) $| @@ -147,7 +149,15 @@ $(LIBURTSSIM_STATIC): $(LIBURTSSIM_SHARED) $(AR) x $(COMMON_DIR)/se_wrapper/libwrapper.a && \ $(AR) x $(VTUNE_DIR)/sdk/src/ittnotify/libittnotify.a && \ $(RM) -f se_event.o - $(AR) rsD $@ $(URTS_OBJ) $(BUILD_DIR)/.sgx_static_urts/*.o + $(AR) rsD $@ $(OBJ) $(OBJ6) $(BUILD_DIR)/.sgx_static_urts/*.o + +$(LIBURTSSIM_STATIC_WITH_EVENT): $(LIBURTSSIM_SHARED) + @$(MKDIR) $(BUILD_DIR)/.sgx_static_urts + @$(RM) -f $(BUILD_DIR)/.sgx_static_urts/* + cd $(BUILD_DIR)/.sgx_static_urts && \ + $(AR) x $(COMMON_DIR)/se_wrapper/libwrapper.a && \ + $(AR) x $(VTUNE_DIR)/sdk/src/ittnotify/libittnotify.a + $(AR) rsD $@ $(OBJ) $(OBJ6) $(BUILD_DIR)/.sgx_static_urts/*.o $(LIBURTSSIM_DEBUG): $(LIBURTSSIM_SHARED) ifndef DEBUG @@ -197,7 +207,7 @@ $(LIBURTS_DEPLOY):../urts_deploy.c .PHONY: clean clean:: - @$(RM) *.o $(LIBURTSSIM_SHARED) $(LIBURTS_DEPLOY) $(LIBURTSSIM_DEBUG) $(LIBURTSSIM_SHARED).orig + @$(RM) *.o $(LIBURTSSIM_SHARED) $(LIBURTSSIM_STATIC) $(LIBURTSSIM_STATIC_WITH_EVENT) $(LIBURTS_DEPLOY) $(LIBURTSSIM_DEBUG) $(LIBURTSSIM_SHARED).orig @$(RM) $(BUILD_DIR)/$(LIBURTSSIM_SHARED) $(BUILD_DIR)/$(LIBURTS_DEPLOY) $(BUILD_DIR)/$(LIBURTSSIM_DEBUG) $(MAKE) -C $(COMMON_DIR)/se_wrapper clean $(MAKE) -C $(SIM_DIR)/driver_api/ clean From c15d8f7aea416233351ca5150c0698550db6426b Mon Sep 17 00:00:00 2001 From: Zongmin Date: Wed, 23 Sep 2020 21:32:23 +0800 Subject: [PATCH 72/96] Support no-return customer exception handlers --- sdk/trts/trts_veh.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sdk/trts/trts_veh.cpp b/sdk/trts/trts_veh.cpp index 87783696f..df5a8ca9e 100644 --- a/sdk/trts/trts_veh.cpp +++ b/sdk/trts/trts_veh.cpp @@ -240,7 +240,8 @@ extern "C" __attribute__((regparm(1))) void internal_handle_exception(sgx_except //instruction triggering the exception will be executed again. 
continue_execution(info); } - // The customer handler may never return, use alloca instead of malloc + + // The customer handler may never return, use alloca instead of malloc if ((nhead = (uintptr_t *)alloca(size)) == NULL) { sgx_spin_unlock(&g_handler_lock); From 617d0dfc9b89fb0d57efce9d736b6ef7839c8a2b Mon Sep 17 00:00:00 2001 From: LI Qing Date: Tue, 17 Nov 2020 14:21:56 +0800 Subject: [PATCH 73/96] Support to wait event with an absolute timeout --- common/inc/internal/se_event.h | 2 +- common/inc/sgx_occlum_utils.edl | 2 +- common/src/se_event_timeout_wait.c | 8 ++++++-- psw/urts/enclave_mutex.cpp | 4 ++-- 4 files changed, 10 insertions(+), 6 deletions(-) diff --git a/common/inc/internal/se_event.h b/common/inc/internal/se_event.h index 80a6149cd..4b8b0db37 100644 --- a/common/inc/internal/se_event.h +++ b/common/inc/internal/se_event.h @@ -55,7 +55,7 @@ se_handle_t SGXAPI se_event_init(void); void SGXAPI se_event_destroy(se_handle_t); int SGXAPI se_event_wait(se_handle_t); -int SGXAPI se_event_timeout_wait(se_handle_t, const struct timespec *, int *); +int SGXAPI se_event_timeout_wait(se_handle_t, int, const struct timespec *, int *); int SGXAPI se_event_wait_timeout(se_handle_t se_event, uint64_t timeout); int SGXAPI se_event_wake(se_handle_t); diff --git a/common/inc/sgx_occlum_utils.edl b/common/inc/sgx_occlum_utils.edl index 7677e9c3f..d396b9465 100644 --- a/common/inc/sgx_occlum_utils.edl +++ b/common/inc/sgx_occlum_utils.edl @@ -1,6 +1,6 @@ enclave { untrusted { /* Go outside and wait on my untrusted event with timeout */ - [cdecl] int sgx_thread_wait_untrusted_event_timeout_ocall([user_check] const void *self, [in] const struct sgx_edl_timespec* ts, [out] int* err); + [cdecl] int sgx_thread_wait_untrusted_event_timeout_ocall([user_check] const void *self, int clockbit, [in] const struct sgx_edl_timespec* ts, [out] int* err); }; }; diff --git a/common/src/se_event_timeout_wait.c b/common/src/se_event_timeout_wait.c index ff92f0613..3bc80a692 100644 --- a/common/src/se_event_timeout_wait.c +++ b/common/src/se_event_timeout_wait.c @@ -35,7 +35,7 @@ #include #include -int se_event_timeout_wait(se_handle_t se_event, const struct timespec *ts, int *err) +int se_event_timeout_wait(se_handle_t se_event, int clockbit, const struct timespec *ts, int *err) { int ret = 0; @@ -43,7 +43,11 @@ int se_event_timeout_wait(se_handle_t se_event, const struct timespec *ts, int * return SE_MUTEX_INVALID; if (__sync_fetch_and_add((int*)se_event, -1) == 0) { - ret = (int)syscall(__NR_futex, se_event, FUTEX_WAIT, -1, ts, NULL, 0); + if (clockbit & FUTEX_CLOCK_REALTIME) { + ret = (int)syscall(__NR_futex, se_event, FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME, -1, ts, NULL, FUTEX_BITSET_MATCH_ANY); + } else { + ret = (int)syscall(__NR_futex, se_event, FUTEX_WAIT, -1, ts, NULL, 0); + } __sync_val_compare_and_swap((int*)se_event, -1, 0); } *err = ret < 0 ? 
errno : 0; diff --git a/psw/urts/enclave_mutex.cpp b/psw/urts/enclave_mutex.cpp index 2cfa6e042..3b81ffce5 100644 --- a/psw/urts/enclave_mutex.cpp +++ b/psw/urts/enclave_mutex.cpp @@ -52,7 +52,7 @@ extern "C" int sgx_thread_wait_untrusted_event_ocall(const void *self) return SGX_SUCCESS; } -extern "C" int sgx_thread_wait_untrusted_event_timeout_ocall(const void *self, const struct timespec *ts, int *err) +extern "C" int sgx_thread_wait_untrusted_event_timeout_ocall(const void *self, int clockbit, const struct timespec *ts, int *err) { if (self == NULL) return SGX_ERROR_INVALID_PARAMETER; @@ -61,7 +61,7 @@ extern "C" int sgx_thread_wait_untrusted_event_timeout_ocall(const void *self, c if (hevent == NULL) return SE_ERROR_MUTEX_GET_EVENT; - if (SE_MUTEX_SUCCESS != se_event_timeout_wait(hevent, ts, err)) + if (SE_MUTEX_SUCCESS != se_event_timeout_wait(hevent, clockbit, ts, err)) return SE_ERROR_MUTEX_WAIT_EVENT; return SGX_SUCCESS; From af62b2ff426afe991e06adfb48317b3bb319317e Mon Sep 17 00:00:00 2001 From: He Sun Date: Thu, 10 Dec 2020 12:46:20 +0000 Subject: [PATCH 74/96] Compile and install DCAP package --- compile_and_install.sh | 61 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) diff --git a/compile_and_install.sh b/compile_and_install.sh index 2ac5f5e9e..05efbad2a 100755 --- a/compile_and_install.sh +++ b/compile_and_install.sh @@ -8,6 +8,8 @@ sudo /opt/intel/sgxsdk/uninstall.sh sudo /opt/intel/sgxpsw/uninstall.sh make clean +make preparation + # Compile SDK and install if [ "no_mitigation" = "$1" ]; then make sdk_no_mitigation @@ -27,3 +29,62 @@ make psw make psw_install_pkg cd /opt/intel sudo ${SCRIPT_PATH}/linux/installer/bin/sgx_linux_x64_psw_*.bin --no-start-aesm + +# Compile and install DCAP package +# The DCAP package is not the latest. It contains an out-of-data TCB. +cd ${SCRIPT_PATH} + +DEB_DISTRO_URL=https://download.01.org/intel-sgx/sgx-dcap/1.8/linux/distro/ubuntu18.04-server/debian_pkgs + +if [ -f "/etc/debian_version" ]; then + make deb_psw_pkg + + cd linux/installer/deb + + # Get Intel-signed application enclaves from the official website. + mkdir libsgx-ae + pushd libsgx-ae + wget ${DEB_DISTRO_URL}/libs/libsgx-ae-qe3/libsgx-ae-qe3_1.8.100.2-bionic1_amd64.deb \ + ${DEB_DISTRO_URL}/libs/libsgx-ae-qve/libsgx-ae-qve_1.8.100.2-bionic1_amd64.deb \ + ${DEB_DISTRO_URL}/utils/libsgx-ae-pce/libsgx-ae-pce_2.11.100.2-bionic1_amd64.deb + sudo dpkg -i libsgx-ae-pce_*.deb libsgx-ae-qe3_*.deb libsgx-ae-qve_*.deb + popd + + sudo dpkg -i libsgx-enclave-common/libsgx-enclave-common_*.deb \ + libsgx-quote-ex/libsgx-quote-ex_*.deb \ + libsgx-urts/libsgx-urts_*.deb + + cd sgx-aesm-service/ + sudo dpkg -i libsgx-dcap-ql_*.deb libsgx-qe3-logic_*.deb libsgx-pce-logic_*.deb \ + libsgx-dcap-quote-verify_*.deb libsgx-dcap-ql_*.deb libsgx-dcap-ql-dev_*.deb \ + libsgx-dcap-default-qpl_*.deb libsgx-dcap-default-qpl-dev_*.deb \ + libsgx-dcap-quote-verify-dev_*.deb + +elif command -v rpm >/dev/null 2>&1; then + make rpm_psw_pkg + + cd linux/installer/rpm + + # Get Intel-signed application enclaves from the official website. + # Libaries have dependencies on AE. Install AE first. 
+ mkdir libsgx-ae + pushd libsgx-ae + wget https://download.01.org/intel-sgx/sgx-dcap/1.8/linux/distro/centos8.1-server/sgx_rpm_local_repo.tgz + tar -xvf sgx_rpm_local_repo.tgz + cd sgx_rpm_local_repo + sudo rpm -ivh libsgx-ae-pce*.rpm libsgx-ae-qe3*.rpm libsgx-ae-qve*.rpm + popd + + sudo rpm -ivh libsgx-enclave-common/libsgx-enclave-common*.rpm \ + libsgx-quote-ex/libsgx-quote-ex*.rpm \ + libsgx-urts/libsgx-urts*.rpm + + cd sgx-aesm-service/ + sudo rpm -ivh libsgx-dcap-ql*.rpm libsgx-qe3-logic*.rpm libsgx-pce-logic*.rpm \ + libsgx-dcap-quote-verify*.rpm libsgx-dcap-ql*.rpm libsgx-dcap-ql-dev*.rpm \ + libsgx-dcap-default-qpl*.rpm libsgx-dcap-default-qpl-dev*.rpm \ + libsgx-dcap-quote-verify-dev*.rpm + +else + echo "unsupported package system" +fi From e08e42146c9e725d8ac9cccc6e80e703e8770d5a Mon Sep 17 00:00:00 2001 From: LI Qing Date: Wed, 13 Jan 2021 14:56:11 +0800 Subject: [PATCH 75/96] Support SGX-GDB to load library's symbol for Glibc --- .../linux/gdb-sgx-plugin/gdb_sgx_plugin.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/sdk/debugger_interface/linux/gdb-sgx-plugin/gdb_sgx_plugin.py b/sdk/debugger_interface/linux/gdb-sgx-plugin/gdb_sgx_plugin.py index dd5a4b828..aeca1f950 100755 --- a/sdk/debugger_interface/linux/gdb-sgx-plugin/gdb_sgx_plugin.py +++ b/sdk/debugger_interface/linux/gdb-sgx-plugin/gdb_sgx_plugin.py @@ -689,7 +689,7 @@ def stop(self): gdb.execute(gdb_cmd, False, True) return False -class GetMuslLoadLibraryReturnBreakpoint(gdb.FinishBreakpoint): +class GetLdLoadLibraryReturnBreakpoint(gdb.FinishBreakpoint): def __init__(self): gdb.FinishBreakpoint.__init__ (self, gdb.newest_frame(), internal=1) self.silent = True @@ -713,12 +713,15 @@ def stop(self): gdb.execute(gdb_cmd, False, True) return False -class GetMuslLoadLibraryBreakpoint(gdb.Breakpoint): +class GetLdLoadLibraryBreakpoint(gdb.Breakpoint): def __init__(self): + # for Musl gdb.Breakpoint.__init__ (self, spec="load_library", internal=1) + # for Glibc + gdb.Breakpoint.__init__ (self, spec="_dl_map_object", internal=1) def stop(self): - GetMuslLoadLibraryReturnBreakpoint() + GetLdLoadLibraryReturnBreakpoint() return False class GetOcclumElfBreakpoint(gdb.Breakpoint): @@ -741,10 +744,9 @@ def stop(self): return 0 print (gdb_cmd) gdb.execute(gdb_cmd, False, True) - GetMuslLoadLibraryBreakpoint() + GetLdLoadLibraryBreakpoint() return False - def sgx_debugger_init(): print ("detect urts is loaded, initializing") global SIZE From 73fc25cc975e44b73a6f1876977eca886cf3e022 Mon Sep 17 00:00:00 2001 From: "zongmin.gu" Date: Mon, 25 Jan 2021 10:15:41 +0800 Subject: [PATCH 76/96] Fix aesm build failure in docker buildx enviroment --- linux/installer/common/sgx-aesm-service/Makefile | 4 ---- 1 file changed, 4 deletions(-) diff --git a/linux/installer/common/sgx-aesm-service/Makefile b/linux/installer/common/sgx-aesm-service/Makefile index 71face180..c86ca03fb 100644 --- a/linux/installer/common/sgx-aesm-service/Makefile +++ b/linux/installer/common/sgx-aesm-service/Makefile @@ -42,14 +42,10 @@ AESMD_CONF_DEL=$(if $(wildcard /run/systemd/system/.*),aesmd.conf,$(if $(wildcar AESMD_CONF_PATH=$(if $(wildcard /run/systemd/system/.*),$(if $(wildcard /lib/systemd/system/.*),/lib/systemd/system,/usr/lib/systemd/system),$(if $(wildcard /etc/init/.*),/etc/init/)) ifeq ($(AESMD_CONF_NAME),) -ifneq ($(shell awk -F/ '$$2 == "docker"' /proc/self/cgroup),) AESMD_CONF_NAME=aesmd.service AESMD_CONF_DEL=aesmd.conf AESMD_CONF_PATH=/lib/systemd/system $(warning "You may need to start aesmd manually after it's 
installed!") -else -$(error "Unsupported platform - neither systemctl nor initctl is found!") -endif endif QE_VER=1.0.0 From f92b1c18dd4704c4fbdb2d31e66d21fbb48b974e Mon Sep 17 00:00:00 2001 From: "zongmin.gu" Date: Tue, 26 Jan 2021 10:51:55 +0800 Subject: [PATCH 77/96] Enable exception support under simulation mode --- sdk/simulation/uinst/u_instructions.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sdk/simulation/uinst/u_instructions.cpp b/sdk/simulation/uinst/u_instructions.cpp index 3532a5dbe..291753f96 100644 --- a/sdk/simulation/uinst/u_instructions.cpp +++ b/sdk/simulation/uinst/u_instructions.cpp @@ -103,7 +103,6 @@ void call_old_handler(int signum, void* siginfo, void *priv) sigset_t cur_set; pthread_sigmask(SIG_SETMASK, &g_old_sigact[signum].sa_mask, &cur_set); - if(g_old_sigact[signum].sa_flags & SA_SIGINFO) { @@ -276,6 +275,8 @@ uintptr_t _EINIT(secs_t* secs, enclave_css_t *css, token_t *launch) return SGX_ERROR_INVALID_ATTRIBUTE; } + reg_sig_handler_sim(); + mcp_same_size(&this_secs->mr_enclave, &css->body.enclave_hash, sizeof(sgx_measurement_t)); this_secs->isv_prod_id = css->body.isv_prod_id; this_secs->isv_svn = css->body.isv_svn; From e0546fc06cf92efb3cd0e0c93c1cd9114be64753 Mon Sep 17 00:00:00 2001 From: "zongmin.gu" Date: Thu, 4 Feb 2021 11:58:23 +0800 Subject: [PATCH 78/96] Fix some simulation mode AEX bugs and add SIG64 support --- sdk/simulation/uinst/u_instructions.cpp | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/sdk/simulation/uinst/u_instructions.cpp b/sdk/simulation/uinst/u_instructions.cpp index 291753f96..e0834c40c 100644 --- a/sdk/simulation/uinst/u_instructions.cpp +++ b/sdk/simulation/uinst/u_instructions.cpp @@ -119,11 +119,11 @@ void call_old_handler(int signum, void* siginfo, void *priv) g_old_sigact[signum].sa_handler = SIG_DFL; } } - +#define SIGRT_INTERRUPT (64) void sig_handler_sim(int signum, siginfo_t *siginfo, void *priv) __attribute__((optimize(0))) __attribute__((optimize("no-stack-protector"))); void sig_handler_sim(int signum, siginfo_t *siginfo, void *priv) { - GP_ON(signum != SIGFPE && signum != SIGSEGV); + GP_ON(signum != SIGFPE && signum != SIGSEGV && signum != SIGRT_INTERRUPT); thread_data_t *thread_data = (thread_data_t*)get_td_addr(); if (thread_data != NULL && (uintptr_t)thread_data == (uintptr_t)thread_data->self_addr) @@ -154,7 +154,7 @@ void sig_handler_sim(int signum, siginfo_t *siginfo, void *priv) size_t xip = context->uc_mcontext.gregs[REG_RIP]; secs_t *secs = ce->get_secs(); if (secs && (xip >= (size_t)secs->base) && (xip < (size_t)secs->base + secs->size)) - { + { GP_ON(tcs->cssa >= tcs->nssa); p_ssa_gpr = (ssa_gpr_t*)((size_t)p_ssa_gpr + tcs->cssa * secs->ssa_frame_size * SE_PAGE_SIZE); p_ssa_gpr->REG(ax) = context->uc_mcontext.gregs[REG_RAX]; @@ -202,13 +202,13 @@ void sig_handler_sim(int signum, siginfo_t *siginfo, void *priv) p_ssa_gpr->exit_info.vector = 0; //#DE } else - { + { p_ssa_gpr->exit_info.valid = 0; } tcs->cssa +=1; } - } - } + } + } } } call_old_handler(signum, siginfo, priv); @@ -239,10 +239,14 @@ void reg_sig_handler_sim() sigdelset(&sig_act.sa_mask, SIGSEGV); sigdelset(&sig_act.sa_mask, SIGFPE); } + sigdelset(&sig_act.sa_mask, SIGRT_INTERRUPT); + ret = sigaction(SIGSEGV, &sig_act, &g_old_sigact[SIGSEGV]); if (0 != ret) abort(); ret = sigaction(SIGFPE, &sig_act, &g_old_sigact[SIGFPE]); if (0 != ret) abort(); + ret = sigaction(SIGRT_INTERRUPT, &sig_act, &g_old_sigact[SIGRT_INTERRUPT]); + if (0 != ret) abort(); } uintptr_t _EINIT(secs_t* secs, 
enclave_css_t *css, token_t *launch) @@ -524,8 +528,7 @@ void _SE3(uintptr_t xax, uintptr_t xbx, __atomic_exchange(&tcs_sim->tcs_state, &tcs_target_state, &tcs_current_state, __ATOMIC_RELAXED); GP_ON_EENTER(tcs_current_state != TCS_STATE_INACTIVE); - - tcs->cssa -=1; + tcs->cssa -=1; secs = ce->get_secs(); enclave_base_addr = secs->base; From 861a7b66e7bf7a345a4c0fb625822b18e2d2d9e5 Mon Sep 17 00:00:00 2001 From: "zongmin.gu" Date: Mon, 8 Feb 2021 17:15:48 +0800 Subject: [PATCH 79/96] Remove PSW and DCAP build process --- compile_and_install.sh | 72 +----------------------------------------- 1 file changed, 1 insertion(+), 71 deletions(-) diff --git a/compile_and_install.sh b/compile_and_install.sh index 05efbad2a..f17e9d043 100755 --- a/compile_and_install.sh +++ b/compile_and_install.sh @@ -3,12 +3,8 @@ pushd `dirname $0` > /dev/null SCRIPT_PATH=`pwd` popd > /dev/null -# Uninstall and clean up +# Uninstall old sdk sudo /opt/intel/sgxsdk/uninstall.sh -sudo /opt/intel/sgxpsw/uninstall.sh -make clean - -make preparation # Compile SDK and install if [ "no_mitigation" = "$1" ]; then @@ -22,69 +18,3 @@ sudo mkdir -p /opt/intel cd /opt/intel yes yes | sudo ${SCRIPT_PATH}/linux/installer/bin/sgx_linux_x64_sdk_*.bin -# Compile PSW and install -# Note that the compilation of PSW requires the installation of SDK. -cd ${SCRIPT_PATH} -make psw -make psw_install_pkg -cd /opt/intel -sudo ${SCRIPT_PATH}/linux/installer/bin/sgx_linux_x64_psw_*.bin --no-start-aesm - -# Compile and install DCAP package -# The DCAP package is not the latest. It contains an out-of-data TCB. -cd ${SCRIPT_PATH} - -DEB_DISTRO_URL=https://download.01.org/intel-sgx/sgx-dcap/1.8/linux/distro/ubuntu18.04-server/debian_pkgs - -if [ -f "/etc/debian_version" ]; then - make deb_psw_pkg - - cd linux/installer/deb - - # Get Intel-signed application enclaves from the official website. - mkdir libsgx-ae - pushd libsgx-ae - wget ${DEB_DISTRO_URL}/libs/libsgx-ae-qe3/libsgx-ae-qe3_1.8.100.2-bionic1_amd64.deb \ - ${DEB_DISTRO_URL}/libs/libsgx-ae-qve/libsgx-ae-qve_1.8.100.2-bionic1_amd64.deb \ - ${DEB_DISTRO_URL}/utils/libsgx-ae-pce/libsgx-ae-pce_2.11.100.2-bionic1_amd64.deb - sudo dpkg -i libsgx-ae-pce_*.deb libsgx-ae-qe3_*.deb libsgx-ae-qve_*.deb - popd - - sudo dpkg -i libsgx-enclave-common/libsgx-enclave-common_*.deb \ - libsgx-quote-ex/libsgx-quote-ex_*.deb \ - libsgx-urts/libsgx-urts_*.deb - - cd sgx-aesm-service/ - sudo dpkg -i libsgx-dcap-ql_*.deb libsgx-qe3-logic_*.deb libsgx-pce-logic_*.deb \ - libsgx-dcap-quote-verify_*.deb libsgx-dcap-ql_*.deb libsgx-dcap-ql-dev_*.deb \ - libsgx-dcap-default-qpl_*.deb libsgx-dcap-default-qpl-dev_*.deb \ - libsgx-dcap-quote-verify-dev_*.deb - -elif command -v rpm >/dev/null 2>&1; then - make rpm_psw_pkg - - cd linux/installer/rpm - - # Get Intel-signed application enclaves from the official website. - # Libaries have dependencies on AE. Install AE first. 
- mkdir libsgx-ae - pushd libsgx-ae - wget https://download.01.org/intel-sgx/sgx-dcap/1.8/linux/distro/centos8.1-server/sgx_rpm_local_repo.tgz - tar -xvf sgx_rpm_local_repo.tgz - cd sgx_rpm_local_repo - sudo rpm -ivh libsgx-ae-pce*.rpm libsgx-ae-qe3*.rpm libsgx-ae-qve*.rpm - popd - - sudo rpm -ivh libsgx-enclave-common/libsgx-enclave-common*.rpm \ - libsgx-quote-ex/libsgx-quote-ex*.rpm \ - libsgx-urts/libsgx-urts*.rpm - - cd sgx-aesm-service/ - sudo rpm -ivh libsgx-dcap-ql*.rpm libsgx-qe3-logic*.rpm libsgx-pce-logic*.rpm \ - libsgx-dcap-quote-verify*.rpm libsgx-dcap-ql*.rpm libsgx-dcap-ql-dev*.rpm \ - libsgx-dcap-default-qpl*.rpm libsgx-dcap-default-qpl-dev*.rpm \ - libsgx-dcap-quote-verify-dev*.rpm - -else - echo "unsupported package system" -fi From 05b64e2676a7fb3ff11a0defd298af208ae86da4 Mon Sep 17 00:00:00 2001 From: "Hui, Chunyang" Date: Wed, 11 Aug 2021 06:43:02 +0000 Subject: [PATCH 80/96] Fix create enclave failed due to ENOMEM in simulation mode In simulation mode, previously, ECREATE will commit all the pages which will cause ENOMEM if the size is very big. This patch fixed this by only reserving the pages (with PROT_NONE) in ECREATE but committing the pages in EADD. --- sdk/simulation/uinst/u_instructions.cpp | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/sdk/simulation/uinst/u_instructions.cpp b/sdk/simulation/uinst/u_instructions.cpp index e0834c40c..376e5405a 100644 --- a/sdk/simulation/uinst/u_instructions.cpp +++ b/sdk/simulation/uinst/u_instructions.cpp @@ -343,16 +343,13 @@ uintptr_t _ECREATE(page_info_t* pi) // `ce' is not checked against NULL, since it is not // allocated with new(std::no_throw). - addr = mmap(secs->base, (size_t)secs->size, PROT_READ | PROT_WRITE, mmap_flag, -1, 0); + addr = mmap(secs->base, (size_t)secs->size, SGX_PROT_NONE, mmap_flag, -1, 0); if(MAP_FAILED == addr) { delete ce; return 0; } - // Mark all the memory inaccessible. - se_virtual_protect(addr, (size_t)secs->size, SGX_PROT_NONE); - //set image_offset if(image_offset != 0) { From 7dafa4a321f038369745b79e159edd7291d4893e Mon Sep 17 00:00:00 2001 From: "zongmin.gu" Date: Fri, 13 Aug 2021 11:39:25 +0800 Subject: [PATCH 81/96] Solve the simulation mode register signal handler twice issue. This solution just works for us. Need an official solution form Intel. 
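The guard added below boils down to a once-only registration pattern. A minimal standalone sketch (the atomic flag mirrors the one introduced in u_instructions.cpp; the helper name is illustrative, and compare_exchange_strong is used here to make the check-and-set a single atomic step, whereas the patch itself uses a plain load and store on the flag):

    #include <atomic>

    static std::atomic<bool> g_handler_registered(false);

    static void register_handler_once(void (*do_register)(void))
    {
        // Only the first caller performs the registration; later calls
        // return immediately, so sigaction() is never invoked again with
        // the already-installed handler captured as the "old" action.
        bool expected = false;
        if (!g_handler_registered.compare_exchange_strong(expected, true))
            return;
        do_register();
    }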
--- sdk/simulation/uinst/u_instructions.cpp | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/sdk/simulation/uinst/u_instructions.cpp b/sdk/simulation/uinst/u_instructions.cpp index 376e5405a..245931744 100644 --- a/sdk/simulation/uinst/u_instructions.cpp +++ b/sdk/simulation/uinst/u_instructions.cpp @@ -215,8 +215,15 @@ void sig_handler_sim(int signum, siginfo_t *siginfo, void *priv) } #define SIG_STACK_SIZE (4096*10) +#include +std::atomic sig_handler_registed (false); void reg_sig_handler_sim() { + if (sig_handler_registed) + return; + + SE_TRACE(SE_TRACE_DEBUG, "signal hander for simulation registed\n"); + int ret = 0; struct sigaction sig_act; stack_t ss; @@ -247,6 +254,8 @@ void reg_sig_handler_sim() if (0 != ret) abort(); ret = sigaction(SIGRT_INTERRUPT, &sig_act, &g_old_sigact[SIGRT_INTERRUPT]); if (0 != ret) abort(); + + sig_handler_registed = true; } uintptr_t _EINIT(secs_t* secs, enclave_css_t *css, token_t *launch) From 069f920f0344e2ed678204d865663667a1b9256e Mon Sep 17 00:00:00 2001 From: "zongmin.gu" Date: Fri, 3 Dec 2021 11:20:35 +0800 Subject: [PATCH 82/96] Update the simulation mode to align the Occlum implementation --- sdk/simulation/tinst/t_instructions.cpp | 29 ++++++++++++++-- sdk/simulation/uinst/td_mngr.h | 2 ++ sdk/simulation/uinst/u_instructions.cpp | 45 ++++++++++++++++++++----- sdk/trts/linux/trts_pic.h | 22 ------------ 4 files changed, 65 insertions(+), 33 deletions(-) diff --git a/sdk/simulation/tinst/t_instructions.cpp b/sdk/simulation/tinst/t_instructions.cpp index 85846a128..37bd54360 100644 --- a/sdk/simulation/tinst/t_instructions.cpp +++ b/sdk/simulation/tinst/t_instructions.cpp @@ -281,6 +281,28 @@ static void _EREPORT(const sgx_target_info_t* ti, const sgx_report_data_t* rd, s } //////////////////////////////////////////////////////////////////////// +#define ARCH_SET_GS 0x1001 +#define ARCH_SET_FS 0x1002 + + +static void arch_prctl(int code, unsigned long addr) __attribute__((section(".nipx"))); +static void arch_prctl(int code, unsigned long addr) +{ + int ret; + + __asm__("mov %1, %%edi\n\t" + "movq %2, %%rsi\n\t" + "mov $0x9e,%%eax\n\t" + "syscall\n\t" + "mov %%eax, %0\n\t" + :"=a"(ret) + :"r"(code), "r"(addr) + :"r11", "rcx"); + if(ret != 0) { + // This should never happen. 
+ abort(); + } +} static void _EEXIT(uintptr_t dest, uintptr_t xcx, uintptr_t xdx, uintptr_t xsi, uintptr_t xdi) __attribute__((section(".nipx"))); @@ -301,9 +323,6 @@ _EEXIT(uintptr_t dest, uintptr_t xcx, uintptr_t xdx, uintptr_t xsi, uintptr_t xd tcs_t *tcs = GET_TCS_PTR(xdx); GP_ON(tcs == NULL); - // restore the used _tls_array - GP_ON(td_mngr_restore_td(tcs) == false); - // check thread is in use or not tcs_sim_t *tcs_sim = reinterpret_cast(tcs->reserved); @@ -318,6 +337,10 @@ _EEXIT(uintptr_t dest, uintptr_t xcx, uintptr_t xdx, uintptr_t xsi, uintptr_t xd regs.xsi = xsi; regs.xdi = xdi; + //restore the FS, GS base address + arch_prctl(ARCH_SET_FS, tcs_sim->saved_fs_base); + arch_prctl(ARCH_SET_GS, tcs_sim->saved_gs_base); + load_regs(®s); // jump back to the instruction after the call to _SE3 diff --git a/sdk/simulation/uinst/td_mngr.h b/sdk/simulation/uinst/td_mngr.h index 47e082edd..7001ad397 100644 --- a/sdk/simulation/uinst/td_mngr.h +++ b/sdk/simulation/uinst/td_mngr.h @@ -43,6 +43,8 @@ typedef struct _tcs_sim_t size_t tcs_state; uintptr_t saved_dtv; uintptr_t saved_fs_gs_0; + uintptr_t saved_fs_base; + uintptr_t saved_gs_base; uint64_t tcs_offset_update_flag; } tcs_sim_t; diff --git a/sdk/simulation/uinst/u_instructions.cpp b/sdk/simulation/uinst/u_instructions.cpp index 245931744..ebfd2eae0 100644 --- a/sdk/simulation/uinst/u_instructions.cpp +++ b/sdk/simulation/uinst/u_instructions.cpp @@ -37,6 +37,8 @@ #include #include #include +#include +#include #include #include "arch.h" @@ -59,7 +61,7 @@ static uintptr_t _EINIT(secs_t* secs, enclave_css_t* css, token_t* launch); static uintptr_t _ECREATE (page_info_t* pi); static uintptr_t _EADD (page_info_t* pi, void* epc_lin_addr); static uintptr_t _EREMOVE(const void* epc_lin_addr); -extern "C" void* get_td_addr(void); +extern "C" int arch_prctl(int code, unsigned long addr); extern "C" bool get_elrange_start_address(void* base_address, uint64_t &elrange_start_address); @@ -123,9 +125,12 @@ void call_old_handler(int signum, void* siginfo, void *priv) void sig_handler_sim(int signum, siginfo_t *siginfo, void *priv) __attribute__((optimize(0))) __attribute__((optimize("no-stack-protector"))); void sig_handler_sim(int signum, siginfo_t *siginfo, void *priv) { - GP_ON(signum != SIGFPE && signum != SIGSEGV && signum != SIGRT_INTERRUPT); + // FIXME:workaround the simulation issue + // GP_ON(signum != SIGFPE && signum != SIGSEGV && signum != SIGRT_INTERRUPT); + GP_ON(signum != SIGFPE && signum != SIGSEGV); - thread_data_t *thread_data = (thread_data_t*)get_td_addr(); + thread_data_t *thread_data = 0; + arch_prctl(ARCH_GET_GS, (unsigned long)&thread_data); if (thread_data != NULL && (uintptr_t)thread_data == (uintptr_t)thread_data->self_addr) { // first SSA can be used to get tcs, even cssa > 0. 
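Taken together, the FS/GS changes in this file and in t_instructions.cpp form a save/adjust/restore bracket around simulated enclave entry. A minimal standalone sketch of that bracket, assuming only the arch_prctl() declaration and ARCH_* codes used by this patch (the helper and its parameters are illustrative, not the actual _SE3/_EEXIT flow):

    #include <asm/prctl.h>   // ARCH_GET_FS, ARCH_SET_FS, ARCH_GET_GS, ARCH_SET_GS

    extern "C" int arch_prctl(int code, unsigned long addr);

    static void with_enclave_segments(unsigned long enclave_fs,
                                      unsigned long enclave_gs,
                                      void (*enclave_entry)(void))
    {
        unsigned long saved_fs = 0, saved_gs = 0;
        arch_prctl(ARCH_GET_FS, (unsigned long)&saved_fs);  // save host FS base
        arch_prctl(ARCH_GET_GS, (unsigned long)&saved_gs);  // save host GS base

        arch_prctl(ARCH_SET_FS, enclave_fs);                // point FS/GS at the
        arch_prctl(ARCH_SET_GS, enclave_gs);                // enclave's TLS area

        enclave_entry();                                    // simulated EENTER target

        arch_prctl(ARCH_SET_FS, saved_fs);                  // simulated EEXIT:
        arch_prctl(ARCH_SET_GS, saved_gs);                  // restore host bases
    }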
@@ -144,6 +149,15 @@ void sig_handler_sim(int signum, siginfo_t *siginfo, void *priv) size_t tcs_target_state = TCS_STATE_INACTIVE; __atomic_store(&tcs_sim->tcs_state, &tcs_target_state, __ATOMIC_RELAXED); + // save FS, GS base address + uint64_t tmp_fs_base = 0, tmp_gs_base = 0; + arch_prctl(ARCH_GET_FS, (unsigned long)&tmp_fs_base); + arch_prctl(ARCH_GET_GS, (unsigned long)&tmp_gs_base); + + // restore FS, GS base address + arch_prctl(ARCH_SET_FS, tcs_sim->saved_fs_base); + arch_prctl(ARCH_SET_GS, tcs_sim->saved_gs_base); + CEnclaveMngr *mngr = CEnclaveMngr::get_instance(); assert(mngr != NULL); @@ -175,6 +189,8 @@ void sig_handler_sim(int signum, siginfo_t *siginfo, void *priv) p_ssa_gpr->r14 = context->uc_mcontext.gregs[REG_R14]; p_ssa_gpr->r15 = context->uc_mcontext.gregs[REG_R15]; p_ssa_gpr->rflags = context->uc_flags; + p_ssa_gpr->fs = tmp_fs_base; + p_ssa_gpr->gs = tmp_gs_base; context->uc_mcontext.gregs[REG_RAX] = SE_ERESUME; context->uc_mcontext.gregs[REG_RBX] = (size_t)tcs; @@ -246,14 +262,18 @@ void reg_sig_handler_sim() sigdelset(&sig_act.sa_mask, SIGSEGV); sigdelset(&sig_act.sa_mask, SIGFPE); } - sigdelset(&sig_act.sa_mask, SIGRT_INTERRUPT); + + // FIXME:workaround the simulation issue + // sigdelset(&sig_act.sa_mask, SIGRT_INTERRUPT); ret = sigaction(SIGSEGV, &sig_act, &g_old_sigact[SIGSEGV]); if (0 != ret) abort(); ret = sigaction(SIGFPE, &sig_act, &g_old_sigact[SIGFPE]); if (0 != ret) abort(); - ret = sigaction(SIGRT_INTERRUPT, &sig_act, &g_old_sigact[SIGRT_INTERRUPT]); - if (0 != ret) abort(); + + // FIXME:workaround the simulation issue + // ret = sigaction(SIGRT_INTERRUPT, &sig_act, &g_old_sigact[SIGRT_INTERRUPT]); + // if (0 != ret) abort(); sig_handler_registed = true; } @@ -482,8 +502,9 @@ void _SE3(uintptr_t xax, uintptr_t xbx, xip = reinterpret_cast(enclave_base_addr); GP_ON_EENTER(xip == 0); - //set the _tls_array to point to the self_addr of TLS section inside the enclave - GP_ON_EENTER(td_mngr_set_td(enclave_base_addr, tcs) == false); + // save FS, GS base address + arch_prctl(ARCH_GET_FS, (unsigned long)&tcs_sim->saved_fs_base); + arch_prctl(ARCH_GET_GS, (unsigned long)&tcs_sim->saved_gs_base); // Destination depends on STATE xip += (uintptr_t)tcs->oentry; @@ -507,6 +528,10 @@ void _SE3(uintptr_t xax, uintptr_t xbx, regs.xsp = p_ssa_gpr->REG(sp_u); regs.xip = xip; + // adjust the FS, GS base address + arch_prctl(ARCH_SET_FS, (unsigned long)enclave_base_addr + tcs->ofs_base); + arch_prctl(ARCH_SET_GS, (unsigned long)enclave_base_addr + tcs->ogs_base); + load_regs(®s); // Returning from this function enters the enclave @@ -555,6 +580,10 @@ void _SE3(uintptr_t xax, uintptr_t xbx, regs.xbp = p_ssa_gpr->REG(bp); regs.xip = p_ssa_gpr->REG(ip); + // adjust the FS, GS base address + arch_prctl(ARCH_SET_FS, p_ssa_gpr->fs); + arch_prctl(ARCH_SET_GS, p_ssa_gpr->gs); + load_regs(®s); return; diff --git a/sdk/trts/linux/trts_pic.h b/sdk/trts/linux/trts_pic.h index 1f769bedc..550f4eeb0 100644 --- a/sdk/trts/linux/trts_pic.h +++ b/sdk/trts/linux/trts_pic.h @@ -79,34 +79,12 @@ /* OCALL command */ #define OCALL_FLAG 0x04F434944 -#define dtv SE_WORDSIZE -#define tls 0 .macro READ_TD_DATA offset -#ifdef SE_SIM -/* TLS support in simulation mode - * see "sdk/simulation/uinst/linux/set_tls.c" - * and "sdk/simulation/assembly/linux/gnu_tls.h" - * TD address (tcs->ofs_base) is set to tcb_head->dtv->value. - * The offset of tcb_head->dtv->value is SE_WORDSIZE. 
- */ - -#if defined(LINUX32) - mov %gs:dtv, %xax -#elif defined(LINUX64) - mov %fs:dtv, %xax -#endif - mov tls(%xax), %xax - mov \offset(%xax), %xax - -#else /* SE_SIM */ - #if defined(LINUX32) mov %fs:\offset, %xax #elif defined(LINUX64) mov %gs:\offset, %xax #endif - -#endif /* !SE_SIM */ .endm .macro GET_STACK_BASE tcs From 7ab0aa5c96bea15948f35d95b96eedf9cac9eb4d Mon Sep 17 00:00:00 2001 From: "zongmin.gu" Date: Tue, 7 Dec 2021 18:53:13 +0800 Subject: [PATCH 83/96] Workaround the Occlum interrupt mode --- sdk/simulation/tinst/t_instructions.cpp | 6 ++-- sdk/simulation/uinst/u_instructions.cpp | 37 ++++++++++++++----------- 2 files changed, 23 insertions(+), 20 deletions(-) diff --git a/sdk/simulation/tinst/t_instructions.cpp b/sdk/simulation/tinst/t_instructions.cpp index 37bd54360..ede1e389b 100644 --- a/sdk/simulation/tinst/t_instructions.cpp +++ b/sdk/simulation/tinst/t_instructions.cpp @@ -323,13 +323,11 @@ _EEXIT(uintptr_t dest, uintptr_t xcx, uintptr_t xdx, uintptr_t xsi, uintptr_t xd tcs_t *tcs = GET_TCS_PTR(xdx); GP_ON(tcs == NULL); - // check thread is in use or not tcs_sim_t *tcs_sim = reinterpret_cast(tcs->reserved); + // Update the tcs status size_t tcs_target_state = TCS_STATE_INACTIVE; - size_t tcs_current_state = TCS_STATE_INACTIVE; - __atomic_exchange(&tcs_sim->tcs_state, &tcs_target_state, &tcs_current_state, __ATOMIC_RELAXED); - GP_ON(tcs_current_state!= TCS_STATE_ACTIVE); + __atomic_store(&tcs_sim->tcs_state, &tcs_target_state, __ATOMIC_RELAXED); regs.xax = 0; regs.xbx = dest; diff --git a/sdk/simulation/uinst/u_instructions.cpp b/sdk/simulation/uinst/u_instructions.cpp index ebfd2eae0..0f56eae34 100644 --- a/sdk/simulation/uinst/u_instructions.cpp +++ b/sdk/simulation/uinst/u_instructions.cpp @@ -125,9 +125,7 @@ void call_old_handler(int signum, void* siginfo, void *priv) void sig_handler_sim(int signum, siginfo_t *siginfo, void *priv) __attribute__((optimize(0))) __attribute__((optimize("no-stack-protector"))); void sig_handler_sim(int signum, siginfo_t *siginfo, void *priv) { - // FIXME:workaround the simulation issue - // GP_ON(signum != SIGFPE && signum != SIGSEGV && signum != SIGRT_INTERRUPT); - GP_ON(signum != SIGFPE && signum != SIGSEGV); + GP_ON(signum != SIGFPE && signum != SIGSEGV && signum != SIGRT_INTERRUPT); thread_data_t *thread_data = 0; arch_prctl(ARCH_GET_GS, (unsigned long)&thread_data); @@ -141,15 +139,14 @@ void sig_handler_sim(int signum, siginfo_t *siginfo, void *priv) { tcs_sim_t *tcs_sim = reinterpret_cast(tcs->reserved); - size_t tcs_current_state = TCS_STATE_ACTIVE; - __atomic_load(&tcs_sim->tcs_state, &tcs_current_state, __ATOMIC_RELAXED); + size_t tcs_target_state = TCS_STATE_INACTIVE; + size_t tcs_current_state = TCS_STATE_INACTIVE; + __atomic_exchange(&tcs_sim->tcs_state, &tcs_target_state, &tcs_current_state, __ATOMIC_RELAXED); if (tcs_current_state == TCS_STATE_ACTIVE) { - size_t tcs_target_state = TCS_STATE_INACTIVE; - __atomic_store(&tcs_sim->tcs_state, &tcs_target_state, __ATOMIC_RELAXED); - // save FS, GS base address + bool user_interrupt = false; uint64_t tmp_fs_base = 0, tmp_gs_base = 0; arch_prctl(ARCH_GET_FS, (unsigned long)&tmp_fs_base); arch_prctl(ARCH_GET_GS, (unsigned long)&tmp_gs_base); @@ -167,8 +164,11 @@ void sig_handler_sim(int signum, siginfo_t *siginfo, void *priv) ucontext_t* context = reinterpret_cast(priv); size_t xip = context->uc_mcontext.gregs[REG_RIP]; secs_t *secs = ce->get_secs(); - if (secs && (xip >= (size_t)secs->base) && (xip < (size_t)secs->base + secs->size)) - { + + //Workaround for Occlum. 
Occlum only handle user application exception + if (secs && (xip >= (size_t)tcs) && (xip < (size_t)secs->base + secs->size)) + { + user_interrupt = true; GP_ON(tcs->cssa >= tcs->nssa); p_ssa_gpr = (ssa_gpr_t*)((size_t)p_ssa_gpr + tcs->cssa * secs->ssa_frame_size * SE_PAGE_SIZE); p_ssa_gpr->REG(ax) = context->uc_mcontext.gregs[REG_RAX]; @@ -224,6 +224,12 @@ void sig_handler_sim(int signum, siginfo_t *siginfo, void *priv) tcs->cssa +=1; } } + + if (user_interrupt == false) { + // restore FS, GS base address + arch_prctl(ARCH_SET_FS, tmp_fs_base); + arch_prctl(ARCH_SET_GS, tmp_gs_base); + } } } } @@ -262,18 +268,17 @@ void reg_sig_handler_sim() sigdelset(&sig_act.sa_mask, SIGSEGV); sigdelset(&sig_act.sa_mask, SIGFPE); } - - // FIXME:workaround the simulation issue - // sigdelset(&sig_act.sa_mask, SIGRT_INTERRUPT); + + sigdelset(&sig_act.sa_mask, SIGRT_INTERRUPT); ret = sigaction(SIGSEGV, &sig_act, &g_old_sigact[SIGSEGV]); if (0 != ret) abort(); ret = sigaction(SIGFPE, &sig_act, &g_old_sigact[SIGFPE]); if (0 != ret) abort(); - // FIXME:workaround the simulation issue - // ret = sigaction(SIGRT_INTERRUPT, &sig_act, &g_old_sigact[SIGRT_INTERRUPT]); - // if (0 != ret) abort(); + ret = sigaction(SIGRT_INTERRUPT, &sig_act, &g_old_sigact[SIGRT_INTERRUPT]); + if (0 != ret) + abort(); sig_handler_registed = true; } From ffaa5fadab8de3c357b67a31a6ffa53c078587d9 Mon Sep 17 00:00:00 2001 From: "zongmin.gu" Date: Tue, 28 Dec 2021 18:46:56 +0800 Subject: [PATCH 84/96] Update the installation script --- compile_and_install.sh | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/compile_and_install.sh b/compile_and_install.sh index f17e9d043..b4337128a 100755 --- a/compile_and_install.sh +++ b/compile_and_install.sh @@ -7,13 +7,8 @@ popd > /dev/null sudo /opt/intel/sgxsdk/uninstall.sh # Compile SDK and install -if [ "no_mitigation" = "$1" ]; then - make sdk_no_mitigation - make sdk_install_pkg_no_mitigation -else - make sdk - make sdk_install_pkg -fi +make USE_OPT_LIBS=3 sdk_no_mitigation +make sdk_install_pkg_no_mitigation sudo mkdir -p /opt/intel cd /opt/intel yes yes | sudo ${SCRIPT_PATH}/linux/installer/bin/sgx_linux_x64_sdk_*.bin From 38fdd14d5e9059bb899bbc48afd8c315132ee894 Mon Sep 17 00:00:00 2001 From: "zongmin.gu" Date: Thu, 30 Dec 2021 11:09:24 +0800 Subject: [PATCH 85/96] Add one more check in simualtion mode signal hander to confirm the signal comes from user application --- sdk/simulation/uinst/u_instructions.cpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/sdk/simulation/uinst/u_instructions.cpp b/sdk/simulation/uinst/u_instructions.cpp index 0f56eae34..dcf732bf6 100644 --- a/sdk/simulation/uinst/u_instructions.cpp +++ b/sdk/simulation/uinst/u_instructions.cpp @@ -151,6 +151,12 @@ void sig_handler_sim(int signum, siginfo_t *siginfo, void *priv) arch_prctl(ARCH_GET_FS, (unsigned long)&tmp_fs_base); arch_prctl(ARCH_GET_GS, (unsigned long)&tmp_gs_base); + // Workaround for Occlum. 
Occlum only handle user application exception + // the fs_base is used by application which is not same as the gs_base used by Occlum + if (tmp_fs_base == tmp_gs_base) { + return; + } + // restore FS, GS base address arch_prctl(ARCH_SET_FS, tcs_sim->saved_fs_base); arch_prctl(ARCH_SET_GS, tcs_sim->saved_gs_base); From 968d0f5dde6610c428e64b0c1403d565ae1b87a7 Mon Sep 17 00:00:00 2001 From: "Hui, Chunyang" Date: Wed, 23 Feb 2022 07:40:26 +0000 Subject: [PATCH 86/96] Fix se_event_timeout_wait to handle relative and absolute timeout --- common/inc/internal/se_event.h | 2 +- common/inc/sgx_occlum_utils.edl | 2 +- common/src/se_event_timeout_wait.c | 12 +++++++++--- psw/urts/enclave_mutex.cpp | 4 ++-- 4 files changed, 13 insertions(+), 7 deletions(-) diff --git a/common/inc/internal/se_event.h b/common/inc/internal/se_event.h index 4b8b0db37..b4e6bd81a 100644 --- a/common/inc/internal/se_event.h +++ b/common/inc/internal/se_event.h @@ -55,7 +55,7 @@ se_handle_t SGXAPI se_event_init(void); void SGXAPI se_event_destroy(se_handle_t); int SGXAPI se_event_wait(se_handle_t); -int SGXAPI se_event_timeout_wait(se_handle_t, int, const struct timespec *, int *); +int SGXAPI se_event_timeout_wait(se_handle_t, int, const struct timespec *, int, int *); int SGXAPI se_event_wait_timeout(se_handle_t se_event, uint64_t timeout); int SGXAPI se_event_wake(se_handle_t); diff --git a/common/inc/sgx_occlum_utils.edl b/common/inc/sgx_occlum_utils.edl index d396b9465..5996b5621 100644 --- a/common/inc/sgx_occlum_utils.edl +++ b/common/inc/sgx_occlum_utils.edl @@ -1,6 +1,6 @@ enclave { untrusted { /* Go outside and wait on my untrusted event with timeout */ - [cdecl] int sgx_thread_wait_untrusted_event_timeout_ocall([user_check] const void *self, int clockbit, [in] const struct sgx_edl_timespec* ts, [out] int* err); + [cdecl] int sgx_thread_wait_untrusted_event_timeout_ocall([user_check] const void *self, int clockbit, [in] const struct sgx_edl_timespec* ts, int absolute_time, [out] int* err); }; }; diff --git a/common/src/se_event_timeout_wait.c b/common/src/se_event_timeout_wait.c index 3bc80a692..6ab337479 100644 --- a/common/src/se_event_timeout_wait.c +++ b/common/src/se_event_timeout_wait.c @@ -35,7 +35,7 @@ #include #include -int se_event_timeout_wait(se_handle_t se_event, int clockbit, const struct timespec *ts, int *err) +int se_event_timeout_wait(se_handle_t se_event, int clockbit, const struct timespec *ts, int absolute_time, int *err) { int ret = 0; @@ -43,9 +43,15 @@ int se_event_timeout_wait(se_handle_t se_event, int clockbit, const struct times return SE_MUTEX_INVALID; if (__sync_fetch_and_add((int*)se_event, -1) == 0) { - if (clockbit & FUTEX_CLOCK_REALTIME) { - ret = (int)syscall(__NR_futex, se_event, FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME, -1, ts, NULL, FUTEX_BITSET_MATCH_ANY); + // From futex man page: + // For FUTEX_WAIT, timeout is interpreted as a relative value. This differs from other futex operations, where + // timeout is interpreted as an absolute value. To obtain the equivalent of FUTEX_WAIT with an absolute timeout, + // employ FUTEX_WAIT_BITSET with val3 specified as FUTEX_BITSET_MATCH_ANY. + if (absolute_time == 1) { + ret = (int)syscall(__NR_futex, se_event, FUTEX_WAIT_BITSET | clockbit, -1, ts, NULL, FUTEX_BITSET_MATCH_ANY); } else { + // FUTEX_WAIT can't work with FUTEX_CLOCK_REALTIME in Linux. Thus, ignore the clockbit. 
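For context, the absolute-timeout path above is what a caller reaches when it builds a CLOCK_REALTIME deadline and passes absolute_time == 1, matching the prototype declared in se_event.h in this patch. A hypothetical caller sketch (the helper name and the 500 ms budget are illustrative):

    #include <linux/futex.h>   // FUTEX_CLOCK_REALTIME
    #include <time.h>
    #include "se_event.h"

    static int wait_up_to_500ms(se_handle_t ev)
    {
        struct timespec deadline;
        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_nsec += 500L * 1000 * 1000;             // now + 500 ms
        if (deadline.tv_nsec >= 1000000000L) {
            deadline.tv_sec += 1;
            deadline.tv_nsec -= 1000000000L;
        }
        int err = 0;
        // absolute_time == 1 selects the FUTEX_WAIT_BITSET | clockbit path.
        return se_event_timeout_wait(ev, FUTEX_CLOCK_REALTIME, &deadline, 1, &err);
    }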
+ // Reference: https://github.com/torvalds/linux/commit/4fbf5d6837bf81fd7a27d771358f4ee6c4f243f8 ret = (int)syscall(__NR_futex, se_event, FUTEX_WAIT, -1, ts, NULL, 0); } __sync_val_compare_and_swap((int*)se_event, -1, 0); diff --git a/psw/urts/enclave_mutex.cpp b/psw/urts/enclave_mutex.cpp index 3b81ffce5..6588d61a6 100644 --- a/psw/urts/enclave_mutex.cpp +++ b/psw/urts/enclave_mutex.cpp @@ -52,7 +52,7 @@ extern "C" int sgx_thread_wait_untrusted_event_ocall(const void *self) return SGX_SUCCESS; } -extern "C" int sgx_thread_wait_untrusted_event_timeout_ocall(const void *self, int clockbit, const struct timespec *ts, int *err) +extern "C" int sgx_thread_wait_untrusted_event_timeout_ocall(const void *self, int clockbit, const struct timespec *ts, int absolute_time, int *err) { if (self == NULL) return SGX_ERROR_INVALID_PARAMETER; @@ -61,7 +61,7 @@ extern "C" int sgx_thread_wait_untrusted_event_timeout_ocall(const void *self, i if (hevent == NULL) return SE_ERROR_MUTEX_GET_EVENT; - if (SE_MUTEX_SUCCESS != se_event_timeout_wait(hevent, clockbit, ts, err)) + if (SE_MUTEX_SUCCESS != se_event_timeout_wait(hevent, clockbit, ts, absolute_time, err)) return SE_ERROR_MUTEX_WAIT_EVENT; return SGX_SUCCESS; From b819fa5ee6238a0e24a458ab10d24b498f6398b5 Mon Sep 17 00:00:00 2001 From: Shaowei Song <1498430017@qq.com> Date: Fri, 2 Sep 2022 17:43:29 +0800 Subject: [PATCH 87/96] Support parameterized cache size of SGX PFS (apply intel's patch) --- common/inc/sgx_tprotected_fs.h | 23 ++++ .../sgx_tprotected_fs/file_crypto.cpp | 2 - .../sgx_tprotected_fs/file_flush.cpp | 61 +------- .../sgx_tprotected_fs/file_init.cpp | 61 ++------ .../sgx_tprotected_fs/file_other.cpp | 130 ------------------ .../sgx_tprotected_fs/file_read_write.cpp | 5 +- .../sgx_tprotected_fs/lru_cache.cpp | 3 +- .../sgx_tprotected_fs/lru_cache.h | 2 +- .../sgx_tprotected_fs/protected_fs_file.h | 16 ++- .../sgx_tprotected_fs/protected_fs_nodes.h | 1 - .../sgx_tprotected_fs/sgx_tprotected_fs.cpp | 49 +++---- 11 files changed, 69 insertions(+), 284 deletions(-) diff --git a/common/inc/sgx_tprotected_fs.h b/common/inc/sgx_tprotected_fs.h index a461f18cc..acb6dbe32 100644 --- a/common/inc/sgx_tprotected_fs.h +++ b/common/inc/sgx_tprotected_fs.h @@ -109,6 +109,29 @@ SGX_FILE* SGXAPI sgx_fopen_auto_key(const char* filename, const char* mode); SGX_FILE* SGXAPI sgx_fopen_integrity_only(const char* filename, const char* mode); +/* sgx_fopen_ex + * Purpose: Expert version of sgx_fopen/sgx_fopen_auto_key which is used if you want to control the internal `cache size`. + * The specified `cache size` must be page (4KB by default) aligned. + * Note that `sgx_fexport_auto_key` and `sgx_fimport_auto_key` don't support configuring `cache_size` right now + * + * Parameters: + * filename - [IN] the name of the file to open/create. + * mode - [IN] open mode. only supports 'r' or 'w' or 'a' (one and only one of them must be present), and optionally 'b' and/or '+'. + * key - [IN] encryption key that will be used for the file encryption. 
+ * If it's NULL, we will swtich back to `sgx_fopen_auto_key and use enclave's seal key to protect the file + * NOTE - the key is actually used as a KDK (key derivation key) and only for the meta-data node, and not used directly for the encryption of any part of the file + * this is important in order to prevent hitting the key wear-out problem, and some other issues with GCM encryptions using the same key + * cache_size - [IN] Internal cache size in byte, which used to cache R/W data in enclave before flush to actual file + * It must larger than default cache size (192KB), and must be page (4KB by default) aligned + * a) Please make sure enclave heap is enough for the `cache`, e.g. Configure enough heap in enclave config file + * b) All the data in cache may lost after exeception, please try to call `sgx_fflush` explicitly to avoid data loss + * + * Return value: + * SGX_FILE* - pointer to the newly created file handle, NULL if an error occurred - check errno for the error code. +*/ +SGX_FILE* SGXAPI sgx_fopen_ex(const char* filename, const char* mode, const sgx_key_128bit_t *key, const uint64_t cache_size); + + /* sgx_fwrite * Purpose: write data to a file (see c++ fwrite documentation for more details). * diff --git a/sdk/protected_fs/sgx_tprotected_fs/file_crypto.cpp b/sdk/protected_fs/sgx_tprotected_fs/file_crypto.cpp index 3cc040916..02b155f76 100644 --- a/sdk/protected_fs/sgx_tprotected_fs/file_crypto.cpp +++ b/sdk/protected_fs/sgx_tprotected_fs/file_crypto.cpp @@ -313,5 +313,3 @@ bool protected_fs_file::restore_current_meta_data_key(const sgx_aes_gcm_128bit_k return true; } - - diff --git a/sdk/protected_fs/sgx_tprotected_fs/file_flush.cpp b/sdk/protected_fs/sgx_tprotected_fs/file_flush.cpp index 5c4832a67..969e5e47b 100644 --- a/sdk/protected_fs/sgx_tprotected_fs/file_flush.cpp +++ b/sdk/protected_fs/sgx_tprotected_fs/file_flush.cpp @@ -37,7 +37,7 @@ #include -bool protected_fs_file::flush(/*bool mc*/) +bool protected_fs_file::flush() { bool result = false; @@ -56,7 +56,7 @@ bool protected_fs_file::flush(/*bool mc*/) return false; } - result = internal_flush(/*mc,*/ true); + result = internal_flush(true); if (result == false) { assert(file_status != SGX_FILE_STATUS_OK); @@ -70,18 +70,11 @@ bool protected_fs_file::flush(/*bool mc*/) } -bool protected_fs_file::internal_flush(/*bool mc,*/ bool flush_to_disk) +bool protected_fs_file::internal_flush(bool flush_to_disk) { if (need_writing == false) // no changes at all return true; -/* - if (mc == true && encrypted_part_plain.mc_value > (UINT_MAX-2)) - { - last_error = SGX_ERROR_FILE_MONOTONIC_COUNTER_AT_MAX; - return false; - } -*/ if (encrypted_part_plain.size > MD_USER_DATA_SIZE && root_mht.need_writing == true) // otherwise it's just one write - the meta-data node { if (_RECOVERY_HOOK_(0) || write_recovery_file() != true) @@ -104,44 +97,17 @@ bool protected_fs_file::internal_flush(/*bool mc,*/ bool flush_to_disk) } } -/* - sgx_status_t status; - - if (mc == true) - { - // increase monotonic counter local value - only if everything is ok, we will increase the real counter - if (encrypted_part_plain.mc_value == 0) - { - // no monotonic counter so far, need to create a new one - status = sgx_create_monotonic_counter(&encrypted_part_plain.mc_uuid, &encrypted_part_plain.mc_value); - if (status != SGX_SUCCESS) - { - clear_update_flag(); - file_status = SGX_FILE_STATUS_FLUSH_ERROR; - last_error = status; - return false; - } - } - encrypted_part_plain.mc_value++; - } -*/ if (_RECOVERY_HOOK_(3) || update_meta_data_node() != true) { 
clear_update_flag(); - /* - if (mc == true) - encrypted_part_plain.mc_value--; // don't have to do this as the file cannot be fixed, but doing it anyway to prevent future errors - */ + file_status = SGX_FILE_STATUS_CRYPTO_ERROR; // this is something that shouldn't happen, can't fix this... return false; } if (_RECOVERY_HOOK_(4) || write_all_changes_to_disk(flush_to_disk) != true) { - //if (mc == false) - file_status = SGX_FILE_STATUS_WRITE_TO_DISK_FAILED; // special case, need only to repeat write_all_changes_to_disk in order to repair it - //else - //file_status = SGX_FILE_STATUS_WRITE_TO_DISK_FAILED_NEED_MC; // special case, need to repeat write_all_changes_to_disk AND increase the monotonic counter in order to repair it + file_status = SGX_FILE_STATUS_WRITE_TO_DISK_FAILED; // special case, need only to repeat write_all_changes_to_disk in order to repair it return false; } @@ -156,20 +122,7 @@ bool protected_fs_file::internal_flush(/*bool mc,*/ bool flush_to_disk) erase_recovery_file(); } */ -/* - if (mc == true) - { - uint32_t mc_value; - status = sgx_increment_monotonic_counter(&encrypted_part_plain.mc_uuid, &mc_value); - if (status != SGX_SUCCESS) - { - file_status = SGX_FILE_STATUS_MC_NOT_INCREMENTED; // special case - need only to increase the MC in order to repair it - last_error = status; - return false; - } - assert(mc_value == encrypted_part_plain.mc_value); - } -*/ + return true; } @@ -577,5 +530,3 @@ void protected_fs_file::erase_recovery_file() status = u_sgxprotectedfs_remove(&result32, recovery_filename); (void)status; // don't care if it succeeded or failed...just remove the warning } - - diff --git a/sdk/protected_fs/sgx_tprotected_fs/file_init.cpp b/sdk/protected_fs/sgx_tprotected_fs/file_init.cpp index 53d23449c..64c0cffd0 100644 --- a/sdk/protected_fs/sgx_tprotected_fs/file_init.cpp +++ b/sdk/protected_fs/sgx_tprotected_fs/file_init.cpp @@ -67,13 +67,13 @@ bool protected_fs_file::cleanup_filename(const char* src, char* dest) } -protected_fs_file::protected_fs_file(const char* filename, const char* mode, const sgx_aes_gcm_128bit_key_t* import_key, const sgx_aes_gcm_128bit_key_t* kdk_key, bool _integrity_only) +protected_fs_file::protected_fs_file(const char* filename, const char* mode, const sgx_aes_gcm_128bit_key_t* import_key, const sgx_aes_gcm_128bit_key_t* kdk_key, bool _integrity_only, const uint64_t cache_page) { sgx_status_t status = SGX_SUCCESS; uint8_t result = 0; int32_t result32 = 0; - init_fields(); + init_fields(cache_page); if (filename == NULL || mode == NULL || strnlen(filename, 1) == 0 || strnlen(mode, 1) == 0) @@ -249,7 +249,7 @@ protected_fs_file::protected_fs_file(const char* filename, const char* mode, con } -void protected_fs_file::init_fields() +void protected_fs_file::init_fields(const uint64_t cache_page) { meta_data_node_number = 0; memset(&file_meta_data, 0, sizeof(meta_data_node_t)); @@ -277,13 +277,16 @@ void protected_fs_file::init_fields() open_mode.raw = 0; use_user_kdk_key = 0; master_key_count = 0; + max_cache_page = 0; recovery_filename[0] = '\0'; - memset(&mutex, 0, sizeof(sgx_thread_mutex_t)); + max_cache_page = cache_page; + + // set hash size + cache.rehash(max_cache_page); - // set hash size to fit MAX_PAGES_IN_CACHE - cache.rehash(MAX_PAGES_IN_CACHE); + memset(&mutex, 0, sizeof(sgx_thread_mutex_t)); } @@ -480,51 +483,6 @@ bool protected_fs_file::init_existing_file(const char* filename, const char* cle return false; } -/* - sgx_mc_uuid_t empty_mc_uuid = {0}; - - // check if the file contains an active monotonic counter - if 
(consttime_memequal(&empty_mc_uuid, &encrypted_part_plain.mc_uuid, sizeof(sgx_mc_uuid_t)) == 0) - { - uint32_t mc_value = 0; - - status = sgx_read_monotonic_counter(&encrypted_part_plain.mc_uuid, &mc_value); - if (status != SGX_SUCCESS) - { - last_error = status; - return false; - } - - if (encrypted_part_plain.mc_value < mc_value) - { - last_error = SGX_ERROR_FILE_MONOTONIC_COUNTER_IS_BIGGER; - return false; - } - - if (encrypted_part_plain.mc_value == mc_value + 1) // can happen if AESM failed - file value stayed one higher - { - sgx_status_t status = sgx_increment_monotonic_counter(&encrypted_part_plain.mc_uuid, &mc_value); - if (status != SGX_SUCCESS) - { - file_status = SGX_FILE_STATUS_MC_NOT_INCREMENTED; - last_error = status; - return false; - } - } - - if (encrypted_part_plain.mc_value != mc_value) - { - file_status = SGX_FILE_STATUS_CORRUPTED; - last_error = SGX_ERROR_UNEXPECTED; - return false; - } - } - else - { - assert(encrypted_part_plain.mc_value == 0); - encrypted_part_plain.mc_value = 0; // do this anyway for release... - } -*/ if (encrypted_part_plain.size > MD_USER_DATA_SIZE) { // read the root node of the mht @@ -679,4 +637,3 @@ bool protected_fs_file::pre_close(sgx_key_128bit_t* key, bool import) return retval; } - diff --git a/sdk/protected_fs/sgx_tprotected_fs/file_other.cpp b/sdk/protected_fs/sgx_tprotected_fs/file_other.cpp index 0cc6da7a2..a5ccc657d 100644 --- a/sdk/protected_fs/sgx_tprotected_fs/file_other.cpp +++ b/sdk/protected_fs/sgx_tprotected_fs/file_other.cpp @@ -45,102 +45,6 @@ int32_t protected_fs_file::remove(const char* filename) sgx_status_t status; int32_t result32 = 0; -/* - void* file = NULL; - int64_t real_file_size = 0; - - if (filename == NULL) - return 1; - - meta_data_node_t* file_meta_data = NULL; - meta_data_encrypted_t* encrypted_part_plain = NULL; - - // if we have a problem in any of the stages, we simply jump to the end and try to remove the file... 
- do { - status = u_sgxprotectedfs_check_if_file_exists(&result, filename); - if (status != SGX_SUCCESS) - break; - - if (result == 0) - { - errno = EINVAL; - return 1; // no such file, or file locked so we can't delete it anyways - } - - try { - file_meta_data = new meta_data_node_t; - encrypted_part_plain = new meta_data_encrypted_t; - } - catch (std::bad_alloc e) { - break; - } - - status = u_sgxprotectedfs_exclusive_file_open(&file, filename, 1, &real_file_size, &result32); - if (status != SGX_SUCCESS || file == NULL) - break; - - if (real_file_size == 0 || real_file_size % NODE_SIZE != 0) - break; // empty file or not an SGX protected FS file - - // might be an SGX protected FS file - status = u_sgxprotectedfs_fread_node(&result32, file, 0, (uint8_t*)file_meta_data, NODE_SIZE); - if (status != SGX_SUCCESS || result32 != 0) - break; - - if (file_meta_data->plain_part.major_version != SGX_FILE_MAJOR_VERSION) - break; - - sgx_aes_gcm_128bit_key_t zero_key_id = {0}; - sgx_aes_gcm_128bit_key_t key = {0}; - if (consttime_memequal(&file_meta_data->plain_part.key_id, &zero_key_id, sizeof(sgx_aes_gcm_128bit_key_t)) == 1) - break; // shared file - no monotonic counter - - sgx_key_request_t key_request = {0}; - key_request.key_name = SGX_KEYSELECT_SEAL; - key_request.key_policy = SGX_KEYPOLICY_MRENCLAVE; - memcpy(&key_request.key_id, &file_meta_data->plain_part.key_id, sizeof(sgx_key_id_t)); - - status = sgx_get_key(&key_request, &key); - if (status != SGX_SUCCESS) - break; - - status = sgx_rijndael128GCM_decrypt(&key, - file_meta_data->encrypted_part, sizeof(meta_data_encrypted_blob_t), - (uint8_t*)encrypted_part_plain, - file_meta_data->plain_part.meta_data_iv, SGX_AESGCM_IV_SIZE, - NULL, 0, - &file_meta_data->plain_part.meta_data_gmac); - if (status != SGX_SUCCESS) - break; - - sgx_mc_uuid_t empty_mc_uuid = {0}; - if (consttime_memequal(&empty_mc_uuid, &encrypted_part_plain->mc_uuid, sizeof(sgx_mc_uuid_t)) == 0) - { - status = sgx_destroy_monotonic_counter(&encrypted_part_plain->mc_uuid); - if (status != SGX_SUCCESS) - break; - - // monotonic counter was deleted, mission accomplished!! 
- } - } - while (0); - - // cleanup - if (file_meta_data != NULL) - delete file_meta_data; - - if (encrypted_part_plain != NULL) - { - // scrub the encrypted part - memset_s(encrypted_part_plain, sizeof(meta_data_encrypted_t), 0, sizeof(meta_data_encrypted_t)); - delete encrypted_part_plain; - } - - if (file != NULL) - u_sgxprotectedfs_fclose(&result32, file); - -*/ - // do the actual file removal status = u_sgxprotectedfs_remove(&result32, filename); if (status != SGX_SUCCESS) @@ -197,13 +101,6 @@ int protected_fs_file::seek(int64_t new_offset, int origin) return -1; } - //if (open_mode.binary == 0 && origin != SEEK_SET && new_offset != 0) - //{ - // last_error = EINVAL; - // sgx_thread_mutex_unlock(&mutex); - // return -1; - //} - int result = -1; switch (origin) @@ -299,33 +196,6 @@ void protected_fs_file::clear_error() } } -/* - if (file_status == SGX_FILE_STATUS_WRITE_TO_DISK_FAILED_NEED_MC) - { - if (write_all_changes_to_disk(true) == true) - { - need_writing = false; - file_status = SGX_FILE_STATUS_MC_NOT_INCREMENTED; // fall through...next 'if' should take care of this one - } - } - - if ((file_status == SGX_FILE_STATUS_MC_NOT_INCREMENTED) && - (encrypted_part_plain.mc_value <= (UINT_MAX-2))) - { - uint32_t mc_value; - sgx_status_t status = sgx_increment_monotonic_counter(&encrypted_part_plain.mc_uuid, &mc_value); - if (status == SGX_SUCCESS) - { - assert(mc_value == encrypted_part_plain.mc_value); - file_status = SGX_FILE_STATUS_OK; - } - else - { - last_error = status; - } - } -*/ - if (file_status == SGX_FILE_STATUS_OK) { last_error = SGX_SUCCESS; diff --git a/sdk/protected_fs/sgx_tprotected_fs/file_read_write.cpp b/sdk/protected_fs/sgx_tprotected_fs/file_read_write.cpp index 5c2d6cfc5..410476c57 100644 --- a/sdk/protected_fs/sgx_tprotected_fs/file_read_write.cpp +++ b/sdk/protected_fs/sgx_tprotected_fs/file_read_write.cpp @@ -312,7 +312,7 @@ void get_node_numbers(uint64_t offset, uint64_t* mht_node_number, uint64_t* data // node 1 - mht // nodes 2-97 - data (ATTACHED_DATA_NODES_COUNT == 96) // node 98 - mht - // node 99-195 - data + // node 99-194 - data // etc. 
uint64_t _mht_node_number; uint64_t _data_node_number; @@ -370,7 +370,7 @@ file_data_node_t* protected_fs_file::get_data_node() } // even if we didn't get the required data_node, we might have read other nodes in the process - while (cache.size() > MAX_PAGES_IN_CACHE) + while (cache.size() > max_cache_page) { void* data = cache.get_last(); assert(data != NULL); @@ -678,4 +678,3 @@ file_mht_node_t* protected_fs_file::read_mht_node(uint64_t mht_node_number) return file_mht_node; } - diff --git a/sdk/protected_fs/sgx_tprotected_fs/lru_cache.cpp b/sdk/protected_fs/sgx_tprotected_fs/lru_cache.cpp index 5b7918beb..bacc28526 100644 --- a/sdk/protected_fs/sgx_tprotected_fs/lru_cache.cpp +++ b/sdk/protected_fs/sgx_tprotected_fs/lru_cache.cpp @@ -61,7 +61,7 @@ lru_cache::~lru_cache() } -void lru_cache::rehash(uint32_t size_) +void lru_cache::rehash(uint64_t size_) { map.rehash(size_); } @@ -272,4 +272,3 @@ void lru_cache::remove_last() map.erase(key); delete map_node; } - diff --git a/sdk/protected_fs/sgx_tprotected_fs/lru_cache.h b/sdk/protected_fs/sgx_tprotected_fs/lru_cache.h index f024f7e25..64b43eb83 100644 --- a/sdk/protected_fs/sgx_tprotected_fs/lru_cache.h +++ b/sdk/protected_fs/sgx_tprotected_fs/lru_cache.h @@ -83,7 +83,7 @@ class lru_cache lru_cache(); ~lru_cache(); - void rehash(uint32_t size_); + void rehash(uint64_t size_); bool add(uint64_t key, void* p); void* get(uint64_t key); diff --git a/sdk/protected_fs/sgx_tprotected_fs/protected_fs_file.h b/sdk/protected_fs/sgx_tprotected_fs/protected_fs_file.h index 8c44089e1..66fef67e8 100644 --- a/sdk/protected_fs/sgx_tprotected_fs/protected_fs_file.h +++ b/sdk/protected_fs/sgx_tprotected_fs/protected_fs_file.h @@ -52,12 +52,15 @@ typedef enum SGX_FILE_STATUS_CRYPTO_ERROR, SGX_FILE_STATUS_CORRUPTED, SGX_FILE_STATUS_MEMORY_CORRUPTED, - //SGX_FILE_STATUS_WRITE_TO_DISK_FAILED_NEED_MC, - //SGX_FILE_STATUS_MC_NOT_INCREMENTED, SGX_FILE_STATUS_CLOSED, } protected_fs_status_e; +#ifndef SE_PAGE_SIZE +#define SE_PAGE_SIZE 0x1000 +#endif + #define MAX_PAGES_IN_CACHE 48 +#define DEFAULT_CACHE_SIZE (MAX_PAGES_IN_CACHE * SE_PAGE_SIZE) COMPILE_TIME_ASSERT(filename_length, FILENAME_MAX_LEN == FILENAME_MAX); @@ -152,6 +155,7 @@ class protected_fs_file uint8_t read_only; int64_t offset; // current file position (user's view) bool end_of_file; // flag + uint64_t max_cache_page; int64_t real_file_size; bool integrity_only; // If true, no encryption, only MAC. Default: false. 
@@ -176,7 +180,7 @@ class protected_fs_file sgx_iv_t empty_iv; sgx_report_t report; - void init_fields(); + void init_fields(const uint64_t cache_page); bool cleanup_filename(const char* src, char* dest); bool parse_mode(const char* mode); bool file_recovery(const char* filename); @@ -204,10 +208,10 @@ class protected_fs_file bool update_meta_data_node(); bool write_all_changes_to_disk(bool flush_to_disk); void erase_recovery_file(); - bool internal_flush(/*bool mc,*/ bool flush_to_disk); + bool internal_flush(bool flush_to_disk); public: - protected_fs_file(const char* filename, const char* mode, const sgx_aes_gcm_128bit_key_t* import_key, const sgx_aes_gcm_128bit_key_t* kdk_key, bool integrity_only); + protected_fs_file(const char* filename, const char* mode, const sgx_aes_gcm_128bit_key_t* import_key, const sgx_aes_gcm_128bit_key_t* kdk_key, bool integrity_only, const uint64_t cache_page); ~protected_fs_file(); size_t write(const void* ptr, size_t size, size_t count); @@ -218,7 +222,7 @@ class protected_fs_file uint32_t get_error(); void clear_error(); int32_t clear_cache(); - bool flush(/*bool mc*/); + bool flush(); bool pre_close(sgx_key_128bit_t* key, bool import); int32_t get_root_mac(sgx_aes_gcm_128bit_tag_t* root_mac); static int32_t remove(const char* filename); diff --git a/sdk/protected_fs/sgx_tprotected_fs/protected_fs_nodes.h b/sdk/protected_fs/sgx_tprotected_fs/protected_fs_nodes.h index 8c9abdec7..8541ed474 100644 --- a/sdk/protected_fs/sgx_tprotected_fs/protected_fs_nodes.h +++ b/sdk/protected_fs/sgx_tprotected_fs/protected_fs_nodes.h @@ -154,4 +154,3 @@ typedef struct _recovery_node #pragma pack(pop) #endif // _PROTECTED_FS_NODES_H_ - diff --git a/sdk/protected_fs/sgx_tprotected_fs/sgx_tprotected_fs.cpp b/sdk/protected_fs/sgx_tprotected_fs/sgx_tprotected_fs.cpp index 2784a0d5c..2cf3681b1 100644 --- a/sdk/protected_fs/sgx_tprotected_fs/sgx_tprotected_fs.cpp +++ b/sdk/protected_fs/sgx_tprotected_fs/sgx_tprotected_fs.cpp @@ -35,18 +35,22 @@ #include -static SGX_FILE* sgx_fopen_internal(const char* filename, const char* mode, const sgx_key_128bit_t *auto_key, const sgx_key_128bit_t *kdk_key, bool integrity_only) +static SGX_FILE* sgx_fopen_internal(const char* filename, const char* mode, const sgx_key_128bit_t *auto_key, const sgx_key_128bit_t *kdk_key, bool integrity_only, const uint64_t cache_size) { protected_fs_file* file = NULL; + uint64_t cache_page = 0; - if (filename == NULL || mode == NULL) + if (filename == NULL || mode == NULL || + cache_size < DEFAULT_CACHE_SIZE || cache_size % SE_PAGE_SIZE != 0) { errno = EINVAL; return NULL; } + cache_page = cache_size / SE_PAGE_SIZE; + try { - file = new protected_fs_file(filename, mode, auto_key, kdk_key, integrity_only); + file = new protected_fs_file(filename, mode, auto_key, kdk_key, integrity_only, cache_page); } catch (std::bad_alloc& e) { (void)e; // remove warning @@ -67,18 +71,23 @@ static SGX_FILE* sgx_fopen_internal(const char* filename, const char* mode, cons SGX_FILE* sgx_fopen_auto_key(const char* filename, const char* mode) { - return sgx_fopen_internal(filename, mode, NULL, NULL, false); + return sgx_fopen_internal(filename, mode, NULL, NULL, false, DEFAULT_CACHE_SIZE); } SGX_FILE* sgx_fopen_integrity_only(const char* filename, const char* mode) { sgx_key_128bit_t empty_key = {0}; - return sgx_fopen_internal(filename, mode, NULL, &empty_key, true); + return sgx_fopen_internal(filename, mode, NULL, &empty_key, true, DEFAULT_CACHE_SIZE); } SGX_FILE* sgx_fopen(const char* filename, const char* mode, const 
sgx_key_128bit_t *key) { - return sgx_fopen_internal(filename, mode, NULL, key, false); + return sgx_fopen_internal(filename, mode, NULL, key, false, DEFAULT_CACHE_SIZE); +} + +SGX_FILE* SGXAPI sgx_fopen_ex(const char* filename, const char* mode, const sgx_key_128bit_t *key, const uint64_t cache_size) +{ + return sgx_fopen_internal(filename, mode, NULL, key, false, cache_size); } @@ -137,30 +146,6 @@ int32_t sgx_fflush(SGX_FILE* stream) } -/* sgx_fflush_and_increment_mc - * Purpose: force actual write of all the cached data to the disk (see c++ fflush documentation for more details). - * in addition, in the first time this function is called, it adds a monotonic counter to the file - * in subsequent calls, the monotonic counter is incremented by one every time this function is called - * the monotonic counter is a limited resource, please read the SGX documentation for more details - * - * Parameters: - * stream - [IN] the file handle (opened with sgx_fopen or sgx_fopen_auto_key) - * - * Return value: - * int32_t - result, 0 on success, 1 in case of an error - check sgx_ferror for error code - * -int32_t sgx_fflush_and_increment_mc(SGX_FILE* stream) -{ - if (stream == NULL) - return 1; - - protected_fs_file* file = (protected_fs_file*)stream; - - return file->flush(true) == true ? 0 : 1; -} -*/ - - int32_t sgx_ferror(SGX_FILE* stream) { if (stream == NULL) @@ -226,7 +211,7 @@ int32_t sgx_remove(const char* filename) int32_t sgx_fexport_auto_key(const char* filename, sgx_key_128bit_t *key) { - SGX_FILE* stream = sgx_fopen_internal(filename, "r", NULL, NULL, false); + SGX_FILE* stream = sgx_fopen_internal(filename, "r", NULL, NULL, false, DEFAULT_CACHE_SIZE); if (stream == NULL) return 1; @@ -236,7 +221,7 @@ int32_t sgx_fexport_auto_key(const char* filename, sgx_key_128bit_t *key) int32_t sgx_fimport_auto_key(const char* filename, const sgx_key_128bit_t *key) { - SGX_FILE* stream = sgx_fopen_internal(filename, "r+", key, NULL, false); + SGX_FILE* stream = sgx_fopen_internal(filename, "r+", key, NULL, false, DEFAULT_CACHE_SIZE); if (stream == NULL) return 1; From d786b38d5eb77d93642430c7ef9a0aef022a033c Mon Sep 17 00:00:00 2001 From: Shaowei Song <1498430017@qq.com> Date: Fri, 2 Sep 2022 17:44:41 +0800 Subject: [PATCH 88/96] Add fsync to sgx_fflush and sgx_fclose to ensure persistency --- .../sgx_uprotected_fs/sgx_uprotected_fs.cpp | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/sdk/protected_fs/sgx_uprotected_fs/sgx_uprotected_fs.cpp b/sdk/protected_fs/sgx_uprotected_fs/sgx_uprotected_fs.cpp index 7e451cc27..22a8113d4 100644 --- a/sdk/protected_fs/sgx_uprotected_fs/sgx_uprotected_fs.cpp +++ b/sdk/protected_fs/sgx_uprotected_fs/sgx_uprotected_fs.cpp @@ -248,6 +248,12 @@ int32_t u_sgxprotectedfs_fclose(void* f) else flock(fd, LOCK_UN); + if ((result = fsync(fd)) != 0) + { + DEBUG_PRINT("fsync returned %d\n", result); + return -1; + } + if ((result = fclose(file)) != 0) { if (errno != 0) @@ -281,6 +287,12 @@ uint8_t u_sgxprotectedfs_fflush(void* f) return 1; } + if ((result = fsync(fileno(file))) != 0) + { + DEBUG_PRINT("fsync returned %d\n", result); + return 1; + } + return 0; } From f1c5425e1c4088707d93c9a039039f5bcae673c1 Mon Sep 17 00:00:00 2001 From: zhubojun Date: Fri, 9 Sep 2022 15:19:16 +0800 Subject: [PATCH 89/96] Fix bug in get_first_executable_segment_info() The first executable segment (contains code section) should have PF_X flag. 
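For context, the one-character change matters because `|` merges bits while `&` tests them: `p_flags | PF_X` is non-zero for every loadable segment with any flag set, so the old check also matched plain data segments. A standalone illustration (not part of this patch):

#include <elf.h>
#include <stdio.h>

int main(void)
{
    Elf64_Word p_flags = PF_R | PF_W; /* a read/write data segment, no execute bit */

    /* Old check: true whenever any flag bit is set, so it is a false positive here. */
    printf("p_flags | PF_X -> %s\n", (p_flags | PF_X) ? "match" : "no match");
    /* Fixed check: true only when the execute bit is actually present. */
    printf("p_flags & PF_X -> %s\n", (p_flags & PF_X) ? "match" : "no match");
    return 0;
}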
--- sdk/trts/linux/elf_parser.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/trts/linux/elf_parser.c b/sdk/trts/linux/elf_parser.c index e7c521381..b91463aab 100644 --- a/sdk/trts/linux/elf_parser.c +++ b/sdk/trts/linux/elf_parser.c @@ -597,7 +597,7 @@ int get_first_executable_segment_info(const void *enclave_base, for (; phnum < ehdr->e_phnum; phnum++, phdr++) { - if (phdr->p_type == PT_LOAD && phdr->p_flags | PF_X) + if (phdr->p_type == PT_LOAD && phdr->p_flags & PF_X) { *segment_start_addr = (size_t)enclave_base + phdr->p_vaddr; *segment_size = phdr->p_memsz; From 6715a60601b650f152665128efaf35abe903e44a Mon Sep 17 00:00:00 2001 From: Shaowei Song <1498430017@qq.com> Date: Mon, 31 Oct 2022 10:48:45 +0800 Subject: [PATCH 90/96] Revert "Add fsync to sgx_fflush and sgx_fclose to ensure persistency" due to performance regression --- .../sgx_uprotected_fs/sgx_uprotected_fs.cpp | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/sdk/protected_fs/sgx_uprotected_fs/sgx_uprotected_fs.cpp b/sdk/protected_fs/sgx_uprotected_fs/sgx_uprotected_fs.cpp index 22a8113d4..7e451cc27 100644 --- a/sdk/protected_fs/sgx_uprotected_fs/sgx_uprotected_fs.cpp +++ b/sdk/protected_fs/sgx_uprotected_fs/sgx_uprotected_fs.cpp @@ -248,12 +248,6 @@ int32_t u_sgxprotectedfs_fclose(void* f) else flock(fd, LOCK_UN); - if ((result = fsync(fd)) != 0) - { - DEBUG_PRINT("fsync returned %d\n", result); - return -1; - } - if ((result = fclose(file)) != 0) { if (errno != 0) @@ -287,12 +281,6 @@ uint8_t u_sgxprotectedfs_fflush(void* f) return 1; } - if ((result = fsync(fileno(file))) != 0) - { - DEBUG_PRINT("fsync returned %d\n", result); - return 1; - } - return 0; } From 32383ef8b82c5484c6447d2222c88bf3fa24ebc9 Mon Sep 17 00:00:00 2001 From: "Tate, Hongliang Tian" Date: Sat, 27 Jun 2020 17:37:00 +0800 Subject: [PATCH 91/96] Add the interrupt mechanism for dynamically-loaded workloads Workloads in an enclave can be classified into two categories: statically-loaded and dynamically-loaded workloads. Statically-loaded workloads are application code that are built into the enclave; that is, they are part of the enclave since enclave initialization. Dynamically-loaded workloads, as the name suggests, are application code loaded after the enclave gets running. One typical example of dynamically-loaded workloads is user programs loaded by a SGX LibOS. The user programs could be arbitrary code. As a result, once the user program gets executed, the LibOS may never have the opportunity to take control of the CPU. Without the ability to regain the control, it is impossible for the LibOS to implement features like interruptible signal handler or preemptive in-enclave thread scheduling. To address the issue above, we implement the signal-based interrupt mechanism for dynamically-loaded workloads. With the provided APIs, the users can now interrupt the dynamically-loaded workloads executed in a SGX thread by simply sending a real-time POSIX signal (whose number is 64, the max value of signal numbers on Linux) to the SGX thread. The signal will be captured and (if the timing is good) a pre-registered interrupt handler will get executed inside the enclave. 
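A minimal enclave-side sketch of the intended call sequence, based on the sgx_interrupt.h API added below; run_user_program, code_start and code_size are hypothetical placeholders, not part of this patch:

#include "sgx_interrupt.h"

/* Runs after the tRTS redirects execution to the registered handler; it must
 * decide how the interrupted code continues (e.g. hand control back to the
 * LibOS scheduler or deliver a LibOS-level signal). */
static void on_interrupt(sgx_interrupt_info_t *info)
{
    (void)info;
    /* LibOS-specific policy goes here. */
}

void run_user_program(size_t code_start, size_t code_size)
{
    if (sgx_interrupt_init(on_interrupt) != SGX_SUCCESS)
        return;

    /* Only this code region may be interrupted; the caller must make sure
     * interrupting it cannot deadlock the runtime. */
    if (sgx_interrupt_enable(code_start, code_size) != SGX_SUCCESS)
        return;

    /* ... jump into the dynamically-loaded user code ... */

    sgx_interrupt_disable();
}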
--- common/inc/internal/rts_cmd.h | 2 + common/inc/sgx_interrupt.h | 65 +++++++++++++++ psw/urts/linux/sig_handler.cpp | 25 +++++- sdk/simulation/trtssim/linux/Makefile | 1 + sdk/trts/Makefile | 1 + sdk/trts/trts_internal.h | 5 ++ sdk/trts/trts_interrupt.cpp | 63 ++++++++++++++ sdk/trts/trts_nsp.cpp | 8 ++ sdk/trts/trts_veh.cpp | 114 ++++++++++++++++++++++++++ 9 files changed, 283 insertions(+), 1 deletion(-) create mode 100644 common/inc/sgx_interrupt.h create mode 100644 sdk/trts/trts_interrupt.cpp diff --git a/common/inc/internal/rts_cmd.h b/common/inc/internal/rts_cmd.h index 725e34dc6..d0fabdfd6 100644 --- a/common/inc/internal/rts_cmd.h +++ b/common/inc/internal/rts_cmd.h @@ -40,6 +40,8 @@ #define ECMD_ECALL_PTHREAD (-6) +#define ECMD_INTERRUPT (-32) + /* Reserved for 3rd party usage */ #define RESERVED_FOR_3RD_PARTY_START -100 #define RESERVED_FOR_3RD_PARTY_END -1000 diff --git a/common/inc/sgx_interrupt.h b/common/inc/sgx_interrupt.h new file mode 100644 index 000000000..e2ca93269 --- /dev/null +++ b/common/inc/sgx_interrupt.h @@ -0,0 +1,65 @@ +#ifndef _SGX_INTERRUPT_H_ +#define _SGX_INTERRUPT_H_ + +// An interrupt mechanism for dynamically-loaded workloads in enclaves. +// +// Workloads in an enclave can be classified into two categories: statically-loaded and +// dynamically-loaded workloads. Statically-loaded workloads are application code that +// are built into the enclave; that is, they are part of the enclave since enclave initialization. +// Dynamically-loaded workloads, as the name suggests, are application code loaded after +// the enclave gets running. +// +// One typical example of dynamically-loaded workloads is user programs loaded by a SGX +// LibOS. The user programs could be arbitrary code. As a result, once the user program +// gets executed, the LibOS may never have the opportunity to take control of the CPU. +// Without the ability to regain the control, it is impossible for the LibOS to implement +// features like interruptible signal handler or preemptive in-enclave thread scheduling. +// +// To address the issue above, we implement the signal-based interrupt mechanism for +// dynamically-loaded workloads. With the provided APIs, the users can now interrupt the +// dynamically-loaded workloads executed in a SGX thread by simply sending a real-time +// POSIX signal (whose number is 64, the max value of signal numbers on Linux) to the SGX +// thread. The signal will be captured and (if the timing is good) a pre-registered +// interrupt handler will get executed inside the enclave. +// +// Note that the interrupt mechanism only performs the signal-to-interrupt conversion +// described above in a best-effort manner. That is, sending a signal may not +// result in the interrupt handler getting called. For example, if the target SGX thread is +// executing some code outside the enclave, then the signal received will be simply +// ignored, thus not triggering the interrupt handler to be executed. So the users of +// the interrupt mechanism should find other means to determine if an interrupt has been +// delivered, and if not, whether and when to resend the interrupt (via POSIX signal).
+ +#include "sgx_error.h" +#include "sgx_trts_exception.h" + +// A data structure that represents an interrupt +typedef struct _sgx_interrupt_info_t { + sgx_cpu_context_t cpu_context; +} sgx_interrupt_info_t; + +// A handler function that processes an interrupt +typedef void (*sgx_interrupt_handler_t)(sgx_interrupt_info_t*); + +#ifdef __cplusplus +extern "C" { +#endif + +// Initialize the interrupt mechanism for SGX threads. +sgx_status_t SGXAPI sgx_interrupt_init(sgx_interrupt_handler_t handler); + +// Make the current thread interruptible when executing in the given code region. +// +// By default, a SGX thread is not interruptible. It is the responsibility of the +// caller of this API to ensure that the given code region is ok to be interrupted, +// e.g., not causing deadlocks. +sgx_status_t SGXAPI sgx_interrupt_enable(size_t code_addr, size_t code_size); + +// Make the current thread uninterruptible. +sgx_status_t SGXAPI sgx_interrupt_disable(void); + +#ifdef __cplusplus +} +#endif + +#endif /* _SGX_INTERRUPT_H_ */ diff --git a/psw/urts/linux/sig_handler.cpp b/psw/urts/linux/sig_handler.cpp index 56fcc7fac..e1f862f23 100644 --- a/psw/urts/linux/sig_handler.cpp +++ b/psw/urts/linux/sig_handler.cpp @@ -86,6 +86,9 @@ typedef struct _ecall_param_t #define ECALL_PARAM (reinterpret_cast(context->uc_mcontext.gregs[REG_EBP] + 2 * 4)) #endif +// Real-time signal 64 is used to trigger an interrupt to an enclave thread +#define SIGRT_INTERRUPT (64) + extern "C" void *get_aep(); extern "C" void *get_eenterp(); extern "C" void *get_eretp(); @@ -128,8 +131,15 @@ void sig_handler(int signum, siginfo_t* siginfo, void *priv) //The ecall looks recursively, but it will not cause infinite call. //If exception is raised in trts again and again, the SSA will overflow, and finally it is EENTER exception. assert(reinterpret_cast(xbx) == param->tcs); + int ecmd; + if (signum != SIGRT_INTERRUPT) { + ecmd = ECMD_EXCEPT; + } else { + ecmd = ECMD_INTERRUPT; + } + CEnclave *enclave = param->trust_thread->get_enclave(); - unsigned int ret = enclave->ecall(ECMD_EXCEPT, param->ocall_table, NULL); + unsigned int ret = enclave->ecall(ecmd, param->ocall_table, NULL); if(SGX_SUCCESS == ret) { //ERESUME execute @@ -149,6 +159,11 @@ void sig_handler(int signum, siginfo_t* siginfo, void *priv) enclave->rdunlock(); CEnclavePool::instance()->unref_enclave(enclave); } + else if (signum == SIGRT_INTERRUPT) + { + // If not interrupting the enclave, just ignore the signal + return; + } //the case of exception on EENTER instruction. else if(xip == get_eenterp() && SE_EENTER == xax) @@ -226,6 +241,10 @@ void reg_sig_handler() sigdelset(&sig_act.sa_mask, SIGBUS); sigdelset(&sig_act.sa_mask, SIGTRAP); } + // The signal for interrupt should only interrupt the normal execution of + // the enclave, not interrupt the enclave's handling of exceptions or + // interrupts + sigaddset(&sig_act.sa_mask, SIGRT_INTERRUPT); ret = sigaction(SIGSEGV, &sig_act, &g_old_sigact[SIGSEGV]); if (0 != ret) abort(); @@ -237,6 +256,10 @@ void reg_sig_handler() if (0 != ret) abort(); ret = sigaction(SIGTRAP, &sig_act, &g_old_sigact[SIGTRAP]); if (0 != ret) abort(); + + sig_act.sa_flags = SA_SIGINFO ; // Remove SA_RESTART and SA_NODEFER + ret = sigaction(SIGRT_INTERRUPT, &sig_act, &g_old_sigact[SIGRT_INTERRUPT]); + if (0 != ret) abort(); } //trust_thread is saved at stack for ocall. 
diff --git a/sdk/simulation/trtssim/linux/Makefile b/sdk/simulation/trtssim/linux/Makefile index b079adda3..b25a3672e 100644 --- a/sdk/simulation/trtssim/linux/Makefile +++ b/sdk/simulation/trtssim/linux/Makefile @@ -66,6 +66,7 @@ TRTS1_OBJS := init_enclave.o \ trts_veh.o \ trts_xsave.o \ init_optimized_lib.o \ + trts_interrupt.o \ TRTS2_OBJS := trts_nsp.o TRTS_OBJS := $(TRTS1_OBJS) $(TRTS2_OBJS) diff --git a/sdk/trts/Makefile b/sdk/trts/Makefile index bf681fc05..8ff666615 100644 --- a/sdk/trts/Makefile +++ b/sdk/trts/Makefile @@ -54,6 +54,7 @@ OBJS1 := init_enclave.o \ trts_xsave.o \ init_optimized_lib.o \ trts_version.o \ + trts_interrupt.o \ trts_add_trim.o OBJS2 := trts_nsp.o diff --git a/sdk/trts/trts_internal.h b/sdk/trts/trts_internal.h index 195fc7217..45bb705dc 100644 --- a/sdk/trts/trts_internal.h +++ b/sdk/trts/trts_internal.h @@ -34,6 +34,7 @@ #include "util.h" #include "trts_shared_constants.h" #include "trts_internal_types.h" +#include "sgx_interrupt.h" #define TD2TCS(td) ((const void *)(((thread_data_t*)(td))->stack_base_addr + (size_t)STATIC_STACK_SIZE + (size_t)SE_GUARD_PAGE_SIZE)) #define TCS2CANARY(addr) ((size_t *)((size_t)(addr)-(size_t)SE_GUARD_PAGE_SIZE-(size_t)STATIC_STACK_SIZE+sizeof(size_t))) @@ -59,6 +60,10 @@ sgx_status_t do_uninit_enclave(void *tcs); int check_static_stack_canary(void *tcs); sgx_status_t _pthread_thread_run(void* ms); +sgx_status_t trts_handle_interrupt(void *tcs); +int check_ip_interruptible(size_t ip); +__attribute__((regparm(1))) void internal_handle_interrupt(sgx_interrupt_info_t *info); + #ifdef __cplusplus } #endif diff --git a/sdk/trts/trts_interrupt.cpp b/sdk/trts/trts_interrupt.cpp new file mode 100644 index 000000000..237f0eb5e --- /dev/null +++ b/sdk/trts/trts_interrupt.cpp @@ -0,0 +1,63 @@ +#include "sgx_interrupt.h" +#include "thread_data.h" +#include "trts_internal.h" + +static sgx_interrupt_handler_t registered_handler = NULL; + +static __thread size_t enabled_code_addr = 0; +static __thread size_t enabled_code_size = 0; +static __thread bool is_enabled = false; + +static void set_enabled(bool new_val) { + // Make sure all writes before this store are visible + __atomic_store_n(&is_enabled, new_val, __ATOMIC_RELEASE); +} + + +sgx_status_t sgx_interrupt_init(sgx_interrupt_handler_t handler) { + if (handler == NULL) { + return SGX_ERROR_INVALID_PARAMETER; + } + if (registered_handler != NULL) { + return SGX_ERROR_INVALID_STATE; + } + + registered_handler = handler; + return SGX_SUCCESS; +} + +sgx_status_t sgx_interrupt_enable(size_t code_addr, size_t code_size) { + if (registered_handler == NULL) { + return SGX_ERROR_INVALID_STATE; + } + if (is_enabled) { + return SGX_ERROR_INVALID_STATE; + } + + enabled_code_addr = code_addr; + enabled_code_size = code_size; + set_enabled(true); + return SGX_SUCCESS; +} + +sgx_status_t sgx_interrupt_disable(void) { + if (!is_enabled) { + return SGX_ERROR_INVALID_STATE; + } + set_enabled(false); + return SGX_SUCCESS; +} + +int check_ip_interruptible(size_t ip) { + return is_enabled && + ip >= enabled_code_addr && + (ip - enabled_code_addr) < enabled_code_size; +} + +__attribute__((regparm(1))) void internal_handle_interrupt(sgx_interrupt_info_t *info) { + registered_handler(info); + // Note that the registered handler must be in charge of continueing the execution of + // the interrupted workloads. 
+ // TODO: restore the CPU context info + abort(); +} diff --git a/sdk/trts/trts_nsp.cpp b/sdk/trts/trts_nsp.cpp index 24a0ad5a1..964214917 100644 --- a/sdk/trts/trts_nsp.cpp +++ b/sdk/trts/trts_nsp.cpp @@ -120,6 +120,14 @@ extern "C" int enter_enclave(int index, void *ms, void *tcs, int cssa) error = SGX_ERROR_STACK_OVERRUN; } } + else if((cssa == 1) && (index == ECMD_INTERRUPT)) + { + error = trts_handle_interrupt(tcs); + if (check_static_stack_canary(tcs) != 0) + { + error = SGX_ERROR_STACK_OVERRUN; + } + } if(error == SGX_ERROR_UNEXPECTED) { set_enclave_state(ENCLAVE_CRASHED); diff --git a/sdk/trts/trts_veh.cpp b/sdk/trts/trts_veh.cpp index df5a8ca9e..d5ebf2eb7 100644 --- a/sdk/trts/trts_veh.cpp +++ b/sdk/trts/trts_veh.cpp @@ -52,6 +52,8 @@ #include "se_cdefs.h" #include "emm_private.h" #include "sgx_mm_rt_abstraction.h" +#include "sgx_interrupt.h" + typedef struct _handler_node_t { uintptr_t callback; @@ -531,3 +533,115 @@ static bool is_standard_exception(uintptr_t xip) return false; } + + +extern "C" sgx_status_t trts_handle_interrupt(void *tcs) +{ + thread_data_t *thread_data = get_thread_data(); + ssa_gpr_t *ssa_gpr = NULL; + sgx_interrupt_info_t *info = NULL; + uintptr_t sp, *new_sp = NULL; + size_t size = 0; + + if ((thread_data == NULL) || (tcs == NULL)) goto default_handler; + if (check_static_stack_canary(tcs) != 0) + goto default_handler; + + if(get_enclave_state() != ENCLAVE_INIT_DONE) + { + goto default_handler; + } + + if (TD2TCS(thread_data) != tcs || (((thread_data->first_ssa_gpr) & (~0xfff)) - SE_PAGE_SIZE) != (uintptr_t)tcs) + { + goto default_handler; + } + + // no need to check the result of ssa_gpr because thread_data is always trusted + ssa_gpr = reinterpret_cast(thread_data->first_ssa_gpr); + + if(ssa_gpr->exit_info.valid == 1) + { // exceptions cannot be treated as interrupts + goto default_handler; + } + + if (is_standard_exception(ssa_gpr->REG(ip))) { + goto default_handler; + } + + if (!check_ip_interruptible(ssa_gpr->REG(ip))) { + goto default_handler; + } + + // The bottom 2 pages are used as stack to handle the non-standard exceptions. + // User should take responsibility to confirm the stack is not corrupted. 
+ sp = thread_data->stack_limit_addr + SE_PAGE_SIZE*2; + + if(!is_stack_addr((void*)sp, 0)) // check stack overrun only, alignment will be checked after exception handled + { + g_enclave_state = ENCLAVE_CRASHED; + return SGX_ERROR_STACK_OVERRUN; + } + + size = 0; + // x86_64 requires a 128-bytes red zone, which begins directly + // after the return addr and includes func's arguments + size += RED_ZONE_SIZE; + + // decrease the stack to give space for info + size += sizeof(sgx_exception_info_t); + sp -= size; + sp = sp & ~0xF; + + // check the decreased sp to make sure it is in the trusted stack range + if(!is_stack_addr((void *)sp, size)) + { + g_enclave_state = ENCLAVE_CRASHED; + return SGX_ERROR_STACK_OVERRUN; + } + + info = (sgx_interrupt_info_t *)sp; + // decrease the stack to save the SSA[0]->ip + size = sizeof(uintptr_t); + sp -= size; + if(!is_stack_addr((void *)sp, size)) + { + g_enclave_state = ENCLAVE_CRASHED; + return SGX_ERROR_STACK_OVERRUN; + } + + // initialize the info with SSA[0] + + info->cpu_context.REG(ax) = ssa_gpr->REG(ax); + info->cpu_context.REG(cx) = ssa_gpr->REG(cx); + info->cpu_context.REG(dx) = ssa_gpr->REG(dx); + info->cpu_context.REG(bx) = ssa_gpr->REG(bx); + info->cpu_context.REG(sp) = ssa_gpr->REG(sp); + info->cpu_context.REG(bp) = ssa_gpr->REG(bp); + info->cpu_context.REG(si) = ssa_gpr->REG(si); + info->cpu_context.REG(di) = ssa_gpr->REG(di); + info->cpu_context.REG(flags) = ssa_gpr->REG(flags); + info->cpu_context.REG(ip) = ssa_gpr->REG(ip); +#ifdef SE_64 + info->cpu_context.r8 = ssa_gpr->r8; + info->cpu_context.r9 = ssa_gpr->r9; + info->cpu_context.r10 = ssa_gpr->r10; + info->cpu_context.r11 = ssa_gpr->r11; + info->cpu_context.r12 = ssa_gpr->r12; + info->cpu_context.r13 = ssa_gpr->r13; + info->cpu_context.r14 = ssa_gpr->r14; + info->cpu_context.r15 = ssa_gpr->r15; +#endif + + new_sp = (uintptr_t *)sp; + ssa_gpr->REG(ip) = (size_t)internal_handle_interrupt; // prepare the ip for 2nd phase handling + ssa_gpr->REG(sp) = (size_t)new_sp; // new stack for internal_handle_interrupt + ssa_gpr->REG(ax) = (size_t)info; // 1st parameter (info) for LINUX32 + ssa_gpr->REG(di) = (size_t)info; // 1st parameter (info) for LINUX64, LINUX32 also uses it while restoring the context + *new_sp = info->cpu_context.REG(ip); // for debugger to get call trace + + return SGX_SUCCESS; + +default_handler: + return SGX_SUCCESS; +} From d01ec4fb26d1c939a74a94cb3a6c9ffee8e079d8 Mon Sep 17 00:00:00 2001 From: "zongmin.gu" Date: Tue, 28 Dec 2021 18:45:01 +0800 Subject: [PATCH 92/96] Fix the interrupt mode issue by restoring the FS and the RFlags --- sdk/simulation/uinst/u_instructions.cpp | 2 +- sdk/trts/trts_veh.cpp | 10 ++++++++-- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/sdk/simulation/uinst/u_instructions.cpp b/sdk/simulation/uinst/u_instructions.cpp index dcf732bf6..8ea8a2336 100644 --- a/sdk/simulation/uinst/u_instructions.cpp +++ b/sdk/simulation/uinst/u_instructions.cpp @@ -194,7 +194,7 @@ void sig_handler_sim(int signum, siginfo_t *siginfo, void *priv) p_ssa_gpr->r13 = context->uc_mcontext.gregs[REG_R13]; p_ssa_gpr->r14 = context->uc_mcontext.gregs[REG_R14]; p_ssa_gpr->r15 = context->uc_mcontext.gregs[REG_R15]; - p_ssa_gpr->rflags = context->uc_flags; + p_ssa_gpr->rflags = context->uc_mcontext.gregs[REG_EFL]; p_ssa_gpr->fs = tmp_fs_base; p_ssa_gpr->gs = tmp_gs_base; diff --git a/sdk/trts/trts_veh.cpp b/sdk/trts/trts_veh.cpp index d5ebf2eb7..52554b9d5 100644 --- a/sdk/trts/trts_veh.cpp +++ b/sdk/trts/trts_veh.cpp @@ -534,7 +534,6 @@ static bool
is_standard_exception(uintptr_t xip) return false; } - extern "C" sgx_status_t trts_handle_interrupt(void *tcs) { thread_data_t *thread_data = get_thread_data(); @@ -573,6 +572,11 @@ extern "C" sgx_status_t trts_handle_interrupt(void *tcs) goto default_handler; } + // Confirm enclave is execting the user code + if (ssa_gpr->fs == ssa_gpr->gs) { + return SGX_SUCCESS; + } + // The bottom 2 pages are used as stack to handle the non-standard exceptions. // User should take responsibility to confirm the stack is not corrupted. sp = thread_data->stack_limit_addr + SE_PAGE_SIZE*2; @@ -610,8 +614,10 @@ extern "C" sgx_status_t trts_handle_interrupt(void *tcs) return SGX_ERROR_STACK_OVERRUN; } - // initialize the info with SSA[0] + // restore the fs + ssa_gpr->fs = ssa_gpr->gs; + // initialize the info with SSA[0] info->cpu_context.REG(ax) = ssa_gpr->REG(ax); info->cpu_context.REG(cx) = ssa_gpr->REG(cx); info->cpu_context.REG(dx) = ssa_gpr->REG(dx); From 92ae3ecfa2e1107ce1a2ee9ab02af08c217ca262 Mon Sep 17 00:00:00 2001 From: zhubojun Date: Fri, 1 Apr 2022 15:57:52 +0800 Subject: [PATCH 93/96] Add PKU support: isolating LibOS from userspace apps --- sdk/trts/trts_veh.cpp | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/sdk/trts/trts_veh.cpp b/sdk/trts/trts_veh.cpp index 52554b9d5..6cf444a8d 100644 --- a/sdk/trts/trts_veh.cpp +++ b/sdk/trts/trts_veh.cpp @@ -68,6 +68,8 @@ sgx_mm_pfhandler_t g_mm_pfhandler = NULL; #define ENC_VEH_POINTER(x) (uintptr_t)(x) ^ g_veh_cookie #define DEC_VEH_POINTER(x) (sgx_exception_handler_t)((x) ^ g_veh_cookie) +#define XSAVE_PKRU_OFFSET (2688) +#define PKRU_LIBOS (0x0) // sgx_register_exception_handler() // register a custom exception handler @@ -331,6 +333,8 @@ extern "C" sgx_status_t trts_handle_exception(void *tcs) ssa_gpr_t *ssa_gpr = NULL; sgx_exception_info_t *info = NULL; uintptr_t sp_u, sp, *new_sp = NULL; + uintptr_t first_ssa_base = 0, pkru_base = 0; + uint32_t *pkru_ptr = NULL; size_t size = 0; bool standard_exception = true; @@ -507,6 +511,16 @@ extern "C" sgx_status_t trts_handle_exception(void *tcs) //mark valid to 0 to prevent eenter again ssa_gpr->exit_info.valid = 0; + if (!standard_exception && is_pkru_enabled()) + { + // When handling non-standard exceptions, the PKRU saved in SSA XSAVE area can be PKRU_USER. + // We need to update PKRU to PKRU_LIBOS, ensuring LibOS has enough access rights at `internal_handle_exception()`. + first_ssa_base = (uintptr_t)tcs + SE_PAGE_SIZE; + pkru_base = (uintptr_t)first_ssa_base + XSAVE_PKRU_OFFSET; + pkru_ptr = (uint32_t *)pkru_base; + *pkru_ptr = PKRU_LIBOS; + } + return SGX_SUCCESS; default_handler: @@ -540,6 +554,8 @@ extern "C" sgx_status_t trts_handle_interrupt(void *tcs) ssa_gpr_t *ssa_gpr = NULL; sgx_interrupt_info_t *info = NULL; uintptr_t sp, *new_sp = NULL; + uintptr_t first_ssa_base = 0, pkru_base = 0; + uint32_t *pkru_ptr = NULL; size_t size = 0; if ((thread_data == NULL) || (tcs == NULL)) goto default_handler; @@ -646,6 +662,15 @@ extern "C" sgx_status_t trts_handle_interrupt(void *tcs) ssa_gpr->REG(di) = (size_t)info; // 1st parameter (info) for LINUX64, LINUX32 also uses it while restoring the context *new_sp = info->cpu_context.REG(ip); // for debugger to get call trace + if (is_pkru_enabled()) + { + // Update PKRU to PKRU_LIBOS, ensuring LibOS has enough access rights at `internal_handle_exception()`. 
+ first_ssa_base = (uintptr_t)tcs + SE_PAGE_SIZE; + pkru_base = (uintptr_t)first_ssa_base + XSAVE_PKRU_OFFSET; + pkru_ptr = (uint32_t *)pkru_base; + *pkru_ptr = PKRU_LIBOS; + } + return SGX_SUCCESS; default_handler: From faf8deaa226b048359e721fb84abb624dc950216 Mon Sep 17 00:00:00 2001 From: "Hui, Chunyang" Date: Tue, 22 Nov 2022 08:20:25 +0000 Subject: [PATCH 94/96] Enlarge the stack size for non-standard exception handler --- sdk/trts/trts_veh.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/sdk/trts/trts_veh.cpp b/sdk/trts/trts_veh.cpp index 6cf444a8d..9fad06135 100644 --- a/sdk/trts/trts_veh.cpp +++ b/sdk/trts/trts_veh.cpp @@ -368,9 +368,9 @@ extern "C" sgx_status_t trts_handle_exception(void *tcs) if (!standard_exception) { - // The bottom 2 pages are used as stack to handle the non-standard exceptions. + // The bottom 4 pages are used as stack to handle the non-standard exceptions. // User should take responsibility to confirm the stack is not corrupted. - sp = thread_data->stack_limit_addr + SE_PAGE_SIZE*2; + sp = thread_data->stack_limit_addr + SE_PAGE_SIZE*4; } else { @@ -593,9 +593,9 @@ extern "C" sgx_status_t trts_handle_interrupt(void *tcs) return SGX_SUCCESS; } - // The bottom 2 pages are used as stack to handle the non-standard exceptions. + // The bottom 4 pages are used as stack to handle the non-standard exceptions. // User should take responsibility to confirm the stack is not corrupted. - sp = thread_data->stack_limit_addr + SE_PAGE_SIZE*2; + sp = thread_data->stack_limit_addr + SE_PAGE_SIZE*4; if(!is_stack_addr((void*)sp, 0)) // check stack overrun only, alignment will be checked after exception handled { From 01180a903ffe3d2c563a4ae0ba4c5a3a2310b22b Mon Sep 17 00:00:00 2001 From: "Hui, Chunyang" Date: Tue, 22 Nov 2022 06:47:16 +0000 Subject: [PATCH 95/96] Fix edmm compilation in simulation mode Also add the EDMM API test to the install script. Simulation mode is not tested heavily. When official EDMM support is released, this patch should be removed.
--- compile_and_install.sh | 16 +++++++++++++++- external/sgx-emm/api_tests/App/App.cpp | 3 ++- external/sgx-emm/ut/Makefile | 2 +- sdk/simulation/trtssim/linux/Makefile | 23 +++++++++++++++++++++-- sdk/trts/ema_rt.c | 3 ++- 5 files changed, 41 insertions(+), 6 deletions(-) diff --git a/compile_and_install.sh b/compile_and_install.sh index b4337128a..d48319d92 100755 --- a/compile_and_install.sh +++ b/compile_and_install.sh @@ -1,10 +1,12 @@ #!/bin/bash +set -e + pushd `dirname $0` > /dev/null SCRIPT_PATH=`pwd` popd > /dev/null # Uninstall old sdk -sudo /opt/intel/sgxsdk/uninstall.sh +sudo /opt/intel/sgxsdk/uninstall.sh || true # Compile SDK and install make USE_OPT_LIBS=3 sdk_no_mitigation @@ -12,4 +14,16 @@ make sdk_install_pkg_no_mitigation sudo mkdir -p /opt/intel cd /opt/intel yes yes | sudo ${SCRIPT_PATH}/linux/installer/bin/sgx_linux_x64_sdk_*.bin +source /opt/intel/sgxsdk/environment + +cd ${SCRIPT_PATH} +make -C psw/urts/linux +cd build/linux +ln -sf libsgx_enclave_common.so libsgx_enclave_common.so.1 +export LD_LIBRARY_PATH=${SCRIPT_PATH}/build/linux/ + +cd ${SCRIPT_PATH}/external/sgx-emm/api_tests/ +make clean +make +./test_mm_api diff --git a/external/sgx-emm/api_tests/App/App.cpp b/external/sgx-emm/api_tests/App/App.cpp index f5a88396f..91d16b503 100644 --- a/external/sgx-emm/api_tests/App/App.cpp +++ b/external/sgx-emm/api_tests/App/App.cpp @@ -202,6 +202,7 @@ void driver(int sid) continue; }else { + printf("ret = %d\n", ret); abort(); } //test_tcs does its own retry @@ -405,7 +406,7 @@ int SGX_CDECL main(int argc, char *argv[]) int ret = 0; //17 threads for 100 iterations passed when this is checked in - ret += test_sgx_mm_functions(247); + ret += test_sgx_mm_functions(2); // FIXME: 247 freeze on NUC. ret += test_unsafe(); sgx_destroy_enclave(global_eid); diff --git a/external/sgx-emm/ut/Makefile b/external/sgx-emm/ut/Makefile index bf67ad4e6..34a7fa2e8 100644 --- a/external/sgx-emm/ut/Makefile +++ b/external/sgx-emm/ut/Makefile @@ -39,7 +39,7 @@ CPPFLAGS += -I$(COMMON_DIR)/inc \ all: test_bit_array test_ema test_public test_emm -test_bit_array: test_bit_array.c ../bit_array.c +test_bit_array: test_bit_array.c ../emm_src/bit_array.c @$(CC) $(CPPFLAGS) $^ -o $@ test_ema: test_ema.c stub.c ../ema.c ../bit_array.c diff --git a/sdk/simulation/trtssim/linux/Makefile b/sdk/simulation/trtssim/linux/Makefile index b25a3672e..cb32c4da6 100644 --- a/sdk/simulation/trtssim/linux/Makefile +++ b/sdk/simulation/trtssim/linux/Makefile @@ -62,14 +62,20 @@ TRTS1_OBJS := init_enclave.o \ trts.o \ trts_ecall.o \ trts_ocall.o \ + ema_init.o \ trts_util.o \ trts_veh.o \ trts_xsave.o \ init_optimized_lib.o \ trts_interrupt.o \ + trts_version.o \ + trts_add_trim.o \ TRTS2_OBJS := trts_nsp.o -TRTS_OBJS := $(TRTS1_OBJS) $(TRTS2_OBJS) +TRTS3_OBJS := ema_rt.o + +TRTS_OBJS := $(TRTS1_OBJS) $(TRTS2_OBJS) $(TRTS3_OBJS) + TINST_OBJS := t_instructions.o \ deriv.o @@ -92,6 +98,9 @@ TLDR_OBJS := $(TLDR_ASM_OBJS) $(TLDR_C_OBJS) LIBTRTS := libsgx_trts_sim.a +LIBSGX_MM_PATH = $(LINUX_EXTERNAL_DIR)/sgx-emm +LIBSGX_MM = libsgx_mm.a + vpath %.cpp $(TRTS_DIR):$(TINST_DIR) vpath %.S $(LOWLIB_DIR):$(TLDR_DIR):$(XSAVE_DIR) vpath %.c $(TLS_DIR):$(TLDR_DIR) @@ -101,8 +110,15 @@ vpath %.c $(TLS_DIR):$(TLDR_DIR) all: $(LIBTRTS) | $(BUILD_DIR) $(CP) $< $| -$(LIBTRTS): $(TRTS_OBJS) $(TINST_OBJS) $(LOWLIB_OBJS) $(TLS_OBJS) $(TLDR_OBJS) +$(LIBTRTS): $(TRTS_OBJS) $(TINST_OBJS) $(LOWLIB_OBJS) $(TLS_OBJS) $(TLDR_OBJS) $(LIBSGX_MM) $(AR) rcsD $@ $(TRTS_OBJS) $(TINST_OBJS) $(LOWLIB_OBJS) $(TLS_OBJS) $(TLDR_OBJS) + $(MKDIR) 
$(BUILD_DIR)/.libsgx_mm + $(RM) $(BUILD_DIR)/.libsgx_mm/* && cd $(BUILD_DIR)/.libsgx_mm && $(AR) x $(LIBSGX_MM_PATH)/libsgx_mm.a + $(AR) rsD $@ $(BUILD_DIR)/.libsgx_mm/*.o + @$(RM) -rf $(BUILD_DIR)/.libsgx_mm + +$(LIBSGX_MM): + $(MAKE) -C $(LIBSGX_MM_PATH) # ------------------------------------------------------------ $(TRTS1_OBJS): CPPFLAGS += -I$(COMMON_DIR)/inc/tlibc \ @@ -110,6 +126,9 @@ $(TRTS1_OBJS): CPPFLAGS += -I$(COMMON_DIR)/inc/tlibc \ $(TRTS2_OBJS): %.o:%.cpp $(CXX) -c $(filter-out -fstack-protector-strong, $(CXXFLAGS)) -I$(SIM_DIR)/tinst/ $(CPPFLAGS) $< -o $@ +$(TRTS3_OBJS): %.o:../%.c + $(CC) -c $(TCFLAGS) $(CFLAGS) -I$(COMMON_DIR)/inc -I$(COMMON_DIR)/inc/internal -I$(COMMON_DIR)/inc/internal/linux -I$(COMMON_DIR)/inc/tlibc -I$(SIM_DIR)/tinst/ -fPIC $< -o $@ + # Explicitly disable optimization for tRTS simulation library, # since the '_SE3' function has assumptions on stack layout. diff --git a/sdk/trts/ema_rt.c b/sdk/trts/ema_rt.c index 997b1b23e..e232f0947 100644 --- a/sdk/trts/ema_rt.c +++ b/sdk/trts/ema_rt.c @@ -59,7 +59,8 @@ int SGXAPI sgx_mm_alloc_ocall(size_t addr, size_t size, int page_type, int alloc #ifdef SE_SIM (void)addr; (void)size; - (void)flags; + (void)page_type; + (void)alloc_flags; return 0; #else int status = SGX_SUCCESS; From 5f6ed5325a98e479564d2a4396f93c92b5d137e5 Mon Sep 17 00:00:00 2001 From: "Hui, Chunyang" Date: Wed, 23 Nov 2022 07:41:48 +0000 Subject: [PATCH 96/96] Enable vdso and support interrupt mechanism --- psw/urts/linux/sig_handler.cpp | 133 ++++++++++++--------------------- 1 file changed, 47 insertions(+), 86 deletions(-) diff --git a/psw/urts/linux/sig_handler.cpp b/psw/urts/linux/sig_handler.cpp index e1f862f23..e99ea2d4b 100644 --- a/psw/urts/linux/sig_handler.cpp +++ b/psw/urts/linux/sig_handler.cpp @@ -89,6 +89,9 @@ typedef struct _ecall_param_t // Real-time signal 64 is used to trigger an interrupt to an enclave thread #define SIGRT_INTERRUPT (64) +/* Known from the kernel driver. Relative to %rbp. */ +#define SGX_ENCLAVE_OFFSET_OF_RUN 16 + extern "C" void *get_aep(); extern "C" void *get_eenterp(); extern "C" void *get_eretp(); @@ -98,15 +101,18 @@ extern "C" int vdso_sgx_enter_enclave_wrapper(unsigned long rdi, unsigned long r unsigned long rdx, unsigned int function, unsigned long r8, unsigned long r9, struct sgx_enclave_run *run); - +static size_t g_sgx_enter_enclave_symbol_start = 0; +static size_t g_sgx_enter_enclave_symbol_end = 0; void reg_sig_handler(); int do_ecall(const int fn, const void *ocall_table, const void *ms, CTrustThread *trust_thread); +// TODO: For simulation mode, the signal should be handled in the old way. void sig_handler(int signum, siginfo_t* siginfo, void *priv) { + UNUSED(siginfo); SE_TRACE(SE_TRACE_DEBUG, "signal handler is triggered\n"); ucontext_t* context = reinterpret_cast(priv); unsigned int *xip = reinterpret_cast(context->uc_mcontext.gregs[REG_XIP]); @@ -115,14 +121,14 @@ void sig_handler(int signum, siginfo_t* siginfo, void *priv) /* `xbx' is only used in assertions. */ size_t xbx = context->uc_mcontext.gregs[REG_XBX]; #endif - ecall_param_t *param = ECALL_PARAM; //the case of exception on ERESUME or within enclave. //We can't distinguish ERESUME exception from exception within enclave. We assume it is the exception within enclave. //If it is ERESUME exception, it will raise another exception in ecall and ecall will return error. - if(xip == get_aep() - && SE_ERESUME == xax) - { + //Here we can't get an accurate address for AEP. We just use the symbol's range of __vdso_sgx_enter_enclave. 
+ if(g_sgx_enter_enclave_symbol_start <= (size_t)xip + && (size_t)xip <= g_sgx_enter_enclave_symbol_end + && SE_ERESUME == xax) { #ifndef SE_SIM assert(ENCLU == (*xip & 0xffffff)); #endif @@ -130,96 +136,54 @@ void sig_handler(int signum, siginfo_t* siginfo, void *priv) SE_TRACE(SE_TRACE_NOTICE, "exception on ERESUME\n"); //The ecall looks recursively, but it will not cause infinite call. //If exception is raised in trts again and again, the SSA will overflow, and finally it is EENTER exception. - assert(reinterpret_cast(xbx) == param->tcs); int ecmd; if (signum != SIGRT_INTERRUPT) { ecmd = ECMD_EXCEPT; } else { ecmd = ECMD_INTERRUPT; } + // This handler is only used to handle interrupt signal. + assert(ecmd == ECMD_INTERRUPT); - CEnclave *enclave = param->trust_thread->get_enclave(); - unsigned int ret = enclave->ecall(ecmd, param->ocall_table, NULL); - if(SGX_SUCCESS == ret) + size_t rbp = context->uc_mcontext.gregs[REG_RBP]; + size_t* run_context_addr = reinterpret_cast(rbp + SGX_ENCLAVE_OFFSET_OF_RUN); + struct sgx_enclave_run* run = reinterpret_cast (*run_context_addr); + SE_TRACE(SE_TRACE_DEBUG, "in sig_handler, run_addr = 0x%lx\n", run); + +#ifndef NDEBUG + tcs_t *tcs = reinterpret_cast(xbx); + assert(reinterpret_cast(run->tcs) == tcs); +#endif + + __u64 *user_data = (__u64*)run->user_data; + CTrustThread* trust_thread = reinterpret_cast(user_data[1]); + if (trust_thread == NULL) { - //ERESUME execute + run->user_data = SGX_ERROR_UNEXPECTED; return; } - //If the exception is caused by enclave lost or internal stack overrun, then return the error code to ecall caller elegantly. - else if(SGX_ERROR_ENCLAVE_LOST == ret || SGX_ERROR_STACK_OVERRUN == ret) + + void *ocall_table = reinterpret_cast(user_data[0]); + unsigned int ret = do_ecall(ecmd, ocall_table, NULL, trust_thread); + if(SGX_SUCCESS == ret) { - //enter_enlcave function will return with ret which is from tRTS; - context->uc_mcontext.gregs[REG_XIP] = reinterpret_cast(get_eretp()); - context->uc_mcontext.gregs[REG_XSI] = ret; + //ERESUME execute + SE_TRACE(SE_TRACE_DEBUG, "SIGRT_INTERRUPT handle successful\n"); return; } - //If we can't fix the exception within enclave, then give the handle to other signal hanlder. - //Call the previous signal handler. The default signal handler should terminate the application. - - enclave->rdunlock(); - CEnclavePool::instance()->unref_enclave(enclave); } else if (signum == SIGRT_INTERRUPT) { // If not interrupting the enclave, just ignore the signal return; - } - //the case of exception on EENTER instruction. - else if(xip == get_eenterp() - && SE_EENTER == xax) - { - assert(reinterpret_cast(xbx) == param->tcs); - assert(ENCLU == (*xip & 0xffffff)); - SE_TRACE(SE_TRACE_NOTICE, "exception on EENTER\n"); - //enter_enlcave function will return with SE_ERROR_ENCLAVE_LOST - context->uc_mcontext.gregs[REG_XIP] = reinterpret_cast(get_eretp()); - context->uc_mcontext.gregs[REG_XSI] = SGX_ERROR_ENCLAVE_LOST; - return; - } - - SE_TRACE(SE_TRACE_DEBUG, "NOT enclave signal\n"); - //it is not SE exception. if the old signal handler is default signal handler, we reset signal handler. - //raise the signal again, and the default signal handler will be called. 
- if(SIG_DFL == g_old_sigact[signum].sa_handler) - { - signal(signum, SIG_DFL); - raise(signum); - } - //if there is old signal handler, we need transfer the signal to the old signal handler; - else - { - if(!(g_old_sigact[signum].sa_flags & SA_NODEFER)) - sigaddset(&g_old_sigact[signum].sa_mask, signum); - - sigset_t cur_set; - pthread_sigmask(SIG_SETMASK, &g_old_sigact[signum].sa_mask, &cur_set); - - if(g_old_sigact[signum].sa_flags & SA_SIGINFO) - { - g_old_sigact[signum].sa_sigaction(signum, siginfo, priv); - } - else - { - g_old_sigact[signum].sa_handler(signum); - } - - pthread_sigmask(SIG_SETMASK, &cur_set, NULL); - - //If the g_old_sigact set SA_RESETHAND, it will break the chain which means - //g_old_sigact->next_old_sigact will not be called. Our signal handler does not - //responsable for that. We just follow what os do on SA_RESETHAND. - if(g_old_sigact[signum].sa_flags & SA_RESETHAND) - g_old_sigact[signum].sa_handler = SIG_DFL; + } else { + SE_TRACE(SE_TRACE_DEBUG, "Unexpected error occured\n"); + abort(); } } void reg_sig_handler() { - if(vdso_sgx_enter_enclave != NULL) - { - SE_TRACE(SE_TRACE_DEBUG, "vdso_sgx_enter_enclave exists, we won't use signal handler here\n"); - return; - } int ret = 0; struct sigaction sig_act; SE_TRACE(SE_TRACE_DEBUG, "signal handler is registered\n"); @@ -246,17 +210,6 @@ void reg_sig_handler() // interrupts sigaddset(&sig_act.sa_mask, SIGRT_INTERRUPT); - ret = sigaction(SIGSEGV, &sig_act, &g_old_sigact[SIGSEGV]); - if (0 != ret) abort(); - ret = sigaction(SIGFPE, &sig_act, &g_old_sigact[SIGFPE]); - if (0 != ret) abort(); - ret = sigaction(SIGILL, &sig_act, &g_old_sigact[SIGILL]); - if (0 != ret) abort(); - ret = sigaction(SIGBUS, &sig_act, &g_old_sigact[SIGBUS]); - if (0 != ret) abort(); - ret = sigaction(SIGTRAP, &sig_act, &g_old_sigact[SIGTRAP]); - if (0 != ret) abort(); - sig_act.sa_flags = SA_SIGINFO ; // Remove SA_RESTART and SA_NODEFER ret = sigaction(SIGRT_INTERRUPT, &sig_act, &g_old_sigact[SIGRT_INTERRUPT]); if (0 != ret) abort(); @@ -269,7 +222,7 @@ extern "C" int enter_enclave(const tcs_t *tcs, const long fn, const void *ocall_ extern "C" int stack_sticker(unsigned int proc, sgx_ocall_table_t *ocall_table, void *ms, CTrustThread *trust_thread, tcs_t *tcs); -void* get_vdso_sym(const char* vdso_func_name) +void* get_vdso_sym(const char* vdso_func_name, size_t *size) { void *ret = NULL; @@ -305,6 +258,9 @@ void* get_vdso_sym(const char* vdso_func_name) auto vdname = dynstr + sym.st_name; if (strcmp(vdname, vdso_func_name) == 0) { ret = (vdso_address + sym.st_value); + if (size != NULL) { + *size = sym.st_size; + } break; } } @@ -315,7 +271,6 @@ void* get_vdso_sym(const char* vdso_func_name) return ret; } - static int sgx_urts_vdso_handler(long rdi, long rsi, long rdx, long ursp, long r8, long r9, struct sgx_enclave_run *run) { @@ -402,7 +357,13 @@ static void __attribute__((constructor)) vdso_detector(void) #else if(vdso_sgx_enter_enclave == NULL) { - vdso_sgx_enter_enclave = (vdso_sgx_enter_enclave_t)get_vdso_sym("__vdso_sgx_enter_enclave"); + size_t size = 0; + void* start_addr = get_vdso_sym("__vdso_sgx_enter_enclave", &size); + g_sgx_enter_enclave_symbol_start = (size_t)start_addr; + g_sgx_enter_enclave_symbol_end = g_sgx_enter_enclave_symbol_start + size; + SE_TRACE(SE_TRACE_DEBUG, "get_vdso_range = [%x, %x]\n", g_sgx_enter_enclave_symbol_start, g_sgx_enter_enclave_symbol_end); + + vdso_sgx_enter_enclave = (vdso_sgx_enter_enclave_t)g_sgx_enter_enclave_symbol_start; } #endif }
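Taken together with the interrupt patches above, the untrusted side triggers an in-enclave interrupt by sending real-time signal 64 to a thread that is currently executing inside the enclave; delivery is best-effort, and the signal is simply ignored if the thread happens to be outside the enclave. A sketch, assuming `worker` is the pthread_t of such a thread (not part of the patches):

#include <pthread.h>
#include <signal.h>

/* 64 is the SIGRT_INTERRUPT value reserved in sig_handler.cpp (SIGRTMAX on Linux). */
int send_enclave_interrupt(pthread_t worker)
{
    return pthread_kill(worker, 64);
}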