diff --git a/.bazelrc b/.bazelrc new file mode 100644 index 0000000000..6b01ef3cff --- /dev/null +++ b/.bazelrc @@ -0,0 +1,9 @@ +build --java_language_version=17 +build --tool_java_language_version=17 +build --java_runtime_version=remotejdk_17 +build --tool_java_runtime_version=remotejdk_17 + +test --test_output=errors + +common --registry=https://raw.githubusercontent.com/eclipse-score/bazel_registry/main/ +common --registry=https://bcr.bazel.build \ No newline at end of file diff --git a/.devcontainer/S-CORE/Dockerfile b/.devcontainer/S-CORE/Dockerfile new file mode 100644 index 0000000000..ec8d23cf68 --- /dev/null +++ b/.devcontainer/S-CORE/Dockerfile @@ -0,0 +1,91 @@ +# Set the base image with a default version +FROM debian@sha256:731dd1380d6a8d170a695dbeb17fe0eade0e1c29f654cf0a3a07f372191c3f4b AS builder + +# Set noninteractive env for apt-get to avoid prompts +ENV DEBIAN_FRONTEND=noninteractive + +# Define arguments for versions +ARG GCC_VERSION=11.3.0-12 +ARG LLVM_VERSION=16.0.6-15 + +# Update package list and install dependencies +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + wget=1.21.3-1+deb12u1 \ + git=1:2.39.5-0+deb12u2 \ + git-lfs=3.3.0-1+deb12u1 \ + graphviz=2.42.2-7+deb12u1 \ + default-jre=2:1.17-74 \ + libtinfo5=6.4-4 \ + g++-${GCC_VERSION} \ + gdb=13.1-3 \ + googletest=1.12.1-0.2 \ + gcovr=5.2-1 \ + cmake=3.25.1-1 \ + clang-${LLVM_VERSION} \ + clang-tidy-${LLVM_VERSION} \ + clang-format-${LLVM_VERSION} \ + doxygen=1.9.4-4 \ + curl \ + make \ + python3 \ + python3-pip=23.0.1+dfsg-1 \ + python3-venv \ + pipx \ + locales \ + ssh-client \ + && apt-get clean && \ + rm -rf bazel-bin bazel-inc_json bazel-out bazel-testlogs && \ + rm -rf /var/lib/apt/lists/* + +# Generate and set the locale +RUN echo "en_US.UTF-8 UTF-8" >> /etc/locale.gen && \ + locale-gen en_US.UTF-8 && \ + update-locale LANG=en_US.UTF-8 + +# Environment variables for locale +ENV LANG=en_US.UTF-8 +ENV LC_ALL=en_US.UTF-8 + +COPY ./S-CORE/requirements.txt 
./S-CORE/requirements.txt +RUN python3 -m venv venv && \ + . venv/bin/activate && \ + pip install --require-hashes -r ./S-CORE/requirements.txt + +# Specify default versions via update alternatives +RUN update-alternatives --install /usr/bin/clang-tidy clang-tidy /usr/bin/clang-tidy-16 100 && \ + update-alternatives --set clang-tidy /usr/bin/clang-tidy-16 && \ + update-alternatives --install /usr/bin/clang-format clang-format /usr/bin/clang-format-16 100 && \ + update-alternatives --set clang-format /usr/bin/clang-format-16 && \ + update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-11 100 && \ + update-alternatives --set g++ /usr/bin/g++-11 + +# Install bazelisk tool +RUN wget https://github.com/bazelbuild/bazelisk/releases/download/v1.26.0/bazelisk-linux-amd64 && \ + chmod +x bazelisk-linux-amd64 && \ + mv bazelisk-linux-amd64 /usr/local/bin/bazel + +# Install buildifier +RUN wget https://github.com/bazelbuild/buildtools/releases/download/v8.2.0/buildifier-linux-amd64 && \ + chmod +x buildifier-linux-amd64 && \ + mv buildifier-linux-amd64 /usr/bin/buildifier + +# Create non-root user +ARG USERNAME=developer +ARG USER_UID=1000 +ARG USER_GID=$USER_UID + +RUN groupadd --gid $USER_GID $USERNAME \ + && useradd --uid $USER_UID --gid $USER_GID -m $USERNAME \ + # Add sudo support + && apt-get update \ + && apt-get install -y sudo \ + && echo $USERNAME ALL=\(root\) NOPASSWD:ALL > /etc/sudoers.d/$USERNAME \ + && chmod 0440 /etc/sudoers.d/$USERNAME + +# Default user +USER $USERNAME + +# Install trudag using pipx +RUN pipx install git+https://gitlab.com/CodethinkLabs/trustable/trustable@9957f12171cb898d83df5ae708fdba0a38fece2e && \ + pipx ensurepath diff --git a/.devcontainer/S-CORE/devcontainer.json b/.devcontainer/S-CORE/devcontainer.json new file mode 100644 index 0000000000..4fd319493f --- /dev/null +++ b/.devcontainer/S-CORE/devcontainer.json @@ -0,0 +1,49 @@ +{ + "name": "SCORE Dev Container", + "build": { + // Sets the run context to one level up instead of the 
.devcontainer folder. + "context": "..", + // Update the 'dockerFile' property if you aren't using the standard 'Dockerfile' filename. + "dockerfile": "./Dockerfile", + "args": { + //specifying the base image -> only debian based images supported, as apt-get is used + "BASE_IMAGE": "debian:12", + // version for g++ compiler + "GCC_VERSION":"11", + // version for clang-tidy and clang-format + "LLVM_VERSION": "16" + } + }, + "customizations": { + "vscode": { + "extensions": [ + "ms-vscode.cpptools", + "ms-vscode.cmake-tools", + "ms-vscode.makefile-tools", + "ms-vscode.cpptools-extension-pack", + "hediet.vscode-drawio", + "jebbs.plantuml", + "streetsidesoftware.code-spell-checker", + "BazelBuild.vscode-bazel", + "eamodio.gitlens", + "ms-python.python", + "ms-python.vscode-pylance" + ] + } + }, + + // Features to add to the dev container. More info: https://containers.dev/features. + // "features": {}, + + // Use 'forwardPorts' to make a list of ports inside the container available locally. + // "forwardPorts": [], + + // Uncomment the next line to run commands after the container is created. + "postCreateCommand": "bash .devcontainer/S-CORE/post_create_script.sh" + + // Configure tool-specific properties. + // "customizations": {}, + + // Uncomment to connect as an existing user other than the container default. More info: https://aka.ms/dev-containers-non-root. 
+ // "remoteUser": "devcontainer" +} diff --git a/.devcontainer/S-CORE/post_create_script.sh b/.devcontainer/S-CORE/post_create_script.sh new file mode 100644 index 0000000000..4c3b6da47a --- /dev/null +++ b/.devcontainer/S-CORE/post_create_script.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +python3 -m venv .venv +source .venv/bin/activate + +# Install trustable +pip install --require-hashes -r .devcontainer/S-CORE/requirements.txt +pip install git+https://gitlab.com/CodethinkLabs/trustable/trustable@9957f12171cb898d83df5ae708fdba0a38fece2e diff --git a/.devcontainer/S-CORE/requirements.in b/.devcontainer/S-CORE/requirements.in new file mode 100644 index 0000000000..24fcf348d2 --- /dev/null +++ b/.devcontainer/S-CORE/requirements.in @@ -0,0 +1,5 @@ +sphinx==8.2.3 +sphinx-design==0.6.1 +sphinx-needs==5.1.0 +sphinxcontrib.plantuml +pytest==8.4.1 \ No newline at end of file diff --git a/.devcontainer/S-CORE/requirements.txt b/.devcontainer/S-CORE/requirements.txt new file mode 100644 index 0000000000..d66f354c75 --- /dev/null +++ b/.devcontainer/S-CORE/requirements.txt @@ -0,0 +1,461 @@ +# +# This file is autogenerated by pip-compile with Python 3.11 +# by the following command: +# +# pip-compile --generate-hashes /workspaces/json/.devcontainer/S-CORE/requirements.in +# +alabaster==1.0.0 \ + --hash=sha256:c00dca57bca26fa62a6d7d0a9fcce65f3e026e9bfe33e9c538fd3fbb2144fd9e \ + --hash=sha256:fc6786402dc3fcb2de3cabd5fe455a2db534b371124f1f21de8731783dec828b + # via sphinx +attrs==25.3.0 \ + --hash=sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3 \ + --hash=sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b + # via + # jsonschema + # referencing +babel==2.17.0 \ + --hash=sha256:0c54cffb19f690cdcc52a3b50bcbf71e07a808d1c80d549f2459b9d2cf0afb9d \ + --hash=sha256:4d0b53093fdfb4b21c92b5213dba5a1b23885afa8383709427046b21c366e5f2 + # via sphinx +certifi==2025.8.3 \ + --hash=sha256:e564105f78ded564e3ae7c923924435e1daa7463faeab5bb932bc53ffae63407 \ + 
--hash=sha256:f6c12493cfb1b06ba2ff328595af9350c65d6644968e5d3a2ffd78699af217a5 + # via requests +charset-normalizer==3.4.3 \ + --hash=sha256:00237675befef519d9af72169d8604a067d92755e84fe76492fef5441db05b91 \ + --hash=sha256:02425242e96bcf29a49711b0ca9f37e451da7c70562bc10e8ed992a5a7a25cc0 \ + --hash=sha256:027b776c26d38b7f15b26a5da1044f376455fb3766df8fc38563b4efbc515154 \ + --hash=sha256:07a0eae9e2787b586e129fdcbe1af6997f8d0e5abaa0bc98c0e20e124d67e601 \ + --hash=sha256:0cacf8f7297b0c4fcb74227692ca46b4a5852f8f4f24b3c766dd94a1075c4884 \ + --hash=sha256:0e78314bdc32fa80696f72fa16dc61168fda4d6a0c014e0380f9d02f0e5d8a07 \ + --hash=sha256:0f2be7e0cf7754b9a30eb01f4295cc3d4358a479843b31f328afd210e2c7598c \ + --hash=sha256:13faeacfe61784e2559e690fc53fa4c5ae97c6fcedb8eb6fb8d0a15b475d2c64 \ + --hash=sha256:14c2a87c65b351109f6abfc424cab3927b3bdece6f706e4d12faaf3d52ee5efe \ + --hash=sha256:1606f4a55c0fd363d754049cdf400175ee96c992b1f8018b993941f221221c5f \ + --hash=sha256:16a8770207946ac75703458e2c743631c79c59c5890c80011d536248f8eaa432 \ + --hash=sha256:18343b2d246dc6761a249ba1fb13f9ee9a2bcd95decc767319506056ea4ad4dc \ + --hash=sha256:18b97b8404387b96cdbd30ad660f6407799126d26a39ca65729162fd810a99aa \ + --hash=sha256:1bb60174149316da1c35fa5233681f7c0f9f514509b8e399ab70fea5f17e45c9 \ + --hash=sha256:1e8ac75d72fa3775e0b7cb7e4629cec13b7514d928d15ef8ea06bca03ef01cae \ + --hash=sha256:1ef99f0456d3d46a50945c98de1774da86f8e992ab5c77865ea8b8195341fc19 \ + --hash=sha256:2001a39612b241dae17b4687898843f254f8748b796a2e16f1051a17078d991d \ + --hash=sha256:23b6b24d74478dc833444cbd927c338349d6ae852ba53a0d02a2de1fce45b96e \ + --hash=sha256:252098c8c7a873e17dd696ed98bbe91dbacd571da4b87df3736768efa7a792e4 \ + --hash=sha256:257f26fed7d7ff59921b78244f3cd93ed2af1800ff048c33f624c87475819dd7 \ + --hash=sha256:2c322db9c8c89009a990ef07c3bcc9f011a3269bc06782f916cd3d9eed7c9312 \ + --hash=sha256:30a96e1e1f865f78b030d65241c1ee850cdf422d869e9028e2fc1d5e4db73b92 \ + 
--hash=sha256:30d006f98569de3459c2fc1f2acde170b7b2bd265dc1943e87e1a4efe1b67c31 \ + --hash=sha256:31a9a6f775f9bcd865d88ee350f0ffb0e25936a7f930ca98995c05abf1faf21c \ + --hash=sha256:320e8e66157cc4e247d9ddca8e21f427efc7a04bbd0ac8a9faf56583fa543f9f \ + --hash=sha256:34a7f768e3f985abdb42841e20e17b330ad3aaf4bb7e7aeeb73db2e70f077b99 \ + --hash=sha256:3653fad4fe3ed447a596ae8638b437f827234f01a8cd801842e43f3d0a6b281b \ + --hash=sha256:3cd35b7e8aedeb9e34c41385fda4f73ba609e561faedfae0a9e75e44ac558a15 \ + --hash=sha256:3cfb2aad70f2c6debfbcb717f23b7eb55febc0bb23dcffc0f076009da10c6392 \ + --hash=sha256:416175faf02e4b0810f1f38bcb54682878a4af94059a1cd63b8747244420801f \ + --hash=sha256:41d1fc408ff5fdfb910200ec0e74abc40387bccb3252f3f27c0676731df2b2c8 \ + --hash=sha256:42e5088973e56e31e4fa58eb6bd709e42fc03799c11c42929592889a2e54c491 \ + --hash=sha256:4ca4c094de7771a98d7fbd67d9e5dbf1eb73efa4f744a730437d8a3a5cf994f0 \ + --hash=sha256:511729f456829ef86ac41ca78c63a5cb55240ed23b4b737faca0eb1abb1c41bc \ + --hash=sha256:53cd68b185d98dde4ad8990e56a58dea83a4162161b1ea9272e5c9182ce415e0 \ + --hash=sha256:585f3b2a80fbd26b048a0be90c5aae8f06605d3c92615911c3a2b03a8a3b796f \ + --hash=sha256:5b413b0b1bfd94dbf4023ad6945889f374cd24e3f62de58d6bb102c4d9ae534a \ + --hash=sha256:5d8d01eac18c423815ed4f4a2ec3b439d654e55ee4ad610e153cf02faf67ea40 \ + --hash=sha256:6aab0f181c486f973bc7262a97f5aca3ee7e1437011ef0c2ec04b5a11d16c927 \ + --hash=sha256:6cf8fd4c04756b6b60146d98cd8a77d0cdae0e1ca20329da2ac85eed779b6849 \ + --hash=sha256:6fb70de56f1859a3f71261cbe41005f56a7842cc348d3aeb26237560bfa5e0ce \ + --hash=sha256:6fce4b8500244f6fcb71465d4a4930d132ba9ab8e71a7859e6a5d59851068d14 \ + --hash=sha256:70bfc5f2c318afece2f5838ea5e4c3febada0be750fcf4775641052bbba14d05 \ + --hash=sha256:73dc19b562516fc9bcf6e5d6e596df0b4eb98d87e4f79f3ae71840e6ed21361c \ + --hash=sha256:74d77e25adda8581ffc1c720f1c81ca082921329452eba58b16233ab1842141c \ + --hash=sha256:78deba4d8f9590fe4dae384aeff04082510a709957e968753ff3c48399f6f92a \ + 
--hash=sha256:86df271bf921c2ee3818f0522e9a5b8092ca2ad8b065ece5d7d9d0e9f4849bcc \ + --hash=sha256:88ab34806dea0671532d3f82d82b85e8fc23d7b2dd12fa837978dad9bb392a34 \ + --hash=sha256:8999f965f922ae054125286faf9f11bc6932184b93011d138925a1773830bbe9 \ + --hash=sha256:8dcfc373f888e4fb39a7bc57e93e3b845e7f462dacc008d9749568b1c4ece096 \ + --hash=sha256:939578d9d8fd4299220161fdd76e86c6a251987476f5243e8864a7844476ba14 \ + --hash=sha256:96b2b3d1a83ad55310de8c7b4a2d04d9277d5591f40761274856635acc5fcb30 \ + --hash=sha256:a2d08ac246bb48479170408d6c19f6385fa743e7157d716e144cad849b2dd94b \ + --hash=sha256:b256ee2e749283ef3ddcff51a675ff43798d92d746d1a6e4631bf8c707d22d0b \ + --hash=sha256:b5e3b2d152e74e100a9e9573837aba24aab611d39428ded46f4e4022ea7d1942 \ + --hash=sha256:b89bc04de1d83006373429975f8ef9e7932534b8cc9ca582e4db7d20d91816db \ + --hash=sha256:bd28b817ea8c70215401f657edef3a8aa83c29d447fb0b622c35403780ba11d5 \ + --hash=sha256:c60e092517a73c632ec38e290eba714e9627abe9d301c8c8a12ec32c314a2a4b \ + --hash=sha256:c6dbd0ccdda3a2ba7c2ecd9d77b37f3b5831687d8dc1b6ca5f56a4880cc7b7ce \ + --hash=sha256:c6e490913a46fa054e03699c70019ab869e990270597018cef1d8562132c2669 \ + --hash=sha256:c6f162aabe9a91a309510d74eeb6507fab5fff92337a15acbe77753d88d9dcf0 \ + --hash=sha256:c6fd51128a41297f5409deab284fecbe5305ebd7e5a1f959bee1c054622b7018 \ + --hash=sha256:cc34f233c9e71701040d772aa7490318673aa7164a0efe3172b2981218c26d93 \ + --hash=sha256:cc9370a2da1ac13f0153780040f465839e6cccb4a1e44810124b4e22483c93fe \ + --hash=sha256:ccf600859c183d70eb47e05a44cd80a4ce77394d1ac0f79dbd2dd90a69a3a049 \ + --hash=sha256:ce571ab16d890d23b5c278547ba694193a45011ff86a9162a71307ed9f86759a \ + --hash=sha256:cf1ebb7d78e1ad8ec2a8c4732c7be2e736f6e5123a4146c5b89c9d1f585f8cef \ + --hash=sha256:d0e909868420b7049dafd3a31d45125b31143eec59235311fc4c57ea26a4acd2 \ + --hash=sha256:d22dbedd33326a4a5190dd4fe9e9e693ef12160c77382d9e87919bce54f3d4ca \ + --hash=sha256:d716a916938e03231e86e43782ca7878fb602a125a91e7acb8b5112e2e96ac16 \ + 
--hash=sha256:d79c198e27580c8e958906f803e63cddb77653731be08851c7df0b1a14a8fc0f \ + --hash=sha256:d95bfb53c211b57198bb91c46dd5a2d8018b3af446583aab40074bf7988401cb \ + --hash=sha256:e28e334d3ff134e88989d90ba04b47d84382a828c061d0d1027b1b12a62b39b1 \ + --hash=sha256:ec557499516fc90fd374bf2e32349a2887a876fbf162c160e3c01b6849eaf557 \ + --hash=sha256:fb6fecfd65564f208cbf0fba07f107fb661bcd1a7c389edbced3f7a493f70e37 \ + --hash=sha256:fb731e5deb0c7ef82d698b0f4c5bb724633ee2a489401594c5c88b02e6cb15f7 \ + --hash=sha256:fb7f67a1bfa6e40b438170ebdc8158b78dc465a5a67b6dde178a46987b244a72 \ + --hash=sha256:fd10de089bcdcd1be95a2f73dbe6254798ec1bda9f450d5828c96f93e2536b9c \ + --hash=sha256:fdabf8315679312cfa71302f9bd509ded4f2f263fb5b765cf1433b39106c3cc9 + # via requests +docutils==0.21.2 \ + --hash=sha256:3a6b18732edf182daa3cd12775bbb338cf5691468f91eeeb109deff6ebfa986f \ + --hash=sha256:dafca5b9e384f0e419294eb4d2ff9fa826435bf15f15b7bd45723e8ad76811b2 + # via sphinx +idna==3.10 \ + --hash=sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9 \ + --hash=sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3 + # via requests +imagesize==1.4.1 \ + --hash=sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b \ + --hash=sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a + # via sphinx +iniconfig==2.1.0 \ + --hash=sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7 \ + --hash=sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760 + # via pytest +jinja2==3.1.6 \ + --hash=sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d \ + --hash=sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67 + # via sphinx +jsonschema==4.25.1 \ + --hash=sha256:3fba0169e345c7175110351d456342c364814cfcf3b964ba4587f22915230a63 \ + --hash=sha256:e4a9655ce0da0c0b67a085847e00a3a51449e1157f4f75e9fb5aa545e122eb85 + # via sphinx-needs +jsonschema-specifications==2025.4.1 \ + 
--hash=sha256:4653bffbd6584f7de83a67e0d620ef16900b390ddc7939d56684d6c81e33f1af \ + --hash=sha256:630159c9f4dbea161a6a2205c3011cc4f18ff381b189fff48bb39b9bf26ae608 + # via jsonschema +markupsafe==3.0.2 \ + --hash=sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4 \ + --hash=sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30 \ + --hash=sha256:1225beacc926f536dc82e45f8a4d68502949dc67eea90eab715dea3a21c1b5f0 \ + --hash=sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9 \ + --hash=sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396 \ + --hash=sha256:1a9d3f5f0901fdec14d8d2f66ef7d035f2157240a433441719ac9a3fba440b13 \ + --hash=sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028 \ + --hash=sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca \ + --hash=sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557 \ + --hash=sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832 \ + --hash=sha256:3169b1eefae027567d1ce6ee7cae382c57fe26e82775f460f0b2778beaad66c0 \ + --hash=sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b \ + --hash=sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579 \ + --hash=sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a \ + --hash=sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c \ + --hash=sha256:48032821bbdf20f5799ff537c7ac3d1fba0ba032cfc06194faffa8cda8b560ff \ + --hash=sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c \ + --hash=sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22 \ + --hash=sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094 \ + --hash=sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb \ + --hash=sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e \ + 
--hash=sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5 \ + --hash=sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a \ + --hash=sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d \ + --hash=sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a \ + --hash=sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b \ + --hash=sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8 \ + --hash=sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225 \ + --hash=sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c \ + --hash=sha256:88b49a3b9ff31e19998750c38e030fc7bb937398b1f78cfa599aaef92d693144 \ + --hash=sha256:8c4e8c3ce11e1f92f6536ff07154f9d49677ebaaafc32db9db4620bc11ed480f \ + --hash=sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87 \ + --hash=sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d \ + --hash=sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93 \ + --hash=sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf \ + --hash=sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158 \ + --hash=sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84 \ + --hash=sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb \ + --hash=sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48 \ + --hash=sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171 \ + --hash=sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c \ + --hash=sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6 \ + --hash=sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd \ + --hash=sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d \ + --hash=sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1 \ + 
--hash=sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d \ + --hash=sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca \ + --hash=sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a \ + --hash=sha256:cfad01eed2c2e0c01fd0ecd2ef42c492f7f93902e39a42fc9ee1692961443a29 \ + --hash=sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe \ + --hash=sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798 \ + --hash=sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c \ + --hash=sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8 \ + --hash=sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f \ + --hash=sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f \ + --hash=sha256:eaa0a10b7f72326f1372a713e73c3f739b524b3af41feb43e4921cb529f5929a \ + --hash=sha256:eb7972a85c54febfb25b5c4b4f3af4dcc731994c7da0d8a0b4a6eb0640e1d178 \ + --hash=sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0 \ + --hash=sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79 \ + --hash=sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430 \ + --hash=sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50 + # via jinja2 +packaging==25.0 \ + --hash=sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484 \ + --hash=sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f + # via + # pytest + # sphinx +pluggy==1.6.0 \ + --hash=sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3 \ + --hash=sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746 + # via pytest +pygments==2.19.2 \ + --hash=sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887 \ + --hash=sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b + # via + # pytest + # sphinx +pytest==8.4.1 \ + 
--hash=sha256:539c70ba6fcead8e78eebbf1115e8b589e7565830d7d006a8723f19ac8a0afb7 \ + --hash=sha256:7c67fd69174877359ed9371ec3af8a3d2b04741818c51e5e99cc1742251fa93c + # via -r /workspaces/json/.devcontainer/S-CORE/requirements.in +referencing==0.36.2 \ + --hash=sha256:df2e89862cd09deabbdba16944cc3f10feb6b3e6f18e902f7cc25609a34775aa \ + --hash=sha256:e8699adbbf8b5c7de96d8ffa0eb5c158b3beafce084968e2ea8bb08c6794dcd0 + # via + # jsonschema + # jsonschema-specifications +requests==2.32.5 \ + --hash=sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6 \ + --hash=sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf + # via + # requests-file + # sphinx + # sphinx-needs +requests-file==2.1.0 \ + --hash=sha256:0f549a3f3b0699415ac04d167e9cb39bccfb730cb832b4d20be3d9867356e658 \ + --hash=sha256:cf270de5a4c5874e84599fc5778303d496c10ae5e870bfa378818f35d21bda5c + # via sphinx-needs +roman-numerals-py==3.1.0 \ + --hash=sha256:9da2ad2fb670bcf24e81070ceb3be72f6c11c440d73bd579fbeca1e9f330954c \ + --hash=sha256:be4bf804f083a4ce001b5eb7e3c0862479d10f94c936f6c4e5f250aa5ff5bd2d + # via sphinx +rpds-py==0.27.0 \ + --hash=sha256:010c4843a3b92b54373e3d2291a7447d6c3fc29f591772cc2ea0e9f5c1da434b \ + --hash=sha256:05284439ebe7d9f5f5a668d4d8a0a1d851d16f7d47c78e1fab968c8ad30cab04 \ + --hash=sha256:0665be515767dc727ffa5f74bd2ef60b0ff85dad6bb8f50d91eaa6b5fb226f51 \ + --hash=sha256:069e0384a54f427bd65d7fda83b68a90606a3835901aaff42185fcd94f5a9295 \ + --hash=sha256:08680820d23df1df0a0260f714d12966bc6c42d02e8055a91d61e03f0c47dda0 \ + --hash=sha256:0954e3a92e1d62e83a54ea7b3fdc9efa5d61acef8488a8a3d31fdafbfb00460d \ + --hash=sha256:09965b314091829b378b60607022048953e25f0b396c2b70e7c4c81bcecf932e \ + --hash=sha256:0c431bfb91478d7cbe368d0a699978050d3b112d7f1d440a41e90faa325557fd \ + --hash=sha256:0f401c369186a5743694dd9fc08cba66cf70908757552e1f714bfc5219c655b5 \ + --hash=sha256:0f4f69d7a4300fbf91efb1fb4916421bd57804c01ab938ab50ac9c4aa2212f03 \ + 
--hash=sha256:11e8e28c0ba0373d052818b600474cfee2fafa6c9f36c8587d217b13ee28ca7d \ + --hash=sha256:130c1ffa5039a333f5926b09e346ab335f0d4ec393b030a18549a7c7e7c2cea4 \ + --hash=sha256:1321bce595ad70e80f97f998db37356b2e22cf98094eba6fe91782e626da2f71 \ + --hash=sha256:13bbc4846ae4c993f07c93feb21a24d8ec637573d567a924b1001e81c8ae80f9 \ + --hash=sha256:14f028eb47f59e9169bfdf9f7ceafd29dd64902141840633683d0bad5b04ff34 \ + --hash=sha256:15ea4d2e182345dd1b4286593601d766411b43f868924afe297570658c31a62b \ + --hash=sha256:181bc29e59e5e5e6e9d63b143ff4d5191224d355e246b5a48c88ce6b35c4e466 \ + --hash=sha256:183f5e221ba3e283cd36fdfbe311d95cd87699a083330b4f792543987167eff1 \ + --hash=sha256:184f0d7b342967f6cda94a07d0e1fae177d11d0b8f17d73e06e36ac02889f303 \ + --hash=sha256:190d7285cd3bb6d31d37a0534d7359c1ee191eb194c511c301f32a4afa5a1dd4 \ + --hash=sha256:19c990fdf5acecbf0623e906ae2e09ce1c58947197f9bced6bbd7482662231c4 \ + --hash=sha256:1d66f45b9399036e890fb9c04e9f70c33857fd8f58ac8db9f3278cfa835440c3 \ + --hash=sha256:203f581accef67300a942e49a37d74c12ceeef4514874c7cede21b012613ca2c \ + --hash=sha256:20e222a44ae9f507d0f2678ee3dd0c45ec1e930f6875d99b8459631c24058aec \ + --hash=sha256:2406d034635d1497c596c40c85f86ecf2bf9611c1df73d14078af8444fe48031 \ + --hash=sha256:249ab91ceaa6b41abc5f19513cb95b45c6f956f6b89f1fe3d99c81255a849f9e \ + --hash=sha256:25a4aebf8ca02bbb90a9b3e7a463bbf3bee02ab1c446840ca07b1695a68ce424 \ + --hash=sha256:27bac29bbbf39601b2aab474daf99dbc8e7176ca3389237a23944b17f8913d97 \ + --hash=sha256:299a245537e697f28a7511d01038c310ac74e8ea213c0019e1fc65f52c0dcb23 \ + --hash=sha256:2cff9bdd6c7b906cc562a505c04a57d92e82d37200027e8d362518df427f96cd \ + --hash=sha256:2e307cb5f66c59ede95c00e93cd84190a5b7f3533d7953690b2036780622ba81 \ + --hash=sha256:2e39169ac6aae06dd79c07c8a69d9da867cef6a6d7883a0186b46bb46ccfb0c3 \ + --hash=sha256:2fe6e18e5c8581f0361b35ae575043c7029d0a92cb3429e6e596c2cdde251432 \ + --hash=sha256:3001013dae10f806380ba739d40dee11db1ecb91684febb8406a87c2ded23dae \ + 
--hash=sha256:32196b5a99821476537b3f7732432d64d93a58d680a52c5e12a190ee0135d8b5 \ + --hash=sha256:33ba649a6e55ae3808e4c39e01580dc9a9b0d5b02e77b66bb86ef117922b1264 \ + --hash=sha256:341d8acb6724c0c17bdf714319c393bb27f6d23d39bc74f94221b3e59fc31828 \ + --hash=sha256:343cf24de9ed6c728abefc5d5c851d5de06497caa7ac37e5e65dd572921ed1b5 \ + --hash=sha256:36184b44bf60a480863e51021c26aca3dfe8dd2f5eeabb33622b132b9d8b8b54 \ + --hash=sha256:3841f66c1ffdc6cebce8aed64e36db71466f1dc23c0d9a5592e2a782a3042c79 \ + --hash=sha256:4045e2fc4b37ec4b48e8907a5819bdd3380708c139d7cc358f03a3653abedb89 \ + --hash=sha256:419dd9c98bcc9fb0242be89e0c6e922df333b975d4268faa90d58499fd9c9ebe \ + --hash=sha256:42894616da0fc0dcb2ec08a77896c3f56e9cb2f4b66acd76fc8992c3557ceb1c \ + --hash=sha256:42ccc57ff99166a55a59d8c7d14f1a357b7749f9ed3584df74053fd098243451 \ + --hash=sha256:4300e15e7d03660f04be84a125d1bdd0e6b2f674bc0723bc0fd0122f1a4585dc \ + --hash=sha256:443d239d02d9ae55b74015234f2cd8eb09e59fbba30bf60baeb3123ad4c6d5ff \ + --hash=sha256:44524b96481a4c9b8e6c46d6afe43fa1fb485c261e359fbe32b63ff60e3884d8 \ + --hash=sha256:45d04a73c54b6a5fd2bab91a4b5bc8b426949586e61340e212a8484919183859 \ + --hash=sha256:46f48482c1a4748ab2773f75fffbdd1951eb59794e32788834b945da857c47a8 \ + --hash=sha256:4790c9d5dd565ddb3e9f656092f57268951398cef52e364c405ed3112dc7c7c1 \ + --hash=sha256:4bc262ace5a1a7dc3e2eac2fa97b8257ae795389f688b5adf22c5db1e2431c43 \ + --hash=sha256:4c3f8a0d4802df34fcdbeb3dfe3a4d8c9a530baea8fafdf80816fcaac5379d83 \ + --hash=sha256:5355527adaa713ab693cbce7c1e0ec71682f599f61b128cf19d07e5c13c9b1f1 \ + --hash=sha256:555ed147cbe8c8f76e72a4c6cd3b7b761cbf9987891b9448808148204aed74a5 \ + --hash=sha256:55d42a0ef2bdf6bc81e1cc2d49d12460f63c6ae1423c4f4851b828e454ccf6f1 \ + --hash=sha256:59195dc244fc183209cf8a93406889cadde47dfd2f0a6b137783aa9c56d67c85 \ + --hash=sha256:59714ab0a5af25d723d8e9816638faf7f4254234decb7d212715c1aa71eee7be \ + --hash=sha256:5b3a5c8089eed498a3af23ce87a80805ff98f6ef8f7bdb70bd1b7dae5105f6ac \ + 
--hash=sha256:5d6790ff400254137b81b8053b34417e2c46921e302d655181d55ea46df58cf7 \ + --hash=sha256:5df559e9e7644d9042f626f2c3997b555f347d7a855a15f170b253f6c5bfe358 \ + --hash=sha256:5fa01b3d5e3b7d97efab65bd3d88f164e289ec323a8c033c5c38e53ee25c007e \ + --hash=sha256:61490d57e82e23b45c66f96184237994bfafa914433b8cd1a9bb57fecfced59d \ + --hash=sha256:6168af0be75bba990a39f9431cdfae5f0ad501f4af32ae62e8856307200517b8 \ + --hash=sha256:64a0fe3f334a40b989812de70160de6b0ec7e3c9e4a04c0bbc48d97c5d3600ae \ + --hash=sha256:64f689ab822f9b5eb6dfc69893b4b9366db1d2420f7db1f6a2adf2a9ca15ad64 \ + --hash=sha256:699c346abc73993962cac7bb4f02f58e438840fa5458a048d3a178a7a670ba86 \ + --hash=sha256:6b96b0b784fe5fd03beffff2b1533dc0d85e92bab8d1b2c24ef3a5dc8fac5669 \ + --hash=sha256:6bde37765564cd22a676dd8101b657839a1854cfaa9c382c5abf6ff7accfd4ae \ + --hash=sha256:6c135708e987f46053e0a1246a206f53717f9fadfba27174a9769ad4befba5c3 \ + --hash=sha256:6c27a7054b5224710fcfb1a626ec3ff4f28bcb89b899148c72873b18210e446b \ + --hash=sha256:6de6a7f622860af0146cb9ee148682ff4d0cea0b8fd3ad51ce4d40efb2f061d0 \ + --hash=sha256:737005088449ddd3b3df5a95476ee1c2c5c669f5c30eed909548a92939c0e12d \ + --hash=sha256:7451ede3560086abe1aa27dcdcf55cd15c96b56f543fb12e5826eee6f721f858 \ + --hash=sha256:7873b65686a6471c0037139aa000d23fe94628e0daaa27b6e40607c90e3f5ec4 \ + --hash=sha256:79af163a4b40bbd8cfd7ca86ec8b54b81121d3b213b4435ea27d6568bcba3e9d \ + --hash=sha256:7aed8118ae20515974650d08eb724150dc2e20c2814bcc307089569995e88a14 \ + --hash=sha256:7cf9bc4508efb18d8dff6934b602324eb9f8c6644749627ce001d6f38a490889 \ + --hash=sha256:7e57906e38583a2cba67046a09c2637e23297618dc1f3caddbc493f2be97c93f \ + --hash=sha256:7ec85994f96a58cf7ed288caa344b7fe31fd1d503bdf13d7331ead5f70ab60d5 \ + --hash=sha256:81f81bbd7cdb4bdc418c09a73809abeda8f263a6bf8f9c7f93ed98b5597af39d \ + --hash=sha256:86aca1616922b40d8ac1b3073a1ead4255a2f13405e5700c01f7c8d29a03972d \ + --hash=sha256:88051c3b7d5325409f433c5a40328fcb0685fc04e5db49ff936e910901d10114 \ + 
--hash=sha256:887ab1f12b0d227e9260558a4a2320024b20102207ada65c43e1ffc4546df72e \ + --hash=sha256:8a06aa1197ec0281eb1d7daf6073e199eb832fe591ffa329b88bae28f25f5fe5 \ + --hash=sha256:8a1dca5507fa1337f75dcd5070218b20bc68cf8844271c923c1b79dfcbc20391 \ + --hash=sha256:8b23cf252f180cda89220b378d917180f29d313cd6a07b2431c0d3b776aae86f \ + --hash=sha256:8d0e09cf4863c74106b5265c2c310f36146e2b445ff7b3018a56799f28f39f6f \ + --hash=sha256:8de567dec6d451649a781633d36f5c7501711adee329d76c095be2178855b042 \ + --hash=sha256:90fb790138c1a89a2e58c9282fe1089638401f2f3b8dddd758499041bc6e0774 \ + --hash=sha256:92f3b3ec3e6008a1fe00b7c0946a170f161ac00645cde35e3c9a68c2475e8156 \ + --hash=sha256:935afcdea4751b0ac918047a2df3f720212892347767aea28f5b3bf7be4f27c0 \ + --hash=sha256:9a0ff7ee28583ab30a52f371b40f54e7138c52ca67f8ca17ccb7ccf0b383cb5f \ + --hash=sha256:9ad08547995a57e74fea6abaf5940d399447935faebbd2612b3b0ca6f987946b \ + --hash=sha256:9b2a4e17bfd68536c3b801800941c95a1d4a06e3cada11c146093ba939d9638d \ + --hash=sha256:9b78430703cfcf5f5e86eb74027a1ed03a93509273d7c705babb547f03e60016 \ + --hash=sha256:9d0f92b78cfc3b74a42239fdd8c1266f4715b573204c234d2f9fc3fc7a24f185 \ + --hash=sha256:9da162b718b12c4219eeeeb68a5b7552fbc7aadedf2efee440f88b9c0e54b45d \ + --hash=sha256:a00c91104c173c9043bc46f7b30ee5e6d2f6b1149f11f545580f5d6fdff42c0b \ + --hash=sha256:a029be818059870664157194e46ce0e995082ac49926f1423c1f058534d2aaa9 \ + --hash=sha256:a1b3db5fae5cbce2131b7420a3f83553d4d89514c03d67804ced36161fe8b6b2 \ + --hash=sha256:a4cf32a26fa744101b67bfd28c55d992cd19438aff611a46cac7f066afca8fd4 \ + --hash=sha256:aa0bf113d15e8abdfee92aa4db86761b709a09954083afcb5bf0f952d6065fdb \ + --hash=sha256:ab47fe727c13c09d0e6f508e3a49e545008e23bf762a245b020391b621f5b726 \ + --hash=sha256:af22763a0a1eff106426a6e1f13c4582e0d0ad89c1493ab6c058236174cd6c6a \ + --hash=sha256:af9d4fd79ee1cc8e7caf693ee02737daabfc0fcf2773ca0a4735b356c8ad6f7c \ + --hash=sha256:b1fef1f13c842a39a03409e30ca0bf87b39a1e2a305a9924deadb75a43105d23 \ + 
--hash=sha256:b2eff8ee57c5996b0d2a07c3601fb4ce5fbc37547344a26945dd9e5cbd1ed27a \ + --hash=sha256:b4c4fbbcff474e1e5f38be1bf04511c03d492d42eec0babda5d03af3b5589374 \ + --hash=sha256:b8a4131698b6992b2a56015f51646711ec5d893a0b314a4b985477868e240c87 \ + --hash=sha256:b8a7acf04fda1f30f1007f3cc96d29d8cf0a53e626e4e1655fdf4eabc082d367 \ + --hash=sha256:ba783541be46f27c8faea5a6645e193943c17ea2f0ffe593639d906a327a9bcc \ + --hash=sha256:be0744661afbc4099fef7f4e604e7f1ea1be1dd7284f357924af12a705cc7d5c \ + --hash=sha256:be3964f7312ea05ed283b20f87cb533fdc555b2e428cc7be64612c0b2124f08c \ + --hash=sha256:be806e2961cd390a89d6c3ce8c2ae34271cfcd05660f716257838bb560f1c3b6 \ + --hash=sha256:bec77545d188f8bdd29d42bccb9191682a46fb2e655e3d1fb446d47c55ac3b8d \ + --hash=sha256:c10d92fb6d7fd827e44055fcd932ad93dac6a11e832d51534d77b97d1d85400f \ + --hash=sha256:c3782fb753aa825b4ccabc04292e07897e2fd941448eabf666856c5530277626 \ + --hash=sha256:c9ce7a9e967afc0a2af7caa0d15a3e9c1054815f73d6a8cb9225b61921b419bd \ + --hash=sha256:cb0702c12983be3b2fab98ead349ac63a98216d28dda6f518f52da5498a27a1b \ + --hash=sha256:cbc619e84a5e3ab2d452de831c88bdcad824414e9c2d28cd101f94dbdf26329c \ + --hash=sha256:ce4ed8e0c7dbc5b19352b9c2c6131dd23b95fa8698b5cdd076307a33626b72dc \ + --hash=sha256:ce96ab0bdfcef1b8c371ada2100767ace6804ea35aacce0aef3aeb4f3f499ca8 \ + --hash=sha256:cf824aceaeffff029ccfba0da637d432ca71ab21f13e7f6f5179cd88ebc77a8a \ + --hash=sha256:d2a81bdcfde4245468f7030a75a37d50400ac2455c3a4819d9d550c937f90ab5 \ + --hash=sha256:d2cc2b34f9e1d31ce255174da82902ad75bd7c0d88a33df54a77a22f2ef421ee \ + --hash=sha256:d2f184336bc1d6abfaaa1262ed42739c3789b1e3a65a29916a615307d22ffd2e \ + --hash=sha256:d3c622c39f04d5751408f5b801ecb527e6e0a471b367f420a877f7a660d583f6 \ + --hash=sha256:d7cf5e726b6fa977e428a61880fb108a62f28b6d0c7ef675b117eaff7076df49 \ + --hash=sha256:d85d784c619370d9329bbd670f41ff5f2ae62ea4519761b679d0f57f0f0ee267 \ + --hash=sha256:d93ebdb82363d2e7bec64eecdc3632b59e84bd270d74fe5be1659f7787052f9b \ + 
--hash=sha256:db8a6313dbac934193fc17fe7610f70cd8181c542a91382531bef5ed785e5615 \ + --hash=sha256:dbc2ab5d10544eb485baa76c63c501303b716a5c405ff2469a1d8ceffaabf622 \ + --hash=sha256:dbd749cff1defbde270ca346b69b3baf5f1297213ef322254bf2a28537f0b046 \ + --hash=sha256:dc662bc9375a6a394b62dfd331874c434819f10ee3902123200dbcf116963f89 \ + --hash=sha256:dc6b0d5a1ea0318ef2def2b6a55dccf1dcaf77d605672347271ed7b829860765 \ + --hash=sha256:dc79d192fb76fc0c84f2c58672c17bbbc383fd26c3cdc29daae16ce3d927e8b2 \ + --hash=sha256:dd2c1d27ebfe6a015cfa2005b7fe8c52d5019f7bbdd801bc6f7499aab9ae739e \ + --hash=sha256:dea0808153f1fbbad772669d906cddd92100277533a03845de6893cadeffc8be \ + --hash=sha256:e0d7151a1bd5d0a203a5008fc4ae51a159a610cb82ab0a9b2c4d80241745582e \ + --hash=sha256:e14aab02258cb776a108107bd15f5b5e4a1bbaa61ef33b36693dfab6f89d54f9 \ + --hash=sha256:e24d8031a2c62f34853756d9208eeafa6b940a1efcbfe36e8f57d99d52bb7261 \ + --hash=sha256:e36c80c49853b3ffda7aa1831bf175c13356b210c73128c861f3aa93c3cc4015 \ + --hash=sha256:e377e4cf8795cdbdff75b8f0223d7b6c68ff4fef36799d88ccf3a995a91c0112 \ + --hash=sha256:e3acb9c16530362aeaef4e84d57db357002dc5cbfac9a23414c3e73c08301ab2 \ + --hash=sha256:e3dc8d4ede2dbae6c0fc2b6c958bf51ce9fd7e9b40c0f5b8835c3fde44f5807d \ + --hash=sha256:e6491658dd2569f05860bad645569145c8626ac231877b0fb2d5f9bcb7054089 \ + --hash=sha256:eb91d252b35004a84670dfeafadb042528b19842a0080d8b53e5ec1128e8f433 \ + --hash=sha256:f0396e894bd1e66c74ecbc08b4f6a03dc331140942c4b1d345dd131b68574a60 \ + --hash=sha256:f09c9d4c26fa79c1bad927efb05aca2391350b8e61c38cbc0d7d3c814e463124 \ + --hash=sha256:f3cd110e02c5bf17d8fb562f6c9df5c20e73029d587cf8602a2da6c5ef1e32cb \ + --hash=sha256:f7a37dd208f0d658e0487522078b1ed68cd6bce20ef4b5a915d2809b9094b410 \ + --hash=sha256:fae4a01ef8c4cb2bbe92ef2063149596907dc4a881a8d26743b3f6b304713171 \ + --hash=sha256:fc327f4497b7087d06204235199daf208fd01c82d80465dc5efa4ec9df1c5b4e \ + --hash=sha256:fcc01c57ce6e70b728af02b2401c5bc853a9e14eb07deda30624374f0aebfe42 \ + 
--hash=sha256:fde355b02934cc6b07200cc3b27ab0c15870a757d1a72fd401aa92e2ea3c6bfe + # via + # jsonschema + # referencing +snowballstemmer==3.0.1 \ + --hash=sha256:6cd7b3897da8d6c9ffb968a6781fa6532dce9c3618a4b127d920dab764a19064 \ + --hash=sha256:6d5eeeec8e9f84d4d56b847692bacf79bc2c8e90c7f80ca4444ff8b6f2e52895 + # via sphinx +sphinx==8.2.3 \ + --hash=sha256:398ad29dee7f63a75888314e9424d40f52ce5a6a87ae88e7071e80af296ec348 \ + --hash=sha256:4405915165f13521d875a8c29c8970800a0141c14cc5416a38feca4ea5d9b9c3 + # via + # -r /workspaces/json/.devcontainer/S-CORE/requirements.in + # sphinx-data-viewer + # sphinx-design + # sphinx-needs + # sphinxcontrib-jquery + # sphinxcontrib-plantuml +sphinx-data-viewer==0.1.5 \ + --hash=sha256:a7d5e58613562bb745380bfe61ca8b69997998167fd6fa9aea55606c9a4b17e4 \ + --hash=sha256:b74b1d304c505c464d07c7b225ed0d84ea02dcc88bc1c49cdad7c2275fbbdad4 + # via sphinx-needs +sphinx-design==0.6.1 \ + --hash=sha256:b11f37db1a802a183d61b159d9a202314d4d2fe29c163437001324fe2f19549c \ + --hash=sha256:b44eea3719386d04d765c1a8257caca2b3e6f8421d7b3a5e742c0fd45f84e632 + # via -r /workspaces/json/.devcontainer/S-CORE/requirements.in +sphinx-needs==5.1.0 \ + --hash=sha256:23a0ca1dfe733a0a58e884b59ce53a8b63a530f0ac87ae5ab0d40f05f853fbe7 \ + --hash=sha256:7adf3763478e91171146918d8af4a22aa0fc062a73856f1ebeb6822a62cbe215 + # via -r /workspaces/json/.devcontainer/S-CORE/requirements.in +sphinxcontrib-applehelp==2.0.0 \ + --hash=sha256:2f29ef331735ce958efa4734873f084941970894c6090408b079c61b2e1c06d1 \ + --hash=sha256:4cd3f0ec4ac5dd9c17ec65e9ab272c9b867ea77425228e68ecf08d6b28ddbdb5 + # via sphinx +sphinxcontrib-devhelp==2.0.0 \ + --hash=sha256:411f5d96d445d1d73bb5d52133377b4248ec79db5c793ce7dbe59e074b4dd1ad \ + --hash=sha256:aefb8b83854e4b0998877524d1029fd3e6879210422ee3780459e28a1f03a8a2 + # via sphinx +sphinxcontrib-htmlhelp==2.1.0 \ + --hash=sha256:166759820b47002d22914d64a075ce08f4c46818e17cfc9470a9786b759b19f8 \ + 
--hash=sha256:c9e2916ace8aad64cc13a0d233ee22317f2b9025b9cf3295249fa985cc7082e9 + # via sphinx +sphinxcontrib-jquery==4.1 \ + --hash=sha256:1620739f04e36a2c779f1a131a2dfd49b2fd07351bf1968ced074365933abc7a \ + --hash=sha256:f936030d7d0147dd026a4f2b5a57343d233f1fc7b363f68b3d4f1cb0993878ae + # via sphinx-needs +sphinxcontrib-jsmath==1.0.1 \ + --hash=sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178 \ + --hash=sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8 + # via sphinx +sphinxcontrib-plantuml==0.30 \ + --hash=sha256:2a1266ca43bddf44640ae44107003df4490de2b3c3154a0d627cfb63e9a169bf + # via -r /workspaces/json/.devcontainer/S-CORE/requirements.in +sphinxcontrib-qthelp==2.0.0 \ + --hash=sha256:4fe7d0ac8fc171045be623aba3e2a8f613f8682731f9153bb2e40ece16b9bbab \ + --hash=sha256:b18a828cdba941ccd6ee8445dbe72ffa3ef8cbe7505d8cd1fa0d42d3f2d5f3eb + # via sphinx +sphinxcontrib-serializinghtml==2.0.0 \ + --hash=sha256:6e2cb0eef194e10c27ec0023bfeb25badbbb5868244cf5bc5bdc04e4464bf331 \ + --hash=sha256:e9d912827f872c029017a53f0ef2180b327c3f7fd23c87229f7a8e8b70031d4d + # via sphinx +typing-extensions==4.15.0 \ + --hash=sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466 \ + --hash=sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548 + # via referencing +urllib3==2.5.0 \ + --hash=sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760 \ + --hash=sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc + # via requests diff --git a/.dotstop.dot b/.dotstop.dot new file mode 100644 index 0000000000..e8c2d8c40a --- /dev/null +++ b/.dotstop.dot @@ -0,0 +1,475 @@ +# This file is automatically generated by dotstop and should not be edited manually. +# Generated using trustable 2025.8.5. 
+ +digraph G { +"TT-CHANGES" [sha=e276949659e77f8f453a9b32798f607abdfda44450bb10bfbca7a20d68835f7a]; +"AOU-01" [sha="0f00a2c7c6ef352a52c2e9e2c45102c97e8906fa5f0e7ddfcbc252af551cd179"]; +"AOU-02" [sha=e8e0ae5f062d031ce5517f939a2223198e9cc8f12aba943f42f01f5def05d1a2]; +"AOU-03" [sha="9175975af29dcb8a7c94a1b4bf18a4f36f6ffac904351c6dd84bcbb7ee4abd9b"]; +"AOU-04" [sha="9a5f50d14e036a085c64e954936bd579f4fd7d816aaddedb39da34095877e82b"]; +"AOU-04_CONTEXT" [sha="466c72fcf27a1cbf322c3c4086cc7cf19825b314236567372289204616d2d56a"]; +"AOU-05" [sha=ff2205fede4cc18efe9162008881729cf37fb3f5b0a41ff3d9143aca9d656ad3]; +"AOU-06" [sha="20799cb377d8ad56d8bb6231c7246d2fc25c224fad0c9c9b4b83e25710d7a589"]; +"AOU-07" [sha=bff000a8ba9254956cdab9045e6f7b38e3019180eb95011cf8541e9928ad8d44]; +"AOU-08" [sha=f25753b314cc921d175ffff9be39cf5a43bba651c5bdcba0f172aea02d792b34]; +"AOU-09" [sha=adce35b590ac6450379cadba0c6cb38874626134066b06823c3c2d18f7ce2cc7]; +"AOU-10" [sha="36eb0304bb55d055e342424424efa617b095b582d1e87d71818095dc1212ece7"]; +"AOU-11" [sha="6a5174cee0e66070ab1c39a14c0aede872e56fb928555236ab3bffe1876b7048"]; +"AOU-14" [sha="4bff38e002c0df47062d7ab632094ec5ee0377c0230647e7acd54945ea59ddac"]; +"AOU-15" [sha=f4d2fdcacc3c3bd743af3ff5c2a08ceb650e59015ed846c35ddb4de104e80cec]; +"AOU-16" [sha="4e08bac839cba01a4dc2a794bd2d1254d388945c6b79293684e00b9f8d03f31e"]; +"AOU-17" [sha=ce26eef503179bbedca706f43fedc289d8c56580c4b885b3abab440d2de25870]; +"AOU-18" [sha="672193d261861effb34f845d6e3421f47ce85e79d7ef34f62ce5c10bef68e796"]; +"AOU-19" [sha="877c346448621a81b2d1418e610575bfcd556d2f56a14f23f289b90828907928"]; +"AOU-20" [sha="d17ea5c6a3f07b516abc7c51d4b15b1a130a28602fbd4eb6872006a2e5f939d0"]; +"AOU-21" [sha="d1712fcf7a0a00659021a69784f697b5d7b404aeadb7be66553218ba2e6c0bf7"]; +"AOU-22" [sha="32fa7dd195ae91e0095296f05e6201abce2b74876be70623a85055e9566a2b58"]; +"AOU-23" [sha="395e92cd380e016ebb3c6eea32a326084fc21268c3f742e1a5da8ae2aae84ff3"]; +"AOU-24" 
[sha="90739f0593fc0a45bef179240b7be81c565292b8cc3ea84b2f8256a9369e4a32"]; +"AOU-25" [sha="9aba193f20fa8a9ef9b14191adaa545ea3bf88525509d765dd3bb9af8dd29704"]; +"AOU-26" [sha="b4b257a5dae03a485b041d4a83ba9dbe965841ccd0891f67ab3e7d8db6c09db1"]; +"AOU-27" [sha="43da5040f2cab643f2bbb4a0abcf67fba91d68108786604f44f6ef6a91f6681a"]; +"AOU-28" [sha="76887b683a1e5a2f36a029bf80c7489226d1728e6f148f69f7b2167211f1f443"]; +"AOU-29" [sha="cb3302e313dae237d67bf249c61419232a83aa6245af30a0f33c795ab1b3a8f5"]; +"JLEX-01" [sha="1cd16efee6db392909248a77c773e0c5aa76b4c05001ab0b981201004f2162b0"]; +"JLEX-02" [sha=f40046dae47252d62f28c935db90460772263ced0fa9ed1a5e1383436b89daa9]; +"JLS-01" [sha=dce41089598ceff4a3ce3df7a8d3d16183346b785d791d60c8292f51c055946d]; +"JLS-02" [sha=c9e0a37da8c4966962eb93e415e4f0ece9a7d5bfb1fa92a1d0fdcbc1609792af]; +"JLS-03" [sha=ada5a3ba5276cb99217751c67b424cdbd3622f82eb1126f08d425b641e1616e7]; +"JLS-04" [sha=b94e4184cbcedf757d2ddcba96fa81e7274e7631eac355602fdd668c707a696d]; +"JLS-05" [sha="1db1db9bb019291ccb0a896433d906206681c77d1d84aefffd9e965eda6a4dfe"]; +"JLS-06" [sha="76f6dc3eb872223fd59c12baaafd9abe0f801e55767827d764c7d1dbe072c19c"]; +"JLS-07" [sha="63c8c395f271d9d0d26be1111bc84a9cb671646503c14f24faad2bc9a751cda3"]; +"JLS-08" [sha=cceeec93241b6d6ee80fc01c0af03b9d7737fb352e1ddcd98d04a45d75334f79]; +"JLS-09" [sha="346ab52001e8e72c738145a5a8b8f41094a1b873c509eff1d0d25003e066cd56"]; +"JLS-10" [sha=f1306e4884f5b881acf63d5525bb90fffc88039c8454a3184bf9ce6391690903]; +"JLS-11" [sha="87b8d1dde6f1f4ebf33e0ebb873abf969064a8c4c70583c94c63652a877ad73e"]; +"JLS-12" [sha=fe6e433c6cd5259216af447e7122d17ad446a0b37ee957eb9822e381865fe32e]; +"JLS-13" [sha=e6e7c777b9cadcc65d12fc5782cf21fc688e494024bfb186ae7c20db6a46462a]; +"JLS-14" [sha=e94a75986187fec162e8bd7fd270dbf2aae329dd7d8660df63521637b982286a]; +"JLS-16" [sha=cb91a56d69836efd4982b2268e204394ae83a694605dc94b71f3ed0c66890329]; +"JLS-17" [sha=c527a605b2ae7a54ab429bb8e554263bab5a562394f5ba73305aa490f7351c83]; +"JLS-18" 
[sha="58788ef0ea0b9fba710e806de3f255da6c12fbbd12fa7edad07e65c2dbdedf94"]; +"JLS-19" [sha=ac20a2570ed1ca6c0d93ad40f6d93cbac24648b73c144fcb7c92e65ebd2ef411]; +"JLS-20" [sha="33dc0295a6524876c9b641b6ce685c1ddc90f01d780fb76d449c01b51fdc042a"]; +"JLS-21" [sha="742c39b2ba811494cd8cb40199c9a8a0c22c2b29650d6b2e546c21f7bce50ceb"]; +"JLS-22" [sha="252e58151a45b158bae379ceb08aadb6e587e505aac67f2ecc18e43040a1d1de"]; +"JLS-23" [sha=cfd7cb4aa93fbb758807ffe106f96e7b82327ab4d758e69e1f62196c3bc86bd2]; +"JLS-24" [sha=b16224d3ab676c00b313ae91760072d92aed9f20da99b363621effa3e033e012]; +"JLS-25" [sha="8bb517191450f370679dbafd85342e1bbcf797cc84f2a6f1fc119568b534d5e0"]; +"JLS-26" [sha=cf1b73b375697ee56d9788aab79ed01b2730b126a2cc4d7041c9525113e7ed7c]; +"JLS-27" [sha="efd4b438331c155eebaec96cd1eda337567794f8696b327562aaaed5fa8ded69"]; +"JLS-28" [sha="40fac5685e2d4ca1e4736323cba527340d6164b2193e4a43379f6aff0263ac61"]; +"JLS-29" [sha="b19382add018e4356ff2cd5be50d0fae4c1b4f6e5ce90f643789a5b851a12cb4"]; +"JLS-30" [sha="42dbffcdacb67e9af2bd338a1334ca5041d2f51ac48889f7eefd5a5548ed7e91"]; +"JLS-31" [sha="2b941deedbdeff697d063b3f1cf2f97340bff79f16f934bb8be21824e8ccd5c9"]; +"JLS-32" [sha="1ae95153e4082a8aec30a54b320ea8b16b77ed62429a7832718f575fa8ac8fdf"]; +"JLS-33" [sha="16ebc7717e389ac1ca349ead591b4dc5b65997e8c5f78d58d6293cd75bbe0d28"]; +"JLS-34" [sha="3484d9766deace45ecbc0d6892c7114ce7d97a51836399887500a318b3a88dc6"]; +"JLS-35" [sha="b11006d1d5708c3aba84d4f06834ad965d6aebde8619306389a4f8fa655b2dcf"]; +"NJF-01" [sha="548dc86014e093974f68660942daa231271496a471885bbed092a375b3079bd8"]; +"NJF-02" [sha="6ea015646d696e3f014390ff41612eab66ac940f20cf27ce933cbadf8482d526"]; +"NJF-03" [sha="4bd1f8210b7bba9a248055a437f377d9da0b7576c5e3ed053606cf8b5b2febe3"]; +"NJF-04" [sha="72bdd7cdc074bb0663c90bcd892fd7a19b938b1b45c7be35907c1e14629065a6"]; +"NJF-05" [sha="03a865b41d58662602bc76ec1321efe792fce5537ffad1a1d28563b5a1578db5"]; +"NJF-05.1" [sha="9c02b8f95f1d0e9ed47b74187b5fb436e56265dc9f565a39ab76cbe7d584b381"]; 
+"NJF-05.2" [sha="172f35b9a992dac0ef88f7f817607f1214c9f07e073bb6fba4c4244c00dc42e4"]; +"NJF-05.3" [sha="be8090cef51e0ffbfa153db7b936e09c6a3fd6622bc7c97bc9aa754511e6bcbc"]; +"NJF-05.4" [sha="dc0171e8cda48d88cfdaf0a49f7415a61b4b5ae5674c225036f1f1232820384d"]; +"NJF-05.5" [sha="f07eb697ba3aafb6d57d4d133530ef18fcf24d38b328d94a50c87cf8688d92ae"]; +"NJF-05.5_CONTEXT" [sha="2eb79156ba6fdfe9201083ee180c5ffb968462e47e62c5af61da4501d6cc5271"]; +"NJF-05.6" [sha="bac7fe917281dcfbe508dbcf3bd8f5f306c6d3accae9c483fdfb16d6dd4d421c"]; +"NJF-05.6.0" [sha="353b631ea3f819134517c084f4d9d89994e280e7aa34dcb7605e1dcd23b775d0"]; +"NJF-05.7" [sha="76c90a33905d4e4f619bbb7e61ec5111eaae55294bda658648256fba600db783"]; +"NJF-05.7.1" [sha="3c5a03f59a34ba3256fe26fa0526b916f515c2c18bdb35b6b8f5d85b854c0a2f"]; +"NJF-05.7.2" [sha="4c67f4bd9f952d63278902687a1d1ecbe7dd1665f0a75aa837c32f9f1cad7977"]; +"NJF-06" [sha="c0045fd9c304e208eef0d24cbf095df06e29625f2cf2ecf3e6a7a697d92afbab"]; +"NJF-06.1" [sha="e27d98ac06cc4c5eab2af9068f70fa0ce2ef6535ef4e7325d13aa185c1f1dbc9"]; +"NJF-06.2" [sha="9b4726beceff80b02496e4e3b9515a0f442503f6516aa546035b386b3c91ec12"]; +"NJF-06.3" [sha="d96167055bf83441a0939f54fee142051f8e101c0747b2edcda78000d426e1ab"]; +"NJF-06.4" [sha="6b7e35a8d2349198ef04ee6cf75fbe5191fa930cafdacdaffbdcb2a2abda467e"]; +"NJF-06.4.1" [sha="405aaa29d7872555dc4e56fe9b7001cfea974ac241ab30c3aa48cef1fb559034"]; +"NJF-06.4.2" [sha="9c484d1ef9005cf5caae4f2ee354813fd759b79dc044fb470e1a4690d251d31f"]; +"NJF-06.4.3" [sha="830f22381cfb030c8ba18471cbeb6ff29e7d56b61ff504ad5b4978d63b971dbc"]; +"NJF-06.5" [sha="58f3c6b84c42ef8cd41061cb5740fa90d4fed0686e49e9d9799bf51f14b48391"]; +"NJF-06.5.1" [sha="8b9ca021bd30bf676f738052ee62191f75a180ce6d1895cf9e9c9117caea3cea"]; +"NJF-06.5.2" [sha="c3a826ce79831cfea743c3266ab67c23ca4f4b8cc5f28ce7568d4a143a7d223e"]; +"NJF-06.5_CONTEXT" [sha="ceda4128f447965a4548cf10f05866fd27e81bd85cdb6029663a93f8261b94af"]; +"NJF-06.6" 
[sha="8e27724820113d6b22ce65529d2552de1040ff4e9bb27a1a3e9baa9728b9f1ce"]; +"NJF-06.6.0" [sha="7b0356c311c52640334d02e9bff171a9ee8b220a1edd7c0826d74bbef1b799b4"]; +"NJF-06.7" [sha="fc4ecc622f6d4a920ceed31d43cb3f51011f4f2a6f8abfcf84d419b7a37c4054"]; +"NJF-06.7.1" [sha="3c2bdd189b430e001333e8faa8a955dc2b38daf46f8106366cea31abf6f0d668"]; +"NJF-06.7.2" [sha="f1552b1bc992c1901bde9767e1ccd2685f57615acf631f0891c82a48c7c006cf"]; +"NJF-07" [sha="187732eb19d80f473b6ad181a2959fbe96b4fc8eefdd4b269f4ea0865970b6e4"]; +"NJF-07.1" [sha="55d614153a4279112da50d50c5186cf0a0c7b9137458cfb885aba4356cdea3e1"]; +"NJF-07.2" [sha="26759a8f444be1dbb077613ab9e77b88d06a86abe5ab90818b0300c00478cc1f"]; +"NJF-07.3" [sha="f848d075a7efd0e8a3d056dfc0d75d90b68dd2fc4cf5fa4a545151961b98015e"]; +"NJF-07.4" [sha="6b03a26d8cd2e697c54a09ded86479d628caa35e6a563ea183d14309cb59fe54"]; +"NJF-07.5" [sha="e8566d114f95cb2b565dd857c7f4ee96f8124451267766557cf179b23898517d"]; +"NJF-07.6" [sha="8482d9045c9f1cb7cbbd4a4fe6542d7712368112115e2efa87e2d4a6e4cdf0a5"]; +"NJF-07.7" [sha="0e2809c9bf9a2973ed8ac804e6013757d1d37e8b32abeb1e4ff72eb0adb567c4"]; +"NJF-07.8" [sha="bbacca70742da39fa0d8e601569f0f665d8325e47c35bef3740a7a15ef2b9800"]; +"NJF-07.9" [sha="f253a15cb252dabe60ed8ebe01ba17d68be0043be87cd96e472725f1177491cd"]; +"NJF-08" [sha="0d57eb6a9242045903da75764d616751477a215caedbf578344037a0b06676ee"]; +"NJF-08.1" [sha="11356b6386148634facf1c0256a27674024d1ba87047eccc5e36ca1f98521524"]; +"NJF-08.2" [sha="6ee61a26db3346d62f7fb3f478f6719a40064a3276bf74420044223590f91b34"]; +"NJF-08.3" [sha="04003897c08f342a96aaf3555f95bb2a8b424676b3962b99f8bccd2dd6886219"]; +"NJF-08.4" [sha="bfe62c43db172af49e718b18db6a04c4e42c9097b4a9765dd8ae3715ced42748"]; +"NJF-08.5" [sha="f2b61e77e5b3b8c9ffd1eb5306757f83ba7df7a403a63b176922609f3e5612e8"]; +"NJF-08.6" [sha="97c455dbc819102f6f02335b41afa7b39fff8da7f5ab256a1e71aff70c667b1c"]; +"NJF-08.7" [sha="487789064cb51388bf32b6df58487b7981d304a7656cb2541285a2759d29b36b"]; +"NJF-08.8" 
[sha="b19bf6a231816418bb16e0c1cf437db461c509605184f446d3cb1716e3f57e38"]; +"NJF-08.9" [sha="cb19d5ab85029e5e062eb22e8237502eaee780b7d7820837ffad4c97b59cdee7"]; +"NJF-12" [sha="b32df413dc8445568361e6e6d17bc546418d4b87c5d11fbcde6c964c1888bf77"]; +"NJF-12.1" [sha="a674cbf274a4ca36b7a255bc97a38ccf27e2b1fed4bd864f2fc520641a3cd2b6"]; +"NJF-12.2" [sha="c1425f1a9c00762fbb2d3b0a102517f7b4ac11d0263d72bf37a55acf660b1978"]; +"NJF-12.3" [sha="1c39b3e2957dfe427fed733cc11c4809fe462f2a84dd8c2d3e9f9f403381b87a"]; +"NJF-12.5" [sha="617050b4d32fe1d77f314b6b0847f0b227e5e74eee661543574d0eb1e61229aa"]; +"NJF-12.6" [sha="11022773b489e300e2c1c1dceb723454474cfbeb4f0b7eeffe68f2be170a6eeb"]; +"NJF-13" [sha="98ebeb19c546dc79927ac6fc525bacdce9d8232ecb51a5400788de31bfe58ff2"]; +"NJF-14" [sha="d686e8a2ee5586c82c313771a2458980950710243245d4bad54401f3fe13b437"]; +"NJF-14.1" [sha="e3b1be60334b8771b4b798353778bc7128b96de8880c0ef8eb2f02b4acaf11ab"]; +"NJF-14.2" [sha="d0e8d7acb4b1ae15b1a7933338c67b903e654cbe71b8ecdb45b13ac17d680cb6"]; +"NJF-14.3" [sha="ca5d0e466cb8daafb585c6a81b6e807b712d64e5fb3d05bc439ac416a70ca264"]; +"NJF-14.4" [sha="b03ac3f732e1d1b67e02bf2f9861784dfa35b0d3c85cdd8c2af4a35d29422d66"]; +"NJF-14.5" [sha="637c942c9de96154d2aefa0256aad9075211aa3ebf841c416316708f99bf10e4"]; +"NPF-01" [sha="2901988de4ad14d84c452798ecad5e36d6504a003d47a4939e54dca3d8261762"]; +"NPF-01.1" [sha="17257d31617eb053f59eedc9f59e2ab7bf0f8dad653382b3be305179f11b397e"]; +"NPF-01.2" [sha="e228d62eecd258043ef008453871bb91e537ad86b9c6ca31f922098a9227625c"]; +"NPF-01.3" [sha="8ebaa095edb88fbf6be220eeca7ff9b38763bd1120f04619b0764129e98a4f10"]; +"NPF-01.4" [sha="87846a5b24172faef5c00209678787d42376c792ee7ad86b6b983dd08b08d6b4"]; +"NPF-01.5" [sha="22ef08f8152d7aac8d37b91c999e247657491943d402e3fe0b637527ce6e2d0d"]; +"NPF-02" [sha="41e403fe2574dd140edd75d7a6d9f4a288cb8b9d75951580307509c82909f0cc"]; +"NPF-02.1" [sha="b3dda5fff206ce6e93a744e360413efa27005d379c0e08b846f2dfdbcc72d1da"]; +"NPF-02.10" 
[sha="f5e83432d8560e50cf553ea5445d5f9ced7908c7d9941e567d533f2cc374bc5c"]; +"NPF-02.11" [sha="4bf5163b2fcd20d20ec6a851badc58ab700e3848e63e977a89d35cc4f4abf3c3"]; +"NPF-02.11_CONTEXT" [sha="0aa02a53938108602ec3385cf8d3facf5a06a4797c6b247528ea5b75c84dd29a"]; +"NPF-02.12" [sha="65b80f81bbe1b95b25323983d4588c591007eca92ed25f3fd7503f018abb9462"]; +"NPF-02.2" [sha="cea5dddd5bfcc64d8339f314a9814080ee67cb9cd6cb07c9b908c56469d88f42"]; +"NPF-02.3" [sha="124ac652a17e1b49ad60bdc75ee5721f4e373d8ecf4b07e85eb9cdcb35ad8dca"]; +"NPF-02.4" [sha="d276f710dcbeae89cdb625e87df5d4d028759eca117d44bca852180222097dcd"]; +"NPF-02.5" [sha="82fe32a9ed298c7a4376ea13ff8d6241c4d7e117f4929f3d9dfe430cf5cd9498"]; +"NPF-02.6" [sha="4917c952d729adea10df55cf430fec37ea812573c6d794ca2917d13495dadf6d"]; +"NPF-02.7" [sha="bb3307c917594a152c13de27f853443a54a6f00a807ac66f7ce3d27f8b9b2b46"]; +"NPF-02.8" [sha="cc2072fbbacd97c4cb545d54f36bc5415be139973d3927570d6aaab40513378d"]; +"NPF-02.9" [sha="8dc0c25dd85b78528ec3ada93d96adca246c4e8a34d40abee7a3f03334554c27"]; +"NPF-03" [sha="106ab82526356d9f76e56d03260ec8aef025d82e732ec1407b83f7459641f513"]; +"NPF-03.1" [sha="01df39e92c5cbd53682f52b32a533f085d5211bc245f4fef767350427087acea"]; +"NPF-03.2" [sha="4503740220bfec9a6350540000b3e3996eede1ffcb369dd7c417bf50e235ae10"]; +"NPF-03.3" [sha="a15eb546556faef039cad755a34be09d2fd6ee2ea486e3b6ca8db47e1daab710"]; +"NPF-03.4" [sha="cfd34a8c4731459dc9c9023b3819461ad04afd17501b4be61c18f4893810af6f"]; +"NPF-03.5" [sha="db88ab61e0e0f5400586325c68550bec0fdbec7bf9ebef1b19c2616c3cae6338"]; +"NPF-03.6" [sha="09cae32fea0c803150a25fb9d36e3f9e9cab405d468fc3e275da864d0f4049a9"]; +"NPF-03.7" [sha="1839020c28db51204c7d669cf8408a403b0f65d9d40b4ce0db0c52b9ca5309bb"]; +"NPF-04" [sha="60e5e97a864928518ce07d779ac9fc9130d06f82b680b986bbf73054fc52affe"]; +"NPF-04.1" [sha="69f785ba9348217f97348b9f96247e25b3b9213745cc0187e20b08c2c6291143"]; +"NPF-04.2" [sha="23fc9e58fd0396994b33e2195ebf9a606d4df9e7d052a741d645a26007aa80eb"]; +"NPF-04.3" 
[sha="e400cc6ebfcbf6de07a97af69baf44058efb9a7d559315d5d833987236141eae"]; +"NPF-04.4" [sha="c4d68c6940c198521b0d85b7c910833646bbc0597025b3d181d2ea903de77517"]; +"NPF-05" [sha="466ccbd80e4e29218a1a3789ca99e5a1b60ec5fe6e12b37a454c5c7c20d24973"]; +"NPF-05.1" [sha="7972748f0fc78224ef09757ee3b8e68dfdf400637f8da891f616d9387b94744f"]; +"NPF-05.2" [sha="d19ce1210b9775c82db050b9f08ee6f805ee56bdfeeb89c36614784222261036"]; +"NPF-05.3" [sha="a143a6d2343ae469ae01a672706b2c6136e52ecef29b7791e7f96d3adeddee04"]; +"NPF-06" [sha="9c8fac55c919707b5a1d7804036a4ccc7d3bcc9ba651b9524454581a52dbd77e"]; +"NPF-06.1" [sha="d81fa9d29b70d5cce98118fa82f700b70dd78f2d7f6c266552d76deabd273af8"]; +"NPF-06.2" [sha="6ad4d7ab66419db2f1b8f18462bae68478fec227f07abd6a65069251f5558d5a"]; +"NPF-06.3" [sha="d74364c6eb2c3eeede3c3f132c6c4ce4493c1971ade75e5a030fbfd9e1c11f5a"]; +"NPF-06.4" [sha="86cc838833cabcad168c049508f6eadebace6ad730dbc0e40cfd37f1fbe10f0a"]; +"NPF-07" [sha="60dcc75d9bb21fa3e8ba2d80c3e12bda2a51f33ab7755cea9170a7ae96050b63"]; +"NPF-07.1" [sha="45b9e2cb7f5ede285a0007ff4f17a023530f557dbe2b1e2bbc299ad5f2bcf4b9"]; +"NPF-07.2" [sha="85bac5eae5173e89bdc67c8c724fbc40b57d69797d31bf1f0c5c798b7a148f0d"]; +"PJD-01" [sha="9b2099ad82514f2de78b9509e0b3c193054374185de1beacbce359729928d8e6"]; +"PJD-02" [sha="2ad95a7543626869ed3195b95256c03a991df49e51acc6b54513a1ed32d63dc8"]; +"PJD-03" [sha=fcc8152c7a04032d91becabc5577409773b64c381d09bdab7be954ca1916db64]; +"TA-ANALYSIS" [sha="76cb774300c75d7b6a24c872fe691f2f0cd1d36cc4a2493e835f6b861bc79c34"]; +"TA-BEHAVIOURS" [sha="3ec27e29aa991978efe6a56267b98c2a08b27a4aff693d5cf2b01dfe72276570"]; +"TA-CONFIDENCE" [sha=afda09331b2fc3b8d9b1cd921bee66251a65e5543a473c61eb03f9ea11d57eb5]; +"TA-CONSTRAINTS" [sha=cdee0ae34c33110044975efc981e4ac4d63d824aaaac78233b1f3828ef070da3]; +"TA-DATA" [sha="796e4b5851340906568a47c4436c1fa67ca1c48c98a8b6a16b19283d04cea799"]; +"TA-FIXES" [sha="08f069034d1399c43c4321c5f05de72125f4155a8b68b9bbb2029cb679e6ad48"]; +"TA-INDICATORS" 
[sha=a5392ef1e3e40d82ef9a304d1304bf007fa85d961ab8ea268231cb212d763477]; +"TA-INPUTS" [sha="6edcb6e0ea0a918f611d6644da7a28dd5c924a210984cd913e7ff558677a6ea6"]; +"TA-ITERATIONS" [sha=c445bfe866db71df67d4e87353d674b62abce19b52048fac37284d8065d67678]; +"TA-METHODOLOGIES" [sha=d24f6055c79268b1f6e4bdf73951719f192d6f492a7376f85b349a95ccb2a319]; +"TA-MISBEHAVIOURS" [sha=d24fcdeee0ae0fa696f272dc39c8e9e37cce7fb9b2cfd07bcd1451b765be5c6e]; +"TA-RELEASES" [sha="3c924109e9916fb154eadbc2d733a8413ae551a1282b73de389b9ad7540a4e75"]; +"TA-SUPPLY_CHAIN" [sha="0629a5a339322874ad3d51c0c14219ede72195bf514abac82c95ebc3a685ae2c"]; +"TA-TESTS" [sha=afa5e61fc86f70f99f6c60b4f2b51ba7d486705f197048a7dc3fc8fea225385c]; +"TA-UPDATES" [sha="9f0554c79d125a37c7e68b9efbb022dc4853a3e2f87c7d224d30c51f5b9b8435"]; +"TA-VALIDATION" [sha="20f6d87b89d9fbffe427a734ab70906c26ad787e53032a8c4f4d599227704be6"]; +"TIJ-01" [sha="f14e40946ba0f957437e1d4eecd71b9625518b6fdee282e1dab4bb1b247f131a"]; +"TIJ-01.1" [sha="5e43e22dd59a3ffb8e2304053f3150e2eb6ed44b3456acfade28df386b3a1e1c"]; +"TIJ-01.2" [sha="2ca07a1a8d1fc3bd41de5866e22cfc08db1dcbd0ac3b203725d1ef3caa6e6cdd"]; +"TIJ-02" [sha="7b26a640ea0631b2b5abec02fea2e5fa245667e361b7e94be536c81240bdffcb"]; +"TIJ-02.1" [sha="55ebd0ed866d6024f693a4c029af94e13b5d288d3838e783fb5a9bb8117ee53a"]; +"TIJ-02.2" [sha="51cbc1d325ce9d7a37d32a0f09e77110ca677c4a9b746c5c9c5b005065640191"]; +"TIJ-02.3" [sha="f24980a95b72e94c33f8509278bc812a9aa41ad6aaf305f444f98417a26b8128"]; +"TIJ-02.4" [sha="0a3a4ba3f4c11f0ed01ad571e1f4e56d8372a3a7fa08343443e64c5f0cbce960"]; +"TIJ-02.5" [sha="dd2f6c8bf4655923f04d1165febf472994aa4b88c614fbb884eb0119eefd2986"]; +"TIJ-03" [sha="841a669b93bcbb62c9aa82b32ec90e55d1b371e089e11e06607805a9b6a0d16d"]; +"TIJ-03.1" [sha="ab5c3385486411844eccfd704f8c994b17f03f4a101f1df97a9ca968bce7b8a0"]; +"TIJ-03.2" [sha="dd61879bb6ab4d3f8b6560656babc9b104112958273c78d6968c6e5fd1d81055"]; +"TIJ-03.3" [sha="647524ce6d0f1f737773ec535e93783084708d13fc62964bdb872c445c563231"]; 
+"TIJ-03.4" [sha="d2cd0ac0c26e3964dff9f2577685a09aaed9fdba66f7e9a52cd5d48df7ea9e3d"]; +"TIJ-03.5" [sha="99071d9d3fa4993ed244c66f5f2c25158127a12be269549e3911538b8dab218f"]; +"TIJ-04" [sha="809ee8814a51b040a0e7ffaf3d511e8d9e1adf06f2e5b815bfc2bb7b97ed6c53"]; +"TIJ-04.1" [sha="4990ebe15f6c0341b888d153e9ced2f5c701a370543a6ceebd9a2e20efa8a5a5"]; +"TIJ-04.2" [sha="b027882517087cf57c1dd648b63164e186a19d2bde01b3dea7f538866bc0abe4"]; +"TIJ-04.3" [sha="85ebfb650bbe74e9a0c6a925c65ea0538c0a26feed7f70ae23f6d1bb8c527b24"]; +"TIJ-05" [sha="0d4292965afdeef6ec0ac46f3cd5046f94d0bdb6d97019b9d66f18581af184a6"]; +"TIJ-05.1" [sha="13f7c5e1061e5c9c5cc6e1db2c174742094756b744e078ea68b416d48291b111"]; +"TIJ-05.2" [sha="9305d4402b18ee18637160134d9f081ec00db70f688800e4a5cdb04e404c0e77"]; +"TIJ-05.3" [sha="867bb999a85e11651361fc503d5b9dbce8f02c0df1344b17a36e4f4de076a2c9"]; +"TIJ-05.4" [sha="2f732cbec8e0396b36f949d2296a215839a0fc0d5b33eab94c4dd86fa5475d8c"]; +"TIJ-05.5" [sha="3c3d25a4bd57eb60169d67731ba401770cfb5f7d82486a5eaaf041a0a18a1703"]; +"TIJ-06" [sha="bad18df20df71bad0ab129972668905371100cb1fb5a8e41b6cee32c983757de"]; +"TRUSTABLE-SOFTWARE" [sha="6513a163e6eac72c930253e4bc0404be5ea625d1dec436104991a565df4baaa3"]; +"TT-CONFIDENCE" [sha="507f0a905667e1d91265a4e577534eb51ebc0b3e84ffc1956cd0d80dc61b6d3a"]; +"TT-CONSTRUCTION" [sha="3752c6a34c0cc3ef905e000e635a7373748976744f3d1f75f1c2ed172366e350"]; +"TT-EXPECTATIONS" [sha="362eb86c872fb76b2a1075ff978252112bbad0a5fb3041895381f8c76b64c5e6"]; +"TT-PROVENANCE" [sha="4607bf9c0527508673fa37c110b9cf5f2ff0567c324cf728623f1e8ff094be32"]; +"TT-RESULTS" [sha="382987171ac6dc0d1114f5c0dbb77452300c14596514bbab126ae1f7d1bbb842"]; +"WFJ-01" [sha=f826a622e19578c15e165ae5d6b1e31f8ec850140a2f1ccaf513f850919d33ee]; +"WFJ-02" [sha=a3928233d7b8f5803696e469a337d19072680a06480a898471928b5ebe55d98b]; +"WFJ-03" [sha="761472e4e063ce91d94004e258aa652b5352abb41e8f06ffe20eaeaf10773e1b"]; +"WFJ-04" [sha=b3f70654d2af3deb23a1ae36f98c74bf0407fdc096e4c0cd3b237656f48caae3]; 
+"WFJ-05" [sha=b69dab4f37d6ba3456e2e5cdd7ee876bac9adfe41867e5d34466a6f378d5330b]; +"WFJ-06" [sha="4d2cce160d2ba9411250ecafed7860f1dd61974ad3cbde9e06a47b3160b5df82"]; +"WFJ-07" [sha=d5574f1c3e4d3c15c6d5bc517cfa4f345f06625fb1688a185a1dcc22e7149df7]; +"WFJ-08" [sha="6897f40df85da4d74a61476dc4403d56c6df324a3c6fe9bd984a6d20941e479c"]; +"TT-CHANGES" -> "TA-FIXES" [sha=d9dc2ad1dcbfde839249e8df9eb89ef978bdfee7b7b4933fae12e10cbc91762b]; +"TT-CHANGES" -> "TA-UPDATES" [sha=f8948db2f344f4cdd5bdc71dc54e63b446f40af09235c37f5d5cf59dcfdfbfa0]; +"JLEX-01" -> "WFJ-01" [sha="11b32bc907d7d32480c79e5254b00d0085e0c0004cc6e6a181956193b4b2994c"]; +"JLEX-01" -> "WFJ-02" [sha="2aff8463b59cb1ad59ad3c2030c56df5310d89a481b74ee57bcd51301301a554"]; +"JLEX-01" -> "WFJ-03" [sha="42bbbf1cf290f97754ead132781123cecac0c82fd7c64bd573482ff7b9c4d977"]; +"JLEX-01" -> "WFJ-04" [sha=cf884c5fda5afcab2344ddcbe104e150af10a928c489253766e5110d4dda0c00]; +"JLEX-01" -> "WFJ-05" [sha="38075a8b1ad506173bbbe9a75569e3ecb8fa83c791813aeef538dbb96fffaf44"]; +"JLEX-01" -> "WFJ-06" [sha=c4e0b72ab4cfa28cbe062b6e9f2fec5ddf3269da84b469635c041595605fd7e2]; +"JLEX-01" -> "WFJ-07" [sha="3a340a4a30452bfd3149680d4f83f136f7f71237c7cfa16d38e9a4ac6b58a512"]; +"JLEX-01" -> "WFJ-08" [sha="80844115bccc3a6e031decd0d622bb577e62b2c02a991de38a86092a451f2f66"]; +"JLEX-02" -> "PJD-01" [sha="68498a21fedc131c632ea01e9940e3e31ddce311abcccb2326c12961512324aa"]; +"JLEX-02" -> "PJD-03" [sha="85f84739619779248e1d3008f46a076fd8402b1f49b57c2c0bc7d061ac16dd85"]; +"JLS-24" -> "NPF-07.2" [sha="cf75897c7913bf41c032c8cd77f4acd3a0309202751ecc6924382733f2564d83"]; +"JLS-24" -> "TIJ-01" [sha="45ca0773bac381a23a26760394c77a8ee56038c50efa8e163ea5cc9e33d44cac"]; +"JLS-24" -> "TIJ-02" [sha="79c6b04115088c9e1736d97659efd0f8f1b492f023968d31f0936f3ac70a4c34"]; +"JLS-24" -> "TIJ-03" [sha="cc695a497c23efc144c236df2dd170e550ea169de043debe07aae24b0c233385"]; +"JLS-24" -> "TIJ-04" [sha="13e8b6b8802b2caccdf3ce89dbb6fbb645688888e886eea3937643e7b0479a24"]; +"JLS-24" -> 
"TIJ-05" [sha="75980155c182dcaa3298cf2fd6cd8d328d31ae081c78e300cc75a51b0136ceff"]; +"JLS-24" -> "TIJ-06" [sha="9a1ac607f2051801a39ddab325cb6bbcbc178abebfa8e1e6397c12cec99d531b"]; +"NJF-05" -> "NJF-05.1" [sha="05348afa175a4660f04bc0ac52fb14753af07acc3f62bb6a5309bbf9114a2110"]; +"NJF-05" -> "NJF-05.2" [sha="a78527f08dba706b3ac22d9721f746f687ad81dfc9df5a7700625c7ff964b0f1"]; +"NJF-05" -> "NJF-05.3" [sha="79b6420d97afeaf3838359a84be73b6c9d21f1e8c78ef9ef2cc6619d35e198f3"]; +"NJF-05" -> "NJF-05.4" [sha="0c65e0ee65a59e48fb27d23657e81991bc7d113c61e1b2de0b729f16daab644f"]; +"NJF-05" -> "NJF-05.5" [sha="132c0af8afd6315f760a7fe31648bbfb266abdda4accbb062d7fe9cc6757086c"]; +"NJF-05" -> "NJF-05.6" [sha="fb5d80d658cf7419ad504d09f4c906c575e97d58f78bdf77f3b7e28796e7d783"]; +"NJF-05" -> "NJF-05.7" [sha="0752b6fde78b30747ad945074509664220e408751191a432891b012b7b264a17"]; +"NJF-05" -> "NJF-05.6.0" [sha="7444739bb6eaf619dc2a9f296183f7d19222e21d986225aa65f1008d29b3a6ad"]; +"NJF-05.7" -> "NJF-05.7.1" [sha="1574b667671beb106d90ab9b271c7b427b457001237c1fe4c611d21c1b4f51a5"]; +"NJF-05.7" -> "NJF-05.7.2" [sha="c251be714d3fb6c41757c85738a7484726745f3c523663e121eb40a180b7f5a7"]; +"NJF-06" -> "NJF-06.1" [sha="80e228e2ed380f48e691e5516764d666e074736f913f7482e6c2c0e43fb3792a"]; +"NJF-06" -> "NJF-06.2" [sha="95b2d686865e9dba9bee2be3768557f6905e6b2675a0efb55bdbf5b2aae6fc65"]; +"NJF-06" -> "NJF-06.3" [sha="2994c95618432d4abf2695775d6aee747f7c527e6df1cf4887898273dd7c41b7"]; +"NJF-06" -> "NJF-06.4" [sha="aec981ed17b9b23ebd809122d8fd5d276624b55107298faac6788594c7421b15"]; +"NJF-06" -> "NJF-06.5" [sha="bf35b17b04f1897ca9cfc4d8a7cd7e6ebe5cbbe2a0cd14526d8b2f8941a51824"]; +"NJF-06" -> "NJF-06.6" [sha="8da992a6bb39172ea45fc1b880121987c637c53fe562487e201f84520369adf1"]; +"NJF-06" -> "NJF-06.7" [sha="a01b7e6a9bd301fcfe13beea3d1602210244f5f3d2de348d5233f9e0b24584a6"]; +"NJF-06" -> "NJF-06.6.0" [sha="56a38b56749195c0c886e0569c72ed22a2ad9cec707210aa9076e18f1634a2f8"]; +"NJF-06.4" -> "NJF-06.4.1" 
[sha="b01406356bc5aae7904e78201fab9e3c369ca39283eeabb00ef86fcd1af81628"]; +"NJF-06.4" -> "NJF-06.4.2" [sha="158172e866a77da9c70cbab9c1a61921dafb25e29a5cd8d9c3fe5bcdf15ab47c"]; +"NJF-06.4" -> "NJF-06.4.3" [sha="fb0041b3763ae322685954685ca786758131399a062a785574544bb52424a024"]; +"NJF-06.5" -> "NJF-06.5.1" [sha="fc0bfae84fdbb03dc8b6ad998be0177246f5c682bc9c6ae57575ab4c935a9ce3"]; +"NJF-06.5" -> "NJF-06.5.2" [sha="d5569e956ed2b0d3f525689bece7ac9288569d84e0e22899a364bd911f7719fe"]; +"NJF-06.7" -> "NJF-06.7.1" [sha="0871da49bfb6899b6f06531b34c0b5688b4e536370d5f4b6111841e0c5ada02f"]; +"NJF-06.7" -> "NJF-06.7.2" [sha="f61891fd58eedd99a89123f587ab92ffc0031fa6f8117998f8b6661aab3177b9"]; +"NJF-07" -> "NJF-07.1" [sha="1128cc6e5480d3725aaa37b42ccd78988940f61e5568d9adb20aca167e41e0da"]; +"NJF-07" -> "NJF-07.2" [sha="abb047c901a6547122aed8d47043588b9e7d5f4b6ab5f982d2ef0a2f535fadfb"]; +"NJF-07" -> "NJF-07.3" [sha="3585861b5e53bbf4ae11fa0215f45e4634069ac723af7014bf67ed61c8e9dc99"]; +"NJF-07" -> "NJF-07.4" [sha="3ecdd4d25bd59c5fd936281576f4ce023b3f15368d6427d154bc25c52d17e4e3"]; +"NJF-07" -> "NJF-07.5" [sha="d845ee8e44fc50c7860ad0ea4e45413e475f80d0654c724a64178c4c620191b3"]; +"NJF-07" -> "NJF-07.7" [sha="dc9c9c7f9c57528f6c374755f9ed2e9fcaea16e6ba86cd0155175e4f976565a4"]; +"NJF-07" -> "NJF-07.6" [sha="c5b49e22c215af56c57f80f20b75981d7fd4d900a56921bf749c4b637e19c6ad"]; +"NJF-07" -> "NJF-07.8" [sha="b9932dbc8a10513f16b34ff990489199bb2cb983390328d126450709c20a8ee5"]; +"NJF-07" -> "NJF-07.9" [sha="eb17d78a7a040f84affcf45ae365210641685981371c86628aebe57d1a627efe"]; +"NJF-08" -> "NJF-08.1" [sha="07b82c43480ff1bc6fa40e12cf3c683ac0f325fdabb7e9fcec119c93d4092ad1"]; +"NJF-08" -> "NJF-08.2" [sha="ccb4af6360e3bb15ccdb2765a2bbfd83f3dfce714b486c8380313dbdeaca9da2"]; +"NJF-08" -> "NJF-08.3" [sha="a375b6167407d74251ff115582349d1607dba60202311a314b24e3e93f58cae5"]; +"NJF-08" -> "NJF-08.4" [sha="a385bcfb290b065c7f7056a39379171746b4a093df4c32254e4038134edb8fe8"]; +"NJF-08" -> "NJF-08.5" 
[sha="4c6cf0a968a6acde00b35b5a3208d7b5018d11f2921bb33315fa0d29c661d21f"]; +"NJF-08" -> "NJF-08.6" [sha="9aa3893617810dcaa38d0d8a1db8427d2da6ad80e4d742cc11860d29e8117162"]; +"NJF-08" -> "NJF-08.7" [sha="f1d96f513fce39d0dee8d1b539d8638323b054bfb9e83d7c30d7cb1989e7827a"]; +"NJF-08" -> "NJF-08.8" [sha="f7a1eceed3eb8da5d64b2957dbd091fbe1ebd3e96018347897085e7a3e8e471b"]; +"NJF-08" -> "NJF-08.9" [sha="e97225da45635a8b6253c2c7b965b3293f09c90e777bc4b437bd2b5a231cb2de"]; +"NJF-12" -> "NJF-12.1" [sha="e8a1ecdb2b9c4c9b3913d3dcb817aed3b7e9ff8c9e3cd73e1b3f8266738bdb50"]; +"NJF-12" -> "NJF-12.2" [sha="5cf23fc24541ce1845c0899245620913c9939bd44bce6013d2adaea6b95b80df"]; +"NJF-12" -> "NJF-12.3" [sha="40d00d9cb85016409ea48bf11c0a7824291c40b04f1e648e01938b3789cccd67"]; +"NJF-12" -> "NJF-12.5" [sha="05d1951c1b9c63e9655ba3c0c4a49a1f82e2783aacc14c02bb980734777fd9d8"]; +"NJF-12" -> "NJF-12.6" [sha="6b297494f88a48939e1ebf94f583778e4f059768f278f9553337b3fed542e3cf"]; +"NJF-14" -> "NJF-14.1" [sha="6fc71638e5661f92f03f999112cbf9b1238ebb0d7127a5c9049fa434f46ab10e"]; +"NJF-14" -> "NJF-14.2" [sha="8947536abc1b26ba16a889085e873dc0404809637862034a138dd2e4c8713e5b"]; +"NJF-14" -> "NJF-14.3" [sha="bfe21b5134b6fbe050a89af3c20916a0dfe302af7d4e475ceb78e7619bcb8499"]; +"NJF-14" -> "NJF-14.4" [sha="43620086926cda5d04bf77274d1098046b37cdca6f8e7f6ba3c9b0f87f582ef9"]; +"NJF-14" -> "NJF-14.5" [sha="e9d5a492a22dd6116a7ba99ed52ab38fcfa0711f4d78517582da50741da5daad"]; +"NPF-01" -> "NPF-01.1" [sha="1ed82ec30ff344a4ad557def3adb08258c1aacdd9b547872542ed6e123812dba"]; +"NPF-01" -> "NPF-01.2" [sha="5a0266ba21a00bbddb092bf6be51bf186792ced77647748ca1fbb7f03a29f75b"]; +"NPF-01" -> "NPF-01.3" [sha="c97acccaf495dcac221f13d9c9efa5750e54d15b16c18afe2d4f6eda1b48ce1d"]; +"NPF-01" -> "NPF-01.4" [sha="44254213ddc3ad5a0ca4fabacbe6d3d1affdbb72de9e75cb22e16cde6c7c0e81"]; +"NPF-01" -> "NPF-01.5" [sha="7c4da10ac02b9ff272e3ae603249ada8ea12b011678cebddd2960d4de5488c08"]; +"NPF-02" -> "NPF-02.1" 
[sha="e585c16ea12ceba7e45349a863dda48dfdb8c538a8a89c7bac005a22659bcf67"]; +"NPF-02" -> "NPF-02.2" [sha="740436159f539a20592e3a125ac53fb803f7308cef6427960ff8a5a240c3b1e9"]; +"NPF-02" -> "NPF-02.3" [sha="8bbb8434d35dbabef9aed3b259bc18c2948bfb939f4a16c687d6398a56edb33c"]; +"NPF-02" -> "NPF-02.4" [sha="a9c9ff12fff921cf4ca7ec48e47c36d6f5d1793280ed83979c1760450a2d075c"]; +"NPF-02" -> "NPF-02.5" [sha="e49f51da6cff1776cb5ae9334e965936ad8bf03f121f67959fc0e72082de38b1"]; +"NPF-02" -> "NPF-02.6" [sha="f9f443867b2d409022886cdbe313eba558c3eec74c651060c8bc4b2341191a11"]; +"NPF-02" -> "NPF-02.7" [sha="892124444a73560a57b10331105799a41c3b4412ccef33b7c5d482a44668b7c9"]; +"NPF-02" -> "NPF-02.8" [sha="4761f7058756c1442363d34153090e64620ffec37e654a4ddc5ab6b2a0b7f5d2"]; +"NPF-02" -> "NPF-02.9" [sha="991cab68071d5ffad145936fed7101689f1f47f12c75521b5ba3d16d50cf6868"]; +"NPF-02" -> "NPF-02.10" [sha="31d102a9b50cce35ed04b5216c93ed5345af9a4a998feddb839cfa3191717b47"]; +"NPF-02" -> "NPF-02.11" [sha="d0c88c641531c561674336c9aff4ca5ede8ad135b3de10e1dcd258ba3e488f25"]; +"NPF-02" -> "NPF-02.12" [sha="659b5b34ac95d660cc57b8265ead6bd565262ea03b8afe52994fa87237676bf4"]; +"NPF-03" -> "NPF-03.1" [sha="3381ff8d6767c4c112dea54eac85b442c295de22f4820b38f7f7e7e5f619b1e7"]; +"NPF-03" -> "NPF-03.2" [sha="1ff53239760c6426e1bde2b89c331777fe84079a9522cc6d585cbe675ec73b7e"]; +"NPF-03" -> "NPF-03.3" [sha="e0274a4aa11bf7e56e1cb111e451b62c156764568b4c7c34371cf93a7773c917"]; +"NPF-03" -> "NPF-03.4" [sha="4370c109669cc313ca7e9dccfc9779699c334dee4bc048c3c4f2c181dec30422"]; +"NPF-03" -> "NPF-03.5" [sha="56def8fcbf683620568a7418c081a66af1eed9cde258722423a8eecf25e50f78"]; +"NPF-03" -> "NPF-03.6" [sha="38f040654c11539a3b40e163e47f1376ad897a9dc7c5aad33a0a8857eaa237c4"]; +"NPF-03" -> "NPF-03.7" [sha="9408c9acd90ca8e153bf0045ed6d7a8e55aa2360093051063536eb316169c7bd"]; +"NPF-04" -> "NPF-04.1" [sha="3aa28253b8e7aa18bb70e9770c56747830b2ef7cc2493c9fd51174d1ec9d2ac9"]; +"NPF-04" -> "NPF-04.2" 
[sha="7a3eb901b4d08cf88d22feb3cee97fc657f69c2a05674ccc34bc240beba8bc4c"]; +"NPF-04" -> "NPF-04.3" [sha="2fad2f3882e079b193606a5572054b808c55534d4d2d7f1bcc7d15914521f6ca"]; +"NPF-04" -> "NPF-04.4" [sha="8945a7551af938401ca0f6e14f31dfffd54006d10063f0ace528c528ff80dc90"]; +"NPF-05" -> "NPF-05.1" [sha="6a7724f03f063f8f26a82bf32d3df188255db398c0b77ac1357dc54e3c2c4936"]; +"NPF-05" -> "NPF-05.2" [sha="114f40606a53c3db1855cd395ea05923500fbb6bdafdc9b789d7f6912ae381a4"]; +"NPF-05" -> "NPF-05.3" [sha="ebc924da19728fa082de7b7eb0f475e158ad692d9491164c21a2068d73a125fc"]; +"NPF-06" -> "NPF-06.1" [sha="89c07cc7d98e87e92bb36b904d0a7871f92ea73edc28a85d50e8330a4215c28e"]; +"NPF-06" -> "NPF-06.2" [sha="e70a4b33ab3b17341b030dcdbb21d65465984fa8dcba120e515fc3b72bff2cfc"]; +"NPF-06" -> "NPF-06.3" [sha="b839c997ff91c73ed3912c2587603602b6e71ecc946c1ab7747f5869c0937c42"]; +"NPF-06" -> "NPF-06.4" [sha="54f50e706e9efd7c7850f9e16f803522f016308a7eaeb436f41c62c877bada07"]; +"NPF-07" -> "NPF-07.1" [sha="4e9d52c62edfcf9d7ce63f2a43169bf08ca74006c4cd2cc135ae6adae0f14be2"]; +"NPF-07" -> "NPF-07.2" [sha="96f909184665c1931e931b4d899457f0e860580d0d466f9c5788b0e103d5c232"]; +"PJD-01" -> "NPF-01" [sha="42e8865c09d75823bbc873a9cf5b6c4d22a5a2ca50173538985680599af5fc2d"]; +"PJD-03" -> "NPF-02" [sha="93bac5a2600a55fab08a83b929b0c413b467c517b5335a64d7b3cf453696f132"]; +"PJD-03" -> "NPF-04" [sha="cf8ab9711b44c48641b0eb80aaa89813bfbc1d03d13c0f89303f9d570ff1cd26"]; +"PJD-03" -> "NPF-05" [sha="f98b9078f5bc79010b4a1fadee1d11b8c7304b42a86d9b0ed8778f6e805a3189"]; +"PJD-03" -> "NPF-06" [sha="485d51a5a36e33e3c058d5e810b4070e20b2b6e833b6f3f3e23a7a5cb8f66567"]; +"PJD-03" -> "NPF-07" [sha="d57e01f56da2038315b5bf19f8372f0a5220a64a3c39054c64dfa0095c74add2"]; +"PJD-03" -> "NPF-03" [sha="f9e040e4e39bbde5db6927301afa3119c16f8534abb2d5497d1a6e07d4188b33"]; +"PJD-03" -> "PJD-02" [sha="34c3d9163590aec428486eef28fa3895a3c425b2ceb826d3c1d592d34eefe14e"]; +"TA-ANALYSIS" -> "JLS-17" 
[sha=eac76c7d417f623d9dd1a220fc0b3ec6e2c2b9e16781eefae6d21dbd35d1402d]; +"TA-ANALYSIS" -> "JLS-26" [sha="250c8c20314a242c1daac1278ef5a96d97bce8d943318430a91ee30580b5b29d"]; +"TA-BEHAVIOURS" -> "JLEX-01" [sha="8cd931ef61b7012140344adf54469e943bfc690ee54f12db12777464880061db"]; +"TA-BEHAVIOURS" -> "JLEX-02" [sha=cb26451e31a56b1eb51a4d45283ba4a7c6e898efbd045b59cba10d3c6aa093de]; +"TA-BEHAVIOURS" -> "JLS-03" [sha=cf9211c07452914cb2d0b455f859b26cb2724423eae5187e8cbfdff06d1b5ba3]; +"TA-CONFIDENCE" -> "JLS-08" [sha="506164051180023c8533ea1f6dedf1bad894c3ee6020ff16b002e33b109c2791"]; +"TA-CONFIDENCE" -> "JLS-09" [sha="80bbde95fc14f89acf3dad10b3831bc751943fe4a1d79d5cbf4702416c27530f"]; +"TA-CONFIDENCE" -> "JLS-20" [sha="1bfd214ab8186a3c095262ae503451b8d71ada8db5b13ecc7b906739a05bc102"]; +"TA-CONSTRAINTS" -> "AOU-04" [sha="9466008edc5257d5d6ad6cae05eadbd7e6c63ed10f45f9bbe9166dc5af5db294"]; +"TA-CONSTRAINTS" -> "AOU-05" [sha="ead38077bd84ce52bc7ce9ab1be36ef6d1b62aa7bd30b2a5d5eea3aedfe9da3c"]; +"TA-CONSTRAINTS" -> "AOU-06" [sha=bb3ac58ca7f67d9676503a6c71660abd650268e02d6773cb57dfa07d0743fb40]; +"TA-CONSTRAINTS" -> "AOU-07" [sha="20a82289bbbdf1a9a011afb8b3100c8c14813d36d8bbc019ad017b1b4f4917c7"]; +"TA-CONSTRAINTS" -> "AOU-14" [sha="2fbd87f1b62fa9716033289b62dd7ce94d6e6c8ae5af05e788089f0868ad2dd9"]; +"TA-CONSTRAINTS" -> "AOU-16" [sha="c282ac22782f79b8f05c90d0c19193d7843b5e76dbdf6ed3d7efe6e0055b90be"]; +"TA-CONSTRAINTS" -> "AOU-20" [sha="2827fadc5577feceaad0eec452dd2e561cadb3a9161fdcd40b126a049a36a772"]; +"TA-CONSTRAINTS" -> "AOU-21" [sha="f2624c417be0c4cbeaaa87df2116c0f75ac1a5e7837d6a40b1625a816f9397c8"]; +"TA-CONSTRAINTS" -> "AOU-01" [sha="595da090bb31bf9286c8bb439dcc0cc683ad44639dbdd723dd2cb18d23b7402c"]; +"TA-CONSTRAINTS" -> "AOU-02" [sha="6c0717196dfa3eb65a762e157111e3e070204159c3f73dfda1cb5571d77d5806"]; +"TA-CONSTRAINTS" -> "AOU-03" [sha="6f2926b8706b8c16f572643ef2dade3a98c4d4a74b559e6c54a168ccd5812b72"]; +"TA-CONSTRAINTS" -> "AOU-08" 
[sha="9a882832edbca00f96ff5ec4d3d0e52632e4ec479bd536e8b7af1e0faeed9b1d"]; +"TA-CONSTRAINTS" -> "AOU-09" [sha="b47001a26392bd151e4a03feccb198c74fdfbdfaeb055fd682b59e6b92f0bed5"]; +"TA-CONSTRAINTS" -> "AOU-15" [sha="936c697c984cfadd07db6e2eef5a3f5f540c57d099c7a773c00e551ea15bdf7a"]; +"TA-CONSTRAINTS" -> "AOU-17" [sha="b9866189c44d25116cc7cd47432546f8ea2b3aee0b493b364a89872e11957a69"]; +"TA-CONSTRAINTS" -> "AOU-18" [sha="ca8930b0d06ea3987a85112508f49cfaf1cf77d9a405535f3173975175f15d78"]; +"TA-CONSTRAINTS" -> "AOU-19" [sha="ef2ca2a552493258538bf72c975d4c0ec711cea193adb804765feea86382ec0a"]; +"TA-CONSTRAINTS" -> "AOU-22" [sha="54608fda93406fa3546c2d38ef09d34f7252e2c40bb6bc982decf762ab1c68f0"]; +"TA-CONSTRAINTS" -> "AOU-23" [sha="6a9aa9e8b107c684e68b21b44c363185c5f0668c39307a00758781d65f3e61d4"]; +"TA-CONSTRAINTS" -> "AOU-24" [sha="f75b547858afa4495079cc6c7045603b4775308258710ae92141efb696a72505"]; +"TA-CONSTRAINTS" -> "AOU-25" [sha="68410751488de4c1d3c04505bcc58eaabdce60cda5169843b80aff5697567258"]; +"TA-CONSTRAINTS" -> "AOU-26" [sha="4a2f5289772c2edf42e724049882ab75620e2655aa455314a651ff9566e5b70d"]; +"TA-CONSTRAINTS" -> "AOU-27" [sha="20a7c1f193b7ca51e90d90fc1ea91f5c38c0c008bb236f97b9137c250e038fb9"]; +"TA-CONSTRAINTS" -> "AOU-28" [sha="049d4d8cbdd75d679f8855f2733b3781f7b6b8d70d56f4fecffd5951fff6063d"]; +"TA-CONSTRAINTS" -> "AOU-29" [sha="278f1a8cacd32b255fafdbac1db71a692029a838859bb7c7227435fd85298ab2"]; +"TA-CONSTRAINTS" -> "AOU-10" [sha="54e2cf65bcc7544eda66fd5aca9763008a094dd7389db2463b28c81e1c586375"]; +"TA-CONSTRAINTS" -> "AOU-11" [sha="376c75481202bdc4c6d8f9073f5173ab307859b0495735b204de05e58ef2742c"]; +"TA-DATA" -> "JLS-18" [sha=af2567d28552ab32643e995a9c9c9fd73b2208ce3d5255fba333c8ddd28f72a6]; +"TA-FIXES" -> "JLS-05" [sha=ed0acae690ee7d09884c0e7516c9d2306c646c7d14423d4b69719e9dfa49dff6]; +"TA-FIXES" -> "JLS-04" [sha="873b8bbdac8d45772d9b2d46adbafe7ab75b96a45ca12a15b34bdf813b0bb1df"]; +"TA-FIXES" -> "JLS-11" 
[sha=c0e08a276ecbf04e06b17803e5dfbcb39689ccc0a92750caf968b612f64cfe4a]; +"TA-FIXES" -> "JLS-30" [sha="2d27bb7a8d115044c2365a4a5da9d8ec684de189cf905b6d36f7070a4560c3ed"]; +"TA-FIXES" -> "JLS-29" [sha="90b02f12c24f139d76067af519460332bffe7a6838aa95890d7c63a2f91ea6e0"]; +"TA-FIXES" -> "JLS-28" [sha="9d873f973d068b1f171bb71a49afdaad0aa1fa910d5103632645d6a228d0b7a4"]; +"TA-FIXES" -> "JLS-33" [sha="965714f76be3c687658309b5e02b22229249e76e8842aa55eb22e061302cd14a"]; +"TA-INPUTS" -> "JLS-04" [sha="262db6d430e99ef3a23645c93a1cc5bda1270ceba90b4d8cccb40b1eb85e9860"]; +"TA-ITERATIONS" -> "JLS-10" [sha="6e77b132d4159d65e261e90466537dbf44edc643b44c0671b8c40b994ef08590"]; +"TA-ITERATIONS" -> "JLS-19" [sha="9bc13b823f8b49d742b92a8aaf18b8aeb2bb9b0749f4b6dead241af85aea876c"]; +"TA-METHODOLOGIES" -> "JLS-13" [sha="4e2fb7871a608c98d11b10f4ca4391d69b360419c6a9e1baf7cb40b980fc9e94"]; +"TA-MISBEHAVIOURS" -> "JLS-02" [sha="532ddabfefb6664d9731084a44df220d1ebdb9f840760d7c471cf04dfc8e96ef"]; +"TA-MISBEHAVIOURS" -> "JLS-24" [sha=e8de01ff7c316debcd96afa4b3b6b62be73522e4531214c18b3ad7eec826275e]; +"TA-MISBEHAVIOURS" -> "JLS-25" [sha="56ba396580f90e5a10fd5adfe33864921537d47e21b215a8faf531855af40ecd"]; +"TA-MISBEHAVIOURS" -> "JLS-31" [sha="ff3352e20146a81904da6d8d94b003b4e0acbc2a8a63a73ea017ea0535e45e79"]; +"TA-RELEASES" -> "JLS-14" [sha="1202b9934353436fba927de6762cf62a8fc23ab0815a3c06f9d0a77b55830720"]; +"TA-RELEASES" -> "JLS-21" [sha="5d57d2b547a841bb31f29034b785d9bec1ffb0e495d80e0e356a54391aa22e1b"]; +"TA-SUPPLY_CHAIN" -> "JLS-23" [sha=fe2b810e22c4da9911266183bc8679a56d8dd2d5a76624cd1f3ee329d9b93a08]; +"TA-TESTS" -> "JLS-16" [sha=a4143b13d9ae2553534457603bdca9beb6cca0ee5b8b9bae50cefa97d2519702]; +"TA-UPDATES" -> "JLS-06" [sha="7386ba4dfdca14a2b0c73b6b759ddeee66e0196f164322d552c2867e5c7a4b96"]; +"TA-UPDATES" -> "JLS-07" [sha="9256bec79e828b44dd12d4298483bbab7ab24a1eb542c133ee5392ee5829cb7f"]; +"TA-UPDATES" -> "JLS-12" [sha="45b7cf8eebee7a35ba39b3f990cefe3cbfd79c5f74415c5321026d64d89f5703"]; 
+"TA-UPDATES" -> "JLS-32" [sha="65fb597f506b9ed6aa4ba992256b75c1415cd2237d6744e33f0ce5809b06a011"]; +"TA-UPDATES" -> "JLS-35" [sha="08116185564ed13afb24d006b2c36349ed96f16d8d3e4ec64997b0b042b04186"]; +"TA-UPDATES" -> "JLS-34" [sha="5bdd9ef19c9351c141aa86cc34ded4898b63d0e4e062041c2a6915a503aa5734"]; +"TA-VALIDATION" -> "JLS-01" [sha=a3de6195b2fd041851e2e7b53376274fe5f06bc9642812dbc91f80dfd8763f9c]; +"TA-VALIDATION" -> "JLS-22" [sha="464509465b1cf7045e3f94bbdca6dff5322c52f6a404d5cf02cc7249ce9bb75e"]; +"TIJ-01" -> "TIJ-01.1" [sha="59d60a1c49c4a50d24614fc2e20e4a6304200fc31a3b75f8b77937f79fe838eb"]; +"TIJ-01" -> "TIJ-01.2" [sha="67949547e939ee5e88a515afe3f8862082b412693a5a2d04e3d7ecdddfe6b0d5"]; +"TIJ-02" -> "TIJ-02.1" [sha="3585100ee15a42488fc47f0a86339dfd939d8f6029055f6cf138ddda63fb1924"]; +"TIJ-02" -> "TIJ-02.2" [sha="fc36ff21c12e722fc7abedd8148311713a177a74e149a3775df009ec6a1aab34"]; +"TIJ-02" -> "TIJ-02.3" [sha="894246db4b2ab1e1e3ce18cd7061b265e4691e79b7516e715272e932389a3ed3"]; +"TIJ-02" -> "TIJ-02.4" [sha="3c05436c7259164191c7b6d1199f9090af647ad174b078a616c278a325ad10b8"]; +"TIJ-02" -> "TIJ-02.5" [sha="2bf89cbe38de39f20824eaacaa7a0b60a82c43857af240773537ea76550fc892"]; +"TIJ-03" -> "TIJ-03.1" [sha="7b6d51ada9e4a55eb3c71f3492e9699b72d531fb82e0687b1453664ddb54c0c5"]; +"TIJ-03" -> "TIJ-03.2" [sha="9696c704d877ba54dc00c0bfb698293932c19700f1d82bdda698df14d404fdf8"]; +"TIJ-03" -> "TIJ-03.3" [sha="f9fef773d4db075691fb41c5e42417fcea1913421921bd75728e2229dd241d9e"]; +"TIJ-03" -> "TIJ-03.4" [sha="078e925cde2e621cd1d2fc2ee8e89d779d930dc89d4157770d73ce0ffc3ec20a"]; +"TIJ-03" -> "TIJ-03.5" [sha="874322f75effd246c1ef72f1feaf18b15a6eb1dd9993a34fa92ca329eb1ac9b4"]; +"TIJ-04" -> "TIJ-04.1" [sha="2d66c3727cb4f1840c6b02ed81cee131dad6e35211e9dd67e1d5fc5b29584904"]; +"TIJ-04" -> "TIJ-04.2" [sha="91fee756f8825e2ca6a3f8e49069186c18397f686e52692ecb084d214070a26a"]; +"TIJ-04" -> "TIJ-04.3" [sha="0079068171fee85617e46d2ea0648fabb684cddf03cee55f85db072611baac61"]; +"TIJ-05" -> "TIJ-05.1" 
[sha="1fde4df327707bfbd1c6f02b1efdc06b6f157d84234ece79a31e81f9494f2201"]; +"TIJ-05" -> "TIJ-05.2" [sha="c53f4206c38c4f905d205305cc1c62e32172ff0b6375d4847a14a4365b502c97"]; +"TIJ-05" -> "TIJ-05.3" [sha="6a3a7b60f4c973273b2263a4f79c487d242041bab0ab456296f7e6081f7a5b6b"]; +"TIJ-05" -> "TIJ-05.4" [sha="e895a3b5b79b6e6e73d832aab9f3b70fc9988f8c29fdc69f87ed8596b2a0b401"]; +"TIJ-05" -> "TIJ-05.5" [sha="972363166eccc1976a0cc2e69299c330a09d94ff4eab5e00667ea4f61054cdd4"]; +"TIJ-06" -> "NPF-01.2" [sha="32bbca01001f8b499b82fdf8f2d7923e0c929fe86961f2a2b9921767e61c40ec"]; +"TIJ-06" -> "NPF-01.3" [sha="1cee6056c286cca8d6d88d02b303c0c7039c920e235c3b82a89000ca254f3b29"]; +"TIJ-06" -> "NPF-01.4" [sha="f8a091f539e9b35be1eda5aeaf8eb166f1d56577ddc8bb4e46a07df514285a02"]; +"TIJ-06" -> "NPF-01.5" [sha="ce5b5a4411736a0483e752bb5b698d857529b1d641293ab5bc4448e055b99d47"]; +"TRUSTABLE-SOFTWARE" -> "TT-CHANGES" [sha=a526e6de925b57edddfbc350de334735ee7ef23828b9e66ba781e8633c9f72df]; +"TRUSTABLE-SOFTWARE" -> "TT-CONFIDENCE" [sha="07cdcfab2c8c5121dd0acecf3771ee674dde8663e4cb335cfb74aa774f10cc5b"]; +"TRUSTABLE-SOFTWARE" -> "TT-CONSTRUCTION" [sha="8598c4138e9dda4691a3cbc1613530bb1a3f1c163edf523e41a9ba532b98fe83"]; +"TRUSTABLE-SOFTWARE" -> "TT-EXPECTATIONS" [sha=f6dba0c755d9ac4c9ed0ed2e08d5d51e6f7f1572e6de5581c90fbdaf3cafa4d4]; +"TRUSTABLE-SOFTWARE" -> "TT-PROVENANCE" [sha=c97824acbd35cf2b4a9e4ee2f66c46333b483eac99ef690e2bb105ef4756e527]; +"TRUSTABLE-SOFTWARE" -> "TT-RESULTS" [sha=b9e5b5fdf1cda120574cd2f351e9876a0a0c683152267d3898e6c161e7bda988]; +"TT-CONFIDENCE" -> "TA-METHODOLOGIES" [sha="5752e4930e6b0dbc6829b053f4bc7e7e054d416a8c9b2e19a1c3dd83d51fba9b"]; +"TT-CONFIDENCE" -> "TA-CONFIDENCE" [sha="2eaf5b9e879128e866585d5016bfde73f1ef1b192915fdb988cba7b6a0e679f2"]; +"TT-CONSTRUCTION" -> "TA-RELEASES" [sha="290d67048ce0b7e9d40d236b01fc79305d3d49d2c4a541ab3fe48d38347d45d5"]; +"TT-CONSTRUCTION" -> "TA-TESTS" [sha=dddbe1b9b7a7fdaf4003a939660dcb547eacfd78b6f446cb4e065047d95efd9a]; +"TT-CONSTRUCTION" -> 
"TA-ITERATIONS" [sha="671795bbd8a789803e29f531e12074129e99f1329d27bc97ad0bbee01d8432db"]; +"TT-EXPECTATIONS" -> "TA-BEHAVIOURS" [sha=bab309ba80ce2c2b1d7146220da91f1f456c03d4aad8a724db777933e8924ebb]; +"TT-EXPECTATIONS" -> "TA-MISBEHAVIOURS" [sha=b9c4c4ce6e39a7171aa8b02c3267172229ff3de17ff5cd2da9839e67334e5453]; +"TT-EXPECTATIONS" -> "TA-CONSTRAINTS" [sha=""]; +"TT-EXPECTATIONS" -> "TA-INDICATORS" [sha=c6b66b2315b853fbe7f4844631f8a522cf0cff8f2984dea65c8b627512efdede]; +"TT-PROVENANCE" -> "TA-SUPPLY_CHAIN" [sha=a9efbde8812834ed5ea620c826a6b41f28219b61a06b00dcd74632685124a8b9]; +"TT-PROVENANCE" -> "TA-INPUTS" [sha=b72b13298357c1738735fc9cc56b0e64cc9fec0124f1721315f64f24faa17f71]; +"TT-RESULTS" -> "TA-DATA" [sha=bdbef171f4a2b69b6f8b47d3b2c9f0642ffb3120ba471c7be0da274a54c4d549]; +"TT-RESULTS" -> "TA-ANALYSIS" [sha="53f912e517e9b33ca019d4a4aac432fee37c3315ea9a155e145b90122f9c8fb7"]; +"TT-RESULTS" -> "TA-VALIDATION" [sha=bc8f3c8b5afd04ec4f77e750b8c82e5bb1c729811895ff49663b904d42d49fdc]; +"WFJ-01" -> "NJF-02" [sha="3b09348f67d0c150745b4aba9644cf3f6ed2424239e68ffd77d217d68738bb95"]; +"WFJ-01" -> "NJF-03" [sha="171e9d25a0d9d01626602f7d88a3d4320d7129916e36d8f221c2c2542705035d"]; +"WFJ-01" -> "NJF-04" [sha="68921b61bd4dfa08dfc4e77960c32632a63dc40c3fa7c0cb3251905b402f00be"]; +"WFJ-01" -> "NJF-01" [sha="3eb3cfe8a4619f6fa90795024e35f674c2df30cceaa171835a5c44cc59183251"]; +"WFJ-02" -> "NJF-07" [sha="7d7122a96f7875f7e5796333e93daed5250727a9e74973ab88eaf157ddf1da4b"]; +"WFJ-03" -> "NJF-08" [sha="25bd4b5a9856f5ad580df2c59c0351b8ab294f474d2a743ba1d1267df8411fda"]; +"WFJ-04" -> "NJF-05" [sha="0bbe85dfa251a7851a89b3cf10fd4a0bb3011c5883b115e057b89def19f719a5"]; +"WFJ-05" -> "NJF-06" [sha="5d7830c32079192a49fb404e12f18a96f3731f33406a3c650856c8a7ec9bb709"]; +"WFJ-05" -> "NJF-13" [sha="f45232098cc437b20104464b227078d23f2f940cda3b77042b6f25f8b196e5d4"]; +"WFJ-07" -> "NJF-12" [sha="9e25064549eb689d9a54dafa20073935d034ee572c9bd6e8f3d15258d108cb3f"]; +"WFJ-08" -> "NJF-14" 
[sha="3a5bb8559e6c1498394df6c568fd2540099fd76ba4729e3a2e6f685bead49014"]; +} diff --git a/.dotstop_extensions/README.md b/.dotstop_extensions/README.md new file mode 100644 index 0000000000..586ad18bd0 --- /dev/null +++ b/.dotstop_extensions/README.md @@ -0,0 +1,464 @@ +# Custom references + +References establish links between the documentation and artifacts of the project, either internal (e.g. lines of code) or external (e.g. files stored on a server). + +For each item of the trustable graph, the hash is calculated by trudag using: + +* its own name +* the text of its own statement +* its normativity status +* for every of its references, the *content* of that reference +* for every of its fallacies, the description and content of the corresponding reference + +Custom references are defined in `references.py`. A (custom) reference is used by adding an object into the list `references` in the header of the item file. The `type` corresponds to the classmethod `type` of a reference class of `references.py`, and the remaining object correspond to the arguments of the constructor. + +## CPPTestReference + +The content of a `CPPTestReference` is given by the lines of code corresponding to a test-case or a section of a test-case in a specified unit-test-file. The sections are identified in the value of "name", where the nested sections are separated by semicolons. + +For the `CPPTestReference` the expected configuration is: +``` +--- +... + +references: +- type: cpp_test + name: "compliance tests from json.org;expected failures" + path: "tests/src/unit-testsuites.cpp" +--- +``` + +## JSONTestsuiteReference + +The `JSONTestsuiteReference` is a variant of the function reference, which is augmented by an external file containing test-data in the form of well- or ill-formed JSON candidate data. +A `JSONTestsuiteReference` is therefore given by the data of a `CPPTestReference` together with a list containing the paths to these external files. 
+The external files are stored in a separate branch of the repository, and their text is loaded via a call to GitHub. +The content of a `JSONTestsuiteReference` is given by the content of the underlying `CPPTestReference` together with the sum of the contents of the external test-suite files. + +For the `JSONTestsuiteReference` the expected configuration is: +``` +--- +... + +references: +- type: JSON_testsuite + name: "compliance tests from json.org;expected failures" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/json_tests/fail2.json" + - "/json_tests/fail3.json" + description: "invalid json" + remove_other_test_data_lines: False # optional, the default value is True +--- +``` + +## FunctionReference + +The content of a `FunctionReference` is given by the code, including all comments, of a C++ function within a class in a specified file in the repository. The specific position, i.e. start- and end-line, of the code within that file is not part of the content. + +For the `FunctionReference` an example is: +``` +--- +... + +references: +- type: function_reference + name: "basic_json::accept" + path: "include/nlohmann/json.hpp" +--- +``` + +Since functions may be overloaded, a `FunctionReference` can be initialised with an optional overload-parameter. +The overload-parameter specifies which implementation of the function is referred to, i.e. if the overload-parameter for the function ``class::function()`` is set to _n_, then the _n_-th implementation when counting the occurrences from top to bottom of ``function()`` within the class ``class`` is used, if it exists; otherwise, an error is thrown. Additionally, it is possible, but not mandatory, to give a description. The full example is: +``` +--- +...
+ +references: +- type: function_reference + name: "basic_json::accept" + path: "include/nlohmann/json.hpp" + description: "the public interface of the `accept`-functionality of nlohmann/json" + overload: 2 +--- +``` + +## WebReference + +The content of a `WebReference` is its URL. This reference is intended to be utilised in case that the content of the web-site is constantly changing (e.g. due to a clock being implemented somewhere on the site), but the reviewer is certain that the type of the content and it being supportive of the statement is fulfilled as long as the website is reachable. An example is `https://introspector.oss-fuzz.com/project-profile?project=json`, where the most recent fuzz-testing report for nlohmann/json is published. + +For the `WebReference`, an example is: +``` +--- +... + +references: +- type: website + url: "https://math.stackexchange.com/" +--- +``` +An example of `WebReference` with non-empty description is +``` +--- +... + +references: +- type: website + url: "https://ncatlab.org/nlab/show/smooth+Serre-Swan+theorem" + description: "Wiki article on the smooth Serre-Swan theorem" +--- +``` + +## WebContentReference + +The content of a `WebContentReference` is its content. This reference is intended to be utilised in case of *static* references, that should not vary in a short time-frame, and whose content is most important for the trustability of the statement. An example is a file located on a github repository, e.g. `https://raw.githubusercontent.com/nlohmann/json/refs/heads/develop/.github/workflows/cifuzz.yml` + +A `WebContentReference` looks identical to a `WebReference` with `type: web_content` instead of `type: website`. + +For the `WebContentReference`, examples of the possible configurations are: +``` +--- +... + +references: +- type: web_content + url: "https://math.stackexchange.com/" +--- +``` +in case of an empty description, and +``` +--- +...
+ +references: +- type: web_content + url: "https://ncatlab.org/nlab/show/smooth+Serre-Swan+theorem" + description: "Wiki article on the smooth Serre-Swan theorem" +--- +``` +in case of a custom description. + +## TimeVaryingWebReference + +The content of a `TimeVaryingWebReference` is given by the content of a changelog, whose default value is `ChangeLog.md`, which mirrors the changelog of nlohmann/json. This reference is intended for websites whose content is constantly changing, so that a `WebContentReference` makes the item un-reviewable, but whose content at the time of an update influences the trustability. An example is `https://github.com/nlohmann/json/pulse/monthly`, which can be used to demonstrate that nlohmann/json is *up to the most recent version* under active development. + +An example of the complete configuration for `TimeVaryingWebReference` is + +``` +--- +... +references: +- type: project_website + url: "https://ncatlab.org/nlab/show/smooth+Serre-Swan+theorem" + description: "Wiki article on the smooth Serre-Swan theorem" + changelog: "ideas/graded/graded_Serre_Swan.tex" +--- +``` +where `description` and `changelog` are optional arguments. + +## ListOfTestCases + +The content of a `ListOfTestCases` is given by the list of test-cases extracted from the unit-tests given in the files in the provided directories. +It is assumed that a unit-test is saved in a file with the name unit-xxx.cpp, and only those files are used to compile the list. +Further, it is assumed that a unit-test-file is structured as + +``` +... +TEST_CASE("my test case") +{ + ... + SECTION("my section") + { + ... + } + ... +} +``` + +and the structure regarding test-cases and (nested) sections of test-cases is extracted. The expected configuration is + +``` +--- +... 
+references: +- type: list_of_test_cases + test_files: + - TSF/tests + - tests/src +--- +``` + +## workflow_failures + +This reference queries `https://github.com/{self._owner}/{self._repo}/actions?query=is%3Afailure+branch%3A{self._branch}` and collects the number of failed workflow runs as its content. +Here, owner, repo and branch are the arguments given to the constructor of the reference. +If no branch is specified, then all failures are collected, i.e. `https://github.com/{self._owner}/{self._repo}/actions?query=is%3Afailure` is queried. +In case the website is un-reachable, or the github layout changes drastically so that the number of failed workflow runs does not exist at the expected location, an error is thrown. + +The expected configuration is + +``` +--- +... +references: +- type: workflow_failures + owner: "eclipse-score" + repo: "inc_nlohmann_json" + branch: "json_version_3_12_0" +--- +``` + +## ItemReference + +Some references support every (directly or indirectly) supporting item of an item. +Instead of repeating these references in each supporting item, these references are listed in the supported item. +The inheritance of the references is then clarified in the documentation by an `ItemReference`. +In the final documentation in human-readable form, an ItemReference simply lists all items of which the references are inherited with hyperlinks. + +To detect the inheritance of references in the content of the supporting items, the content of an ItemReference is the combination of the sha's stored in the .dotstop.dot file of the listed supported items. +If any reference of any of the listed supported items changes, then its sha changes and the review-status of the item becomes false. +After successful re-review, the review-status of the supported items is re-set to true, so that the new sha is stored in the .dotstop.dot file. 
+This automatically sets the review-status of the supporting items, which inherit the references, to false, thereby triggering a re-review of these. +The expected configuration is as follows + +``` +--- +... +references: +- type: item + items: + - ITEM-1 + - ITEM-2 + - ... +--- +... +``` +Here, the elements of the list `items` must be normative nodes of the trustable graph, otherwise an error is thrown. + +## IncludeListReference + +The content of an `IncludeListReference` is given by the list of `#include` lines extracted from a specified source/header file in the repository (for example `single_include/nlohmann/json.hpp`). This reference is useful to document which libraries a file depends on without embedding the full file content into the report. + +Behaviour: +- content: returns the concatenation of all lines that begin with `#include` in the target file as UTF-8 encoded bytes. If no includes are found, the content is `b"No includes found"`. +- as_markdown: renders the found `#include` lines as a C++ code block (```cpp ... ```). If a `description` was provided when constructing the reference, the description is shown as an indented bullet above the code block. +- If the referenced file does not exist or is not a regular file, accessing `content` raises a ReferenceError. + +Usage example: + +``` +--- +... + +references: +- type: include_list + path: "single_include/nlohmann/json.hpp" + description: "List of direct includes of the amalgamated header" +--- +``` + +Notes: +- `description` is optional. +- The reference only extracts lines whose first non-whitespace characters are `#include`. + +# Validators + +Validators are extensions of trudag, used to validate any data that can be reduced to a floating point metric. The resulting scores are used as evidence for the trustability of items in the trustable graph. + +## check_artifact_exists + +The check_artifact_exists script validates the presence of artifacts from GitHub Actions workflows for the current SHA. 
The score is given based on the number of artifacts found vs the number of artifacts expected. + +The available configuration dict keys for check_artifact_names are: + - `check_amalgamation` + - `codeql` + - `dependency_review` + - `labeler` + - `test_trudag_extensions` + - `ubuntu` + +The available configuration dict values for check_artifact_names are: + - 'include' + - 'exclude' + +These indicate whether a certain artifact should be included as evidence for a Trustable graph item. + +## https_response_time + +The automatic validator https_response_time checks the responsiveness of a given website. The expected configuration is as in the example: +``` +evidence: + type: https_response_time + configuration: + target_seconds: 2 # acceptable response time in seconds, integer or float + urls: # list of urls to be checked, list of strings + - "https://github.com/nlohmann/json/issues" + - "https://github.com/nlohmann/json/graphs/commit-activity" + - "https://github.com/nlohmann/json/forks?include=active&page=1&period=&sort_by=last_updated" +``` +A response time of at least the five-fold of the acceptable response time is deemed unacceptable and gives an individual score of zero. +Likewise unacceptable is a response code other than `200`, which gives an individual score of zero. + +The total score is the mean of the individual scores. + +## check_test_results + +The automatic validator `check_test_results` is intended to evaluate the database `MemoryEfficientTestResults.db` which is generated in the ubuntu-Workflow, and which contains the test-report of the most recent workflow run. This database is temporary, and, contrary to `TSF/MemoryEfficientTestResultData.db`, which is persistently stored on the branch `save_historical_data`, not persistently stored. 
+ +The expected configuration is given as follows: + +``` +evidence: + type: check_test_results + configuration: + tests: # list of test-files + - class_lexer + - unicode1 + - strings + database: MemoryEfficientTestResults.db # optional argument, default: MemoryEfficientTestResults.db; path to test-result database from project root + table: test_results # optional argument, default: test_results; name of table in database +``` + +The test-files are called unit-FILE_NAME.cpp. In the configuration, FILE_NAME is expected only, i.e. without the leading unit- and without the file-extension. + +For each test specified in test-files, the number of passed and failed test-cases is calculated, while the number of skipped test-cases is ignored. The score of each test is then the ratio of passed test-cases compared to all non-skipped test-cases; the total score is the mean of the individual scores. + +## check_issues + +The automatic validator `check_issues` is intended to evaluate the json-lists `raw_open_issues.json` and `raw_closed_issues.json` and compare with the list of known issues of nlohmann/json labelled as bug opened since the release of the version of nlohmann/json that is documented. The json lists are generated in the publish_documentation-Workflow, and not persistently stored. + +The expected configuration is given as follows: + +``` +evidence: + type: check_issues + configuration: + release_date: "2025-04-11T00:00:00Z" # release date of the documented version in the format %Y-%m-%dT%H:%M:%SZ + list_of_known_misbehaviours: "./TSF/docs/nlohmann_misbehaviours_comments.md" # path to the list of known misbehaviours relative to the root of the repository + +``` + +In case that the release date is not specified using the expected format, or either of the `raw_open_issues.json` and `raw_closed_issues.json` can not be opened, then the score 0.0 is returned together with an error indicating the warning. 
+ +The list of known misbehaviours collects the known issues labelled as bugs opened in nlohmann/json since the release of the version that is documented. +These issues are collected in a table containing the issue-ID, an indication whether the issue applies to the usage of nlohmann/json within Eclipse S-CORE and a comment, which is printed into the list of known misbehaviours. +From `raw_closed_issues.json`, all issue IDs are collected, which are labelled as bug and opened after the release_date; and from `raw_open_issues.json`, all issue IDs are collected. +If for any of these IDs, it is not explicitly indicated in the list of known misbehaviours that this issue does not apply to Eclipse S-CORE, then the score 0.0 is returned. +Otherwise, the score 1.0 is assigned. + +## did_workflows_fail + +The automatic validator `did_workflows_fail` queries the web-site `https://github.com/{owner}/{repo}/actions?query=event%3A{event}+is%3Afailure+branch%3A{branch}` and looks at the number of workflow run results which is printed at the head of the table. +In case that this number is not zero, a score of 0.0 is returned, and 1.0 otherwise. + +The expected configuration is given as follows: + +``` +evidence: + type: did_workflows_fail + configuration: + owner: "eclipse-score" # owner of the repository + repo: "inc_nlohmann_json" # name of the repository + branch: "json_version_3_12_0" # name of the branch + action: "push" # optional, default is push +``` + +It is of utmost importance that the arguments come with quotation marks. Otherwise, the update helper does not work as intended. + +## coveralls_reporter + +The automatic validator `coveralls_reporter` queries the [coveralls](https://coveralls.io/) API to get the line and branch coverages calculated by the service, which is running on the repository. +Unless the version of `nlohmann/json` documented in this repository changes, it is expected that both coverage numbers remain constant.
+When initialising the reference, the current code coverage is given as a parameter, to which the fetched coverages are compared. +If no branch is specified, then the most recently calculated coverage is fetched, so that it is generally recommended to specify a branch. +Moreover, it is possible to specify the number of decimal digits, which is defaulted to three, when not specified. +The validator returns a score of 1.0 if both fetched coverages rounded to the specified number of decimal digits coincide with the specified ones, and a score of 0.0 otherwise. + +The expected configuration is the following: + +``` +evidence: + type: coveralls_reporter + configuration: + owner: "score-json" + repo: "json" + branch: "main" + line_coverage: 99.186 + branch_coverage: 93.865 + digits: 3 +``` + +## combinator + +The trudag tool does currently not support the use of multiple custom validators for one single TSF item. To work around this, the validator `combinator` is implemented as a meta-validator that executes multiple validators and combines their scores using a weighted average. This enables the validation of complex trustable items that require evidence from multiple sources or validation methods. + +The combinator accepts a list of validators, each with its own configuration and optional weight. Each validator is executed independently, and their scores are combined using the formula: `(score1 * weight1 + score2 * weight2 + ...) / (weight1 + weight2 + ...)`. If no weights are specified, all validators are treated with equal weight (weight = 1.0). 
+ +The combinator supports the following validator types: +- `check_artifact_exists` +- `https_response_time` +- `check_test_results` +- `file_exists` +- `sha_checker` +- `check_issues` +- `did_workflows_fail` +- `coveralls_reporter` + +The expected configuration is as follows: + +``` +evidence: + type: combinator + configuration: + validators: + - type: "check_test_results" + weight: 2.0 # optional, defaults to 1.0 + configuration: + tests: + - class_lexer + - unicode1 + - type: "https_response_time" + weight: 1.0 # optional, defaults to 1.0 + configuration: + target_seconds: 2 + urls: + - "https://github.com/nlohmann/json/issues" + - type: "coveralls_reporter" + weight: 1.5 # optional, defaults to 1.0 + configuration: + owner: "score-json" + repo: "json" + branch: "main" + line_coverage: 99.186 + branch_coverage: 93.865 + digits: 3 + - type: "did_workflows_fail" + configuration: + owner: "eclipse-score" + repo: "inc_nlohmann_json" + branch: "json_version_3_12_0" +``` + +All weights must be non-negative. If the sum of all weights is zero, the combinator returns a score of 0.0. The combinator aggregates all exceptions and warnings from the individual validators and returns them alongside the combined score. + +# Data store interface + +The data store interface utilises the built-in the `dump` functionality of trudag to store the trustable score, and to include the development of the trustable score over time into the report. + +Since no persistent data store is established as of now, the current implementation serves as a proof of concept, where the collected data are stored on a separate branch of the repository. + +The input of the data store are the data generated by the trudag tool during the `score` or `publish` operation. 
These data have the format: + +``` +[{"scores": [{id: "ID-1", "score": score}, ...], "info": {"Repository root": "my_repository", "Commit SHA": "sha_123", "Commit date/time": "%a %b %d %H:%M:%S %Y", "Commit tag": "my_tag", "CI job id": 123, "Schema version": 123, "Branch name": "my_branch"}}] +``` + +## push + +This functionality writes the generated data into an sqlite database `TrustableScoring.db` located in the folder `TSF`. This database contains two tables, `commit_info`, where the metadata of "info" are stored, and `scores`, where the scores are stored, and which references `commit_info` via the date as foreign key. + +It is intended to store data only once per commit. If, for any reason, the same commit generates data more than once, then only the most recent data are stored, and the obsolete data are deleted. This still ensures that the scoring history of the main branch is as complete as possible. + +## pull + +This functionality parses the information stored in `TrustableScoring.db` into the format which is expected by trudag. In case that no data are found, the empty history is returned. 
\ No newline at end of file diff --git a/.dotstop_extensions/__init__.py b/.dotstop_extensions/__init__.py new file mode 100644 index 0000000000..c2d4cd1531 --- /dev/null +++ b/.dotstop_extensions/__init__.py @@ -0,0 +1 @@ +# This file makes the directory a Python package \ No newline at end of file diff --git a/.dotstop_extensions/data_store.py b/.dotstop_extensions/data_store.py new file mode 100644 index 0000000000..e3e8b25d94 --- /dev/null +++ b/.dotstop_extensions/data_store.py @@ -0,0 +1,107 @@ +import sqlite3 +from datetime import datetime +import os + +# global variable -- path to persistent data storage +persistent_storage = "TSF/TrustableScoring.db" + +def data_store_pull() -> list[dict]: + data = get_my_data() + return data + +def data_store_push(data: list[dict]): + push_my_data(data) + +def get_my_data() -> list[dict]: + # check if persistent data has been loaded + if not os.path.exists(persistent_storage): + return [] + # initialise connection to persistent storage + connector = sqlite3.connect(persistent_storage) + connector.execute("PRAGMA foreign_keys = ON") + cursor = connector.cursor() + # initialise tables, if not exist + cursor.execute("CREATE TABLE IF NOT EXISTS commit_info(date INTEGER PRIMARY KEY, root TEXT, SHA TEXT, tag TEXT, job_id TEXT, schema_version INTEGER, branch_name TEXT)") + cursor.execute("CREATE TABLE IF NOT EXISTS scores(ID TEXT, score REAL, date INTEGER, PRIMARY KEY (ID, date), FOREIGN KEY(date) REFERENCES commit_info(date))") + # initialise my_data + my_data = [] + # read commit_info + cursor.execute("SELECT * FROM commit_info") + commit_info = cursor.fetchall() + for info in commit_info: + command = f"SELECT * FROM scores WHERE date=={info[0]}" + cursor.execute(command) + scores = cursor.fetchall() + date = datetime.fromtimestamp(info[0]) + date_as_string = date.strftime("%a %b %d %H:%M:%S %Y") + if len(info) == 6: + branch_name = "" + else: + branch_name = info[6] if info[6]!=None else "" + commit = {"Repository root": 
info[1], + "Commit SHA": info[2], + "Commit date/time": date_as_string, + "Commit tag": info[3], + "CI job id": info[4], + "Schema version": info[5], + "Branch name": branch_name + } + score_data = [] + for score in scores: + score_datum = {"id": score[0], "score": score[1]} + score_data.append(score_datum) + my_datum = {"scores": score_data, "info": commit} + my_data.append(my_datum) + return my_data + +def push_my_data(data: list[dict]): + # It is assumed that the folder containing the persistent storage does exist. + # initialise connection to persistent storage; if database itself does not exist, then create + connector = sqlite3.connect(persistent_storage) + connector.execute("PRAGMA foreign_keys = ON") + cursor = connector.cursor() + cursor.execute("CREATE TABLE IF NOT EXISTS commit_info(date INTEGER PRIMARY KEY, root TEXT, SHA TEXT, tag TEXT, job_id TEXT, schema_version INTEGER, branch_name TEXT)") + cursor.execute("CREATE TABLE IF NOT EXISTS scores(ID TEXT, score REAL, date INTEGER, PRIMARY KEY (ID, date), FOREIGN KEY(date) REFERENCES commit_info(date))") + cursor.execute("PRAGMA table_info(commit_info);") + columns = [pragma_info[1] for pragma_info in cursor.fetchall()] + # branch_name was first forgotten + if "branch_name" not in columns: + cursor.execute("ALTER TABLE commit_info ADD COLUMN branch_name TEXT DEFAULT ''") + connector.commit() + # extract data from data + info = data[0].get("info") + scores = data[0].get("scores") + # Currently, the commit date is stored as string. + # Since the local timezone is used and for comparison, + # it would be better to have it as a unix-timestamp. + datum_string = info.get("Commit date/time") + datum = int(datetime.strptime(datum_string, "%a %b %d %H:%M:%S %Y").timestamp()) + # check if current commit coincides with existing commit + cursor.execute("SELECT MAX(date) AS recent_commit FROM commit_info") + if datum == cursor.fetchone()[0]: + print("Only the most recent data for each commit are stored. 
Overwriting obsolete data ...") + # write commit_info + root = info.get("Repository root") + sha = info.get("Commit SHA") + tag = info.get("Commit tag") + job_id = info.get("CI job id") + schema_version = info.get("Schema version") + branch_name = info.get("Branch name") + command = f"INSERT OR REPLACE INTO commit_info VALUES('{datum}', '{root}', '{sha}', '{tag}', '{job_id}', '{schema_version}', '{branch_name}')" + cursor.execute(command) + connector.commit() + # write scores + for score in scores: + id = score.get("id") + numerical_score = score.get("score") + cursor.execute("SELECT COUNT(*) FROM scores WHERE date = ? AND ID = ?", (datum, id)) + if cursor.fetchone()[0]>0: + cursor.execute("DELETE FROM scores WHERE date = ? AND ID = ?", (datum, id)) + connector.commit() + command = f"INSERT OR REPLACE INTO scores VALUES('{id}', {numerical_score}, '{datum}')" + cursor.execute(command) + # don't forget to commit! + connector.commit() + # terminate data-base connection + connector.close() + \ No newline at end of file diff --git a/.dotstop_extensions/references.py b/.dotstop_extensions/references.py new file mode 100644 index 0000000000..999b6fdd83 --- /dev/null +++ b/.dotstop_extensions/references.py @@ -0,0 +1,1010 @@ +from pathlib import Path +from trudag.dotstop.core.reference.references import BaseReference +from trudag.dotstop.core.reference.references import SourceSpanReference +import requests +import sqlite3 +import re + +# Constants +MAX_JSON_LINES_FOR_DISPLAY = 25 +TEST_DATA_REPO_URL = "https://raw.githubusercontent.com/eclipse-score/inc_nlohmann_json/refs/heads/json_test_data_version_3_1_0_mirror/" +NUM_WHITESPACE_FOR_TAB = 4 + +def format_cpp_code_as_markdown(code: str) -> str: + return f"```cpp\n{code}\n```\n" + +def format_json_as_markdown(json_content: str) -> str: + return f"```json\n{json_content}\n```\n" + +def make_md_bullet_point(text: str, indent_level: int = 0) -> str: + indent = '\t' * indent_level + return f"{indent}- {text}\n" + +def 
add_indentation(text: str, indent_level: int) -> str: + indent = '\t' * indent_level + return indent + text.replace('\n', '\n' + indent) + +class CPPTestReference(BaseReference): + """ + Represents a reference to a specific section within a C++ test file. The class + assumes that the C++ test sections are defined using `SECTION("name")` or + # `TEST_CASE("name")` syntax, where the section name can be nested using + colon-separated names (e.g., "testcase1:section1:section2"). We assume that the + section path is unique within the file. + + Additionally, the opening brace `{` must be on the line immediately after the + section declaration, and the closing brace `}` must have the same indentation + as the opening brace. This is the case for the tests from nlohmann_json. + """ + def __init__(self, name: str, path: str) -> None: + """ + Initialize CPPTestReference. + + Args: + name: Section name, use colon-separated for nested sections (e.g., "testcase1:section1:section2") + path: Relative path from project root to the file + """ + self._name = name + self._path = Path(path) + + @classmethod + def type(cls) -> str: + return "cpp_test" + + def get_section(self) -> str: + """Extract the specified section from the C++ test file.""" + with open(self._path, 'r') as file: + lines = file.readlines() + section_start_line = self.find_section_start(lines) + section_end_line = self.find_section_end(lines, section_start_line) + test_section = ''.join(lines[section_start_line:section_end_line]) + return test_section + + def find_section_start(self, file_lines: list[str]) -> int: + """ + This method finds the starting line index of the section in the file. It expects + the section name to be in the format "section1" or "section1:section2". It searches + for the first occurrence of a line containing either SECTION("section1") + or TEST_CASE("section1") where section1 matches the first part of the section name. 
+ This is done iteratively for nested sections until the full section name sequence + is matched. This implicitly assumes that the section paths are unique within the file. + + Args: + file_lines: List of lines from the C++ test file + + Returns: + Line index where the section starts (i.e. the line containing SECTION or TEST_CASE) + """ + section_names = self._name.split(';') + for line_number, line in enumerate(file_lines): + # Check if current line contains a SECTION or TEST_CASE declaration matching the current first section name + section_pattern = f'SECTION("{section_names[0]}"' + test_case_pattern = f'TEST_CASE("{section_names[0]}"' + if section_pattern in line or test_case_pattern in line: + if len(section_names) == 1: + # If we only have one section name left, we found our target + return line_number + else: + # Remove the found section from the list and continue searching for nested sections + section_names.pop(0) + + raise ValueError("Start of section "+self._name+" not found.") + + def find_section_end(self, file_lines: list[str], start_index: int): + """ + Find the ending line index of a C++ test section. + + This method expects C++ test sections to follow the pattern: + SECTION("name") + { + // section content + } + + The opening brace must be on the line immediately after the section declaration, + and the closing brace must have the same indentation as the opening brace. This + is the case for the tests from nlohmann_json. 
+ + Args: + file_lines: List of lines from the C++ test file + start_index: Line index where the section declaration was found + + Returns: + Line index immediately after the closing brace of the section + + Raises: + ValueError: If the section doesn't follow expected brace pattern or + if matching closing brace is not found + """ + # Verify we have a valid line after the section declaration + if start_index + 1 >= len(file_lines): + raise ValueError("Section declaration is on the last line - no opening brace found") + + # replace in every line tabs with spaces to ensure consistency + file_lines_whitespaces = [line.replace('\t', ' ' * NUM_WHITESPACE_FOR_TAB) for line in file_lines] + + # The line after the section starts with " "*n + "{" and the section ends with " "*n + "}" + # We assume that there are only whitespace characters after the opening/ending brace + # Check that the pattern matches the expected format + line_after_start_line = file_lines_whitespaces[start_index + 1] + if not line_after_start_line.strip() == '{': + raise ValueError("Section start line does not match expected pattern (' '*n + '{')") + + # Create the expected closing line by replacing '{' with '}' + end_line = line_after_start_line.replace('{', '}').rstrip() + + # Search for the matching closing brace with same indentation + for line_number in range(start_index + 1, len(file_lines)): + if file_lines[line_number].rstrip() == end_line: + return line_number + 1 + + raise ValueError("Section end not found") + + def remove_leading_whitespace_preserve_indentation(self, text: str) -> str: + """Remove leading whitespace from all lines while preserving relative indentation.""" + lines = text.split('\n') + lines = [line.replace('\t', ' ' * NUM_WHITESPACE_FOR_TAB) for line in lines] + ident_to_remove = len(lines[0]) - len(lines[0].lstrip()) + + # Remove the baseline indentation from all lines + adjusted_lines = [] + for line in lines: + if line.strip(): # Non-empty line + if not 
line.startswith(lines[0][:ident_to_remove]): + # If the indentation is not >= than for the baseline, return the original text + return text + adjusted_lines.append(line[ident_to_remove:] if len(line) >= ident_to_remove else line) + else: # Empty line + adjusted_lines.append('') + + return '\n'.join(adjusted_lines) + + @property + def content(self) -> bytes: + # encoding is necessary since content will be hashed + return self.get_section().encode('utf-8') + + + def as_markdown(self, filepath: None | str = None) -> str: + content = self.content.decode('utf-8') + content = self.remove_leading_whitespace_preserve_indentation(content) + return format_cpp_code_as_markdown(content) + + def __str__(self) -> str: + # this is used as a title in the trudag report + return f"cpp-test: [{self._name}]\n({self._path})" + + +class JSONTestsuiteReference(CPPTestReference): + """ + Represents a reference to one or more JSON testsuite files, where the CPP test + structure is assumed to be as in tests/src/unit-testsuites.cpp and the JSON testsuite + files are assumed to be hosted in the nlohmann/json_test_data repository on github. + + The referenced JSON files are displayed (using the as_markdown function) as well as + the relevant part of the C++ test section that uses them. Both the C++ test file and + the JSON files are included in the content property that is used for hashing. + """ + + def __init__(self, name: str, path, test_suite_paths: str, description: str, remove_other_test_data_lines: bool = True) -> None: + """ + Initialize JSONTestsuiteReference. 
+ + Args: + name: Section name in the C++ test file, use colon-separated for nested sections + path: Relative path from project root to the C++ test file + test_suite_paths: List of relative paths to JSON test files in the nlohmann test data repository + description: Human-readable description of what this test suite covers + remove_other_test_data_lines: If True, removes lines from the markdown (not the content used for hashing) that include 'TEST_DATA_DIRECTORY' and '.json"' + + Raises: + ValueError: If test_suite_paths is not a list of strings + """ + super().__init__(name, path) + self._path = Path(path) + if not isinstance(test_suite_paths, list): + raise ValueError(f"test_suite_paths must be a list of strings: {test_suite_paths}") + + self._description = description + self._test_suite_paths = test_suite_paths + self.check_testsuite_file_is_used_by_cpp_test() + self._remove_other_test_data_lines = remove_other_test_data_lines + self._loaded_json_cache = {} + + @property + def _loaded_json_map(self) -> dict[str, str]: + """Lazy-load JSON content for all test suite paths.""" + for path in self._test_suite_paths: + if path not in self._loaded_json_cache: + self._loaded_json_cache[path] = self.get_testsuite_content(path) + return self._loaded_json_cache + + def check_testsuite_file_is_used_by_cpp_test(self) -> None: + """Check if the C++ test file uses the JSON testsuite files.""" + cpp_test_content = self.get_section() + for test_suite_path in self._test_suite_paths: + if test_suite_path not in cpp_test_content: + raise ValueError(f"JSON testsuite {test_suite_path} is not used in the C++ test file {self._path}") + + @classmethod + def type(cls) -> str: + return "JSON_testsuite" + + def get_testsuite_content(self, test_suite_path: str) -> str: + url = TEST_DATA_REPO_URL + str(test_suite_path) + try: + response = requests.get(url) + response.raise_for_status() + return response.text + except requests.RequestException as e: + raise ValueError(f"Failed to fetch 
testsuite content from {url}: {e}") + + @property + def content(self) -> bytes: + all_json_content = "\n".join(self._loaded_json_map.values()) + content = self.get_section() + "\n" + all_json_content + return content.encode('utf-8') + + @staticmethod + def is_json_test_line(line: str) -> bool: + return 'TEST_DATA_DIRECTORY' in line and '.json"' in line + + def filter_other_test_data_lines(self, text: str) -> str: + """Remove lines that only contain other test data references.""" + lines = text.split('\n') + filtered_lines = [] + + for line in lines: + if any(test_suite_path in line for test_suite_path in self._test_suite_paths) or not self.is_json_test_line(line): + filtered_lines.append(line) + + if len(filtered_lines) < len(lines): + filtered_lines.append('\n // Note: Other test data lines have been filtered out for conciseness.') + + return '\n'.join(filtered_lines) + + def get_single_json_as_markdown(self, test_suite_path: str) -> str: + num_json_lines = len(self._loaded_json_map[test_suite_path].split('\n')) + if num_json_lines > MAX_JSON_LINES_FOR_DISPLAY: + link_to_file = TEST_DATA_REPO_URL + str(test_suite_path) + json_for_display = f"[Link to file]({link_to_file}) [Content too large - {num_json_lines} lines]\n\n" + else: + json_for_display = format_json_as_markdown(self._loaded_json_map[test_suite_path]) + + markdown_bullet_point = make_md_bullet_point(f"JSON Testsuite: {test_suite_path}") + return f"{markdown_bullet_point}\n\n {json_for_display}\n\n" + + def get_all_json_as_markdown(self) -> str: + """Get all JSON test files as markdown.""" + return "\n\n".join( + self.get_single_json_as_markdown(test_suite_path) for test_suite_path in self._test_suite_paths + ) + + def as_markdown(self, filepath: None | str = None) -> str: + description = "" + if self._description!="": + description = f"Description: {self._description}\n\n" + + # we can not simply use the parent class's as_markdown method, because it does not filter out + # the other test data lines, 
which are not relevant for the trudag report + cpp_test_content = self.remove_leading_whitespace_preserve_indentation(self.get_section()) + if self._remove_other_test_data_lines: + cpp_test_content = self.filter_other_test_data_lines(cpp_test_content) + cpp_test_content = format_cpp_code_as_markdown(cpp_test_content) + + cpp_test_title = super().__str__() + '\n\n' + + markdown_content = ( + self.get_all_json_as_markdown() + + make_md_bullet_point(cpp_test_title) + + cpp_test_content + ) + if description != "": + markdown_content = make_md_bullet_point(description) + markdown_content + # the markdown content is indented by one level to fit into the report markdown structure + return add_indentation(markdown_content, 1) + + def __str__(self) -> str: + # this is used as a title in the trudag report + return f"cpp-testsuite: [{', '.join(self._test_suite_paths)}]" + +class WebReference(BaseReference): + """ + Represents a reference to a website. + This custom reference type is included as an example on https://codethinklabs.gitlab.io/trustable/trustable/trudag/references.html + and, for the most part, copied from there + """ + def __init__(self, url: str, description: str = "") -> None: + self._url = url + self._description = description + + @classmethod + def type(cls) -> str: + return "website" + + @property + def content(self) -> bytes: + # In the example, the text on the website is used. + # This does not work for constantly changing websites. + # Would the text be used, then the statement could never be reviewed. + # Therefore, the url is returned, which is sufficient for our purposes. 
+ return self._url.encode('utf-8') + + def as_markdown(self, filepath: None | str = None) -> str: + # If we did not add a description, nothing is printed + if (self._description == ""): + return f"`{self._url}`" + # else, we print the description below the url + return f"`{self._url}`\n"+make_md_bullet_point(self._description,1) + + def __str__(self) -> str: + # this is used as a title in the trudag report + return f"website: {self._url}" + +class WebContentReference(WebReference): + def __init__(self, url: str, description: str = "") -> None: + super().__init__(url, description) + + @classmethod + def type(cls): + return "web_content" + + @property + def content(self) -> bytes: + return requests.get(self._url).text.encode('utf-8') + + def as_markdown(self, filepath: None | str = None) -> str: + return super().as_markdown(filepath) + + def __str__(self) -> str: + return super().__str__() + +class TimeVaryingWebReference(WebReference): + def __init__(self, url, description = "", changelog = "ChangeLog.md"): + super().__init__(url, description) + self._changelog = changelog + + @classmethod + def type(cls) -> str: + return "project_website" + + @property + def content(self) -> bytes: + with open(self._changelog, 'r') as file: + lines = file.readlines() + lines.insert(0,self._url) + return '\n'.join(lines).encode('utf-8') + + def as_markdown(self, filepath: None | str = None) -> str: + return super().as_markdown(filepath) + + def __str__(self) -> str: + return super().__str__() + +class FunctionReference(SourceSpanReference): + """ + Represents a reference to a function within a class in a hpp-file. This class assumes that + the hpp-file has the form + ... + class xyz + { + ... + output function_name(input) + { + ... + } + ... + }; + This is the layout that is followed by the hpp-files of nlohmann/json. + + A specific function is identified by + 1. the hpp-file + 2. the name of the class, whithin which our function is defined + 3. 
(optionally) the number of prior definitions within the same class in case of overloaded functions; + by default, the first definition is used. + Since classes are in hpp-files of nlohmann/json uniquely identified by their name, this uniquely identifies a function. + """ + + def __init__(self, name: str, path: str, description: str = "", overload: str = "1") -> None: + [start_line,end_line] = FunctionReference.get_function_line_numbers(Path(path),name,int(overload)) + # SourceSpanReference copies code from a start-character in a start-line + # up to an end-character in an end-line. + # Here, we want every character in all lines between start- and end-line. + # Therefore, we set the end-character to 1000, which could fail, if ever a + # line with more than 1000 characters is copied. + # In nlohmann/json, no hpp-file has such a line, so that the following works fine. + super().__init__(Path(path),[[start_line,0],[end_line,1000]]) + self._name = name + self._overload = int(overload) + self._description = description + + def language(self): + return "C++" + + @classmethod + def type(cls) -> str: + return "function_reference" + + def remove_leading_whitespace_preserve_indentation(self, text: str) -> str: + """ + Remove leading whitespace from all lines while preserving relative indentation. 
+ This is identical to CPPTestReference.remove_leading_whitespace_preserve_indentation + """ + lines = text.split('\n') + lines = [line.replace('\t', ' ' * NUM_WHITESPACE_FOR_TAB) for line in lines] + ident_to_remove = len(lines[0]) - len(lines[0].lstrip()) + + # Remove the baseline indentation from all lines + adjusted_lines = [] + for line in lines: + if line.strip(): # Non-empty line + if not line.startswith(lines[0][:ident_to_remove]): + # If the indentation is not >= than for the baseline, return the original text + return text + adjusted_lines.append(line[ident_to_remove:] if len(line) >= ident_to_remove else line) + else: # Empty line + adjusted_lines.append('') + + return '\n'.join(adjusted_lines) + + @staticmethod + def get_function_line_numbers(path: Path, name: str, overload = 1) -> tuple[int, int]: + with open(path, 'r') as file: + lines = file.readlines() + return FunctionReference.get_function_boundaries(path, name, lines, overload) + + + def get_function_boundaries(path: Path, name: str, lines: list[str], overload: int) -> list[int]: + # Split name in class_name and function_name, + # and check that both, and only both, parts of the name are found. 
+ name_parts = name.split("::") + if len(name_parts) != 2: + raise ValueError(f"Name {name} does not have the form class_name::function_name") + # name_parts[0] is interpreted as class_name, + # name_parts[1] is interpreted as function_name + in_class = False + sections = [] + instance = 0 + start_line = 0 + found_start = False + in_body = False + for line_number, line in enumerate(lines): + # first task: find literal string "class class_name " within a line + if not in_class: + if f"class {name_parts[0]} " in line or f"class {name_parts[0]}\n" in line: + in_class = True + continue + # now we are within the class + # time to search for our function + # ignore all commented out lines + if line.strip().startswith("//"): + continue + if "};" in line and len(sections)==0: + # then, we have reached the end of the class + break + if not found_start: + if '{' in line or '}' in line: + for c in line: + if c == '{': + sections.append(1) + if c == '}': + try: + sections.pop() + except IndexError: + raise ValueError(f"Fatal error: Could not resolve {name} in file {path}.") + # A function-declaration always contains the literal string " function_name(" + # When this string is found within the indentation of the class itself, + # then it can be assumed that we have a function declaration. + # This is true in case of the hpp-files of nlohmann/json. 
+ if f" {name_parts[1]}(" in line and len(sections) == 1: + instance += 1 + if instance == overload: + start_line = line_number + found_start = True + sections.pop() + else: + if '{' in line or '}' in line: + for c in line: + if c == '{': + sections.append(1) + if c == '}': + try: + sections.pop() + except IndexError: + raise ValueError(f"Fatal error: Could not resolve {name} in file {path}.") + if not in_body and len(sections)>0: + in_body = True + if in_body and len(sections)==0: + return [start_line,line_number] + if not in_class: + raise ValueError(f"Could not find class {name_parts[0]} in file {path}") + if not found_start and overload%10 == 1 and overload%100 != 11: + raise ValueError(f"Could not locate {overload}st implementation of {name_parts[1]} in file {path}.") + elif not found_start and overload%10 == 2 and overload%100 != 12: + raise ValueError(f"Could not locate {overload}nd implementation of {name} in file {path}.") + elif not found_start and overload%10 == 3 and overload%100 != 13: + raise ValueError(f"Could not locate {overload}rd implementation of {name} in file {path}.") + elif not found_start: + raise ValueError(f"Could not locate {overload}th implementation of {name} in file {path}.") + else: + raise ValueError(f"Could not find end of function-body of {name} in file {path}.") + + @property + def content(self) -> bytes: + # I don't think this needs to be further encoded, since it is encoded by super() + return self.code + + + def as_markdown(self, filepath: None | str = None) -> str: + content = self.code.decode('utf-8') + content = self.remove_leading_whitespace_preserve_indentation(content) + content = format_cpp_code_as_markdown(content) + if self._description != "": + content = make_md_bullet_point(f"Description: {self._description}",1) + "\n\n" + add_indentation(content,1) + return content + + def __str__(self) -> str: + # this is used as a title in the trudag report + return f"function: [{self._name}]\n({str(self.path)})" + +class 
class ListOfTestCases(BaseReference):
    """Trudag reference that renders a markdown list of every TEST_CASE and
    SECTION found in the project's doctest-style unit-test files, together
    with the compiler/standard environments in which the tests were recently
    executed (read from a SQLite result database)."""

    def __init__(self, test_files: list[str], recent_result_database: str = "artifacts/MemoryEfficientTestResults.db", recent_result_table: str = "test_results") -> None:
        # paths (files or directories) to scan for "unit-*.cpp" test files
        self._test_files = test_files
        # SQLite database / table holding recent test-execution results
        self._database = recent_result_database
        self._table = recent_result_table

    @staticmethod
    def compile_string(items: list[str]) -> str:
        """Render the deepest element of a TEST_CASE/SECTION path as an
        indented markdown bullet.

        Args:
            items: structural path, e.g. ["lexer class", "scan", "literal names"]

        Returns:
            The last item as a bullet, indented one step per ancestor.

        Raises:
            RuntimeError: if the input list is empty.
        """
        if not items:
            raise RuntimeError("Received empty structural list; nonempty list expected.")
        # one indentation step (four spaces) per ancestor of the last item,
        # mirroring the nesting depth in the source code
        return "    " * (len(items) - 1) + "* " + items[-1]

    @staticmethod
    def extract_quotation(s: str) -> str:
        """Return the first double-quoted substring of ``s``.

        Raises:
            RuntimeError: if fewer than two quotation marks are present.
        """
        first = s.find('"')
        if first == -1:
            raise RuntimeError("Expected quotation mark; none were detected.")
        second = s.find('"', first + 1)
        if second == -1:
            raise RuntimeError("Expected quotation marks; only one was detected.")
        return s[first + 1 : second]

    @staticmethod
    def remove_and_count_indent(s: str) -> tuple[int, str]:
        """Strip leading whitespace (spaces and horizontal tabs) from ``s``.

        A tab counts as four spaces.

        Returns:
            A tuple (indent width, string without leading whitespace).
        """
        cnt = 0
        i = 0
        n = len(s)
        while i < n and (s[i] == " " or s[i] == "\t"):
            if s[i] == " ":
                cnt += 1
            elif s[i] == "\t":
                cnt += 4
            i += 1
        return (cnt, s[i:])

    @staticmethod
    def head_of_list() -> str:
        """Return the fixed markdown header that precedes the generated list."""
        return """## List of all unit-tests with test environments

This list contains all unit-tests possibly running in this project.
These tests are compiled from the source-code, where the individual unit-tests are arranged in TEST_CASEs containing possibly nested SECTIONs.
To reflect the structure of the nested sections, nested lists are utilised, where the top-level list represents the list of TEST_CASEs.

It should be noted that not all unit-tests in a test-file are executed with every compiler-configuration.
"""

    @staticmethod
    def transform_test_file_to_test_name(test_file: str) -> str:
        """Map a test-file name (e.g. "unit-algorithms.cpp") to the test name
        used in the result database (e.g. "test-algorithms")."""
        return "test-" + "-".join((test_file.split('.')[0]).split('-')[1:])

    @classmethod
    def type(cls) -> str:
        return "list_of_test_cases"

    def extract_test_structure(self, file_path: Path) -> str:
        """Extract the TEST_CASE/SECTION arrangement of one test file.

        Args:
            file_path: path to a file potentially containing unit-tests.

        Returns:
            The arrangement rendered as nested markdown lists, or "" if the
            file contains no TEST_CASEs.
        """
        indent = 0          # indent of the currently read line
        current_indent = 0  # indent of the last TEST_CASE or SECTION
        current_path = []   # structural path to the current SECTION
        lines_out = []      # collected markdown lines

        # open file_path read-only and process line by line
        with file_path.open("r", encoding="utf-8", errors="replace") as source:
            for line in source:
                # count and remove leading whitespace
                indent, trimmed = self.remove_and_count_indent(str(line))

                # a TEST_CASE always starts a fresh arrangement structure
                if trimmed.startswith("TEST_CASE(") or trimmed.startswith("TEST_CASE_TEMPLATE(") or trimmed.startswith("TEST_CASE_TEMPLATE_DEFINE("):
                    current_indent = indent
                    current_path.clear()
                    current_path.append(self.extract_quotation(trimmed))
                    lines_out.append(self.compile_string(current_path))

                # a SECTION nests below the enclosing TEST_CASE/SECTION
                if trimmed.startswith("SECTION("):
                    # pop siblings/ancestors until the path matches this indent
                    while indent <= current_indent and current_path:
                        current_path.pop()
                        current_indent -= 4
                    current_indent = indent
                    current_path.append(self.extract_quotation(trimmed))
                    lines_out.append(self.compile_string(current_path))

        return ("\n".join(lines_out) + "\n") if lines_out else ""

    def extract_recent_test_environments(self) -> dict:
        """Extract recent test-environment information from the result database.

        The table is expected to provide the columns ``name`` (test-file name),
        ``compiler``, ``cpp_standard`` and ``skipped_cases`` (0 means no skips).

        Returns:
            dict mapping test name -> {"noskip": [...], "skip": [...]} where
            each entry lists compiler/standard (and, for "skip", the count of
            skipped cases).

        Raises:
            RuntimeError: if the database cannot be accessed or the expected
                table does not exist.
        """
        fetched_data = dict()
        connector = None
        try:
            # initialise connection to test result database
            connector = sqlite3.connect(self._database)
            cursor = connector.cursor()
            # verify that the expected table does exist
            cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name = ?;", (self._table,))
            if cursor.fetchone() is None:
                raise RuntimeError(f"Fatal Error: Could not find table {self._table} in database {self._database}.")

            # get all test-files from recent test executions
            # NOTE: the table name cannot be bound as a parameter, hence the
            # f-string; it comes from the constructor, not from user input.
            command = f"SELECT name FROM {self._table};"
            cursor.execute(command)
            raw_cases = cursor.fetchall()
            cases = set(raw_case[0] for raw_case in raw_cases)
            for case in cases:
                case_data = dict()
                # environments in which no test case was skipped
                command = f"SELECT compiler, cpp_standard FROM {self._table} WHERE name = ? and skipped_cases == 0"
                cursor.execute(command, (case,))
                results = cursor.fetchall()
                case_data["noskip"] = [{"compiler": result[0], "standard": result[1]} for result in results]
                # some test-cases are skipped with certain environments;
                # the log does not say which cases were skipped, only how many
                command = f"SELECT compiler, cpp_standard, skipped_cases FROM {self._table} WHERE name = ? and skipped_cases != 0"
                cursor.execute(command, (case,))
                results = cursor.fetchall()
                case_data["skip"] = [{"compiler": result[0], "standard": result[1], "skipped": result[2]} for result in results]
                fetched_data[case] = case_data
        except sqlite3.Error as e:
            raise RuntimeError(f"Fatal Error accessing database {self._database}: {e}")
        finally:
            if connector:
                connector.close()
        return fetched_data

    def fetch_all_test_data(self, input: list[str]):
        """Compile the full markdown report of test structures and environments.

        Args:
            input: file or directory paths; files must match "unit-*.cpp",
                directories are searched recursively.

        Returns:
            str: markdown report with, per test file, the nested TEST_CASE/
            SECTION list plus the compiler/standard combinations in which the
            tests ran (or a note if no recent execution was found).
        """
        extracted_test_data = []
        recent_test_data = self.extract_recent_test_environments()
        for arg in input:
            p = Path(arg)
            if p.is_file() and p.suffix == ".cpp" and p.name.startswith("unit-"):
                extracted_test_data.append((p.name, self.extract_test_structure(p)))
            elif p.is_dir():
                for entry in p.rglob("*"):
                    if entry.is_file() and entry.suffix == ".cpp" and entry.name.startswith("unit-"):
                        extracted_test_data.append((entry.name, self.extract_test_structure(entry)))
        extracted_test_data.sort(key=lambda x: x[0])
        result = self.head_of_list()
        for test_file, list_of_tests in extracted_test_data:
            result += f"\n\n### List of tests in file {test_file}\n\n"
            result += list_of_tests
            result += "\n\n"
            # look up the environments once per file instead of once per use
            environments = recent_test_data.get(self.transform_test_file_to_test_name(test_file), None)
            if environments is None:
                result += "Unfortunately, none of the following tests seems to have been executed. Very strange indeed!\n\n"
                continue
            noskip = environments.get("noskip", None)
            if noskip:
                result += "\nAll tests in this file were run in the following configurations:\n\n"
                for datum in noskip:
                    result += "* "
                    result += datum.get("compiler", None)
                    result += " with standard "
                    result += datum.get("standard", None)
                    result += "\n"
            skip = environments.get("skip", None)
            if skip:
                result += "\nIn the following configuration, however, some test-cases were skipped:\n\n"
                for datum in skip:
                    result += "* "
                    how_many = datum.get("skipped", None)
                    result += str(how_many)
                    if how_many == 1:
                        result += " test case was skipped when using "
                    else:
                        result += " test cases were skipped when using "
                    result += datum.get("compiler", None)
                    result += " with standard "
                    result += datum.get("standard", None)
                    result += "\n"
        return result

    @property
    def content(self) -> bytes:
        # encoding is necessary since content will be hashed
        return self.fetch_all_test_data(self._test_files).encode('utf-8')

    def as_markdown(self, filepath: None | str = None) -> str:
        return self.content.decode('utf-8')

    def __str__(self) -> str:
        # this is used as a title in the trudag report
        return "List of all unit-tests"

from trudag.dotstop.core.reference.references import LocalFileReference as LFR

class VerboseFileReference(LFR):
    """LocalFileReference variant that adds a free-text description to the
    rendered markdown."""

    def __init__(self, path: str, description: str = "", **kwargs) -> None:
        self._path = Path(path)
        self._description = description

    @classmethod
    def type(cls) -> str:
        return "verbose_file"

    @property
    def content(self) -> bytes:
        if not self._path.is_file():
            raise ReferenceError(
                f"Cannot get non-existent or non-regular file {self._path}"
            )
        with self._path.open("rb") as reference_content:
            return reference_content.read()

    def as_markdown(self, filepath: None | str = None) -> str:
        result = super().as_markdown()
        if self._description != "":
            result += make_md_bullet_point(f"Description: {self._description}\n\n")
        return result

    def __str__(self) -> str:
        return str(self._path)

class Checklist(LFR):
    """LocalFileReference variant whose markdown rendering is the raw file
    content (the checklist is itself markdown)."""

    def __init__(self, path: str, **kwargs) -> None:
        self._path = Path(path)

    @classmethod
    def type(cls) -> str:
        return "checklist"

    @property
    def content(self) -> bytes:
        if not self._path.is_file():
            raise ReferenceError(
                f"Cannot get non-existent or non-regular file {self._path}"
            )
        with self._path.open("rb") as reference_content:
            return reference_content.read()

    def as_markdown(self, filepath: None | str = None) -> str:
        return self.content.decode('utf-8')

    def __str__(self) -> str:
        return str(self._path)

del LFR

class workflow_failures(BaseReference):
    """Reference reporting the number of failed GitHub Actions workflow runs
    for a repository (optionally restricted to one branch), scraped from the
    public Actions page."""

    def __init__(self, owner: str, repo: str, branch: str | None = None) -> None:
        self._owner = owner
        self._repo = repo
        self._branch = branch

    @classmethod
    def type(cls) -> str:
        return "workflow_failures"

    @property
    def content(self) -> bytes:
        # build the url
        url = f"https://github.com/{self._owner}/{self._repo}/actions?query=is%3Afailure"
        if self._branch is not None:
            url += f"+branch%3A{self._branch}"
        # ask the website; a timeout prevents the report generation from
        # hanging indefinitely when GitHub is unreachable
        res = requests.get(url, timeout=30)
        # if call is not successful, raise an error
        if res.status_code != 200:
            candidate = f"The url {url} is not reachable, so that the number of failed workflows can not be fetched!"
            raise RuntimeError(candidate)
        # otherwise fetch the number printed in the head of the table
        m = re.search(r'(\d+)\s+workflow run results', res.text, flags=re.I)
        if m is None:
            candidate = "The number of failed workflows can not be found, please check that the table head contains \"XX workflow run results\"!"
            raise RuntimeError(candidate)
        return m.group(1).encode('utf-8')

    def as_markdown(self, filepath: None | str = None) -> str:
        if self._branch is None:
            return f"{self.content.decode('utf-8')} workflows failed on {self._owner}/{self._repo}"
        else:
            return f"{self.content.decode('utf-8')} workflows failed on branch {self._branch} of {self._owner}/{self._repo}"

    def __str__(self) -> str:
        # this is used as a title in the trudag report
        if self._branch is not None:
            result = f"failures on branch {self._branch} of {self._owner}/{self._repo}"
        else:
            result = f"failures on {self._owner}/{self._repo}"
        return result

class ItemReference(BaseReference):
    """Reference that links one trustable item to the references of other
    items in the trustable graph (read from .dotstop.dot)."""

    def __init__(self, items: list[str]) -> None:
        if len(items) == 0:
            raise RuntimeError("Error: Can't initialise empty ItemReference.")
        self._items = items

    @classmethod
    def type(cls) -> str:
        return "item"

    @staticmethod
    def get_markdown_link(item: str) -> str:
        # items are named "<PREFIX>-<NUMBER>"; the prefix selects the page
        first_part = item.split("-")[0]
        return f"see [here]({first_part}.md#{item.lower()}) to find {item}"

    @staticmethod
    def get_reference_contents(items: list[str]) -> bytes:
        """Return the graph lines declaring each item.

        Raises:
            RuntimeError: if an item is not present in the trustable graph.
        """
        # use a context manager so the file handle is always released
        with open(".dotstop.dot", "r") as graph_file:
            lines = graph_file.read().split("\n")
        contents = []
        for item in items:
            # check whether the item is valid
            content = [line for line in lines if line.startswith(f"\"{item}\" [")]
            if len(content) != 1:
                raise RuntimeError(f"Error: The item {item} is not contained in the trustable graph")
            contents.append(content[0].encode("utf-8"))
        return b"".join(contents) if len(contents) != 0 else b"No external references"

    @property
    def content(self) -> bytes:
        return ItemReference.get_reference_contents(self._items)

    def as_markdown(self, filepath: None | str = None) -> str:
        result = ""
        for item in self._items:
            result += make_md_bullet_point(ItemReference.get_markdown_link(item), 1)
        return result

    def __str__(self):
        title = "this item also refers to the references of "
        if len(self._items) == 1:
            title += "item "
        else:
            title += "items "
        title += ", ".join(self._items)
        return title


class IncludeListReference(BaseReference):
    """
    Reference that lists all #include lines in a given file (e.g. single_include/nlohmann/json.hpp).
    Usage: IncludeListReference("single_include/nlohmann/json.hpp", "optional description")
    """
    def __init__(self, path: str, description: str = "") -> None:
        self._path = Path(path)
        self._description = description

    @classmethod
    def type(cls) -> str:
        return "include_list"

    @property
    def content(self) -> bytes:
        if not self._path.is_file():
            raise ReferenceError(f"Cannot get non-existent or non-regular file {self._path}")

        text = self._path.read_text(encoding="utf-8")
        includes = []

        for line in text.splitlines():
            # Only process lines that start with #include (ignoring whitespace)
            if line.lstrip().startswith("#include"):
                # Remove single-line comments
                line = line.split("//")[0].rstrip()

                # Remove multi-line comments (only when closed on this line)
                comment_start = line.find("/*")
                if comment_start != -1:
                    comment_end = line.find("*/", comment_start)
                    if comment_end != -1:
                        line = line[:comment_start] + line[comment_end + 2:]

                # Add the cleaned include line
                includes.append(line.rstrip())

        if not includes:
            return b"No includes found"
        return ("\n".join(includes)).encode("utf-8")

    def as_markdown(self, filepath: None | str = None) -> str:
        content = self.content.decode("utf-8")
        if content == "No includes found":
            return make_md_bullet_point(f"No includes found in {self._path}", 1)
        md = format_cpp_code_as_markdown(content)
        if self._description:
            md = make_md_bullet_point(f"Description: {self._description}", 1) + "\n\n" + add_indentation(md, 1)
        else:
            md = add_indentation(md, 1)
        return md

    def __str__(self) -> str:
        return f"List of included libraries for: {self._path}"
import pytest
import tempfile
from pathlib import Path
from unittest.mock import patch
from references import CPPTestReference, JSONTestsuiteReference, FunctionReference, ItemReference, IncludeListReference, ListOfTestCases
from validators import file_exists


@pytest.fixture
def sample_cpp_content():
    """Sample C++ test file content for testing."""
    return '''#include "test.h"

TEST_CASE("basic_test")
{
    SECTION("section1")
    {
        CHECK(true);

        SECTION("nested_section")
        {
            CHECK(1 == 1);
        }
    }

    SECTION("section2")
    {
        CHECK(false);
    }
}

TEST_CASE("another_test")
{
    CHECK(2 == 2);
}
'''

@pytest.fixture
def sample_testsuite_test():
    """Sample JSON testsuite content for testing."""
    return '''TEST_CASE("compliance tests from json.org")
{
    // test cases are from https://json.org/JSON_checker/

    SECTION("expected failures")
    {
        for (const auto* filename :
                {
                    TEST_DATA_DIRECTORY "/json_tests/fail1.json",
                    TEST_DATA_DIRECTORY "/json_tests/fail2.json",
                    TEST_DATA_DIRECTORY "/json_tests/fail3.json",
                    \\ TEST_DATA_DIRECTORY "/json_tests/fail4.json",
                })
        {
            CAPTURE(filename)
            std::ifstream f(filename);
            json _;
            CHECK_THROWS_AS(_ = json::parse(f), json::parse_error&);
        }
    }
}
'''

@pytest.fixture
def sample_hpp_content():
    """Sample content in the style of lexer.hpp"""
    return '''template
class lexer_base
{
    // class body
};

template
class lexer : public lexer_base
{

    private
    bool dummy_function()
    {
        return my_function();
    }

    bool my_function()
    {
        // function body 
    }
};
'''

def create_temp_file(content, suffix='.txt'):
    """Helper method to create temporary files for testing."""
    with tempfile.NamedTemporaryFile(mode='w', suffix=suffix, delete=False) as f:
        f.write(content)
        f.flush()
        return Path(f.name)

@pytest.fixture
def temp_cpp_file(sample_cpp_content):
    """Create a temporary C++ file for testing."""
    temp_file = create_temp_file(sample_cpp_content, '.cpp')
    yield temp_file
    temp_file.unlink()

@pytest.fixture
def temp_cpp_file_with_testsuite(sample_testsuite_test):
    """Create a temporary C++ file with testsuite content."""
    temp_file = create_temp_file(sample_testsuite_test, '.cpp')
    yield temp_file
    temp_file.unlink()

@pytest.fixture
def temp_hpp_file(sample_hpp_content):
    """Create a temporary .hpp file for testing."""
    temp_file = create_temp_file(sample_hpp_content, '.hpp')
    yield temp_file
    temp_file.unlink()

def test_init():
    """Test CPPTestReference initialization."""
    ref = CPPTestReference("test_section", "test.cpp")
    assert ref._name == "test_section"
    assert ref._path == Path("test.cpp")

def test_type_classmethod():
    """Test the type class method."""
    assert CPPTestReference.type() == "cpp_test"

def test_find_section_start_single_section():
    """Test finding a single section start."""
    lines = [
        '#include "test.h"\n',
        '\n',
        'TEST_CASE("basic_test")\n',
        '{\n',
        '    SECTION("section1")\n',
        '    {\n',
        '        CHECK(true);\n',
        '    }\n',
        '}\n'
    ]
    ref = CPPTestReference("basic_test", "test.cpp")
    start_index = ref.find_section_start(lines)
    assert start_index == 2

def test_find_section_start_nested_section():
    """Test finding nested section start."""
    lines = [
        'TEST_CASE("basic_test")\n',
        '{\n',
        '    SECTION("section1")\n',
        '    {\n',
        '        SECTION("nested")\n',
        '        {\n',
        '            CHECK(true);\n',
        '        }\n',
        '    }\n',
        '}\n'
    ]
    ref = CPPTestReference("basic_test;section1", "test.cpp")
    start_index = ref.find_section_start(lines)
    assert start_index == 2

def test_find_section_start_no_bracket_following_quote():
    """Test finding section start in case of no section start."""
    lines = [
        'TEST_CASE("basic_test"* doctest::skip())\n',
        '{\n',
        '    SECTION("section1")\n',
        '    {\n',
        '        CHECK(true);\n',
        '    }\n',
        '}\n'
    ]
    ref = CPPTestReference("basic_test", "test.cpp")
    start_index = ref.find_section_start(lines)
    assert start_index == 0

def test_find_section_start_not_found():
    """Test exception when section is not found."""
    lines = [
        'TEST_CASE("basic_test")\n',
        '{\n',
        '    CHECK(true);\n',
        '}\n'
    ]
    ref = CPPTestReference("nonexistent_section", "test.cpp")
    with pytest.raises(ValueError, match="Start of section nonexistent_section not found"):
        ref.find_section_start(lines)

def test_find_section_end():
    """Test finding section end with matching braces."""
    lines = [
        'SECTION("test")\n',
        '{\n',
        '    CHECK(true);\n',
        '}\n',
        'other code\n'
    ]
    ref = CPPTestReference("test", "test.cpp")
    end_index = ref.find_section_end(lines, 0)
    assert end_index == 4

def test_find_section_end_indented_braces():
    """Test finding section end with indented braces."""
    lines = [
        '    SECTION("test")\n',
        '    {\n',
        '        CHECK(true);\n',
        '    }\n',
        'other code\n'
    ]
    ref = CPPTestReference("test", "test.cpp")
    end_index = ref.find_section_end(lines, 0)
    assert end_index == 4

def test_find_section_end_no_opening_brace():
    """Test exception when no opening brace is found."""
    lines = [
        'SECTION("test")\n'
    ]
    ref = CPPTestReference("test", "test.cpp")
    with pytest.raises(ValueError, match="Section declaration is on the last line"):
        ref.find_section_end(lines, 0)

def test_find_section_end_invalid_pattern():
    """Test exception when opening brace pattern is invalid."""
    lines = [
        'SECTION("test")\n',
        'invalid line\n'
    ]
    ref = CPPTestReference("test", "test.cpp")
    with pytest.raises(ValueError, match="Section start line does not match expected pattern"):
        ref.find_section_end(lines, 0)

def test_find_section_end_no_closing_brace():
    """Test exception when no matching closing brace is found."""
    lines = [
        'SECTION("test")\n',
        '{\n',
        '    CHECK(true);\n'
    ]
    ref = CPPTestReference("test", "test.cpp")
    with pytest.raises(ValueError, match="Section end not found"):
        ref.find_section_end(lines, 0)

def test_remove_leading_whitespace_preserve_indentation():
    """Test whitespace removal while preserving indentation."""
    ref = CPPTestReference("test", "test.cpp")
    # 4 - (tab+4) - 4 spaces here
    text = "    line1\n\t    line2\n    line3\n"
    # 0 - 4 - 0 spaces here
    expected = "line1\n    line2\nline3\n"
    result = ref.remove_leading_whitespace_preserve_indentation(text)
    assert result == expected

def test_remove_leading_whitespace_preserve_indentation_tabs():
    test_input = '''\t\t\tSECTION("empty object")
\t\t\t{
\t\t\t\tCHECK(parser_helper("{}") == json(json::value_t::object));
\t\t\t}
'''

    expected_output = '''SECTION("empty object")
{
    CHECK(parser_helper("{}") == json(json::value_t::object));
}
'''
    ref = CPPTestReference("test", "test.cpp")
    result = ref.remove_leading_whitespace_preserve_indentation(test_input)
    assert result == expected_output

def test_get_section_integration(temp_cpp_file):
    """Test complete section extraction."""
    ref = CPPTestReference("basic_test;section1", str(temp_cpp_file))
    section = ref.get_section()
    assert 'TEST_CASE("basic_test")' not in section
    assert 'SECTION("section1")' in section
    assert 'CHECK(true)' in section
    assert 'SECTION("nested_section")' in section
    assert 'SECTION("section2")' not in section

def test_get_section_file_not_found():
    """Test exception when file is not found."""
    ref = CPPTestReference("test", "nonexistent.cpp")
    with pytest.raises(FileNotFoundError):
        ref.get_section()

def test_content_property(temp_cpp_file):
    """Test content property returns bytes."""
    ref = CPPTestReference("basic_test", str(temp_cpp_file))
    content = ref.content
    assert isinstance(content, bytes)
    assert b'TEST_CASE("basic_test")' in content
    assert b'SECTION("section2")' in content
    assert b'TEST_CASE("another_test")' not in content

def test_as_markdown(temp_cpp_file):
    """Test markdown formatting."""
    ref = CPPTestReference("basic_test", str(temp_cpp_file))
    markdown = ref.as_markdown()
    assert isinstance(markdown, str)
    assert 'TEST_CASE("basic_test")' in markdown

@pytest.mark.parametrize("section_name,expected_line", [
    ("basic_test", 2),
    ("another_test", 20)
])
def test_find_different_sections(sample_cpp_content, section_name, expected_line):
    """Test finding different sections in the same file."""
    lines = sample_cpp_content.split('\n')
    lines = [line + '\n' for line in lines]  # Add back newlines
    ref = CPPTestReference(section_name, "test.cpp")
    start_index = ref.find_section_start(lines)
    assert start_index == expected_line

def test_nested_section_extraction(temp_cpp_file):
    """Test extracting nested sections."""
    ref = CPPTestReference("basic_test;section1;nested_section", temp_cpp_file)
    section = ref.get_section()
    assert 'nested_section' in section

def test_testsuite_json_loading():
    """Test TestsuiteReference initialization and type."""
    with patch.object(JSONTestsuiteReference, 'check_testsuite_file_is_used_by_cpp_test') as mock_check, \
         patch.object(JSONTestsuiteReference, 'get_testsuite_content') as mock_get_content:

        mock_check.return_value = None  # Mock the validation check
        mock_get_content.return_value = '["Unclosed array"'

        suite_ref = JSONTestsuiteReference("name", "path", ["/json_tests/fail2.json"], "description")
        json = suite_ref.get_testsuite_content("/json_tests/fail2.json")
        assert json == '["Unclosed array"'

def test_json_testsuite_reference_content(temp_cpp_file_with_testsuite, sample_testsuite_test):
    """Test JSONTestsuiteReference content property."""
    suite_ref = JSONTestsuiteReference("compliance tests from json.org;expected failures", str(temp_cpp_file_with_testsuite), ["/json_tests/fail2.json", "/json_tests/fail3.json"], "description")

    content = suite_ref.content
    assert isinstance(content, bytes)

    decoded_content = content.decode('utf-8')

    relevant_section = '\n'.join(sample_testsuite_test.split('\n')[4:-2])

    # content should include the section from the C++ test file and the JSON files
    assert relevant_section in decoded_content
    # "/json_tests/fail2.json"
    assert '["Unclosed array"' in decoded_content
    # "/json_tests/fail3.json"
    assert '{unquoted_key: "keys must be quoted"}' in decoded_content

def test_json_testsuite_reference_init_valid():
    """Test JSONTestsuiteReference initialization with valid parameters."""
    test_suite_paths = ["tests/data/json_tests/pass1.json", "tests/data/json_tests/pass2.json"]
    description = "Test suite for valid JSON parsing"

    with patch.object(JSONTestsuiteReference, 'get_testsuite_content') as mock_get_content, \
         patch.object(JSONTestsuiteReference, 'check_testsuite_file_is_used_by_cpp_test') as mock_check:

        mock_get_content.side_effect = lambda path: f"content of {path}"
        mock_check.return_value = None  # Mock the validation check

        ref = JSONTestsuiteReference("test_section", "test.cpp", test_suite_paths, description)

        # Test initialization
        assert ref._name == "test_section"
        assert ref._path == Path("test.cpp")
        assert ref._test_suite_paths == test_suite_paths
        assert ref._description == description

        # Test that content was loaded for each path
        assert len(ref._loaded_json_map) == 2
        assert ref._loaded_json_map["tests/data/json_tests/pass1.json"] == "content of tests/data/json_tests/pass1.json"
        assert ref._loaded_json_map["tests/data/json_tests/pass2.json"] == "content of tests/data/json_tests/pass2.json"

        # Verify get_testsuite_content was called for each path
        assert mock_get_content.call_count == 2

def test_json_testsuite_reference_init_invalid_paths_type():
    """Test JSONTestsuiteReference initialization with invalid test_suite_paths type."""
    with pytest.raises(ValueError, match="test_suite_paths must be a list of strings"):
        JSONTestsuiteReference("test_section", "test.cpp", "not_a_list", "description")

def test_type_classmethod_JSON_testsuite():
    """Test the type class method."""
    assert JSONTestsuiteReference.type() == "JSON_testsuite"

def test_is_json_test_line_valid_cases():
    """Test is_json_test_line with valid JSON test lines."""
    # line from "json.org examples"
    assert JSONTestsuiteReference.is_json_test_line('        std::ifstream f(TEST_DATA_DIRECTORY "/json.org/5.json");')
    # line from "compliance tests from json.org"
    assert JSONTestsuiteReference.is_json_test_line('            TEST_DATA_DIRECTORY "/json_tests/fail2.json",')
    # line from "nst's JSONTestSuite (2)"
    assert JSONTestsuiteReference.is_json_test_line('TEST_DATA_DIRECTORY "/nst_json_testsuite2/test_parsing/y_string_surrogates_U+1D11E_MUSICAL_SYMBOL_G_CLEF.json",')


def test_filter_other_test_data_lines_keeps_relevant_lines():
    """Test filter_other_test_data_lines keeps lines with relevant test suite paths."""
    test_suite_paths = ["json_tests/pass1.json", "json_tests/pass2.json"]

    with patch.object(JSONTestsuiteReference, 'get_testsuite_content') as mock_get_content, \
         patch.object(JSONTestsuiteReference, 'check_testsuite_file_is_used_by_cpp_test') as mock_check:

        mock_get_content.return_value = "sample json content"
        mock_check.return_value = None  # Mock the validation check

        ref = JSONTestsuiteReference("test_section", "test.cpp", test_suite_paths, "Test description")

        input_text = '''TEST_CASE("test")
{
    // TEST_DATA_DIRECTORY "/json_tests/pass1.json",
    // TEST_DATA_DIRECTORY "/json_tests/fail1.json",
    TEST_DATA_DIRECTORY "/json_tests/pass2.json",
    TEST_DATA_DIRECTORY "/json_tests/other.json",
    CHECK(true);
}'''
        # NOTE(review): this test builds ``input_text`` but never calls
        # filter_other_test_data_lines nor asserts anything — it cannot fail.
        # TODO: call the method under test and assert which lines are kept.

def test_get_single_json_as_markdown_small_content():
    """Test get_single_json_as_markdown with small JSON content for two files."""
    test_suite_paths = ["json_tests/small.json", "json_tests/large.json"]

    def mock_content_side_effect(path):
        if path == "json_tests/small.json":
            return '{"test": "value1"}'
        elif path == "json_tests/large.json":
            return 'test\n'*500
        return ""

    with patch.object(JSONTestsuiteReference, 'get_testsuite_content') as mock_get_content, \
         patch.object(JSONTestsuiteReference, 'check_testsuite_file_is_used_by_cpp_test') as mock_check:

        mock_get_content.side_effect = mock_content_side_effect
        mock_check.return_value = None  # Mock the validation check


        ref = JSONTestsuiteReference("test_section", "test.cpp", test_suite_paths, "Test description")

        result1 = ref.get_single_json_as_markdown("json_tests/small.json")
        assert "- JSON Testsuite: json_tests/small.json" in result1
        assert "```json" in result1
        assert '{"test": "value1"}' in result1

        result2 = ref.get_single_json_as_markdown("json_tests/large.json")
        assert "- JSON Testsuite: json_tests/large.json" in result2
        assert '[Link to file]' in result2

def test_get_all_json_as_markdown():
    """Test get_all_json_as_markdown with multiple JSON files."""
    test_suite_paths = ["json_tests/file1.json", "json_tests/file2.json"]

    def mock_content_side_effect(path):
        if path == "json_tests/file1.json":
            return '{"name": "test1"}'
        elif path == "json_tests/file2.json":
            return '{"name": "test2"}'
        return ""

    with patch.object(JSONTestsuiteReference, 'get_testsuite_content') as mock_get_content, \
         patch.object(JSONTestsuiteReference, 'check_testsuite_file_is_used_by_cpp_test') as mock_check:

        mock_get_content.side_effect = mock_content_side_effect
        mock_check.return_value = None  # Mock the validation check


        ref = JSONTestsuiteReference("test_section", "test.cpp", test_suite_paths, "Test description")

        result = ref.get_all_json_as_markdown()

        # Should contain both JSON files
        assert "- JSON Testsuite: json_tests/file1.json" in result
        assert "- JSON Testsuite: json_tests/file2.json" in result

        # Should contain both JSON contents
        assert '{"name": "test1"}' in result
        assert '{"name": "test2"}' in result

        # Should have proper separation between files
        assert result.count("```json") == 2

def test_as_markdown_json_testsuite():
    """Test as_markdown method with complete output structure.

    Renamed from ``test_as_markdown``: a second definition with that name
    silently shadowed the earlier CPPTestReference test of the same name,
    so that test never ran.
    """
    test_suite_paths = ["json_tests/test1.json", "json_tests/test2.json"]

    def mock_content_side_effect(path):
        if path == "json_tests/test1.json":
            return '{"test": "value1"}'
        elif path == "json_tests/test2.json":
            return '{"test": "value2"}'
        return ""

    with patch.object(JSONTestsuiteReference, 'get_testsuite_content') as mock_get_content, \
         patch.object(JSONTestsuiteReference, 'get_section') as mock_get_section:

        mock_get_content.side_effect = mock_content_side_effect
        mock_get_section.return_value = '''TEST_CASE("test")
{
    TEST_DATA_DIRECTORY "/json_tests/test1.json",
    TEST_DATA_DIRECTORY "/json_tests/test2.json",
    CHECK(true);
}'''

        ref = JSONTestsuiteReference("test_section", "test.cpp", test_suite_paths, "Test JSON files")

        result = ref.as_markdown()

        # Should contain description
        assert "Description: Test JSON files" in result

        # Should contain JSON content
        assert "JSON Testsuite: json_tests/test1.json" in result
        assert "JSON Testsuite: json_tests/test2.json" in result
        assert '{"test": "value1"}' in result
        assert '{"test": "value2"}' in result

        # Should contain C++ test content
        assert "cpp-test:" in result
        assert "```cpp" in result
        assert "CHECK(true);" in result

        # Should be indented (starts with tab)
        assert result.startswith('\t')


def test_check_testsuite_file_is_used_by_cpp_test_valid():
    """Test check_testsuite_file_is_used_by_cpp_test with valid usage."""
    test_suite_paths = ["json_tests/pass1.json", "json_tests/pass2.json"]

    with patch.object(JSONTestsuiteReference, 'get_testsuite_content') as mock_get_content, \
         patch.object(JSONTestsuiteReference, 'get_section') as mock_get_section:

        mock_get_content.return_value = "sample json content"
        mock_get_section.return_value = '''TEST_CASE("test")
{
    for (const auto* filename :
            {
                TEST_DATA_DIRECTORY "/json_tests/pass1.json",
                TEST_DATA_DIRECTORY "/json_tests/pass2.json",
            })
    {
        CHECK(true);
    }
}'''

        # Should not raise any exception
        ref = JSONTestsuiteReference("test_section", "test.cpp", test_suite_paths, "Test description")
        assert ref._test_suite_paths == test_suite_paths

def test_check_testsuite_file_is_used_by_cpp_test_missing_file():
    """Test check_testsuite_file_is_used_by_cpp_test with missing file reference."""
    test_suite_paths = ["json_tests/missing.json", "json_tests/pass1.json"]

    with patch.object(JSONTestsuiteReference, 'get_testsuite_content') as mock_get_content, \
         patch.object(JSONTestsuiteReference, 'get_section') as mock_get_section:

        mock_get_content.return_value = "sample json content"
        mock_get_section.return_value = '''TEST_CASE("test")
{
    TEST_DATA_DIRECTORY "/json_tests/pass1.json",
    CHECK(true);
}'''

        # Should raise ValueError for missing file
        with pytest.raises(ValueError, match="JSON testsuite json_tests/missing.json is not used in the C\\+\\+ test file"):
            JSONTestsuiteReference("test_section", "test.cpp", test_suite_paths, "Test description")

def test_get_function_boundaries():
    lines = [
        'template\n',
        'class lexer_base\n',
        '{\n',
        '    // class body\n',
        '};\n',
        '\n',
        'template\n',
        'class lexer : public lexer_base\n',
        '{\n',
        '\n',
        '    private\n',
        '    bool dummy_function()\n',
        '    {\n',
        '        return my_function();\n',
        '    }\n',
        '\n',
        '    bool my_function()\n',
        '    {\n',
        '        // function body \n',
        '    }\n',
        '};\n'
    ]
    assert FunctionReference.get_function_boundaries("foo", "lexer::my_function", lines, 1) == [16, 19]

def test_get_function_boundaries_with_multiline_declaration():
    lines = [
        'template\n',
        'class lexer_base\n',
        '{\n',
        '    // class body\n',
        '};\n',
        '\n',
        'template\n',
        'class lexer : public lexer_base\n',
        '{\n',
        '\n',
        '    private\n',
        '    bool dummy_function()\n',
        '    {\n',
        '        return my_function();\n',
        '    }\n',
        '\n',
        '    bool my_function(int: foo,',
        '                     bool: bar)\n',
        '    {\n',
        '        // function body \n',
        '    }\n',
        '};\n'
    ]
    assert FunctionReference.get_function_boundaries("foo", "lexer::my_function", lines, 1) == [16, 20]

def test_get_function_boundaries_with_multiple_overloads():
    lines = [
        'template\n',
        'class lexer_base\n',
        '{\n',
        '    // class body\n',
        '};\n',
        '\n',
        'template\n',
        'class lexer : public lexer_base\n',
        '{\n',
        '\n',
        '    private\n',
        '    bool dummy_function()\n',
        '    {\n',
        '        return my_function();\n',
        '    }\n',
        '\n',
        '    bool my_function()\n',
        '    {\n',
        '        // function body \n',
        '    }\n',
        '\n',
        '    void my_function()\n',
        '    {\n',
        '        // function body \n',
        '    }\n',
        '\n',
        '    int my_function()\n',
        '    {\n',
        '        // function body \n',
        '    }\n',
        '};\n'
    ]
    assert FunctionReference.get_function_boundaries("foo", "lexer::my_function", lines, 1) == [16, 19]
    assert FunctionReference.get_function_boundaries("foo", "lexer::my_function", lines, 2) == [21, 24]
    assert FunctionReference.get_function_boundaries("foo", "lexer::my_function", lines, 3) == [26, 29]
    with pytest.raises(ValueError, match="Could not locate 4th implementation of lexer::my_function in file foo."):
        FunctionReference.get_function_boundaries("foo", "lexer::my_function", lines, 4)
    with pytest.raises(ValueError, match="Could not locate 123rd implementation of lexer::my_function in file foo."):
        FunctionReference.get_function_boundaries("foo", "lexer::my_function", lines, 123)
    with pytest.raises(ValueError, match="Could not locate 11th implementation of lexer::my_function in file foo."):
        FunctionReference.get_function_boundaries("foo", "lexer::my_function", lines, 11)

def test_get_function_line_numbers(temp_hpp_file):
    [a, b] = FunctionReference.get_function_line_numbers(str(temp_hpp_file), "lexer::my_function")
    assert a == 16
    assert b == 19

def test_init_function_reference(temp_hpp_file):
    ref = FunctionReference("lexer::my_function", str(temp_hpp_file))
    assert ref.code == b"    bool my_function()\n    {\n        // function body \n    }\n"
    assert ref._name == "lexer::my_function"
    assert ref.path == temp_hpp_file
    assert ref._overload == 1

def test_faulty_init_ItemReference():
    with pytest.raises(RuntimeError, match=r"Error: Can't initialise empty ItemReference."):
        item_reference = ItemReference([])

def test_init_ItemReference():
    item_reference = ItemReference(["Hallo", "Welt"])
    assert item_reference._items == ["Hallo", "Welt"]

def test_file_exists(tmp_path):
    root = tmp_path / "direx"
    root.mkdir()
    path_1 = root / "subfolder"
    path_1.mkdir()
    path_1_1 = path_1 / "test.yml"
    path_1_1.write_text("test")
    path_2 = root / "script.py"
    path_2.write_text("print(\"Hallo, Welt\")")
    files = [str(path_1), str(path_1_1), "foo.bar", str(path_2)]

    score, exceptions = file_exists({"files": files})

    assert score == 2/4
    assert any(isinstance(exception, Warning) for exception in exceptions)
    assert any(isinstance(exception, RuntimeError) for exception in exceptions)

# ListOfTestCases tests
@pytest.fixture
def sample_unit_test_content():
    """Sample unit test file content for testing ListOfTestCases."""
    return '''TEST_CASE("basic arithmetic")
{
    SECTION("addition")
    {
        CHECK(1 + 1 == 2);

        SECTION("positive numbers")
        {
            CHECK(5 + 3 == 8);
        }
    }

    SECTION("multiplication")
    {
        CHECK(2 * 3 == 6);
    }
}

TEST_CASE("another test")
{
    CHECK(true);
}
'''

@pytest.fixture
def temp_unit_test_file(sample_unit_test_content, tmp_path):
    """Create a temporary unit test file."""
    test_file = tmp_path / "unit-sample.cpp"
    test_file.write_text(sample_unit_test_content)
    return test_file

def test_list_of_test_cases_type_classmethod():
    """Test the type class method."""
    assert ListOfTestCases.type() == "list_of_test_cases"

def test_compile_string():
    """Test compile_string static method."""
    # Test single item
    result = ListOfTestCases.compile_string(["test_case"])
    assert result == "* test_case"

    # Test nested items: the last element is rendered as a bullet indented
    # one step per ancestor (structure-only assertions; the exact indent
    # width is an implementation detail of compile_string).
    result = ListOfTestCases.compile_string(["outer", "inner"])
    assert result.endswith("* inner")
    assert result.startswith(" ")
def test_compile_string_empty_list():
    """An empty structural list is a programming error and must raise."""
    with pytest.raises(RuntimeError, match="Received empty structural list; nonempty list expected."):
        ListOfTestCases.compile_string([])


def test_extract_quotation():
    """extract_quotation returns the text between the first pair of double quotes."""
    assert ListOfTestCases.extract_quotation('TEST_CASE("my test")') == "my test"
    # with multiple quoted strings, only the first is extracted
    assert ListOfTestCases.extract_quotation('SECTION("section1") and "section2"') == "section1"


def test_extract_quotation_no_quotes():
    """A line without any quotes is rejected."""
    with pytest.raises(RuntimeError, match="Expected quotation mark; none were detected."):
        ListOfTestCases.extract_quotation("no quotes here")


def test_extract_quotation_single_quote():
    """A line with an unmatched quote is rejected."""
    with pytest.raises(RuntimeError, match="Expected quotation marks; only one was detected."):
        ListOfTestCases.extract_quotation('single quote"')


def test_remove_and_count_indent():
    """remove_and_count_indent counts tabs as four columns and strips the prefix."""
    # plain spaces
    count, text = ListOfTestCases.remove_and_count_indent("    hello")
    assert count == 4
    assert text == "hello"

    # tabs expand to 4 columns each
    count, text = ListOfTestCases.remove_and_count_indent("\t\thello")
    assert count == 8
    assert text == "hello"

    # mixed tab + spaces
    count, text = ListOfTestCases.remove_and_count_indent("\t  hello")
    assert count == 6
    assert text == "hello"

    # no indentation at all
    count, text = ListOfTestCases.remove_and_count_indent("hello")
    assert count == 0
    assert text == "hello"


def test_head_of_list():
    """The generated header introduces the test-environment listing."""
    result = ListOfTestCases.head_of_list()
    assert "## List of all unit-tests with test environments" in result
    assert "TEST_CASEs" in result
    assert "SECTIONs" in result


def test_transform_test_file_to_test_name():
    """unit-*.cpp file names map onto test-* target names."""
    assert ListOfTestCases.transform_test_file_to_test_name("unit-example-test.cpp") == "test-example-test"
    assert ListOfTestCases.transform_test_file_to_test_name("unit-simple.cpp") == "test-simple"


def test_extract_test_structure(temp_unit_test_file):
    """The extracted outline mirrors TEST_CASE/SECTION nesting of the fixture."""
    list_ref = ListOfTestCases([])
    result = list_ref.extract_test_structure(temp_unit_test_file)

    assert "* basic arithmetic" in result
    assert "  * addition" in result
    assert "    * positive numbers" in result
    assert "  * multiplication" in result
    assert "* another test" in result


def test_extract_test_structure_empty_file(tmp_path):
    """An empty source file yields an empty outline."""
    empty_file = tmp_path / "empty.cpp"
    empty_file.write_text("")

    list_ref = ListOfTestCases([])
    assert list_ref.extract_test_structure(empty_file) == ""


def test_list_of_test_cases_init():
    """Explicit database and table names are stored as given."""
    test_files = ["tests/unit-test1.cpp", "tests/unit-test2.cpp"]
    list_ref = ListOfTestCases(test_files, "custom.db", "custom_table")

    assert list_ref._test_files == test_files
    assert list_ref._database == "custom.db"
    assert list_ref._table == "custom_table"


def test_list_of_test_cases_init_defaults():
    """Omitting database/table falls back to the project defaults."""
    test_files = ["tests/unit-test1.cpp"]
    list_ref = ListOfTestCases(test_files)

    assert list_ref._test_files == test_files
    assert list_ref._database == "artifacts/MemoryEfficientTestResults.db"
    assert list_ref._table == "test_results"


def test_str_method():
    """__str__ yields a fixed human-readable label."""
    list_ref = ListOfTestCases(["test_file"])
    assert str(list_ref) == "List of all unit-tests"


def test_include_list_init():
    """IncludeListReference stores path (as Path) and description."""
    ref = IncludeListReference("some/path.hpp", "my desc")
    assert ref._path == Path("some/path.hpp")
    assert ref._description == "my desc"


def test_type_classmethod_include_list():
    """The type class method identifies this reference kind."""
    assert IncludeListReference.type() == "include_list"


def test_content_includes_found():
    """content returns the #include lines of the file as bytes."""
    content = '#include <vector>\n#include "local.h"\nint x = 0;\n'
    temp = create_temp_file(content, suffix='.hpp')
    try:
        ref = IncludeListReference(str(temp), "desc")
        data = ref.content
        assert isinstance(data, bytes)
        decoded = data.decode('utf-8')
        assert '#include <vector>' in decoded
        assert '#include "local.h"' in decoded
    finally:
        temp.unlink()


def test_content_no_includes():
    """A file without includes yields the sentinel byte string."""
    temp = create_temp_file('int x = 1;\n// nothing to include\n', suffix='.hpp')
    try:
        ref = IncludeListReference(str(temp))
        assert ref.content == b"No includes found"
    finally:
        temp.unlink()


def test_content_file_not_found():
    """A missing file surfaces as ReferenceError when content is accessed."""
    ref = IncludeListReference("nonexistent_file_hopefully.hpp")
    with pytest.raises(ReferenceError):
        _ = ref.content


def test_as_markdown_with_description():
    """Markdown output starts with the description bullet and embeds a cpp block."""
    content = '#include <string>\n#include "a.h"\n'
    temp = create_temp_file(content, suffix='.hpp')
    try:
        ref = IncludeListReference(str(temp), "list of includes")
        md = ref.as_markdown()
        assert isinstance(md, str)
        # starts with an indented bullet for the description
        assert md.startswith('\t- Description: list of includes')
        assert '```cpp' in md
        assert '#include <string>' in md
    finally:
        temp.unlink()


def test_as_markdown_no_includes():
    """Without includes the markdown degrades to a single informative bullet."""
    temp = create_temp_file('void f();\n', suffix='.hpp')
    try:
        ref = IncludeListReference(str(temp))
        md = ref.as_markdown()
        assert md.strip().startswith('- No includes found in')
    finally:
        temp.unlink()
test_str_include_list(): + ref = IncludeListReference("path/to/file.hpp") + assert str(ref) == f"List of included libraries for: {Path('path/to/file.hpp')}" diff --git a/.dotstop_extensions/validators.py b/.dotstop_extensions/validators.py new file mode 100644 index 0000000000..2f5321cfef --- /dev/null +++ b/.dotstop_extensions/validators.py @@ -0,0 +1,495 @@ +from typing import TypeAlias, Tuple, List +import os +import requests +import sqlite3 +import sys + +current_dir = os.getcwd() +if current_dir not in sys.path: + sys.path.insert(0, current_dir) + +from TSF.scripts.generate_list_of_tests import ListOfTestsGenerator +import hashlib +import json +import re +import subprocess + +yaml: TypeAlias = str | int | float | list["yaml"] | dict[str, "yaml"] + +def setup_environment_variables() -> dict[str, str]: + """ + Retrieves and validates the necessary environment variables for GitHub workflows. + Raises a RuntimeError if any required variables are missing. + """ + required_vars = ["GITHUB_TOKEN", "GITHUB_EVENT_NAME", "GITHUB_RUN_ID", "GITHUB_REPOSITORY", "GITHUB_SHA"] + environment = {var: os.getenv(var) for var in required_vars} + + missing_vars = [var for var, value in environment.items() if not value] + if missing_vars: + raise RuntimeError(f"Missing required environment variables: {', '.join(missing_vars)}") + + return environment + +def check_artifact_exists(configuration: dict[str, yaml]) -> tuple[float, list[Exception | Warning]]: + # Setup environment variables using the helper function + try: + env = setup_environment_variables() + except RuntimeError as e: + return (0,[e]) + + github_token = env["GITHUB_TOKEN"] + github_event_name = env["GITHUB_EVENT_NAME"] + run_id = env["GITHUB_RUN_ID"] + repository = env["GITHUB_REPOSITORY"] + sha = env["GITHUB_SHA"] + + score = 0.0 + + # Validate configuration values + for key, value in configuration.items(): + if value not in {"include", "exclude"}: # Check if value is valid + warning = Warning(f"Invalid configuration 
value: '{value}' for key '{key}'. Valid values are 'include' or 'exclude'.") + return (0.0, [warning]) # If value is neither include nor exclude, return 0.0 with a warning + + # Determine the number of expected workflows based on the event type + if github_event_name != "pull_request": + configuration["dependency_review"] = "exclude" # Exclude dependency review if not a PR + configuration["check_amalgamation"] = "exclude" # Exclude check amalgamation if not a PR + + if github_event_name != "push": + configuration["publish_documentation"] = "exclude" # Exclude publish documentation if not a push to main + + num_expected_workflows = sum(1 for value in configuration.values() if value == "include") + + # If no workflows are expected, return a score of 1.0 with a warning + if num_expected_workflows == 0: + warning = Warning("No workflows to check, returning a score of 1.0.") + return (1.0, [warning]) + + # GitHub API URL to list artifacts for the current workflow run + url = f"https://api.github.com/repos/{repository}/actions/runs/{run_id}/artifacts" + + # Add authentication headers using the GitHub token + headers = { + "Authorization": f"Bearer {github_token}", + "Accept": "application/vnd.github+json" + } + + # Make the request to the GitHub API to fetch artifacts + response = requests.get(url, headers=headers) + + # Check for a successful response + if response.status_code != 200: + return (score, [RuntimeError(f"Failed to fetch artifacts: {response.status_code} - {response.text}")]) + + # Parse the JSON response + data = response.json() + artifacts_created_data = data.get("artifacts", []) + + # Extract artifact names + artifacts_created = [artifact["name"] for artifact in artifacts_created_data] + + # Check if artifacts for each workflow exist + for key, value in configuration.items(): + if value == "exclude": + continue # Skip excluded workflows + artifact_expected = f"{key}-{sha}" + if artifact_expected in artifacts_created: + score += 1 + + return 
(score/num_expected_workflows, []) + + +def https_response_time(configuration: dict[str, yaml]) -> tuple[float, list[Exception | Warning]]: + """ + Validates the reachability of a website-reference. + This code is mostly copied from https://codethinklabs.gitlab.io/trustable/trustable/trudag/validators.html, + where this custom validator is presented as an example. + + notable difference: target response time is given in seconds, since we only check if the website is reachable. + """ + target = configuration.get("target_seconds", None) + urls = configuration.get("urls", None) + if not urls: + return (0.0, [ValueError("No url specified for https_response_time validator")]) + if not target: + return (0.0, [ValueError("No target time specified for https_response_time validator")]) + exceptions = [] + scores = [] + for url in urls: + try: + # in the reference website, an url comes together with https:// + response = requests.get(url,timeout=5*target) + except requests.exceptions.ConnectionError as e: + print(f"Critical error: target site {url} could not be reached.") + exceptions.append(e) + scores.append(0) + continue + except requests.exceptions.ReadTimeout as e: + print(f"Error: target site {url} could not be reached within {5*target} seconds.") + exceptions.append(e) + scores.append(0) + continue + # check whether target site is successfully called + if response.status_code == 200: + # if target site is successfully called, check if it is reached within target seconds + # recall that target/response.elapsed.microseconds>1/5, so score is accordingly refactored + score = (min(1e6*target/response.elapsed.microseconds, 1.0)-0.2)*1.25 + scores.append(score) + continue + scores.append(0) + return(sum(scores)/len(scores),exceptions) + + +def check_test_results(configuration: dict[str, yaml]) -> tuple[float, list[Exception | Warning]]: + """ + Validates whether a certain test-case fails, or not. 
+ """ + # get the test-names + raw_tests = configuration.get("tests",None) + if raw_tests is None: + return(1.0, [Warning("Warning: No tests specified! Assuming absolute trustability!")]) + # process test-names + tests = [] + for test in raw_tests: + tests.append(f"test-{str(test)}") + # read optional argument -- database name for the test report -- if specified + database = configuration.get("database", None) + if database is None: + # default value "MemoryEfficientTestResults.db" + database = "MemoryEfficientTestResults.db" + # check whether database containing test-results does exist + ubuntu_artifact = f"./artifacts/{database}" + if not os.path.exists(ubuntu_artifact): + return (0.0, [RuntimeError("The artifact containing the test data was not loaded correctly.")]) + # Ubuntu artifact is loaded correctly and test-results can be accessed. + # read optional argument -- table name for the test report -- if specified + table = configuration.get("table", None) + if table is None: + # default value "test_results" + table = "test_results" + # establish connection to database + try: + connector = sqlite3.connect(ubuntu_artifact) + cursor = connector.cursor() + # check whether our results can be accessed + cursor.execute("SELECT 1 FROM sqlite_master WHERE type='table' AND name=?", (table,)) + if cursor.fetchone() is None: + # if not, it is not trustable + return (0.0, [RuntimeError(f"Table {table} can not be loaded.")]) + # our result table can be read + # initialise variables + score = 0.0 + expected_tests = len(tests) + warnings = [] + for test in tests: + # check if data for test have been captured + command = f"SELECT COUNT(*) FROM {table} WHERE name = ?" 
+ cnt = cursor.execute(command, (test)).fetchone()[0] + if cnt is None or cnt == 0: + # no data found -> assign trustability 0 and inform user + warnings.append(Warning(f"Could not find data for test {test}.")) + continue + # process data for test + command = f""" + SELECT + COALESCE(SUM(passed_cases), 0) AS total_passed, + COALESCE(SUM(failed_cases), 0) AS total_failed + FROM {table} + WHERE name = ? + """ + passed, failed = cursor.execute(command, (test,)).fetchone() + all = float(passed)+float(failed) + if all == 0: + # means that all test-cases have been skipped; could happen due to time-constraints + # and interrupted workflow. + # Assumption: A skipped test is trustable. + score += 1/expected_tests + warnings.append(Warning(f"All test cases of {test} were skipped.")) + else: + # at least one passed or failed test has been found + # observe that expected_tests = 0 if, and only if, tests = [], + # in which case the for-loop is skipped + score += float(passed)/(all*expected_tests) + # terminate database connection + # no commit necessary, since changes on database not intended + connector.close() + return(score, warnings) + except: + return (0.0, [RuntimeError("Fatal error during database evaluation.")]) + +def file_exists(configuration: dict[str, yaml]) -> tuple[float, list[Exception | Warning]]: + # read list of files to be checked + files = configuration.get("files",None) + if files is None: + return (1.0, [Warning("No files to check, assuming trustability")]) + expected_files = len(files) + # if no files are to be checked, assume trustability + if expected_files == 0: + return (1.0, [Warning("No files to check, assuming trustability")]) + found_files = 0 + exceptions = [] + for file in files: + # check if path exists + if not os.path.exists(file): + exceptions.append(RuntimeError(f"Critical Error: The path {file} does not exist.")) + elif os.path.isdir(file): + # only files counted, warn user if directory is detected + exceptions.append(Warning(f"The path 
{file} links to a directory, but a file is expected.")) + else: + found_files += 1 if os.path.isfile(file) else 0 + return (found_files/expected_files, exceptions) + +def check_list_of_tests(configuration: dict[str, yaml]) -> tuple[float, list[Exception | Warning]]: + # initialise the generator + generator = ListOfTestsGenerator() + db = configuration.get("database",None) + if db is not None: + generator.set_database(db) + table = configuration.get("table",None) + if table is not None: + generator.set_table(table) + sources = configuration.get("sources",None) + if sources is not None: + generator.set_sources(sources) + + # fetch the expected result + try: + with open("./TSF/docs/list_of_test_environments.md", 'r') as f: + expected = f.read() + if expected == generator.fetch_all_test_data(): + return(1.0,[]) + else: + return(0.0,[Exception("The expected list of test-cases does not coincide with the fetched list.")]) + except: + return(0.0,[Exception("An exception occurred when trying to compare the expected and the fetched list of tests.")]) + +def sha_checker(configuration: dict[str, yaml]) -> tuple[float, list[Exception | Warning]]: + # get file of which the sha is to be calculated + file = configuration.get("binary", None) + # test input on validitiy + if file is None: + return (1.0, [Warning("No files to check the SHA-value for; assuming that everything is in order.")]) + elif not isinstance(file, str): + # type-errors are not tolerated + raise TypeError("The value of \"binary\" must be a string") + # get the expected sha + expected_sha = configuration.get("sha", None) + # test input on validitiy + if expected_sha is None: + return (1.0, [Warning("No expected SHA-value transmitted; assuming everything is in order.")]) + try: expected_sha = str(expected_sha) + except: raise TypeError("Can't convert the value of \"sha\" to a string.") + score = 0.0 + exceptions = [] + try: + my_sha = hashlib.sha256(open(file,"rb").read()).hexdigest() + score = 1.0 if str(my_sha) 
== expected_sha else 0.0 + except: + exceptions.append(RuntimeError(f"Can't calculate the SHA-value of {file}")) + return (score, exceptions) + +def check_issues(configuration: dict[str, yaml]) -> tuple[float, list[Exception | Warning]]: + from datetime import datetime, timezone + # get relevant release date + release_date = configuration.get("release_date",None) + if release_date is None: + return (0.0, [RuntimeError("The release date of the most recent version of nlohmann/json is not specified.")]) + else: + try: + release_time = datetime.strptime(release_date,"%Y-%m-%dT%H:%M:%SZ").replace(tzinfo=timezone.utc).timestamp() + except: + return(0.0, [RuntimeError("The format of the release date is to be %Y-%m-%dT%H:%M:%SZ")]) + # get path to static list of misbehaviours + raw_known_misbehaviours = configuration.get("list_of_known_misbehaviours",None) + # parse list of inapplicable misbehaviours + inapplicable_misbehaviours = [] + if raw_known_misbehaviours is not None: + try: + # open the list of known misbehaviours + with open(raw_known_misbehaviours) as f: + lines = f.readlines() + except: + # if list can not be opened, assume that there is no list + lines = [] + # parse list of known misbehaviours + for line in lines: + entries = line.split('|') + try: + id = int(entries[0]) + except ValueError: + continue + if len(entries)>1 and entries[1].strip().upper()=="NO": + inapplicable_misbehaviours.append(id) + # parse raw list of open misbehaviours + try: + with open("raw_open_issues.json") as list_1: + all_open_issues = json.load(list_1) + relevant_open_issues = [all_open_issues[i].get("number",None) + for i in range(0,len(all_open_issues)) + if len(all_open_issues[i].get("labels",[]))!=0 + and (all_open_issues[i].get("labels"))[0].get("name") == "kind: bug" + ] + except: + return(0.0, [RuntimeError("The list of open issues could not be extracted.")]) + for issue in relevant_open_issues: + if issue not in inapplicable_misbehaviours and issue is not None: + return(0.0, 
[]) + # parse raw list of closed misbehaviours + try: + with open("raw_closed_issues.json") as list_2: + all_closed_issues = json.load(list_2) + relevant_closed_issues = [all_closed_issues[i].get("number",None) + for i in range(0,len(all_closed_issues)) + if len(all_closed_issues[i].get("labels",[]))!=0 + and (all_closed_issues[i].get("labels"))[0].get("name") == "kind: bug" + and datetime.strptime(all_closed_issues[i].get("createdAt","2000-01-01T00:00:00Z"),"%Y-%m-%dT%H:%M:%SZ") + .replace(tzinfo=timezone.utc) + .timestamp() + >=release_time + ] + except: + return(0.0, [RuntimeError("The list of closed issues could not be extracted.")]) + for issue in relevant_closed_issues: + if issue not in inapplicable_misbehaviours and issue is not None: + return(0.0, []) + # If you are here, then there are no applicable misbehaviours. + return (1.0, []) + +def did_workflows_fail(configuration: dict[str, yaml]) -> tuple[float, list[Exception | Warning]]: + owner = configuration.get("owner",None) + if owner is None: + return (0.0, [RuntimeError("The owner is not specified in the configuration of did_workflows_fail.")]) + repo = configuration.get("repo",None) + if repo is None: + return (0.0, [RuntimeError("The repository is not specified in the configuration of did_workflows_fail.")]) + event = configuration.get("event","push") + url = f"https://github.com/{owner}/{repo}/actions?query=event%3A{event}+is%3Afailure" + branch = configuration.get("branch",None) + if branch is not None: + url += f"+branch%3A{branch}" + + try: + res = requests.get(url, timeout=30) # Add timeout to prevent hanging + except requests.exceptions.ConnectionError as e: + return (0.0, [RuntimeError(f"Connection error when accessing {url}: {e}")]) + except requests.exceptions.Timeout as e: + return (0.0, [RuntimeError(f"Timeout error when accessing {url}: {e}")]) + except requests.exceptions.RequestException as e: + return (0.0, [RuntimeError(f"Request error when accessing {url}: {e}")]) + + if 
res.status_code != 200: + return (0.0, [RuntimeError(f"The website {url} can not be successfully reached! Status code: {res.status_code}")]) + m = re.search(r'(\d+)\s+workflow run results', res.text, flags=re.I) + if m is None: + return (0.0, [RuntimeError("The number of failed workflows can not be found.")]) + if m.group(1).strip() != "0": + return (0.0, [Warning("There are failed workflows!")]) + return (1.0, []) + +def coveralls_reporter(configuration: dict[str, yaml]) -> tuple[float, list[Exception | Warning]]: + owner = configuration.get("owner",None) + if owner is None: + return (0.0, [ValueError("The owner needs to be specified in the configuration for coveralls_reporter.")]) + repo = configuration.get("repo",None) + if repo is None: + return (0.0, [ValueError("The repository needs to be specified in the configuration for coveralls_reporter.")]) + branch = configuration.get("branch",None) + if branch is not None: + url = f"coveralls.io/github/{owner}/{repo}?branch={branch}.json" + else: + url = f"coveralls.io/github/{owner}/{repo}.json" + res = requests.get(url) + if res.status_code != 200: + return (0.0, [RuntimeError(f"Can not reach {url} to fetch the code coverage!")]) + res = json.loads(res.text) + try: + covered_lines = int(res.get("covered_lines","0")) + relevant_lines = int(res.get("relevant_lines","1")) + except ValueError: + return (0.0, [RuntimeError("Critical error in the coveralls api: Expecting integer values for lines!")]) + try: + expected_line_coverage = float(configuration.get("line_coverage","0.0")) + except ValueError: + return (0.0, [ValueError("line_coverage needs to be a floating point value!")]) + try: + digits = int(configuration.get("significant_decimal_digits","3")) + except ValueError: + return (0.0, [ValueError("significant_decimal_digits needs to be an integer value!")]) + if round(expected_line_coverage, digits) != round(covered_lines/relevant_lines * 100, digits): + return (0.0, [Warning("The line coverage has changed!")]) + 
try: + covered_branches = int(res.get("covered_branches","0")) + relevant_branches = int(res.get("relevant_branches","1")) + except ValueError: + return (0.0, [RuntimeError("Critical error in the coveralls api: Expecting integer values for branches!")]) + try: + expected_branch_coverage = float(configuration.get("branch_coverage","0.0")) + except ValueError: + return (0.0, [ValueError("branch_coverage needs to be a floating point value!")]) + if round(expected_branch_coverage, digits) != round(covered_branches/relevant_branches * 100, digits): + return (0.0, [Warning("The branch coverage has changed!")]) + return (1.0, []) + + + +def combinator(configuration: dict[str, yaml]) -> tuple[float, list[Exception | Warning]]: + validators = configuration.get("validators",None) + if validators is None: + return (1.0, [Warning("No validators were given, returning the void-validator.")]) + elif not isinstance(validators,list): + return (0.0, [TypeError("The list of validators must be given as list.")]) + scores = [] + exceptions = [] + weights = [] + for validator in validators: + # fetch configuration + validator_configuration = validator.get("configuration", None) + if not isinstance(validator_configuration,dict[str, yaml]): + return (0.0, [TypeError("Validator configuration must be an object.")]) + # fetch weight + weight = float(validator.get("weight",1.0)) + if weight<0: + return (0.0, [TypeError("Validator weights must be non-negative.")]) + weights.append(weight) + # fetch type + validator_type = validator.get("type", None) + if validator_type is None: + return (0.0, [TypeError("Missing validator type declaration.")]) + # execute validator + if validator_type == "check_artifact_exists": + validator_score, validator_errors = check_artifact_exists(validator_configuration) + scores.append(validator_score) + exceptions.extend(validator_errors) + elif validator_type == "https_response_time": + validator_score, validator_errors = https_response_time(validator_configuration) 
+ scores.append(validator_score) + exceptions.extend(validator_errors) + elif validator_type == "check_test_results": + validator_score, validator_errors = check_test_results(validator_configuration) + scores.append(validator_score) + exceptions.extend(validator_errors) + elif validator_type == "file_exists": + validator_score, validator_errors = file_exists(validator_configuration) + scores.append(validator_score) + exceptions.extend(validator_errors) + elif validator_type == "sha_checker": + validator_score, validator_errors = sha_checker(validator_configuration) + scores.append(validator_score) + exceptions.extend(validator_errors) + elif validator_type == "check_issues": + validator_score, validator_errors = check_issues(validator_configuration) + scores.append(validator_score) + exceptions.extend(validator_errors) + elif validator_type == "did_workflows_fail": + validator_score, validator_errors = did_workflows_fail(validator_configuration) + scores.append(validator_score) + exceptions.extend(validator_errors) + elif validator_type == "coveralls_reporter": + validator_score, validator_errors = coveralls_reporter(validator_configuration) + scores.append(validator_score) + exceptions.extend(validator_errors) + if sum(weights) == 0.0: + return (0.0, exceptions) + else: + return (sum(list(map(lambda x,y: x*y, scores, weights)))/sum(weights),exceptions) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index e1e1040fc6..18402e515e 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,6 +1,7 @@ # JSON for Modern C++ was originally written by Niels Lohmann. -# Since 2013, over 250 contributors have helped to improve the library. -# This CODEOWNERS file is only to make sure that @nlohmann is requested -# for a code review in case of a pull request. -* @nlohmann +# GitHub CODEOWNERS file is a simple way to automate review system on github, +# by automatically assigning owners to a pull request based on which files +# were modified. 
All directories should have a proper codeowner +# Syntax: https://help.github.com/articles/about-codeowners/ + diff --git a/.github/labels_bulk.json b/.github/labels_bulk.json new file mode 100644 index 0000000000..033c50dfc7 --- /dev/null +++ b/.github/labels_bulk.json @@ -0,0 +1,217 @@ +[ + { + "color": "fc2929", + "description": "", + "name": "kind: bug" + }, + { + "color": "207de5", + "description": "", + "name": "kind: enhancement/improvement" + }, + { + "color": "cc317c", + "description": "", + "name": "kind: question" + }, + { + "color": "cccccc", + "description": "the issue is a duplicate; refer to the linked issue instead", + "name": "solution: duplicate" + }, + { + "color": "e6e6e6", + "description": "the issue is not related to the library", + "name": "solution: invalid" + }, + { + "color": "000000", + "description": "the issue will not be fixed (either it is impossible or deemed out of scope)", + "name": "solution: wontfix" + }, + { + "color": "e11d21", + "description": "", + "name": "confirmed" + }, + { + "color": "eb6420", + "description": "related to MSVC", + "name": "platform: visual studio" + }, + { + "color": "fbca04", + "description": "please discuss the issue or vote for your favorite option", + "name": "state: please discuss" + }, + { + "color": "fbca04", + "description": "the issue needs help to proceed", + "name": "state: help needed" + }, + { + "color": "eb6420", + "description": "related to Android NDK", + "name": "platform: android" + }, + { + "color": "0e8a16", + "description": "", + "name": "documentation" + }, + { + "color": "eb6420", + "description": "related to MinGW", + "name": "platform: mingw" + }, + { + "color": "00bb00", + "description": "a fix for the issue has been proposed and waits for confirmation", + "name": "solution: proposed fix" + }, + { + "color": "5319e7", + "description": "BSON, CBOR, MessagePack, UBJSON", + "name": "aspect: binary formats" + }, + { + "color": "eb6420", + "description": "related to Intel compiler", + 
"name": "platform: icc" + }, + { + "color": "eb6420", + "description": "related to ARM architecture", + "name": "platform: arm" + }, + { + "color": "999999", + "description": "the issue has not been updated in a while and will be closed automatically soon unless it is updated", + "name": "state: stale" + }, + { + "color": "f9d0c4", + "description": "the author of the issue needs to provide more details", + "name": "state: needs more info" + }, + { + "color": "FFFFFF", + "description": "", + "name": "pinned" + }, + { + "color": "d4c5f9", + "description": "", + "name": "good first issue" + }, + { + "color": "444444", + "description": "", + "name": "release item: :sparkles: new feature" + }, + { + "color": "444444", + "description": "", + "name": "release item: :bug: bug fix" + }, + { + "color": "444444", + "description": "", + "name": "release item: :zap: improvement" + }, + { + "color": "444444", + "description": "", + "name": "release item: :hammer: further change" + }, + { + "color": "444444", + "description": "", + "name": "release item: :fire: deprecation" + }, + { + "color": "d665f2", + "description": "", + "name": "state: waiting for PR" + }, + { + "color": "d1260f", + "description": "Security vulnerability detected by WhiteSource", + "name": "security vulnerability" + }, + { + "color": "ed689f", + "description": "", + "name": "spam" + }, + { + "color": "D4C5F9", + "description": "It would be great if someone could review the proposed changes.", + "name": "review needed" + }, + { + "color": "006B75", + "description": "Please rebase your branch to origin/develop", + "name": "please rebase" + }, + { + "color": "0C6E99", + "description": "", + "name": "state: blocked" + }, + { + "color": "BFD4F2", + "description": "", + "name": "S" + }, + { + "color": "BFD4F2", + "description": "", + "name": "M" + }, + { + "color": "BFD4F2", + "description": "", + "name": "L" + }, + { + "color": "93E0B0", + "description": "", + "name": "CI" + }, + { + "color": "5A8E44", + 
"description": "", + "name": "tests" + }, + { + "color": "563241", + "description": "", + "name": "CMake" + }, + { + "color": "0366d6", + "description": "Pull requests that update a dependency file", + "name": "dependencies" + }, + { + "color": "444444", + "description": "A feature that should be discussed for the next major release", + "name": "🔮 V4 candidate" + }, + { + "color": "000000", + "description": "Pull requests that update GitHub Actions code", + "name": "github_actions" + }, + { + "color": "2b67c6", + "description": "Pull requests that update Python code", + "name": "python" + }, + { + "color": "623B7E", + "description": "we cannot reproduce the issue", + "name": "state: cannot reproduce" + } +] \ No newline at end of file diff --git a/.github/requirements.in b/.github/requirements.in new file mode 100644 index 0000000000..8fe024885e --- /dev/null +++ b/.github/requirements.in @@ -0,0 +1,4 @@ +pytest==8.4.1 +colorama>=0.4 +exceptiongroup>=1 +tomli>=1 \ No newline at end of file diff --git a/.github/requirements.txt b/.github/requirements.txt new file mode 100644 index 0000000000..7290b80e4d --- /dev/null +++ b/.github/requirements.txt @@ -0,0 +1,72 @@ +# +# This file is autogenerated by pip-compile with Python 3.11 +# by the following command: +# +# pip-compile --generate-hashes /workspaces/json/.github/requirements.in +# +colorama==0.4.6 \ + --hash=sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44 \ + --hash=sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6 + # via -r /workspaces/json/.github/requirements.in +exceptiongroup==1.3.0 \ + --hash=sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10 \ + --hash=sha256:b241f5885f560bc56a59ee63ca4c6a8bfa46ae4ad651af316d4e81817bb9fd88 + # via -r /workspaces/json/.github/requirements.in +iniconfig==2.1.0 \ + --hash=sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7 \ + 
--hash=sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760 + # via pytest +packaging==25.0 \ + --hash=sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484 \ + --hash=sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f + # via pytest +pluggy==1.6.0 \ + --hash=sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3 \ + --hash=sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746 + # via pytest +pygments==2.19.2 \ + --hash=sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887 \ + --hash=sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b + # via pytest +pytest==8.4.1 \ + --hash=sha256:539c70ba6fcead8e78eebbf1115e8b589e7565830d7d006a8723f19ac8a0afb7 \ + --hash=sha256:7c67fd69174877359ed9371ec3af8a3d2b04741818c51e5e99cc1742251fa93c + # via -r /workspaces/json/.github/requirements.in +tomli==2.2.1 \ + --hash=sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6 \ + --hash=sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd \ + --hash=sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c \ + --hash=sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b \ + --hash=sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8 \ + --hash=sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6 \ + --hash=sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77 \ + --hash=sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff \ + --hash=sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea \ + --hash=sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192 \ + --hash=sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249 \ + --hash=sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee \ + 
--hash=sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4 \ + --hash=sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98 \ + --hash=sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8 \ + --hash=sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4 \ + --hash=sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281 \ + --hash=sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744 \ + --hash=sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69 \ + --hash=sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13 \ + --hash=sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140 \ + --hash=sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e \ + --hash=sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e \ + --hash=sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc \ + --hash=sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff \ + --hash=sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec \ + --hash=sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2 \ + --hash=sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222 \ + --hash=sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106 \ + --hash=sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272 \ + --hash=sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a \ + --hash=sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7 + # via -r /workspaces/json/.github/requirements.in +typing-extensions==4.15.0 \ + --hash=sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466 \ + --hash=sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548 + # via exceptiongroup diff --git a/.github/workflows/SME_review_checker.yml 
b/.github/workflows/SME_review_checker.yml new file mode 100644 index 0000000000..49d7f620e7 --- /dev/null +++ b/.github/workflows/SME_review_checker.yml @@ -0,0 +1,96 @@ +name: SME Review Checker + +on: + workflow_call: + inputs: + artifact_id: + description: 'Unique identifier for artifacts' + required: true + type: string + +permissions: + contents: read + pull-requests: read + +jobs: + check-SME-review: + runs-on: ubuntu-latest + + steps: + - name: Harden Runner + uses: step-security/harden-runner@c6295a65d1254861815972266d5933fd6e532bdf # v2.11.1 + with: + egress-policy: audit + + - name: Checkout repository + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + fetch-depth: 0 + + - name: Get changed files + id: changed-files + run: | + # Get the base branch + BASE_BRANCH="${{ github.event.pull_request.base.ref || 'main' }}" + + # Get all changed files in the PR + CHANGED_FILES=$(git diff --name-only origin/$BASE_BRANCH...HEAD) + + # Save changed files to output + echo "files<<EOF" >> $GITHUB_OUTPUT + echo "$CHANGED_FILES" >> $GITHUB_OUTPUT + echo "EOF" >> $GITHUB_OUTPUT + + - name: Check if supported statements are scored + run: | + # Read the changed files + CHANGED_FILES="${{ steps.changed-files.outputs.files }}" + + # Process each changed file + while IFS= read -r file; do + # Skip empty lines + if [[ -z "$file" ]]; then + continue + fi + + echo "Checking file: $file" + + # Check if file is in TSF/trustable folder and ends with .md + if [[ "$file" == TSF/trustable/* && "$file" == *.md ]]; then + # Extract filename without path and extension + filename=$(basename "$file" .md) + + # Skip README files + if [[ "$filename" == "README" ]]; then + continue + fi + + echo "Checking TSF trustable file: $file (filename: $filename)" + + # Check if filename pattern exists in .dotstop.dot + if grep -q "\"$filename\" -> " .dotstop.dot; then + echo " Found reference in .dotstop.dot for: $filename" + + # Check if the file contains "score:" substring
+ if [[ -f "$file" ]] && grep -q "score:" "$file"; then + echo "ERROR: $file - Error: supported statements shall not be scored" + exit 1 + fi + else + echo "No reference found in .dotstop.dot for: $filename" + fi + fi + done <<< "$CHANGED_FILES" + + echo "All changed TSF items passed validation" + + - name: Generate artifact + run: | + mkdir -p SME_review_checker + echo "SME review checker processed for ${{ inputs.artifact_id }}" > SME_review_checker/SME_review_checker.txt + + - name: Upload SME review checker artifact + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: ${{ inputs.artifact_id }} + path: SME_review_checker/ diff --git a/.github/workflows/check_amalgamation.yml b/.github/workflows/check_amalgamation.yml index 906a5c5b72..b4b6174c06 100644 --- a/.github/workflows/check_amalgamation.yml +++ b/.github/workflows/check_amalgamation.yml @@ -1,7 +1,12 @@ name: "Check amalgamation" on: - pull_request: + workflow_call: + inputs: + artifact_id: + description: 'Unique identifier for artifacts' + required: true + type: string permissions: contents: read @@ -48,7 +53,7 @@ jobs: uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: path: tools - ref: develop + ref: main - name: Install astyle run: | @@ -74,3 +79,15 @@ jobs: ${{ github.workspace }}/venv/bin/astyle --project=tools/astyle/.astylerc --suffix=orig $(find docs/examples include tests -type f \( -name '*.hpp' -o -name '*.cpp' -o -name '*.cu' \) -not -path 'tests/thirdparty/*' -not -path 'tests/abi/include/nlohmann/*' | sort) echo Check find $MAIN_DIR -name '*.orig' -exec false {} \+ + + - name: Generate amalgamation artifact + run: | + echo "Generating amalgamation artifact..." 
+ mkdir -p check_amalgamation + echo "Amalgamation processed for ${{ inputs.artifact_id }}" > check_amalgamation/check_amalgamation.txt + + - name: Upload amalgamation artifact + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: ${{ inputs.artifact_id }} + path: check_amalgamation/ \ No newline at end of file diff --git a/.github/workflows/cifuzz.yml b/.github/workflows/cifuzz.yml deleted file mode 100644 index 256d4986db..0000000000 --- a/.github/workflows/cifuzz.yml +++ /dev/null @@ -1,35 +0,0 @@ -name: CIFuzz -on: [pull_request] - -permissions: - contents: read - -jobs: - Fuzzing: - runs-on: ubuntu-latest - steps: - - name: Harden Runner - uses: step-security/harden-runner@c6295a65d1254861815972266d5933fd6e532bdf # v2.11.1 - with: - egress-policy: audit - - - name: Build Fuzzers - id: build - uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@57fe4475324c5506adbfecdcdd2917f65c86ee9e # master - with: - oss-fuzz-project-name: 'json' - dry-run: false - language: c++ - - name: Run Fuzzers - uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@57fe4475324c5506adbfecdcdd2917f65c86ee9e # master - with: - oss-fuzz-project-name: 'json' - fuzz-seconds: 300 - dry-run: false - language: c++ - - name: Upload Crash - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 - if: failure() && steps.build.outcome == 'success' - with: - name: artifacts - path: ./out/artifacts diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index b7af3212bd..356dd3efc9 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -1,15 +1,12 @@ name: "Code scanning - action" on: - push: - branches: - - develop - - master - - release/* - pull_request: - schedule: - - cron: '0 19 * * 1' - workflow_dispatch: + workflow_call: + inputs: + artifact_id: + description: 'Unique identifier for artifacts' + required: true + type: string concurrency: 
group: ${{ github.workflow }}-${{ github.ref || github.run_id }} @@ -17,7 +14,7 @@ concurrency: permissions: contents: read - + jobs: CodeQL-Build: @@ -47,3 +44,15 @@ jobs: - name: Perform CodeQL Analysis uses: github/codeql-action/analyze@fc7e4a0fa01c3cca5fd6a1fddec5c0740c977aa2 # v3.28.14 + + - name: Generate codeql artifact + run: | + echo "Generating codeql artifact..." + mkdir -p codeql + echo "codeql processed for ${{ inputs.artifact_id }}" > codeql/codeql.txt + + - name: Upload codeql artifact + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: ${{ inputs.artifact_id }} + path: codeql/ diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index 70f08cee36..ed2302a095 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -7,7 +7,14 @@ # # Source repository: https://github.com/actions/dependency-review-action name: 'Dependency Review' -on: [pull_request] + +on: + workflow_call: + inputs: + artifact_id: + description: 'Unique identifier for artifacts' + required: true + type: string permissions: contents: read @@ -25,3 +32,16 @@ jobs: uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: 'Dependency Review' uses: actions/dependency-review-action@ce3cf9537a52e8119d91fd484ab5b8a807627bf8 # v4.6.0 + + + - name: Generate dependency_review artifact + run: | + echo "Generating Dependency Review artifact..." 
+ mkdir -p dependency_review + echo "dependency review processed for ${{ inputs.artifact_id }}" > dependency_review/dependency_review.txt + + - name: Upload dependency_review artifact + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: ${{ inputs.artifact_id }} + path: dependency_review/ diff --git a/.github/workflows/docs-cleanup.yml b/.github/workflows/docs-cleanup.yml new file mode 100644 index 0000000000..08a1835ee8 --- /dev/null +++ b/.github/workflows/docs-cleanup.yml @@ -0,0 +1,31 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +name: Documentation Cleanup + +permissions: + contents: read + +on: + schedule: + - cron: '0 0 * * *' # Runs every day at midnight UTC + +jobs: + docs-cleanup: + permissions: + contents: write + pages: write + id-token: write + uses: eclipse-score/cicd-workflows/.github/workflows/docs-cleanup.yml@a370c9723b2e935e940dcf0f0d8981f7aeb3b33f # main + secrets: + token: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml new file mode 100644 index 0000000000..04b35074ad --- /dev/null +++ b/.github/workflows/docs.yml @@ -0,0 +1,178 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +# this workflow is copied from eclipse-score/cicd-workflows/.github/workflows/publish_documentation.yml +# and modified to download the trudag report artifact before building the documentation + +name: Documentation Build + +on: + workflow_call: + inputs: + retention-days: + description: "Number of days to retain the artifact" + required: false + default: 1 + type: number + workflow-version: + description: "Version or branch to checkout" + required: false + default: "main" + type: string + bazel-target: + description: "The Bazel target to build (e.g., //docs:github_pages)" + required: false + type: string + default: "//docs:github_pages" + deployment_type: + description: "Type of deployment: legacy or workflow" + type: string + required: false + default: "workflow" + +permissions: + contents: read + +jobs: + docs-build: + name: Build Documentation + runs-on: ${{ vars.REPO_RUNNER_LABELS && fromJSON(vars.REPO_RUNNER_LABELS) || 'ubuntu-latest' }} + permissions: + pull-requests: write + steps: + - name: Checkout repository (Handle all events) + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + ref: ${{ github.head_ref || github.event.pull_request.head.ref || github.ref }} + repository: ${{ github.event.pull_request.head.repo.full_name || github.repository }} + + - name: Checkout action + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + # current repo name + repository: eclipse-score/cicd-workflows + ref: ${{ inputs.workflow-version }} + path: ./cicd-workflows + + - name: Setup Bazel with shared caching + uses: bazel-contrib/setup-bazel@e8776f58fb6a6e9055cbaf1b38c52ccc5247e9c4 # 0.14.0 + 
with: + disk-cache: true + repository-cache: true + bazelisk-cache: true + + - name: Install Graphviz + uses: eclipse-score/apt-install@bd30e2e74a4850389719cb8c3e312bb26aada4e0 # main + with: + packages: graphviz + cache: false + + # only this step is different from the original workflow + - name: Download trudag report artifact + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 + with: + name: trudag-report-${{ github.event.pull_request.head.sha || github.sha }} + path: TSF/docs/generated/ + + - name: Build documentation + run: | + bazel run ${{ inputs.bazel-target }} + tar -cf github-pages.tar _build + + - name: Upload documentation artifact + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: github-pages-${{ github.event.pull_request.head.sha || github.sha }} + path: github-pages.tar + retention-days: ${{ inputs.retention-days }} + if-no-files-found: error + + docs-deploy: + name: Deploy Documentation to GitHub Pages + runs-on: ${{ vars.REPO_RUNNER_LABELS && fromJSON(vars.REPO_RUNNER_LABELS) || 'ubuntu-latest' }} + needs: docs-build + permissions: + pages: write + id-token: write + contents: write + pull-requests: write + steps: + - name: Ensure gh-pages branch exists with initial files + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + REPO: ${{ github.repository }} + run: | + set -e + if ! git ls-remote --exit-code --heads "https://x-access-token:${GH_TOKEN}@github.com/${REPO}.git" gh-pages; then + echo "gh-pages branch does not exist. Creating it..." 
+ git clone --depth=1 "https://x-access-token:${GH_TOKEN}@github.com/${REPO}.git" repo + cd repo + git fetch origin main --depth=1 + AUTHOR_NAME=$(git log origin/main -1 --pretty=format:'%an') + AUTHOR_EMAIL=$(git log origin/main -1 --pretty=format:'%ae') + git config user.name "$AUTHOR_NAME" + git config user.email "$AUTHOR_EMAIL" + echo "Using commit identity: $AUTHOR_NAME <$AUTHOR_EMAIL>" + + git checkout --orphan gh-pages + git rm -rf . || true + REPO_NAME=$(basename "${REPO}") + OWNER_NAME="${REPO%%/*}" + + touch versions.json + echo "[" > versions.json + echo " {" >> versions.json + echo " \"version\": \"main\"," >> versions.json + echo " \"url\": \"https://${OWNER_NAME}.github.io/${REPO_NAME}/main/\"" >> versions.json + echo " }" >> versions.json + echo "]" >> versions.json + + touch index.html + echo '' > index.html + echo '' >> index.html + echo '' >> index.html + echo ' ' >> index.html + echo ' ' >> index.html + echo ' Redirecting...' >> index.html + echo '' >> index.html + echo '' >> index.html + echo '
<p>
  If you are not redirected, <a href="main/">click here</a>.
</p>
' >> index.html + echo '' >> index.html + echo '' >> index.html + + touch .nojekyll + git add versions.json index.html .nojekyll + git commit -m "Initialize gh-pages branch with versions.json and index.html" + git push origin gh-pages + cd .. + rm -rf repo + else + echo "gh-pages branch exists. Skipping creation." + fi + - name: Checkout repository + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Download documentation artifact + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 + with: + name: github-pages-${{ github.event.pull_request.head.sha || github.sha }} + + - name: Untar documentation artifact + run: mkdir -p extracted_docs && tar -xf github-pages.tar -C extracted_docs + + - name: Deploy 🚀 + id: pages-deployment + uses: eclipse-score/cicd-workflows/.github/actions/deploy-versioned-pages@a370c9723b2e935e940dcf0f0d8981f7aeb3b33f # main + with: + source_folder: extracted_docs/_build + deployment_type: ${{ inputs.deployment_type }} \ No newline at end of file diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml index a71bd0f495..ea5a48babc 100644 --- a/.github/workflows/labeler.yml +++ b/.github/workflows/labeler.yml @@ -1,13 +1,63 @@ name: "Pull Request Labeler" on: - pull_request_target: - types: [opened, synchronize] + workflow_call: + inputs: + artifact_id: + description: 'Unique identifier for artifacts' + required: true + type: string permissions: contents: read - + jobs: + clone_missing_labels: + permissions: + contents: read + pull-requests: write + runs-on: ubuntu-latest + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + steps: + - name: Checkout action + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Add labels from labels_bulk.json + run: | + # Path to the static JSON file + LABELS_FILE=".github/labels_bulk.json" + + # Determine the current repository (target repository) + TARGET_REPO="${GITHUB_REPOSITORY}" + + echo 
"Cloning labels from ${LABELS_FILE} to $TARGET_REPO..." + + # Load labels from the static JSON file + SOURCE_LABELS=$(cat "$LABELS_FILE") + + # Fetch labels from the target repository for comparison + TARGET_LABELS=$(gh label list --repo "$TARGET_REPO" --json name --limit 1000) + + # Loop through all labels in the static JSON file + echo "$SOURCE_LABELS" | jq -c '.[]' | while read -r label; do + LABEL_NAME=$(echo "$label" | jq -r '.name') + LABEL_COLOR=$(echo "$label" | jq -r '.color') + LABEL_DESCRIPTION=$(echo "$label" | jq -r '.description') + + # Check if the label already exists in the target repository + if ! echo "$TARGET_LABELS" | jq -e --arg NAME "$LABEL_NAME" '.[] | select(.name == $NAME)' > /dev/null; then + # Create the label if it doesn't exist + echo "Creating label: $LABEL_NAME..." + gh label create "$LABEL_NAME" --repo "$TARGET_REPO" --force --color "$LABEL_COLOR" --description "$LABEL_DESCRIPTION" + else + echo "Label '$LABEL_NAME' already exists in $TARGET_REPO. Skipping..." + fi + done + + echo "All labels cloned successfully!" + label: permissions: contents: read @@ -24,3 +74,15 @@ jobs: - uses: srvaroa/labeler@e216fb40e2e6d3b17d90fb1d950f98bee92f65ce # master env: GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" + + - name: Generate label artifact + run: | + echo "Generating label artifact..." 
+ mkdir -p labeler + echo "Labels processed for ${{ inputs.artifact_id }}" > labeler/labeler.txt + + - name: Upload label artifact + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: ${{ inputs.artifact_id }} + path: labeler/ \ No newline at end of file diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml deleted file mode 100644 index dc99a2dde1..0000000000 --- a/.github/workflows/macos.yml +++ /dev/null @@ -1,120 +0,0 @@ -name: macOS - -on: - push: - branches: - - develop - - master - - release/* - pull_request: - workflow_dispatch: - -concurrency: - group: ${{ github.workflow }}-${{ github.ref || github.run_id }} - cancel-in-progress: true - -permissions: - contents: read - -jobs: -# macos-11 is deprecated -# macos-11: -# runs-on: macos-11 -# strategy: -# matrix: -# xcode: ['11.7', '12.4', '12.5.1', '13.0'] -# env: -# DEVELOPER_DIR: /Applications/Xcode_${{ matrix.xcode }}.app/Contents/Developer -# -# steps: -# - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 -# - name: Run CMake -# run: cmake -S . -B build -D CMAKE_BUILD_TYPE=Debug -DJSON_BuildTests=On -DJSON_FastTests=ON -# - name: Build -# run: cmake --build build --parallel 10 -# - name: Test -# run: cd build ; ctest -j 10 --output-on-failure - -# macos-12 is deprecated (https://github.com/actions/runner-images/issues/10721) -# macos-12: -# runs-on: macos-12 # https://github.com/actions/runner-images/blob/main/images/macos/macos-12-Readme.md -# strategy: -# matrix: -# xcode: ['13.1', '13.2.1', '13.3.1', '13.4.1', '14.0', '14.0.1', '14.1'] -# env: -# DEVELOPER_DIR: /Applications/Xcode_${{ matrix.xcode }}.app/Contents/Developer -# -# steps: -# - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 -# - name: Run CMake -# run: cmake -S . 
-B build -D CMAKE_BUILD_TYPE=Debug -DJSON_BuildTests=On -DJSON_FastTests=ON -# - name: Build -# run: cmake --build build --parallel 10 -# - name: Test -# run: cd build ; ctest -j 10 --output-on-failure - - macos-13: - runs-on: macos-13 # https://github.com/actions/runner-images/blob/main/images/macos/macos-13-Readme.md - strategy: - matrix: - xcode: ['14.1', '14.2', '14.3', '14.3.1', '15.0.1', '15.1', '15.2'] - env: - DEVELOPER_DIR: /Applications/Xcode_${{ matrix.xcode }}.app/Contents/Developer - - steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - name: Run CMake - run: cmake -S . -B build -D CMAKE_BUILD_TYPE=Debug -DJSON_BuildTests=On -DJSON_FastTests=ON - - name: Build - run: cmake --build build --parallel 10 - - name: Test - run: cd build ; ctest -j 10 --output-on-failure - - macos-14: - runs-on: macos-14 # https://github.com/actions/runner-images/blob/main/images/macos/macos-14-Readme.md - strategy: - matrix: - xcode: ['15.3', '15.4'] - env: - DEVELOPER_DIR: /Applications/Xcode_${{ matrix.xcode }}.app/Contents/Developer - - steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - name: Run CMake - run: cmake -S . -B build -D CMAKE_BUILD_TYPE=Debug -DJSON_BuildTests=On -DJSON_FastTests=ON - - name: Build - run: cmake --build build --parallel 10 - - name: Test - run: cd build ; ctest -j 10 --output-on-failure - - macos-15: - runs-on: macos-15 # https://github.com/actions/runner-images/blob/main/images/macos/macos-15-Readme.md - strategy: - matrix: - xcode: ['16.0', '16.1', '16.2'] - env: - DEVELOPER_DIR: /Applications/Xcode_${{ matrix.xcode }}.app/Contents/Developer - - steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - name: Run CMake - run: cmake -S . 
-B build -D CMAKE_BUILD_TYPE=Debug -DJSON_BuildTests=On -DJSON_FastTests=ON - - name: Build - run: cmake --build build --parallel 10 - - name: Test - run: cd build ; ctest -j 10 --output-on-failure - - xcode_standards: - runs-on: macos-latest - strategy: - matrix: - standard: [11, 14, 17, 20, 23, 26] - - steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - name: Run CMake - run: cmake -S . -B build -D CMAKE_BUILD_TYPE=Debug -DJSON_BuildTests=On -DJSON_TestStandards=${{ matrix.standard }} - - name: Build - run: cmake --build build --parallel 10 - - name: Test - run: cd build ; ctest -j 10 --output-on-failure diff --git a/.github/workflows/parent-workflow.yml b/.github/workflows/parent-workflow.yml new file mode 100644 index 0000000000..775d88e6c0 --- /dev/null +++ b/.github/workflows/parent-workflow.yml @@ -0,0 +1,165 @@ +name: Parent Workflow + +on: + pull_request: + push: + branches: + - main + schedule: + - cron: '0 0 * * *' # Runs daily at midnight UTC + +permissions: + contents: read + +jobs: + labeler: + permissions: + contents: read + pages: write + pull-requests: write + name: Run Labeler Workflow + uses: ./.github/workflows/labeler.yml + with: + artifact_id: "labeler-${{ github.sha }}" + + SME_review_checker: + permissions: + contents: read + pull-requests: read + name: Run SME_review_checker Workflow + if: ${{ github.event_name == 'pull_request' }} # only run SME_review_checker for PRs + uses: ./.github/workflows/SME_review_checker.yml + with: + artifact_id: "SME_review_checker-${{ github.sha }}" + + check_amalgamation: + name: Run Amalgamation Workflow + if: ${{ github.event_name == 'pull_request' }} # only run check_amalgamation for PRs + uses: ./.github/workflows/check_amalgamation.yml + with: + artifact_id: "check_amalgamation-${{ github.sha }}" + + test_trudag_extensions: + name: Run Test Trudag Extensions Workflow + uses: ./.github/workflows/test_trudag_extensions.yml + with: + artifact_id: 
"test_trudag_extensions-${{ github.sha }}" + + codeql: + permissions: + contents: read + security-events: write + name: Run Codeql analysis Workflow + uses: ./.github/workflows/codeql-analysis.yml + with: + artifact_id: "codeql-${{ github.sha }}" + + ubuntu: + name: Run Ubuntu Workflow + permissions: + contents: write + needs: [codeql] # Error if CodeQL and Ubuntu triggered at the same time due to conflicting priorities + uses: ./.github/workflows/ubuntu.yml + with: + artifact_id: "ubuntu-${{ github.sha }}" + + dependency_review: + name: Run dependency_review Workflow + if: ${{ github.event_name == 'pull_request' }} # only run dependency_review for PRs + uses: ./.github/workflows/dependency-review.yml + with: + artifact_id: "dependency_review-${{ github.sha }}" + + collect_artifacts_pr: + name: "Collect Results & Deploy (PR)" + if: github.event_name == 'pull_request' + needs: [labeler, SME_review_checker, check_amalgamation, test_trudag_extensions, dependency_review, codeql, ubuntu] + runs-on: ubuntu-latest + strategy: + matrix: + target: [labeler, SME_review_checker, check_amalgamation, test_trudag_extensions, dependency_review, codeql, ubuntu] + + steps: + - name: Checkout code + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Check child workflow results + run: | + echo "=== Checking Child Workflow Results ===" + result="${{ needs[matrix.target].result }}" + echo "${{ matrix.target }} workflow result: $result" + + if [[ "$result" != "success" ]]; then + echo "❌ ${{ matrix.target }} workflow failed! Exiting..." + exit 1 + fi + echo "✅ Child workflows completed successfully!" 
+ env: + current_workflow: ${{ matrix.target }} + + - name: Download artifacts + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 + with: + name: "${{ matrix.target }}-${{ github.sha }}" + path: artifacts/ + + collect_artifacts_non_pr: + name: "Collect Results & Deploy (Non-PR)" + if: github.event_name != 'pull_request' + needs: [labeler, test_trudag_extensions, codeql, ubuntu] # no check_amalgamation, dependency_review or SME_review_checker if non PR + runs-on: ubuntu-latest + strategy: + matrix: + target: [labeler, test_trudag_extensions, codeql, ubuntu] + + steps: + - name: Checkout code + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Check child workflow results + run: | + echo "=== Checking Child Workflow Results ===" + result="${{ needs[matrix.target].result }}" + echo "${{ matrix.target }} workflow result: $result" + + if [[ "$result" != "success" ]]; then + echo "❌ ${{ matrix.target }} workflow failed! Exiting..." + exit 1 + fi + echo "✅ Child workflows completed successfully!" 
+ env: + current_workflow: ${{ matrix.target }} + + - name: Download artifacts + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 + with: + name: "${{ matrix.target }}-${{ github.sha }}" + path: artifacts/ + + test_publish_documentation: + permissions: + contents: write + pages: write + pull-requests: write + id-token: write + security-events: read + name: Test publish_documentation Workflow + if: github.event_name == 'pull_request' # Whenever on Pull-request, test publication + needs: [collect_artifacts_pr] + uses: ./.github/workflows/test_publication.yml + with: + artifact_id: "ubuntu-${{ github.sha }}" + + publish_documentation: + permissions: + contents: write + pages: write + pull-requests: write + id-token: write + security-events: read + name: Run publish_documentation Workflow + if: github.event_name == 'push' # Publish documentation should only run on push to main + needs: [collect_artifacts_non_pr] + uses: ./.github/workflows/publish_documentation.yml + with: + artifact_id: "ubuntu-${{ github.sha }}" diff --git a/.github/workflows/publish_documentation.yml b/.github/workflows/publish_documentation.yml index c0e85ab105..2fa89d4ac8 100644 --- a/.github/workflows/publish_documentation.yml +++ b/.github/workflows/publish_documentation.yml @@ -1,46 +1,180 @@ -name: Publish documentation +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +name: Documentation -# publish the documentation on every merge to develop branch on: - push: - branches: - - develop - paths: - - docs/mkdocs/** - - docs/examples/** - workflow_dispatch: - -# we don't want to have concurrent jobs, and we don't want to cancel running jobs to avoid broken publications -concurrency: - group: documentation - cancel-in-progress: false + workflow_call: + inputs: + artifact_id: + description: 'Unique identifier for artifacts to be loaded' + required: true + type: string permissions: contents: read jobs: - publish_documentation: + run-trudag: + runs-on: ubuntu-latest permissions: contents: write + pages: write + pull-requests: write + id-token: write - if: github.repository == 'nlohmann/json' - runs-on: ubuntu-22.04 steps: - - name: Harden Runner - uses: step-security/harden-runner@c6295a65d1254861815972266d5933fd6e532bdf # v2.11.1 + - name: Checkout repository + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Load ubuntu-artifact + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 with: - egress-policy: audit + name: ${{ inputs.artifact_id }} + path: artifacts/ + + - name: Set up Python + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + with: + python-version: '3.11.2' + + # trudag version 2025.8.5 + - name: Install trudag and dependencies + run: | + sudo apt-get update + sudo apt-get install -y graphviz + pip install git+https://gitlab.com/CodethinkLabs/trustable/trustable@9957f12171cb898d83df5ae708fdba0a38fece2e + + - name: Install tools + run: | + sudo apt-get update + sudo apt-get install -y jq + sudo apt install gh -y + + 
- name: Authenticate with GitHub CLI + run: | + echo "${{ secrets.GITHUB_TOKEN }}" | gh auth login --with-token + + - name: Get current branch name + id: get_branch + env: + GITHUB_EVENT_NAME: ${{ github.event_name }} + PR_BRANCH_NAME: ${{ github.event.pull_request.head.ref }} + GITHUB_REF_SAFE: ${{ github.ref }} + run: | + if [[ "$GITHUB_EVENT_NAME" == "pull_request" ]]; then + # Use the branch from the pull request safely via environment variable + current_branch="$PR_BRANCH_NAME" + else + # Remove the "refs/heads/" prefix from GITHUB_REF safely + current_branch="${GITHUB_REF_SAFE#refs/heads/}" + fi + + # Write the branch name to $GITHUB_ENV securely + echo "branch_name=${current_branch}" >> "$GITHUB_ENV" + + - name: Fetch data storage branch + run: | + git config user.name "github-actions[bot]" + git config user.email "github-actions[bot]@users.noreply.github.com" + git fetch --all + git checkout save_historical_data && git pull || git checkout -b save_historical_data + + - name: Fetch open issues labelled as bug from nlohmann/json + id: fetch_issues + run: | + echo "${{ secrets.GITHUB_TOKEN }}" | gh auth login --with-token - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + # Define variables + REPO="nlohmann/json" # Target repository + OUTPUT_FILE="TSF/misbehaviours.md" - - name: Install virtual environment - run: make install_venv -C docs/mkdocs + # Check if the target repository is reachable + if ! gh repo view "$REPO" --json name -q .name; then + echo "Could not reach the target repository ($REPO). Aborting workflow." 
+ exit 1 + fi - - name: Build documentation - run: make build -C docs/mkdocs + # Fetch open issues from the nlohmann/json repository + gh issue list --repo "$REPO" --state open --limit 10000 --json number,title,state,createdAt,url,labels > raw_open_issues.json - - name: Deploy documentation - uses: peaceiris/actions-gh-pages@4f9cc6602d3f66b9c108549d475ec49e8ef4d45e # v4.0.0 + # Fetch closed issues from the nlohmann/json repository + gh issue list --repo "$REPO" --state closed --limit 10000 --json number,title,state,createdAt,url,labels > raw_closed_issues.json + + # print the list of misbehaviours + python3 TSF/scripts/generate_list_of_misbehaviours.py > $OUTPUT_FILE + + # Push misbehaviours file to save_historical_data branch + git add TSF/misbehaviours.md + git commit -m "Updated issues list" || echo "No changes to commit" + git push origin save_historical_data && git pull + + - name: Load persistent data + run: | + if ! git ls-tree --name-only origin/save_historical_data TSF/TrustableScoring.db | grep TSF/TrustableScoring.db; then + mkdir -p TSF + touch TSF/TrustableScoring.db + git add TSF/TrustableScoring.db + git commit -m "Initialise persistent data storage" + git push origin save_historical_data + fi + git checkout $branch_name + git checkout save_historical_data -- TSF/TrustableScoring.db + + - name: Generate trudag report + run: | + REPO_FULL="${{ github.repository }}" + REPO_NAME="${REPO_FULL#*/}" + OWNER_NAME="${REPO_FULL%%/*}" + TSF/scripts/generate_report.sh "https://${OWNER_NAME}.github.io/${REPO_NAME}/main" + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Checkout data storage branch + run: | + git stash push --include-untracked -- $(git status --porcelain | awk '{print $2}' | grep -v TSF/TrustableScoring.db) + git checkout save_historical_data + git pull + + - name: Store persistent data + run: | + git add TSF/TrustableScoring.db + git commit -m "Append data storage" || echo "Historical data already up to date." 
+          git push origin save_historical_data
+
+      - name: Recover stash
+        run: |
+          git checkout $branch_name
+          git stash apply
+
+      - name: Upload trudag artifact
+        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
         with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
-          publish_dir: ./docs/mkdocs/site
+          name: trudag-report-${{ github.sha }}
+          path: TSF/docs/generated/
+          if-no-files-found: error
+
+  build-docs:
+    needs: run-trudag
+    # the eclipse-score/cicd-workflow docs.yml is adjusted to download the trudag report artifact before building the documentation
+    uses: ./.github/workflows/docs.yml
+    permissions:
+      contents: write
+      pages: write
+      pull-requests: write
+      id-token: write
+
+    with:
+      # the bazel-target depends on your repo specific docs_targets configuration (e.g. "suffix")
+      bazel-target: "//:docs -- --github_user=${{ github.repository_owner }} --github_repo=${{ github.event.repository.name }}"
+      retention-days: 3
diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml
index a95e5fa851..3019f30b42 100644
--- a/.github/workflows/scorecards.yml
+++ b/.github/workflows/scorecards.yml
@@ -12,7 +12,7 @@ on:
   schedule:
     - cron: '20 7 * * 2'
   push:
-    branches: ["develop"]
+    branches: ["main"]
 
 permissions:
   contents: read
diff --git a/.github/workflows/test_publication.yml b/.github/workflows/test_publication.yml
new file mode 100644
index 0000000000..13da6bd616
--- /dev/null
+++ b/.github/workflows/test_publication.yml
@@ -0,0 +1,122 @@
+# *******************************************************************************
+# Copyright (c) 2025 Contributors to the Eclipse Foundation
+#
+# See the NOTICE file(s) distributed with this work for additional
+# information regarding copyright ownership.
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +name: Documentation + +on: + workflow_call: + inputs: + artifact_id: + description: 'Unique identifier for artifacts to be loaded' + required: true + type: string + +permissions: + contents: read + +jobs: + run-trudag: + runs-on: ubuntu-latest + permissions: + contents: write + pages: write + pull-requests: write + id-token: write + + steps: + - name: Checkout repository + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Load ubuntu-artifact + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 + with: + name: ${{ inputs.artifact_id }} + path: artifacts/ + + - name: Set up Python + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + with: + python-version: '3.11.2' + + # trudag version 2025.8.5 + - name: Install trudag and dependencies + run: | + sudo apt-get update + sudo apt-get install -y graphviz + pip install git+https://gitlab.com/CodethinkLabs/trustable/trustable@9957f12171cb898d83df5ae708fdba0a38fece2e + + - name: Generate trudag report + run: | + REPO_FULL="${{ github.repository }}" + REPO_NAME="${REPO_FULL#*/}" + OWNER_NAME="${REPO_FULL%%/*}" + TSF/scripts/generate_report.sh "https://${OWNER_NAME}.github.io/${REPO_NAME}/main" + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Install tools + run: | + sudo apt-get update + sudo apt-get install -y jq + sudo apt install gh -y + + - name: Fetch open issues labelled as bug from nlohmann/json + id: fetch_issues + run: | + echo "${{ secrets.GITHUB_TOKEN }}" | gh auth login --with-token + + # Define variables + REPO="nlohmann/json" # Target repository + OUTPUT_FILE="TSF/misbehaviours.md" + + # 
Check if the target repository is reachable + if ! gh repo view "$REPO" --json name -q .name; then + echo "Could not reach the target repository ($REPO). Aborting workflow." + exit 1 + fi + + # Fetch open issues from the nlohmann/json repository + gh issue list --repo "$REPO" --state open --limit 10000 --json number,title,state,createdAt,url,labels > raw_open_issues.json + + # Fetch closed issues from the nlohmann/json repository + gh issue list --repo "$REPO" --state closed --limit 10000 --json number,title,state,createdAt,url,labels > raw_closed_issues.json + + # Add title to the output file + echo "# Misbehaviours Report" > $OUTPUT_FILE + echo "" >> $OUTPUT_FILE + echo "This report lists known misbehaviours or bugs of v3.12.0 of the nlohmann/json repository. The misbehaviours are compiled from github issues of the nlohmann/json repository, and link to each corresponding issue." >> $OUTPUT_FILE + echo "" >> $OUTPUT_FILE + + # Add subtitle for open issues + echo "## Open Issues" >> $OUTPUT_FILE + echo "" >> $OUTPUT_FILE + + # Filter raw open issues for labels containing "bug" and convert to output .md file + jq -r ' + map(select(.labels[]?.name | test("bug"; "i"))) | + map("### [#\(.number)](\(.url))\n- **Title:** \(.title)\n- **State:** \(.state)\n- **Created At:** \(.createdAt)\n") | + .[] + ' raw_open_issues.json >> $OUTPUT_FILE + + # Add subtitle for closed issues + echo "" >> $OUTPUT_FILE + echo "## Closed Issues (since v3.12.0)" >> $OUTPUT_FILE + echo "" >> $OUTPUT_FILE + + # Filter raw closed issues for labels containing "bug", created after release date of nlohmann/json version in use, and convert to output .md file + jq -r ' + map(select(.labels[]?.name | test("bug"; "i"))) | + map(select(.createdAt > "2025-04-11T00:00:00Z")) | # Adjust date as needed, 2025-04-11 corresponds to release v3.12.0 of nlohmann/json + map("### [#\(.number)](\(.url))\n- **Title:** \(.title)\n- **State:** \(.state)\n- **Created At:** \(.createdAt)\n") | + .[] + ' 
raw_closed_issues.json >> $OUTPUT_FILE diff --git a/.github/workflows/test_trudag_extensions.yml b/.github/workflows/test_trudag_extensions.yml new file mode 100644 index 0000000000..e02c7e9986 --- /dev/null +++ b/.github/workflows/test_trudag_extensions.yml @@ -0,0 +1,46 @@ +name: Test Trudag extensions + +on: + workflow_call: + inputs: + artifact_id: + description: 'Unique identifier for artifacts' + required: true + type: string + +permissions: + contents: read + +jobs: + test: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Set up Python + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + with: + python-version: '3.11.2' + + - name: Install dependencies + run: | + pip install --require-hashes -r .github/requirements.txt + pip install git+https://gitlab.com/CodethinkLabs/trustable/trustable@cc6b72753e1202951d382f60ff08320f5a957c7b + + - name: Run tests + run: | + cd .dotstop_extensions + PYTHONPATH=.. pytest -v + + - name: Generate test_trudag_extensions artifact + run: | + echo "Generating test_trudag_extensions artifact..." 
+ mkdir -p test_trudag_extensions + echo "test_trudag_extensions processed for ${{ inputs.artifact_id }}" > test_trudag_extensions/test_trudag_extensions.txt + + - name: Upload test_trudag_extensions artifact + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: ${{ inputs.artifact_id }} + path: test_trudag_extensions/ \ No newline at end of file diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml index 87d0d79968..34c6ecc7b0 100644 --- a/.github/workflows/ubuntu.yml +++ b/.github/workflows/ubuntu.yml @@ -1,14 +1,13 @@ name: Ubuntu on: - push: - branches: - - develop - - master - - release/* - pull_request: - workflow_dispatch: - + workflow_call: + inputs: + artifact_id: + description: 'Unique identifier for artifacts' + required: true + type: string + permissions: contents: read @@ -19,7 +18,7 @@ concurrency: jobs: ci_test_gcc: runs-on: ubuntu-latest - container: gcc:latest + container: gcc:14 steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Get latest CMake and ninja @@ -28,6 +27,11 @@ jobs: run: cmake -S . -B build -DJSON_CI=On - name: Build run: cmake --build build --target ci_test_gcc + - name: Upload test report + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: ci_test_gcc_artefact + path: build/my_logs/ ci_infer: runs-on: ubuntu-latest @@ -59,10 +63,16 @@ jobs: run: cmake -S . 
-B build -DJSON_CI=On - name: Build run: cmake --build build --target ${{ matrix.target }} + - name: Upload test report + if: ${{ matrix.target != 'ci_reuse_compliance' && matrix.target != 'ci_cpplint' && matrix.target != 'ci_test_amalgamation' && matrix.target != 'ci_cppcheck' }} + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: ci_static_analysis_ubuntu_artefact_${{ matrix.target }} + path: build/my_logs/ ci_static_analysis_clang: runs-on: ubuntu-latest - container: silkeh/clang:dev + container: silkeh/clang:20 strategy: matrix: target: [ci_test_clang, ci_clang_tidy, ci_test_clang_sanitizer, ci_clang_analyze, ci_single_binaries] @@ -73,9 +83,16 @@ jobs: - name: Get latest CMake and ninja uses: lukka/get-cmake@28983e0d3955dba2bb0a6810caae0c6cf268ec0c # v4.0.0 - name: Run CMake - run: cmake -S . -B build -DJSON_CI=On + # The default C++ compiler in the docker image is clang++-14 which does not support all compiler flags + run: cmake -S . -B build -DJSON_CI=On -DCLANG_TOOL=clang++-20 - name: Build run: cmake --build build --target ${{ matrix.target }} + - name: Upload test report + if: ${{ matrix.target != 'ci_clang_analyze' && matrix.target != 'ci_single_binaries' && matrix.target != 'ci_clang_tidy' }} + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: ci_static_analysis_clang_artefact_${{ matrix.target }} + path: build/my_logs/ ci_cmake_options: runs-on: ubuntu-latest @@ -93,6 +110,12 @@ jobs: run: cmake -S . 
-B build -DJSON_CI=On - name: Build run: cmake --build build --target ${{ matrix.target }} + - name: Upload test report + if: ${{ matrix.target != 'ci_cmake_flags' }} + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: ci_cmake_options_artefact_${{ matrix.target }} + path: build/my_logs/ ci_test_coverage: runs-on: ubuntu-latest @@ -124,6 +147,12 @@ jobs: with: github-token: ${{ secrets.GITHUB_TOKEN }} path-to-lcov: ${{ github.workspace }}/build/json.info.filtered.noexcept + fail-on-error: false # Do not fail the workflow if Coveralls fails + - name: Upload test report + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: ci_test_coverage_artefact + path: build/my_logs/ ci_test_compilers_gcc_old: runs-on: ubuntu-latest @@ -137,13 +166,18 @@ jobs: run: CXX=g++-${{ matrix.compiler }} cmake -S . -B build -DJSON_CI=On - name: Build run: cmake --build build --target ci_test_compiler_g++-${{ matrix.compiler }} + - name: Upload test report + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: ci_test_compilers_gcc_old_artefact_${{ matrix.compiler }} + path: build/my_logs/ ci_test_compilers_gcc: runs-on: ubuntu-latest strategy: matrix: # older GCC docker images (4, 5, 6) fail to check out code - compiler: ['7', '8', '9', '10', '11', '12', '13', '14', 'latest'] + compiler: ['7', '8', '9', '10', '11', '12', '13', '14'] container: gcc:${{ matrix.compiler }} steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 @@ -153,12 +187,17 @@ jobs: run: cmake -S . 
-B build -DJSON_CI=On - name: Build run: cmake --build build --target ci_test_compiler_default + - name: Upload test report + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: ci_test_compilers_gcc_clang_artefact_${{ matrix.compiler }} + path: build/my_logs/ ci_test_compilers_clang: runs-on: ubuntu-latest strategy: matrix: - compiler: ['3.4', '3.5', '3.6', '3.7', '3.8', '3.9', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15-bullseye', '16', '17', '18', '19', '20', 'latest'] + compiler: ['17', '18', '19', '20'] container: silkeh/clang:${{ matrix.compiler }} steps: - name: Install unzip and git @@ -173,10 +212,15 @@ jobs: run: cmake -S . -B build -DJSON_CI=On - name: Build run: cmake --build build --target ci_test_compiler_default + - name: Upload test report + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: ci_test_compilers_clang_artefact_${{ matrix.compiler }} + path: build/my_logs/ ci_test_standards_gcc: runs-on: ubuntu-latest - container: gcc:latest + container: gcc:14.3 strategy: matrix: standard: [11, 14, 17, 20, 23, 26] @@ -188,10 +232,15 @@ jobs: run: cmake -S . 
-B build -DJSON_CI=On - name: Build run: cmake --build build --target ci_test_gcc_cxx${{ matrix.standard }} + - name: Upload test report + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: ci_test_standards_gcc_artefact_${{ matrix.standard }} + path: build/my_logs/ ci_test_standards_clang: runs-on: ubuntu-latest - container: silkeh/clang:latest + container: silkeh/clang:20 strategy: matrix: standard: [11, 14, 17, 20, 23, 26] @@ -210,6 +259,11 @@ jobs: - name: Build with libstdc++ run: cmake --build build --target ci_test_clang_cxx${{ matrix.standard }} if: ${{ matrix.stdlib == 'libstdcxx' }} + - name: Upload test report + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: ci_test_standards_clang_artefact_${{ matrix.stdlib }}_${{ matrix.standard }} + path: build/my_logs/ ci_cuda_example: runs-on: ubuntu-latest @@ -232,6 +286,11 @@ jobs: run: | . /opt/intel/oneapi/setvars.sh cmake --build build --target ci_icpc + - name: Upload test report + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: ci_icpc_artefact + path: build/my_logs/ ci_test_documentation: runs-on: ubuntu-latest @@ -249,3 +308,330 @@ jobs: run: cmake -S . 
-B build -DJSON_CI=On - name: Build run: cmake --build build --target ${{ matrix.target }} + + publish_test_data_success: + runs-on: ubuntu-latest + permissions: + contents: write + needs: [ci_test_gcc, ci_infer, ci_static_analysis_ubuntu, ci_static_analysis_clang, ci_cmake_options, ci_test_coverage, ci_test_compilers_gcc_old, ci_test_compilers_gcc, ci_test_compilers_clang, ci_test_standards_gcc, ci_test_standards_clang, ci_cuda_example, ci_icpc, ci_test_documentation] + if: success() + steps: + - name: Check out code + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - name: Download all workflow artifacts + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 + with: + path: my_artifacts + + - name: Get current branch name + id: get_branch + env: + GITHUB_EVENT_NAME: ${{ github.event_name }} + PR_BRANCH_NAME: ${{ github.event.pull_request.head.ref }} + GITHUB_REF_SAFE: ${{ github.ref }} + run: | + if [[ "$GITHUB_EVENT_NAME" == "pull_request" ]]; then + # Use the branch from the pull request safely via environment variable + current_branch="$PR_BRANCH_NAME" + else + # Remove the "refs/heads/" prefix from GITHUB_REF safely + current_branch="${GITHUB_REF_SAFE#refs/heads/}" + fi + + # Write the branch name to $GITHUB_ENV securely + echo "branch_name=${current_branch}" >> "$GITHUB_ENV" + + - name: Fetch data storage branch + run: | + git config user.name "github-actions[bot]" + git config user.email "github-actions[bot]@users.noreply.github.com" + git fetch --all + git checkout save_historical_data && git pull || git checkout -b save_historical_data + + - name: Load persistent data + run: | + if ! 
git ls-tree --name-only origin/save_historical_data TSF/MemoryEfficientTestResultData.db | grep TSF/MemoryEfficientTestResultData.db; then + touch TSF/MemoryEfficientTestResultData.db + git add TSF/MemoryEfficientTestResultData.db + git commit -m "Create persistent storage" + git push origin save_historical_data + fi + # Handle fork PRs by staying on current working branch instead of switching + if [[ "$GITHUB_EVENT_NAME" == "pull_request" ]]; then + # For PRs, we're already on the right commit from actions/checkout + # Just get the file from save_historical_data branch + git checkout save_historical_data -- TSF/MemoryEfficientTestResultData.db + else + # For push events, switch to the branch and get the file + git checkout $branch_name + git checkout save_historical_data -- TSF/MemoryEfficientTestResultData.db + fi + + - name: append test data + run: | + python3 TSF/scripts/capture_test_data_memory_sensitive.py successful + + - name: Checkout data storage branch + if: github.event_name == 'schedule' || (github.event_name == 'push' && github.ref_name == 'main') + run: | + git stash push --include-untracked -- $(git status --porcelain | awk '{print $2}' | grep -v TSF/MemoryEfficientTestResultData.db) + git checkout save_historical_data + + - name: Commit and Push persistent data + if: github.event_name == 'schedule' || (github.event_name == 'push' && github.ref_name == 'main') + run: | + git add TSF/MemoryEfficientTestResultData.db + git commit -m "Save test data" || echo "No test data to be saved" + git push origin save_historical_data + + - name: Recover stash + if: github.event_name == 'schedule' || (github.event_name == 'push' && github.ref_name == 'main') + run: | + # Handle fork PRs by staying on current working branch instead of switching + if [[ "$GITHUB_EVENT_NAME" == "pull_request" ]]; then + # For PRs, we're already on the right branch/commit + git stash apply + else + # For push events, switch back to the working branch + git checkout $branch_name + git 
stash apply + fi + + - name: Move test report + run: | + mkdir test_report + mv MemoryEfficientTestResults.db test_report/MemoryEfficientTestResults.db + + - name: Upload test report + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: publish_data_artifact + path: test_report/ + + + publish_test_data_failure: + runs-on: ubuntu-latest + permissions: + contents: write + needs: [ci_test_gcc, ci_infer, ci_static_analysis_ubuntu, ci_static_analysis_clang, ci_cmake_options, ci_test_coverage, ci_test_compilers_gcc_old, ci_test_compilers_gcc, ci_test_compilers_clang, ci_test_standards_gcc, ci_test_standards_clang, ci_cuda_example, ci_icpc, ci_test_documentation] + if: failure() + steps: + - name: Check out code + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - name: Download all workflow artifacts + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 + with: + path: my_artifacts + + - name: Get current branch name + id: get_branch + env: + GITHUB_EVENT_NAME: ${{ github.event_name }} + PR_BRANCH_NAME: ${{ github.event.pull_request.head.ref }} + GITHUB_REF_SAFE: ${{ github.ref }} + run: | + if [[ "$GITHUB_EVENT_NAME" == "pull_request" ]]; then + # Use the branch from the pull request safely via environment variable + current_branch="$PR_BRANCH_NAME" + else + # Remove the "refs/heads/" prefix from GITHUB_REF safely + current_branch="${GITHUB_REF_SAFE#refs/heads/}" + fi + + # Write the branch name to $GITHUB_ENV securely + echo "branch_name=${current_branch}" >> "$GITHUB_ENV" + + - name: Fetch data storage branch + run: | + git config user.name "github-actions[bot]" + git config user.email "github-actions[bot]@users.noreply.github.com" + git fetch --all + git checkout save_historical_data && git pull || git checkout -b save_historical_data + + - name: Load persistent data + run: | + if ! 
git ls-tree --name-only origin/save_historical_data TSF/MemoryEfficientTestResultData.db | grep TSF/MemoryEfficientTestResultData.db; then + touch TSF/MemoryEfficientTestResultData.db + git add TSF/MemoryEfficientTestResultData.db + git commit -m "Create persistent storage" + git push origin save_historical_data + fi + # Handle fork PRs by staying on current working branch instead of switching + if [[ "$GITHUB_EVENT_NAME" == "pull_request" ]]; then + # For PRs, we're already on the right commit from actions/checkout + # Just get the file from save_historical_data branch + git checkout save_historical_data -- TSF/MemoryEfficientTestResultData.db + else + # For push events, switch to the branch and get the file + git checkout $branch_name + git checkout save_historical_data -- TSF/MemoryEfficientTestResultData.db + fi + + - name: append test data + run: | + python3 TSF/scripts/capture_test_data_memory_sensitive.py failed + + - name: Checkout data storage branch + if: github.event_name == 'schedule' || (github.event_name == 'push' && github.ref_name == 'main') + run: | + git stash push --include-untracked -- $(git status --porcelain | awk '{print $2}' | grep -v TSF/MemoryEfficientTestResultData.db) + git checkout save_historical_data + + - name: Commit and Push persistent data + if: github.event_name == 'schedule' || (github.event_name == 'push' && github.ref_name == 'main') + run: | + git add TSF/MemoryEfficientTestResultData.db + git commit -m "Save test data" || echo "No test data to be saved" + git push origin save_historical_data + + - name: Recover stash + if: github.event_name == 'schedule' || (github.event_name == 'push' && github.ref_name == 'main') + run: | + # Handle fork PRs by staying on current working branch instead of switching + if [[ "$GITHUB_EVENT_NAME" == "pull_request" ]]; then + # For PRs, we're already on the right branch/commit + git stash apply + else + # For push events, switch back to the working branch + git checkout $branch_name + git 
stash apply + fi + + - name: Move test report + run: | + mkdir test_report + mv MemoryEfficientTestResults.db test_report/MemoryEfficientTestResults.db + + - name: Upload test report + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: publish_data_artifact + path: test_report/ + + + publish_test_data_cancellation: + runs-on: ubuntu-latest + permissions: + contents: write + needs: [ci_test_gcc, ci_infer, ci_static_analysis_ubuntu, ci_static_analysis_clang, ci_cmake_options, ci_test_coverage, ci_test_compilers_gcc_old, ci_test_compilers_gcc, ci_test_compilers_clang, ci_test_standards_gcc, ci_test_standards_clang, ci_cuda_example, ci_icpc, ci_test_documentation] + if: cancelled() + steps: + - name: Check out code + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - name: Download all workflow artifacts + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 + with: + path: my_artifacts + + - name: Get current branch name + id: get_branch + env: + GITHUB_EVENT_NAME: ${{ github.event_name }} + PR_BRANCH_NAME: ${{ github.event.pull_request.head.ref }} + GITHUB_REF_SAFE: ${{ github.ref }} + run: | + if [[ "$GITHUB_EVENT_NAME" == "pull_request" ]]; then + # Use the branch from the pull request safely via environment variable + current_branch="$PR_BRANCH_NAME" + else + # Remove the "refs/heads/" prefix from GITHUB_REF safely + current_branch="${GITHUB_REF_SAFE#refs/heads/}" + fi + + # Write the branch name to $GITHUB_ENV securely + echo "branch_name=${current_branch}" >> "$GITHUB_ENV" + + - name: Fetch data storage branch + run: | + git config user.name "github-actions[bot]" + git config user.email "github-actions[bot]@users.noreply.github.com" + git fetch --all + git checkout save_historical_data && git pull || git checkout -b save_historical_data + + - name: Load persistent data + run: | + if ! 
git ls-tree --name-only origin/save_historical_data TSF/MemoryEfficientTestResultData.db | grep TSF/MemoryEfficientTestResultData.db; then + touch TSF/MemoryEfficientTestResultData.db + git add TSF/MemoryEfficientTestResultData.db + git commit -m "Create persistent storage" + git push origin save_historical_data + fi + # Handle fork PRs by staying on current working branch instead of switching + if [[ "$GITHUB_EVENT_NAME" == "pull_request" ]]; then + # For PRs, we're already on the right commit from actions/checkout + # Just get the file from save_historical_data branch + git checkout save_historical_data -- TSF/MemoryEfficientTestResultData.db + else + # For push events, switch to the branch and get the file + git checkout $branch_name + git checkout save_historical_data -- TSF/MemoryEfficientTestResultData.db + fi + + - name: append test data + run: | + python3 TSF/scripts/capture_test_data_memory_sensitive.py cancelled + + - name: Checkout data storage branch + if: github.event_name == 'schedule' || (github.event_name == 'push' && github.ref_name == 'main') + run: | + git stash push --include-untracked -- $(git status --porcelain | awk '{print $2}' | grep -v TSF/MemoryEfficientTestResultData.db) + git checkout save_historical_data + + - name: Commit and Push persistent data + if: github.event_name == 'schedule' || (github.event_name == 'push' && github.ref_name == 'main') + run: | + git add TSF/MemoryEfficientTestResultData.db + git commit -m "Save test data" || echo "No test data to be saved" + git push origin save_historical_data + + - name: Recover stash + if: github.event_name == 'schedule' || (github.event_name == 'push' && github.ref_name == 'main') + run: | + # Handle fork PRs by staying on current working branch instead of switching + if [[ "$GITHUB_EVENT_NAME" == "pull_request" ]]; then + # For PRs, we're already on the right branch/commit + git stash apply + else + # For push events, switch back to the working branch + git checkout $branch_name + git 
stash apply + fi + + - name: Move test report + run: | + mkdir test_report + mv MemoryEfficientTestResults.db test_report/MemoryEfficientTestResults.db + + - name: Upload test report + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: publish_data_artifact + path: test_report/ + + + ubuntu_artifact: + runs-on: ubuntu-latest + needs: [publish_test_data_success, publish_test_data_failure, publish_test_data_cancellation, ci_test_gcc, ci_infer, ci_static_analysis_ubuntu, ci_static_analysis_clang, ci_cmake_options, ci_test_coverage, ci_test_compilers_gcc_old, ci_test_compilers_gcc, ci_test_compilers_clang, ci_test_standards_gcc, ci_test_standards_clang, ci_cuda_example, ci_icpc, ci_test_documentation] + if: ${{ always() && (needs.publish_test_data_success.result == 'success' || needs.publish_test_data_failure.result == 'success' || needs.publish_test_data_cancellation.result == 'success') }} + steps: + - name: Download test report + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 + with: + name: publish_data_artifact + + - name: Generate ubuntu artifact + run: | + echo "Generating ubuntu artifact..." + mkdir -p ubuntu + if [ -f MemoryEfficientTestResults.db ]; then + mv MemoryEfficientTestResults.db ubuntu/MemoryEfficientTestResults.db + else + echo "No test-results were captured." 
+ fi + echo "ubuntu processed for ${{ inputs.artifact_id }}" > ubuntu/ubuntu.txt + + - name: Upload ubuntu artifact + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: ${{ inputs.artifact_id }} + path: ubuntu/ diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml deleted file mode 100644 index 4e21d995d5..0000000000 --- a/.github/workflows/windows.yml +++ /dev/null @@ -1,134 +0,0 @@ -name: Windows - -on: - push: - branches: - - develop - - master - - release/* - pull_request: - workflow_dispatch: - -permissions: - contents: read - -concurrency: - group: ${{ github.workflow }}-${{ github.ref || github.run_id }} - cancel-in-progress: true - -jobs: - mingw: - runs-on: windows-2019 - strategy: - matrix: - architecture: [x64, x86] - - steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - name: Set up MinGW - uses: egor-tensin/setup-mingw@84c781b557efd538dec66bde06988d81cd3138cf # v2.2.0 - with: - platform: ${{ matrix.architecture }} - version: 12.2.0 # https://github.com/egor-tensin/setup-mingw/issues/14 - - name: Run CMake - run: cmake -S . -B build -G "MinGW Makefiles" -DCMAKE_BUILD_TYPE=Debug -DJSON_BuildTests=On - - name: Build - run: cmake --build build --parallel 10 - - name: Test - run: cd build ; ctest -j 10 -C Debug --output-on-failure - - msvc2019: - runs-on: windows-2019 - strategy: - matrix: - build_type: [Debug, Release] - architecture: [Win32, x64] - - steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - name: Run CMake - run: cmake -S . -B build -G "Visual Studio 16 2019" -A ${{ matrix.architecture }} -DJSON_BuildTests=On -DCMAKE_CXX_FLAGS="/W4 /WX" - if: matrix.build_type == 'Release' - - name: Run CMake - run: cmake -S . 
-B build -G "Visual Studio 16 2019" -A ${{ matrix.architecture }} -DJSON_BuildTests=On -DJSON_FastTests=ON -DCMAKE_CXX_FLAGS="/W4 /WX" - if: matrix.build_type == 'Debug' - - name: Build - run: cmake --build build --config ${{ matrix.build_type }} --parallel 10 - - name: Test - run: cd build ; ctest -j 10 -C ${{ matrix.build_type }} --output-on-failure - - msvc2019_latest: - runs-on: windows-2019 - - steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - name: Run CMake - run: cmake -S . -B build -G "Visual Studio 16 2019" -DJSON_BuildTests=On -DCMAKE_CXX_FLAGS="/permissive- /std:c++latest /utf-8 /W4 /WX" - - name: Build - run: cmake --build build --config Release --parallel 10 - - name: Test - run: cd build ; ctest -j 10 -C Release --output-on-failure - - msvc2022: - runs-on: windows-2022 - strategy: - matrix: - build_type: [Debug, Release] - architecture: [Win32, x64] - - steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - name: Run CMake - run: cmake -S . -B build -G "Visual Studio 17 2022" -A ${{ matrix.architecture }} -DJSON_BuildTests=On -DCMAKE_CXX_FLAGS="/W4 /WX" - if: matrix.build_type == 'Release' - - name: Run CMake - run: cmake -S . -B build -G "Visual Studio 17 2022" -A ${{ matrix.architecture }} -DJSON_BuildTests=On -DJSON_FastTests=ON -DCMAKE_CXX_FLAGS="/W4 /WX" - if: matrix.build_type == 'Debug' - - name: Build - run: cmake --build build --config ${{ matrix.build_type }} --parallel 10 - - name: Test - run: cd build ; ctest -j 10 -C ${{ matrix.build_type }} --output-on-failure - - msvc2022_latest: - runs-on: windows-2022 - - steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - name: Run CMake - run: cmake -S . 
-B build -G "Visual Studio 17 2022" -DJSON_BuildTests=On -DCMAKE_CXX_FLAGS="/permissive- /std:c++latest /utf-8 /W4 /WX" - - name: Build - run: cmake --build build --config Release --parallel 10 - - name: Test - run: cd build ; ctest -j 10 -C Release --output-on-failure - - clang: - runs-on: windows-2019 - strategy: - matrix: - version: [11, 12, 13, 14, 15] - - steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - name: Install Clang - run: curl -fsSL -o LLVM${{ matrix.version }}.exe https://github.com/llvm/llvm-project/releases/download/llvmorg-${{ matrix.version }}.0.0/LLVM-${{ matrix.version }}.0.0-win64.exe ; 7z x LLVM${{ matrix.version }}.exe -y -o"C:/Program Files/LLVM" - - name: Run CMake - run: cmake -S . -B build -DCMAKE_CXX_COMPILER="C:/Program Files/LLVM/bin/clang++.exe" -G"MinGW Makefiles" -DCMAKE_BUILD_TYPE=Debug -DJSON_BuildTests=On - - name: Build - run: cmake --build build --parallel 10 - - name: Test - run: cd build ; ctest -j 10 -C Debug --exclude-regex "test-unicode" --output-on-failure - - clang-cl-11: - runs-on: windows-2019 - strategy: - matrix: - architecture: [Win32, x64] - - steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - name: Run CMake - run: cmake -S . 
-B build -G "Visual Studio 16 2019" -A ${{ matrix.architecture }} -T ClangCL -DJSON_BuildTests=On - - name: Build - run: cmake --build build --config Debug --parallel 10 - - name: Test - run: cd build ; ctest -j 10 -C Debug --exclude-regex "test-unicode" --output-on-failure diff --git a/.gitignore b/.gitignore index 03fe8147e6..9f1c1db346 100644 --- a/.gitignore +++ b/.gitignore @@ -38,10 +38,26 @@ # Swift Package Manager build directory /.build +# docs:incremental and docs:ide_support build artifacts +/_build +/build venv nlohmann_json.spdx # Bazel-related +bazel-* MODULE.bazel.lock +user.bazelrc + +/temp +/TSF/temp + +/TSF/docs/generated +# temporary folder used for generation of list of test-results +/artifacts + +/.venv + +__pycache__/ diff --git a/BUILD.bazel b/BUILD.bazel index de0ff7145d..430e4a2d55 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -1,5 +1,6 @@ load("@rules_cc//cc:cc_library.bzl", "cc_library") load("@rules_license//rules:license.bzl", "license") +load("@score_docs_as_code//:docs.bzl", "docs") package( default_applicable_licenses = [":license"], @@ -78,3 +79,7 @@ cc_library( includes = ["single_include"], visibility = ["//visibility:public"], ) + +docs( + source_dir = "TSF/docs", +) \ No newline at end of file diff --git a/CMakeLists.txt b/CMakeLists.txt index 88ef6f6617..8b8f4e61c4 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -223,3 +223,10 @@ if(JSON_Install) ) include(CPack) endif() + +# to accurately capture test data, increase the size + +file(WRITE "${CMAKE_BINARY_DIR}/CTestCustom.cmake" " +set(CTEST_CUSTOM_MAXIMUM_PASSED_TEST_OUTPUT_SIZE 0) +set(CTEST_CUSTOM_MAXIMUM_FAILED_TEST_OUTPUT_SIZE 0) +") diff --git a/MODULE.bazel b/MODULE.bazel index 51754e7a3a..a29bf02851 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -3,5 +3,48 @@ module( compatibility_level = 1, ) -bazel_dep(name = "rules_cc", version = "0.0.17") +# bazel_dep(name = "rules_cc", version = "0.0.17") bazel_dep(name = "rules_license", version = "1.0.0") + + +bazel_dep(name 
= "rules_python", version = "1.4.1") + +PYTHON_VERSION = "3.12" + +python = use_extension("@rules_python//python/extensions:python.bzl", "python") +python.toolchain( + is_default = True, + python_version = PYTHON_VERSION, +) +use_repo(python) + +# Add GoogleTest dependency +bazel_dep(name = "googletest", version = "1.14.0") + +# Checker rule for CopyRight checks/fixs +bazel_dep(name = "score_cr_checker", version = "0.2.2") +bazel_dep(name = "score_python_basics", version = "0.3.0") +bazel_dep(name = "score_starpls_lsp", version = "0.1.0") + +# C/C++ rules for Bazel +bazel_dep(name = "rules_cc", version = "0.1.1") + +# LLVM Toolchains Rules - host configuration +bazel_dep(name = "toolchains_llvm", version = "1.2.0") + +llvm = use_extension("@toolchains_llvm//toolchain/extensions:llvm.bzl", "llvm") +llvm.toolchain( + cxx_standard = {"": "c++17"}, + llvm_version = "19.1.0", +) +use_repo(llvm, "llvm_toolchain") +use_repo(llvm, "llvm_toolchain_llvm") + +register_toolchains("@llvm_toolchain//:all") + +# Dash license checker +bazel_dep(name = "score_dash_license_checker", version = "0.1.2") + +#docs-as-code +bazel_dep(name = "score_docs_as_code", version = "1.0.2") + diff --git a/README.md b/README.md index f97afeb412..6cb935375f 100644 --- a/README.md +++ b/README.md @@ -1864,3 +1864,10 @@ Some tests change the installed files and hence make the whole process not repro Note you need to call `cmake -LE "not_reproducible|git_required"` to exclude both labels. See [issue #2596](https://github.com/nlohmann/json/issues/2596) for more information. As Intel compilers use unsafe floating point optimization by default, the unit tests may fail. Use flag [`/fp:precise`](https://www.intel.com/content/www/us/en/docs/cpp-compiler/developer-guide-reference/2021-8/fp-model-fp.html) then. 
+ + +# Documentation for TSF + +### Build Docs + +You can build and serve the documentation using the following shell script: `scripts/generate_documentation.sh` diff --git a/TSF/README.md b/TSF/README.md new file mode 100644 index 0000000000..0dc6967d66 --- /dev/null +++ b/TSF/README.md @@ -0,0 +1,239 @@ +# TSF Structure + +The TSF-related additions, such as the Trustable Graph and tooling extensions, are primarily organized within the TSF folder: +- the items of the Trustable Graph are in `TSF/trustable` +- the building blocks of the documentation, which is built with the S-CORE docs-as-code tool, is placed in `TSF/docs` +- the report generated by [trudag](https://codethinklabs.gitlab.io/trustable/trustable/trudag/usage.html) is placed in `TSF/docs/generated`. This can either be produced as part of the CI pipeline or manually using `./TSF/scripts/generate_report.sh_`. It is strongly recommended that the TSF/docs folder is included in the .gitignore file +- the utility scripts are documented in the `TSF/scripts` folder + +The TSF graph (including links, nodes and their hashes) is saved in the `.dotstop.dot` file and the trudag extensions including CPP test references are stored in the `.dotstop_extensions` folder since these locations are required by the trudag tool. + +# Forking the repository + +In order to fork this repository or set up any repository where the TSF documentation in this repository is to be included, the following settings have to be configured on GitHub. In addition to the below settings, make sure to also configure appropriate branch protection rules. 
+ +- In `Settings` > `General` >`Features`: + - Enable `Issues` + +- In `Settings` > `Code and automation` > `Actions` > `General` > `Workflow Permissions`: + - Make sure that only the following settings are enabled: + - `Allow all actions and reusable workflows` + - `Require approval for first-time contributors` + - `Read repository contents and packages permissions` + +- In `Settings` > `Code and automation` > `Pages`: + - Under `Source`, select `GitHub Actions` + +- In `Settings` > `Security` > `Advanced Security`: + - Make sure that only the following settings are enabled: + - `Dependency graph` + - `Dependabot alerts` + +- In `Actions tab`: + - Click `I understand my workflows, go ahead and enable them` + - In the left side menu, click `Show more workflows...` and enable any workflows which are labelled as `Disabled` + +# Release management + +The releases process of this repository shall conform to the [release management plan of Eclipse S-CORE](https://github.com/eclipse-score/score/blob/668bae4d1a704565983d34b8447d5a035696299a/docs/platform_management_plan/release_management.rst#id11). Most notably, the release tags shall follow semantic versioning format. + +- The components of the tag shall be incremented both in case of an update to TSF documentation, and in case of updating to a new release of the upstream nlohmann/json library. +- Updates to TSF documentation shall increment the PATCH or MINOR component. +- Updating the version of nlohmann/json to a new release shall increment the appropriate tag component based on the extensiveness and nature of the upstream changes. +- To indicate the version of nlohmann/json in use, the nlohmann/json release tag shall always be clearly included in the release notes of this repository. +- The release notes of this repository shall always indicate whether the release includes changes to only TSF documentation, only the version of nlohmann/json, or both. 
+ +To update either the version of nlohmann/json within S-CORE or TSF documentation, please refer to the respective Update Concepts below. + +# Update Concept for the version of nlohmann/json within S-CORE + +## Assumptions of use + +This description of an update process is based on the following structure of the repository WHICH IS NOT THE CASE YET. +It is assumed that the repository possesses a default branch called ``main`` containing the most recent documented version of ``nlohmann/json`` together with its documentation. +Additionally, there is a branch ``develop``, which is **not** intended to mirror the branch of ``nlohmann/json`` with the same name, but instead serves as an in-repository testing ground for changes to either the library or its documentation. +The releases of the documented version are identified by tags on ``main``. +Moreover, the branch protection rules for ``main`` are set as described in the description of the forking process in ``TSF/README.md`` (WIP). + +Note that there is **no automatic information** on the existence of a new release in the original ``nlohmann/json``; instead the possibility to update is detected **manually**. +Note further that, due to the currently relatively limited use of nlohmann/json within S-CORE, there appears currently no inherent need to keep the version up to date. + +## Update process of the original nlohmann/json + +The releases of ``nlohmann/json`` are collected on the [Release site](https://github.com/nlohmann/json/releases) of the repository ``nlohmann/json``. +Each release announcement is expected to contain the release date, SHA-256 values for json.hpp, include.zip and json.tar.xz, and a brief list containing bug fixes, improvements, further changes and deprecated functions. +The new release is expected to be located within the branch **master**, from where the most recent version can be drawn. 
+ +## Update process of the S-CORE version + +In the following, we shall describe the intricacies of updating the version of ``nlohmann/json`` within Eclipse S-CORE. +This version is not a mere fork of the original master branch of ``nlohmann/json``, but instead enriched with the documentation following the Trustable Software Framework (TSF). + +The enrichment with the documentation necessitates some changes to the fork of the original repository. +For the most part, these changes are unobtrusive, and mere additions. +In particular, the folders ``include`` and ``single-include`` remain unchanged, and should be updated without further adaptations. +In some cases, however, additional tests are run and data are generated and collected, which were not run or generated in the original ``nlohmann/json``, so that obtrusive changes of files were necessary. +For these files, and in particular the workflow files, caution must be exercised, as to not disturb the documentation. +Moreover, some parts of the documentation must be adapted to the new version. + + +### What can not be updated without further precautions? + +* ``cmake/ci.cmake`` + This file defines, in particular, the various custom cmake targets; in particular, the various configurations for the execution of the unit- and integration-tests are defined. + The TSF requires, or, at the very least, strongly encourages us to collect test-results. + In order to do this efficiently, the ctest command is adapted to automatically generate the junit-logs of each test-run. + For this, the option ``--output-junit`` is set with output path ``../my_logs/TARGETNAME_junit.xml``, where TARGETNAME is replaced by the name of the respective cmake target; in case that this convention is insufficient to uniquely identify the logs, TARGETNAME is amended by a number. + When updating, it must be ensured that these adaptations are preserved. 
+ Moreover, if the update introduces new cmake targets or new executions of ctest, it must be ensured, that the junit-log is generated and stored with a similar naming convention in the folder "../my_logs/". + Otherwise, it can not be ensured that the test data are accurately captured. + +* ``cmake/download_test_data.cmake`` + This file is modified to ensure that the test-data are not downloaded from the original test-data repository, but instead from the copy of that repository within the Eclipse S-CORE organisation. + It must be ensured that this change is preserved. + +* ``tests/CMakeLists.txt`` + This file collects, in particular, the files containing the unit- and integration-tests in a list, which is given to cmake. + Custom tests were added in TSF/tests to document the fulfillment of the expectations. + To ensure that these tests are run, the file tests/CMakeLists.txt has been modified. + During the update, it must be ensured, that the custom tests are still being executed. + +* ``.github/workflows/parent-workflow.yml`` + To ensure a specific execution order for the individual github workflows, their execution is orchestrated by the parent-workflow. + To guarantee that this order is respected, it must be ensured that every other workflow except for ``docs-cleanup.yml``, ``scorecards.yml`` and ``stale.yml`` runs ``on workflow_call``, only. + For the three exceptions, it is recommended to keep the execution scheduled as currently the case. + +* ``.github/workflows/ubuntu.yml`` + The ubuntu workflow orchestrates the parallel execution of various cmake targets with varying configurations running on the latest version of ubuntu. + The first adaptation is that every step, in which a junit-report is generated, generates an artifact. + It must be ensured, that these artifacts are still generated after the update. + The second adaptation is that the test-results are captured, processed and persistently stored or stored in the ubuntu-artifact. 
+ Therefore, it must be ensured that the jobs ``publish_test_data_success``, ``publish_test_data_failure``, ``publish_test_data_cancellation`` and ``ubuntu_artifact`` are executed. + Moreover, in case that any further job is added by nlohmann, it must be ensured that this job is added to the list of jobs required before the latter workflows are executed. + If any further job added by nlohmann generates a junit-log, it must be ensured that this job generates an artifact containing its junit-logs. + +* ``.github/workflows/cifuzz.yml`` + This workflow uses Google's oss-fuzz, which is not available to the copy within Eclipse S-CORE. + Therefore, this workflow needs to be disabled in the copy. + Currently, this is done by removing it altogether, which we recommend to do so that no confusion as to why this workflow is not executed arises. + +* ``.github/workflows/publish_documentation.yml`` + This workflow is replaced with a completely customised version, which reflects the use of trudag and the integration into the Eclipse S-CORE organisation. + Therefore, it is recommended to not change this workflow. + In particular, the version of publish_documentation.yml in the original repository nlohmann/json must not replace the publish_documentation.yml of the present repository. + +* ``.github/workflows/test_trudag_extensions.yml`` + This workflow is not present in the original nlohmann/json and must not be removed, or modified (besides updating the versions of tools, if necessary) by the update. + +* Other entries of ``.github/workflows`` + For every workflow, it must be ensured that the conditions of their execution are unchanged. + The workflows ``check_amalgamation``, ``codeql``, ``dependency_review``, ``labeler`` and ``test_trudag_extensions`` generate an artifact, which must not be changed. + New workflows should be carefully reviewed. 
+ If it is determined that their execution within the project is beneficial, and that they do not interfere with existing workflows, then they should be integrated within the parent workflow at an appropriate place and their execution condition should be set to on ``workflow_call``, or their execution should be scheduled appropriately. + It is strongly recommended that the new workflow produces an artifact on success, and that the validator ``check_artifact_exists`` is adapted accordingly. + If nlohmann deletes any of the currently executed workflows, in particular ``check_amalgamation.yml``, ``codeql.yml``, ``dependency_review.yml``, ``labeler.yml``, ``test_trudag_extensions.yml`` and ``ubuntu.yml``, then it is strongly recommended to keep the currently executed version, since the automatic validator ``check_artifact_exists`` depends on the existence of these workflows. + In case that it is determined that these workflows should be deleted also in the documented copy of ``nlohmann/json``, then the validator ``check_artifact_exists`` and all its occurrences must be adapted accordingly. + +* ``ChangeLog.md`` + It must be ensured that the changes of the update are properly described in the file ``ChangeLog.md``. + + +### Necessary adaptations + +The following adaptation is recommended, and has, unfortunately, not been automated. + +* ``TSF/trustable/statements/JLS-02.md`` + It must be carefully ensured that this statement and its references are still valid. In particular, it is strongly recommended to refer to a fuzz testing result running on the version that is updated to. + + +The following adaptations to the documentation have been automated; the python-script TSF/scripts/update_helper.py may be used to assist with these changes. +For the error-free execution it is necessary, however, to adhere to the naming scheme json_version_X_XX_X, and to not change the structure of the directories. 
+ +* ``TSF/Trustable/statements/JLS-11.md`` + It must be ensured that the correct release date is used. + +* ``TSF/trustable/statements/JLS-14.md`` + It must be ensured that the release of the correct version is referenced. + Furthermore, the sha-value of the evidence must be adapted to the one provided in that announcement post. + +* ``TSF/trustable/docs/introduction/index.rst`` + In this file, the version of ``nlohmann/json`` that is documented is explicitly mentioned at two places. + This version must be updated. + +* ``TSF/scripts/generate_list_of_misbehaviours.py`` + This script contains version and release date hard-coded. Both must be updated. + + +### Recommended procedure VERY MUCH WIP + +Based on the above observations, the following recommendations are derived. + +1. Ensure that the content of the branch ``develop`` is identical to the branch ``main``. + Since it is intended to not change the library itself, in particular the folders ``include`` and ``single_include``, this should be possible by updating the documentation. +2. Merge branch master from the original nlohmann/json into ``develop``, e.g. ``git checkout -b json_version_X_XX_X && git merge --no-commit nlohmann/master`` +3. Confirm the deletion of cifuzz.yml, macos.yml and windows.yml. +4. Resolve the potential merge conflict in publish-documentation.yml by rejecting the incoming changes. + Update the versions of the github actions, if necessary. +5. Resolve the potential merge conflicts in check_amalgamation.yml, codeql.yml, dependency_review.yml, labeler.yml, ``test_trudag_extensions.yml`` to ensure that the artifacts are generated, i.e. the jobs ``Generate XXX artifact`` and ``Upload XXX artifact`` are retained. +6. Resolve the potential merge conflict in ubuntu.yml following the above instructions. +7. Resolve the potential merge conflicts in cmake/download_test_data.cmake and cmake/ci.cmake following the above instructions. +8. Carefully examine the automatically merged changes. 
If no interference is to be expected, complete the merge. +9. In case any additional workflow has been added, carefully examine and integrate into the parent-workflow or schedule appropriately. +10. Adapt the documentation as described above. +11. Generate the documentation locally and carefully investigate any change in the trustable score(s). + If any relevant behaviour of the library changes, adapt the documentation. + Additionally, if any additional tests were added, or existing tests were changed, carefully investigate whether these warrant an amendment of the documentation. +12. Merge into the ``main``. +13. Create a new release under the tag FIXME + +# Update concept for the TSF documentation + +## Assumptions of use + +The documentation follows the Trustable Software Framework (TSF), which is documented [here](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html). +Furthermore, the automatic compilation of the documentation and the tracking of changes to the core functionalities of the library uses _trudag_, which is developed by Codethink and located [here](https://gitlab.com/CodethinkLabs/trustable/trustable). + + +## Version of trudag + +The documentation is currently built using trudag version 2025.8.5. +In case a major change of the trudag happens in the future, this might break some features of the documentation, or change some intended behaviours. +Thus, it is recommended to not change the version of trudag. +In case that it appears wise or necessary to change the version of trudag (e.g. when trudag is eventually certified), the following should be considered: + +* How has the algorithm for the accumulation of the trustable score changed? Ideally, it does not change, otherwise the necessity for a new review arises. +* How has the data store interface changed? Ideally, it has not changed, but historical data and the documentation indicate that a change of the data store interface happened at some time. 
+* How has the expected configuration for the items changed? It is known that this configuration changed (at least) once before. What does the potential change mean? +* Do all custom references and validators as well as the data store interface work as before? +* Has the algorithm for the hashing changed, or are there any changes to the trustable scores? If so, investigate carefully! + + +## Subject-Matter-Expert-scores + +The intention with the SME scores is to find the _true_ trustable score by means of a heuristic law-of-large-numbers argument. +Therefore, it is very much welcome if contributors add their SME scores to statements for which they feel confident to do so. +While the committer may check SME scores for plausibility, it is recommended to not question SME scores as this interferes with the assumed independence of the SME! +It is recommended that changes to SME scores are accumulated in the branch ``develop`` before the release of a new version of the documentation as to not clutter the release history. +It is highly recommended to not delete SME scores under usual circumstances; most certainly, the SME scores should never be changed by anybody except the original SME. +The following unusual circumstances can, after careful consideration, justify the removal or (much preferably!) the request for re-evaluation by the original SMEs: + +* change of references: + If, e.g. due to an update of ``nlohmann/json``, the references of any items (be it tests or code) changes, then this should trigger a re-evaluation of the statement. + In particular if the behaviour changed significantly, it can be justifiable to assume that the old SME scores do not reflect the statement anymore. +* addition of automatic validators: + Recall that the SME-scores have different meanings depending on whether or not an automatic validator is implemented. 
+ In the absence of a validator, the SME shall assess their confidence in the statement based on linked artifacts (references) and their own knowledge. + In the presence of a validator, the SME shall assess only their confidence in the validator as an accurate measure of the truth of the statement. + +## Validators + +The automatic validators are intended to calculate a trustable score based on quantifiable data. +In particular the introduction of a validator changes the meaning of the (potential) SME scores associated to a statement. +Therefore, the change or introduction of an automatic validator is most critical. +It is highly recommended to urge the original SME to re-review the statement and adapt their scores, or (at the least) to enlist additional SME to judge the changed statement. +After careful consideration the highly critical decision to remove some SME scores no longer reflecting the statement could be made. + +## References + +References should be treated as validators, i.e. any update of a reference should trigger a re-review by the SME. +For references, however, the decision to remove a stale SME score is even more critical unless the reference reveals critical new information, which is highly unlikely, or the change of the reference is triggered by a significant change in the behaviour of the library, which heavily affected the statement. \ No newline at end of file diff --git a/TSF/__init__.py b/TSF/__init__.py new file mode 100644 index 0000000000..c2d4cd1531 --- /dev/null +++ b/TSF/__init__.py @@ -0,0 +1 @@ +# This file makes the directory a Python package \ No newline at end of file diff --git a/TSF/docs/concept.rst b/TSF/docs/concept.rst new file mode 100644 index 0000000000..e5ea97188e --- /dev/null +++ b/TSF/docs/concept.rst @@ -0,0 +1,100 @@ +.. 
+ # ******************************************************************************* + # Copyright (c) 2025 Contributors to the Eclipse Foundation + # + # See the NOTICE file(s) distributed with this work for additional + # information regarding copyright ownership. + # + # This program and the accompanying materials are made available under the + # terms of the Apache License Version 2.0 which is available at + # https://www.apache.org/licenses/LICENSE-2.0 + # + # SPDX-License-Identifier: Apache-2.0 + # ******************************************************************************* +.. _concept: + +Concept +================= + +For the concept of the Trustable Software Framework (TSF), please refer to the `Eclipse Process Description `_. + +Scoring +--------------------- + +The Trustable scores in the TSF are calculated recursively using the underlying graph structure. + +Depending on whether the node is a leaf node (i.e., has no supporting statements) or a parent node (i.e., has supporting statements), the calculation of the trustability score differs. + +**Leaf nodes**: +For the calculation of the scores of leaf nodes, several attributes come into play. +In particular, the existence or absence of a validator plays a crucial role in how the trustability scores of leaf nodes are determined. 
+The following table displays the possible scenarios for calculating the scores of leaf nodes, depending on the existence or values of its attributes: + ++------------+----------------+--------------+--------------+--------------+------------------------------------------+ +| normative_ | review-status_ | SME-score_ | validator_ | reference_ | Trustable-score_ | ++============+================+==============+==============+==============+==========================================+ +| ``false`` | ``*`` | ``*`` | ``*`` | ``*`` | no score | ++------------+----------------+--------------+--------------+--------------+------------------------------------------+ +| ``true`` | ``unreviewed`` | ``*`` | ``*`` | ``*`` | 0.0 | ++------------+----------------+--------------+--------------+--------------+------------------------------------------+ +| ``true`` | ``reviewed`` | ``provided`` | ``excluded`` | ``excluded`` | 0.0 | ++------------+----------------+--------------+--------------+--------------+------------------------------------------+ +| ``true`` | ``reviewed`` | ``provided`` | ``excluded`` | ``included`` | SME-score x 1 | ++------------+----------------+--------------+--------------+--------------+------------------------------------------+ +| ``true`` | ``reviewed`` | ``provided`` | ``included`` | ``*`` | SME-score × validator-score | ++------------+----------------+--------------+--------------+--------------+------------------------------------------+ + + +.. _normative: https://codethinklabs.gitlab.io/trustable/trustable/reference/trudag/dotstop/core/item.html#trudag.dotstop.core.item.BaseItem.normative +.. _review-status: https://codethinklabs.gitlab.io/trustable/trustable/reference/trudag/dotstop/core/graph/trustable_graph.html#trudag.dotstop.core.graph.trustable_graph.TrustableGraph.set_link_status +.. _SME-score: https://codethinklabs.gitlab.io/trustable/trustable/reference/trudag/dotstop/core/item.html#trudag.dotstop.core.item.BaseItem.sme_scores +.. 
_validator: https://codethinklabs.gitlab.io/trustable/trustable/trudag/usage.html#providing-evidence +.. _reference: https://codethinklabs.gitlab.io/trustable/trustable/reference/trudag/dotstop/core/item.html#trudag.dotstop.core.item.BaseItem.references +.. _Trustable-score: https://codethinklabs.gitlab.io/trustable/trustable/trudag/scoring-roadmap.html + + + +**Parent nodes**: +The score for any parent node is then recursively calculated as the normalized weighted sum of the scores of its supporting statements. +Note that currently, TSF only supports equal weighting. +In other words, the score of a parent node is the mean of the scores of its child nodes. +This behaviour is however likely to change in the future, to support different weighting schemes. +Any supporting statements with a suspect link are excluded from the calculation of the scores of parent nodes. +The following table displays the possible scenarios for calculating the scores of parent nodes: + ++------------+----------------+--------------+----------------------------------------------------+ +| normative_ | review-status_ | link-status_ | Trustable-score_ | ++============+================+==============+====================================================+ +| ``false`` | ``*`` | ``*`` | no score | ++------------+----------------+--------------+----------------------------------------------------+ +| ``true`` | ``unreviewed`` | ``*`` | 0.0 | ++------------+----------------+--------------+----------------------------------------------------+ +| ``true`` | ``reviewed`` | ``suspect`` | mean of supporting statements with no suspect links| ++------------+----------------+--------------+----------------------------------------------------+ +| ``true`` | ``reviewed`` | ``linked`` | mean of all supporting statements | ++------------+----------------+--------------+----------------------------------------------------+ + +.. 
_normative: https://codethinklabs.gitlab.io/trustable/trustable/reference/trudag/dotstop/core/item.html#trudag.dotstop.core.item.BaseItem.normative +.. _review-status: https://codethinklabs.gitlab.io/trustable/trustable/reference/trudag/dotstop/core/graph/trustable_graph.html#trudag.dotstop.core.graph.trustable_graph.TrustableGraph.set_link_status +.. _link-status: https://codethinklabs.gitlab.io/trustable/trustable/reference/trudag/dotstop/core/graph/trustable_graph.html#trudag.dotstop.core.graph.trustable_graph.LinkStatus +.. _Trustable-score: https://codethinklabs.gitlab.io/trustable/trustable/trudag/scoring-roadmap.html + + +Terminology +~~~~~~~~~~~ +- **normative**: Indicates whether the statement is normative (`true`) or not (`false`). If a statement is not normative, it does not contribute to the score calculation, and shall not be reviewed. This attribute is not to be set or changed by the SME reviewer. +- **review-status**: Indicates the current review status of the statement. An `unreviewed` status means the statement needs to be reviewed or re-reviewed, and that the score is set to `0.0`. A `reviewed` status means the statement has been reviewed by a subject matter expert (SME). +- **SME-score**: A score reflecting the SME reviewer's confidence in the truth of the statement as a probability. +- **validator**: Automatic scripts that validate the correctness of a statement. Note that in the markdown files, validators are referred to as "evidence". +- **reference**: Supporting material for the SME reviewer to evaluate the statement. +- **link-status**: Indicates whether the statement has any suspect links. If suspect links exist, the score of a parent item is calculated based on child items without suspect links only. +- **Trustable-score**: Shows how the TSF score is calculated. + +Example scoring +~~~~~~~~~~~~~~~ +This diagram illustrates an example of the scoring process in the TSF and how the score propagates upwards in the graph. 
+Please note that the numbers shown in this graph are **example values only** and do not represent real assessments. + +.. image:: score_calculation_example.svg + :alt: Graph illustrating the scoring process in the Trustable Software Framework + :align: center \ No newline at end of file diff --git a/TSF/docs/conf.py b/TSF/docs/conf.py new file mode 100644 index 0000000000..22c2ff1eab --- /dev/null +++ b/TSF/docs/conf.py @@ -0,0 +1,66 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +# Configuration file for the Sphinx documentation builder. 
+# +# For the full list of built-in configuration values, see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + + +# -- Project information ----------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information + +project = "S-CORE NLOHMANN JSON LIBRARY FORK" +author = "S-CORE" +version = "3.12.0" +project_url = "https://score-json.github.io/json" +project_prefix = "JSON_" + +# -- General configuration --------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration + + +extensions = [ + "sphinx_design", + "sphinx_needs", + "myst_parser", + "sphinxcontrib.plantuml", + "score_plantuml", + "score_metamodel", + "score_draw_uml_funcs", + "score_source_code_linker", + "score_layout", +] + +myst_enable_extensions = ["colon_fence"] + +source_suffix = { + '.rst': 'restructuredtext', + '.md': 'markdown', +} + +suppress_warnings = ['ref.warning', 'toc.not_included', 'myst.xref_missing', 'myst.header', 'misc.highlighting_failure'] + +exclude_patterns = [ + # The following entries are not required when building the documentation via 'bazel + # build //docs:docs', as that command runs in a sandboxed environment. However, when + # building the documentation via 'bazel run //docs:incremental' or esbonio, these + # entries are required to prevent the build from failing. + "bazel-*", + ".venv_docs", +] + +templates_path = ["templates"] + +# Enable numref +numfig = True diff --git a/TSF/docs/index.rst b/TSF/docs/index.rst new file mode 100644 index 0000000000..457a3449d8 --- /dev/null +++ b/TSF/docs/index.rst @@ -0,0 +1,38 @@ +.. + # ******************************************************************************* + # Copyright (c) 2025 Contributors to the Eclipse Foundation + # + # See the NOTICE file(s) distributed with this work for additional + # information regarding copyright ownership. 
+ # + # This program and the accompanying materials are made available under the + # terms of the Apache License Version 2.0 which is available at + # https://www.apache.org/licenses/LICENSE-2.0 + # + # SPDX-License-Identifier: Apache-2.0 + # ******************************************************************************* + +.. _library_description: + +S-CORE NLOHMANN JSON LIBRARY FORK +=================================== + +This module is dedicated to implementing the Trustable Software Framework for the Niels Lohmann JSON Library. Initially, it emphasizes ensuring the reliability and correctness of the library's parsing functionality. The Niels Lohmann JSON Library is recognized for its efficient and straightforward approach to JSON parsing, manipulation, and serialization within modern C++ applications, aiming to provide developers with a flexible and robust tool for managing JSON data structures. The framework seeks to enhance these capabilities, aligning them with rigorous software quality standards to ensure dependable JSON processing across diverse applications. + +.. contents:: Table of Contents + :depth: 2 + :local: + +Overview +-------- + +The core of this documentation is the *Trustable Compliance Report*, which may be found in `Report `_ and which lists the supporting statements for the trustability of nlohmann/json together with their trustability score and the supporting artifacts. This report is augmented by the trustable graph, which is the graphical representation of the argumentation. + +.. 
toctree:: + :maxdepth: 1 + :glob: + + introduction/index.rst + concept.rst + report.rst + Eclipse S-CORE Home-Page \ No newline at end of file diff --git a/TSF/docs/introduction/context_diagram.svg b/TSF/docs/introduction/context_diagram.svg new file mode 100644 index 0000000000..f470bec4e9 --- /dev/null +++ b/TSF/docs/introduction/context_diagram.svg @@ -0,0 +1 @@ +Mirrorofnlohmann/jsonIntegratorChange managementDependencyandbuildmanagementAnalysisofdata managementnlohmann/jsonSoftware (downstreamsystem)ExceptionhandlingInput sourceOutputtargetWarningindicatorsandmitigationsErrorreportingJSON inputCompilerandtoolversionsUpdateChangeLog.mdMonitoringdataExceptionsDeserializedobjectIdentifiedmisbehavioursInputconstraintsUpdatesinmirrorJSON Parsingfunctionality(Deserializationonly) \ No newline at end of file diff --git a/TSF/docs/introduction/index.rst b/TSF/docs/introduction/index.rst new file mode 100644 index 0000000000..c2418a1297 --- /dev/null +++ b/TSF/docs/introduction/index.rst @@ -0,0 +1,77 @@ +.. + # ******************************************************************************* + # Copyright (c) 2025 Contributors to the Eclipse Foundation + # + # See the NOTICE file(s) distributed with this work for additional + # information regarding copyright ownership. + # + # This program and the accompanying materials are made available under the + # terms of the Apache License Version 2.0 which is available at + # https://www.apache.org/licenses/LICENSE-2.0 + # + # SPDX-License-Identifier: Apache-2.0 + # ******************************************************************************* + +.. _introduction: + +Introduction +======================================================== + +This document outlines the application of the Trustable Software Framework (TSF) to the `JSON library `_ (version 3.12.0) developed by Niels Lohmann. The TSF aims to ensure software reliability and compliance by setting guidelines for evaluating various aspects of software development. 
Our focus here is the library's integration into the baselibs repository within the S-CORE project. The ultimate goal is to certify the library as trustable based on stringent evaluation criteria involving rigorous testing, intuitive design, and seamless integration. + +Design Goals of the JSON Library +-------------------------------- + +The JSON library by Niels Lohmann was developed with several design goals in mind: + +- **Intuitive Syntax**: The library leverages modern C++ operator magic to provide an intuitive syntax similar to the native JSON experience in languages like Python. + +- **Trivial Integration**: Comprising a single header file `json.hpp`, this library demands no complex build systems or dependencies, facilitating easy integration into any project using vanilla C++11. + +- **Serious Testing**: Extensive unit tests ensure 100% coverage of the code, including exceptional behavior. Tools like Valgrind and Clang Sanitizers verify memory safety, while Google OSS-Fuzz performs continuous fuzz testing. + +Notably, memory efficiency and speed were not primary focuses, allowing for slight trade-offs in these areas to prioritize ease of integration and reliability. + +Baselibs Project Context +------------------------ + +The integration effort is situated within the baselibs project, aimed at qualifying library performance and compliance with internal standards. As part of this project, the TSF process has been embedded into the score repository to generate and analyze evidence regarding software trustability. + +Component Classification Strategy +----------------------------------- + +- **Process Overview**: The baselibs project is upholding the TSF to define guidelines and generate reliable evidence of compliance, analyzing specific requirements such as MISRA and functionality consistency. 
+ +- **Challenges and Decisions**: + - Divergence from the original library code has been discussed to minimize unnecessary interference while adhering to project needs. + - MISRA compliance is selectively applied, and necessary adaptations are considered at the upstream level where applicable. + +- **Strategic Directions**: + - Evidence requirements are mapped and analyzed for gaps, leading to possible code amendments or forks. + - Questions concerning the library's behavior are systematically answered, providing coverage details and tracing requirements to standards like ISO. + +Find more descriptions on the ongoing process and requirements at `Eclipse Process Description `_. + +Limitations of this documentation +--------------------------------- + +The present documentation covers a small part of the functionalities of Niels Lohmann's JSON library only, due to the integration into the baselibs project. +In the latter, it is intended to utilize the JSON library for the purpose of parsing JSON data into a user-datatype. +The underlying standard, which defines the syntax of JSON data and the necessary parsing capabilities, is given in `RFC8259 `_. +Therefore, this documentation asserts the trustability of the capabilities of the library to recognize ill-formed JSON data according to RFC8259 and parse well-formed JSON data. +In particular, the capabilities (and inabilities) according to different JSON formats, e.g. `RFC6902 `_, `RFC7396 `_, `RFC7493 `_, `RFC7049 `_ and `RFC8949 `_ are not covered in this documentation. + + +Context Diagram +----------------------------------- + +The aim of this context diagram is to provide a high-level overview of the JSON library's interactions with external entities in its environment. It illustrates the boundaries and expected interfaces of the JSON library within its operational context as assumed by this documentation. + +.. 
image:: context_diagram.svg + :alt: Context Diagram + :width: 1100px + +Conclusion +---------- + +The application of the Trustable Software Framework to Niels Lohmann's JSON library (version 3.12.0) involves a comprehensive assessment to ensure it meets our high-level requirements for external software. Key actions include formalizing necessary functionalities, addressing feature requests from S-CORE, and integrating trustable evidence into a coherent project workflow. The library is maintained with stringent quality checks and evidence generation processes, illustrating a commitment to high standards and the practicality required for certifiable software projects. diff --git a/TSF/docs/list_of_test_environments.md b/TSF/docs/list_of_test_environments.md new file mode 100644 index 0000000000..ab5e120c50 --- /dev/null +++ b/TSF/docs/list_of_test_environments.md @@ -0,0 +1,7530 @@ +## List of all unit-tests with test environments + + This list contains all unit-tests possibly running in this project. + These tests are compiled from the source-code, where the individual unit-tests are arranged in TEST_CASEs containing possibly nested SECTIONs. + To reflect the structure of the nested sections, nested lists are utilised, where the top-level list represents the list of TEST_CASEs. + + It should be noted that not all unit-tests in a test-file are executed with every compiler-configuration. 
+ + +### List of tests in file unit-32bit.cpp + +* value_in_range_of trait +* 32bit +* BJData + * parse errors + * array + * optimized array: negative size + * optimized array: integer value overflow + + + +All tests in this file were run in the following configurations: + +* Linux-g++ with standard gnu++11 + + +### List of tests in file unit-algorithms.cpp + +* algorithms + * non-modifying sequence operations + * std::all_of + * std::any_of + * std::none_of + * std::for_each + * reading + * writing + * std::count + * std::count_if + * std::mismatch + * std::equal + * using operator== + * using user-defined comparison + * std::find + * std::find_if + * std::find_if_not + * std::adjacent_find + * modifying sequence operations + * std::reverse + * std::rotate + * std::partition + * sorting operations + * std::sort + * with standard comparison + * with user-defined comparison + * sorting an object + * std::partial_sort + * set operations + * std::merge + * std::set_difference + * std::set_intersection + * std::set_union + * std::set_symmetric_difference + * heap operations + * iota + * int + * double + * char + * copy + * copy without if + * copy if + * copy n + * copy n chars + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 
9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-allocator.cpp + +* bad_alloc + * bad_alloc +* controlled bad_alloc + * class json_value + * json_value(value_t) + * object + * array + * string + * json_value(const string_t&) + * class basic_json + * basic_json(const CompatibleObjectType&) + * basic_json(const CompatibleArrayType&) + * basic_json(const typename string_t::value_type*) + * basic_json(const typename string_t::value_type*) +* bad my_allocator::construct + * my_allocator::construct doesn't forward + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 
+* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-alt-string.cpp + +* alternative string type + * dump + * parse + * items + * equality + * JSON pointer + * patch + * diff + * flatten + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard 
gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-arrays.cpp + +* accept + * boundaries +* parse + * whitespace + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with 
standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-assert_macro.cpp + +* JSON_ASSERT(x) + * basic_json(first, second) + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 
9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-binary_formats.cpp + +* Binary Formats + * canada.json + * twitter.json + * citm_catalog.json + * jeopardy.json + * sample.json + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 + +In the following configuration, however, some test-cases were skipped: + +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++20 +* 1 test case was skipped when using GNU 11.5.0 with standard gnu++11 +* 1 test case was skipped when using GNU 8.5.0 with standard gnu++11 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++11 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++14 +* 1 test case was skipped when using GNU 4.9.3 with standard gnu++11 +* 1 test case was skipped when using Clang 18.1.8 
with standard gnu++11 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++11 +* 1 test case was skipped when using GNU 13.3.0 with standard gnu++11 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++11 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++11 +* 1 test case was skipped when using GNU 12.5.0 with standard gnu++11 +* 1 test case was skipped when using GNU 10.5.0 with standard gnu++11 +* 1 test case was skipped when using GNU 13.3.0 with standard gnu++11 +* 1 test case was skipped when using GNU 9.5.0 with standard gnu++11 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++14 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++23 +* 1 test case was skipped when using GNU 6.4.0 with standard gnu++11 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++17 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++20 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++17 +* 1 test case was skipped when using Clang 19.1.7 with standard gnu++11 +* 1 test case was skipped when using Clang 17.0.6 with standard gnu++11 +* 1 test case was skipped when using Intel 2021.5.0.20211109 with standard gnu++11 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++23 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++17 +* 1 test case was skipped when using GNU 7.5.0 with standard gnu++11 +* 1 test case was skipped when using GNU 13.4.0 with standard gnu++11 +* 1 test case was skipped when using GNU 5.5.0 with standard gnu++11 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++23 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++14 +* 1 test case was skipped when using GNU 4.8.5 with standard gnu++11 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++20 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++11 +* 1 test case was skipped 
when using GNU 9.4.0 with standard gnu++11 +* 1 test case was skipped when using Linux-c++ with standard gnu++11 +* 1 test case was skipped when using GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-bjdata.cpp + +* value_in_range_of trait +* BJData + * binary_reader BJData LUT arrays are sorted + * individual values + * discarded + * null + * boolean + * true + * false + * byte + * 0..255 (uint8) + * number + * signed + * -9223372036854775808..-2147483649 (int64) + * -2147483648..-32769 (int32) + * -32768..-129 (int16) + * -9263 (int16) + * -128..-1 (int8) + * 0..127 (int8) + * 128..255 (uint8) + * 256..32767 (int16) + * 32768..65535 (uint16) + * 65536..2147483647 (int32) + * 2147483648..4294967295 (uint32) + * 4294967296..9223372036854775807 (int64) + * 9223372036854775808..18446744073709551615 (uint64) + * unsigned + * 0..127 (int8) + * 128..255 (uint8) + * 256..32767 (int16) + * 32768..65535 (uint16) + * 65536..2147483647 (int32) + * 2147483648..4294967295 (uint32) + * 4294967296..9223372036854775807 (int64) + * 9223372036854775808..18446744073709551615 (uint64) + * float64 + * 3.1415925 + * half-precision float + * simple half floats + * errors + * no byte follows + * only one byte follows + * half-precision float (edge cases) + * exp = 0b00000 + * 0 (0 00000 0000000000) + * -0 (1 00000 0000000000) + * 2**-24 (0 00000 0000000001) + * exp = 0b11111 + * infinity (0 11111 0000000000) + * -infinity (1 11111 0000000000) + * other values from https://en.wikipedia.org/wiki/Half-precision_floating-point_format + * 1 (0 01111 0000000000) + * -2 (1 10000 0000000000) + * 65504 (0 11110 1111111111) + * infinity + * NaN + * high-precision number + * unsigned integer number + * signed integer number + * floating-point number + * errors + * string + * N = 0..127 + * N = 128..255 + * N = 256..32767 + * N = 32768..65535 + * N = 65536..2147483647 + * binary + * N = 0..127 + * N = 128..255 + * N = 256..32767 + * N = 32768..65535 + * N = 65536..2147483647 + * 
Other Serializations + * No Count No Type + * Yes Count No Type + * array + * empty + * size=false type=false + * size=true type=false + * size=true type=true + * [null] + * size=false type=false + * size=true type=false + * size=true type=true + * [1,2,3,4,5] + * size=false type=false + * size=true type=false + * size=true type=true + * [[[[]]]] + * size=false type=false + * size=true type=false + * size=true type=true + * array with int16_t elements + * size=false type=false + * size=true type=false + * array with uint16_t elements + * size=false type=false + * size=true type=false + * array with int32_t elements + * size=false type=false + * size=true type=false + * object + * empty + * size=false type=false + * size=true type=false + * size=true type=true + * {\ + * size=false type=false + * size=true type=false + * {\ + * size=false type=false + * size=true type=false + * size=true type=true ignore object type marker +* errors + * strict mode + * non-strict mode + * strict mode +* SAX aborts + * start_array() + * start_object() + * key() in object + * start_array(len) + * start_object(len) + * key() in object with length + * start_array() in ndarray _ArraySize_ + * number_integer() in ndarray _ArraySize_ + * key() in ndarray _ArrayType_ + * string() in ndarray _ArrayType_ + * key() in ndarray _ArrayData_ + * string() in ndarray _ArrayData_ + * string() in ndarray _ArrayType_ + * start_array() in ndarray _ArrayData_ +* parsing values + * strings + * number + * float + * array + * optimized version (length only) + * optimized version (type and length) + * optimized ndarray (type and vector-size as optimized 1D array) + * optimized ndarray (type and vector-size ndarray with JData annotations) + * optimized ndarray (type and vector-size as 1D array) + * optimized ndarray (type and vector-size as size-optimized array) + * invalid ndarray annotations remains as object +* parse errors + * empty byte vector + * char + * eof after C byte + * byte out of range + * byte 
+ * parse bjdata markers in ubjson + * strings + * eof after S byte + * invalid byte + * parse bjdata markers in ubjson + * array + * optimized array: no size following type + * optimized array: negative size + * optimized array: integer value overflow + * do not accept NTFZ markers in ndarray optimized type (with count) + * do not accept NTFZ markers in ndarray optimized type (without count) + * strings + * sizes + * parse bjdata markers as array size in ubjson + * types + * arrays + * ndarrays + * objects +* writing optimized values + * integer + * array of i + * array of U + * array of I + * array of u + * array of l + * array of m + * array of L + * unsigned integer + * array of i + * array of U + * array of I + * array of u + * array of l + * array of m + * array of L + * array of M +* Universal Binary JSON Specification Examples 1 + * Null Value + * No-Op Value + * Boolean Types + * Numeric Types + * Char Type + * Byte Type + * String Type + * English + * Russian + * Russian + * Array Type + * size=false type=false + * size=true type=false + * size=true type=true + * Object Type + * size=false type=false + * size=true type=false + * size=true type=true + * Optimized Format + * Array Example + * No Optimization + * Optimized with count + * Optimized with type & count + * Object Example + * No Optimization + * Optimized with count + * Optimized with type & count + * Special Cases (Null, No-Op and Boolean) + * Array + * Object +* all BJData first bytes +* BJData roundtrips + * input from self-generated BJData files + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 + +In the following configuration, however, some 
test-cases were skipped: + +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++20 +* 1 test case was skipped when using GNU 11.5.0 with standard gnu++11 +* 1 test case was skipped when using GNU 8.5.0 with standard gnu++11 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++11 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++14 +* 1 test case was skipped when using GNU 4.9.3 with standard gnu++11 +* 1 test case was skipped when using Clang 18.1.8 with standard gnu++11 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++11 +* 1 test case was skipped when using GNU 13.3.0 with standard gnu++11 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++11 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++11 +* 1 test case was skipped when using GNU 12.5.0 with standard gnu++11 +* 1 test case was skipped when using GNU 10.5.0 with standard gnu++11 +* 1 test case was skipped when using GNU 13.3.0 with standard gnu++11 +* 1 test case was skipped when using GNU 9.5.0 with standard gnu++11 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++14 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++23 +* 1 test case was skipped when using GNU 6.4.0 with standard gnu++11 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++17 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++20 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++17 +* 1 test case was skipped when using Clang 19.1.7 with standard gnu++11 +* 1 test case was skipped when using Clang 17.0.6 with standard gnu++11 +* 1 test case was skipped when using Intel 2021.5.0.20211109 with standard gnu++11 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++23 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++17 +* 1 test case was skipped when using GNU 7.5.0 with standard gnu++11 +* 1 test case was skipped 
when using GNU 13.4.0 with standard gnu++11 +* 1 test case was skipped when using GNU 5.5.0 with standard gnu++11 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++23 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++14 +* 1 test case was skipped when using GNU 4.8.5 with standard gnu++11 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++20 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++11 +* 1 test case was skipped when using GNU 9.4.0 with standard gnu++11 +* 1 test case was skipped when using Linux-c++ with standard gnu++11 +* 1 test case was skipped when using GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-bson.cpp + +* BSON + * individual values not supported + * null + * boolean + * true + * false + * number + * float + * string + * array + * keys containing code-point U+0000 cannot be serialized to BSON + * string length must be at least 1 + * objects + * empty object + * non-empty object with bool + * non-empty object with bool + * non-empty object with double + * non-empty object with string + * non-empty object with null member + * non-empty object with integer (32-bit) member + * non-empty object with integer (64-bit) member + * non-empty object with negative integer (32-bit) member + * non-empty object with negative integer (64-bit) member + * non-empty object with unsigned integer (64-bit) member + * non-empty object with small unsigned integer member + * non-empty object with object member + * non-empty object with array member + * non-empty object with non-empty array member + * non-empty object with binary member + * non-empty object with binary member with subtype + * Some more complex document + * Examples from https://bsonspec.org/faq.html + * Example 1 + * Example 2 +* BSON input/output_adapters + * roundtrips + * std::ostringstream + * std::string + * std::vector +* Incomplete BSON Input + * Incomplete BSON Input 1 + * Incomplete BSON Input 
2 + * Incomplete BSON Input 3 + * Incomplete BSON Input 4 + * Improve coverage + * key + * array +* Negative size of binary value +* Unsupported BSON input +* BSON numerical data + * number + * signed + * std::int64_t: INT64_MIN .. INT32_MIN-1 + * signed std::int32_t: INT32_MIN .. INT32_MAX + * signed std::int64_t: INT32_MAX+1 .. INT64_MAX + * unsigned + * unsigned std::uint64_t: 0 .. INT32_MAX + * unsigned std::uint64_t: INT32_MAX+1 .. INT64_MAX + * unsigned std::uint64_t: INT64_MAX+1 .. UINT64_MAX +* BSON roundtrips + * reference files + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 + +In the following configuration, however, some test-cases were skipped: + +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++20 +* 1 test case was skipped when using GNU 11.5.0 with standard gnu++11 +* 1 test case was skipped when using GNU 8.5.0 with standard gnu++11 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++11 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++14 +* 1 test case was skipped when using GNU 4.9.3 with standard gnu++11 +* 1 test case was skipped when using Clang 18.1.8 with standard gnu++11 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++11 +* 1 test case was skipped when using GNU 13.3.0 with standard gnu++11 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++11 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++11 +* 1 test case was skipped when using GNU 12.5.0 with standard gnu++11 +* 1 test case was skipped when using GNU 10.5.0 with standard gnu++11 +* 1 test case was skipped when using GNU 13.3.0 with 
standard gnu++11 +* 1 test case was skipped when using GNU 9.5.0 with standard gnu++11 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++14 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++23 +* 1 test case was skipped when using GNU 6.4.0 with standard gnu++11 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++17 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++20 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++17 +* 1 test case was skipped when using Clang 19.1.7 with standard gnu++11 +* 1 test case was skipped when using Clang 17.0.6 with standard gnu++11 +* 1 test case was skipped when using Intel 2021.5.0.20211109 with standard gnu++11 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++23 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++17 +* 1 test case was skipped when using GNU 7.5.0 with standard gnu++11 +* 1 test case was skipped when using GNU 13.4.0 with standard gnu++11 +* 1 test case was skipped when using GNU 5.5.0 with standard gnu++11 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++23 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++14 +* 1 test case was skipped when using GNU 4.8.5 with standard gnu++11 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++20 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++11 +* 1 test case was skipped when using GNU 9.4.0 with standard gnu++11 +* 1 test case was skipped when using Linux-c++ with standard gnu++11 +* 1 test case was skipped when using GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-byte_container_with_subtype.cpp + +* byte_container_with_subtype + * empty container + * subtyped container + * comparisons + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with 
standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-byte_order_mark.cpp + +* accept + * UTF-8 + * single BOM + * multiple BOM + * unexpected BOM + * Other byte-order marks + * UTF-16 + * UTF-32 +* parse + * UTF-8 + * multiple BOM + * unexpected BOM + * other BOM + * UTF-16 + * UTF-32 + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with 
standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-capacity.cpp + +* capacity + * empty() + * boolean + * result of empty + * definition of empty + * string + * result of empty + * definition of empty + * array + * empty array + * result of empty + * definition of empty + * filled array + * result of empty + * definition of empty + * object + * empty object + * result of empty + 
* definition of empty + * filled object + * result of empty + * definition of empty + * number (integer) + * result of empty + * definition of empty + * number (unsigned) + * result of empty + * definition of empty + * number (float) + * result of empty + * definition of empty + * null + * result of empty + * definition of empty + * size() + * boolean + * result of size + * definition of size + * string + * result of size + * definition of size + * array + * empty array + * result of size + * definition of size + * filled array + * result of size + * definition of size + * object + * empty object + * result of size + * definition of size + * filled object + * result of size + * definition of size + * number (integer) + * result of size + * definition of size + * number (unsigned) + * result of size + * definition of size + * number (float) + * result of size + * definition of size + * null + * result of size + * definition of size + * max_size() + * boolean + * result of max_size + * string + * result of max_size + * array + * empty array + * result of max_size + * filled array + * result of max_size + * object + * empty object + * result of max_size + * filled object + * result of max_size + * number (integer) + * result of max_size + * number (unsigned) + * result of max_size + * number (float) + * result of max_size + * null + * result of max_size + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 
14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-cbor.cpp + +* CBOR + * individual values + * discarded + * NaN + * Infinity + * null + * boolean + * true + * false + * number + * signed + * -9223372036854775808..-4294967297 + * -4294967296..-65537 + * -65536..-257 + * -9263 (int 16) + * -256..-24 + * -24..-1 + * 0..23 + * 24..255 + * 256..65535 + * 65536..4294967295 + * 4294967296..4611686018427387903 + * -32768..-129 (int 16) + * unsigned + * 0..23 (Integer) + * 24..255 (one-byte uint8_t) + * 256..65535 (two-byte uint16_t) + * 65536..4294967295 (four-byte uint32_t) + * 4294967296..4611686018427387903 (eight-byte uint64_t) + * double-precision float + * 3.1415925 + * single-precision float + * 0.5 + * 0.0 + * -0.0 + * 100.0 + * 200.0 + * 3.40282e+38(max float) + * -3.40282e+38(lowest float) + * 1 + 3.40282e+38(more than max float) + * -1 - 3.40282e+38(less than 
lowest float) + * half-precision float (edge cases) + * errors + * no byte follows + * only one byte follows + * exp = 0b00000 + * 0 (0 00000 0000000000) + * -0 (1 00000 0000000000) + * 2**-24 (0 00000 0000000001) + * exp = 0b11111 + * infinity (0 11111 0000000000) + * -infinity (1 11111 0000000000) + * other values from https://en.wikipedia.org/wiki/Half-precision_floating-point_format + * 1 (0 01111 0000000000) + * -2 (1 10000 0000000000) + * 65504 (0 11110 1111111111) + * infinity + * NaN + * string + * N = 0..23 + * N = 24..255 + * N = 256..65535 + * N = 65536..4294967295 + * array + * empty + * [null] + * [1,2,3,4,5] + * [[[[]]]] + * array with uint16_t elements + * array with uint32_t elements + * object + * empty + * {\ + * {\ + * object with uint8_t elements + * object with uint16_t elements + * object with uint32_t elements + * binary + * N = 0..23 + * N = 24..255 + * N = 256..65535 + * N = 65536..4294967295 + * indefinite size + * binary in array + * binary in object + * SAX callback with binary + * additional deserialization + * 0x5b (byte array) + * 0x7b (string) + * 0x9b (array) + * 0xbb (map) + * errors + * empty byte vector + * too short byte vector + * unsupported bytes + * concrete examples + * all unsupported bytes + * invalid string in map + * strict mode + * non-strict mode + * strict mode + * SAX aborts + * start_array(len) + * start_object(len) + * key() +* single CBOR roundtrip + * sample.json + * roundtrips + * std::ostringstream + * std::string +* CBOR regressions + * fuzz test results +* CBOR roundtrips + * input from flynn +* all CBOR first bytes +* examples from RFC 7049 Appendix A + * numbers + * simple values + * strings + * byte arrays + * arrays + * objects +* Tagged values + * 0xC6..0xD4 + * 0xD8 - 1 byte follows + * success + * missing byte after tag + * 0xD9 - 2 byte follow + * success + * missing byte after tag + * 0xDA - 4 bytes follow + * success + * missing bytes after tag + * 0xDB - 8 bytes follow + * success + * missing byte 
after tag + * tagged binary + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 + +In the following configuration, however, some test-cases were skipped: + +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++20 +* 1 test case was skipped when using GNU 11.5.0 with standard gnu++11 +* 1 test case was skipped when using GNU 8.5.0 with standard gnu++11 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++11 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++14 +* 1 test case was skipped when using GNU 4.9.3 with standard gnu++11 +* 1 test case was skipped when using Clang 18.1.8 with standard gnu++11 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++11 +* 1 test case was skipped when using GNU 13.3.0 with standard gnu++11 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++11 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++11 +* 1 test case was skipped when using GNU 12.5.0 with standard gnu++11 +* 1 test case was skipped when using GNU 10.5.0 with standard gnu++11 +* 1 test case was skipped when using GNU 13.3.0 with standard gnu++11 +* 1 test case was skipped when using GNU 9.5.0 with standard gnu++11 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++14 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++23 +* 1 test case was skipped when using GNU 6.4.0 with standard gnu++11 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++17 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++20 +* 1 test case was skipped when using Clang 20.1.8 with standard 
gnu++17 +* 1 test case was skipped when using Clang 19.1.7 with standard gnu++11 +* 1 test case was skipped when using Clang 17.0.6 with standard gnu++11 +* 1 test case was skipped when using Intel 2021.5.0.20211109 with standard gnu++11 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++23 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++17 +* 1 test case was skipped when using GNU 7.5.0 with standard gnu++11 +* 1 test case was skipped when using GNU 13.4.0 with standard gnu++11 +* 1 test case was skipped when using GNU 5.5.0 with standard gnu++11 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++23 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++14 +* 1 test case was skipped when using GNU 4.8.5 with standard gnu++11 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++20 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++11 +* 1 test case was skipped when using GNU 9.4.0 with standard gnu++11 +* 1 test case was skipped when using Linux-c++ with standard gnu++11 +* 1 test case was skipped when using GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-class_const_iterator.cpp + +* const_iterator class + * construction + * constructor + * null + * object + * array + * copy assignment + * copy constructor from non-const iterator + * create from uninitialized iterator + * create from initialized iterator + * initialization + * set_begin + * null + * object + * array + * set_end + * null + * object + * array + * element access + * operator* + * null + * number + * object + * array + * operator-> + * null + * number + * object + * array + * increment/decrement + * post-increment + * null + * number + * object + * array + * pre-increment + * null + * number + * object + * array + * post-decrement + * null + * number + * object + * array + * pre-decrement + * null + * number + * object + * array + + + +All tests in this file were run in the 
following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-class_iterator.cpp + +* iterator class + * construction + * constructor + * null + * object + * array + * copy assignment + * initialization + * set_begin + * null + * object + * array + * set_end + * null + * object + * array + * element access + 
* operator* + * null + * number + * object + * array + * operator-> + * null + * number + * object + * array + * increment/decrement + * post-increment + * null + * number + * object + * array + * pre-increment + * null + * number + * object + * array + * post-decrement + * null + * number + * object + * array + * pre-decrement + * null + * number + * object + * array + * equality-preserving + * post-increment + * primitive_iterator_t + * iter_impl + * json_reverse_iterator + * post-decrement + * primitive_iterator_t + * iter_impl + * json_reverse_iterator + * cert-dcl21-cpp + * post-increment + * primitive_iterator_t + * iter_impl + * json_reverse_iterator + * post-decrement + * primitive_iterator_t + * iter_impl + * json_reverse_iterator + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 
2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-class_lexer.cpp + +* lexer class + * scan + * structural characters + * literal names + * numbers + * whitespace + * token_type_name + * parse errors on first character + * very large string + * fail on comments + * ignore comments + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 
with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-class_parser.cpp + +* parser class + * parse + * null + * true + * false + * array + * empty array + * nonempty array + * object + * empty object + * nonempty object + * string + * errors + * additional test for null byte + * escaped + * number + * integers + * without exponent + * with exponent + * edge cases + * over the edge cases + * floating-point + * without exponent + * with exponent + * overflow + * invalid numbers + * accept + * null + * true + * false + * array + * empty array + * nonempty array + * object + * empty object + * nonempty object + * string + * errors + * escaped + * number + * integers + * without exponent + * with exponent + * edge cases + * over the edge cases + * floating-point + * without exponent + * with exponent + * overflow + * invalid numbers + * parse errors + * parse errors (accept) + * tests found by mutate++ + * callback function + * filter nothing + * filter everything + * filter specific element + * filter object in array + * filter specific events + * first closing event + * special cases + * constructing from contiguous containers + * from std::vector + * from std::array + * from array + * from char literal + * from std::string + * from std::initializer_list + * from std::valarray + * improve test coverage + * parser with callback + * SAX parser + * } without value + * } with value + * second key + * ] without value + * ] with value + * float + * false + * 
null + * true + * unsigned + * integer + * string + * error messages for comments + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-class_parser_core.cpp + +* parser class - core + * parse + * null + * true + * false + * array + * empty array + * 
nonempty array + * object + * empty object + * nonempty object + * string + * errors + * additional test for null byte + * escaped + * number + * integers + * without exponent + * with exponent + * edge cases + * over the edge cases + * floating-point + * without exponent + * with exponent + * overflow + * invalid numbers + * accept + * null + * true + * false + * array + * empty array + * nonempty array + * object + * empty object + * nonempty object + * string + * errors + * escaped + * number + * integers + * without exponent + * with exponent + * edge cases + * over the edge cases + * floating-point + * without exponent + * with exponent + * overflow + * invalid numbers + * parse errors + * parse errors (accept) + * tests found by mutate++ + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* 
Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-class_parser_diagnostic_positions.cpp + +* parser class + * parse + * null + * true + * false + * array + * empty array + * nonempty array + * object + * empty object + * nonempty object + * string + * errors + * additional test for null byte + * escaped + * number + * integers + * without exponent + * with exponent + * edge cases + * over the edge cases + * floating-point + * without exponent + * with exponent + * overflow + * invalid numbers + * accept + * null + * true + * false + * array + * empty array + * nonempty array + * object + * empty object + * nonempty object + * string + * errors + * escaped + * number + * integers + * without exponent + * with exponent + * edge cases + * over the edge cases + * floating-point + * without exponent + * with exponent + * overflow + * invalid numbers + * parse errors + * parse errors (accept) + * tests found by mutate++ + * callback function + * filter nothing + * filter everything + * filter specific element + * filter object in array + * filter specific events + * first closing event + * special cases + * constructing from contiguous containers + * from std::vector + * from std::array + * from array + * from char literal + * from std::string + * from std::initializer_list + * from std::valarray + * improve test coverage + * parser with callback + * SAX parser + * } without value + * } with value + * second key + * ] without value + * ] with value + * float + * false + * 
null + * true + * unsigned + * integer + * string + * error messages for comments + * with callback + * filter nothing + * filter element + * without callback + * retrieve start position and end position + * for object + * for array + * for array with objects + * for two levels of nesting objects + * for simple types + * no nested + * with callback + * without callback + * string type + * number type + * boolean type + * null type + * with leading whitespace and newlines around root JSON + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 
+* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-comparison.cpp + +* lexicographical comparison operators + * types + * comparison: less + * comparison: 3-way + * values + * compares unordered + * compares unordered (inverse) + * comparison: equal + * comparison: not equal + * comparison: less + * comparison: less than or equal equal + * comparison: greater than + * comparison: greater than or equal + * comparison: 3-way + * parser callback regression + * filter specific element + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++20 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++20 +* GNU 11.5.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++20 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* GNU 13.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++20 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++20 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* Clang 18.1.8 with standard gnu++20 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 12.5.0 with standard gnu++20 +* GNU 10.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++20 +* GNU 13.3.0 with standard gnu++11 +* 
GNU 13.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.5.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++20 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 19.1.7 with standard gnu++20 +* Clang 17.0.6 with standard gnu++11 +* Clang 17.0.6 with standard gnu++20 +* Intel 2021.5.0.20211109 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++20 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++20 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++20 +* Linux-c++ with standard gnu++11 +* Linux-c++ with standard gnu++20 +* GNU 13.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++20 + + +### List of tests in file unit-concepts.cpp + +* concepts + * container requirements for json + * class json + * DefaultConstructible + * MoveConstructible + * CopyConstructible + * MoveAssignable + * CopyAssignable + * Destructible + * StandardLayoutType + * class iterator + * CopyConstructible + * CopyAssignable + * Destructible + * Swappable + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard 
gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-constructor1.cpp + +* constructors + * create an empty value with a given type + * null + * discarded + * object + * array + * boolean + * string + * number_integer + * number_unsigned + * number_float + * binary + * create a null object (implicitly) + * no parameter + * create a null object (explicitly) + * parameter + * create an object (explicit) + * empty object + * filled object + * create an object (implicit) + * std::map + * std::map 
#600 + * std::map + * std::multimap + * std::unordered_map + * std::unordered_multimap + * associative container literal + * create an array (explicit) + * empty array + * filled array + * create an array (implicit) + * std::list + * std::pair + * std::pair with discarded values + * std::tuple + * std::tuple with discarded values + * std::pair/tuple/array failures + * std::forward_list + * std::array + * std::valarray + * std::valarray + * std::vector + * std::deque + * std::set + * std::unordered_set + * sequence container literal + * create a string (explicit) + * empty string + * filled string + * create a string (implicit) + * std::string + * char[] + * const char* + * string literal + * create a boolean (explicit) + * empty boolean + * filled boolean (true) + * filled boolean (false) + * from std::vector::reference + * from std::vector::const_reference + * create a binary (explicit) + * empty binary + * filled binary + * create an integer number (explicit) + * uninitialized value + * initialized value + * create an integer number (implicit) + * short + * unsigned short + * int + * unsigned int + * long + * unsigned long + * long long + * unsigned long long + * int8_t + * int16_t + * int32_t + * int64_t + * int_fast8_t + * int_fast16_t + * int_fast32_t + * int_fast64_t + * int_least8_t + * int_least16_t + * int_least32_t + * int_least64_t + * uint8_t + * uint16_t + * uint32_t + * uint64_t + * uint_fast8_t + * uint_fast16_t + * uint_fast32_t + * uint_fast64_t + * uint_least8_t + * uint_least16_t + * uint_least32_t + * uint_least64_t + * integer literal without suffix + * integer literal with u suffix + * integer literal with l suffix + * integer literal with ul suffix + * integer literal with ll suffix + * integer literal with ull suffix + * create a floating-point number (explicit) + * uninitialized value + * initialized value + * NaN + * infinity + * create a floating-point number (implicit) + * float + * double + * long double + * floating-point literal 
without suffix + * integer literal with f suffix + * integer literal with l suffix + * create a container (array or object) from an initializer list + * empty initializer list + * explicit + * implicit + * one element + * array + * explicit + * implicit + * object + * explicit + * implicit + * string + * explicit + * implicit + * boolean + * explicit + * implicit + * number (integer) + * explicit + * implicit + * number (unsigned) + * explicit + * implicit + * number (floating-point) + * explicit + * implicit + * more elements + * explicit + * implicit + * implicit type deduction + * object + * array + * explicit type deduction + * empty object + * object + * object with error + * empty array + * array + * move from initializer_list + * string + * constructor with implicit types (array) + * constructor with implicit types (object) + * constructor with implicit types (object key) + * array + * constructor with implicit types (array) + * constructor with implicit types (object) + * assignment with implicit types (array) + * assignment with implicit types (object) + * object + * constructor with implicit types (array) + * constructor with implicit types (object) + * assignment with implicit types (array) + * assignment with implicit types (object) + * json + * constructor with implicit types (array) + * constructor with implicit types (object) + * assignment with implicit types (array) + * assignment with implicit types (object) + * create an array of n copies of a given value + * cnt = 0 + * cnt = 1 + * cnt = 3 + * create a JSON container from an iterator range + * object + * json(begin(), end()) + * json(begin(), begin()) + * construct from subrange + * incompatible iterators + * array + * json(begin(), end()) + * json(begin(), begin()) + * construct from subrange + * incompatible iterators + * other values + * construct with two valid iterators + * null + * string + * number (boolean) + * number (integer) + * number (unsigned) + * number (floating point) + * binary 
+ * construct with two invalid iterators + * string + * number (boolean) + * number (integer) + * number (integer) + * number (floating point) + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-constructor2.cpp + +* other constructors and destructor + * 
copy constructor + * object + * array + * null + * boolean + * string + * number (integer) + * number (unsigned) + * number (floating-point) + * binary + * move constructor + * copy assignment + * object + * array + * null + * boolean + * string + * number (integer) + * number (unsigned) + * number (floating-point) + * binary + * destructor + * object + * array + * string + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* 
Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-convenience.cpp + +* convenience functions + * type name as string + * string escape + * string concat + * std::string + * alt_string_iter + * alt_string_data + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 
9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-conversions.cpp + +* value conversion + * get an object (explicit) + * json::object_t + * std::map + * std::multimap + * std::unordered_map + * std::unordered_multimap + * exception in case of a non-object type + * get an object (explicit, get_to) + * json::object_t + * std::map + * std::multimap + * std::unordered_map + * std::unordered_multimap + * get an object (implicit) + * json::object_t + * std::map + * std::multimap + * std::unordered_map + * std::unordered_multimap + * get an array (explicit) + * json::array_t + * std::list + * std::forward_list + * std::vector + * reserve is called on containers that supports it + * built-in arrays + * std::deque + * exception in case of a non-array type + * get an array (explicit, get_to) + * json::array_t + * std::valarray + * std::list + * std::forward_list + * std::vector + * built-in arrays + * built-in arrays: 2D + * built-in arrays: 3D + * built-in arrays: 4D + * std::deque + * get an array (implicit) + * json::array_t + * std::list + * std::forward_list + * std::vector + * std::deque + * get a string (explicit) + * string_t + * std::string + * std::string_view + * exception in case of a non-string type + * exception in case of a non-string type using string_view + * get a string (explicit, get_to) + * string_t + * std::string + * std::string_view + * get null (explicit) + * get a string (implicit) + * string_t + * std::string_view + * std::string + * get a boolean (explicit) + * boolean_t + * uint8_t + * bool + * exception in case of a non-number type + * get a boolean (implicit) + * boolean_t + * bool + * get an integer number (explicit) + * number_integer_t + * number_unsigned_t + * short + * unsigned short + * int + * unsigned int + * long + * unsigned long + * long long + * unsigned long long + * int8_t + * int16_t + * int32_t + * int64_t + * int8_fast_t + * int16_fast_t + * 
int32_fast_t + * int64_fast_t + * int8_least_t + * int16_least_t + * int32_least_t + * int64_least_t + * uint8_t + * uint16_t + * uint32_t + * uint64_t + * uint8_fast_t + * uint16_fast_t + * uint32_fast_t + * uint64_fast_t + * uint8_least_t + * uint16_least_t + * uint32_least_t + * uint64_least_t + * exception in case of a non-number type + * get an integer number (implicit) + * number_integer_t + * number_unsigned_t + * short + * unsigned short + * int + * unsigned int + * long + * unsigned long + * long long + * unsigned long long + * int8_t + * int16_t + * int32_t + * int64_t + * int8_fast_t + * int16_fast_t + * int32_fast_t + * int64_fast_t + * int8_least_t + * int16_least_t + * int32_least_t + * int64_least_t + * uint8_t + * uint16_t + * uint32_t + * uint64_t + * uint8_fast_t + * uint16_fast_t + * uint32_fast_t + * uint64_fast_t + * uint8_least_t + * uint16_least_t + * uint32_least_t + * uint64_least_t + * get a floating-point number (explicit) + * number_float_t + * float + * double + * exception in case of a non-string type + * get a floating-point number (implicit) + * number_float_t + * float + * double + * get a binary value (explicit) + * binary_t + * get_binary() + * non-const + * non-const + * exception in case of a non-string type + * get a binary value (implicit) + * binary_t + * get an enum + * more involved conversions + * object-like STL containers + * std::map + * std::unordered_map + * std::multimap + * std::unordered_multimap + * exception in case of a non-object type + * array-like STL containers + * std::list + * std::forward_list + * std::array + * std::array is larger than JSON + * std::array is smaller than JSON + * std::valarray + * std::vector + * std::deque + * std::set + * std::unordered_set + * std::map (array of pairs) + * superfluous entries + * std::unordered_map (array of pairs) + * superfluous entries + * exception in case of a non-object type +* JSON to enum mapping + * enum class + * traditional enum +* std::filesystem::path + 
* ascii + * utf-8 +* std::optional + * null + * string + * bool + * number + * array + * object + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++14 +* GNU 9.4.0 with standard gnu++17 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++14 +* GNU 9.4.0 with standard gnu++17 +* GNU 11.5.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++14 +* GNU 11.5.0 with standard gnu++17 +* GNU 8.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++14 +* GNU 8.5.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* Clang 20.1.8 with standard gnu++17 +* GNU 13.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++14 +* GNU 13.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++14 +* GNU 9.4.0 with standard gnu++17 +* GNU 4.9.3 with standard gnu++11 +* GNU 4.9.3 with standard gnu++14 +* Clang 18.1.8 with standard gnu++11 +* Clang 18.1.8 with standard gnu++14 +* Clang 18.1.8 with standard gnu++17 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* GNU 14.3.0 with standard gnu++17 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++14 +* GNU 13.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* Clang 20.1.8 with standard gnu++17 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 12.5.0 with standard gnu++14 +* GNU 12.5.0 with standard gnu++17 +* GNU 10.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++14 +* GNU 10.5.0 with standard gnu++17 +* GNU 13.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++14 +* GNU 13.3.0 with 
standard gnu++17 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++14 +* GNU 9.4.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* Clang 20.1.8 with standard gnu++17 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.5.0 with standard gnu++14 +* GNU 9.5.0 with standard gnu++17 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++14 +* GNU 9.4.0 with standard gnu++17 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 6.4.0 with standard gnu++14 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 19.1.7 with standard gnu++14 +* Clang 19.1.7 with standard gnu++17 +* Clang 17.0.6 with standard gnu++11 +* Clang 17.0.6 with standard gnu++14 +* Clang 17.0.6 with standard gnu++17 +* Intel 2021.5.0.20211109 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++14 +* Intel 2021.5.0.20211109 with standard gnu++17 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 7.5.0 with standard gnu++14 +* GNU 7.5.0 with standard gnu++17 +* GNU 13.4.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++14 +* GNU 13.4.0 with standard gnu++17 +* GNU 5.5.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* GNU 4.8.5 with standard gnu++14 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++14 +* GNU 9.4.0 with standard gnu++17 +* Linux-c++ with standard gnu++11 +* Linux-c++ with standard gnu++14 +* Linux-c++ with standard gnu++17 +* GNU 13.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++14 +* GNU 13.3.0 with standard gnu++17 + + +### List of tests in 
file unit-custom-base-class.cpp + +* JSON Node Metadata + * type int + * type vector + * copy ctor + * move ctor + * move assign + * copy assign + * type unique_ptr + * type vector in json array +* JSON Visit Node + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests 
in file unit-deserialization.cpp + +* deserialization + * successful deserialization + * stream + * string literal + * string_t + * operator<< + * operator>> + * user-defined string literal + * unsuccessful deserialization + * stream + * string + * operator<< + * operator>> + * user-defined string literal + * contiguous containers + * directly + * from std::vector + * from std::array + * from array + * from chars + * from std::string + * from std::initializer_list + * empty container + * via iterator range + * from std::vector + * from std::array + * from array + * from std::string + * from std::initializer_list + * from std::valarray + * with empty range + * iterator_input_adapter advances iterators correctly + * error cases + * case 1 + * case 2 + * case 3 + * case 4 + * case 5 + * case 6 + * case 7 + * case 8 + * case 9 + * case 10 + * case 11 + * case 12 + * case 13 + * case 14 + * case 15 + * case 16 + * ignoring byte-order marks + * BOM only + * BOM and content + * 2 byte of BOM + * 1 byte of BOM + * variations + * preserve state after parsing + * SAX and early abort + * JSON Lines + * Example file + * Example file without trailing newline +* deserialization of different character types (ASCII) +* deserialization of different character types (UTF-8) +* deserialization of different character types (UTF-16) +* deserialization of different character types (UTF-32) + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++20 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++20 +* GNU 11.5.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++20 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* GNU 13.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++20 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard 
gnu++20 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* Clang 18.1.8 with standard gnu++20 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 12.5.0 with standard gnu++20 +* GNU 10.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++20 +* GNU 13.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.5.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++20 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 19.1.7 with standard gnu++20 +* Clang 17.0.6 with standard gnu++11 +* Clang 17.0.6 with standard gnu++20 +* Intel 2021.5.0.20211109 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++20 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++20 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++20 +* Linux-c++ with standard gnu++11 +* Linux-c++ with standard 
gnu++20 +* GNU 13.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++20 + + +### List of tests in file unit-diagnostic-positions-only.cpp + +* Better diagnostics with positions only + * invalid type + * invalid type without positions + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard 
gnu++11 + + +### List of tests in file unit-diagnostic-positions.cpp + +* Better diagnostics with positions + * invalid type + * invalid type without positions + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-diagnostics.cpp + +* Better diagnostics + 
* empty JSON Pointer + * invalid type + * missing key + * array index out of range + * array index at wrong type + * wrong iterator + * JSON Pointer escaping + * Parse error + * Wrong type in update() +* Regression tests for extended diagnostics + * Regression test for https://github.com/nlohmann/json/pull/2562#pullrequestreview-574858448 + * Regression test for https://github.com/nlohmann/json/pull/2562/files/380a613f2b5d32425021129cd1f371ddcfd54ddf#r563259793 + * Regression test for issue #2838 - Assertion failure when inserting into arrays with JSON_DIAGNOSTICS set + * Regression test for issue #2962 - JSON_DIAGNOSTICS assertion for ordered_json + * Regression test for issue #3007 - Parent pointers properly set when using update() + * Regression test for issue #3032 - Yet another assertion failure when inserting into arrays with JSON_DIAGNOSTICS set + * Regression test for issue #3915 - JSON_DIAGNOSTICS trigger assertion + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 
14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-disabled_exceptions.cpp + +* Tests with disabled exceptions + * issue #2824 - encoding of json::exception::what() + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with 
standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-element_access1.cpp + +* element access 1 + * array + * access specified element with bounds checking + * access within bounds + * access outside bounds + * access on non-array type + * null + * boolean + * string + * object + * number (integer) + * number (unsigned) + * number (floating-point) + * front and back + * access specified element + * access within bounds + * access on non-array type + * null + * standard tests + * implicit transformation to properly filled array + * boolean + * string + * object + * number (integer) + * number (unsigned) + * number (floating-point) + * remove specified element + * remove element by index + * remove element by iterator + * erase(begin()) + * erase(begin(), end()) + * erase(begin(), begin()) + * erase at offset + * erase subrange + * different arrays + * remove element by index in non-array type + * null + * boolean + * string + * object + * number (integer) + * number (unsigned) + * number (floating-point) + * other values + * front and back + * null + * string + * number (boolean) + * number (integer) + * number (unsigned) + * number (floating point) + * erase with one valid iterator + * null + * string + * number (boolean) + * number (integer) + * number (unsigned) + * number (floating point) + * binary + * erase with one invalid iterator + * string + * number (boolean) + * number 
(integer) + * number (unsigned) + * number (floating point) + * erase with two valid iterators + * null + * string + * number (boolean) + * number (integer) + * number (unsigned) + * number (floating point) + * binary + * erase with two invalid iterators + * string + * number (boolean) + * number (integer) + * number (unsigned) + * number (floating point) + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with 
standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-element_access2.cpp + +* element access 2 + * object + * access specified element with bounds checking + * access within bounds + * access outside bounds + * access on non-object type + * null + * boolean + * string + * array + * number (integer) + * number (unsigned) + * number (floating-point) + * access specified element with default value + * given a key + * access existing value + * access non-existing value + * access on non-object type + * null + * boolean + * string + * array + * number (integer) + * number (unsigned) + * number (floating-point) + * given a JSON pointer + * access existing value + * access on non-object type + * null + * boolean + * string + * array + * number (integer) + * number (unsigned) + * number (floating-point) + * non-const operator[] + * front and back + * access specified element + * access within bounds + * access within bounds (string_view) + * access on non-object type + * null + * boolean + * string + * array + * number (integer) + * number (unsigned) + * number (floating-point) + * remove specified element + * remove element by key + * remove element by key (string_view) + * remove element by iterator + * erase(begin()) + * erase(begin(), end()) + * erase(begin(), begin()) + * erase at offset + * erase subrange + * different objects + * remove element by key in non-object type + * null + * boolean + * string + * array + * number (integer) + * number (floating-point) + * find an element in an object + * existing element + * nonexisting element + * all types + * null + * string + * object + * array + * boolean + * number (integer) + * number (unsigned) + * number (floating-point) + * count keys in an object + * existing element + * nonexisting element + * all types + * null + * string + * object + * array + * boolean + * number (integer) + * number (unsigned) + * number 
(floating-point) + * check existence of key in an object + * existing element + * nonexisting element + * all types + * null + * string + * object + * array + * boolean + * number (integer) + * number (unsigned) + * number (floating-point) +* element access 2 (throwing tests) + * object + * access specified element with default value + * given a JSON pointer + * access non-existing value +* element access 2 (additional value() tests) + * deduced ValueType + * literal key + * const char * key + * const char(&)[] key + * string_t/object_t::key_type key + * std::string_view key + * explicit ValueType + * literal key + * const char * key + * const char(&)[] key + * string_t/object_t::key_type key + * std::string_view key + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++14 +* GNU 9.4.0 with standard gnu++17 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++14 +* GNU 9.4.0 with standard gnu++17 +* GNU 11.5.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++14 +* GNU 11.5.0 with standard gnu++17 +* GNU 8.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++14 +* GNU 8.5.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* Clang 20.1.8 with standard gnu++17 +* GNU 13.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++14 +* GNU 13.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++14 +* GNU 9.4.0 with standard gnu++17 +* GNU 4.9.3 with standard gnu++11 +* GNU 4.9.3 with standard gnu++14 +* Clang 18.1.8 with standard gnu++11 +* Clang 18.1.8 with standard gnu++14 +* Clang 18.1.8 with standard gnu++17 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* GNU 14.3.0 with standard gnu++17 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 
13.3.0 with standard gnu++14 +* GNU 13.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* Clang 20.1.8 with standard gnu++17 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 12.5.0 with standard gnu++14 +* GNU 12.5.0 with standard gnu++17 +* GNU 10.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++14 +* GNU 10.5.0 with standard gnu++17 +* GNU 13.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++14 +* GNU 13.3.0 with standard gnu++17 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++14 +* GNU 9.4.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* Clang 20.1.8 with standard gnu++17 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.5.0 with standard gnu++14 +* GNU 9.5.0 with standard gnu++17 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++14 +* GNU 9.4.0 with standard gnu++17 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 6.4.0 with standard gnu++14 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 19.1.7 with standard gnu++14 +* Clang 19.1.7 with standard gnu++17 +* Clang 17.0.6 with standard gnu++11 +* Clang 17.0.6 with standard gnu++14 +* Clang 17.0.6 with standard gnu++17 +* Intel 2021.5.0.20211109 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++14 +* Intel 2021.5.0.20211109 with standard gnu++17 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 7.5.0 with standard gnu++14 +* GNU 7.5.0 with standard gnu++17 +* GNU 13.4.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++14 +* GNU 13.4.0 with standard gnu++17 +* 
GNU 5.5.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* GNU 4.8.5 with standard gnu++14 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++14 +* GNU 9.4.0 with standard gnu++17 +* Linux-c++ with standard gnu++11 +* Linux-c++ with standard gnu++14 +* Linux-c++ with standard gnu++17 +* GNU 13.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++14 +* GNU 13.3.0 with standard gnu++17 + + +### List of tests in file unit-hash.cpp + +* hash +* hash + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* 
Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-inspection.cpp + +* object inspection + * convenience type checker + * object + * array + * null + * boolean + * string + * number (integer) + * number (unsigned) + * number (floating-point) + * binary + * discarded + * serialization + * no indent / indent=-1 + * indent=0 + * indent=1, space='\t' + * indent=4 + * indent=x + * dump and floating-point numbers + * dump and small floating-point numbers + * dump and non-ASCII characters + * dump with ensure_ascii and non-ASCII characters + * full Unicode escaping to ASCII + * parsing yields the same JSON value + * dumping yields the same JSON text + * serialization of discarded element + * check that precision is reset after serialization + * round trips + * return the type of the object (explicit) + * null + * object + * array + * boolean + * string + * number (integer) + * number (unsigned) + * number (floating-point) + * return the type of the object (implicit) + * null + * object + * array + * boolean + * string + * number (integer) + * number (unsigned) + * number (floating-point) + * binary + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 
+* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-items.cpp + +* iterator_wrapper + * object + * value + * reference + * const value + * const reference + * const object + * value + * reference + * const value + * const reference + * array + * value + * reference + * const value + * const reference + * const array + * value + * reference + * const value + * const reference + * primitive + * value + * reference + * const value + * const reference + * const primitive + * value + * reference + * const value + * const reference +* items() + * object + * value + * reference + * const value + * const reference + * structured bindings + * const object + * value + * reference + * const value + * const reference + * array 
+ * value + * reference + * const value + * const reference + * const array + * value + * reference + * const value + * const reference + * primitive + * value + * reference + * const value + * const reference + * const primitive + * value + * reference + * const value + * const reference + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++17 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++17 +* GNU 11.5.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++17 +* GNU 8.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++11 +* Clang 20.1.8 with standard gnu++17 +* GNU 13.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++17 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* Clang 18.1.8 with standard gnu++17 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++11 +* Clang 20.1.8 with standard gnu++17 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 12.5.0 with standard gnu++17 +* GNU 10.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++17 +* GNU 13.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++17 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++11 +* Clang 20.1.8 with standard gnu++17 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.5.0 with standard gnu++17 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++17 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 
6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 19.1.7 with standard gnu++17 +* Clang 17.0.6 with standard gnu++11 +* Clang 17.0.6 with standard gnu++17 +* Intel 2021.5.0.20211109 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++17 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 7.5.0 with standard gnu++17 +* GNU 13.4.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++17 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++17 +* Linux-c++ with standard gnu++11 +* Linux-c++ with standard gnu++17 +* GNU 13.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++17 + + +### List of tests in file unit-iterators1.cpp + +* iterators 1 + * basic behavior + * uninitialized + * boolean + * json + begin/end + * const json + begin/end + * json + cbegin/cend + * const json + cbegin/cend + * json + rbegin/rend + * json + crbegin/crend + * const json + crbegin/crend + * additional tests + * !(begin != begin) + * !(end != end) + * begin < end + * begin <= end + * end > begin + * end >= begin + * end == end + * end <= end + * begin == begin + * begin <= begin + * begin >= begin + * !(begin == end) + * begin != end + * begin+1 == end + * begin == end-1 + * begin != end+1 + * end != end+1 + * begin+1 != begin+2 + * begin+1 < begin+2 + * begin+1 <= begin+2 + * end+1 != end+2 + * key/value + * string + * json + begin/end + * const json + begin/end + * json + cbegin/cend + * const json + cbegin/cend + * json + rbegin/rend + * json + crbegin/crend + * const json + crbegin/crend + * key/value + * array + * json + begin/end + 
* const json + begin/end + * json + cbegin/cend + * const json + cbegin/cend + * json + rbegin/rend + * json + crbegin/crend + * const json + crbegin/crend + * key/value + * object + * json + begin/end + * const json + begin/end + * json + cbegin/cend + * const json + cbegin/cend + * json + rbegin/rend + * json + crbegin/crend + * const json + crbegin/crend + * key/value + * number (integer) + * json + begin/end + * const json + begin/end + * json + cbegin/cend + * const json + cbegin/cend + * json + rbegin/rend + * json + crbegin/crend + * const json + crbegin/crend + * key/value + * number (unsigned) + * json + begin/end + * const json + begin/end + * json + cbegin/cend + * const json + cbegin/cend + * json + rbegin/rend + * json + crbegin/crend + * const json + crbegin/crend + * key/value + * number (float) + * json + begin/end + * const json + begin/end + * json + cbegin/cend + * const json + cbegin/cend + * json + rbegin/rend + * json + crbegin/crend + * const json + crbegin/crend + * key/value + * null + * json + begin/end + * const json + begin/end + * json + cbegin/cend + * const json + cbegin/cend + * json + rbegin/rend + * json + crbegin/crend + * const json + crbegin/crend + * key/value + * conversion from iterator to const iterator + * boolean + * string + * array + * object + * number (integer) + * number (unsigned) + * number (float) + * null + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* 
GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-iterators2.cpp + +* iterators 2 + * iterator comparisons + * iterator arithmetic + * addition and subtraction + * object + * array + * null + * value + * subscript operator + * object + * array + * null + * value + * reverse iterator comparisons + * reverse iterator arithmetic + * addition and subtraction + * object + * array + * null + * value + * subscript operator + * object + * array + * null + * value + * ranges + * concepts + * algorithms + * copy + * find_if + * views + * reverse + * transform + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++20 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++20 +* GNU 11.5.0 with standard gnu++11 +* GNU 11.5.0 with standard 
gnu++20 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* GNU 13.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++20 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++20 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* Clang 18.1.8 with standard gnu++20 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 12.5.0 with standard gnu++20 +* GNU 10.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++20 +* GNU 13.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.5.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++20 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 19.1.7 with standard gnu++20 +* Clang 17.0.6 with standard gnu++11 +* Clang 17.0.6 with standard gnu++20 +* Intel 2021.5.0.20211109 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++20 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++20 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard 
gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++20 +* Linux-c++ with standard gnu++11 +* Linux-c++ with standard gnu++20 +* GNU 13.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++20 + + +### List of tests in file unit-iterators3.cpp + +* checking forward-iterators + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++14 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++14 +* GNU 11.5.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++14 +* GNU 8.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 13.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++14 +* GNU 4.9.3 with standard gnu++11 +* GNU 4.9.3 with standard gnu++14 +* Clang 18.1.8 with standard gnu++11 +* Clang 18.1.8 with standard gnu++14 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 12.5.0 with standard gnu++14 +* GNU 10.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++14 +* GNU 13.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.5.0 with 
standard gnu++11 +* GNU 9.5.0 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++14 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 6.4.0 with standard gnu++14 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 19.1.7 with standard gnu++14 +* Clang 17.0.6 with standard gnu++11 +* Clang 17.0.6 with standard gnu++14 +* Intel 2021.5.0.20211109 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++14 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 7.5.0 with standard gnu++14 +* GNU 13.4.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++14 +* GNU 5.5.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* GNU 4.8.5 with standard gnu++14 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++14 +* Linux-c++ with standard gnu++11 +* Linux-c++ with standard gnu++14 +* GNU 13.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++14 + + +### List of tests in file unit-json_patch.cpp + +* JSON patch + * examples from RFC 6902 + * 4. Operations + * 4.1 add + * 4.2 remove + * A.1. Adding an Object Member + * A.2. Adding an Array Element + * A.3. Removing an Object Member + * A.4. Removing an Array Element + * A.5. Replacing a Value + * A.6. Moving a Value + * A.7. Moving a Value + * A.8. Testing a Value: Success + * A.9. Testing a Value: Error + * A.10. Adding a Nested Member Object + * A.11. Ignoring Unrecognized Elements + * A.12. Adding to a Nonexistent Target + * A.14. Escape Ordering + * A.15. Comparing Strings and Numbers + * A.16. 
Adding an Array Value + * own examples + * add + * add to the root element + * add to end of the array + * copy + * replace + * documentation GIF + * errors + * unknown operation + * not an array + * not an array of objects + * missing 'op' + * non-string 'op' + * invalid operation + * add + * missing 'path' + * non-string 'path' + * missing 'value' + * invalid array index + * remove + * missing 'path' + * non-string 'path' + * nonexisting target location (array) + * nonexisting target location (object) + * root element as target location + * replace + * missing 'path' + * non-string 'path' + * missing 'value' + * nonexisting target location (array) + * nonexisting target location (object) + * move + * missing 'path' + * non-string 'path' + * missing 'from' + * non-string 'from' + * nonexisting from location (array) + * nonexisting from location (object) + * copy + * missing 'path' + * non-string 'path' + * missing 'from' + * non-string 'from' + * nonexisting from location (array) + * nonexisting from location (object) + * test + * missing 'path' + * non-string 'path' + * missing 'value' + * Examples from jsonpatch.com + * Simple Example + * Operations + * add + * remove + * replace + * copy + * move + * test + * Examples from bruth.github.io/jsonpatch-js + * add + * remove + * replace + * move + * copy + * copy + * Tests from github.com/json-patch/json-patch-tests + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard 
gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-json_pointer.cpp + +* JSON pointers + * errors + * array index error + * examples from RFC 6901 + * nonconst access + * const access + * user-defined string literal + * array access + * nonconst access + * const access + * flatten + * string representation + * conversion + * array + * object + * empty, push, pop and parent + * operators + * equality comparison + * exceptions + * less-than comparison + * usable as map key + * backwards compatibility and mixing + * equality comparison + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++20 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++20 +* GNU 11.5.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++20 
+* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* GNU 13.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++20 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++20 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* Clang 18.1.8 with standard gnu++20 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 12.5.0 with standard gnu++20 +* GNU 10.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++20 +* GNU 13.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.5.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++20 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 19.1.7 with standard gnu++20 +* Clang 17.0.6 with standard gnu++11 +* Clang 17.0.6 with standard gnu++20 +* Intel 2021.5.0.20211109 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++20 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++20 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* 
Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++20 +* Linux-c++ with standard gnu++11 +* Linux-c++ with standard gnu++20 +* GNU 13.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++20 + + +### List of tests in file unit-large_json.cpp + +* tests on very large JSONs + * issue #1419 - Segmentation fault (stack overflow) due to unbounded recursion + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 
20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-literals.cpp + +* accept + * unicode + * capitalisation + * true + * null + * false + * illegal literals + * nil + * truth + * const + * none + * self + * super + * this + * undefined + * illegal literal numbers + * inf + * infinity + * NaN +* parse + * values + * whitespace + * capitalisation + * true + * null + * false + * illegal literals + * nil + * truth + * const + * none + * self + * super + * this + * undefined + * illegal literal numbers + * inf + * infinity + * NaN + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard 
gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-locale-cpp.cpp + +* locale-dependent test (LC_NUMERIC=C) + * check if locale is properly set + * parsing + * SAX parsing +* locale-dependent test (LC_NUMERIC=de_DE) + * check if locale is properly set + * parsing + * SAX parsing + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* 
Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-merge_patch.cpp + +* JSON Merge Patch + * examples from RFC 7396 + * Section 1 + * Section 3 + * Appendix A + * Example 1 + * Example 2 + * Example 3 + * Example 4 + * Example 5 + * Example 6 + * Example 7 + * Example 8 + * Example 9 + * Example 10 + * Example 11 + * Example 12 + * Example 13 + * Example 14 + * Example 15 + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard 
gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-meta.cpp + +* version information + * meta() + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 
+* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-modifiers.cpp + +* modifiers + * clear() + * boolean + * string + * array + * empty array + * filled array + * object + * empty object + * filled object + * binary + * empty binary + * filled binary + * number (integer) + * number (unsigned) + * number (float) + * null + * push_back() + * to array + * json&& + * null + * array + * other type + * const json& + * null + * array + * other type + * to object + * null + * object + * other type + * with initializer_list + * null + * array + * object + * emplace_back() + * to array + * null + * array + * multiple values + * other type + * emplace() + * to object + * null + * object + * other type + * operator+= + * to array + * json&& + * null + * array + * other type + * const json& + * null + * array + * other type + * to object + * null + * object + * other type + * with initializer_list + * null + * array + * object + * insert() + * value at position + * insert before begin() + * insert in the middle + * insert before end() + * rvalue at position + * insert before begin() + * insert in the middle + * insert before end() + * copies at position + * insert before begin() + * insert in the middle + * insert before end() + * insert nothing (count = 0) + * range for array + * proper usage + * empty range + * invalid iterators + * range for object + * proper usage + * empty range + * invalid iterators + * initializer list at position + * 
insert before begin() + * insert in the middle + * insert before end() + * invalid iterator + * non-array type + * update() + * non-recursive (default) + * const reference + * proper usage + * wrong types + * iterator range + * proper usage + * empty range + * invalid iterators + * recursive + * const reference + * extend object + * replace object + * swap() + * json + * member swap + * nonmember swap + * array_t + * array_t type + * non-array_t type + * object_t + * object_t type + * non-object_t type + * string_t + * string_t type + * non-string_t type + * binary_t + * binary_t type + * binary_t::container_type type + * non-binary_t type + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with 
standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-msgpack.cpp + +* MessagePack + * individual values + * discarded + * null + * boolean + * true + * false + * number + * signed + * -32..-1 (negative fixnum) + * 0..127 (positive fixnum) + * 128..255 (int 8) + * 256..65535 (int 16) + * 65536..4294967295 (int 32) + * 4294967296..9223372036854775807 (int 64) + * -128..-33 (int 8) + * -9263 (int 16) + * -32768..-129 (int 16) + * -32769..-2147483648 + * -9223372036854775808..-2147483649 (int 64) + * unsigned + * 0..127 (positive fixnum) + * 128..255 (uint 8) + * 256..65535 (uint 16) + * 65536..4294967295 (uint 32) + * 4294967296..18446744073709551615 (uint 64) + * float + * 3.1415925 + * 1.0 + * 128.128 + * string + * N = 0..31 + * N = 32..255 + * N = 256..65535 + * N = 65536..4294967295 + * array + * empty + * [null] + * [1,2,3,4,5] + * [[[[]]]] + * array 16 + * array 32 + * object + * empty + * {\ + * {\ + * map 16 + * map 32 + * extension + * N = 0..255 + * N = 256..65535 + * N = 65536..4294967295 + * binary + * N = 0..255 + * N = 256..65535 + * N = 65536..4294967295 + * from float32 + * errors + * empty byte vector + * too short byte vector + * unexpected end inside int with stream + * misuse wchar for binary + * unsupported bytes + * concrete examples + * all unsupported bytes + * invalid string in map + * strict mode + * non-strict mode + * strict mode + * SAX aborts + * start_array(len) + * start_object(len) + * key() +* single MessagePack roundtrip + * sample.json + * roundtrips + * std::ostringstream + * std::string +* MessagePack roundtrips + * input from 
msgpack-python + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 + +In the following configuration, however, some test-cases were skipped: + +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++20 +* 1 test case was skipped when using GNU 11.5.0 with standard gnu++11 +* 1 test case was skipped when using GNU 8.5.0 with standard gnu++11 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++11 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++14 +* 1 test case was skipped when using GNU 4.9.3 with standard gnu++11 +* 1 test case was skipped when using Clang 18.1.8 with standard gnu++11 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++11 +* 1 test case was skipped when using GNU 13.3.0 with standard gnu++11 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++11 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++11 +* 1 test case was skipped when using GNU 12.5.0 with standard gnu++11 +* 1 test case was skipped when using GNU 10.5.0 with standard gnu++11 +* 1 test case was skipped when using GNU 13.3.0 with standard gnu++11 +* 1 test case was skipped when using GNU 9.5.0 with standard gnu++11 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++14 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++23 +* 1 test case was skipped when using GNU 6.4.0 with standard gnu++11 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++17 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++20 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++17 +* 1 test 
case was skipped when using Clang 19.1.7 with standard gnu++11 +* 1 test case was skipped when using Clang 17.0.6 with standard gnu++11 +* 1 test case was skipped when using Intel 2021.5.0.20211109 with standard gnu++11 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++23 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++17 +* 1 test case was skipped when using GNU 7.5.0 with standard gnu++11 +* 1 test case was skipped when using GNU 13.4.0 with standard gnu++11 +* 1 test case was skipped when using GNU 5.5.0 with standard gnu++11 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++23 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++14 +* 1 test case was skipped when using GNU 4.8.5 with standard gnu++11 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++20 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++11 +* 1 test case was skipped when using GNU 9.4.0 with standard gnu++11 +* 1 test case was skipped when using Linux-c++ with standard gnu++11 +* 1 test case was skipped when using GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-no-mem-leak-on-adl-serialize.cpp + +* check_for_mem_leak_on_adl_to_json-1 +* check_for_mem_leak_on_adl_to_json-2 +* check_for_mem_leak_on_adl_to_json-2 + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 
20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-noexcept.cpp + +* noexcept + * nothrow-copy-constructible exceptions + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard 
gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-numbers.cpp + +* accept + * exponents + * U+0425 + * U+0436 + * leading zeroes + * operators + * plus + * minus + * brackets + * factorial + * multiplication + * division + * comma + * whitespace + * space + * tab + * new line + * Carriage return + * Leading and tailing + * space + * tab + * newline + * Carriage return + * Mixed + * Leading zeroes + * bases + * Octal + * Hexadecimal +* parse + * exponents + * U+0425 + * U+0436 + * leading zeroes + * leading plus + * Capitalisation + * operators + * plus + * minus + * brackets + * factorial + * multiplication + * division + * comma + * trailing zeroes + * whitespace + * invalid whitespace + * space + * tab + * new line + * Carriage return + * Leading zeroes + * Precision + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 
20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-objects.cpp + +* accept + * names + * numbers + * arrays + * objects + * literals + * strings + * control characters + * unicode + * escaped UTF-16 surrogates + * whitespace + * empty object + * non-empty object + * member separator +* parse + * whitespace + * empty object + * non-empty object + * member separator + * names + * numbers + * arrays + * objects + * literals + * duplicate names + * 100,000 identical keys + * first and last key duplicate + + + 
+All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-ordered_json.cpp + +* ordered_json + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard 
gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-ordered_map.cpp + +* ordered_map + * constructor + * constructor from iterator range + * copy assignment + * at + * with Key&& + * with const Key&& + * with string literal + * operator[] + * with Key&& + * with const Key&& + * with string literal + * erase + * with Key&& + * with const Key&& + * with string literal + * with iterator + * with iterator pair + * range in the 
middle + * range at the beginning + * range at the end + * count + * find + * insert + * const value_type& + * value_type&& + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-pointer_access.cpp + +* pointer access + * pointer access to object_t + * 
pointer access to const object_t + * pointer access to array_t + * pointer access to const array_t + * pointer access to string_t + * pointer access to const string_t + * pointer access to boolean_t + * pointer access to const boolean_t + * pointer access to number_integer_t + * pointer access to const number_integer_t + * pointer access to number_unsigned_t + * pointer access to const number_unsigned_t + * pointer access to number_float_t + * pointer access to const number_float_t + * pointer access to const binary_t + * pointer access to const binary_t + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* 
GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-readme.cpp + +* README + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 + +In the following configuration, however, some test-cases were skipped: + +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++20 +* 1 test case was skipped when using GNU 11.5.0 with standard gnu++11 +* 1 test case was skipped when using GNU 8.5.0 with standard gnu++11 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++11 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++14 +* 1 test case was skipped when using GNU 4.9.3 with standard gnu++11 +* 1 test case was skipped when using Clang 18.1.8 with standard gnu++11 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++11 +* 1 test case was skipped when using GNU 13.3.0 with standard gnu++11 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++11 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++11 +* 1 test case was skipped when using GNU 12.5.0 with standard gnu++11 +* 1 test case was skipped when using GNU 10.5.0 with standard gnu++11 +* 1 test case was skipped when using GNU 13.3.0 with standard gnu++11 +* 1 test case was skipped when using GNU 9.5.0 with standard gnu++11 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++14 
+* 1 test case was skipped when using Clang 20.1.8 with standard gnu++23 +* 1 test case was skipped when using GNU 6.4.0 with standard gnu++11 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++17 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++20 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++17 +* 1 test case was skipped when using Clang 19.1.7 with standard gnu++11 +* 1 test case was skipped when using Clang 17.0.6 with standard gnu++11 +* 1 test case was skipped when using Intel 2021.5.0.20211109 with standard gnu++11 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++23 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++17 +* 1 test case was skipped when using GNU 7.5.0 with standard gnu++11 +* 1 test case was skipped when using GNU 13.4.0 with standard gnu++11 +* 1 test case was skipped when using GNU 5.5.0 with standard gnu++11 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++23 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++14 +* 1 test case was skipped when using GNU 4.8.5 with standard gnu++11 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++20 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++11 +* 1 test case was skipped when using GNU 9.4.0 with standard gnu++11 +* 1 test case was skipped when using Linux-c++ with standard gnu++11 +* 1 test case was skipped when using GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-reference_access.cpp + +* reference access + * reference access to object_t + * const reference access to const object_t + * reference access to array_t + * reference access to string_t + * reference access to boolean_t + * reference access to number_integer_t + * reference access to number_unsigned_t + * reference access to number_float_t + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard 
gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-regression1.cpp + +* regression tests 1 + * issue #60 - Double quotation mark is not parsed correctly + * escape_doublequote + * issue #70 - Handle infinity and NaN cases + * NAN value + * infinity + * NAN value + * infinity + * pull request #71 - handle enum type + * issue #76 - dump() / parse() not 
idempotent + * issue #82 - lexer::get_number return NAN + * issue #89 - nonstandard integer type + * issue #93 reverse_iterator operator inheritance problem + * issue #100 - failed to iterator json object with reverse_iterator + * issue #101 - binary string causes numbers to be dumped as hex + * issue #111 - subsequent unicode chars + * issue #144 - implicit assignment to std::string fails + * issue #146 - character following a surrogate pair is skipped + * issue #171 - Cannot index by key of type static constexpr const char* + * issue #186 miloyip/nativejson-benchmark: floating-point parsing + * issue #228 - double values are serialized with commas as decimal points + * issue #378 - locale-independent num-to-str + * issue #379 - locale-independent str-to-num + * issue #233 - Can't use basic_json::iterator as a base iterator for std::move_iterator + * issue #235 - ambiguous overload for 'push_back' and 'operator+=' + * issue #269 - diff generates incorrect patch when removing multiple array elements + * issue #283 - value() does not work with _json_pointer types + * issue #304 - Unused variable warning + * issue #306 - Parsing fails without space at end of file + * issue #310 - make json_benchmarks no longer working in 2.0.4 + * issue #323 - add nested object capabilities to pointers + * issue #329 - serialized value not always can be parsed + * issue #360 - Loss of precision when serializing + * issue #366 - json::parse on failed stream gets stuck + * issue #367 - calling stream at EOF + * issue #367 - behavior of operator>> should more closely resemble that of built-in overloads + * (empty) + * (whitespace) + * one value + * one value + whitespace + * whitespace + one value + * three values + * literals without whitespace + * example from #529 + * second example from #529 + * issue #389 - Integer-overflow (OSS-Fuzz issue 267) + * issue #380 - bug in overflow detection when parsing integers + * issue #405 - Heap-buffer-overflow (OSS-Fuzz issue 342) + * issue #407 
- Heap-buffer-overflow (OSS-Fuzz issue 343) + * issue #408 - Heap-buffer-overflow (OSS-Fuzz issue 344) + * issue #411 - Heap-buffer-overflow (OSS-Fuzz issue 366) + * issue #412 - Heap-buffer-overflow (OSS-Fuzz issue 367) + * issue #414 - compare with literal 0) + * issue #416 - Use-of-uninitialized-value (OSS-Fuzz issue 377) + * issue #452 - Heap-buffer-overflow (OSS-Fuzz issue 585) + * issue #454 - doubles are printed as integers + * issue #464 - VS2017 implicit to std::string conversion fix + * issue #465 - roundtrip error while parsing 1000000000000000010E5 + * issue #473 - inconsistent behavior in conversion to array type + * std::vector + * std::list + * std::forward_list + * issue #486 - json::value_t can't be a map's key type in VC++ 2015 + * issue #494 - conversion from vector to json fails to build + * issue #504 - assertion error (OSS-Fuzz 856) + * issue #512 - use of overloaded operator '<=' is ambiguous + * issue #575 - heap-buffer-overflow (OSS-Fuzz 1400) + * issue #600 - how does one convert a map in Json back to std::map? + * example 1 + * example 2 + * issue #602 - BOM not skipped when using json:parse(iterator) + * issue #702 - conversion from valarray to json fails to build + * original example + * full example + * issue #367 - Behavior of operator>> should more closely resemble that of built-in overloads. + * example 1 + * issue #714 - throw std::ios_base::failure exception when failbit set to true + * issue #805 - copy constructor is used with std::initializer_list constructor. 
+ * issue #838 - incorrect parse error with binary data in keys + * issue #843 - converting to array not working + * issue #894 - invalid RFC6902 copy operation succeeds + * issue #961 - incorrect parsing of indefinite length CBOR strings + * issue #962 - Timeout (OSS-Fuzz 6034) + * issue #971 - Add a SAX parser - late bug + * issue #972 - Segmentation fault on G++ when trying to assign json string literal to custom json type + * issue #977 - Assigning between different json types +* regression tests, exceptions dependent + * issue #1340 - eof not set on exhausted input stream + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++17 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++17 +* GNU 11.5.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++17 +* GNU 8.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++11 +* Clang 20.1.8 with standard gnu++17 +* GNU 13.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++17 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* Clang 18.1.8 with standard gnu++17 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++11 +* Clang 20.1.8 with standard gnu++17 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 12.5.0 with standard gnu++17 +* GNU 10.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++17 +* GNU 13.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++17 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++17 +* 
Clang 20.1.8 with standard gnu++11 +* Clang 20.1.8 with standard gnu++17 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.5.0 with standard gnu++17 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++17 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 19.1.7 with standard gnu++17 +* Clang 17.0.6 with standard gnu++11 +* Clang 17.0.6 with standard gnu++17 +* Intel 2021.5.0.20211109 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++17 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 7.5.0 with standard gnu++17 +* GNU 13.4.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++17 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++17 +* Linux-c++ with standard gnu++11 +* Linux-c++ with standard gnu++17 +* GNU 13.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++17 + + +### List of tests in file unit-regression2.cpp + +* regression tests 2 + * issue #1001 - Fix memory leak during parser callback + * issue #1021 - to/from_msgpack only works with standard typization + * issue #1045 - Using STL algorithms with JSON containers with expected results? 
+ * issue #1292 - Serializing std::variant causes stack overflow + * issue #1299 - compile error in from_json converting to container + * issue #1445 - buffer overflow in dumping invalid utf-8 strings + * a bunch of -1, ensure_ascii=true + * a bunch of -2, ensure_ascii=false + * test case in issue #1445 + * issue #1447 - Integer Overflow (OSS-Fuzz 12506) + * issue #1708 - minimum value of int64_t can be outputted + * issue #1727 - Contains with non-const lvalue json_pointer picks the wrong overload + * issue #1647 - compile error when deserializing enum if both non-default from_json and non-member operator== exists for other type + * issue #1715 - json::from_cbor does not respect allow_exceptions = false when input is string literal + * string literal + * string array + * std::string + * issue #1805 - A pair is json constructible only if T1 and T2 are json constructible + * issue #1825 - A tuple is json constructible only if all T in Args are json constructible + * issue #1983 - JSON patch diff for op=add formation is not as per standard (RFC 6902) + * issue #2067 - cannot serialize binary data to text JSON + * PR #2181 - regression bug with lvalue + * issue #2293 - eof doesn't cause parsing to stop + * issue #2315 - json.update and vector<json> does not work with ordered_json + * issue #2330 - ignore_comment=true fails on multiple consecutive lines starting with comments + * issue #2546 - parsing containers of std::byte + * issue #2574 - Deserialization to std::array, std::pair, and std::tuple with non-default constructable types fails + * std::array + * std::pair + * std::tuple + * issue #4530 - Serialization of empty tuple + * issue #2865 - ASAN detects memory leaks + * issue #2824 - encoding of json::exception::what() + * issue #2825 - Properly constrain the basic_json conversion operator + * issue #2958 - Inserting in unordered json using a pointer retains the leading slash + * issue #2982 - to_{binary format} does not provide a mechanism for specifying a custom 
allocator for the returned type + * issue #3070 - Version 3.10.3 breaks backward-compatibility with 3.10.2 + * issue #3077 - explicit constructor with default does not compile + * issue #3108 - ordered_json doesn't support range based erase + * issue #3343 - json and ordered_json are not interchangeable + * issue #3171 - if class is_constructible from std::string wrong from_json overload is being selected, compilation failed + * issue #3312 - Parse to custom class from unordered_json breaks on G++11.2.0 with C++20 + * issue #3428 - Error occurred when converting nlohmann::json to std::any + * issue #3204 - ambiguous regression + * issue #3333 - Ambiguous conversion from nlohmann::basic_json<> to custom class + * issue #3810 - ordered_json doesn't support construction from C array of custom type + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++17 +* GNU 9.4.0 with standard gnu++20 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++17 +* GNU 9.4.0 with standard gnu++20 +* GNU 11.5.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++17 +* GNU 11.5.0 with standard gnu++20 +* GNU 8.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++11 +* Clang 20.1.8 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* GNU 13.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++17 +* GNU 13.3.0 with standard gnu++20 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++17 +* GNU 9.4.0 with standard gnu++20 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* Clang 18.1.8 with standard gnu++17 +* Clang 18.1.8 with standard gnu++20 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* GNU 14.3.0 with standard gnu++20 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 
+* GNU 13.3.0 with standard gnu++17 +* GNU 13.3.0 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* Clang 20.1.8 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* GNU 14.3.0 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 12.5.0 with standard gnu++17 +* GNU 12.5.0 with standard gnu++20 +* GNU 10.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++17 +* GNU 10.5.0 with standard gnu++20 +* GNU 13.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++17 +* GNU 13.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++17 +* GNU 9.4.0 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* Clang 20.1.8 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.5.0 with standard gnu++17 +* GNU 9.5.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++17 +* GNU 9.4.0 with standard gnu++20 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 19.1.7 with standard gnu++17 +* Clang 19.1.7 with standard gnu++20 +* Clang 17.0.6 with standard gnu++11 +* Clang 17.0.6 with standard gnu++17 +* Clang 17.0.6 with standard gnu++20 +* Intel 2021.5.0.20211109 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++17 +* Intel 2021.5.0.20211109 with standard gnu++20 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 7.5.0 with standard gnu++17 +* GNU 13.4.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++17 +* GNU 13.4.0 with standard gnu++20 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard 
gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++17 +* GNU 9.4.0 with standard gnu++20 +* Linux-c++ with standard gnu++11 +* Linux-c++ with standard gnu++17 +* Linux-c++ with standard gnu++20 +* GNU 13.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++17 +* GNU 13.3.0 with standard gnu++20 + + +### List of tests in file unit-serialization.cpp + +* serialization + * operator<< + * no given width + * given width + * given fill + * operator>> + * no given width + * given width + * given fill + * dump + * invalid character + * ending with incomplete character + * unexpected character + * U+FFFD Substitution of Maximal Subparts + * to_string +* serialization for extreme integer values + * minimum + * maximum +* dump with binary values + * normal + * pretty-printed + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard 
gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-strings.cpp + +* accept + * noncharacter code positions + * overlong sequences + * Examples of an overlong ASCII character + * Maximum overlong sequences + * Overlong representation of the NUL character + * malformed sequences + * Unexpected continuation bytes + * Lonely start characters + * Sequences with last continuation byte missing + * Concatenation of incomplete sequences + * Impossible bytes +* Unicode + * escaped unicode + * unescaped unicode + * escaped utf-16 surrogates + * well-formed + * ill-formed +* parse + * whitespace + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 + +In the following configuration, however, some test-cases were skipped: + +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++20 +* 1 test case was skipped when using GNU 11.5.0 with standard gnu++11 +* 1 test case was skipped when using GNU 8.5.0 with standard gnu++11 +* 1 test case was 
skipped when using Clang 20.1.8 with standard gnu++11 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++14 +* 1 test case was skipped when using GNU 4.9.3 with standard gnu++11 +* 1 test case was skipped when using Clang 18.1.8 with standard gnu++11 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++11 +* 1 test case was skipped when using GNU 13.3.0 with standard gnu++11 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++11 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++11 +* 1 test case was skipped when using GNU 12.5.0 with standard gnu++11 +* 1 test case was skipped when using GNU 10.5.0 with standard gnu++11 +* 1 test case was skipped when using GNU 13.3.0 with standard gnu++11 +* 1 test case was skipped when using GNU 9.5.0 with standard gnu++11 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++14 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++23 +* 1 test case was skipped when using GNU 6.4.0 with standard gnu++11 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++17 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++20 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++17 +* 1 test case was skipped when using Clang 19.1.7 with standard gnu++11 +* 1 test case was skipped when using Clang 17.0.6 with standard gnu++11 +* 1 test case was skipped when using Intel 2021.5.0.20211109 with standard gnu++11 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++23 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++17 +* 1 test case was skipped when using GNU 7.5.0 with standard gnu++11 +* 1 test case was skipped when using GNU 13.4.0 with standard gnu++11 +* 1 test case was skipped when using GNU 5.5.0 with standard gnu++11 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++23 +* 1 test case was skipped when using Clang 20.1.8 with standard 
gnu++14 +* 1 test case was skipped when using GNU 4.8.5 with standard gnu++11 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++20 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++11 +* 1 test case was skipped when using GNU 9.4.0 with standard gnu++11 +* 1 test case was skipped when using Linux-c++ with standard gnu++11 +* 1 test case was skipped when using GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-testsuites.cpp + +* compliance tests from json.org + * expected failures + * no failures with trailing literals (relaxed) + * expected passes +* compliance tests from nativejson-benchmark + * doubles + * strings + * roundtrip +* test suite from json-test-suite + * read all sample.json +* json.org examples + * 1.json + * 2.json + * 3.json + * 4.json + * 5.json + * FILE 1.json + * FILE 2.json + * FILE 3.json + * FILE 4.json + * FILE 5.json +* RFC 8259 examples + * 7. Strings + * 8.3 String Comparison + * 13 Examples +* nst's JSONTestSuite + * test_parsing + * y + * n + * n -> y (relaxed) + * i -> y + * i/y -> n (out of range) + * i -> n +* nst's JSONTestSuite (2) + * test_parsing + * y + * n + * n (previously overflowed) + * i -> y + * i -> n +* Big List of Naughty Strings + * parsing blns.json + * roundtripping + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 
with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-to_chars.cpp + +* digit gen + * single precision + * double precision +* formatting + * single precision + * double precision + * integer + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 
13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-type_traits.cpp + +* type traits + * is_c_string + * char * + * char[] + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard 
gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-ubjson.cpp + +* UBJSON + * individual values + * discarded + * null + * boolean + * true + * false + * number + * signed + * -9223372036854775808..-2147483649 (int64) + * -2147483648..-32769 (int32) + * -32768..-129 (int16) + * -9263 (int16) + * -128..-1 (int8) + * 0..127 (int8) + * 128..255 (uint8) + * 256..32767 (int16) + * 65536..2147483647 (int32) + * 2147483648..9223372036854775807 (int64) + * unsigned + * 0..127 (int8) + * 128..255 (uint8) + * 256..32767 (int16) + * 65536..2147483647 (int32) + * 2147483648..9223372036854775807 (int64) + * float64 + * 3.1415925 + * high-precision number + * unsigned integer number + * signed integer number + * floating-point number + * errors + * serialization + * string + * N = 0..127 + * N = 128..255 + * N = 256..32767 + * N = 65536..2147483647 + * binary + * N = 0..127 + * N = 128..255 + * N = 256..32767 + * N = 32768..2147483647 + * Other Serializations + * No Count No Type + * Yes Count No Type + * array + * empty + * size=false type=false + * size=true type=false + * size=true type=true + * [null] + * size=false type=false + * size=true 
type=false + * size=true type=true + * [1,2,3,4,5] + * size=false type=false + * size=true type=false + * size=true type=true + * [[[[]]]] + * size=false type=false + * size=true type=false + * size=true type=true + * array with uint16_t elements + * size=false type=false + * size=true type=false + * size=true type=true + * array with uint32_t elements + * size=false type=false + * size=true type=false + * size=true type=true + * object + * empty + * size=false type=false + * size=true type=false + * size=true type=true + * {\ + * size=false type=false + * size=true type=false + * size=true type=true + * {\ + * size=false type=false + * size=true type=false + * size=true type=true + * errors + * strict mode + * non-strict mode + * strict mode + * excessive size + * array + * object + * SAX aborts + * start_array() + * start_object() + * key() in object + * start_array(len) + * start_object(len) + * key() in object with length + * parsing values + * strings + * number + * float + * array + * optimized version (length only) + * optimized version (type and length) + * parse errors + * empty byte vector + * char + * eof after C byte + * byte out of range + * strings + * eof after S byte + * invalid byte + * array + * optimized array: no size following type + * strings + * sizes + * types + * arrays + * objects + * writing optimized values + * integer + * array of i + * array of U + * array of I + * array of l + * array of L + * unsigned integer + * array of i + * array of U + * array of I + * array of l + * array of L + * discarded +* Universal Binary JSON Specification Examples 1 + * Null Value + * No-Op Value + * Boolean Types + * Numeric Types + * Char Type + * String Type + * English + * Russian + * Russian + * Array Type + * size=false type=false + * size=true type=false + * size=true type=true + * Object Type + * size=false type=false + * size=true type=false + * size=true type=true + * Optimized Format + * Array Example + * No Optimization + * Optimized with 
count + * Optimized with type & count + * Object Example + * No Optimization + * Optimized with count + * Optimized with type & count + * Special Cases (Null, No-Op and Boolean) + * Array + * Object +* all UBJSON first bytes +* UBJSON roundtrips + * input from self-generated UBJSON files + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 + +In the following configuration, however, some test-cases were skipped: + +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++20 +* 1 test case was skipped when using GNU 11.5.0 with standard gnu++11 +* 1 test case was skipped when using GNU 8.5.0 with standard gnu++11 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++11 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++14 +* 1 test case was skipped when using GNU 4.9.3 with standard gnu++11 +* 1 test case was skipped when using Clang 18.1.8 with standard gnu++11 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++11 +* 1 test case was skipped when using GNU 13.3.0 with standard gnu++11 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++11 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++11 +* 1 test case was skipped when using GNU 12.5.0 with standard gnu++11 +* 1 test case was skipped when using GNU 10.5.0 with standard gnu++11 +* 1 test case was skipped when using GNU 13.3.0 with standard gnu++11 +* 1 test case was skipped when using GNU 9.5.0 with standard gnu++11 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++14 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++23 +* 1 test case was 
skipped when using GNU 6.4.0 with standard gnu++11 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++17 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++20 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++17 +* 1 test case was skipped when using Clang 19.1.7 with standard gnu++11 +* 1 test case was skipped when using Clang 17.0.6 with standard gnu++11 +* 1 test case was skipped when using Intel 2021.5.0.20211109 with standard gnu++11 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++23 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++17 +* 1 test case was skipped when using GNU 7.5.0 with standard gnu++11 +* 1 test case was skipped when using GNU 13.4.0 with standard gnu++11 +* 1 test case was skipped when using GNU 5.5.0 with standard gnu++11 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++23 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++14 +* 1 test case was skipped when using GNU 4.8.5 with standard gnu++11 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++20 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++11 +* 1 test case was skipped when using GNU 9.4.0 with standard gnu++11 +* 1 test case was skipped when using Linux-c++ with standard gnu++11 +* 1 test case was skipped when using GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-udl.cpp + +* user-defined string literals + * using namespace nlohmann::literals::json_literals + * using namespace nlohmann::json_literals + * using namespace nlohmann::literals + * using namespace nlohmann + * global namespace + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with 
standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-udt.cpp + +* basic usage + * conversion to json via free-functions + * conversion from json via free-functions + * via explicit calls to get + * via explicit calls to get_to + * implicit conversions +* adl_serializer specialization + * partial specialization + * to_json + * from_json + * total specialization + * to_json + * from_json +* even supported types can be specialized +* Non-copyable types + * to_json + * from_json +* custom serializer for pods +* custom serializer that does adl by default +* 
different basic_json types conversions + * null + * boolean + * discarded + * array + * integer + * float + * unsigned + * string + * binary + * object + * get +* an incomplete type does not trigger a compiler error in non-evaluated context +* Issue #924 +* Issue #1237 +* compatible array type, without iterator type alias + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with 
standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-udt_macro.cpp + +* Serialization/deserialization via NLOHMANN_DEFINE_TYPE_INTRUSIVE and NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE + * person +* Serialization/deserialization via NLOHMANN_DEFINE_DERIVED_TYPE_INTRUSIVE and NLOHMANN_DEFINE_DERIVED_TYPE_NON_INTRUSIVE + * person +* Serialization/deserialization via NLOHMANN_DEFINE_TYPE_INTRUSIVE_WITH_DEFAULT and NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE_WITH_DEFAULT + * person with default values +* Serialization/deserialization via NLOHMANN_DEFINE_DERIVED_TYPE_INTRUSIVE_WITH_DEFAULT and NLOHMANN_DEFINE_DERIVED_TYPE_NON_INTRUSIVE_WITH_DEFAULT + * derived person with default values +* Serialization/deserialization of classes with 26 public/private member variables via NLOHMANN_DEFINE_TYPE_INTRUSIVE and NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE + * alphabet +* Serialization of non-default-constructible classes via NLOHMANN_DEFINE_TYPE_INTRUSIVE_ONLY_SERIALIZE and NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE_ONLY_SERIALIZE + * person +* Serialization of non-default-constructible classes via NLOHMANN_DEFINE_DERIVED_TYPE_INTRUSIVE_ONLY_SERIALIZE and NLOHMANN_DEFINE_DERIVED_TYPE_NON_INTRUSIVE_ONLY_SERIALIZE + * derived person only serialize + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 
with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-unicode1.cpp + +* Unicode (1/5) + * \\uxxxx sequences + * correct sequences + * incorrect sequences + * incorrect surrogate values + * incorrect sequences + * high surrogate without low surrogate + * high surrogate with wrong low surrogate + * low surrogate without high surrogate + * read all unicode characters + * check JSON Pointers + * ignore byte-order-mark + * in a stream + * with an iterator + * error for incomplete/wrong BOM +* Markus Kuhn's UTF-8 decoder capability and stress test + * 1 Some correct UTF-8 text + * 2 Boundary condition test cases + * 2.1 First possible sequence of a certain length + * 2.2 Last possible sequence of a certain length + * 2.3 Other boundary conditions + * 3 Malformed sequences + * 3.1 Unexpected continuation bytes + * 3.2 Lonely start characters + * 3.3 Sequences with last continuation byte missing + * 3.4 Concatenation of incomplete sequences + * 3.5 
Impossible bytes + * 4 Overlong sequences + * 4.1 Examples of an overlong ASCII character + * 4.2 Maximum overlong sequences + * 4.3 Overlong representation of the NUL character + * 5 Illegal code positions + * 5.1 Single UTF-16 surrogates + * 5.2 Paired UTF-16 surrogates + * 5.3 Noncharacter code positions + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 + +In the following configuration, however, some test-cases were skipped: + +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++20 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++14 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++11 +* 1 test case was skipped when using GNU 13.3.0 with standard gnu++11 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++11 +* 1 test case was skipped when using GNU 13.3.0 with standard gnu++11 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++14 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++23 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++17 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++20 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++17 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++23 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++17 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++23 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++14 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++20 +* 1 test case was skipped when using Clang 20.1.8 with 
standard gnu++11 +* 1 test case was skipped when using GNU 9.4.0 with standard gnu++11 +* 1 test case was skipped when using Linux-c++ with standard gnu++11 +* 1 test case was skipped when using GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-unicode2.cpp + +* Unicode (2/5) + * RFC 3629 + * ill-formed first byte + * UTF8-1 (x00-x7F) + * well-formed + * UTF8-2 (xC2-xDF UTF8-tail) + * well-formed + * ill-formed: missing second byte + * ill-formed: wrong second byte + * UTF8-3 (xE0 xA0-BF UTF8-tail) + * well-formed + * ill-formed: missing second byte + * ill-formed: missing third byte + * ill-formed: wrong second byte + * ill-formed: wrong third byte + * UTF8-3 (xE1-xEC UTF8-tail UTF8-tail) + * well-formed + * ill-formed: missing second byte + * ill-formed: missing third byte + * ill-formed: wrong second byte + * ill-formed: wrong third byte + * UTF8-3 (xED x80-9F UTF8-tail) + * well-formed + * ill-formed: missing second byte + * ill-formed: missing third byte + * ill-formed: wrong second byte + * ill-formed: wrong third byte + * UTF8-3 (xEE-xEF UTF8-tail UTF8-tail) + * well-formed + * ill-formed: missing second byte + * ill-formed: missing third byte + * ill-formed: wrong second byte + * ill-formed: wrong third byte + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 + +In the following configuration, however, some test-cases were skipped: + +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++20 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++14 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++11 +* 1 test case was skipped when using GNU 13.3.0 with 
standard gnu++11 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++11 +* 1 test case was skipped when using GNU 13.3.0 with standard gnu++11 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++14 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++23 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++17 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++20 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++17 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++23 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++17 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++23 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++14 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++20 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++11 +* 1 test case was skipped when using GNU 9.4.0 with standard gnu++11 +* 1 test case was skipped when using Linux-c++ with standard gnu++11 +* 1 test case was skipped when using GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-unicode3.cpp + +* Unicode (3/5) + * RFC 3629 + * UTF8-4 (xF0 x90-BF UTF8-tail UTF8-tail) + * well-formed + * ill-formed: missing second byte + * ill-formed: missing third byte + * ill-formed: missing fourth byte + * ill-formed: wrong second byte + * ill-formed: wrong third byte + * ill-formed: wrong fourth byte + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 + +In the following configuration, however, some test-cases were skipped: + +* 1 
test case was skipped when using GNU 14.3.0 with standard gnu++20 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++14 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++11 +* 1 test case was skipped when using GNU 13.3.0 with standard gnu++11 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++11 +* 1 test case was skipped when using GNU 13.3.0 with standard gnu++11 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++14 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++23 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++17 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++20 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++17 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++23 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++17 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++23 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++14 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++20 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++11 +* 1 test case was skipped when using GNU 9.4.0 with standard gnu++11 +* 1 test case was skipped when using Linux-c++ with standard gnu++11 +* 1 test case was skipped when using GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-unicode4.cpp + +* Unicode (4/5) + * RFC 3629 + * UTF8-4 (xF1-F3 UTF8-tail UTF8-tail UTF8-tail) + * well-formed + * ill-formed: missing second byte + * ill-formed: missing third byte + * ill-formed: missing fourth byte + * ill-formed: wrong second byte + * ill-formed: wrong third byte + * ill-formed: wrong fourth byte + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard 
gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 + +In the following configuration, however, some test-cases were skipped: + +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++20 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++14 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++11 +* 1 test case was skipped when using GNU 13.3.0 with standard gnu++11 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++11 +* 1 test case was skipped when using GNU 13.3.0 with standard gnu++11 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++14 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++23 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++17 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++20 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++17 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++23 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++17 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++23 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++14 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++20 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++11 +* 1 test case was skipped when using GNU 9.4.0 with standard gnu++11 +* 1 test case was skipped when using Linux-c++ with standard gnu++11 +* 1 test case was skipped when using GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-unicode5.cpp + +* Unicode (5/5) + * RFC 3629 + * UTF8-4 (xF4 x80-8F UTF8-tail UTF8-tail) + * well-formed + * ill-formed: missing second byte + * ill-formed: missing third byte + * ill-formed: missing fourth byte + * ill-formed: wrong second byte + * 
ill-formed: wrong third byte + * ill-formed: wrong fourth byte + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 + +In the following configuration, however, some test-cases were skipped: + +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++20 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++14 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++11 +* 1 test case was skipped when using GNU 13.3.0 with standard gnu++11 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++11 +* 1 test case was skipped when using GNU 13.3.0 with standard gnu++11 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++14 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++23 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++17 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++20 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++17 +* 1 test case was skipped when using GNU 14.3.0 with standard gnu++23 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++17 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++23 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++14 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++20 +* 1 test case was skipped when using Clang 20.1.8 with standard gnu++11 +* 1 test case was skipped when using GNU 9.4.0 with standard gnu++11 +* 1 test case was skipped when using Linux-c++ with standard gnu++11 +* 1 test case was skipped when using GNU 13.3.0 with standard gnu++11 + + +### List of tests in 
file unit-user_defined_input.cpp + +* Use arbitrary stdlib container +* Custom container non-member begin/end +* Custom container member begin/end +* Custom iterator + + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-windows_h.cpp + +* include windows.h 
+ + + +All tests in this file were run in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + + +### List of tests in file unit-wstring.cpp + +* wide strings + * std::wstring + * invalid std::wstring + * std::u16string + * invalid std::u16string + * std::u32string + * invalid std::u32string + + + +All tests in this file were run 
in the following configurations: + +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++20 +* GNU 9.4.0 with standard gnu++11 +* GNU 11.5.0 with standard gnu++11 +* GNU 8.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++14 +* GNU 9.4.0 with standard gnu++11 +* GNU 4.9.3 with standard gnu++11 +* Clang 18.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 14.3.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 12.5.0 with standard gnu++11 +* GNU 10.5.0 with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.5.0 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++14 +* Clang 20.1.8 with standard gnu++23 +* GNU 6.4.0 with standard gnu++11 +* GNU 14.3.0 with standard gnu++17 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++17 +* Clang 19.1.7 with standard gnu++11 +* Clang 17.0.6 with standard gnu++11 +* Intel 2021.5.0.20211109 with standard gnu++11 +* GNU 14.3.0 with standard gnu++23 +* Clang 20.1.8 with standard gnu++17 +* GNU 7.5.0 with standard gnu++11 +* GNU 13.4.0 with standard gnu++11 +* GNU 5.5.0 with standard gnu++11 +* Clang 20.1.8 with standard gnu++23 +* Clang 20.1.8 with standard gnu++14 +* GNU 4.8.5 with standard gnu++11 +* Clang 20.1.8 with standard gnu++20 +* Clang 20.1.8 with standard gnu++11 +* GNU 9.4.0 with standard gnu++11 +* Linux-c++ with standard gnu++11 +* GNU 13.3.0 with standard gnu++11 + diff --git a/TSF/docs/nlohmann_misbehaviours_comments.md b/TSF/docs/nlohmann_misbehaviours_comments.md new file mode 100644 index 0000000000..6f09697b9f --- /dev/null +++ b/TSF/docs/nlohmann_misbehaviours_comments.md @@ -0,0 +1,75 @@ +# List of known misbehaviours + +This 
file collects and comments the known misbehaviours opened after the release of version 3.12.0 as identified in the [issues](https://github.com/nlohmann/json/issues?q=is%3Aissue%20state%3Aopen%20label%3A%22kind%3A%20bug%22) on nlohmann/json. + +## known open misbehaviours + +issue-id | applies to S-CORE | comment +---------|-------------------|-------- +4946 | No | Compatibility with CMake < 3.5 has been removed from CMake as of [CMake 4.0+](https://cmake.org/cmake/help/latest/command/cmake_minimum_required.html) +4925 | No | Optimized binary arrays have to be explicitly enabled when parsing from BJdata; otherwise an exception is thrown. +4916 | No | Version 3.12.0 of nlohmann::json does not contain a constructor accepting std::views. +4903 | No | Defining the namespace "nlohmann" multiple times within the same project leads to an error. +4901 | No | Using json::from_ubjson() (cf. [here](https://json.nlohmann.me/api/basic_json/from_ubjson/)) on long nested inputs can lead to stack overflow. +4898 | No | Brace initialisation yields array, cf. [here](https://json.nlohmann.me/home/faq/#brace-initialization-yields-arrays). +4864 | No | Using std::optional with nlohmann::json is broken in version 3.12.0, but shall be fixed in version 3.12.1. +4842 | No | Instead of the provided allocator, the standard allocator is used in the non-recursive destructor. +4813 | No | This issue is observed under specific circumstances only; in particular, basic_json is not affected. +4810 | No | nlohmann::json currently does not allow selecting a custom allocator. +4714 | No | Binary formats are creating broken outputs when discarded values are included in arrays/objects. +4621 | No | Deprecation warning when using the .at or .value functions on an ordered_json object with a parameter type of json_pointer; this issue is still open in version 3.12.0. 
+4552 | No | Invalid UTF-8 characters are not ignored when passing error_handler_t::ignore to dump(); this issue is still open in version 3.12.0. +4104 | No | This bug was observed in version 3.11.2; in version 3.12.0 it appears that the minimal code example does not trigger an error. +4041 | No | This issue was observed in version 3.11.2; it is fixed in version 3.12.0. +3970 | No | The use of C++20 modules with nlohmann/json may lead to errors; this issue still exists in version 3.12.0 +3912 | No | There is currently no way to query object keys via std::string_view; this issue still exists in version 3.12.0. +3907 | No | Using CUDA with gcc as host compiler can lead to compiler errors. This issue still exists in version 3.12.0. +3885 | No | Using meson instead of cmake to build the library does not work; use cmake to guarantee the expected outcome. +3868 | No | This issue regards the compatibility with the latest C++ standard. +3859 | No | json.value() with optional fallback value does not compile; this issue is still open in version 3.12.0. +3732 | No | Using iteration_proxy_value with ordered_json as shown below fails to compile due to an incomplete type error in iterator set_parents(iterator it, typename iterator::difference_type count_set_parents); this issue still exists in version 3.12.0. +3669 | No | This issue was observed in version 3.10.3; it appears fixed in version 3.12.0. +3659 | No | Moving a directory into or out of the serve_header.py root is not detected; this is not an issue if the release version 3.12.0 is used without any changes. +3583 | No | The performance of destroy() is quite slow. +3578 | No | Custom number types with non-trivial destructors and move-constructors are not permitted. +3425 | No | This issue is fixed in version 3.12.0 with the corresponding test in line 323 of unit-alt-string.cpp +3381 | No | Keys of objects are required to be strings; and the literal null is not a string. 
+3106 | No | Setting JSON_DIAGNOSTICS was broken in version 3.10.4. +2649 | No | This issue was observed in version 3.9.1; it appears fixed in version 3.12.0. +2226 | No | std::tuple::tuple(std::tuple&&) constructor creates a temporary object and a dangling reference. This issue still exists in version 3.12.0. + +## known misbehaviours closed since being opened + +issue-id | applies to S-CORE | comment +---------|-------------------|-------- +4733 | No | Clang 11.0.x with libc++ fails to compile tests in C++20 mode due to incomplete char8_t support in std::filesystem::path. +4740 | No | Using std::optional with nlohmann::json is broken in version 3.12.0, but shall be fixed in version 3.12.1. +4745 | No | Compiling version 3.12.0 with /std:c++ latest in Visual Studio 2022 17.12.7 raises compiler errors. +4746 | No | If you do not use the single_include json.hpp as intended, then the library may not quite work as intended. +4755 | No | The serialization of floating-point numbers is handled in two code paths. If number_float_t is double or long_double, then no issue arises. +4756 | No | nlohmann::ordered_json::from_msgpack() does not work with buffer of type std::vector\ using Xcode 16.3 and C++20. +4759 | No | Wrapping the library into a module fails due to `static` in lines 9832 and 3132. +4762 | No | Default return value for type_name() is number, which makes some error messages more than cryptic. +4778 | No | std::is_trivial is deprecated in C++26, using GCC 15.1.1 produces a deprecation warning. +4780 | No | The conversion from JSON to std::optional does not work. +4792 | No | C++20 support of NVHPC 25.5 is broken. +4798 | No | The float value is encoded to msgpack as double if it contains float NaN or infinity. +4804 | No | Trying to use json::from_cbor with a binary_t set to std::vector\ will fail. +4812 | No | Only binary formats like CBOR or MessagePack allow writing and reading binary values; no misbehaviour. 
+4819 | No | This is a bug in gcc 14.2, which will not be suppressed by the library. +4821 | No | Cf. https://json.nlohmann.me/home/faq/#brace-initialization-yields-arrays +4825 | No | template class nlohmann::basic_json<>; leads to a compilation error "ambiguous static_caststd::string" inside binary_writer::write_bjdata_ndarray. +4826 | No | Issue closed due to inactivity. +4828 | No | Cryptic issue with joining objects on keys, no minimal working example provided. +4834 | No | Using std::optional with nlohmann::json is broken in version 3.12.0, but shall be fixed in version 3.12.1. +4842 | No | The vector used to track nested objects and arrays is allocated with the standard allocators, but the issue expects a different allocator. This issue is not a critical bug. +4852 | No | CONTRIBUTING.md does not mention the code style that is enforced for this project. +4854 | No | nullptr as SAX handler is not explicitly handled, shall be fixed in 3.12.1. +4863 | No | Shall be fixed in 3.12.1. +4869 | No | The linkage of this [link](https://raw.githubusercontent.com/nlohmann/json/v3.11.3/single_include/nlohmann/json.hpp) pointed erroneously to version 3.12.0 for some time. +4890 | No | If the coveralls service website is down, then the CI-pipeline fails by default. +4892 | No | This feature request is obsolete. + + + + diff --git a/TSF/docs/report.rst b/TSF/docs/report.rst new file mode 100644 index 0000000000..0a07171593 --- /dev/null +++ b/TSF/docs/report.rst @@ -0,0 +1,26 @@ +.. + # ******************************************************************************* + # Copyright (c) 2025 Contributors to the Eclipse Foundation + # + # See the NOTICE file(s) distributed with this work for additional + # information regarding copyright ownership. 
+ # + # This program and the accompanying materials are made available under the + # terms of the Apache License Version 2.0 which is available at + # https://www.apache.org/licenses/LICENSE-2.0 + # + # SPDX-License-Identifier: Apache-2.0 + # ******************************************************************************* + +.. _report: + +Report +================= + +.. toctree:: + :maxdepth: 2 + :caption: Report + :glob: + + generated/trustable_graph.rst + generated/trustable_report_for_Software.md \ No newline at end of file diff --git a/TSF/docs/score_calculation_example.svg b/TSF/docs/score_calculation_example.svg new file mode 100644 index 0000000000..91834b5678 --- /dev/null +++ b/TSF/docs/score_calculation_example.svg @@ -0,0 +1,102 @@ +

TIJ-01: The parsing service
throws an exception on
ill-formed literal names.
Trustable-score = mean(TIJ-01.1, TIJ-01.2) = mean(0.72, 0.9) = 0.81

TIJ-01.1: The service
throws an exception
on capitalised literal
names.
SME-score = included (0.9)
Validator = included (0.8)
Reference = included
Trustable-score = SME-score × Validator-score
= 0.9 × 0.8 = 0.72

TIJ-01.2: The service
throws an exception
on any other than the three
literal names true, false, null.
SME-score = included (0.9)
Validator = excluded
Reference = included
Trustable-score = SME-score = 0.9

\ No newline at end of file diff --git a/TSF/docs/tsf_overview.drawio.svg b/TSF/docs/tsf_overview.drawio.svg new file mode 100644 index 0000000000..76fc02dbd4 --- /dev/null +++ b/TSF/docs/tsf_overview.drawio.svg @@ -0,0 +1,4 @@ + + + + \ No newline at end of file diff --git a/TSF/scripts/README.md b/TSF/scripts/README.md new file mode 100644 index 0000000000..4095db22e9 --- /dev/null +++ b/TSF/scripts/README.md @@ -0,0 +1,71 @@ +# Scripts used for the documentation + +The following scripts are used in the automatic compilation of the trustable report. + +## capture_test_data.py + +The python-script [capture_test_data.py](capture_test_data.py) is intended to run at the end of the [ubuntu-workflow](../../.github/workflows/ubuntu.yml). It collects and combines the test-results that were collected in the individual jobs, appends the persistent test data storage and generates a database containing the results of the most recent tests only. Since storage space on the github is limited, the storage of test data is limited to two weeks only. It must be noted that this implementation is only intended as a temporary proof of concept! + +## capture_test_data_memory_sensitive.py + +The python script [capture_test_data_memory_sensitive.py](capture_test_data_memory_sensitive.py) is an alternative to [capture_test_data.py](capture_test_data.py), which is more memory sensitive. +Instead of capturing the full test-data, a first snapshot of the data is captured, which is then updated whenever a relevant change is detected. + +The characterisation of "relevant change" follows the following heuristic argumentation: + +* The execution time of each test is of lesser interest, therefore it is not continuously updated. +* A test is uniquely identified by the columns ctest_target, name and cpp_standard. 
+ +Moreover, a size-check is performed on the persistent storage, using the following heuristic assumptions + +* It is expected, that an update of a test-result only happens, when a test fails, which leads automatically to the failure of the pipeline, which should trigger a manual review by the maintainer, or failed before. +* It is expected, that tests fail rarely, and in particular on average less than 10 different test-cases per workflow-run fail. +* It is expected, that less than 1,000 workflow runs, where a record of the test-result is triggered, happen per year, since these are only triggered once daily and on each push to main. +* It is expected, that a ten-year-record of test-results is sufficient for documentation purposes. + +In view of these assumptions, we limit the storage to approximately 100,000 test-results and 100,000 workflow-metadata, which guarantees *en passant* that github's file size limit of 100MB is respected. +In the worst case that every recorded test result is detected as a relevant change, this restraint collects test-results from 27 workflows in total. +Whenever either limit of 100,000 test-results and 100,000 workflow-metadata in the persistent data storage is violated, the script returns the error "The persistent data storage is too large! Please move persistent data to external storage.", thereby failing the workflow, which advises the maintainer to take up action. + +## clean_trudag_report.py + +The python-script [clean_trudag_report.py](clean_trudag_report.py) runs at the end of generate_report.sh. +By default, the report generated by trudag has the format that is expected by MkDocs. +This format, however, is not properly rendered by sphinx, so that an adaption of the output is necessary. +This adaption happens within this script. + +## generate_report.sh + +The shell-script [generate_report.sh](generate_report.sh) is used to generate the trustable report and the graphs of which the documentation consists. 
+In particular, it first cleans up potentially left-over obsolete reports, then generates the report and the full view of the trustable graph using trudag. +Thereafter, plot_partial_graphs.py is called, before finally clean_trudag_report.py is executed. + +## plot_partial_graphs.py + +The python-script [plot_partial_graphs.py](plot_partial_graphs.py) is used to generate a myriad of partial graphs with links amongst them. +These are then used to augment the full graph, which is, due to its size, quite hard to display, in the documentation. +By the means of the links of the items, a more or less \"navigable\" version of the trustable graph is created, which displays a handful of items at a time, only. + +# Helper-scripts for offline development + +## fetch_json.cpp + +The C++ script [fetch_json.cpp](fetch_json.cpp) is used to extract lists of paths to json testsuites from unit-testsuites.cpp, which are then used in testsuite-references. This script is purely used during the manual compilation of the references. + +## generate_documentation.sh + +The shell-script [generate_documentation.sh](generate_documentation.sh) generates a locally hosted version of the documentation, which is used for testing purposes. + +## generate_list_of_tests.py + +The python script [generate_list_of_tests.py](generate_list_of_tests.py) is used to generate the [list_of_test_environments.md](../docs/list_of_test_environments.md) listing the expected test-cases with their execution environments. +The file [list_of_test_environments.md](../docs/list_of_test_environments.md) is then persistently stored within the documentation. +Additionally, this script is utilised within the validator ``check_list_of_tests`` to generate a list of test environments from the most recent test results and to compare this list with the persistently stored one. 
+In case that new tests are added, tests are removed or test environments change, it is therefore necessary to have an accurate list_of_test_environments.md, which is highly recommended to be generated using this script. +To run the script, the database `MemoryEfficientTestResults.db` must be downloaded from the artifact of the most recent successful ubuntu workflow, and placed in the folder `artifacts` in the root of the repository. +Moreover, it must be ensured that this script is run from within the root of the repository, only. + +## generate_subgraph_plots.sh + +The shell-script [generate_subgraph_plots.sh](generate_subgraph_plots.sh) is used to generate subgraphs of the trustable graph using trudag plot with the --pick option. +Since the options of adding custom hyperlinks to the individual nodes of the generated subgraphs are not quite as flexible as desired for the \"navigable\" version of the trustable graph, we opted to use the custom script plot_partial_graphs.py. diff --git a/TSF/scripts/__init__.py b/TSF/scripts/__init__.py new file mode 100644 index 0000000000..c2d4cd1531 --- /dev/null +++ b/TSF/scripts/__init__.py @@ -0,0 +1 @@ +# This file makes the directory a Python package \ No newline at end of file diff --git a/TSF/scripts/capture_test_data.py b/TSF/scripts/capture_test_data.py new file mode 100644 index 0000000000..4383c13962 --- /dev/null +++ b/TSF/scripts/capture_test_data.py @@ -0,0 +1,264 @@ +import sys +import sqlite3 +import os +import xml.etree.ElementTree as ET +import re +from datetime import datetime, timezone + +def setup_environment_variables() -> dict[str, str]: + # Retrieves and validates the necessary environment variables for GitHub workflows. + # Raises a RuntimeError if any required variables are missing. 
+ required_vars = ["GITHUB_RUN_ID", "GITHUB_REPOSITORY", "GITHUB_RUN_ATTEMPT"] + environment = {var: os.getenv(var) for var in required_vars} + + missing_vars = [var for var, value in environment.items() if not value] + if missing_vars: + raise RuntimeError(f"Missing required environment variables: {', '.join(missing_vars)}") + + return environment + +def clean_test_case(testcase: str) -> tuple[str,str]: + # This function expects a testcase of the form "testcase_name_cppxx". + # It returns the tuple ["testcase_name","gnu++xx"]. + name, appendix = testcase.rsplit('_',1) + return [name, "gnu++"+appendix.replace('cpp','')] + +def read_result_table(input: list[str]) -> dict: + """ + This function expects console output of doctest. + It is assumed that this has the following form + [doctest] doctest version is "2.4.11" + [doctest] run with "--help" for options + =============================================================================== + [doctest] test cases: 1 | 1 passed | 0 failed | 0 skipped + [doctest] assertions: 45 | 45 passed | 0 failed | + [doctest] Status: SUCCESS! + + It extracts the number of passed/failed/skipped test cases, and passed/skipped assertions. 
+ """ + metadata = dict() + raw_data = input[0] + data = re.findall(r'(\d+)\s+(passed|failed|skipped)\b', raw_data) + if len(data) < 5: + raise RuntimeError("Fatal Error: Received incomplete or wrong result table.") + metadata["passed test cases"] = int(data[0][0]) + metadata["failed test cases"] = int(data[1][0]) + metadata["skipped test cases"] = int(data[2][0]) + metadata["passed assertions"] = int(data[3][0]) + metadata["failed assertions"] = int(data[4][0]) + return metadata + + +def get_metadata(testcase: ET.Element) -> dict: + # expects testcase extracted from a junit xml-file as input + # extracts the data interesting to us + # Assumption of Use: before execution, it is checked if is_unit_test(testcase)==True + metadata = dict() + # from name both name of the test and C++ standard can be extracted + unsplit_name = testcase.get("name", None) + if unsplit_name is None: + raise RuntimeError("Fatal error: Can not read name of test-case!") + name, standard = clean_test_case(unsplit_name) + metadata["name"] = name + metadata["standard"] = standard + metadata["execution time"] = float(testcase.get("time")) + # results are not as easily extracted but must be processed further + metadata = metadata | read_result_table(list(testcase.find("system-out").itertext())) + return metadata + +def is_unit_test(testcase: ET.Element) -> bool: + # crude test if the element belongs to a unit-test + return "_cpp" in testcase.get('name') + +def get_all_xml_files(directory: str = '.') -> list[str]: + # search the folder containing all the artifacts and list the paths of expected test-reports + result = [] + try: + content = os.listdir(directory) + except FileNotFoundError as e: + print(e) + return result + for entry in content: + if os.path.isdir(directory+'/'+entry): + result = result + get_all_xml_files(directory+'/'+entry) + if entry.endswith('.xml'): + file = directory+'/'+entry if directory != '.' 
else entry + result.append(file) + return result + +########################## +# Below starts the script. +########################## + +if __name__ == "__main__": + + # check if argument was delivered + if len(sys.argv) != 2: + raise RuntimeError("Expected status of workflow as argument. Aborting!") + # expected argument: status of workflow + # check if the argument has the expected form + status = sys.argv[1] + if status not in ["successful", "failed", "cancelled"]: + raise RuntimeError("The input does not match the expected format! Permissible are 'successful', 'failed' and 'cancelled'. Aborting!") + + # get environment variables + try: + environment = setup_environment_variables() + except RuntimeError as e: + raise RuntimeError("Critical error: Can not uniquely identify environment data! Aborting recording of data.") + + # initiate connection to database + connector = sqlite3.connect("TSF/TestResultData.db") + connector.execute("PRAGMA foreign_keys = ON") + cursor = connector.cursor() + + # load expected tables + command = ( + "CREATE TABLE IF NOT EXISTS workflow_info(", + "repo TEXT, ", # repository + "run_id INT, ", # ID of workflow run + "run_attempt INT, ", # Attempt-number of workflow run + "status TEXT ", # Termination-status of workflow + "CHECK(status IN ('successful', 'failed', 'cancelled')) DEFAULT 'failed', ", + "time INT, ", # the time that is associated to this workflow run + "PRIMARY KEY(repo, run_id, run_attempt))" + ) + cursor.execute(''.join(command)) + command = ( + "CREATE TABLE IF NOT EXISTS test_results(", + "timestamp INT, " # when the test-run was started + "name TEXT, ", # name of the test + "execution_time REAL, ", # execution time in seconds + "compiler TEXT, ", # compiler information + "cpp_standard TEXT, ", # cpp-standard + "passed_cases INT, ", # number of passed test-cases + "failed_cases INT, ", # number of failed test-cases + "skipped_cases INT, ", # number of skipped test-cases + "passed_assertions INT, ", # number of passed 
assertions + "failed_assertions INT, ", # number of failed assertions + "repo TEXT, ", # repository + "run_id INT, ", # ID of workflow run + "run_attempt INT, ", # Attempt-number of workflow run + "FOREIGN KEY(repo, run_id, run_attempt) REFERENCES workflow_info)" + ) + cursor.execute(''.join(command)) + + # Due to storage space constraints, only most recent 100 test-results are stored. + # Heuristic calculations have demonstrated that this should ensure that + # the TestResultData.db is below 100MiB, which is github's hard file size limit. + + cursor.execute("SELECT COUNT(*) FROM workflow_info") + saved_test_data = int(cursor.fetchone()[0]) + while saved_test_data>=100: + # delete oldest saved data + cursor.execute("SELECT MIN(time) FROM workflow_info") + oldest_time = int(cursor.fetchone()[0]) + cursor.execute("SELECT repo, run_id, run_attempt FROM workflow_info WHERE \"time\" = ?", (oldest_time,)) + results = cursor.fetchall() + # Delete all data associated to all the oldest workflow runs + for result in results: + # it is expected that there is only one result + cursor.execute("DELETE FROM test_results WHERE repo = \"?\" AND run_id = ? AND run_attempt = ?", (result[0],result[1],result[2])) + cursor.execute("DELETE FROM workflow_info WHERE repo = \"?\" AND run_id = ? AND run_attempt = ?", (result[0],result[1],result[2])) + connector.commit() + # don't forget to update! + cursor.execute("SELECT COUNT(*) FROM workflow_info") + saved_test_data = int(cursor.fetchone()[0]) + + # fill in metadata + # OBSERVE: This script expects the status of the github workflow as argument + repo = environment.get('GITHUB_REPOSITORY') + run_id = environment.get('GITHUB_RUN_ID') + run_attempt = environment.get('GITHUB_RUN_ATTEMPT') + time = int(datetime.now(timezone.utc).timestamp()) + command = f"INSERT INTO workflow_info VALUES(?,?,?,?,?)" + cursor.execute(command,(repo, run_id, run_attempt, status, time)) + # Don't forget to save! 
+ connector.commit() + + # Load my artifacts + failed_data = [] + junit_logs = get_all_xml_files("./my_artifacts/") + + #extract data + for junit_log in junit_logs: + tree = ET.parse(junit_log) + file_root = tree.getroot() + testsuite = next(file_root.iter('testsuite'), None) + if testsuite is None: + print(f"Error: Could not find testsuite data in {junit_log}.") + failed_data.append(junit_log) + continue + for testcase in (case for case in file_root.iter('testcase') if is_unit_test(case)): + metadata = get_metadata(testcase) + command = ( + "INSERT INTO test_results VALUES(", + f"{int(datetime.fromisoformat(testsuite.get('timestamp')).timestamp())}, ", + f"'{metadata.get('name')}', ", + f"{metadata.get('execution time')}, ", + f"'{testsuite.get('name')}', ", + f"'{metadata.get('standard')}', ", + f"{metadata.get('passed test cases')}, ", + f"{metadata.get('failed test cases')}, ", + f"{metadata.get('skipped test cases')}, ", + f"{metadata.get('passed assertions')}, ", + f"{metadata.get('failed assertions')}, ", + f"'{repo}', ", + f"{run_id}, ", + f"{run_attempt}" + ")" + ) + command = ''.join(command) + cursor.execute(command) + connector.commit() + + # storage space on the github is limited. 
+ + # finally, most recent test data are stored separately + + # initialise database connection + conn = sqlite3.connect("TestResults.db") + cur = conn.cursor() + # add the expected table + command = ( + "CREATE TABLE IF NOT EXISTS test_results(", + "name TEXT, ", # name of the test + "execution_time REAL, ", # execution time in seconds + "compiler TEXT, ", # compiler information + "cpp_standard TEXT, ", # cpp-standard + "passed_cases INT, ", # number of passed test-cases + "failed_cases INT, ", # number of failed test-cases + "skipped_cases INT, ", # number of skipped test-cases + "passed_assertions INT, ", # number of passed assertions + "failed_assertions INT", # number of failed assertions + ")" + ) + cur.execute(''.join(command)) + # copy most recent data from persistent data storage + cur.execute("ATTACH DATABASE 'TSF/TestResultData.db' AS source") + command = """ + INSERT INTO test_results ( + name, execution_time, compiler, cpp_standard, + passed_cases, failed_cases, skipped_cases, + passed_assertions, failed_assertions + ) + SELECT + name, execution_time, compiler, cpp_standard, + passed_cases, failed_cases, skipped_cases, + passed_assertions, failed_assertions + FROM source.test_results + WHERE repo = ? AND run_id = ? AND run_attempt = ? 
+ """ + cur.execute(command, (repo, run_id, run_attempt)) + conn.commit() + # detach persistent database + cur.execute("DETACH DATABASE source") + # terminate connection to temporary database + # don't forget to commit the changes + conn.commit() + conn.close() + + # terminate connection to persistent database + # don't forget to commit the changes again, for good measure + connector.commit() + connector.close() diff --git a/TSF/scripts/capture_test_data_memory_sensitive.py b/TSF/scripts/capture_test_data_memory_sensitive.py new file mode 100644 index 0000000000..260299e41a --- /dev/null +++ b/TSF/scripts/capture_test_data_memory_sensitive.py @@ -0,0 +1,278 @@ +import sys +import sqlite3 +import os +import xml.etree.ElementTree as ET +import re +from datetime import datetime, timezone + +def setup_environment_variables() -> dict[str, str]: + # Retrieves and validates the necessary environment variables for GitHub workflows. + # Raises a RuntimeError if any required variables are missing. + required_vars = ["GITHUB_RUN_ID", "GITHUB_REPOSITORY", "GITHUB_RUN_ATTEMPT"] + environment = {var: os.getenv(var) for var in required_vars} + + missing_vars = [var for var, value in environment.items() if not value] + if missing_vars: + raise RuntimeError(f"Missing required environment variables: {', '.join(missing_vars)}") + + return environment + +def clean_test_case(testcase: str) -> tuple[str,str]: + # This function expects a testcase of the form "testcase_name_cppxx". + # It returns the tuple ["testcase_name","gnu++xx"]. + name, appendix = testcase.rsplit('_',1) + return [name, "gnu++"+appendix.replace('cpp','')] + +def read_result_table(input: list[str]) -> dict: + """ + This function expects console output of doctest. 
+ It is assumed that this has the following form + [doctest] doctest version is "2.4.11" + [doctest] run with "--help" for options + =============================================================================== + [doctest] test cases: 1 | 1 passed | 0 failed | 0 skipped + [doctest] assertions: 45 | 45 passed | 0 failed | + [doctest] Status: SUCCESS! + + It extracts the number of passed/failed/skipped test cases, and passed/skipped assertions. + """ + metadata = dict() + raw_data = input[0] + data = re.findall(r'(\d+)\s+(passed|failed|skipped)\b', raw_data) + if len(data) < 5: + raise RuntimeError("Fatal Error: Received incomplete or wrong result table.") + metadata["passed test cases"] = int(data[0][0]) + metadata["failed test cases"] = int(data[1][0]) + metadata["skipped test cases"] = int(data[2][0]) + metadata["passed assertions"] = int(data[3][0]) + metadata["failed assertions"] = int(data[4][0]) + return metadata + + +def get_metadata(testcase: ET.Element) -> dict: + # expects testcase extracted from a junit xml-file as input + # extracts the data interesting to us + # Assumption of Use: before execution, it is checked if is_unit_test(testcase)==True + metadata = dict() + # from name both name of the test and C++ standard can be extracted + unsplit_name = testcase.get("name", None) + if unsplit_name is None: + raise RuntimeError("Fatal error: Can not read name of test-case!") + name, standard = clean_test_case(unsplit_name) + metadata["name"] = name + metadata["standard"] = standard + metadata["execution time"] = float(testcase.get("time")) + # results are not as easily extracted but must be processed further + metadata = metadata | read_result_table(list(testcase.find("system-out").itertext())) + return metadata + +def is_unit_test(testcase: ET.Element) -> bool: + # crude test if the element belongs to a unit-test + return "_cpp" in testcase.get('name') + +def get_all_xml_files(directory: str = '.') -> list[str]: + # search the folder containing all the 
artifacts and list the paths of expected test-reports + result = [] + try: + content = os.listdir(directory) + except FileNotFoundError as e: + print(e) + return result + for entry in content: + if os.path.isdir(directory+'/'+entry): + result = result + get_all_xml_files(directory+'/'+entry) + if entry.endswith('.xml'): + file = directory+'/'+entry if directory != '.' else entry + result.append(file) + return result + +def get_ctest_target(log_name: str) -> str: + # extracts name of ctest target from junit log + # log_name has the form "path/to/log/file/target_junit.xml", and target is expected + log = log_name.split('/')[-1] + return log.removesuffix("_junit.xml") + +def find_most_recent_results(target: str, name: str, compiler: str, cpp_standard: str, database: sqlite3.Connection) -> list[int]: + cursor = database.cursor() + cursor.execute(""" + WITH combination AS ( + SELECT workflow_info.repo, workflow_info.run_id, workflow_info.run_attempt, workflow_info.time + FROM test_results INNER JOIN workflow_info ON + workflow_info.repo = test_results.repo + AND workflow_info.run_id = test_results.run_id + AND workflow_info.run_attempt = test_results.run_attempt + WHERE test_results.ctest_target = ? AND test_results.name = ? AND test_results.compiler = ? AND test_results.cpp_standard = ? + ) + SELECT repo, run_id, run_attempt FROM combination + ORDER BY time DESC, run_id DESC, run_attempt DESC + LIMIT 1; + """,(target,name,compiler,cpp_standard)) + result = cursor.fetchone() + if result is None: + # if no recent run is found, data need to be stored + return [] + repo, run_id, run_attempt = result + cursor.execute(""" + SELECT passed_cases, failed_cases, skipped_cases, passed_assertions, failed_assertions + FROM test_results WHERE + ctest_target = ? AND name = ? AND compiler = ? AND cpp_standard = ? AND repo = ? AND run_id = ? AND run_attempt = ? 
+ """, (target,name,compiler,cpp_standard,repo,run_id,run_attempt)) + result = cursor.fetchone() + return [] if result is None else list(result) + +########################## +# Below starts the script. +########################## + +if __name__ == "__main__": + + # check if argument was delivered + if len(sys.argv) != 2: + raise RuntimeError("Expected status of workflow as argument. Aborting!") + # expected argument: status of workflow + # check if the argument has the expected form + status = sys.argv[1] + if status not in ["successful", "failed", "cancelled"]: + raise RuntimeError("The input does not match the expected format! Permissible are 'successful', 'failed' and 'cancelled'. Aborting!") + + # get environment variables + try: + environment = setup_environment_variables() + except RuntimeError as e: + raise RuntimeError("Critical error: Can not uniquely identify environment data! Aborting recording of data.") + + # Step 1: store metadata of workflow run persistently + + # initiate connection to database + connector = sqlite3.connect("TSF/MemoryEfficientTestResultData.db") + connector.execute("PRAGMA foreign_keys = ON") + + # load expected tables + # table workflow_info contains metadata of workflow and is updated every time + command = ( + "CREATE TABLE IF NOT EXISTS workflow_info(", + "repo TEXT, ", # repository + "run_id INT, ", # ID of workflow run + "run_attempt INT, ", # Attempt-number of workflow run + "status TEXT ", # Termination-status of workflow + "CHECK(status IN ('successful', 'failed', 'cancelled')) DEFAULT 'failed', ", + "time INT, ", # the time that is associated to this workflow run + "PRIMARY KEY(repo, run_id, run_attempt))" + ) + connector.execute(''.join(command)) + # table test_results contains detailed results for each individual test + command = ( + "CREATE TABLE IF NOT EXISTS test_results(", + "ctest_target TEXT, ", # name of the ctest target located in ci.cmake + "name TEXT, ", # name of the test + "execution_time REAL, ", # 
execution time in seconds + "compiler TEXT, ", # compiler information + "cpp_standard TEXT, ", # cpp-standard + "passed_cases INT, ", # number of passed test-cases + "failed_cases INT, ", # number of failed test-cases + "skipped_cases INT, ", # number of skipped test-cases + "passed_assertions INT, ", # number of passed assertions + "failed_assertions INT, ", # number of failed assertions + "repo TEXT, ", # repository + "run_id INT, ", # ID of workflow run + "run_attempt INT, ", # Attempt-number of workflow run + "FOREIGN KEY(repo, run_id, run_attempt) REFERENCES workflow_info);" + ) + connector.execute(''.join(command)) + cursor = connector.cursor() + + # Count number of rows as heuristic size-checker. + # In case that the update-check fails, and every result is stored, allow for approximately 26 complete results to be stored + cursor.execute("SELECT MAX(COALESCE((SELECT MAX(rowid) FROM workflow_info),0),COALESCE((SELECT MAX(rowid) FROM test_results),0));") + if cursor.fetchone()[0] > 1e5: + connector.close() + raise RuntimeError("The persistent data storage is too large! Please move persistent data to external storage.") + + # fill in metadata + # OBSERVE: This script expects the status of the github workflow as argument + repo = environment.get('GITHUB_REPOSITORY') + run_id = environment.get('GITHUB_RUN_ID') + run_attempt = environment.get('GITHUB_RUN_ATTEMPT') + time = int(datetime.now(timezone.utc).timestamp()) + command = "INSERT INTO workflow_info VALUES(?,?,?,?,?)" + cursor.execute(command,(repo, run_id, run_attempt, status, time)) + # Don't forget to save! + connector.commit() + + # Step 2: generate report of most recent test run and update persistent storage if necessary + + # initialise database connection + conn = sqlite3.connect("MemoryEfficientTestResults.db") + cur = conn.cursor() + # add the expected table + # the table TestResults.test_results differs from TestResultData.test_results in that the metadata of the commit are not saved. 
+ command = ( + "CREATE TABLE IF NOT EXISTS test_results(", + "ctest_target TEXT, ", # name of the ctest target located in ci.cmake + "name TEXT, ", # name of the test + "execution_time REAL, ", # execution time in seconds + "compiler TEXT, ", # compiler information + "cpp_standard TEXT, ", # cpp-standard + "passed_cases INT, ", # number of passed test-cases + "failed_cases INT, ", # number of failed test-cases + "skipped_cases INT, ", # number of skipped test-cases + "passed_assertions INT, ", # number of passed assertions + "failed_assertions INT", # number of failed assertions + ")" + ) + conn.execute(''.join(command)) + + # Load my artifacts + junit_logs = get_all_xml_files("./my_artifacts/") + + #extract data + for junit_log in junit_logs: + tree = ET.parse(junit_log) + file_root = tree.getroot() + testsuite = next(file_root.iter('testsuite'), None) + if testsuite is None: + print(f"Error: Could not find testsuite data in {junit_log}.") + continue + for testcase in (case for case in file_root.iter('testcase') if is_unit_test(case)): + metadata = get_metadata(testcase) + target = get_ctest_target(junit_log) + compiler = testsuite.get('name') + more_compiler_info = [case for case in file_root.iter('testcase') if case.get("name") == "cmake_target_include_directories_configure"] + if len(more_compiler_info) != 0: + compiler_information = more_compiler_info[0] + information = list(compiler_information.find("system-out").itertext())[0].split('\n')[0] + compiler = information.replace("-- The CXX compiler identification is ","") + name = metadata.get('name') + cpp_standard = metadata.get('standard') + data = ( + target, + name, + metadata.get('execution time'), + compiler, + cpp_standard, + metadata.get('passed test cases'), + metadata.get('failed test cases'), + metadata.get('skipped test cases'), + metadata.get('passed assertions'), + metadata.get('failed assertions') + ) + command ="INSERT INTO test_results VALUES(?,?,?,?,?,?,?,?,?,?);" + cur.execute(command, data) 
+ conn.commit() + most_recently_stored_results = find_most_recent_results(target,name,compiler,cpp_standard,connector) + current_results = [metadata.get('passed test cases'),metadata.get('failed test cases'),metadata.get('skipped test cases'),metadata.get('passed assertions'),metadata.get('failed assertions')] + if (len(most_recently_stored_results) != 5) or any(most_recently_stored_results[i]!=current_results[i] for i in range(0,5)): + data = data + (repo, run_id, run_attempt) + command ="INSERT INTO test_results VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?);" + cursor.execute(command,data) + + + # terminate connection to temporary database + # don't forget to commit the changes + conn.commit() + conn.close() + + # terminate connection to persistent database + # don't forget to commit the changes again, for good measure + connector.commit() + connector.close() diff --git a/TSF/scripts/clean_trudag_output.py b/TSF/scripts/clean_trudag_output.py new file mode 100644 index 0000000000..e7222b3575 --- /dev/null +++ b/TSF/scripts/clean_trudag_output.py @@ -0,0 +1,122 @@ +import os +import re +import sys +import html + +# The trudag report is not in standard markdown format, so we need to clean it up. +# This script will remove specific patterns from the markdown files in the current directory and its subdirectories +# It requires 1 command line argument which is the root folder we want to be processed + +# List of regex patterns to remove only the matched part, not the whole line +replace_by_empty_string_patterns = [ + r"\{class[:=][^}]*\}", # {class:...} or {class=...} with any attributes inside + r"\{style[:=][^}]*\}", # {style:...} or {style=...} with any attributes inside + r"\{\%[\s]*raw[\s]*\%\}", # {% raw %} + r"\{\%[\s]*endraw[\s]*\%\}", # {% endraw %} + r"#{1,3}\s*\{[^}]*\}", # one to three # followed by {: ... } + r"\{\.[^}]*\}", # {.something ... } + r"\{ \.[^}]*\}", # { .something ... } + r"\{: [^}]*\}", # {: ... 
} +] + +remove_line_patterns = [ + r"localplugins\.CPPTestReference", # Lines containing localplugins.CPPTestReference + r'"Click to view reference"', # "Click to view reference" lines + r'\?\?\? example', # Lines "??? example "Graph Data as Table"" are not needed +] + + +compiled_patterns_replace_by_empty_string = [re.compile(p) for p in replace_by_empty_string_patterns] +compiled_patterns_remove_line = [re.compile(p) for p in remove_line_patterns] + +def clean_line(line): + while any((re.search(pat,line) is not None) for pat in compiled_patterns_replace_by_empty_string): + for pat in compiled_patterns_replace_by_empty_string: + line = pat.sub("", line) + return line.lstrip() if line.lstrip().startswith("|") else line + +def remove_line(line): + return any((re.search(pat,line) is not None) for pat in compiled_patterns_remove_line) + +def remove_invalid_markdown_start(lines: list[str]) -> list[str]: + """ + Remove file start of the form: + ' + + --- + ' as this leads to errors in doc-as-code + """ + if len(lines) > 2: + first_two_lines_empty = not lines[0].strip() and not lines[1].strip() + if first_two_lines_empty and lines[2].startswith("---"): + return lines[3:] + return lines + +def insert_line(filepath): + """Insert a new line explaining the abbreviation ABBR in '## Compliance for ABBR' in the trustable report.""" + with open(filepath, 'r', encoding='utf-8') as f: + lines = f.readlines() + + modified = False + updated_lines = [] + for line in lines: + updated_lines.append(line) + stripped_line = line.strip() + if stripped_line.startswith("## Compliance for"): + if stripped_line == '## Compliance for AOU': + updated_lines.append("This presents the compliance for the _Assumptions of Use_ (AOU) in tabular form.\n") + elif stripped_line == '## Compliance for JLEX': + updated_lines.append("This presents the compliance for the _JSON-Library Expectations_ (JLEX) in tabular form.\n") + elif stripped_line == '## Compliance for JLS': + updated_lines.append("This 
presents the compliance for the _JSON-Library Statements_ (JLS) in tabular form.\n") + elif stripped_line == '## Compliance for NJF': + updated_lines.append("This presents the compliance for the _No JSON Faults_ (NJF) in tabular form.\n") + elif stripped_line == '## Compliance for NPF': + updated_lines.append("This presents the compliance for the _No Parsing Faults_ (NPF) in tabular form.\n") + elif stripped_line == '## Compliance for PJD': + updated_lines.append("This presents the compliance for the _Parse JSON Data_ (PJD) in tabular form.\n") + elif stripped_line == '## Compliance for TA': + updated_lines.append("This presents the compliance for the _Trustable Assertions_ (TA) in tabular form.\n") + elif stripped_line == '## Compliance for TIJ': + updated_lines.append("This presents the compliance for the _Throw Ill-Formed JSON_ (TIJ) in tabular form.\n") + elif stripped_line == '## Compliance for TRUSTABLE': + updated_lines.append("This presents the ultimate trustability score for nlohmann/json.\n") + elif stripped_line == '## Compliance for TT': + updated_lines.append("This presents the compliance for the _Trustable Tenets_ (TT) in tabular form.\n") + elif stripped_line == '## Compliance for WFJ': + updated_lines.append("This presents the compliance for _Well Formed JSON_ (WFJ) in tabular form.\n") + modified = True + if modified: + with open(filepath, 'w', encoding='utf-8') as f: + f.writelines(updated_lines) + +def clean_file(filepath): + with open(filepath, 'r', encoding='utf-8') as f: + lines = f.readlines() + lines = remove_invalid_markdown_start(lines) + new_lines = [clean_line(line) for line in lines] + new_lines = [line for line in new_lines if not remove_line(line)] # Remove empty lines + new_lines = [line[2:] if line.startswith('\t\t') else line for line in new_lines] + if new_lines != lines: + with open(filepath, 'w', encoding='utf-8') as f: + f.writelines(new_lines) + print(f"Cleaned: {filepath}") + +def main(): + input_path = '.' 
+ if(len(sys.argv) != 2): + sys.exit('ERROR:' + sys.argv[0] + ' expects 1 command line argument which is the processing path. Instead ' + str(len(sys.argv) - 1) + ' arguments were passed.') + else: + input_path = sys.argv[1] + + for root, _, files in os.walk(input_path): + for file in files: + # all .md files are potentially ill-formatted + if file.endswith('.md'): + clean_file(os.path.join(root, file)) + # abbreviations are only explained in the main report + if file == "trustable_report_for_Software.md": + insert_line(os.path.join(root, file)) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/TSF/scripts/fetch_json.cpp b/TSF/scripts/fetch_json.cpp new file mode 100644 index 0000000000..583d5938cf --- /dev/null +++ b/TSF/scripts/fetch_json.cpp @@ -0,0 +1,292 @@ +#include +#include +#include +#include +#include +#include + +using namespace std; + +/* +This is a command line tool that fetches the .json file-names from the file unit-testsuites.cpp and +prints the result into a file using the desired format for use in the items in the trustable graph. + +User inputs are asked from the command line and are + - relative path to file from which names are read (i.e. unit-testsuites.cpp) + - relative path to output file + - name of the test + - description of the test + - whether a single line at a time is to be read or all lines from a starting line until an ending line + - these lines + +Assumptions: + - A .json filename is the only value within quotation marks in a line containing "TEST_DATA_DIRECTORY". + - The provided lines are valid. + +Dangers: + - output file is overwritten by default. 
+ +*/ + +string replace_tab_with_spaces(const string& input, const int num_of_spaces = 4); +string wrapper_for_trudag(string evidence, string wrap_left = "\t\t\t- \"", string wrap_right = "\"\n"); +bool get_json_from_line(int line, ifstream& source, ofstream& target); +bool read_line_by_line(ifstream& source, ofstream& target); +bool read_region(ifstream& source, ofstream& target); + + +// Horizontal tabs are automatically replaced by four spaces in VSCode except if a '\t' is pasted into the file. +// This function does the same. +string replace_tab_with_spaces(const string& input, const int num_of_spaces){ + string output = input; + string spaces = " "; + regex tab("\t"); + if (num_of_spaces!=4){ + spaces = ""; + for (int i = 0; iend_line) { + cout << "Invalid configuration: start-line is after end-line.\n"; + return false; + } + for (int line = start_line; line <= end_line; line++){ + cout << "Reading line " << line << "\n"; + bool success; + try { + success = get_json_from_line(line,source,target); + } catch (const invalid_argument& ia){ + cout << ia.what(); + return false; + } catch (const out_of_range& oor) { + cout << oor.what(); + return false; + } + if (!success) { + cout << "Could not find json reference in line " << line << " !\n"; + } + } + cout << "Add another region? y/n? "; + getline(std::cin, ans); + if (ans != "" && ans != "y" && ans != "Y"){ + break; + } + } + return true; +} + +int main(){ + // define standard paths + string path_to_testsuite = "../../tests/src/unit-testsuites.cpp"; + string path_to_evidence = "temp.md"; + + // Setup source + cout << "Read from standard file " << path_to_testsuite << " y/n? "; + string ans; + getline(std::cin, ans); + if (ans!="y" && ans!="Y" && ans!=""){ + cout << "Please insert file to read from: "; + getline(std::cin, path_to_testsuite); + } + cout << "Attempting to read from " << path_to_testsuite << "..." 
<< "\n"; + ifstream testsuite; + try { + testsuite.open(path_to_testsuite); + } catch (const ifstream::failure& e) { + cout << "Could not open file " << path_to_testsuite << "\n"; + return -1; + } + if (testsuite.fail()||!testsuite.is_open()){ + cout << "Could not open file " << path_to_testsuite << "\n"; + testsuite.close(); + return -1; + } else { + cout << "Reading successful\n\n"; + } + + // Setup target + cout << "Write to standard file " << path_to_evidence << " y/n? "; + getline(std::cin, ans); + if (ans!="y" && ans!="Y" && ans!=""){ + cout << "Please specify where to write results: "; + getline(std::cin, path_to_evidence); + } + cout << "Opening " << path_to_evidence << "..." << "\n"; + ofstream evidence; + try { + evidence.open(path_to_evidence); + } catch (const ifstream::failure& e) { + cout << "Could not open file " << path_to_evidence; + testsuite.close(); + evidence.close(); + return -1; + } + cout << "Done!\n\n"; + + // initialise target + + // get name + string description; + while (true){ + evidence << replace_tab_with_spaces("\t\t- type: JSON_testsuite\n"); + cout << "Testname "; + getline(std::cin, ans); + cout << "Initialising collection of evidence for " << ans << "\n"; + evidence << replace_tab_with_spaces("\t\t name: \"") << ans << "\"\n"; + evidence << replace_tab_with_spaces("\t\t path: \""); + if (path_to_testsuite.substr(0,3)=="../") { + evidence << "/workspaces/json/" << path_to_testsuite.substr(3); + } else { + evidence << "/workspaces/json/scripts/" << path_to_testsuite; + } + evidence << replace_tab_with_spaces("\"\n\t\t test_suite_paths:\n"); + cout << "Description of the test: "; + getline(std::cin, description); + + cout << "Read single lines? y/n? "; + getline(std::cin, ans); + if (ans!="y"&&ans!="Y"&&ans!="") { + cout << "Read region? y/n? 
"; + getline(std::cin, ans); + if (ans!="y"&&ans!="Y"&&ans!="") { + testsuite.close(); + evidence.close(); + return -1; + } + if (!read_region(testsuite, evidence)){ + testsuite.close(); + evidence.close(); + return -1; + } + } else { + if (!read_line_by_line(testsuite, evidence)){ + testsuite.close(); + evidence.close(); + return -1; + } + } + evidence << replace_tab_with_spaces("\t\t description: \"") << description << "\"\n"; + cout << "Add another test? y/n? "; + getline(std::cin, ans); + if (ans!="y"&&ans!="Y"&&ans!="") { + break; + } + } + testsuite.close(); + evidence.close(); + return 0; +} \ No newline at end of file diff --git a/TSF/scripts/generate_documentation.sh b/TSF/scripts/generate_documentation.sh new file mode 100755 index 0000000000..679ad82262 --- /dev/null +++ b/TSF/scripts/generate_documentation.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +# generate TSF report +TSF_SCRIPT_FOLDER=$(dirname "$(realpath $0)") +# The first input for this script is the base_url used in plot_partial_graphs.py +# for local testing, "http://localhost:8000" is recommended. 
+$TSF_SCRIPT_FOLDER/generate_report.sh $1 + +# prepare docs +bazel run //:docs + +# run http server +python3 -m http.server --directory _build diff --git a/TSF/scripts/generate_list_of_misbehaviours.py b/TSF/scripts/generate_list_of_misbehaviours.py new file mode 100644 index 0000000000..c6cac25d12 --- /dev/null +++ b/TSF/scripts/generate_list_of_misbehaviours.py @@ -0,0 +1,45 @@ +import json +from datetime import datetime, timezone +from identify_nlohmann_issue import comment_nlohmann_misbehaviours + +version = "3.12.0" +release_date = "2025-04-11T08:43:39Z" + +if __name__ == "__main__": + release_time = datetime.strptime(release_date,"%Y-%m-%dT%H:%M:%SZ").replace(tzinfo=timezone.utc).timestamp() + + # fetch relevant issues + with open("raw_open_issues.json") as list_1: + all_open_issues = json.load(list_1) + relevant_open_issues = [all_open_issues[i] for i in range(0,len(all_open_issues)) + if len(all_open_issues[i].get("labels",[]))!=0 + and any(label.get("name") == "kind: bug" for label in all_open_issues[i].get("labels", [])) + ] + with open("raw_closed_issues.json") as list_2: + all_closed_issues = json.load(list_2) + relevant_closed_issues = [all_closed_issues[i] for i in range(0,len(all_closed_issues)) + if len(all_closed_issues[i].get("labels",[]))!=0 + and any(label.get("name") == "kind: bug" for label in all_closed_issues[i].get("labels", [])) + and datetime.strptime(all_closed_issues[i].get("createdAt","2000-01-01T00:00:00Z"),"%Y-%m-%dT%H:%M:%SZ") + .replace(tzinfo=timezone.utc) + .timestamp() + >=release_time + ] + + print("# Misbehaviours Report\n") + print(f"This report lists known misbehaviours or bugs of version {version} of the nlohmann/json repository.") + print("The misbehaviours are compiled from github issues of the nlohmann/json repository, and link to each corresponding issue.\n") + + + print("## Open issues\n") + for issue in relevant_open_issues: + print(f"### [#{issue.get('number')}]({issue.get('url')})\n- **Title:** 
{issue.get('title')}\n- **State:** {issue.get('state')}\n- **Created At:** {issue.get('createdAt')}\n") + comment_nlohmann_misbehaviours(int(issue.get("number"))) + print("\n") + + print(f"\n## Closed Issues (since version {version})\n") + for issue in relevant_closed_issues: + print(f"### [#{issue.get('number')}]({issue.get('url')})\n- **Title:** {issue.get('title')}\n- **State:** {issue.get('state')}\n- **Created At:** {issue.get('createdAt')}\n") + comment_nlohmann_misbehaviours(int(issue.get("number"))) + print("\n") + diff --git a/TSF/scripts/generate_list_of_tests.py b/TSF/scripts/generate_list_of_tests.py new file mode 100644 index 0000000000..000775d740 --- /dev/null +++ b/TSF/scripts/generate_list_of_tests.py @@ -0,0 +1,208 @@ +from pathlib import Path +import sqlite3 + +class ListOfTestsGenerator: + + def __init__(self): + self._database = "./artifacts/MemoryEfficientTestResults.db" + self._table = "test_results" + self._test_files = ["./tests/src", "./TSF/tests"] + + def set_database(self,db:str): + self._database = db + + def set_table(self,table:str): + self._table = table + + def set_sources(self,sources:list): + self._test_files = sources + + @staticmethod + def compile_string(items: list[str]) -> str: + # input: list of strings representing the structure of TEST_CASE, SECTION etc., + # e.g. items = ["lexer class", "scan", "literal names"] + # output: the last item of the list, representing the most recent SECTION, + # indented as in the source code + # throws error if input is empty + if len(items) == 0: + raise RuntimeError("Received empty structural list; nonempty list expected.") + result = "" + for _ in range(1, len(items)): + result += " " + if items: + result += "* " + items[-1] + return result + + @staticmethod + def extract_quotation(s: str) -> str: + # input: string containing at least one quoted substring, e.g. s = "my \"input\"" + # output: the first quoted substring of the input + # throws error if no quoted substring can be found. 
+ first = s.find('"') + if first == -1: + raise RuntimeError("Expected quotation mark; none were detected.") + second = s.find('"', first + 1) + if second == -1: + raise RuntimeError("Expected quotation marks; only one was detected.") + return s[first + 1 : second] + + @staticmethod + def transform_test_file_to_test_name(test_file: str) -> str: + return "test-"+"-".join((test_file.split('.')[0]).split('-')[1:]) + + @staticmethod + def head_of_list() -> str: + return """## List of all unit-tests with test environments + + This list contains all unit-tests possibly running in this project. + These tests are compiled from the source-code, where the individual unit-tests are arranged in TEST_CASEs containing possibly nested SECTIONs. + To reflect the structure of the nested sections, nested lists are utilised, where the top-level list represents the list of TEST_CASEs. + + It should be noted that not all unit-tests in a test-file are executed with every compiler-configuration. + """ + + @staticmethod + def remove_and_count_indent(s: str) -> tuple[int, str]: + # input: string with possibly leading whitespace (space of horizontal tab) + # output: the number of leading spaces and the string with leading whitespace removed; + # tab counted as four spaces + cnt = 0 + i = 0 + n = len(s) + while i < n and (s[i] == " " or s[i] == "\t"): + if s[i] == " ": + cnt += 1 + elif s[i] == "\t": + cnt += 4 + i += 1 + return (cnt, s[i:]) + + @staticmethod + def extract_test_structure(file_path: Path) -> str: + # input: path to a file potentially containing unit-tests + # output: the extracted arrangement of TEST_CASE and SECTION + # in the form of nested markdown lists + + indent = 0 # the indent of the currently read line + current_indent = 0 # the indent of the last TEST_CASE or SECTION + current_path = [] # the current path + lines_out = [] # the collection of lines to be outputted + + # open file_path as read-only, and process line by line + with file_path.open("r", encoding="utf-8", 
errors="replace") as source: + for line in source: + # count and remove leading whitespace + indent, trimmed = ListOfTestsGenerator.remove_and_count_indent(str(line)) + + # check whether we have found a TEST_CASE + if trimmed.startswith("TEST_CASE(") or trimmed.startswith("TEST_CASE_TEMPLATE(") or trimmed.startswith("TEST_CASE_TEMPLATE_DEFINE("): + # remember the current indent + current_indent = indent + # TEST_CASE is always the head of a new arrangement-structure + # remove stored structure + current_path.clear() + # extract name of TEST_CASE and append path + current_path.append(ListOfTestsGenerator.extract_quotation(trimmed)) + lines_out.append(ListOfTestsGenerator.compile_string(current_path)) + + # check whether we have found a SECTION + if trimmed.startswith("SECTION("): + # update path to reflect arrangement of current section + while indent <= current_indent and current_path: + current_path.pop() + current_indent -= 4 + # remember the current indent + current_indent = indent + # extract name of SECTION and append path + current_path.append(ListOfTestsGenerator.extract_quotation(trimmed)) + lines_out.append(ListOfTestsGenerator.compile_string(current_path)) + + # process extracted lines + return ("\n".join(lines_out) + "\n") if lines_out else "" + + def extract_recent_test_environments(self) -> dict: + fetched_data = dict() + try: + # initialise connection to test result database + connector = sqlite3.connect(self._database) + cursor = connector.cursor() + # verify that the expected table does exist + cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name = ?;",(self._table,)) + if cursor.fetchone() is None: + raise RuntimeError(f"Fatal Error: Could not find table {self._table} in database {self._database}.") + except sqlite3.Error as e: + raise RuntimeError(f"Fatal Error accessing database {self._database}: {e}") + # get all test-files from recent test executions + command = f"SELECT name FROM {self._table};" + cursor.execute(command) 
+ raw_cases = cursor.fetchall() + cases = set([raw_case[0] for raw_case in raw_cases]) + # for each test-file + for case in cases: + case_data = dict() + # get the test-environments + command = f"SELECT compiler, cpp_standard FROM {self._table} WHERE name = ? and skipped_cases == 0" + cursor.execute(command,(case,)) + results = cursor.fetchall() + case_data["noskip"] = [{"compiler":result[0], "standard":result[1]} for result in results] + # some test-cases are skipped with certain environments + # It is unclear from the log, which cases are skipped; + # we leave this to the interested reader + command = f"SELECT compiler, cpp_standard, skipped_cases FROM {self._table} WHERE name = ? and skipped_cases != 0" + cursor.execute(command, (case,)) + results = cursor.fetchall() + case_data["skip"] = [{"compiler": result[0], "standard": result[1], "skipped": result[2]} for result in results] + fetched_data[case] = case_data + return fetched_data + + def fetch_all_test_data(self): + # inputs: path(s) to directory potentially containing some test-data + extracted_test_data = [] + recent_test_data = self.extract_recent_test_environments() + for arg in self._test_files: + p = Path(arg) + if p.is_file() and p.suffix == ".cpp" and p.name.startswith("unit-"): + extracted_test_data.append((p.name,ListOfTestsGenerator.extract_test_structure(p))) + elif p.is_dir(): + for entry in p.rglob("*"): + if entry.is_file() and entry.suffix == ".cpp" and entry.name.startswith("unit-"): + extracted_test_data.append((entry.name,ListOfTestsGenerator.extract_test_structure(entry))) + extracted_test_data.sort(key= lambda x: x[0]) + result = ListOfTestsGenerator.head_of_list() + for test_file, list_of_tests in extracted_test_data: + result += f"\n\n### List of tests in file {test_file}\n\n" + result += list_of_tests + result += "\n\n" + if recent_test_data.get(ListOfTestsGenerator.transform_test_file_to_test_name(test_file), None) is None: + result += "Unfortunately, none of the following tests 
seems to have been executed. Very strange indeed!\n\n" + else: + if recent_test_data.get(ListOfTestsGenerator.transform_test_file_to_test_name(test_file)).get("noskip",None) is not None: + if len(recent_test_data.get(ListOfTestsGenerator.transform_test_file_to_test_name(test_file)).get("noskip")) != 0: + result += "\nAll tests in this file were run in the following configurations:\n\n" + for datum in recent_test_data.get(ListOfTestsGenerator.transform_test_file_to_test_name(test_file)).get("noskip"): + result += "* " + result += datum.get("compiler",None) + result += " with standard " + result += datum.get("standard",None) + result += "\n" + if recent_test_data.get(ListOfTestsGenerator.transform_test_file_to_test_name(test_file)).get("skip",None) is not None: + if len(recent_test_data.get(ListOfTestsGenerator.transform_test_file_to_test_name(test_file)).get("skip")) != 0: + result += "\nIn the following configuration, however, some test-cases were skipped:\n\n" + for datum in recent_test_data.get(ListOfTestsGenerator.transform_test_file_to_test_name(test_file)).get("skip"): + result += "* " + how_many = datum.get("skipped",None) + result += str(how_many) + if how_many == 1: + result += " test case was skipped when using " + else: + result += " test cases were skipped when using " + result += datum.get("compiler",None) + result += " with standard " + result += datum.get("standard",None) + result += "\n" + return result + +if __name__ == "__main__": + generator = ListOfTestsGenerator() + with open("./TSF/docs/list_of_test_environments.md",'w') as f: + print(generator.fetch_all_test_data(),file=f) \ No newline at end of file diff --git a/TSF/scripts/generate_report.sh b/TSF/scripts/generate_report.sh new file mode 100755 index 0000000000..d2f6ae50d2 --- /dev/null +++ b/TSF/scripts/generate_report.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +# global variables +TSF_SCRIPT_FOLDER=$(dirname "$(realpath $0)") +TSF_FOLDER="$TSF_SCRIPT_FOLDER/.." 
+TSF_REPORT_FOLDER="$TSF_FOLDER/docs/generated" + +# cleanup previously generated content if exists +if [ -d "$TSF_REPORT_FOLDER" ]; then + rm -Rf "$TSF_REPORT_FOLDER" +fi + +# create output folder +mkdir -p "$TSF_REPORT_FOLDER" # -p ensures no error if the folder already exists + +# generate TSF report +echo "Generating TSF report in: $TSF_REPORT_FOLDER" +trudag publish --validate --figures --output-dir "$TSF_REPORT_FOLDER" --dump data_store || exit 1 + +# generate TSF graph +trudag plot -o "$TSF_REPORT_FOLDER/graph.svg" --url "$1/generated" || exit 1 + +# cleanup previously generated content if exists +if [ -f "$TSF_REPORT_FOLDER/trustable_graph.rst" ]; then + rm "$TSF_REPORT_FOLDER/trustable_graph.rst" + touch "$TSF_REPORT_FOLDER/trustable_graph.rst" +fi +# plot all partial graphs with links based on the url given in the first input +# in the workflow publish_documentation.yml, this input is https://${OWNER_NAME}.github.io/${REPO_NAME}/main +python3 "$TSF_SCRIPT_FOLDER/plot_partial_graphs.py" "$1" ||exit 1 + + +# cleanup TSF report content from trudag unwanted artifacts +echo "Cleaning up TSF report from trudag unwanted artifacts" +python3 "$TSF_SCRIPT_FOLDER/clean_trudag_output.py" "$TSF_REPORT_FOLDER" diff --git a/TSF/scripts/generate_subgraph_plots.sh b/TSF/scripts/generate_subgraph_plots.sh new file mode 100755 index 0000000000..5fab970c80 --- /dev/null +++ b/TSF/scripts/generate_subgraph_plots.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +# Specify the folder where .md files are located (provide the path here) +TARGET_FOLDER="docs/s-core/trustable/assertions" + +# Ensure the folder exists +if [[ ! -d "$TARGET_FOLDER" ]]; then + echo "The folder $TARGET_FOLDER does not exist." 
+    exit 1
+fi
+
+# Create the 'temp' directory at the top level (relative to this script)
+TEMP_FOLDER="temp/graphs"
+mkdir -p "$TEMP_FOLDER" # -p ensures no error if the folder already exists
+
+# Iterate through all .md files in the specified folder
+for file in "$TARGET_FOLDER"/*.md; do
+    # Check if the file exists (to handle cases where there are no .md files)
+    if [[ -f "$file" ]]; then
+        # Extract the filename without the .md extension
+        filename=$(basename "$file" .md)
+
+        # Create the output file name in the 'temp' folder
+        output_file="${TEMP_FOLDER}/${filename}_subgraph.svg"
+
+        # Run the command on the file and save to the 'temp' folder
+        echo "Processing file: $filename"
+        trudag plot --pick "$filename" 0: -o "$output_file"
+    fi
+done
+
+output_file="${TEMP_FOLDER}/Trustable.svg"
+trudag plot -o "$output_file"
+
+echo "All files processed. Output saved in the 'temp/graphs' directory." diff --git a/TSF/scripts/identify_nlohmann_issue.py b/TSF/scripts/identify_nlohmann_issue.py new file mode 100644 index 0000000000..8a73b8b371 --- /dev/null +++ b/TSF/scripts/identify_nlohmann_issue.py @@ -0,0 +1,33 @@ +import sys
+
+def comment_nlohmann_misbehaviours(id: int) -> None:
+    with open("./TSF/docs/nlohmann_misbehaviours_comments.md") as f:
+        for line in f:
+            # look for the issue id
+            cols = line.split("|")
+            if cols[0].strip() == str(id) and len(cols)>2:
+                # Does the issue apply to us?
+                if cols[1].strip().upper() == "NO":
+                    candidate = "This issue does not apply to the use of nlohmann/json in Eclipse S-CORE. "
+                else:
+                    candidate = ""
+                # read the candidate comment
+                candidate += cols[2].strip()
+                # if there is something to comment, do it
+                if candidate != "":
+                    print(f"- **Comment:** {candidate}")
+                # each issue can only have one comment listed
+                return
+    # if there is no comment to be found, nothing is done
+
+
+if __name__ == "__main__" and len(sys.argv)>1:
+    # split input into what is assumed to be the numbers of the issues. 
+ inputs = sys.argv[1:] + # try to parse inputs into integer + try: + numerical_inputs = [int(i) for i in inputs] + except ValueError: + raise RuntimeError("Only integer numbers are accepted to identify issues.") + + list(map(comment_nlohmann_misbehaviours,numerical_inputs)) diff --git a/TSF/scripts/plot_partial_graphs.py b/TSF/scripts/plot_partial_graphs.py new file mode 100644 index 0000000000..c4f19ac194 --- /dev/null +++ b/TSF/scripts/plot_partial_graphs.py @@ -0,0 +1,173 @@ +import sys +import pydot +import warnings +import graphviz as gz +from trudag.dotstop.core.graph import TrustableGraph, PydotGraph +import trudag.dotstop.core.graph.graph_factory as factory +import trudag.plot as plt +from pathlib import Path + +def get_my_url(vertex: str, base_url: str, full_graph: TrustableGraph) -> str: + # This function compiles the url that is attached to a node in the custom representation of the trustable graph. + if vertex in full_graph._graph.leaf_nodes(): + # leaf nodes are linked to their entry in the documentation + return base_url+"/generated/"+full_graph.get_item(vertex).document+".html#"+vertex.lower() + else: + # non-leaf nodes are linked to their single-layer subgraphs + return base_url+"/_images/custom_"+vertex+"_graph.svg" + +def get_pydot_graph(vertices: list[str], edges: list[tuple[str,str]]) -> PydotGraph: + # generates a PydotGraph object from a given collection of nodes and edges + # first, test if the graph is even valid + if any((e not in vertices) or (f not in vertices) for (e,*_,f) in edges): + raise RuntimeError("Fatal Error: Edges may only use existing vertices.") + # generate string in DOT language + graph = "digraph G {" + for vertex in vertices: + graph += f"\"{vertex}\";" + for source, target in edges: + graph += f"\"{source}\" -> \"{target}\";" + graph += "}" + # generate PydotGraph from DOT string + return PydotGraph.from_string(graph) + +def get_subgraph(full_graph: TrustableGraph, vertices: list[str]) -> TrustableGraph: + # generate 
the subgraph of full_graph spanned by a given list of vertices + # first, test if the vertices are valid + if any(vertex not in full_graph._graph.nodes() for vertex in vertices): + raise RuntimeError("Fatal Error: Attempting to generate subgraph with foreign vertices!") + # get subgraph of underlying PydotGraph + edges = [(src,dst) for src, dst in full_graph._graph.edges() if src in vertices and dst in vertices] + graph = get_pydot_graph(vertices,edges) + # get items associated to vertices and generate TrustableGraph + nodes = [full_graph.get_item(vertex) for vertex in vertices] + return TrustableGraph(graph,nodes) + +def plot_all_single_layer_subgraphs(full_graph: TrustableGraph, path: list[str], base_url: str = "") -> list[tuple[str,int]]: + # recursively plots all "single-layer subgraphs" along a path + # path -- expected to be list of nodes of directed path in the trustable graph whose target is the last entry + # plots the subgraph generated by the path and the targets of all edges whose source is the target of the path + # for each of the latter, append the path and repeat as long as there are no further targets + # returns the list of all targets together with the list of their paths + + # if no path is inputted + if len(path) == 0: + # do nothing + return [] + result = [] + # get most recent target + bud = path[-1] + # find targets that have bud as source + new_children = full_graph._graph.successors(bud) + # if the bud is fruitful + if len(new_children) > 0: + # compile subgraph that emerges from the bud + vertices = path+new_children + my_graph = get_subgraph(full_graph,vertices) + # plot + plot_blank(my_graph,full_graph,base_url,"./TSF/docs/generated/custom_"+bud+"_graph.svg") + result.append([bud,len(path)]) + # iterate over new targets + for child in new_children: + new_path = path + [child] + result = result + plot_all_single_layer_subgraphs(full_graph,new_path,base_url) + return result + +def write_documentation(plots: list[tuple[str,int]]): + # 
generates the documentation so that pictures are shown online + # for ease of viewing, plots are sorted by length of the underlying path + sorted_plots = sorted(plots, key=lambda x: x[1]) + for bud, length in sorted_plots: + with open("./TSF/docs/generated/trustable_graph.rst", "a", encoding="utf-8") as documentation: + documentation.write("\n\n.. image:: custom_"+bud+"_graph.svg\n") + documentation.write("\t:alt: Root of the trustable graph\n\t:width: 6000px\n\n") + documentation.write("Trustable graph centered at "+bud) + +def plot_blank(graph: TrustableGraph, full_graph: TrustableGraph, base_url = "", name = "./graph.svg"): + # plot trustable graph by hand + # prepare the trustable graph as in trudag.plot.format_source_from_graph + # format trustable graph for plotting purposes + formatted_graph = pydot.graph_from_dot_data(str(graph))[0] + formatted_graph.set("rankdir", "TB") + formatted_graph.set("newrank", "true") + # increase vertical distance between the nodes for improved viewing experience + formatted_graph.set("ranksep", "2.0") + + # Remove edge and node sha's, required to support some plantuml servers. 
+ for element in formatted_graph.get_nodes() + formatted_graph.get_edges(): + if "sha" in element.get_attributes(): + element.get_attributes().pop("sha") + + for item in graph.items: + # remove non-normative-nodes + if not item.normative: + formatted_graph.del_node(pydot.quote_id_if_necessary(str(item))) + else: + formatted_node = formatted_graph.get_node( + pydot.quote_id_if_necessary(str(item)) + )[0] + formatted_node.set("label", plt.break_line_at(item.header(), 20)) + # add custom url + if name != "./TSF/docs/generated/TRUSTABLE-SOFTWARE.svg" or str(item) != "TRUSTABLE-SOFTWARE": + formatted_node.set("URL", get_my_url(str(item),base_url,full_graph)) + else: + formatted_node.set("URL", base_url+"/_images/graph.svg") + # Set target to avoid URLs opening within the image + formatted_node.set("target", "_top") + for key, value in plt.NODE_STYLE.items(): + formatted_node.set(key, value) + # plot + dot_source = gz.Source(formatted_graph.to_string()) + dot_source.format = 'svg' + dot_source.render(Path(name).with_suffix("")) + +def documentation_header(): + # This is the very top of trustable_graph.rst + return """ +.. _ta-analysis-subgraph: + +Trustable Graph +==================== + +The trustable graph is the graphical representation of the argumentation. + +.. image:: graph.svg + :alt: Trustable Graph + :width: 6000px + +This image presents the full trustable graph, in which each item links to its entry in the documentation. Smaller scale representations of arguments, which are navigable among each other, can be found below. 
+ """ + +def plot_orchestrator(full_graph: TrustableGraph, base_url: str = ""): + # orchestrate plottage and generation of documentation + # documentation is completely rewritten + # initialise the documentation and overwrite obsolete data if necessary + with open("./TSF/docs/generated/trustable_graph.rst", "w", encoding="utf-8") as documentation: + documentation.write(documentation_header()) + # get all roots and leafs of the graph + roots = full_graph._graph.root_nodes() + leafs = full_graph._graph.leaf_nodes() + for root in roots: + # if the root is an orphaned node, discard it + if root in leafs: + continue + write_documentation(plot_all_single_layer_subgraphs(full_graph,[root],base_url)) + +def main(base_url: str): + # build trustable graph from .dotstop.dot + full_trustable_graph = factory.build_trustable_graph(Path('.dotstop.dot'),Path('.')) + + # base_url is expected as argument from console execution + plot_orchestrator(full_trustable_graph,base_url) + +########################## +# Below starts the script. +########################## + +if __name__ == "__main__": + if len(sys.argv) != 2: + base_url = "" + warnings.warn("No base-url was transmitted. 
Hyperlinks amongst the partial graphs might be broken.") + else: + base_url = sys.argv[1] + main(base_url) \ No newline at end of file diff --git a/TSF/scripts/test_scripts.py b/TSF/scripts/test_scripts.py new file mode 100644 index 0000000000..0303569cb2 --- /dev/null +++ b/TSF/scripts/test_scripts.py @@ -0,0 +1,399 @@ +import pytest +import xml.etree.ElementTree as ET +import pydot +from trudag.dotstop.core.graph import TrustableGraph +import trudag.dotstop.core.graph.graph_factory as factory +from pathlib import Path + +from capture_test_data import is_unit_test, get_metadata, clean_test_case, read_result_table, get_all_xml_files +from plot_partial_graphs import get_pydot_graph, get_subgraph, get_my_url +from clean_trudag_output import clean_line, remove_line, remove_invalid_markdown_start, clean_file +from generate_list_of_tests import ListOfTestsGenerator + +def snapshot(root: Path): + # Return a stable, content-based snapshot of the tree + return sorted( + (str(p.relative_to(root)).replace("\\", "/"), p.read_bytes()) + for p in root.rglob("*") + if p.is_file() + ) + +@pytest.fixture +def ET_Element_test(): + content = """ + + [doctest] doctest version is "2.4.11" +[doctest] run with "--help" for options +=============================================================================== +[doctest] test cases: 24 | 24 passed | 0 failed | 1 skipped +[doctest] assertions: 693860 | 693860 passed | 0 failed | +[doctest] Status: SUCCESS! 
+ + + """ + yield ET.fromstring(content) + +@pytest.fixture +def ET_Element_nontest(): + content = """ + + [1/1] Downloading test data from https://github.com/eclipse-score/inc_nlohmann_json (branch: json_test_data_version_3_1_0_mirror) + + + """ + yield ET.fromstring(content) + +@pytest.fixture +def ET_Element_nonsense(): + content = """ + + [doctest] doctest version is "2.4.11" +[doctest] run with "--help" for options +=============================================================================== +[doctest] test cases: 7 | 4 passed | 2 failed | 1 skipped +[doctest] assertions: 1914 | 1453 passed | 461 failed | +[doctest] Status: NAJA! + + + """ + yield ET.fromstring(content) + +@pytest.fixture +def mock_a_trustable_repository(tmpdir): + repo = tmpdir.mkdir("home") + repo.join("some_nonsense_file.md").write("Hallo Welt") + repo.join("some_cpp_code.cpp").write("#include \n\nint main()\{std::cout << \"Hello world\";\}") + repo.join("Root-1.md").write("---\nlevel: 1.1\nnormative: true\n---\n\ntest") + sd = repo.mkdir("subdir") + sd.join("node-1.md").write("---\nlevel: 1.1\nnormative: true\n---\n\ntest") + sd.join("node-2.md").write("---\nlevel: 1.1\nnormative: true\n---\n\ntest") + return repo + +@pytest.fixture +def trustable_graph(mock_a_trustable_repository): + my_graph = get_pydot_graph(["Root-1","node-1","node-2"],[("Root-1","node-1"),("Root-1","node-2")]) + return TrustableGraph(my_graph,factory._load_base_items(["Root-1","node-1","node-2"],Path(mock_a_trustable_repository))) + +@pytest.fixture +def trudag_output(): + return """ + +--- + +### AOU-01 ### {: .item-element .item-section class="tsf-score" style="background-color:hsl(0.0, 100%, 65%)" .status-unreviewed} + +The integrator shall report problems with nlohmann/json's implementation to the upstream nlohmann/json project whenever a problem is detected. 
+{: .expanded-item-element } + +**Supported Requests:** + +- [TA-FIXES](TA.md#ta-fixes){.item-element class="tsf-score" style="background-color:hsl(0.0, 100%, 65%)"} + +**Supporting Items:** + +_None_ + +{% raw %} + +**References:** + +_None_ + +{% endraw %} + +**Fallacies:** + +_None_ + +**Graph:** + +_No Historic Data Found_ +""" + +@pytest.fixture +def trudag_output_with_cleaned_start(): + return""" +### AOU-01 ### {: .item-element .item-section class="tsf-score" style="background-color:hsl(0.0, 100%, 65%)" .status-unreviewed} + +The integrator shall report problems with nlohmann/json's implementation to the upstream nlohmann/json project whenever a problem is detected. +{: .expanded-item-element } + +**Supported Requests:** + +- [TA-FIXES](TA.md#ta-fixes){.item-element class="tsf-score" style="background-color:hsl(0.0, 100%, 65%)"} + +**Supporting Items:** + +_None_ + +{% raw %} + +**References:** + +_None_ + +{% endraw %} + +**Fallacies:** + +_None_ + +**Graph:** + +_No Historic Data Found_ +""" + +@pytest.fixture +def clean_trudag_output(): + return""" +### AOU-01 + +The integrator shall report problems with nlohmann/json's implementation to the upstream nlohmann/json project whenever a problem is detected. 
+ + +**Supported Requests:** + +- [TA-FIXES](TA.md#ta-fixes) + +**Supporting Items:** + +_None_ + + + +**References:** + +_None_ + + + +**Fallacies:** + +_None_ + +**Graph:** + +_No Historic Data Found_ +""" + +@pytest.fixture +def mock_a_trudag_report(tmp_path,trudag_output): + root = tmp_path / "report" + root.mkdir() + report = root / "AOU.md" + report.write_text(trudag_output, encoding='utf8') + return report + +@pytest.fixture +def mock_a_clean_trudag_report(tmp_path,clean_trudag_output): + root = tmp_path / "clean_report" + root.mkdir() + report = root / "AOU.md" + report.write_text(clean_trudag_output, encoding='utf8') + return report + + +##################### +# below are the tests +##################### + +def test_unit_test_recognition(ET_Element_test,ET_Element_nontest,ET_Element_nonsense): + assert is_unit_test(ET_Element_test) + assert not is_unit_test(ET_Element_nontest) + assert not is_unit_test(ET_Element_nonsense) + +def test_get_metadata(ET_Element_test): + metadata = get_metadata(ET_Element_test) + assert metadata.get("name") == "test-bjdata" + assert metadata.get("standard") == "gnu++11" + assert metadata.get("passed test cases") == 24 + assert metadata.get("failed test cases") == 0 + assert metadata.get("skipped test cases") == 1 + assert metadata.get("passed assertions") == 693860 + assert metadata.get("failed assertions") == 0 + +def test_get_nonsense_metadata(ET_Element_nonsense): + metadata = get_metadata(ET_Element_nonsense) + assert metadata.get("name") == "test-bjdata" + assert metadata.get("standard") == "gnu++c#11" + assert metadata.get("passed test cases") == 4 + assert metadata.get("failed test cases") == 2 + assert metadata.get("skipped test cases") == 1 + assert metadata.get("passed assertions") == 1453 + assert metadata.get("failed assertions") == 461 + +def test_clean_test_case(): + name, standard = clean_test_case("Hallo_Welt") + assert name == "Hallo" + assert standard == "gnu++Welt" + + name, standard = 
clean_test_case("Test_super_many_underscores_and_cpp_1") + assert name == "Test_super_many_underscores_and_cpp" + assert standard == "gnu++1" + + name, standard = clean_test_case("Test_non_nonsensical_appendix_cpp19") + assert name == "Test_non_nonsensical_appendix" + assert standard == "gnu++19" + +def test_read_result_table(): + result_table = """[doctest] doctest version is "2.4.11" +[doctest] run with "--help" for options +=============================================================================== +[doctest] test cases: 1 | 1 passed | 0 failed | 0 skipped +[doctest] assertions: 28 | 28 passed | 0 failed | +[doctest] Status: SUCCESS! +""" + result = read_result_table([result_table]) + assert result.get("passed test cases") == 1 + assert result.get("failed test cases") == 0 + assert result.get("skipped test cases") == 0 + assert result.get("passed assertions") == 28 + assert result.get("failed assertions") == 0 + + incomplete_result_table = """[doctest] doctest version is "2.4.11" +[doctest] run with "--help" for options +=============================================================================== +[doctest] test cases: 1 | 1 passed | 0 failed | 0 skipped +""" + with pytest.raises(RuntimeError): + result = read_result_table([incomplete_result_table]) + +def test_get_all_xml_files(tmpdir): + tmpdir.mkdir("ci_test_standards_clang_artefact_libcxx_17").join("clang-17_junit.xml").write("test") + tmpdir.mkdir("ci_test_standards_clang_artefact_libcxx_14").join("clang-14_junit.xml").write("test") + tmpdir.mkdir("ci_cmake_options_artefact_ci_test_noglobaludls").join("noglobaludls_junit.xml").write("test") + tmpdir.mkdir("my_dir").join("graded_Hamiltonian_mechanics.tex").write("test") + tmpdir.join("Hallo_Welt.xml").write("Hallo") + tmpdir.mkdir("This").mkdir("is").mkdir("quite").mkdir("the").mkdir("nested").join("test.xml").write("test") + result = get_all_xml_files(tmpdir.strpath) + assert tmpdir+"/ci_test_standards_clang_artefact_libcxx_17/clang-17_junit.xml" in 
result + assert tmpdir+"/ci_test_standards_clang_artefact_libcxx_14/clang-14_junit.xml" in result + assert tmpdir+"/ci_cmake_options_artefact_ci_test_noglobaludls/noglobaludls_junit.xml" in result + assert tmpdir+"/Hallo_Welt.xml" in result + assert tmpdir+"/This/is/quite/the/nested/test.xml" in result + assert tmpdir+"/my_dir/graded_Hamiltonian_mechanics.tex" not in result + +def test_get_pydot_graph_failure(): + with pytest.raises(RuntimeError): + get_pydot_graph(["a"],[("a","b")]) + +def test_comparison_of_pydot_graphs(): + # pydot graphs can not quite so easily compared. Let me demonstrate + graph1 = pydot.Dot("G", graph_type = "digraph") + graph1.add_node(pydot.Node("\"a\"")) + graph1.add_node(pydot.Node("\"b\"")) + graph1.add_edge(pydot.Edge("\"a\"","\"b\"")) + + graph2 = pydot.Dot("G", graph_type = "digraph") + graph2.add_node(pydot.Node("\"a\"")) + graph2.add_node(pydot.Node("\"b\"")) + graph2.add_edge(pydot.Edge("\"a\"","\"b\"")) + + assert graph1 != graph2 + assert graph1.to_string() == graph2.to_string() + assert graph1.to_string() == pydot.graph_from_dot_data(graph1.to_string())[0].to_string() + +def test_get_pydot_graph_success(): + expected = pydot.Dot("G", graph_type = "digraph") + # for some weird reason, Codethink expects quotation marks + expected.add_node(pydot.Node("\"a\"")) + expected.add_node(pydot.Node("\"b\"")) + expected.add_edge(pydot.Edge("\"a\"","\"b\"")) + assert expected.to_string() == get_pydot_graph(["a","b"],[("a","b")])._graph.to_string() + +def test_get_subgraph(trustable_graph): + result = get_subgraph(trustable_graph,["Root-1"]) + expected = "digraph G {\n\"Root-1\";\n}\n" + assert result._graph.to_string() == expected + result = get_subgraph(trustable_graph,["Root-1","node-1"]) + expected = "digraph G {\n\"Root-1\";\n\"node-1\";\n\"Root-1\" -> \"node-1\";\n}\n" + assert result._graph.to_string() == expected + +def test_get_my_url(trustable_graph): + assert get_my_url("Hallo","test",trustable_graph) == 
"test/_images/custom_Hallo_graph.svg" + assert get_my_url("Root-1","test",trustable_graph) == "test/_images/custom_Root-1_graph.svg" + assert get_my_url("node-1","test",trustable_graph) == "test/generated/node.html#node-1" + +def test_remove_line(): + assert not remove_line("test") + assert remove_line("\"Click to view reference\"") + assert not remove_line("\"Click to view Reference\"") + assert remove_line("localplugins.CPPTestReference") + assert not remove_line("localplugins.CPPTestreference") + + r"\{class[:=][^}]*\}", # {class:...} or {class=...} with any attributes inside + r"\{\%[\s]*raw[\s]*\%\}", # {% raw %} + r"\{\%[\s]*endraw[\s]*\%\}", # {% endraw %} + r"#{1,3}\s*\{[^}]*\}", # one to three # followed by {: ... } + r"\{\.[^}]*\}", # {.something ... } + r"\{ \.[^}]*\}", # { .something ... } + r"\{: [^}]*\}", # {: ... } + +def test_clean_line(): + assert clean_line("{class: Hallo, Welt!}") == "" + assert clean_line("This here {class: Hallo, Welt!} and a test") == "This here and a test" + assert clean_line("{class= Hallo, Welt!}") == "" + assert clean_line("This here {class= Hallo, Welt!} and a test") == "This here and a test" + assert clean_line("{% raw %}Hallo") == "Hallo" + assert clean_line("{% endraw %} Welt") == " Welt" + assert clean_line("{:test}") == "{:test}" + assert clean_line("{: test} trailing garbage") == " trailing garbage" + assert clean_line("#{:test}") == "" + assert clean_line("##{:test}") == "" + assert clean_line("###{:test}") == "" + assert clean_line("{.interesting}") == "" + assert clean_line("{ .interesting}") == "" + assert clean_line("{ .interesting}") == "{ .interesting}" + assert clean_line("{ {class: test} .interesting{: test}}") == "{ .interesting}" + assert clean_line("{{class: test}{: test} .interesting}") == "" + +def test_remove_invalid_markdown_start(trudag_output,trudag_output_with_cleaned_start): + assert remove_invalid_markdown_start(["\t","\t","---test"]) == [] + assert 
remove_invalid_markdown_start(trudag_output.split('\n')) == trudag_output_with_cleaned_start.split('\n') + assert remove_invalid_markdown_start(["","","- -- Hallo"]) == ["","","- -- Hallo"] + +def test_clean_file(mock_a_trudag_report,clean_trudag_output): + clean_file(mock_a_trudag_report) + report = (mock_a_trudag_report).read_text(encoding="utf-8") + assert report == clean_trudag_output + + +def test_default_init_ListOfTestsGenerator(): + ref = ListOfTestsGenerator() + assert ref._test_files == ["./tests/src", "./TSF/tests"] + assert ref._database == "./artifacts/MemoryEfficientTestResults.db" + assert ref._table == "test_results" + +def test_variable_setting_ListOfTestCases(): + ref = ListOfTestsGenerator() + ref.set_database("my_database.db") + ref.set_sources(["file_1","file_2"]) + ref.set_table("my_fancy_table") + assert ref._test_files == ["file_1","file_2"] + assert ref._database == "my_database.db" + assert ref._table == "my_fancy_table" + +def test_compile_string(): + with pytest.raises(RuntimeError): + ListOfTestsGenerator.compile_string([]) + +def test_remove_and_count_indent(): + assert ListOfTestsGenerator.remove_and_count_indent("Hallo")== (0,"Hallo") + assert ListOfTestsGenerator.remove_and_count_indent(" Hallo") == (1,"Hallo") + assert ListOfTestsGenerator.remove_and_count_indent("\t Hallo Welt \t\t") == (5,"Hallo Welt \t\t") + +def test_extract_quotation(): + assert ListOfTestsGenerator.extract_quotation("\"Hallo\" Welt") == "Hallo" + assert ListOfTestsGenerator.extract_quotation("This is quite \"exciting\", isn't it.") == "exciting" + assert ListOfTestsGenerator.extract_quotation("\"Hallo\" \"Welt\"") == "Hallo" + +def test_extract_faulty_quotation(): + with pytest.raises(RuntimeError, match=r"Expected quotation mark; none were detected."): + ListOfTestsGenerator.extract_quotation("Hallo Welt") + with pytest.raises(RuntimeError, match=r"Expected quotation marks; only one was detected."): + ListOfTestsGenerator.extract_quotation("Hallo \"Welt") + 
+def test_transform_test_file_to_test_name(): + assert ListOfTestsGenerator.transform_test_file_to_test_name("unit-dummy-test.cpp") == "test-dummy-test" + assert ListOfTestsGenerator.transform_test_file_to_test_name("unit-dummy_test.cpp") == "test-dummy_test" + diff --git a/TSF/scripts/update_helper.py b/TSF/scripts/update_helper.py new file mode 100644 index 0000000000..a8472b99a2 --- /dev/null +++ b/TSF/scripts/update_helper.py @@ -0,0 +1,274 @@ +import argparse +import re +import requests +import hashlib +from pathlib import Path + +def main() -> None: + ap = argparse.ArgumentParser(description="little helper script automatically updating version numbers and release dates") + ap.add_argument("-v", + "--version", + help="version number to be updated to; if unspecified, most recent version is chosen", + default=None + ) + ap.add_argument("-c", + "--check", + help="checks whether current and specified (or most recent) version of single_include/nlohmann/json.hpp coincide; no other action is performed", + action=argparse.BooleanOptionalAction + ) + ap.add_argument("-a", + "--auto", + help="automatically updates all options", + action=argparse.BooleanOptionalAction + ) + ap.add_argument("-u", + "--update", + action='append', + choices=["JLS-01","JLS-06","JLS-07","JLS-11","JLS-14","introduction","misbehaviours"], + help="updates the specified file(s):" \ + " JLS-01 - TSF/trustable/JLS-01.md," \ + " JLS-06 - TSF/trustable/JLS-06.md," \ + " JLS-07 - TSF/trustable/JLS-07.md," \ + " JLS-11 - TSF/trustable/JLS-11.md," \ + " JLS-14 - TSF/trustable/JLS-14.md," \ + " introduction - TSF/docs/introduction/index.rst," \ + " misbehaviours - TSF/scripts/generate_list_of_misbehaviours.py", + default=None + ) + ap.add_argument("-b", + "--branch", + help="name of the branch to which the references for branch protection and workflow-failures point to", + default=None + ) + ap.add_argument("-bo", + "--branch_only", + help="adapts branch-names only", + 
action=argparse.BooleanOptionalAction + ) + args = ap.parse_args() + + root = Path(__file__).resolve().parent.parent.parent + + print(args) + + if (not args.check + and ( + (not args.auto + and args.update is None) + or (args.branch_only + and args.branch is None) + ) + ): + # do nothing + return None + + # Fetch the metadata + version, url, release_date, expected_sha = fetch_metadata(args.version) + + # if flag check is set, then the sha of single_include/nlohmann/json.hpp is cross-checked with the sha of the specified version + if args.check: + if not check(expected_sha,root): + if args.version is None: + print(f"The current version of single_include/nlohmann/json.hpp is not the most recent one, which is {version}.") + else: + print(f"The current version of single_include/nlohmann/json.hpp does not coincide with {version}.") + else: + if args.version is None: + print(f"The current version of single_include/nlohmann/json.hpp is the most recent one, which is {version}.") + else: + print(f"The current version of single_include/nlohmann/json.hpp coincides with {version}.") + # No other action is performed. + return None + if not check(expected_sha,root): + print("\nWARNING! The current version of single_include/nlohmann/json.hpp does not coincide with {version}.\n\nIf you proceed, then the documentation is expected to contain wrong data!") + user = input("Proceed anyway? 
[y/n] ").strip().lower() + if user != "y": + print("Aborting update ...") + return None + # if flag auto is set, then all is updated automatically + if args.auto: + if args.branch is not None: + update_JLS_01(args.branch,root) + update_JLS_06(args.branch,root) + update_JLS_07(args.branch,root) + if not args.branch_only: + update_JLS_11(release_date,root) + update_JLS_14(url,expected_sha,root) + update_intro(version,root) + update_misbehaviours(version,release_date,root) + # no other action is necessary + return None + if "JLS-01" in args.update: + update_JLS_01(args.branch,root) + if "JLS-06" in args.update: + update_JLS_06(args.branch,root) + if "JLS-07" in args.update: + update_JLS_07(args.branch,root) + if args.branch_only: + return None + if "JLS-11" in args.update: + update_JLS_11(release_date,root) + if "JLS-14" in args.update: + update_JLS_14(url,expected_sha,root) + if "introduction" in args.update: + update_intro(version,root) + if "misbehaviours" in args.update: + update_misbehaviours(version,release_date,root) + +def update_JLS_01(branch: str, root: Path | None = None) -> None: + if root is None: + root = Path(__file__).resolve().parent.parent + path_to_jls_11 = root / "trustable/statements/JLS-01.md" + else: + path_to_jls_11 = root / "TSF/trustable/statements/JLS-01.md" + data = path_to_jls_11.read_text(encoding='utf-8') + data = re.sub(r'(?m)^(\s*branch:\s*")([^"]*)(")', r'\g<1>' + branch + r'\g<3>', data) + path_to_jls_11.write_text(data) + +def update_JLS_06(branch: str, root: Path | None = None) -> None: + if root is None: + root = Path(__file__).resolve().parent.parent + path_to_jls_11 = root / "trustable/statements/JLS-06.md" + else: + path_to_jls_11 = root / "TSF/trustable/statements/JLS-06.md" + data = path_to_jls_11.read_text(encoding='utf-8') + data = re.sub(r'(?m)^(\s*branch:\s*")([^"]*)(")', r'\g<1>' + branch + r'\g<3>', data) + path_to_jls_11.write_text(data) + +def update_JLS_07(branch: str, root: Path | None = None) -> None: + if root is 
None: + root = Path(__file__).resolve().parent.parent + path_to_jls_11 = root / "trustable/statements/JLS-07.md" + else: + path_to_jls_11 = root / "TSF/trustable/statements/JLS-07.md" + data = path_to_jls_11.read_text(encoding='utf-8') + data = re.sub(r'(?m)^(\s*branch:\s*")([^"]*)(")', r'\g<1>' + branch + r'\g<3>', data) + path_to_jls_11.write_text(data) + +def update_JLS_11(release_date: str, root: Path | None = None) -> None: + if root is None: + root = Path(__file__).resolve().parent.parent + path_to_jls_11 = root / "trustable/statements/JLS-11.md" + else: + path_to_jls_11 = root / "TSF/trustable/statements/JLS-11.md" + data = path_to_jls_11.read_text(encoding='utf-8') + data = re.sub(r'(?m)^(\s*release_date:\s*")([^"]*)(")', r'\g<1>' + release_date + r'\g<3>', data) + path_to_jls_11.write_text(data) + +def update_JLS_14(url: str, sha: str, root: Path | None = None) -> None: + if root is None: + root = Path(__file__).resolve().parent.parent + path_to_jls_14 = root / "trustable/statements/JLS-14.md" + else: + path_to_jls_14 = root / "TSF/trustable/statements/JLS-14.md" + data = path_to_jls_14.read_text(encoding='utf-8') + data = re.sub(r'(?m)^(\s*sha:\s*")([^"]*)(")', r'\g<1>' + sha + r'\g<3>', data) + data = re.sub(r'(?m)^(\s*url:\s*")([^"]*)(")', r'\g<1>' + url + r'\g<3>', data) + path_to_jls_14.write_text(data) + +def update_intro(version: str, root: Path | None = None) -> None: + if root is None: + root = Path(__file__).resolve().parent.parent + path_to_intro = root / "docs/introduction/index.rst" + else: + path_to_intro = root / "TSF/docs/introduction/index.rst" + data = path_to_intro.read_text(encoding='utf-8') + data = re.sub(r'(\(version\s+)([^)]*)(\))', + lambda m: f"{m.group(1)}{version}{m.group(3)}", + data) + path_to_intro.write_text(data) + +def update_misbehaviours(version: str, release_date: str, root: Path | None = None) -> None: + if root is None: + root = Path(__file__).resolve().parent + path_to_script = root / 
"generate_list_of_misbehaviours.py" + else: + path_to_script = root / "TSF/scripts/generate_list_of_misbehaviours.py" + data = path_to_script.read_text(encoding='utf-8') + data = re.sub(r'(?m)^(\s*version\s*=\s*")([^"]*)(")', r'\g<1>' + version + r'\g<3>', data) + data = re.sub(r'(?m)^(\s*release_date\s*=\s*")([^"]*)(")', r'\g<1>' + release_date + r'\g<3>', data) + path_to_script.write_text(data) + + +def fetch_metadata(version = None) -> tuple[str,str,str]: + # This function fetches the metadata of the release of the version of nlohmann/json specified in the input. + # If the input is None, then the most recent version is fetched. + # The function returns the version number, the release date in the format %Y-%m-%dT%H:%M:%SZ + # and the sha256-value of the json.hpp of the released version + + if version is None: + version = "" + + # fetch the sha-value of most recent release + releases = requests.get("https://github.com/nlohmann/json/releases") + if releases.status_code != 200: + raise Warning("The release page of nlohmann/json appears to be currently not reachable.") + releases_by_the_line = releases.text.splitlines() + # releases is expected to be huge, delete to free up room + del releases + found_version = False + found_sha = False + found_release_date = False + found_tag = False + for line in releases_by_the_line: + # look for + if not found_version and f"JSON for Modern C++ version {version}" not in line: + continue + elif not found_version: + if version == "": + m = re.search(r'JSON for Modern C\+\+ version\s*([^<"]*)<',line) + if m is None: + raise RuntimeError("Critical Error: Can not find version number of most recent release!") + version = m.group(1) + found_version = True + continue + if not found_release_date and "datetime=" in line: + m = re.search(r'datetime\s*=\s*"([^"]*)"', line) + if m is None: + raise RuntimeError(f"Critical Error: Can not find release-date of version {version}!") + release_date = m.group(1) if m else None + found_release_date = 
True + if not found_sha and "SHA-256:" in line and "(json.hpp)" in line: + expected_sha = line.split("SHA-256:", 1)[1].split("(json.hpp)", 1)[0].strip() + found_sha = True + if not found_tag and "/nlohmann/json/tree" in line: + m = re.search(r'href\s*=\s*"([^"]*)"', line) + if m is None: + raise RuntimeError(f"Critical Error: Can not find link to release version {version}!") + url = "https://github.com" + m.group(1) + found_tag = True + if found_version and found_sha and found_release_date and found_tag: + return (version, url, release_date, expected_sha) + if "JSON for Modern C++ version" in line and f"JSON for Modern C++ version {version}" not in line: + if not found_version and not found_release_date and not found_tag: + error_message = "Could not find any metadata" + elif not found_sha: + error_message = "Could not find SHA-value for json.hpp" + if not found_release_date: + error_message += " and relase-date" + elif not found_tag: + error_message += " and link to code" + elif not found_release_date: + error_message = "Could not find release-date" + if not found_tag: + error_message += " and link to code" + else: + error_message = "Could not find link to code" + error_message += f" of version {version}!" if version!="" else " of most recent version!" + raise RuntimeError(error_message) + # If ever the for-loop comes to its end, the specified version can not be found! 
+ raise RuntimeError(f"Can not locate the release of version {version}!") + + +def check(expected_sha: str, root: Path | None = None) -> bool: + # get the actual sha-value of the single_include.json + if root is None: + root = Path(__file__).resolve().parent.parent.parent + single_include_json_path = root / "single_include/nlohmann/json.hpp" + with single_include_json_path.open('rb') as f: + actual_sha = hashlib.file_digest(f, 'sha256').hexdigest() + return actual_sha == expected_sha + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/TSF/tests/unit-arrays.cpp b/TSF/tests/unit-arrays.cpp new file mode 100644 index 0000000000..8ce0f087a0 --- /dev/null +++ b/TSF/tests/unit-arrays.cpp @@ -0,0 +1,95 @@ + +#include "doctest_compatibility.h" + +#include +#include +#include +using nlohmann::json; + +namespace{ +// The pipeline complains if json::parse is called without an output. +// Masking this call within a helper function suppresses this complaint. +json parser_helper(const std::string& input); + +json parser_helper(const std::string& input){ + return json::parse(input); +} +}//namespace + +TEST_CASE("accept") +{ + // An interval accepts only [ and ] as left and right boundary, resp. + // everything else, in particular invalid tokens, are illegal. + SECTION("boundaries") + { + CHECK(!json::accept("[}")); + CHECK(!json::accept("[\"foobar\"}")); + CHECK(!json::accept("[1.23\u004513}")); + CHECK(!json::accept("[[1,32,5,\"foo\"]}")); + CHECK(!json::accept("{]")); + CHECK(!json::accept("{\"foobar\"]")); + CHECK(!json::accept("{1.23\u004513]")); + CHECK(!json::accept("{[1,32,5,\"foo\"]]")); + CHECK(!json::accept("(]")); + CHECK(!json::accept("(\"foobar\"]")); + CHECK(!json::accept("(1.23\u004513]")); + CHECK(!json::accept("([1,32,5,\"foo\"]]")); + + // Test whether 100,000 opening brackets with "Moin!" and 99,999 closing brackets are rejected. 
+ std::stringstream faulty_array; + for (int i = 0; i < 100000; i++){ + faulty_array << "["; + } + faulty_array << "\"Moin!\""; + for (int i = 1; i < 100000; i++){ + faulty_array << "]"; + } + CHECK(!json::accept(faulty_array.str())); + // double check if rejection is not due to overflow + CHECK_THROWS_AS(parser_helper(faulty_array.str()),json::parse_error&); + } + SECTION("whitespace") + { + CHECK(json::accept(" [true] ")); + CHECK(json::accept(" [true]\t")); + CHECK(json::accept(" [true]\n")); + CHECK(json::accept(" [true]\u000d")); + CHECK(json::accept("\t[true] ")); + CHECK(json::accept("\t[true]\t")); + CHECK(json::accept("\t[true]\n")); + CHECK(json::accept("\t[true]\u000d")); + CHECK(json::accept("\n[true] ")); + CHECK(json::accept("\n[true]\t")); + CHECK(json::accept("\n[true]\n")); + CHECK(json::accept("\n[true]\u000d")); + CHECK(json::accept("\u000d[true] ")); + CHECK(json::accept("\u000d[true]\t")); + CHECK(json::accept("\u000d[true]\n")); + CHECK(json::accept("\u000d[true]\u000d")); + CHECK(json::accept(" \n\t\t\n \t\u000d[true] \n\n\n \t\t\u000d \n")); + } +} + +TEST_CASE("parse") +{ + SECTION("whitespace") + { + json j = json::parse(R"(["1","2","test","foo","bar"])"); + CHECK(parser_helper("[ \"1\" , \"2\" , \"test\" , \"foo\" , \"bar\" ]")==j); + CHECK(parser_helper("[ \"1\"\t, \"2\"\t, \"test\"\t, \"foo\"\t, \"bar\"\t]")==j); + CHECK(parser_helper("[ \"1\"\n, \"2\"\n, \"test\"\n, \"foo\"\n, \"bar\"\n]")==j); + CHECK(parser_helper("[ \"1\"\u000d, \"2\"\u000d, \"test\"\u000d, \"foo\"\u000d, \"bar\"\u000d]")==j); + CHECK(parser_helper("[\t\"1\" ,\t\"2\" ,\t\"test\" ,\t\"foo\" ,\t\"bar\" ]")==j); + CHECK(parser_helper("[\t\"1\"\t,\t\"2\"\t,\t\"test\"\t,\t\"foo\"\t,\t\"bar\"\t]")==j); + CHECK(parser_helper("[\t\"1\"\n,\t\"2\"\n,\t\"test\"\n,\t\"foo\"\n,\t\"bar\"\n]")==j); + CHECK(parser_helper("[\t\"1\"\u000d,\t\"2\"\u000d,\t\"test\"\u000d,\t\"foo\"\u000d,\t\"bar\"\u000d]")==j); + CHECK(parser_helper("[\n\"1\" ,\n\"2\" ,\n\"test\" ,\n\"foo\" ,\n\"bar\" 
]")==j); + CHECK(parser_helper("[\n\"1\"\t,\n\"2\"\t,\n\"test\"\t,\n\"foo\"\t,\n\"bar\"\t]")==j); + CHECK(parser_helper("[\n\"1\"\n,\n\"2\"\n,\n\"test\"\n,\n\"foo\"\n,\n\"bar\"\n]")==j); + CHECK(parser_helper("[\n\"1\"\u000d,\n\"2\"\u000d,\n\"test\"\u000d,\n\"foo\"\u000d,\n\"bar\"\u000d]")==j); + CHECK(parser_helper("[\u000d\"1\" ,\u000d\"2\" ,\u000d\"test\" ,\u000d\"foo\" ,\u000d\"bar\" ]")==j); + CHECK(parser_helper("[\u000d\"1\"\t,\u000d\"2\"\t,\u000d\"test\"\t,\u000d\"foo\"\t,\u000d\"bar\"\t]")==j); + CHECK(parser_helper("[\u000d\"1\"\n,\u000d\"2\"\n,\u000d\"test\"\n,\u000d\"foo\"\n,\u000d\"bar\"\n]")==j); + CHECK(parser_helper("[\u000d\"1\"\u000d,\u000d\"2\"\u000d,\u000d\"test\"\u000d,\u000d\"foo\"\u000d,\u000d\"bar\"\u000d]")==j); + } +} diff --git a/TSF/tests/unit-byte_order_mark.cpp b/TSF/tests/unit-byte_order_mark.cpp new file mode 100644 index 0000000000..b37aaa36f8 --- /dev/null +++ b/TSF/tests/unit-byte_order_mark.cpp @@ -0,0 +1,112 @@ + +#include "doctest_compatibility.h" + +#include +using nlohmann::json; + +namespace{ +void parser_helper(const std::string& input); +// build throws a warning when json::parse(foo) is used without output. +// The parser_helper prevents this. +void parser_helper(const std::string& input) +{ + const json temp = json::parse(input); +} +} //namespace + +TEST_CASE("accept") +{ + SECTION("UTF-8") + { + SECTION("single BOM") + { + // a single byte order mark is treated as an empty token, which is not a valid json token. 
+ CHECK(!json::accept("\xEF\xBB\xBF")); + CHECK(json::accept("\xEF\xBB\xBF\n\"foo\"")); + CHECK(json::accept("\xEF\xBB\xBF\"foo\"")); + CHECK(json::accept("\xEF\xBB\xBF 123")); + CHECK(json::accept("\xEF\xBB\xBF[1,2,3]")); + CHECK(json::accept("\xEF\xBB\xBF{\"foo\":1,\"bar\":2,\"test\":3}")); + } + SECTION("multiple BOM") + { + CHECK(!json::accept("\xEF\xBB\xBF\xEF\xBB\xBF")); + CHECK(!json::accept("\xEF\xBB\xBF\xEF\xBB\xBF\xEF\xBB\xBF")); + CHECK(!json::accept("\xEF\xBB\xBF\xEF\xBB\xBF\xEF\xBB\xBF\xEF\xBB\xBF")); + CHECK(!json::accept("\xEF\xBB\xBF\xEF\xBB")); + CHECK(!json::accept("\xEF\xBB\xBF foo")); + } + SECTION("unexpected BOM") + { + CHECK(!json::accept(" \xEF\xBB\xBF")); + CHECK(!json::accept("\t\xEF\xBB\xBF")); + CHECK(!json::accept("\n\xEF\xBB\xBF")); + CHECK(!json::accept("\xEF\xBB\xBF")); + CHECK(!json::accept("\u000d\xEF\xBB\xBF")); + CHECK(!json::accept("1\xEF\xBB\xBF")); + CHECK(!json::accept("\"foo\"\xEF\xBB\xBF")); + CHECK(!json::accept("[42]\xEF\xBB\xBF")); + CHECK(!json::accept("{\"foo\":\"bar\"}\xEF\xBB\xBF")); + } + } + SECTION("Other byte-order marks") + { + SECTION("UTF-16") + { + CHECK(!json::accept("\xFE\xFF")); + CHECK(!json::accept("\xFF\xFE")); + } + SECTION("UTF-32") + { + const std::string utf32bom1("\x00\x00\xFE\xFF", 4); + const std::string utf32bom2("\xFF\xFE\x00\x00", 4); + CHECK(!json::accept(utf32bom1)); + CHECK(!json::accept(utf32bom2)); + } + } +} + +TEST_CASE("parse") +{ + SECTION("UTF-8") + { + SECTION("multiple BOM") + { + // Whenever a fourth character of a BOM-candidate is read, an error is thrown. + // This error does not depend on any trailing garbage. 
+ CHECK_THROWS_WITH_AS(parser_helper("\xEF\xBB\xBF\xEF\xBB\xBF"),"[json.exception.parse_error.101] parse error at line 1, column 4: syntax error while parsing value - invalid literal; last read: '\xEF\xBB\xBF\xEF'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("\xEF\xBB\xBF\xEF\xBB\xBF\xEF\xBB\xBF"),"[json.exception.parse_error.101] parse error at line 1, column 4: syntax error while parsing value - invalid literal; last read: '\xEF\xBB\xBF\xEF'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("\xEF\xBB\xBF\xEF\xBB\xBF\xEF\xBB\xBF\xEF\xBB\xBF"),"[json.exception.parse_error.101] parse error at line 1, column 4: syntax error while parsing value - invalid literal; last read: '\xEF\xBB\xBF\xEF'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("\xEF\xBB\xBF\xEF\xBB"),"[json.exception.parse_error.101] parse error at line 1, column 4: syntax error while parsing value - invalid literal; last read: '\xEF\xBB\xBF\xEF'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("\xEF\xBB\xBF\xEF foo"),"[json.exception.parse_error.101] parse error at line 1, column 4: syntax error while parsing value - invalid literal; last read: '\xEF\xBB\xBF\xEF'", json::parse_error&); + } + SECTION("unexpected BOM") + { + // A byte order mark at any other position than the very first character is illegal and an error is thrown. 
+ CHECK_THROWS_AS(parser_helper(" \xEF\xBB\xBF"), json::parse_error&); + CHECK_THROWS_AS(parser_helper("\t\xEF\xBB\xBF"), json::parse_error&); + CHECK_THROWS_AS(parser_helper("\n\xEF\xBB\xBF"), json::parse_error&); + CHECK_THROWS_AS(parser_helper("\xEF\xBB\xBF"), json::parse_error&); + CHECK_THROWS_AS(parser_helper("\u000d\xEF\xBB\xBF"), json::parse_error&); + CHECK_THROWS_AS(parser_helper("1\xEF\xBB\xBF"), json::parse_error&); + CHECK_THROWS_AS(parser_helper("\"foo\"\xEF\xBB\xBF"), json::parse_error&); + CHECK_THROWS_AS(parser_helper("[42]\xEF\xBB\xBF"), json::parse_error&); + CHECK_THROWS_AS(parser_helper("{\"foo\":\"bar\"}\xEF\xBB\xBF"), json::parse_error&); + } + } + SECTION("other BOM") + { + SECTION("UTF-16") + { + CHECK_THROWS_AS(parser_helper("\xFE\xFF\"foo\""),json::parse_error&); + CHECK_THROWS_AS(parser_helper("\xFF\xFE\"foo\""),json::parse_error&); + } + SECTION("UTF-32") + { + const std::string utf32bom1("\x00\x00\xFE\xFF\x30", 5); + const std::string utf32bom2("\xFF\xFE\x00\x00\x30", 5); + CHECK_THROWS_AS(parser_helper(utf32bom1),json::parse_error&); + CHECK_THROWS_AS(parser_helper(utf32bom2),json::parse_error&); + } + } +} diff --git a/TSF/tests/unit-class_parser_core.cpp b/TSF/tests/unit-class_parser_core.cpp new file mode 100644 index 0000000000..bb908f0a44 --- /dev/null +++ b/TSF/tests/unit-class_parser_core.cpp @@ -0,0 +1,1114 @@ +// __ _____ _____ _____ +// __| | __| | | | JSON for Modern C++ (supporting code) +// | | |__ | | | | | | version 3.12.0 +// |_____|_____|_____|_|___| https://github.com/nlohmann/json +// +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann +// SPDX-License-Identifier: MIT + +/* +This file has been adapted from the original nlohmann/json library (tests/src/unit-class_parser.cpp) +to use the plain json::accept() and json::parse() functions instead of advanced helper functions, +which make many additional function calls (see the definitions of parse_helper and accept_helper in +tests/src/unit-class_parser.cpp). 
This allows to directly attribute a test result to the accept() or +parse() function, which is needed to use the test results as evidence for the well-formedness and parsing + of JSON requirements. Unnecessary code and test sections have been removed. +*/ + +#include "doctest_compatibility.h" + +#define JSON_TESTS_PRIVATE +#include +using nlohmann::json; +#ifdef JSON_TEST_NO_GLOBAL_UDLS + using namespace nlohmann::literals; // NOLINT(google-build-using-namespace) +#endif + +#include + +namespace +{ + +json parser_helper(const std::string& s); +bool accept_helper(const std::string& s); + +json parser_helper(const std::string& s) +{ + return json::parse(s); +} + +bool accept_helper(const std::string& s) +{ + CAPTURE(s) + return json::accept(s); +} + +} // namespace + +TEST_CASE("parser class - core") +{ + SECTION("parse") + { + SECTION("null") + { + CHECK(parser_helper("null") == json(nullptr)); + } + + SECTION("true") + { + CHECK(parser_helper("true") == json(true)); + } + + SECTION("false") + { + CHECK(parser_helper("false") == json(false)); + } + + SECTION("array") + { + SECTION("empty array") + { + CHECK(parser_helper("[]") == json(json::value_t::array)); + CHECK(parser_helper("[ ]") == json(json::value_t::array)); + } + + SECTION("nonempty array") + { + CHECK(parser_helper("[true, false, null]") == json({true, false, nullptr})); + } + } + + SECTION("object") + { + SECTION("empty object") + { + CHECK(parser_helper("{}") == json(json::value_t::object)); + CHECK(parser_helper("{ }") == json(json::value_t::object)); + } + + SECTION("nonempty object") + { + CHECK(parser_helper("{\"\": true, \"one\": 1, \"two\": null}") == json({{"", true}, {"one", 1}, {"two", nullptr}})); + } + } + + SECTION("string") + { + // empty string + CHECK(parser_helper("\"\"") == json(json::value_t::string)); + + SECTION("errors") + { + // error: tab in string + CHECK_THROWS_WITH_AS(parser_helper("\"\t\""), "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while 
parsing value - invalid string: control character U+0009 (HT) must be escaped to \\u0009 or \\t; last read: '\"'", json::parse_error&); + // error: newline in string + CHECK_THROWS_WITH_AS(parser_helper("\"\n\""), "[json.exception.parse_error.101] parse error at line 2, column 0: syntax error while parsing value - invalid string: control character U+000A (LF) must be escaped to \\u000A or \\n; last read: '\"'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("\"\r\""), "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid string: control character U+000D (CR) must be escaped to \\u000D or \\r; last read: '\"'", json::parse_error&); + // error: backspace in string + CHECK_THROWS_WITH_AS(parser_helper("\"\b\""), "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid string: control character U+0008 (BS) must be escaped to \\u0008 or \\b; last read: '\"'", json::parse_error&); + // improve code coverage + CHECK_THROWS_AS(parser_helper("\uFF01"), json::parse_error&); + CHECK_THROWS_AS(parser_helper("[-4:1,]"), json::parse_error&); + // unescaped control characters + CHECK_THROWS_WITH_AS(parser_helper("\"\x00\""), "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid string: missing closing quote; last read: '\"'", json::parse_error&); // NOLINT(bugprone-string-literal-with-embedded-nul) + CHECK_THROWS_WITH_AS(parser_helper("\"\x01\""), "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid string: control character U+0001 (SOH) must be escaped to \\u0001; last read: '\"'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("\"\x02\""), "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid string: control character U+0002 (STX) must be escaped to \\u0002; last read: '\"'", 
json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("\"\x03\""), "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid string: control character U+0003 (ETX) must be escaped to \\u0003; last read: '\"'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("\"\x04\""), "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid string: control character U+0004 (EOT) must be escaped to \\u0004; last read: '\"'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("\"\x05\""), "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid string: control character U+0005 (ENQ) must be escaped to \\u0005; last read: '\"'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("\"\x06\""), "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid string: control character U+0006 (ACK) must be escaped to \\u0006; last read: '\"'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("\"\x07\""), "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid string: control character U+0007 (BEL) must be escaped to \\u0007; last read: '\"'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("\"\x08\""), "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid string: control character U+0008 (BS) must be escaped to \\u0008 or \\b; last read: '\"'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("\"\x09\""), "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid string: control character U+0009 (HT) must be escaped to \\u0009 or \\t; last read: '\"'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("\"\x0a\""), "[json.exception.parse_error.101] parse error at line 
2, column 0: syntax error while parsing value - invalid string: control character U+000A (LF) must be escaped to \\u000A or \\n; last read: '\"'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("\"\x0b\""), "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid string: control character U+000B (VT) must be escaped to \\u000B; last read: '\"'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("\"\x0c\""), "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid string: control character U+000C (FF) must be escaped to \\u000C or \\f; last read: '\"'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("\"\x0d\""), "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid string: control character U+000D (CR) must be escaped to \\u000D or \\r; last read: '\"'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("\"\x0e\""), "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid string: control character U+000E (SO) must be escaped to \\u000E; last read: '\"'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("\"\x0f\""), "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid string: control character U+000F (SI) must be escaped to \\u000F; last read: '\"'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("\"\x10\""), "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid string: control character U+0010 (DLE) must be escaped to \\u0010; last read: '\"'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("\"\x11\""), "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid string: control character U+0011 (DC1) must be escaped to \\u0011; 
last read: '\"'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("\"\x12\""), "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid string: control character U+0012 (DC2) must be escaped to \\u0012; last read: '\"'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("\"\x13\""), "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid string: control character U+0013 (DC3) must be escaped to \\u0013; last read: '\"'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("\"\x14\""), "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid string: control character U+0014 (DC4) must be escaped to \\u0014; last read: '\"'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("\"\x15\""), "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid string: control character U+0015 (NAK) must be escaped to \\u0015; last read: '\"'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("\"\x16\""), "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid string: control character U+0016 (SYN) must be escaped to \\u0016; last read: '\"'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("\"\x17\""), "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid string: control character U+0017 (ETB) must be escaped to \\u0017; last read: '\"'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("\"\x18\""), "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid string: control character U+0018 (CAN) must be escaped to \\u0018; last read: '\"'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("\"\x19\""), "[json.exception.parse_error.101] parse error at 
line 1, column 2: syntax error while parsing value - invalid string: control character U+0019 (EM) must be escaped to \\u0019; last read: '\"'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("\"\x1a\""), "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid string: control character U+001A (SUB) must be escaped to \\u001A; last read: '\"'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("\"\x1b\""), "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid string: control character U+001B (ESC) must be escaped to \\u001B; last read: '\"'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("\"\x1c\""), "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid string: control character U+001C (FS) must be escaped to \\u001C; last read: '\"'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("\"\x1d\""), "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid string: control character U+001D (GS) must be escaped to \\u001D; last read: '\"'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("\"\x1e\""), "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid string: control character U+001E (RS) must be escaped to \\u001E; last read: '\"'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("\"\x1f\""), "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid string: control character U+001F (US) must be escaped to \\u001F; last read: '\"'", json::parse_error&); + + SECTION("additional test for null byte") + { + // The test above for the null byte is wrong, because passing + // a string to the parser only reads int until it encounters + // a null byte. 
This test inserts the null byte later on and + // uses an iterator range. + std::string s = "\"1\""; + s[1] = '\0'; + json _; + CHECK_THROWS_WITH_AS(_ = json::parse(s.begin(), s.end()), "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid string: control character U+0000 (NUL) must be escaped to \\u0000; last read: '\"'", json::parse_error&); + } + } + + SECTION("escaped") + { + // quotation mark "\"" + auto r1 = R"("\"")"_json; + CHECK(parser_helper("\"\\\"\"") == r1); + // reverse solidus "\\" + auto r2 = R"("\\")"_json; + CHECK(parser_helper("\"\\\\\"") == r2); + // solidus + CHECK(parser_helper("\"\\/\"") == R"("/")"_json); + // backspace + CHECK(parser_helper("\"\\b\"") == json("\b")); + // formfeed + CHECK(parser_helper("\"\\f\"") == json("\f")); + // newline + CHECK(parser_helper("\"\\n\"") == json("\n")); + // carriage return + CHECK(parser_helper("\"\\r\"") == json("\r")); + // horizontal tab + CHECK(parser_helper("\"\\t\"") == json("\t")); + + CHECK(parser_helper("\"\\u0001\"").get() == "\x01"); + CHECK(parser_helper("\"\\u000a\"").get() == "\n"); + CHECK(parser_helper("\"\\u00b0\"").get() == "°"); + CHECK(parser_helper("\"\\u0c00\"").get() == "ఀ"); + CHECK(parser_helper("\"\\ud000\"").get() == "퀀"); + CHECK(parser_helper("\"\\u000E\"").get() == "\x0E"); + CHECK(parser_helper("\"\\u00F0\"").get() == "ð"); + CHECK(parser_helper("\"\\u0100\"").get() == "Ā"); + CHECK(parser_helper("\"\\u2000\"").get() == " "); + CHECK(parser_helper("\"\\uFFFF\"").get() == "￿"); + CHECK(parser_helper("\"\\u20AC\"").get() == "€"); + CHECK(parser_helper("\"€\"").get() == "€"); + CHECK(parser_helper("\"🎈\"").get() == "🎈"); + + CHECK(parser_helper("\"\\ud80c\\udc60\"").get() == "\xf0\x93\x81\xa0"); + CHECK(parser_helper("\"\\ud83c\\udf1e\"").get() == "🌞"); + } + } + + SECTION("number") + { + SECTION("integers") + { + SECTION("without exponent") + { + CHECK(parser_helper("-128") == json(-128)); + CHECK(parser_helper("-0") == 
json(-0)); + CHECK(parser_helper("0") == json(0)); + CHECK(parser_helper("128") == json(128)); + } + + SECTION("with exponent") + { + CHECK(parser_helper("0e1") == json(0e1)); + CHECK(parser_helper("0E1") == json(0e1)); + + CHECK(parser_helper("10000E-4") == json(10000e-4)); + CHECK(parser_helper("10000E-3") == json(10000e-3)); + CHECK(parser_helper("10000E-2") == json(10000e-2)); + CHECK(parser_helper("10000E-1") == json(10000e-1)); + CHECK(parser_helper("10000E0") == json(10000e0)); + CHECK(parser_helper("10000E1") == json(10000e1)); + CHECK(parser_helper("10000E2") == json(10000e2)); + CHECK(parser_helper("10000E3") == json(10000e3)); + CHECK(parser_helper("10000E4") == json(10000e4)); + + CHECK(parser_helper("10000e-4") == json(10000e-4)); + CHECK(parser_helper("10000e-3") == json(10000e-3)); + CHECK(parser_helper("10000e-2") == json(10000e-2)); + CHECK(parser_helper("10000e-1") == json(10000e-1)); + CHECK(parser_helper("10000e0") == json(10000e0)); + CHECK(parser_helper("10000e1") == json(10000e1)); + CHECK(parser_helper("10000e2") == json(10000e2)); + CHECK(parser_helper("10000e3") == json(10000e3)); + CHECK(parser_helper("10000e4") == json(10000e4)); + + CHECK(parser_helper("-0e1") == json(-0e1)); + CHECK(parser_helper("-0E1") == json(-0e1)); + CHECK(parser_helper("-0E123") == json(-0e123)); + + // numbers after exponent + CHECK(parser_helper("10E0") == json(10e0)); + CHECK(parser_helper("10E1") == json(10e1)); + CHECK(parser_helper("10E2") == json(10e2)); + CHECK(parser_helper("10E3") == json(10e3)); + CHECK(parser_helper("10E4") == json(10e4)); + CHECK(parser_helper("10E5") == json(10e5)); + CHECK(parser_helper("10E6") == json(10e6)); + CHECK(parser_helper("10E7") == json(10e7)); + CHECK(parser_helper("10E8") == json(10e8)); + CHECK(parser_helper("10E9") == json(10e9)); + CHECK(parser_helper("10E+0") == json(10e0)); + CHECK(parser_helper("10E+1") == json(10e1)); + CHECK(parser_helper("10E+2") == json(10e2)); + CHECK(parser_helper("10E+3") == json(10e3)); + 
CHECK(parser_helper("10E+4") == json(10e4)); + CHECK(parser_helper("10E+5") == json(10e5)); + CHECK(parser_helper("10E+6") == json(10e6)); + CHECK(parser_helper("10E+7") == json(10e7)); + CHECK(parser_helper("10E+8") == json(10e8)); + CHECK(parser_helper("10E+9") == json(10e9)); + CHECK(parser_helper("10E-1") == json(10e-1)); + CHECK(parser_helper("10E-2") == json(10e-2)); + CHECK(parser_helper("10E-3") == json(10e-3)); + CHECK(parser_helper("10E-4") == json(10e-4)); + CHECK(parser_helper("10E-5") == json(10e-5)); + CHECK(parser_helper("10E-6") == json(10e-6)); + CHECK(parser_helper("10E-7") == json(10e-7)); + CHECK(parser_helper("10E-8") == json(10e-8)); + CHECK(parser_helper("10E-9") == json(10e-9)); + } + + SECTION("edge cases") + { + // From RFC8259, Section 6: + // Note that when such software is used, numbers that are + // integers and are in the range [-(2**53)+1, (2**53)-1] + // are interoperable in the sense that implementations will + // agree exactly on their numeric values. + + // -(2**53)+1 + CHECK(parser_helper("-9007199254740991").get() == -9007199254740991); + // (2**53)-1 + CHECK(parser_helper("9007199254740991").get() == 9007199254740991); + } + + SECTION("over the edge cases") // issue #178 - Integer conversion to unsigned (incorrect handling of 64-bit integers) + { + // While RFC8259, Section 6 specifies a preference for support + // for ranges in range of IEEE 754-2008 binary64 (double precision) + // this does not accommodate 64-bit integers without loss of accuracy. + // As 64-bit integers are now widely used in software, it is desirable + // to expand support to the full 64 bit (signed and unsigned) range + // i.e. -(2**63) -> (2**64)-1. 
+ + // -(2**63) ** Note: compilers see negative literals as negated positive numbers (hence the -1)) + CHECK(parser_helper("-9223372036854775808").get() == -9223372036854775807 - 1); + // (2**63)-1 + CHECK(parser_helper("9223372036854775807").get() == 9223372036854775807); + // (2**64)-1 + CHECK(parser_helper("18446744073709551615").get() == 18446744073709551615u); + } + } + + SECTION("floating-point") + { + SECTION("without exponent") + { + CHECK(parser_helper("-128.5") == json(-128.5)); + CHECK(parser_helper("0.999") == json(0.999)); + CHECK(parser_helper("128.5") == json(128.5)); + CHECK(parser_helper("-0.0") == json(-0.0)); + } + + SECTION("with exponent") + { + CHECK(parser_helper("-128.5E3") == json(-128.5E3)); + CHECK(parser_helper("-128.5E-3") == json(-128.5E-3)); + CHECK(parser_helper("-0.0e1") == json(-0.0e1)); + CHECK(parser_helper("-0.0E1") == json(-0.0e1)); + } + } + + SECTION("overflow") + { + // overflows during parsing yield an exception + CHECK_THROWS_WITH_AS(parser_helper("1.18973e+4932").empty(), "[json.exception.out_of_range.406] number overflow parsing '1.18973e+4932'", json::out_of_range&); + } + + SECTION("invalid numbers") + { + // numbers must not begin with "+" + CHECK_THROWS_AS(parser_helper("+1"), json::parse_error&); + CHECK_THROWS_AS(parser_helper("+0"), json::parse_error&); + + CHECK_THROWS_WITH_AS(parser_helper("01"), + "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - unexpected number literal; expected end of input", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("-01"), + "[json.exception.parse_error.101] parse error at line 1, column 3: syntax error while parsing value - unexpected number literal; expected end of input", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("--1"), + "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid number; expected digit after '-'; last read: '--'", json::parse_error&); 
+ CHECK_THROWS_WITH_AS(parser_helper("1."), + "[json.exception.parse_error.101] parse error at line 1, column 3: syntax error while parsing value - invalid number; expected digit after '.'; last read: '1.'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("1E"), + "[json.exception.parse_error.101] parse error at line 1, column 3: syntax error while parsing value - invalid number; expected '+', '-', or digit after exponent; last read: '1E'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("1E-"), + "[json.exception.parse_error.101] parse error at line 1, column 4: syntax error while parsing value - invalid number; expected digit after exponent sign; last read: '1E-'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("1.E1"), + "[json.exception.parse_error.101] parse error at line 1, column 3: syntax error while parsing value - invalid number; expected digit after '.'; last read: '1.E'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("-1E"), + "[json.exception.parse_error.101] parse error at line 1, column 4: syntax error while parsing value - invalid number; expected '+', '-', or digit after exponent; last read: '-1E'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("-0E#"), + "[json.exception.parse_error.101] parse error at line 1, column 4: syntax error while parsing value - invalid number; expected '+', '-', or digit after exponent; last read: '-0E#'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("-0E-#"), + "[json.exception.parse_error.101] parse error at line 1, column 5: syntax error while parsing value - invalid number; expected digit after exponent sign; last read: '-0E-#'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("-0#"), + "[json.exception.parse_error.101] parse error at line 1, column 3: syntax error while parsing value - invalid literal; last read: '-0#'; expected end of input", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("-0.0:"), + 
"[json.exception.parse_error.101] parse error at line 1, column 5: syntax error while parsing value - unexpected ':'; expected end of input", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("-0.0Z"), + "[json.exception.parse_error.101] parse error at line 1, column 5: syntax error while parsing value - invalid literal; last read: '-0.0Z'; expected end of input", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("-0E123:"), + "[json.exception.parse_error.101] parse error at line 1, column 7: syntax error while parsing value - unexpected ':'; expected end of input", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("-0e0-:"), + "[json.exception.parse_error.101] parse error at line 1, column 6: syntax error while parsing value - invalid number; expected digit after '-'; last read: '-:'; expected end of input", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("-0e-:"), + "[json.exception.parse_error.101] parse error at line 1, column 5: syntax error while parsing value - invalid number; expected digit after exponent sign; last read: '-0e-:'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("-0f"), + "[json.exception.parse_error.101] parse error at line 1, column 4: syntax error while parsing value - invalid literal; last read: '-0f'; expected end of input", json::parse_error&); + } + } + } + + SECTION("accept") + { + SECTION("null") + { + CHECK(accept_helper("null")); + } + + SECTION("true") + { + CHECK(accept_helper("true")); + } + + SECTION("false") + { + CHECK(accept_helper("false")); + } + + SECTION("array") + { + SECTION("empty array") + { + CHECK(accept_helper("[]")); + CHECK(accept_helper("[ ]")); + } + + SECTION("nonempty array") + { + CHECK(accept_helper("[true, false, null]")); + } + } + + SECTION("object") + { + SECTION("empty object") + { + CHECK(accept_helper("{}")); + CHECK(accept_helper("{ }")); + } + + SECTION("nonempty object") + { + CHECK(accept_helper("{\"\": true, \"one\": 1, \"two\": null}")); + } + } + 
+ SECTION("string") + { + // empty string + CHECK(accept_helper("\"\"")); + + SECTION("errors") + { + // error: tab in string + CHECK(accept_helper("\"\t\"") == false); + // error: newline in string + CHECK(accept_helper("\"\n\"") == false); + CHECK(accept_helper("\"\r\"") == false); + // error: backspace in string + CHECK(accept_helper("\"\b\"") == false); + // improve code coverage + CHECK(accept_helper("\uFF01") == false); + CHECK(accept_helper("[-4:1,]") == false); + // unescaped control characters + CHECK(accept_helper("\"\x00\"") == false); // NOLINT(bugprone-string-literal-with-embedded-nul) + CHECK(accept_helper("\"\x01\"") == false); + CHECK(accept_helper("\"\x02\"") == false); + CHECK(accept_helper("\"\x03\"") == false); + CHECK(accept_helper("\"\x04\"") == false); + CHECK(accept_helper("\"\x05\"") == false); + CHECK(accept_helper("\"\x06\"") == false); + CHECK(accept_helper("\"\x07\"") == false); + CHECK(accept_helper("\"\x08\"") == false); + CHECK(accept_helper("\"\x09\"") == false); + CHECK(accept_helper("\"\x0a\"") == false); + CHECK(accept_helper("\"\x0b\"") == false); + CHECK(accept_helper("\"\x0c\"") == false); + CHECK(accept_helper("\"\x0d\"") == false); + CHECK(accept_helper("\"\x0e\"") == false); + CHECK(accept_helper("\"\x0f\"") == false); + CHECK(accept_helper("\"\x10\"") == false); + CHECK(accept_helper("\"\x11\"") == false); + CHECK(accept_helper("\"\x12\"") == false); + CHECK(accept_helper("\"\x13\"") == false); + CHECK(accept_helper("\"\x14\"") == false); + CHECK(accept_helper("\"\x15\"") == false); + CHECK(accept_helper("\"\x16\"") == false); + CHECK(accept_helper("\"\x17\"") == false); + CHECK(accept_helper("\"\x18\"") == false); + CHECK(accept_helper("\"\x19\"") == false); + CHECK(accept_helper("\"\x1a\"") == false); + CHECK(accept_helper("\"\x1b\"") == false); + CHECK(accept_helper("\"\x1c\"") == false); + CHECK(accept_helper("\"\x1d\"") == false); + CHECK(accept_helper("\"\x1e\"") == false); + CHECK(accept_helper("\"\x1f\"") == 
false); + } + + SECTION("escaped") + { + // quotation mark "\"" + auto r1 = R"("\"")"_json; + CHECK(accept_helper("\"\\\"\"")); + // reverse solidus "\\" + auto r2 = R"("\\")"_json; + CHECK(accept_helper("\"\\\\\"")); + // solidus + CHECK(accept_helper("\"\\/\"")); + // backspace + CHECK(accept_helper("\"\\b\"")); + // formfeed + CHECK(accept_helper("\"\\f\"")); + // newline + CHECK(accept_helper("\"\\n\"")); + // carriage return + CHECK(accept_helper("\"\\r\"")); + // horizontal tab + CHECK(accept_helper("\"\\t\"")); + + CHECK(accept_helper("\"\\u0001\"")); + CHECK(accept_helper("\"\\u000a\"")); + CHECK(accept_helper("\"\\u00b0\"")); + CHECK(accept_helper("\"\\u0c00\"")); + CHECK(accept_helper("\"\\ud000\"")); + CHECK(accept_helper("\"\\u000E\"")); + CHECK(accept_helper("\"\\u00F0\"")); + CHECK(accept_helper("\"\\u0100\"")); + CHECK(accept_helper("\"\\u2000\"")); + CHECK(accept_helper("\"\\uFFFF\"")); + CHECK(accept_helper("\"\\u20AC\"")); + CHECK(accept_helper("\"€\"")); + CHECK(accept_helper("\"🎈\"")); + + CHECK(accept_helper("\"\\ud80c\\udc60\"")); + CHECK(accept_helper("\"\\ud83c\\udf1e\"")); + } + } + + SECTION("number") + { + SECTION("integers") + { + SECTION("without exponent") + { + CHECK(accept_helper("-128")); + CHECK(accept_helper("-0")); + CHECK(accept_helper("0")); + CHECK(accept_helper("128")); + } + + SECTION("with exponent") + { + CHECK(accept_helper("0e1")); + CHECK(accept_helper("0E1")); + + CHECK(accept_helper("10000E-4")); + CHECK(accept_helper("10000E-3")); + CHECK(accept_helper("10000E-2")); + CHECK(accept_helper("10000E-1")); + CHECK(accept_helper("10000E0")); + CHECK(accept_helper("10000E1")); + CHECK(accept_helper("10000E2")); + CHECK(accept_helper("10000E3")); + CHECK(accept_helper("10000E4")); + + CHECK(accept_helper("10000e-4")); + CHECK(accept_helper("10000e-3")); + CHECK(accept_helper("10000e-2")); + CHECK(accept_helper("10000e-1")); + CHECK(accept_helper("10000e0")); + CHECK(accept_helper("10000e1")); + 
CHECK(accept_helper("10000e2")); + CHECK(accept_helper("10000e3")); + CHECK(accept_helper("10000e4")); + + CHECK(accept_helper("-0e1")); + CHECK(accept_helper("-0E1")); + CHECK(accept_helper("-0E123")); + } + + SECTION("edge cases") + { + // From RFC8259, Section 6: + // Note that when such software is used, numbers that are + // integers and are in the range [-(2**53)+1, (2**53)-1] + // are interoperable in the sense that implementations will + // agree exactly on their numeric values. + + // -(2**53)+1 + CHECK(accept_helper("-9007199254740991")); + // (2**53)-1 + CHECK(accept_helper("9007199254740991")); + } + + SECTION("over the edge cases") // issue #178 - Integer conversion to unsigned (incorrect handling of 64-bit integers) + { + // While RFC8259, Section 6 specifies a preference for support + // for ranges in range of IEEE 754-2008 binary64 (double precision) + // this does not accommodate 64 bit integers without loss of accuracy. + // As 64 bit integers are now widely used in software, it is desirable + // to expand support to the full 64 bit (signed and unsigned) range + // i.e. -(2**63) -> (2**64)-1. 
+ + // -(2**63) ** Note: compilers see negative literals as negated positive numbers (hence the -1)) + CHECK(accept_helper("-9223372036854775808")); + // (2**63)-1 + CHECK(accept_helper("9223372036854775807")); + // (2**64)-1 + CHECK(accept_helper("18446744073709551615")); + } + } + + SECTION("floating-point") + { + SECTION("without exponent") + { + CHECK(accept_helper("-128.5")); + CHECK(accept_helper("0.999")); + CHECK(accept_helper("128.5")); + CHECK(accept_helper("-0.0")); + } + + SECTION("with exponent") + { + CHECK(accept_helper("-128.5E3")); + CHECK(accept_helper("-128.5E-3")); + CHECK(accept_helper("-0.0e1")); + CHECK(accept_helper("-0.0E1")); + } + } + + SECTION("overflow") + { + // overflows during parsing + CHECK(!accept_helper("1.18973e+4932")); + } + + SECTION("invalid numbers") + { + CHECK(accept_helper("01") == false); + CHECK(accept_helper("--1") == false); + CHECK(accept_helper("1.") == false); + CHECK(accept_helper("1E") == false); + CHECK(accept_helper("1E-") == false); + CHECK(accept_helper("1.E1") == false); + CHECK(accept_helper("-1E") == false); + CHECK(accept_helper("-0E#") == false); + CHECK(accept_helper("-0E-#") == false); + CHECK(accept_helper("-0#") == false); + CHECK(accept_helper("-0.0:") == false); + CHECK(accept_helper("-0.0Z") == false); + CHECK(accept_helper("-0E123:") == false); + CHECK(accept_helper("-0e0-:") == false); + CHECK(accept_helper("-0e-:") == false); + CHECK(accept_helper("-0f") == false); + + // numbers must not begin with "+" + CHECK(accept_helper("+1") == false); + CHECK(accept_helper("+0") == false); + } + } + } + + SECTION("parse errors") + { + // unexpected end of number + CHECK_THROWS_WITH_AS(parser_helper("0."), + "[json.exception.parse_error.101] parse error at line 1, column 3: syntax error while parsing value - invalid number; expected digit after '.'; last read: '0.'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("-"), + "[json.exception.parse_error.101] parse error at line 1, column 2: syntax 
error while parsing value - invalid number; expected digit after '-'; last read: '-'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("--"), + "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid number; expected digit after '-'; last read: '--'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("-0."), + "[json.exception.parse_error.101] parse error at line 1, column 4: syntax error while parsing value - invalid number; expected digit after '.'; last read: '-0.'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("-."), + "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid number; expected digit after '-'; last read: '-.'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("-:"), + "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid number; expected digit after '-'; last read: '-:'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("0.:"), + "[json.exception.parse_error.101] parse error at line 1, column 3: syntax error while parsing value - invalid number; expected digit after '.'; last read: '0.:'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("e."), + "[json.exception.parse_error.101] parse error at line 1, column 1: syntax error while parsing value - invalid literal; last read: 'e'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("1e."), + "[json.exception.parse_error.101] parse error at line 1, column 3: syntax error while parsing value - invalid number; expected '+', '-', or digit after exponent; last read: '1e.'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("1e/"), + "[json.exception.parse_error.101] parse error at line 1, column 3: syntax error while parsing value - invalid number; expected '+', '-', or digit after exponent; last read: '1e/'", json::parse_error&); + 
CHECK_THROWS_WITH_AS(parser_helper("1e:"), + "[json.exception.parse_error.101] parse error at line 1, column 3: syntax error while parsing value - invalid number; expected '+', '-', or digit after exponent; last read: '1e:'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("1E."), + "[json.exception.parse_error.101] parse error at line 1, column 3: syntax error while parsing value - invalid number; expected '+', '-', or digit after exponent; last read: '1E.'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("1E/"), + "[json.exception.parse_error.101] parse error at line 1, column 3: syntax error while parsing value - invalid number; expected '+', '-', or digit after exponent; last read: '1E/'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("1E:"), + "[json.exception.parse_error.101] parse error at line 1, column 3: syntax error while parsing value - invalid number; expected '+', '-', or digit after exponent; last read: '1E:'", json::parse_error&); + + // unexpected end of null + CHECK_THROWS_WITH_AS(parser_helper("n"), + "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid literal; last read: 'n'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("nu"), + "[json.exception.parse_error.101] parse error at line 1, column 3: syntax error while parsing value - invalid literal; last read: 'nu'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("nul"), + "[json.exception.parse_error.101] parse error at line 1, column 4: syntax error while parsing value - invalid literal; last read: 'nul'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("nulk"), + "[json.exception.parse_error.101] parse error at line 1, column 4: syntax error while parsing value - invalid literal; last read: 'nulk'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("nulm"), + "[json.exception.parse_error.101] parse error at line 1, column 4: syntax error while parsing value - 
invalid literal; last read: 'nulm'", json::parse_error&); + + // unexpected end of true + CHECK_THROWS_WITH_AS(parser_helper("t"), + "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid literal; last read: 't'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("tr"), + "[json.exception.parse_error.101] parse error at line 1, column 3: syntax error while parsing value - invalid literal; last read: 'tr'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("tru"), + "[json.exception.parse_error.101] parse error at line 1, column 4: syntax error while parsing value - invalid literal; last read: 'tru'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("trud"), + "[json.exception.parse_error.101] parse error at line 1, column 4: syntax error while parsing value - invalid literal; last read: 'trud'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("truf"), + "[json.exception.parse_error.101] parse error at line 1, column 4: syntax error while parsing value - invalid literal; last read: 'truf'", json::parse_error&); + + // unexpected end of false + CHECK_THROWS_WITH_AS(parser_helper("f"), + "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid literal; last read: 'f'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("fa"), + "[json.exception.parse_error.101] parse error at line 1, column 3: syntax error while parsing value - invalid literal; last read: 'fa'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("fal"), + "[json.exception.parse_error.101] parse error at line 1, column 4: syntax error while parsing value - invalid literal; last read: 'fal'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("fals"), + "[json.exception.parse_error.101] parse error at line 1, column 5: syntax error while parsing value - invalid literal; last read: 'fals'", json::parse_error&); + 
CHECK_THROWS_WITH_AS(parser_helper("falsd"), + "[json.exception.parse_error.101] parse error at line 1, column 5: syntax error while parsing value - invalid literal; last read: 'falsd'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("falsf"), + "[json.exception.parse_error.101] parse error at line 1, column 5: syntax error while parsing value - invalid literal; last read: 'falsf'", json::parse_error&); + + // missing/unexpected end of array + CHECK_THROWS_WITH_AS(parser_helper("["), + "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - unexpected end of input; expected '[', '{', or a literal", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("[1"), + "[json.exception.parse_error.101] parse error at line 1, column 3: syntax error while parsing array - unexpected end of input; expected ']'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("[1,"), + "[json.exception.parse_error.101] parse error at line 1, column 4: syntax error while parsing value - unexpected end of input; expected '[', '{', or a literal", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("[1,]"), + "[json.exception.parse_error.101] parse error at line 1, column 4: syntax error while parsing value - unexpected ']'; expected '[', '{', or a literal", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("]"), + "[json.exception.parse_error.101] parse error at line 1, column 1: syntax error while parsing value - unexpected ']'; expected '[', '{', or a literal", json::parse_error&); + + // missing/unexpected end of object + CHECK_THROWS_WITH_AS(parser_helper("{"), + "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing object key - unexpected end of input; expected string literal", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("{\"foo\""), + "[json.exception.parse_error.101] parse error at line 1, column 7: syntax error while parsing object separator - 
unexpected end of input; expected ':'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("{\"foo\":"), + "[json.exception.parse_error.101] parse error at line 1, column 8: syntax error while parsing value - unexpected end of input; expected '[', '{', or a literal", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("{\"foo\":}"), + "[json.exception.parse_error.101] parse error at line 1, column 8: syntax error while parsing value - unexpected '}'; expected '[', '{', or a literal", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("{\"foo\":1,}"), + "[json.exception.parse_error.101] parse error at line 1, column 10: syntax error while parsing object key - unexpected '}'; expected string literal", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("}"), + "[json.exception.parse_error.101] parse error at line 1, column 1: syntax error while parsing value - unexpected '}'; expected '[', '{', or a literal", json::parse_error&); + + // missing/unexpected end of string + CHECK_THROWS_WITH_AS(parser_helper("\""), + "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing value - invalid string: missing closing quote; last read: '\"'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("\"\\\""), + "[json.exception.parse_error.101] parse error at line 1, column 4: syntax error while parsing value - invalid string: missing closing quote; last read: '\"\\\"'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("\"\\u\""), + "[json.exception.parse_error.101] parse error at line 1, column 4: syntax error while parsing value - invalid string: '\\u' must be followed by 4 hex digits; last read: '\"\\u\"'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("\"\\u0\""), + "[json.exception.parse_error.101] parse error at line 1, column 5: syntax error while parsing value - invalid string: '\\u' must be followed by 4 hex digits; last read: '\"\\u0\"'", json::parse_error&); + 
CHECK_THROWS_WITH_AS(parser_helper("\"\\u01\""), + "[json.exception.parse_error.101] parse error at line 1, column 6: syntax error while parsing value - invalid string: '\\u' must be followed by 4 hex digits; last read: '\"\\u01\"'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("\"\\u012\""), + "[json.exception.parse_error.101] parse error at line 1, column 7: syntax error while parsing value - invalid string: '\\u' must be followed by 4 hex digits; last read: '\"\\u012\"'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("\"\\u"), + "[json.exception.parse_error.101] parse error at line 1, column 4: syntax error while parsing value - invalid string: '\\u' must be followed by 4 hex digits; last read: '\"\\u'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("\"\\u0"), + "[json.exception.parse_error.101] parse error at line 1, column 5: syntax error while parsing value - invalid string: '\\u' must be followed by 4 hex digits; last read: '\"\\u0'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("\"\\u01"), + "[json.exception.parse_error.101] parse error at line 1, column 6: syntax error while parsing value - invalid string: '\\u' must be followed by 4 hex digits; last read: '\"\\u01'", json::parse_error&); + CHECK_THROWS_WITH_AS(parser_helper("\"\\u012"), + "[json.exception.parse_error.101] parse error at line 1, column 7: syntax error while parsing value - invalid string: '\\u' must be followed by 4 hex digits; last read: '\"\\u012'", json::parse_error&); + + // invalid escapes + for (int c = 1; c < 128; ++c) + { + auto s = std::string("\"\\") + std::string(1, static_cast(c)) + "\""; + + switch (c) + { + // valid escapes + case ('"'): + case ('\\'): + case ('/'): + case ('b'): + case ('f'): + case ('n'): + case ('r'): + case ('t'): + { + CHECK_NOTHROW(parser_helper(s)); + break; + } + + // \u must be followed with four numbers, so we skip it here + case ('u'): + { + break; + } + + // any other combination of backslash and 
character is invalid + default: + { + CHECK_THROWS_AS(parser_helper(s), json::parse_error&); + // only check error message if c is not a control character + if (c > 0x1f) + { + CHECK_THROWS_WITH_STD_STR(parser_helper(s), + "[json.exception.parse_error.101] parse error at line 1, column 3: syntax error while parsing value - invalid string: forbidden character after backslash; last read: '\"\\" + std::string(1, static_cast(c)) + "'"); + } + break; + } + } + } + + // invalid \uxxxx escapes + { + // check whether character is a valid hex character + const auto valid = [](int c) + { + switch (c) + { + case ('0'): + case ('1'): + case ('2'): + case ('3'): + case ('4'): + case ('5'): + case ('6'): + case ('7'): + case ('8'): + case ('9'): + case ('a'): + case ('b'): + case ('c'): + case ('d'): + case ('e'): + case ('f'): + case ('A'): + case ('B'): + case ('C'): + case ('D'): + case ('E'): + case ('F'): + { + return true; + } + + default: + { + return false; + } + } + }; + + for (int c = 1; c < 128; ++c) + { + std::string const s = "\"\\u"; + + // create a string with the iterated character at each position + auto s1 = s + "000" + std::string(1, static_cast(c)) + "\""; + auto s2 = s + "00" + std::string(1, static_cast(c)) + "0\""; + auto s3 = s + "0" + std::string(1, static_cast(c)) + "00\""; + auto s4 = s + std::string(1, static_cast(c)) + "000\""; + + if (valid(c)) + { + CAPTURE(s1) + CHECK_NOTHROW(parser_helper(s1)); + CAPTURE(s2) + CHECK_NOTHROW(parser_helper(s2)); + CAPTURE(s3) + CHECK_NOTHROW(parser_helper(s3)); + CAPTURE(s4) + CHECK_NOTHROW(parser_helper(s4)); + } + else + { + CAPTURE(s1) + CHECK_THROWS_AS(parser_helper(s1), json::parse_error&); + // only check error message if c is not a control character + if (c > 0x1f) + { + CHECK_THROWS_WITH_STD_STR(parser_helper(s1), + "[json.exception.parse_error.101] parse error at line 1, column 7: syntax error while parsing value - invalid string: '\\u' must be followed by 4 hex digits; last read: '" + s1.substr(0, 7) + 
"'"); + } + + CAPTURE(s2) + CHECK_THROWS_AS(parser_helper(s2), json::parse_error&); + // only check error message if c is not a control character + if (c > 0x1f) + { + CHECK_THROWS_WITH_STD_STR(parser_helper(s2), + "[json.exception.parse_error.101] parse error at line 1, column 6: syntax error while parsing value - invalid string: '\\u' must be followed by 4 hex digits; last read: '" + s2.substr(0, 6) + "'"); + } + + CAPTURE(s3) + CHECK_THROWS_AS(parser_helper(s3), json::parse_error&); + // only check error message if c is not a control character + if (c > 0x1f) + { + CHECK_THROWS_WITH_STD_STR(parser_helper(s3), + "[json.exception.parse_error.101] parse error at line 1, column 5: syntax error while parsing value - invalid string: '\\u' must be followed by 4 hex digits; last read: '" + s3.substr(0, 5) + "'"); + } + + CAPTURE(s4) + CHECK_THROWS_AS(parser_helper(s4), json::parse_error&); + // only check error message if c is not a control character + if (c > 0x1f) + { + CHECK_THROWS_WITH_STD_STR(parser_helper(s4), + "[json.exception.parse_error.101] parse error at line 1, column 4: syntax error while parsing value - invalid string: '\\u' must be followed by 4 hex digits; last read: '" + s4.substr(0, 4) + "'"); + } + } + } + } + + json _; + + // missing part of a surrogate pair + CHECK_THROWS_WITH_AS(_ = json::parse("\"\\uD80C\""), "[json.exception.parse_error.101] parse error at line 1, column 8: syntax error while parsing value - invalid string: surrogate U+D800..U+DBFF must be followed by U+DC00..U+DFFF; last read: '\"\\uD80C\"'", json::parse_error&); + // invalid surrogate pair + CHECK_THROWS_WITH_AS(_ = json::parse("\"\\uD80C\\uD80C\""), + "[json.exception.parse_error.101] parse error at line 1, column 13: syntax error while parsing value - invalid string: surrogate U+D800..U+DBFF must be followed by U+DC00..U+DFFF; last read: '\"\\uD80C\\uD80C'", json::parse_error&); + CHECK_THROWS_WITH_AS(_ = json::parse("\"\\uD80C\\u0000\""), + "[json.exception.parse_error.101] 
parse error at line 1, column 13: syntax error while parsing value - invalid string: surrogate U+D800..U+DBFF must be followed by U+DC00..U+DFFF; last read: '\"\\uD80C\\u0000'", json::parse_error&); + CHECK_THROWS_WITH_AS(_ = json::parse("\"\\uD80C\\uFFFF\""), + "[json.exception.parse_error.101] parse error at line 1, column 13: syntax error while parsing value - invalid string: surrogate U+D800..U+DBFF must be followed by U+DC00..U+DFFF; last read: '\"\\uD80C\\uFFFF'", json::parse_error&); + } + + SECTION("parse errors (accept)") + { + // unexpected end of number + CHECK(accept_helper("0.") == false); + CHECK(accept_helper("-") == false); + CHECK(accept_helper("--") == false); + CHECK(accept_helper("-0.") == false); + CHECK(accept_helper("-.") == false); + CHECK(accept_helper("-:") == false); + CHECK(accept_helper("0.:") == false); + CHECK(accept_helper("e.") == false); + CHECK(accept_helper("1e.") == false); + CHECK(accept_helper("1e/") == false); + CHECK(accept_helper("1e:") == false); + CHECK(accept_helper("1E.") == false); + CHECK(accept_helper("1E/") == false); + CHECK(accept_helper("1E:") == false); + + // unexpected end of null + CHECK(accept_helper("n") == false); + CHECK(accept_helper("nu") == false); + CHECK(accept_helper("nul") == false); + + // unexpected end of true + CHECK(accept_helper("t") == false); + CHECK(accept_helper("tr") == false); + CHECK(accept_helper("tru") == false); + + // unexpected end of false + CHECK(accept_helper("f") == false); + CHECK(accept_helper("fa") == false); + CHECK(accept_helper("fal") == false); + CHECK(accept_helper("fals") == false); + + // missing/unexpected end of array + CHECK(accept_helper("[") == false); + CHECK(accept_helper("[1") == false); + CHECK(accept_helper("[1,") == false); + CHECK(accept_helper("[1,]") == false); + CHECK(accept_helper("]") == false); + + // missing/unexpected end of object + CHECK(accept_helper("{") == false); + CHECK(accept_helper("{\"foo\"") == false); + CHECK(accept_helper("{\"foo\":") 
== false); + CHECK(accept_helper("{\"foo\":}") == false); + CHECK(accept_helper("{\"foo\":1,}") == false); + CHECK(accept_helper("}") == false); + + // missing/unexpected end of string + CHECK(accept_helper("\"") == false); + CHECK(accept_helper("\"\\\"") == false); + CHECK(accept_helper("\"\\u\"") == false); + CHECK(accept_helper("\"\\u0\"") == false); + CHECK(accept_helper("\"\\u01\"") == false); + CHECK(accept_helper("\"\\u012\"") == false); + CHECK(accept_helper("\"\\u") == false); + CHECK(accept_helper("\"\\u0") == false); + CHECK(accept_helper("\"\\u01") == false); + CHECK(accept_helper("\"\\u012") == false); + + // unget of newline + CHECK(parser_helper("\n123\n") == 123); + + // invalid escapes + for (int c = 1; c < 128; ++c) + { + auto s = std::string("\"\\") + std::string(1, static_cast<char>(c)) + "\""; + + switch (c) + { + // valid escapes + case ('"'): + case ('\\'): + case ('/'): + case ('b'): + case ('f'): + case ('n'): + case ('r'): + case ('t'): + { + CHECK(json::parser(nlohmann::detail::input_adapter(s)).accept()); + break; + } + + // \u must be followed with four numbers, so we skip it here + case ('u'): + { + break; + } + + // any other combination of backslash and character is invalid + default: + { + CHECK(json::parser(nlohmann::detail::input_adapter(s)).accept() == false); + break; + } + } + } + + // invalid \uxxxx escapes + { + // check whether character is a valid hex character + const auto valid = [](int c) + { + switch (c) + { + case ('0'): + case ('1'): + case ('2'): + case ('3'): + case ('4'): + case ('5'): + case ('6'): + case ('7'): + case ('8'): + case ('9'): + case ('a'): + case ('b'): + case ('c'): + case ('d'): + case ('e'): + case ('f'): + case ('A'): + case ('B'): + case ('C'): + case ('D'): + case ('E'): + case ('F'): + { + return true; + } + + default: + { + return false; + } + } + }; + + for (int c = 1; c < 128; ++c) + { + std::string const s = "\"\\u"; + + // create a string with the iterated character at each position + const 
auto s1 = s + "000" + std::string(1, static_cast<char>(c)) + "\""; + const auto s2 = s + "00" + std::string(1, static_cast<char>(c)) + "0\""; + const auto s3 = s + "0" + std::string(1, static_cast<char>(c)) + "00\""; + const auto s4 = s + std::string(1, static_cast<char>(c)) + "000\""; + + if (valid(c)) + { + CAPTURE(s1) + CHECK(json::parser(nlohmann::detail::input_adapter(s1)).accept()); + CAPTURE(s2) + CHECK(json::parser(nlohmann::detail::input_adapter(s2)).accept()); + CAPTURE(s3) + CHECK(json::parser(nlohmann::detail::input_adapter(s3)).accept()); + CAPTURE(s4) + CHECK(json::parser(nlohmann::detail::input_adapter(s4)).accept()); + } + else + { + CAPTURE(s1) + CHECK(json::parser(nlohmann::detail::input_adapter(s1)).accept() == false); + + CAPTURE(s2) + CHECK(json::parser(nlohmann::detail::input_adapter(s2)).accept() == false); + + CAPTURE(s3) + CHECK(json::parser(nlohmann::detail::input_adapter(s3)).accept() == false); + + CAPTURE(s4) + CHECK(json::parser(nlohmann::detail::input_adapter(s4)).accept() == false); + } + } + } + + // missing part of a surrogate pair + CHECK(accept_helper("\"\\uD80C\"") == false); + // invalid surrogate pair + CHECK(accept_helper("\"\\uD80C\\uD80C\"") == false); + CHECK(accept_helper("\"\\uD80C\\u0000\"") == false); + CHECK(accept_helper("\"\\uD80C\\uFFFF\"") == false); + } + + SECTION("tests found by mutate++") + { + // test case to make sure no comma precedes the first key + CHECK_THROWS_WITH_AS(parser_helper("{,\"key\": false}"), "[json.exception.parse_error.101] parse error at line 1, column 2: syntax error while parsing object key - unexpected ','; expected string literal", json::parse_error&); + // test case to make sure an object is properly closed + CHECK_THROWS_WITH_AS(parser_helper("[{\"key\": false true]"), "[json.exception.parse_error.101] parse error at line 1, column 19: syntax error while parsing object - unexpected true literal; expected '}'", json::parse_error&); + + // test case to make sure the callback is properly evaluated after reading 
a key + { + json::parser_callback_t const cb = [](int /*unused*/, json::parse_event_t event, json& /*unused*/) noexcept + { + return event != json::parse_event_t::key; + }; + + json x = json::parse("{\"key\": false}", cb); + CHECK(x == json::object()); + } + } +} diff --git a/TSF/tests/unit-literals.cpp b/TSF/tests/unit-literals.cpp new file mode 100644 index 0000000000..defa7525df --- /dev/null +++ b/TSF/tests/unit-literals.cpp @@ -0,0 +1,2191 @@ + +#include "doctest_compatibility.h" + +#include <nlohmann/json.hpp> +using nlohmann::json; + +namespace +{ +void parser_helper(const std::string& input); + +void parser_helper(const std::string& input){ + const json temp = json::parse(input); +} +} //namespace + +TEST_CASE("accept") +{ + SECTION("unicode") + { + CHECK(json::accept("\u0074\u0072\u0075\u0065")); // true + CHECK(json::accept("\u0066\u0061\u006c\u0073\u0065")); // false + CHECK(json::accept("\u006e\u0075\u006c\u006c")); + } + // only lower case literal names true, null and false are allowed json tokens + // any capital letter gives illegal token + SECTION("capitalisation") + { + SECTION("true") + { + CHECK(!json::accept("True")); + CHECK(!json::accept("tRue")); + CHECK(!json::accept("trUe")); + CHECK(!json::accept("truE")); + CHECK(!json::accept("TRue")); + CHECK(!json::accept("TrUe")); + CHECK(!json::accept("TruE")); + CHECK(!json::accept("tRUe")); + CHECK(!json::accept("tRuE")); + CHECK(!json::accept("trUE")); + CHECK(!json::accept("TRUe")); + CHECK(!json::accept("TRuE")); + CHECK(!json::accept("TrUE")); + CHECK(!json::accept("tRUE")); + CHECK(!json::accept("TRUE")); + } + SECTION("null") + { + CHECK(!json::accept("Null")); + CHECK(!json::accept("nUll")); + CHECK(!json::accept("nuLl")); + CHECK(!json::accept("nulL")); + CHECK(!json::accept("NUll")); + CHECK(!json::accept("NuLl")); + CHECK(!json::accept("NulL")); + CHECK(!json::accept("nULl")); + CHECK(!json::accept("nUlL")); + CHECK(!json::accept("nuLL")); + CHECK(!json::accept("NULl")); + CHECK(!json::accept("NUlL")); + 
CHECK(!json::accept("NuLL")); + CHECK(!json::accept("nULL")); + CHECK(!json::accept("NULL")); + } + SECTION("false") + { + CHECK(!json::accept("False")); + CHECK(!json::accept("fAlse")); + CHECK(!json::accept("FAlse")); + CHECK(!json::accept("faLse")); + CHECK(!json::accept("FaLse")); + CHECK(!json::accept("fALse")); + CHECK(!json::accept("FALse")); + CHECK(!json::accept("falSe")); + CHECK(!json::accept("FalSe")); + CHECK(!json::accept("fAlSe")); + CHECK(!json::accept("FAlSe")); + CHECK(!json::accept("faLSe")); + CHECK(!json::accept("FaLSe")); + CHECK(!json::accept("fALSe")); + CHECK(!json::accept("FALSe")); + CHECK(!json::accept("falsE")); + CHECK(!json::accept("FalsE")); + CHECK(!json::accept("fAlsE")); + CHECK(!json::accept("FAlsE")); + CHECK(!json::accept("faLsE")); + CHECK(!json::accept("FaLsE")); + CHECK(!json::accept("fALsE")); + CHECK(!json::accept("FALsE")); + CHECK(!json::accept("falSE")); + CHECK(!json::accept("FalSE")); + CHECK(!json::accept("fAlSE")); + CHECK(!json::accept("FAlSE")); + CHECK(!json::accept("faLSE")); + CHECK(!json::accept("FaLSE")); + CHECK(!json::accept("fALSE")); + CHECK(!json::accept("FALSE")); + } + } + // There are no other literal names that are valid json tokens. + // Verify that some common literal names are rejected, no matter what letter is capital. 
+ SECTION("illegal literals") + { + SECTION("nil") + { + CHECK(!json::accept("nil")); + CHECK(!json::accept("Nil")); + CHECK(!json::accept("nIl")); + CHECK(!json::accept("NIl")); + CHECK(!json::accept("niL")); + CHECK(!json::accept("NiL")); + CHECK(!json::accept("nIL")); + CHECK(!json::accept("NIL")); + } + SECTION("truth") + { + CHECK(!json::accept("truth")); + CHECK(!json::accept("Truth")); + CHECK(!json::accept("tRuth")); + CHECK(!json::accept("TRuth")); + CHECK(!json::accept("trUth")); + CHECK(!json::accept("TrUth")); + CHECK(!json::accept("tRUth")); + CHECK(!json::accept("TRUth")); + CHECK(!json::accept("truTh")); + CHECK(!json::accept("TruTh")); + CHECK(!json::accept("tRuTh")); + CHECK(!json::accept("TRuTh")); + CHECK(!json::accept("trUTh")); + CHECK(!json::accept("TrUTh")); + CHECK(!json::accept("tRUTh")); + CHECK(!json::accept("TRUTh")); + CHECK(!json::accept("trutH")); + CHECK(!json::accept("TrutH")); + CHECK(!json::accept("tRutH")); + CHECK(!json::accept("TRutH")); + CHECK(!json::accept("trUtH")); + CHECK(!json::accept("TrUtH")); + CHECK(!json::accept("tRUtH")); + CHECK(!json::accept("TRUtH")); + CHECK(!json::accept("truTH")); + CHECK(!json::accept("TruTH")); + CHECK(!json::accept("tRuTH")); + CHECK(!json::accept("TRuTH")); + CHECK(!json::accept("trUTH")); + CHECK(!json::accept("TrUTH")); + CHECK(!json::accept("tRUTH")); + CHECK(!json::accept("TRUTH")); + } + SECTION("const") + { + CHECK(!json::accept("const")); + CHECK(!json::accept("Const")); + CHECK(!json::accept("cOnst")); + CHECK(!json::accept("COnst")); + CHECK(!json::accept("coNst")); + CHECK(!json::accept("CoNst")); + CHECK(!json::accept("cONst")); + CHECK(!json::accept("CONst")); + CHECK(!json::accept("conSt")); + CHECK(!json::accept("ConSt")); + CHECK(!json::accept("cOnSt")); + CHECK(!json::accept("COnSt")); + CHECK(!json::accept("coNSt")); + CHECK(!json::accept("CoNSt")); + CHECK(!json::accept("cONSt")); + CHECK(!json::accept("CONSt")); + CHECK(!json::accept("consT")); + 
CHECK(!json::accept("ConsT")); + CHECK(!json::accept("cOnsT")); + CHECK(!json::accept("COnsT")); + CHECK(!json::accept("coNsT")); + CHECK(!json::accept("CoNsT")); + CHECK(!json::accept("cONsT")); + CHECK(!json::accept("CONsT")); + CHECK(!json::accept("conST")); + CHECK(!json::accept("ConST")); + CHECK(!json::accept("cOnST")); + CHECK(!json::accept("COnST")); + CHECK(!json::accept("coNST")); + CHECK(!json::accept("CoNST")); + CHECK(!json::accept("cONST")); + CHECK(!json::accept("CONST")); + } + SECTION("none") + { + CHECK(!json::accept("none")); + CHECK(!json::accept("None")); + CHECK(!json::accept("nOne")); + CHECK(!json::accept("NOne")); + CHECK(!json::accept("noNe")); + CHECK(!json::accept("NoNe")); + CHECK(!json::accept("nONe")); + CHECK(!json::accept("NONe")); + CHECK(!json::accept("nonE")); + CHECK(!json::accept("NonE")); + CHECK(!json::accept("nOnE")); + CHECK(!json::accept("NOnE")); + CHECK(!json::accept("noNE")); + CHECK(!json::accept("NoNE")); + CHECK(!json::accept("nONE")); + CHECK(!json::accept("NONE")); + } + SECTION("self") + { + CHECK(!json::accept("self")); + CHECK(!json::accept("Self")); + CHECK(!json::accept("sElf")); + CHECK(!json::accept("SElf")); + CHECK(!json::accept("seLf")); + CHECK(!json::accept("SeLf")); + CHECK(!json::accept("sELf")); + CHECK(!json::accept("SELf")); + CHECK(!json::accept("selF")); + CHECK(!json::accept("SelF")); + CHECK(!json::accept("sElF")); + CHECK(!json::accept("SElF")); + CHECK(!json::accept("seLF")); + CHECK(!json::accept("SeLF")); + CHECK(!json::accept("sELF")); + CHECK(!json::accept("SELF")); + } + SECTION("super") + { + CHECK(!json::accept("super")); + CHECK(!json::accept("Super")); + CHECK(!json::accept("sUper")); + CHECK(!json::accept("SUper")); + CHECK(!json::accept("suPer")); + CHECK(!json::accept("SuPer")); + CHECK(!json::accept("sUPer")); + CHECK(!json::accept("SUPer")); + CHECK(!json::accept("supEr")); + CHECK(!json::accept("SupEr")); + CHECK(!json::accept("sUpEr")); + CHECK(!json::accept("SUpEr")); + 
CHECK(!json::accept("suPEr")); + CHECK(!json::accept("SuPEr")); + CHECK(!json::accept("sUPEr")); + CHECK(!json::accept("SUPEr")); + CHECK(!json::accept("supeR")); + CHECK(!json::accept("SupeR")); + CHECK(!json::accept("sUpeR")); + CHECK(!json::accept("SUpeR")); + CHECK(!json::accept("suPeR")); + CHECK(!json::accept("SuPeR")); + CHECK(!json::accept("sUPeR")); + CHECK(!json::accept("SUPeR")); + CHECK(!json::accept("supER")); + CHECK(!json::accept("SupER")); + CHECK(!json::accept("sUpER")); + CHECK(!json::accept("SUpER")); + CHECK(!json::accept("suPER")); + CHECK(!json::accept("SuPER")); + CHECK(!json::accept("sUPER")); + CHECK(!json::accept("SUPER")); + } + SECTION("this") + { + CHECK(!json::accept("this")); + CHECK(!json::accept("This")); + CHECK(!json::accept("tHis")); + CHECK(!json::accept("THis")); + CHECK(!json::accept("thIs")); + CHECK(!json::accept("ThIs")); + CHECK(!json::accept("tHIs")); + CHECK(!json::accept("THIs")); + CHECK(!json::accept("thiS")); + CHECK(!json::accept("ThiS")); + CHECK(!json::accept("tHiS")); + CHECK(!json::accept("THiS")); + CHECK(!json::accept("thIS")); + CHECK(!json::accept("ThIS")); + CHECK(!json::accept("tHIS")); + CHECK(!json::accept("THIS")); + } + SECTION("undefined") + { + CHECK(!json::accept("undefined")); + CHECK(!json::accept("Undefined")); + CHECK(!json::accept("uNdefined")); + CHECK(!json::accept("UNdefined")); + CHECK(!json::accept("unDefined")); + CHECK(!json::accept("UnDefined")); + CHECK(!json::accept("uNDefined")); + CHECK(!json::accept("UNDefined")); + CHECK(!json::accept("undEfined")); + CHECK(!json::accept("UndEfined")); + CHECK(!json::accept("uNdEfined")); + CHECK(!json::accept("UNdEfined")); + CHECK(!json::accept("unDEfined")); + CHECK(!json::accept("UnDEfined")); + CHECK(!json::accept("uNDEfined")); + CHECK(!json::accept("UNDEfined")); + CHECK(!json::accept("undeFined")); + CHECK(!json::accept("UndeFined")); + CHECK(!json::accept("uNdeFined")); + CHECK(!json::accept("UNdeFined")); + 
CHECK(!json::accept("unDeFined")); + CHECK(!json::accept("UnDeFined")); + CHECK(!json::accept("uNDeFined")); + CHECK(!json::accept("UNDeFined")); + CHECK(!json::accept("undEFined")); + CHECK(!json::accept("UndEFined")); + CHECK(!json::accept("uNdEFined")); + CHECK(!json::accept("UNdEFined")); + CHECK(!json::accept("unDEFined")); + CHECK(!json::accept("UnDEFined")); + CHECK(!json::accept("uNDEFined")); + CHECK(!json::accept("UNDEFined")); + CHECK(!json::accept("undefIned")); + CHECK(!json::accept("UndefIned")); + CHECK(!json::accept("uNdefIned")); + CHECK(!json::accept("UNdefIned")); + CHECK(!json::accept("unDefIned")); + CHECK(!json::accept("UnDefIned")); + CHECK(!json::accept("uNDefIned")); + CHECK(!json::accept("UNDefIned")); + CHECK(!json::accept("undEfIned")); + CHECK(!json::accept("UndEfIned")); + CHECK(!json::accept("uNdEfIned")); + CHECK(!json::accept("UNdEfIned")); + CHECK(!json::accept("unDEfIned")); + CHECK(!json::accept("UnDEfIned")); + CHECK(!json::accept("uNDEfIned")); + CHECK(!json::accept("UNDEfIned")); + CHECK(!json::accept("undeFIned")); + CHECK(!json::accept("UndeFIned")); + CHECK(!json::accept("uNdeFIned")); + CHECK(!json::accept("UNdeFIned")); + CHECK(!json::accept("unDeFIned")); + CHECK(!json::accept("UnDeFIned")); + CHECK(!json::accept("uNDeFIned")); + CHECK(!json::accept("UNDeFIned")); + CHECK(!json::accept("undEFIned")); + CHECK(!json::accept("UndEFIned")); + CHECK(!json::accept("uNdEFIned")); + CHECK(!json::accept("UNdEFIned")); + CHECK(!json::accept("unDEFIned")); + CHECK(!json::accept("UnDEFIned")); + CHECK(!json::accept("uNDEFIned")); + CHECK(!json::accept("UNDEFIned")); + CHECK(!json::accept("undefiNed")); + CHECK(!json::accept("UndefiNed")); + CHECK(!json::accept("uNdefiNed")); + CHECK(!json::accept("UNdefiNed")); + CHECK(!json::accept("unDefiNed")); + CHECK(!json::accept("UnDefiNed")); + CHECK(!json::accept("uNDefiNed")); + CHECK(!json::accept("UNDefiNed")); + CHECK(!json::accept("undEfiNed")); + CHECK(!json::accept("UndEfiNed")); + 
CHECK(!json::accept("uNdEfiNed")); + CHECK(!json::accept("UNdEfiNed")); + CHECK(!json::accept("unDEfiNed")); + CHECK(!json::accept("UnDEfiNed")); + CHECK(!json::accept("uNDEfiNed")); + CHECK(!json::accept("UNDEfiNed")); + CHECK(!json::accept("undeFiNed")); + CHECK(!json::accept("UndeFiNed")); + CHECK(!json::accept("uNdeFiNed")); + CHECK(!json::accept("UNdeFiNed")); + CHECK(!json::accept("unDeFiNed")); + CHECK(!json::accept("UnDeFiNed")); + CHECK(!json::accept("uNDeFiNed")); + CHECK(!json::accept("UNDeFiNed")); + CHECK(!json::accept("undEFiNed")); + CHECK(!json::accept("UndEFiNed")); + CHECK(!json::accept("uNdEFiNed")); + CHECK(!json::accept("UNdEFiNed")); + CHECK(!json::accept("unDEFiNed")); + CHECK(!json::accept("UnDEFiNed")); + CHECK(!json::accept("uNDEFiNed")); + CHECK(!json::accept("UNDEFiNed")); + CHECK(!json::accept("undefINed")); + CHECK(!json::accept("UndefINed")); + CHECK(!json::accept("uNdefINed")); + CHECK(!json::accept("UNdefINed")); + CHECK(!json::accept("unDefINed")); + CHECK(!json::accept("UnDefINed")); + CHECK(!json::accept("uNDefINed")); + CHECK(!json::accept("UNDefINed")); + CHECK(!json::accept("undEfINed")); + CHECK(!json::accept("UndEfINed")); + CHECK(!json::accept("uNdEfINed")); + CHECK(!json::accept("UNdEfINed")); + CHECK(!json::accept("unDEfINed")); + CHECK(!json::accept("UnDEfINed")); + CHECK(!json::accept("uNDEfINed")); + CHECK(!json::accept("UNDEfINed")); + CHECK(!json::accept("undeFINed")); + CHECK(!json::accept("UndeFINed")); + CHECK(!json::accept("uNdeFINed")); + CHECK(!json::accept("UNdeFINed")); + CHECK(!json::accept("unDeFINed")); + CHECK(!json::accept("UnDeFINed")); + CHECK(!json::accept("uNDeFINed")); + CHECK(!json::accept("UNDeFINed")); + CHECK(!json::accept("undEFINed")); + CHECK(!json::accept("UndEFINed")); + CHECK(!json::accept("uNdEFINed")); + CHECK(!json::accept("UNdEFINed")); + CHECK(!json::accept("unDEFINed")); + CHECK(!json::accept("UnDEFINed")); + CHECK(!json::accept("uNDEFINed")); + CHECK(!json::accept("UNDEFINed")); + 
CHECK(!json::accept("undefinEd")); + CHECK(!json::accept("UndefinEd")); + CHECK(!json::accept("uNdefinEd")); + CHECK(!json::accept("UNdefinEd")); + CHECK(!json::accept("unDefinEd")); + CHECK(!json::accept("UnDefinEd")); + CHECK(!json::accept("uNDefinEd")); + CHECK(!json::accept("UNDefinEd")); + CHECK(!json::accept("undEfinEd")); + CHECK(!json::accept("UndEfinEd")); + CHECK(!json::accept("uNdEfinEd")); + CHECK(!json::accept("UNdEfinEd")); + CHECK(!json::accept("unDEfinEd")); + CHECK(!json::accept("UnDEfinEd")); + CHECK(!json::accept("uNDEfinEd")); + CHECK(!json::accept("UNDEfinEd")); + CHECK(!json::accept("undeFinEd")); + CHECK(!json::accept("UndeFinEd")); + CHECK(!json::accept("uNdeFinEd")); + CHECK(!json::accept("UNdeFinEd")); + CHECK(!json::accept("unDeFinEd")); + CHECK(!json::accept("UnDeFinEd")); + CHECK(!json::accept("uNDeFinEd")); + CHECK(!json::accept("UNDeFinEd")); + CHECK(!json::accept("undEFinEd")); + CHECK(!json::accept("UndEFinEd")); + CHECK(!json::accept("uNdEFinEd")); + CHECK(!json::accept("UNdEFinEd")); + CHECK(!json::accept("unDEFinEd")); + CHECK(!json::accept("UnDEFinEd")); + CHECK(!json::accept("uNDEFinEd")); + CHECK(!json::accept("UNDEFinEd")); + CHECK(!json::accept("undefInEd")); + CHECK(!json::accept("UndefInEd")); + CHECK(!json::accept("uNdefInEd")); + CHECK(!json::accept("UNdefInEd")); + CHECK(!json::accept("unDefInEd")); + CHECK(!json::accept("UnDefInEd")); + CHECK(!json::accept("uNDefInEd")); + CHECK(!json::accept("UNDefInEd")); + CHECK(!json::accept("undEfInEd")); + CHECK(!json::accept("UndEfInEd")); + CHECK(!json::accept("uNdEfInEd")); + CHECK(!json::accept("UNdEfInEd")); + CHECK(!json::accept("unDEfInEd")); + CHECK(!json::accept("UnDEfInEd")); + CHECK(!json::accept("uNDEfInEd")); + CHECK(!json::accept("UNDEfInEd")); + CHECK(!json::accept("undeFInEd")); + CHECK(!json::accept("UndeFInEd")); + CHECK(!json::accept("uNdeFInEd")); + CHECK(!json::accept("UNdeFInEd")); + CHECK(!json::accept("unDeFInEd")); + CHECK(!json::accept("UnDeFInEd")); + 
CHECK(!json::accept("uNDeFInEd")); + CHECK(!json::accept("UNDeFInEd")); + CHECK(!json::accept("undEFInEd")); + CHECK(!json::accept("UndEFInEd")); + CHECK(!json::accept("uNdEFInEd")); + CHECK(!json::accept("UNdEFInEd")); + CHECK(!json::accept("unDEFInEd")); + CHECK(!json::accept("UnDEFInEd")); + CHECK(!json::accept("uNDEFInEd")); + CHECK(!json::accept("UNDEFInEd")); + CHECK(!json::accept("undefiNEd")); + CHECK(!json::accept("UndefiNEd")); + CHECK(!json::accept("uNdefiNEd")); + CHECK(!json::accept("UNdefiNEd")); + CHECK(!json::accept("unDefiNEd")); + CHECK(!json::accept("UnDefiNEd")); + CHECK(!json::accept("uNDefiNEd")); + CHECK(!json::accept("UNDefiNEd")); + CHECK(!json::accept("undEfiNEd")); + CHECK(!json::accept("UndEfiNEd")); + CHECK(!json::accept("uNdEfiNEd")); + CHECK(!json::accept("UNdEfiNEd")); + CHECK(!json::accept("unDEfiNEd")); + CHECK(!json::accept("UnDEfiNEd")); + CHECK(!json::accept("uNDEfiNEd")); + CHECK(!json::accept("UNDEfiNEd")); + CHECK(!json::accept("undeFiNEd")); + CHECK(!json::accept("UndeFiNEd")); + CHECK(!json::accept("uNdeFiNEd")); + CHECK(!json::accept("UNdeFiNEd")); + CHECK(!json::accept("unDeFiNEd")); + CHECK(!json::accept("UnDeFiNEd")); + CHECK(!json::accept("uNDeFiNEd")); + CHECK(!json::accept("UNDeFiNEd")); + CHECK(!json::accept("undEFiNEd")); + CHECK(!json::accept("UndEFiNEd")); + CHECK(!json::accept("uNdEFiNEd")); + CHECK(!json::accept("UNdEFiNEd")); + CHECK(!json::accept("unDEFiNEd")); + CHECK(!json::accept("UnDEFiNEd")); + CHECK(!json::accept("uNDEFiNEd")); + CHECK(!json::accept("UNDEFiNEd")); + CHECK(!json::accept("undefINEd")); + CHECK(!json::accept("UndefINEd")); + CHECK(!json::accept("uNdefINEd")); + CHECK(!json::accept("UNdefINEd")); + CHECK(!json::accept("unDefINEd")); + CHECK(!json::accept("UnDefINEd")); + CHECK(!json::accept("uNDefINEd")); + CHECK(!json::accept("UNDefINEd")); + CHECK(!json::accept("undEfINEd")); + CHECK(!json::accept("UndEfINEd")); + CHECK(!json::accept("uNdEfINEd")); + CHECK(!json::accept("UNdEfINEd")); + 
CHECK(!json::accept("unDEfINEd")); + CHECK(!json::accept("UnDEfINEd")); + CHECK(!json::accept("uNDEfINEd")); + CHECK(!json::accept("UNDEfINEd")); + CHECK(!json::accept("undeFINEd")); + CHECK(!json::accept("UndeFINEd")); + CHECK(!json::accept("uNdeFINEd")); + CHECK(!json::accept("UNdeFINEd")); + CHECK(!json::accept("unDeFINEd")); + CHECK(!json::accept("UnDeFINEd")); + CHECK(!json::accept("uNDeFINEd")); + CHECK(!json::accept("UNDeFINEd")); + CHECK(!json::accept("undEFINEd")); + CHECK(!json::accept("UndEFINEd")); + CHECK(!json::accept("uNdEFINEd")); + CHECK(!json::accept("UNdEFINEd")); + CHECK(!json::accept("unDEFINEd")); + CHECK(!json::accept("UnDEFINEd")); + CHECK(!json::accept("uNDEFINEd")); + CHECK(!json::accept("UNDEFINEd")); + CHECK(!json::accept("undefineD")); + CHECK(!json::accept("UndefineD")); + CHECK(!json::accept("uNdefineD")); + CHECK(!json::accept("UNdefineD")); + CHECK(!json::accept("unDefineD")); + CHECK(!json::accept("UnDefineD")); + CHECK(!json::accept("uNDefineD")); + CHECK(!json::accept("UNDefineD")); + CHECK(!json::accept("undEfineD")); + CHECK(!json::accept("UndEfineD")); + CHECK(!json::accept("uNdEfineD")); + CHECK(!json::accept("UNdEfineD")); + CHECK(!json::accept("unDEfineD")); + CHECK(!json::accept("UnDEfineD")); + CHECK(!json::accept("uNDEfineD")); + CHECK(!json::accept("UNDEfineD")); + CHECK(!json::accept("undeFineD")); + CHECK(!json::accept("UndeFineD")); + CHECK(!json::accept("uNdeFineD")); + CHECK(!json::accept("UNdeFineD")); + CHECK(!json::accept("unDeFineD")); + CHECK(!json::accept("UnDeFineD")); + CHECK(!json::accept("uNDeFineD")); + CHECK(!json::accept("UNDeFineD")); + CHECK(!json::accept("undEFineD")); + CHECK(!json::accept("UndEFineD")); + CHECK(!json::accept("uNdEFineD")); + CHECK(!json::accept("UNdEFineD")); + CHECK(!json::accept("unDEFineD")); + CHECK(!json::accept("UnDEFineD")); + CHECK(!json::accept("uNDEFineD")); + CHECK(!json::accept("UNDEFineD")); + CHECK(!json::accept("undefIneD")); + CHECK(!json::accept("UndefIneD")); + 
CHECK(!json::accept("uNdefIneD")); + CHECK(!json::accept("UNdefIneD")); + CHECK(!json::accept("unDefIneD")); + CHECK(!json::accept("UnDefIneD")); + CHECK(!json::accept("uNDefIneD")); + CHECK(!json::accept("UNDefIneD")); + CHECK(!json::accept("undEfIneD")); + CHECK(!json::accept("UndEfIneD")); + CHECK(!json::accept("uNdEfIneD")); + CHECK(!json::accept("UNdEfIneD")); + CHECK(!json::accept("unDEfIneD")); + CHECK(!json::accept("UnDEfIneD")); + CHECK(!json::accept("uNDEfIneD")); + CHECK(!json::accept("UNDEfIneD")); + CHECK(!json::accept("undeFIneD")); + CHECK(!json::accept("UndeFIneD")); + CHECK(!json::accept("uNdeFIneD")); + CHECK(!json::accept("UNdeFIneD")); + CHECK(!json::accept("unDeFIneD")); + CHECK(!json::accept("UnDeFIneD")); + CHECK(!json::accept("uNDeFIneD")); + CHECK(!json::accept("UNDeFIneD")); + CHECK(!json::accept("undEFIneD")); + CHECK(!json::accept("UndEFIneD")); + CHECK(!json::accept("uNdEFIneD")); + CHECK(!json::accept("UNdEFIneD")); + CHECK(!json::accept("unDEFIneD")); + CHECK(!json::accept("UnDEFIneD")); + CHECK(!json::accept("uNDEFIneD")); + CHECK(!json::accept("UNDEFIneD")); + CHECK(!json::accept("undefiNeD")); + CHECK(!json::accept("UndefiNeD")); + CHECK(!json::accept("uNdefiNeD")); + CHECK(!json::accept("UNdefiNeD")); + CHECK(!json::accept("unDefiNeD")); + CHECK(!json::accept("UnDefiNeD")); + CHECK(!json::accept("uNDefiNeD")); + CHECK(!json::accept("UNDefiNeD")); + CHECK(!json::accept("undEfiNeD")); + CHECK(!json::accept("UndEfiNeD")); + CHECK(!json::accept("uNdEfiNeD")); + CHECK(!json::accept("UNdEfiNeD")); + CHECK(!json::accept("unDEfiNeD")); + CHECK(!json::accept("UnDEfiNeD")); + CHECK(!json::accept("uNDEfiNeD")); + CHECK(!json::accept("UNDEfiNeD")); + CHECK(!json::accept("undeFiNeD")); + CHECK(!json::accept("UndeFiNeD")); + CHECK(!json::accept("uNdeFiNeD")); + CHECK(!json::accept("UNdeFiNeD")); + CHECK(!json::accept("unDeFiNeD")); + CHECK(!json::accept("UnDeFiNeD")); + CHECK(!json::accept("uNDeFiNeD")); + CHECK(!json::accept("UNDeFiNeD")); + 
CHECK(!json::accept("undEFiNeD")); + CHECK(!json::accept("UndEFiNeD")); + CHECK(!json::accept("uNdEFiNeD")); + CHECK(!json::accept("UNdEFiNeD")); + CHECK(!json::accept("unDEFiNeD")); + CHECK(!json::accept("UnDEFiNeD")); + CHECK(!json::accept("uNDEFiNeD")); + CHECK(!json::accept("UNDEFiNeD")); + CHECK(!json::accept("undefINeD")); + CHECK(!json::accept("UndefINeD")); + CHECK(!json::accept("uNdefINeD")); + CHECK(!json::accept("UNdefINeD")); + CHECK(!json::accept("unDefINeD")); + CHECK(!json::accept("UnDefINeD")); + CHECK(!json::accept("uNDefINeD")); + CHECK(!json::accept("UNDefINeD")); + CHECK(!json::accept("undEfINeD")); + CHECK(!json::accept("UndEfINeD")); + CHECK(!json::accept("uNdEfINeD")); + CHECK(!json::accept("UNdEfINeD")); + CHECK(!json::accept("unDEfINeD")); + CHECK(!json::accept("UnDEfINeD")); + CHECK(!json::accept("uNDEfINeD")); + CHECK(!json::accept("UNDEfINeD")); + CHECK(!json::accept("undeFINeD")); + CHECK(!json::accept("UndeFINeD")); + CHECK(!json::accept("uNdeFINeD")); + CHECK(!json::accept("UNdeFINeD")); + CHECK(!json::accept("unDeFINeD")); + CHECK(!json::accept("UnDeFINeD")); + CHECK(!json::accept("uNDeFINeD")); + CHECK(!json::accept("UNDeFINeD")); + CHECK(!json::accept("undEFINeD")); + CHECK(!json::accept("UndEFINeD")); + CHECK(!json::accept("uNdEFINeD")); + CHECK(!json::accept("UNdEFINeD")); + CHECK(!json::accept("unDEFINeD")); + CHECK(!json::accept("UnDEFINeD")); + CHECK(!json::accept("uNDEFINeD")); + CHECK(!json::accept("UNDEFINeD")); + CHECK(!json::accept("undefinED")); + CHECK(!json::accept("UndefinED")); + CHECK(!json::accept("uNdefinED")); + CHECK(!json::accept("UNdefinED")); + CHECK(!json::accept("unDefinED")); + CHECK(!json::accept("UnDefinED")); + CHECK(!json::accept("uNDefinED")); + CHECK(!json::accept("UNDefinED")); + CHECK(!json::accept("undEfinED")); + CHECK(!json::accept("UndEfinED")); + CHECK(!json::accept("uNdEfinED")); + CHECK(!json::accept("UNdEfinED")); + CHECK(!json::accept("unDEfinED")); + CHECK(!json::accept("UnDEfinED")); + 
CHECK(!json::accept("uNDEfinED")); + CHECK(!json::accept("UNDEfinED")); + CHECK(!json::accept("undeFinED")); + CHECK(!json::accept("UndeFinED")); + CHECK(!json::accept("uNdeFinED")); + CHECK(!json::accept("UNdeFinED")); + CHECK(!json::accept("unDeFinED")); + CHECK(!json::accept("UnDeFinED")); + CHECK(!json::accept("uNDeFinED")); + CHECK(!json::accept("UNDeFinED")); + CHECK(!json::accept("undEFinED")); + CHECK(!json::accept("UndEFinED")); + CHECK(!json::accept("uNdEFinED")); + CHECK(!json::accept("UNdEFinED")); + CHECK(!json::accept("unDEFinED")); + CHECK(!json::accept("UnDEFinED")); + CHECK(!json::accept("uNDEFinED")); + CHECK(!json::accept("UNDEFinED")); + CHECK(!json::accept("undefInED")); + CHECK(!json::accept("UndefInED")); + CHECK(!json::accept("uNdefInED")); + CHECK(!json::accept("UNdefInED")); + CHECK(!json::accept("unDefInED")); + CHECK(!json::accept("UnDefInED")); + CHECK(!json::accept("uNDefInED")); + CHECK(!json::accept("UNDefInED")); + CHECK(!json::accept("undEfInED")); + CHECK(!json::accept("UndEfInED")); + CHECK(!json::accept("uNdEfInED")); + CHECK(!json::accept("UNdEfInED")); + CHECK(!json::accept("unDEfInED")); + CHECK(!json::accept("UnDEfInED")); + CHECK(!json::accept("uNDEfInED")); + CHECK(!json::accept("UNDEfInED")); + CHECK(!json::accept("undeFInED")); + CHECK(!json::accept("UndeFInED")); + CHECK(!json::accept("uNdeFInED")); + CHECK(!json::accept("UNdeFInED")); + CHECK(!json::accept("unDeFInED")); + CHECK(!json::accept("UnDeFInED")); + CHECK(!json::accept("uNDeFInED")); + CHECK(!json::accept("UNDeFInED")); + CHECK(!json::accept("undEFInED")); + CHECK(!json::accept("UndEFInED")); + CHECK(!json::accept("uNdEFInED")); + CHECK(!json::accept("UNdEFInED")); + CHECK(!json::accept("unDEFInED")); + CHECK(!json::accept("UnDEFInED")); + CHECK(!json::accept("uNDEFInED")); + CHECK(!json::accept("UNDEFInED")); + CHECK(!json::accept("undefiNED")); + CHECK(!json::accept("UndefiNED")); + CHECK(!json::accept("uNdefiNED")); + CHECK(!json::accept("UNdefiNED")); + 
CHECK(!json::accept("unDefiNED")); + CHECK(!json::accept("UnDefiNED")); + CHECK(!json::accept("uNDefiNED")); + CHECK(!json::accept("UNDefiNED")); + CHECK(!json::accept("undEfiNED")); + CHECK(!json::accept("UndEfiNED")); + CHECK(!json::accept("uNdEfiNED")); + CHECK(!json::accept("UNdEfiNED")); + CHECK(!json::accept("unDEfiNED")); + CHECK(!json::accept("UnDEfiNED")); + CHECK(!json::accept("uNDEfiNED")); + CHECK(!json::accept("UNDEfiNED")); + CHECK(!json::accept("undeFiNED")); + CHECK(!json::accept("UndeFiNED")); + CHECK(!json::accept("uNdeFiNED")); + CHECK(!json::accept("UNdeFiNED")); + CHECK(!json::accept("unDeFiNED")); + CHECK(!json::accept("UnDeFiNED")); + CHECK(!json::accept("uNDeFiNED")); + CHECK(!json::accept("UNDeFiNED")); + CHECK(!json::accept("undEFiNED")); + CHECK(!json::accept("UndEFiNED")); + CHECK(!json::accept("uNdEFiNED")); + CHECK(!json::accept("UNdEFiNED")); + CHECK(!json::accept("unDEFiNED")); + CHECK(!json::accept("UnDEFiNED")); + CHECK(!json::accept("uNDEFiNED")); + CHECK(!json::accept("UNDEFiNED")); + CHECK(!json::accept("undefINED")); + CHECK(!json::accept("UndefINED")); + CHECK(!json::accept("uNdefINED")); + CHECK(!json::accept("UNdefINED")); + CHECK(!json::accept("unDefINED")); + CHECK(!json::accept("UnDefINED")); + CHECK(!json::accept("uNDefINED")); + CHECK(!json::accept("UNDefINED")); + CHECK(!json::accept("undEfINED")); + CHECK(!json::accept("UndEfINED")); + CHECK(!json::accept("uNdEfINED")); + CHECK(!json::accept("UNdEfINED")); + CHECK(!json::accept("unDEfINED")); + CHECK(!json::accept("UnDEfINED")); + CHECK(!json::accept("uNDEfINED")); + CHECK(!json::accept("UNDEfINED")); + CHECK(!json::accept("undeFINED")); + CHECK(!json::accept("UndeFINED")); + CHECK(!json::accept("uNdeFINED")); + CHECK(!json::accept("UNdeFINED")); + CHECK(!json::accept("unDeFINED")); + CHECK(!json::accept("UnDeFINED")); + CHECK(!json::accept("uNDeFINED")); + CHECK(!json::accept("UNDeFINED")); + CHECK(!json::accept("undEFINED")); + CHECK(!json::accept("UndEFINED")); + 
CHECK(!json::accept("uNdEFINED")); + CHECK(!json::accept("UNdEFINED")); + CHECK(!json::accept("unDEFINED")); + CHECK(!json::accept("UnDEFINED")); + CHECK(!json::accept("uNDEFINED")); + CHECK(!json::accept("UNDEFINED")); + } + } + // The literal names NaN and any expression for infinity are not valid json tokens. + // Verify that they are rejected, no matter the capitalisation. + SECTION("illegal literal numbers") + { + SECTION("inf") + { + CHECK(!json::accept("inf")); + CHECK(!json::accept("Inf")); + CHECK(!json::accept("iNf")); + CHECK(!json::accept("INf")); + CHECK(!json::accept("inF")); + CHECK(!json::accept("InF")); + CHECK(!json::accept("iNF")); + CHECK(!json::accept("INF")); + } + SECTION("infinity") + { + CHECK(!json::accept("infinity")); + CHECK(!json::accept("Infinity")); + CHECK(!json::accept("iNfinity")); + CHECK(!json::accept("INfinity")); + CHECK(!json::accept("inFinity")); + CHECK(!json::accept("InFinity")); + CHECK(!json::accept("iNFinity")); + CHECK(!json::accept("INFinity")); + CHECK(!json::accept("infInity")); + CHECK(!json::accept("InfInity")); + CHECK(!json::accept("iNfInity")); + CHECK(!json::accept("INfInity")); + CHECK(!json::accept("inFInity")); + CHECK(!json::accept("InFInity")); + CHECK(!json::accept("iNFInity")); + CHECK(!json::accept("INFInity")); + CHECK(!json::accept("infiNity")); + CHECK(!json::accept("InfiNity")); + CHECK(!json::accept("iNfiNity")); + CHECK(!json::accept("INfiNity")); + CHECK(!json::accept("inFiNity")); + CHECK(!json::accept("InFiNity")); + CHECK(!json::accept("iNFiNity")); + CHECK(!json::accept("INFiNity")); + CHECK(!json::accept("infINity")); + CHECK(!json::accept("InfINity")); + CHECK(!json::accept("iNfINity")); + CHECK(!json::accept("INfINity")); + CHECK(!json::accept("inFINity")); + CHECK(!json::accept("InFINity")); + CHECK(!json::accept("iNFINity")); + CHECK(!json::accept("INFINity")); + CHECK(!json::accept("infinIty")); + CHECK(!json::accept("InfinIty")); + CHECK(!json::accept("iNfinIty")); + 
CHECK(!json::accept("INfinIty")); + CHECK(!json::accept("inFinIty")); + CHECK(!json::accept("InFinIty")); + CHECK(!json::accept("iNFinIty")); + CHECK(!json::accept("INFinIty")); + CHECK(!json::accept("infInIty")); + CHECK(!json::accept("InfInIty")); + CHECK(!json::accept("iNfInIty")); + CHECK(!json::accept("INfInIty")); + CHECK(!json::accept("inFInIty")); + CHECK(!json::accept("InFInIty")); + CHECK(!json::accept("iNFInIty")); + CHECK(!json::accept("INFInIty")); + CHECK(!json::accept("infiNIty")); + CHECK(!json::accept("InfiNIty")); + CHECK(!json::accept("iNfiNIty")); + CHECK(!json::accept("INfiNIty")); + CHECK(!json::accept("inFiNIty")); + CHECK(!json::accept("InFiNIty")); + CHECK(!json::accept("iNFiNIty")); + CHECK(!json::accept("INFiNIty")); + CHECK(!json::accept("infINIty")); + CHECK(!json::accept("InfINIty")); + CHECK(!json::accept("iNfINIty")); + CHECK(!json::accept("INfINIty")); + CHECK(!json::accept("inFINIty")); + CHECK(!json::accept("InFINIty")); + CHECK(!json::accept("iNFINIty")); + CHECK(!json::accept("INFINIty")); + CHECK(!json::accept("infiniTy")); + CHECK(!json::accept("InfiniTy")); + CHECK(!json::accept("iNfiniTy")); + CHECK(!json::accept("INfiniTy")); + CHECK(!json::accept("inFiniTy")); + CHECK(!json::accept("InFiniTy")); + CHECK(!json::accept("iNFiniTy")); + CHECK(!json::accept("INFiniTy")); + CHECK(!json::accept("infIniTy")); + CHECK(!json::accept("InfIniTy")); + CHECK(!json::accept("iNfIniTy")); + CHECK(!json::accept("INfIniTy")); + CHECK(!json::accept("inFIniTy")); + CHECK(!json::accept("InFIniTy")); + CHECK(!json::accept("iNFIniTy")); + CHECK(!json::accept("INFIniTy")); + CHECK(!json::accept("infiNiTy")); + CHECK(!json::accept("InfiNiTy")); + CHECK(!json::accept("iNfiNiTy")); + CHECK(!json::accept("INfiNiTy")); + CHECK(!json::accept("inFiNiTy")); + CHECK(!json::accept("InFiNiTy")); + CHECK(!json::accept("iNFiNiTy")); + CHECK(!json::accept("INFiNiTy")); + CHECK(!json::accept("infINiTy")); + CHECK(!json::accept("InfINiTy")); + 
CHECK(!json::accept("iNfINiTy")); + CHECK(!json::accept("INfINiTy")); + CHECK(!json::accept("inFINiTy")); + CHECK(!json::accept("InFINiTy")); + CHECK(!json::accept("iNFINiTy")); + CHECK(!json::accept("INFINiTy")); + CHECK(!json::accept("infinITy")); + CHECK(!json::accept("InfinITy")); + CHECK(!json::accept("iNfinITy")); + CHECK(!json::accept("INfinITy")); + CHECK(!json::accept("inFinITy")); + CHECK(!json::accept("InFinITy")); + CHECK(!json::accept("iNFinITy")); + CHECK(!json::accept("INFinITy")); + CHECK(!json::accept("infInITy")); + CHECK(!json::accept("InfInITy")); + CHECK(!json::accept("iNfInITy")); + CHECK(!json::accept("INfInITy")); + CHECK(!json::accept("inFInITy")); + CHECK(!json::accept("InFInITy")); + CHECK(!json::accept("iNFInITy")); + CHECK(!json::accept("INFInITy")); + CHECK(!json::accept("infiNITy")); + CHECK(!json::accept("InfiNITy")); + CHECK(!json::accept("iNfiNITy")); + CHECK(!json::accept("INfiNITy")); + CHECK(!json::accept("inFiNITy")); + CHECK(!json::accept("InFiNITy")); + CHECK(!json::accept("iNFiNITy")); + CHECK(!json::accept("INFiNITy")); + CHECK(!json::accept("infINITy")); + CHECK(!json::accept("InfINITy")); + CHECK(!json::accept("iNfINITy")); + CHECK(!json::accept("INfINITy")); + CHECK(!json::accept("inFINITy")); + CHECK(!json::accept("InFINITy")); + CHECK(!json::accept("iNFINITy")); + CHECK(!json::accept("INFINITy")); + CHECK(!json::accept("infinitY")); + CHECK(!json::accept("InfinitY")); + CHECK(!json::accept("iNfinitY")); + CHECK(!json::accept("INfinitY")); + CHECK(!json::accept("inFinitY")); + CHECK(!json::accept("InFinitY")); + CHECK(!json::accept("iNFinitY")); + CHECK(!json::accept("INFinitY")); + CHECK(!json::accept("infInitY")); + CHECK(!json::accept("InfInitY")); + CHECK(!json::accept("iNfInitY")); + CHECK(!json::accept("INfInitY")); + CHECK(!json::accept("inFInitY")); + CHECK(!json::accept("InFInitY")); + CHECK(!json::accept("iNFInitY")); + CHECK(!json::accept("INFInitY")); + CHECK(!json::accept("infiNitY")); + 
CHECK(!json::accept("InfiNitY")); + CHECK(!json::accept("iNfiNitY")); + CHECK(!json::accept("INfiNitY")); + CHECK(!json::accept("inFiNitY")); + CHECK(!json::accept("InFiNitY")); + CHECK(!json::accept("iNFiNitY")); + CHECK(!json::accept("INFiNitY")); + CHECK(!json::accept("infINitY")); + CHECK(!json::accept("InfINitY")); + CHECK(!json::accept("iNfINitY")); + CHECK(!json::accept("INfINitY")); + CHECK(!json::accept("inFINitY")); + CHECK(!json::accept("InFINitY")); + CHECK(!json::accept("iNFINitY")); + CHECK(!json::accept("INFINitY")); + CHECK(!json::accept("infinItY")); + CHECK(!json::accept("InfinItY")); + CHECK(!json::accept("iNfinItY")); + CHECK(!json::accept("INfinItY")); + CHECK(!json::accept("inFinItY")); + CHECK(!json::accept("InFinItY")); + CHECK(!json::accept("iNFinItY")); + CHECK(!json::accept("INFinItY")); + CHECK(!json::accept("infInItY")); + CHECK(!json::accept("InfInItY")); + CHECK(!json::accept("iNfInItY")); + CHECK(!json::accept("INfInItY")); + CHECK(!json::accept("inFInItY")); + CHECK(!json::accept("InFInItY")); + CHECK(!json::accept("iNFInItY")); + CHECK(!json::accept("INFInItY")); + CHECK(!json::accept("infiNItY")); + CHECK(!json::accept("InfiNItY")); + CHECK(!json::accept("iNfiNItY")); + CHECK(!json::accept("INfiNItY")); + CHECK(!json::accept("inFiNItY")); + CHECK(!json::accept("InFiNItY")); + CHECK(!json::accept("iNFiNItY")); + CHECK(!json::accept("INFiNItY")); + CHECK(!json::accept("infINItY")); + CHECK(!json::accept("InfINItY")); + CHECK(!json::accept("iNfINItY")); + CHECK(!json::accept("INfINItY")); + CHECK(!json::accept("inFINItY")); + CHECK(!json::accept("InFINItY")); + CHECK(!json::accept("iNFINItY")); + CHECK(!json::accept("INFINItY")); + CHECK(!json::accept("infiniTY")); + CHECK(!json::accept("InfiniTY")); + CHECK(!json::accept("iNfiniTY")); + CHECK(!json::accept("INfiniTY")); + CHECK(!json::accept("inFiniTY")); + CHECK(!json::accept("InFiniTY")); + CHECK(!json::accept("iNFiniTY")); + CHECK(!json::accept("INFiniTY")); + 
CHECK(!json::accept("infIniTY")); + CHECK(!json::accept("InfIniTY")); + CHECK(!json::accept("iNfIniTY")); + CHECK(!json::accept("INfIniTY")); + CHECK(!json::accept("inFIniTY")); + CHECK(!json::accept("InFIniTY")); + CHECK(!json::accept("iNFIniTY")); + CHECK(!json::accept("INFIniTY")); + CHECK(!json::accept("infiNiTY")); + CHECK(!json::accept("InfiNiTY")); + CHECK(!json::accept("iNfiNiTY")); + CHECK(!json::accept("INfiNiTY")); + CHECK(!json::accept("inFiNiTY")); + CHECK(!json::accept("InFiNiTY")); + CHECK(!json::accept("iNFiNiTY")); + CHECK(!json::accept("INFiNiTY")); + CHECK(!json::accept("infINiTY")); + CHECK(!json::accept("InfINiTY")); + CHECK(!json::accept("iNfINiTY")); + CHECK(!json::accept("INfINiTY")); + CHECK(!json::accept("inFINiTY")); + CHECK(!json::accept("InFINiTY")); + CHECK(!json::accept("iNFINiTY")); + CHECK(!json::accept("INFINiTY")); + CHECK(!json::accept("infinITY")); + CHECK(!json::accept("InfinITY")); + CHECK(!json::accept("iNfinITY")); + CHECK(!json::accept("INfinITY")); + CHECK(!json::accept("inFinITY")); + CHECK(!json::accept("InFinITY")); + CHECK(!json::accept("iNFinITY")); + CHECK(!json::accept("INFinITY")); + CHECK(!json::accept("infInITY")); + CHECK(!json::accept("InfInITY")); + CHECK(!json::accept("iNfInITY")); + CHECK(!json::accept("INfInITY")); + CHECK(!json::accept("inFInITY")); + CHECK(!json::accept("InFInITY")); + CHECK(!json::accept("iNFInITY")); + CHECK(!json::accept("INFInITY")); + CHECK(!json::accept("infiNITY")); + CHECK(!json::accept("InfiNITY")); + CHECK(!json::accept("iNfiNITY")); + CHECK(!json::accept("INfiNITY")); + CHECK(!json::accept("inFiNITY")); + CHECK(!json::accept("InFiNITY")); + CHECK(!json::accept("iNFiNITY")); + CHECK(!json::accept("INFiNITY")); + CHECK(!json::accept("infINITY")); + CHECK(!json::accept("InfINITY")); + CHECK(!json::accept("iNfINITY")); + CHECK(!json::accept("INfINITY")); + CHECK(!json::accept("inFINITY")); + CHECK(!json::accept("InFINITY")); + CHECK(!json::accept("iNFINITY")); + 
CHECK(!json::accept("INFINITY")); + } + SECTION("NaN") + { + CHECK(!json::accept("nan")); + CHECK(!json::accept("Nan")); + CHECK(!json::accept("nAn")); + CHECK(!json::accept("NAn")); + CHECK(!json::accept("naN")); + CHECK(!json::accept("NaN")); + CHECK(!json::accept("nAN")); + CHECK(!json::accept("NAN")); + } + } +} + +TEST_CASE("parse") +{ + SECTION("values") + { + CHECK(json::parse("null")==nullptr); + CHECK(json::parse("true")==true); + CHECK(json::parse("false")==false); + } + SECTION("whitespace") + { + CHECK(json::parse(" false ")==json::parse("false")); + CHECK(json::parse(" false\t")==json::parse("false")); + CHECK(json::parse(" false\n")==json::parse("false")); + CHECK(json::parse(" false\u000d")==json::parse("false")); + CHECK(json::parse("\tfalse ")==json::parse("false")); + CHECK(json::parse("\tfalse\t")==json::parse("false")); + CHECK(json::parse("\tfalse\n")==json::parse("false")); + CHECK(json::parse("\tfalse\u000d")==json::parse("false")); + CHECK(json::parse("\nfalse ")==json::parse("false")); + CHECK(json::parse("\nfalse\t")==json::parse("false")); + CHECK(json::parse("\nfalse\n")==json::parse("false")); + CHECK(json::parse("\nfalse\u000d")==json::parse("false")); + CHECK(json::parse("\u000dfalse ")==json::parse("false")); + CHECK(json::parse("\u000dfalse\t")==json::parse("false")); + CHECK(json::parse("\u000dfalse\n")==json::parse("false")); + CHECK(json::parse("\u000dfalse\u000d")==json::parse("false")); + CHECK(json::parse(" null ")==json::parse("null")); + CHECK(json::parse(" null\t")==json::parse("null")); + CHECK(json::parse(" null\n")==json::parse("null")); + CHECK(json::parse(" null\u000d")==json::parse("null")); + CHECK(json::parse("\tnull ")==json::parse("null")); + CHECK(json::parse("\tnull\t")==json::parse("null")); + CHECK(json::parse("\tnull\n")==json::parse("null")); + CHECK(json::parse("\tnull\u000d")==json::parse("null")); + CHECK(json::parse("\nnull ")==json::parse("null")); + CHECK(json::parse("\nnull\t")==json::parse("null")); 
+ CHECK(json::parse("\nnull\n")==json::parse("null")); + CHECK(json::parse("\nnull\u000d")==json::parse("null")); + CHECK(json::parse("\u000dnull ")==json::parse("null")); + CHECK(json::parse("\u000dnull\t")==json::parse("null")); + CHECK(json::parse("\u000dnull\n")==json::parse("null")); + CHECK(json::parse("\u000dnull\u000d")==json::parse("null")); + CHECK(json::parse(" true ")==json::parse("true")); + CHECK(json::parse(" true\t")==json::parse("true")); + CHECK(json::parse(" true\n")==json::parse("true")); + CHECK(json::parse(" true\u000d")==json::parse("true")); + CHECK(json::parse("\ttrue ")==json::parse("true")); + CHECK(json::parse("\ttrue\t")==json::parse("true")); + CHECK(json::parse("\ttrue\n")==json::parse("true")); + CHECK(json::parse("\ttrue\u000d")==json::parse("true")); + CHECK(json::parse("\ntrue ")==json::parse("true")); + CHECK(json::parse("\ntrue\t")==json::parse("true")); + CHECK(json::parse("\ntrue\n")==json::parse("true")); + CHECK(json::parse("\ntrue\u000d")==json::parse("true")); + CHECK(json::parse("\u000dtrue ")==json::parse("true")); + CHECK(json::parse("\u000dtrue\t")==json::parse("true")); + CHECK(json::parse("\u000dtrue\n")==json::parse("true")); + CHECK(json::parse("\u000dtrue\u000d")==json::parse("true")); + } + SECTION("capitalisation") + { + SECTION("true") + { + CHECK_THROWS_AS(parser_helper("True"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("tRue"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("trUe"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("truE"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("TRue"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("TrUe"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("TruE"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("tRUe"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("tRuE"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("trUE"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("TRUe"),json::parse_error&); + 
CHECK_THROWS_AS(parser_helper("TRuE"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("TrUE"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("tRUE"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("TRUE"),json::parse_error&); + } + SECTION("null") + { + CHECK_THROWS_AS(parser_helper("Null"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("nUll"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("nuLl"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("nulL"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("NUll"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("NuLl"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("NulL"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("nULl"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("nUlL"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("nuLL"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("NULl"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("NUlL"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("NuLL"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("nULL"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("NULL"),json::parse_error&); + } + SECTION("false") + { + CHECK_THROWS_AS(parser_helper("False"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("fAlse"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("FAlse"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("faLse"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("FaLse"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("fALse"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("FALse"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("falSe"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("FalSe"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("fAlSe"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("FAlSe"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("faLSe"),json::parse_error&); + 
CHECK_THROWS_AS(parser_helper("FaLSe"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("fALSe"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("FALSe"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("falsE"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("FalsE"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("fAlsE"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("FAlsE"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("faLsE"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("FaLsE"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("fALsE"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("FALsE"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("falSE"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("FalSE"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("fAlSE"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("FAlSE"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("faLSE"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("FaLSE"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("fALSE"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("FALSE"),json::parse_error&); + } + } + // There are no other literal names that are valid json tokens. + // Verify that some common literal names are rejected, no matter what letter is capital. 
+ SECTION("illegal literals") + { + SECTION("nil") + { + CHECK_THROWS_AS(parser_helper("nil"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("Nil"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("nIl"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("NIl"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("niL"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("NiL"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("nIL"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("NIL"),json::parse_error&); + } + SECTION("truth") + { + CHECK_THROWS_AS(parser_helper("truth"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("Truth"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("tRuth"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("TRuth"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("trUth"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("TrUth"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("tRUth"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("TRUth"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("truTh"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("TruTh"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("tRuTh"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("TRuTh"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("trUTh"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("TrUTh"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("tRUTh"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("TRUTh"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("trutH"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("TrutH"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("tRutH"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("TRutH"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("trUtH"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("TrUtH"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("tRUtH"),json::parse_error&); + 
CHECK_THROWS_AS(parser_helper("TRUtH"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("truTH"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("TruTH"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("tRuTH"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("TRuTH"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("trUTH"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("TrUTH"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("tRUTH"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("TRUTH"),json::parse_error&); + } + SECTION("const") + { + CHECK_THROWS_AS(parser_helper("const"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("Const"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("cOnst"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("COnst"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("coNst"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("CoNst"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("cONst"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("CONst"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("conSt"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("ConSt"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("cOnSt"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("COnSt"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("coNSt"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("CoNSt"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("cONSt"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("CONSt"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("consT"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("ConsT"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("cOnsT"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("COnsT"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("coNsT"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("CoNsT"),json::parse_error&); + 
CHECK_THROWS_AS(parser_helper("cONsT"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("CONsT"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("conST"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("ConST"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("cOnST"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("COnST"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("coNST"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("CoNST"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("cONST"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("CONST"),json::parse_error&); + } + SECTION("none") + { + CHECK_THROWS_AS(parser_helper("none"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("None"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("nOne"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("NOne"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("noNe"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("NoNe"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("nONe"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("NONe"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("nonE"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("NonE"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("nOnE"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("NOnE"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("noNE"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("NoNE"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("nONE"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("NONE"),json::parse_error&); + } + SECTION("self") + { + CHECK_THROWS_AS(parser_helper("self"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("Self"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("sElf"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("SElf"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("seLf"),json::parse_error&); + 
CHECK_THROWS_AS(parser_helper("SeLf"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("sELf"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("SELf"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("selF"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("SelF"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("sElF"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("SElF"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("seLF"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("SeLF"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("sELF"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("SELF"),json::parse_error&); + } + SECTION("super") + { + CHECK_THROWS_AS(parser_helper("super"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("Super"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("sUper"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("SUper"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("suPer"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("SuPer"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("sUPer"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("SUPer"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("supEr"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("SupEr"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("sUpEr"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("SUpEr"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("suPEr"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("SuPEr"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("sUPEr"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("SUPEr"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("supeR"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("SupeR"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("sUpeR"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("SUpeR"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("suPeR"),json::parse_error&); + 
CHECK_THROWS_AS(parser_helper("SuPeR"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("sUPeR"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("SUPeR"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("supER"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("SupER"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("sUpER"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("SUpER"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("suPER"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("SuPER"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("sUPER"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("SUPER"),json::parse_error&); + } + SECTION("this") + { + CHECK_THROWS_AS(parser_helper("this"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("This"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("tHis"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("THis"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("thIs"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("ThIs"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("tHIs"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("THIs"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("thiS"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("ThiS"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("tHiS"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("THiS"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("thIS"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("ThIS"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("tHIS"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("THIS"),json::parse_error&); + } + SECTION("undefined") + { + CHECK_THROWS_AS(parser_helper("undefined"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("Undefined"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdefined"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdefined"),json::parse_error&); + 
CHECK_THROWS_AS(parser_helper("unDefined"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDefined"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDefined"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDefined"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undEfined"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndEfined"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdEfined"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdEfined"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDEfined"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDEfined"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDEfined"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDEfined"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undeFined"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndeFined"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdeFined"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdeFined"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDeFined"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDeFined"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDeFined"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDeFined"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undEFined"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndEFined"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdEFined"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdEFined"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDEFined"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDEFined"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDEFined"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDEFined"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undefIned"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndefIned"),json::parse_error&); + 
CHECK_THROWS_AS(parser_helper("uNdefIned"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdefIned"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDefIned"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDefIned"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDefIned"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDefIned"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undEfIned"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndEfIned"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdEfIned"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdEfIned"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDEfIned"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDEfIned"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDEfIned"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDEfIned"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undeFIned"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndeFIned"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdeFIned"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdeFIned"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDeFIned"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDeFIned"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDeFIned"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDeFIned"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undEFIned"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndEFIned"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdEFIned"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdEFIned"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDEFIned"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDEFIned"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDEFIned"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDEFIned"),json::parse_error&); + 
CHECK_THROWS_AS(parser_helper("undefiNed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndefiNed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdefiNed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdefiNed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDefiNed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDefiNed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDefiNed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDefiNed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undEfiNed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndEfiNed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdEfiNed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdEfiNed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDEfiNed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDEfiNed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDEfiNed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDEfiNed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undeFiNed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndeFiNed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdeFiNed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdeFiNed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDeFiNed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDeFiNed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDeFiNed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDeFiNed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undEFiNed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndEFiNed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdEFiNed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdEFiNed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDEFiNed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDEFiNed"),json::parse_error&); + 
CHECK_THROWS_AS(parser_helper("uNDEFiNed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDEFiNed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undefINed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndefINed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdefINed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdefINed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDefINed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDefINed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDefINed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDefINed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undEfINed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndEfINed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdEfINed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdEfINed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDEfINed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDEfINed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDEfINed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDEfINed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undeFINed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndeFINed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdeFINed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdeFINed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDeFINed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDeFINed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDeFINed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDeFINed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undEFINed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndEFINed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdEFINed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdEFINed"),json::parse_error&); + 
CHECK_THROWS_AS(parser_helper("unDEFINed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDEFINed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDEFINed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDEFINed"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undefinEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndefinEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdefinEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdefinEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDefinEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDefinEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDefinEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDefinEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undEfinEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndEfinEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdEfinEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdEfinEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDEfinEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDEfinEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDEfinEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDEfinEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undeFinEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndeFinEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdeFinEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdeFinEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDeFinEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDeFinEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDeFinEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDeFinEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undEFinEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndEFinEd"),json::parse_error&); + 
CHECK_THROWS_AS(parser_helper("uNdEFinEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdEFinEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDEFinEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDEFinEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDEFinEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDEFinEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undefInEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndefInEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdefInEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdefInEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDefInEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDefInEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDefInEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDefInEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undEfInEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndEfInEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdEfInEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdEfInEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDEfInEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDEfInEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDEfInEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDEfInEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undeFInEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndeFInEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdeFInEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdeFInEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDeFInEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDeFInEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDeFInEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDeFInEd"),json::parse_error&); + 
CHECK_THROWS_AS(parser_helper("undEFInEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndEFInEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdEFInEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdEFInEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDEFInEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDEFInEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDEFInEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDEFInEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undefiNEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndefiNEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdefiNEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdefiNEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDefiNEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDefiNEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDefiNEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDefiNEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undEfiNEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndEfiNEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdEfiNEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdEfiNEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDEfiNEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDEfiNEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDEfiNEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDEfiNEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undeFiNEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndeFiNEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdeFiNEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdeFiNEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDeFiNEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDeFiNEd"),json::parse_error&); + 
CHECK_THROWS_AS(parser_helper("uNDeFiNEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDeFiNEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undEFiNEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndEFiNEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdEFiNEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdEFiNEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDEFiNEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDEFiNEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDEFiNEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDEFiNEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undefINEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndefINEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdefINEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdefINEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDefINEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDefINEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDefINEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDefINEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undEfINEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndEfINEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdEfINEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdEfINEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDEfINEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDEfINEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDEfINEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDEfINEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undeFINEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndeFINEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdeFINEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdeFINEd"),json::parse_error&); + 
CHECK_THROWS_AS(parser_helper("unDeFINEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDeFINEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDeFINEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDeFINEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undEFINEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndEFINEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdEFINEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdEFINEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDEFINEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDEFINEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDEFINEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDEFINEd"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undefineD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndefineD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdefineD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdefineD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDefineD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDefineD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDefineD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDefineD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undEfineD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndEfineD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdEfineD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdEfineD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDEfineD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDEfineD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDEfineD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDEfineD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undeFineD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndeFineD"),json::parse_error&); + 
CHECK_THROWS_AS(parser_helper("uNdeFineD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdeFineD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDeFineD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDeFineD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDeFineD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDeFineD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undEFineD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndEFineD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdEFineD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdEFineD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDEFineD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDEFineD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDEFineD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDEFineD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undefIneD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndefIneD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdefIneD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdefIneD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDefIneD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDefIneD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDefIneD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDefIneD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undEfIneD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndEfIneD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdEfIneD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdEfIneD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDEfIneD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDEfIneD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDEfIneD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDEfIneD"),json::parse_error&); + 
CHECK_THROWS_AS(parser_helper("undeFIneD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndeFIneD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdeFIneD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdeFIneD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDeFIneD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDeFIneD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDeFIneD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDeFIneD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undEFIneD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndEFIneD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdEFIneD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdEFIneD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDEFIneD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDEFIneD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDEFIneD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDEFIneD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undefiNeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndefiNeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdefiNeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdefiNeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDefiNeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDefiNeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDefiNeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDefiNeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undEfiNeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndEfiNeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdEfiNeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdEfiNeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDEfiNeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDEfiNeD"),json::parse_error&); + 
CHECK_THROWS_AS(parser_helper("uNDEfiNeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDEfiNeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undeFiNeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndeFiNeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdeFiNeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdeFiNeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDeFiNeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDeFiNeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDeFiNeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDeFiNeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undEFiNeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndEFiNeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdEFiNeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdEFiNeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDEFiNeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDEFiNeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDEFiNeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDEFiNeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undefINeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndefINeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdefINeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdefINeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDefINeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDefINeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDefINeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDefINeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undEfINeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndEfINeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdEfINeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdEfINeD"),json::parse_error&); + 
CHECK_THROWS_AS(parser_helper("unDEfINeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDEfINeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDEfINeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDEfINeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undeFINeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndeFINeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdeFINeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdeFINeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDeFINeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDeFINeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDeFINeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDeFINeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undEFINeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndEFINeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdEFINeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdEFINeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDEFINeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDEFINeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDEFINeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDEFINeD"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undefinED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndefinED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdefinED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdefinED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDefinED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDefinED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDefinED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDefinED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undEfinED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndEfinED"),json::parse_error&); + 
CHECK_THROWS_AS(parser_helper("uNdEfinED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdEfinED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDEfinED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDEfinED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDEfinED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDEfinED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undeFinED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndeFinED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdeFinED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdeFinED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDeFinED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDeFinED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDeFinED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDeFinED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undEFinED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndEFinED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdEFinED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdEFinED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDEFinED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDEFinED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDEFinED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDEFinED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undefInED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndefInED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdefInED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdefInED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDefInED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDefInED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDefInED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDefInED"),json::parse_error&); + 
CHECK_THROWS_AS(parser_helper("undEfInED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndEfInED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdEfInED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdEfInED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDEfInED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDEfInED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDEfInED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDEfInED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undeFInED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndeFInED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdeFInED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdeFInED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDeFInED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDeFInED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDeFInED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDeFInED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undEFInED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndEFInED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdEFInED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdEFInED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDEFInED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDEFInED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDEFInED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDEFInED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undefiNED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndefiNED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdefiNED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdefiNED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDefiNED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDefiNED"),json::parse_error&); + 
CHECK_THROWS_AS(parser_helper("uNDefiNED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDefiNED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undEfiNED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndEfiNED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdEfiNED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdEfiNED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDEfiNED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDEfiNED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDEfiNED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDEfiNED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undeFiNED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndeFiNED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdeFiNED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdeFiNED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDeFiNED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDeFiNED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDeFiNED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDeFiNED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undEFiNED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndEFiNED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdEFiNED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdEFiNED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDEFiNED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDEFiNED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDEFiNED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDEFiNED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undefINED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndefINED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdefINED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdefINED"),json::parse_error&); + 
CHECK_THROWS_AS(parser_helper("unDefINED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDefINED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDefINED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDefINED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undEfINED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndEfINED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdEfINED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdEfINED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDEfINED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDEfINED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDEfINED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDEfINED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undeFINED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndeFINED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdeFINED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdeFINED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDeFINED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDeFINED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDeFINED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDeFINED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("undEFINED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UndEFINED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNdEFINED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNdEFINED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("unDEFINED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UnDEFINED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("uNDEFINED"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("UNDEFINED"),json::parse_error&); + } + } + // The literal names NaN and any expression for infinity are not valid json tokens. 
+ // Verify that they are rejected, no matter the capitalisation. + SECTION("illegal literal numbers") + { + SECTION("inf") + { + CHECK_THROWS_AS(parser_helper("inf"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("Inf"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNf"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INf"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("inF"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InF"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNF"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INF"),json::parse_error&); + } + SECTION("infinity") + { + CHECK_THROWS_AS(parser_helper("infinity"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("Infinity"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNfinity"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INfinity"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("inFinity"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InFinity"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNFinity"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INFinity"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("infInity"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InfInity"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNfInity"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INfInity"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("inFInity"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InFInity"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNFInity"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INFInity"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("infiNity"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InfiNity"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNfiNity"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INfiNity"),json::parse_error&); + 
CHECK_THROWS_AS(parser_helper("inFiNity"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InFiNity"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNFiNity"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INFiNity"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("infINity"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InfINity"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNfINity"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INfINity"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("inFINity"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InFINity"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNFINity"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INFINity"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("infinIty"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InfinIty"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNfinIty"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INfinIty"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("inFinIty"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InFinIty"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNFinIty"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INFinIty"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("infInIty"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InfInIty"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNfInIty"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INfInIty"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("inFInIty"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InFInIty"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNFInIty"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INFInIty"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("infiNIty"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InfiNIty"),json::parse_error&); + 
CHECK_THROWS_AS(parser_helper("iNfiNIty"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INfiNIty"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("inFiNIty"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InFiNIty"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNFiNIty"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INFiNIty"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("infINIty"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InfINIty"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNfINIty"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INfINIty"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("inFINIty"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InFINIty"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNFINIty"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INFINIty"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("infiniTy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InfiniTy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNfiniTy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INfiniTy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("inFiniTy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InFiniTy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNFiniTy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INFiniTy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("infIniTy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InfIniTy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNfIniTy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INfIniTy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("inFIniTy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InFIniTy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNFIniTy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INFIniTy"),json::parse_error&); + 
CHECK_THROWS_AS(parser_helper("infiNiTy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InfiNiTy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNfiNiTy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INfiNiTy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("inFiNiTy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InFiNiTy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNFiNiTy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INFiNiTy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("infINiTy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InfINiTy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNfINiTy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INfINiTy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("inFINiTy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InFINiTy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNFINiTy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INFINiTy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("infinITy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InfinITy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNfinITy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INfinITy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("inFinITy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InFinITy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNFinITy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INFinITy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("infInITy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InfInITy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNfInITy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INfInITy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("inFInITy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InFInITy"),json::parse_error&); + 
CHECK_THROWS_AS(parser_helper("iNFInITy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INFInITy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("infiNITy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InfiNITy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNfiNITy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INfiNITy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("inFiNITy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InFiNITy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNFiNITy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INFiNITy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("infINITy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InfINITy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNfINITy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INfINITy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("inFINITy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InFINITy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNFINITy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INFINITy"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("infinitY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InfinitY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNfinitY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INfinitY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("inFinitY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InFinitY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNFinitY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INFinitY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("infInitY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InfInitY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNfInitY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INfInitY"),json::parse_error&); + 
CHECK_THROWS_AS(parser_helper("inFInitY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InFInitY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNFInitY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INFInitY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("infiNitY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InfiNitY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNfiNitY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INfiNitY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("inFiNitY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InFiNitY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNFiNitY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INFiNitY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("infINitY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InfINitY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNfINitY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INfINitY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("inFINitY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InFINitY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNFINitY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INFINitY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("infinItY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InfinItY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNfinItY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INfinItY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("inFinItY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InFinItY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNFinItY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INFinItY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("infInItY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InfInItY"),json::parse_error&); + 
CHECK_THROWS_AS(parser_helper("iNfInItY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INfInItY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("inFInItY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InFInItY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNFInItY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INFInItY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("infiNItY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InfiNItY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNfiNItY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INfiNItY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("inFiNItY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InFiNItY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNFiNItY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INFiNItY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("infINItY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InfINItY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNfINItY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INfINItY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("inFINItY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InFINItY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNFINItY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INFINItY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("infiniTY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InfiniTY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNfiniTY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INfiniTY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("inFiniTY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InFiniTY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNFiniTY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INFiniTY"),json::parse_error&); + 
CHECK_THROWS_AS(parser_helper("infIniTY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InfIniTY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNfIniTY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INfIniTY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("inFIniTY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InFIniTY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNFIniTY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INFIniTY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("infiNiTY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InfiNiTY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNfiNiTY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INfiNiTY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("inFiNiTY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InFiNiTY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNFiNiTY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INFiNiTY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("infINiTY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InfINiTY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNfINiTY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INfINiTY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("inFINiTY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InFINiTY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNFINiTY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INFINiTY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("infinITY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InfinITY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNfinITY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INfinITY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("inFinITY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InFinITY"),json::parse_error&); + 
CHECK_THROWS_AS(parser_helper("iNFinITY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INFinITY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("infInITY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InfInITY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNfInITY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INfInITY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("inFInITY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InFInITY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNFInITY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INFInITY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("infiNITY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InfiNITY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNfiNITY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INfiNITY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("inFiNITY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InFiNITY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNFiNITY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INFiNITY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("infINITY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InfINITY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNfINITY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INfINITY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("inFINITY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("InFINITY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("iNFINITY"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("INFINITY"),json::parse_error&); + } + SECTION("NaN") + { + CHECK_THROWS_AS(parser_helper("nan"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("Nan"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("nAn"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("NAn"),json::parse_error&); + 
+ CHECK_THROWS_AS(parser_helper("naN"),json::parse_error&);
+ CHECK_THROWS_AS(parser_helper("NaN"),json::parse_error&);
+ CHECK_THROWS_AS(parser_helper("nAN"),json::parse_error&);
+ CHECK_THROWS_AS(parser_helper("NAN"),json::parse_error&);
+ }
+ }
+}
diff --git a/TSF/tests/unit-numbers.cpp b/TSF/tests/unit-numbers.cpp
new file mode 100644
index 0000000000..f32b7fb4f1
--- /dev/null
+++ b/TSF/tests/unit-numbers.cpp
@@ -0,0 +1,538 @@
+
+#include "doctest_compatibility.h"
+
+#include <nlohmann/json.hpp>
+using nlohmann::json;
+
+namespace
+{
+void parser_helper(const std::string& input);
+
+void parser_helper(const std::string& input){
+ const json temp = json::parse(input);
+}
+} //namespace
+
+TEST_CASE("accept")
+{
+ SECTION("exponents")
+ {
+ // The only valid exponents are U+0065 and U+0045.
+ // Their look-alikes, in particular U+0425 and U+0436 are forbidden.
+ SECTION("U+0425")
+ {
+ CHECK(!json::accept("0\u0425123"));
+ CHECK(!json::accept("123\u04250"));
+ CHECK(!json::accept("0.123\u0425123"));
+ CHECK(!json::accept("1.23\u0425123"));
+ CHECK(!json::accept("1.23\u04250"));
+ }
+ SECTION("U+0436")
+ {
+ CHECK(!json::accept("0\u0436123"));
+ CHECK(!json::accept("123\u04360"));
+ CHECK(!json::accept("0.123\u0436123"));
+ CHECK(!json::accept("1.23\u0436123"));
+ CHECK(!json::accept("1.23\u04360"));
+ }
+ // Leading zeroes for exponents are allowed.
+ SECTION("leading zeroes")
+ {
+ CHECK(json::accept("42\u004507"));
+ CHECK(json::accept("42\u004500000000000007"));
+ CHECK(json::accept("42\u00450000000000"));
+ CHECK(json::accept("42\u006507"));
+ CHECK(json::accept("42\u006500000000000007"));
+ CHECK(json::accept("42\u00650000000000"));
+ }
+ }
+ // The only valid operators are plus and minus at the beginning of a number.
+ // All other operators, i.e. brackets, *, /, +, -, !, comma are illegal. 
+ SECTION("operators")
+ {
+ SECTION("plus")
+ {
+ CHECK(!json::accept("1+1"));
+ CHECK(!json::accept("0.1+1"));
+ CHECK(!json::accept("0.1+1.0"));
+ CHECK(!json::accept("0+0.1"));
+ CHECK(!json::accept("0.1\u00452+1"));
+ CHECK(!json::accept("0.1\u00652+1"));
+ CHECK(!json::accept("1+0.1\u00652"));
+ CHECK(!json::accept("3.5+0.1\u00652"));
+ }
+ SECTION("minus")
+ {
+ CHECK(!json::accept("1-1"));
+ CHECK(!json::accept("0.1-1"));
+ CHECK(!json::accept("0.1-1.0"));
+ CHECK(!json::accept("0-0.1"));
+ CHECK(!json::accept("0.1\u00452-1"));
+ CHECK(!json::accept("0.1\u00652-1"));
+ CHECK(!json::accept("1-0.1\u00652"));
+ CHECK(!json::accept("3.5-0.1\u00652"));
+ }
+ SECTION("brackets")
+ {
+ CHECK(!json::accept("(145)"));
+ CHECK(!json::accept("(34.32874)"));
+ CHECK(!json::accept("42\u0045(134)"));
+ CHECK(!json::accept("42\u0065(134)"));
+ }
+ SECTION("factorial")
+ {
+ CHECK(!json::accept("13!"));
+ }
+ SECTION("multiplication")
+ {
+ CHECK(!json::accept("1*1"));
+ CHECK(!json::accept("1.45*5"));
+ CHECK(!json::accept("154*23.76"));
+ CHECK(!json::accept("1\u004545*3"));
+ CHECK(!json::accept("1\u006545*3"));
+ CHECK(!json::accept("3*6\u004512"));
+ CHECK(!json::accept("3*6\u006512"));
+ }
+ SECTION("division")
+ {
+ CHECK(!json::accept("0/0"));
+ CHECK(!json::accept("1.45/5"));
+ CHECK(!json::accept("154/23.76"));
+ CHECK(!json::accept("1\u004545/3"));
+ CHECK(!json::accept("1\u006545/3"));
+ CHECK(!json::accept("7/6\u004512"));
+ CHECK(!json::accept("7/6\u006512"));
+ }
+ SECTION("comma")
+ {
+ CHECK(!json::accept("0,0"));
+ CHECK(!json::accept("100,000"));
+ CHECK(!json::accept("1,000.23"));
+ }
+ }
+ // Leading and trailing whitespace are ignored; whitespace in between a number is illegal.
+ // See also nst_json_testsuite2/test_parsing/n_number_1_000.json for a single test of this
+ // circumstance. 
+ SECTION("whitespace") + { + SECTION("space") + { + CHECK(!json::accept("0 1")); + CHECK(!json::accept("1234 567")); + CHECK(!json::accept("123.456 789")); + CHECK(!json::accept("123\u00450 0")); + CHECK(!json::accept("123\u0045132 94")); + CHECK(!json::accept("1.23\u00450 0")); + CHECK(!json::accept("1.23\u0045132 94")); + CHECK(!json::accept("123\u00650 0")); + CHECK(!json::accept("123\u0065132 94")); + CHECK(!json::accept("1.23\u00650 0")); + CHECK(!json::accept("1.23\u0065132 94")); + } + SECTION("tab") + { + CHECK(!json::accept("0\t1")); + CHECK(!json::accept("1234\t567")); + CHECK(!json::accept("123.456\t789")); + CHECK(!json::accept("123\u00450\t0")); + CHECK(!json::accept("123\u0045132\t94")); + CHECK(!json::accept("1.23\u00450\t0")); + CHECK(!json::accept("1.23\u0045132\t94")); + CHECK(!json::accept("123\u00650\t0")); + CHECK(!json::accept("123\u0065132\t94")); + CHECK(!json::accept("1.23\u00650\t0")); + CHECK(!json::accept("1.23\u0065132\t94")); + } + SECTION("new line") + { + CHECK(!json::accept("0\n1")); + CHECK(!json::accept("1234\n567")); + CHECK(!json::accept("123.456\n789")); + CHECK(!json::accept("123\u00450\n0")); + CHECK(!json::accept("123\u0045132\n94")); + CHECK(!json::accept("1.23\u00450\n0")); + CHECK(!json::accept("1.23\u0045132\n94")); + CHECK(!json::accept("123\u00650\n0")); + CHECK(!json::accept("123\u0065132\n94")); + CHECK(!json::accept("1.23\u00650\n0")); + CHECK(!json::accept("1.23\u0065132\n94")); + } + SECTION("Carriage return") + { + CHECK(!json::accept("0\u000d1")); + CHECK(!json::accept("1234\u000d567")); + CHECK(!json::accept("123.456\u000d789")); + CHECK(!json::accept("123\u00450\u000d0")); + CHECK(!json::accept("123\u0045132\u000d94")); + CHECK(!json::accept("1.23\u00450\u000d0")); + CHECK(!json::accept("1.23\u0045132\u000d94")); + CHECK(!json::accept("123\u00650\u000d0")); + CHECK(!json::accept("123\u0065132\u000d94")); + CHECK(!json::accept("1.23\u00650\u000d0")); + CHECK(!json::accept("1.23\u0065132\u000d94")); + } + // 
See also nst_json_testsuite2/test_parsing/y_number_after_space.json + SECTION("Leading and tailing") + { + SECTION("space") + { + CHECK(json::accept(" 123")); + CHECK(json::accept(" 123.23672")); + CHECK(json::accept(" 12\u004523")); + CHECK(json::accept(" 92\u006532")); + CHECK(json::accept(" 123")); + CHECK(json::accept(" 123.23672")); + CHECK(json::accept(" 12\u004523")); + CHECK(json::accept(" 92\u006532")); + CHECK(json::accept("123 ")); + CHECK(json::accept("123.23672 ")); + CHECK(json::accept("12\u004523 ")); + CHECK(json::accept("92\u006532 ")); + CHECK(json::accept("123 ")); + CHECK(json::accept("123.23672 ")); + CHECK(json::accept("12\u004523 ")); + CHECK(json::accept("92\u006532 ")); + } + SECTION("tab") + { + CHECK(json::accept("\t123")); + CHECK(json::accept("\t123.23672")); + CHECK(json::accept("\t12\u004523")); + CHECK(json::accept("\t92\u006532")); + CHECK(json::accept("123\t")); + CHECK(json::accept("123.23672\t")); + CHECK(json::accept("12\u004523\t")); + CHECK(json::accept("92\u006532\t")); + CHECK(json::accept("\t\t\t\t\t\t\t123")); + CHECK(json::accept("\t\t\t\t\t\t\t\t123.23672")); + CHECK(json::accept("\t\t\t\t\t\t\t\t\t12\u004523")); + CHECK(json::accept("\t\t\t\t\t\t92\u006532")); + CHECK(json::accept("123\t\t\t\t\t\t")); + CHECK(json::accept("123.23672\t\t\t\t\t")); + CHECK(json::accept("12\u004523\t\t\t\t")); + CHECK(json::accept("92\u006532\t\t\t\t\t\t")); + } + SECTION("newline") + { + CHECK(json::accept("\n123")); + CHECK(json::accept("\n123.23672")); + CHECK(json::accept("\n12\u004523")); + CHECK(json::accept("\n92\u006532")); + CHECK(json::accept("123\n")); + CHECK(json::accept("123.23672\n")); + CHECK(json::accept("12\u004523\n")); + CHECK(json::accept("92\u006532\n")); + CHECK(json::accept("\n\n\n\n\n\n\n123")); + CHECK(json::accept("\n\n\n\n\n\n\n\n123.23672")); + CHECK(json::accept("\n\n\n\n\n\n\n\n12\u004523")); + CHECK(json::accept("\n\n\n\n\n\n92\u006532")); + CHECK(json::accept("123\n\n\n\n\n\n")); + 
CHECK(json::accept("123.23672\n\n\n\n\n")); + CHECK(json::accept("12\u004523\n\n\n\n")); + CHECK(json::accept("92\u006532\n\n\n\n\n\n")); + } + SECTION("Carriage return") + { + CHECK(json::accept("\u000d123")); + CHECK(json::accept("\u000d123.23672")); + CHECK(json::accept("\u000d12\u004523")); + CHECK(json::accept("\u000d92\u006532")); + CHECK(json::accept("123\u000d")); + CHECK(json::accept("123.23672\u000d")); + CHECK(json::accept("12\u004523\u000d")); + CHECK(json::accept("92\u006532\u000d")); + CHECK(json::accept("\u000d\u000d\u000d\u000d\u000d\u000d\u000d123")); + CHECK(json::accept("\u000d\u000d\u000d\u000d\u000d\u000d\u000d\u000d123.23672")); + CHECK(json::accept("\u000d\u000d\u000d\u000d\u000d\u000d\u000d\u000d\u000d12\u004523")); + CHECK(json::accept("\u000d\u000d\u000d\u000d\u000d\u000d92\u006532")); + CHECK(json::accept("123\u000d\u000d\u000d\u000d\u000d\u000d")); + CHECK(json::accept("123.23672\u000d\u000d\u000d\u000d\u000d")); + CHECK(json::accept("12\u004523\u000d\u000d\u000d\u000d")); + CHECK(json::accept("92\u006532\u000d\u000d\u000d\u000d\u000d\u000d")); + } + SECTION("Mixed") + { + CHECK(json::accept("\u000d\t\n \t\n \u000d\u000d\t\t124234 \n\n\n\n\u000d\u000d\t\t")); + CHECK(json::accept("\u000d\t\n \t\n \u000d\u000d\t\t1242.34 \n\n\n\n\u000d\u000d\t\t")); + CHECK(json::accept("\u000d\t\n \t\n \u000d\u000d\t\t1242\u004534 \n\n\n\n\u000d\u000d\t\t")); + CHECK(json::accept("\u000d\t\n \t\n \u000d\u000d\t\t1242\u006534 \n\n\n\n\u000d\u000d\t\t")); + } + } + } + // Recall that unit-class_parser.cpp parser class:accept:number:invalid numbers checks 01; + // nst_json_testsuite2/test_parsing/n_number_-01.json checks -01. 
+ SECTION("Leading zeroes") + { + CHECK(!json::accept("01.0")); + CHECK(!json::accept("05\u004542")); + CHECK(!json::accept("05\u006542")); + CHECK(!json::accept("00")); + CHECK(!json::accept("000000000000000000000000000000000000000000000000")); + CHECK(!json::accept("0000000000000000000000000000000000042")); + CHECK(!json::accept("-01.0")); + CHECK(!json::accept("-05\u004542")); + CHECK(!json::accept("-05\u006542")); + CHECK(!json::accept("-00")); + CHECK(!json::accept("-000000000000000000000000000000000000000000000000")); + CHECK(!json::accept("-0000000000000000000000000000000000042")); + } + // According to RFC8259, only numbers in base ten are allowed. For bases lower than ten, this can + // not be checked using the numerical representation and checking the grammar, assuming that the + // standard digits are used; instead, this is the job of the parser. + // For bases exceeding ten, this can be checked. In particular hexadecimal can be tested for. + // For base eight, this can also be tested assuming that one of the conventions for the + // representation is used. 
+ SECTION("bases") + { + SECTION("Octal") + { + CHECK(!json::accept("o42")); + CHECK(!json::accept("q42")); + CHECK(!json::accept("0o42")); + CHECK(!json::accept("\\42")); + CHECK(!json::accept("@42")); + CHECK(!json::accept("&42")); + CHECK(!json::accept("$42")); + CHECK(!json::accept("42o")); + } + // Recall that hexadecimal is also checked in nst_json_testsuite2/test_parsing/n_number_hex_1_digit.json + // and nst_json_testsuite2/test_parsing/n_number_hex_2_digits.json + SECTION("Hexadecimal") + { + CHECK(!json::accept("0x42")); + CHECK(!json::accept("42h")); + CHECK(!json::accept("42H")); + CHECK(!json::accept("0F42h")); + CHECK(!json::accept("0F42H")); + CHECK(!json::accept("$4A2")); + CHECK(!json::accept("16r42")); + CHECK(!json::accept("0h42")); + CHECK(!json::accept("#42")); + CHECK(!json::accept("#16r42")); + CHECK(!json::accept("42F3")); + } + } +} + +TEST_CASE("parse") +{ + // While leading zeroes are forbidden according to RFC8259, + // leading zeroes in the exponent are allowed and ignored in the parsing. + SECTION("exponents") + { + // The only valid exponents are U+0065 and U+0045. + // Their look-alikes, in particular U+0425 and U+0436 are forbidden. 
+ SECTION("U+0425") + { + CHECK_THROWS_AS(parser_helper("0\u0425123"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("123\u04250"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("0.123\u0425123"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("1.23\u0425123"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("1.23\u04250"),json::parse_error&); + } + SECTION("U+0436") + { + CHECK_THROWS_AS(parser_helper("0\u0436123"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("123\u04360"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("0.123\u0436123"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("1.23\u0436123"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("1.23\u04360"),json::parse_error&); + } + SECTION("leading zeroes") + { + CHECK(json::parse("1\u00451")==json::parse("1\u004501")); + CHECK(json::parse("0.1\u00451")==json::parse("0.1\u004501")); + CHECK(json::parse("1\u004545")==json::parse("1\u004500000000000000000000000045")); + CHECK(json::parse("12415\u004516")==json::parse("12415\u00450016")); + CHECK(json::parse("12.415\u004516")==json::parse("12.415\u00450016")); + CHECK(json::parse("1\u00651")==json::parse("1\u006501")); + CHECK(json::parse("0.1\u00651")==json::parse("0.1\u006501")); + CHECK(json::parse("1\u006545")==json::parse("1\u006500000000000000000000000045")); + CHECK(json::parse("12415\u006516")==json::parse("12415\u00650016")); + CHECK(json::parse("12.415\u006516")==json::parse("12.415\u00650016")); + } + SECTION("leading plus") + { + CHECK(json::parse("1\u0045+1")==json::parse("1\u00451")); + CHECK(json::parse("1\u0065+1")==json::parse("1\u00651")); + CHECK(json::parse("1.0034\u0045+23")==json::parse("1.0034\u004523")); + CHECK(json::parse("1.0034\u0065+23")==json::parse("1.0034\u006523")); + } + SECTION("Capitalisation") + { + CHECK(json::parse("3.1415\u00454")==json::parse("3.1415\u00654")); + } + } + SECTION("operators") + { + SECTION("plus") + { + CHECK_THROWS_AS(parser_helper("1+1"),json::parse_error&); 
+ CHECK_THROWS_AS(parser_helper("0.1+1"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("0.1+1.0"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("0+0.1"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("0.1\u00452+1"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("0.1\u00652+1"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("1+0.1\u00652"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("3.5+0.1\u00652"),json::parse_error&); + } + SECTION("minus") + { + CHECK_THROWS_AS(parser_helper("1-1"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("0.1-1"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("0.1-1.0"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("0-0.1"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("0.1\u00452-1"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("0.1\u00652-1"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("1-0.1\u00652"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("3.5-0.1\u00652"),json::parse_error&); + } + SECTION("brackets") + { + CHECK_THROWS_AS(parser_helper("(145)"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("(34.32874)"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("42\u0045(134)"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("42\u0065(134)"),json::parse_error&); + } + SECTION("factorial") + { + CHECK_THROWS_AS(parser_helper("13!"),json::parse_error&); + } + SECTION("multiplication") + { + CHECK_THROWS_AS(parser_helper("1*1"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("1.45*5"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("154*23.76"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("1\u004545*3"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("1\u006545*3"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("3*6\u004512"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("3*6\u006512"),json::parse_error&); + } + SECTION("division") + { + CHECK_THROWS_AS(parser_helper("0/0"),json::parse_error&); + 
CHECK_THROWS_AS(parser_helper("1.45/5"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("154/23.76"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("1\u004545/3"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("1\u006545/3"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("7/6\u004512"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("7/6\u006512"),json::parse_error&); + } + SECTION("comma") + { + CHECK_THROWS_AS(parser_helper("0,0"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("100,000"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("1,000.23"),json::parse_error&); + } + } + SECTION("trailing zeroes") + { + // Trailing zeroes after the decimal point do not influence the parsing + CHECK(json::parse("3.1415000000000000000000000")==json::parse("3.1415")); + CHECK(json::parse("3.1415000000000\u004515")==json::parse("3.1415\u004515")); + CHECK(json::parse("3.1415926000000000\u006515")==json::parse("3.1415926\u006515")); + // This also works for numbers that are not parsed correctly anyway + CHECK(json::parse("2.2250738585072011360574097967091319759348195463516456400000000e-308")==json::parse("2.22507385850720113605740979670913197593481954635164564e-308")); + CHECK(json::parse("0.999999999999999944488848768742172978818416595458984374")==json::parse("0.999999999999999944488848768742172978818416595458984374000000")); + } + SECTION("whitespace") + { + // Leading and trailing whitespace is ignored. 
+ CHECK(json::parse("\n\n\t 123\n\t\t \u000d")==json::parse("123")); + CHECK(json::parse(" 123 ")==json::parse("123")); + CHECK(json::parse(" 123\t")==json::parse("123")); + CHECK(json::parse(" 123\n")==json::parse("123")); + CHECK(json::parse(" 123\u000d")==json::parse("123")); + CHECK(json::parse("\t123 ")==json::parse("123")); + CHECK(json::parse("\t123\t")==json::parse("123")); + CHECK(json::parse("\t123\n")==json::parse("123")); + CHECK(json::parse("\t123\u000d")==json::parse("123")); + CHECK(json::parse("\n123 ")==json::parse("123")); + CHECK(json::parse("\n123\t")==json::parse("123")); + CHECK(json::parse("\n123\n")==json::parse("123")); + CHECK(json::parse("\n123\u000d")==json::parse("123")); + CHECK(json::parse("\u000d123 ")==json::parse("123")); + CHECK(json::parse("\u000d123\t")==json::parse("123")); + CHECK(json::parse("\u000d123\n")==json::parse("123")); + CHECK(json::parse("\u000d123\u000d")==json::parse("123")); + } + SECTION("invalid whitespace") + { + SECTION("space") + { + CHECK_THROWS_AS(parser_helper("0 1"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("1234 567"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("123.456 789"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("123\u00450 0"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("123\u0045132 94"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("1.23\u00450 0"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("1.23\u0045132 94"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("123\u00650 0"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("123\u0065132 94"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("1.23\u00650 0"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("1.23\u0065132 94"),json::parse_error&); + } + SECTION("tab") + { + CHECK_THROWS_AS(parser_helper("0\t1"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("1234\t567"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("123.456\t789"),json::parse_error&); + 
CHECK_THROWS_AS(parser_helper("123\u00450\t0"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("123\u0045132\t94"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("1.23\u00450\t0"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("1.23\u0045132\t94"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("123\u00650\t0"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("123\u0065132\t94"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("1.23\u00650\t0"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("1.23\u0065132\t94"),json::parse_error&); + } + SECTION("new line") + { + CHECK_THROWS_AS(parser_helper("0\n1"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("1234\n567"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("123.456\n789"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("123\u00450\n0"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("123\u0045132\n94"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("1.23\u00450\n0"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("1.23\u0045132\n94"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("123\u00650\n0"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("123\u0065132\n94"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("1.23\u00650\n0"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("1.23\u0065132\n94"),json::parse_error&); + } + SECTION("Carriage return") + { + CHECK_THROWS_AS(parser_helper("0\u000d1"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("1234\u000d567"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("123.456\u000d789"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("123\u00450\u000d0"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("123\u0045132\u000d94"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("1.23\u00450\u000d0"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("1.23\u0045132\u000d94"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("123\u00650\u000d0"),json::parse_error&); 
+ CHECK_THROWS_AS(parser_helper("123\u0065132\u000d94"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("1.23\u00650\u000d0"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("1.23\u0065132\u000d94"),json::parse_error&); + } + } + SECTION("Leading zeroes") + { + CHECK_THROWS_AS(parser_helper("01.0"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("05\u004542"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("05\u006542"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("00"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("000000000000000000000000000000000000000000000000"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("0000000000000000000000000000000000042"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("-01.0"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("-05\u004542"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("-05\u006542"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("-00"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("-000000000000000000000000000000000000000000000000"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("-0000000000000000000000000000000000042"),json::parse_error&); + } + SECTION("Precision") + { + CHECK(json::parse("1.7976931348623158e308").dump()=="1.7976931348623157e+308"); // maximum double value + CHECK(json::parse("-1.7976931348623158e308").dump()=="-1.7976931348623157e+308"); // minimum double value + } +} diff --git a/TSF/tests/unit-objects.cpp b/TSF/tests/unit-objects.cpp new file mode 100644 index 0000000000..eca7ba403b --- /dev/null +++ b/TSF/tests/unit-objects.cpp @@ -0,0 +1,254 @@ +#include +#include "doctest_compatibility.h" + +#include +using nlohmann::json; + +namespace +{ +void parser_helper(const std::string& input); + +void parser_helper(const std::string& input){ + const json temp = json::parse(input); +} +} //namespace + +TEST_CASE("accept") +{ + // A name (or key) is a string. 
No other token is a valid name + // See also n_object_missing_key.json, n_object_non_string_key.json, + // n_object_non_string_key_but_huge_number_instead.json, n_object_repeated_null_null + // n_object_unquoted_key for some non-exhaustive tests + SECTION("names") + { + SECTION("numbers") + { + // cf. n_object_non_string_key.json, n_object_non_string_key_but_huge_number_instead.json, for some integers + CHECK(!json::accept("{0.1:\"foo\"}")); + CHECK(!json::accept("{3\u004542:\"foo\"}")); + CHECK(!json::accept("{3.1415\u006542:\"foo\"}")); + CHECK(!json::accept("{-15:\"foo\"}")); + } + SECTION("arrays") + { + CHECK(!json::accept("{[]:\"foo\"}")); + CHECK(!json::accept("{[1]:\"foo\"}")); + CHECK(!json::accept("{[1,\"foo\"]:\"bar\"}")); + } + SECTION("objects") + { + CHECK(!json::accept("{{}:\"foo\"}")); + CHECK(!json::accept("{{\"a\":1}:\"foo\"}")); + CHECK(!json::accept("{{\"a\":1,\"b\":\"foo\"}:\"bar\"}")); + } + SECTION("literals") + { + CHECK(!json::accept("true:\"foo\"")); + CHECK(!json::accept("false:\"foo\"")); + CHECK(!json::accept("null:\"foo\"")); + } + // Various valid strings are tests for in unit-testsuites.cpp + // However, these do not include unicode utf-8 and utf-16 surrogate characters, + // or control characters. 
+ SECTION("strings") + { + SECTION("control characters") + { + CHECK(json::accept("{\"foo\\u0000bar\":123}")); + CHECK(json::accept("{\"foo\\u0001bar\":123}")); + CHECK(json::accept("{\"foo\\u0002bar\":123}")); + CHECK(json::accept("{\"foo\\u0003bar\":123}")); + CHECK(json::accept("{\"foo\\u0004bar\":123}")); + CHECK(json::accept("{\"foo\\u0005bar\":123}")); + CHECK(json::accept("{\"foo\\u0006bar\":123}")); + CHECK(json::accept("{\"foo\\u0007bar\":123}")); + CHECK(json::accept("{\"foo\\u0008bar\":123}")); + CHECK(json::accept("{\"foo\\u0009bar\":123}")); + CHECK(json::accept("{\"foo\\u000abar\":123}")); + CHECK(json::accept("{\"foo\\u000bbar\":123}")); + CHECK(json::accept("{\"foo\\u000cbar\":123}")); + CHECK(json::accept("{\"foo\\u000dbar\":123}")); + CHECK(json::accept("{\"foo\\u000ebar\":123}")); + CHECK(json::accept("{\"foo\\u000fbar\":123}")); + CHECK(json::accept("{\"foo\\u0010bar\":123}")); + CHECK(json::accept("{\"foo\\u0011bar\":123}")); + CHECK(json::accept("{\"foo\\u0012bar\":123}")); + CHECK(json::accept("{\"foo\\u0013bar\":123}")); + CHECK(json::accept("{\"foo\\u0014bar\":123}")); + CHECK(json::accept("{\"foo\\u0015bar\":123}")); + CHECK(json::accept("{\"foo\\u0016bar\":123}")); + CHECK(json::accept("{\"foo\\u0017bar\":123}")); + CHECK(json::accept("{\"foo\\u0018bar\":123}")); + CHECK(json::accept("{\"foo\\u0019bar\":123}")); + CHECK(json::accept("{\"foo\\u001abar\":123}")); + CHECK(json::accept("{\"foo\\u001bbar\":123}")); + CHECK(json::accept("{\"foo\\u001cbar\":123}")); + CHECK(json::accept("{\"foo\\u001dbar\":123}")); + CHECK(json::accept("{\"foo\\u001ebar\":123}")); + CHECK(json::accept("{\"foo\\u001fbar\":123}")); + } + SECTION("unicode") + { + // escaped + CHECK(json::accept("{\"\\u0066\\u006f\\u006f\\u0062\\u0061\\u0072\":123}")); + // unescaped + CHECK(json::accept("{\"\u0066\u006f\u006f\u0062\u0061\u0072\":123}")); + } + SECTION("escaped UTF-16 surrogates") + { + CHECK(json::accept("{\"\\ud834\\udd1e\":123}")); + 
CHECK(json::accept("{\"\\ud83d\\ude00\":123}")); + CHECK(json::accept("{\"\\ud83d\\udca9\":123}")); + CHECK(json::accept("{\"\\ud83e\\udda5\":123}")); + CHECK(json::accept("{\"\\ud83d\\ude80\":123}")); + CHECK(json::accept("{\"\\ud840\\udc00\":123}")); + CHECK(json::accept("{\"\\udbff\\udfff\":123}")); + CHECK(json::accept("{\"\\ud83c\\udfc3\":123}")); + CHECK(json::accept("{\"\\ud801\\udc37\":123}")); + CHECK(json::accept("{\"\\ud83d\\udcbb\":123}")); + } + } + } + // Name/key and value of an array are treated as any other token. + // In particular, leading and trailing whitespace are ignored + SECTION("whitespace") + { + SECTION("empty object") + { + CHECK(json::accept("{ }")); + CHECK(json::accept("{\t}")); + CHECK(json::accept("{\n}")); + CHECK(json::accept("{\u000d}")); + CHECK(json::accept("{\u000d\u000d\u000d \t\t\t\n\n \u000d \n\t \t \u000d}")); + } + SECTION("non-empty object") + { + CHECK(json::accept("{ \"foo\" : \"bar\" }")); + CHECK(json::accept("{\t\"foo\"\t:\t\"bar\"\t}")); + CHECK(json::accept("{\n\"foo\"\n:\n\"bar\"\n}")); + CHECK(json::accept("{\u000d\"foo\"\u000d:\u000d\"bar\"\u000d}")); + CHECK(json::accept("{ \"foo\"\t:\n\"bar\"\n}")); + CHECK(json::accept("{\t\t\t\t\t\n\n\u000d\"foo\"\t \t\t \n\n \u000d:\"bar\"}")); + } + } + // The colon U+003A is the only valid member separator. + // Look-alikes are illegal. + // All other valid structural characters are illegal. 
+ SECTION("member separator") + { + CHECK(json::accept("{\"foo\"\u003a\"bar\"}")); //: + CHECK(!json::accept("{\"foo\"\uff1a\"bar\"}")); + CHECK(!json::accept("{\"foo\"\ua789\"bar\"}")); + CHECK(!json::accept("{\"foo\"\u005b\"bar\"}")); //[ + CHECK(!json::accept("{\"foo\"\u007b\"bar\"}")); //{ + CHECK(!json::accept("{\"foo\"\u005d\"bar\"}")); //] + CHECK(!json::accept("{\"foo\"\u007d\"bar\"}")); //} + CHECK(!json::accept("{\"foo\"\u002c\"bar\"}")); //, + CHECK(!json::accept("{\"foo\"\u003b\"bar\"}")); //; + } +} + +TEST_CASE("parse") +{ + SECTION("whitespace") + { + SECTION("empty object") + { + CHECK(json::parse("{ }")==json({})); + CHECK(json::parse("{\t}")==json({})); + CHECK(json::parse("{\n}")==json({})); + CHECK(json::parse("{\u000d}")==json({})); + CHECK(json::parse("{\u000d\u000d\u000d \t\t\t\n\n \u000d \n\t \t \u000d}")==json({})); + } + SECTION("non-empty object") + { + CHECK(json::parse("{ \"foo\" : \"bar\" }")==json::parse("{\"foo\":\"bar\"}")); + CHECK(json::parse("{\t\"foo\"\t:\t\"bar\"\t}")==json::parse("{\"foo\":\"bar\"}")); + CHECK(json::parse("{\n\"foo\"\n:\n\"bar\"\n}")==json::parse("{\"foo\":\"bar\"}")); + CHECK(json::parse("{\u000d\"foo\"\u000d:\u000d\"bar\"\u000d}")==json::parse("{\"foo\":\"bar\"}")); + CHECK(json::parse("{ \"foo\"\t:\n\"bar\"\n}")==json::parse("{\"foo\":\"bar\"}")); + CHECK(json::parse("{\t\t\t\t\t\n\n\u000d\"foo\"\t \t\t \n\n \u000d:\"bar\"}")==json::parse("{\"foo\":\"bar\"}")); + } + } + // The colon U+003A is the only valid member separator. + // Look-alikes are illegal. + // All other valid structural characters are illegal. 
+ SECTION("member separator") + { + CHECK_NOTHROW(parser_helper("{\"foo\"\u003a\"bar\"}")); //: + CHECK_THROWS_AS(parser_helper("{\"foo\"\uff1a\"bar\"}"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("{\"foo\"\ua789\"bar\"}"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("{\"foo\"\u005b\"bar\"}"),json::parse_error&); //[ + CHECK_THROWS_AS(parser_helper("{\"foo\"\u007b\"bar\"}"),json::parse_error&); //{ + CHECK_THROWS_AS(parser_helper("{\"foo\"\u005d\"bar\"}"),json::parse_error&); //] + CHECK_THROWS_AS(parser_helper("{\"foo\"\u007d\"bar\"}"),json::parse_error&); //} + CHECK_THROWS_AS(parser_helper("{\"foo\"\u002c\"bar\"}"),json::parse_error&); //, + CHECK_THROWS_AS(parser_helper("{\"foo\"\u003b\"bar\"}"),json::parse_error&); //; + } + SECTION("names") + { + SECTION("numbers") + { + // cf. n_object_non_string_key.json, n_object_non_string_key_but_huge_number_instead.json, for some integers + CHECK_THROWS_AS(parser_helper("{0.1:\"foo\"}"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("{3\u004542:\"foo\"}"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("{3.1415\u006542:\"foo\"}"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("{-15:\"foo\"}"),json::parse_error&); + } + SECTION("arrays") + { + CHECK_THROWS_AS(parser_helper("{[]:\"foo\"}"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("{[1]:\"foo\"}"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("{[1,\"foo\"]:\"bar\"}"),json::parse_error&); + } + SECTION("objects") + { + CHECK_THROWS_AS(parser_helper("{{}:\"foo\"}"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("{{\"a\":1}:\"foo\"}"),json::parse_error&); + CHECK_THROWS_AS(parser_helper("{{\"a\":1,\"b\":\"foo\"}:\"bar\"}"),json::parse_error&); + } + SECTION("literals") + { + CHECK_THROWS_AS(parser_helper("true:\"foo\""),json::parse_error&); + CHECK_THROWS_AS(parser_helper("false:\"foo\""),json::parse_error&); + CHECK_THROWS_AS(parser_helper("null:\"foo\""),json::parse_error&); + } + } + // It is checked in 
unit-testsuites that duplicate values are parsed without error. + // The exact behaviour, however, appears to be not tested for yet. + SECTION("duplicate names") + { + SECTION("100,000 identical keys") + { + // object containing 100,000 members with the same name and different values. + std::stringstream ss1; + ss1 << "{"; + for (int i = 1; i<100000;i++){ + ss1 << R"("name":)" << i << ","; + } + ss1 << R"("name":"value"})"; + json _1; + ss1 >> _1; + CHECK(_1 == json::parse(R"({"name":"value"})")); + } + SECTION("first and last key duplicate") + { + // object containing 100,000 members with only first and last member of the same name + std::stringstream ss2; + std::stringstream ss3; + ss2 << R"({"key0":0,)"; + ss3 << "{"; + for (int i = 1; i<100000; i++){ + ss2 << R"("key)" << i << "\":" << i << ","; + ss3 << R"("key)" << i << "\":" << i << ","; + } + ss2 << R"("key0":0})"; + ss3 << R"("key0":0})"; + json _2; + json _3; + ss2 >> _2; + ss3 >> _3; + CHECK(_2==_3); + } + } +} diff --git a/TSF/tests/unit-strings.cpp b/TSF/tests/unit-strings.cpp new file mode 100644 index 0000000000..e0ad653691 --- /dev/null +++ b/TSF/tests/unit-strings.cpp @@ -0,0 +1,364 @@ +#include +#include +#include + +#include "doctest_compatibility.h" + +#include +using nlohmann::json; + +namespace +{ +void parser_helper(const std::string& input); +std::string uint_to_utf8(const uint32_t& input); + +void parser_helper(const std::string& input){ + const json temp = json::parse(input); +} + +std::string uint_to_utf8(const uint32_t& input){ + std::string temp = "\""; + // evil chat-gpt magic transforms input into utf-8 encoded unescaped character + if (input <= 0x7F) { + temp += static_cast(input); // 1-byte (ASCII) + } else if (input <= 0x7FF) { + temp += static_cast(0xC0 | ((input >> 6) & 0x1F)); // 2-byte sequence + temp += static_cast(0x80 | (input & 0x3F)); + } else if (input <= 0xFFFF) { + temp += static_cast(0xE0 | ((input >> 12) & 0x0F)); // 3-byte sequence + temp += static_cast(0x80 | 
((input >> 6) & 0x3F)); + temp += static_cast(0x80 | (input & 0x3F)); + } else if (input <= 0x10FFFF) { + temp += static_cast(0xF0 | ((input >> 18) & 0x07)); // 4-byte sequence + temp += static_cast(0x80 | ((input >> 12) & 0x3F)); + temp += static_cast(0x80 | ((input >> 6) & 0x3F)); + temp += static_cast(0x80 | (input & 0x3F)); + } + temp += "\""; + return temp; +} +} //namespace + +TEST_CASE("accept") +{ + // noncharacters + // The parsing of these is tested in unit-unicode1.cpp; + // here: replace roundtrip() (i.e. test for parse() and dump()) with accept. + SECTION("noncharacter code positions") + { + // 5.3.1 U+FFFE = ef bf be + CHECK(json::accept("\"\xef\xbf\xbe\"")); + // 5.3.2 U+FFFF = ef bf bf + CHECK(json::accept("\"\xef\xbf\xbf\"")); + + // 5.3.3 U+FDD0 .. U+FDEF + CHECK(json::accept("\"\xEF\xB7\x90\"")); + CHECK(json::accept("\"\xEF\xB7\x91\"")); + CHECK(json::accept("\"\xEF\xB7\x92\"")); + CHECK(json::accept("\"\xEF\xB7\x93\"")); + CHECK(json::accept("\"\xEF\xB7\x94\"")); + CHECK(json::accept("\"\xEF\xB7\x95\"")); + CHECK(json::accept("\"\xEF\xB7\x96\"")); + CHECK(json::accept("\"\xEF\xB7\x97\"")); + CHECK(json::accept("\"\xEF\xB7\x98\"")); + CHECK(json::accept("\"\xEF\xB7\x99\"")); + CHECK(json::accept("\"\xEF\xB7\x9A\"")); + CHECK(json::accept("\"\xEF\xB7\x9B\"")); + CHECK(json::accept("\"\xEF\xB7\x9C\"")); + CHECK(json::accept("\"\xEF\xB7\x9D\"")); + CHECK(json::accept("\"\xEF\xB7\x9E\"")); + CHECK(json::accept("\"\xEF\xB7\x9F\"")); + CHECK(json::accept("\"\xEF\xB7\xA0\"")); + CHECK(json::accept("\"\xEF\xB7\xA1\"")); + CHECK(json::accept("\"\xEF\xB7\xA2\"")); + CHECK(json::accept("\"\xEF\xB7\xA3\"")); + CHECK(json::accept("\"\xEF\xB7\xA4\"")); + CHECK(json::accept("\"\xEF\xB7\xA5\"")); + CHECK(json::accept("\"\xEF\xB7\xA6\"")); + CHECK(json::accept("\"\xEF\xB7\xA7\"")); + CHECK(json::accept("\"\xEF\xB7\xA8\"")); + CHECK(json::accept("\"\xEF\xB7\xA9\"")); + CHECK(json::accept("\"\xEF\xB7\xAA\"")); + CHECK(json::accept("\"\xEF\xB7\xAB\"")); + 
CHECK(json::accept("\"\xEF\xB7\xAC\"")); + CHECK(json::accept("\"\xEF\xB7\xAD\"")); + CHECK(json::accept("\"\xEF\xB7\xAE\"")); + CHECK(json::accept("\"\xEF\xB7\xAF\"")); + + // 5.3.4 U+nFFFE U+nFFFF (for n = 1..10) + CHECK(json::accept("\"\xF0\x9F\xBF\xBF\"")); + CHECK(json::accept("\"\xF0\xAF\xBF\xBF\"")); + CHECK(json::accept("\"\xF0\xBF\xBF\xBF\"")); + CHECK(json::accept("\"\xF1\x8F\xBF\xBF\"")); + CHECK(json::accept("\"\xF1\x9F\xBF\xBF\"")); + CHECK(json::accept("\"\xF1\xAF\xBF\xBF\"")); + CHECK(json::accept("\"\xF1\xBF\xBF\xBF\"")); + CHECK(json::accept("\"\xF2\x8F\xBF\xBF\"")); + CHECK(json::accept("\"\xF2\x9F\xBF\xBF\"")); + CHECK(json::accept("\"\xF2\xAF\xBF\xBF\"")); + } + + // also copied from unit-unicode.cpp with replaced roundtrip() + SECTION("overlong sequences") + { + SECTION("Examples of an overlong ASCII character") + { + // 4.1.1 U+002F = c0 af + CHECK(!json::accept("\"\xc0\xaf\"")); + // 4.1.2 U+002F = e0 80 af + CHECK(!json::accept("\"\xe0\x80\xaf\"")); + // 4.1.3 U+002F = f0 80 80 af + CHECK(!json::accept("\"\xf0\x80\x80\xaf\"")); + // 4.1.4 U+002F = f8 80 80 80 af + CHECK(!json::accept("\"\xf8\x80\x80\x80\xaf\"")); + // 4.1.5 U+002F = fc 80 80 80 80 af + CHECK(!json::accept("\"\xfc\x80\x80\x80\x80\xaf\"")); + } + + SECTION("Maximum overlong sequences") + { + // Below you see the highest Unicode value that is still resulting in an + // overlong sequence if represented with the given number of bytes. This + // is a boundary test for safe UTF-8 decoders. All five characters should + // be rejected like malformed UTF-8 sequences. 
+ + // 4.2.1 U-0000007F = c1 bf + CHECK(!json::accept("\"\xc1\xbf\"")); + // 4.2.2 U-000007FF = e0 9f bf + CHECK(!json::accept("\"\xe0\x9f\xbf\"")); + // 4.2.3 U-0000FFFF = f0 8f bf bf + CHECK(!json::accept("\"\xf0\x8f\xbf\xbf\"")); + // 4.2.4 U-001FFFFF = f8 87 bf bf bf + CHECK(!json::accept("\"\xf8\x87\xbf\xbf\xbf\"")); + // 4.2.5 U-03FFFFFF = fc 83 bf bf bf bf + CHECK(!json::accept("\"\xfc\x83\xbf\xbf\xbf\xbf\"")); + } + + SECTION("Overlong representation of the NUL character") + { + // The following five sequences should also be rejected like malformed + // UTF-8 sequences and should not be treated like the ASCII NUL + // character. + + // 4.3.1 U+0000 = c0 80 + CHECK(!json::accept("\"\xc0\x80\"")); + // 4.3.2 U+0000 = e0 80 80 + CHECK(!json::accept("\"\xe0\x80\x80\"")); + // 4.3.3 U+0000 = f0 80 80 80 + CHECK(!json::accept("\"\xf0\x80\x80\x80\"")); + // 4.3.4 U+0000 = f8 80 80 80 80 + CHECK(!json::accept("\"\xf8\x80\x80\x80\x80\"")); + // 4.3.5 U+0000 = fc 80 80 80 80 80 + CHECK(!json::accept("\"\xfc\x80\x80\x80\x80\x80\"")); + } + } + // also copied from unit-unicode.cpp with replaced roundtrip() + SECTION("malformed sequences") + { + SECTION("Unexpected continuation bytes") + { + // Each unexpected continuation byte should be separately signalled as a + // malformed sequence of its own. 
+ + // 3.1.1 First continuation byte 0x80 + CHECK(!json::accept("\"\x80\"")); + // 3.1.2 Last continuation byte 0xbf + CHECK(!json::accept("\"\xbf\"")); + + // 3.1.3 2 continuation bytes + CHECK(!json::accept("\"\x80\xbf\"")); + // 3.1.4 3 continuation bytes + CHECK(!json::accept("\"\x80\xbf\x80\"")); + // 3.1.5 4 continuation bytes + CHECK(!json::accept("\"\x80\xbf\x80\xbf\"")); + // 3.1.6 5 continuation bytes + CHECK(!json::accept("\"\x80\xbf\x80\xbf\x80\"")); + // 3.1.7 6 continuation bytes + CHECK(!json::accept("\"\x80\xbf\x80\xbf\x80\xbf\"")); + // 3.1.8 7 continuation bytes + CHECK(!json::accept("\"\x80\xbf\x80\xbf\x80\xbf\x80\"")); + + // 3.1.9 Sequence of all 64 possible continuation bytes (0x80-0xbf) + CHECK(!json::accept("\"\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\"")); + } + + SECTION("Lonely start characters") + { + // 3.2.1 All 32 first bytes of 2-byte sequences (0xc0-0xdf) + CHECK(!json::accept("\"\xc0 \xc1 \xc2 \xc3 \xc4 \xc5 \xc6 \xc7 \xc8 \xc9 \xca \xcb \xcc \xcd \xce \xcf \xd0 \xd1 \xd2 \xd3 \xd4 \xd5 \xd6 \xd7 \xd8 \xd9 \xda \xdb \xdc \xdd \xde \xdf\"")); + // 3.2.2 All 16 first bytes of 3-byte sequences (0xe0-0xef) + CHECK(!json::accept("\"\xe0 \xe1 \xe2 \xe3 \xe4 \xe5 \xe6 \xe7 \xe8 \xe9 \xea \xeb \xec \xed \xee \xef\"")); + // 3.2.3 All 8 first bytes of 4-byte sequences (0xf0-0xf7) + CHECK(!json::accept("\"\xf0 \xf1 \xf2 \xf3 \xf4 \xf5 \xf6 \xf7\"")); + // 3.2.4 All 4 first bytes of 5-byte sequences (0xf8-0xfb) + CHECK(!json::accept("\"\xf8 \xf9 \xfa \xfb\"")); + // 3.2.5 All 2 first bytes of 6-byte sequences (0xfc-0xfd) + CHECK(!json::accept("\"\xfc \xfd\"")); + } + + SECTION("Sequences with last continuation byte missing") + { + // All bytes of an incomplete sequence should be signalled as a single + // malformed sequence, i.e., 
you should see only a single replacement + // character in each of the next 10 tests. (Characters as in section 2) + + // 3.3.1 2-byte sequence with last byte missing (U+0000) + CHECK(!json::accept("\"\xc0\"")); + // 3.3.2 3-byte sequence with last byte missing (U+0000) + CHECK(!json::accept("\"\xe0\x80\"")); + // 3.3.3 4-byte sequence with last byte missing (U+0000) + CHECK(!json::accept("\"\xf0\x80\x80\"")); + // 3.3.4 5-byte sequence with last byte missing (U+0000) + CHECK(!json::accept("\"\xf8\x80\x80\x80\"")); + // 3.3.5 6-byte sequence with last byte missing (U+0000) + CHECK(!json::accept("\"\xfc\x80\x80\x80\x80\"")); + // 3.3.6 2-byte sequence with last byte missing (U-000007FF) + CHECK(!json::accept("\"\xdf\"")); + // 3.3.7 3-byte sequence with last byte missing (U-0000FFFF) + CHECK(!json::accept("\"\xef\xbf\"")); + // 3.3.8 4-byte sequence with last byte missing (U-001FFFFF) + CHECK(!json::accept("\"\xf7\xbf\xbf\"")); + // 3.3.9 5-byte sequence with last byte missing (U-03FFFFFF) + CHECK(!json::accept("\"\xfb\xbf\xbf\xbf\"")); + // 3.3.10 6-byte sequence with last byte missing (U-7FFFFFFF) + CHECK(!json::accept("\"\xfd\xbf\xbf\xbf\xbf\"")); + } + + SECTION("Concatenation of incomplete sequences") + { + // All the 10 sequences of 3.3 concatenated, you should see 10 malformed + // sequences being signalled: + CHECK(!json::accept("\"\xc0\xe0\x80\xf0\x80\x80\xf8\x80\x80\x80\xfc\x80\x80\x80\x80\xdf\xef\xbf\xf7\xbf\xbf\xfb\xbf\xbf\xbf\xfd\xbf\xbf\xbf\xbf\"")); + } + + SECTION("Impossible bytes") + { + // The following two bytes cannot appear in a correct UTF-8 string + + // 3.5.1 fe + CHECK(!json::accept("\"\xfe\"")); + // 3.5.2 ff + CHECK(!json::accept("\"\xff\"")); + // 3.5.3 fe fe ff ff + CHECK(!json::accept("\"\xfe\xfe\xff\xff\"")); + } + } +} + +TEST_CASE("Unicode" * doctest::skip()) +{ + SECTION("escaped unicode") + { + for (uint32_t i = 0x0000; i<=0xFFFF; i++) + { + std::ostringstream temp; + std::ostringstream temp2; + temp << "\"\\u" << std::hex << 
std::uppercase << std::setfill('0') << std::setw(4) << i << "\""; + temp2 << "\"\\u" << std::hex << std::nouppercase << std::setfill('0') << std::setw(4) << i << "\""; + if (i>=0xD800 && i<=0xDFFF) + { + // Unpaired utf-16 surrogates are illegal. + // Observe that this verbatim not what RFC8259 §7 prescribes; + // it appears, however, to be in the spirit of RFC8259, cf. §8.2 + // Illegal characters are not parsed anyway. + CHECK(!json::accept(temp.str())); + CHECK(!json::accept(temp2.str())); + CHECK_THROWS_AS(parser_helper(temp.str()),json::parse_error&); + CHECK_THROWS_AS(parser_helper(temp2.str()),json::parse_error&); + } else { + // all other characters of the basic multilingual plane are accepted. + CHECK(json::accept(temp.str())); + CHECK(json::accept(temp2.str())); + CHECK(json::parse(temp.str())==json::parse(temp2.str())); + } + } + } + SECTION("unescaped unicode") + { + for (uint32_t i = 0x0000; i<=0x10FFFF; i++) + { + std::string temp = uint_to_utf8(i); + if ((i>=0xD800 && i<=0xDFFF)) { + // Unpaired utf-16 surrogates are illegal. + // Observe that this verbatim not what RFC8259 §7 prescribes; + // it appears, however, to be in the spirit of RFC8259, cf. §8.2 + // The other characters are illegal if unescaped. + CHECK(!json::accept(temp)); + CHECK_THROWS_AS(parser_helper(temp),json::parse_error&); + if (i<=0xDBFF){ + for (uint32_t j = 0xDC00; j<=0xDFFF; j++){ + temp += uint_to_utf8(j); + CHECK(!json::accept(temp)); + CHECK_THROWS_AS(parser_helper(temp),json::parse_error&); + } + } + } else if (i<0x0020||i==0x0022||i==0x005c){ + CHECK(!json::accept(temp)); + CHECK_THROWS_AS(parser_helper(temp),json::parse_error&); + } else { + // All other characters are valid according to RFC8259 + CHECK_NOTHROW(parser_helper(temp)); + } + } + } + // escaped utf-16 surrogate pairs are accepted and parsed. 
+ SECTION("escaped utf-16 surrogates") + { + SECTION("well-formed") + { + for (uint16_t i = 0xD800; i <= 0xDBFF; i++){ + for (uint16_t j = 0xD800; j <= 0xDFFF; j++){ + std::ostringstream temp; + temp << "\"\\u" << std::hex << std::uppercase << std::setfill('0') << std::setw(4) << i\ + << "\\u" << std::hex << std::uppercase << std::setfill('0') << std::setw(4) << j\ + << "\"" ; + if (j>=0xDC00){ + CHECK(json::accept(temp.str())); + CHECK_NOTHROW(parser_helper(temp.str())); + } else { + CHECK(!json::accept(temp.str())); + CHECK_THROWS_AS(parser_helper(temp.str()),json::parse_error&); + } + } + } + } + SECTION("ill-formed") + { + for (uint16_t i = 0xDC00; i <= 0xDFFF; i++){ + for (uint16_t j = 0xD800; j <= 0xDFFF; j++){ + std::ostringstream temp; + temp << "\"\\u" << std::hex << std::uppercase << std::setfill('0') << std::setw(4) << i\ + << "\\u" << std::hex << std::uppercase << std::setfill('0') << std::setw(4) << j\ + << "\"" ; + CHECK(!json::accept(temp.str())); + CHECK_THROWS_AS(parser_helper(temp.str()),json::parse_error&); + } + } + } + } + +} + +TEST_CASE("parse") +{ + SECTION("whitespace") + { + // leading and trailing whitespace is ignored. 
+ CHECK(json::parse(" \"foo\" ")==json::parse("\"foo\"")); + CHECK(json::parse(" \"foo\"\t")==json::parse("\"foo\"")); + CHECK(json::parse(" \"foo\"\n")==json::parse("\"foo\"")); + CHECK(json::parse(" \"foo\"\u000d")==json::parse("\"foo\"")); + CHECK(json::parse("\t\"foo\" ")==json::parse("\"foo\"")); + CHECK(json::parse("\t\"foo\"\t")==json::parse("\"foo\"")); + CHECK(json::parse("\t\"foo\"\n")==json::parse("\"foo\"")); + CHECK(json::parse("\t\"foo\"\u000d")==json::parse("\"foo\"")); + CHECK(json::parse("\n\"foo\" ")==json::parse("\"foo\"")); + CHECK(json::parse("\n\"foo\"\t")==json::parse("\"foo\"")); + CHECK(json::parse("\n\"foo\"\n")==json::parse("\"foo\"")); + CHECK(json::parse("\n\"foo\"\u000d")==json::parse("\"foo\"")); + CHECK(json::parse("\u000d\"foo\" ")==json::parse("\"foo\"")); + CHECK(json::parse("\u000d\"foo\"\t")==json::parse("\"foo\"")); + CHECK(json::parse("\u000d\"foo\"\n")==json::parse("\"foo\"")); + CHECK(json::parse("\u000d\"foo\"\u000d")==json::parse("\"foo\"")); + } +} diff --git a/TSF/trustable/TRUSTABLE-SOFTWARE.md b/TSF/trustable/TRUSTABLE-SOFTWARE.md new file mode 100644 index 0000000000..e3f477df3a --- /dev/null +++ b/TSF/trustable/TRUSTABLE-SOFTWARE.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +This release of JSON-Library also referred in the documentation as nlohmann/json library is Trustable. diff --git a/TSF/trustable/assertions/TA-ANALYSIS-CHECKLIST.md b/TSF/trustable/assertions/TA-ANALYSIS-CHECKLIST.md new file mode 100644 index 0000000000..b1b19b5e98 --- /dev/null +++ b/TSF/trustable/assertions/TA-ANALYSIS-CHECKLIST.md @@ -0,0 +1,64 @@ +#### Checklist for TA-ANALYSIS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + +* What fraction of Expectations are covered by the test data? + + **Answer**: The two expectations are JLEX-01 and JLEX-02. Every statement supporting both of the expectations is ultimately supported by a test, except for WFJ-06. 
For WFJ-06 it is impossible to provide a direct test, since this is a statement about infinitely many cases. Indirect tests are provided by the rejection of ill-formed json data. + +* What fraction of Misbehaviours are covered by the monitored indicator data? + + **Answer**: For the intended use-case, no misbehaviours have been identified. Furthermore, no indicator data are collected. + +* How confident are we that the indicator data are accurate and timely? + + **Answer**: No indicator data are collected. + +* How reliable is the monitoring process? + + **Answer**: Due to no indicator data being collected, there is no monitoring process. + +* How well does the production data correlate with our test data? + + **Answer**: Due to the general nature of the library, there are no production data. + +* Are we publishing our data analysis? + + **Answer**: Since we have no production data with which to compare our uncollected indicator data or our test data, no data analysis is performed, and consequently none is published. + +* Are we comparing and analysing production data vs test? + + **Answer**: There are no production data. + +* Are our results getting better, or worse? + + **Answer**: Neither. + +* Are we addressing spikes/regressions? + **Answer**: There are no spikes in the non-existent indicator data. If a test ever fails, then the spike is investigated. The results of fuzz testing are investigated in the original nlohmann/json. + +* Do we have sensible/appropriate target failure rates? + + **Answer**: For the unit and integration tests, zero. The target failure rate of fuzz testing is not under our control. + +* Do we need to check the targets? + + **Answer**: For the unit and integration tests, no. Since the fuzz testing runs and is investigated in the original nlohmann/json, there is no need to check the target. + +* Are we achieving the targets? + + **Answer**: For the unit and integration tests, yes. 
The achieving of the targets for the fuzz-testing is evaluated within the original nlohmann/json. + +* Are all underlying assumptions and target conditions for the analysis specified? + + **Answer**: Since none of the unit and integration tests are expected to fail, there is no further analysis of the results besides verifying the expectation. In case any test fails ever, the failure of the CI-pipeline encourages the maintainer to investigate. + +* Have the underlying assumptions been verified using known good data? + + **Answer**: The assumption that all unit and integration tests succeed under the expected conditions is demonstrated by the non-failure of the CI-Pipeline. + +* Has the Misbehaviour identification process been verified using known bad data? + + **Answer**: Misbehaviours published on nlohmann/json usually provide minimal working examples for reproducing the faulty behaviour, enabling everyone to verify the identified misbehaviours. There is, however, no automatic process for the identification of misbehaviours. + +* Are results shown to be reproducible? + + **Answer**: It is expected that the tests can be reproduced on every modern sufficiently powerful machine. diff --git a/TSF/trustable/assertions/TA-ANALYSIS.md b/TSF/trustable/assertions/TA-ANALYSIS.md new file mode 100644 index 0000000000..0b42206cf1 --- /dev/null +++ b/TSF/trustable/assertions/TA-ANALYSIS.md @@ -0,0 +1,9 @@ +--- +level: 1.1 +normative: true +references: + - type: checklist + path: ./TSF/trustable/assertions/TA-ANALYSIS-CHECKLIST.md +--- + +Collected data from tests and monitoring of deployed software in eclipse-score/inc_nlohmann_json is analysed according to specified objectives. 
\ No newline at end of file diff --git a/TSF/trustable/assertions/TA-BEHAVIOURS-CHECKLIST.md b/TSF/trustable/assertions/TA-BEHAVIOURS-CHECKLIST.md new file mode 100644 index 0000000000..bfd9dfbb66 --- /dev/null +++ b/TSF/trustable/assertions/TA-BEHAVIOURS-CHECKLIST.md @@ -0,0 +1,50 @@ +#### Checklist for TA-BEHAVIOURS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + +* How has the list of Expectations varied over time? + + **Answer**: The list of expectations is taken from [here](https://eclipse-score.github.io/score/main/modules/baselibs/json/docs/requirements/index.html), whose development can be retraced using git. + +* How confident can we be that this list is comprehensive? + + **Answer**: The list of expectations has been collected amongst the stakeholders in S-CORE, so that we are very confident that the list is comprehensive. + The expectation to serialize user data into JSON format + +* Could some participants have incentives to manipulate information? + + **Answer**: We can not imagine any reason. + +* Could there be whole categories of Expectations still undiscovered? + + **Answer**: It is unlikely, but the parsing of CBOR could become relevant at some time. + +* Can we identify Expectations that have been understood but not specified? + + **Answer**: No. + +* Can we identify some new Expectations, right now? + + **Answer**: No. + +* How confident can we be that this list covers all critical requirements? + + **Answer**: We can not think of any more critical requirement of a JSON parser in the sense of RFC8259 than to parse JSON data in the sense of RFC8259. + +* How comprehensive is the list of tests? + + **Answer**: Currently, the branch coverage is 93.865% and the line coverage is 99.186%, cf. JLS-27. + +* Is every Expectation covered by at least one implemented test? + + **Answer**: Yes, both of the expectations are covered by at least one implemented test. 
Moreover, each statement supporting the expectations is covered by a test with the exception of WFJ-06. + +* Are there any Expectations where we believe more coverage would help? + + **Answer**: No. + +* How do dependencies affect Expectations, and are their properties verifiable? + + **Answer**: The library nlohmann/json does not have external dependencies, so that there are in particular none that affect Expectations. + +* Are input analysis findings from components, tools, and data considered in relation to Expectations? + + **Answer**: No findings have been found. diff --git a/TSF/trustable/assertions/TA-BEHAVIOURS.md b/TSF/trustable/assertions/TA-BEHAVIOURS.md new file mode 100644 index 0000000000..e2b8b5c3ed --- /dev/null +++ b/TSF/trustable/assertions/TA-BEHAVIOURS.md @@ -0,0 +1,9 @@ +--- +level: 1.1 +normative: true +references: + - type: checklist + path: ./TSF/trustable/assertions/TA-BEHAVIOURS-CHECKLIST.md +--- + +Expected or required behaviours for the nlohmann/json library are identified, specified, verified and validated based on analysis. diff --git a/TSF/trustable/assertions/TA-CONFIDENCE-CHECKLIST.md b/TSF/trustable/assertions/TA-CONFIDENCE-CHECKLIST.md new file mode 100644 index 0000000000..0160733b07 --- /dev/null +++ b/TSF/trustable/assertions/TA-CONFIDENCE-CHECKLIST.md @@ -0,0 +1,17 @@ +#### Checklist for TA-CONFIDENCE from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + +* What is the algorithm for combining/comparing the scores? + + **Answer**: It is the standard algorithm of trudag. + +* How confident are we that this algorithm is fit for purpose? + + **Answer**: We have no reason to assume that the standard algorithm is not fit for our purpose. + +* What are the trends for each score? + + **Answer**: CAN NOT BE ANSWERED NOW + +* How well do our scores correlate with external feedback signals? 
+ + **Answer**: CAN NOT BE ANSWERED NOW \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-CONFIDENCE.md b/TSF/trustable/assertions/TA-CONFIDENCE.md new file mode 100644 index 0000000000..54e0006966 --- /dev/null +++ b/TSF/trustable/assertions/TA-CONFIDENCE.md @@ -0,0 +1,9 @@ +--- +level: 1.1 +normative: true +references: + - type: checklist + path: ./TSF/trustable/assertions/TA-CONFIDENCE-CHECKLIST.md +--- + +Confidence in the nlohmann/json library is measured based on results of analysis. diff --git a/TSF/trustable/assertions/TA-CONSTRAINTS-CHECKLIST.md b/TSF/trustable/assertions/TA-CONSTRAINTS-CHECKLIST.md new file mode 100644 index 0000000000..2c123d8ee7 --- /dev/null +++ b/TSF/trustable/assertions/TA-CONSTRAINTS-CHECKLIST.md @@ -0,0 +1,29 @@ +#### Checklist for TA-CONSTRAINTS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + +* Are the constraints grounded in realistic expectations, backed by real-world examples? + + **Answer**: The constraints originate from S-CORE (e.g. AOU-04, AOU-05, AOU-07, AOU-21), the standard RFC-8259 (e.g. AOU-05, AOU-20, AOU-21) and the library nlohmann/json itself (AOU-06, AOU-20) in order to ensure that the expectations are met. + +* Do they effectively guide downstream consumers in expanding upon existing Statements? + + **Answer**: ????? + +* Do they provide clear guidance for upstreams on reusing components with well-defined claims? + + **Answer**: ????? + +* Are any Statements explicitly designated as not reusable or adaptable? + + **Answer**: No statement has been intentionally designated as not reusable or adaptable. + +* Are there worked examples from downstream or upstream users demonstrating these constraints in practice? + + **Answer**: ???? + +* Have there been any documented misunderstandings from users, and are these visibly resolved? + + **Answer**: Yes, it is documented that the [brace initialisation](https://json.nlohmann.me/home/faq/) (cf. 
AOU-06) regularly leads to confusion, cf. [here](https://github.com/nlohmann/json/issues/4898). + +* Do external users actively keep up with updates, and are they properly notified of any changes? + + **Answer**: External users of the library are not necessarily automatically notified of an update, and are neither assumed nor required to keep up to date. If the external user forks the github repository, however, then github shows automatically whenever the upstream changes. \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-CONSTRAINTS.md b/TSF/trustable/assertions/TA-CONSTRAINTS.md new file mode 100644 index 0000000000..6db0e6a836 --- /dev/null +++ b/TSF/trustable/assertions/TA-CONSTRAINTS.md @@ -0,0 +1,9 @@ +--- +level: 1.1 +normative: true +references: + - type: checklist + path: ./TSF/trustable/assertions/TA-CONSTRAINTS-CHECKLIST.md +--- + +Constraints on adaptation and deployment of eclipse-score/inc_nlohmann_json are specified. diff --git a/TSF/trustable/assertions/TA-DATA-CHECKLIST.md b/TSF/trustable/assertions/TA-DATA-CHECKLIST.md new file mode 100644 index 0000000000..a0830a0fd2 --- /dev/null +++ b/TSF/trustable/assertions/TA-DATA-CHECKLIST.md @@ -0,0 +1,41 @@ +#### Checklist for TA-DATA from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + +* Is all test data stored with long-term accessibility? + + **Answer**: If we assume that github is long-term accessible, then yes. + +* Is all monitoring data stored with long-term accessibility? + + **Answer**: There are no monitoring data. + +* Are extensible data models implemented? + + **Answer**: The data are stored in an sqlite database. + +* Is sensitive data handled correctly (broadcasted, stored, discarded, or anonymised) with appropriate encryption and redundancy? + + **Answer**: There are no sensitive data produced, collected or stored. + +* Are proper backup mechanisms in place? + + **Answer**: Not more than the default mechanisms of github. 
+ +* Are storage and backup limits tested? + + **Answer**: No. + +* Are all data changes traceable? + + **Answer**: Yes, due to the usage of github. + +* Are concurrent changes correctly managed and resolved? + + **Answer**: Yes, due to the usage of github. + +* Is data accessible only to intended parties? + + **Answer**: Since the library is open source, there are no unintended parties. + +* Are any subsets of our data being published? + + **Answer**: Yes, the collected data are publicly available. \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-DATA.md b/TSF/trustable/assertions/TA-DATA.md new file mode 100644 index 0000000000..53443a5f65 --- /dev/null +++ b/TSF/trustable/assertions/TA-DATA.md @@ -0,0 +1,9 @@ +--- +level: 1.1 +normative: true +references: + - type: checklist + path: ./TSF/trustable/assertions/TA-DATA-CHECKLIST.md +--- + +Data in eclipse-score/inc_nlohmann_json is collected from tests, and from monitoring of deployed software, according to specified objectives. diff --git a/TSF/trustable/assertions/TA-FIXES-CHECKLIST.md b/TSF/trustable/assertions/TA-FIXES-CHECKLIST.md new file mode 100644 index 0000000000..aa0d619c07 --- /dev/null +++ b/TSF/trustable/assertions/TA-FIXES-CHECKLIST.md @@ -0,0 +1,62 @@ +#### Checklist for TA-FIXES from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + + +* How many faults have we identified in XYZ? + + **Answer**: There are no identifiable faults concerning the expectations. + +* How many unknown faults remain to be found, based on the number that have been processed so far? + + **Answer**: It is unlikely that there are unknown faults concerning the expectations. + +* Is there any possibility that people could be motivated to manipulate the lists (e.g. bug bonus or pressure to close). + + **Answer**: Since the project is entirely open source, it is quite unlikely. + +* How many faults may be unrecorded (or incorrectly closed, or downplayed)? 
+ + **Answer**: There may be none, at least when it concerns the expectations. + +* How do we collect lists of bugs and known vulnerabilities from components? + + **Answer**: We pull the list from the issues reported to nlohmann/json labelled as bug and open or opened since the last release. This list is then stored using github, thereby enabling a traceability of the list. + +* How (and how often) do we check these lists for relevant bugs and known vulnerabilities? + + **Answer**: Whenever we generate the documentation, the list is pulled. If there is an issue previously unrecorded, then the maintainer is encouraged by the change of the trustable score to check this issue on applicability. + +* How confident can we be that the lists are honestly maintained? + + **Answer**: We can not imagine a reason why the list could be dishonestly maintained. + +* Could some participants have incentives to manipulate information? + + **Answer**: We can not think of a reason why. + +* How confident are we that the lists are comprehensive? + + **Answer**: We have no reason to assume that discovered bugs are not reported to nlohmann/json. + +* Could there be whole categories of bugs/vulnerabilities still undiscovered? + + **Answer**: There could be a mislabelling of issues, but it is unlikely that there are bugs or vulnerabilities not labelled as bug, instead it is likely that perceived issues due to a misunderstanding of how the library works are labelled as bug. + +* How effective is our triage/prioritisation? + + **Answer**: ????? Since it is not intended to fix the library within S-CORE, but instead leave the development to the original nlohmann/json, there is no need to have a triage or prioritisation. + +* How many components have never been updated? + + **Answer**: None, the single component is up to date. + +* How confident are we that we could update them? 
+ + **Answer**: If nlohmann/json releases a new version, we are very confident that we can update to that version. + +* How confident are we that outstanding fixes do not impact our Expectations? + + **Answer**: We have not found any outstanding fixes impacting our expectations. + +* How confident are we that outstanding fixes do not address Misbehaviours? + + **Answer**: Since no misbehaviours have been identified, we are very confident that none of the outstanding fixes address any misbehaviours. \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-FIXES.md b/TSF/trustable/assertions/TA-FIXES.md new file mode 100644 index 0000000000..bf0111cf83 --- /dev/null +++ b/TSF/trustable/assertions/TA-FIXES.md @@ -0,0 +1,9 @@ +--- +level: 1.1 +normative: true +references: + - type: checklist + path: ./TSF/trustable/assertions/TA-FIXES-CHECKLIST.md +--- + +In the nlohmann/json repository, known bugs or misbehaviours are analysed and triaged, and critical fixes or mitigations are implemented or applied. diff --git a/TSF/trustable/assertions/TA-INDICATORS-CHECKLIST.md b/TSF/trustable/assertions/TA-INDICATORS-CHECKLIST.md new file mode 100644 index 0000000000..0ca7405667 --- /dev/null +++ b/TSF/trustable/assertions/TA-INDICATORS-CHECKLIST.md @@ -0,0 +1,49 @@ +#### Checklist for TA-INDICATORS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + +* How appropriate/thorough are the analyses that led to the indicators? + + **Answer**: Since no misbehaviours for the use of the library for parsing and verification of JSON data according to RFC8259 have been identified, no warning indicators are implemented. + +* How confident can we be that the list of indicators is comprehensive? + + **Answer**: There are no warning indicators implemented, of which we are very confident. + +* Could there be whole categories of warning indicators still missing? + + **Answer**: Yes, there could. 
Within S-CORE, however, any warning indicator that is not natively implemented within the original nlohmann/json should be implemented in the wrapper defining the interface between the library and the project using it. + +* How has the list of advance warning indicators varied over time? + + **Answer**: It has stayed constant. + +* How confident are we that the indicators are leading/predictive? + + **Answer**: There are none. + +* Are there misbehaviours that have no advance warning indicators? + + **Answer**: There are no misbehaviours identified. + +* Can we collect data for all indicators? + + **Answer**: There are currently no implemented indicators, so that no data are collected. + +* Are the monitoring mechanisms used included in our Trustable scope? + + **Answer**: No, but there are also none. + +* Are there gaps or trends in the data? + + **Answer**: There are no data where gaps or trends could be identified. + +* If there are gaps or trends, are they analysed and addressed? + + **Answer**: There are no data. + +* Is the data actually predictive/useful? + + **Answer**: There are no data. + +* Are indicators from code, component, tool, or data inspections taken into consideration? + + **Answer**: There are no indicators. \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-INDICATORS.md b/TSF/trustable/assertions/TA-INDICATORS.md new file mode 100644 index 0000000000..d97301176f --- /dev/null +++ b/TSF/trustable/assertions/TA-INDICATORS.md @@ -0,0 +1,9 @@ +--- +level: 1.1 +normative: true +references: + - type: checklist + path: ./TSF/trustable/assertions/TA-INDICATORS-CHECKLIST.md +--- + +In eclipse-score/inc_nlohmann_json, advanced warning indicators for misbehaviours are identified, and monitoring mechanisms are specified, verified and validated based on analysis. 
\ No newline at end of file diff --git a/TSF/trustable/assertions/TA-INPUTS-CHECKLIST.md b/TSF/trustable/assertions/TA-INPUTS-CHECKLIST.md new file mode 100644 index 0000000000..d624636ceb --- /dev/null +++ b/TSF/trustable/assertions/TA-INPUTS-CHECKLIST.md @@ -0,0 +1,63 @@ +#### Checklist for TA-INPUTS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + +The single_include/nlohmann/json.hpp is the single and only component of the library. + +* Are there components that are not on the list? + + **Answer**: No. + +* Are there assessments for all components? + + **Answer**: ????? + +* Has an assessment been done for the current version of the component? + + **Answer**: ????? + +* Have sources of bug and/or vulnerability data been identified? + + **Answer**: There are no bug and/or vulnerability data. + +* Have additional tests and/or Expectations been documented and linked to component assessment? + + **Answer**: ?????? + +* Are component tests run when integrating new versions of components? + + **Answer**: There are no further components. + +* Are there tools that are not on the list? + + **Answer**: The library does not use external tools, except for the tools provided by the C++ standard library. + +* Are there impact assessments for all tools? + + **Answer**: ?????? The library does not use external tools for which an impact assessment has to be done. + +* Have tools with high impact been qualified? + + **Answer**: There are no tools with high impact. + +* Were assessments or reviews done for the current tool versions? + + **Answer**: ????? The library does not use external tools for which an impact assessment has to be done. + +* Have additional tests and/or Expectations been documented and linked to tool assessments? + + **Answer**: No. + +* Are tool tests run when integrating new versions of tools? + + **Answer**: The library does not use external tools for which a new version needs to be integrated. 
+ +* Are tool and component tests included in release preparation? + + **Answer**: Yes, the tests of the library are included in the release. + +* Can patches be applied, and then upstreamed for long-term maintenance? + + **Answer**: Yes, if ever a misbehaviour is found and patched, then a pull-request to the original nlohmann/json repository can be opened to upstream the changes. + +* Do all dependencies comply with acceptable licensing terms? + + **Answer**: Yes, the library is licensed under MIT License . \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-INPUTS.md b/TSF/trustable/assertions/TA-INPUTS.md new file mode 100644 index 0000000000..93013745c1 --- /dev/null +++ b/TSF/trustable/assertions/TA-INPUTS.md @@ -0,0 +1,9 @@ +--- +level: 1.1 +normative: true +references: + - type: checklist + path: ./TSF/trustable/assertions/TA-INPUTS-CHECKLIST.md +--- + +All inputs to the nlohmann/json library are assessed, to identify potential risks and issues. diff --git a/TSF/trustable/assertions/TA-ITERATIONS-CHECKLIST.md b/TSF/trustable/assertions/TA-ITERATIONS-CHECKLIST.md new file mode 100644 index 0000000000..5a26f8b75e --- /dev/null +++ b/TSF/trustable/assertions/TA-ITERATIONS-CHECKLIST.md @@ -0,0 +1,21 @@ +#### Checklist for TA-ITERATIONS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + +* How much of the software is provided as binary only, expressed as a fraction of the BoM list? + + **Answer**: None. + +* How much is binary, expressed as a fraction of the total storage footprint? + + **Answer**: None. + +* For binaries, what claims are being made and how confident are we in the people/organisations making the claims? + + **Answer**: There are no binaries. + +* For third-party source code, what claims are we making, and how confident are we about these claims? + + **Answer**: There is no third-party source code in the library. 
+ +* For software developed by us, what claims are we making, and how confident are we about these claims? + + **Answer**: This is the remainder of the documentation. \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-ITERATIONS.md b/TSF/trustable/assertions/TA-ITERATIONS.md new file mode 100644 index 0000000000..378dbda92b --- /dev/null +++ b/TSF/trustable/assertions/TA-ITERATIONS.md @@ -0,0 +1,9 @@ +--- +level: 1.1 +normative: true +references: + - type: checklist + path: ./TSF/trustable/assertions/TA-ITERATIONS-CHECKLIST.md +--- + +All constructed iterations of the nlohmann/json library include source code, build instructions, tests, results and attestations. diff --git a/TSF/trustable/assertions/TA-METHODOLOGIES-CHECKLIST.md b/TSF/trustable/assertions/TA-METHODOLOGIES-CHECKLIST.md new file mode 100644 index 0000000000..101fedf5b4 --- /dev/null +++ b/TSF/trustable/assertions/TA-METHODOLOGIES-CHECKLIST.md @@ -0,0 +1,43 @@ +#### Checklist for TA-METHODOLOGIES from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + +This project follows purely the Methodologies of Eclipse S-CORE. + +* Are the identified gaps documented clearly to justify using a manual process? + + **Answer**: + +* Are the goals for each process clearly defined? + + **Answer**: + +* Is the sequence of procedures documented in an unambiguous manner? + + **Answer**: + +* Can improvements to the processes be suggested and implemented? + + **Answer**: + +* How frequently are processes changed? + + **Answer**: + +* How are changes to manual processes communicated? + + **Answer**: + +* Are there any exceptions to the processes? + + **Answer**: + +* How is evidence of process adherence recorded? + + **Answer**: + +* How is the effectiveness of the process evaluated? + + **Answer**: + +* Is ongoing training required to follow these processes? 
+ + **Answer**: \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-METHODOLOGIES.md b/TSF/trustable/assertions/TA-METHODOLOGIES.md new file mode 100644 index 0000000000..4773814954 --- /dev/null +++ b/TSF/trustable/assertions/TA-METHODOLOGIES.md @@ -0,0 +1,9 @@ +--- +level: 1.1 +normative: true +references: + - type: checklist + path: ./TSF/trustable/assertions/TA-METHODOLOGIES-CHECKLIST.md +--- + +Manual methodologies applied for the nlohmann/json library by contributors, and their results, are managed according to specified objectives. diff --git a/TSF/trustable/assertions/TA-MISBEHAVIOURS-CHECKLIST.md b/TSF/trustable/assertions/TA-MISBEHAVIOURS-CHECKLIST.md new file mode 100644 index 0000000000..773a481558 --- /dev/null +++ b/TSF/trustable/assertions/TA-MISBEHAVIOURS-CHECKLIST.md @@ -0,0 +1,49 @@ +#### Checklist for TA-MISBEHAVIOURS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + +* How has the list of misbehaviours varied over time? + + **Answer**: The list of misbehaviours is collected using github and its development is thereby understandable. + +* How confident can we be that this list is comprehensive? + + **Answer**: Due to the collaborative nature of the open source community, we deem it quite unlikely that there are any known misbehaviours which are not reported to the repository nlohmann/json. + +* How well do the misbehaviours map to the expectations? + + **Answer**: There are no identified misbehaviours that touch the expectations. + +* Could some participants have incentives to manipulate information? + + **Answer**: We could not think of an incentive that any collaborator could have to manipulate the information. + +* Could there be whole categories of misbehaviours still undiscovered? 
+ + **Answer**: Due to the wide use and long-standing development of the library it is quite unlikely that any major misbehaviours, in particular regarding the parsing and validating of JSON data in the sense of RFC-8259, are undiscovered. + +* Can we identify misbehaviours that have been understood but not specified? + + **Answer**: No. + +* Can we identify some new misbehaviours, right now? + + **Answer**: No. + +* Is every misbehaviour represented by at least one fault induction test? + + **Answer**: Since there are no misbehaviours that concern the use within S-CORE, no. + +* Are fault inductions used to demonstrate that tests which usually pass can and do fail appropriately? + + **Answer**: ?????? No. + +* Are all the fault induction results actually collected? + + **Answer**: ?????? No. + +* Are the results evaluated? + + **Answer**: ?????? No. + +* Do input analysis findings on verifiable tool or component claims and features identify additional misbehaviours or support existing mitigations? + + **Answer**: Currently, there is no analysis which identifies additional misbehaviours. The only such analysis is indirectly via the analysis of the fuzz testing, which currently does not identify additional misbehaviours. \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-MISBEHAVIOURS.md b/TSF/trustable/assertions/TA-MISBEHAVIOURS.md new file mode 100644 index 0000000000..e052a440bc --- /dev/null +++ b/TSF/trustable/assertions/TA-MISBEHAVIOURS.md @@ -0,0 +1,9 @@ +--- +level: 1.1 +normative: true +references: + - type: checklist + path: ./TSF/trustable/assertions/TA-MISBEHAVIOURS-CHECKLIST.md +--- + +Prohibited misbehaviours for the nlohmann/json library are identified, and mitigations are specified, verified and validated based on analysis. 
diff --git a/TSF/trustable/assertions/TA-RELEASES-CHECKLIST.md b/TSF/trustable/assertions/TA-RELEASES-CHECKLIST.md new file mode 100644 index 0000000000..ece1bc453f --- /dev/null +++ b/TSF/trustable/assertions/TA-RELEASES-CHECKLIST.md @@ -0,0 +1,25 @@ +#### Checklist for TA-RELEASES from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + +* How confident are we that all components are taken from within our controlled environment? + + **Answer**: This library does not take anything from outside of this repository. + +* How confident are we that all of the tools we are using are also under our control? + + **Answer**: The version of nlohmann/json that is documented with this documentation is under the full control of the Eclipse S-CORE organisation. + +* Are our builds repeatable on a different server, or in a different context? + + **Answer**: Since there is no "build" of the header-only library, yes. + +* How sure are we that our builds don't access the internet? + + **Answer**: There is no implemented access to the internet in the library itself. The testsuite is downloaded from a within Eclipse S-CORE. + +* How many of our components are non-reproducible? + + **Answer**: The single component is reproducible. + +* How confident are we that our reproducibility check is correct? + + **Answer**: Quite. \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-RELEASES.md b/TSF/trustable/assertions/TA-RELEASES.md new file mode 100644 index 0000000000..5cf9412fe9 --- /dev/null +++ b/TSF/trustable/assertions/TA-RELEASES.md @@ -0,0 +1,9 @@ +--- +level: 1.1 +normative: true +references: + - type: checklist + path: ./TSF/trustable/assertions/TA-RELEASES-CHECKLIST.md +--- + +Construction of releases for the nlohmann/json library is fully repeatable and the results are fully reproducible, with any exceptions documented and justified. 
diff --git a/TSF/trustable/assertions/TA-SUPPLY_CHAIN-CHECKLIST.md b/TSF/trustable/assertions/TA-SUPPLY_CHAIN-CHECKLIST.md new file mode 100644 index 0000000000..dbb2e3f0a3 --- /dev/null +++ b/TSF/trustable/assertions/TA-SUPPLY_CHAIN-CHECKLIST.md @@ -0,0 +1,25 @@ +#### Checklist for TA-SUPPLY_CHAIN from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + +* Could there be other components, missed from the list? + + **Answer**: Since the library does not contain any external components, no. + +* Does the list include all toolchain components? + + **Answer**: Since the library does not contain any external components, yes. + +* Does the toolchain include a bootstrap? + + **Answer**: ???? No. + +* Could the content of a mirrored project be compromised by an upstream change? + + **Answer**: Since the library does not contain any external components, no. + +* Are mirrored projects up to date with the upstream project? + + **Answer**: Yes, the library is up to date with the most recent release of the original nlohmann/json + +* Are mirrored projects based on the correct upstream? + + **Answer**: Yes. \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-SUPPLY_CHAIN.md b/TSF/trustable/assertions/TA-SUPPLY_CHAIN.md new file mode 100644 index 0000000000..e585a17752 --- /dev/null +++ b/TSF/trustable/assertions/TA-SUPPLY_CHAIN.md @@ -0,0 +1,9 @@ +--- +level: 1.1 +normative: true +references: + - type: checklist + path: ./TSF/trustable/assertions/TA-SUPPLY_CHAIN-CHECKLIST.md +--- + +All sources and tools for the nlohmann/json library are mirrored in our controlled environment. 
diff --git a/TSF/trustable/assertions/TA-TESTS-CHECKLIST.md b/TSF/trustable/assertions/TA-TESTS-CHECKLIST.md new file mode 100644 index 0000000000..8f05b60bdd --- /dev/null +++ b/TSF/trustable/assertions/TA-TESTS-CHECKLIST.md @@ -0,0 +1,25 @@ +#### Checklist for TA-TESTS from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + +* How confident are we that our test tooling and environment setups used for tests, fault inductions, and analyses are reproducible? + + **Answer**: The test can be reproduced any time on any machine running the versions of the operating systems and compilers as provided (TODO, cf. AOU-14) + +* Are any exceptions identified, documented and justified? + + **Answer**: To the best of our understanding, there are no exceptions identified. + +* How confident are we that all test components are taken from within our controlled environment? + + **Answer**: All tests are either self-contained or download test data from [within Eclipse S-CORE](https://github.com/eclipse-score/inc_nlohmann_json/tree/json_test_data_version_3_1_0_mirror). + +* How confident are we that all of the test environments we are using are also under our control? + + **Answer**: ???? The environments are standard docker images of ubuntu and standard versions of compilers. + +* Do we record all test environment components, including hardware and infrastructure used for exercising tests and processing input/output data? + + **Answer**: No, since the tests are independent from hard-ware, these data are not collected. + +* How confident are we that all tests scenarios are repeatable? + + **Answer**: All test scenarios are repeated daily in the CI pipeline. 
\ No newline at end of file diff --git a/TSF/trustable/assertions/TA-TESTS.md b/TSF/trustable/assertions/TA-TESTS.md new file mode 100644 index 0000000000..7269be53e3 --- /dev/null +++ b/TSF/trustable/assertions/TA-TESTS.md @@ -0,0 +1,9 @@ +--- +level: 1.1 +normative: true +references: + - type: checklist + path: ./TSF/trustable/assertions/TA-TESTS-CHECKLIST.md +--- + +All tests for the nlohmann/json library, and its build and test environments, are constructed from controlled/mirrored sources and are reproducible, with any exceptions documented. diff --git a/TSF/trustable/assertions/TA-UPDATES-CHECKLIST.md b/TSF/trustable/assertions/TA-UPDATES-CHECKLIST.md new file mode 100644 index 0000000000..482d412201 --- /dev/null +++ b/TSF/trustable/assertions/TA-UPDATES-CHECKLIST.md @@ -0,0 +1,25 @@ +#### Checklist for TA-UPDATES from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + +* Where are the change and configuration management controls specified? + + **Answer**: WIP + +* Are these controls enforced for all of components, tools, data, documentation and configurations? + + **Answer**: The S-CORE Methodology is followed, compliance with which enforces the change process to be followed. + +* Are there any ways in which these controls can be subverted, and have we mitigated them? + + **Answer**: Yes, the change process can just not be followed. We have no real method to enforce it other than to trust that the committers follow the S-CORE processes. + +* Does change control capture all potential regressions? + + **Answer**: Due to the test coverage of 99.186%, it is unlikely that a potential regression is not captured. + +* Is change control timely enough? + + **Answer**: Not applicable, as far as can be understood right now, there is no immanent need to keep the library up to date. + +* Are all guidance and checks understandable and consistently followed? 
+ + **Answer**: WIP \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-UPDATES.md b/TSF/trustable/assertions/TA-UPDATES.md new file mode 100644 index 0000000000..0113c23a4f --- /dev/null +++ b/TSF/trustable/assertions/TA-UPDATES.md @@ -0,0 +1,9 @@ +--- +level: 1.1 +normative: true +references: + - type: checklist + path: ./TSF/trustable/assertions/TA-UPDATES-CHECKLIST.md +--- + +nlohmann/json library components, configurations and tools are updated under specified change and configuration management controls. diff --git a/TSF/trustable/assertions/TA-VALIDATION-CHECKLIST.md b/TSF/trustable/assertions/TA-VALIDATION-CHECKLIST.md new file mode 100644 index 0000000000..8e23bae526 --- /dev/null +++ b/TSF/trustable/assertions/TA-VALIDATION-CHECKLIST.md @@ -0,0 +1,55 @@ +#### Checklist for TA-VALIDATION from [Codethink](https://codethinklabs.gitlab.io/trustable/trustable/print_page.html) + +I DO NOT FEEL CONFIDENT TO **Answer** THIS! + +* Is the selection of tests correct? + + **Answer**: ???? Who could tell this? + +* Are the tests executed enough times? + + **Answer**: ???? Define "enough times" + +* How confident are we that all test results are being captured? + + **Answer**: ???? How fine-grained is a test-result supposed to be? + +* Can we look at any individual test result, and establish what it relates to? + + **Answer**: ???? + +* Can we trace from any test result to the expectation it relates to? + + **Answer**: No, there are more tests than expectations, and in particular tests that relate to the inner workings of the library which are not used by S-CORE. + +* Can we identify precisely which environment (software and hardware) were used? + + **Answer**: ???? How precisely shall that be? Moreover, the tests are supposed to run independent of underlying hardware, since this is a software. + +* How many pass/fail results would be expected, based on the scheduled tests? + + **Answer**: Zero fails. + +* Do we have all of the expected results? 
+ + **Answer**: Yes. + +* Do we have time-series data for all of those results? + + **Answer**: Yes, there are time-series data. + +* If there are any gaps, do we understand why? + + **Answer**: ???? Define gaps + +* Are the test validation strategies credible and appropriate? + + **Answer**: ???? Define test validation strategies + +* What proportion of the implemented tests are validated? + + **Answer**: ???? None. + +* Have the tests been verified using known good and bad data? + + **Answer**: ???? \ No newline at end of file diff --git a/TSF/trustable/assertions/TA-VALIDATION.md b/TSF/trustable/assertions/TA-VALIDATION.md new file mode 100644 index 0000000000..e10fc5302b --- /dev/null +++ b/TSF/trustable/assertions/TA-VALIDATION.md @@ -0,0 +1,9 @@ +--- +level: 1.1 +normative: true +references: + - type: checklist + path: ./TSF/trustable/assertions/TA-VALIDATION-CHECKLIST.md +--- + +All specified tests are executed repeatedly, under defined conditions in controlled environments, according to specified objectives. (To revisit) \ No newline at end of file diff --git a/TSF/trustable/assumptions-of-use/AOU-01.md b/TSF/trustable/assumptions-of-use/AOU-01.md new file mode 100644 index 0000000000..d53bae1616 --- /dev/null +++ b/TSF/trustable/assumptions-of-use/AOU-01.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The integrator shall report problems with eclipse-score/inc_nlohmann_json's implementation to the upstream nlohmann/json repository whenever a problem is detected. diff --git a/TSF/trustable/assumptions-of-use/AOU-02.md b/TSF/trustable/assumptions-of-use/AOU-02.md new file mode 100644 index 0000000000..d859996ad7 --- /dev/null +++ b/TSF/trustable/assumptions-of-use/AOU-02.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The integrator shall ensure that the build environment used for eclipse-score/inc_nlohmann_json is supplied with consistent dependencies in every integrating system. 
 \ No newline at end of file diff --git a/TSF/trustable/assumptions-of-use/AOU-03.md b/TSF/trustable/assumptions-of-use/AOU-03.md new file mode 100644 index 0000000000..f60d9002df --- /dev/null +++ b/TSF/trustable/assumptions-of-use/AOU-03.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The integrator shall ensure that integrator-controlled mirrors of the dependencies of the nlohmann/json repository are persistently and accessibly stored as long as the nlohmann/json library is used within eclipse-score/inc_nlohmann_json. diff --git a/TSF/trustable/assumptions-of-use/AOU-04.md b/TSF/trustable/assumptions-of-use/AOU-04.md new file mode 100644 index 0000000000..296999d512 --- /dev/null +++ b/TSF/trustable/assumptions-of-use/AOU-04.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The integrator shall ensure that exceptions are properly handled or turned off in eclipse-score/inc_nlohmann_json, whenever eclipse-score/inc_nlohmann_json's implementation of nlohmann/json is used. \ No newline at end of file diff --git a/TSF/trustable/assumptions-of-use/AOU-04_CONTEXT.md b/TSF/trustable/assumptions-of-use/AOU-04_CONTEXT.md new file mode 100644 index 0000000000..2cebb69f0b --- /dev/null +++ b/TSF/trustable/assumptions-of-use/AOU-04_CONTEXT.md @@ -0,0 +1,10 @@ +--- +level: 1.1 +normative: false +--- + +All exceptions (json::parse_error, json::invalid_iterator, json::type_error, json::out_of_range, json::other_error) inherit from json::exception. + +The nlohmann/json library uses JSON_TRY, JSON_CATCH, etc., macros instead of the exception keywords try, catch, etc., which may be overwritten to suppress exceptions. Each keyword can be individually overwritten (e.g. #define JSON_THROW(exception) std::abort()) or you can set (#define JSON_NOEXCEPTION) which leads to suppressing exceptions. + +Alternatively, the accept function may first be used to check if the JSON is valid since the accept function only throws an exception for an empty input. 
In case of invalid JSON, false is returned and no exception is thrown. The parse function also has a parameter allow_exceptions to turn off parse error exceptions. \ No newline at end of file diff --git a/TSF/trustable/assumptions-of-use/AOU-05.md b/TSF/trustable/assumptions-of-use/AOU-05.md new file mode 100644 index 0000000000..5c2e005d6c --- /dev/null +++ b/TSF/trustable/assumptions-of-use/AOU-05.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The integrator shall ensure that in eclipse-score/inc_nlohmann_json, input is encoded as UTF-8 (as required by RFC8259) and that in case other string formats are used, thrown exceptions are properly handled. \ No newline at end of file diff --git a/TSF/trustable/assumptions-of-use/AOU-06.md b/TSF/trustable/assumptions-of-use/AOU-06.md new file mode 100644 index 0000000000..2cf025d440 --- /dev/null +++ b/TSF/trustable/assumptions-of-use/AOU-06.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The integrator shall ensure that in eclipse-score/inc_nlohmann_json brace initialization (e.g. json j{true};) is not used with the types basic_json, json, or ordered_json, unless an object or array is created. \ No newline at end of file diff --git a/TSF/trustable/assumptions-of-use/AOU-07.md b/TSF/trustable/assumptions-of-use/AOU-07.md new file mode 100644 index 0000000000..4b0a38917c --- /dev/null +++ b/TSF/trustable/assumptions-of-use/AOU-07.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The integrator shall ensure in eclipse-score/inc_nlohmann_json that exceptions, which are expected during parsing with default parameters, are properly handled whenever the input is not valid JSON. 
 \ No newline at end of file diff --git a/TSF/trustable/assumptions-of-use/AOU-08.md b/TSF/trustable/assumptions-of-use/AOU-08.md new file mode 100644 index 0000000000..f59e599442 --- /dev/null +++ b/TSF/trustable/assumptions-of-use/AOU-08.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The integrator shall ensure that all necessary source files and build tools are mirrored in eclipse-score/inc_nlohmann_json, e.g. using a build server without internet access, as long as nlohmann/json is actively used within eclipse-score/inc_nlohmann_json. diff --git a/TSF/trustable/assumptions-of-use/AOU-09.md b/TSF/trustable/assumptions-of-use/AOU-09.md new file mode 100644 index 0000000000..b2fb8ac3d8 --- /dev/null +++ b/TSF/trustable/assumptions-of-use/AOU-09.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The integrator shall ensure inside eclipse-score/inc_nlohmann_json that advanced warning indicators for misbehaviours are identified, and monitoring mechanisms are specified, verified and validated based on analysis. \ No newline at end of file diff --git a/TSF/trustable/assumptions-of-use/AOU-10.md b/TSF/trustable/assumptions-of-use/AOU-10.md new file mode 100644 index 0000000000..1480e917a0 --- /dev/null +++ b/TSF/trustable/assumptions-of-use/AOU-10.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The integrator shall, whenever possible, turn any remaining Assumptions-of-Use (AOU) items into statements and add suitable references and/or validators. \ No newline at end of file diff --git a/TSF/trustable/assumptions-of-use/AOU-11.md b/TSF/trustable/assumptions-of-use/AOU-11.md new file mode 100644 index 0000000000..51946413ac --- /dev/null +++ b/TSF/trustable/assumptions-of-use/AOU-11.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The integrator shall, whenever possible, replace outdated and/or provide additional references and validators that would further improve the trustability of a statement. 
diff --git a/TSF/trustable/assumptions-of-use/AOU-14.md b/TSF/trustable/assumptions-of-use/AOU-14.md new file mode 100644 index 0000000000..943c8dddef --- /dev/null +++ b/TSF/trustable/assumptions-of-use/AOU-14.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The integrator shall ensure that the eclipse-score/inc_nlohmann_json is built with tools from the provided matrix specification, whenever nlohmann/json is used within eclipse-score/inc_nlohmann_json. (not yet provided) \ No newline at end of file diff --git a/TSF/trustable/assumptions-of-use/AOU-15.md b/TSF/trustable/assumptions-of-use/AOU-15.md new file mode 100644 index 0000000000..3b0a6d5cff --- /dev/null +++ b/TSF/trustable/assumptions-of-use/AOU-15.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The integrator shall maintain mirrors for all code and tools utilized in testing as long as nlohmann/json is actively used within eclipse-score/inc_nlohmann_json. \ No newline at end of file diff --git a/TSF/trustable/assumptions-of-use/AOU-16.md b/TSF/trustable/assumptions-of-use/AOU-16.md new file mode 100644 index 0000000000..63452376b8 --- /dev/null +++ b/TSF/trustable/assumptions-of-use/AOU-16.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The integrator shall use C++ versions and compilers that are tested in the CI pipeline, whenever nlohmann/json is used within eclipse-score/inc_nlohmann_json. \ No newline at end of file diff --git a/TSF/trustable/assumptions-of-use/AOU-17.md b/TSF/trustable/assumptions-of-use/AOU-17.md new file mode 100644 index 0000000000..fe86876933 --- /dev/null +++ b/TSF/trustable/assumptions-of-use/AOU-17.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The integrator shall identify misbehaviours for the nlohmann/json library, define appropriate mitigations, and ensure that these mitigations are thoroughly validated, whenever using eclipse-score/inc_nlohmann_json. 
\ No newline at end of file diff --git a/TSF/trustable/assumptions-of-use/AOU-18.md b/TSF/trustable/assumptions-of-use/AOU-18.md new file mode 100644 index 0000000000..c8c2f9dfce --- /dev/null +++ b/TSF/trustable/assumptions-of-use/AOU-18.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The integrator shall ensure that monitoring data from deployed software is accurately captured, securely stored, and well-documented for analysis within eclipse-score/inc_nlohmann_json, as long as the nlohmann/json library is actively used within eclipse-score/inc_nlohmann_json. diff --git a/TSF/trustable/assumptions-of-use/AOU-19.md b/TSF/trustable/assumptions-of-use/AOU-19.md new file mode 100644 index 0000000000..ad3b960223 --- /dev/null +++ b/TSF/trustable/assumptions-of-use/AOU-19.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The integrator shall analyze monitoring data systematically to detect trends and identify issues, as long as the nlohmann/json library is actively used within eclipse-score/inc_nlohmann_json. diff --git a/TSF/trustable/assumptions-of-use/AOU-20.md b/TSF/trustable/assumptions-of-use/AOU-20.md new file mode 100644 index 0000000000..fab26e4e2b --- /dev/null +++ b/TSF/trustable/assumptions-of-use/AOU-20.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The integrator shall ensure that the keys within an object are unique, whenever an object is to be parsed by eclipse-score/inc_nlohmann_json. \ No newline at end of file diff --git a/TSF/trustable/assumptions-of-use/AOU-21.md b/TSF/trustable/assumptions-of-use/AOU-21.md new file mode 100644 index 0000000000..290b090818 --- /dev/null +++ b/TSF/trustable/assumptions-of-use/AOU-21.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The integrator shall ensure that a string does not contain escaped unpaired utf-16 surrogate characters, and that exceptions are properly handled in eclipse-score/inc_nlohmann_json, whenever a string is to be parsed. 
\ No newline at end of file diff --git a/TSF/trustable/assumptions-of-use/AOU-22.md b/TSF/trustable/assumptions-of-use/AOU-22.md new file mode 100644 index 0000000000..7c3ddd0638 --- /dev/null +++ b/TSF/trustable/assumptions-of-use/AOU-22.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The integrator shall ensure that numbers are written in base 10, and that exceptions and misbehaviours in case that any other base is used are properly handled and mitigated within eclipse-score/inc_nlohmann_json, whenever a number is parsed. \ No newline at end of file diff --git a/TSF/trustable/assumptions-of-use/AOU-23.md b/TSF/trustable/assumptions-of-use/AOU-23.md new file mode 100644 index 0000000000..63f6aa1d64 --- /dev/null +++ b/TSF/trustable/assumptions-of-use/AOU-23.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The integrator shall ensure that data are complete and error-free, whenever they are transmitted to eclipse-score/inc_nlohmann_json. \ No newline at end of file diff --git a/TSF/trustable/assumptions-of-use/AOU-24.md b/TSF/trustable/assumptions-of-use/AOU-24.md new file mode 100644 index 0000000000..94935b2586 --- /dev/null +++ b/TSF/trustable/assumptions-of-use/AOU-24.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The integrator shall ensure that the data do not change during reading, whenever transmitted to eclipse-score/inc_nlohmann_json. \ No newline at end of file diff --git a/TSF/trustable/assumptions-of-use/AOU-25.md b/TSF/trustable/assumptions-of-use/AOU-25.md new file mode 100644 index 0000000000..c2580d30ba --- /dev/null +++ b/TSF/trustable/assumptions-of-use/AOU-25.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The integrator shall convince themselves that the behaviour of the used C++ standard library is known, verified and validated. 
\ No newline at end of file diff --git a/TSF/trustable/assumptions-of-use/AOU-26.md b/TSF/trustable/assumptions-of-use/AOU-26.md new file mode 100644 index 0000000000..ba8aa1bac6 --- /dev/null +++ b/TSF/trustable/assumptions-of-use/AOU-26.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The integrator shall convince themselves that the misbehaviours of the C++ standard library and mitigations are known, verified and validated. \ No newline at end of file diff --git a/TSF/trustable/assumptions-of-use/AOU-27.md b/TSF/trustable/assumptions-of-use/AOU-27.md new file mode 100644 index 0000000000..8587ba4304 --- /dev/null +++ b/TSF/trustable/assumptions-of-use/AOU-27.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The integrator shall ensure that the 'Release management' and 'Update concepts' in TSF/README.md are followed whenever any changes are done in eclipse-score/inc_nlohmann_json. diff --git a/TSF/trustable/assumptions-of-use/AOU-28.md b/TSF/trustable/assumptions-of-use/AOU-28.md new file mode 100644 index 0000000000..702cddfb56 --- /dev/null +++ b/TSF/trustable/assumptions-of-use/AOU-28.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The integrator shall ensure that the known open bugs of the nlohmann/json repository are regularly reviewed on their impact on the use of the documented version of nlohmann/json, as long as the nlohmann/json library is actively used within eclipse-score/inc_nlohmann_json. diff --git a/TSF/trustable/assumptions-of-use/AOU-29.md b/TSF/trustable/assumptions-of-use/AOU-29.md new file mode 100644 index 0000000000..aeeb503b94 --- /dev/null +++ b/TSF/trustable/assumptions-of-use/AOU-29.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The integrator shall check the security tab in the GitHub UI on a regular basis, analyze and either fix or dismiss any outstanding CVEs. 
\ No newline at end of file diff --git a/TSF/trustable/expectations/JLEX-01.md b/TSF/trustable/expectations/JLEX-01.md new file mode 100644 index 0000000000..e1b6cffcce --- /dev/null +++ b/TSF/trustable/expectations/JLEX-01.md @@ -0,0 +1,38 @@ +--- +level: 1.1 +normative: true +references: + - type: function_reference + name: "basic_json::accept" + path: "include/nlohmann/json.hpp" + description: "the public interface of the `accept`-functionality of nlohmann/json for single inputs" + overload: 1 + - type: function_reference + name: "basic_json::accept" + path: "include/nlohmann/json.hpp" + description: "the public interface of the `accept`-functionality of nlohmann/json for iterator inputs" + overload: 2 + - type: function_reference + name: "basic_json::accept" + path: "include/nlohmann/json.hpp" + description: "the public interface of the `accept`-functionality of nlohmann/json for input buffer" + overload: 3 + - type: function_reference + name: "parser::accept" + path: "include/nlohmann/detail/input/parser.hpp" + description: "the internal `accept`-functionality called by basic_json::accept" + - type: function_reference + name: "parser::sax_parse" + path: "include/nlohmann/detail/input/parser.hpp" + description: "called by parser::accept" + - type: function_reference + name: "parser::sax_parse_internal" + path: "include/nlohmann/detail/input/parser.hpp" + description: "called by parser::sax_parse" + - type: function_reference + name: "lexer::scan" + path: "include/nlohmann/detail/input/lexer.hpp" + description: "scans input, called in parser::sax_parse_internal" +--- + +The requirement regarding [JSON Validation](https://eclipse-score.github.io/score/main/modules/baselibs/json/docs/requirements/index.html#comp_req__json__validation) is fulfilled. 
\ No newline at end of file diff --git a/TSF/trustable/expectations/JLEX-02.md b/TSF/trustable/expectations/JLEX-02.md new file mode 100644 index 0000000000..d76809b506 --- /dev/null +++ b/TSF/trustable/expectations/JLEX-02.md @@ -0,0 +1,38 @@ +--- +level: 1.1 +normative: true +references: + - type: function_reference + name: "basic_json::parse" + path: "include/nlohmann/json.hpp" + description: "the public interface of the `parse`-functionality of nlohmann/json for single inputs" + overload: 1 + - type: function_reference + name: "basic_json::parse" + path: "include/nlohmann/json.hpp" + description: "the public interface of the `parse`-functionality of nlohmann/json for iterator inputs" + overload: 2 + - type: function_reference + name: "basic_json::parse" + path: "include/nlohmann/json.hpp" + description: "the public interface of the `parse`-functionality of nlohmann/json for input buffer" + overload: 3 + - type: function_reference + name: "parser::parse" + path: "include/nlohmann/detail/input/parser.hpp" + description: "the internal `parse`-functionality called by basic_json::parse" + - type: function_reference + name: "parser::sax_parse" + path: "include/nlohmann/detail/input/parser.hpp" + description: "called by parser::parse" + - type: function_reference + name: "parser::sax_parse_internal" + path: "include/nlohmann/detail/input/parser.hpp" + description: "called by parser::sax_parse" + - type: function_reference + name: "lexer::scan" + path: "include/nlohmann/detail/input/lexer.hpp" + description: "scans input, called in parser::sax_parse_internal" +--- + +The requirement regarding [JSON Deserialization](https://eclipse-score.github.io/score/main/modules/baselibs/json/docs/requirements/index.html#comp_req__json__deserialization) is fulfilled. 
\ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-01.md b/TSF/trustable/no-json-faults/NJF-01.md new file mode 100644 index 0000000000..2036272639 --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-01.md @@ -0,0 +1,38 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-01 + - type: cpp_test + path: "TSF/tests/unit-class_parser_core.cpp" + name: "parser class - core;accept;null" + - type: JSON_testsuite + name: "nst's JSONTestSuite;test_parsing;y" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite/test_parsing/y_structure_lonely_null.json" + description: "" + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;y" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/y_structure_lonely_null.json" + description: "" + - type: function_reference + name: "lexer::scan_literal" + path: "include/nlohmann/detail/input/lexer.hpp" + description: "function to verify whether a candidate literal coincides with its expected value; here called with literal_text = ['n','u','l','l']." +evidence: + type: check_test_results + configuration: + tests: + - class_parser_core + - testsuites +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library accepts the literal name null. 
diff --git a/TSF/trustable/no-json-faults/NJF-02.md b/TSF/trustable/no-json-faults/NJF-02.md new file mode 100644 index 0000000000..5ee93f1079 --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-02.md @@ -0,0 +1,42 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-01 + - type: cpp_test + name: "parser class - core;accept;true" + path: "TSF/tests/unit-class_parser_core.cpp" + - type: cpp_test + name: "deserialization;contiguous containers;directly" + path: "tests/src/unit-deserialization.cpp" + - type: JSON_testsuite + name: "nst's JSONTestSuite;test_parsing;y" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite/test_parsing/y_structure_lonely_true.json" + description: "" + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;y" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/y_structure_lonely_true.json" + description: "" + - type: function_reference + name: "lexer::scan_literal" + path: "include/nlohmann/detail/input/lexer.hpp" + description: "function to verify whether a candidate literal coincides with its expected value; here called with literal_text = ['t','r','u','e']." +evidence: + type: check_test_results + configuration: + tests: + - class_parser_core + - testsuites + - deserialization +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library accepts the literal name true. 
diff --git a/TSF/trustable/no-json-faults/NJF-03.md b/TSF/trustable/no-json-faults/NJF-03.md new file mode 100644 index 0000000000..6c7b407deb --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-03.md @@ -0,0 +1,38 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-01 + - type: cpp_test + name: "parser class - core;accept;false" + path: "TSF/tests/unit-class_parser_core.cpp" + - type: JSON_testsuite + name: "nst's JSONTestSuite;test_parsing;y" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite/test_parsing/y_structure_lonely_false.json" + description: "" + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;y" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/y_structure_lonely_false.json" + description: "" + - type: function_reference + name: "lexer::scan_literal" + path: "include/nlohmann/detail/input/lexer.hpp" + description: "function to verify whether a candidate literal coincides with its expected value; here called with literal_text = ['f','a','l','s','e']." +evidence: + type: check_test_results + configuration: + tests: + - class_parser_core + - testsuites +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library accepts the literal name false. 
diff --git a/TSF/trustable/no-json-faults/NJF-04.md b/TSF/trustable/no-json-faults/NJF-04.md new file mode 100644 index 0000000000..7985dfaad8 --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-04.md @@ -0,0 +1,54 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-01 + - type: cpp_test + name: "parser class - core;accept;parse errors (accept)" + path: "TSF/tests/unit-class_parser_core.cpp" + - type: JSON_testsuite + name: "nst's JSONTestSuite;test_parsing;n" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite/test_parsing/n_incomplete_false.json" + - "/nst_json_testsuite/test_parsing/n_incomplete_null.json" + - "/nst_json_testsuite/test_parsing/n_incomplete_true.json" + - "/nst_json_testsuite/test_parsing/n_structure_number_with_trailing_garbage.json" + description: "" + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;n" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/n_incomplete_false.json" + - "/nst_json_testsuite2/test_parsing/n_incomplete_null.json" + - "/nst_json_testsuite2/test_parsing/n_incomplete_true.json" + - "/nst_json_testsuite2/test_parsing/n_structure_capitalized_True.json" + description: "" + - type: cpp_test + name: "accept;unicode" + path: "TSF/tests/unit-literals.cpp" + - type: cpp_test + name: "accept;capitalisation" + path: "TSF/tests/unit-literals.cpp" + - type: cpp_test + name: "accept;illegal literals" + path: "TSF/tests/unit-literals.cpp" + - type: function_reference + name: "lexer::scan_literal" + path: "include/nlohmann/detail/input/lexer.hpp" + description: "function to verify whether a candidate literal coincides with its expected value; only ever called with the three admissible expected values" +evidence: + type: check_test_results + configuration: + tests: + - class_parser_core + - testsuites + - literals +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 0.95 +--- + +The service provided by the 
nlohmann/json library does not accept any other literal name. diff --git a/TSF/trustable/no-json-faults/NJF-05.1.md b/TSF/trustable/no-json-faults/NJF-05.1.md new file mode 100644 index 0000000000..050422f656 --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-05.1.md @@ -0,0 +1,39 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-01 + - type: cpp_test + name: "parser class - core;accept;array;empty array" + path: "TSF/tests/unit-class_parser_core.cpp" + - type: JSON_testsuite + name: "nst's JSONTestSuite;test_parsing;y" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite/test_parsing/y_array_empty.json" + description: "" + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;y" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/y_array_empty.json" + - "/nst_json_testsuite2/test_parsing/y_array_arraysWithSpaces.json" + description: "Checks that the empty array [] is accepted." + - type: function_reference + name: "lexer::skip_whitespace" + path: "include/nlohmann/detail/input/lexer.hpp" + description: "function, which skips admissible whitespace during reading" +evidence: + type: check_test_results + configuration: + tests: + - class_parser_core + - testsuites +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library accepts the empty array. 
\ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-05.2.md b/TSF/trustable/no-json-faults/NJF-05.2.md new file mode 100644 index 0000000000..0368ee973d --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-05.2.md @@ -0,0 +1,44 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-01 + - type: cpp_test + name: "parser class - core;accept;array;nonempty array" + path: "TSF/tests/unit-class_parser_core.cpp" + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;y" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/y_array_false.json" + - "/nst_json_testsuite2/test_parsing/y_array_heterogeneous.json" + - "/nst_json_testsuite2/test_parsing/y_array_null.json" + - "/nst_json_testsuite2/test_parsing/y_array_with_1_and_newline.json" + - "/nst_json_testsuite2/test_parsing/y_array_with_leading_space.json" + - "/nst_json_testsuite2/test_parsing/y_array_with_several_null.json" + - "/nst_json_testsuite2/test_parsing/y_array_with_trailing_space.json" + description: "Checks that various valid arrays are accepted." + - type: JSON_testsuite + name: "json.org examples" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/json.org/1.json" + - "/json.org/2.json" + - "/json.org/3.json" + - "/json.org/4.json" + - "/json.org/5.json" + description: "Checks that various valid arrays in combination with objects are accepted." +evidence: + type: check_test_results + configuration: + tests: + - class_parser_core + - testsuites +score: + Jonas-Kirchhoff: 0.95 + Erikhu1: 0.85 +--- + +The service provided by the nlohmann/json library accepts the non-empty arrays. 
\ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-05.3.md b/TSF/trustable/no-json-faults/NJF-05.3.md new file mode 100644 index 0000000000..098d52b09d --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-05.3.md @@ -0,0 +1,40 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-01 + - type: cpp_test + name: "deserialization;successful deserialization;stream" + path: "tests/src/unit-deserialization.cpp" + - type: JSON_testsuite + name: "json.org examples" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/json.org/1.json" + - "/json.org/2.json" + - "/json.org/3.json" + - "/json.org/4.json" + - "/json.org/5.json" + description: "Checks that various valid arrays in combination with objects are accepted." + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;y" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/y_string_in_array.json" + - "/nst_json_testsuite2/test_parsing/y_string_in_array_with_leading_space.json" + - "/nst_json_testsuite2/test_parsing/y_structure_true_in_array.json" + description: "" +evidence: + type: check_test_results + configuration: + tests: + - deserialization + - testsuites +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +If every value candidate of a properly bounded array is accepted as singleton, then the service provided by the nlohmann/json library accepts the array. 
\ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-05.4.md b/TSF/trustable/no-json-faults/NJF-05.4.md new file mode 100644 index 0000000000..a261990e3b --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-05.4.md @@ -0,0 +1,54 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-01 + - type: cpp_test + name: "parser class - core;accept;parse errors (accept)" + path: "TSF/tests/unit-class_parser_core.cpp" + - type: cpp_test + name: "accept;boundaries" + path: "TSF/tests/unit-arrays.cpp" + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;n (previously overflowed)" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/n_structure_100000_opening_arrays.json" + description: "" + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;n" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/n_structure_close_unopened_array.json" + description: "" + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;n" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/n_structure_double_array.json" + - "/nst_json_testsuite2/test_parsing/n_structure_end_array.json" + - "/nst_json_testsuite2/test_parsing/n_structure_lone-invalid-utf-8.json" + - "/nst_json_testsuite2/test_parsing/n_structure_open_array_apostrophe.json" + - "/nst_json_testsuite2/test_parsing/n_structure_open_array_comma.json" + - "/nst_json_testsuite2/test_parsing/n_structure_open_array_open_object.json" + - "/nst_json_testsuite2/test_parsing/n_structure_open_object_close_array.json" + - "/nst_json_testsuite2/test_parsing/n_structure_unclosed_array.json" + - "/nst_json_testsuite2/test_parsing/n_structure_unclosed_array_partial_null.json" + - "/nst_json_testsuite2/test_parsing/n_structure_unclosed_array_unfinished_false.json" + - 
"/nst_json_testsuite2/test_parsing/n_structure_unclosed_array_unfinished_true.json" + description: "" +evidence: + type: check_test_results + configuration: + tests: + - class_parser_core + - testsuites + - arrays +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library does not accept any improperly bounded arrays. \ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-05.5.md b/TSF/trustable/no-json-faults/NJF-05.5.md new file mode 100644 index 0000000000..fd92407894 --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-05.5.md @@ -0,0 +1,31 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-01 + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;n" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/n_array_double_comma.json" + - "/nst_json_testsuite2/test_parsing/n_array_double_extra_comma.json" + - "/nst_json_testsuite2/test_parsing/n_array_just_comma.json" + - "/nst_json_testsuite2/test_parsing/n_array_number_and_comma.json" + - "/nst_json_testsuite2/test_parsing/n_array_number_and_several_commas.json" + - "/nst_json_testsuite2/test_parsing/n_structure_array_with_unclosed_string.json" + - "/nst_json_testsuite2/test_parsing/n_array_invalid_utf8.json" + - "/nst_json_testsuite2/test_parsing/n_array_just_minus.json" + description: "Checks that various \"proper\" arrays with improper elements are rejected." +evidence: + type: check_test_results + configuration: + tests: + - testsuites +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library does not accept arrays with improper values. 
\ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-05.5_CONTEXT.md b/TSF/trustable/no-json-faults/NJF-05.5_CONTEXT.md new file mode 100644 index 0000000000..38ffdf6892 --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-05.5_CONTEXT.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: false +--- + +An improper value is either an empty value within a non-empty array or an inadmissible token according to RFC8259. \ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-05.6.0.md b/TSF/trustable/no-json-faults/NJF-05.6.0.md new file mode 100644 index 0000000000..730e25e775 --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-05.6.0.md @@ -0,0 +1,24 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-01 + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;i -> y" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/i_structure_500_nested_arrays.json" + description: "" +evidence: + type: check_test_results + configuration: + tests: + - testsuites +score: + Jonas-Kirchhoff: 0.975 + Erikhu1: 0.9 +--- + +The acceptance of nested arrays by the service provided by the nlohmann/json library does not depend on the depth of nesting. 
\ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-05.6.md b/TSF/trustable/no-json-faults/NJF-05.6.md new file mode 100644 index 0000000000..b08a379bbd --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-05.6.md @@ -0,0 +1,30 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-01 + - type: JSON_testsuite + name: "compliance tests from json.org;expected passes" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/json_tests/pass2.json" + description: "" + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;i -> y" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/i_structure_500_nested_arrays.json" + description: "" +evidence: + type: check_test_results + configuration: + tests: + - testsuites +score: + Jonas-Kirchhoff: 0.95 + Erikhu1: 0.95 +--- + +The service provided by the nlohmann/json library accepts nested arrays. \ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-05.7.1.md b/TSF/trustable/no-json-faults/NJF-05.7.1.md new file mode 100644 index 0000000000..defc79c693 --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-05.7.1.md @@ -0,0 +1,36 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-01 + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;y" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/y_array_with_several_null.json" + description: "Checks that [1,null,null,null,2] is accepted." 
+ - type: JSON_testsuite + name: "json.org examples;4.json" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/json.org/4.json" + description: "" + - type: JSON_testsuite + name: "json.org examples;5.json" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/json.org/5.json" + description: "" +evidence: + type: check_test_results + configuration: + tests: + - testsuites +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library does accept comma as value separator. \ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-05.7.2.md b/TSF/trustable/no-json-faults/NJF-05.7.2.md new file mode 100644 index 0000000000..8a51f7002e --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-05.7.2.md @@ -0,0 +1,24 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-01 + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;n" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/n_array_colon_instead_of_comma.json" + description: "Tests whether colon as value separator is rejected." +evidence: + type: check_test_results + configuration: + tests: + - testsuites +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library does not accept any other value separator. \ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-05.7.md b/TSF/trustable/no-json-faults/NJF-05.7.md new file mode 100644 index 0000000000..29a0de1747 --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-05.7.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The service provided by the nlohmann/json library does only accept comma as value separator. 
\ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-05.md b/TSF/trustable/no-json-faults/NJF-05.md new file mode 100644 index 0000000000..acc37fdedb --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-05.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The service provided by the nlohmann/json library accepts and rejects arrays according to RFC8259 §5. \ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-06.1.md b/TSF/trustable/no-json-faults/NJF-06.1.md new file mode 100644 index 0000000000..f0068b8e84 --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-06.1.md @@ -0,0 +1,29 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-01 + - type: cpp_test + name: "parser class - core;accept;object;empty object" + path: "TSF/tests/unit-class_parser_core.cpp" + - type: cpp_test + name: "accept;whitespace;empty object" + path: "TSF/tests/unit-objects.cpp" + - type: function_reference + name: "lexer::skip_whitespace" + path: "include/nlohmann/detail/input/lexer.hpp" + description: "function, which skips admissible whitespace during reading" +evidence: + type: check_test_results + configuration: + tests: + - class_parser_core + - objects +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library accepts the empty object. 
\ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-06.2.md b/TSF/trustable/no-json-faults/NJF-06.2.md new file mode 100644 index 0000000000..e1ad561145 --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-06.2.md @@ -0,0 +1,30 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-01 + - type: cpp_test + name: "deserialization;contiguous containers;error cases;case 15" + path: "tests/src/unit-deserialization.cpp" + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;n" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/n_structure_comma_instead_of_closing_brace.json" + - "/nst_json_testsuite2/test_parsing/n_structure_object_followed_by_closing_object.json" + - "/nst_json_testsuite2/test_parsing/n_structure_object_unclosed_no_value.json" + description: "" +evidence: + type: check_test_results + configuration: + tests: + - deserialization + - testsuites +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library does not accept improperly bounded objects. \ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-06.3.md b/TSF/trustable/no-json-faults/NJF-06.3.md new file mode 100644 index 0000000000..9975cd8f39 --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-06.3.md @@ -0,0 +1,25 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-01 + - type: cpp_test + name: "deserialization;JSON Lines" + path: "tests/src/unit-deserialization.cpp" + - type: cpp_test + name: "parser class - core;accept;object;nonempty object" + path: "TSF/tests/unit-class_parser_core.cpp" +evidence: + type: check_test_results + configuration: + tests: + - class_parser_core + - deserialization +score: + Jonas-Kirchhoff: 0.95 + Erikhu1: 0.95 +--- + +The service provided by the nlohmann/json library accepts non-empty objects. 
\ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-06.4.1.md b/TSF/trustable/no-json-faults/NJF-06.4.1.md new file mode 100644 index 0000000000..1c6b532fc3 --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-06.4.1.md @@ -0,0 +1,52 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-01 + - NJF-06.4 + - type: cpp_test + name: "parser class - core;accept;object;nonempty object" + path: "TSF/tests/unit-class_parser_core.cpp" + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;y" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/y_object_basic.json" + - "/nst_json_testsuite2/test_parsing/y_object_duplicated_key.json" + - "/nst_json_testsuite2/test_parsing/y_object_duplicated_key_and_value.json" + - "/nst_json_testsuite2/test_parsing/y_object_empty.json" + - "/nst_json_testsuite2/test_parsing/y_object_empty_key.json" + - "/nst_json_testsuite2/test_parsing/y_object_escaped_null_in_key.json" + - "/nst_json_testsuite2/test_parsing/y_object_extreme_numbers.json" + - "/nst_json_testsuite2/test_parsing/y_object_long_strings.json" + - "/nst_json_testsuite2/test_parsing/y_object_simple.json" + - "/nst_json_testsuite2/test_parsing/y_object_string_unicode.json" + - "/nst_json_testsuite2/test_parsing/y_object_with_newlines.json" + description: "Checks that various keys, particularly containing unicode characters, are accepted." 
+ - type: cpp_test + name: "accept;names;strings;control characters" + path: "TSF/tests/unit-objects.cpp" + - type: cpp_test + name: "accept;names;strings;unicode" + path: "TSF/tests/unit-objects.cpp" + - type: cpp_test + name: "accept;names;strings;escaped UTF-16 surrogates" + path: "TSF/tests/unit-objects.cpp" + - type: cpp_test + name: "accept;member separator" + path: "TSF/tests/unit-objects.cpp" +evidence: + type: check_test_results + configuration: + tests: + - class_parser_core + - testsuites + - objects +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +If the service provided by the nlohmann/json library recognises the name candidate as string, then it accepts the name candidate. \ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-06.4.2.md b/TSF/trustable/no-json-faults/NJF-06.4.2.md new file mode 100644 index 0000000000..078e4e31f7 --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-06.4.2.md @@ -0,0 +1,42 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-01 + - NJF-06.4 + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;n" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/n_object_non_string_key.json" + - "/nst_json_testsuite2/test_parsing/n_object_non_string_key_but_huge_number_instead.json" + description: "Checks that numbers as keys are rejected." 
+ - type: cpp_test + name: "accept;names;numbers" + path: "TSF/tests/unit-objects.cpp" + - type: cpp_test + name: "accept;names;arrays" + path: "TSF/tests/unit-objects.cpp" + - type: cpp_test + name: "accept;names;objects" + path: "TSF/tests/unit-objects.cpp" + - type: cpp_test + name: "accept;names;literals" + path: "TSF/tests/unit-objects.cpp" + - type: cpp_test + name: "accept;member separator" + path: "TSF/tests/unit-objects.cpp" +evidence: + type: check_test_results + configuration: + tests: + - objects + - testsuites +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library does not accept any other token as name. \ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-06.4.3.md b/TSF/trustable/no-json-faults/NJF-06.4.3.md new file mode 100644 index 0000000000..2579c67ea3 --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-06.4.3.md @@ -0,0 +1,43 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-01 + - NJF-06.4 + - type: cpp_test + name: "parser class - core;accept;object;nonempty object" + path: "TSF/tests/unit-class_parser_core.cpp" + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;y" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/y_object_basic.json" + - "/nst_json_testsuite2/test_parsing/y_object_duplicated_key.json" + - "/nst_json_testsuite2/test_parsing/y_object_duplicated_key_and_value.json" + - "/nst_json_testsuite2/test_parsing/y_object_empty.json" + - "/nst_json_testsuite2/test_parsing/y_object_empty_key.json" + - "/nst_json_testsuite2/test_parsing/y_object_escaped_null_in_key.json" + - "/nst_json_testsuite2/test_parsing/y_object_extreme_numbers.json" + - "/nst_json_testsuite2/test_parsing/y_object_long_strings.json" + - "/nst_json_testsuite2/test_parsing/y_object_simple.json" + - "/nst_json_testsuite2/test_parsing/y_object_string_unicode.json" + - 
"/nst_json_testsuite2/test_parsing/y_object_with_newlines.json" + description: "Checks that various strings and numbers are accepted values." + - type: cpp_test + name: "accept;member separator" + path: "TSF/tests/unit-objects.cpp" +evidence: + type: check_test_results + configuration: + tests: + - class_parser_core + - testsuites + - objects +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +If the service provided by the nlohmann/json library accepts the value-candidate as a singleton, then the value-candidate is accepted. \ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-06.4.md b/TSF/trustable/no-json-faults/NJF-06.4.md new file mode 100644 index 0000000000..ba5578ebef --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-06.4.md @@ -0,0 +1,16 @@ +--- +level: 1.1 +normative: true +references: + - type: cpp_test + name: "nst's JSONTestSuite;test_parsing;n" + path: "tests/src/unit-testsuites.cpp" + - type: cpp_test + name: "nst's JSONTestSuite (2);test_parsing;n" + path: "tests/src/unit-testsuites.cpp" + - type: cpp_test + name: "compliance tests from json.org;expected failures" + path: "tests/src/unit-testsuites.cpp" +--- + +The admissible members of an object provided to the eclipse-score/inc_nlohmann_json have the form name : value. 
\ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-06.5.1.md b/TSF/trustable/no-json-faults/NJF-06.5.1.md new file mode 100644 index 0000000000..616a75d245 --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-06.5.1.md @@ -0,0 +1,41 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-01 + - NJF-06.5 + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;n" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/n_object_single_quote.json" + - "/nst_json_testsuite2/test_parsing/n_object_unquoted_key.json" + - "/nst_json_testsuite2/test_parsing/n_object_non_string_key.json" + - "/nst_json_testsuite2/test_parsing/n_object_non_string_key_but_huge_number_instead.json" + - "/nst_json_testsuite2/test_parsing/n_object_key_with_single_quotes.json" + - "/nst_json_testsuite2/test_parsing/n_object_bracket_key.json" + - "/nst_json_testsuite2/test_parsing/n_object_unquoted_key.json" + description: "Checks that invalid names are rejected." + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;i -> n" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/i_object_key_lone_2nd_surrogate.json" + description: "Checks that string with invalid utf16 surrogate is rejected as name" + - type: cpp_test + name: "accept;member separator" + path: "TSF/tests/unit-objects.cpp" +evidence: + type: check_test_results + configuration: + tests: + - objects + - testsuites +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +If the service provided by the nlohmann/json library does not accept any name candidate as singleton, then the service does not accept the object candidate. 
\ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-06.5.2.md b/TSF/trustable/no-json-faults/NJF-06.5.2.md new file mode 100644 index 0000000000..a36d71d3a1 --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-06.5.2.md @@ -0,0 +1,29 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-01 + - NJF-06.5 + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;n" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/n_object_bad_value.json" + description: "Checks that the invalid literal \"truth\" as value is rejected." + - type: cpp_test + name: "accept;member separator" + path: "TSF/tests/unit-objects.cpp" +evidence: + type: check_test_results + configuration: + tests: + - objects + - testsuites +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +If the service provided by the nlohmann/json library does not accept any value candidate as singleton, then the service does not accept the object candidate. \ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-06.5.md b/TSF/trustable/no-json-faults/NJF-06.5.md new file mode 100644 index 0000000000..9a2d8f7568 --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-06.5.md @@ -0,0 +1,14 @@ +--- +level: 1.1 +normative: true +references: + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;n" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/n_object_trailing_comma.json" + - "/nst_json_testsuite2/test_parsing/n_object_two_commas_in_a_row.json" + description: "Checks that the empty member in a nonempty object is rejected." +--- + +The service provided by the nlohmann/json library does not accept objects with improper members. 
\ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-06.5_CONTEXT.md b/TSF/trustable/no-json-faults/NJF-06.5_CONTEXT.md new file mode 100644 index 0000000000..b118934b1a --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-06.5_CONTEXT.md @@ -0,0 +1,8 @@ +--- +level: 1.1 +normative: false +--- + +An improper name is either not a string (i.e. any other token, or empty), or a string-candidate which does not fulfil the requirements of RFC8259. + +An improper value is either empty or an inadmissible token according to RFC8259. \ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-06.6.0.md b/TSF/trustable/no-json-faults/NJF-06.6.0.md new file mode 100644 index 0000000000..6f8b5d7bf5 --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-06.6.0.md @@ -0,0 +1,32 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-01 + - type: JSON_testsuite + name: "json.org examples" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/json.org/1.json" + - "/json.org/2.json" + - "/json.org/3.json" + - "/json.org/4.json" + - "/json.org/5.json" + description: "Checks that various nested objects are accepted." + - type: cpp_test + name: "accept;member separator" + path: "TSF/tests/unit-objects.cpp" +evidence: + type: check_test_results + configuration: + tests: + - objects + - testsuites +score: + Jonas-Kirchhoff: 0.975 + Erikhu1: 0.95 +--- + +The acceptance of nested objects by the service provided by the nlohmann/json library does not depend on the depth of nesting. 
\ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-06.6.md b/TSF/trustable/no-json-faults/NJF-06.6.md new file mode 100644 index 0000000000..bfa1ccca7c --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-06.6.md @@ -0,0 +1,32 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-01 + - type: JSON_testsuite + name: "json.org examples" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/json.org/1.json" + - "/json.org/2.json" + - "/json.org/3.json" + - "/json.org/4.json" + - "/json.org/5.json" + description: "Checks that various nested objects are accepted." + - type: cpp_test + name: "accept;member separator" + path: "TSF/tests/unit-objects.cpp" +evidence: + type: check_test_results + configuration: + tests: + - objects + - testsuites +score: + Jonas-Kirchhoff: 0.95 + Erikhu1: 0.95 +--- + +The service provided by the nlohmann/json library accepts nested objects. \ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-06.7.1.md b/TSF/trustable/no-json-faults/NJF-06.7.1.md new file mode 100644 index 0000000000..7cededd718 --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-06.7.1.md @@ -0,0 +1,37 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-01 + - type: JSON_testsuite + name: "json.org examples" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/json.org/1.json" + - "/json.org/2.json" + - "/json.org/3.json" + - "/json.org/4.json" + - "/json.org/5.json" + - "/json.org/1.json" + - "/json.org/2.json" + - "/json.org/3.json" + - "/json.org/4.json" + - "/json.org/5.json" + description: "Checks that various objects with more than one member are accepted." 
+ - type: cpp_test + name: "accept;member separator" + path: "TSF/tests/unit-objects.cpp" +evidence: + type: check_test_results + configuration: + tests: + - objects + - testsuites +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library accepts comma as member separator. \ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-06.7.2.md b/TSF/trustable/no-json-faults/NJF-06.7.2.md new file mode 100644 index 0000000000..0f8164192b --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-06.7.2.md @@ -0,0 +1,42 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-01 + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;n" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/n_object_comma_instead_of_colon.json" + description: "Checks that comma instead of colon is rejected." + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;n" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/n_object_double_colon.json" + description: "Checks that double colon is rejected." + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;n" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/n_object_missing_colon.json" + - "/nst_json_testsuite2/test_parsing/n_object_missing_semicolon.json" + - "/nst_json_testsuite2/test_parsing/n_object_missing_semicolon.json" + description: "Checks that the empty member separator is rejected." + - type: cpp_test + name: "accept;member separator" + path: "TSF/tests/unit-objects.cpp" +evidence: + type: check_test_results + configuration: + tests: + - objects + - testsuites +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library does not accept any other member separator. 
\ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-06.7.md b/TSF/trustable/no-json-faults/NJF-06.7.md new file mode 100644 index 0000000000..9151b12206 --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-06.7.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The service provided by the nlohmann/json library does only accept comma as member separator. \ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-06.md b/TSF/trustable/no-json-faults/NJF-06.md new file mode 100644 index 0000000000..1816b7e743 --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-06.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The service provided by the nlohmann/json library accepts and rejects objects according to RFC8259 §4. \ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-07.1.md b/TSF/trustable/no-json-faults/NJF-07.1.md new file mode 100644 index 0000000000..dda4d06d71 --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-07.1.md @@ -0,0 +1,28 @@ +--- +level: 1.1 +normative: true + +references: + - type: item + items: + - JLEX-01 + - NJF-07 + - type: cpp_test + name: "parser class - core;accept;string" + path: "TSF/tests/unit-class_parser_core.cpp" + - type: cpp_test + name: "compliance tests from nativejson-benchmark;strings" + path: "tests/src/unit-testsuites.cpp" + +evidence: + type: check_test_results + configuration: + tests: + - class_parser_core + - testsuites +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library does accept empty strings. 
\ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-07.2.md b/TSF/trustable/no-json-faults/NJF-07.2.md new file mode 100644 index 0000000000..cf8e004cf1 --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-07.2.md @@ -0,0 +1,36 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-01 + - NJF-07 + - type: cpp_test + name: "parser class - core;accept;parse errors (accept)" + path: "TSF/tests/unit-class_parser_core.cpp" + - type: cpp_test + name: "deserialization;contiguous containers;error cases" + path: "tests/src/unit-deserialization.cpp" + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;n" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/n_string_no_quotes_with_bad_escape.json" + - "/nst_json_testsuite2/test_parsing/n_string_single_doublequote.json" + - "/nst_json_testsuite2/test_parsing/n_string_single_quote.json" + - "/nst_json_testsuite2/test_parsing/n_string_single_string_no_double_quotes.json" + description: "" +evidence: + type: check_test_results + configuration: + tests: + - class_parser_core + - testsuites + - deserialization +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library does not accept improperly bounded strings. 
\ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-07.3.md b/TSF/trustable/no-json-faults/NJF-07.3.md new file mode 100644 index 0000000000..567e44cf3f --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-07.3.md @@ -0,0 +1,23 @@ +--- +level: 1.1 +normative: true + +references: + - type: item + items: + - JLEX-01 + - NJF-07 + - type: cpp_test + name: "parser class - core;accept;string;errors" + path: "TSF/tests/unit-class_parser_core.cpp" +evidence: + type: check_test_results + configuration: + tests: + - class_parser_core +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library does not accept unescaped control characters. \ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-07.4.md b/TSF/trustable/no-json-faults/NJF-07.4.md new file mode 100644 index 0000000000..4479235a45 --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-07.4.md @@ -0,0 +1,48 @@ +--- +level: 1.1 +normative: true + +references: + - type: item + items: + - JLEX-01 + - NJF-07 + - type: cpp_test + name: "parser class - core;accept;string;escaped" + path: "TSF/tests/unit-class_parser_core.cpp" + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;y" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/y_string_1_2_3_bytes_UTF-8_sequences.json" + - "/nst_json_testsuite2/test_parsing/y_string_accepted_surrogate_pair.json" + - "/nst_json_testsuite2/test_parsing/y_string_accepted_surrogate_pairs.json" + - "/nst_json_testsuite2/test_parsing/y_string_allowed_escapes.json" + - "/nst_json_testsuite2/test_parsing/y_string_backslash_and_u_escaped_zero.json" + - "/nst_json_testsuite2/test_parsing/y_string_backslash_doublequotes.json" + - "/nst_json_testsuite2/test_parsing/y_string_comments.json" + - "/nst_json_testsuite2/test_parsing/y_string_double_escape_a.json" + - "/nst_json_testsuite2/test_parsing/y_string_double_escape_n.json" + - 
"/nst_json_testsuite2/test_parsing/y_string_escaped_control_character.json" + - "/nst_json_testsuite2/test_parsing/y_string_escaped_noncharacter.json" + description: "Checks that various escaped control and unicode characters are accepted." + - type: cpp_test + name: "Unicode (1/5);\\\\uxxxx sequences;correct sequences" + path: "tests/src/unit-unicode1.cpp" + - type: cpp_test + name: "Unicode;escaped unicode" + path: "TSF/tests/unit-strings.cpp" +evidence: + type: check_test_results + configuration: + tests: + - class_parser_core + - testsuites + - unicode1 + - strings +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library does accept escaped control characters. \ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-07.5.md b/TSF/trustable/no-json-faults/NJF-07.5.md new file mode 100644 index 0000000000..86cc8bd931 --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-07.5.md @@ -0,0 +1,35 @@ +--- +level: 1.1 +normative: true + +references: + - type: item + items: + - JLEX-01 + - NJF-07 + - type: cpp_test + name: "parser class - core;accept;string;escaped" + path: "TSF/tests/unit-class_parser_core.cpp" + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;y" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/y_string_accepted_surrogate_pair.json" + - "/nst_json_testsuite2/test_parsing/y_string_accepted_surrogate_pairs.json" + description: "Checks that single and multiple surrogates are accepted." + - type: cpp_test + name: "Unicode;escaped utf-16 surrogates;well-formed" + path: "TSF/tests/unit-strings.cpp" +evidence: + type: check_test_results + configuration: + tests: + - class_parser_core + - testsuites + - strings +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library accepts UTF-16 surrogate pairs. 
\ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-07.6.md b/TSF/trustable/no-json-faults/NJF-07.6.md new file mode 100644 index 0000000000..8eea00395e --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-07.6.md @@ -0,0 +1,68 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-01 + - NJF-07 + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;y" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/y_string_1_2_3_bytes_UTF-8_sequences.json" + - "/nst_json_testsuite2/test_parsing/y_string_accepted_surrogate_pair.json" + - "/nst_json_testsuite2/test_parsing/y_string_accepted_surrogate_pairs.json" + - "/nst_json_testsuite2/test_parsing/y_string_allowed_escapes.json" + - "/nst_json_testsuite2/test_parsing/y_string_backslash_and_u_escaped_zero.json" + - "/nst_json_testsuite2/test_parsing/y_string_backslash_doublequotes.json" + - "/nst_json_testsuite2/test_parsing/y_string_comments.json" + - "/nst_json_testsuite2/test_parsing/y_string_double_escape_a.json" + - "/nst_json_testsuite2/test_parsing/y_string_double_escape_n.json" + - "/nst_json_testsuite2/test_parsing/y_string_escaped_control_character.json" + - "/nst_json_testsuite2/test_parsing/y_string_escaped_noncharacter.json" + - "/nst_json_testsuite2/test_parsing/y_string_in_array.json" + - "/nst_json_testsuite2/test_parsing/y_string_in_array_with_leading_space.json" + - "/nst_json_testsuite2/test_parsing/y_string_last_surrogates_1_and_2.json" + - "/nst_json_testsuite2/test_parsing/y_string_nbsp_uescaped.json" + - "/nst_json_testsuite2/test_parsing/y_string_nonCharacterInUTF-8_U+10FFFF.json" + - "/nst_json_testsuite2/test_parsing/y_string_nonCharacterInUTF-8_U+FFFF.json" + - "/nst_json_testsuite2/test_parsing/y_string_null_escape.json" + - "/nst_json_testsuite2/test_parsing/y_string_one-byte-utf-8.json" + - "/nst_json_testsuite2/test_parsing/y_string_pi.json" + - 
"/nst_json_testsuite2/test_parsing/y_string_reservedCharacterInUTF-8_U+1BFFF.json" + - "/nst_json_testsuite2/test_parsing/y_string_simple_ascii.json" + - "/nst_json_testsuite2/test_parsing/y_string_space.json" + - "/nst_json_testsuite2/test_parsing/y_string_surrogates_U+1D11E_MUSICAL_SYMBOL_G_CLEF.json" + - "/nst_json_testsuite2/test_parsing/y_string_three-byte-utf-8.json" + - "/nst_json_testsuite2/test_parsing/y_string_two-byte-utf-8.json" + - "/nst_json_testsuite2/test_parsing/y_string_u+2028_line_sep.json" + - "/nst_json_testsuite2/test_parsing/y_string_u+2029_par_sep.json" + - "/nst_json_testsuite2/test_parsing/y_string_uEscape.json" + - "/nst_json_testsuite2/test_parsing/y_string_uescaped_newline.json" + - "/nst_json_testsuite2/test_parsing/y_string_unescaped_char_delete.json" + - "/nst_json_testsuite2/test_parsing/y_string_unicode.json" + - "/nst_json_testsuite2/test_parsing/y_string_unicodeEscapedBackslash.json" + - "/nst_json_testsuite2/test_parsing/y_string_unicode_2.json" + - "/nst_json_testsuite2/test_parsing/y_string_unicode_U+10FFFE_nonchar.json" + - "/nst_json_testsuite2/test_parsing/y_string_unicode_U+1FFFE_nonchar.json" + - "/nst_json_testsuite2/test_parsing/y_string_unicode_U+200B_ZERO_WIDTH_SPACE.json" + - "/nst_json_testsuite2/test_parsing/y_string_unicode_U+2064_invisible_plus.json" + - "/nst_json_testsuite2/test_parsing/y_string_unicode_U+FDD0_nonchar.json" + - "/nst_json_testsuite2/test_parsing/y_string_unicode_U+FFFE_nonchar.json" + - "/nst_json_testsuite2/test_parsing/y_string_unicode_escaped_double_quote.json" + - "/nst_json_testsuite2/test_parsing/y_string_utf8.json" + - "/nst_json_testsuite2/test_parsing/y_string_with_del_character.json" + - "/nst_json_testsuite2/test_parsing/y_structure_lonely_string.json" + description: "Checks that various non-empty valid strings are accepted." 
+evidence: + type: check_test_results + configuration: + tests: + - testsuites +score: + Jonas-Kirchhoff: 0.95 + Erikhu1: 0.95 +--- + +The service provided by the nlohmann/json library does accept non-empty strings. \ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-07.7.md b/TSF/trustable/no-json-faults/NJF-07.7.md new file mode 100644 index 0000000000..4333777f8b --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-07.7.md @@ -0,0 +1,46 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-01 + - NJF-07 + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;n" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/n_string_1_surrogate_then_escape.json" + - "/nst_json_testsuite2/test_parsing/n_string_1_surrogate_then_escape_u.json" + - "/nst_json_testsuite2/test_parsing/n_string_1_surrogate_then_escape_u1.json" + - "/nst_json_testsuite2/test_parsing/n_string_1_surrogate_then_escape_u1x.json" + - "/nst_json_testsuite2/test_parsing/n_string_accentuated_char_no_quotes.json" + - "/nst_json_testsuite2/test_parsing/n_string_backslash_00.json" + - "/nst_json_testsuite2/test_parsing/n_string_escape_x.json" + - "/nst_json_testsuite2/test_parsing/n_string_escaped_backslash_bad.json" + - "/nst_json_testsuite2/test_parsing/n_string_escaped_ctrl_char_tab.json" + - "/nst_json_testsuite2/test_parsing/n_string_escaped_emoji.json" + - "/nst_json_testsuite2/test_parsing/n_string_incomplete_escape.json" + - "/nst_json_testsuite2/test_parsing/n_string_incomplete_escaped_character.json" + - "/nst_json_testsuite2/test_parsing/n_string_incomplete_surrogate.json" + - "/nst_json_testsuite2/test_parsing/n_string_incomplete_surrogate_escape_invalid.json" + - "/nst_json_testsuite2/test_parsing/n_string_invalid-utf-8-in-escape.json" + - "/nst_json_testsuite2/test_parsing/n_string_invalid_backslash_esc.json" + - "/nst_json_testsuite2/test_parsing/n_string_invalid_unicode_escape.json" 
+ - "/nst_json_testsuite2/test_parsing/n_string_invalid_utf8_after_escape.json" + description: "Checks that various illegal control characters and utf-8 characters are rejected." + - type: cpp_test + name: "Unicode;escaped unicode" + path: "TSF/tests/unit-strings.cpp" +evidence: + type: check_test_results + configuration: + tests: + - strings + - testsuites +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library does not accept escaped invalid characters. \ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-07.8.md b/TSF/trustable/no-json-faults/NJF-07.8.md new file mode 100644 index 0000000000..397960c661 --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-07.8.md @@ -0,0 +1,22 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-01 + - NJF-07 + - type: cpp_test + name: "Unicode;escaped unicode" + path: "TSF/tests/unit-strings.cpp" +evidence: + type: check_test_results + configuration: + tests: + - strings +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library does not accept single unpaired utf-16 surrogates. \ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-07.9.md b/TSF/trustable/no-json-faults/NJF-07.9.md new file mode 100644 index 0000000000..cfa0237635 --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-07.9.md @@ -0,0 +1,21 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-01 + - NJF-07 + - type: cpp_test + name: "Unicode;unescaped unicode" + path: "TSF/tests/unit-strings.cpp" +evidence: + type: check_test_results + configuration: + tests: + - strings +score: + Jonas-Kirchhoff: 1.0 +--- + +The service provided by the nlohmann/json library does not accept unescaped UTF-16 surrogate pairs. 
\ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-07.md b/TSF/trustable/no-json-faults/NJF-07.md new file mode 100644 index 0000000000..2a60a1f300 --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-07.md @@ -0,0 +1,11 @@ +--- +level: 1.1 +normative: true +references: + - type: function_reference + name: "lexer::scan_string" + path: "include/nlohmann/detail/input/lexer.hpp" + description: "function which scans a string and verifies *en passant* that the string is in accordance with RFC8259" +--- + +The service provided by the nlohmann/json library accepts and rejects strings according to RFC8259 §7. \ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-08.1.md b/TSF/trustable/no-json-faults/NJF-08.1.md new file mode 100644 index 0000000000..72853b3850 --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-08.1.md @@ -0,0 +1,23 @@ +--- +level: 1.1 +normative: true + +references: + - type: item + items: + - JLEX-01 + - NJF-08 + - type: cpp_test + name: "parser class - core;accept;number;integers" + path: "TSF/tests/unit-class_parser_core.cpp" +evidence: + type: check_test_results + configuration: + tests: + - class_parser_core +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library does accept integers within the limits of 64-bit double. 
\ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-08.2.md b/TSF/trustable/no-json-faults/NJF-08.2.md new file mode 100644 index 0000000000..e8374b783d --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-08.2.md @@ -0,0 +1,23 @@ +--- +level: 1.1 +normative: true + +references: + - type: item + items: + - JLEX-01 + - NJF-08 + - type: cpp_test + name: "parser class - core;accept;number;integers;edge cases" + path: "TSF/tests/unit-class_parser_core.cpp" +evidence: + type: check_test_results + configuration: + tests: + - class_parser_core +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library does accept integers according to IEEE 754 binary64. \ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-08.3.md b/TSF/trustable/no-json-faults/NJF-08.3.md new file mode 100644 index 0000000000..dd36d6e46b --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-08.3.md @@ -0,0 +1,34 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-01 + - NJF-08 + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;n" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/n_number_+Inf.json" + - "/nst_json_testsuite2/test_parsing/n_number_-NaN.json" + - "/nst_json_testsuite2/test_parsing/n_number_Inf.json" + - "/nst_json_testsuite2/test_parsing/n_number_NaN.json" + - "/nst_json_testsuite2/test_parsing/n_number_NaN.json" + description: "Checks that NaN and Inf are rejected." + - type: cpp_test + name: "accept;illegal literal numbers" + path: "TSF/tests/unit-literals.cpp" +evidence: + type: check_test_results + configuration: + tests: + - literals + - testsuites +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library does not accept NaN and infinity. 
+ diff --git a/TSF/trustable/no-json-faults/NJF-08.4.md b/TSF/trustable/no-json-faults/NJF-08.4.md new file mode 100644 index 0000000000..6709d02b39 --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-08.4.md @@ -0,0 +1,36 @@ +--- +level: 1.1 +normative: true + +references: + - type: item + items: + - JLEX-01 + - NJF-08 + - type: cpp_test + name: "parser class - core;accept;number;integers;with exponent" + path: "TSF/tests/unit-class_parser_core.cpp" + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;y" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/y_number_real_capital_e.json" + - "/nst_json_testsuite2/test_parsing/y_number_real_capital_e_neg_exp.json" + - "/nst_json_testsuite2/test_parsing/y_number_real_capital_e_pos_exp.json" + - "/nst_json_testsuite2/test_parsing/y_number_real_exponent.json" + - "/nst_json_testsuite2/test_parsing/y_number_real_fraction_exponent.json" + - "/nst_json_testsuite2/test_parsing/y_number_real_neg_exp.json" + - "/nst_json_testsuite2/test_parsing/y_number_real_pos_exponent.json" + description: "Checks that various numbers with exponent are accepted." +evidence: + type: check_test_results + configuration: + tests: + - class_parser_core + - testsuites +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library does accept e or E for numbers with exponent within the bounds of double. 
diff --git a/TSF/trustable/no-json-faults/NJF-08.5.md b/TSF/trustable/no-json-faults/NJF-08.5.md new file mode 100644 index 0000000000..088aca6f05 --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-08.5.md @@ -0,0 +1,25 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-01 + - NJF-08 + - type: cpp_test + name: "accept;exponents;U+0425" + path: "TSF/tests/unit-numbers.cpp" + - type: cpp_test + name: "accept;exponents;U+0436" + path: "TSF/tests/unit-numbers.cpp" +evidence: + type: check_test_results + configuration: + tests: + - numbers +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library does not accept u0415 and u0436 (cyrillic e and E) as exponent signs in numbers with exponent. \ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-08.6.md b/TSF/trustable/no-json-faults/NJF-08.6.md new file mode 100644 index 0000000000..0252ac3641 --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-08.6.md @@ -0,0 +1,84 @@ +--- +level: 1.1 +normative: true + +references: + - type: item + items: + - JLEX-01 + - NJF-08 + - type: cpp_test + name: "parser class - core;accept;number;invalid numbers" + path: "TSF/tests/unit-class_parser_core.cpp" + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;n" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/n_number_++.json" + - "/nst_json_testsuite2/test_parsing/n_number_+1.json" + - "/nst_json_testsuite2/test_parsing/n_number_+Inf.json" + - "/nst_json_testsuite2/test_parsing/n_number_-01.json" + - "/nst_json_testsuite2/test_parsing/n_number_-1.0..json" + - "/nst_json_testsuite2/test_parsing/n_number_-2..json" + - "/nst_json_testsuite2/test_parsing/n_number_-NaN.json" + - "/nst_json_testsuite2/test_parsing/n_number_.-1.json" + - "/nst_json_testsuite2/test_parsing/n_number_.2e-3.json" + - "/nst_json_testsuite2/test_parsing/n_number_0.1.2.json" + - 
"/nst_json_testsuite2/test_parsing/n_number_0.3e+.json" + - "/nst_json_testsuite2/test_parsing/n_number_0.3e.json" + - "/nst_json_testsuite2/test_parsing/n_number_0.e1.json" + - "/nst_json_testsuite2/test_parsing/n_number_0_capital_E+.json" + - "/nst_json_testsuite2/test_parsing/n_number_0_capital_E.json" + - "/nst_json_testsuite2/test_parsing/n_number_0e+.json" + - "/nst_json_testsuite2/test_parsing/n_number_0e.json" + - "/nst_json_testsuite2/test_parsing/n_number_1.0e+.json" + - "/nst_json_testsuite2/test_parsing/n_number_1.0e-.json" + - "/nst_json_testsuite2/test_parsing/n_number_1.0e.json" + - "/nst_json_testsuite2/test_parsing/n_number_1_000.json" + - "/nst_json_testsuite2/test_parsing/n_number_1eE2.json" + - "/nst_json_testsuite2/test_parsing/n_number_2.e+3.json" + - "/nst_json_testsuite2/test_parsing/n_number_2.e-3.json" + - "/nst_json_testsuite2/test_parsing/n_number_2.e3.json" + - "/nst_json_testsuite2/test_parsing/n_number_9.e+.json" + - "/nst_json_testsuite2/test_parsing/n_number_Inf.json" + - "/nst_json_testsuite2/test_parsing/n_number_NaN.json" + - "/nst_json_testsuite2/test_parsing/n_number_U+FF11_fullwidth_digit_one.json" + - "/nst_json_testsuite2/test_parsing/n_number_expression.json" + - "/nst_json_testsuite2/test_parsing/n_number_hex_1_digit.json" + - "/nst_json_testsuite2/test_parsing/n_number_hex_2_digits.json" + - "/nst_json_testsuite2/test_parsing/n_number_infinity.json" + - "/nst_json_testsuite2/test_parsing/n_number_invalid+-.json" + - "/nst_json_testsuite2/test_parsing/n_number_invalid-negative-real.json" + - "/nst_json_testsuite2/test_parsing/n_number_invalid-utf-8-in-bigger-int.json" + - "/nst_json_testsuite2/test_parsing/n_number_invalid-utf-8-in-exponent.json" + - "/nst_json_testsuite2/test_parsing/n_number_invalid-utf-8-in-int.json" + - "/nst_json_testsuite2/test_parsing/n_number_minus_infinity.json" + - "/nst_json_testsuite2/test_parsing/n_number_minus_sign_with_trailing_garbage.json" + - 
"/nst_json_testsuite2/test_parsing/n_number_minus_space_1.json" + - "/nst_json_testsuite2/test_parsing/n_number_neg_int_starting_with_zero.json" + - "/nst_json_testsuite2/test_parsing/n_number_neg_real_without_int_part.json" + - "/nst_json_testsuite2/test_parsing/n_number_neg_with_garbage_at_end.json" + - "/nst_json_testsuite2/test_parsing/n_number_real_garbage_after_e.json" + - "/nst_json_testsuite2/test_parsing/n_number_real_with_invalid_utf8_after_e.json" + - "/nst_json_testsuite2/test_parsing/n_number_real_without_fractional_part.json" + - "/nst_json_testsuite2/test_parsing/n_number_starting_with_dot.json" + - "/nst_json_testsuite2/test_parsing/n_number_with_alpha.json" + - "/nst_json_testsuite2/test_parsing/n_number_with_alpha_char.json" + - "/nst_json_testsuite2/test_parsing/n_number_with_leading_zero.json" + description: "Tests whether various numbers with invalid syntax according to RFC8259 are rejected." + - type: cpp_test + name: "accept;operators" + path: "TSF/tests/unit-numbers.cpp" +evidence: + type: check_test_results + configuration: + tests: + - class_parser_core + - testsuites + - numbers +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library does not accept invalid syntax for numbers. diff --git a/TSF/trustable/no-json-faults/NJF-08.7.md b/TSF/trustable/no-json-faults/NJF-08.7.md new file mode 100644 index 0000000000..b541b3cc33 --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-08.7.md @@ -0,0 +1,23 @@ +--- +level: 1.1 +normative: true + +references: + - type: item + items: + - JLEX-01 + - NJF-08 + - type: cpp_test + name: "parser class - core;accept;number;floating-point" + path: "TSF/tests/unit-class_parser_core.cpp" +evidence: + type: check_test_results + configuration: + tests: + - class_parser_core +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library does accept decimal points in numbers within the bounds of double. 
\ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-08.8.md b/TSF/trustable/no-json-faults/NJF-08.8.md new file mode 100644 index 0000000000..ee1bbd47e2 --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-08.8.md @@ -0,0 +1,35 @@ +--- +level: 1.1 +normative: true + +references: + - type: item + items: + - JLEX-01 + - NJF-08 + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;n" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/n_number_-01.json" + - "/nst_json_testsuite2/test_parsing/n_number_neg_int_starting_with_zero.json" + description: "Checks that -01 is rejected." + - type: cpp_test + name: "parser class - core;accept;number;invalid numbers" + path: "TSF/tests/unit-class_parser_core.cpp" + - type: cpp_test + name: "accept;Leading zeroes" + path: "TSF/tests/unit-numbers.cpp" +evidence: + type: check_test_results + configuration: + tests: + - class_parser_core + - testsuites + - numbers +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library does not accept leading zeroes. 
diff --git a/TSF/trustable/no-json-faults/NJF-08.9.md b/TSF/trustable/no-json-faults/NJF-08.9.md new file mode 100644 index 0000000000..4048c7a850 --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-08.9.md @@ -0,0 +1,36 @@ +--- +level: 1.1 +normative: true + +references: + - type: item + items: + - JLEX-01 + - NJF-08 + - type: cpp_test + name: "parser class - core;accept;number;integers" + path: "TSF/tests/unit-class_parser_core.cpp" + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;n" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/n_number_hex_1_digit.json" + - "/nst_json_testsuite2/test_parsing/n_number_hex_2_digits.json" + - "/nst_json_testsuite2/test_parsing/n_number_hex_2_digits.json" + description: "Rejects Hexadecimals" + - type: cpp_test + name: "accept;bases" + path: "TSF/tests/unit-numbers.cpp" +evidence: + type: check_test_results + configuration: + tests: + - class_parser_core + - testsuites + - numbers +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library does not accept any other digit symbol than 0-9. \ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-08.md b/TSF/trustable/no-json-faults/NJF-08.md new file mode 100644 index 0000000000..cb110cfe3d --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-08.md @@ -0,0 +1,11 @@ +--- +level: 1.1 +normative: true +references: + - type: function_reference + name: "lexer::scan_number" + path: "include/nlohmann/detail/input/lexer.hpp" + description: "function, which scans numbers and verifies *en passant* that these number is in accordance with RFC8259" +--- + +The service provided by the nlohmann/json library accepts numbers according to RFC8259 §6. 
\ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-09.md b/TSF/trustable/no-json-faults/NJF-09.md new file mode 100644 index 0000000000..88976537d2 --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-09.md @@ -0,0 +1,35 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-01 + - type: function_reference + name: "parser::accept" + path: "include/nlohmann/detail/input/parser.hpp" + description: "function, which implements the service to check for well-formed json" + - type: function_reference + name: "parser::sax_parse" + path: "include/nlohmann/detail/input/parser.hpp" + description: "function, which is called by parser::accept" + - type: function_reference + name: "parser::sax_parse_internal" + path: "include/nlohmann/detail/input/parser.hpp" + decscription: "function, which is called by parser::sax_parse" + - type: function_reference + name: "lexer::scan" + path: "include/nlohmann/detail/input/lexer.hpp" + description: "function, which is called by parser::sax_parse_internal to read input data" + - type: JSON_testsuite + name: "json.org examples;1.json" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/json.org/1.json" + description: "Checks that a valid json object containing all six structural characters is accepted." +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library does accept the six structural characters. 
\ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-10.md b/TSF/trustable/no-json-faults/NJF-10.md new file mode 100644 index 0000000000..5da127e3e1 --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-10.md @@ -0,0 +1,21 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-01 + - type: cpp_test + name: "nst's JSONTestSuite (2);test_parsing;n" + path: "tests/src/unit-testsuites.cpp" +evidence: + type: check_test_results + configuration: + tests: + - testsuites +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library does not accept any other structural characters. \ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-11.md b/TSF/trustable/no-json-faults/NJF-11.md new file mode 100644 index 0000000000..ed918cf65b --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-11.md @@ -0,0 +1,32 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-01 + - type: function_reference + name: "lexer::skip_whitespace" + path: "include/nlohmann/detail/input/lexer.hpp" + description: "function, which skips admissible whitespace during reading" + - type: cpp_test + name: "accept;whitespace" + path: "TSF/tests/unit-literals.cpp" + - type: cpp_test + name: "accept;whitespace;Leading and tailing" + path: "TSF/tests/unit-numbers.cpp" + - type: cpp_test + name: "accept;whitespace" + path: "TSF/tests/unit-strings.cpp" + - type: cpp_test + name: "accept;whitespace" + path: "TSF/tests/unit-objects.cpp" + - type: cpp_test + name: "accept;whitespace" + path: "TSF/tests/unit-arrays.cpp" +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library accepts leading and closing whitespaces. 
\ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-12.1.md b/TSF/trustable/no-json-faults/NJF-12.1.md new file mode 100644 index 0000000000..9e97a2d98a --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-12.1.md @@ -0,0 +1,21 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-01 + - type: cpp_test + name: "accept;malformed sequences" + path: "TSF/tests/unit-strings.cpp" +evidence: + type: check_test_results + configuration: + tests: + - strings +score: + Jonas-Kirchhoff: 0.9 + Erikhu1: 0.95 +--- + +The service provided by the nlohmann/json library rejects malformed UTF-8 data. \ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-12.2.md b/TSF/trustable/no-json-faults/NJF-12.2.md new file mode 100644 index 0000000000..82724e5ad3 --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-12.2.md @@ -0,0 +1,22 @@ +--- +level: 1.1 +normative: true + +references: + - type: item + items: + - JLEX-01 + - type: cpp_test + name: "accept;overlong sequences" + path: "TSF/tests/unit-strings.cpp" +evidence: + type: check_test_results + configuration: + tests: + - strings +score: + Jonas-Kirchhoff: 0.6 + Erikhu1: 0.9 +--- + +The service provided by the nlohmann/json library rejects "overlong sequences". 
\ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-12.3.md b/TSF/trustable/no-json-faults/NJF-12.3.md new file mode 100644 index 0000000000..eb9e45f453 --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-12.3.md @@ -0,0 +1,29 @@ +--- +level: 1.1 +normative: true + +references: + - type: item + items: + - JLEX-01 + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;n" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/n_structure_lone-invalid-utf-8.json" + description: "" + - type: cpp_test + name: "Unicode;unescaped unicode" + path: "TSF/tests/unit-strings.cpp" +evidence: + type: check_test_results + configuration: + tests: + - strings + - testsuites +score: + Jonas-Kirchhoff: 0.9 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library rejects single escaped and unescaped, and paired unescaped utf-16 surrogates. \ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-12.5.md b/TSF/trustable/no-json-faults/NJF-12.5.md new file mode 100644 index 0000000000..0cb63a7b3e --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-12.5.md @@ -0,0 +1,25 @@ +--- +level: 1.1 +normative: true + +references: + - type: item + items: + - JLEX-01 + - type: cpp_test + name: "accept;noncharacter code positions" + path: "TSF/tests/unit-strings.cpp" + - type: cpp_test + name: "Unicode;unescaped unicode" + path: "TSF/tests/unit-strings.cpp" +evidence: + type: check_test_results + configuration: + tests: + - strings +score: + Jonas-Kirchhoff: 0.75 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library accepts Non-Characters. 
\ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-12.6.md b/TSF/trustable/no-json-faults/NJF-12.6.md new file mode 100644 index 0000000000..45ee5e6397 --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-12.6.md @@ -0,0 +1,22 @@ +--- +level: 1.1 +normative: true + +references: + - type: item + items: + - JLEX-01 + - type: cpp_test + name: "Unicode;escaped unicode" + path: "TSF/tests/unit-strings.cpp" +evidence: + type: check_test_results + configuration: + tests: + - strings +score: + Jonas-Kirchhoff: 0.9 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library accepts well-formed UTF-8 data. \ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-12.md b/TSF/trustable/no-json-faults/NJF-12.md new file mode 100644 index 0000000000..b068f703ac --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-12.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The service provided by the nlohmann/json library decodes UTF-8 data. \ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-13.md b/TSF/trustable/no-json-faults/NJF-13.md new file mode 100644 index 0000000000..64f314c6dc --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-13.md @@ -0,0 +1,22 @@ +--- +level: 1.1 +normative: true + +references: + - type: item + items: + - JLEX-01 + - type: cpp_test + name: "compliance tests from json.org;expected passes" + path: "tests/src/unit-testsuites.cpp" +evidence: + type: check_test_results + configuration: + tests: + - testsuites +score: + Jonas-Kirchhoff: 0.95 + Erikhu1: 0.95 +--- + +The service provided by the nlohmann/json library accepts JSON data consisting of combinations of the data types. 
\ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-14.1.md b/TSF/trustable/no-json-faults/NJF-14.1.md new file mode 100644 index 0000000000..171fb4249b --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-14.1.md @@ -0,0 +1,21 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-01 + - type: cpp_test + name: "accept;UTF-8;single BOM" + path: "TSF/tests/unit-byte_order_mark.cpp" +evidence: + type: check_test_results + configuration: + tests: + - byte_order_mark +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +If the service provided by the nlohmann/json library accepts an input containing no BOM, then it accepts a single UTF-8 byte order mark followed by that input. \ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-14.2.md b/TSF/trustable/no-json-faults/NJF-14.2.md new file mode 100644 index 0000000000..0811bf0dd5 --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-14.2.md @@ -0,0 +1,21 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-01 + - type: cpp_test + name: "accept;UTF-8;multiple BOM" + path: "TSF/tests/unit-byte_order_mark.cpp" +evidence: + type: check_test_results + configuration: + tests: + - byte_order_mark +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library does not accept multiple UTF-8 byte order marks. 
\ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-14.3.md b/TSF/trustable/no-json-faults/NJF-14.3.md new file mode 100644 index 0000000000..1fc432995e --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-14.3.md @@ -0,0 +1,29 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-01 + - type: function_reference + name: "parser::accept" + path: "include/nlohmann/detail/input/parser.hpp" + description: "function, which implements the service to check for well-formed json" + - type: function_reference + name: "parser::sax_parse" + path: "include/nlohmann/detail/input/parser.hpp" + description: "function, which is called by parser::accept" + - type: function_reference + name: "parser::sax_parse_internal" + path: "include/nlohmann/detail/input/parser.hpp" + description: "function, which is called by parser::sax_parse" + - type: function_reference + name: "lexer::scan" + path: "include/nlohmann/detail/input/lexer.hpp" + description: "function, which is called by parser::sax_parse_internal to read input data" +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library does not accept incomplete or perturbed UTF-8 byte order marks within the first three characters of the input. 
\ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-14.4.md b/TSF/trustable/no-json-faults/NJF-14.4.md new file mode 100644 index 0000000000..d1e81a0798 --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-14.4.md @@ -0,0 +1,21 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-01 + - type: cpp_test + name: "accept;UTF-8;Other byte-order marks;UTF-32" + path: "TSF/tests/unit-byte_order_mark.cpp" +evidence: + type: check_test_results + configuration: + tests: + - byte_order_mark +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library does not accept UTF-16 and UTF-32 byte order marks instead of the UTF-8 byte order mark. \ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-14.5.md b/TSF/trustable/no-json-faults/NJF-14.5.md new file mode 100644 index 0000000000..b950a39eb4 --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-14.5.md @@ -0,0 +1,21 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-01 + - type: cpp_test + name: "accept;UTF-8;unexpected BOM" + path: "TSF/tests/unit-byte_order_mark.cpp" +evidence: + type: check_test_results + configuration: + tests: + - byte_order_mark +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library does not accept UTF-8 byte order mark outside of a string and outside of the first three characters of the input. \ No newline at end of file diff --git a/TSF/trustable/no-json-faults/NJF-14.md b/TSF/trustable/no-json-faults/NJF-14.md new file mode 100644 index 0000000000..a00ae04258 --- /dev/null +++ b/TSF/trustable/no-json-faults/NJF-14.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The service provided by the nlohmann/json library accepts a single complete UTF-8 byte order mark at the beginning of the input only. 
\ No newline at end of file diff --git a/TSF/trustable/no-parsing-faults/NPF-01.1.md b/TSF/trustable/no-parsing-faults/NPF-01.1.md new file mode 100644 index 0000000000..9c69dbefb2 --- /dev/null +++ b/TSF/trustable/no-parsing-faults/NPF-01.1.md @@ -0,0 +1,25 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-02 + - type: cpp_test + name: "Unicode (1/5);ignore byte-order-mark" + path: "tests/src/unit-unicode1.cpp" + - type: cpp_test + name: "deserialization;ignoring byte-order marks;BOM and content" + path: "tests/src/unit-deserialization.cpp" +evidence: + type: check_test_results + configuration: + tests: + - unicode1 + - deserialization +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library ignores the presence of a single UTF-8 byte order mark at the very beginning of the input. \ No newline at end of file diff --git a/TSF/trustable/no-parsing-faults/NPF-01.2.md b/TSF/trustable/no-parsing-faults/NPF-01.2.md new file mode 100644 index 0000000000..dc114b91c5 --- /dev/null +++ b/TSF/trustable/no-parsing-faults/NPF-01.2.md @@ -0,0 +1,21 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-02 + - type: cpp_test + name: "parse;UTF-8;multiple BOM" + path: "TSF/tests/unit-byte_order_mark.cpp" +evidence: + type: check_test_results + configuration: + tests: + - byte_order_mark +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library does not parse multiple UTF-8 byte order marks at the beginning of the input and throws an exception. 
\ No newline at end of file diff --git a/TSF/trustable/no-parsing-faults/NPF-01.3.md b/TSF/trustable/no-parsing-faults/NPF-01.3.md new file mode 100644 index 0000000000..cd69bb8c96 --- /dev/null +++ b/TSF/trustable/no-parsing-faults/NPF-01.3.md @@ -0,0 +1,21 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-02 + - type: cpp_test + name: "parse;UTF-8;unexpected BOM" + path: "TSF/tests/unit-byte_order_mark.cpp" +evidence: + type: check_test_results + configuration: + tests: + - byte_order_mark +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library does not parse UTF-8 byte order marks outside of a string and the first three characters of the input, and throws an exception. \ No newline at end of file diff --git a/TSF/trustable/no-parsing-faults/NPF-01.4.md b/TSF/trustable/no-parsing-faults/NPF-01.4.md new file mode 100644 index 0000000000..e5da74cb01 --- /dev/null +++ b/TSF/trustable/no-parsing-faults/NPF-01.4.md @@ -0,0 +1,21 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-02 + - type: cpp_test + name: "parse;other BOM" + path: "TSF/tests/unit-byte_order_mark.cpp" +evidence: + type: check_test_results + configuration: + tests: + - byte_order_mark +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library does not parse UTF-16 and UTF-32 byte order mark instead of an UTF-8 byte order mark, and throws an exception. 
\ No newline at end of file diff --git a/TSF/trustable/no-parsing-faults/NPF-01.5.md b/TSF/trustable/no-parsing-faults/NPF-01.5.md new file mode 100644 index 0000000000..e04f1f4afd --- /dev/null +++ b/TSF/trustable/no-parsing-faults/NPF-01.5.md @@ -0,0 +1,31 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-02 + - type: cpp_test + name: "deserialization;ignoring byte-order marks;2 byte of BOM" + path: "tests/src/unit-deserialization.cpp" + - type: cpp_test + name: "deserialization;ignoring byte-order marks;1 byte of BOM" + path: "tests/src/unit-deserialization.cpp" + - type: cpp_test + name: "deserialization;ignoring byte-order marks;variations" + path: "tests/src/unit-deserialization.cpp" + - type: cpp_test + name: "Unicode (1/5);error for incomplete/wrong BOM" + path: "tests/src/unit-unicode1.cpp" +evidence: + type: check_test_results + configuration: + tests: + - deserialization + - unicode1 +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library does not parse partial and perturbed UTF-8 byte order marks within the first three characters of the input and throws an exception. \ No newline at end of file diff --git a/TSF/trustable/no-parsing-faults/NPF-01.md b/TSF/trustable/no-parsing-faults/NPF-01.md new file mode 100644 index 0000000000..6ec1ea2387 --- /dev/null +++ b/TSF/trustable/no-parsing-faults/NPF-01.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The service provided by the nlohmann/json library ignores the presence of a byte order mark. 
\ No newline at end of file diff --git a/TSF/trustable/no-parsing-faults/NPF-02.1.md b/TSF/trustable/no-parsing-faults/NPF-02.1.md new file mode 100644 index 0000000000..1179df581d --- /dev/null +++ b/TSF/trustable/no-parsing-faults/NPF-02.1.md @@ -0,0 +1,36 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-02 + - NPF-02 + - type: cpp_test + name: "parser class - core;parse;number;floating-point;without exponent" + path: "TSF/tests/unit-class_parser_core.cpp" + - type: cpp_test + name: "parser class - core;parse;number;integers;without exponent" + path: "TSF/tests/unit-class_parser_core.cpp" + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;y" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/y_number_simple_int.json" + - "/nst_json_testsuite2/test_parsing/y_number_simple_real.json" + - "/nst_json_testsuite2/test_parsing/y_number_negative_int.json" + - "/nst_json_testsuite2/test_parsing/y_number_negative_one.json" + - "/nst_json_testsuite2/test_parsing/y_number_negative_zero.json" + description: "Tests whether several numbers without exponent are parsed without throwing an exception." +evidence: + type: check_test_results + configuration: + tests: + - class_parser_core + - testsuites +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library parses integers without exponent within the precision of int64_t. 
\ No newline at end of file diff --git a/TSF/trustable/no-parsing-faults/NPF-02.10.md b/TSF/trustable/no-parsing-faults/NPF-02.10.md new file mode 100644 index 0000000000..1a4c0985c7 --- /dev/null +++ b/TSF/trustable/no-parsing-faults/NPF-02.10.md @@ -0,0 +1,26 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-02 + - NPF-02 + - type: cpp_test + name: "regression tests 1;issue #379 - locale-independent str-to-num" + path: "tests/src/unit-regression1.cpp" + - type: cpp_test + name: "parse;trailing zeroes" + path: "TSF/tests/unit-numbers.cpp" +evidence: + type: check_test_results + configuration: + tests: + - regression1 + - numbers +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library ignores trailing zeroes after the decimal point. \ No newline at end of file diff --git a/TSF/trustable/no-parsing-faults/NPF-02.11.md b/TSF/trustable/no-parsing-faults/NPF-02.11.md new file mode 100644 index 0000000000..fda51845c7 --- /dev/null +++ b/TSF/trustable/no-parsing-faults/NPF-02.11.md @@ -0,0 +1,22 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-02 + - NPF-02 + - type: cpp_test + name: "regression tests 1;issue #186 miloyip/nativejson-benchmark: floating-point parsing" + path: "tests/src/unit-regression1.cpp" +evidence: + type: check_test_results + configuration: + tests: + - regression1 +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library parses numbers within the 64-bit double range but outside of the double precision without throwing an exception and without guarantee of precision. 
\ No newline at end of file diff --git a/TSF/trustable/no-parsing-faults/NPF-02.11_CONTEXT.md b/TSF/trustable/no-parsing-faults/NPF-02.11_CONTEXT.md new file mode 100644 index 0000000000..05bfc0c322 --- /dev/null +++ b/TSF/trustable/no-parsing-faults/NPF-02.11_CONTEXT.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: false +--- + +Although numbers are parsed without throwing an exception, they are not parsed accurately. \ No newline at end of file diff --git a/TSF/trustable/no-parsing-faults/NPF-02.12.md b/TSF/trustable/no-parsing-faults/NPF-02.12.md new file mode 100644 index 0000000000..fd497326d4 --- /dev/null +++ b/TSF/trustable/no-parsing-faults/NPF-02.12.md @@ -0,0 +1,22 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-02 + - NPF-02 + - type: cpp_test + name: "parse;exponents;Capitalisation" + path: "TSF/tests/unit-numbers.cpp" +evidence: + type: check_test_results + configuration: + tests: + - numbers +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library ignores capitalisation of the exponent. 
\ No newline at end of file diff --git a/TSF/trustable/no-parsing-faults/NPF-02.2.md b/TSF/trustable/no-parsing-faults/NPF-02.2.md new file mode 100644 index 0000000000..dde94d497a --- /dev/null +++ b/TSF/trustable/no-parsing-faults/NPF-02.2.md @@ -0,0 +1,42 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-02 + - NPF-02 + - type: cpp_test + name: "parser class - core;parse;number;floating-point;with exponent" + path: "TSF/tests/unit-class_parser_core.cpp" + - type: cpp_test + name: "parser class - core;parse;number;integers;with exponent" + path: "TSF/tests/unit-class_parser_core.cpp" + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;y" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/y_number_real_capital_e.json" + - "/nst_json_testsuite2/test_parsing/y_number_real_capital_e_neg_exp.json" + - "/nst_json_testsuite2/test_parsing/y_number_real_capital_e_pos_exp.json" + - "/nst_json_testsuite2/test_parsing/y_number_real_exponent.json" + - "/nst_json_testsuite2/test_parsing/y_number_real_fraction_exponent.json" + - "/nst_json_testsuite2/test_parsing/y_number_real_neg_exp.json" + - "/nst_json_testsuite2/test_parsing/y_number_real_pos_exponent.json" + description: "Tests whether several numbers with exponent are parsed without throwing an exception." + - type: cpp_test + name: "parse;Precision" + path: "TSF/tests/unit-numbers.cpp" +evidence: + type: check_test_results + configuration: + tests: + - class_parser_core + - testsuites + - numbers +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library parses integers with exponent within the precision of 64-bit double. 
\ No newline at end of file diff --git a/TSF/trustable/no-parsing-faults/NPF-02.3.md b/TSF/trustable/no-parsing-faults/NPF-02.3.md new file mode 100644 index 0000000000..6d48afffca --- /dev/null +++ b/TSF/trustable/no-parsing-faults/NPF-02.3.md @@ -0,0 +1,22 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-02 + - NPF-02 + - type: cpp_test + name: "parser class - core;parse;number;integers" + path: "TSF/tests/unit-class_parser_core.cpp" +evidence: + type: check_test_results + configuration: + tests: + - class_parser_core +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library parses floating point values without exponent within the precision of 64-bit double. \ No newline at end of file diff --git a/TSF/trustable/no-parsing-faults/NPF-02.4.md b/TSF/trustable/no-parsing-faults/NPF-02.4.md new file mode 100644 index 0000000000..cc163e46f0 --- /dev/null +++ b/TSF/trustable/no-parsing-faults/NPF-02.4.md @@ -0,0 +1,26 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-02 + - NPF-02 + - type: cpp_test + name: "parser class - core;parse;number;floating-point" + path: "TSF/tests/unit-class_parser_core.cpp" + - type: cpp_test + name: "regression tests 1;issue #360 - Loss of precision when serializing " + path: "tests/src/unit-regression1.cpp" +evidence: + type: check_test_results + configuration: + tests: + - class_parser_core + - regression1 +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library parses floating point values with exponent within the precision of 64-bit double. 
\ No newline at end of file diff --git a/TSF/trustable/no-parsing-faults/NPF-02.5.md b/TSF/trustable/no-parsing-faults/NPF-02.5.md new file mode 100644 index 0000000000..8dc456d82e --- /dev/null +++ b/TSF/trustable/no-parsing-faults/NPF-02.5.md @@ -0,0 +1,22 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-02 + - NPF-02 + - type: cpp_test + name: "parse;exponents;leading zeroes" + path: "TSF/tests/unit-numbers.cpp" +evidence: + type: check_test_results + configuration: + tests: + - numbers +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library ignores leading zeroes in the exponent. \ No newline at end of file diff --git a/TSF/trustable/no-parsing-faults/NPF-02.6.md b/TSF/trustable/no-parsing-faults/NPF-02.6.md new file mode 100644 index 0000000000..b0bf6331f9 --- /dev/null +++ b/TSF/trustable/no-parsing-faults/NPF-02.6.md @@ -0,0 +1,25 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-02 + - NPF-02 + - type: cpp_test + name: "parser class - core;parse;number;integers;edge cases" + path: "TSF/tests/unit-class_parser_core.cpp" + - type: cpp_test + name: "parser class - core;parse;number;integers;over the edge cases" + path: "TSF/tests/unit-class_parser_core.cpp" +evidence: + type: check_test_results + configuration: + tests: + - class_parser_core +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library parses integers within IEEE 754-2008 binary64. 
\ No newline at end of file diff --git a/TSF/trustable/no-parsing-faults/NPF-02.7.md b/TSF/trustable/no-parsing-faults/NPF-02.7.md new file mode 100644 index 0000000000..66965a5e8f --- /dev/null +++ b/TSF/trustable/no-parsing-faults/NPF-02.7.md @@ -0,0 +1,26 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-02 + - NPF-02 + - type: cpp_test + name: "parse;whitespace" + path: "TSF/tests/unit-numbers.cpp" + - type: function_reference + name: "lexer::skip_whitespace" + path: "include/nlohmann/detail/input/lexer.hpp" + description: "function, which skips admissible whitespace during reading" +evidence: + type: check_test_results + configuration: + tests: + - numbers +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library ignores leading and trailing whitespace. \ No newline at end of file diff --git a/TSF/trustable/no-parsing-faults/NPF-02.8.md b/TSF/trustable/no-parsing-faults/NPF-02.8.md new file mode 100644 index 0000000000..cd91aaff3d --- /dev/null +++ b/TSF/trustable/no-parsing-faults/NPF-02.8.md @@ -0,0 +1,22 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-02 + - NPF-02 + - type: cpp_test + name: "parse;exponents;leading plus" + path: "TSF/tests/unit-numbers.cpp" +evidence: + type: check_test_results + configuration: + tests: + - numbers +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library ignores one singular leading plus of the exponent. 
\ No newline at end of file diff --git a/TSF/trustable/no-parsing-faults/NPF-02.9.md b/TSF/trustable/no-parsing-faults/NPF-02.9.md new file mode 100644 index 0000000000..44b76fab66 --- /dev/null +++ b/TSF/trustable/no-parsing-faults/NPF-02.9.md @@ -0,0 +1,26 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-02 + - NPF-02 + - type: cpp_test + name: "compliance tests from nativejson-benchmark;doubles" + path: "tests/src/unit-testsuites.cpp" + - type: cpp_test + name: "regression tests 1;issue #360 - Loss of precision when serializing " + path: "tests/src/unit-regression1.cpp" +evidence: + type: check_test_results + configuration: + tests: + - regression1 + - testsuites +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library parses floating point numbers within IEEE 754-2008 binary64 standard. \ No newline at end of file diff --git a/TSF/trustable/no-parsing-faults/NPF-02.md b/TSF/trustable/no-parsing-faults/NPF-02.md new file mode 100644 index 0000000000..2f42710b13 --- /dev/null +++ b/TSF/trustable/no-parsing-faults/NPF-02.md @@ -0,0 +1,11 @@ +--- +level: 1.1 +normative: true +references: + - type: function_reference + name: "lexer::scan_number" + path: "include/nlohmann/detail/input/lexer.hpp" + description: "function, which parses numbers into C++ number-types and verifies *en passant* that these numbers are in accordance with RFC8259" +--- + +The service provided by the nlohmann/json library parses numbers according to RFC8259. 
\ No newline at end of file diff --git a/TSF/trustable/no-parsing-faults/NPF-03.1.md b/TSF/trustable/no-parsing-faults/NPF-03.1.md new file mode 100644 index 0000000000..3aa9d190a5 --- /dev/null +++ b/TSF/trustable/no-parsing-faults/NPF-03.1.md @@ -0,0 +1,26 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-02 + - NPF-03 + - type: cpp_test + name: "parse;whitespace" + path: "TSF/tests/unit-strings.cpp" + - type: function_reference + name: "lexer::skip_whitespace" + path: "include/nlohmann/detail/input/lexer.hpp" + description: "function, which skips admissible whitespace during reading" +evidence: + type: check_test_results + configuration: + tests: + - strings +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library ignores leading and trailing whitespace. \ No newline at end of file diff --git a/TSF/trustable/no-parsing-faults/NPF-03.2.md b/TSF/trustable/no-parsing-faults/NPF-03.2.md new file mode 100644 index 0000000000..01e3f63096 --- /dev/null +++ b/TSF/trustable/no-parsing-faults/NPF-03.2.md @@ -0,0 +1,30 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-02 + - NPF-03 + - type: cpp_test + name: "parser class - core;parse;string;escaped" + path: "TSF/tests/unit-class_parser_core.cpp" + - type: cpp_test + name: "compliance tests from nativejson-benchmark;strings" + path: "tests/src/unit-testsuites.cpp" + - type: cpp_test + name: "Unicode (1/5);\\\\uxxxx sequences;correct sequences" + path: "tests/src/unit-unicode1.cpp" +evidence: + type: check_test_results + configuration: + tests: + - class_parser_core + - testsuites + - unicode1 +score: + Jonas-Kirchhoff: 0.95 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library parses escaped characters in the basic multilingual plane. 
\ No newline at end of file diff --git a/TSF/trustable/no-parsing-faults/NPF-03.3.md b/TSF/trustable/no-parsing-faults/NPF-03.3.md new file mode 100644 index 0000000000..059ed1cb07 --- /dev/null +++ b/TSF/trustable/no-parsing-faults/NPF-03.3.md @@ -0,0 +1,22 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-02 + - NPF-03 + - type: cpp_test + name: "Unicode;escaped unicode" + path: "TSF/tests/unit-strings.cpp" +evidence: + type: check_test_results + configuration: + tests: + - strings +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 0.95 +--- + +The service provided by the nlohmann/json library ignores capitalisation in escaped hexadecimal unicode. \ No newline at end of file diff --git a/TSF/trustable/no-parsing-faults/NPF-03.4.md b/TSF/trustable/no-parsing-faults/NPF-03.4.md new file mode 100644 index 0000000000..ad44c62301 --- /dev/null +++ b/TSF/trustable/no-parsing-faults/NPF-03.4.md @@ -0,0 +1,33 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-02 + - NPF-03 + - type: cpp_test + name: "RFC 8259 examples;7. Strings" + path: "tests/src/unit-testsuites.cpp" + - type: JSON_testsuite + name: "Unicode (1/5);read all unicode characters" + path: "tests/src/unit-unicode1.cpp" + test_suite_paths: + - "/json_nlohmann_tests/all_unicode.json" + description: "" + - type: cpp_test + name: "Unicode;unescaped unicode" + path: "TSF/tests/unit-strings.cpp" +evidence: + type: check_test_results + configuration: + tests: + - unicode1 + - testsuites + - strings +score: + Jonas-Kirchhoff: 0.95 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library parses all unescaped utf-8 characters except quotation marks, reverse solidus and the control characters. 
\ No newline at end of file diff --git a/TSF/trustable/no-parsing-faults/NPF-03.5.md b/TSF/trustable/no-parsing-faults/NPF-03.5.md new file mode 100644 index 0000000000..93bf004a55 --- /dev/null +++ b/TSF/trustable/no-parsing-faults/NPF-03.5.md @@ -0,0 +1,26 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-02 + - NPF-03 + - type: cpp_test + name: "compliance tests from nativejson-benchmark;strings" + path: "tests/src/unit-testsuites.cpp" + - type: cpp_test + name: "parser class - core;parse;string;escaped" + path: "TSF/tests/unit-class_parser_core.cpp" +evidence: + type: check_test_results + configuration: + tests: + - class_parser_core + - testsuites +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library parses \\, \\/, \\b, \\f, \\n, \\r, \\t and escaped quotation marks. \ No newline at end of file diff --git a/TSF/trustable/no-parsing-faults/NPF-03.6.md b/TSF/trustable/no-parsing-faults/NPF-03.6.md new file mode 100644 index 0000000000..cb2cd1d81b --- /dev/null +++ b/TSF/trustable/no-parsing-faults/NPF-03.6.md @@ -0,0 +1,22 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-02 + - NPF-03 + - type: cpp_test + name: "parser class - core;parse;string" + path: "TSF/tests/unit-class_parser_core.cpp" +evidence: + type: check_test_results + configuration: + tests: + - class_parser_core +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library parses the empty string. 
\ No newline at end of file diff --git a/TSF/trustable/no-parsing-faults/NPF-03.7.md b/TSF/trustable/no-parsing-faults/NPF-03.7.md new file mode 100644 index 0000000000..6a694ab632 --- /dev/null +++ b/TSF/trustable/no-parsing-faults/NPF-03.7.md @@ -0,0 +1,77 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-02 + - NPF-03 + - type: cpp_test + name: "parser class - core;parse;string;escaped" + path: "TSF/tests/unit-class_parser_core.cpp" + - type: cpp_test + name: "compliance tests from nativejson-benchmark;strings" + path: "tests/src/unit-testsuites.cpp" + - type: cpp_test + name: "RFC 8259 examples;7. Strings" + path: "tests/src/unit-testsuites.cpp" + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;y" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/y_string_1_2_3_bytes_UTF-8_sequences.json" + - "/nst_json_testsuite2/test_parsing/y_string_accepted_surrogate_pair.json" + - "/nst_json_testsuite2/test_parsing/y_string_accepted_surrogate_pairs.json" + - "/nst_json_testsuite2/test_parsing/y_string_allowed_escapes.json" + - "/nst_json_testsuite2/test_parsing/y_string_backslash_and_u_escaped_zero.json" + - "/nst_json_testsuite2/test_parsing/y_string_backslash_doublequotes.json" + - "/nst_json_testsuite2/test_parsing/y_string_comments.json" + - "/nst_json_testsuite2/test_parsing/y_string_double_escape_a.json" + - "/nst_json_testsuite2/test_parsing/y_string_double_escape_n.json" + - "/nst_json_testsuite2/test_parsing/y_string_escaped_control_character.json" + - "/nst_json_testsuite2/test_parsing/y_string_escaped_noncharacter.json" + - "/nst_json_testsuite2/test_parsing/y_string_in_array.json" + - "/nst_json_testsuite2/test_parsing/y_string_in_array_with_leading_space.json" + - "/nst_json_testsuite2/test_parsing/y_string_last_surrogates_1_and_2.json" + - "/nst_json_testsuite2/test_parsing/y_string_nbsp_uescaped.json" + - 
"/nst_json_testsuite2/test_parsing/y_string_nonCharacterInUTF-8_U+10FFFF.json" + - "/nst_json_testsuite2/test_parsing/y_string_nonCharacterInUTF-8_U+FFFF.json" + - "/nst_json_testsuite2/test_parsing/y_string_null_escape.json" + - "/nst_json_testsuite2/test_parsing/y_string_one-byte-utf-8.json" + - "/nst_json_testsuite2/test_parsing/y_string_pi.json" + - "/nst_json_testsuite2/test_parsing/y_string_reservedCharacterInUTF-8_U+1BFFF.json" + - "/nst_json_testsuite2/test_parsing/y_string_simple_ascii.json" + - "/nst_json_testsuite2/test_parsing/y_string_space.json" + - "/nst_json_testsuite2/test_parsing/y_string_surrogates_U+1D11E_MUSICAL_SYMBOL_G_CLEF.json" + - "/nst_json_testsuite2/test_parsing/y_string_three-byte-utf-8.json" + - "/nst_json_testsuite2/test_parsing/y_string_two-byte-utf-8.json" + - "/nst_json_testsuite2/test_parsing/y_string_u+2028_line_sep.json" + - "/nst_json_testsuite2/test_parsing/y_string_u+2029_par_sep.json" + - "/nst_json_testsuite2/test_parsing/y_string_uEscape.json" + - "/nst_json_testsuite2/test_parsing/y_string_uescaped_newline.json" + - "/nst_json_testsuite2/test_parsing/y_string_unescaped_char_delete.json" + - "/nst_json_testsuite2/test_parsing/y_string_unicode.json" + - "/nst_json_testsuite2/test_parsing/y_string_unicodeEscapedBackslash.json" + - "/nst_json_testsuite2/test_parsing/y_string_unicode_2.json" + - "/nst_json_testsuite2/test_parsing/y_string_unicode_U+10FFFE_nonchar.json" + - "/nst_json_testsuite2/test_parsing/y_string_unicode_U+1FFFE_nonchar.json" + - "/nst_json_testsuite2/test_parsing/y_string_unicode_U+200B_ZERO_WIDTH_SPACE.json" + - "/nst_json_testsuite2/test_parsing/y_string_unicode_U+2064_invisible_plus.json" + - "/nst_json_testsuite2/test_parsing/y_string_unicode_U+FDD0_nonchar.json" + - "/nst_json_testsuite2/test_parsing/y_string_unicode_U+FFFE_nonchar.json" + - "/nst_json_testsuite2/test_parsing/y_string_unicode_escaped_double_quote.json" + - "/nst_json_testsuite2/test_parsing/y_string_utf8.json" + - 
"/nst_json_testsuite2/test_parsing/y_string_with_del_character.json" + description: "Tests whether several non-empty strings are parsed without throwing an exception." +evidence: + type: check_test_results + configuration: + tests: + - class_parser_core + - testsuites +score: + Jonas-Kirchhoff: 0.95 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library parses non-empty strings. \ No newline at end of file diff --git a/TSF/trustable/no-parsing-faults/NPF-03.md b/TSF/trustable/no-parsing-faults/NPF-03.md new file mode 100644 index 0000000000..aed32371e1 --- /dev/null +++ b/TSF/trustable/no-parsing-faults/NPF-03.md @@ -0,0 +1,11 @@ +--- +level: 1.1 +normative: true +references: + - type: function_reference + name: "lexer::scan_string" + path: "include/nlohmann/detail/input/lexer.hpp" + description: "function, which parses strings into C++ std::string and verifies *en passant* that these strings are in accordance with RFC8259" +--- + +The service provided by the nlohmann/json library parses strings according to RFC8259. \ No newline at end of file diff --git a/TSF/trustable/no-parsing-faults/NPF-04.1.md b/TSF/trustable/no-parsing-faults/NPF-04.1.md new file mode 100644 index 0000000000..95d01d1464 --- /dev/null +++ b/TSF/trustable/no-parsing-faults/NPF-04.1.md @@ -0,0 +1,26 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-02 + - NPF-04 + - type: cpp_test + name: "parse;whitespace" + path: "TSF/tests/unit-literals.cpp" + - type: function_reference + name: "lexer::skip_whitespace" + path: "include/nlohmann/detail/input/lexer.hpp" + description: "function, which skips admissible whitespace during reading" +evidence: + type: check_test_results + configuration: + tests: + - literals +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library ignores leading and trailing whitespace. 
\ No newline at end of file diff --git a/TSF/trustable/no-parsing-faults/NPF-04.2.md b/TSF/trustable/no-parsing-faults/NPF-04.2.md new file mode 100644 index 0000000000..c17f849ff1 --- /dev/null +++ b/TSF/trustable/no-parsing-faults/NPF-04.2.md @@ -0,0 +1,22 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-02 + - NPF-04 + - type: cpp_test + name: "parser class - core;parse;true" + path: "TSF/tests/unit-class_parser_core.cpp" +evidence: + type: check_test_results + configuration: + tests: + - class_parser_core +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library parses the literal name true. \ No newline at end of file diff --git a/TSF/trustable/no-parsing-faults/NPF-04.3.md b/TSF/trustable/no-parsing-faults/NPF-04.3.md new file mode 100644 index 0000000000..7e6c58b51a --- /dev/null +++ b/TSF/trustable/no-parsing-faults/NPF-04.3.md @@ -0,0 +1,22 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-02 + - NPF-04 + - type: cpp_test + name: "parser class - core;parse;false" + path: "TSF/tests/unit-class_parser_core.cpp" +evidence: + type: check_test_results + configuration: + tests: + - class_parser_core +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library parses the literal name false. 
\ No newline at end of file diff --git a/TSF/trustable/no-parsing-faults/NPF-04.4.md b/TSF/trustable/no-parsing-faults/NPF-04.4.md new file mode 100644 index 0000000000..195832d165 --- /dev/null +++ b/TSF/trustable/no-parsing-faults/NPF-04.4.md @@ -0,0 +1,22 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-02 + - NPF-04 + - type: cpp_test + name: "parser class - core;parse;null" + path: "TSF/tests/unit-class_parser_core.cpp" +evidence: + type: check_test_results + configuration: + tests: + - class_parser_core +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library parses the literal name null. \ No newline at end of file diff --git a/TSF/trustable/no-parsing-faults/NPF-04.md b/TSF/trustable/no-parsing-faults/NPF-04.md new file mode 100644 index 0000000000..6415539283 --- /dev/null +++ b/TSF/trustable/no-parsing-faults/NPF-04.md @@ -0,0 +1,11 @@ +--- +level: 1.1 +normative: true +references: + - type: function_reference + name: lexer::scan_literal + path: "include/nlohmann/detail/input/lexer.hpp" + description: "function to scan a literal candidate, compare it to its expected value and return the corresponding C++ literal" +--- + +The service provided by the nlohmann/json library parses literal names "true", "false" and "null" according to RFC8259. 
\ No newline at end of file diff --git a/TSF/trustable/no-parsing-faults/NPF-05.1.md b/TSF/trustable/no-parsing-faults/NPF-05.1.md new file mode 100644 index 0000000000..0ccf928aab --- /dev/null +++ b/TSF/trustable/no-parsing-faults/NPF-05.1.md @@ -0,0 +1,29 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-02 + - type: cpp_test + name: "parser class - core;parse;array;empty array" + path: "TSF/tests/unit-class_parser_core.cpp" + - type: cpp_test + name: "parse;whitespace" + path: "TSF/tests/unit-arrays.cpp" + - type: function_reference + name: "lexer::skip_whitespace" + path: "include/nlohmann/detail/input/lexer.hpp" + description: "function, which skips admissible whitespace during reading" +evidence: + type: check_test_results + configuration: + tests: + - class_parser_core + - arrays +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library ignores leading and trailing whitespace for each value. \ No newline at end of file diff --git a/TSF/trustable/no-parsing-faults/NPF-05.2.md b/TSF/trustable/no-parsing-faults/NPF-05.2.md new file mode 100644 index 0000000000..b5de34aeeb --- /dev/null +++ b/TSF/trustable/no-parsing-faults/NPF-05.2.md @@ -0,0 +1,28 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-02 + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;y" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/y_array_empty.json" + description: "Tests whether the empty array is parsed without exception." + - type: cpp_test + name: "parser class - core;parse;array;empty array" + path: "TSF/tests/unit-class_parser_core.cpp" +evidence: + type: check_test_results + configuration: + tests: + - class_parser_core + - testsuites +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library parses empty arrays. 
\ No newline at end of file diff --git a/TSF/trustable/no-parsing-faults/NPF-05.3.md b/TSF/trustable/no-parsing-faults/NPF-05.3.md new file mode 100644 index 0000000000..c24f0a50e7 --- /dev/null +++ b/TSF/trustable/no-parsing-faults/NPF-05.3.md @@ -0,0 +1,37 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-02 + - type: cpp_test + name: "parser class - core;parse;array;nonempty array" + path: "TSF/tests/unit-class_parser_core.cpp" + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;y" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/y_array_arraysWithSpaces.json" + - "/nst_json_testsuite2/test_parsing/y_array_empty-string.json" + - "/nst_json_testsuite2/test_parsing/y_array_ending_with_newline.json" + - "/nst_json_testsuite2/test_parsing/y_array_false.json" + - "/nst_json_testsuite2/test_parsing/y_array_heterogeneous.json" + - "/nst_json_testsuite2/test_parsing/y_array_null.json" + - "/nst_json_testsuite2/test_parsing/y_array_with_1_and_newline.json" + - "/nst_json_testsuite2/test_parsing/y_array_with_leading_space.json" + - "/nst_json_testsuite2/test_parsing/y_array_with_several_null.json" + - "/nst_json_testsuite2/test_parsing/y_array_with_trailing_space.json" + description: "Tests whether several non-empty arrays are parsed without exception" +evidence: + type: check_test_results + configuration: + tests: + - class_parser_core + - testsuites +score: + Jonas-Kirchhoff: 0.95 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library parses non-empty arrays. \ No newline at end of file diff --git a/TSF/trustable/no-parsing-faults/NPF-05.md b/TSF/trustable/no-parsing-faults/NPF-05.md new file mode 100644 index 0000000000..cdf6578114 --- /dev/null +++ b/TSF/trustable/no-parsing-faults/NPF-05.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The service provided by the nlohmann/json library parses arrays according to RFC8259. 
\ No newline at end of file diff --git a/TSF/trustable/no-parsing-faults/NPF-06.1.md b/TSF/trustable/no-parsing-faults/NPF-06.1.md new file mode 100644 index 0000000000..885fec0fde --- /dev/null +++ b/TSF/trustable/no-parsing-faults/NPF-06.1.md @@ -0,0 +1,29 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-02 + - type: cpp_test + name: "parser class - core;parse;object;empty object" + path: "TSF/tests/unit-class_parser_core.cpp" + - type: cpp_test + name: "regression tests 1;example from #529" + path: "tests/src/unit-regression1.cpp" + - type: function_reference + name: "lexer::skip_whitespace" + path: "include/nlohmann/detail/input/lexer.hpp" + description: "function, which skips admissible whitespace during reading" +evidence: + type: check_test_results + configuration: + tests: + - class_parser_core + - regression1 +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library ignores leading and trailing whitespace for name and value of each member. \ No newline at end of file diff --git a/TSF/trustable/no-parsing-faults/NPF-06.2.md b/TSF/trustable/no-parsing-faults/NPF-06.2.md new file mode 100644 index 0000000000..bf516d2d99 --- /dev/null +++ b/TSF/trustable/no-parsing-faults/NPF-06.2.md @@ -0,0 +1,25 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-02 + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;y" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/y_object_duplicated_key.json" + - "/nst_json_testsuite2/test_parsing/y_object_duplicated_key_and_value.json" + description: "" +evidence: + type: check_test_results + configuration: + tests: + - testsuites +score: + Jonas-Kirchhoff: 0.9 + Erikhu1: 0.95 +--- + +The service provided by the nlohmann/json library parses duplicate names without error and reports the last member with that name only. 
\ No newline at end of file diff --git a/TSF/trustable/no-parsing-faults/NPF-06.3.md b/TSF/trustable/no-parsing-faults/NPF-06.3.md new file mode 100644 index 0000000000..1ae3fe9285 --- /dev/null +++ b/TSF/trustable/no-parsing-faults/NPF-06.3.md @@ -0,0 +1,21 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-02 + - type: cpp_test + name: "parser class - core;parse;object;empty object" + path: "TSF/tests/unit-class_parser_core.cpp" +evidence: + type: check_test_results + configuration: + tests: + - class_parser_core +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library parses empty objects. \ No newline at end of file diff --git a/TSF/trustable/no-parsing-faults/NPF-06.4.md b/TSF/trustable/no-parsing-faults/NPF-06.4.md new file mode 100644 index 0000000000..bb77de77f6 --- /dev/null +++ b/TSF/trustable/no-parsing-faults/NPF-06.4.md @@ -0,0 +1,21 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-02 + - type: cpp_test + name: "parser class - core;parse;object;nonempty object" + path: "TSF/tests/unit-class_parser_core.cpp" +evidence: + type: check_test_results + configuration: + tests: + - class_parser_core +score: + Jonas-Kirchhoff: 0.95 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library parses non-empty objects. \ No newline at end of file diff --git a/TSF/trustable/no-parsing-faults/NPF-06.md b/TSF/trustable/no-parsing-faults/NPF-06.md new file mode 100644 index 0000000000..f1bee78863 --- /dev/null +++ b/TSF/trustable/no-parsing-faults/NPF-06.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The service provided by the nlohmann/json library parses objects according to RFC8259. 
\ No newline at end of file diff --git a/TSF/trustable/no-parsing-faults/NPF-07.1.md b/TSF/trustable/no-parsing-faults/NPF-07.1.md new file mode 100644 index 0000000000..e6364f33b5 --- /dev/null +++ b/TSF/trustable/no-parsing-faults/NPF-07.1.md @@ -0,0 +1,48 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-02 + - type: cpp_test + name: "Unicode (2/5);RFC 3629;UTF8-1 (x00-x7F);well-formed" + path: "tests/src/unit-unicode2.cpp" + - type: cpp_test + name: "Unicode (2/5);RFC 3629;UTF8-2 (xC2-xDF UTF8-tail);well-formed" + path: "tests/src/unit-unicode2.cpp" + - type: cpp_test + name: "Unicode (2/5);RFC 3629;UTF8-3 (xE0 xA0-BF UTF8-tail);well-formed" + path: "tests/src/unit-unicode2.cpp" + - type: cpp_test + name: "Unicode (2/5);RFC 3629;UTF8-3 (xE1-xEC UTF8-tail UTF8-tail);well-formed" + path: "tests/src/unit-unicode2.cpp" + - type: cpp_test + name: "Unicode (2/5);RFC 3629;UTF8-3 (xED x80-9F UTF8-tail);well-formed" + path: "tests/src/unit-unicode2.cpp" + - type: cpp_test + name: "Unicode (2/5);RFC 3629;UTF8-3 (xEE-xEF UTF8-tail UTF8-tail);well-formed" + path: "tests/src/unit-unicode2.cpp" + - type: cpp_test + name: "Unicode (3/5);RFC 3629;UTF8-4 (xF0 x90-BF UTF8-tail UTF8-tail);well-formed" + path: "tests/src/unit-unicode3.cpp" + - type: cpp_test + name: "Unicode (4/5);RFC 3629;UTF8-4 (xF1-F3 UTF8-tail UTF8-tail UTF8-tail);well-formed" + path: "tests/src/unit-unicode4.cpp" + - type: cpp_test + name: "Unicode (5/5);RFC 3629;UTF8-4 (xF4 x80-8F UTF8-tail UTF8-tail);well-formed" + path: "tests/src/unit-unicode5.cpp" +evidence: + type: check_test_results + configuration: + tests: + - unicode2 + - unicode3 + - unicode4 + - unicode5 +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library parses UTF-8 encoded data. 
\ No newline at end of file diff --git a/TSF/trustable/no-parsing-faults/NPF-07.2.md b/TSF/trustable/no-parsing-faults/NPF-07.2.md new file mode 100644 index 0000000000..737508fbc5 --- /dev/null +++ b/TSF/trustable/no-parsing-faults/NPF-07.2.md @@ -0,0 +1,132 @@ +--- +level: 1.1 +normative: true +references: + - type: item + items: + - JLEX-02 + - type: cpp_test + name: "Unicode (2/5);RFC 3629;ill-formed first byte" + path: "tests/src/unit-unicode2.cpp" + - type: cpp_test + name: "Unicode (2/5);RFC 3629;UTF8-2 (xC2-xDF UTF8-tail);ill-formed: missing second byte" + path: "tests/src/unit-unicode2.cpp" + - type: cpp_test + name: "Unicode (2/5);RFC 3629;UTF8-2 (xC2-xDF UTF8-tail);ill-formed: wrong second byte" + path: "tests/src/unit-unicode2.cpp" + - type: cpp_test + name: "Unicode (2/5);RFC 3629;UTF8-3 (xE0 xA0-BF UTF8-tail);ill-formed: missing second byte" + path: "tests/src/unit-unicode2.cpp" + - type: cpp_test + name: "Unicode (2/5);RFC 3629;UTF8-3 (xE0 xA0-BF UTF8-tail);ill-formed: missing third byte" + path: "tests/src/unit-unicode2.cpp" + - type: cpp_test + name: "Unicode (2/5);RFC 3629;UTF8-3 (xE0 xA0-BF UTF8-tail);ill-formed: wrong second byte" + path: "tests/src/unit-unicode2.cpp" + - type: cpp_test + name: "Unicode (2/5);RFC 3629;UTF8-3 (xE0 xA0-BF UTF8-tail);ill-formed: wrong third byte" + path: "tests/src/unit-unicode2.cpp" + - type: cpp_test + name: "Unicode (2/5);RFC 3629;UTF8-3 (xE1-xEC UTF8-tail UTF8-tail);ill-formed: missing second byte" + path: "tests/src/unit-unicode2.cpp" + - type: cpp_test + name: "Unicode (2/5);RFC 3629;UTF8-3 (xE1-xEC UTF8-tail UTF8-tail);ill-formed: missing third byte" + path: "tests/src/unit-unicode2.cpp" + - type: cpp_test + name: "Unicode (2/5);RFC 3629;UTF8-3 (xE1-xEC UTF8-tail UTF8-tail);ill-formed: wrong second byte" + path: "tests/src/unit-unicode2.cpp" + - type: cpp_test + name: "Unicode (2/5);RFC 3629;UTF8-3 (xE1-xEC UTF8-tail UTF8-tail);ill-formed: wrong third byte" + path: "tests/src/unit-unicode2.cpp" + - type: 
cpp_test + name: "Unicode (2/5);RFC 3629;UTF8-3 (xED x80-9F UTF8-tail);ill-formed: missing second byte" + path: "tests/src/unit-unicode2.cpp" + - type: cpp_test + name: "Unicode (2/5);RFC 3629;UTF8-3 (xED x80-9F UTF8-tail);ill-formed: missing third byte" + path: "tests/src/unit-unicode2.cpp" + - type: cpp_test + name: "Unicode (2/5);RFC 3629;UTF8-3 (xED x80-9F UTF8-tail);ill-formed: wrong second byte" + path: "tests/src/unit-unicode2.cpp" + - type: cpp_test + name: "Unicode (2/5);RFC 3629;UTF8-3 (xED x80-9F UTF8-tail);ill-formed: wrong third byte" + path: "tests/src/unit-unicode2.cpp" + - type: cpp_test + name: "Unicode (2/5);RFC 3629;UTF8-3 (xEE-xEF UTF8-tail UTF8-tail);ill-formed: missing second byte" + path: "tests/src/unit-unicode2.cpp" + - type: cpp_test + name: "Unicode (2/5);RFC 3629;UTF8-3 (xEE-xEF UTF8-tail UTF8-tail);ill-formed: missing third byte" + path: "tests/src/unit-unicode2.cpp" + - type: cpp_test + name: "Unicode (2/5);RFC 3629;UTF8-3 (xEE-xEF UTF8-tail UTF8-tail);ill-formed: wrong second byte" + path: "tests/src/unit-unicode2.cpp" + - type: cpp_test + name: "Unicode (2/5);RFC 3629;UTF8-3 (xEE-xEF UTF8-tail UTF8-tail);ill-formed: wrong third byte" + path: "tests/src/unit-unicode2.cpp" + - type: cpp_test + name: "Unicode (3/5);RFC 3629;UTF8-4 (xF0 x90-BF UTF8-tail UTF8-tail);ill-formed: missing second byte" + path: "tests/src/unit-unicode3.cpp" + - type: cpp_test + name: "Unicode (3/5);RFC 3629;UTF8-4 (xF0 x90-BF UTF8-tail UTF8-tail);ill-formed: missing third byte" + path: "tests/src/unit-unicode3.cpp" + - type: cpp_test + name: "Unicode (3/5);RFC 3629;UTF8-4 (xF0 x90-BF UTF8-tail UTF8-tail);ill-formed: missing fourth byte" + path: "tests/src/unit-unicode3.cpp" + - type: cpp_test + name: "Unicode (3/5);RFC 3629;UTF8-4 (xF0 x90-BF UTF8-tail UTF8-tail);ill-formed: wrong second byte" + path: "tests/src/unit-unicode3.cpp" + - type: cpp_test + name: "Unicode (3/5);RFC 3629;UTF8-4 (xF0 x90-BF UTF8-tail UTF8-tail);ill-formed: wrong third byte" + path: 
"tests/src/unit-unicode3.cpp" + - type: cpp_test + name: "Unicode (3/5);RFC 3629;UTF8-4 (xF0 x90-BF UTF8-tail UTF8-tail);ill-formed: wrong fourth byte" + path: "tests/src/unit-unicode3.cpp" + - type: cpp_test + name: "Unicode (4/5);RFC 3629;UTF8-4 (xF1-F3 UTF8-tail UTF8-tail UTF8-tail);ill-formed: missing second byte" + path: "tests/src/unit-unicode4.cpp" + - type: cpp_test + name: "Unicode (4/5);RFC 3629;UTF8-4 (xF1-F3 UTF8-tail UTF8-tail UTF8-tail);ill-formed: missing third byte" + path: "tests/src/unit-unicode4.cpp" + - type: cpp_test + name: "Unicode (4/5);RFC 3629;UTF8-4 (xF1-F3 UTF8-tail UTF8-tail UTF8-tail);ill-formed: missing fourth byte" + path: "tests/src/unit-unicode4.cpp" + - type: cpp_test + name: "Unicode (4/5);RFC 3629;UTF8-4 (xF1-F3 UTF8-tail UTF8-tail UTF8-tail);ill-formed: wrong second byte" + path: "tests/src/unit-unicode4.cpp" + - type: cpp_test + name: "Unicode (4/5);RFC 3629;UTF8-4 (xF1-F3 UTF8-tail UTF8-tail UTF8-tail);ill-formed: wrong third byte" + path: "tests/src/unit-unicode4.cpp" + - type: cpp_test + name: "Unicode (4/5);RFC 3629;UTF8-4 (xF1-F3 UTF8-tail UTF8-tail UTF8-tail);ill-formed: wrong fourth byte" + path: "tests/src/unit-unicode4.cpp" + - type: cpp_test + name: "Unicode (5/5);RFC 3629;UTF8-4 (xF4 x80-8F UTF8-tail UTF8-tail);ill-formed: missing second byte" + path: "tests/src/unit-unicode5.cpp" + - type: cpp_test + name: "Unicode (5/5);RFC 3629;UTF8-4 (xF4 x80-8F UTF8-tail UTF8-tail);ill-formed: missing third byte" + path: "tests/src/unit-unicode5.cpp" + - type: cpp_test + name: "Unicode (5/5);RFC 3629;UTF8-4 (xF4 x80-8F UTF8-tail UTF8-tail);ill-formed: missing fourth byte" + path: "tests/src/unit-unicode5.cpp" + - type: cpp_test + name: "Unicode (5/5);RFC 3629;UTF8-4 (xF4 x80-8F UTF8-tail UTF8-tail);ill-formed: wrong second byte" + path: "tests/src/unit-unicode5.cpp" + - type: cpp_test + name: "Unicode (5/5);RFC 3629;UTF8-4 (xF4 x80-8F UTF8-tail UTF8-tail);ill-formed: wrong third byte" + path: "tests/src/unit-unicode5.cpp" + - 
type: cpp_test + name: "Unicode (5/5);RFC 3629;UTF8-4 (xF4 x80-8F UTF8-tail UTF8-tail);ill-formed: wrong fourth byte" + path: "tests/src/unit-unicode5.cpp" +evidence: + type: check_test_results + configuration: + tests: + - unicode2 + - unicode3 + - unicode4 + - unicode5 +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library throws an exception on ill-formed UTF-8 data. \ No newline at end of file diff --git a/TSF/trustable/no-parsing-faults/NPF-07.md b/TSF/trustable/no-parsing-faults/NPF-07.md new file mode 100644 index 0000000000..b83c184bd7 --- /dev/null +++ b/TSF/trustable/no-parsing-faults/NPF-07.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The service provided by the nlohmann/json library parses well-formed UTF-8 encoded data only. \ No newline at end of file diff --git a/TSF/trustable/parse-json-data/PJD-01.md b/TSF/trustable/parse-json-data/PJD-01.md new file mode 100644 index 0000000000..1815268d66 --- /dev/null +++ b/TSF/trustable/parse-json-data/PJD-01.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The service provided by the nlohmann/json library provides implementations that parse JSON texts, ignoring the presence of a byte order mark rather than treating it as an error. \ No newline at end of file diff --git a/TSF/trustable/parse-json-data/PJD-02.md b/TSF/trustable/parse-json-data/PJD-02.md new file mode 100644 index 0000000000..be09fb721c --- /dev/null +++ b/TSF/trustable/parse-json-data/PJD-02.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The service provided by the nlohmann/json library transforms a JSON text into a C++ representation using C++ containers (for arrays and objects) and primitive datatypes (for strings, numbers, boolean, null). 
diff --git a/TSF/trustable/parse-json-data/PJD-03.md b/TSF/trustable/parse-json-data/PJD-03.md new file mode 100644 index 0000000000..4494ac8761 --- /dev/null +++ b/TSF/trustable/parse-json-data/PJD-03.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The service provided by the nlohmann/json library parses all texts that conform to the JSON grammar. \ No newline at end of file diff --git a/TSF/trustable/parse-json-data/PJD-04.md b/TSF/trustable/parse-json-data/PJD-04.md new file mode 100644 index 0000000000..217fccaa39 --- /dev/null +++ b/TSF/trustable/parse-json-data/PJD-04.md @@ -0,0 +1,18 @@ +--- +level: 1.1 +normative: true +references: + - type: cpp_test + name: "parser class - core;parse;number;integers" + path: "TSF/tests/unit-class_parser_core.cpp" +evidence: + type: check_test_results + configuration: + tests: + - class_parser_core +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library correctly parses 64-bit integers (exceeding the range defined in RFC8259). \ No newline at end of file diff --git a/TSF/trustable/statements/JLS-01.md b/TSF/trustable/statements/JLS-01.md new file mode 100644 index 0000000000..bd8a4e06f2 --- /dev/null +++ b/TSF/trustable/statements/JLS-01.md @@ -0,0 +1,13 @@ +--- +level: 1.1 +normative: true +references: + - type: web_content + url: "https://github.com/nlohmann/json/blob/develop/.github/workflows/ubuntu.yml#L9" + description: "The trigger condition for the CI workflow that executes the test suites." +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The CI pipeline in nlohmann/json executes the unit and integration test suites on each pull request (opened, reopened, synchronized). 
\ No newline at end of file diff --git a/TSF/trustable/statements/JLS-02.md b/TSF/trustable/statements/JLS-02.md new file mode 100644 index 0000000000..814585ea16 --- /dev/null +++ b/TSF/trustable/statements/JLS-02.md @@ -0,0 +1,28 @@ +--- +level: 1.1 +normative: true +references: + - type: project_website + url: "https://introspector.oss-fuzz.com/project-profile?project=json" + description: "most recent report for fuzzing introspection of nlohmann/json with historical plots" + - type: web_content + url: "https://storage.googleapis.com/oss-fuzz-introspector/json/inspector-report/20250824/fuzz_report.html" + description: "persistent storage of fuzz-testing-report for nlohmann/json version 3.12.0 on 24.08.2025" + - type: web_content + url: "https://raw.githubusercontent.com/nlohmann/json/refs/heads/develop/.github/workflows/cifuzz.yml" + description: "Configuration file for Fuzz-Testing pipeline in the original nlohmann/json repository" + - type: web_content + url: "https://scorecard.dev/viewer/?uri=github.com%2Fnlohmann%2Fjson" + description: "OpenSSF Scorecard Report for nlohmann/json, where score for 'Fuzzing' supports this statement." +evidence: + type: https_response_time + configuration: + target_seconds: 2 + urls: + - "https://introspector.oss-fuzz.com/project-profile?project=json" +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +Fuzz testing is used in the original nlohmann/json repository (https://github.com/nlohmann/json) to uncover edge cases and failure modes throughout development. (https://github.com/nlohmann/json/blob/develop/tests/fuzzing.md) \ No newline at end of file diff --git a/TSF/trustable/statements/JLS-03.md b/TSF/trustable/statements/JLS-03.md new file mode 100644 index 0000000000..c6ed45fede --- /dev/null +++ b/TSF/trustable/statements/JLS-03.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +Automated tests within the TSF documentation are reviewed by a Subject Matter Expert to verify they test the properties they claim to. 
\ No newline at end of file diff --git a/TSF/trustable/statements/JLS-04.md b/TSF/trustable/statements/JLS-04.md new file mode 100644 index 0000000000..e271f5899f --- /dev/null +++ b/TSF/trustable/statements/JLS-04.md @@ -0,0 +1,23 @@ +--- +level: 1.1 +normative: true +references: + - type: verbose_file + path: ".github/workflows/dependency-review.yml" + description: "The workflow scans PRs for dependency changes and vulnerabilities." +evidence: + type: "check_artifact_exists" + configuration: + check_amalgamation: exclude + codeql: exclude + dependency_review: include + labeler: exclude + publish_documentation: exclude + test_trudag_extensions: exclude + ubuntu: exclude +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +External dependencies within nlohmann/json are checked for potential security vulnerabilities with each pull request to main. Merging is blocked until all warnings are resolved. \ No newline at end of file diff --git a/TSF/trustable/statements/JLS-05.md b/TSF/trustable/statements/JLS-05.md new file mode 100644 index 0000000000..712d5c25b0 --- /dev/null +++ b/TSF/trustable/statements/JLS-05.md @@ -0,0 +1,46 @@ +--- +level: 1.1 +normative: true +references: + - type: project_website + url: "https://github.com/nlohmann/json/issues" + description: "contains the collected github-issues for nlohmann/json" + - type: project_website + url: "https://github.com/nlohmann/json/graphs/commit-activity" + description: "presents the commit activity of the past year" + - type: project_website + url: "https://github.com/nlohmann/json/graphs/contributors" + description: "presents commits over time and per contributor" + - type: project_website + url: "https://github.com/nlohmann/json/forks?include=active&page=1&period=&sort_by=last_updated" + description: "lists all forks of nlohmann/json by last updated" + - type: project_website + url: "https://github.com/nlohmann/json/pulse" + description: "presents activity over the past week" + - type: project_website + url: 
"https://github.com/orgs/score-json/discussions/27#discussion-8594385" + description: "comparison between JSON libraries demonstrating the popularity of nlohmann/json" + - type: project_website + url: "https://json.nlohmann.me/home/customers/" + description: "presents a list of a subset of all customers who are using the nlohmann/json library" + - type: web_content + url: "https://github.com/nlohmann/json/releases/tag/v3.12.0" + description: "release notes for v3.12.0, listing bugs, CVEs and warnings which were either fixed or mitigated since last release" +evidence: + type: https_response_time + configuration: + target_seconds: 2 + urls: + - "https://github.com/nlohmann/json/issues" + - "https://github.com/nlohmann/json/graphs/commit-activity" + - "https://github.com/nlohmann/json/graphs/contributors" + - "https://github.com/nlohmann/json/forks?include=active&page=1&period=&sort_by=last_updated" + - "https://github.com/nlohmann/json/pulse" + - "https://github.com/orgs/score-json/discussions/27#discussion-8594385" + - "https://json.nlohmann.me/home/customers/" +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The nlohmann/json library is widely used and actively maintained; bugs and misbehaviours are tracked publicly and transparently. diff --git a/TSF/trustable/statements/JLS-06.md b/TSF/trustable/statements/JLS-06.md new file mode 100644 index 0000000000..2ec39d1a94 --- /dev/null +++ b/TSF/trustable/statements/JLS-06.md @@ -0,0 +1,12 @@ +--- +level: 1.1 +normative: true +references: + - type: web_content + url: "https://scorecard.dev/viewer/?uri=github.com%2Fnlohmann%2Fjson" + description: "OpenSSF Scorecard Report for nlohmann/json, where score for 'Code-Review' reflects this statement." +score: + Erikhu1: 0.3 +--- + +Pull requests in the nlohmann/json repository are merged only after code review. 
diff --git a/TSF/trustable/statements/JLS-07.md b/TSF/trustable/statements/JLS-07.md new file mode 100644 index 0000000000..870cfb9665 --- /dev/null +++ b/TSF/trustable/statements/JLS-07.md @@ -0,0 +1,19 @@ +--- +level: 1.1 +normative: true +references: + - type: project_website + url: "https://github.com/nlohmann/json/actions?query=event%3Apush+branch%3Adevelop" + description: "List of all pushes to the develop branch in nlohmann/json, showing that all commits are done by nlohmann and indicating that direct commits are not possible." +evidence: + type: https_response_time + configuration: + target_seconds: 2 + urls: + - "https://github.com/nlohmann/json/actions?query=event%3Apush+branch%3Adevelop" +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The develop branch of nlohmann/json is protected, i.e. no direct commits are possible. diff --git a/TSF/trustable/statements/JLS-08.md b/TSF/trustable/statements/JLS-08.md new file mode 100644 index 0000000000..21c3ae182d --- /dev/null +++ b/TSF/trustable/statements/JLS-08.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +Each statement within the TSF documentation is scored based on SME reviews or automatic validation functions. (TODO) \ No newline at end of file diff --git a/TSF/trustable/statements/JLS-09.md b/TSF/trustable/statements/JLS-09.md new file mode 100644 index 0000000000..78c9b43d97 --- /dev/null +++ b/TSF/trustable/statements/JLS-09.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +Scores within the TSF documentation are reasonably, systematically and repeatably accumulated. (TODO) \ No newline at end of file diff --git a/TSF/trustable/statements/JLS-10.md b/TSF/trustable/statements/JLS-10.md new file mode 100644 index 0000000000..e6c0555d38 --- /dev/null +++ b/TSF/trustable/statements/JLS-10.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +Every release of nlohmann/json includes source code, build instructions, tests and attestations. 
(TODO: Test result summary) \ No newline at end of file diff --git a/TSF/trustable/statements/JLS-11.md b/TSF/trustable/statements/JLS-11.md new file mode 100644 index 0000000000..58d5348e91 --- /dev/null +++ b/TSF/trustable/statements/JLS-11.md @@ -0,0 +1,17 @@ +--- +level: 1.1 +normative: true +references: + - type: web_content + url: "https://github.com/eclipse-score/inc_nlohmann_json/blob/save_historical_data/TSF/misbehaviours.md" + description: "List of outstanding bugs as well as fixes for developed code that are outstanding, not yet applied." +evidence: + type: check_issues + configuration: + release_date: "2025-04-11T08:43:39Z" + list_of_known_misbehaviours: "./TSF/docs/nlohmann_misbehaviours_comments.md" +score: + Erikhu1: 1.0 +--- + +Outstanding bugs or misbehaviours are analyzed within eclipse-score/inc_nlohmann_json to determine whether they are relevant for S-CORE's use cases of the nlohmann/json library. diff --git a/TSF/trustable/statements/JLS-12.md b/TSF/trustable/statements/JLS-12.md new file mode 100644 index 0000000000..3e3ac7d3f7 --- /dev/null +++ b/TSF/trustable/statements/JLS-12.md @@ -0,0 +1,13 @@ +--- +level: 1.1 +normative: true +references: + - type: web_content + url: "https://github.com/nlohmann/json/community" + description: "List of defined community standards in nlohmann/json" +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The nlohmann/json repository has well-defined community standards, including a contribution guideline and a security policy. 
 \ No newline at end of file diff --git a/TSF/trustable/statements/JLS-13.md b/TSF/trustable/statements/JLS-13.md new file mode 100644 index 0000000000..dc142bdd43 --- /dev/null +++ b/TSF/trustable/statements/JLS-13.md @@ -0,0 +1,13 @@ +--- +level: 1.1 +normative: true +references: + - type: website + url: "https://eclipse-score.github.io/process_description/main/general_concepts/score_review_concept.html" + description: "Documentation of S-CORE methodologies" +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The S-CORE methodologies are followed in eclipse-score/inc_nlohmann_json. \ No newline at end of file diff --git a/TSF/trustable/statements/JLS-14.md b/TSF/trustable/statements/JLS-14.md new file mode 100644 index 0000000000..bab50c00cb --- /dev/null +++ b/TSF/trustable/statements/JLS-14.md @@ -0,0 +1,15 @@ +--- +level: 1.1 +normative: true +references: + - type: website + url: "https://github.com/nlohmann/json/tree/v3.12.0" + description: "release site of nlohmann/json containing the sha values" +evidence: + type: sha_checker + configuration: + binary: "./single_include/nlohmann/json.hpp" + sha: "aaf127c04cb31c406e5b04a63f1ae89369fccde6d8fa7cdda1ed4f32dfc5de63" +--- + +The SHA value of the nlohmann/json library in use within eclipse-score/inc_nlohmann_json coincides with the SHA value provided by Niels Lohmann for that version. diff --git a/TSF/trustable/statements/JLS-16.md b/TSF/trustable/statements/JLS-16.md new file mode 100644 index 0000000000..9fee7fc698 --- /dev/null +++ b/TSF/trustable/statements/JLS-16.md @@ -0,0 +1,16 @@ +--- +references: + - type: verbose_file + path: "./TSF/docs/list_of_test_environments.md" + description: "The list of all test-cases together with their execution environments" +evidence: + type: check_list_of_tests + configuration: + sources: + - "./tests/src" + - "./TSF/tests" +level: 1.1 +normative: true +--- + +A list of tests, which is extracted from the test execution, is provided, along with a list of test environments. 
\ No newline at end of file diff --git a/TSF/trustable/statements/JLS-17.md b/TSF/trustable/statements/JLS-17.md new file mode 100644 index 0000000000..9b9667d8ed --- /dev/null +++ b/TSF/trustable/statements/JLS-17.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +A github workflow calculates the fraction of expectations covered by tests in eclipse-score/inc_nlohmann_json (TODO). \ No newline at end of file diff --git a/TSF/trustable/statements/JLS-18.md b/TSF/trustable/statements/JLS-18.md new file mode 100644 index 0000000000..e63e116174 --- /dev/null +++ b/TSF/trustable/statements/JLS-18.md @@ -0,0 +1,20 @@ +--- +level: 1.1 +normative: true +references: + - type: verbose_file + path: ./TSF/scripts/capture_test_data_memory_sensitive.py + description: "This script extracts the test-results from the junit-files generated by ctest and writes these into a data-base." + - type: verbose_file + path: ./.github/workflows/ubuntu.yml + description: "This workflow runs the tests and generates test-reports as junit-files, which are given to the script capture_test_data.py." +evidence: + type: file_exists + configuration: + files: + - "./artifacts/TestResults.db" +score: + Jonas-Kirchhoff: 0.75 +--- + +Results from tests are accurately captured. 
\ No newline at end of file diff --git a/TSF/trustable/statements/JLS-19.md b/TSF/trustable/statements/JLS-19.md new file mode 100644 index 0000000000..35e35b6974 --- /dev/null +++ b/TSF/trustable/statements/JLS-19.md @@ -0,0 +1,21 @@ +--- +level: 1.1 +normative: true +references: + - type: web_content + url: "https://raw.githubusercontent.com/nlohmann/json/refs/heads/develop/CMakeLists.txt" + description: "CMake build manifest file" + - type: web_content + url: "https://raw.githubusercontent.com/nlohmann/json/refs/heads/develop/BUILD.bazel" + description: "Bazel build manifest file" + - type: web_content + url: "https://raw.githubusercontent.com/nlohmann/json/refs/heads/develop/meson.build" + description: "Meson build manifest file" + - type: web_content + url: "https://raw.githubusercontent.com/nlohmann/json/refs/heads/develop/Package.swift" + description: "Swift Package Manager manifest file" +score: + Erikhu1: 1.0 +--- + +All library components, build dependencies, and build tools in the nlohmann/json repository are declared in build system manifests. 
diff --git a/TSF/trustable/statements/JLS-20.md b/TSF/trustable/statements/JLS-20.md new file mode 100644 index 0000000000..260608a70a --- /dev/null +++ b/TSF/trustable/statements/JLS-20.md @@ -0,0 +1,24 @@ +--- +level: 1.1 +normative: true +references: + - type: verbose_file + path: ./.github/workflows/parent-workflow.yml + description: "github workflow running on push to main and triggering the workflow publish_documentation" + - type: verbose_file + path: ./.github/workflows/publish_documentation.yml + description: "github workflow executing calculation and storage of trustable scores" + - type: website + url: "https://github.com/eclipse-score/inc_nlohmann_json/blob/save_historical_data/TSF/TrustableScoring.db" + description: "the database containing the trustable scores" +evidence: + type: https_response_time + configuration: + target: 2.0 + urls: + - https://github.com/eclipse-score/inc_nlohmann_json/blob/save_historical_data/TSF/TrustableScoring.db +score: + Jonas-Kirchhoff: 1.0 +--- + +A github workflow of eclipse-score/inc_nlohmann_json saves the history of scores in the trustable graph to derive trends. \ No newline at end of file diff --git a/TSF/trustable/statements/JLS-21.md b/TSF/trustable/statements/JLS-21.md new file mode 100644 index 0000000000..ca4ddbff62 --- /dev/null +++ b/TSF/trustable/statements/JLS-21.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +A score is calculated based on the number of mirrored and unmirrored things. 
(TODO) \ No newline at end of file diff --git a/TSF/trustable/statements/JLS-22.md b/TSF/trustable/statements/JLS-22.md new file mode 100644 index 0000000000..af6a89706a --- /dev/null +++ b/TSF/trustable/statements/JLS-22.md @@ -0,0 +1,27 @@ +--- +level: 1.1 +normative: true +references: + - type: verbose_file + path: ./.github/workflows/parent-workflow.yml + description: "github workflow running daily and triggering the workflow ubuntu" + - type: verbose_file + path: ./.github/workflows/ubuntu.yml + description: "workflow, in which unit tests are executed with a myriad of test environments and test results are captured." + - type: verbose_file + path: ./TSF/scripts/capture_test_data.py + description: "script, which collects the data produced by ctest in a database" + - type: website + url: "https://github.com/eclipse-score/inc_nlohmann_json/blob/save_historical_data/TSF/MemoryEfficientTestResultData.db" + description: "the database containing the test results" +evidence: + type: https_response_time + configuration: + target: 2.0 + urls: + - https://github.com/eclipse-score/inc_nlohmann_json/blob/save_historical_data/TSF/MemoryEfficientTestResultData.db +score: + Jonas-Kirchhoff: 1.0 +--- + +A github workflow of eclipse-score/inc_nlohmann_json executes the unit tests daily and saves the results as time-series data. 
\ No newline at end of file diff --git a/TSF/trustable/statements/JLS-23.md b/TSF/trustable/statements/JLS-23.md new file mode 100644 index 0000000000..06388da9b0 --- /dev/null +++ b/TSF/trustable/statements/JLS-23.md @@ -0,0 +1,22 @@ +--- +level: 1.1 +normative: true +references: + - type: project_website + url: "https://github.com/nlohmann/json" + description: "Start-page of the original nlohmann/json project" + - type: project_website + url: "https://github.com/eclipse-score/inc_nlohmann_json" + description: "Start-page of the mirror of nlohmann/json within Eclipse S-CORE" +evidence: + type: https_response_time + configuration: + target_seconds: 2 + urls: + - "https://github.com/nlohmann/json" + - "https://github.com/eclipse-score/inc_nlohmann_json" +score: + mishu-dev: 1.0 +--- + +The Eclipse S-CORE organization mirrors the nlohmann/json project in a github fork. \ No newline at end of file diff --git a/TSF/trustable/statements/JLS-24.md b/TSF/trustable/statements/JLS-24.md new file mode 100644 index 0000000000..8559289037 --- /dev/null +++ b/TSF/trustable/statements/JLS-24.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The nlohmann/json library recognizes malformed JSON and returns an exception. \ No newline at end of file diff --git a/TSF/trustable/statements/JLS-25.md b/TSF/trustable/statements/JLS-25.md new file mode 100644 index 0000000000..897d7abd9c --- /dev/null +++ b/TSF/trustable/statements/JLS-25.md @@ -0,0 +1,15 @@ +--- +level: 1.1 +normative: true +references: + - type: web_content + url: "https://scorecard.dev/viewer/?uri=github.com%2Fnlohmann%2Fjson" + description: "OpenSSF Scorecard Report for nlohmann/json, where scores for 'Security-Policy' and 'Code-Review' reflect this statement." + - type: project_website + url: "https://github.com/nlohmann/json?tab=contributing-ov-file#readme" + description: "Contribution Guidelines for nlohmann/json, where it is indirectly indicated that all changes are reviewed." 
+score: + Erikhu1: 0.8 +--- + +Malicious code changes in nlohmann/json are mitigated by code reviews, adhering to the contribution guidelines and security policy specified by nlohmann/json. \ No newline at end of file diff --git a/TSF/trustable/statements/JLS-26.md b/TSF/trustable/statements/JLS-26.md new file mode 100644 index 0000000000..11a3d32134 --- /dev/null +++ b/TSF/trustable/statements/JLS-26.md @@ -0,0 +1,14 @@ +--- +level: 1.1 +normative: true +references: + - type: workflow_failures + owner: "nlohmann" + repo: "json" + branch: "master" +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +Any failed CI pipeline executions in the master branch of the nlohmann/json repository are analyzed and fixed. \ No newline at end of file diff --git a/TSF/trustable/statements/JLS-27.md b/TSF/trustable/statements/JLS-27.md new file mode 100644 index 0000000000..62cc61cd9b --- /dev/null +++ b/TSF/trustable/statements/JLS-27.md @@ -0,0 +1,15 @@ +--- +level: 1.1 +normative: true +evidence: + type: coveralls_reporter + configuration: + owner: "score-json" + repo: "json" + branch: "main" + line_coverage: 99.186 + branch_coverage: 93.865 + digits: 3 +--- + +The test coverage for this version of nlohmann/json is monitored using Coveralls and is not decreasing over time, unless reasonably justified. \ No newline at end of file diff --git a/TSF/trustable/statements/JLS-28.md b/TSF/trustable/statements/JLS-28.md new file mode 100644 index 0000000000..262f211555 --- /dev/null +++ b/TSF/trustable/statements/JLS-28.md @@ -0,0 +1,12 @@ +--- +level: 1.1 +normative: true +references: + - type: project_website + url: "https://github.com/nlohmann/json/discussions/4967" + description: "Discussion in nlohmann/json showing that no triage processes for bugs are currently in place" +score: + Erikhu1: 0.0 +--- + +Outstanding bugs and misbehaviours are triaged in the nlohmann/json repository. 
\ No newline at end of file diff --git a/TSF/trustable/statements/JLS-29.md b/TSF/trustable/statements/JLS-29.md new file mode 100644 index 0000000000..21e98def7b --- /dev/null +++ b/TSF/trustable/statements/JLS-29.md @@ -0,0 +1,15 @@ +--- +level: 1.1 +normative: true +references: + - type: web_content + url: "https://github.com/nlohmann/json/releases/tag/v3.12.0" + description: "release notes for v3.12.0, listing bugs, CVEs and warnings which were either fixed or mitigated since last release" + - type: web_content + url: "https://scorecard.dev/viewer/?uri=github.com%2Fnlohmann%2Fjson" + description: "OpenSSF Scorecard Report for nlohmann/json, where scores for 'Vulnerabilities', 'Pinned-Dependencies' and 'Dangerous-Workflow' support this statement." +score: + Erikhu1: 0.7 +--- + +Known bugs, misbehaviours and CVEs are analyzed and either fixed or mitigated in the nlohmann/json repository. \ No newline at end of file diff --git a/TSF/trustable/statements/JLS-30.md b/TSF/trustable/statements/JLS-30.md new file mode 100644 index 0000000000..817a3a49ec --- /dev/null +++ b/TSF/trustable/statements/JLS-30.md @@ -0,0 +1,16 @@ +--- +level: 1.1 +normative: true +references: + - type: web_content + url: "https://scorecard.dev/viewer/?uri=github.com%2Fnlohmann%2Fjson" + description: "OpenSSF Scorecard Report for nlohmann/json, where score for 'Vulnerabilities' shows that there are no outstanding CVEs." + - type: web_content + url: "https://github.com/nlohmann/json/discussions/4975" + description: "Screenshot of dismissed code scanning alerts, which can also be dismissed in S-CORE." + +score: + Erikhu1: 0.5 +--- + +Outstanding CVEs are analyzed within eclipse-score/inc_nlohmann_json to determine whether they can be dismissed, and/or are relevant for S-CORE's use cases of the nlohmann/json library. 
\ No newline at end of file diff --git a/TSF/trustable/statements/JLS-31.md b/TSF/trustable/statements/JLS-31.md new file mode 100644 index 0000000000..0d97ac3cad --- /dev/null +++ b/TSF/trustable/statements/JLS-31.md @@ -0,0 +1,12 @@ +--- +level: 1.1 +normative: true +references: + - type: web_content + url: "https://scorecard.dev/viewer/?uri=github.com%2Fnlohmann%2Fjson" + description: "OpenSSF Scorecard Report for nlohmann/json, where score for 'SAST' supports this statement." +score: + Erikhu1: 0.9 +--- + +The nlohmann/json repository uses a static code analysis tool. \ No newline at end of file diff --git a/TSF/trustable/statements/JLS-32.md b/TSF/trustable/statements/JLS-32.md new file mode 100644 index 0000000000..be2adf4f54 --- /dev/null +++ b/TSF/trustable/statements/JLS-32.md @@ -0,0 +1,12 @@ +--- +level: 1.1 +normative: true +references: + - type: project_website + url: "https://github.com/nlohmann/json/blob/develop/.github/CODEOWNERS" + description: "CODEOWNERS file specifying that changes to any file requests @nlohmann for code review in case of a pull request" +score: + Erikhu1: 1.0 +--- + +All pull requests to the develop branch in the nlohmann/json repository trigger a request for review from Niels Lohmann (@nlohmann). \ No newline at end of file diff --git a/TSF/trustable/statements/JLS-33.md b/TSF/trustable/statements/JLS-33.md new file mode 100644 index 0000000000..8bac5fb11b --- /dev/null +++ b/TSF/trustable/statements/JLS-33.md @@ -0,0 +1,12 @@ +--- +level: 1.1 +normative: true +references: + - type: project_website + url: "https://github.com/nlohmann/json/security/advisories/new" + description: "the vulnerability reporting template used in nlohmann/json, which includes a severity section and CVSS scoring" +score: + Erikhu1: 1.0 +--- + +Outstanding CVEs are triaged in the nlohmann/json repository. 
\ No newline at end of file diff --git a/TSF/trustable/statements/JLS-34.md b/TSF/trustable/statements/JLS-34.md new file mode 100644 index 0000000000..0cb6c0a2d5 --- /dev/null +++ b/TSF/trustable/statements/JLS-34.md @@ -0,0 +1,12 @@ +--- +level: 1.1 +normative: true +references: + - type: include_list + path: single_include/nlohmann/json.hpp + description: "file containing all include occurrences of the score-project." +score: + Erikhu1: 1.0 +--- + +The nlohmann/json library has no external components or dependencies besides the C++ standard components. \ No newline at end of file diff --git a/TSF/trustable/statements/JLS-35.md b/TSF/trustable/statements/JLS-35.md new file mode 100644 index 0000000000..eb1e3d01f6 --- /dev/null +++ b/TSF/trustable/statements/JLS-35.md @@ -0,0 +1,15 @@ +--- +level: 1.1 +normative: true +references: + - type: web_content + url: "https://scorecard.dev/viewer/?uri=github.com%2Fnlohmann%2Fjson" + description: "OpenSSF Scorecard Report for nlohmann/json, where score for 'CI-Tests' supports this statement" + - type: web_content + url: "https://github.com/nlohmann/json/pulls?q=is%3Apr+is%3Aclosed+review%3Aapproved" + description: "All approved pull requests in the nlohmann/json repository, with the results of the CI pipeline executions." +score: + Erikhu1: 0.9 +--- + +Pull requests in the nlohmann/json repository are merged only after running CI-tests and successfully passing the pipeline. \ No newline at end of file diff --git a/TSF/trustable/tenets/TT-CHANGES.md b/TSF/trustable/tenets/TT-CHANGES.md new file mode 100644 index 0000000000..9acdc7367e --- /dev/null +++ b/TSF/trustable/tenets/TT-CHANGES.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The nlohmann/json library is actively maintained, with regular updates to dependencies, and changes are verified to prevent regressions. 
\ No newline at end of file diff --git a/TSF/trustable/tenets/TT-CONFIDENCE.md b/TSF/trustable/tenets/TT-CONFIDENCE.md new file mode 100644 index 0000000000..6a33dcf793 --- /dev/null +++ b/TSF/trustable/tenets/TT-CONFIDENCE.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +Confidence in the nlohmann/json library is achieved by measuring and analysing behaviour and evidence over time within eclipse-score/inc_nlohmann_json. \ No newline at end of file diff --git a/TSF/trustable/tenets/TT-CONSTRUCTION.md b/TSF/trustable/tenets/TT-CONSTRUCTION.md new file mode 100644 index 0000000000..e2f34bbdf7 --- /dev/null +++ b/TSF/trustable/tenets/TT-CONSTRUCTION.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +Tools are provided to build the nlohmann/json library from trusted sources (also provided) with full reproducibility. \ No newline at end of file diff --git a/TSF/trustable/tenets/TT-EXPECTATIONS.md b/TSF/trustable/tenets/TT-EXPECTATIONS.md new file mode 100644 index 0000000000..b734a3420e --- /dev/null +++ b/TSF/trustable/tenets/TT-EXPECTATIONS.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +Documentation is provided within eclipse-score/inc_nlohmann_json, specifying what the nlohmann/json library is expected to do, and what it must not do, and how this is verified. \ No newline at end of file diff --git a/TSF/trustable/tenets/TT-PROVENANCE.md b/TSF/trustable/tenets/TT-PROVENANCE.md new file mode 100644 index 0000000000..7c277854cc --- /dev/null +++ b/TSF/trustable/tenets/TT-PROVENANCE.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +All inputs (and attestations for claims) for the nlohmann/json library are provided with known provenance. 
\ No newline at end of file diff --git a/TSF/trustable/tenets/TT-RESULTS.md b/TSF/trustable/tenets/TT-RESULTS.md new file mode 100644 index 0000000000..a5065c60ad --- /dev/null +++ b/TSF/trustable/tenets/TT-RESULTS.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +Evidence is provided within eclipse-score/inc_nlohmann_json to demonstrate that the nlohmann/json library does what it is supposed to do, and does not do what it must not do. \ No newline at end of file diff --git a/TSF/trustable/throw-ill-formed-json/TIJ-01.1.md b/TSF/trustable/throw-ill-formed-json/TIJ-01.1.md new file mode 100644 index 0000000000..900ad9c693 --- /dev/null +++ b/TSF/trustable/throw-ill-formed-json/TIJ-01.1.md @@ -0,0 +1,18 @@ +--- +level: 1.1 +normative: true +references: + - type: cpp_test + name: "parse;capitalisation" + path: "TSF/tests/unit-literals.cpp" +evidence: + type: check_test_results + configuration: + tests: + - literals +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library throws an exception on capitalised literal names. 
\ No newline at end of file diff --git a/TSF/trustable/throw-ill-formed-json/TIJ-01.2.md b/TSF/trustable/throw-ill-formed-json/TIJ-01.2.md new file mode 100644 index 0000000000..889921c82d --- /dev/null +++ b/TSF/trustable/throw-ill-formed-json/TIJ-01.2.md @@ -0,0 +1,37 @@ +--- +level: 1.1 +normative: true +references: + - type: JSON_testsuite + name: "nst's JSONTestSuite;test_parsing;n" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite/test_parsing/n_incomplete_false.json" + - "/nst_json_testsuite/test_parsing/n_incomplete_null.json" + - "/nst_json_testsuite/test_parsing/n_incomplete_true.json" + - "/nst_json_testsuite/test_parsing/n_structure_number_with_trailing_garbage.json" + description: "" + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;n" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/n_incomplete_false.json" + - "/nst_json_testsuite2/test_parsing/n_incomplete_null.json" + - "/nst_json_testsuite2/test_parsing/n_incomplete_true.json" + - "/nst_json_testsuite2/test_parsing/n_structure_capitalized_True.json" + description: "" + - type: cpp_test + name: "parse;illegal literals" + path: "TSF/tests/unit-literals.cpp" +evidence: + type: check_test_results + configuration: + tests: + - literals + - testsuites +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library throws an exception on any literal name other than true, false and null. \ No newline at end of file diff --git a/TSF/trustable/throw-ill-formed-json/TIJ-01.md b/TSF/trustable/throw-ill-formed-json/TIJ-01.md new file mode 100644 index 0000000000..197a5ab562 --- /dev/null +++ b/TSF/trustable/throw-ill-formed-json/TIJ-01.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The parsing service provided by the nlohmann/json library throws an exception on ill-formed literal names. 
\ No newline at end of file diff --git a/TSF/trustable/throw-ill-formed-json/TIJ-02.1.md b/TSF/trustable/throw-ill-formed-json/TIJ-02.1.md new file mode 100644 index 0000000000..9c7dc9275b --- /dev/null +++ b/TSF/trustable/throw-ill-formed-json/TIJ-02.1.md @@ -0,0 +1,18 @@ +--- +level: 1.1 +normative: true +references: + - type: cpp_test + name: "parser class - core;parse;number;invalid numbers" + path: "TSF/tests/unit-class_parser_core.cpp" +evidence: + type: check_test_results + configuration: + tests: + - class_parser_core +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library throws an exception on leading plus. \ No newline at end of file diff --git a/TSF/trustable/throw-ill-formed-json/TIJ-02.2.md b/TSF/trustable/throw-ill-formed-json/TIJ-02.2.md new file mode 100644 index 0000000000..28ca5afa15 --- /dev/null +++ b/TSF/trustable/throw-ill-formed-json/TIJ-02.2.md @@ -0,0 +1,30 @@ +--- +level: 1.1 +normative: true +references: + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;n" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/n_number_-01.json" + - "/nst_json_testsuite2/test_parsing/n_number_neg_int_starting_with_zero.json" + description: "Checks that -01 is rejected." + - type: cpp_test + name: "parser class - core;parse;number;invalid numbers" + path: "TSF/tests/unit-class_parser_core.cpp" + - type: cpp_test + name: "parse;leading zeroes" + path: "TSF/tests/unit-numbers.cpp" +evidence: + type: check_test_results + configuration: + tests: + - class_parser_core + - testsuites + - numbers +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library throws an exception on leading zeroes. 
\ No newline at end of file diff --git a/TSF/trustable/throw-ill-formed-json/TIJ-02.3.md b/TSF/trustable/throw-ill-formed-json/TIJ-02.3.md new file mode 100644 index 0000000000..2fab94820b --- /dev/null +++ b/TSF/trustable/throw-ill-formed-json/TIJ-02.3.md @@ -0,0 +1,28 @@ +--- +level: 1.1 +normative: true +references: + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;n" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/n_number_+Inf.json" + - "/nst_json_testsuite2/test_parsing/n_number_-NaN.json" + - "/nst_json_testsuite2/test_parsing/n_number_Inf.json" + - "/nst_json_testsuite2/test_parsing/n_number_NaN.json" + description: "Checks that NaN and Inf are rejected." + - type: cpp_test + name: "parse;illegal literal numbers" + path: "TSF/tests/unit-literals.cpp" +evidence: + type: check_test_results + configuration: + tests: + - literals + - testsuites +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library throws an exception on NaN and infinity. \ No newline at end of file diff --git a/TSF/trustable/throw-ill-formed-json/TIJ-02.4.md b/TSF/trustable/throw-ill-formed-json/TIJ-02.4.md new file mode 100644 index 0000000000..0b547a0670 --- /dev/null +++ b/TSF/trustable/throw-ill-formed-json/TIJ-02.4.md @@ -0,0 +1,21 @@ +--- +level: 1.1 +normative: true +references: + - type: cpp_test + name: "parse;exponents;U+0425" + path: "TSF/tests/unit-numbers.cpp" + - type: cpp_test + name: "parse;exponents;U+0436" + path: "TSF/tests/unit-numbers.cpp" +evidence: + type: check_test_results + configuration: + tests: + - numbers +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library throws an exception on U+0415 and U+0436 instead of U+0045 or U+0065. 
\ No newline at end of file diff --git a/TSF/trustable/throw-ill-formed-json/TIJ-02.5.md b/TSF/trustable/throw-ill-formed-json/TIJ-02.5.md new file mode 100644 index 0000000000..1b2f95e991 --- /dev/null +++ b/TSF/trustable/throw-ill-formed-json/TIJ-02.5.md @@ -0,0 +1,78 @@ +--- +level: 1.1 +normative: true +references: + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;n" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/n_number_++.json" + - "/nst_json_testsuite2/test_parsing/n_number_+1.json" + - "/nst_json_testsuite2/test_parsing/n_number_+Inf.json" + - "/nst_json_testsuite2/test_parsing/n_number_-01.json" + - "/nst_json_testsuite2/test_parsing/n_number_-1.0..json" + - "/nst_json_testsuite2/test_parsing/n_number_-2..json" + - "/nst_json_testsuite2/test_parsing/n_number_-NaN.json" + - "/nst_json_testsuite2/test_parsing/n_number_.-1.json" + - "/nst_json_testsuite2/test_parsing/n_number_.2e-3.json" + - "/nst_json_testsuite2/test_parsing/n_number_0.1.2.json" + - "/nst_json_testsuite2/test_parsing/n_number_0.3e+.json" + - "/nst_json_testsuite2/test_parsing/n_number_0.3e.json" + - "/nst_json_testsuite2/test_parsing/n_number_0.e1.json" + - "/nst_json_testsuite2/test_parsing/n_number_0_capital_E+.json" + - "/nst_json_testsuite2/test_parsing/n_number_0_capital_E.json" + - "/nst_json_testsuite2/test_parsing/n_number_0e+.json" + - "/nst_json_testsuite2/test_parsing/n_number_0e.json" + - "/nst_json_testsuite2/test_parsing/n_number_1.0e+.json" + - "/nst_json_testsuite2/test_parsing/n_number_1.0e-.json" + - "/nst_json_testsuite2/test_parsing/n_number_1.0e.json" + - "/nst_json_testsuite2/test_parsing/n_number_1_000.json" + - "/nst_json_testsuite2/test_parsing/n_number_1eE2.json" + - "/nst_json_testsuite2/test_parsing/n_number_2.e+3.json" + - "/nst_json_testsuite2/test_parsing/n_number_2.e-3.json" + - "/nst_json_testsuite2/test_parsing/n_number_2.e3.json" + - 
"/nst_json_testsuite2/test_parsing/n_number_9.e+.json" + - "/nst_json_testsuite2/test_parsing/n_number_Inf.json" + - "/nst_json_testsuite2/test_parsing/n_number_NaN.json" + - "/nst_json_testsuite2/test_parsing/n_number_U+FF11_fullwidth_digit_one.json" + - "/nst_json_testsuite2/test_parsing/n_number_expression.json" + - "/nst_json_testsuite2/test_parsing/n_number_hex_1_digit.json" + - "/nst_json_testsuite2/test_parsing/n_number_hex_2_digits.json" + - "/nst_json_testsuite2/test_parsing/n_number_infinity.json" + - "/nst_json_testsuite2/test_parsing/n_number_invalid+-.json" + - "/nst_json_testsuite2/test_parsing/n_number_invalid-negative-real.json" + - "/nst_json_testsuite2/test_parsing/n_number_invalid-utf-8-in-bigger-int.json" + - "/nst_json_testsuite2/test_parsing/n_number_invalid-utf-8-in-exponent.json" + - "/nst_json_testsuite2/test_parsing/n_number_invalid-utf-8-in-int.json" + - "/nst_json_testsuite2/test_parsing/n_number_minus_infinity.json" + - "/nst_json_testsuite2/test_parsing/n_number_minus_sign_with_trailing_garbage.json" + - "/nst_json_testsuite2/test_parsing/n_number_minus_space_1.json" + - "/nst_json_testsuite2/test_parsing/n_number_neg_int_starting_with_zero.json" + - "/nst_json_testsuite2/test_parsing/n_number_neg_real_without_int_part.json" + - "/nst_json_testsuite2/test_parsing/n_number_neg_with_garbage_at_end.json" + - "/nst_json_testsuite2/test_parsing/n_number_real_garbage_after_e.json" + - "/nst_json_testsuite2/test_parsing/n_number_real_with_invalid_utf8_after_e.json" + - "/nst_json_testsuite2/test_parsing/n_number_real_without_fractional_part.json" + - "/nst_json_testsuite2/test_parsing/n_number_starting_with_dot.json" + - "/nst_json_testsuite2/test_parsing/n_number_with_alpha.json" + - "/nst_json_testsuite2/test_parsing/n_number_with_alpha_char.json" + - "/nst_json_testsuite2/test_parsing/n_number_with_leading_zero.json" + description: "Tests whether various numbers with invalid syntax according to RFC8259 throw an exception." 
+ - type: cpp_test + name: "parse;operators" + path: "TSF/tests/unit-numbers.cpp" + - type: cpp_test + name: "parse;invalid whitespace" + path: "TSF/tests/unit-numbers.cpp" +evidence: + type: check_test_results + configuration: + tests: + - numbers + - testsuites +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library throws an exception on invalid number syntax. \ No newline at end of file diff --git a/TSF/trustable/throw-ill-formed-json/TIJ-02.md b/TSF/trustable/throw-ill-formed-json/TIJ-02.md new file mode 100644 index 0000000000..57dc33ac46 --- /dev/null +++ b/TSF/trustable/throw-ill-formed-json/TIJ-02.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The parsing service provided by the nlohmann/json library throws an exception on ill-formed numbers. \ No newline at end of file diff --git a/TSF/trustable/throw-ill-formed-json/TIJ-03.1.md b/TSF/trustable/throw-ill-formed-json/TIJ-03.1.md new file mode 100644 index 0000000000..a95f8ce68c --- /dev/null +++ b/TSF/trustable/throw-ill-formed-json/TIJ-03.1.md @@ -0,0 +1,18 @@ +--- +level: 1.1 +normative: true +references: + - type: cpp_test + name: "parser class - core;parse;string;errors" + path: "TSF/tests/unit-class_parser_core.cpp" +evidence: + type: check_test_results + configuration: + tests: + - class_parser_core +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library throws an exception on unescaped control characters. 
\ No newline at end of file diff --git a/TSF/trustable/throw-ill-formed-json/TIJ-03.2.md b/TSF/trustable/throw-ill-formed-json/TIJ-03.2.md new file mode 100644 index 0000000000..e6dd3a8ec0 --- /dev/null +++ b/TSF/trustable/throw-ill-formed-json/TIJ-03.2.md @@ -0,0 +1,18 @@ +--- +level: 1.1 +normative: true +references: + - type: cpp_test + name: "Unicode;unescaped unicode" + path: "TSF/tests/unit-strings.cpp" +evidence: + type: check_test_results + configuration: + tests: + - strings +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library throws an exception on unpaired utf-16 surrogates. \ No newline at end of file diff --git a/TSF/trustable/throw-ill-formed-json/TIJ-03.3.md b/TSF/trustable/throw-ill-formed-json/TIJ-03.3.md new file mode 100644 index 0000000000..a51f9a9abc --- /dev/null +++ b/TSF/trustable/throw-ill-formed-json/TIJ-03.3.md @@ -0,0 +1,28 @@ +--- +level: 1.1 +normative: true +references: + - type: cpp_test + name: "deserialization;contiguous containers;error cases" + path: "tests/src/unit-deserialization.cpp" + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;n" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/n_string_no_quotes_with_bad_escape.json" + - "/nst_json_testsuite2/test_parsing/n_string_single_doublequote.json" + - "/nst_json_testsuite2/test_parsing/n_string_single_quote.json" + - "/nst_json_testsuite2/test_parsing/n_string_single_string_no_double_quotes.json" + description: "Tests whether several improperly bounded strings throw an exception." +evidence: + type: check_test_results + configuration: + tests: + - deserialization + - testsuites +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library throws an exception on improperly bounded strings. 
\ No newline at end of file diff --git a/TSF/trustable/throw-ill-formed-json/TIJ-03.4.md b/TSF/trustable/throw-ill-formed-json/TIJ-03.4.md new file mode 100644 index 0000000000..5be74cacf8 --- /dev/null +++ b/TSF/trustable/throw-ill-formed-json/TIJ-03.4.md @@ -0,0 +1,38 @@ +--- +level: 1.1 +normative: true +references: + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;n" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/n_string_1_surrogate_then_escape.json" + - "/nst_json_testsuite2/test_parsing/n_string_1_surrogate_then_escape_u.json" + - "/nst_json_testsuite2/test_parsing/n_string_1_surrogate_then_escape_u1.json" + - "/nst_json_testsuite2/test_parsing/n_string_1_surrogate_then_escape_u1x.json" + - "/nst_json_testsuite2/test_parsing/n_string_accentuated_char_no_quotes.json" + - "/nst_json_testsuite2/test_parsing/n_string_backslash_00.json" + - "/nst_json_testsuite2/test_parsing/n_string_escape_x.json" + - "/nst_json_testsuite2/test_parsing/n_string_escaped_backslash_bad.json" + - "/nst_json_testsuite2/test_parsing/n_string_escaped_ctrl_char_tab.json" + - "/nst_json_testsuite2/test_parsing/n_string_escaped_emoji.json" + - "/nst_json_testsuite2/test_parsing/n_string_incomplete_escape.json" + - "/nst_json_testsuite2/test_parsing/n_string_incomplete_escaped_character.json" + - "/nst_json_testsuite2/test_parsing/n_string_incomplete_surrogate.json" + - "/nst_json_testsuite2/test_parsing/n_string_incomplete_surrogate_escape_invalid.json" + - "/nst_json_testsuite2/test_parsing/n_string_invalid-utf-8-in-escape.json" + - "/nst_json_testsuite2/test_parsing/n_string_invalid_backslash_esc.json" + - "/nst_json_testsuite2/test_parsing/n_string_invalid_unicode_escape.json" + - "/nst_json_testsuite2/test_parsing/n_string_invalid_utf8_after_escape.json" + description: "Tests whether various illegal control characters and utf-8 characters throw an exception." 
+evidence: + type: check_test_results + configuration: + tests: + - testsuites +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library throws an exception on escaped invalid characters. \ No newline at end of file diff --git a/TSF/trustable/throw-ill-formed-json/TIJ-03.5.md b/TSF/trustable/throw-ill-formed-json/TIJ-03.5.md new file mode 100644 index 0000000000..37c4210eb3 --- /dev/null +++ b/TSF/trustable/throw-ill-formed-json/TIJ-03.5.md @@ -0,0 +1,22 @@ +--- +level: 1.1 +normative: true +references: + - type: cpp_test + name: "Unicode (1/5);\\\\uxxxx sequences;incorrect sequences;incorrect surrogate values" + path: "tests/src/unit-unicode1.cpp" + - type: cpp_test + name: "Unicode;escaped utf-16 surrogates;ill-formed" + path: "TSF/tests/unit-strings.cpp" +evidence: + type: check_test_results + configuration: + tests: + - unicode1 + - strings +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library throws an exception on incorrect surrogate pairs. \ No newline at end of file diff --git a/TSF/trustable/throw-ill-formed-json/TIJ-03.md b/TSF/trustable/throw-ill-formed-json/TIJ-03.md new file mode 100644 index 0000000000..184a478727 --- /dev/null +++ b/TSF/trustable/throw-ill-formed-json/TIJ-03.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The parsing service provided by the nlohmann/json library throws an exception on ill-formed strings. 
\ No newline at end of file diff --git a/TSF/trustable/throw-ill-formed-json/TIJ-04.1.md b/TSF/trustable/throw-ill-formed-json/TIJ-04.1.md new file mode 100644 index 0000000000..132cb4dbe0 --- /dev/null +++ b/TSF/trustable/throw-ill-formed-json/TIJ-04.1.md @@ -0,0 +1,43 @@ +--- +level: 1.1 +normative: true +references: + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;n (previously overflowed)" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/n_structure_100000_opening_arrays.json" + description: "" + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;n" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/n_structure_close_unopened_array.json" + description: "" + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;n" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/n_structure_double_array.json" + - "/nst_json_testsuite2/test_parsing/n_structure_end_array.json" + - "/nst_json_testsuite2/test_parsing/n_structure_lone-invalid-utf-8.json" + - "/nst_json_testsuite2/test_parsing/n_structure_open_array_apostrophe.json" + - "/nst_json_testsuite2/test_parsing/n_structure_open_array_comma.json" + - "/nst_json_testsuite2/test_parsing/n_structure_open_array_open_object.json" + - "/nst_json_testsuite2/test_parsing/n_structure_open_object_close_array.json" + - "/nst_json_testsuite2/test_parsing/n_structure_unclosed_array.json" + - "/nst_json_testsuite2/test_parsing/n_structure_unclosed_array_partial_null.json" + - "/nst_json_testsuite2/test_parsing/n_structure_unclosed_array_unfinished_false.json" + - "/nst_json_testsuite2/test_parsing/n_structure_unclosed_array_unfinished_true.json" + description: "" +evidence: + type: check_test_results + configuration: + tests: + - testsuites +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json 
library throws an exception on improperly bounded arrays. \ No newline at end of file diff --git a/TSF/trustable/throw-ill-formed-json/TIJ-04.2.md b/TSF/trustable/throw-ill-formed-json/TIJ-04.2.md new file mode 100644 index 0000000000..94037ce5b3 --- /dev/null +++ b/TSF/trustable/throw-ill-formed-json/TIJ-04.2.md @@ -0,0 +1,28 @@ +--- +level: 1.1 +normative: true +references: + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;n" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/n_array_double_comma.json" + - "/nst_json_testsuite2/test_parsing/n_array_double_extra_comma.json" + - "/nst_json_testsuite2/test_parsing/n_array_just_comma.json" + - "/nst_json_testsuite2/test_parsing/n_array_number_and_comma.json" + - "/nst_json_testsuite2/test_parsing/n_array_number_and_several_commas.json" + - "/nst_json_testsuite2/test_parsing/n_structure_array_with_unclosed_string.json" + - "/nst_json_testsuite2/test_parsing/n_array_invalid_utf8.json" + - "/nst_json_testsuite2/test_parsing/n_array_just_minus.json" + description: "Checks that various \"proper\" arrays with improper elements throw an exception." +evidence: + type: check_test_results + configuration: + tests: + - testsuites +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library throws an exception on improper values within a properly bounded array. 
\ No newline at end of file diff --git a/TSF/trustable/throw-ill-formed-json/TIJ-04.3.md b/TSF/trustable/throw-ill-formed-json/TIJ-04.3.md new file mode 100644 index 0000000000..af8a91a9a7 --- /dev/null +++ b/TSF/trustable/throw-ill-formed-json/TIJ-04.3.md @@ -0,0 +1,21 @@ +--- +level: 1.1 +normative: true +references: + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;n" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/n_array_colon_instead_of_comma.json" + description: "Tests whether colon as value separator throws an exception." +evidence: + type: check_test_results + configuration: + tests: + - testsuites +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library throws an exception on improper value separators. \ No newline at end of file diff --git a/TSF/trustable/throw-ill-formed-json/TIJ-04.md b/TSF/trustable/throw-ill-formed-json/TIJ-04.md new file mode 100644 index 0000000000..91c676fae1 --- /dev/null +++ b/TSF/trustable/throw-ill-formed-json/TIJ-04.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The parsing service provided by the nlohmann/json library throws an exception on ill-formed arrays. 
\ No newline at end of file diff --git a/TSF/trustable/throw-ill-formed-json/TIJ-05.1.md b/TSF/trustable/throw-ill-formed-json/TIJ-05.1.md new file mode 100644 index 0000000000..3a157ad401 --- /dev/null +++ b/TSF/trustable/throw-ill-formed-json/TIJ-05.1.md @@ -0,0 +1,27 @@ +--- +level: 1.1 +normative: true +references: + - type: cpp_test + name: "deserialization;contiguous containers;error cases;case 15" + path: "tests/src/unit-deserialization.cpp" + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;n" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/n_structure_comma_instead_of_closing_brace.json" + - "/nst_json_testsuite2/test_parsing/n_structure_object_followed_by_closing_object.json" + - "/nst_json_testsuite2/test_parsing/n_structure_object_unclosed_no_value.json" + description: "" +evidence: + type: check_test_results + configuration: + tests: + - deserialization + - testsuites +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library throws an exception on improperly bounded objects. \ No newline at end of file diff --git a/TSF/trustable/throw-ill-formed-json/TIJ-05.2.md b/TSF/trustable/throw-ill-formed-json/TIJ-05.2.md new file mode 100644 index 0000000000..0fabb048aa --- /dev/null +++ b/TSF/trustable/throw-ill-formed-json/TIJ-05.2.md @@ -0,0 +1,35 @@ +--- +level: 1.1 +normative: true +references: + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;n" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/n_object_non_string_key.json" + - "/nst_json_testsuite2/test_parsing/n_object_non_string_key_but_huge_number_instead.json" + description: "Checks that numbers as keys are rejected." 
+ - type: cpp_test + name: "parse;names;numbers" + path: "TSF/tests/unit-objects.cpp" + - type: cpp_test + name: "parse;names;arrays" + path: "TSF/tests/unit-objects.cpp" + - type: cpp_test + name: "parse;names;objects" + path: "TSF/tests/unit-objects.cpp" + - type: cpp_test + name: "parse;names;literals" + path: "TSF/tests/unit-objects.cpp" +evidence: + type: check_test_results + configuration: + tests: + - objects + - testsuites +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library throws an exception if a non-string is used as name of any member. \ No newline at end of file diff --git a/TSF/trustable/throw-ill-formed-json/TIJ-05.3.md b/TSF/trustable/throw-ill-formed-json/TIJ-05.3.md new file mode 100644 index 0000000000..e42d38abee --- /dev/null +++ b/TSF/trustable/throw-ill-formed-json/TIJ-05.3.md @@ -0,0 +1,32 @@ +--- +level: 1.1 +normative: true +references: + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;n" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/n_object_single_quote.json" + - "/nst_json_testsuite2/test_parsing/n_object_unquoted_key.json" + - "/nst_json_testsuite2/test_parsing/n_object_non_string_key.json" + - "/nst_json_testsuite2/test_parsing/n_object_non_string_key_but_huge_number_instead.json" + - "/nst_json_testsuite2/test_parsing/n_object_key_with_single_quotes.json" + - "/nst_json_testsuite2/test_parsing/n_object_bracket_key.json" + description: "Checks that invalid names throw an exception." + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;i -> n" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/i_object_key_lone_2nd_surrogate.json" + description: "Checks that string with invalid utf16 surrogate as name throws an exception." 
+evidence: + type: check_test_results + configuration: + tests: + - testsuites +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library throws an exception if an improper string is used as name of any member. \ No newline at end of file diff --git a/TSF/trustable/throw-ill-formed-json/TIJ-05.4.md b/TSF/trustable/throw-ill-formed-json/TIJ-05.4.md new file mode 100644 index 0000000000..81928a112d --- /dev/null +++ b/TSF/trustable/throw-ill-formed-json/TIJ-05.4.md @@ -0,0 +1,21 @@ +--- +level: 1.1 +normative: true +references: + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;n" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/n_object_bad_value.json" + description: "Checks that the invalid literal \"truth\" as value throws an exception." +evidence: + type: check_test_results + configuration: + tests: + - testsuites +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library throws an exception if any member has an improper value. \ No newline at end of file diff --git a/TSF/trustable/throw-ill-formed-json/TIJ-05.5.md b/TSF/trustable/throw-ill-formed-json/TIJ-05.5.md new file mode 100644 index 0000000000..2ff5e34c15 --- /dev/null +++ b/TSF/trustable/throw-ill-formed-json/TIJ-05.5.md @@ -0,0 +1,38 @@ +--- +level: 1.1 +normative: true +references: + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;n" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/n_object_comma_instead_of_colon.json" + description: "Checks that comma instead of colon is rejected." + - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;n" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/n_object_double_colon.json" + description: "Checks that double colon is rejected." 
+ - type: JSON_testsuite + name: "nst's JSONTestSuite (2);test_parsing;n" + path: "tests/src/unit-testsuites.cpp" + test_suite_paths: + - "/nst_json_testsuite2/test_parsing/n_object_missing_colon.json" + - "/nst_json_testsuite2/test_parsing/n_object_missing_semicolon.json" + description: "Checks that the empty member separator is rejected." + - type: cpp_test + name: "parse;member separator" + path: "TSF/tests/unit-objects.cpp" +evidence: + type: check_test_results + configuration: + tests: + - objects + - testsuites +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library throws an exception on improper member separators. \ No newline at end of file diff --git a/TSF/trustable/throw-ill-formed-json/TIJ-05.md b/TSF/trustable/throw-ill-formed-json/TIJ-05.md new file mode 100644 index 0000000000..823ba2023f --- /dev/null +++ b/TSF/trustable/throw-ill-formed-json/TIJ-05.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The parsing service provided by the nlohmann/json library throws an exception on ill-formed objects. \ No newline at end of file diff --git a/TSF/trustable/throw-ill-formed-json/TIJ-06.md b/TSF/trustable/throw-ill-formed-json/TIJ-06.md new file mode 100644 index 0000000000..2272f69708 --- /dev/null +++ b/TSF/trustable/throw-ill-formed-json/TIJ-06.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The service provided by the nlohmann/json library recognises ill-formed byte-order marks and throws an exception. \ No newline at end of file diff --git a/TSF/trustable/trustable_graph.rst b/TSF/trustable/trustable_graph.rst new file mode 100644 index 0000000000..ea09dac668 --- /dev/null +++ b/TSF/trustable/trustable_graph.rst @@ -0,0 +1,8 @@ +.. _ta-analysis-subgraph: + +Trustable Graph +==================== + +.. 
image:: generated/graph.svg + :alt: Trustable Graph + :width: 600px \ No newline at end of file diff --git a/TSF/trustable/well-formed-json/WFJ-01.md b/TSF/trustable/well-formed-json/WFJ-01.md new file mode 100644 index 0000000000..6260df334f --- /dev/null +++ b/TSF/trustable/well-formed-json/WFJ-01.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The service provided by the nlohmann/json library checks the well-formedness of the literal names. \ No newline at end of file diff --git a/TSF/trustable/well-formed-json/WFJ-02.md b/TSF/trustable/well-formed-json/WFJ-02.md new file mode 100644 index 0000000000..33979005ed --- /dev/null +++ b/TSF/trustable/well-formed-json/WFJ-02.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The service provided by the nlohmann/json library checks the well-formedness of strings. \ No newline at end of file diff --git a/TSF/trustable/well-formed-json/WFJ-03.md b/TSF/trustable/well-formed-json/WFJ-03.md new file mode 100644 index 0000000000..56e23b8bd8 --- /dev/null +++ b/TSF/trustable/well-formed-json/WFJ-03.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The service provided by the nlohmann/json library checks the well-formedness of numbers. \ No newline at end of file diff --git a/TSF/trustable/well-formed-json/WFJ-04.md b/TSF/trustable/well-formed-json/WFJ-04.md new file mode 100644 index 0000000000..7b908ecfb9 --- /dev/null +++ b/TSF/trustable/well-formed-json/WFJ-04.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The service provided by the nlohmann/json library checks the well-formedness of arrays. \ No newline at end of file diff --git a/TSF/trustable/well-formed-json/WFJ-05.md b/TSF/trustable/well-formed-json/WFJ-05.md new file mode 100644 index 0000000000..3d742a4e5d --- /dev/null +++ b/TSF/trustable/well-formed-json/WFJ-05.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The service provided by the nlohmann/json library checks the well-formedness of objects. 
\ No newline at end of file diff --git a/TSF/trustable/well-formed-json/WFJ-06.md b/TSF/trustable/well-formed-json/WFJ-06.md new file mode 100644 index 0000000000..4c782a467c --- /dev/null +++ b/TSF/trustable/well-formed-json/WFJ-06.md @@ -0,0 +1,41 @@ +--- +level: 1.1 +normative: true +references: + - type: function_reference + name: "basic_json::accept" + path: "include/nlohmann/json.hpp" + description: "the public interface of the `accept`-functionality of nlohmann/json for single inputs" + overload: 1 + - type: function_reference + name: "basic_json::accept" + path: "include/nlohmann/json.hpp" + description: "the public interface of the `accept`-functionality of nlohmann/json for iterator inputs" + overload: 2 + - type: function_reference + name: "basic_json::accept" + path: "include/nlohmann/json.hpp" + description: "the public interface of the `accept`-functionality of nlohmann/json for input buffer" + overload: 3 + - type: function_reference + name: "parser::accept" + path: "include/nlohmann/detail/input/parser.hpp" + description: "the internal `accept`-functionality called by basic_json::accept" + - type: function_reference + name: "parser::sax_parse" + path: "include/nlohmann/detail/input/parser.hpp" + description: "called by parser::accept" + - type: function_reference + name: "parser::sax_parse_internal" + path: "include/nlohmann/detail/input/parser.hpp" + description: "called by parser::sax_parse" + - type: function_reference + name: "lexer::scan" + path: "include/nlohmann/detail/input/lexer.hpp" + description: "scans input, called in parser::sax_parse_internal" +score: + Jonas-Kirchhoff: 1.0 + Erikhu1: 1.0 +--- + +The service provided by the nlohmann/json library checks that a JSON value must be an object, array, number, or string, or one of the lowercase literal names false, null, or true \ No newline at end of file diff --git a/TSF/trustable/well-formed-json/WFJ-07.md b/TSF/trustable/well-formed-json/WFJ-07.md new file mode 100644 index 
0000000000..453b0c810d --- /dev/null +++ b/TSF/trustable/well-formed-json/WFJ-07.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The service provided by the nlohmann/json library checks that JSON is only serialized using UTF-8. \ No newline at end of file diff --git a/TSF/trustable/well-formed-json/WFJ-08.md b/TSF/trustable/well-formed-json/WFJ-08.md new file mode 100644 index 0000000000..35c4baac5b --- /dev/null +++ b/TSF/trustable/well-formed-json/WFJ-08.md @@ -0,0 +1,6 @@ +--- +level: 1.1 +normative: true +--- + +The service provided by the nlohmann/json library ignores byte order marks. \ No newline at end of file diff --git a/cmake/ci.cmake b/cmake/ci.cmake index ef4c257b18..ebf3d8154e 100644 --- a/cmake/ci.cmake +++ b/cmake/ci.cmake @@ -87,7 +87,7 @@ add_custom_target(ci_test_gcc -DJSON_BuildTests=ON -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_gcc COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_gcc - COMMAND cd ${PROJECT_BINARY_DIR}/build_gcc && ${CMAKE_CTEST_COMMAND} --parallel ${N} --output-on-failure + COMMAND cd ${PROJECT_BINARY_DIR}/build_gcc && ${CMAKE_CTEST_COMMAND} --parallel ${N} --output-on-failure --output-junit "../my_logs/ci_test_gcc_junit.xml" COMMENT "Compile and test with GCC using maximal warning flags" ) @@ -97,7 +97,7 @@ add_custom_target(ci_test_clang -DJSON_BuildTests=ON -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_clang COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_clang - COMMAND cd ${PROJECT_BINARY_DIR}/build_clang && ${CMAKE_CTEST_COMMAND} --parallel ${N} --output-on-failure + COMMAND cd ${PROJECT_BINARY_DIR}/build_clang && ${CMAKE_CTEST_COMMAND} --parallel ${N} --output-on-failure --output-junit "../my_logs/ci_test_clang_junit.xml" COMMENT "Compile and test with Clang using maximal warning flags" ) @@ -113,7 +113,7 @@ foreach(CXX_STANDARD 11 14 17 20 23 26) -DJSON_TestStandards=${CXX_STANDARD} -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_gcc_cxx${CXX_STANDARD} 
COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_gcc_cxx${CXX_STANDARD} - COMMAND cd ${PROJECT_BINARY_DIR}/build_gcc_cxx${CXX_STANDARD} && ${CMAKE_CTEST_COMMAND} --parallel ${N} --output-on-failure + COMMAND cd ${PROJECT_BINARY_DIR}/build_gcc_cxx${CXX_STANDARD} && ${CMAKE_CTEST_COMMAND} --parallel ${N} --output-on-failure --output-junit "../my_logs/ci_test_gcc_cxx${CXX_STANDARD}_junit.xml" COMMENT "Compile and test with GCC for C++${CXX_STANDARD}" ) @@ -124,7 +124,7 @@ foreach(CXX_STANDARD 11 14 17 20 23 26) -DJSON_TestStandards=${CXX_STANDARD} -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_clang_cxx${CXX_STANDARD} COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_clang_cxx${CXX_STANDARD} - COMMAND cd ${PROJECT_BINARY_DIR}/build_clang_cxx${CXX_STANDARD} && ${CMAKE_CTEST_COMMAND} --parallel ${N} --output-on-failure + COMMAND cd ${PROJECT_BINARY_DIR}/build_clang_cxx${CXX_STANDARD} && ${CMAKE_CTEST_COMMAND} --parallel ${N} --output-on-failure --output-junit "../my_logs/ci_test_clang_cxx${CXX_STANDARD}_junit.xml" COMMENT "Compile and test with Clang for C++${CXX_STANDARD}" ) @@ -137,7 +137,7 @@ foreach(CXX_STANDARD 11 14 17 20 23 26) -DCMAKE_EXE_LINKER_FLAGS="-lc++abi" -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_clang_cxx${CXX_STANDARD} COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_clang_cxx${CXX_STANDARD} - COMMAND cd ${PROJECT_BINARY_DIR}/build_clang_cxx${CXX_STANDARD} && ${CMAKE_CTEST_COMMAND} --parallel ${N} --output-on-failure + COMMAND cd ${PROJECT_BINARY_DIR}/build_clang_cxx${CXX_STANDARD} && ${CMAKE_CTEST_COMMAND} --parallel ${N} --output-on-failure --output-junit "../my_logs/ci_test_clang_libcxx_cxx${CXX_STANDARD}_junit.xml" COMMENT "Compile and test with Clang for C++${CXX_STANDARD} (libc++)" ) endforeach() @@ -152,7 +152,7 @@ add_custom_target(ci_test_noexceptions -DJSON_BuildTests=ON -DCMAKE_CXX_FLAGS=-DJSON_NOEXCEPTION -DDOCTEST_TEST_FILTER=--no-throw -S${PROJECT_SOURCE_DIR} 
-B${PROJECT_BINARY_DIR}/build_noexceptions COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_noexceptions - COMMAND cd ${PROJECT_BINARY_DIR}/build_noexceptions && ${CMAKE_CTEST_COMMAND} --parallel ${N} --output-on-failure + COMMAND cd ${PROJECT_BINARY_DIR}/build_noexceptions && ${CMAKE_CTEST_COMMAND} --parallel ${N} --output-on-failure --output-junit "../my_logs/ci_test_noexceptions_junit.xml" COMMENT "Compile and test with exceptions switched off" ) @@ -166,7 +166,7 @@ add_custom_target(ci_test_noimplicitconversions -DJSON_BuildTests=ON -DJSON_ImplicitConversions=OFF -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_noimplicitconversions COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_noimplicitconversions - COMMAND cd ${PROJECT_BINARY_DIR}/build_noimplicitconversions && ${CMAKE_CTEST_COMMAND} --parallel ${N} --output-on-failure + COMMAND cd ${PROJECT_BINARY_DIR}/build_noimplicitconversions && ${CMAKE_CTEST_COMMAND} --parallel ${N} --output-on-failure --output-junit "../my_logs/ci_test_noimplicitconversions_junit.xml" COMMENT "Compile and test with implicit conversions switched off" ) @@ -180,7 +180,7 @@ add_custom_target(ci_test_diagnostics -DJSON_BuildTests=ON -DJSON_Diagnostics=ON -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_diagnostics COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_diagnostics - COMMAND cd ${PROJECT_BINARY_DIR}/build_diagnostics && ${CMAKE_CTEST_COMMAND} --parallel ${N} --output-on-failure + COMMAND cd ${PROJECT_BINARY_DIR}/build_diagnostics && ${CMAKE_CTEST_COMMAND} --parallel ${N} --output-on-failure --output-junit "../my_logs/ci_test_diagnostics_junit.xml" COMMENT "Compile and test with improved diagnostics enabled" ) @@ -194,7 +194,7 @@ add_custom_target(ci_test_diagnostic_positions -DJSON_BuildTests=ON -DJSON_Diagnostic_Positions=ON -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_diagnostic_positions COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_diagnostic_positions - 
COMMAND cd ${PROJECT_BINARY_DIR}/build_diagnostic_positions && ${CMAKE_CTEST_COMMAND} --parallel ${N} --output-on-failure + COMMAND cd ${PROJECT_BINARY_DIR}/build_diagnostic_positions && ${CMAKE_CTEST_COMMAND} --parallel ${N} --output-on-failure --output-junit "../my_logs/ci_test_diagnostic_positions_junit.xml" COMMENT "Compile and test with diagnostic positions enabled" ) @@ -208,7 +208,7 @@ add_custom_target(ci_test_legacycomparison -DJSON_BuildTests=ON -DJSON_LegacyDiscardedValueComparison=ON -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_legacycomparison COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_legacycomparison - COMMAND cd ${PROJECT_BINARY_DIR}/build_legacycomparison && ${CMAKE_CTEST_COMMAND} --parallel ${N} --output-on-failure + COMMAND cd ${PROJECT_BINARY_DIR}/build_legacycomparison && ${CMAKE_CTEST_COMMAND} --parallel ${N} --output-on-failure --output-junit "../my_logs/ci_test_legacycomparison_junit.xml" COMMENT "Compile and test with legacy discarded value comparison enabled" ) @@ -223,7 +223,7 @@ add_custom_target(ci_test_noglobaludls -DCMAKE_CXX_FLAGS=-DJSON_TEST_NO_GLOBAL_UDLS -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_noglobaludls COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_noglobaludls - COMMAND cd ${PROJECT_BINARY_DIR}/build_noglobaludls && ${CMAKE_CTEST_COMMAND} --parallel ${N} --output-on-failure + COMMAND cd ${PROJECT_BINARY_DIR}/build_noglobaludls && ${CMAKE_CTEST_COMMAND} --parallel ${N} --output-on-failure --output-junit "../my_logs/ci_test_noglobaludls_junit.xml" COMMENT "Compile and test with global UDLs disabled" ) @@ -237,14 +237,14 @@ add_custom_target(ci_test_coverage -DJSON_BuildTests=ON -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_coverage COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_coverage - COMMAND cd ${PROJECT_BINARY_DIR}/build_coverage && ${CMAKE_CTEST_COMMAND} --parallel ${N} --output-on-failure + COMMAND cd ${PROJECT_BINARY_DIR}/build_coverage && 
${CMAKE_CTEST_COMMAND} --parallel ${N} --output-on-failure --output-junit "../my_logs/ci_test_coverage_1_junit.xml" COMMAND CXX=g++ ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -GNinja -DCMAKE_CXX_FLAGS="-m32;--coverage;-fprofile-arcs;-ftest-coverage" -DJSON_BuildTests=ON -DJSON_32bitTest=ONLY -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_coverage32 COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_coverage32 - COMMAND cd ${PROJECT_BINARY_DIR}/build_coverage32 && ${CMAKE_CTEST_COMMAND} --parallel ${N} --output-on-failure + COMMAND cd ${PROJECT_BINARY_DIR}/build_coverage32 && ${CMAKE_CTEST_COMMAND} --parallel ${N} --output-on-failure --output-junit "../my_logs/ci_test_coverage_2_junit.xml" COMMAND ${LCOV_TOOL} --directory . --capture --output-file json.info --rc branch_coverage=1 --rc geninfo_unexecuted_blocks=1 --ignore-errors mismatch --ignore-errors unused COMMAND ${LCOV_TOOL} -e json.info ${SRC_FILES} --output-file json.info.filtered --rc branch_coverage=1 --ignore-errors unused @@ -266,7 +266,7 @@ add_custom_target(ci_test_clang_sanitizer -DJSON_BuildTests=ON -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_clang_sanitizer COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_clang_sanitizer - COMMAND cd ${PROJECT_BINARY_DIR}/build_clang_sanitizer && ${CMAKE_CTEST_COMMAND} --parallel ${N} --output-on-failure + COMMAND cd ${PROJECT_BINARY_DIR}/build_clang_sanitizer && ${CMAKE_CTEST_COMMAND} --parallel ${N} --output-on-failure --output-junit "../my_logs/ci_test_clang_sanitizer_junit.xml" COMMENT "Compile and test with sanitizers" ) @@ -317,7 +317,7 @@ add_custom_target(ci_test_single_header -DJSON_BuildTests=ON -DJSON_MultipleHeaders=OFF -DJSON_FastTests=ON -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_single_header COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_single_header - COMMAND cd ${PROJECT_BINARY_DIR}/build_single_header && ${CMAKE_CTEST_COMMAND} --parallel ${N} --output-on-failure + COMMAND cd 
${PROJECT_BINARY_DIR}/build_single_header && ${CMAKE_CTEST_COMMAND} --parallel ${N} --output-on-failure --output-junit "../my_logs/ci_test_single_header_junit.xml" COMMENT "Compile and test single-header version" ) @@ -331,7 +331,7 @@ add_custom_target(ci_test_valgrind -DJSON_BuildTests=ON -DJSON_Valgrind=ON -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_valgrind COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_valgrind - COMMAND cd ${PROJECT_BINARY_DIR}/build_valgrind && ${CMAKE_CTEST_COMMAND} -L valgrind --parallel ${N} --output-on-failure + COMMAND cd ${PROJECT_BINARY_DIR}/build_valgrind && ${CMAKE_CTEST_COMMAND} -L valgrind --parallel ${N} --output-on-failure --output-junit "../my_logs/ci_test_valgrind_junit.xml" COMMENT "Compile and test with Valgrind" ) @@ -454,13 +454,13 @@ add_custom_target(ci_infer add_custom_target(ci_offline_testdata COMMAND mkdir -p ${PROJECT_BINARY_DIR}/build_offline_testdata/test_data - COMMAND cd ${PROJECT_BINARY_DIR}/build_offline_testdata/test_data && ${GIT_TOOL} clone -c advice.detachedHead=false --branch v3.1.0 https://github.com/nlohmann/json_test_data.git --quiet --depth 1 + COMMAND cd ${PROJECT_BINARY_DIR}/build_offline_testdata/test_data && ${GIT_TOOL} clone --branch json_test_data_version_3_1_0_mirror https://github.com/eclipse-score/inc_nlohmann_json.git --quiet --depth 1 json_test_data COMMAND ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug -GNinja -DJSON_BuildTests=ON -DJSON_FastTests=ON -DJSON_TestDataDirectory=${PROJECT_BINARY_DIR}/build_offline_testdata/test_data/json_test_data -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_offline_testdata COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_offline_testdata - COMMAND cd ${PROJECT_BINARY_DIR}/build_offline_testdata && ${CMAKE_CTEST_COMMAND} --parallel ${N} --output-on-failure + COMMAND cd ${PROJECT_BINARY_DIR}/build_offline_testdata && ${CMAKE_CTEST_COMMAND} --parallel ${N} --output-on-failure --output-junit 
"../my_logs/ci_offline_testdata_junit.xml" COMMENT "Check code with previously downloaded test data" ) @@ -477,7 +477,7 @@ add_custom_target(ci_non_git_tests -DJSON_BuildTests=ON -DJSON_FastTests=ON -S${PROJECT_BINARY_DIR}/build_non_git_tests/sources -B${PROJECT_BINARY_DIR}/build_non_git_tests COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_non_git_tests - COMMAND cd ${PROJECT_BINARY_DIR}/build_non_git_tests && ${CMAKE_CTEST_COMMAND} --parallel ${N} -LE git_required --output-on-failure + COMMAND cd ${PROJECT_BINARY_DIR}/build_non_git_tests && ${CMAKE_CTEST_COMMAND} --parallel ${N} -LE git_required --output-on-failure --output-junit "../my_logs/ci_non_git_tests_junit.xml" COMMENT "Check code when project was not checked out from Git" ) @@ -491,7 +491,7 @@ add_custom_target(ci_reproducible_tests -DJSON_BuildTests=ON -DJSON_FastTests=ON -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_reproducible_tests COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_reproducible_tests - COMMAND cd ${PROJECT_BINARY_DIR}/build_reproducible_tests && ${CMAKE_CTEST_COMMAND} --parallel ${N} -LE not_reproducible --output-on-failure + COMMAND cd ${PROJECT_BINARY_DIR}/build_reproducible_tests && ${CMAKE_CTEST_COMMAND} --parallel ${N} -LE not_reproducible --output-on-failure --output-junit "../my_logs/ci_reproducible_tests_junit.xml" COMMENT "Check code and exclude tests that change installed files" ) @@ -629,7 +629,7 @@ foreach(COMPILER g++-4.8 g++-4.9 g++-5 g++-6 g++-7 g++-8 g++-9 g++-10 g++-11 cla -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_compiler_${COMPILER} ${ADDITIONAL_FLAGS} COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_compiler_${COMPILER} - COMMAND cd ${PROJECT_BINARY_DIR}/build_compiler_${COMPILER} && ${CMAKE_CTEST_COMMAND} --parallel ${N} --exclude-regex "test-unicode" --output-on-failure + COMMAND cd ${PROJECT_BINARY_DIR}/build_compiler_${COMPILER} && ${CMAKE_CTEST_COMMAND} --parallel ${N} --exclude-regex "test-unicode" 
--output-on-failure --output-junit "../my_logs/ci_test_compiler_${COMPILER}_junit.xml" COMMENT "Compile and test with ${COMPILER}" ) endif() @@ -643,7 +643,7 @@ add_custom_target(ci_test_compiler_default -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_compiler_default ${ADDITIONAL_FLAGS} COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_compiler_default --parallel ${N} - COMMAND cd ${PROJECT_BINARY_DIR}/build_compiler_default && ${CMAKE_CTEST_COMMAND} --parallel ${N} --exclude-regex "test-unicode" -LE git_required --output-on-failure + COMMAND cd ${PROJECT_BINARY_DIR}/build_compiler_default && ${CMAKE_CTEST_COMMAND} --parallel ${N} --exclude-regex "test-unicode" -LE git_required --output-on-failure --output-junit "../my_logs/ci_test_compiler_default_junit.xml" COMMENT "Compile and test with default C++ compiler" ) @@ -670,7 +670,7 @@ add_custom_target(ci_icpc -DJSON_BuildTests=ON -DJSON_FastTests=ON -S${PROJECT_SOURCE_DIR} -B${PROJECT_BINARY_DIR}/build_icpc COMMAND ${CMAKE_COMMAND} --build ${PROJECT_BINARY_DIR}/build_icpc - COMMAND cd ${PROJECT_BINARY_DIR}/build_icpc && ${CMAKE_CTEST_COMMAND} --parallel ${N} --exclude-regex "test-unicode" --output-on-failure + COMMAND cd ${PROJECT_BINARY_DIR}/build_icpc && ${CMAKE_CTEST_COMMAND} --parallel ${N} --exclude-regex "test-unicode" --output-on-failure --output-junit "../my_logs/ci_icpc_junit.xml" COMMENT "Compile and test with ICPC" ) diff --git a/cmake/download_test_data.cmake b/cmake/download_test_data.cmake index 1bb998dae6..84bb5e3afd 100644 --- a/cmake/download_test_data.cmake +++ b/cmake/download_test_data.cmake @@ -1,5 +1,5 @@ -set(JSON_TEST_DATA_URL https://github.com/nlohmann/json_test_data) -set(JSON_TEST_DATA_VERSION 3.1.0) +set(JSON_TEST_DATA_URL https://github.com/eclipse-score/inc_nlohmann_json) +set(JSON_TEST_DATA_BRANCH json_test_data_version_3_1_0_mirror) # if variable is set, use test data from given directory rather than downloading them if(JSON_TestDataDirectory) @@ -10,8 +10,8 @@ 
else() find_package(Git) # target to download test data add_custom_target(download_test_data - COMMAND test -d json_test_data || ${GIT_EXECUTABLE} clone -c advice.detachedHead=false --branch v${JSON_TEST_DATA_VERSION} ${JSON_TEST_DATA_URL}.git --quiet --depth 1 - COMMENT "Downloading test data from ${JSON_TEST_DATA_URL} (v${JSON_TEST_DATA_VERSION})" + COMMAND test -d json_test_data || ${GIT_EXECUTABLE} clone --branch ${JSON_TEST_DATA_BRANCH} ${JSON_TEST_DATA_URL}.git --quiet --depth 1 json_test_data + COMMENT "Downloading test data from ${JSON_TEST_DATA_URL} (branch: ${JSON_TEST_DATA_BRANCH})" WORKING_DIRECTORY ${CMAKE_BINARY_DIR} ) # create a header with the path to the downloaded test data diff --git a/docs/mkdocs/docs/integration/package_managers.md b/docs/mkdocs/docs/integration/package_managers.md index 1fc2460d76..78ddc30ddf 100644 --- a/docs/mkdocs/docs/integration/package_managers.md +++ b/docs/mkdocs/docs/integration/package_managers.md @@ -1,20 +1,20 @@ # Package Managers
-![Homebrew](../images/package_managers/homebrew.svg){: style="height:1em"} [**Homebrew**](#homebrew) `nlohmann-json`   -![Meson](../images/package_managers/meson.svg){: style="height:1em"} [**Meson**](#meson) `nlohmann_json`   -![Bazel](../images/package_managers/bazel.svg){: style="height:1em"} [**Bazel**](#bazel) `nlohmann_json`
-![Conan](../images/package_managers/conan.svg){: style="height:1em"} [**Conan**](#conan) `nlohmann_json`   -![Spack](../images/package_managers/spack.svg){: style="height:1em"} [**Spack**](#spack) `nlohmann-json`   +![Homebrew](../images/package_managers/homebrew.svg) [**Homebrew**](#homebrew) `nlohmann-json`   +![Meson](../images/package_managers/meson.svg) [**Meson**](#meson) `nlohmann_json`   +![Bazel](../images/package_managers/bazel.svg) [**Bazel**](#bazel) `nlohmann_json`
+![Conan](../images/package_managers/conan.svg) [**Conan**](#conan) `nlohmann_json`   +![Spack](../images/package_managers/spack.svg) [**Spack**](#spack) `nlohmann-json`   [**Hunter**](#hunter) `nlohmann_json`
-![vcpkg](../images/package_managers/vcpkg.png){: style="height:1em"} [**vcpkg**](#vcpkg) `nlohmann-json`   +![vcpkg](../images/package_managers/vcpkg.png) [**vcpkg**](#vcpkg) `nlohmann-json`   [**cget**](#cget) `nlohmann/json`   -![Swift Package Manager](../images/package_managers/swift.svg){: style="height:1em"} [**Swift Package Manager**](#swift-package-manager) `nlohmann/json`
-![Nuget](../images/package_managers/nuget.svg){: style="height:1em"} [**NuGet**](#nuget) `nlohmann.json`   -![Conda](../images/package_managers/conda.svg){: style="height:1em"} [**Conda**](#conda) `nlohmann_json`   -![MacPorts](../images/package_managers/macports.svg){: style="height:1em"} [**MacPorts**](#macports) `nlohmann-json`
-![cpm.cmake](../images/package_managers/CPM.png){: style="height:1em"} [**CPM.cmake**](#cpmcmake) `gh:nlohmann/json` -![xmake](../images/package_managers/xmake.svg){: style="height:1em"} [**xmake**](#xmake) `nlohmann_json` +![Swift Package Manager](../images/package_managers/swift.svg) [**Swift Package Manager**](#swift-package-manager) `nlohmann/json`
+![Nuget](../images/package_managers/nuget.svg) [**NuGet**](#nuget) `nlohmann.json`   +![Conda](../images/package_managers/conda.svg) [**Conda**](#conda) `nlohmann_json`   +![MacPorts](../images/package_managers/macports.svg) [**MacPorts**](#macports) `nlohmann-json`
+![cpm.cmake](../images/package_managers/CPM.png) [**CPM.cmake**](#cpmcmake) `gh:nlohmann/json` +![xmake](../images/package_managers/xmake.svg) [**xmake**](#xmake) `nlohmann_json`
## Running example diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index b1dc5df104..5ddafc5d3b 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -149,6 +149,8 @@ message(STATUS "${msg}") json_test_should_build_32bit_test(json_32bit_test json_32bit_test_only "${JSON_32bitTest}") file(GLOB files src/unit-*.cpp) +file(GLOB s-core_tests ../TSF/tests/unit-*.cpp) +list(APPEND files ${s-core_tests}) if(json_32bit_test_only) set(files src/unit-32bit.cpp) elseif(NOT json_32bit_test) diff --git a/tests/src/unit-regression2.cpp b/tests/src/unit-regression2.cpp index 2c3977fef9..bce447e363 100644 --- a/tests/src/unit-regression2.cpp +++ b/tests/src/unit-regression2.cpp @@ -388,7 +388,7 @@ struct Example_3810 Example_3810() = default; }; -NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(Example_3810, bla); // NOLINT(misc-use-internal-linkage) +NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(Example_3810, bla) // NOLINT(misc-use-internal-linkage) TEST_CASE("regression tests 2") { diff --git a/tests/src/unit-user_defined_input.cpp b/tests/src/unit-user_defined_input.cpp index befc4b17af..5115e8fd30 100644 --- a/tests/src/unit-user_defined_input.cpp +++ b/tests/src/unit-user_defined_input.cpp @@ -60,12 +60,12 @@ TEST_CASE("Custom container member begin/end") { const char* data; - const char* begin() const + const char* begin() const noexcept { return data; } - const char* end() const + const char* end() const noexcept { return data + strlen(data); // NOLINT(cppcoreguidelines-pro-bounds-pointer-arithmetic) }